author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/char
parent    Initial commit. (diff)
download  linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
          linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/Kconfig  425
-rw-r--r--  drivers/char/Makefile  46
-rw-r--r--  drivers/char/adi.c  239
-rw-r--r--  drivers/char/agp/Kconfig  156
-rw-r--r--  drivers/char/agp/Makefile  26
-rw-r--r--  drivers/char/agp/agp.h  301
-rw-r--r--  drivers/char/agp/ali-agp.c  422
-rw-r--r--  drivers/char/agp/alpha-agp.c  220
-rw-r--r--  drivers/char/agp/amd-k7-agp.c  552
-rw-r--r--  drivers/char/agp/amd64-agp.c  805
-rw-r--r--  drivers/char/agp/ati-agp.c  576
-rw-r--r--  drivers/char/agp/backend.c  368
-rw-r--r--  drivers/char/agp/compat_ioctl.c  291
-rw-r--r--  drivers/char/agp/compat_ioctl.h  106
-rw-r--r--  drivers/char/agp/efficeon-agp.c  468
-rw-r--r--  drivers/char/agp/frontend.c  1068
-rw-r--r--  drivers/char/agp/generic.c  1416
-rw-r--r--  drivers/char/agp/hp-agp.c  550
-rw-r--r--  drivers/char/agp/i460-agp.c  659
-rw-r--r--  drivers/char/agp/intel-agp.c  923
-rw-r--r--  drivers/char/agp/intel-agp.h  194
-rw-r--r--  drivers/char/agp/intel-gtt.c  1464
-rw-r--r--  drivers/char/agp/isoch.c  465
-rw-r--r--  drivers/char/agp/nvidia-agp.c  467
-rw-r--r--  drivers/char/agp/parisc-agp.c  435
-rw-r--r--  drivers/char/agp/sis-agp.c  436
-rw-r--r--  drivers/char/agp/sworks-agp.c  568
-rw-r--r--  drivers/char/agp/uninorth-agp.c  729
-rw-r--r--  drivers/char/agp/via-agp.c  579
-rw-r--r--  drivers/char/apm-emulation.c  726
-rw-r--r--  drivers/char/applicom.c  855
-rw-r--r--  drivers/char/applicom.h  86
-rw-r--r--  drivers/char/bsr.c  346
-rw-r--r--  drivers/char/ds1620.c  424
-rw-r--r--  drivers/char/dsp56k.c  534
-rw-r--r--  drivers/char/dtlk.c  663
-rw-r--r--  drivers/char/hangcheck-timer.c  175
-rw-r--r--  drivers/char/hpet.c  1047
-rw-r--r--  drivers/char/hw_random/Kconfig  591
-rw-r--r--  drivers/char/hw_random/Makefile  51
-rw-r--r--  drivers/char/hw_random/amd-rng.c  220
-rw-r--r--  drivers/char/hw_random/arm_smccc_trng.c  121
-rw-r--r--  drivers/char/hw_random/atmel-rng.c  233
-rw-r--r--  drivers/char/hw_random/ba431-rng.c  219
-rw-r--r--  drivers/char/hw_random/bcm2835-rng.c  215
-rw-r--r--  drivers/char/hw_random/cavium-rng-vf.c  269
-rw-r--r--  drivers/char/hw_random/cavium-rng.c  91
-rw-r--r--  drivers/char/hw_random/cctrng.c  665
-rw-r--r--  drivers/char/hw_random/cctrng.h  72
-rw-r--r--  drivers/char/hw_random/cn10k-rng.c  228
-rw-r--r--  drivers/char/hw_random/core.c  734
-rw-r--r--  drivers/char/hw_random/exynos-trng.c  232
-rw-r--r--  drivers/char/hw_random/geode-rng.c  161
-rw-r--r--  drivers/char/hw_random/hisi-rng.c  121
-rw-r--r--  drivers/char/hw_random/histb-rng.c  173
-rw-r--r--  drivers/char/hw_random/imx-rngc.c  331
-rw-r--r--  drivers/char/hw_random/ingenic-rng.c  149
-rw-r--r--  drivers/char/hw_random/ingenic-trng.c  130
-rw-r--r--  drivers/char/hw_random/intel-rng.c  418
-rw-r--r--  drivers/char/hw_random/iproc-rng200.c  246
-rw-r--r--  drivers/char/hw_random/ixp4xx-rng.c  76
-rw-r--r--  drivers/char/hw_random/jh7110-trng.c  393
-rw-r--r--  drivers/char/hw_random/ks-sa-rng.c  285
-rw-r--r--  drivers/char/hw_random/meson-rng.c  78
-rw-r--r--  drivers/char/hw_random/mpfs-rng.c  103
-rw-r--r--  drivers/char/hw_random/mtk-rng.c  201
-rw-r--r--  drivers/char/hw_random/mxc-rnga.c  209
-rw-r--r--  drivers/char/hw_random/n2-asm.S  80
-rw-r--r--  drivers/char/hw_random/n2-drv.c  870
-rw-r--r--  drivers/char/hw_random/n2rng.h  152
-rw-r--r--  drivers/char/hw_random/nomadik-rng.c  91
-rw-r--r--  drivers/char/hw_random/npcm-rng.c  188
-rw-r--r--  drivers/char/hw_random/octeon-rng.c  118
-rw-r--r--  drivers/char/hw_random/omap-rng.c  569
-rw-r--r--  drivers/char/hw_random/omap3-rom-rng.c  181
-rw-r--r--  drivers/char/hw_random/optee-rng.c  305
-rw-r--r--  drivers/char/hw_random/pasemi-rng.c  118
-rw-r--r--  drivers/char/hw_random/pic32-rng.c  120
-rw-r--r--  drivers/char/hw_random/powernv-rng.c  71
-rw-r--r--  drivers/char/hw_random/pseries-rng.c  92
-rw-r--r--  drivers/char/hw_random/s390-trng.c  256
-rw-r--r--  drivers/char/hw_random/st-rng.c  124
-rw-r--r--  drivers/char/hw_random/stm32-rng.c  222
-rw-r--r--  drivers/char/hw_random/timeriomem-rng.c  205
-rw-r--r--  drivers/char/hw_random/via-rng.c  227
-rw-r--r--  drivers/char/hw_random/virtio-rng.c  264
-rw-r--r--  drivers/char/hw_random/xgene-rng.c  391
-rw-r--r--  drivers/char/hw_random/xiphera-trng.c  147
-rw-r--r--  drivers/char/ipmi/Kconfig  190
-rw-r--r--  drivers/char/ipmi/Makefile  33
-rw-r--r--  drivers/char/ipmi/bt-bmc.c  493
-rw-r--r--  drivers/char/ipmi/ipmb_dev_int.c  377
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c  696
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c  910
-rw-r--r--  drivers/char/ipmi/ipmi_dmi.c  223
-rw-r--r--  drivers/char/ipmi/ipmi_dmi.h  10
-rw-r--r--  drivers/char/ipmi/ipmi_ipmb.c  583
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c  542
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c  5573
-rw-r--r--  drivers/char/ipmi/ipmi_plat_data.c  124
-rw-r--r--  drivers/char/ipmi/ipmi_plat_data.h  25
-rw-r--r--  drivers/char/ipmi/ipmi_powernv.c  316
-rw-r--r--  drivers/char/ipmi/ipmi_poweroff.c  724
-rw-r--r--  drivers/char/ipmi/ipmi_si.h  107
-rw-r--r--  drivers/char/ipmi/ipmi_si_hardcode.c  153
-rw-r--r--  drivers/char/ipmi/ipmi_si_hotmod.c  237
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c  2307
-rw-r--r--  drivers/char/ipmi/ipmi_si_mem_io.c  146
-rw-r--r--  drivers/char/ipmi/ipmi_si_parisc.c  61
-rw-r--r--  drivers/char/ipmi/ipmi_si_pci.c  160
-rw-r--r--  drivers/char/ipmi/ipmi_si_platform.c  471
-rw-r--r--  drivers/char/ipmi/ipmi_si_port_io.c  114
-rw-r--r--  drivers/char/ipmi/ipmi_si_sm.h  104
-rw-r--r--  drivers/char/ipmi/ipmi_smic_sm.c  585
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c  2162
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c  1344
-rw-r--r--  drivers/char/ipmi/kcs_bmc.c  190
-rw-r--r--  drivers/char/ipmi/kcs_bmc.h  46
-rw-r--r--  drivers/char/ipmi/kcs_bmc_aspeed.c  684
-rw-r--r--  drivers/char/ipmi/kcs_bmc_cdev_ipmi.c  568
-rw-r--r--  drivers/char/ipmi/kcs_bmc_client.h  45
-rw-r--r--  drivers/char/ipmi/kcs_bmc_device.h  22
-rw-r--r--  drivers/char/ipmi/kcs_bmc_npcm7xx.c  253
-rw-r--r--  drivers/char/ipmi/kcs_bmc_serio.c  159
-rw-r--r--  drivers/char/ipmi/ssif_bmc.c  873
-rw-r--r--  drivers/char/lp.c  1126
-rw-r--r--  drivers/char/mem.c  783
-rw-r--r--  drivers/char/misc.c  309
-rw-r--r--  drivers/char/mspec.c  295
-rw-r--r--  drivers/char/mwave/3780i.c  738
-rw-r--r--  drivers/char/mwave/3780i.h  358
-rw-r--r--  drivers/char/mwave/Makefile  16
-rw-r--r--  drivers/char/mwave/README  47
-rw-r--r--  drivers/char/mwave/mwavedd.c  703
-rw-r--r--  drivers/char/mwave/mwavedd.h  152
-rw-r--r--  drivers/char/mwave/mwavepub.h  89
-rw-r--r--  drivers/char/mwave/smapi.c  572
-rw-r--r--  drivers/char/mwave/smapi.h  76
-rw-r--r--  drivers/char/mwave/tp3780i.c  574
-rw-r--r--  drivers/char/mwave/tp3780i.h  103
-rw-r--r--  drivers/char/nsc_gpio.c  140
-rw-r--r--  drivers/char/nvram.c  545
-rw-r--r--  drivers/char/nwbutton.c  247
-rw-r--r--  drivers/char/nwbutton.h  40
-rw-r--r--  drivers/char/nwflash.c  626
-rw-r--r--  drivers/char/pc8736x_gpio.c  353
-rw-r--r--  drivers/char/powernv-op-panel.c  225
-rw-r--r--  drivers/char/ppdev.c  879
-rw-r--r--  drivers/char/ps3flash.c  445
-rw-r--r--  drivers/char/random.c  1699
-rw-r--r--  drivers/char/scx200_gpio.c  133
-rw-r--r--  drivers/char/sonypi.c  1548
-rw-r--r--  drivers/char/tlclk.c  939
-rw-r--r--  drivers/char/toshiba.c  520
-rw-r--r--  drivers/char/tpm/Kconfig  214
-rw-r--r--  drivers/char/tpm/Makefile  44
-rw-r--r--  drivers/char/tpm/eventlog/acpi.c  171
-rw-r--r--  drivers/char/tpm/eventlog/common.c  189
-rw-r--r--  drivers/char/tpm/eventlog/common.h  35
-rw-r--r--  drivers/char/tpm/eventlog/efi.c  125
-rw-r--r--  drivers/char/tpm/eventlog/of.c  108
-rw-r--r--  drivers/char/tpm/eventlog/tpm1.c  296
-rw-r--r--  drivers/char/tpm/eventlog/tpm2.c  159
-rw-r--r--  drivers/char/tpm/st33zp24/Kconfig  30
-rw-r--r--  drivers/char/tpm/st33zp24/Makefile  13
-rw-r--r--  drivers/char/tpm/st33zp24/i2c.c  173
-rw-r--r--  drivers/char/tpm/st33zp24/spi.c  290
-rw-r--r--  drivers/char/tpm/st33zp24/st33zp24.c  588
-rw-r--r--  drivers/char/tpm/st33zp24/st33zp24.h  41
-rw-r--r--  drivers/char/tpm/tpm-chip.c  679
-rw-r--r--  drivers/char/tpm/tpm-dev-common.c  284
-rw-r--r--  drivers/char/tpm/tpm-dev.c  68
-rw-r--r--  drivers/char/tpm/tpm-dev.h  33
-rw-r--r--  drivers/char/tpm/tpm-interface.c  530
-rw-r--r--  drivers/char/tpm/tpm-sysfs.c  529
-rw-r--r--  drivers/char/tpm/tpm.h  320
-rw-r--r--  drivers/char/tpm/tpm1-cmd.c  813
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c  789
-rw-r--r--  drivers/char/tpm/tpm2-space.c  641
-rw-r--r--  drivers/char/tpm/tpm_atmel.c  235
-rw-r--r--  drivers/char/tpm/tpm_atmel.h  140
-rw-r--r--  drivers/char/tpm/tpm_crb.c  858
-rw-r--r--  drivers/char/tpm/tpm_ftpm_tee.c  419
-rw-r--r--  drivers/char/tpm/tpm_ftpm_tee.h  40
-rw-r--r--  drivers/char/tpm/tpm_i2c_atmel.c  219
-rw-r--r--  drivers/char/tpm/tpm_i2c_infineon.c  732
-rw-r--r--  drivers/char/tpm/tpm_i2c_nuvoton.c  666
-rw-r--r--  drivers/char/tpm/tpm_ibmvtpm.c  757
-rw-r--r--  drivers/char/tpm/tpm_ibmvtpm.h  73
-rw-r--r--  drivers/char/tpm/tpm_infineon.c  625
-rw-r--r--  drivers/char/tpm/tpm_nsc.c  416
-rw-r--r--  drivers/char/tpm/tpm_ppi.c  389
-rw-r--r--  drivers/char/tpm/tpm_tis.c  434
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c  1367
-rw-r--r--  drivers/char/tpm/tpm_tis_core.h  228
-rw-r--r--  drivers/char/tpm/tpm_tis_i2c.c  404
-rw-r--r--  drivers/char/tpm/tpm_tis_i2c_cr50.c  795
-rw-r--r--  drivers/char/tpm/tpm_tis_spi.h  49
-rw-r--r--  drivers/char/tpm/tpm_tis_spi_cr50.c  342
-rw-r--r--  drivers/char/tpm/tpm_tis_spi_main.c  359
-rw-r--r--  drivers/char/tpm/tpm_tis_synquacer.c  167
-rw-r--r--  drivers/char/tpm/tpm_vtpm_proxy.c  717
-rw-r--r--  drivers/char/tpm/tpmrm-dev.c  55
-rw-r--r--  drivers/char/tpm/xen-tpmfront.c  441
-rw-r--r--  drivers/char/ttyprintk.c  231
-rw-r--r--  drivers/char/uv_mmtimer.c  220
-rw-r--r--  drivers/char/virtio_console.c  2291
-rw-r--r--  drivers/char/xilinx_hwicap/Makefile  8
-rw-r--r--  drivers/char/xilinx_hwicap/buffer_icap.c  361
-rw-r--r--  drivers/char/xilinx_hwicap/buffer_icap.h  54
-rw-r--r--  drivers/char/xilinx_hwicap/fifo_icap.c  393
-rw-r--r--  drivers/char/xilinx_hwicap/fifo_icap.h  59
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.c  893
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.h  224
-rw-r--r--  drivers/char/xillybus/Kconfig  52
-rw-r--r--  drivers/char/xillybus/Makefile  10
-rw-r--r--  drivers/char/xillybus/xillybus.h  126
-rw-r--r--  drivers/char/xillybus/xillybus_class.c  256
-rw-r--r--  drivers/char/xillybus/xillybus_class.h  30
-rw-r--r--  drivers/char/xillybus/xillybus_core.c  1991
-rw-r--r--  drivers/char/xillybus/xillybus_of.c  86
-rw-r--r--  drivers/char/xillybus/xillybus_pcie.c  127
-rw-r--r--  drivers/char/xillybus/xillyusb.c  2277
223 files changed, 95284 insertions, 0 deletions
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
new file mode 100644
index 0000000000..625af75833
--- /dev/null
+++ b/drivers/char/Kconfig
@@ -0,0 +1,425 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Character device configuration
+#
+
+menu "Character devices"
+
+source "drivers/tty/Kconfig"
+
+config TTY_PRINTK
+ tristate "TTY driver to output user messages via printk"
+ depends on EXPERT && TTY
+ default n
+ help
+ If you say Y here, support for writing user messages (i.e.
+ console messages) via printk becomes available.
+
+ The feature is useful for interleaving user messages with kernel
+ messages.
+ In order to use this feature, you should output user messages
+ to /dev/ttyprintk or redirect console to this TTY, or boot
+ the kernel with console=ttyprintk.
+
+ If unsure, say N.
+
+config TTY_PRINTK_LEVEL
+ depends on TTY_PRINTK
+ int "ttyprintk log level (1-7)"
+ range 1 7
+ default "6"
+ help
+ Printk log level to use for ttyprintk messages.
+
+config PRINTER
+ tristate "Parallel printer support"
+ depends on PARPORT
+ depends on HAS_IOPORT || PARPORT_NOT_PC
+ help
+ If you intend to attach a printer to the parallel port of your Linux
+ box (as opposed to using a serial printer; if the connector at the
+ printer has 9 or 25 holes ["female"], then it's serial), say Y.
+ Also read the Printing-HOWTO, available from
+ <https://www.tldp.org/docs.html#howto>.
+
+ It is possible to share one parallel port among several devices
+ (e.g. printer and ZIP drive) and it is safe to compile the
+ corresponding drivers into the kernel.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/admin-guide/parport.rst>. The module will be called lp.
+
+ If you have several parallel ports, you can specify which ports to
+ use with the "lp" kernel command line option. (Try "man bootparam"
+ or see the documentation of your boot loader (lilo or loadlin) about
+ how to pass options to the kernel at boot time.) The syntax of the
+ "lp" command line option can be found in <file:drivers/char/lp.c>.
+
+ If you have more than 8 printers, you need to increase the LP_NO
+ macro in lp.c and the PARPORT_MAX macro in parport.h.
+
+config LP_CONSOLE
+ bool "Support for console on line printer"
+ depends on PRINTER
+ help
+ If you want kernel messages to be printed out as they occur, you
+ can have a console on the printer. This option adds support for
+ doing that; to actually get it to happen you need to pass the
+ option "console=lp0" to the kernel at boot time.
+
+ If the printer is out of paper (or off, or unplugged, or too
+ busy..) the kernel will stall until the printer is ready again.
+ By defining CONSOLE_LP_STRICT to 0 (at your own risk) you
+ can make the kernel continue when this happens,
+ but it'll lose the kernel messages.
+
+ If unsure, say N.
+
+config PPDEV
+ tristate "Support for user-space parallel port device drivers"
+ depends on PARPORT
+ help
+ Saying Y to this adds support for /dev/parport device nodes. This
+ is needed for programs that want portable access to the parallel
+ port, for instance deviceid (which displays Plug-and-Play device
+ IDs).
+
+ This is the parallel port equivalent of SCSI generic support (sg).
+ It is safe to say N to this -- it is not needed for normal printing
+ or parallel port CD-ROM/disk support.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ppdev.
+
+ If unsure, say N.
+
+config VIRTIO_CONSOLE
+ tristate "Virtio console"
+ depends on TTY
+ select HVC_DRIVER
+ select VIRTIO
+ help
+ Virtio console for use with hypervisors.
+
+ Also serves as a general-purpose serial device for data
+ transfer between the guest and host. Character devices at
+ /dev/vportNpn will be created when corresponding ports are
+ found, where N is the device number and n is the port number
+ within that device. If specified by the host, a sysfs
+ attribute called 'name' will be populated with a name for
+ the port which can be used by udev scripts to create a
+ symlink to the device.
+
+config IBM_BSR
+ tristate "IBM POWER Barrier Synchronization Register support"
+ depends on PPC_PSERIES
+ help
+ This device exposes a hardware mechanism for fast synchronization
+ of threads across a large system, which avoids bouncing a cacheline
+ between several cores on a system.
+
+config POWERNV_OP_PANEL
+ tristate "IBM POWERNV Operator Panel Display support"
+ depends on PPC_POWERNV
+ default m
+ help
+ If you say Y here, a special character device node, /dev/op_panel,
+ will be created which exposes the operator panel display on IBM
+ Power Systems machines with FSPs.
+
+ If you don't require access to the operator panel display from user
+ space, say N.
+
+ If unsure, say M here to build it as a module called powernv-op-panel.
+
+source "drivers/char/ipmi/Kconfig"
+
+config DS1620
+ tristate "NetWinder thermometer support"
+ depends on ARCH_NETWINDER
+ help
+ Say Y here to include support for the thermal management hardware
+ found in the NetWinder. This driver allows the user to control the
+ temperature set points and to read the current temperature.
+
+ It is also possible to say M here to build it as a module (ds1620).
+ It is recommended to be used on a NetWinder, but it is not a
+ necessity.
+
+config NWBUTTON
+ tristate "NetWinder Button"
+ depends on ARCH_NETWINDER
+ help
+ If you say Y here and create a character device node /dev/nwbutton
+ with major and minor numbers 10 and 158 ("man mknod"), then every
+ time the orange button is pressed a number of times, the number of
+ times the button was pressed will be written to that device.
+
+ This is most useful for applications, as yet unwritten, which
+ perform actions based on how many times the button is pressed in a
+ row.
+
+ Do not hold the button down for too long, as the driver does not
+ alter the behaviour of the hardware reset circuitry attached to the
+ button; it will still execute a hard reset if the button is held
+ down for longer than approximately five seconds.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nwbutton.
+
+ Most people will answer Y to this question and "Reboot Using Button"
+ below to be able to initiate a system shutdown from the button.
+
+config NWBUTTON_REBOOT
+ bool "Reboot Using Button"
+ depends on NWBUTTON
+ help
+ If you say Y here, then you will be able to initiate a system
+ shutdown and reboot by pressing the orange button a number of times.
+ The number of presses to initiate the shutdown is two by default,
+ but this can be altered by modifying the value of NUM_PRESSES_REBOOT
+ in nwbutton.h and recompiling the driver or, if you compile the
+ driver as a module, you can specify the number of presses at load
+ time with "insmod button reboot_count=<something>".
+
+config NWFLASH
+ tristate "NetWinder flash support"
+ depends on ARCH_NETWINDER
+ help
+ If you say Y here and create a character device /dev/flash with
+ major 10 and minor 160 you can manipulate the flash ROM containing
+ the NetWinder firmware. Be careful as accidentally overwriting the
+ flash contents can render your computer unbootable. On no account
+ allow random users access to this device. :-)
+
+ To compile this driver as a module, choose M here: the
+ module will be called nwflash.
+
+ If you're not sure, say N.
+
+source "drivers/char/hw_random/Kconfig"
+
+config DTLK
+ tristate "Double Talk PC internal speech card support"
+ depends on ISA
+ help
+ This driver is for the DoubleTalk PC, a speech synthesizer
+ manufactured by RC Systems (<https://www.rcsys.com/>). It is also
+ called the `internal DoubleTalk'.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dtlk.
+
+config XILINX_HWICAP
+ tristate "Xilinx HWICAP Support"
+ depends on MICROBLAZE
+ help
+ This option enables support for the Xilinx Internal Configuration
+ Access Port (ICAP) driver. The ICAP is used on Xilinx Virtex
+ FPGA platforms to partially reconfigure the FPGA at runtime.
+
+ If unsure, say N.
+
+config APPLICOM
+ tristate "Applicom intelligent fieldbus card support"
+ depends on PCI
+ help
+ This driver provides the kernel-side support for the intelligent
+ fieldbus cards made by Applicom International. More information
+ about these cards can be found on the WWW at the address
+ <https://www.applicom-int.com/>, or by email from David Woodhouse
+ <dwmw2@infradead.org>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called applicom.
+
+ If unsure, say N.
+
+config SONYPI
+ tristate "Sony Vaio Programmable I/O Control Device support"
+ depends on X86_32 && PCI && INPUT
+ help
+ This driver enables access to the Sony Programmable I/O Control
+ Device which can be found in many (all ?) Sony Vaio laptops.
+
+ If you have one of those laptops, read
+ <file:Documentation/admin-guide/laptops/sonypi.rst>, and say Y or M here.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sonypi.
+
+config MWAVE
+ tristate "ACP Modem (Mwave) support"
+ depends on X86 && TTY
+ select SERIAL_8250
+ help
+ The ACP modem (Mwave) for Linux is a WinModem. It is composed of a
+ kernel driver and a user level application. Together these components
+ support direct attachment to public switched telephone networks (PSTNs)
+ and are supported in selected countries worldwide.
+
+ This version of the ACP Modem driver supports the IBM Thinkpad 600E,
+ 600, and 770 that include on board ACP modem hardware.
+
+ The modem also supports the standard communications port interface
+ (ttySx) and is compatible with the Hayes AT Command Set.
+
+ The user level application needed to use this driver can be found at
+ the IBM Linux Technology Center (LTC) web site:
+ <http://www.ibm.com/linux/ltc/>.
+
+ If you own one of the above IBM Thinkpads which has the Mwave chipset
+ in it, say Y.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mwave.
+
+config SCx200_GPIO
+ tristate "NatSemi SCx200 GPIO Support"
+ depends on SCx200
+ select NSC_GPIO
+ help
+ Give userspace access to the GPIO pins on the National
+ Semiconductor SCx200 processors.
+
+ If compiled as a module, it will be called scx200_gpio.
+
+config PC8736x_GPIO
+ tristate "NatSemi PC8736x GPIO Support"
+ depends on X86_32 && !UML
+ default SCx200_GPIO # mostly N
+ select NSC_GPIO # needed for support routines
+ help
+ Give userspace access to the GPIO pins on the National
+ Semiconductor PC-8736x (x=[03456]) SuperIO chip. The chip
+ has multiple functional units, including several managed by the
+ hwmon/pc87360 driver. Tested with the PC-87366.
+
+ If compiled as a module, it will be called pc8736x_gpio.
+
+config NSC_GPIO
+ tristate "NatSemi Base GPIO Support"
+ depends on X86_32
+ # selected by SCx200_GPIO and PC8736x_GPIO
+ # what about 2 selectors differing: m != y
+ help
+ Common support used (and needed) by scx200_gpio and
+ pc8736x_gpio drivers. If those drivers are built as
+ modules, this one will be too, named nsc_gpio.
+
+config DEVMEM
+ bool "/dev/mem virtual device support"
+ default y
+ help
+ Say Y here if you want to support the /dev/mem device.
+ The /dev/mem device is used to access areas of physical
+ memory.
+ When in doubt, say "Y".
+
+config NVRAM
+ tristate "/dev/nvram support"
+ depends on X86 || HAVE_ARCH_NVRAM_OPS
+ default M68K || PPC
+ help
+ If you say Y here and create a character special file /dev/nvram
+ with major number 10 and minor number 144 using mknod ("man mknod"),
+ you get read and write access to the non-volatile memory.
+
+ /dev/nvram may be used to view settings in NVRAM or to change them
+ (with some utility). It could also be used to frequently
+ save a few bits of very important data that may not be lost over
+ power-off and for which writing to disk is too insecure. Note
+ however that most NVRAM space in a PC belongs to the BIOS and you
+ should NEVER idly tamper with it. See Ralf Brown's interrupt list
+ for a guide to the use of CMOS bytes by your BIOS.
+
+ This memory is conventionally called "NVRAM" on PowerPC machines,
+ "CMOS RAM" on PCs, "NVRAM" on Ataris and "PRAM" on Macintoshes.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nvram.
+
+config DEVPORT
+ bool "/dev/port character device"
+ depends on HAS_IOPORT
+ default y
+ help
+ Say Y here if you want to support the /dev/port device. The /dev/port
+ device is similar to /dev/mem, but for I/O ports.
+
+config HPET
+ bool "HPET - High Precision Event Timer" if (X86 || IA64)
+ default n
+ depends on ACPI
+ help
+ If you say Y here, you will have a miscdevice named "/dev/hpet/". Each
+ open selects one of the timers supported by the HPET. The timers are
+ non-periodic and/or periodic.
+
+config HPET_MMAP
+ bool "Allow mmap of HPET"
+ default y
+ depends on HPET
+ help
+ If you say Y here, user applications will be able to mmap
+ the HPET registers.
+
+config HPET_MMAP_DEFAULT
+ bool "Enable HPET MMAP access by default"
+ default y
+ depends on HPET_MMAP
+ help
+ In some hardware implementations, the page containing HPET
+ registers may also contain other things that shouldn't be
+ exposed to the user. This option selects the default (if
+ kernel parameter hpet_mmap is not set) user access to the
+ registers for applications that require it.
+
+config HANGCHECK_TIMER
+ tristate "Hangcheck timer"
+ depends on X86 || IA64 || PPC64 || S390
+ help
+ The hangcheck-timer module detects when the system has gone
+ out to lunch past a certain margin. It can reboot the system
+ or merely print a warning.
+
+config UV_MMTIMER
+ tristate "UV_MMTIMER Memory mapped RTC for SGI UV"
+ depends on X86_UV
+ default m
+ help
+ The uv_mmtimer device allows direct userspace access to the
+ UV system timer.
+
+source "drivers/char/tpm/Kconfig"
+
+config TELCLOCK
+ tristate "Telecom clock driver for ATCA SBC"
+ depends on X86
+ default n
+ help
+ The telecom clock device is specific to the MPCBL0010 and MPCBL0050
+ ATCA computers and allows direct userspace access to the
+ configuration of the telecom clock configuration settings. This
+ device is used for hardware synchronization across the ATCA backplane
+ fabric. Upon loading, the driver exports a sysfs directory,
+ /sys/devices/platform/telco_clock, with a number of files for
+ controlling the behavior of this hardware.
+
+source "drivers/s390/char/Kconfig"
+
+source "drivers/char/xillybus/Kconfig"
+
+config ADI
+ tristate "SPARC Privileged ADI driver"
+ depends on SPARC64
+ default m
+ help
+ SPARC M7 and newer processors utilize ADI (Application Data
+ Integrity) to version and protect memory. This driver provides
+ read/write access to the ADI versions for privileged processes.
+ This feature is also known as MCD (Memory Corruption Detection)
+ and SSM (Silicon Secured Memory). Intended consumers of this
+ driver include crash and makedumpfile.
+
+endmenu
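Several of the entries above describe plain byte-oriented character devices (/dev/mem, /dev/port, /dev/nvram, ...). As a purely illustrative sketch of the kind of access the NVRAM entry describes (assuming CONFIG_NVRAM is enabled and a /dev/nvram node, char 10:144 per the help text, exists), a userspace dump might look roughly like this; it is not part of the commit:

/* Illustrative sketch: hex-dump the contents of /dev/nvram (see the NVRAM
 * entry above). Assumes the device node exists and the caller may read it.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[128];
	ssize_t n, i;
	int fd;

	fd = open("/dev/nvram", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/nvram");
		return 1;
	}

	n = read(fd, buf, sizeof(buf));
	for (i = 0; i < n; i++)
		printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
	if (n > 0 && n % 16)
		putchar('\n');

	close(fd);
	return 0;
}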
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
new file mode 100644
index 0000000000..c5f532e412
--- /dev/null
+++ b/drivers/char/Makefile
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the kernel character device drivers.
+#
+
+obj-y += mem.o random.o
+obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
+obj-y += misc.o
+obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
+obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
+obj-$(CONFIG_MSPEC) += mspec.o
+obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
+obj-$(CONFIG_IBM_BSR) += bsr.o
+
+obj-$(CONFIG_PRINTER) += lp.o
+
+obj-$(CONFIG_APM_EMULATION) += apm-emulation.o
+
+obj-$(CONFIG_DTLK) += dtlk.o
+obj-$(CONFIG_APPLICOM) += applicom.o
+obj-$(CONFIG_SONYPI) += sonypi.o
+obj-$(CONFIG_HPET) += hpet.o
+obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/
+obj-$(CONFIG_NVRAM) += nvram.o
+obj-$(CONFIG_TOSHIBA) += toshiba.o
+obj-$(CONFIG_DS1620) += ds1620.o
+obj-$(CONFIG_HW_RANDOM) += hw_random/
+obj-$(CONFIG_PPDEV) += ppdev.o
+obj-$(CONFIG_NWBUTTON) += nwbutton.o
+obj-$(CONFIG_NWFLASH) += nwflash.o
+obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
+obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o
+obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o
+obj-$(CONFIG_TELCLOCK) += tlclk.o
+
+obj-$(CONFIG_MWAVE) += mwave/
+obj-y += agp/
+
+obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
+obj-$(CONFIG_TCG_TPM) += tpm/
+
+obj-$(CONFIG_PS3_FLASH) += ps3flash.o
+
+obj-$(CONFIG_XILLYBUS_CLASS) += xillybus/
+obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o
+obj-$(CONFIG_ADI) += adi.o
diff --git a/drivers/char/adi.c b/drivers/char/adi.c
new file mode 100644
index 0000000000..751d7cc0da
--- /dev/null
+++ b/drivers/char/adi.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Privileged ADI driver for sparc64
+ *
+ * Author: Tom Hromatka <tom.hromatka@oracle.com>
+ */
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <asm/asi.h>
+
+#define MAX_BUF_SZ PAGE_SIZE
+
+static int adi_open(struct inode *inode, struct file *file)
+{
+ file->f_mode |= FMODE_UNSIGNED_OFFSET;
+ return 0;
+}
+
+static int read_mcd_tag(unsigned long addr)
+{
+ long err;
+ int ver;
+
+ __asm__ __volatile__(
+ "1: ldxa [%[addr]] %[asi], %[ver]\n"
+ " mov 0, %[err]\n"
+ "2:\n"
+ " .section .fixup,#alloc,#execinstr\n"
+ " .align 4\n"
+ "3: sethi %%hi(2b), %%g1\n"
+ " jmpl %%g1 + %%lo(2b), %%g0\n"
+ " mov %[invalid], %[err]\n"
+ " .previous\n"
+ " .section __ex_table, \"a\"\n"
+ " .align 4\n"
+ " .word 1b, 3b\n"
+ " .previous\n"
+ : [ver] "=r" (ver), [err] "=r" (err)
+ : [addr] "r" (addr), [invalid] "i" (EFAULT),
+ [asi] "i" (ASI_MCD_REAL)
+ : "memory", "g1"
+ );
+
+ if (err)
+ return -EFAULT;
+ else
+ return ver;
+}
+
+static ssize_t adi_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offp)
+{
+ size_t ver_buf_sz, bytes_read = 0;
+ int ver_buf_idx = 0;
+ loff_t offset;
+ u8 *ver_buf;
+ ssize_t ret;
+
+ ver_buf_sz = min_t(size_t, count, MAX_BUF_SZ);
+ ver_buf = kmalloc(ver_buf_sz, GFP_KERNEL);
+ if (!ver_buf)
+ return -ENOMEM;
+
+ offset = (*offp) * adi_blksize();
+
+ while (bytes_read < count) {
+ ret = read_mcd_tag(offset);
+ if (ret < 0)
+ goto out;
+
+ ver_buf[ver_buf_idx] = (u8)ret;
+ ver_buf_idx++;
+ offset += adi_blksize();
+
+ if (ver_buf_idx >= ver_buf_sz) {
+ if (copy_to_user(buf + bytes_read, ver_buf,
+ ver_buf_sz)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ bytes_read += ver_buf_sz;
+ ver_buf_idx = 0;
+
+ ver_buf_sz = min(count - bytes_read,
+ (size_t)MAX_BUF_SZ);
+ }
+ }
+
+ (*offp) += bytes_read;
+ ret = bytes_read;
+out:
+ kfree(ver_buf);
+ return ret;
+}
+
+static int set_mcd_tag(unsigned long addr, u8 ver)
+{
+ long err;
+
+ __asm__ __volatile__(
+ "1: stxa %[ver], [%[addr]] %[asi]\n"
+ " mov 0, %[err]\n"
+ "2:\n"
+ " .section .fixup,#alloc,#execinstr\n"
+ " .align 4\n"
+ "3: sethi %%hi(2b), %%g1\n"
+ " jmpl %%g1 + %%lo(2b), %%g0\n"
+ " mov %[invalid], %[err]\n"
+ " .previous\n"
+ " .section __ex_table, \"a\"\n"
+ " .align 4\n"
+ " .word 1b, 3b\n"
+ " .previous\n"
+ : [err] "=r" (err)
+ : [ver] "r" (ver), [addr] "r" (addr),
+ [invalid] "i" (EFAULT), [asi] "i" (ASI_MCD_REAL)
+ : "memory", "g1"
+ );
+
+ if (err)
+ return -EFAULT;
+ else
+ return ver;
+}
+
+static ssize_t adi_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ size_t ver_buf_sz, bytes_written = 0;
+ loff_t offset;
+ u8 *ver_buf;
+ ssize_t ret;
+ int i;
+
+ if (count <= 0)
+ return -EINVAL;
+
+ ver_buf_sz = min_t(size_t, count, MAX_BUF_SZ);
+ ver_buf = kmalloc(ver_buf_sz, GFP_KERNEL);
+ if (!ver_buf)
+ return -ENOMEM;
+
+ offset = (*offp) * adi_blksize();
+
+ do {
+ if (copy_from_user(ver_buf, &buf[bytes_written],
+ ver_buf_sz)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ for (i = 0; i < ver_buf_sz; i++) {
+ ret = set_mcd_tag(offset, ver_buf[i]);
+ if (ret < 0)
+ goto out;
+
+ offset += adi_blksize();
+ }
+
+ bytes_written += ver_buf_sz;
+ ver_buf_sz = min(count - bytes_written, (size_t)MAX_BUF_SZ);
+ } while (bytes_written < count);
+
+ (*offp) += bytes_written;
+ ret = bytes_written;
+out:
+ __asm__ __volatile__("membar #Sync");
+ kfree(ver_buf);
+ return ret;
+}
+
+static loff_t adi_llseek(struct file *file, loff_t offset, int whence)
+{
+ loff_t ret = -EINVAL;
+
+ switch (whence) {
+ case SEEK_END:
+ case SEEK_DATA:
+ case SEEK_HOLE:
+ /* unsupported */
+ return -EINVAL;
+ case SEEK_CUR:
+ if (offset == 0)
+ return file->f_pos;
+
+ offset += file->f_pos;
+ break;
+ case SEEK_SET:
+ break;
+ }
+
+ if (offset != file->f_pos) {
+ file->f_pos = offset;
+ file->f_version = 0;
+ ret = offset;
+ }
+
+ return ret;
+}
+
+static const struct file_operations adi_fops = {
+ .owner = THIS_MODULE,
+ .llseek = adi_llseek,
+ .open = adi_open,
+ .read = adi_read,
+ .write = adi_write,
+};
+
+static struct miscdevice adi_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = KBUILD_MODNAME,
+ .fops = &adi_fops,
+};
+
+static int __init adi_init(void)
+{
+ if (!adi_capable())
+ return -EPERM;
+
+ return misc_register(&adi_miscdev);
+}
+
+static void __exit adi_exit(void)
+{
+ misc_deregister(&adi_miscdev);
+}
+
+module_init(adi_init);
+module_exit(adi_exit);
+
+MODULE_AUTHOR("Tom Hromatka <tom.hromatka@oracle.com>");
+MODULE_DESCRIPTION("Privileged interface to ADI");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
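adi.c registers a misc character device named after KBUILD_MODNAME, so the node typically appears as /dev/adi, and its file position is counted in ADI blocks rather than bytes (note the (*offp) * adi_blksize() arithmetic in adi_read/adi_write and the restricted adi_llseek). A minimal userspace sketch of reading a few version tags, with the device path and buffer size chosen only for illustration, could look like this:

/* Illustrative only: reads a handful of ADI version tags through the misc
 * device registered by adi.c. Assumes the node is /dev/adi and that the
 * caller is privileged; the file position counts ADI blocks, not bytes.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint8_t tags[16];
	ssize_t n;
	int fd;

	fd = open("/dev/adi", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/adi");
		return 1;
	}

	/* A fresh descriptor starts at block 0; each byte read back is the
	 * version tag of one ADI block. */
	n = read(fd, tags, sizeof(tags));
	if (n < 0)
		perror("read");
	else
		for (ssize_t i = 0; i < n; i++)
			printf("block %zd: version %d\n", i, tags[i]);

	close(fd);
	return 0;
}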
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
new file mode 100644
index 0000000000..4f501e4842
--- /dev/null
+++ b/drivers/char/agp/Kconfig
@@ -0,0 +1,156 @@
+# SPDX-License-Identifier: GPL-2.0
+menuconfig AGP
+ tristate "/dev/agpgart (AGP Support)"
+ depends on ALPHA || IA64 || PARISC || PPC || X86
+ depends on PCI
+ help
+ AGP (Accelerated Graphics Port) is a bus system mainly used to
+ connect graphics cards to the rest of the system.
+
+ If you have an AGP system and you say Y here, it will be possible to
+ use the AGP features of your 3D rendering video card. This code acts
+ as a sort of "AGP driver" for the motherboard's chipset.
+
+ If you need more texture memory than you can get with the AGP GART
+ (theoretically up to 256 MB, but in practice usually 64 or 128 MB
+ due to kernel allocation issues), you could use PCI accesses
+ and have up to a couple gigs of texture space.
+
+ Note that this is the only means to have X/GLX use
+ write-combining with MTRR support on the AGP bus. Without it, OpenGL
+ direct rendering will be a lot slower but still faster than PIO.
+
+ To compile this driver as a module, choose M here: the
+ module will be called agpgart.
+
+ You should say Y here if you want to use GLX or DRI.
+
+ If unsure, say N.
+
+config AGP_ALI
+ tristate "ALI chipset support"
+ depends on AGP && X86_32
+ help
+ This option gives you AGP support for the GLX component of
+ X on the following ALi chipsets. The supported chipsets
+ include M1541, M1621, M1631, M1632, M1641, M1647, and M1651.
+ For the ALi-chipset question, ALi suggests you refer to
+ <http://www.ali.com.tw/>.
+
+ The M1541 chipset can do AGP 1x and 2x, but note that there is an
+ acknowledged incompatibility with Matrox G200 cards. Due to
+ timing issues, this chipset cannot do AGP 2x with the G200.
+ This is a hardware limitation. AGP 1x seems to be fine, though.
+
+config AGP_ATI
+ tristate "ATI chipset support"
+ depends on AGP && X86_32
+ help
+ This option gives you AGP support for the GLX component of
+ X on the ATI RadeonIGP family of chipsets.
+
+config AGP_AMD
+ tristate "AMD Irongate, 761, and 762 chipset support"
+ depends on AGP && X86_32
+ help
+ This option gives you AGP support for the GLX component of
+ X on AMD Irongate, 761, and 762 chipsets.
+
+config AGP_AMD64
+ tristate "AMD Opteron/Athlon64 on-CPU GART support"
+ depends on AGP && X86 && AMD_NB
+ help
+ This option gives you AGP support for the GLX component of
+ X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
+ You still need an external AGP bridge like the AMD 8151, VIA
+ K8T400M, SiS755. It may also support other AGP bridges when loaded
+ with agp_try_unsupported=1.
+
+config AGP_INTEL
+ tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support"
+ depends on AGP && X86
+ select INTEL_GTT
+ help
+ This option gives you AGP support for the GLX component of X
+ on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
+ E7205 and E7505 chipsets and full support for the 810, 815, 830M,
+ 845G, 852GM, 855GM, 865G and I915 integrated graphics chipsets.
+
+
+
+config AGP_NVIDIA
+ tristate "NVIDIA nForce/nForce2 chipset support"
+ depends on AGP && X86_32
+ help
+ This option gives you AGP support for the GLX component of
+ X on NVIDIA chipsets including nForce and nForce2.
+
+config AGP_SIS
+ tristate "SiS chipset support"
+ depends on AGP && X86
+ help
+ This option gives you AGP support for the GLX component of
+ X on Silicon Integrated Systems [SiS] chipsets.
+
+ Note that 5591/5592 AGP chipsets are NOT supported.
+
+
+config AGP_SWORKS
+ tristate "Serverworks LE/HE chipset support"
+ depends on AGP && X86_32
+ help
+ Say Y here to support the Serverworks AGP card. See
+ <http://www.serverworks.com/> for product descriptions and images.
+
+config AGP_VIA
+ tristate "VIA chipset support"
+ depends on AGP && X86
+ help
+ This option gives you AGP support for the GLX component of
+ X on VIA MVP3/Apollo Pro chipsets.
+
+config AGP_I460
+ tristate "Intel 460GX chipset support"
+ depends on AGP && IA64
+ help
+ This option gives you AGP GART support for the Intel 460GX chipset
+ for IA64 processors.
+
+config AGP_HP_ZX1
+ tristate "HP ZX1 chipset AGP support"
+ depends on AGP && IA64
+ help
+ This option gives you AGP GART support for the HP ZX1 chipset
+ for IA64 processors.
+
+config AGP_PARISC
+ tristate "HP Quicksilver AGP support"
+ depends on AGP && PARISC && 64BIT && IOMMU_SBA
+ help
+ This option gives you AGP GART support for the HP Quicksilver
+ AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000
+ workstation...)
+
+config AGP_ALPHA_CORE
+ tristate "Alpha AGP support"
+ depends on AGP && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL)
+ default AGP
+
+config AGP_UNINORTH
+ tristate "Apple UniNorth & U3 AGP support"
+ depends on AGP && PPC_PMAC
+ help
+ This option gives you AGP support for Apple machines with a
+ UniNorth or U3 (Apple G5) bridge.
+
+config AGP_EFFICEON
+ tristate "Transmeta Efficeon support"
+ depends on AGP && X86_32
+ help
+ This option gives you AGP support for the Transmeta Efficeon
+ series processors with integrated northbridges.
+
+config INTEL_GTT
+ tristate
+ depends on X86 && PCI
+
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
new file mode 100644
index 0000000000..90ed8c789e
--- /dev/null
+++ b/drivers/char/agp/Makefile
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0
+agpgart-y := backend.o generic.o isoch.o
+
+ifeq ($(CONFIG_DRM_LEGACY),y)
+agpgart-$(CONFIG_COMPAT) += compat_ioctl.o
+agpgart-y += frontend.o
+endif
+
+
+obj-$(CONFIG_AGP) += agpgart.o
+obj-$(CONFIG_AGP_ALI) += ali-agp.o
+obj-$(CONFIG_AGP_ATI) += ati-agp.o
+obj-$(CONFIG_AGP_AMD) += amd-k7-agp.o
+obj-$(CONFIG_AGP_AMD64) += amd64-agp.o
+obj-$(CONFIG_AGP_ALPHA_CORE) += alpha-agp.o
+obj-$(CONFIG_AGP_EFFICEON) += efficeon-agp.o
+obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
+obj-$(CONFIG_AGP_PARISC) += parisc-agp.o
+obj-$(CONFIG_AGP_I460) += i460-agp.o
+obj-$(CONFIG_AGP_INTEL) += intel-agp.o
+obj-$(CONFIG_INTEL_GTT) += intel-gtt.o
+obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o
+obj-$(CONFIG_AGP_SIS) += sis-agp.o
+obj-$(CONFIG_AGP_SWORKS) += sworks-agp.o
+obj-$(CONFIG_AGP_UNINORTH) += uninorth-agp.o
+obj-$(CONFIG_AGP_VIA) += via-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
new file mode 100644
index 0000000000..8771dcc9b8
--- /dev/null
+++ b/drivers/char/agp/agp.h
@@ -0,0 +1,301 @@
+/*
+ * AGPGART
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2004 Dave Jones
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AGP_BACKEND_PRIV_H
+#define _AGP_BACKEND_PRIV_H 1
+
+#include <asm/agp.h> /* for flush_agp_cache() */
+
+#define PFX "agpgart: "
+
+//#define AGP_DEBUG 1
+#ifdef AGP_DEBUG
+#define DBG(x,y...) printk (KERN_DEBUG PFX "%s: " x "\n", __func__ , ## y)
+#else
+#define DBG(x,y...) do { } while (0)
+#endif
+
+extern struct agp_bridge_data *agp_bridge;
+
+enum aper_size_type {
+ U8_APER_SIZE,
+ U16_APER_SIZE,
+ U32_APER_SIZE,
+ LVL2_APER_SIZE,
+ FIXED_APER_SIZE
+};
+
+struct gatt_mask {
+ unsigned long mask;
+ u32 type;
+ /* totally device specific, for integrated chipsets that
+ * might have different types of memory masks. For other
+ * devices this will probably be ignored */
+};
+
+#define AGP_PAGE_DESTROY_UNMAP 1
+#define AGP_PAGE_DESTROY_FREE 2
+
+struct aper_size_info_8 {
+ int size;
+ int num_entries;
+ int page_order;
+ u8 size_value;
+};
+
+struct aper_size_info_16 {
+ int size;
+ int num_entries;
+ int page_order;
+ u16 size_value;
+};
+
+struct aper_size_info_32 {
+ int size;
+ int num_entries;
+ int page_order;
+ u32 size_value;
+};
+
+struct aper_size_info_lvl2 {
+ int size;
+ int num_entries;
+ u32 size_value;
+};
+
+struct aper_size_info_fixed {
+ int size;
+ int num_entries;
+ int page_order;
+};
+
+struct agp_bridge_driver {
+ struct module *owner;
+ const void *aperture_sizes;
+ int num_aperture_sizes;
+ enum aper_size_type size_type;
+ bool cant_use_aperture;
+ bool needs_scratch_page;
+ const struct gatt_mask *masks;
+ int (*fetch_size)(void);
+ int (*configure)(void);
+ void (*agp_enable)(struct agp_bridge_data *, u32);
+ void (*cleanup)(void);
+ void (*tlb_flush)(struct agp_memory *);
+ unsigned long (*mask_memory)(struct agp_bridge_data *, dma_addr_t, int);
+ void (*cache_flush)(void);
+ int (*create_gatt_table)(struct agp_bridge_data *);
+ int (*free_gatt_table)(struct agp_bridge_data *);
+ int (*insert_memory)(struct agp_memory *, off_t, int);
+ int (*remove_memory)(struct agp_memory *, off_t, int);
+ struct agp_memory *(*alloc_by_type) (size_t, int);
+ void (*free_by_type)(struct agp_memory *);
+ struct page *(*agp_alloc_page)(struct agp_bridge_data *);
+ int (*agp_alloc_pages)(struct agp_bridge_data *, struct agp_memory *, size_t);
+ void (*agp_destroy_page)(struct page *, int flags);
+ void (*agp_destroy_pages)(struct agp_memory *);
+ int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
+};
+
+struct agp_bridge_data {
+ const struct agp_version *version;
+ const struct agp_bridge_driver *driver;
+ const struct vm_operations_struct *vm_ops;
+ void *previous_size;
+ void *current_size;
+ void *dev_private_data;
+ struct pci_dev *dev;
+ u32 __iomem *gatt_table;
+ u32 *gatt_table_real;
+ unsigned long scratch_page;
+ struct page *scratch_page_page;
+ dma_addr_t scratch_page_dma;
+ unsigned long gart_bus_addr;
+ unsigned long gatt_bus_addr;
+ u32 mode;
+ enum chipset_type type;
+ unsigned long *key_list;
+ atomic_t current_memory_agp;
+ atomic_t agp_in_use;
+ int max_memory_agp; /* in number of pages */
+ int aperture_size_idx;
+ int capndx;
+ int flags;
+ char major_version;
+ char minor_version;
+ struct list_head list;
+ u32 apbase_config;
+ /* list of agp_memory mapped to the aperture */
+ struct list_head mapped_list;
+ spinlock_t mapped_lock;
+};
+
+#define KB(x) ((x) * 1024)
+#define MB(x) (KB (KB (x)))
+#define GB(x) (MB (KB (x)))
+
+#define A_SIZE_8(x) ((struct aper_size_info_8 *) x)
+#define A_SIZE_16(x) ((struct aper_size_info_16 *) x)
+#define A_SIZE_32(x) ((struct aper_size_info_32 *) x)
+#define A_SIZE_LVL2(x) ((struct aper_size_info_lvl2 *) x)
+#define A_SIZE_FIX(x) ((struct aper_size_info_fixed *) x)
+#define A_IDX8(bridge) (A_SIZE_8((bridge)->driver->aperture_sizes) + i)
+#define A_IDX16(bridge) (A_SIZE_16((bridge)->driver->aperture_sizes) + i)
+#define A_IDX32(bridge) (A_SIZE_32((bridge)->driver->aperture_sizes) + i)
+#define MAXKEY (4096 * 32)
+
+#define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page)
+
+
+struct agp_device_ids {
+ unsigned short device_id; /* first, to make table easier to read */
+ enum chipset_type chipset;
+ const char *chipset_name;
+ int (*chipset_setup) (struct pci_dev *pdev); /* used to override generic */
+};
+
+/* Driver registration */
+struct agp_bridge_data *agp_alloc_bridge(void);
+void agp_put_bridge(struct agp_bridge_data *bridge);
+int agp_add_bridge(struct agp_bridge_data *bridge);
+void agp_remove_bridge(struct agp_bridge_data *bridge);
+
+/* Frontend routines. */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+int agp_frontend_initialize(void);
+void agp_frontend_cleanup(void);
+#else
+static inline int agp_frontend_initialize(void) { return 0; }
+static inline void agp_frontend_cleanup(void) {}
+#endif
+
+/* Generic routines. */
+void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);
+int agp_generic_create_gatt_table(struct agp_bridge_data *bridge);
+int agp_generic_free_gatt_table(struct agp_bridge_data *bridge);
+struct agp_memory *agp_create_memory(int scratch_pages);
+int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type);
+int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type);
+struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
+void agp_generic_free_by_type(struct agp_memory *curr);
+struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge);
+int agp_generic_alloc_pages(struct agp_bridge_data *agp_bridge,
+ struct agp_memory *memory, size_t page_count);
+void agp_generic_destroy_page(struct page *page, int flags);
+void agp_generic_destroy_pages(struct agp_memory *memory);
+void agp_free_key(int key);
+int agp_num_entries(void);
+u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 mode, u32 command);
+void agp_device_command(u32 command, bool agp_v3);
+int agp_3_5_enable(struct agp_bridge_data *bridge);
+void global_cache_flush(void);
+void get_agp_version(struct agp_bridge_data *bridge);
+unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
+ dma_addr_t phys, int type);
+int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
+ int type);
+struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
+
+/* generic functions for user-populated AGP memory types */
+struct agp_memory *agp_generic_alloc_user(size_t page_count, int type);
+void agp_alloc_page_array(size_t size, struct agp_memory *mem);
+static inline void agp_free_page_array(struct agp_memory *mem)
+{
+ kvfree(mem->pages);
+}
+
+
+/* generic routines for agp>=3 */
+int agp3_generic_fetch_size(void);
+void agp3_generic_tlbflush(struct agp_memory *mem);
+int agp3_generic_configure(void);
+void agp3_generic_cleanup(void);
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
+/* aperture sizes have been standardised since v3 */
+#define AGP_GENERIC_SIZES_ENTRIES 11
+extern const struct aper_size_info_16 agp3_generic_sizes[];
+
+extern int agp_off;
+extern int agp_try_unsupported_boot;
+
+long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+/* Chipset independent registers (from AGP Spec) */
+#define AGP_APBASE 0x10
+#define AGP_APERTURE_BAR 0
+
+#define AGPSTAT 0x4
+#define AGPCMD 0x8
+#define AGPNISTAT 0xc
+#define AGPCTRL 0x10
+#define AGPAPSIZE 0x14
+#define AGPNEPG 0x16
+#define AGPGARTLO 0x18
+#define AGPGARTHI 0x1c
+#define AGPNICMD 0x20
+
+#define AGP_MAJOR_VERSION_SHIFT (20)
+#define AGP_MINOR_VERSION_SHIFT (16)
+
+#define AGPSTAT_RQ_DEPTH (0xff000000)
+#define AGPSTAT_RQ_DEPTH_SHIFT 24
+
+#define AGPSTAT_CAL_MASK (1<<12|1<<11|1<<10)
+#define AGPSTAT_ARQSZ (1<<15|1<<14|1<<13)
+#define AGPSTAT_ARQSZ_SHIFT 13
+
+#define AGPSTAT_SBA (1<<9)
+#define AGPSTAT_AGP_ENABLE (1<<8)
+#define AGPSTAT_FW (1<<4)
+#define AGPSTAT_MODE_3_0 (1<<3)
+
+#define AGPSTAT2_1X (1<<0)
+#define AGPSTAT2_2X (1<<1)
+#define AGPSTAT2_4X (1<<2)
+
+#define AGPSTAT3_RSVD (1<<2)
+#define AGPSTAT3_8X (1<<1)
+#define AGPSTAT3_4X (1)
+
+#define AGPCTRL_APERENB (1<<8)
+#define AGPCTRL_GTLBEN (1<<7)
+
+#define AGP2_RESERVED_MASK 0x00fffcc8
+#define AGP3_RESERVED_MASK 0x00ff00c4
+
+#define AGP_ERRATA_FASTWRITES 1<<0
+#define AGP_ERRATA_SBA 1<<1
+#define AGP_ERRATA_1X 1<<2
+
+#endif /* _AGP_BACKEND_PRIV_H */
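agp.h declares the bridge registration API (agp_alloc_bridge, agp_add_bridge, agp_remove_bridge) together with the agp_generic_* helpers that chipset drivers plug into struct agp_bridge_driver. The chipset drivers that follow (ali-agp.c, alpha-agp.c, and the rest) all use the same pattern; condensed into a rough sketch, with my_bridge_driver and my_probe as hypothetical names used only for illustration, the flow is:

/* Condensed sketch of the registration flow used by the chipset drivers
 * below; "my_bridge_driver" and "my_probe" are illustrative names only.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/agp_backend.h>
#include "agp.h"

static const struct agp_bridge_driver my_bridge_driver = {
	.owner			= THIS_MODULE,
	.size_type		= U32_APER_SIZE,
	.mask_memory		= agp_generic_mask_memory,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= agp_generic_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
	/* aperture_sizes table and the chipset-specific fetch_size,
	 * configure, cleanup and tlb_flush callbacks omitted here */
};

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &my_bridge_driver;
	bridge->dev = pdev;
	bridge->capndx = pci_find_capability(pdev, PCI_CAP_ID_AGP);

	/* cache the advertised AGP mode from the status register */
	pci_read_config_dword(pdev, bridge->capndx + PCI_AGP_STATUS,
			      &bridge->mode);

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}

ali-agp.c below is the full version of this flow, adding the ALi-specific fetch_size/configure/cleanup/tlb_flush callbacks and the per-chipset quirks.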
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
new file mode 100644
index 0000000000..760d9a9312
--- /dev/null
+++ b/drivers/char/agp/ali-agp.c
@@ -0,0 +1,422 @@
+/*
+ * ALi AGPGART routines.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <asm/page.h> /* PAGE_SIZE */
+#include "agp.h"
+
+#define ALI_AGPCTRL 0xb8
+#define ALI_ATTBASE 0xbc
+#define ALI_TLBCTRL 0xc0
+#define ALI_TAGCTRL 0xc4
+#define ALI_CACHE_FLUSH_CTRL 0xD0
+#define ALI_CACHE_FLUSH_ADDR_MASK 0xFFFFF000
+#define ALI_CACHE_FLUSH_EN 0x100
+
+static int ali_fetch_size(void)
+{
+ int i;
+ u32 temp;
+ struct aper_size_info_32 *values;
+
+ pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
+ temp &= ~(0xfffffff0);
+ values = A_SIZE_32(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static void ali_tlbflush(struct agp_memory *mem)
+{
+ u32 temp;
+
+ pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+ temp &= 0xfffffff0;
+ temp |= (1<<0 | 1<<1);
+ pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL, temp);
+}
+
+static void ali_cleanup(void)
+{
+ struct aper_size_info_32 *previous_size;
+ u32 temp;
+
+ previous_size = A_SIZE_32(agp_bridge->previous_size);
+
+ pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+// clear tag
+ pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL,
+ ((temp & 0xffffff00) | 0x00000001|0x00000002));
+
+ pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
+ pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE,
+ ((temp & 0x00000ff0) | previous_size->size_value));
+}
+
+static int ali_configure(void)
+{
+ u32 temp;
+ struct aper_size_info_32 *current_size;
+
+ current_size = A_SIZE_32(agp_bridge->current_size);
+
+ /* aperture size and gatt addr */
+ pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
+ temp = (((temp & 0x00000ff0) | (agp_bridge->gatt_bus_addr & 0xfffff000))
+ | (current_size->size_value & 0xf));
+ pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE, temp);
+
+ /* tlb control */
+ pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010));
+
+ /* address to map to */
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
+
+#if 0
+ if (agp_bridge->type == ALI_M1541) {
+ u32 nlvm_addr = 0;
+
+ switch (current_size->size_value) {
+ case 0: break;
+ case 1: nlvm_addr = 0x100000;break;
+ case 2: nlvm_addr = 0x200000;break;
+ case 3: nlvm_addr = 0x400000;break;
+ case 4: nlvm_addr = 0x800000;break;
+ case 6: nlvm_addr = 0x1000000;break;
+ case 7: nlvm_addr = 0x2000000;break;
+ case 8: nlvm_addr = 0x4000000;break;
+ case 9: nlvm_addr = 0x8000000;break;
+ case 10: nlvm_addr = 0x10000000;break;
+ default: break;
+ }
+ nlvm_addr--;
+ nlvm_addr&=0xfff00000;
+
+ nlvm_addr+= agp_bridge->gart_bus_addr;
+ nlvm_addr|=(agp_bridge->gart_bus_addr>>12);
+ dev_info(&agp_bridge->dev->dev, "nlvm top &base = %8x\n",
+ nlvm_addr);
+ }
+#endif
+
+ pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+ temp &= 0xffffff7f; //enable TLB
+ pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, temp);
+
+ return 0;
+}
+
+
+static void m1541_cache_flush(void)
+{
+ int i, page_count;
+ u32 temp;
+
+ global_cache_flush();
+
+ page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order;
+ for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) {
+ pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+ &temp);
+ pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+ (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+ (agp_bridge->gatt_bus_addr + i)) |
+ ALI_CACHE_FLUSH_EN));
+ }
+}
+
+static struct page *m1541_alloc_page(struct agp_bridge_data *bridge)
+{
+ struct page *page = agp_generic_alloc_page(agp_bridge);
+ u32 temp;
+
+ if (!page)
+ return NULL;
+
+ pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+ (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+ page_to_phys(page)) | ALI_CACHE_FLUSH_EN ));
+ return page;
+}
+
+static void ali_destroy_page(struct page *page, int flags)
+{
+ if (page) {
+ if (flags & AGP_PAGE_DESTROY_UNMAP) {
+ global_cache_flush(); /* is this really needed? --hch */
+ agp_generic_destroy_page(page, flags);
+ } else
+ agp_generic_destroy_page(page, flags);
+ }
+}
+
+static void m1541_destroy_page(struct page *page, int flags)
+{
+ u32 temp;
+
+ if (page == NULL)
+ return;
+
+ if (flags & AGP_PAGE_DESTROY_UNMAP) {
+ global_cache_flush();
+
+ pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+ (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+ page_to_phys(page)) | ALI_CACHE_FLUSH_EN));
+ }
+ agp_generic_destroy_page(page, flags);
+}
+
+
+/* Setup function */
+
+static const struct aper_size_info_32 ali_generic_sizes[7] =
+{
+ {256, 65536, 6, 10},
+ {128, 32768, 5, 9},
+ {64, 16384, 4, 8},
+ {32, 8192, 3, 7},
+ {16, 4096, 2, 6},
+ {8, 2048, 1, 4},
+ {4, 1024, 0, 3}
+};
+
+static const struct agp_bridge_driver ali_generic_bridge = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = ali_generic_sizes,
+ .size_type = U32_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = ali_configure,
+ .fetch_size = ali_fetch_size,
+ .cleanup = ali_cleanup,
+ .tlb_flush = ali_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = ali_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver ali_m1541_bridge = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = ali_generic_sizes,
+ .size_type = U32_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = ali_configure,
+ .fetch_size = ali_fetch_size,
+ .cleanup = ali_cleanup,
+ .tlb_flush = ali_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = m1541_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = m1541_alloc_page,
+ .agp_destroy_page = m1541_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+
+static struct agp_device_ids ali_agp_device_ids[] =
+{
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1541,
+ .chipset_name = "M1541",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1621,
+ .chipset_name = "M1621",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1631,
+ .chipset_name = "M1631",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1632,
+ .chipset_name = "M1632",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1641,
+ .chipset_name = "M1641",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1644,
+ .chipset_name = "M1644",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1647,
+ .chipset_name = "M1647",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1651,
+ .chipset_name = "M1651",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1671,
+ .chipset_name = "M1671",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1681,
+ .chipset_name = "M1681",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1683,
+ .chipset_name = "M1683",
+ },
+
+ { }, /* dummy final entry, always present */
+};
+
+static int agp_ali_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct agp_device_ids *devs = ali_agp_device_ids;
+ struct agp_bridge_data *bridge;
+ u8 hidden_1621_id, cap_ptr;
+ int j;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ /* probe for known chipsets */
+ for (j = 0; devs[j].chipset_name; j++) {
+ if (pdev->device == devs[j].device_id)
+ goto found;
+ }
+
+ dev_err(&pdev->dev, "unsupported ALi chipset [%04x/%04x])\n",
+ pdev->vendor, pdev->device);
+ return -ENODEV;
+
+
+found:
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_AL_M1541:
+ bridge->driver = &ali_m1541_bridge;
+ break;
+ case PCI_DEVICE_ID_AL_M1621:
+ pci_read_config_byte(pdev, 0xFB, &hidden_1621_id);
+ switch (hidden_1621_id) {
+ case 0x31:
+ devs[j].chipset_name = "M1631";
+ break;
+ case 0x32:
+ devs[j].chipset_name = "M1632";
+ break;
+ case 0x41:
+ devs[j].chipset_name = "M1641";
+ break;
+ case 0x43:
+ devs[j].chipset_name = "M1621";
+ break;
+ case 0x47:
+ devs[j].chipset_name = "M1647";
+ break;
+ case 0x51:
+ devs[j].chipset_name = "M1651";
+ break;
+ default:
+ break;
+ }
+ fallthrough;
+ default:
+ bridge->driver = &ali_generic_bridge;
+ }
+
+ dev_info(&pdev->dev, "ALi %s chipset\n", devs[j].chipset_name);
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void agp_ali_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static const struct pci_device_id agp_ali_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AL,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_ali_pci_table);
+
+static struct pci_driver agp_ali_pci_driver = {
+ .name = "agpgart-ali",
+ .id_table = agp_ali_pci_table,
+ .probe = agp_ali_probe,
+ .remove = agp_ali_remove,
+};
+
+static int __init agp_ali_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_ali_pci_driver);
+}
+
+static void __exit agp_ali_cleanup(void)
+{
+ pci_unregister_driver(&agp_ali_pci_driver);
+}
+
+module_init(agp_ali_init);
+module_exit(agp_ali_cleanup);
+
+MODULE_AUTHOR("Dave Jones");
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
new file mode 100644
index 0000000000..c9bf2c2198
--- /dev/null
+++ b/drivers/char/agp/alpha-agp.c
@@ -0,0 +1,220 @@
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/machvec.h>
+#include <asm/agp_backend.h>
+#include "../../../arch/alpha/kernel/pci_impl.h"
+
+#include "agp.h"
+
+static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+ dma_addr_t dma_addr;
+ unsigned long pa;
+ struct page *page;
+
+ dma_addr = vmf->address - vmf->vma->vm_start + agp->aperture.bus_base;
+ pa = agp->ops->translate(agp, dma_addr);
+
+ if (pa == (unsigned long)-EINVAL)
+ return VM_FAULT_SIGBUS; /* no translation */
+
+ /*
+ * Get the page, inc the use count, and return it
+ */
+ page = virt_to_page(__va(pa));
+ get_page(page);
+ vmf->page = page;
+ return 0;
+}
+
+static struct aper_size_info_fixed alpha_core_agp_sizes[] =
+{
+ { 0, 0, 0 }, /* filled in by alpha_core_agp_setup */
+};
+
+static const struct vm_operations_struct alpha_core_agp_vm_ops = {
+ .fault = alpha_core_agp_vm_fault,
+};
+
+
+static int alpha_core_agp_fetch_size(void)
+{
+ return alpha_core_agp_sizes[0].size;
+}
+
+static int alpha_core_agp_configure(void)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+ agp_bridge->gart_bus_addr = agp->aperture.bus_base;
+ return 0;
+}
+
+static void alpha_core_agp_cleanup(void)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+
+ agp->ops->cleanup(agp);
+}
+
+static void alpha_core_agp_tlbflush(struct agp_memory *mem)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+ alpha_mv.mv_pci_tbi(agp->hose, 0, -1);
+}
+
+static void alpha_core_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ alpha_agp_info *agp = bridge->dev_private_data;
+
+ agp->mode.lw = agp_collect_device_status(bridge, mode,
+ agp->capability.lw);
+
+ agp->mode.bits.enable = 1;
+ agp->ops->configure(agp);
+
+ agp_device_command(agp->mode.lw, false);
+}
+
+static int alpha_core_agp_insert_memory(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+ int num_entries, status;
+ void *temp;
+
+ if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
+ return -EINVAL;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ status = agp->ops->bind(agp, pg_start, mem);
+ mb();
+ alpha_core_agp_tlbflush(mem);
+
+ return status;
+}
+
+static int alpha_core_agp_remove_memory(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+ int status;
+
+ status = agp->ops->unbind(agp, pg_start, mem);
+ alpha_core_agp_tlbflush(mem);
+ return status;
+}
+
+static int alpha_core_agp_create_free_gatt_table(struct agp_bridge_data *a)
+{
+ return 0;
+}
+
+struct agp_bridge_driver alpha_core_agp_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = alpha_core_agp_sizes,
+ .num_aperture_sizes = 1,
+ .size_type = FIXED_APER_SIZE,
+ .cant_use_aperture = true,
+ .masks = NULL,
+
+ .fetch_size = alpha_core_agp_fetch_size,
+ .configure = alpha_core_agp_configure,
+ .agp_enable = alpha_core_agp_enable,
+ .cleanup = alpha_core_agp_cleanup,
+ .tlb_flush = alpha_core_agp_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = alpha_core_agp_create_free_gatt_table,
+ .free_gatt_table = alpha_core_agp_create_free_gatt_table,
+ .insert_memory = alpha_core_agp_insert_memory,
+ .remove_memory = alpha_core_agp_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+struct agp_bridge_data *alpha_bridge;
+
+int __init
+alpha_core_agp_setup(void)
+{
+ alpha_agp_info *agp = alpha_mv.agp_info();
+ struct pci_dev *pdev; /* faked */
+ struct aper_size_info_fixed *aper_size;
+
+ if (!agp)
+ return -ENODEV;
+ if (agp->ops->setup(agp))
+ return -ENODEV;
+
+ /*
+ * Build the aperture size descriptor
+ */
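+	/* size in MB, one GATT entry per aperture page; page_order assumes
+	 * num_entries is a power of two. */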
+ aper_size = alpha_core_agp_sizes;
+ aper_size->size = agp->aperture.size / (1024 * 1024);
+ aper_size->num_entries = agp->aperture.size / PAGE_SIZE;
+ aper_size->page_order = __ffs(aper_size->num_entries / 1024);
+
+ /*
+ * Build a fake pci_dev struct
+ */
+ pdev = pci_alloc_dev(NULL);
+ if (!pdev)
+ return -ENOMEM;
+ pdev->vendor = 0xffff;
+ pdev->device = 0xffff;
+ pdev->sysdata = agp->hose;
+
+ alpha_bridge = agp_alloc_bridge();
+ if (!alpha_bridge)
+ goto fail;
+
+ alpha_bridge->driver = &alpha_core_agp_driver;
+ alpha_bridge->vm_ops = &alpha_core_agp_vm_ops;
+ alpha_bridge->current_size = aper_size; /* only 1 size */
+ alpha_bridge->dev_private_data = agp;
+ alpha_bridge->dev = pdev;
+ alpha_bridge->mode = agp->capability.lw;
+
+ printk(KERN_INFO PFX "Detected AGP on hose %d\n", agp->hose->index);
+ return agp_add_bridge(alpha_bridge);
+
+ fail:
+ kfree(pdev);
+ return -ENOMEM;
+}
+
+static int __init agp_alpha_core_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ if (alpha_mv.agp_info)
+ return alpha_core_agp_setup();
+ return -ENODEV;
+}
+
+static void __exit agp_alpha_core_cleanup(void)
+{
+ agp_remove_bridge(alpha_bridge);
+ agp_put_bridge(alpha_bridge);
+}
+
+module_init(agp_alpha_core_init);
+module_exit(agp_alpha_core_cleanup);
+
+MODULE_AUTHOR("Jeff Wiedemeier <Jeff.Wiedemeier@hp.com>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
new file mode 100644
index 0000000000..55397ba765
--- /dev/null
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -0,0 +1,552 @@
+/*
+ * AMD K7 AGPGART routines.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/set_memory.h>
+#include "agp.h"
+
+#define AMD_MMBASE_BAR 1
+#define AMD_APSIZE 0xac
+#define AMD_MODECNTL 0xb0
+#define AMD_MODECNTL2 0xb2
+#define AMD_GARTENABLE 0x02 /* In mmio region (16-bit register) */
+#define AMD_ATTBASE 0x04 /* In mmio region (32-bit register) */
+#define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */
+#define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */
+
+static const struct pci_device_id agp_amdk7_pci_table[];
+
+struct amd_page_map {
+ unsigned long *real;
+ unsigned long __iomem *remapped;
+};
+
+static struct _amd_irongate_private {
+ volatile u8 __iomem *registers;
+ struct amd_page_map **gatt_pages;
+ int num_tables;
+} amd_irongate_private;
+
+static int amd_create_page_map(struct amd_page_map *page_map)
+{
+ int i;
+
+ page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+ if (page_map->real == NULL)
+ return -ENOMEM;
+
+ set_memory_uc((unsigned long)page_map->real, 1);
+ page_map->remapped = page_map->real;
+
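+	/* Initialise every entry to the scratch page so unbound GART
+	 * entries point somewhere harmless. */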
+ for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
+ writel(agp_bridge->scratch_page, page_map->remapped+i);
+ readl(page_map->remapped+i); /* PCI Posting. */
+ }
+
+ return 0;
+}
+
+static void amd_free_page_map(struct amd_page_map *page_map)
+{
+ set_memory_wb((unsigned long)page_map->real, 1);
+ free_page((unsigned long) page_map->real);
+}
+
+static void amd_free_gatt_pages(void)
+{
+ int i;
+ struct amd_page_map **tables;
+ struct amd_page_map *entry;
+
+ tables = amd_irongate_private.gatt_pages;
+ for (i = 0; i < amd_irongate_private.num_tables; i++) {
+ entry = tables[i];
+ if (entry != NULL) {
+ if (entry->real != NULL)
+ amd_free_page_map(entry);
+ kfree(entry);
+ }
+ }
+ kfree(tables);
+ amd_irongate_private.gatt_pages = NULL;
+}
+
+static int amd_create_gatt_pages(int nr_tables)
+{
+ struct amd_page_map **tables;
+ struct amd_page_map *entry;
+ int retval = 0;
+ int i;
+
+ tables = kcalloc(nr_tables + 1, sizeof(struct amd_page_map *),
+ GFP_KERNEL);
+ if (tables == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_tables; i++) {
+ entry = kzalloc(sizeof(struct amd_page_map), GFP_KERNEL);
+ tables[i] = entry;
+ if (entry == NULL) {
+ retval = -ENOMEM;
+ break;
+ }
+ retval = amd_create_page_map(entry);
+ if (retval != 0)
+ break;
+ }
+ amd_irongate_private.num_tables = i;
+ amd_irongate_private.gatt_pages = tables;
+
+ if (retval != 0)
+ amd_free_gatt_pages();
+
+ return retval;
+}
+
+/* Since we don't need contiguous memory we just try
+ * to get the gatt table once
+ */
+
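+/*
+ * The GATT is two-level: bits 31:22 of a GART bus address select the
+ * page-directory entry (one GATT page per 4 MB of aperture) and bits
+ * 21:12 select the entry within that GATT page.
+ */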
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
+ GET_PAGE_DIR_IDX(addr)]->remapped)
+
+static int amd_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct aper_size_info_lvl2 *value;
+ struct amd_page_map page_dir;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+ int retval;
+ int i;
+
+ value = A_SIZE_LVL2(agp_bridge->current_size);
+ retval = amd_create_page_map(&page_dir);
+ if (retval != 0)
+ return retval;
+
+ retval = amd_create_gatt_pages(value->num_entries / 1024);
+ if (retval != 0) {
+ amd_free_page_map(&page_dir);
+ return retval;
+ }
+
+ agp_bridge->gatt_table_real = (u32 *)page_dir.real;
+ agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
+ agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
+
+	/* Get the address for the gart region.
+	 * This is a bus address even on the alpha, b/c it's
+	 * used to program the agp master, not the cpu.
+	 */
+
+ addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR);
+ agp_bridge->gart_bus_addr = addr;
+
+ /* Calculate the agp offset */
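+	/* Each page-directory entry covers 4 MB of aperture; bit 0 marks it valid. */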
+ for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
+ writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1,
+ page_dir.remapped+GET_PAGE_DIR_OFF(addr));
+ readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
+ }
+
+ for (i = 0; i < value->num_entries; i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
+ }
+
+ return 0;
+}
+
+static int amd_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct amd_page_map page_dir;
+
+ page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
+ page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
+
+ amd_free_gatt_pages();
+ amd_free_page_map(&page_dir);
+ return 0;
+}
+
+static int amd_irongate_fetch_size(void)
+{
+ int i;
+ u32 temp;
+ struct aper_size_info_lvl2 *values;
+
+ pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
+ temp = (temp & 0x0000000e);
+ values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static int amd_irongate_configure(void)
+{
+ struct aper_size_info_lvl2 *current_size;
+ phys_addr_t reg;
+ u32 temp;
+ u16 enable_reg;
+
+ current_size = A_SIZE_LVL2(agp_bridge->current_size);
+
+ if (!amd_irongate_private.registers) {
+ /* Get the memory mapped registers */
+ reg = pci_resource_start(agp_bridge->dev, AMD_MMBASE_BAR);
+ amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(reg, 4096);
+ if (!amd_irongate_private.registers)
+ return -ENOMEM;
+ }
+
+ /* Write out the address of the gatt table */
+ writel(agp_bridge->gatt_bus_addr, amd_irongate_private.registers+AMD_ATTBASE);
+ readl(amd_irongate_private.registers+AMD_ATTBASE); /* PCI Posting. */
+
+ /* Write the Sync register */
+ pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL, 0x80);
+
+ /* Set indexing mode */
+ pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL2, 0x00);
+
+ /* Write the enable register */
+ enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
+ enable_reg = (enable_reg | 0x0004);
+ writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
+ readw(amd_irongate_private.registers+AMD_GARTENABLE); /* PCI Posting. */
+
+ /* Write out the size register */
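+	/* APSIZE bits 3:1 select the aperture size; bit 0 enables the GART */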
+ pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
+ temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 1);
+ pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
+
+ /* Flush the tlb */
+ writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
+ readl(amd_irongate_private.registers+AMD_TLBFLUSH); /* PCI Posting.*/
+ return 0;
+}
+
+static void amd_irongate_cleanup(void)
+{
+ struct aper_size_info_lvl2 *previous_size;
+ u32 temp;
+ u16 enable_reg;
+
+ previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
+
+ enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
+ enable_reg = (enable_reg & ~(0x0004));
+ writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
+ readw(amd_irongate_private.registers+AMD_GARTENABLE); /* PCI Posting. */
+
+ /* Write back the previous size and disable gart translation */
+ pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
+ temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
+ pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
+ iounmap((void __iomem *) amd_irongate_private.registers);
+}
+
+/*
+ * This routine could be implemented by taking the addresses
+ * written to the GATT and flushing them individually. However,
+ * it currently just flushes the whole table, which is probably
+ * more efficient since agp_memory blocks can span a large number
+ * of entries.
+ */
+
+static void amd_irongate_tlbflush(struct agp_memory *temp)
+{
+ writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
+ readl(amd_irongate_private.registers+AMD_TLBFLUSH); /* PCI Posting. */
+}
+
+static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i, j, num_entries;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
+ return -EINVAL;
+
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ j = pg_start;
+ while (j < (pg_start + mem->page_count)) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
+ return -EBUSY;
+ j++;
+ }
+
+ if (!mem->is_flushed) {
+ global_cache_flush();
+ mem->is_flushed = true;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_generic_mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]),
+ mem->type),
+ cur_gatt+GET_GATT_OFF(addr));
+ readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
+ }
+ amd_irongate_tlbflush(mem);
+ return 0;
+}
+
+static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
+ return -EINVAL;
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
+ }
+
+ amd_irongate_tlbflush(mem);
+ return 0;
+}
+
+static const struct aper_size_info_lvl2 amd_irongate_sizes[7] =
+{
+ {2048, 524288, 0x0000000c},
+ {1024, 262144, 0x0000000a},
+ {512, 131072, 0x00000008},
+ {256, 65536, 0x00000006},
+ {128, 32768, 0x00000004},
+ {64, 16384, 0x00000002},
+ {32, 8192, 0x00000000}
+};
+
+static const struct gatt_mask amd_irongate_masks[] =
+{
+ {.mask = 1, .type = 0}
+};
+
+static const struct agp_bridge_driver amd_irongate_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = amd_irongate_sizes,
+ .size_type = LVL2_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = amd_irongate_configure,
+ .fetch_size = amd_irongate_fetch_size,
+ .cleanup = amd_irongate_cleanup,
+ .tlb_flush = amd_irongate_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = amd_irongate_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = amd_create_gatt_table,
+ .free_gatt_table = amd_free_gatt_table,
+ .insert_memory = amd_insert_memory,
+ .remove_memory = amd_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static struct agp_device_ids amd_agp_device_ids[] =
+{
+ {
+ .device_id = PCI_DEVICE_ID_AMD_FE_GATE_7006,
+ .chipset_name = "Irongate",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AMD_FE_GATE_700E,
+ .chipset_name = "761",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AMD_FE_GATE_700C,
+ .chipset_name = "760MP",
+ },
+ { }, /* dummy final entry, always present */
+};
+
+static int agp_amdk7_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+ int j;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ j = ent - agp_amdk7_pci_table;
+ dev_info(&pdev->dev, "AMD %s chipset\n",
+ amd_agp_device_ids[j].chipset_name);
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver = &amd_irongate_driver;
+ bridge->dev_private_data = &amd_irongate_private;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ /* 751 Errata (22564_B-1.PDF)
+ erratum 20: strobe glitch with Nvidia NV10 GeForce cards.
+ system controller may experience noise due to strong drive strengths
+ */
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
+ struct pci_dev *gfxcard=NULL;
+
+ cap_ptr = 0;
+ while (!cap_ptr) {
+ gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
+ if (!gfxcard) {
+ dev_info(&pdev->dev, "no AGP VGA controller\n");
+ return -ENODEV;
+ }
+ cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
+ }
+
+ /* With so many variants of NVidia cards, it's simpler just
+ to blacklist them all, and then whitelist them as needed
+ (if necessary at all). */
+ if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
+ agp_bridge->flags |= AGP_ERRATA_1X;
+ dev_info(&pdev->dev, "AMD 751 chipset with NVidia GeForce; forcing 1X due to errata\n");
+ }
+ pci_dev_put(gfxcard);
+ }
+
+ /* 761 Errata (23613_F.pdf)
+ * Revisions B0/B1 were a disaster.
+ * erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X
+ * erratum 45: Timing problem prevents fast writes -- Disable fast write.
+ * erratum 46: Setup violation on AGP SBA pins - Disable side band addressing.
+ * With this lot disabled, we should prevent lockups. */
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) {
+ if (pdev->revision == 0x10 || pdev->revision == 0x11) {
+ agp_bridge->flags = AGP_ERRATA_FASTWRITES;
+ agp_bridge->flags |= AGP_ERRATA_SBA;
+ agp_bridge->flags |= AGP_ERRATA_1X;
+ dev_info(&pdev->dev, "AMD 761 chipset with errata; disabling AGP fast writes & SBA and forcing to 1X\n");
+ }
+ }
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void agp_amdk7_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static int agp_amdk7_resume(struct device *dev)
+{
+ return amd_irongate_driver.configure();
+}
+
+/* must be the same order as name table above */
+static const struct pci_device_id agp_amdk7_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_FE_GATE_7006,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_FE_GATE_700E,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_FE_GATE_700C,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table);
+
+static DEFINE_SIMPLE_DEV_PM_OPS(agp_amdk7_pm_ops, NULL, agp_amdk7_resume);
+
+static struct pci_driver agp_amdk7_pci_driver = {
+ .name = "agpgart-amdk7",
+ .id_table = agp_amdk7_pci_table,
+ .probe = agp_amdk7_probe,
+ .remove = agp_amdk7_remove,
+ .driver.pm = &agp_amdk7_pm_ops,
+};
+
+static int __init agp_amdk7_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_amdk7_pci_driver);
+}
+
+static void __exit agp_amdk7_cleanup(void)
+{
+ pci_unregister_driver(&agp_amdk7_pci_driver);
+}
+
+module_init(agp_amdk7_init);
+module_exit(agp_amdk7_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
new file mode 100644
index 0000000000..ce86514366
--- /dev/null
+++ b/drivers/char/agp/amd64-agp.c
@@ -0,0 +1,805 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2001-2003 SuSE Labs.
+ * Distributed under the GNU public license, v2.
+ *
+ * This is a GART driver for the AMD Opteron/Athlon64 on-CPU northbridge.
+ * It also includes support for the AMD 8151 AGP bridge,
+ * although it doesn't actually do much, as all the real
+ * work is done in the northbridge(s).
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/mmzone.h>
+#include <asm/page.h> /* PAGE_SIZE */
+#include <asm/e820/api.h>
+#include <asm/amd_nb.h>
+#include <asm/gart.h>
+#include "agp.h"
+
+/* NVIDIA K8 registers */
+#define NVIDIA_X86_64_0_APBASE 0x10
+#define NVIDIA_X86_64_1_APBASE1 0x50
+#define NVIDIA_X86_64_1_APLIMIT1 0x54
+#define NVIDIA_X86_64_1_APSIZE 0xa8
+#define NVIDIA_X86_64_1_APBASE2 0xd8
+#define NVIDIA_X86_64_1_APLIMIT2 0xdc
+
+/* ULi K8 registers */
+#define ULI_X86_64_BASE_ADDR 0x10
+#define ULI_X86_64_HTT_FEA_REG 0x50
+#define ULI_X86_64_ENU_SCR_REG 0x54
+
+static struct resource *aperture_resource;
+static bool __initdata agp_try_unsupported = 1;
+static int agp_bridges_found;
+
+static void amd64_tlbflush(struct agp_memory *temp)
+{
+ amd_flush_garts();
+}
+
+static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i, j, num_entries;
+ long long tmp;
+ int mask_type;
+ struct agp_bridge_data *bridge = mem->bridge;
+ u32 pte;
+
+ num_entries = agp_num_entries();
+
+ if (type != mem->type)
+ return -EINVAL;
+ mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+ if (mask_type != 0)
+ return -EINVAL;
+
+
+ /* Make sure we can fit the range in the gatt table. */
+ /* FIXME: could wrap */
+ if (((unsigned long)pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ j = pg_start;
+
+ /* gatt table should be empty. */
+ while (j < (pg_start + mem->page_count)) {
+ if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
+ return -EBUSY;
+ j++;
+ }
+
+ if (!mem->is_flushed) {
+ global_cache_flush();
+ mem->is_flushed = true;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ tmp = agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]),
+ mask_type);
+
+ BUG_ON(tmp & 0xffffff0000000ffcULL);
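+		/* GART PTE format: physical address bits 39:32 go into PTE
+		 * bits 11:4, bits 31:12 stay in place, plus valid/coherent. */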
+ pte = (tmp & 0x000000ff00000000ULL) >> 28;
+		pte |= (tmp & 0x00000000fffff000ULL);
+ pte |= GPTE_VALID | GPTE_COHERENT;
+
+ writel(pte, agp_bridge->gatt_table+j);
+ readl(agp_bridge->gatt_table+j); /* PCI Posting. */
+ }
+ amd64_tlbflush(mem);
+ return 0;
+}
+
+/*
+ * This hack alters the order element according
+ * to the size of a long. It sucks. I totally disown this, even
+ * though it does appear to work for the most part.
+ */
+static struct aper_size_info_32 amd64_aperture_sizes[7] =
+{
+ {32, 8192, 3+(sizeof(long)/8), 0 },
+ {64, 16384, 4+(sizeof(long)/8), 1<<1 },
+ {128, 32768, 5+(sizeof(long)/8), 1<<2 },
+ {256, 65536, 6+(sizeof(long)/8), 1<<1 | 1<<2 },
+ {512, 131072, 7+(sizeof(long)/8), 1<<3 },
+ {1024, 262144, 8+(sizeof(long)/8), 1<<1 | 1<<3},
+ {2048, 524288, 9+(sizeof(long)/8), 1<<2 | 1<<3}
+};
+
+
+/*
+ * Get the current Aperture size from the x86-64.
+ * Note that there may be multiple x86-64's, but we just return
+ * the value from the first one we find. The set_size functions
+ * keep the rest coherent anyway, or at least should do.
+ */
+static int amd64_fetch_size(void)
+{
+ struct pci_dev *dev;
+ int i;
+ u32 temp;
+ struct aper_size_info_32 *values;
+
+ dev = node_to_amd_nb(0)->misc;
+ if (dev==NULL)
+ return 0;
+
+ pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &temp);
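+	/* GARTAPERTURECTL bits 3:1 encode the aperture size */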
+ temp = (temp & 0xe);
+ values = A_SIZE_32(amd64_aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+ return 0;
+}
+
+/*
+ * In a multiprocessor x86-64 system, this function gets
+ * called once for each CPU.
+ */
+static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
+{
+ u64 aperturebase;
+ u32 tmp;
+ u64 aper_base;
+
+ /* Address to map to */
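+	/* GARTAPERTUREBASE holds the aperture base address in 32 MB units */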
+ pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
+ aperturebase = (u64)tmp << 25;
+ aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
+
+ enable_gart_translation(hammer, gatt_table);
+
+ return aper_base;
+}
+
+
+static const struct aper_size_info_32 amd_8151_sizes[7] =
+{
+ {2048, 524288, 9, 0x00000000 }, /* 0 0 0 0 0 0 */
+ {1024, 262144, 8, 0x00000400 }, /* 1 0 0 0 0 0 */
+ {512, 131072, 7, 0x00000600 }, /* 1 1 0 0 0 0 */
+ {256, 65536, 6, 0x00000700 }, /* 1 1 1 0 0 0 */
+ {128, 32768, 5, 0x00000720 }, /* 1 1 1 1 0 0 */
+ {64, 16384, 4, 0x00000730 }, /* 1 1 1 1 1 0 */
+ {32, 8192, 3, 0x00000738 } /* 1 1 1 1 1 1 */
+};
+
+static int amd_8151_configure(void)
+{
+ unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
+ int i;
+
+ if (!amd_nb_has_feature(AMD_NB_GART))
+ return 0;
+
+ /* Configure AGP regs in each x86-64 host bridge. */
+ for (i = 0; i < amd_nb_num(); i++) {
+ agp_bridge->gart_bus_addr =
+ amd64_configure(node_to_amd_nb(i)->misc, gatt_bus);
+ }
+ amd_flush_garts();
+ return 0;
+}
+
+
+static void amd64_cleanup(void)
+{
+ u32 tmp;
+ int i;
+
+ if (!amd_nb_has_feature(AMD_NB_GART))
+ return;
+
+ for (i = 0; i < amd_nb_num(); i++) {
+ struct pci_dev *dev = node_to_amd_nb(i)->misc;
+ /* disable gart translation */
+ pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
+ tmp &= ~GARTEN;
+ pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp);
+ }
+}
+
+
+static const struct agp_bridge_driver amd_8151_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = amd_8151_sizes,
+ .size_type = U32_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = amd_8151_configure,
+ .fetch_size = amd64_fetch_size,
+ .cleanup = amd64_cleanup,
+ .tlb_flush = amd64_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = amd64_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
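+	/* Turn the faulting address into an aperture bus address, then ask
+	 * the chipset to translate it back to the backing physical page. */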
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+/* Some basic sanity checks for the aperture. */
+static int agp_aperture_valid(u64 aper, u32 size)
+{
+ if (!aperture_valid(aper, size, 32*1024*1024))
+ return 0;
+
+	/* Request the aperture. This catches cases when someone else
+	   already put a mapping in there - happens with some very broken BIOSes.
+
+	   Maybe it would be better to use pci_assign_resource/pci_enable_device
+	   instead of trusting the bridges? */
+ if (!aperture_resource &&
+ !(aperture_resource = request_mem_region(aper, size, "aperture"))) {
+ printk(KERN_ERR PFX "Aperture conflicts with PCI mapping.\n");
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * W*s-centric BIOSes sometimes only set up the aperture in the AGP
+ * bridge, not the northbridge. On AMD64 this is handled early
+ * in aperture.c, but when the IOMMU is not enabled or we run
+ * on a 32-bit kernel this needs to be redone.
+ * Unfortunately it is impossible to fix the aperture here because it's too late
+ * to allocate that much memory. But at least error out cleanly instead of
+ * crashing.
+ */
+static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
+{
+ u64 aper, nb_aper;
+ int order = 0;
+ u32 nb_order, nb_base;
+ u16 apsize;
+
+ pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
+ nb_order = (nb_order >> 1) & 7;
+ pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
+ nb_aper = (u64)nb_base << 25;
+
+ /* Northbridge seems to contain crap. Try the AGP bridge. */
+
+ pci_read_config_word(agp, cap+0x14, &apsize);
+ if (apsize == 0xffff) {
+ if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
+ return 0;
+ return -1;
+ }
+
+ apsize &= 0xfff;
+ /* Some BIOS use weird encodings not in the AGPv3 table. */
+ if (apsize & 0xff)
+ apsize |= 0xf00;
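+	/* Each set bit halves the aperture: order is the size in 32 MB units,
+	 * i.e. aperture size = 32 MB << order. */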
+ order = 7 - hweight16(apsize);
+
+ aper = pci_bus_address(agp, AGP_APERTURE_BAR);
+
+ /*
+	 * On some sick chips APSIZE is 0. This means it wants 4G,
+	 * so let's double-check that order, and let's trust the AMD NB settings.
+ */
+ if (order >=0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) {
+ dev_info(&agp->dev, "aperture size %u MB is not right, using settings from NB\n",
+ 32 << order);
+ order = nb_order;
+ }
+
+ if (nb_order >= order) {
+ if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
+ return 0;
+ }
+
+ dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n",
+ aper, 32 << order);
+ if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
+ return -1;
+
+ gart_set_size_and_enable(nb, order);
+ pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25);
+
+ return 0;
+}
+
+static int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
+{
+ int i;
+
+ if (!amd_nb_num())
+ return -ENODEV;
+
+ if (!amd_nb_has_feature(AMD_NB_GART))
+ return -ENODEV;
+
+ i = 0;
+ for (i = 0; i < amd_nb_num(); i++) {
+ struct pci_dev *dev = node_to_amd_nb(i)->misc;
+ if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
+ dev_err(&dev->dev, "no usable aperture found\n");
+#ifdef __x86_64__
+ /* should port this to i386 */
+ dev_err(&dev->dev, "consider rebooting with iommu=memaper=2 to get a good aperture\n");
+#endif
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/* Handle AMD 8151 quirks */
+static void amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
+{
+ char *revstring;
+
+ switch (pdev->revision) {
+ case 0x01: revstring="A0"; break;
+ case 0x02: revstring="A1"; break;
+ case 0x11: revstring="B0"; break;
+ case 0x12: revstring="B1"; break;
+ case 0x13: revstring="B2"; break;
+ case 0x14: revstring="B3"; break;
+ default: revstring="??"; break;
+ }
+
+ dev_info(&pdev->dev, "AMD 8151 AGP Bridge rev %s\n", revstring);
+
+ /*
+ * Work around errata.
+	 * Chips before the B2 stepping incorrectly report v3.5.
+ */
+ if (pdev->revision < 0x13) {
+ dev_info(&pdev->dev, "correcting AGP revision (reports 3.5, is really 3.0)\n");
+ bridge->major_version = 3;
+ bridge->minor_version = 0;
+ }
+}
+
+
+static const struct aper_size_info_32 uli_sizes[7] =
+{
+ {256, 65536, 6, 10},
+ {128, 32768, 5, 9},
+ {64, 16384, 4, 8},
+ {32, 8192, 3, 7},
+ {16, 4096, 2, 6},
+ {8, 2048, 1, 4},
+ {4, 1024, 0, 3}
+};
+static int uli_agp_init(struct pci_dev *pdev)
+{
+ u32 httfea,baseaddr,enuscr;
+ struct pci_dev *dev1;
+ int i, ret;
+ unsigned size = amd64_fetch_size();
+
+ dev_info(&pdev->dev, "setting up ULi AGP\n");
+	dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(0, 0));
+ if (dev1 == NULL) {
+ dev_info(&pdev->dev, "can't find ULi secondary device\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(uli_sizes); i++)
+ if (uli_sizes[i].size == size)
+ break;
+
+ if (i == ARRAY_SIZE(uli_sizes)) {
+ dev_info(&pdev->dev, "no ULi size found for %d\n", size);
+ ret = -ENODEV;
+ goto put;
+ }
+
+ /* shadow x86-64 registers into ULi registers */
+ pci_read_config_dword (node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
+ &httfea);
+
+ /* if x86-64 aperture base is beyond 4G, exit here */
+ if ((httfea & 0x7fff) >> (32 - 25)) {
+ ret = -ENODEV;
+ goto put;
+ }
+
+	httfea = (httfea & 0x7fff) << 25;
+
+	pci_read_config_dword(pdev, ULI_X86_64_BASE_ADDR, &baseaddr);
+	baseaddr &= ~PCI_BASE_ADDRESS_MEM_MASK;
+	baseaddr |= httfea;
+	pci_write_config_dword(pdev, ULI_X86_64_BASE_ADDR, baseaddr);
+
+	enuscr = httfea + (size * 1024 * 1024) - 1;
+ pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
+ pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
+ ret = 0;
+put:
+ pci_dev_put(dev1);
+ return ret;
+}
+
+
+static const struct aper_size_info_32 nforce3_sizes[5] =
+{
+ {512, 131072, 7, 0x00000000 },
+ {256, 65536, 6, 0x00000008 },
+ {128, 32768, 5, 0x0000000C },
+ {64, 16384, 4, 0x0000000E },
+ {32, 8192, 3, 0x0000000F }
+};
+
+/* Handle the shadow device of the Nvidia NForce3 */
+/* CHECK-ME: the original 2.4 version set up some IORRs. Check if that is needed. */
+static int nforce3_agp_init(struct pci_dev *pdev)
+{
+ u32 tmp, apbase, apbar, aplimit;
+ struct pci_dev *dev1;
+ int i, ret;
+ unsigned size = amd64_fetch_size();
+
+ dev_info(&pdev->dev, "setting up Nforce3 AGP\n");
+
+ dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0));
+ if (dev1 == NULL) {
+ dev_info(&pdev->dev, "can't find Nforce3 secondary device\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(nforce3_sizes); i++)
+ if (nforce3_sizes[i].size == size)
+ break;
+
+ if (i == ARRAY_SIZE(nforce3_sizes)) {
+ dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
+ ret = -ENODEV;
+ goto put;
+ }
+
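+	/* Program the nForce3 shadow aperture size to match the CPU GART */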
+ pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
+ tmp &= ~(0xf);
+ tmp |= nforce3_sizes[i].size_value;
+ pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
+
+ /* shadow x86-64 registers into NVIDIA registers */
+ pci_read_config_dword (node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
+ &apbase);
+
+ /* if x86-64 aperture base is beyond 4G, exit here */
+ if ( (apbase & 0x7fff) >> (32 - 25) ) {
+ dev_info(&pdev->dev, "aperture base > 4G\n");
+ ret = -ENODEV;
+ goto put;
+ }
+
+ apbase = (apbase & 0x7fff) << 25;
+
+ pci_read_config_dword(pdev, NVIDIA_X86_64_0_APBASE, &apbar);
+ apbar &= ~PCI_BASE_ADDRESS_MEM_MASK;
+ apbar |= apbase;
+ pci_write_config_dword(pdev, NVIDIA_X86_64_0_APBASE, apbar);
+
+ aplimit = apbase + (size * 1024 * 1024) - 1;
+ pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE1, apbase);
+ pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT1, aplimit);
+ pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
+ pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
+
+ ret = 0;
+put:
+ pci_dev_put(dev1);
+
+ return ret;
+}
+
+static int agp_amd64_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+ int err;
+
+	/* The Highlander principle: there can be only one AGP bridge */
+ if (agp_bridges_found)
+ return -ENODEV;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ /* Could check for AGPv3 here */
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+ pdev->device == PCI_DEVICE_ID_AMD_8151_0) {
+ amd8151_init(pdev, bridge);
+ } else {
+ dev_info(&pdev->dev, "AGP bridge [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
+ }
+
+ bridge->driver = &amd_8151_driver;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
+
+ if (cache_nbs(pdev, cap_ptr) == -1) {
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+
+ if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
+ int ret = nforce3_agp_init(pdev);
+ if (ret) {
+ agp_put_bridge(bridge);
+ return ret;
+ }
+ }
+
+ if (pdev->vendor == PCI_VENDOR_ID_AL) {
+ int ret = uli_agp_init(pdev);
+ if (ret) {
+ agp_put_bridge(bridge);
+ return ret;
+ }
+ }
+
+ pci_set_drvdata(pdev, bridge);
+ err = agp_add_bridge(bridge);
+ if (err < 0)
+ return err;
+
+ agp_bridges_found++;
+ return 0;
+}
+
+static void agp_amd64_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ release_mem_region(virt_to_phys(bridge->gatt_table_real),
+ amd64_aperture_sizes[bridge->aperture_size_idx].size);
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+
+ agp_bridges_found--;
+}
+
+static int agp_amd64_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (pdev->vendor == PCI_VENDOR_ID_NVIDIA)
+ nforce3_agp_init(pdev);
+
+ return amd_8151_configure();
+}
+
+static const struct pci_device_id agp_amd64_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_8151_0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* ULi M1689 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AL,
+ .device = PCI_DEVICE_ID_AL_M1689,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* VIA K8T800Pro */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_K8T800PRO_0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* VIA K8T800 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_8385_0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* VIA K8M800 / K8N800 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_8380_0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* VIA K8M890 / K8N890 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_VT3336,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* VIA K8T890 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_3238_0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* VIA K8T800/K8M800/K8N800 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_838X_1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* NForce3 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NFORCE3,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NFORCE3S,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* SIS 755 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_755,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* SIS 760 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_760,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* ALI/ULI M1695 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AL,
+ .device = 0x1695,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
+
+static const struct pci_device_id agp_amd64_pci_promisc_table[] = {
+ { PCI_DEVICE_CLASS(0, 0) },
+ { }
+};
+
+static DEFINE_SIMPLE_DEV_PM_OPS(agp_amd64_pm_ops, NULL, agp_amd64_resume);
+
+static struct pci_driver agp_amd64_pci_driver = {
+ .name = "agpgart-amd64",
+ .id_table = agp_amd64_pci_table,
+ .probe = agp_amd64_probe,
+ .remove = agp_amd64_remove,
+ .driver.pm = &agp_amd64_pm_ops,
+};
+
+
+/* Not static due to IOMMU code calling it early. */
+int __init agp_amd64_init(void)
+{
+ int err = 0;
+
+ if (agp_off)
+ return -EINVAL;
+
+ err = pci_register_driver(&agp_amd64_pci_driver);
+ if (err < 0)
+ return err;
+
+ if (agp_bridges_found == 0) {
+ if (!agp_try_unsupported && !agp_try_unsupported_boot) {
+ printk(KERN_INFO PFX "No supported AGP bridge found.\n");
+#ifdef MODULE
+ printk(KERN_INFO PFX "You can try agp_try_unsupported=1\n");
+#else
+ printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
+#endif
+ pci_unregister_driver(&agp_amd64_pci_driver);
+ return -ENODEV;
+ }
+
+ /* First check that we have at least one AMD64 NB */
+ if (!amd_nb_num()) {
+ pci_unregister_driver(&agp_amd64_pci_driver);
+ return -ENODEV;
+ }
+
+ /* Look for any AGP bridge */
+ agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
+ err = driver_attach(&agp_amd64_pci_driver.driver);
+ if (err == 0 && agp_bridges_found == 0) {
+ pci_unregister_driver(&agp_amd64_pci_driver);
+ err = -ENODEV;
+ }
+ }
+ return err;
+}
+
+static int __init agp_amd64_mod_init(void)
+{
+#ifndef MODULE
+ if (gart_iommu_aperture)
+ return agp_bridges_found ? 0 : -ENODEV;
+#endif
+ return agp_amd64_init();
+}
+
+static void __exit agp_amd64_cleanup(void)
+{
+#ifndef MODULE
+ if (gart_iommu_aperture)
+ return;
+#endif
+ if (aperture_resource)
+ release_resource(aperture_resource);
+ pci_unregister_driver(&agp_amd64_pci_driver);
+}
+
+module_init(agp_amd64_mod_init);
+module_exit(agp_amd64_cleanup);
+
+MODULE_AUTHOR("Dave Jones, Andi Kleen");
+module_param(agp_try_unsupported, bool, 0);
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
new file mode 100644
index 0000000000..3c1fce48aa
--- /dev/null
+++ b/drivers/char/agp/ati-agp.c
@@ -0,0 +1,576 @@
+/*
+ * ATi AGPGART routines.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/agp_backend.h>
+#include <asm/agp.h>
+#include <asm/set_memory.h>
+#include "agp.h"
+
+#define ATI_GART_MMBASE_BAR 1
+#define ATI_RS100_APSIZE 0xac
+#define ATI_RS100_IG_AGPMODE 0xb0
+#define ATI_RS300_APSIZE 0xf8
+#define ATI_RS300_IG_AGPMODE 0xfc
+#define ATI_GART_FEATURE_ID 0x00
+#define ATI_GART_BASE 0x04
+#define ATI_GART_CACHE_SZBASE 0x08
+#define ATI_GART_CACHE_CNTRL 0x0c
+#define ATI_GART_CACHE_ENTRY_CNTRL 0x10
+
+
+static const struct aper_size_info_lvl2 ati_generic_sizes[7] =
+{
+ {2048, 524288, 0x0000000c},
+ {1024, 262144, 0x0000000a},
+ {512, 131072, 0x00000008},
+ {256, 65536, 0x00000006},
+ {128, 32768, 0x00000004},
+ {64, 16384, 0x00000002},
+ {32, 8192, 0x00000000}
+};
+
+static struct gatt_mask ati_generic_masks[] =
+{
+ { .mask = 1, .type = 0}
+};
+
+
+struct ati_page_map {
+ unsigned long *real;
+ unsigned long __iomem *remapped;
+};
+
+static struct _ati_generic_private {
+ volatile u8 __iomem *registers;
+ struct ati_page_map **gatt_pages;
+ int num_tables;
+} ati_generic_private;
+
+static int ati_create_page_map(struct ati_page_map *page_map)
+{
+ int i, err;
+
+ page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+ if (page_map->real == NULL)
+ return -ENOMEM;
+
+ set_memory_uc((unsigned long)page_map->real, 1);
+ err = map_page_into_agp(virt_to_page(page_map->real));
+ if (err) {
+ free_page((unsigned long)page_map->real);
+ return err;
+ }
+ page_map->remapped = page_map->real;
+
+ for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
+ writel(agp_bridge->scratch_page, page_map->remapped+i);
+ readl(page_map->remapped+i); /* PCI Posting. */
+ }
+
+ return 0;
+}
+
+
+static void ati_free_page_map(struct ati_page_map *page_map)
+{
+ unmap_page_from_agp(virt_to_page(page_map->real));
+ set_memory_wb((unsigned long)page_map->real, 1);
+ free_page((unsigned long) page_map->real);
+}
+
+
+static void ati_free_gatt_pages(void)
+{
+ int i;
+ struct ati_page_map **tables;
+ struct ati_page_map *entry;
+
+ tables = ati_generic_private.gatt_pages;
+ for (i = 0; i < ati_generic_private.num_tables; i++) {
+ entry = tables[i];
+ if (entry != NULL) {
+ if (entry->real != NULL)
+ ati_free_page_map(entry);
+ kfree(entry);
+ }
+ }
+ kfree(tables);
+}
+
+
+static int ati_create_gatt_pages(int nr_tables)
+{
+ struct ati_page_map **tables;
+ struct ati_page_map *entry;
+ int retval = 0;
+ int i;
+
+ tables = kcalloc(nr_tables + 1, sizeof(struct ati_page_map *),
+ GFP_KERNEL);
+ if (tables == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_tables; i++) {
+ entry = kzalloc(sizeof(struct ati_page_map), GFP_KERNEL);
+ tables[i] = entry;
+ if (entry == NULL) {
+ retval = -ENOMEM;
+ break;
+ }
+ retval = ati_create_page_map(entry);
+ if (retval != 0)
+ break;
+ }
+ ati_generic_private.num_tables = i;
+ ati_generic_private.gatt_pages = tables;
+
+ if (retval != 0)
+ ati_free_gatt_pages();
+
+ return retval;
+}
+
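+/* Pre-RS300 IGPs (RS100..RS250) use the older register layout */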
+static int is_r200(void)
+{
+ if ((agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS100) ||
+ (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200) ||
+ (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200_B) ||
+ (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS250))
+ return 1;
+ return 0;
+}
+
+static int ati_fetch_size(void)
+{
+ int i;
+ u32 temp;
+ struct aper_size_info_lvl2 *values;
+
+ if (is_r200())
+ pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
+ else
+ pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
+
+ temp = (temp & 0x0000000e);
+ values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static void ati_tlbflush(struct agp_memory * mem)
+{
+ writel(1, ati_generic_private.registers+ATI_GART_CACHE_CNTRL);
+ readl(ati_generic_private.registers+ATI_GART_CACHE_CNTRL); /* PCI Posting. */
+}
+
+static void ati_cleanup(void)
+{
+ struct aper_size_info_lvl2 *previous_size;
+ u32 temp;
+
+ previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
+
+ /* Write back the previous size and disable gart translation */
+ if (is_r200()) {
+ pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
+ temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
+ pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp);
+ } else {
+ pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
+ temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
+ pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp);
+ }
+ iounmap((volatile u8 __iomem *)ati_generic_private.registers);
+}
+
+
+static int ati_configure(void)
+{
+ phys_addr_t reg;
+ u32 temp;
+
+ /* Get the memory mapped registers */
+ reg = pci_resource_start(agp_bridge->dev, ATI_GART_MMBASE_BAR);
+ ati_generic_private.registers = (volatile u8 __iomem *) ioremap(reg, 4096);
+
+ if (!ati_generic_private.registers)
+ return -ENOMEM;
+
+ if (is_r200())
+ pci_write_config_dword(agp_bridge->dev, ATI_RS100_IG_AGPMODE, 0x20000);
+ else
+ pci_write_config_dword(agp_bridge->dev, ATI_RS300_IG_AGPMODE, 0x20000);
+
+ /* address to map to */
+ /*
+ agp_bridge.gart_bus_addr = pci_bus_address(agp_bridge.dev,
+ AGP_APERTURE_BAR);
+ printk(KERN_INFO PFX "IGP320 gart_bus_addr: %x\n", agp_bridge.gart_bus_addr);
+ */
+ writel(0x60000, ati_generic_private.registers+ATI_GART_FEATURE_ID);
+ readl(ati_generic_private.registers+ATI_GART_FEATURE_ID); /* PCI Posting.*/
+
+ /* SIGNALED_SYSTEM_ERROR @ NB_STATUS */
+ pci_read_config_dword(agp_bridge->dev, PCI_COMMAND, &temp);
+ pci_write_config_dword(agp_bridge->dev, PCI_COMMAND, temp | (1<<14));
+
+ /* Write out the address of the gatt table */
+ writel(agp_bridge->gatt_bus_addr, ati_generic_private.registers+ATI_GART_BASE);
+ readl(ati_generic_private.registers+ATI_GART_BASE); /* PCI Posting. */
+
+ return 0;
+}
+
+
+static int agp_ati_resume(struct device *dev)
+{
+ return ati_configure();
+}
+
+/*
+ * Since we don't need contiguous memory we just try
+ * to get the gatt table once
+ */
+
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#undef GET_GATT
+#define GET_GATT(addr) (ati_generic_private.gatt_pages[\
+ GET_PAGE_DIR_IDX(addr)]->remapped)
+
+static int ati_insert_memory(struct agp_memory * mem,
+ off_t pg_start, int type)
+{
+ int i, j, num_entries;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+ int mask_type;
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+ mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
+ if (mask_type != 0 || type != mem->type)
+ return -EINVAL;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ j = pg_start;
+ while (j < (pg_start + mem->page_count)) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ if (!PGE_EMPTY(agp_bridge,readl(cur_gatt+GET_GATT_OFF(addr))))
+ return -EBUSY;
+ j++;
+ }
+
+ if (!mem->is_flushed) {
+ /*CACHE_FLUSH(); */
+ global_cache_flush();
+ mem->is_flushed = true;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]),
+ mem->type),
+ cur_gatt+GET_GATT_OFF(addr));
+ }
+ readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int ati_remove_memory(struct agp_memory * mem, off_t pg_start,
+ int type)
+{
+ int i;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+ int mask_type;
+
+ mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
+ if (mask_type != 0 || type != mem->type)
+ return -EINVAL;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ }
+
+ readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int ati_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct aper_size_info_lvl2 *value;
+ struct ati_page_map page_dir;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+ int retval;
+ u32 temp;
+ int i;
+ struct aper_size_info_lvl2 *current_size;
+
+ value = A_SIZE_LVL2(agp_bridge->current_size);
+ retval = ati_create_page_map(&page_dir);
+ if (retval != 0)
+ return retval;
+
+ retval = ati_create_gatt_pages(value->num_entries / 1024);
+ if (retval != 0) {
+ ati_free_page_map(&page_dir);
+ return retval;
+ }
+
+ agp_bridge->gatt_table_real = (u32 *)page_dir.real;
+ agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped;
+ agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
+
+ /* Write out the size register */
+ current_size = A_SIZE_LVL2(agp_bridge->current_size);
+
+ if (is_r200()) {
+ pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
+ temp = (((temp & ~(0x0000000e)) | current_size->size_value)
+ | 0x00000001);
+ pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp);
+ pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
+ } else {
+ pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
+ temp = (((temp & ~(0x0000000e)) | current_size->size_value)
+ | 0x00000001);
+ pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp);
+ pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
+ }
+
+ /*
+ * Get the address for the gart region.
+ * This is a bus address even on the alpha, b/c it's
+ * used to program the agp master, not the cpu.
+ */
+ addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR);
+ agp_bridge->gart_bus_addr = addr;
+
+ /* Calculate the agp offset */
+ for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
+ writel(virt_to_phys(ati_generic_private.gatt_pages[i]->real) | 1,
+ page_dir.remapped+GET_PAGE_DIR_OFF(addr));
+ readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
+ }
+
+ for (i = 0; i < value->num_entries; i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ }
+
+ return 0;
+}
+
+static int ati_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct ati_page_map page_dir;
+
+ page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
+ page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
+
+ ati_free_gatt_pages();
+ ati_free_page_map(&page_dir);
+ return 0;
+}
+
+static const struct agp_bridge_driver ati_generic_bridge = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = ati_generic_sizes,
+ .size_type = LVL2_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = ati_configure,
+ .fetch_size = ati_fetch_size,
+ .cleanup = ati_cleanup,
+ .tlb_flush = ati_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = ati_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = ati_create_gatt_table,
+ .free_gatt_table = ati_free_gatt_table,
+ .insert_memory = ati_insert_memory,
+ .remove_memory = ati_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+
+static struct agp_device_ids ati_agp_device_ids[] =
+{
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS100,
+ .chipset_name = "IGP320/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS200,
+ .chipset_name = "IGP330/340/345/350/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS200_B,
+ .chipset_name = "IGP345M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS250,
+ .chipset_name = "IGP7000/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS300_100,
+ .chipset_name = "IGP9100/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS300_133,
+ .chipset_name = "IGP9100/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS300_166,
+ .chipset_name = "IGP9100/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS300_200,
+ .chipset_name = "IGP9100/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS350_133,
+ .chipset_name = "IGP9000/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS350_200,
+ .chipset_name = "IGP9100/M",
+ },
+ { }, /* dummy final entry, always present */
+};
+
+static int agp_ati_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct agp_device_ids *devs = ati_agp_device_ids;
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+ int j;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ /* probe for known chipsets */
+ for (j = 0; devs[j].chipset_name; j++) {
+ if (pdev->device == devs[j].device_id)
+ goto found;
+ }
+
+ dev_err(&pdev->dev, "unsupported Ati chipset [%04x/%04x])\n",
+ pdev->vendor, pdev->device);
+ return -ENODEV;
+
+found:
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ bridge->driver = &ati_generic_bridge;
+
+ dev_info(&pdev->dev, "Ati %s chipset\n", devs[j].chipset_name);
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void agp_ati_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static const struct pci_device_id agp_ati_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_ATI,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_ati_pci_table);
+
+static DEFINE_SIMPLE_DEV_PM_OPS(agp_ati_pm_ops, NULL, agp_ati_resume);
+
+static struct pci_driver agp_ati_pci_driver = {
+ .name = "agpgart-ati",
+ .id_table = agp_ati_pci_table,
+ .probe = agp_ati_probe,
+ .remove = agp_ati_remove,
+ .driver.pm = &agp_ati_pm_ops,
+};
+
+static int __init agp_ati_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_ati_pci_driver);
+}
+
+static void __exit agp_ati_cleanup(void)
+{
+ pci_unregister_driver(&agp_ati_pci_driver);
+}
+
+module_init(agp_ati_init);
+module_exit(agp_ati_cleanup);
+
+MODULE_AUTHOR("Dave Jones");
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
new file mode 100644
index 0000000000..0e19c600db
--- /dev/null
+++ b/drivers/char/agp/backend.c
@@ -0,0 +1,368 @@
+/*
+ * AGPGART driver backend routines.
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2003 Dave Jones.
+ * Copyright (C) 1999 Jeff Hartmann.
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, DAVE JONES, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO:
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/pm.h>
+#include <linux/agp_backend.h>
+#include <linux/agpgart.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+#include "agp.h"
+
+/* Due to XFree86 brain-damage, we can't go to 1.0 until they
+ * fix some real stupidity. It's only by chance we can bump
+ * past 0.99 at all due to some boolean logic error. */
+#define AGPGART_VERSION_MAJOR 0
+#define AGPGART_VERSION_MINOR 103
+static const struct agp_version agp_current_version =
+{
+ .major = AGPGART_VERSION_MAJOR,
+ .minor = AGPGART_VERSION_MINOR,
+};
+
+struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *) =
+ &agp_generic_find_bridge;
+
+struct agp_bridge_data *agp_bridge;
+LIST_HEAD(agp_bridges);
+EXPORT_SYMBOL(agp_bridge);
+EXPORT_SYMBOL(agp_bridges);
+EXPORT_SYMBOL(agp_find_bridge);
+
+/**
+ * agp_backend_acquire - attempt to acquire an agp backend.
+ * @pdev: the PCI device
+ *
+ */
+struct agp_bridge_data *agp_backend_acquire(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge;
+
+ bridge = agp_find_bridge(pdev);
+
+ if (!bridge)
+ return NULL;
+
+ if (atomic_read(&bridge->agp_in_use))
+ return NULL;
+ atomic_inc(&bridge->agp_in_use);
+ return bridge;
+}
+EXPORT_SYMBOL(agp_backend_acquire);
+
+
+/**
+ * agp_backend_release - release the lock on the agp backend.
+ * @bridge: the AGP backend to release
+ *
+ * The caller must ensure that the graphics aperture translation table
+ * is ready for use by another entity.
+ *
+ * (Ensure that all memory it bound is unbound.)
+ */
+void agp_backend_release(struct agp_bridge_data *bridge)
+{
+
+ if (bridge)
+ atomic_dec(&bridge->agp_in_use);
+}
+EXPORT_SYMBOL(agp_backend_release);
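/*
 * Editor's note - usage sketch only, not part of the upstream source.
 * A client of the backend (a DRM driver, for instance) is expected to
 * bracket its use of the GART with the two calls exported above:
 *
 *	struct agp_bridge_data *bridge = agp_backend_acquire(pdev);
 *
 *	if (!bridge)
 *		return -ENODEV;	// no bridge found, or already in use
 *	// ...allocate, bind and map AGP memory...
 *	agp_backend_release(bridge);
 */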
+
+
+static const struct { int mem, agp; } maxes_table[] = {
+ {0, 0},
+ {32, 4},
+ {64, 28},
+ {128, 96},
+ {256, 204},
+ {512, 440},
+ {1024, 942},
+ {2048, 1920},
+ {4096, 3932}
+};
+
+static int agp_find_max(void)
+{
+ long memory, index, result;
+
+#if PAGE_SHIFT < 20
+ memory = totalram_pages() >> (20 - PAGE_SHIFT);
+#else
+ memory = totalram_pages() << (PAGE_SHIFT - 20);
+#endif
+ index = 1;
+
+ while ((memory > maxes_table[index].mem) && (index < 8))
+ index++;
+
+ result = maxes_table[index - 1].agp +
+ ( (memory - maxes_table[index - 1].mem) *
+ (maxes_table[index].agp - maxes_table[index - 1].agp)) /
+ (maxes_table[index].mem - maxes_table[index - 1].mem);
+
+ result = result << (20 - PAGE_SHIFT);
+ return result;
+}
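/*
 * Editor's note (worked example, not part of the upstream source):
 * agp_find_max() interpolates linearly between neighbouring rows of
 * maxes_table. With 768 MiB of system RAM the bracketing rows are
 * {512, 440} and {1024, 942}, so
 *
 *	result = 440 + (768 - 512) * (942 - 440) / (1024 - 512)
 *	       = 440 + 256 * 502 / 512
 *	       = 691 MiB,
 *
 * which the final shift converts to a page count (691 << (20 - PAGE_SHIFT)).
 */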
+
+
+static int agp_backend_initialize(struct agp_bridge_data *bridge)
+{
+ int size_value, rc, got_gatt=0, got_keylist=0;
+
+ bridge->max_memory_agp = agp_find_max();
+ bridge->version = &agp_current_version;
+
+ if (bridge->driver->needs_scratch_page) {
+ struct page *page = bridge->driver->agp_alloc_page(bridge);
+
+ if (!page) {
+ dev_err(&bridge->dev->dev,
+ "can't get memory for scratch page\n");
+ return -ENOMEM;
+ }
+
+ bridge->scratch_page_page = page;
+ bridge->scratch_page_dma = page_to_phys(page);
+
+ bridge->scratch_page = bridge->driver->mask_memory(bridge,
+ bridge->scratch_page_dma, 0);
+ }
+
+ size_value = bridge->driver->fetch_size();
+ if (size_value == 0) {
+ dev_err(&bridge->dev->dev, "can't determine aperture size\n");
+ rc = -EINVAL;
+ goto err_out;
+ }
+ if (bridge->driver->create_gatt_table(bridge)) {
+ dev_err(&bridge->dev->dev,
+ "can't get memory for graphics translation table\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ got_gatt = 1;
+
+ bridge->key_list = vzalloc(PAGE_SIZE * 4);
+ if (bridge->key_list == NULL) {
+ dev_err(&bridge->dev->dev,
+ "can't allocate memory for key lists\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ got_keylist = 1;
+
+ /* FIXME vmalloc'd memory not guaranteed contiguous */
+
+ if (bridge->driver->configure()) {
+ dev_err(&bridge->dev->dev, "error configuring host chipset\n");
+ rc = -EINVAL;
+ goto err_out;
+ }
+ INIT_LIST_HEAD(&bridge->mapped_list);
+ spin_lock_init(&bridge->mapped_lock);
+
+ return 0;
+
+err_out:
+ if (bridge->driver->needs_scratch_page) {
+ struct page *page = bridge->scratch_page_page;
+
+ bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP);
+ bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE);
+ }
+ if (got_gatt)
+ bridge->driver->free_gatt_table(bridge);
+ if (got_keylist) {
+ vfree(bridge->key_list);
+ bridge->key_list = NULL;
+ }
+ return rc;
+}
+
+/* cannot be __exit because it could be called from __init code */
+static void agp_backend_cleanup(struct agp_bridge_data *bridge)
+{
+ if (bridge->driver->cleanup)
+ bridge->driver->cleanup();
+ if (bridge->driver->free_gatt_table)
+ bridge->driver->free_gatt_table(bridge);
+
+ vfree(bridge->key_list);
+ bridge->key_list = NULL;
+
+ if (bridge->driver->agp_destroy_page &&
+ bridge->driver->needs_scratch_page) {
+ struct page *page = bridge->scratch_page_page;
+
+ bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP);
+ bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE);
+ }
+}
+
+/* When we remove the global variable agp_bridge from all drivers,
+ * agp_alloc_bridge and agp_generic_find_bridge will need to be updated.
+ */
+
+struct agp_bridge_data *agp_alloc_bridge(void)
+{
+ struct agp_bridge_data *bridge;
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return NULL;
+
+ atomic_set(&bridge->agp_in_use, 0);
+ atomic_set(&bridge->current_memory_agp, 0);
+
+ if (list_empty(&agp_bridges))
+ agp_bridge = bridge;
+
+ return bridge;
+}
+EXPORT_SYMBOL(agp_alloc_bridge);
+
+
+void agp_put_bridge(struct agp_bridge_data *bridge)
+{
+ kfree(bridge);
+
+ if (list_empty(&agp_bridges))
+ agp_bridge = NULL;
+}
+EXPORT_SYMBOL(agp_put_bridge);
+
+
+int agp_add_bridge(struct agp_bridge_data *bridge)
+{
+ int error;
+
+ if (agp_off) {
+ error = -ENODEV;
+ goto err_put_bridge;
+ }
+
+ if (!bridge->dev) {
+ printk (KERN_DEBUG PFX "Erk, registering with no pci_dev!\n");
+ error = -EINVAL;
+ goto err_put_bridge;
+ }
+
+ /* Grab reference on the chipset driver. */
+ if (!try_module_get(bridge->driver->owner)) {
+ dev_info(&bridge->dev->dev, "can't lock chipset driver\n");
+ error = -EINVAL;
+ goto err_put_bridge;
+ }
+
+ error = agp_backend_initialize(bridge);
+ if (error) {
+ dev_info(&bridge->dev->dev,
+ "agp_backend_initialize() failed\n");
+ goto err_out;
+ }
+
+ if (list_empty(&agp_bridges)) {
+ error = agp_frontend_initialize();
+ if (error) {
+ dev_info(&bridge->dev->dev,
+ "agp_frontend_initialize() failed\n");
+ goto frontend_err;
+ }
+
+ dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n",
+ bridge->driver->fetch_size(), bridge->gart_bus_addr);
+
+ }
+
+ list_add(&bridge->list, &agp_bridges);
+ return 0;
+
+frontend_err:
+ agp_backend_cleanup(bridge);
+err_out:
+ module_put(bridge->driver->owner);
+err_put_bridge:
+ agp_put_bridge(bridge);
+ return error;
+}
+EXPORT_SYMBOL_GPL(agp_add_bridge);
+
+
+void agp_remove_bridge(struct agp_bridge_data *bridge)
+{
+ agp_backend_cleanup(bridge);
+ list_del(&bridge->list);
+ if (list_empty(&agp_bridges))
+ agp_frontend_cleanup();
+ module_put(bridge->driver->owner);
+}
+EXPORT_SYMBOL_GPL(agp_remove_bridge);
+
+int agp_off;
+int agp_try_unsupported_boot;
+EXPORT_SYMBOL(agp_off);
+EXPORT_SYMBOL(agp_try_unsupported_boot);
+
+static int __init agp_init(void)
+{
+ if (!agp_off)
+ printk(KERN_INFO "Linux agpgart interface v%d.%d\n",
+ AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
+ return 0;
+}
+
+static void __exit agp_exit(void)
+{
+}
+
+#ifndef MODULE
+static __init int agp_setup(char *s)
+{
+ if (!strcmp(s,"off"))
+ agp_off = 1;
+ if (!strcmp(s,"try_unsupported"))
+ agp_try_unsupported_boot = 1;
+ return 1;
+}
+__setup("agp=", agp_setup);
+#endif
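/*
 * Editor's note (usage sketch, not part of the upstream source): when
 * agpgart is built in, the __setup() hook above parses the "agp=" kernel
 * command-line option, e.g.
 *
 *	agp=off			disable agpgart completely
 *	agp=try_unsupported	let bridge drivers try unknown chipsets
 *
 * agp_setup() compares with plain strcmp(), so the value must match
 * exactly; any other string leaves both flags at their defaults.
 */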
+
+MODULE_AUTHOR("Dave Jones, Jeff Hartmann");
+MODULE_DESCRIPTION("AGP GART driver");
+MODULE_LICENSE("GPL and additional rights");
+MODULE_ALIAS_MISCDEV(AGPGART_MINOR);
+
+module_init(agp_init);
+module_exit(agp_exit);
+
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
new file mode 100644
index 0000000000..52ffe1706c
--- /dev/null
+++ b/drivers/char/agp/compat_ioctl.c
@@ -0,0 +1,291 @@
+/*
+ * AGPGART driver frontend compatibility ioctls
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2003 Dave Jones
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/agpgart.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "agp.h"
+#include "compat_ioctl.h"
+
+static int compat_agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_info32 userinfo;
+ struct agp_kern_info kerninfo;
+
+ agp_copy_info(agp_bridge, &kerninfo);
+
+ userinfo.version.major = kerninfo.version.major;
+ userinfo.version.minor = kerninfo.version.minor;
+ userinfo.bridge_id = kerninfo.device->vendor |
+ (kerninfo.device->device << 16);
+ userinfo.agp_mode = kerninfo.mode;
+ userinfo.aper_base = (compat_long_t)kerninfo.aper_base;
+ userinfo.aper_size = kerninfo.aper_size;
+ userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
+ userinfo.pg_used = kerninfo.current_memory;
+
+ if (copy_to_user(arg, &userinfo, sizeof(userinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_region32 ureserve;
+ struct agp_region kreserve;
+ struct agp_client *client;
+ struct agp_file_private *client_priv;
+
+ DBG("");
+ if (copy_from_user(&ureserve, arg, sizeof(ureserve)))
+ return -EFAULT;
+
+ if ((unsigned) ureserve.seg_count >= ~0U/sizeof(struct agp_segment32))
+ return -EFAULT;
+
+ kreserve.pid = ureserve.pid;
+ kreserve.seg_count = ureserve.seg_count;
+
+ client = agp_find_client_by_pid(kreserve.pid);
+
+ if (kreserve.seg_count == 0) {
+ /* remove a client */
+ client_priv = agp_find_private(kreserve.pid);
+
+ if (client_priv != NULL) {
+ set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
+ }
+ if (client == NULL) {
+ /* client is already removed */
+ return 0;
+ }
+ return agp_remove_client(kreserve.pid);
+ } else {
+ struct agp_segment32 *usegment;
+ struct agp_segment *ksegment;
+ int seg;
+
+ if (ureserve.seg_count >= 16384)
+ return -EINVAL;
+
+ usegment = kmalloc_array(ureserve.seg_count,
+ sizeof(*usegment),
+ GFP_KERNEL);
+ if (!usegment)
+ return -ENOMEM;
+
+ ksegment = kmalloc_array(kreserve.seg_count,
+ sizeof(*ksegment),
+ GFP_KERNEL);
+ if (!ksegment) {
+ kfree(usegment);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
+ sizeof(*usegment) * ureserve.seg_count)) {
+ kfree(usegment);
+ kfree(ksegment);
+ return -EFAULT;
+ }
+
+ for (seg = 0; seg < ureserve.seg_count; seg++) {
+ ksegment[seg].pg_start = usegment[seg].pg_start;
+ ksegment[seg].pg_count = usegment[seg].pg_count;
+ ksegment[seg].prot = usegment[seg].prot;
+ }
+
+ kfree(usegment);
+ kreserve.seg_list = ksegment;
+
+ if (client == NULL) {
+ /* Create the client and add the segment */
+ client = agp_create_client(kreserve.pid);
+
+ if (client == NULL) {
+ kfree(ksegment);
+ return -ENOMEM;
+ }
+ client_priv = agp_find_private(kreserve.pid);
+
+ if (client_priv != NULL) {
+ set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
+ }
+ }
+ return agp_create_segment(client, &kreserve);
+ }
+ /* Will never really happen */
+ return -EINVAL;
+}
+
+static int compat_agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_memory *memory;
+ struct agp_allocate32 alloc;
+
+ DBG("");
+ if (copy_from_user(&alloc, arg, sizeof(alloc)))
+ return -EFAULT;
+
+ memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
+
+ if (memory == NULL)
+ return -ENOMEM;
+
+ alloc.key = memory->key;
+ alloc.physical = memory->physical;
+
+ if (copy_to_user(arg, &alloc, sizeof(alloc))) {
+ agp_free_memory_wrap(memory);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int compat_agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_bind32 bind_info;
+ struct agp_memory *memory;
+
+ DBG("");
+ if (copy_from_user(&bind_info, arg, sizeof(bind_info)))
+ return -EFAULT;
+
+ memory = agp_find_mem_by_key(bind_info.key);
+
+ if (memory == NULL)
+ return -EINVAL;
+
+ return agp_bind_memory(memory, bind_info.pg_start);
+}
+
+static int compat_agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_memory *memory;
+ struct agp_unbind32 unbind;
+
+ DBG("");
+ if (copy_from_user(&unbind, arg, sizeof(unbind)))
+ return -EFAULT;
+
+ memory = agp_find_mem_by_key(unbind.key);
+
+ if (memory == NULL)
+ return -EINVAL;
+
+ return agp_unbind_memory(memory);
+}
+
+long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct agp_file_private *curr_priv = file->private_data;
+ int ret_val = -ENOTTY;
+
+ mutex_lock(&(agp_fe.agp_mutex));
+
+ if ((agp_fe.current_controller == NULL) &&
+ (cmd != AGPIOC_ACQUIRE32)) {
+ ret_val = -EINVAL;
+ goto ioctl_out;
+ }
+ if ((agp_fe.backend_acquired != true) &&
+ (cmd != AGPIOC_ACQUIRE32)) {
+ ret_val = -EBUSY;
+ goto ioctl_out;
+ }
+ if (cmd != AGPIOC_ACQUIRE32) {
+ if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
+ ret_val = -EPERM;
+ goto ioctl_out;
+ }
+ /* Use the original pid of the controller,
+ * in case it's threaded */
+
+ if (agp_fe.current_controller->pid != curr_priv->my_pid) {
+ ret_val = -EBUSY;
+ goto ioctl_out;
+ }
+ }
+
+ switch (cmd) {
+ case AGPIOC_INFO32:
+ ret_val = compat_agpioc_info_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_ACQUIRE32:
+ ret_val = agpioc_acquire_wrap(curr_priv);
+ break;
+
+ case AGPIOC_RELEASE32:
+ ret_val = agpioc_release_wrap(curr_priv);
+ break;
+
+ case AGPIOC_SETUP32:
+ ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_RESERVE32:
+ ret_val = compat_agpioc_reserve_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_PROTECT32:
+ ret_val = agpioc_protect_wrap(curr_priv);
+ break;
+
+ case AGPIOC_ALLOCATE32:
+ ret_val = compat_agpioc_allocate_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_DEALLOCATE32:
+ ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
+ break;
+
+ case AGPIOC_BIND32:
+ ret_val = compat_agpioc_bind_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_UNBIND32:
+ ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_CHIPSET_FLUSH32:
+ break;
+ }
+
+ioctl_out:
+ DBG("ioctl returns %d\n", ret_val);
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return ret_val;
+}
+
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
new file mode 100644
index 0000000000..f30e0fd979
--- /dev/null
+++ b/drivers/char/agp/compat_ioctl.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AGP_COMPAT_IOCTL_H
+#define _AGP_COMPAT_IOCTL_H
+
+#include <linux/compat.h>
+#include <linux/agpgart.h>
+
+#define AGPIOC_INFO32 _IOR (AGPIOC_BASE, 0, compat_uptr_t)
+#define AGPIOC_ACQUIRE32 _IO (AGPIOC_BASE, 1)
+#define AGPIOC_RELEASE32 _IO (AGPIOC_BASE, 2)
+#define AGPIOC_SETUP32 _IOW (AGPIOC_BASE, 3, compat_uptr_t)
+#define AGPIOC_RESERVE32 _IOW (AGPIOC_BASE, 4, compat_uptr_t)
+#define AGPIOC_PROTECT32 _IOW (AGPIOC_BASE, 5, compat_uptr_t)
+#define AGPIOC_ALLOCATE32 _IOWR(AGPIOC_BASE, 6, compat_uptr_t)
+#define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t)
+#define AGPIOC_BIND32 _IOW (AGPIOC_BASE, 8, compat_uptr_t)
+#define AGPIOC_UNBIND32 _IOW (AGPIOC_BASE, 9, compat_uptr_t)
+#define AGPIOC_CHIPSET_FLUSH32 _IO (AGPIOC_BASE, 10)
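/*
 * Editor's note (explanatory only, not part of the upstream source): an
 * ioctl command number encodes the size of its argument type, so
 * defining these with compat_uptr_t / compat_int_t (4 bytes each) gives
 * the pointer-carrying commands values distinct from their native
 * 64-bit counterparts in <linux/agpgart.h>; compat_agp_ioctl() in
 * compat_ioctl.c can therefore switch on the 32-bit numbers directly.
 */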
+
+struct agp_info32 {
+ struct agp_version version; /* version of the driver */
+ u32 bridge_id; /* bridge vendor/device */
+ u32 agp_mode; /* mode info of bridge */
+ compat_long_t aper_base; /* base of aperture */
+ compat_size_t aper_size; /* size of aperture */
+ compat_size_t pg_total; /* max pages (swap + system) */
+ compat_size_t pg_system; /* max pages (system) */
+ compat_size_t pg_used; /* current pages used */
+};
+
+/*
+ * The "prot" down below needs still a "sleep" flag somehow ...
+ */
+struct agp_segment32 {
+ compat_off_t pg_start; /* starting page to populate */
+ compat_size_t pg_count; /* number of pages */
+ compat_int_t prot; /* prot flags for mmap */
+};
+
+struct agp_region32 {
+ compat_pid_t pid; /* pid of process */
+ compat_size_t seg_count; /* number of segments */
+ struct agp_segment32 *seg_list;
+};
+
+struct agp_allocate32 {
+ compat_int_t key; /* tag of allocation */
+ compat_size_t pg_count; /* number of pages */
+ u32 type; /* 0 == normal, other devspec */
+ u32 physical; /* device specific (some devices
+ * need a phys address of the
+ * actual page behind the gatt
+ * table) */
+};
+
+struct agp_bind32 {
+ compat_int_t key; /* tag of allocation */
+ compat_off_t pg_start; /* starting page to populate */
+};
+
+struct agp_unbind32 {
+ compat_int_t key; /* tag of allocation */
+ u32 priority; /* priority for paging out */
+};
+
+extern struct agp_front_data agp_fe;
+
+int agpioc_acquire_wrap(struct agp_file_private *priv);
+int agpioc_release_wrap(struct agp_file_private *priv);
+int agpioc_protect_wrap(struct agp_file_private *priv);
+int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg);
+int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg);
+struct agp_file_private *agp_find_private(pid_t pid);
+struct agp_client *agp_create_client(pid_t id);
+int agp_remove_client(pid_t id);
+int agp_create_segment(struct agp_client *client, struct agp_region *region);
+void agp_free_memory_wrap(struct agp_memory *memory);
+struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
+struct agp_memory *agp_find_mem_by_key(int key);
+struct agp_client *agp_find_client_by_pid(pid_t id);
+
+#endif /* _AGP_COMPAT_H */
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
new file mode 100644
index 0000000000..f28d423192
--- /dev/null
+++ b/drivers/char/agp/efficeon-agp.c
@@ -0,0 +1,468 @@
+/*
+ * Transmeta's Efficeon AGPGART driver.
+ *
+ * Based upon a diff by Linus around November '02.
+ *
+ * Ported to the 2.6 kernel by Carlos Puchol <cpglinux@puchol.com>
+ * and H. Peter Anvin <hpa@transmeta.com>.
+ */
+
+/*
+ * NOTE-cpg-040217:
+ *
+ * - when compiled as a module, after loading the module,
+ * it will refuse to unload, indicating it is in use,
+ * when it is not.
+ * - no s3 (suspend to ram) testing.
+ * - tested on the efficeon integrated northbridge for tens
+ * of iterations of starting x and glxgears.
+ * - tested with radeon 9000 and radeon mobility m9 cards
+ * - tested with c3/c4 enabled (with the mobility m9 card)
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/gfp.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
+#include "agp.h"
+#include "intel-agp.h"
+
+/*
+ * The real difference from the generic AGP code is
+ * in the GART mappings - a two-level setup with the
+ * first level being an on-chip 64-entry table.
+ *
+ * The page array is filled through the ATTPAGE register
+ * (Aperture Translation Table Page Register) at 0xB8. Bits:
+ * 31:20: physical page address
+ * 11:9: Page Attribute Table Index (PATI)
+ * must match the PAT index for the
+ * mapped pages (the 2nd level page table pages
+ * themselves should be just regular WB-cacheable,
+ * so this is normally zero.)
+ * 8: Present
+ * 7:6: reserved, write as zero
+ * 5:0: GATT directory index: which 1st-level entry
+ *
+ * The Efficeon AGP spec requires pages to be WB-cacheable
+ * but to be explicitly CLFLUSH'd after any changes.
+ */
+#define EFFICEON_ATTPAGE 0xb8
+#define EFFICEON_L1_SIZE 64 /* Number of PDE pages */
+
+#define EFFICEON_PATI (0 << 9)
+#define EFFICEON_PRESENT (1 << 8)
+
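/*
 * Editor's note (illustrative only, not part of the upstream source):
 * efficeon_create_gatt_table() below composes ATTPAGE values exactly as
 * the register layout above describes. For a PTE page at physical
 * address 0x12300000 installed in directory slot 3:
 *
 *	0x12300000 | EFFICEON_PATI | EFFICEON_PRESENT | 3 == 0x12300103
 *
 * i.e. the page address in the high bits, bit 8 marking the entry
 * present, and bits 5:0 selecting the first-level slot.
 */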
+static struct _efficeon_private {
+ unsigned long l1_table[EFFICEON_L1_SIZE];
+} efficeon_private;
+
+static const struct gatt_mask efficeon_generic_masks[] =
+{
+ {.mask = 0x00000001, .type = 0}
+};
+
+/* This function does the same thing as mask_memory() for this chipset... */
+static inline unsigned long efficeon_mask_memory(struct page *page)
+{
+ unsigned long addr = page_to_phys(page);
+ return addr | 0x00000001;
+}
+
+static const struct aper_size_info_lvl2 efficeon_generic_sizes[4] =
+{
+ {256, 65536, 0},
+ {128, 32768, 32},
+ {64, 16384, 48},
+ {32, 8192, 56}
+};
+
+/*
+ * Control interfaces are largely identical to
+ * the legacy Intel 440BX.
+ */
+
+static int efficeon_fetch_size(void)
+{
+ int i;
+ u16 temp;
+ struct aper_size_info_lvl2 *values;
+
+ pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp);
+ values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static void efficeon_tlbflush(struct agp_memory * mem)
+{
+ printk(KERN_DEBUG PFX "efficeon_tlbflush()\n");
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200);
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+}
+
+static void efficeon_cleanup(void)
+{
+ u16 temp;
+ struct aper_size_info_lvl2 *previous_size;
+
+ printk(KERN_DEBUG PFX "efficeon_cleanup()\n");
+ previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
+ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE,
+ previous_size->size_value);
+}
+
+static int efficeon_configure(void)
+{
+ u16 temp2;
+ struct aper_size_info_lvl2 *current_size;
+
+ printk(KERN_DEBUG PFX "efficeon_configure()\n");
+
+ current_size = A_SIZE_LVL2(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE,
+ current_size->size_value);
+
+ /* address to map to */
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+
+ /* paccfg/nbxcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG,
+ (temp2 & ~(1 << 10)) | (1 << 9) | (1 << 11));
+ /* clear any possible error conditions */
+ pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7);
+ return 0;
+}
+
+static int efficeon_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ int index, freed = 0;
+
+ for (index = 0; index < EFFICEON_L1_SIZE; index++) {
+ unsigned long page = efficeon_private.l1_table[index];
+ if (page) {
+ efficeon_private.l1_table[index] = 0;
+ free_page(page);
+ freed++;
+ }
+ printk(KERN_DEBUG PFX "efficeon_free_gatt_table(%p, %02x, %08x)\n",
+ agp_bridge->dev, EFFICEON_ATTPAGE, index);
+ pci_write_config_dword(agp_bridge->dev,
+ EFFICEON_ATTPAGE, index);
+ }
+ printk(KERN_DEBUG PFX "efficeon_free_gatt_table() freed %d pages\n", freed);
+ return 0;
+}
+
+
+/*
+ * Since we don't need contiguous memory, we just try
+ * to get the GATT table once.
+ */
+
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#undef GET_GATT
+#define GET_GATT(addr) (efficeon_private.gatt_pages[\
+ GET_PAGE_DIR_IDX(addr)]->remapped)
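/*
 * Editor's note (illustrative only, not part of the upstream source):
 * the macros above express the usual two-level split of an aperture
 * address - bits 22 and up select the directory slot, bits 21:12 the
 * entry within that GATT page. With gart_bus_addr at 0xd0000000, the
 * address 0xd0401000 gives
 *
 *	GET_PAGE_DIR_IDX(0xd0401000) = 0x341 - 0x340 = 1
 *	GET_GATT_OFF(0xd0401000)     = (0xd0401000 & 0x003ff000) >> 12 = 1
 *
 * efficeon_insert_memory() below performs the same split directly with
 * "index >> 10" and "index & 0x3ff" on the page index instead.
 */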
+
+static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ int index;
+ const int pati = EFFICEON_PATI;
+ const int present = EFFICEON_PRESENT;
+ const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3;
+ int num_entries, l1_pages;
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+ printk(KERN_DEBUG PFX "efficeon_create_gatt_table(%d)\n", num_entries);
+
+ /* There are 2^10 PTE pages per PDE page */
+ BUG_ON(num_entries & 0x3ff);
+ l1_pages = num_entries >> 10;
+
+ for (index = 0 ; index < l1_pages ; index++) {
+ int offset;
+ unsigned long page;
+ unsigned long value;
+
+ page = efficeon_private.l1_table[index];
+ BUG_ON(page);
+
+ page = get_zeroed_page(GFP_KERNEL);
+ if (!page) {
+ efficeon_free_gatt_table(agp_bridge);
+ return -ENOMEM;
+ }
+
+ for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk)
+ clflush((char *)page+offset);
+
+ efficeon_private.l1_table[index] = page;
+
+ value = virt_to_phys((unsigned long *)page) | pati | present | index;
+
+ pci_write_config_dword(agp_bridge->dev,
+ EFFICEON_ATTPAGE, value);
+ }
+
+ return 0;
+}
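/*
 * Editor's note (worked example, not part of the upstream source): the
 * clflush_chunk value used above and in efficeon_insert_memory() comes
 * from CPUID leaf 1: bits 15:8 of EBX report the CLFLUSH line size in
 * 8-byte units, so ((cpuid_ebx(1) >> 8) & 0xff) << 3 yields bytes. On a
 * CPU reporting 8 there, the chunk is 8 * 8 = 64 bytes, and with 4 KiB
 * pages each new PTE page is flushed in PAGE_SIZE / 64 = 64 clflushes.
 */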
+
+static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
+{
+ int i, count = mem->page_count, num_entries;
+ unsigned int *page, *last_page;
+ const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3;
+ const unsigned long clflush_mask = ~(clflush_chunk-1);
+
+ printk(KERN_DEBUG PFX "efficeon_insert_memory(%lx, %d)\n", pg_start, count);
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+ if (type != 0 || mem->type != 0)
+ return -EINVAL;
+
+ if (!mem->is_flushed) {
+ global_cache_flush();
+ mem->is_flushed = true;
+ }
+
+ last_page = NULL;
+ for (i = 0; i < count; i++) {
+ int index = pg_start + i;
+ unsigned long insert = efficeon_mask_memory(mem->pages[i]);
+
+ page = (unsigned int *) efficeon_private.l1_table[index >> 10];
+
+ if (!page)
+ continue;
+
+ page += (index & 0x3ff);
+ *page = insert;
+
+ /* clflush is slow, so don't clflush until we have to */
+ if (last_page &&
+ (((unsigned long)page^(unsigned long)last_page) &
+ clflush_mask))
+ clflush(last_page);
+
+ last_page = page;
+ }
+
+ if ( last_page )
+ clflush(last_page);
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int efficeon_remove_memory(struct agp_memory * mem, off_t pg_start, int type)
+{
+ int i, count = mem->page_count, num_entries;
+
+ printk(KERN_DEBUG PFX "efficeon_remove_memory(%lx, %d)\n", pg_start, count);
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+ if (type != 0 || mem->type != 0)
+ return -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ int index = pg_start + i;
+ unsigned int *page = (unsigned int *) efficeon_private.l1_table[index >> 10];
+
+ if (!page)
+ continue;
+ page += (index & 0x3ff);
+ *page = 0;
+ }
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+
+static const struct agp_bridge_driver efficeon_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = efficeon_generic_sizes,
+ .size_type = LVL2_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .configure = efficeon_configure,
+ .fetch_size = efficeon_fetch_size,
+ .cleanup = efficeon_cleanup,
+ .tlb_flush = efficeon_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = efficeon_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+
+ // Efficeon-specific GATT table setup / populate / teardown
+ .create_gatt_table = efficeon_create_gatt_table,
+ .free_gatt_table = efficeon_free_gatt_table,
+ .insert_memory = efficeon_insert_memory,
+ .remove_memory = efficeon_remove_memory,
+ .cant_use_aperture = false, // true might be faster?
+
+ // Generic
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static int agp_efficeon_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+ struct resource *r;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ /* Probe for Efficeon controller */
+ if (pdev->device != PCI_DEVICE_ID_EFFICEON) {
+ printk(KERN_ERR PFX "Unsupported Efficeon chipset (device id: %04x)\n",
+ pdev->device);
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO PFX "Detected Transmeta Efficeon TM8000 series chipset\n");
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver = &efficeon_driver;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ /*
+ * If the device has not been properly set up, the following will catch
+ * the problem and should stop the system from crashing.
+ * 20030610 - hamish@zot.org
+ */
+ if (pci_enable_device(pdev)) {
+ printk(KERN_ERR PFX "Unable to Enable PCI device\n");
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+
+ /*
+ * The following fixes the case where the BIOS has "forgotten" to
+ * provide an address range for the GART.
+ * 20030610 - hamish@zot.org
+ */
+ r = &pdev->resource[0];
+ if (!r->start && r->end) {
+ if (pci_assign_resource(pdev, 0)) {
+ printk(KERN_ERR PFX "could not assign resource 0\n");
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+ }
+
+ /* Fill in the mode register */
+ if (cap_ptr) {
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+ }
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void agp_efficeon_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static int agp_efficeon_resume(struct device *dev)
+{
+ printk(KERN_DEBUG PFX "agp_efficeon_resume()\n");
+ return efficeon_configure();
+}
+
+static const struct pci_device_id agp_efficeon_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_TRANSMETA,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+static DEFINE_SIMPLE_DEV_PM_OPS(agp_efficeon_pm_ops, NULL, agp_efficeon_resume);
+
+MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table);
+
+static struct pci_driver agp_efficeon_pci_driver = {
+ .name = "agpgart-efficeon",
+ .id_table = agp_efficeon_pci_table,
+ .probe = agp_efficeon_probe,
+ .remove = agp_efficeon_remove,
+ .driver.pm = &agp_efficeon_pm_ops,
+};
+
+static int __init agp_efficeon_init(void)
+{
+ static int agp_initialised=0;
+
+ if (agp_off)
+ return -EINVAL;
+
+ if (agp_initialised == 1)
+ return 0;
+ agp_initialised=1;
+
+ return pci_register_driver(&agp_efficeon_pci_driver);
+}
+
+static void __exit agp_efficeon_cleanup(void)
+{
+ pci_unregister_driver(&agp_efficeon_pci_driver);
+}
+
+module_init(agp_efficeon_init);
+module_exit(agp_efficeon_cleanup);
+
+MODULE_AUTHOR("Carlos Puchol <cpglinux@puchol.com>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
new file mode 100644
index 0000000000..321118a9cf
--- /dev/null
+++ b/drivers/char/agp/frontend.c
@@ -0,0 +1,1068 @@
+/*
+ * AGPGART driver frontend
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2003 Dave Jones
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pci.h>
+#include <linux/miscdevice.h>
+#include <linux/agp_backend.h>
+#include <linux/agpgart.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+
+#include "agp.h"
+#include "compat_ioctl.h"
+
+struct agp_front_data agp_fe;
+
+struct agp_memory *agp_find_mem_by_key(int key)
+{
+ struct agp_memory *curr;
+
+ if (agp_fe.current_controller == NULL)
+ return NULL;
+
+ curr = agp_fe.current_controller->pool;
+
+ while (curr != NULL) {
+ if (curr->key == key)
+ break;
+ curr = curr->next;
+ }
+
+ DBG("key=%d -> mem=%p", key, curr);
+ return curr;
+}
+
+static void agp_remove_from_pool(struct agp_memory *temp)
+{
+ struct agp_memory *prev;
+ struct agp_memory *next;
+
+ /* Check to see if this is even in the memory pool */
+
+ DBG("mem=%p", temp);
+ if (agp_find_mem_by_key(temp->key) != NULL) {
+ next = temp->next;
+ prev = temp->prev;
+
+ if (prev != NULL) {
+ prev->next = next;
+ if (next != NULL)
+ next->prev = prev;
+
+ } else {
+ /* This is the first item on the list */
+ if (next != NULL)
+ next->prev = NULL;
+
+ agp_fe.current_controller->pool = next;
+ }
+ }
+}
+
+/*
+ * Routines for managing each client's segment list -
+ * These routines handle adding and removing segments
+ * to each auth'ed client.
+ */
+
+static struct
+agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client,
+ unsigned long offset,
+ int size, pgprot_t page_prot)
+{
+ struct agp_segment_priv *seg;
+ int i;
+ off_t pg_start;
+ size_t pg_count;
+
+ pg_start = offset / 4096;
+ pg_count = size / 4096;
+ seg = *(client->segments);
+
+ for (i = 0; i < client->num_segments; i++) {
+ if ((seg[i].pg_start == pg_start) &&
+ (seg[i].pg_count == pg_count) &&
+ (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {
+ return seg + i;
+ }
+ }
+
+ return NULL;
+}
+
+static void agp_remove_seg_from_client(struct agp_client *client)
+{
+ DBG("client=%p", client);
+
+ if (client->segments != NULL) {
+ if (*(client->segments) != NULL) {
+ DBG("Freeing %p from client %p", *(client->segments), client);
+ kfree(*(client->segments));
+ }
+ DBG("Freeing %p from client %p", client->segments, client);
+ kfree(client->segments);
+ client->segments = NULL;
+ }
+}
+
+static void agp_add_seg_to_client(struct agp_client *client,
+ struct agp_segment_priv ** seg, int num_segments)
+{
+ struct agp_segment_priv **prev_seg;
+
+ prev_seg = client->segments;
+
+ if (prev_seg != NULL)
+ agp_remove_seg_from_client(client);
+
+ DBG("Adding seg %p (%d segments) to client %p", seg, num_segments, client);
+ client->num_segments = num_segments;
+ client->segments = seg;
+}
+
+static pgprot_t agp_convert_mmap_flags(int prot)
+{
+ unsigned long prot_bits;
+
+ prot_bits = calc_vm_prot_bits(prot, 0) | VM_SHARED;
+ return vm_get_page_prot(prot_bits);
+}
+
+int agp_create_segment(struct agp_client *client, struct agp_region *region)
+{
+ struct agp_segment_priv **ret_seg;
+ struct agp_segment_priv *seg;
+ struct agp_segment *user_seg;
+ size_t i;
+
+ seg = kzalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL);
+ if (seg == NULL) {
+ kfree(region->seg_list);
+ region->seg_list = NULL;
+ return -ENOMEM;
+ }
+ user_seg = region->seg_list;
+
+ for (i = 0; i < region->seg_count; i++) {
+ seg[i].pg_start = user_seg[i].pg_start;
+ seg[i].pg_count = user_seg[i].pg_count;
+ seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot);
+ }
+ kfree(region->seg_list);
+ region->seg_list = NULL;
+
+ ret_seg = kmalloc(sizeof(void *), GFP_KERNEL);
+ if (ret_seg == NULL) {
+ kfree(seg);
+ return -ENOMEM;
+ }
+ *ret_seg = seg;
+ agp_add_seg_to_client(client, ret_seg, region->seg_count);
+ return 0;
+}
+
+/* End - Routines for managing each client's segment list */
+
+/* This function must only be called when current_controller != NULL */
+static void agp_insert_into_pool(struct agp_memory * temp)
+{
+ struct agp_memory *prev;
+
+ prev = agp_fe.current_controller->pool;
+
+ if (prev != NULL) {
+ prev->prev = temp;
+ temp->next = prev;
+ }
+ agp_fe.current_controller->pool = temp;
+}
+
+
+/* File private list routines */
+
+struct agp_file_private *agp_find_private(pid_t pid)
+{
+ struct agp_file_private *curr;
+
+ curr = agp_fe.file_priv_list;
+
+ while (curr != NULL) {
+ if (curr->my_pid == pid)
+ return curr;
+ curr = curr->next;
+ }
+
+ return NULL;
+}
+
+static void agp_insert_file_private(struct agp_file_private * priv)
+{
+ struct agp_file_private *prev;
+
+ prev = agp_fe.file_priv_list;
+
+ if (prev != NULL)
+ prev->prev = priv;
+ priv->next = prev;
+ agp_fe.file_priv_list = priv;
+}
+
+static void agp_remove_file_private(struct agp_file_private * priv)
+{
+ struct agp_file_private *next;
+ struct agp_file_private *prev;
+
+ next = priv->next;
+ prev = priv->prev;
+
+ if (prev != NULL) {
+ prev->next = next;
+
+ if (next != NULL)
+ next->prev = prev;
+
+ } else {
+ if (next != NULL)
+ next->prev = NULL;
+
+ agp_fe.file_priv_list = next;
+ }
+}
+
+/* End - File private list routines */
+
+/*
+ * Wrappers for agp_free_memory & agp_allocate_memory
+ * These make sure that internal lists are kept updated.
+ */
+void agp_free_memory_wrap(struct agp_memory *memory)
+{
+ agp_remove_from_pool(memory);
+ agp_free_memory(memory);
+}
+
+struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
+{
+ struct agp_memory *memory;
+
+ memory = agp_allocate_memory(agp_bridge, pg_count, type);
+ if (memory == NULL)
+ return NULL;
+
+ agp_insert_into_pool(memory);
+ return memory;
+}
+
+/* Routines for managing the list of controllers -
+ * These routines manage the current controller, and the list of
+ * controllers
+ */
+
+static struct agp_controller *agp_find_controller_by_pid(pid_t id)
+{
+ struct agp_controller *controller;
+
+ controller = agp_fe.controllers;
+
+ while (controller != NULL) {
+ if (controller->pid == id)
+ return controller;
+ controller = controller->next;
+ }
+
+ return NULL;
+}
+
+static struct agp_controller *agp_create_controller(pid_t id)
+{
+ struct agp_controller *controller;
+
+ controller = kzalloc(sizeof(struct agp_controller), GFP_KERNEL);
+ if (controller == NULL)
+ return NULL;
+
+ controller->pid = id;
+ return controller;
+}
+
+static int agp_insert_controller(struct agp_controller *controller)
+{
+ struct agp_controller *prev_controller;
+
+ prev_controller = agp_fe.controllers;
+ controller->next = prev_controller;
+
+ if (prev_controller != NULL)
+ prev_controller->prev = controller;
+
+ agp_fe.controllers = controller;
+
+ return 0;
+}
+
+static void agp_remove_all_clients(struct agp_controller *controller)
+{
+ struct agp_client *client;
+ struct agp_client *temp;
+
+ client = controller->clients;
+
+ while (client) {
+ struct agp_file_private *priv;
+
+ temp = client;
+ agp_remove_seg_from_client(temp);
+ priv = agp_find_private(temp->pid);
+
+ if (priv != NULL) {
+ clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+ }
+ client = client->next;
+ kfree(temp);
+ }
+}
+
+static void agp_remove_all_memory(struct agp_controller *controller)
+{
+ struct agp_memory *memory;
+ struct agp_memory *temp;
+
+ memory = controller->pool;
+
+ while (memory) {
+ temp = memory;
+ memory = memory->next;
+ agp_free_memory_wrap(temp);
+ }
+}
+
+static int agp_remove_controller(struct agp_controller *controller)
+{
+ struct agp_controller *prev_controller;
+ struct agp_controller *next_controller;
+
+ prev_controller = controller->prev;
+ next_controller = controller->next;
+
+ if (prev_controller != NULL) {
+ prev_controller->next = next_controller;
+ if (next_controller != NULL)
+ next_controller->prev = prev_controller;
+
+ } else {
+ if (next_controller != NULL)
+ next_controller->prev = NULL;
+
+ agp_fe.controllers = next_controller;
+ }
+
+ agp_remove_all_memory(controller);
+ agp_remove_all_clients(controller);
+
+ if (agp_fe.current_controller == controller) {
+ agp_fe.current_controller = NULL;
+ agp_fe.backend_acquired = false;
+ agp_backend_release(agp_bridge);
+ }
+ kfree(controller);
+ return 0;
+}
+
+static void agp_controller_make_current(struct agp_controller *controller)
+{
+ struct agp_client *clients;
+
+ clients = controller->clients;
+
+ while (clients != NULL) {
+ struct agp_file_private *priv;
+
+ priv = agp_find_private(clients->pid);
+
+ if (priv != NULL) {
+ set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+ }
+ clients = clients->next;
+ }
+
+ agp_fe.current_controller = controller;
+}
+
+static void agp_controller_release_current(struct agp_controller *controller,
+ struct agp_file_private *controller_priv)
+{
+ struct agp_client *clients;
+
+ clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags);
+ clients = controller->clients;
+
+ while (clients != NULL) {
+ struct agp_file_private *priv;
+
+ priv = agp_find_private(clients->pid);
+
+ if (priv != NULL)
+ clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
+
+ clients = clients->next;
+ }
+
+ agp_fe.current_controller = NULL;
+ agp_fe.used_by_controller = false;
+ agp_backend_release(agp_bridge);
+}
+
+/*
+ * Routines for managing client lists -
+ * These routines are for managing the list of auth'ed clients.
+ */
+
+static struct agp_client
+*agp_find_client_in_controller(struct agp_controller *controller, pid_t id)
+{
+ struct agp_client *client;
+
+ if (controller == NULL)
+ return NULL;
+
+ client = controller->clients;
+
+ while (client != NULL) {
+ if (client->pid == id)
+ return client;
+ client = client->next;
+ }
+
+ return NULL;
+}
+
+static struct agp_controller *agp_find_controller_for_client(pid_t id)
+{
+ struct agp_controller *controller;
+
+ controller = agp_fe.controllers;
+
+ while (controller != NULL) {
+ if ((agp_find_client_in_controller(controller, id)) != NULL)
+ return controller;
+ controller = controller->next;
+ }
+
+ return NULL;
+}
+
+struct agp_client *agp_find_client_by_pid(pid_t id)
+{
+ struct agp_client *temp;
+
+ if (agp_fe.current_controller == NULL)
+ return NULL;
+
+ temp = agp_find_client_in_controller(agp_fe.current_controller, id);
+ return temp;
+}
+
+static void agp_insert_client(struct agp_client *client)
+{
+ struct agp_client *prev_client;
+
+ prev_client = agp_fe.current_controller->clients;
+ client->next = prev_client;
+
+ if (prev_client != NULL)
+ prev_client->prev = client;
+
+ agp_fe.current_controller->clients = client;
+ agp_fe.current_controller->num_clients++;
+}
+
+struct agp_client *agp_create_client(pid_t id)
+{
+ struct agp_client *new_client;
+
+ new_client = kzalloc(sizeof(struct agp_client), GFP_KERNEL);
+ if (new_client == NULL)
+ return NULL;
+
+ new_client->pid = id;
+ agp_insert_client(new_client);
+ return new_client;
+}
+
+int agp_remove_client(pid_t id)
+{
+ struct agp_client *client;
+ struct agp_client *prev_client;
+ struct agp_client *next_client;
+ struct agp_controller *controller;
+
+ controller = agp_find_controller_for_client(id);
+ if (controller == NULL)
+ return -EINVAL;
+
+ client = agp_find_client_in_controller(controller, id);
+ if (client == NULL)
+ return -EINVAL;
+
+ prev_client = client->prev;
+ next_client = client->next;
+
+ if (prev_client != NULL) {
+ prev_client->next = next_client;
+ if (next_client != NULL)
+ next_client->prev = prev_client;
+
+ } else {
+ if (next_client != NULL)
+ next_client->prev = NULL;
+ controller->clients = next_client;
+ }
+
+ controller->num_clients--;
+ agp_remove_seg_from_client(client);
+ kfree(client);
+ return 0;
+}
+
+/* End - Routines for managing client lists */
+
+/* File Operations */
+
+static int agp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned int size, current_size;
+ unsigned long offset;
+ struct agp_client *client;
+ struct agp_file_private *priv = file->private_data;
+ struct agp_kern_info kerninfo;
+
+ mutex_lock(&(agp_fe.agp_mutex));
+
+ if (agp_fe.backend_acquired != true)
+ goto out_eperm;
+
+ if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags)))
+ goto out_eperm;
+
+ agp_copy_info(agp_bridge, &kerninfo);
+ size = vma->vm_end - vma->vm_start;
+ current_size = kerninfo.aper_size;
+ current_size = current_size * 0x100000;
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+ DBG("%lx:%lx", offset, offset+size);
+
+ if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
+ if ((size + offset) > current_size)
+ goto out_inval;
+
+ client = agp_find_client_by_pid(current->pid);
+
+ if (client == NULL)
+ goto out_eperm;
+
+ if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot))
+ goto out_inval;
+
+ DBG("client vm_ops=%p", kerninfo.vm_ops);
+ if (kerninfo.vm_ops) {
+ vma->vm_ops = kerninfo.vm_ops;
+ } else if (io_remap_pfn_range(vma, vma->vm_start,
+ (kerninfo.aper_base + offset) >> PAGE_SHIFT,
+ size,
+ pgprot_writecombine(vma->vm_page_prot))) {
+ goto out_again;
+ }
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return 0;
+ }
+
+ if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
+ if (size != current_size)
+ goto out_inval;
+
+ DBG("controller vm_ops=%p", kerninfo.vm_ops);
+ if (kerninfo.vm_ops) {
+ vma->vm_ops = kerninfo.vm_ops;
+ } else if (io_remap_pfn_range(vma, vma->vm_start,
+ kerninfo.aper_base >> PAGE_SHIFT,
+ size,
+ pgprot_writecombine(vma->vm_page_prot))) {
+ goto out_again;
+ }
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return 0;
+ }
+
+out_eperm:
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return -EPERM;
+
+out_inval:
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return -EINVAL;
+
+out_again:
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return -EAGAIN;
+}
+
+static int agp_release(struct inode *inode, struct file *file)
+{
+ struct agp_file_private *priv = file->private_data;
+
+ mutex_lock(&(agp_fe.agp_mutex));
+
+ DBG("priv=%p", priv);
+
+ if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
+ struct agp_controller *controller;
+
+ controller = agp_find_controller_by_pid(priv->my_pid);
+
+ if (controller != NULL) {
+ if (controller == agp_fe.current_controller)
+ agp_controller_release_current(controller, priv);
+ agp_remove_controller(controller);
+ controller = NULL;
+ }
+ }
+
+ if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags))
+ agp_remove_client(priv->my_pid);
+
+ agp_remove_file_private(priv);
+ kfree(priv);
+ file->private_data = NULL;
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return 0;
+}
+
+static int agp_open(struct inode *inode, struct file *file)
+{
+ int minor = iminor(inode);
+ struct agp_file_private *priv;
+ struct agp_client *client;
+
+ if (minor != AGPGART_MINOR)
+ return -ENXIO;
+
+ mutex_lock(&(agp_fe.agp_mutex));
+
+ priv = kzalloc(sizeof(struct agp_file_private), GFP_KERNEL);
+ if (priv == NULL) {
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return -ENOMEM;
+ }
+
+ set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
+ priv->my_pid = current->pid;
+
+ if (capable(CAP_SYS_RAWIO))
+ /* Root priv, can be controller */
+ set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
+
+ client = agp_find_client_by_pid(current->pid);
+
+ if (client != NULL) {
+ set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ }
+ file->private_data = (void *) priv;
+ agp_insert_file_private(priv);
+ DBG("private=%p, client=%p", priv, client);
+
+ mutex_unlock(&(agp_fe.agp_mutex));
+
+ return 0;
+}
+
+static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_info userinfo;
+ struct agp_kern_info kerninfo;
+
+ agp_copy_info(agp_bridge, &kerninfo);
+
+ memset(&userinfo, 0, sizeof(userinfo));
+ userinfo.version.major = kerninfo.version.major;
+ userinfo.version.minor = kerninfo.version.minor;
+ userinfo.bridge_id = kerninfo.device->vendor |
+ (kerninfo.device->device << 16);
+ userinfo.agp_mode = kerninfo.mode;
+ userinfo.aper_base = kerninfo.aper_base;
+ userinfo.aper_size = kerninfo.aper_size;
+ userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
+ userinfo.pg_used = kerninfo.current_memory;
+
+ if (copy_to_user(arg, &userinfo, sizeof(struct agp_info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int agpioc_acquire_wrap(struct agp_file_private *priv)
+{
+ struct agp_controller *controller;
+
+ DBG("");
+
+ if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags)))
+ return -EPERM;
+
+ if (agp_fe.current_controller != NULL)
+ return -EBUSY;
+
+ if (!agp_bridge)
+ return -ENODEV;
+
+ if (atomic_read(&agp_bridge->agp_in_use))
+ return -EBUSY;
+
+ atomic_inc(&agp_bridge->agp_in_use);
+
+ agp_fe.backend_acquired = true;
+
+ controller = agp_find_controller_by_pid(priv->my_pid);
+
+ if (controller != NULL) {
+ agp_controller_make_current(controller);
+ } else {
+ controller = agp_create_controller(priv->my_pid);
+
+ if (controller == NULL) {
+ agp_fe.backend_acquired = false;
+ agp_backend_release(agp_bridge);
+ return -ENOMEM;
+ }
+ agp_insert_controller(controller);
+ agp_controller_make_current(controller);
+ }
+
+ set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ return 0;
+}
+
+int agpioc_release_wrap(struct agp_file_private *priv)
+{
+ DBG("");
+ agp_controller_release_current(agp_fe.current_controller, priv);
+ return 0;
+}
+
+int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_setup mode;
+
+ DBG("");
+ if (copy_from_user(&mode, arg, sizeof(struct agp_setup)))
+ return -EFAULT;
+
+ agp_enable(agp_bridge, mode.agp_mode);
+ return 0;
+}
+
+static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_region reserve;
+ struct agp_client *client;
+ struct agp_file_private *client_priv;
+
+ DBG("");
+ if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
+ return -EFAULT;
+
+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
+ return -EFAULT;
+
+ client = agp_find_client_by_pid(reserve.pid);
+
+ if (reserve.seg_count == 0) {
+ /* remove a client */
+ client_priv = agp_find_private(reserve.pid);
+
+ if (client_priv != NULL) {
+ set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
+ }
+ if (client == NULL) {
+ /* client is already removed */
+ return 0;
+ }
+ return agp_remove_client(reserve.pid);
+ } else {
+ struct agp_segment *segment;
+
+ if (reserve.seg_count >= 16384)
+ return -EINVAL;
+
+ segment = kmalloc((sizeof(struct agp_segment) * reserve.seg_count),
+ GFP_KERNEL);
+
+ if (segment == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(segment, (void __user *) reserve.seg_list,
+ sizeof(struct agp_segment) * reserve.seg_count)) {
+ kfree(segment);
+ return -EFAULT;
+ }
+ reserve.seg_list = segment;
+
+ if (client == NULL) {
+ /* Create the client and add the segment */
+ client = agp_create_client(reserve.pid);
+
+ if (client == NULL) {
+ kfree(segment);
+ return -ENOMEM;
+ }
+ client_priv = agp_find_private(reserve.pid);
+
+ if (client_priv != NULL) {
+ set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
+ }
+ }
+ return agp_create_segment(client, &reserve);
+ }
+ /* Will never really happen */
+ return -EINVAL;
+}
+
+int agpioc_protect_wrap(struct agp_file_private *priv)
+{
+ DBG("");
+ /* This function is not currently implemented */
+ return -EINVAL;
+}
+
+static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_memory *memory;
+ struct agp_allocate alloc;
+
+ DBG("");
+ if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate)))
+ return -EFAULT;
+
+ if (alloc.type >= AGP_USER_TYPES)
+ return -EINVAL;
+
+ memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
+
+ if (memory == NULL)
+ return -ENOMEM;
+
+ alloc.key = memory->key;
+ alloc.physical = memory->physical;
+
+ if (copy_to_user(arg, &alloc, sizeof(struct agp_allocate))) {
+ agp_free_memory_wrap(memory);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg)
+{
+ struct agp_memory *memory;
+
+ DBG("");
+ memory = agp_find_mem_by_key(arg);
+
+ if (memory == NULL)
+ return -EINVAL;
+
+ agp_free_memory_wrap(memory);
+ return 0;
+}
+
+static int agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_bind bind_info;
+ struct agp_memory *memory;
+
+ DBG("");
+ if (copy_from_user(&bind_info, arg, sizeof(struct agp_bind)))
+ return -EFAULT;
+
+ memory = agp_find_mem_by_key(bind_info.key);
+
+ if (memory == NULL)
+ return -EINVAL;
+
+ return agp_bind_memory(memory, bind_info.pg_start);
+}
+
+static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_memory *memory;
+ struct agp_unbind unbind;
+
+ DBG("");
+ if (copy_from_user(&unbind, arg, sizeof(struct agp_unbind)))
+ return -EFAULT;
+
+ memory = agp_find_mem_by_key(unbind.key);
+
+ if (memory == NULL)
+ return -EINVAL;
+
+ return agp_unbind_memory(memory);
+}
+
+static long agp_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct agp_file_private *curr_priv = file->private_data;
+ int ret_val = -ENOTTY;
+
+ DBG("priv=%p, cmd=%x", curr_priv, cmd);
+ mutex_lock(&(agp_fe.agp_mutex));
+
+ if ((agp_fe.current_controller == NULL) &&
+ (cmd != AGPIOC_ACQUIRE)) {
+ ret_val = -EINVAL;
+ goto ioctl_out;
+ }
+ if ((agp_fe.backend_acquired != true) &&
+ (cmd != AGPIOC_ACQUIRE)) {
+ ret_val = -EBUSY;
+ goto ioctl_out;
+ }
+ if (cmd != AGPIOC_ACQUIRE) {
+ if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
+ ret_val = -EPERM;
+ goto ioctl_out;
+ }
+ /* Use the original pid of the controller,
+ * in case it's threaded */
+
+ if (agp_fe.current_controller->pid != curr_priv->my_pid) {
+ ret_val = -EBUSY;
+ goto ioctl_out;
+ }
+ }
+
+ switch (cmd) {
+ case AGPIOC_INFO:
+ ret_val = agpioc_info_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_ACQUIRE:
+ ret_val = agpioc_acquire_wrap(curr_priv);
+ break;
+
+ case AGPIOC_RELEASE:
+ ret_val = agpioc_release_wrap(curr_priv);
+ break;
+
+ case AGPIOC_SETUP:
+ ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_RESERVE:
+ ret_val = agpioc_reserve_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_PROTECT:
+ ret_val = agpioc_protect_wrap(curr_priv);
+ break;
+
+ case AGPIOC_ALLOCATE:
+ ret_val = agpioc_allocate_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_DEALLOCATE:
+ ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
+ break;
+
+ case AGPIOC_BIND:
+ ret_val = agpioc_bind_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_UNBIND:
+ ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_CHIPSET_FLUSH:
+ break;
+ }
+
+ioctl_out:
+ DBG("ioctl returns %d\n", ret_val);
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return ret_val;
+}
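+/*
+ * Illustrative sketch only (not part of this driver): the ioctl sequence a
+ * user-space controller such as an X server would typically run against
+ * /dev/agpgart.  As enforced above, only AGPIOC_ACQUIRE may be issued before
+ * a controller exists; every other command requires the calling process to
+ * be the current controller.
+ *
+ *	int fd = open("/dev/agpgart", O_RDWR);
+ *	struct agp_info info;
+ *	struct agp_setup setup;
+ *
+ *	ioctl(fd, AGPIOC_ACQUIRE);           // become the controller
+ *	ioctl(fd, AGPIOC_INFO, &info);       // query aperture base/size and mode
+ *	setup.agp_mode = info.agp_mode;
+ *	ioctl(fd, AGPIOC_SETUP, &setup);     // enable AGP at the chosen mode
+ *	...
+ *	ioctl(fd, AGPIOC_RELEASE);
+ *	close(fd);
+ */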
+
+static const struct file_operations agp_fops =
+{
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .unlocked_ioctl = agp_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_agp_ioctl,
+#endif
+ .mmap = agp_mmap,
+ .open = agp_open,
+ .release = agp_release,
+};
+
+static struct miscdevice agp_miscdev =
+{
+ .minor = AGPGART_MINOR,
+ .name = "agpgart",
+ .fops = &agp_fops
+};
+
+int agp_frontend_initialize(void)
+{
+ memset(&agp_fe, 0, sizeof(struct agp_front_data));
+ mutex_init(&(agp_fe.agp_mutex));
+
+ if (misc_register(&agp_miscdev)) {
+ printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR);
+ return -EIO;
+ }
+ return 0;
+}
+
+void agp_frontend_cleanup(void)
+{
+ misc_deregister(&agp_miscdev);
+}
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
new file mode 100644
index 0000000000..3ffbb1c80c
--- /dev/null
+++ b/drivers/char/agp/generic.c
@@ -0,0 +1,1416 @@
+/*
+ * AGPGART driver.
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2005 Dave Jones.
+ * Copyright (C) 1999 Jeff Hartmann.
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO:
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/pm.h>
+#include <linux/agp_backend.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
+#include "agp.h"
+
+__u32 *agp_gatt_table;
+int agp_memory_reserved;
+
+/*
+ * Needed by the Nforce GART driver for the time being. Would be
+ * nice to do this some other way instead of needing this export.
+ */
+EXPORT_SYMBOL_GPL(agp_memory_reserved);
+
+/*
+ * Generic routines for handling agp_memory structures -
+ * They use the basic page allocation routines to do the brunt of the work.
+ */
+
+void agp_free_key(int key)
+{
+ if (key < 0)
+ return;
+
+ if (key < MAXKEY)
+ clear_bit(key, agp_bridge->key_list);
+}
+EXPORT_SYMBOL(agp_free_key);
+
+
+static int agp_get_key(void)
+{
+ int bit;
+
+ bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
+ if (bit < MAXKEY) {
+ set_bit(bit, agp_bridge->key_list);
+ return bit;
+ }
+ return -1;
+}
+
+/*
+ * Use kmalloc if possible for the page list. Otherwise fall back to
+ * vmalloc. This speeds things up and also saves memory for small AGP
+ * regions.
+ */
+
+void agp_alloc_page_array(size_t size, struct agp_memory *mem)
+{
+ mem->pages = kvmalloc(size, GFP_KERNEL);
+}
+EXPORT_SYMBOL(agp_alloc_page_array);
+
+static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
+{
+ struct agp_memory *new;
+ unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
+
+ if (INT_MAX/sizeof(struct page *) < num_agp_pages)
+ return NULL;
+
+ new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
+ if (new == NULL)
+ return NULL;
+
+ new->key = agp_get_key();
+
+ if (new->key < 0) {
+ kfree(new);
+ return NULL;
+ }
+
+ agp_alloc_page_array(alloc_size, new);
+
+ if (new->pages == NULL) {
+ agp_free_key(new->key);
+ kfree(new);
+ return NULL;
+ }
+ new->num_scratch_pages = 0;
+ return new;
+}
+
+struct agp_memory *agp_create_memory(int scratch_pages)
+{
+ struct agp_memory *new;
+
+ new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
+ if (new == NULL)
+ return NULL;
+
+ new->key = agp_get_key();
+
+ if (new->key < 0) {
+ kfree(new);
+ return NULL;
+ }
+
+ agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
+
+ if (new->pages == NULL) {
+ agp_free_key(new->key);
+ kfree(new);
+ return NULL;
+ }
+ new->num_scratch_pages = scratch_pages;
+ new->type = AGP_NORMAL_MEMORY;
+ return new;
+}
+EXPORT_SYMBOL(agp_create_memory);
+
+/**
+ * agp_free_memory - free memory associated with an agp_memory pointer.
+ *
+ * @curr: agp_memory pointer to be freed.
+ *
+ * It is the only function that can be called when the backend is not owned
+ * by the caller. (So it can free memory on client death.)
+ */
+void agp_free_memory(struct agp_memory *curr)
+{
+ size_t i;
+
+ if (curr == NULL)
+ return;
+
+ if (curr->is_bound)
+ agp_unbind_memory(curr);
+
+ if (curr->type >= AGP_USER_TYPES) {
+ agp_generic_free_by_type(curr);
+ return;
+ }
+
+ if (curr->type != 0) {
+ curr->bridge->driver->free_by_type(curr);
+ return;
+ }
+ if (curr->page_count != 0) {
+ if (curr->bridge->driver->agp_destroy_pages) {
+ curr->bridge->driver->agp_destroy_pages(curr);
+ } else {
+
+ for (i = 0; i < curr->page_count; i++) {
+ curr->bridge->driver->agp_destroy_page(
+ curr->pages[i],
+ AGP_PAGE_DESTROY_UNMAP);
+ }
+ for (i = 0; i < curr->page_count; i++) {
+ curr->bridge->driver->agp_destroy_page(
+ curr->pages[i],
+ AGP_PAGE_DESTROY_FREE);
+ }
+ }
+ }
+ agp_free_key(curr->key);
+ agp_free_page_array(curr);
+ kfree(curr);
+}
+EXPORT_SYMBOL(agp_free_memory);
+
+#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
+
+/**
+ * agp_allocate_memory - allocate a group of pages of a certain type.
+ *
+ * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
+ * @page_count: size_t argument of the number of pages
+ * @type: u32 argument of the type of memory to be allocated.
+ *
+ * Every AGP bridge device allows allocation of AGP_NORMAL_MEMORY, which
+ * maps to physical RAM. Any other type is device-dependent.
+ *
+ * It returns NULL whenever memory is unavailable.
+ */
+struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
+ size_t page_count, u32 type)
+{
+ int scratch_pages;
+ struct agp_memory *new;
+ size_t i;
+ int cur_memory;
+
+ if (!bridge)
+ return NULL;
+
+ cur_memory = atomic_read(&bridge->current_memory_agp);
+ if ((cur_memory + page_count > bridge->max_memory_agp) ||
+ (cur_memory + page_count < page_count))
+ return NULL;
+
+ if (type >= AGP_USER_TYPES) {
+ new = agp_generic_alloc_user(page_count, type);
+ if (new)
+ new->bridge = bridge;
+ return new;
+ }
+
+ if (type != 0) {
+ new = bridge->driver->alloc_by_type(page_count, type);
+ if (new)
+ new->bridge = bridge;
+ return new;
+ }
+
+ scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
+
+ new = agp_create_memory(scratch_pages);
+
+ if (new == NULL)
+ return NULL;
+
+ if (bridge->driver->agp_alloc_pages) {
+ if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
+ agp_free_memory(new);
+ return NULL;
+ }
+ new->bridge = bridge;
+ return new;
+ }
+
+ for (i = 0; i < page_count; i++) {
+ struct page *page = bridge->driver->agp_alloc_page(bridge);
+
+ if (page == NULL) {
+ agp_free_memory(new);
+ return NULL;
+ }
+ new->pages[i] = page;
+ new->page_count++;
+ }
+ new->bridge = bridge;
+
+ return new;
+}
+EXPORT_SYMBOL(agp_allocate_memory);
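+
+/*
+ * Minimal in-kernel usage sketch (illustrative only; "bridge" is assumed to
+ * have been obtained via agp_backend_acquire()): allocate 16 pages of
+ * ordinary AGP memory and release them again.  Type AGP_NORMAL_MEMORY (0)
+ * takes the generic path above; other types are routed to the chipset
+ * driver or to agp_generic_alloc_user().
+ *
+ *	struct agp_memory *mem;
+ *
+ *	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
+ *	if (!mem)
+ *		return -ENOMEM;
+ *	...
+ *	agp_free_memory(mem);
+ */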
+
+
+/* End - Generic routines for handling agp_memory structures */
+
+
+static int agp_return_size(void)
+{
+ int current_size;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+
+ switch (agp_bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ current_size = A_SIZE_8(temp)->size;
+ break;
+ case U16_APER_SIZE:
+ current_size = A_SIZE_16(temp)->size;
+ break;
+ case U32_APER_SIZE:
+ current_size = A_SIZE_32(temp)->size;
+ break;
+ case LVL2_APER_SIZE:
+ current_size = A_SIZE_LVL2(temp)->size;
+ break;
+ case FIXED_APER_SIZE:
+ current_size = A_SIZE_FIX(temp)->size;
+ break;
+ default:
+ current_size = 0;
+ break;
+ }
+
+ current_size -= (agp_memory_reserved / (1024*1024));
+ if (current_size < 0)
+ current_size = 0;
+ return current_size;
+}
+
+
+int agp_num_entries(void)
+{
+ int num_entries;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+
+ switch (agp_bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ num_entries = A_SIZE_8(temp)->num_entries;
+ break;
+ case U16_APER_SIZE:
+ num_entries = A_SIZE_16(temp)->num_entries;
+ break;
+ case U32_APER_SIZE:
+ num_entries = A_SIZE_32(temp)->num_entries;
+ break;
+ case LVL2_APER_SIZE:
+ num_entries = A_SIZE_LVL2(temp)->num_entries;
+ break;
+ case FIXED_APER_SIZE:
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+ break;
+ default:
+ num_entries = 0;
+ break;
+ }
+
+ num_entries -= agp_memory_reserved >> PAGE_SHIFT;
+ if (num_entries < 0)
+ num_entries = 0;
+ return num_entries;
+}
+EXPORT_SYMBOL_GPL(agp_num_entries);
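+
+/*
+ * Worked example of the reservation arithmetic above (illustrative numbers,
+ * assuming 4 KB pages): with a 256 MB aperture and agp_memory_reserved set
+ * to 16 MB by a chipset driver, agp_return_size() reports 256 - 16 = 240 MB
+ * and agp_num_entries() reports 65536 - (16 MB >> 12) = 61440 usable GATT
+ * entries.
+ */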
+
+
+/**
+ * agp_copy_info - copy bridge state information
+ *
+ * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
+ * @info: agp_kern_info pointer. The caller should ensure that this pointer is valid.
+ *
+ * This function copies information about the agp bridge device and the state of
+ * the agp backend into an agp_kern_info pointer.
+ */
+int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
+{
+ memset(info, 0, sizeof(struct agp_kern_info));
+ if (!bridge) {
+ info->chipset = NOT_SUPPORTED;
+ return -EIO;
+ }
+
+ info->version.major = bridge->version->major;
+ info->version.minor = bridge->version->minor;
+ info->chipset = SUPPORTED;
+ info->device = bridge->dev;
+ if (bridge->mode & AGPSTAT_MODE_3_0)
+ info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
+ else
+ info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
+ info->aper_base = bridge->gart_bus_addr;
+ info->aper_size = agp_return_size();
+ info->max_memory = bridge->max_memory_agp;
+ info->current_memory = atomic_read(&bridge->current_memory_agp);
+ info->cant_use_aperture = bridge->driver->cant_use_aperture;
+ info->vm_ops = bridge->vm_ops;
+ info->page_mask = ~0UL;
+ return 0;
+}
+EXPORT_SYMBOL(agp_copy_info);
+
+/* End - Routine to copy over information structure */
+
+/*
+ * Routines for handling swapping of agp_memory into the GATT -
+ * These routines take agp_memory and insert them into the GATT.
+ * They call device specific routines to actually write to the GATT.
+ */
+
+/**
+ * agp_bind_memory - Bind an agp_memory structure into the GATT.
+ *
+ * @curr: agp_memory pointer
+ * @pg_start: an offset into the graphics aperture translation table
+ *
+ * It returns -EINVAL if the pointer == NULL.
+ * It returns -EBUSY if the area of the table requested is already in use.
+ */
+int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
+{
+ int ret_val;
+
+ if (curr == NULL)
+ return -EINVAL;
+
+ if (curr->is_bound) {
+ printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
+ return -EINVAL;
+ }
+ if (!curr->is_flushed) {
+ curr->bridge->driver->cache_flush();
+ curr->is_flushed = true;
+ }
+
+ ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
+
+ if (ret_val != 0)
+ return ret_val;
+
+ curr->is_bound = true;
+ curr->pg_start = pg_start;
+ spin_lock(&agp_bridge->mapped_lock);
+ list_add(&curr->mapped_list, &agp_bridge->mapped_list);
+ spin_unlock(&agp_bridge->mapped_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(agp_bind_memory);
+
+
+/**
+ * agp_unbind_memory - Removes an agp_memory structure from the GATT
+ *
+ * @curr: agp_memory pointer to be removed from the GATT.
+ *
+ * It returns -EINVAL if this piece of agp_memory is not currently bound to
+ * the graphics aperture translation table or if the agp_memory pointer == NULL
+ */
+int agp_unbind_memory(struct agp_memory *curr)
+{
+ int ret_val;
+
+ if (curr == NULL)
+ return -EINVAL;
+
+ if (!curr->is_bound) {
+ printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
+ return -EINVAL;
+ }
+
+ ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);
+
+ if (ret_val != 0)
+ return ret_val;
+
+ curr->is_bound = false;
+ curr->pg_start = 0;
+ spin_lock(&curr->bridge->mapped_lock);
+ list_del(&curr->mapped_list);
+ spin_unlock(&curr->bridge->mapped_lock);
+ return 0;
+}
+EXPORT_SYMBOL(agp_unbind_memory);
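+
+/*
+ * Illustrative bind/unbind lifecycle (sketch only): pg_start is an offset in
+ * GATT pages from the start of the aperture.  Binding flushes CPU caches on
+ * first use and links the memory onto the bridge's mapped_list; unbinding
+ * points the affected entries back at the scratch page.
+ *
+ *	err = agp_bind_memory(mem, 0);       // map at aperture offset 0
+ *	if (!err) {
+ *		...
+ *		agp_unbind_memory(mem);
+ *	}
+ */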
+
+
+/* End - Routines for handling swapping of agp_memory into the GATT */
+
+
+/* Generic Agp routines - Start */
+static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
+{
+ u32 tmp;
+
+ if (*requested_mode & AGP2_RESERVED_MASK) {
+ printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
+ *requested_mode & AGP2_RESERVED_MASK, *requested_mode);
+ *requested_mode &= ~AGP2_RESERVED_MASK;
+ }
+
+ /*
+ * Some dumb bridges are programmed to disobey the AGP2 spec.
+ * This is most likely BIOS misprogramming rather than a power-on default,
+ * or it would be far more common.
+ * https://bugs.freedesktop.org/show_bug.cgi?id=8816
+ * AGPv2 spec 6.1.9 states:
+ * The RATE field indicates the data transfer rates supported by this
+ * device. A.G.P. devices must report all that apply.
+ * Fix them up as best we can.
+ */
+ switch (*bridge_agpstat & 7) {
+ case 4:
+ *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
+ printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
+ "Fixing up support for x2 & x1\n");
+ break;
+ case 2:
+ *bridge_agpstat |= AGPSTAT2_1X;
+ printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
+ "Fixing up support for x1\n");
+ break;
+ default:
+ break;
+ }
+
+ /* Check the speed bits make sense. Only one should be set. */
+ tmp = *requested_mode & 7;
+ switch (tmp) {
+ case 0:
+ printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
+ *requested_mode |= AGPSTAT2_1X;
+ break;
+ case 1:
+ case 2:
+ break;
+ case 3:
+ *requested_mode &= ~(AGPSTAT2_1X); /* rate=2 */
+ break;
+ case 4:
+ break;
+ case 5:
+ case 6:
+ case 7:
+ *requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/
+ break;
+ }
+
+ /* disable SBA if it's not supported */
+ if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
+ *bridge_agpstat &= ~AGPSTAT_SBA;
+
+ /* Set rate */
+ if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
+ *bridge_agpstat &= ~AGPSTAT2_4X;
+
+ if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
+ *bridge_agpstat &= ~AGPSTAT2_2X;
+
+ if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
+ *bridge_agpstat &= ~AGPSTAT2_1X;
+
+ /* Now we know what mode it should be, clear out the unwanted bits. */
+ if (*bridge_agpstat & AGPSTAT2_4X)
+ *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X); /* 4X */
+
+ if (*bridge_agpstat & AGPSTAT2_2X)
+ *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X); /* 2X */
+
+ if (*bridge_agpstat & AGPSTAT2_1X)
+ *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); /* 1X */
+
+ /* Apply any errata. */
+ if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
+ *bridge_agpstat &= ~AGPSTAT_FW;
+
+ if (agp_bridge->flags & AGP_ERRATA_SBA)
+ *bridge_agpstat &= ~AGPSTAT_SBA;
+
+ if (agp_bridge->flags & AGP_ERRATA_1X) {
+ *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
+ *bridge_agpstat |= AGPSTAT2_1X;
+ }
+
+ /* If we've dropped down to 1X, disable fast writes. */
+ if (*bridge_agpstat & AGPSTAT2_1X)
+ *bridge_agpstat &= ~AGPSTAT_FW;
+}
+
+/*
+ * requested_mode = Mode requested by (typically) X.
+ * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
+ * vga_agpstat = PCI_AGP_STATUS from graphic card.
+ */
+static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
+{
+ u32 origbridge=*bridge_agpstat, origvga=*vga_agpstat;
+ u32 tmp;
+
+ if (*requested_mode & AGP3_RESERVED_MASK) {
+ printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
+ *requested_mode & AGP3_RESERVED_MASK, *requested_mode);
+ *requested_mode &= ~AGP3_RESERVED_MASK;
+ }
+
+ /* Check the speed bits make sense. */
+ tmp = *requested_mode & 7;
+ if (tmp == 0) {
+ printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
+ *requested_mode |= AGPSTAT3_4X;
+ }
+ if (tmp >= 3) {
+ printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
+ *requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
+ }
+
+ /* ARQSZ - Set the value to the maximum one.
+ * Don't allow the mode register to override values. */
+ *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
+ max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ)));
+
+ /* Calibration cycle.
+ * Don't allow the mode register to override values. */
+ *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
+ min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK)));
+
+ /* SBA *must* be supported for AGP v3 */
+ *bridge_agpstat |= AGPSTAT_SBA;
+
+ /*
+ * Set speed.
+ * Check for invalid speeds. This can happen when applications
+ * written before the AGP 3.0 standard pass AGP 2.x modes to AGP3 hardware.
+ */
+ if (*requested_mode & AGPSTAT_MODE_3_0) {
+ /*
+ * The caller doesn't know what it is doing. The bridge is in 3.0 mode,
+ * but we have been passed a 3.0 mode with 2.x speed bits set.
+ * Map AGP 2.x 4x -> AGP 3.0 4x.
+ */
+ if (*requested_mode & AGPSTAT2_4X) {
+ printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
+ current->comm, *requested_mode);
+ *requested_mode &= ~AGPSTAT2_4X;
+ *requested_mode |= AGPSTAT3_4X;
+ }
+ } else {
+ /*
+ * The caller doesn't know what they are doing. We are in 3.0 mode,
+ * but have been passed an AGP 2.x mode.
+ * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
+ */
+ printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
+ current->comm, *requested_mode);
+ *requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
+ *requested_mode |= AGPSTAT3_4X;
+ }
+
+ if (*requested_mode & AGPSTAT3_8X) {
+ if (!(*bridge_agpstat & AGPSTAT3_8X)) {
+ *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *bridge_agpstat |= AGPSTAT3_4X;
+ printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
+ return;
+ }
+ if (!(*vga_agpstat & AGPSTAT3_8X)) {
+ *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *bridge_agpstat |= AGPSTAT3_4X;
+ printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
+ return;
+ }
+ /* All set, bridge & device can do AGP x8*/
+ *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+ goto done;
+
+ } else if (*requested_mode & AGPSTAT3_4X) {
+ *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *bridge_agpstat |= AGPSTAT3_4X;
+ goto done;
+
+ } else {
+
+ /*
+ * If no AGP mode was specified, check whether both the graphics
+ * card and the bridge can do x8, and use it if so.
+ * If not, fall back to x4 mode.
+ */
+ if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
+ printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
+ "supported by bridge & card (x8).\n");
+ *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+ *vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+ } else {
+ printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
+ if (!(*bridge_agpstat & AGPSTAT3_8X)) {
+ printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
+ *bridge_agpstat, origbridge);
+ *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *bridge_agpstat |= AGPSTAT3_4X;
+ }
+ if (!(*vga_agpstat & AGPSTAT3_8X)) {
+ printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
+ *vga_agpstat, origvga);
+ *vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *vga_agpstat |= AGPSTAT3_4X;
+ }
+ }
+ }
+
+done:
+ /* Apply any errata. */
+ if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
+ *bridge_agpstat &= ~AGPSTAT_FW;
+
+ if (agp_bridge->flags & AGP_ERRATA_SBA)
+ *bridge_agpstat &= ~AGPSTAT_SBA;
+
+ if (agp_bridge->flags & AGP_ERRATA_1X) {
+ *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
+ *bridge_agpstat |= AGPSTAT2_1X;
+ }
+}
+
+
+/**
+ * agp_collect_device_status - determine the correct agp_cmd from the various agp_stat values
+ * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
+ * @requested_mode: requested agp_stat from userspace (typically from X)
+ * @bridge_agpstat: current agp_stat from AGP bridge.
+ *
+ * This function will hunt for an AGP graphics card, and try to match
+ * the requested mode to the capabilities of both the bridge and the card.
+ */
+u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
+{
+ struct pci_dev *device = NULL;
+ u32 vga_agpstat;
+ u8 cap_ptr;
+
+ for (;;) {
+ device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
+ if (!device) {
+ printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
+ return 0;
+ }
+ cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
+ if (cap_ptr)
+ break;
+ }
+
+ /*
+ * OK, here we have an AGP device. Disable impossible
+ * settings and adjust the read queue to the minimum.
+ */
+ pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);
+
+ /* adjust RQ depth */
+ bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
+ min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
+ min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));
+
+ /* disable FW if it's not supported */
+ if (!((bridge_agpstat & AGPSTAT_FW) &&
+ (vga_agpstat & AGPSTAT_FW) &&
+ (requested_mode & AGPSTAT_FW)))
+ bridge_agpstat &= ~AGPSTAT_FW;
+
+ /* Check to see if we are operating in 3.0 mode */
+ if (agp_bridge->mode & AGPSTAT_MODE_3_0)
+ agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
+ else
+ agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
+
+ pci_dev_put(device);
+ return bridge_agpstat;
+}
+EXPORT_SYMBOL(agp_collect_device_status);
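+
+/*
+ * Worked example of the AGP 2.x rate negotiation above (illustrative values):
+ * a request with all three rate bits set is first reduced to x4 alone by
+ * agp_v2_parse_one().  If instead user space requests x2 and both bridge and
+ * card advertise x4|x2|x1, the x4 and x1 bits are cleared from the bridge
+ * status because the request lacks them, and the final masking keeps
+ * AGPSTAT2_2X, so the returned command word programs the devices for x2.
+ */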
+
+
+void agp_device_command(u32 bridge_agpstat, bool agp_v3)
+{
+ struct pci_dev *device = NULL;
+ int mode;
+
+ mode = bridge_agpstat & 0x7;
+ if (agp_v3)
+ mode *= 4;
+
+ for_each_pci_dev(device) {
+ u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
+ if (!agp)
+ continue;
+
+ dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
+ agp_v3 ? 3 : 2, mode);
+ pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
+ }
+}
+EXPORT_SYMBOL(agp_device_command);
+
+
+void get_agp_version(struct agp_bridge_data *bridge)
+{
+ u32 ncapid;
+
+ /* Exit early if already set by errata workarounds. */
+ if (bridge->major_version != 0)
+ return;
+
+ pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
+ bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
+ bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
+}
+EXPORT_SYMBOL(get_agp_version);
+
+
+void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
+{
+ u32 bridge_agpstat, temp;
+
+ get_agp_version(agp_bridge);
+
+ dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
+ agp_bridge->major_version, agp_bridge->minor_version);
+
+ pci_read_config_dword(agp_bridge->dev,
+ agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
+
+ bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
+ if (bridge_agpstat == 0)
+ /* Something bad happened. FIXME: Return error code? */
+ return;
+
+ bridge_agpstat |= AGPSTAT_AGP_ENABLE;
+
+ /* Do AGP version specific frobbing. */
+ if (bridge->major_version >= 3) {
+ if (bridge->mode & AGPSTAT_MODE_3_0) {
+ /* If we have 3.5, we can do the isoch stuff. */
+ if (bridge->minor_version >= 5)
+ agp_3_5_enable(bridge);
+ agp_device_command(bridge_agpstat, true);
+ return;
+ } else {
+ /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
+ bridge_agpstat &= ~(7 << 10);
+ pci_read_config_dword(bridge->dev,
+ bridge->capndx+AGPCTRL, &temp);
+ temp |= (1<<9);
+ pci_write_config_dword(bridge->dev,
+ bridge->capndx+AGPCTRL, temp);
+
+ dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
+ }
+ }
+
+ /* AGP v<3 */
+ agp_device_command(bridge_agpstat, false);
+}
+EXPORT_SYMBOL(agp_generic_enable);
+
+
+int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ char *table;
+ char *table_end;
+ int page_order;
+ int num_entries;
+ int i;
+ void *temp;
+ struct page *page;
+
+ /* The generic routines can't handle 2-level GATTs */
+ if (bridge->driver->size_type == LVL2_APER_SIZE)
+ return -EINVAL;
+
+ table = NULL;
+ i = bridge->aperture_size_idx;
+ temp = bridge->current_size;
+ page_order = num_entries = 0;
+
+ if (bridge->driver->size_type != FIXED_APER_SIZE) {
+ do {
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ page_order =
+ A_SIZE_8(temp)->page_order;
+ num_entries =
+ A_SIZE_8(temp)->num_entries;
+ break;
+ case U16_APER_SIZE:
+ page_order = A_SIZE_16(temp)->page_order;
+ num_entries = A_SIZE_16(temp)->num_entries;
+ break;
+ case U32_APER_SIZE:
+ page_order = A_SIZE_32(temp)->page_order;
+ num_entries = A_SIZE_32(temp)->num_entries;
+ break;
+ /* This case will never really happen. */
+ case FIXED_APER_SIZE:
+ case LVL2_APER_SIZE:
+ default:
+ page_order = num_entries = 0;
+ break;
+ }
+
+ table = alloc_gatt_pages(page_order);
+
+ if (table == NULL) {
+ i++;
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ bridge->current_size = A_IDX8(bridge);
+ break;
+ case U16_APER_SIZE:
+ bridge->current_size = A_IDX16(bridge);
+ break;
+ case U32_APER_SIZE:
+ bridge->current_size = A_IDX32(bridge);
+ break;
+ /* These cases will never really happen. */
+ case FIXED_APER_SIZE:
+ case LVL2_APER_SIZE:
+ default:
+ break;
+ }
+ temp = bridge->current_size;
+ } else {
+ bridge->aperture_size_idx = i;
+ }
+ } while (!table && (i < bridge->driver->num_aperture_sizes));
+ } else {
+ page_order = ((struct aper_size_info_fixed *) temp)->page_order;
+ num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
+ table = alloc_gatt_pages(page_order);
+ }
+
+ if (table == NULL)
+ return -ENOMEM;
+
+ table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+ SetPageReserved(page);
+
+ bridge->gatt_table_real = (u32 *) table;
+ agp_gatt_table = (void *)table;
+
+ bridge->driver->cache_flush();
+#ifdef CONFIG_X86
+ if (set_memory_uc((unsigned long)table, 1 << page_order))
+ printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
+
+ bridge->gatt_table = (u32 __iomem *)table;
+#else
+ bridge->gatt_table = ioremap(virt_to_phys(table),
+ (PAGE_SIZE * (1 << page_order)));
+ bridge->driver->cache_flush();
+#endif
+
+ if (bridge->gatt_table == NULL) {
+ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+ ClearPageReserved(page);
+
+ free_gatt_pages(table, page_order);
+
+ return -ENOMEM;
+ }
+ bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
+
+ /* AK: bogus, should encode addresses > 4GB */
+ for (i = 0; i < num_entries; i++) {
+ writel(bridge->scratch_page, bridge->gatt_table+i);
+ readl(bridge->gatt_table+i); /* PCI Posting. */
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(agp_generic_create_gatt_table);
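+
+/*
+ * Worked sizing example for the table built above (illustrative numbers): a
+ * 64 MB aperture mapped with 4 KB pages needs 64 MB / 4 KB = 16384 GATT
+ * entries.  At 4 bytes per entry that is 64 KB of physically contiguous
+ * table, i.e. page_order 4 with 4 KB kernel pages - the kind of tuple a
+ * driver's aper_size_info table would encode as {64, 16384, 4, ...}.
+ */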
+
+int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ char *table, *table_end;
+ void *temp;
+ struct page *page;
+
+ temp = bridge->current_size;
+
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ page_order = A_SIZE_8(temp)->page_order;
+ break;
+ case U16_APER_SIZE:
+ page_order = A_SIZE_16(temp)->page_order;
+ break;
+ case U32_APER_SIZE:
+ page_order = A_SIZE_32(temp)->page_order;
+ break;
+ case FIXED_APER_SIZE:
+ page_order = A_SIZE_FIX(temp)->page_order;
+ break;
+ case LVL2_APER_SIZE:
+ /* The generic routines can't deal with 2-level GATTs */
+ return -EINVAL;
+ default:
+ page_order = 0;
+ break;
+ }
+
+ /* Do not worry about freeing memory, because if this is
+ * called, then all agp memory is deallocated and removed
+ * from the table. */
+
+#ifdef CONFIG_X86
+ set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
+#else
+ iounmap(bridge->gatt_table);
+#endif
+ table = (char *) bridge->gatt_table_real;
+ table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+ ClearPageReserved(page);
+
+ free_gatt_pages(bridge->gatt_table_real, page_order);
+
+ agp_gatt_table = NULL;
+ bridge->gatt_table = NULL;
+ bridge->gatt_table_real = NULL;
+ bridge->gatt_bus_addr = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(agp_generic_free_gatt_table);
+
+
+int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
+{
+ int num_entries;
+ size_t i;
+ off_t j;
+ void *temp;
+ struct agp_bridge_data *bridge;
+ int mask_type;
+
+ bridge = mem->bridge;
+ if (!bridge)
+ return -EINVAL;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ temp = bridge->current_size;
+
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ num_entries = A_SIZE_8(temp)->num_entries;
+ break;
+ case U16_APER_SIZE:
+ num_entries = A_SIZE_16(temp)->num_entries;
+ break;
+ case U32_APER_SIZE:
+ num_entries = A_SIZE_32(temp)->num_entries;
+ break;
+ case FIXED_APER_SIZE:
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+ break;
+ case LVL2_APER_SIZE:
+ /* The generic routines can't deal with 2-level GATTs */
+ return -EINVAL;
+ default:
+ num_entries = 0;
+ break;
+ }
+
+ num_entries -= agp_memory_reserved / PAGE_SIZE;
+ if (num_entries < 0)
+ num_entries = 0;
+
+ if (type != mem->type)
+ return -EINVAL;
+
+ mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+ if (mask_type != 0) {
+ /* The generic routines know nothing of memory types */
+ return -EINVAL;
+ }
+
+ if (((pg_start + mem->page_count) > num_entries) ||
+ ((pg_start + mem->page_count) < pg_start))
+ return -EINVAL;
+
+ j = pg_start;
+
+ while (j < (pg_start + mem->page_count)) {
+ if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
+ return -EBUSY;
+ j++;
+ }
+
+ if (!mem->is_flushed) {
+ bridge->driver->cache_flush();
+ mem->is_flushed = true;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(bridge->driver->mask_memory(bridge,
+ page_to_phys(mem->pages[i]),
+ mask_type),
+ bridge->gatt_table+j);
+ }
+ readl(bridge->gatt_table+j-1); /* PCI Posting. */
+
+ bridge->driver->tlb_flush(mem);
+ return 0;
+}
+EXPORT_SYMBOL(agp_generic_insert_memory);
+
+
+int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ size_t i;
+ struct agp_bridge_data *bridge;
+ int mask_type, num_entries;
+
+ bridge = mem->bridge;
+ if (!bridge)
+ return -EINVAL;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ if (type != mem->type)
+ return -EINVAL;
+
+ num_entries = agp_num_entries();
+ if (((pg_start + mem->page_count) > num_entries) ||
+ ((pg_start + mem->page_count) < pg_start))
+ return -EINVAL;
+
+ mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+ if (mask_type != 0) {
+ /* The generic routines know nothing of memory types */
+ return -EINVAL;
+ }
+
+ /* AK: bogus, should encode addresses > 4GB */
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ writel(bridge->scratch_page, bridge->gatt_table+i);
+ }
+ readl(bridge->gatt_table+i-1); /* PCI Posting. */
+
+ bridge->driver->tlb_flush(mem);
+ return 0;
+}
+EXPORT_SYMBOL(agp_generic_remove_memory);
+
+struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
+{
+ return NULL;
+}
+EXPORT_SYMBOL(agp_generic_alloc_by_type);
+
+void agp_generic_free_by_type(struct agp_memory *curr)
+{
+ agp_free_page_array(curr);
+ agp_free_key(curr->key);
+ kfree(curr);
+}
+EXPORT_SYMBOL(agp_generic_free_by_type);
+
+struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
+{
+ struct agp_memory *new;
+ int i;
+ int pages;
+
+ pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
+ new = agp_create_user_memory(page_count);
+ if (new == NULL)
+ return NULL;
+
+ for (i = 0; i < page_count; i++)
+ new->pages[i] = NULL;
+ new->page_count = 0;
+ new->type = type;
+ new->num_scratch_pages = pages;
+
+ return new;
+}
+EXPORT_SYMBOL(agp_generic_alloc_user);
+
+/*
+ * Basic Page Allocation Routines -
+ * These routines handle page allocation and by default they reserve the allocated
+ * memory. They also handle incrementing the current_memory_agp value, which is checked
+ * against a maximum value.
+ */
+
+int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
+{
+ struct page * page;
+ int i, ret = -ENOMEM;
+
+ for (i = 0; i < num_pages; i++) {
+ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+ /* agp_free_memory() needs gart address */
+ if (page == NULL)
+ goto out;
+
+#ifndef CONFIG_X86
+ map_page_into_agp(page);
+#endif
+ get_page(page);
+ atomic_inc(&agp_bridge->current_memory_agp);
+
+ mem->pages[i] = page;
+ mem->page_count++;
+ }
+
+#ifdef CONFIG_X86
+ set_pages_array_uc(mem->pages, num_pages);
+#endif
+ ret = 0;
+out:
+ return ret;
+}
+EXPORT_SYMBOL(agp_generic_alloc_pages);
+
+struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
+{
+ struct page * page;
+
+ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+ if (page == NULL)
+ return NULL;
+
+ map_page_into_agp(page);
+
+ get_page(page);
+ atomic_inc(&agp_bridge->current_memory_agp);
+ return page;
+}
+EXPORT_SYMBOL(agp_generic_alloc_page);
+
+void agp_generic_destroy_pages(struct agp_memory *mem)
+{
+ int i;
+ struct page *page;
+
+ if (!mem)
+ return;
+
+#ifdef CONFIG_X86
+ set_pages_array_wb(mem->pages, mem->page_count);
+#endif
+
+ for (i = 0; i < mem->page_count; i++) {
+ page = mem->pages[i];
+
+#ifndef CONFIG_X86
+ unmap_page_from_agp(page);
+#endif
+ put_page(page);
+ __free_page(page);
+ atomic_dec(&agp_bridge->current_memory_agp);
+ mem->pages[i] = NULL;
+ }
+}
+EXPORT_SYMBOL(agp_generic_destroy_pages);
+
+void agp_generic_destroy_page(struct page *page, int flags)
+{
+ if (page == NULL)
+ return;
+
+ if (flags & AGP_PAGE_DESTROY_UNMAP)
+ unmap_page_from_agp(page);
+
+ if (flags & AGP_PAGE_DESTROY_FREE) {
+ put_page(page);
+ __free_page(page);
+ atomic_dec(&agp_bridge->current_memory_agp);
+ }
+}
+EXPORT_SYMBOL(agp_generic_destroy_page);
+
+/* End Basic Page Allocation Routines */
+
+
+/**
+ * agp_enable - initialise the agp point-to-point connection.
+ *
+ * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
+ * @mode: agp mode register value to configure with.
+ */
+void agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ if (!bridge)
+ return;
+ bridge->driver->agp_enable(bridge, mode);
+}
+EXPORT_SYMBOL(agp_enable);
+
+/* When we remove the global variable agp_bridge from all drivers
+ * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
+ */
+
+struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
+{
+ if (list_empty(&agp_bridges))
+ return NULL;
+
+ return agp_bridge;
+}
+
+static void ipi_handler(void *null)
+{
+ flush_agp_cache();
+}
+
+void global_cache_flush(void)
+{
+ on_each_cpu(ipi_handler, NULL, 1);
+}
+EXPORT_SYMBOL(global_cache_flush);
+
+unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
+ dma_addr_t addr, int type)
+{
+ /* memory type is ignored in the generic routine */
+ if (bridge->driver->masks)
+ return addr | bridge->driver->masks[0].mask;
+ else
+ return addr;
+}
+EXPORT_SYMBOL(agp_generic_mask_memory);
+
+int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
+ int type)
+{
+ if (type >= AGP_USER_TYPES)
+ return 0;
+ return type;
+}
+EXPORT_SYMBOL(agp_generic_type_to_mask_type);
+
+/*
+ * These functions are implemented according to the AGPv3 spec,
+ * which covers implementation details that had previously been
+ * left open.
+ */
+
+int agp3_generic_fetch_size(void)
+{
+ u16 temp_size;
+ int i;
+ struct aper_size_info_16 *values;
+
+ pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
+ values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp_size == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(agp3_generic_fetch_size);
+
+void agp3_generic_tlbflush(struct agp_memory *mem)
+{
+ u32 ctrl;
+ pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
+}
+EXPORT_SYMBOL(agp3_generic_tlbflush);
+
+int agp3_generic_configure(void)
+{
+ u32 temp;
+ struct aper_size_info_16 *current_size;
+
+ current_size = A_SIZE_16(agp_bridge->current_size);
+
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
+
+ /* set aperture size */
+ pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
+ /* set gart pointer */
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
+ /* enable aperture and GTLB */
+ pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
+ return 0;
+}
+EXPORT_SYMBOL(agp3_generic_configure);
+
+void agp3_generic_cleanup(void)
+{
+ u32 ctrl;
+ pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
+}
+EXPORT_SYMBOL(agp3_generic_cleanup);
+
+const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
+{
+ {4096, 1048576, 10,0x000},
+ {2048, 524288, 9, 0x800},
+ {1024, 262144, 8, 0xc00},
+ { 512, 131072, 7, 0xe00},
+ { 256, 65536, 6, 0xf00},
+ { 128, 32768, 5, 0xf20},
+ { 64, 16384, 4, 0xf30},
+ { 32, 8192, 3, 0xf38},
+ { 16, 4096, 2, 0xf3c},
+ { 8, 2048, 1, 0xf3e},
+ { 4, 1024, 0, 0xf3f}
+};
+EXPORT_SYMBOL(agp3_generic_sizes);
+
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
new file mode 100644
index 0000000000..84d9adbb62
--- /dev/null
+++ b/drivers/char/agp/hp-agp.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HP zx1 AGPGART routines.
+ *
+ * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+
+#include <asm/acpi-ext.h>
+
+#include "agp.h"
+
+#define HP_ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
+
+/* HP ZX1 IOC registers */
+#define HP_ZX1_IBASE 0x300
+#define HP_ZX1_IMASK 0x308
+#define HP_ZX1_PCOM 0x310
+#define HP_ZX1_TCNFG 0x318
+#define HP_ZX1_PDIR_BASE 0x320
+
+#define HP_ZX1_IOVA_BASE GB(1UL)
+#define HP_ZX1_IOVA_SIZE GB(1UL)
+#define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2)
+#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
+
+#define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL
+#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift)
+
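+/*
+ * Worked example of the PDIR indexing above (illustrative numbers): with
+ * iova_base at 1 GB (0x40000000) and a 4 KB IO page size (io_tlb_shift = 12),
+ * an IOVA of 0x40042000 selects PDIR entry
+ * (0x40042000 - 0x40000000) >> 12 = 0x42.
+ */
+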
+#define AGP8X_MODE_BIT 3
+#define AGP8X_MODE (1 << AGP8X_MODE_BIT)
+
+/* An AGP bridge need not be a PCI device, but DRM thinks it is. */
+static struct pci_dev fake_bridge_dev;
+
+static int hp_zx1_gart_found;
+
+static struct aper_size_info_fixed hp_zx1_sizes[] =
+{
+ {0, 0, 0}, /* filled in by hp_zx1_fetch_size() */
+};
+
+static struct gatt_mask hp_zx1_masks[] =
+{
+ {.mask = HP_ZX1_PDIR_VALID_BIT, .type = 0}
+};
+
+static struct _hp_private {
+ volatile u8 __iomem *ioc_regs;
+ volatile u8 __iomem *lba_regs;
+ int lba_cap_offset;
+ u64 *io_pdir; // PDIR for entire IOVA
+ u64 *gatt; // PDIR just for GART (subset of above)
+ u64 gatt_entries;
+ u64 iova_base;
+ u64 gart_base;
+ u64 gart_size;
+ u64 io_pdir_size;
+ int io_pdir_owner; // do we own it, or share it with sba_iommu?
+ int io_page_size;
+ int io_tlb_shift;
+ int io_tlb_ps; // IOC ps config
+ int io_pages_per_kpage;
+} hp_private;
+
+static int __init hp_zx1_ioc_shared(void)
+{
+ struct _hp_private *hp = &hp_private;
+
+ printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n");
+
+ /*
+ * IOC already configured by sba_iommu module; just use
+ * its setup. We assume:
+ * - IOVA space is 1 GB in size
+ * - first 512 MB is IOMMU, second 512 MB is GART
+ */
+ hp->io_tlb_ps = readq(hp->ioc_regs+HP_ZX1_TCNFG);
+ switch (hp->io_tlb_ps) {
+ case 0: hp->io_tlb_shift = 12; break;
+ case 1: hp->io_tlb_shift = 13; break;
+ case 2: hp->io_tlb_shift = 14; break;
+ case 3: hp->io_tlb_shift = 16; break;
+ default:
+ printk(KERN_ERR PFX "Invalid IOTLB page size "
+ "configuration 0x%x\n", hp->io_tlb_ps);
+ hp->gatt = NULL;
+ hp->gatt_entries = 0;
+ return -ENODEV;
+ }
+ hp->io_page_size = 1 << hp->io_tlb_shift;
+ hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
+
+ hp->iova_base = readq(hp->ioc_regs+HP_ZX1_IBASE) & ~0x1;
+ hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
+
+ hp->gart_size = HP_ZX1_GART_SIZE;
+ hp->gatt_entries = hp->gart_size / hp->io_page_size;
+
+ hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
+ hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
+
+ if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
+ /* Normal case when no AGP device in system */
+ hp->gatt = NULL;
+ hp->gatt_entries = 0;
+ printk(KERN_ERR PFX "No reserved IO PDIR entry found; "
+ "GART disabled\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __init
+hp_zx1_ioc_owner (void)
+{
+ struct _hp_private *hp = &hp_private;
+
+ printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n");
+
+ /*
+ * Select an IOV page size no larger than system page size.
+ */
+ if (PAGE_SIZE >= KB(64)) {
+ hp->io_tlb_shift = 16;
+ hp->io_tlb_ps = 3;
+ } else if (PAGE_SIZE >= KB(16)) {
+ hp->io_tlb_shift = 14;
+ hp->io_tlb_ps = 2;
+ } else if (PAGE_SIZE >= KB(8)) {
+ hp->io_tlb_shift = 13;
+ hp->io_tlb_ps = 1;
+ } else {
+ hp->io_tlb_shift = 12;
+ hp->io_tlb_ps = 0;
+ }
+ hp->io_page_size = 1 << hp->io_tlb_shift;
+ hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
+
+ hp->iova_base = HP_ZX1_IOVA_BASE;
+ hp->gart_size = HP_ZX1_GART_SIZE;
+ hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;
+
+ hp->gatt_entries = hp->gart_size / hp->io_page_size;
+ hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);
+
+ return 0;
+}
+
+static int __init
+hp_zx1_ioc_init (u64 hpa)
+{
+ struct _hp_private *hp = &hp_private;
+
+ hp->ioc_regs = ioremap(hpa, 1024);
+ if (!hp->ioc_regs)
+ return -ENOMEM;
+
+ /*
+ * If the IOTLB is currently disabled, we can take it over.
+ * Otherwise, we have to share with sba_iommu.
+ */
+ hp->io_pdir_owner = (readq(hp->ioc_regs+HP_ZX1_IBASE) & 0x1) == 0;
+
+ if (hp->io_pdir_owner)
+ return hp_zx1_ioc_owner();
+
+ return hp_zx1_ioc_shared();
+}
+
+static int
+hp_zx1_lba_find_capability (volatile u8 __iomem *hpa, int cap)
+{
+ u16 status;
+ u8 pos, id;
+ int ttl = 48;
+
+ status = readw(hpa+PCI_STATUS);
+ if (!(status & PCI_STATUS_CAP_LIST))
+ return 0;
+ pos = readb(hpa+PCI_CAPABILITY_LIST);
+ while (ttl-- && pos >= 0x40) {
+ pos &= ~3;
+ id = readb(hpa+pos+PCI_CAP_LIST_ID);
+ if (id == 0xff)
+ break;
+ if (id == cap)
+ return pos;
+ pos = readb(hpa+pos+PCI_CAP_LIST_NEXT);
+ }
+ return 0;
+}
+
+static int __init
+hp_zx1_lba_init (u64 hpa)
+{
+ struct _hp_private *hp = &hp_private;
+ int cap;
+
+ hp->lba_regs = ioremap(hpa, 256);
+ if (!hp->lba_regs)
+ return -ENOMEM;
+
+ hp->lba_cap_offset = hp_zx1_lba_find_capability(hp->lba_regs, PCI_CAP_ID_AGP);
+
+ cap = readl(hp->lba_regs+hp->lba_cap_offset) & 0xff;
+ if (cap != PCI_CAP_ID_AGP) {
+ printk(KERN_ERR PFX "Invalid capability ID 0x%02x at 0x%x\n",
+ cap, hp->lba_cap_offset);
+ iounmap(hp->lba_regs);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int
+hp_zx1_fetch_size(void)
+{
+ int size;
+
+ size = hp_private.gart_size / MB(1);
+ hp_zx1_sizes[0].size = size;
+ agp_bridge->current_size = (void *) &hp_zx1_sizes[0];
+ return size;
+}
+
+static int
+hp_zx1_configure (void)
+{
+ struct _hp_private *hp = &hp_private;
+
+ agp_bridge->gart_bus_addr = hp->gart_base;
+ agp_bridge->capndx = hp->lba_cap_offset;
+ agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
+
+ if (hp->io_pdir_owner) {
+ writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
+ readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
+ writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
+ readl(hp->ioc_regs+HP_ZX1_TCNFG);
+ writel((unsigned int)(~(HP_ZX1_IOVA_SIZE-1)), hp->ioc_regs+HP_ZX1_IMASK);
+ readl(hp->ioc_regs+HP_ZX1_IMASK);
+ writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE);
+ readl(hp->ioc_regs+HP_ZX1_IBASE);
+ writel(hp->iova_base|ilog2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM);
+ readl(hp->ioc_regs+HP_ZX1_PCOM);
+ }
+
+ return 0;
+}
+
+static void
+hp_zx1_cleanup (void)
+{
+ struct _hp_private *hp = &hp_private;
+
+ if (hp->ioc_regs) {
+ if (hp->io_pdir_owner) {
+ writeq(0, hp->ioc_regs+HP_ZX1_IBASE);
+ readq(hp->ioc_regs+HP_ZX1_IBASE);
+ }
+ iounmap(hp->ioc_regs);
+ }
+ if (hp->lba_regs)
+ iounmap(hp->lba_regs);
+}
+
+static void
+hp_zx1_tlbflush (struct agp_memory *mem)
+{
+ struct _hp_private *hp = &hp_private;
+
+ writeq(hp->gart_base | ilog2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM);
+ readq(hp->ioc_regs+HP_ZX1_PCOM);
+}
+
+static int
+hp_zx1_create_gatt_table (struct agp_bridge_data *bridge)
+{
+ struct _hp_private *hp = &hp_private;
+ int i;
+
+ if (hp->io_pdir_owner) {
+ hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
+ get_order(hp->io_pdir_size));
+ if (!hp->io_pdir) {
+ printk(KERN_ERR PFX "Couldn't allocate contiguous "
+ "memory for I/O PDIR\n");
+ hp->gatt = NULL;
+ hp->gatt_entries = 0;
+ return -ENOMEM;
+ }
+ memset(hp->io_pdir, 0, hp->io_pdir_size);
+
+ hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
+ }
+
+ for (i = 0; i < hp->gatt_entries; i++) {
+ hp->gatt[i] = (unsigned long) agp_bridge->scratch_page;
+ }
+
+ return 0;
+}
+
+static int
+hp_zx1_free_gatt_table (struct agp_bridge_data *bridge)
+{
+ struct _hp_private *hp = &hp_private;
+
+ if (hp->io_pdir_owner)
+ free_pages((unsigned long) hp->io_pdir,
+ get_order(hp->io_pdir_size));
+ else
+ hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
+ return 0;
+}
+
+static int
+hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
+{
+ struct _hp_private *hp = &hp_private;
+ int i, k;
+ off_t j, io_pg_start;
+ int io_pg_count;
+
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
+ return -EINVAL;
+ }
+
+ io_pg_start = hp->io_pages_per_kpage * pg_start;
+ io_pg_count = hp->io_pages_per_kpage * mem->page_count;
+ if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
+ return -EINVAL;
+ }
+
+ j = io_pg_start;
+ while (j < (io_pg_start + io_pg_count)) {
+ if (hp->gatt[j]) {
+ return -EBUSY;
+ }
+ j++;
+ }
+
+ if (!mem->is_flushed) {
+ global_cache_flush();
+ mem->is_flushed = true;
+ }
+
+ for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
+ unsigned long paddr;
+
+ paddr = page_to_phys(mem->pages[i]);
+ for (k = 0;
+ k < hp->io_pages_per_kpage;
+ k++, j++, paddr += hp->io_page_size) {
+ hp->gatt[j] = HP_ZX1_PDIR_VALID_BIT | paddr;
+ }
+ }
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int
+hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
+{
+ struct _hp_private *hp = &hp_private;
+ int i, io_pg_start, io_pg_count;
+
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
+ return -EINVAL;
+ }
+
+ io_pg_start = hp->io_pages_per_kpage * pg_start;
+ io_pg_count = hp->io_pages_per_kpage * mem->page_count;
+ for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
+ hp->gatt[i] = agp_bridge->scratch_page;
+ }
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static unsigned long
+hp_zx1_mask_memory (struct agp_bridge_data *bridge, dma_addr_t addr, int type)
+{
+ return HP_ZX1_PDIR_VALID_BIT | addr;
+}
+
+static void
+hp_zx1_enable (struct agp_bridge_data *bridge, u32 mode)
+{
+ struct _hp_private *hp = &hp_private;
+ u32 command;
+
+ command = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
+ command = agp_collect_device_status(bridge, mode, command);
+ command |= 0x00000100;
+
+ writel(command, hp->lba_regs+hp->lba_cap_offset+PCI_AGP_COMMAND);
+
+ agp_device_command(command, (mode & AGP8X_MODE) != 0);
+}
+
+const struct agp_bridge_driver hp_zx1_driver = {
+ .owner = THIS_MODULE,
+ .size_type = FIXED_APER_SIZE,
+ .configure = hp_zx1_configure,
+ .fetch_size = hp_zx1_fetch_size,
+ .cleanup = hp_zx1_cleanup,
+ .tlb_flush = hp_zx1_tlbflush,
+ .mask_memory = hp_zx1_mask_memory,
+ .masks = hp_zx1_masks,
+ .agp_enable = hp_zx1_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = hp_zx1_create_gatt_table,
+ .free_gatt_table = hp_zx1_free_gatt_table,
+ .insert_memory = hp_zx1_insert_memory,
+ .remove_memory = hp_zx1_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+ .cant_use_aperture = true,
+};
+
+static int __init
+hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa)
+{
+ struct agp_bridge_data *bridge;
+ int error = 0;
+
+ error = hp_zx1_ioc_init(ioc_hpa);
+ if (error)
+ goto fail;
+
+ error = hp_zx1_lba_init(lba_hpa);
+ if (error)
+ goto fail;
+
+ bridge = agp_alloc_bridge();
+ if (!bridge) {
+ error = -ENOMEM;
+ goto fail;
+ }
+ bridge->driver = &hp_zx1_driver;
+
+ fake_bridge_dev.vendor = PCI_VENDOR_ID_HP;
+ fake_bridge_dev.device = PCI_DEVICE_ID_HP_PCIX_LBA;
+ bridge->dev = &fake_bridge_dev;
+
+ error = agp_add_bridge(bridge);
+ fail:
+ if (error)
+ hp_zx1_cleanup();
+ return error;
+}
+
+static acpi_status __init
+zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
+{
+ acpi_handle handle, parent;
+ acpi_status status;
+ struct acpi_device_info *info;
+ u64 lba_hpa, sba_hpa, length;
+ int match;
+
+ status = hp_acpi_csr_space(obj, &lba_hpa, &length);
+ if (ACPI_FAILURE(status))
+ return AE_OK; /* keep looking for another bridge */
+
+ /* Look for an enclosing IOC scope and find its CSR space */
+ handle = obj;
+ do {
+ status = acpi_get_object_info(handle, &info);
+ if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) {
+ /* TBD check _CID also */
+ match = (strcmp(info->hardware_id.string, "HWP0001") == 0);
+ kfree(info);
+ if (match) {
+ status = hp_acpi_csr_space(handle, &sba_hpa, &length);
+ if (ACPI_SUCCESS(status))
+ break;
+ else {
+ printk(KERN_ERR PFX "Detected HP ZX1 "
+ "AGP LBA but no IOC.\n");
+ return AE_OK;
+ }
+ }
+ }
+
+ status = acpi_get_parent(handle, &parent);
+ handle = parent;
+ } while (ACPI_SUCCESS(status));
+
+ if (ACPI_FAILURE(status))
+ return AE_OK; /* found no enclosing IOC */
+
+ if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
+ return AE_OK;
+
+ printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset "
+ "(ioc=%llx, lba=%llx)\n", (char *)context,
+ sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa);
+
+ hp_zx1_gart_found = 1;
+ return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */
+}
+
+static int __init
+agp_hp_init (void)
+{
+ if (agp_off)
+ return -EINVAL;
+
+ acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003", NULL);
+ if (hp_zx1_gart_found)
+ return 0;
+
+ acpi_get_devices("HWP0007", zx1_gart_probe, "HWP0007", NULL);
+ if (hp_zx1_gart_found)
+ return 0;
+
+ return -ENODEV;
+}
+
+static void __exit
+agp_hp_cleanup (void)
+{
+}
+
+module_init(agp_hp_init);
+module_exit(agp_hp_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
new file mode 100644
index 0000000000..15b240ea48
--- /dev/null
+++ b/drivers/char/agp/i460-agp.c
@@ -0,0 +1,659 @@
+/*
+ * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of
+ * the "Intel 460GTX Chipset Software Developer's Manual":
+ * http://www.intel.com/design/archives/itanium/downloads/248704.htm
+ */
+/*
+ * 460GX support by Chris Ahna <christopher.j.ahna@intel.com>
+ * Clean up & simplification by David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/agp_backend.h>
+#include <linux/log2.h>
+
+#include "agp.h"
+
+#define INTEL_I460_BAPBASE 0x98
+#define INTEL_I460_GXBCTL 0xa0
+#define INTEL_I460_AGPSIZ 0xa2
+#define INTEL_I460_ATTBASE 0xfe200000
+#define INTEL_I460_GATT_VALID (1UL << 24)
+#define INTEL_I460_GATT_COHERENT (1UL << 25)
+
+/*
+ * The i460 can operate with large (4MB) pages, but there is no sane way to support this
+ * within the current kernel/DRM environment, so we disable the relevant code for now.
+ * See also the comments above i460_alloc_page()...
+ */
+#define I460_LARGE_IO_PAGES 0
+
+#if I460_LARGE_IO_PAGES
+# define I460_IO_PAGE_SHIFT i460.io_page_shift
+#else
+# define I460_IO_PAGE_SHIFT 12
+#endif
+
+#define I460_IOPAGES_PER_KPAGE (PAGE_SIZE >> I460_IO_PAGE_SHIFT)
+#define I460_KPAGES_PER_IOPAGE (1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT))
+#define I460_SRAM_IO_DISABLE (1 << 4)
+#define I460_BAPBASE_ENABLE (1 << 3)
+#define I460_AGPSIZ_MASK 0x7
+#define I460_4M_PS (1 << 1)
+
+/* Control bits for Out-Of-GART coherency and Burst Write Combining */
+#define I460_GXBCTL_OOG (1UL << 0)
+#define I460_GXBCTL_BWC (1UL << 2)
+
+/*
+ * gatt_table entries are 32 bits wide on the i460; the generic code ought to declare the
+ * gatt_table and gatt_table_real pointers as "void *"...
+ */
+#define RD_GATT(index) readl((u32 *) i460.gatt + (index))
+#define WR_GATT(index, val) writel((val), (u32 *) i460.gatt + (index))
+/*
+ * The 460 spec says we have to read the last location written to make sure that all
+ * writes have taken effect
+ */
+#define WR_FLUSH_GATT(index) RD_GATT(index)
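+/*
+ * Typical usage (see i460_free_gatt_table() below): update a range of entries
+ * with WR_GATT(), then read back the last index written, e.g.
+ *
+ *	for (i = 0; i < num_entries; i++)
+ *		WR_GATT(i, 0);
+ *	WR_FLUSH_GATT(num_entries - 1);
+ */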
+
+static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
+ dma_addr_t addr, int type);
+
+static struct {
+ void *gatt; /* ioremap'd GATT area */
+
+ /* i460 supports multiple GART page sizes, so GART pageshift is dynamic: */
+ u8 io_page_shift;
+
+ /* BIOS configures chipset to one of 2 possible apbase values: */
+ u8 dynamic_apbase;
+
+ /* structure for tracking partial use of 4MB GART pages: */
+ struct lp_desc {
+ unsigned long *alloced_map; /* bitmap of kernel-pages in use */
+ int refcount; /* number of kernel pages using the large page */
+ u64 paddr; /* physical address of large page */
+ struct page *page; /* page pointer */
+ } *lp_desc;
+} i460;
+
+static const struct aper_size_info_8 i460_sizes[3] =
+{
+ /*
+ * The 32GB aperture is only available with a 4M GART page size. Due to the
+ * dynamic GART page size, we can't figure out page_order or num_entries until
+ * runtime.
+ */
+ {32768, 0, 0, 4},
+ {1024, 0, 0, 2},
+ {256, 0, 0, 1}
+};
+
+static struct gatt_mask i460_masks[] =
+{
+ {
+ .mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT,
+ .type = 0
+ }
+};
+
+static int i460_fetch_size (void)
+{
+ int i;
+ u8 temp;
+ struct aper_size_info_8 *values;
+
+ /* Determine the GART page size */
+ pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &temp);
+ i460.io_page_shift = (temp & I460_4M_PS) ? 22 : 12;
+ pr_debug("i460_fetch_size: io_page_shift=%d\n", i460.io_page_shift);
+
+ if (i460.io_page_shift != I460_IO_PAGE_SHIFT) {
+		printk(KERN_ERR PFX
+		       "I/O (GART) page-size %luKB doesn't match expected "
+		       "size %luKB\n",
+		       1UL << (i460.io_page_shift - 10),
+		       1UL << (I460_IO_PAGE_SHIFT - 10));
+ return 0;
+ }
+
+ values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+
+ pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
+
+ /* Exit now if the IO drivers for the GART SRAMS are turned off */
+ if (temp & I460_SRAM_IO_DISABLE) {
+ printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n");
+ printk(KERN_ERR PFX "AGPGART operation not possible\n");
+ return 0;
+ }
+
+	/* Make sure we don't try to create a 2^23-entry GATT */
+ if ((i460.io_page_shift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) {
+ printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n");
+ return 0;
+ }
+
+ /* Determine the proper APBASE register */
+ if (temp & I460_BAPBASE_ENABLE)
+ i460.dynamic_apbase = INTEL_I460_BAPBASE;
+ else
+ i460.dynamic_apbase = AGP_APBASE;
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ /*
+ * Dynamically calculate the proper num_entries and page_order values for
+		 * the defined aperture sizes. Take care not to shift off the end of
+ * values[i].size.
+ */
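+		/*
+		 * E.g. with 4KB GART pages (shift 12) the 256MB aperture gets
+		 * (256 << 8) = 65536 entries, while with 4MB pages (shift 22)
+		 * the 32GB aperture gets (32768 << 8) >> 10 = 8192 entries.
+		 */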
+ values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12);
+ values[i].page_order = ilog2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT);
+ }
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ /* Neglect control bits when matching up size_value */
+ if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) {
+ agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+/* There isn't anything to do here since 460 has no GART TLB. */
+static void i460_tlb_flush (struct agp_memory *mem)
+{
+ return;
+}
+
+/*
+ * This utility function is needed to prevent corruption of the control bits
+ * which are stored along with the aperture size in 460's AGPSIZ register
+ */
+static void i460_write_agpsiz (u8 size_value)
+{
+ u8 temp;
+
+ pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ,
+ ((temp & ~I460_AGPSIZ_MASK) | size_value));
+}
+
+static void i460_cleanup (void)
+{
+ struct aper_size_info_8 *previous_size;
+
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ i460_write_agpsiz(previous_size->size_value);
+
+ if (I460_IO_PAGE_SHIFT > PAGE_SHIFT)
+ kfree(i460.lp_desc);
+}
+
+static int i460_configure (void)
+{
+ union {
+ u32 small[2];
+ u64 large;
+ } temp;
+ size_t size;
+ u8 scratch;
+ struct aper_size_info_8 *current_size;
+
+ temp.large = 0;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+ i460_write_agpsiz(current_size->size_value);
+
+ /*
+ * Do the necessary rigmarole to read all eight bytes of APBASE.
+ * This has to be done since the AGP aperture can be above 4GB on
+ * 460 based systems.
+ */
+ pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase, &(temp.small[0]));
+ pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase + 4, &(temp.small[1]));
+
+ /* Clear BAR control bits */
+ agp_bridge->gart_bus_addr = temp.large & ~((1UL << 3) - 1);
+
+ pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &scratch);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL,
+ (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC);
+
+ /*
+ * Initialize partial allocation trackers if a GART page is bigger than a kernel
+ * page.
+ */
+ if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) {
+ size = current_size->num_entries * sizeof(i460.lp_desc[0]);
+ i460.lp_desc = kzalloc(size, GFP_KERNEL);
+ if (!i460.lp_desc)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int i460_create_gatt_table (struct agp_bridge_data *bridge)
+{
+ int page_order, num_entries, i;
+ void *temp;
+
+ /*
+ * Load up the fixed address of the GART SRAMS which hold our GATT table.
+ */
+ temp = agp_bridge->current_size;
+ page_order = A_SIZE_8(temp)->page_order;
+ num_entries = A_SIZE_8(temp)->num_entries;
+
+ i460.gatt = ioremap(INTEL_I460_ATTBASE, PAGE_SIZE << page_order);
+ if (!i460.gatt) {
+ printk(KERN_ERR PFX "ioremap failed\n");
+ return -ENOMEM;
+ }
+
+	/* These are no good; they should be removed from the agp_bridge structure... */
+ agp_bridge->gatt_table_real = NULL;
+ agp_bridge->gatt_table = NULL;
+ agp_bridge->gatt_bus_addr = 0;
+
+ for (i = 0; i < num_entries; ++i)
+ WR_GATT(i, 0);
+ WR_FLUSH_GATT(i - 1);
+ return 0;
+}
+
+static int i460_free_gatt_table (struct agp_bridge_data *bridge)
+{
+ int num_entries, i;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+
+ num_entries = A_SIZE_8(temp)->num_entries;
+
+ for (i = 0; i < num_entries; ++i)
+ WR_GATT(i, 0);
+ WR_FLUSH_GATT(num_entries - 1);
+
+ iounmap(i460.gatt);
+ return 0;
+}
+
+/*
+ * The following functions are called when the I/O (GART) page size is smaller than
+ * PAGE_SIZE.
+ */
+
+static int i460_insert_memory_small_io_page (struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ unsigned long paddr, io_pg_start, io_page_size;
+ int i, j, k, num_entries;
+ void *temp;
+
+ pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
+ mem, pg_start, type, page_to_phys(mem->pages[0]));
+
+ if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
+ return -EINVAL;
+
+ io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_8(temp)->num_entries;
+
+ if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) {
+ printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
+ return -EINVAL;
+ }
+
+ j = io_pg_start;
+ while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {
+ if (!PGE_EMPTY(agp_bridge, RD_GATT(j))) {
+ pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n",
+ j, RD_GATT(j));
+ return -EBUSY;
+ }
+ j++;
+ }
+
+ io_page_size = 1UL << I460_IO_PAGE_SHIFT;
+ for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
+ paddr = page_to_phys(mem->pages[i]);
+ for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
+ WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type));
+ }
+ WR_FLUSH_GATT(j - 1);
+ return 0;
+}
+
+static int i460_remove_memory_small_io_page(struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ int i;
+
+ pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n",
+ mem, pg_start, type);
+
+ pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
+
+ for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++)
+ WR_GATT(i, 0);
+ WR_FLUSH_GATT(i - 1);
+ return 0;
+}
+
+#if I460_LARGE_IO_PAGES
+
+/*
+ * These functions are called when the I/O (GART) page size exceeds PAGE_SIZE.
+ *
+ * This situation is interesting since AGP memory allocations that are smaller than a
+ * single GART page are possible. The i460.lp_desc array tracks partial allocation of the
+ * large GART pages to work around this issue.
+ *
+ * i460.lp_desc[pg_num].refcount tracks the number of kernel pages in use within GART page
+ * pg_num. i460.lp_desc[pg_num].paddr is the physical address of the large page and
+ * i460.lp_desc[pg_num].alloced_map is a bitmap of kernel pages that are in use (allocated).
+ */
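+/*
+ * For example, with 4MB GART pages (I460_IO_PAGE_SHIFT == 22) and 4KB kernel
+ * pages (PAGE_SHIFT == 12), I460_KPAGES_PER_IOPAGE is 1 << 10 = 1024, so each
+ * lp_desc bitmap tracks up to 1024 kernel pages within one large GART page.
+ */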
+
+static int i460_alloc_large_page (struct lp_desc *lp)
+{
+ unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT;
+ size_t map_size;
+
+ lp->page = alloc_pages(GFP_KERNEL, order);
+ if (!lp->page) {
+ printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n");
+ return -ENOMEM;
+ }
+
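+	/* Round the bitmap up to a whole number of longs, then convert bits to bytes */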
+ map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8;
+ lp->alloced_map = kzalloc(map_size, GFP_KERNEL);
+ if (!lp->alloced_map) {
+ __free_pages(lp->page, order);
+ printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
+ return -ENOMEM;
+ }
+
+ lp->paddr = page_to_phys(lp->page);
+ lp->refcount = 0;
+ atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
+ return 0;
+}
+
+static void i460_free_large_page (struct lp_desc *lp)
+{
+ kfree(lp->alloced_map);
+ lp->alloced_map = NULL;
+
+ __free_pages(lp->page, I460_IO_PAGE_SHIFT - PAGE_SHIFT);
+ atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
+}
+
+static int i460_insert_memory_large_io_page (struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ int i, start_offset, end_offset, idx, pg, num_entries;
+ struct lp_desc *start, *end, *lp;
+ void *temp;
+
+ if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
+ return -EINVAL;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_8(temp)->num_entries;
+
+ /* Figure out what pg_start means in terms of our large GART pages */
+ start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
+ end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
+ start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
+ end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
+
+ if (end > i460.lp_desc + num_entries) {
+ printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
+ return -EINVAL;
+ }
+
+ /* Check if the requested region of the aperture is free */
+ for (lp = start; lp <= end; ++lp) {
+ if (!lp->alloced_map)
+ continue; /* OK, the entire large page is available... */
+
+ for (idx = ((lp == start) ? start_offset : 0);
+ idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
+ idx++)
+ {
+ if (test_bit(idx, lp->alloced_map))
+ return -EBUSY;
+ }
+ }
+
+ for (lp = start, i = 0; lp <= end; ++lp) {
+ if (!lp->alloced_map) {
+ /* Allocate new GART pages... */
+ if (i460_alloc_large_page(lp) < 0)
+ return -ENOMEM;
+ pg = lp - i460.lp_desc;
+ WR_GATT(pg, i460_mask_memory(agp_bridge,
+ lp->paddr, 0));
+ WR_FLUSH_GATT(pg);
+ }
+
+ for (idx = ((lp == start) ? start_offset : 0);
+ idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
+ idx++, i++)
+ {
+ mem->pages[i] = lp->page;
+ __set_bit(idx, lp->alloced_map);
+ ++lp->refcount;
+ }
+ }
+ return 0;
+}
+
+static int i460_remove_memory_large_io_page (struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ int i, pg, start_offset, end_offset, idx, num_entries;
+ struct lp_desc *start, *end, *lp;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_8(temp)->num_entries;
+
+ /* Figure out what pg_start means in terms of our large GART pages */
+ start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
+ end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
+ start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
+ end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
+
+ for (i = 0, lp = start; lp <= end; ++lp) {
+ for (idx = ((lp == start) ? start_offset : 0);
+ idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
+ idx++, i++)
+ {
+ mem->pages[i] = NULL;
+ __clear_bit(idx, lp->alloced_map);
+ --lp->refcount;
+ }
+
+ /* Free GART pages if they are unused */
+ if (lp->refcount == 0) {
+ pg = lp - i460.lp_desc;
+ WR_GATT(pg, 0);
+ WR_FLUSH_GATT(pg);
+ i460_free_large_page(lp);
+ }
+ }
+ return 0;
+}
+
+/* Wrapper routines to call the appropriate {small_io_page,large_io_page} function */
+
+static int i460_insert_memory (struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
+ return i460_insert_memory_small_io_page(mem, pg_start, type);
+ else
+ return i460_insert_memory_large_io_page(mem, pg_start, type);
+}
+
+static int i460_remove_memory (struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
+ return i460_remove_memory_small_io_page(mem, pg_start, type);
+ else
+ return i460_remove_memory_large_io_page(mem, pg_start, type);
+}
+
+/*
+ * If the I/O (GART) page size is bigger than the kernel page size, we don't want to
+ * allocate memory until we know where it is to be bound in the aperture (a
+ * multi-kernel-page alloc might fit inside of an already allocated GART page).
+ *
+ * Let's just hope nobody counts on the allocated AGP memory being there before bind time
+ * (I don't think current drivers do)...
+ */
+static struct page *i460_alloc_page (struct agp_bridge_data *bridge)
+{
+ void *page;
+
+ if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
+ page = agp_generic_alloc_page(agp_bridge);
+ } else
+ /* Returning NULL would cause problems */
+ /* AK: really dubious code. */
+ page = (void *)~0UL;
+ return page;
+}
+
+static void i460_destroy_page (struct page *page, int flags)
+{
+ if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
+ agp_generic_destroy_page(page, flags);
+ }
+}
+
+#endif /* I460_LARGE_IO_PAGES */
+
+static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
+ dma_addr_t addr, int type)
+{
+ /* Make sure the returned address is a valid GATT entry */
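+	/* Drop the in-page offset, keep address bits 31:12, and shift them down
+	 * so the page frame occupies the low bits of the entry. */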
+ return bridge->driver->masks[0].mask
+ | (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12);
+}
+
+const struct agp_bridge_driver intel_i460_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = i460_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 3,
+ .configure = i460_configure,
+ .fetch_size = i460_fetch_size,
+ .cleanup = i460_cleanup,
+ .tlb_flush = i460_tlb_flush,
+ .mask_memory = i460_mask_memory,
+ .masks = i460_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = i460_create_gatt_table,
+ .free_gatt_table = i460_free_gatt_table,
+#if I460_LARGE_IO_PAGES
+ .insert_memory = i460_insert_memory,
+ .remove_memory = i460_remove_memory,
+ .agp_alloc_page = i460_alloc_page,
+ .agp_destroy_page = i460_destroy_page,
+#else
+ .insert_memory = i460_insert_memory_small_io_page,
+ .remove_memory = i460_remove_memory_small_io_page,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+#endif
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+ .cant_use_aperture = true,
+};
+
+static int agp_intel_i460_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver = &intel_i460_driver;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ printk(KERN_INFO PFX "Detected Intel 460GX chipset\n");
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void agp_intel_i460_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static struct pci_device_id agp_intel_i460_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_84460GX,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table);
+
+static struct pci_driver agp_intel_i460_pci_driver = {
+ .name = "agpgart-intel-i460",
+ .id_table = agp_intel_i460_pci_table,
+ .probe = agp_intel_i460_probe,
+ .remove = agp_intel_i460_remove,
+};
+
+static int __init agp_intel_i460_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_intel_i460_pci_driver);
+}
+
+static void __exit agp_intel_i460_cleanup(void)
+{
+ pci_unregister_driver(&agp_intel_i460_pci_driver);
+}
+
+module_init(agp_intel_i460_init);
+module_exit(agp_intel_i460_cleanup);
+
+MODULE_AUTHOR("Chris Ahna <Christopher.J.Ahna@intel.com>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
new file mode 100644
index 0000000000..c518b3a9db
--- /dev/null
+++ b/drivers/char/agp/intel-agp.c
@@ -0,0 +1,923 @@
+/*
+ * Intel AGPGART routines.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <asm/smp.h>
+#include "agp.h"
+#include "intel-agp.h"
+#include <drm/intel-gtt.h>
+
+static int intel_fetch_size(void)
+{
+ int i;
+ u16 temp;
+ struct aper_size_info_16 *values;
+
+ pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp);
+ values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static int __intel_8xx_fetch_size(u8 temp)
+{
+ int i;
+ struct aper_size_info_8 *values;
+
+ values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+ return 0;
+}
+
+static int intel_8xx_fetch_size(void)
+{
+ u8 temp;
+
+ pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp);
+ return __intel_8xx_fetch_size(temp);
+}
+
+static int intel_815_fetch_size(void)
+{
+ u8 temp;
+
+ /* Intel 815 chipsets have a _weird_ APSIZE register with only
+ * one non-reserved bit, so mask the others out ... */
+ pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp);
+ temp &= (1 << 3);
+
+ return __intel_8xx_fetch_size(temp);
+}
+
+static void intel_tlbflush(struct agp_memory *mem)
+{
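+	/* The two writes below differ only in bit 7 of AGPCTRL: clearing and
+	 * then re-setting it forces a GTLB flush, matching the explicit bit 7
+	 * toggle in intel_8xx_tlbflush() below. */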
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200);
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+}
+
+
+static void intel_8xx_tlbflush(struct agp_memory *mem)
+{
+ u32 temp;
+ pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp & ~(1 << 7));
+ pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp | (1 << 7));
+}
+
+
+static void intel_cleanup(void)
+{
+ u16 temp;
+ struct aper_size_info_16 *previous_size;
+
+ previous_size = A_SIZE_16(agp_bridge->previous_size);
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
+ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value);
+}
+
+
+static void intel_8xx_cleanup(void)
+{
+ u16 temp;
+ struct aper_size_info_8 *previous_size;
+
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value);
+}
+
+
+static int intel_configure(void)
+{
+ u16 temp2;
+ struct aper_size_info_16 *current_size;
+
+ current_size = A_SIZE_16(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+
+ /* paccfg/nbxcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG,
+ (temp2 & ~(1 << 10)) | (1 << 9));
+ /* clear any possible error conditions */
+ pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7);
+ return 0;
+}
+
+static int intel_815_configure(void)
+{
+ u32 addr;
+ u8 temp2;
+ struct aper_size_info_8 *current_size;
+
+ /* attbase - aperture base */
+ /* the Intel 815 chipset spec. says that bits 29-31 in the
+ * ATTBASE register are reserved -> try not to write them */
+ if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) {
+ dev_emerg(&agp_bridge->dev->dev, "gatt bus addr too high");
+ return -EINVAL;
+ }
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE,
+ current_size->size_value);
+
+ /* address to map to */
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
+
+ pci_read_config_dword(agp_bridge->dev, INTEL_ATTBASE, &addr);
+ addr &= INTEL_815_ATTBASE_MASK;
+ addr |= agp_bridge->gatt_bus_addr;
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* apcont */
+ pci_read_config_byte(agp_bridge->dev, INTEL_815_APCONT, &temp2);
+ pci_write_config_byte(agp_bridge->dev, INTEL_815_APCONT, temp2 | (1 << 1));
+
+ /* clear any possible error conditions */
+	/* Oddness: this chipset seems to have no ERRSTS register! */
+ return 0;
+}
+
+static void intel_820_tlbflush(struct agp_memory *mem)
+{
+ return;
+}
+
+static void intel_820_cleanup(void)
+{
+ u8 temp;
+ struct aper_size_info_8 *previous_size;
+
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR,
+ temp & ~(1 << 1));
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE,
+ previous_size->size_value);
+}
+
+
+static int intel_820_configure(void)
+{
+ u8 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* global enable aperture access */
+ /* This flag is not accessed through MCHCFG register as in */
+ /* i850 chipset. */
+ pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp2);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR, temp2 | (1 << 1));
+ /* clear any possible AGP-related error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I820_ERRSTS, 0x001c);
+ return 0;
+}
+
+static int intel_840_configure(void)
+{
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* mcgcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, temp2 | (1 << 9));
+ /* clear any possible error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I840_ERRSTS, 0xc000);
+ return 0;
+}
+
+static int intel_845_configure(void)
+{
+ u8 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ if (agp_bridge->apbase_config != 0) {
+ pci_write_config_dword(agp_bridge->dev, AGP_APBASE,
+ agp_bridge->apbase_config);
+ } else {
+ /* address to map to */
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
+ agp_bridge->apbase_config = agp_bridge->gart_bus_addr;
+ }
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* agpm */
+ pci_read_config_byte(agp_bridge->dev, INTEL_I845_AGPM, &temp2);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1));
+ /* clear any possible error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c);
+ return 0;
+}
+
+static int intel_850_configure(void)
+{
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* mcgcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, temp2 | (1 << 9));
+ /* clear any possible AGP-related error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I850_ERRSTS, 0x001c);
+ return 0;
+}
+
+static int intel_860_configure(void)
+{
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* mcgcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, temp2 | (1 << 9));
+ /* clear any possible AGP-related error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I860_ERRSTS, 0xf700);
+ return 0;
+}
+
+static int intel_830mp_configure(void)
+{
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* gmch */
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp2 | (1 << 9));
+ /* clear any possible AGP-related error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I830_ERRSTS, 0x1c);
+ return 0;
+}
+
+static int intel_7505_configure(void)
+{
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* mchcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, temp2 | (1 << 9));
+
+ return 0;
+}
+
+/* Common gatt masks and aperture-size tables */
+static const struct gatt_mask intel_generic_masks[] =
+{
+ {.mask = 0x00000017, .type = 0}
+};
+
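+/*
+ * Aperture-size tables: each entry is { aperture size in MB, number of GATT
+ * entries, GATT page order, APSIZE register value }. E.g. {64, 16384, 4, 0}
+ * is a 64MB aperture mapped by 16384 4KB entries whose 64KB table spans
+ * 2^4 pages.
+ */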
+static const struct aper_size_info_8 intel_815_sizes[2] =
+{
+ {64, 16384, 4, 0},
+ {32, 8192, 3, 8},
+};
+
+static const struct aper_size_info_8 intel_8xx_sizes[7] =
+{
+ {256, 65536, 6, 0},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 48},
+ {32, 8192, 3, 56},
+ {16, 4096, 2, 60},
+ {8, 2048, 1, 62},
+ {4, 1024, 0, 63}
+};
+
+static const struct aper_size_info_16 intel_generic_sizes[7] =
+{
+ {256, 65536, 6, 0},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 48},
+ {32, 8192, 3, 56},
+ {16, 4096, 2, 60},
+ {8, 2048, 1, 62},
+ {4, 1024, 0, 63}
+};
+
+static const struct aper_size_info_8 intel_830mp_sizes[4] =
+{
+ {256, 65536, 6, 0},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 48},
+ {32, 8192, 3, 56}
+};
+
+static const struct agp_bridge_driver intel_generic_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_generic_sizes,
+ .size_type = U16_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = intel_configure,
+ .fetch_size = intel_fetch_size,
+ .cleanup = intel_cleanup,
+ .tlb_flush = intel_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_815_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_815_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 2,
+ .needs_scratch_page = true,
+ .configure = intel_815_configure,
+ .fetch_size = intel_815_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_820_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = intel_820_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_820_cleanup,
+ .tlb_flush = intel_820_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_830mp_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_830mp_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = true,
+ .configure = intel_830mp_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_840_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = intel_840_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_845_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = intel_845_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_850_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = intel_850_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_860_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = intel_860_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_7505_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = intel_7505_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+/* Table describing the Intel AGP chipsets handled directly by this driver.
+ * Each entry maps a host bridge PCI device ID to a human-readable name and
+ * the agp_bridge_driver to use for it; GMCH/GTT parts are instead handed off
+ * to intel_gmch_probe() in agp_intel_probe().
+ */
+static const struct intel_agp_driver_description {
+ unsigned int chip_id;
+ char *name;
+ const struct agp_bridge_driver *driver;
+} intel_agp_chipsets[] = {
+ { PCI_DEVICE_ID_INTEL_82443LX_0, "440LX", &intel_generic_driver },
+ { PCI_DEVICE_ID_INTEL_82443BX_0, "440BX", &intel_generic_driver },
+ { PCI_DEVICE_ID_INTEL_82443GX_0, "440GX", &intel_generic_driver },
+ { PCI_DEVICE_ID_INTEL_82815_MC, "i815", &intel_815_driver },
+ { PCI_DEVICE_ID_INTEL_82820_HB, "i820", &intel_820_driver },
+ { PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
+ { PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
+ { PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
+ { PCI_DEVICE_ID_INTEL_82845_HB, "i845", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82845G_HB, "845G", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
+ { PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82855GM_HB, "855GM", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82860_HB, "i860", &intel_860_driver },
+ { PCI_DEVICE_ID_INTEL_82865_HB, "865", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82875_HB, "i875", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_7505_0, "E7505", &intel_7505_driver },
+ { PCI_DEVICE_ID_INTEL_7205_0, "E7205", &intel_7505_driver },
+ { 0, NULL, NULL }
+};
+
+static int agp_intel_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr = 0;
+ struct resource *r;
+ int i, err;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->capndx = cap_ptr;
+
+ if (intel_gmch_probe(pdev, NULL, bridge))
+ goto found_gmch;
+
+ for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
+		/* Multiple models of gfx chip may sit on the same host
+		   bridge type; matching the exact device ID makes sure
+		   we detect the right IGD. */
+ if (pdev->device == intel_agp_chipsets[i].chip_id) {
+ bridge->driver = intel_agp_chipsets[i].driver;
+ break;
+ }
+ }
+
+ if (!bridge->driver) {
+ if (cap_ptr)
+ dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+
+ bridge->dev = pdev;
+ bridge->dev_private_data = NULL;
+
+ dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
+
+ /*
+ * The following fixes the case where the BIOS has "forgotten" to
+ * provide an address range for the GART.
+ * 20030610 - hamish@zot.org
+ * This happens before pci_enable_device() intentionally;
+ * calling pci_enable_device() before assigning the resource
+ * will result in the GART being disabled on machines with such
+ * BIOSs (the GART ends up with a BAR starting at 0, which
+	 * conflicts with a lot of other devices).
+ */
+ r = &pdev->resource[0];
+ if (!r->start && r->end) {
+ if (pci_assign_resource(pdev, 0)) {
+ dev_err(&pdev->dev, "can't assign resource 0\n");
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+ }
+
+ /*
+	 * If the device has not been properly set up, the following will catch
+ * the problem and should stop the system from crashing.
+ * 20030610 - hamish@zot.org
+ */
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "can't enable PCI device\n");
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+
+ /* Fill in the mode register */
+ if (cap_ptr) {
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+ }
+
+found_gmch:
+ pci_set_drvdata(pdev, bridge);
+ err = agp_add_bridge(bridge);
+ return err;
+}
+
+static void agp_intel_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+
+ intel_gmch_remove();
+
+ agp_put_bridge(bridge);
+}
+
+static int agp_intel_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ bridge->driver->configure();
+
+ return 0;
+}
+
+static const struct pci_device_id agp_intel_pci_table[] = {
+#define ID(x) \
+ { \
+ .class = (PCI_CLASS_BRIDGE_HOST << 8), \
+ .class_mask = ~0, \
+ .vendor = PCI_VENDOR_ID_INTEL, \
+ .device = x, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ }
+ ID(PCI_DEVICE_ID_INTEL_82441), /* for HAS2 support */
+ ID(PCI_DEVICE_ID_INTEL_82443LX_0),
+ ID(PCI_DEVICE_ID_INTEL_82443BX_0),
+ ID(PCI_DEVICE_ID_INTEL_82443GX_0),
+ ID(PCI_DEVICE_ID_INTEL_82810_MC1),
+ ID(PCI_DEVICE_ID_INTEL_82810_MC3),
+ ID(PCI_DEVICE_ID_INTEL_82810E_MC),
+ ID(PCI_DEVICE_ID_INTEL_82815_MC),
+ ID(PCI_DEVICE_ID_INTEL_82820_HB),
+ ID(PCI_DEVICE_ID_INTEL_82820_UP_HB),
+ ID(PCI_DEVICE_ID_INTEL_82830_HB),
+ ID(PCI_DEVICE_ID_INTEL_82840_HB),
+ ID(PCI_DEVICE_ID_INTEL_82845_HB),
+ ID(PCI_DEVICE_ID_INTEL_82845G_HB),
+ ID(PCI_DEVICE_ID_INTEL_82850_HB),
+ ID(PCI_DEVICE_ID_INTEL_82854_HB),
+ ID(PCI_DEVICE_ID_INTEL_82855PM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82855GM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82860_HB),
+ ID(PCI_DEVICE_ID_INTEL_82865_HB),
+ ID(PCI_DEVICE_ID_INTEL_82875_HB),
+ ID(PCI_DEVICE_ID_INTEL_7505_0),
+ ID(PCI_DEVICE_ID_INTEL_7205_0),
+ ID(PCI_DEVICE_ID_INTEL_E7221_HB),
+ ID(PCI_DEVICE_ID_INTEL_82915G_HB),
+ ID(PCI_DEVICE_ID_INTEL_82915GM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82945G_HB),
+ ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
+ ID(PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_PINEVIEW_HB),
+ ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
+ ID(PCI_DEVICE_ID_INTEL_82G35_HB),
+ ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
+ ID(PCI_DEVICE_ID_INTEL_82965G_HB),
+ ID(PCI_DEVICE_ID_INTEL_82965GM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82965GME_HB),
+ ID(PCI_DEVICE_ID_INTEL_G33_HB),
+ ID(PCI_DEVICE_ID_INTEL_Q35_HB),
+ ID(PCI_DEVICE_ID_INTEL_Q33_HB),
+ ID(PCI_DEVICE_ID_INTEL_GM45_HB),
+ ID(PCI_DEVICE_ID_INTEL_EAGLELAKE_HB),
+ ID(PCI_DEVICE_ID_INTEL_Q45_HB),
+ ID(PCI_DEVICE_ID_INTEL_G45_HB),
+ ID(PCI_DEVICE_ID_INTEL_G41_HB),
+ ID(PCI_DEVICE_ID_INTEL_B43_HB),
+ ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_intel_pci_table);
+
+static DEFINE_SIMPLE_DEV_PM_OPS(agp_intel_pm_ops, NULL, agp_intel_resume);
+
+static struct pci_driver agp_intel_pci_driver = {
+ .name = "agpgart-intel",
+ .id_table = agp_intel_pci_table,
+ .probe = agp_intel_probe,
+ .remove = agp_intel_remove,
+ .driver.pm = &agp_intel_pm_ops,
+};
+
+static int __init agp_intel_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_intel_pci_driver);
+}
+
+static void __exit agp_intel_cleanup(void)
+{
+ pci_unregister_driver(&agp_intel_pci_driver);
+}
+
+module_init(agp_intel_init);
+module_exit(agp_intel_cleanup);
+
+MODULE_AUTHOR("Dave Jones, Various @Intel");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
new file mode 100644
index 0000000000..164bf65195
--- /dev/null
+++ b/drivers/char/agp/intel-agp.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common Intel AGPGART and GTT definitions.
+ */
+#ifndef _INTEL_AGP_H
+#define _INTEL_AGP_H
+
+/* Intel registers */
+#define INTEL_APSIZE 0xb4
+#define INTEL_ATTBASE 0xb8
+#define INTEL_AGPCTRL 0xb0
+#define INTEL_NBXCFG 0x50
+#define INTEL_ERRSTS 0x91
+
+/* Intel i830 registers */
+#define I830_GMCH_CTRL 0x52
+#define I830_GMCH_ENABLED 0x4
+#define I830_GMCH_MEM_MASK 0x1
+#define I830_GMCH_MEM_64M 0x1
+#define I830_GMCH_MEM_128M 0
+#define I830_GMCH_GMS_MASK 0x70
+#define I830_GMCH_GMS_DISABLED 0x00
+#define I830_GMCH_GMS_LOCAL 0x10
+#define I830_GMCH_GMS_STOLEN_512 0x20
+#define I830_GMCH_GMS_STOLEN_1024 0x30
+#define I830_GMCH_GMS_STOLEN_8192 0x40
+#define I830_RDRAM_CHANNEL_TYPE 0x03010
+#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
+#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
+
+/* This one is for the I830MP with an external graphics card */
+#define INTEL_I830_ERRSTS 0x92
+
+/* Intel 855GM/852GM registers */
+#define I855_GMCH_GMS_MASK 0xF0
+#define I855_GMCH_GMS_STOLEN_0M 0x0
+#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+#define I85X_CAPID 0x44
+#define I85X_VARIANT_MASK 0x7
+#define I85X_VARIANT_SHIFT 5
+#define I855_GME 0x0
+#define I855_GM 0x4
+#define I852_GME 0x2
+#define I852_GM 0x5
+
+/* Intel i845 registers */
+#define INTEL_I845_AGPM 0x51
+#define INTEL_I845_ERRSTS 0xc8
+
+/* Intel i860 registers */
+#define INTEL_I860_MCHCFG 0x50
+#define INTEL_I860_ERRSTS 0xc8
+
+/* Intel i810 registers */
+#define I810_GMADR_BAR 0
+#define I810_MMADR_BAR 1
+#define I810_PTE_BASE 0x10000
+#define I810_PTE_MAIN_UNCACHED 0x00000000
+#define I810_PTE_LOCAL 0x00000002
+#define I810_PTE_VALID 0x00000001
+#define I830_PTE_SYSTEM_CACHED 0x00000006
+
+#define I810_SMRAM_MISCC 0x70
+#define I810_GFX_MEM_WIN_SIZE 0x00010000
+#define I810_GFX_MEM_WIN_32M 0x00010000
+#define I810_GMS 0x000000c0
+#define I810_GMS_DISABLE 0x00000000
+#define I810_PGETBL_CTL 0x2020
+#define I810_PGETBL_ENABLED 0x00000001
+/* Note: PGETBL_CTL2 has a different offset on G33. */
+#define I965_PGETBL_CTL2 0x20c4
+#define I965_PGETBL_SIZE_MASK 0x0000000e
+#define I965_PGETBL_SIZE_512KB (0 << 1)
+#define I965_PGETBL_SIZE_256KB (1 << 1)
+#define I965_PGETBL_SIZE_128KB (2 << 1)
+#define I965_PGETBL_SIZE_1MB (3 << 1)
+#define I965_PGETBL_SIZE_2MB (4 << 1)
+#define I965_PGETBL_SIZE_1_5MB (5 << 1)
+#define G33_GMCH_SIZE_MASK (3 << 8)
+#define G33_GMCH_SIZE_1M (1 << 8)
+#define G33_GMCH_SIZE_2M (2 << 8)
+#define G4x_GMCH_SIZE_MASK (0xf << 8)
+#define G4x_GMCH_SIZE_1M (0x1 << 8)
+#define G4x_GMCH_SIZE_2M (0x3 << 8)
+#define G4x_GMCH_SIZE_VT_EN (0x8 << 8)
+#define G4x_GMCH_SIZE_VT_1M (G4x_GMCH_SIZE_1M | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_1_5M ((0x2 << 8) | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
+
+#define GFX_FLSH_CNTL 0x2170 /* 915+ */
+
+#define I810_DRAM_CTL 0x3000
+#define I810_DRAM_ROW_0 0x00000001
+#define I810_DRAM_ROW_0_SDRAM 0x00000001
+
+/* Intel 815 register */
+#define INTEL_815_APCONT 0x51
+#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
+
+/* Intel i820 registers */
+#define INTEL_I820_RDCR 0x51
+#define INTEL_I820_ERRSTS 0xc8
+
+/* Intel i840 registers */
+#define INTEL_I840_MCHCFG 0x50
+#define INTEL_I840_ERRSTS 0xc8
+
+/* Intel i850 registers */
+#define INTEL_I850_MCHCFG 0x50
+#define INTEL_I850_ERRSTS 0xc8
+
+/* intel 915G registers */
+#define I915_GMADR_BAR 2
+#define I915_MMADR_BAR 0
+#define I915_PTE_BAR 3
+#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+
+#define I915_IFPADDR 0x60
+#define I830_HIC 0x70
+
+/* Intel 965G registers */
+#define I965_MSAC 0x62
+#define I965_IFPADDR 0x70
+
+/* Intel 7505 registers */
+#define INTEL_I7505_APSIZE 0x74
+#define INTEL_I7505_NCAPID 0x60
+#define INTEL_I7505_NISTAT 0x6c
+#define INTEL_I7505_ATTBASE 0x78
+#define INTEL_I7505_ERRSTS 0x42
+#define INTEL_I7505_AGPCTRL 0x70
+#define INTEL_I7505_MCHCFG 0x50
+
+/* pci devices ids */
+#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
+#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
+#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
+#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
+#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980
+#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
+#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
+#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
+#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
+#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
+#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
+#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
+#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
+#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
+#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
+#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001
+#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
+#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
+#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
+#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
+#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
+#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
+#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
+#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
+#define PCI_DEVICE_ID_INTEL_B43_1_HB 0x2E90
+#define PCI_DEVICE_ID_INTEL_B43_1_IG 0x2E92
+#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
+#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02
+#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
+#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
+#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
+#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
+#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
+#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
+
+#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
new file mode 100644
index 0000000000..bf6716ff86
--- /dev/null
+++ b/drivers/char/agp/intel-gtt.c
@@ -0,0 +1,1464 @@
+/*
+ * Intel GTT (Graphics Translation Table) routines
+ *
+ * Caveat: This driver implements the linux agp interface, but this is far from
+ * an agp driver! GTT support ended up here for purely historical reasons: the
+ * old userspace intel graphics drivers needed an interface to map memory into
+ * the GTT. And the drm provides a default interface for graphics devices sitting
+ * on an agp port. So it made sense to fake the GTT support as an agp port to
+ * avoid having to create a new api.
+ *
+ * With gem this no longer makes much sense and just needlessly complicates the
+ * code. But as long as the old graphics stack is still supported, it's stuck
+ * here.
+ *
+ * /fairy-tale-mode off
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <linux/iommu.h>
+#include <linux/delay.h>
+#include <asm/smp.h>
+#include "agp.h"
+#include "intel-agp.h"
+#include <drm/intel-gtt.h>
+#include <asm/set_memory.h>
+
+/*
+ * If we have Intel graphics, we're not going to have anything other than
+ * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
+ * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
+ * Only newer chipsets need to bother with this, of course.
+ */
+#ifdef CONFIG_INTEL_IOMMU
+#define USE_PCI_DMA_API 1
+#else
+#define USE_PCI_DMA_API 0
+#endif
+
+struct intel_gtt_driver {
+ unsigned int gen : 8;
+ unsigned int is_g33 : 1;
+ unsigned int is_pineview : 1;
+ unsigned int is_ironlake : 1;
+ unsigned int has_pgtbl_enable : 1;
+ unsigned int dma_mask_size : 8;
+ /* Chipset specific GTT setup */
+ int (*setup)(void);
+ /* This should undo anything done in ->setup() save the unmapping
+ * of the mmio register file, that's done in the generic code. */
+ void (*cleanup)(void);
+ void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
+ /* Flags is a more or less chipset specific opaque value.
+ * For chipsets that need to support old ums (non-gem) code, this
+ * needs to be identical to the various supported agp memory types! */
+ bool (*check_flags)(unsigned int flags);
+ void (*chipset_flush)(void);
+};
+
+static struct _intel_private {
+ const struct intel_gtt_driver *driver;
+ struct pci_dev *pcidev; /* device one */
+ struct pci_dev *bridge_dev;
+ u8 __iomem *registers;
+ phys_addr_t gtt_phys_addr;
+ u32 PGETBL_save;
+ u32 __iomem *gtt; /* I915G */
+ bool clear_fake_agp; /* on first access via agp, fill with scratch */
+ int num_dcache_entries;
+ void __iomem *i9xx_flush_page;
+ char *i81x_gtt_table;
+ struct resource ifp_resource;
+ int resource_valid;
+ struct page *scratch_page;
+ phys_addr_t scratch_page_dma;
+ int refcount;
+ /* Whether i915 needs to use the dmar apis or not. */
+ unsigned int needs_dmar : 1;
+ phys_addr_t gma_bus_addr;
+ /* Size of memory reserved for graphics by the BIOS */
+ resource_size_t stolen_size;
+ /* Total number of gtt entries. */
+ unsigned int gtt_total_entries;
+ /* Part of the gtt that is mappable by the cpu, for those chips where
+ * this is not the full gtt. */
+ unsigned int gtt_mappable_entries;
+} intel_private;
+
+#define INTEL_GTT_GEN intel_private.driver->gen
+#define IS_G33 intel_private.driver->is_g33
+#define IS_PINEVIEW intel_private.driver->is_pineview
+#define IS_IRONLAKE intel_private.driver->is_ironlake
+#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+static int intel_gtt_map_memory(struct page **pages,
+ unsigned int num_entries,
+ struct sg_table *st)
+{
+ struct scatterlist *sg;
+ int i;
+
+ DBG("try mapping %lu pages\n", (unsigned long)num_entries);
+
+ if (sg_alloc_table(st, num_entries, GFP_KERNEL))
+ goto err;
+
+ for_each_sg(st->sgl, sg, num_entries, i)
+ sg_set_page(sg, pages[i], PAGE_SIZE, 0);
+
+ if (!dma_map_sg(&intel_private.pcidev->dev, st->sgl, st->nents,
+ DMA_BIDIRECTIONAL))
+ goto err;
+
+ return 0;
+
+err:
+ sg_free_table(st);
+ return -ENOMEM;
+}
+
+static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
+{
+ struct sg_table st;
+ DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
+
+ dma_unmap_sg(&intel_private.pcidev->dev, sg_list, num_sg,
+ DMA_BIDIRECTIONAL);
+
+ st.sgl = sg_list;
+ st.orig_nents = st.nents = num_sg;
+
+ sg_free_table(&st);
+}
+
+static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ return;
+}
+
+/* Exists to support ARGB cursors */
+static struct page *i8xx_alloc_pages(void)
+{
+ struct page *page;
+
+ page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
+ if (page == NULL)
+ return NULL;
+
+ if (set_pages_uc(page, 4) < 0) {
+ set_pages_wb(page, 4);
+ __free_pages(page, 2);
+ return NULL;
+ }
+ atomic_inc(&agp_bridge->current_memory_agp);
+ return page;
+}
+
+static void i8xx_destroy_pages(struct page *page)
+{
+ if (page == NULL)
+ return;
+
+ set_pages_wb(page, 4);
+ __free_pages(page, 2);
+ atomic_dec(&agp_bridge->current_memory_agp);
+}
+#endif
+
+#define I810_GTT_ORDER 4
+static int i810_setup(void)
+{
+ phys_addr_t reg_addr;
+ char *gtt_table;
+
+ /* i81x does not preallocate the gtt. It's always 64kb in size. */
+ gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
+ if (gtt_table == NULL)
+ return -ENOMEM;
+ intel_private.i81x_gtt_table = gtt_table;
+
+ reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);
+
+ intel_private.registers = ioremap(reg_addr, KB(64));
+ if (!intel_private.registers)
+ return -ENOMEM;
+
+ writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
+ intel_private.registers+I810_PGETBL_CTL);
+
+ intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;
+
+ if ((readl(intel_private.registers+I810_DRAM_CTL)
+ & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+ dev_info(&intel_private.pcidev->dev,
+ "detected 4MB dedicated video ram\n");
+ intel_private.num_dcache_entries = 1024;
+ }
+
+ return 0;
+}
+
+static void i810_cleanup(void)
+{
+ writel(0, intel_private.registers+I810_PGETBL_CTL);
+ free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
+}
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i;
+
+ if ((pg_start + mem->page_count)
+ > intel_private.num_dcache_entries)
+ return -EINVAL;
+
+ if (!mem->is_flushed)
+ global_cache_flush();
+
+ for (i = pg_start; i < (pg_start + mem->page_count); i++) {
+ dma_addr_t addr = i << PAGE_SHIFT;
+ intel_private.driver->write_entry(addr,
+ i, type);
+ }
+ wmb();
+
+ return 0;
+}
+
+/*
+ * The i810/i830 requires a physical address to program its mouse
+ * pointer into hardware.
+ * However the Xserver still writes to it through the agp aperture.
+ */
+static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
+{
+ struct agp_memory *new;
+ struct page *page;
+
+ switch (pg_count) {
+ case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
+ break;
+ case 4:
+ /* kludge to get 4 physical pages for ARGB cursor */
+ page = i8xx_alloc_pages();
+ break;
+ default:
+ return NULL;
+ }
+
+ if (page == NULL)
+ return NULL;
+
+ new = agp_create_memory(pg_count);
+ if (new == NULL)
+ return NULL;
+
+ new->pages[0] = page;
+ if (pg_count == 4) {
+ /* kludge to get 4 physical pages for ARGB cursor */
+ new->pages[1] = new->pages[0] + 1;
+ new->pages[2] = new->pages[1] + 1;
+ new->pages[3] = new->pages[2] + 1;
+ }
+ new->page_count = pg_count;
+ new->num_scratch_pages = pg_count;
+ new->type = AGP_PHYS_MEMORY;
+ new->physical = page_to_phys(new->pages[0]);
+ return new;
+}
+
+static void intel_i810_free_by_type(struct agp_memory *curr)
+{
+ agp_free_key(curr->key);
+ if (curr->type == AGP_PHYS_MEMORY) {
+ if (curr->page_count == 4)
+ i8xx_destroy_pages(curr->pages[0]);
+ else {
+ agp_bridge->driver->agp_destroy_page(curr->pages[0],
+ AGP_PAGE_DESTROY_UNMAP);
+ agp_bridge->driver->agp_destroy_page(curr->pages[0],
+ AGP_PAGE_DESTROY_FREE);
+ }
+ agp_free_page_array(curr);
+ }
+ kfree(curr);
+}
+#endif
+
+static int intel_gtt_setup_scratch_page(void)
+{
+ struct page *page;
+ dma_addr_t dma_addr;
+
+ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+ if (page == NULL)
+ return -ENOMEM;
+ set_pages_uc(page, 1);
+
+ if (intel_private.needs_dmar) {
+ dma_addr = dma_map_page(&intel_private.pcidev->dev, page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&intel_private.pcidev->dev, dma_addr)) {
+ __free_page(page);
+ return -EINVAL;
+ }
+
+ intel_private.scratch_page_dma = dma_addr;
+ } else
+ intel_private.scratch_page_dma = page_to_phys(page);
+
+ intel_private.scratch_page = page;
+
+ return 0;
+}
+
+static void i810_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
+{
+ u32 pte_flags = I810_PTE_VALID;
+
+ switch (flags) {
+ case AGP_DCACHE_MEMORY:
+ pte_flags |= I810_PTE_LOCAL;
+ break;
+ case AGP_USER_CACHED_MEMORY:
+ pte_flags |= I830_PTE_SYSTEM_CACHED;
+ break;
+ }
+
+ writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
+}
+
+static resource_size_t intel_gtt_stolen_size(void)
+{
+ u16 gmch_ctrl;
+ u8 rdct;
+ int local = 0;
+ static const int ddt[4] = { 0, 16, 32, 64 };
+ resource_size_t stolen_size = 0;
+
+ if (INTEL_GTT_GEN == 1)
+ return 0; /* no stolen mem on i81x */
+
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
+
+ if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+ intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+ case I830_GMCH_GMS_STOLEN_512:
+ stolen_size = KB(512);
+ break;
+ case I830_GMCH_GMS_STOLEN_1024:
+ stolen_size = MB(1);
+ break;
+ case I830_GMCH_GMS_STOLEN_8192:
+ stolen_size = MB(8);
+ break;
+ case I830_GMCH_GMS_LOCAL:
+ rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
+ stolen_size = (I830_RDRAM_ND(rdct) + 1) *
+ MB(ddt[I830_RDRAM_DDT(rdct)]);
+ local = 1;
+ break;
+ default:
+ stolen_size = 0;
+ break;
+ }
+ } else {
+ switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
+ case I855_GMCH_GMS_STOLEN_1M:
+ stolen_size = MB(1);
+ break;
+ case I855_GMCH_GMS_STOLEN_4M:
+ stolen_size = MB(4);
+ break;
+ case I855_GMCH_GMS_STOLEN_8M:
+ stolen_size = MB(8);
+ break;
+ case I855_GMCH_GMS_STOLEN_16M:
+ stolen_size = MB(16);
+ break;
+ case I855_GMCH_GMS_STOLEN_32M:
+ stolen_size = MB(32);
+ break;
+ case I915_GMCH_GMS_STOLEN_48M:
+ stolen_size = MB(48);
+ break;
+ case I915_GMCH_GMS_STOLEN_64M:
+ stolen_size = MB(64);
+ break;
+ case G33_GMCH_GMS_STOLEN_128M:
+ stolen_size = MB(128);
+ break;
+ case G33_GMCH_GMS_STOLEN_256M:
+ stolen_size = MB(256);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_96M:
+ stolen_size = MB(96);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_160M:
+ stolen_size = MB(160);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_224M:
+ stolen_size = MB(224);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_352M:
+ stolen_size = MB(352);
+ break;
+ default:
+ stolen_size = 0;
+ break;
+ }
+ }
+
+ if (stolen_size > 0) {
+ dev_info(&intel_private.bridge_dev->dev, "detected %lluK %s memory\n",
+ (u64)stolen_size / KB(1), local ? "local" : "stolen");
+ } else {
+ dev_info(&intel_private.bridge_dev->dev,
+ "no pre-allocated video memory detected\n");
+ stolen_size = 0;
+ }
+
+ return stolen_size;
+}
+
+static void i965_adjust_pgetbl_size(unsigned int size_flag)
+{
+ u32 pgetbl_ctl, pgetbl_ctl2;
+
+ /* ensure that ppgtt is disabled */
+ pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
+ pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
+ writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
+
+ /* write the new ggtt size */
+ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+ pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
+ pgetbl_ctl |= size_flag;
+ writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
+}
+
+static unsigned int i965_gtt_total_entries(void)
+{
+ int size;
+ u32 pgetbl_ctl;
+ u16 gmch_ctl;
+
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctl);
+
+ if (INTEL_GTT_GEN == 5) {
+ switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
+ case G4x_GMCH_SIZE_1M:
+ case G4x_GMCH_SIZE_VT_1M:
+ i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
+ break;
+ case G4x_GMCH_SIZE_VT_1_5M:
+ i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
+ break;
+ case G4x_GMCH_SIZE_2M:
+ case G4x_GMCH_SIZE_VT_2M:
+ i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
+ break;
+ }
+ }
+
+ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+ switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+ case I965_PGETBL_SIZE_128KB:
+ size = KB(128);
+ break;
+ case I965_PGETBL_SIZE_256KB:
+ size = KB(256);
+ break;
+ case I965_PGETBL_SIZE_512KB:
+ size = KB(512);
+ break;
+ /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
+ case I965_PGETBL_SIZE_1MB:
+ size = KB(1024);
+ break;
+ case I965_PGETBL_SIZE_2MB:
+ size = KB(2048);
+ break;
+ case I965_PGETBL_SIZE_1_5MB:
+ size = KB(1024 + 512);
+ break;
+ default:
+ dev_info(&intel_private.pcidev->dev,
+ "unknown page table size, assuming 512KB\n");
+ size = KB(512);
+ }
+
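+ /* each GTT entry is 4 bytes and maps one 4K page */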
+ return size/4;
+}
+
+static unsigned int intel_gtt_total_entries(void)
+{
+ if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
+ return i965_gtt_total_entries();
+ else {
+ /* On previous hardware, the GTT size was just what was
+ * required to map the aperture.
+ */
+ return intel_private.gtt_mappable_entries;
+ }
+}
+
+static unsigned int intel_gtt_mappable_entries(void)
+{
+ unsigned int aperture_size;
+
+ if (INTEL_GTT_GEN == 1) {
+ u32 smram_miscc;
+
+ pci_read_config_dword(intel_private.bridge_dev,
+ I810_SMRAM_MISCC, &smram_miscc);
+
+ if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
+ == I810_GFX_MEM_WIN_32M)
+ aperture_size = MB(32);
+ else
+ aperture_size = MB(64);
+ } else if (INTEL_GTT_GEN == 2) {
+ u16 gmch_ctrl;
+
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
+
+ if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
+ aperture_size = MB(64);
+ else
+ aperture_size = MB(128);
+ } else {
+ /* 9xx supports large sizes, just look at the length */
+ aperture_size = pci_resource_len(intel_private.pcidev, 2);
+ }
+
+ return aperture_size >> PAGE_SHIFT;
+}
+
+static void intel_gtt_teardown_scratch_page(void)
+{
+ set_pages_wb(intel_private.scratch_page, 1);
+ if (intel_private.needs_dmar)
+ dma_unmap_page(&intel_private.pcidev->dev,
+ intel_private.scratch_page_dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(intel_private.scratch_page);
+}
+
+static void intel_gtt_cleanup(void)
+{
+ intel_private.driver->cleanup();
+
+ iounmap(intel_private.gtt);
+ iounmap(intel_private.registers);
+
+ intel_gtt_teardown_scratch_page();
+}
+
+/* Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static inline int needs_ilk_vtd_wa(void)
+{
+ const unsigned short gpu_devid = intel_private.pcidev->device;
+
+ /*
+ * Query iommu subsystem to see if we need the workaround. Presumably
+ * that was loaded first.
+ */
+ return ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
+ gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+ device_iommu_mapped(&intel_private.pcidev->dev));
+}
+
+static bool intel_gtt_can_wc(void)
+{
+ if (INTEL_GTT_GEN <= 2)
+ return false;
+
+ if (INTEL_GTT_GEN >= 6)
+ return false;
+
+ /* Reports of major corruption with ILK vt'd enabled */
+ if (needs_ilk_vtd_wa())
+ return false;
+
+ return true;
+}
+
+static int intel_gtt_init(void)
+{
+ u32 gtt_map_size;
+ int ret, bar;
+
+ ret = intel_private.driver->setup();
+ if (ret != 0)
+ return ret;
+
+ intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
+ intel_private.gtt_total_entries = intel_gtt_total_entries();
+
+ /* save the PGETBL reg for resume */
+ intel_private.PGETBL_save =
+ readl(intel_private.registers+I810_PGETBL_CTL)
+ & ~I810_PGETBL_ENABLED;
+ /* we only ever restore the register when enabling the PGTBL... */
+ if (HAS_PGTBL_EN)
+ intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
+
+ dev_info(&intel_private.bridge_dev->dev,
+ "detected gtt size: %dK total, %dK mappable\n",
+ intel_private.gtt_total_entries * 4,
+ intel_private.gtt_mappable_entries * 4);
+
+ gtt_map_size = intel_private.gtt_total_entries * 4;
+
+ intel_private.gtt = NULL;
+ if (intel_gtt_can_wc())
+ intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr,
+ gtt_map_size);
+ if (intel_private.gtt == NULL)
+ intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
+ gtt_map_size);
+ if (intel_private.gtt == NULL) {
+ intel_private.driver->cleanup();
+ iounmap(intel_private.registers);
+ return -ENOMEM;
+ }
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+ global_cache_flush(); /* FIXME: ? */
+#endif
+
+ intel_private.stolen_size = intel_gtt_stolen_size();
+
+ intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+
+ ret = intel_gtt_setup_scratch_page();
+ if (ret != 0) {
+ intel_gtt_cleanup();
+ return ret;
+ }
+
+ if (INTEL_GTT_GEN <= 2)
+ bar = I810_GMADR_BAR;
+ else
+ bar = I915_GMADR_BAR;
+
+ intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
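+/* { aperture size in MB, number of GTT entries, GATT page order } */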
+static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
+ {32, 8192, 3},
+ {64, 16384, 4},
+ {128, 32768, 5},
+ {256, 65536, 6},
+ {512, 131072, 7},
+};
+
+static int intel_fake_agp_fetch_size(void)
+{
+ int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
+ unsigned int aper_size;
+ int i;
+
+ aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
+
+ for (i = 0; i < num_sizes; i++) {
+ if (aper_size == intel_fake_agp_sizes[i].size) {
+ agp_bridge->current_size =
+ (void *) (intel_fake_agp_sizes + i);
+ return aper_size;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static void i830_cleanup(void)
+{
+}
+
+/* The chipset_flush interface needs to get data that has already been
+ * flushed out of the CPU all the way out to main memory, because the GPU
+ * doesn't snoop those buffers.
+ *
+ * The 8xx series doesn't have the same lovely interface for flushing the
+ * chipset write buffers that the later chips do. According to the 865
+ * specs, it's 64 octwords, or 1KB. So, to get those previous things in
+ * that buffer out, we just fill 1KB and clflush it out, on the assumption
+ * that it'll push whatever was in there out. It appears to work.
+ */
+static void i830_chipset_flush(void)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ /* Forcibly evict everything from the CPU write buffers.
+ * clflush appears to be insufficient.
+ */
+ wbinvd_on_all_cpus();
+
+ /* So far we've only seen documentation for this magic bit on the 855GM;
+ * we hope it exists for the other gen2 chipsets...
+ *
+ * Also works as advertised on my 845G.
+ */
+ writel(readl(intel_private.registers+I830_HIC) | (1<<31),
+ intel_private.registers+I830_HIC);
+
+ while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ udelay(50);
+ }
+}
+
+static void i830_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
+{
+ u32 pte_flags = I810_PTE_VALID;
+
+ if (flags == AGP_USER_CACHED_MEMORY)
+ pte_flags |= I830_PTE_SYSTEM_CACHED;
+
+ writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
+}
+
+bool intel_gmch_enable_gtt(void)
+{
+ u8 __iomem *reg;
+
+ if (INTEL_GTT_GEN == 2) {
+ u16 gmch_ctrl;
+
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
+ gmch_ctrl |= I830_GMCH_ENABLED;
+ pci_write_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, gmch_ctrl);
+
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
+ if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
+ dev_err(&intel_private.pcidev->dev,
+ "failed to enable the GTT: GMCH_CTRL=%x\n",
+ gmch_ctrl);
+ return false;
+ }
+ }
+
+ /* On the resume path we may be adjusting the PGTBL value, so
+ * be paranoid and flush all chipset write buffers...
+ */
+ if (INTEL_GTT_GEN >= 3)
+ writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
+ reg = intel_private.registers+I810_PGETBL_CTL;
+ writel(intel_private.PGETBL_save, reg);
+ if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
+ dev_err(&intel_private.pcidev->dev,
+ "failed to enable the GTT: PGETBL=%x [expected %x]\n",
+ readl(reg), intel_private.PGETBL_save);
+ return false;
+ }
+
+ if (INTEL_GTT_GEN >= 3)
+ writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
+ return true;
+}
+EXPORT_SYMBOL(intel_gmch_enable_gtt);
+
+static int i830_setup(void)
+{
+ phys_addr_t reg_addr;
+
+ reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);
+
+ intel_private.registers = ioremap(reg_addr, KB(64));
+ if (!intel_private.registers)
+ return -ENOMEM;
+
+ intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ agp_bridge->gatt_table_real = NULL;
+ agp_bridge->gatt_table = NULL;
+ agp_bridge->gatt_bus_addr = 0;
+
+ return 0;
+}
+
+static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ return 0;
+}
+
+static int intel_fake_agp_configure(void)
+{
+ if (!intel_gmch_enable_gtt())
+ return -EIO;
+
+ intel_private.clear_fake_agp = true;
+ agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
+
+ return 0;
+}
+#endif
+
+static bool i830_check_flags(unsigned int flags)
+{
+ switch (flags) {
+ case 0:
+ case AGP_PHYS_MEMORY:
+ case AGP_USER_CACHED_MEMORY:
+ case AGP_USER_MEMORY:
+ return true;
+ }
+
+ return false;
+}
+
+void intel_gmch_gtt_insert_page(dma_addr_t addr,
+ unsigned int pg,
+ unsigned int flags)
+{
+ intel_private.driver->write_entry(addr, pg, flags);
+ readl(intel_private.gtt + pg);
+ if (intel_private.driver->chipset_flush)
+ intel_private.driver->chipset_flush();
+}
+EXPORT_SYMBOL(intel_gmch_gtt_insert_page);
+
+void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
+ unsigned int pg_start,
+ unsigned int flags)
+{
+ struct scatterlist *sg;
+ unsigned int len, m;
+ int i, j;
+
+ j = pg_start;
+
+ /* sg may merge pages, but we have to separate
+ * the per-page addresses for the GTT */
+ for_each_sg(st->sgl, sg, st->nents, i) {
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (m = 0; m < len; m++) {
+ dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+ intel_private.driver->write_entry(addr, j, flags);
+ j++;
+ }
+ }
+ readl(intel_private.gtt + j - 1);
+ if (intel_private.driver->chipset_flush)
+ intel_private.driver->chipset_flush();
+}
+EXPORT_SYMBOL(intel_gmch_gtt_insert_sg_entries);
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+static void intel_gmch_gtt_insert_pages(unsigned int first_entry,
+ unsigned int num_entries,
+ struct page **pages,
+ unsigned int flags)
+{
+ int i, j;
+
+ for (i = 0, j = first_entry; i < num_entries; i++, j++) {
+ dma_addr_t addr = page_to_phys(pages[i]);
+ intel_private.driver->write_entry(addr,
+ j, flags);
+ }
+ wmb();
+}
+
+static int intel_fake_agp_insert_entries(struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ int ret = -EINVAL;
+
+ if (intel_private.clear_fake_agp) {
+ int start = intel_private.stolen_size / PAGE_SIZE;
+ int end = intel_private.gtt_mappable_entries;
+ intel_gmch_gtt_clear_range(start, end - start);
+ intel_private.clear_fake_agp = false;
+ }
+
+ if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
+ return i810_insert_dcache_entries(mem, pg_start, type);
+
+ if (mem->page_count == 0)
+ goto out;
+
+ if (pg_start + mem->page_count > intel_private.gtt_total_entries)
+ goto out_err;
+
+ if (type != mem->type)
+ goto out_err;
+
+ if (!intel_private.driver->check_flags(type))
+ goto out_err;
+
+ if (!mem->is_flushed)
+ global_cache_flush();
+
+ if (intel_private.needs_dmar) {
+ struct sg_table st;
+
+ ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
+ if (ret != 0)
+ return ret;
+
+ intel_gmch_gtt_insert_sg_entries(&st, pg_start, type);
+ mem->sg_list = st.sgl;
+ mem->num_sg = st.nents;
+ } else
+ intel_gmch_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
+ type);
+
+out:
+ ret = 0;
+out_err:
+ mem->is_flushed = true;
+ return ret;
+}
+#endif
+
+void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
+{
+ unsigned int i;
+
+ for (i = first_entry; i < (first_entry + num_entries); i++) {
+ intel_private.driver->write_entry(intel_private.scratch_page_dma,
+ i, 0);
+ }
+ wmb();
+}
+EXPORT_SYMBOL(intel_gmch_gtt_clear_range);
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+static int intel_fake_agp_remove_entries(struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ if (mem->page_count == 0)
+ return 0;
+
+ intel_gmch_gtt_clear_range(pg_start, mem->page_count);
+
+ if (intel_private.needs_dmar) {
+ intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
+ mem->sg_list = NULL;
+ mem->num_sg = 0;
+ }
+
+ return 0;
+}
+
+static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
+ int type)
+{
+ struct agp_memory *new;
+
+ if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
+ if (pg_count != intel_private.num_dcache_entries)
+ return NULL;
+
+ new = agp_create_memory(1);
+ if (new == NULL)
+ return NULL;
+
+ new->type = AGP_DCACHE_MEMORY;
+ new->page_count = pg_count;
+ new->num_scratch_pages = 0;
+ agp_free_page_array(new);
+ return new;
+ }
+ if (type == AGP_PHYS_MEMORY)
+ return alloc_agpphysmem_i8xx(pg_count, type);
+ /* always return NULL for other allocation types for now */
+ return NULL;
+}
+#endif
+
+static int intel_alloc_chipset_flush_resource(void)
+{
+ int ret;
+ ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
+ PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
+ pcibios_align_resource, intel_private.bridge_dev);
+
+ return ret;
+}
+
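+/* The "Intel Flush Page": a single page whose bus address is programmed into
+ * the bridge's IFPADDR register; writing to its CPU mapping (see
+ * i9xx_chipset_flush()) forces the chipset to flush its write buffers. */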
+static void intel_i915_setup_chipset_flush(void)
+{
+ int ret;
+ u32 temp;
+
+ pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
+ if (!(temp & 0x1)) {
+ intel_alloc_chipset_flush_resource();
+ intel_private.resource_valid = 1;
+ pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ temp &= ~1;
+
+ intel_private.resource_valid = 1;
+ intel_private.ifp_resource.start = temp;
+ intel_private.ifp_resource.end = temp + PAGE_SIZE;
+ ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+ /* some BIOSes reserve this area in a PnP resource; some don't */
+ if (ret)
+ intel_private.resource_valid = 0;
+ }
+}
+
+static void intel_i965_g33_setup_chipset_flush(void)
+{
+ u32 temp_hi, temp_lo;
+ int ret;
+
+ pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
+ pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
+
+ if (!(temp_lo & 0x1)) {
+
+ intel_alloc_chipset_flush_resource();
+
+ intel_private.resource_valid = 1;
+ pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
+ upper_32_bits(intel_private.ifp_resource.start));
+ pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ u64 l64;
+
+ temp_lo &= ~0x1;
+ l64 = ((u64)temp_hi << 32) | temp_lo;
+
+ intel_private.resource_valid = 1;
+ intel_private.ifp_resource.start = l64;
+ intel_private.ifp_resource.end = l64 + PAGE_SIZE;
+ ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+ /* some BIOSes reserve this area in a PnP resource; some don't */
+ if (ret)
+ intel_private.resource_valid = 0;
+ }
+}
+
+static void intel_i9xx_setup_flush(void)
+{
+ /* return if already configured */
+ if (intel_private.ifp_resource.start)
+ return;
+
+ if (INTEL_GTT_GEN == 6)
+ return;
+
+ /* setup a resource for this object */
+ intel_private.ifp_resource.name = "Intel Flush Page";
+ intel_private.ifp_resource.flags = IORESOURCE_MEM;
+
+ /* Setup chipset flush for 915 */
+ if (IS_G33 || INTEL_GTT_GEN >= 4) {
+ intel_i965_g33_setup_chipset_flush();
+ } else {
+ intel_i915_setup_chipset_flush();
+ }
+
+ if (intel_private.ifp_resource.start)
+ intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE);
+ if (!intel_private.i9xx_flush_page)
+ dev_err(&intel_private.pcidev->dev,
+ "can't ioremap flush page - no chipset flushing\n");
+}
+
+static void i9xx_cleanup(void)
+{
+ if (intel_private.i9xx_flush_page)
+ iounmap(intel_private.i9xx_flush_page);
+ if (intel_private.resource_valid)
+ release_resource(&intel_private.ifp_resource);
+ intel_private.ifp_resource.start = 0;
+ intel_private.resource_valid = 0;
+}
+
+static void i9xx_chipset_flush(void)
+{
+ wmb();
+ if (intel_private.i9xx_flush_page)
+ writel(1, intel_private.i9xx_flush_page);
+}
+
+static void i965_write_entry(dma_addr_t addr,
+ unsigned int entry,
+ unsigned int flags)
+{
+ u32 pte_flags;
+
+ pte_flags = I810_PTE_VALID;
+ if (flags == AGP_USER_CACHED_MEMORY)
+ pte_flags |= I830_PTE_SYSTEM_CACHED;
+
+ /* Shift the high address bits (35:32) down into PTE bits 7:4 */
+ addr |= (addr >> 28) & 0xf0;
+ writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
+}
+
+static int i9xx_setup(void)
+{
+ phys_addr_t reg_addr;
+ int size = KB(512);
+
+ reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);
+
+ intel_private.registers = ioremap(reg_addr, size);
+ if (!intel_private.registers)
+ return -ENOMEM;
+
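+ /* The GTT location depends on the generation: gen3 exposes it through
+ * its own PCI BAR, gen4 places it 512K into the register BAR and gen5
+ * (Ironlake) places it 2M in. */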
+ switch (INTEL_GTT_GEN) {
+ case 3:
+ intel_private.gtt_phys_addr =
+ pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
+ break;
+ case 5:
+ intel_private.gtt_phys_addr = reg_addr + MB(2);
+ break;
+ default:
+ intel_private.gtt_phys_addr = reg_addr + KB(512);
+ break;
+ }
+
+ intel_i9xx_setup_flush();
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+static const struct agp_bridge_driver intel_fake_agp_driver = {
+ .owner = THIS_MODULE,
+ .size_type = FIXED_APER_SIZE,
+ .aperture_sizes = intel_fake_agp_sizes,
+ .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes),
+ .configure = intel_fake_agp_configure,
+ .fetch_size = intel_fake_agp_fetch_size,
+ .cleanup = intel_gtt_cleanup,
+ .agp_enable = intel_fake_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_fake_agp_create_gatt_table,
+ .free_gatt_table = intel_fake_agp_free_gatt_table,
+ .insert_memory = intel_fake_agp_insert_entries,
+ .remove_memory = intel_fake_agp_remove_entries,
+ .alloc_by_type = intel_fake_agp_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+};
+#endif
+
+static const struct intel_gtt_driver i81x_gtt_driver = {
+ .gen = 1,
+ .has_pgtbl_enable = 1,
+ .dma_mask_size = 32,
+ .setup = i810_setup,
+ .cleanup = i810_cleanup,
+ .check_flags = i830_check_flags,
+ .write_entry = i810_write_entry,
+};
+static const struct intel_gtt_driver i8xx_gtt_driver = {
+ .gen = 2,
+ .has_pgtbl_enable = 1,
+ .setup = i830_setup,
+ .cleanup = i830_cleanup,
+ .write_entry = i830_write_entry,
+ .dma_mask_size = 32,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i830_chipset_flush,
+};
+static const struct intel_gtt_driver i915_gtt_driver = {
+ .gen = 3,
+ .has_pgtbl_enable = 1,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ /* i945 is the last gpu to need phys mem (for overlay and cursors). */
+ .write_entry = i830_write_entry,
+ .dma_mask_size = 32,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver g33_gtt_driver = {
+ .gen = 3,
+ .is_g33 = 1,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver pineview_gtt_driver = {
+ .gen = 3,
+ .is_pineview = 1, .is_g33 = 1,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver i965_gtt_driver = {
+ .gen = 4,
+ .has_pgtbl_enable = 1,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver g4x_gtt_driver = {
+ .gen = 5,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver ironlake_gtt_driver = {
+ .gen = 5,
+ .is_ironlake = 1,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+
+/* Table describing the Intel GMCH devices supported by this GTT code.
+ * intel_gmch_probe() matches the gpu device (or find_gmch()'s lookup by
+ * gmch_chip_id) against this table to select the gtt_driver to use.
+ */
+static const struct intel_gtt_driver_description {
+ unsigned int gmch_chip_id;
+ char *name;
+ const struct intel_gtt_driver *gtt_driver;
+} intel_gtt_chipsets[] = {
+ { PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
+ &i81x_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
+ &i81x_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
+ &i81x_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
+ &i81x_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
+ &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
+ &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82854_IG, "854",
+ &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
+ &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82865_IG, "865",
+ &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
+ &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
+ &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
+ &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
+ &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
+ &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
+ &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
+ &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
+ &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
+ &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
+ &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
+ &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
+ &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
+ &g33_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
+ &g33_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
+ &g33_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
+ &pineview_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
+ &pineview_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
+ &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
+ &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
+ &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
+ &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
+ &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
+ &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
+ &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
+ "HD Graphics", &ironlake_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
+ "HD Graphics", &ironlake_gtt_driver },
+ { 0, NULL, NULL }
+};
+
+static int find_gmch(u16 device)
+{
+ struct pci_dev *gmch_device;
+
+ gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
+ if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
+ gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
+ device, gmch_device);
+ }
+
+ if (!gmch_device)
+ return 0;
+
+ intel_private.pcidev = gmch_device;
+ return 1;
+}
+
+int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
+ struct agp_bridge_data *bridge)
+{
+ int i, mask;
+
+ for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
+ if (gpu_pdev) {
+ if (gpu_pdev->device ==
+ intel_gtt_chipsets[i].gmch_chip_id) {
+ intel_private.pcidev = pci_dev_get(gpu_pdev);
+ intel_private.driver =
+ intel_gtt_chipsets[i].gtt_driver;
+
+ break;
+ }
+ } else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
+ intel_private.driver =
+ intel_gtt_chipsets[i].gtt_driver;
+ break;
+ }
+ }
+
+ if (!intel_private.driver)
+ return 0;
+
+#if IS_ENABLED(CONFIG_AGP_INTEL)
+ if (bridge) {
+ if (INTEL_GTT_GEN > 1)
+ return 0;
+
+ bridge->driver = &intel_fake_agp_driver;
+ bridge->dev_private_data = &intel_private;
+ bridge->dev = bridge_pdev;
+ }
+#endif
+
+
+ /*
+ * Can be called from the fake agp driver but also directly from
+ * drm/i915.ko. Hence we need to check whether everything is set up
+ * already.
+ */
+ if (intel_private.refcount++)
+ return 1;
+
+ intel_private.bridge_dev = pci_dev_get(bridge_pdev);
+
+ dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
+
+ if (bridge) {
+ mask = intel_private.driver->dma_mask_size;
+ if (dma_set_mask(&intel_private.pcidev->dev, DMA_BIT_MASK(mask)))
+ dev_err(&intel_private.pcidev->dev,
+ "set gfx device dma mask %d-bit failed!\n",
+ mask);
+ else
+ dma_set_coherent_mask(&intel_private.pcidev->dev,
+ DMA_BIT_MASK(mask));
+ }
+
+ if (intel_gtt_init() != 0) {
+ intel_gmch_remove();
+
+ return 0;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(intel_gmch_probe);
+
+void intel_gmch_gtt_get(u64 *gtt_total,
+ phys_addr_t *mappable_base,
+ resource_size_t *mappable_end)
+{
+ *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
+ *mappable_base = intel_private.gma_bus_addr;
+ *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
+}
+EXPORT_SYMBOL(intel_gmch_gtt_get);
+
+void intel_gmch_gtt_flush(void)
+{
+ if (intel_private.driver->chipset_flush)
+ intel_private.driver->chipset_flush();
+}
+EXPORT_SYMBOL(intel_gmch_gtt_flush);
+
+void intel_gmch_remove(void)
+{
+ if (--intel_private.refcount)
+ return;
+
+ if (intel_private.scratch_page)
+ intel_gtt_teardown_scratch_page();
+ if (intel_private.pcidev)
+ pci_dev_put(intel_private.pcidev);
+ if (intel_private.bridge_dev)
+ pci_dev_put(intel_private.bridge_dev);
+ intel_private.driver = NULL;
+}
+EXPORT_SYMBOL(intel_gmch_remove);
+
+MODULE_AUTHOR("Dave Jones, Various @Intel");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/isoch.c b/drivers/char/agp/isoch.c
new file mode 100644
index 0000000000..7ecf20a6d1
--- /dev/null
+++ b/drivers/char/agp/isoch.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Setup routines for AGP 3.5 compliant bridges.
+ */
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/agp_backend.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "agp.h"
+
+/* Generic AGP 3.5 enabling routines */
+
+struct agp_3_5_dev {
+ struct list_head list;
+ u8 capndx;
+ u32 maxbw;
+ struct pci_dev *dev;
+};
+
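+/* Insert 'new' so that the list stays sorted by ascending maxbw. */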
+static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new)
+{
+ struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list);
+ struct list_head *pos;
+
+ list_for_each(pos, head) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+ if (cur->maxbw > n->maxbw)
+ break;
+ }
+ list_add_tail(new, pos);
+}
+
+static void agp_3_5_dev_list_sort(struct agp_3_5_dev *list, unsigned int ndevs)
+{
+ struct agp_3_5_dev *cur;
+ struct pci_dev *dev;
+ struct list_head *pos, *tmp, *head = &list->list, *start = head->next;
+ u32 nistat;
+
+ INIT_LIST_HEAD(head);
+
+ for (pos=start; pos!=head; ) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+ dev = cur->dev;
+
+ pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &nistat);
+ cur->maxbw = (nistat >> 16) & 0xff;
+
+ tmp = pos;
+ pos = pos->next;
+ agp_3_5_dev_list_insert(head, tmp);
+ }
+}
+
+/*
+ * Initialize all isochronous transfer parameters for an AGP 3.0
+ * node (i.e. a host bridge in combination with the adapters
+ * lying behind it...)
+ */
+
+static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
+ struct agp_3_5_dev *dev_list, unsigned int ndevs)
+{
+ /*
+ * Convenience structure to make the calculations clearer
+ * here. The field names come straight from the AGP 3.0 spec.
+ */
+ struct isoch_data {
+ u32 maxbw;
+ u32 n;
+ u32 y;
+ u32 l;
+ u32 rq;
+ struct agp_3_5_dev *dev;
+ };
+
+ struct pci_dev *td = bridge->dev, *dev;
+ struct list_head *head = &dev_list->list, *pos;
+ struct agp_3_5_dev *cur;
+ struct isoch_data *master, target;
+ unsigned int cdev = 0;
+ u32 mnistat, tnistat, tstatus, mcmd;
+ u16 tnicmd, mnicmd;
+ u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
+ u32 step, rem, rem_isoch, rem_async;
+ int ret = 0;
+
+ /*
+ * We'll work with an array of isoch_data's (one for each
+ * device in dev_list) throughout this function.
+ */
+ master = kmalloc_array(ndevs, sizeof(*master), GFP_KERNEL);
+ if (master == NULL) {
+ ret = -ENOMEM;
+ goto get_out;
+ }
+
+ /*
+ * Sort the device list by maxbw. We need to do this because the
+ * spec suggests that the devices with the smallest requirements
+ * have their resources allocated first, with all remaining resources
+ * falling to the device with the largest requirement.
+ *
+ * We don't do exactly this; we divide the target resources by ndevs
+ * and split them amongst the AGP 3.0 devices. The remainder of such
+ * division operations is handed to the last device, roughly as the
+ * spec says it should be done.
+ *
+ * We can't do this sort when we initially construct the dev_list
+ * because we don't know until this function whether isochronous
+ * transfers are enabled and consequently whether maxbw will mean
+ * anything.
+ */
+ agp_3_5_dev_list_sort(dev_list, ndevs);
+
+ pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
+ pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
+
+ /* Extract power-on defaults from the target */
+ target.maxbw = (tnistat >> 16) & 0xff;
+ target.n = (tnistat >> 8) & 0xff;
+ target.y = (tnistat >> 6) & 0x3;
+ target.l = (tnistat >> 3) & 0x7;
+ target.rq = (tstatus >> 24) & 0xff;
+
+ y_max = target.y;
+
+ /*
+ * Extract power-on defaults for each device in dev_list. Along
+ * the way, calculate the total isochronous bandwidth required
+ * by these devices and the largest requested payload size.
+ */
+ list_for_each(pos, head) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+ dev = cur->dev;
+
+ pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);
+
+ master[cdev].maxbw = (mnistat >> 16) & 0xff;
+ master[cdev].n = (mnistat >> 8) & 0xff;
+ master[cdev].y = (mnistat >> 6) & 0x3;
+ master[cdev].dev = cur;
+
+ tot_bw += master[cdev].maxbw;
+ y_max = max(y_max, master[cdev].y);
+
+ cdev++;
+ }
+
+ /* Check if this configuration has any chance of working */
+ if (tot_bw > target.maxbw) {
+ dev_err(&td->dev, "isochronous bandwidth required "
+ "by AGP 3.0 devices exceeds that which is supported by "
+ "the AGP 3.0 bridge!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ target.y = y_max;
+
+ /*
+ * Write the calculated payload size into the target's NICMD
+ * register. Doing this directly affects the ISOCH_N value
+ * in the target's NISTAT register, so we need to do this now
+ * to get an accurate value for ISOCH_N later.
+ */
+ pci_read_config_word(td, bridge->capndx+AGPNICMD, &tnicmd);
+ tnicmd &= ~(0x3 << 6);
+ tnicmd |= target.y << 6;
+ pci_write_config_word(td, bridge->capndx+AGPNICMD, tnicmd);
+
+ /* Reread the target's ISOCH_N */
+ pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
+ target.n = (tnistat >> 8) & 0xff;
+
+ /* Calculate the minimum ISOCH_N needed by each master */
+ for (cdev=0; cdev<ndevs; cdev++) {
+ master[cdev].y = target.y;
+ master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1);
+
+ tot_n += master[cdev].n;
+ }
+
+ /* Exit if the minimal ISOCH_N allocation among the masters is more
+ * than the target can handle. */
+ if (tot_n > target.n) {
+ dev_err(&td->dev, "number of isochronous "
+ "transactions per period required by AGP 3.0 devices "
+ "exceeds that which is supported by the AGP 3.0 "
+ "bridge!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ /* Calculate left over ISOCH_N capability in the target. We'll give
+ * this to the hungriest device (as per the spec) */
+ rem = target.n - tot_n;
+
+ /*
+ * Calculate the minimum isochronous RQ depth needed by each master.
+ * Along the way, distribute the extra ISOCH_N capability calculated
+ * above.
+ */
+ for (cdev=0; cdev<ndevs; cdev++) {
+ /*
+ * This is a little subtle. If the payload size (ISOCH_Y) is larger
+ * than 64B, then each ISOCH_Y-byte isochronous write will be broken
+ * into 64B pieces. This means we need to budget more RQ depth to
+ * account for these kinds of writes (each isochronous write is actually
+ * many writes on the AGP bus).
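+ * For example, ISOCH_Y == 0x2 (128 byte payloads) turns every isochronous
+ * transaction into two 64B requests, doubling the RQ depth it consumes.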
+ */
+ master[cdev].rq = master[cdev].n;
+ if (master[cdev].y > 0x1)
+ master[cdev].rq *= (1 << (master[cdev].y - 1));
+
+ tot_rq += master[cdev].rq;
+ }
+ master[ndevs-1].n += rem;
+
+ /* Figure the number of isochronous and asynchronous RQ slots the
+ * target is providing. */
+ rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n;
+ rq_async = target.rq - rq_isoch;
+
+ /* Exit if the minimal RQ needs of the masters exceeds what the target
+ * can provide. */
+ if (tot_rq > rq_isoch) {
+ dev_err(&td->dev, "number of request queue slots "
+ "required by the isochronous bandwidth requested by "
+ "AGP 3.0 devices exceeds the number provided by the "
+ "AGP 3.0 bridge!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ /* Calculate asynchronous RQ capability in the target (per master) as
+ * well as the total number of leftover isochronous RQ slots. */
+ step = rq_async / ndevs;
+ rem_async = step + (rq_async % ndevs);
+ rem_isoch = rq_isoch - tot_rq;
+
+ /* Distribute the extra RQ slots calculated above and write our
+ * isochronous settings out to the actual devices. */
+ for (cdev=0; cdev<ndevs; cdev++) {
+ cur = master[cdev].dev;
+ dev = cur->dev;
+
+ master[cdev].rq += (cdev == ndevs - 1)
+ ? (rem_async + rem_isoch) : step;
+
+ pci_read_config_word(dev, cur->capndx+AGPNICMD, &mnicmd);
+ pci_read_config_dword(dev, cur->capndx+AGPCMD, &mcmd);
+
+ mnicmd &= ~(0xff << 8);
+ mnicmd &= ~(0x3 << 6);
+ mcmd &= ~(0xff << 24);
+
+ mnicmd |= master[cdev].n << 8;
+ mnicmd |= master[cdev].y << 6;
+ mcmd |= master[cdev].rq << 24;
+
+ pci_write_config_dword(dev, cur->capndx+AGPCMD, mcmd);
+ pci_write_config_word(dev, cur->capndx+AGPNICMD, mnicmd);
+ }
+
+free_and_exit:
+ kfree(master);
+
+get_out:
+ return ret;
+}
+
+/*
+ * This function basically allocates request queue slots among the
+ * AGP 3.0 masters on non-isochronous nodes. The algorithm is
+ * pretty stupid: divide the total number of RQ slots provided by the
+ * target by ndevs, distribute this many slots to each AGP 3.0 device,
+ * and give any leftover slots to the last device in dev_list.
+ */
+static void agp_3_5_nonisochronous_node_enable(struct agp_bridge_data *bridge,
+ struct agp_3_5_dev *dev_list, unsigned int ndevs)
+{
+ struct agp_3_5_dev *cur;
+ struct list_head *head = &dev_list->list, *pos;
+ u32 tstatus, mcmd;
+ u32 trq, mrq, rem;
+ unsigned int cdev = 0;
+
+ pci_read_config_dword(bridge->dev, bridge->capndx+AGPSTAT, &tstatus);
+
+ trq = (tstatus >> 24) & 0xff;
+ mrq = trq / ndevs;
+
+ rem = mrq + (trq % ndevs);
+
+ for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+
+ pci_read_config_dword(cur->dev, cur->capndx+AGPCMD, &mcmd);
+ mcmd &= ~(0xff << 24);
+ mcmd |= ((cdev == ndevs - 1) ? rem : mrq) << 24;
+ pci_write_config_dword(cur->dev, cur->capndx+AGPCMD, mcmd);
+ }
+}
+
+/*
+ * Fully configure and enable an AGP 3.0 host bridge and all the devices
+ * lying behind it.
+ */
+int agp_3_5_enable(struct agp_bridge_data *bridge)
+{
+ struct pci_dev *td = bridge->dev, *dev = NULL;
+ u8 mcapndx;
+ u32 isoch;
+ u32 tstatus, mstatus, ncapid;
+ u32 mmajor;
+ u16 mpstat;
+ struct agp_3_5_dev *dev_list, *cur;
+ struct list_head *head, *pos;
+ unsigned int ndevs = 0;
+ int ret = 0;
+
+ /* Extract some power-on defaults from the target */
+ pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
+ isoch = (tstatus >> 17) & 0x1;
+ if (isoch == 0) /* isoch xfers not available, bail out. */
+ return -ENODEV;
+
+ /*
+ * Allocate a head for our AGP 3.5 device list
+ * (multiple AGP v3 devices are allowed behind a single bridge).
+ */
+ if ((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto get_out;
+ }
+ head = &dev_list->list;
+ INIT_LIST_HEAD(head);
+
+ /* Find all AGP devices, and add them to dev_list. */
+ for_each_pci_dev(dev) {
+ mcapndx = pci_find_capability(dev, PCI_CAP_ID_AGP);
+ if (mcapndx == 0)
+ continue;
+
+ switch ((dev->class >>8) & 0xff00) {
+ case 0x0600: /* Bridge */
+ /* Skip bridges. We should call this function for each one. */
+ continue;
+
+ case 0x0001: /* Unclassified device */
+ /* Don't know what this is, but log it for investigation. */
+ if (mcapndx != 0) {
+ dev_info(&td->dev, "wacky, found unclassified AGP device %s [%04x/%04x]\n",
+ pci_name(dev),
+ dev->vendor, dev->device);
+ }
+ continue;
+
+ case 0x0300: /* Display controller */
+ case 0x0400: /* Multimedia controller */
+ if ((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto free_and_exit;
+ }
+ cur->dev = dev;
+
+ pos = &cur->list;
+ list_add(pos, head);
+ ndevs++;
+ continue;
+
+ default:
+ continue;
+ }
+ }
+
+ /*
+ * Take an initial pass through the devices lying behind our host
+ * bridge. Make sure each one is actually an AGP 3.0 device, otherwise
+ * exit with an error message. Along the way store the AGP 3.0
+ * cap_ptr for each device
+ */
+ list_for_each(pos, head) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+ dev = cur->dev;
+
+ pci_read_config_word(dev, PCI_STATUS, &mpstat);
+ if ((mpstat & PCI_STATUS_CAP_LIST) == 0)
+ continue;
+
+ pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx);
+ if (mcapndx != 0) {
+ do {
+ pci_read_config_dword(dev, mcapndx, &ncapid);
+ if ((ncapid & 0xff) != 2)
+ mcapndx = (ncapid >> 8) & 0xff;
+ }
+ while (((ncapid & 0xff) != 2) && (mcapndx != 0));
+ }
+
+ if (mcapndx == 0) {
+ dev_err(&td->dev, "woah! Non-AGP device %s on "
+ "secondary bus of AGP 3.5 bridge!\n",
+ pci_name(dev));
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
+ if (mmajor < 3) {
+ dev_err(&td->dev, "woah! AGP 2.0 device %s on "
+ "secondary bus of AGP 3.5 bridge operating "
+ "with AGP 3.0 electricals!\n", pci_name(dev));
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ cur->capndx = mcapndx;
+
+ pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus);
+
+ if (((mstatus >> 3) & 0x1) == 0) {
+ dev_err(&td->dev, "woah! AGP 3.x device %s not "
+ "operating in AGP 3.x mode on secondary bus "
+ "of AGP 3.5 bridge operating with AGP 3.0 "
+ "electricals!\n", pci_name(dev));
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+ }
+
+ /*
+ * Call functions to divide target resources amongst the AGP 3.0
+ * masters. This process is dramatically different depending on
+ * whether isochronous transfers are supported.
+ */
+ if (isoch) {
+ ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs);
+ if (ret) {
+ dev_info(&td->dev, "something bad happened setting "
+ "up isochronous xfers; falling back to "
+ "non-isochronous xfer mode\n");
+ } else {
+ goto free_and_exit;
+ }
+ }
+ agp_3_5_nonisochronous_node_enable(bridge, dev_list, ndevs);
+
+free_and_exit:
+ /* Be sure to free the dev_list */
+ for (pos=head->next; pos!=head; ) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+
+ pos = pos->next;
+ kfree(cur);
+ }
+ kfree(dev_list);
+
+get_out:
+ return ret;
+}
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
new file mode 100644
index 0000000000..dbcbc06cc2
--- /dev/null
+++ b/drivers/char/agp/nvidia-agp.c
@@ -0,0 +1,467 @@
+/*
+ * Nvidia AGPGART routines.
+ * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
+ * to work in 2.5 by Dave Jones.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
+#include <linux/jiffies.h>
+#include "agp.h"
+
+/* NVIDIA registers */
+#define NVIDIA_0_APSIZE 0x80
+#define NVIDIA_1_WBC 0xf0
+#define NVIDIA_2_GARTCTRL 0xd0
+#define NVIDIA_2_APBASE 0xd8
+#define NVIDIA_2_APLIMIT 0xdc
+#define NVIDIA_2_ATTBASE(i) (0xe0 + (i) * 4)
+#define NVIDIA_3_APBASE 0x50
+#define NVIDIA_3_APLIMIT 0x54
+
+
+static struct _nvidia_private {
+ struct pci_dev *dev_1;
+ struct pci_dev *dev_2;
+ struct pci_dev *dev_3;
+ volatile u32 __iomem *aperture;
+ int num_active_entries;
+ off_t pg_offset;
+ u32 wbc_mask;
+} nvidia_private;
+
+
+static int nvidia_fetch_size(void)
+{
+ int i;
+ u8 size_value;
+ struct aper_size_info_8 *values;
+
+ pci_read_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, &size_value);
+ size_value &= 0x0f;
+ values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (size_value == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+#define SYSCFG 0xC0010010
+#define IORR_BASE0 0xC0010016
+#define IORR_MASK0 0xC0010017
+#define AMD_K7_NUM_IORR 2
+
+static int nvidia_init_iorr(u32 base, u32 size)
+{
+ u32 base_hi, base_lo;
+ u32 mask_hi, mask_lo;
+ u32 sys_hi, sys_lo;
+ u32 iorr_addr, free_iorr_addr;
+
+ /* Find the iorr that is already used for the base */
+ /* If not found, determine the uppermost available iorr */
+ free_iorr_addr = AMD_K7_NUM_IORR;
+ for (iorr_addr = 0; iorr_addr < AMD_K7_NUM_IORR; iorr_addr++) {
+ rdmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi);
+ rdmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi);
+
+ if ((base_lo & 0xfffff000) == (base & 0xfffff000))
+ break;
+
+ if ((mask_lo & 0x00000800) == 0)
+ free_iorr_addr = iorr_addr;
+ }
+
+ if (iorr_addr >= AMD_K7_NUM_IORR) {
+ iorr_addr = free_iorr_addr;
+ if (iorr_addr >= AMD_K7_NUM_IORR)
+ return -EINVAL;
+ }
+ base_hi = 0x0;
+ base_lo = (base & ~0xfff) | 0x18;
+ mask_hi = 0xf;
+ mask_lo = ((~(size - 1)) & 0xfffff000) | 0x800;
+ wrmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi);
+ wrmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi);
+
+ rdmsr(SYSCFG, sys_lo, sys_hi);
+ sys_lo |= 0x00100000;
+ wrmsr(SYSCFG, sys_lo, sys_hi);
+
+ return 0;
+}
+
+static int nvidia_configure(void)
+{
+ int i, rc, num_dirs;
+ u32 apbase, aplimit;
+ phys_addr_t apbase_phys;
+ struct aper_size_info_8 *current_size;
+ u32 temp;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE,
+ current_size->size_value);
+
+ /* address to map to */
+ apbase = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR);
+ agp_bridge->gart_bus_addr = apbase;
+ aplimit = apbase + (current_size->size * 1024 * 1024) - 1;
+ pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APBASE, apbase);
+ pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APLIMIT, aplimit);
+ pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APBASE, apbase);
+ pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APLIMIT, aplimit);
+ if (0 != (rc = nvidia_init_iorr(apbase, current_size->size * 1024 * 1024)))
+ return rc;
+
+ /* directory size is 64k */
+ num_dirs = current_size->size / 64;
+ nvidia_private.num_active_entries = current_size->num_entries;
+ nvidia_private.pg_offset = 0;
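+ /* Apertures smaller than 64M still get a full 64K directory; only the
+ * slice of entries matching the aperture's position inside its
+ * surrounding 64M window is used, hence the pg_offset. */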
+ if (num_dirs == 0) {
+ num_dirs = 1;
+ nvidia_private.num_active_entries /= (64 / current_size->size);
+ nvidia_private.pg_offset = (apbase & (64 * 1024 * 1024 - 1) &
+ ~(current_size->size * 1024 * 1024 - 1)) / PAGE_SIZE;
+ }
+
+ /* attbase */
+ for (i = 0; i < 8; i++) {
+ pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_ATTBASE(i),
+ (agp_bridge->gatt_bus_addr + (i % num_dirs) * 64 * 1024) | 1);
+ }
+
+ /* gtlb control */
+ pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp);
+ pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp | 0x11);
+
+ /* gart control */
+ pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp);
+ pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp | 0x100);
+
+ /* map aperture */
+ apbase_phys = pci_resource_start(agp_bridge->dev, AGP_APERTURE_BAR);
+ nvidia_private.aperture =
+ (volatile u32 __iomem *) ioremap(apbase_phys, 33 * PAGE_SIZE);
+
+ if (!nvidia_private.aperture)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void nvidia_cleanup(void)
+{
+ struct aper_size_info_8 *previous_size;
+ u32 temp;
+
+ /* gart control */
+ pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp);
+ pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp & ~(0x100));
+
+ /* gtlb control */
+ pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp);
+ pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp & ~(0x11));
+
+ /* unmap aperture */
+ iounmap((void __iomem *) nvidia_private.aperture);
+
+ /* restore previous aperture size */
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE,
+ previous_size->size_value);
+
+ /* restore iorr for previous aperture size */
+ nvidia_init_iorr(agp_bridge->gart_bus_addr,
+ previous_size->size * 1024 * 1024);
+}
+
+
+/*
+ * Note we can't use the generic routines, even though they are 99% the same.
+ * Aperture sizes <64M still require a full 64k GART directory, but
+ * only the portion of the TLB entries that corresponds to the aperture's
+ * alignment inside the surrounding 64M block is used.
+ */
+extern int agp_memory_reserved;
+
+static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i, j;
+ int mask_type;
+
+ mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
+ if (mask_type != 0 || type != mem->type)
+ return -EINVAL;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ if ((pg_start + mem->page_count) >
+ (nvidia_private.num_active_entries - agp_memory_reserved/PAGE_SIZE))
+ return -EINVAL;
+
+ for (j = pg_start; j < (pg_start + mem->page_count); j++) {
+ if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j)))
+ return -EBUSY;
+ }
+
+ if (!mem->is_flushed) {
+ global_cache_flush();
+ mem->is_flushed = true;
+ }
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]), mask_type),
+ agp_bridge->gatt_table+nvidia_private.pg_offset+j);
+ }
+
+ /* PCI Posting. */
+ readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j - 1);
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+
+static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i;
+
+ int mask_type;
+
+ mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
+ if (mask_type != 0 || type != mem->type)
+ return -EINVAL;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++)
+ writel(agp_bridge->scratch_page, agp_bridge->gatt_table+nvidia_private.pg_offset+i);
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+
+static void nvidia_tlbflush(struct agp_memory *mem)
+{
+ unsigned long end;
+ u32 wbc_reg;
+ u32 __maybe_unused temp;
+ int i;
+
+ /* flush chipset */
+ if (nvidia_private.wbc_mask) {
+ pci_read_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, &wbc_reg);
+ wbc_reg |= nvidia_private.wbc_mask;
+ pci_write_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, wbc_reg);
+
+ end = jiffies + 3*HZ;
+ do {
+ pci_read_config_dword(nvidia_private.dev_1,
+ NVIDIA_1_WBC, &wbc_reg);
+ if (time_before_eq(end, jiffies)) {
+ printk(KERN_ERR PFX
+ "TLB flush took more than 3 seconds.\n");
+ }
+ } while (wbc_reg & nvidia_private.wbc_mask);
+ }
+
+ /* flush TLB entries */
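+ /*
+ * Dummy reads through the first 32+1 aperture pages force the chipset to
+ * walk the GART again; the pass is presumably run twice to make sure every
+ * stale entry has been evicted.
+ */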
+ for (i = 0; i < 32 + 1; i++)
+ temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
+ for (i = 0; i < 32 + 1; i++)
+ temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
+}
+
+
+static const struct aper_size_info_8 nvidia_generic_sizes[5] =
+{
+ {512, 131072, 7, 0},
+ {256, 65536, 6, 8},
+ {128, 32768, 5, 12},
+ {64, 16384, 4, 14},
+ /* The 32M mode still requires a 64k gatt */
+ {32, 16384, 4, 15}
+};
+
+
+static const struct gatt_mask nvidia_generic_masks[] =
+{
+ { .mask = 1, .type = 0}
+};
+
+
+static const struct agp_bridge_driver nvidia_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = nvidia_generic_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 5,
+ .needs_scratch_page = true,
+ .configure = nvidia_configure,
+ .fetch_size = nvidia_fetch_size,
+ .cleanup = nvidia_cleanup,
+ .tlb_flush = nvidia_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = nvidia_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = nvidia_insert_memory,
+ .remove_memory = nvidia_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static int agp_nvidia_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+
+ nvidia_private.dev_1 =
+ pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ (unsigned int)pdev->bus->number,
+ PCI_DEVFN(0, 1));
+ nvidia_private.dev_2 =
+ pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ (unsigned int)pdev->bus->number,
+ PCI_DEVFN(0, 2));
+ nvidia_private.dev_3 =
+ pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ (unsigned int)pdev->bus->number,
+ PCI_DEVFN(30, 0));
+
+ if (!nvidia_private.dev_1 || !nvidia_private.dev_2 || !nvidia_private.dev_3) {
+ printk(KERN_INFO PFX "Detected an NVIDIA nForce/nForce2 "
+ "chipset, but could not find the secondary devices.\n");
+ return -ENODEV;
+ }
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_NVIDIA_NFORCE:
+ printk(KERN_INFO PFX "Detected NVIDIA nForce chipset\n");
+ nvidia_private.wbc_mask = 0x00010000;
+ break;
+ case PCI_DEVICE_ID_NVIDIA_NFORCE2:
+ printk(KERN_INFO PFX "Detected NVIDIA nForce2 chipset\n");
+ nvidia_private.wbc_mask = 0x80000000;
+ break;
+ default:
+ printk(KERN_ERR PFX "Unsupported NVIDIA chipset (device id: %04x)\n",
+ pdev->device);
+ return -ENODEV;
+ }
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver = &nvidia_driver;
+ bridge->dev_private_data = &nvidia_private;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void agp_nvidia_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static int agp_nvidia_resume(struct device *dev)
+{
+ /* reconfigure AGP hardware again */
+ nvidia_configure();
+
+ return 0;
+}
+
+static const struct pci_device_id agp_nvidia_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NFORCE,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NFORCE2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_nvidia_pci_table);
+
+static DEFINE_SIMPLE_DEV_PM_OPS(agp_nvidia_pm_ops, NULL, agp_nvidia_resume);
+
+static struct pci_driver agp_nvidia_pci_driver = {
+ .name = "agpgart-nvidia",
+ .id_table = agp_nvidia_pci_table,
+ .probe = agp_nvidia_probe,
+ .remove = agp_nvidia_remove,
+ .driver.pm = &agp_nvidia_pm_ops,
+};
+
+static int __init agp_nvidia_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_nvidia_pci_driver);
+}
+
+static void __exit agp_nvidia_cleanup(void)
+{
+ pci_unregister_driver(&agp_nvidia_pci_driver);
+ pci_dev_put(nvidia_private.dev_1);
+ pci_dev_put(nvidia_private.dev_2);
+ pci_dev_put(nvidia_private.dev_3);
+}
+
+module_init(agp_nvidia_init);
+module_exit(agp_nvidia_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("NVIDIA Corporation");
+
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
new file mode 100644
index 0000000000..edbc4d3381
--- /dev/null
+++ b/drivers/char/agp/parisc-agp.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HP Quicksilver AGP GART routines
+ *
+ * Copyright (c) 2006, Kyle McMartin <kyle@parisc-linux.org>
+ *
+ * Based on drivers/char/agpgart/hp-agp.c which is
+ * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/klist.h>
+#include <linux/agp_backend.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+
+#include <asm/parisc-device.h>
+#include <asm/ropes.h>
+
+#include "agp.h"
+
+#define DRVNAME "quicksilver"
+#define DRVPFX DRVNAME ": "
+
+#define AGP8X_MODE_BIT 3
+#define AGP8X_MODE (1 << AGP8X_MODE_BIT)
+
+static unsigned long
+parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
+ int type);
+
+static struct _parisc_agp_info {
+ void __iomem *ioc_regs;
+ void __iomem *lba_regs;
+
+ int lba_cap_offset;
+
+ __le64 *gatt;
+ u64 gatt_entries;
+
+ u64 gart_base;
+ u64 gart_size;
+
+ int io_page_size;
+ int io_pages_per_kpage;
+} parisc_agp_info;
+
+static struct gatt_mask parisc_agp_masks[] =
+{
+ {
+ .mask = SBA_PDIR_VALID_BIT,
+ .type = 0
+ }
+};
+
+static struct aper_size_info_fixed parisc_agp_sizes[] =
+{
+ {0, 0, 0}, /* filled in by parisc_agp_fetch_size() */
+};
+
+static int
+parisc_agp_fetch_size(void)
+{
+ int size;
+
+ size = parisc_agp_info.gart_size / MB(1);
+ parisc_agp_sizes[0].size = size;
+ agp_bridge->current_size = (void *) &parisc_agp_sizes[0];
+
+ return size;
+}
+
+static int
+parisc_agp_configure(void)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+
+ agp_bridge->gart_bus_addr = info->gart_base;
+ agp_bridge->capndx = info->lba_cap_offset;
+ agp_bridge->mode = readl(info->lba_regs+info->lba_cap_offset+PCI_AGP_STATUS);
+
+ return 0;
+}
+
+static void
+parisc_agp_tlbflush(struct agp_memory *mem)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+
+ /* force fdc ops to be visible to IOMMU */
+ asm_io_sync();
+
+ writeq(info->gart_base | ilog2(info->gart_size), info->ioc_regs+IOC_PCOM);
+ readq(info->ioc_regs+IOC_PCOM); /* flush */
+}
+
+static int
+parisc_agp_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+ int i;
+
+ for (i = 0; i < info->gatt_entries; i++) {
+ info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page);
+ }
+
+ return 0;
+}
+
+static int
+parisc_agp_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+
+ info->gatt[0] = SBA_AGPGART_COOKIE;
+
+ return 0;
+}
+
+static int
+parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+ int i, k;
+ off_t j, io_pg_start;
+ int io_pg_count;
+
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
+ return -EINVAL;
+ }
+
+ io_pg_start = info->io_pages_per_kpage * pg_start;
+ io_pg_count = info->io_pages_per_kpage * mem->page_count;
+ if ((io_pg_start + io_pg_count) > info->gatt_entries) {
+ return -EINVAL;
+ }
+
+ j = io_pg_start;
+ while (j < (io_pg_start + io_pg_count)) {
+ if (info->gatt[j])
+ return -EBUSY;
+ j++;
+ }
+
+ if (!mem->is_flushed) {
+ global_cache_flush();
+ mem->is_flushed = true;
+ }
+
+ for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
+ unsigned long paddr;
+
+ paddr = page_to_phys(mem->pages[i]);
+ for (k = 0;
+ k < info->io_pages_per_kpage;
+ k++, j++, paddr += info->io_page_size) {
+ info->gatt[j] = cpu_to_le64(
+ parisc_agp_mask_memory(agp_bridge,
+ paddr, type));
+ asm_io_fdc(&info->gatt[j]);
+ }
+ }
+
+ agp_bridge->driver->tlb_flush(mem);
+
+ return 0;
+}
+
+static int
+parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+ int i, io_pg_start, io_pg_count;
+
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
+ return -EINVAL;
+ }
+
+ io_pg_start = info->io_pages_per_kpage * pg_start;
+ io_pg_count = info->io_pages_per_kpage * mem->page_count;
+ for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
+ info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page);
+ }
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static unsigned long
+parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
+ int type)
+{
+ unsigned ci; /* coherent index */
+ dma_addr_t pa;
+
+ pa = addr & IOVP_MASK;
+ asm("lci 0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pa)));
+
+ pa |= (ci >> PAGE_SHIFT) & 0xff;/* move CI (8 bits) into lowest byte */
+ pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
+
+ /* return native (big-endian) PDIR entry */
+ return pa;
+}
+
+static void
+parisc_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+ u32 command;
+
+ command = readl(info->lba_regs + info->lba_cap_offset + PCI_AGP_STATUS);
+
+ command = agp_collect_device_status(bridge, mode, command);
+ command |= 0x00000100;
+
+ writel(command, info->lba_regs + info->lba_cap_offset + PCI_AGP_COMMAND);
+
+ agp_device_command(command, (mode & AGP8X_MODE) != 0);
+}
+
+static const struct agp_bridge_driver parisc_agp_driver = {
+ .owner = THIS_MODULE,
+ .size_type = FIXED_APER_SIZE,
+ .configure = parisc_agp_configure,
+ .fetch_size = parisc_agp_fetch_size,
+ .tlb_flush = parisc_agp_tlbflush,
+ .mask_memory = parisc_agp_mask_memory,
+ .masks = parisc_agp_masks,
+ .agp_enable = parisc_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = parisc_agp_create_gatt_table,
+ .free_gatt_table = parisc_agp_free_gatt_table,
+ .insert_memory = parisc_agp_insert_memory,
+ .remove_memory = parisc_agp_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+ .cant_use_aperture = true,
+};
+
+static int __init
+agp_ioc_init(void __iomem *ioc_regs)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+ u64 iova_base, io_tlb_ps;
+ __le64 *io_pdir;
+ int io_tlb_shift;
+
+ printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n");
+
+ info->ioc_regs = ioc_regs;
+
+ io_tlb_ps = readq(info->ioc_regs+IOC_TCNFG);
+ switch (io_tlb_ps) {
+ case 0: io_tlb_shift = 12; break;
+ case 1: io_tlb_shift = 13; break;
+ case 2: io_tlb_shift = 14; break;
+ case 3: io_tlb_shift = 16; break;
+ default:
+ printk(KERN_ERR DRVPFX "Invalid IOTLB page size "
+ "configuration 0x%llx\n", io_tlb_ps);
+ info->gatt = NULL;
+ info->gatt_entries = 0;
+ return -ENODEV;
+ }
+ info->io_page_size = 1 << io_tlb_shift;
+ info->io_pages_per_kpage = PAGE_SIZE / info->io_page_size;
+
+ iova_base = readq(info->ioc_regs+IOC_IBASE) & ~0x1;
+ info->gart_base = iova_base + PLUTO_IOVA_SIZE - PLUTO_GART_SIZE;
+
+ info->gart_size = PLUTO_GART_SIZE;
+ info->gatt_entries = info->gart_size / info->io_page_size;
+
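+ /*
+ * The GART sits at the top of the Pluto IOVA space, so its entries are the
+ * tail of the IO PDIR that sba_iommu has already set up; point gatt at
+ * that slice instead of allocating a table of our own.
+ */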
+ io_pdir = phys_to_virt(readq(info->ioc_regs+IOC_PDIR_BASE));
+ info->gatt = &io_pdir[(PLUTO_IOVA_SIZE/2) >> PAGE_SHIFT];
+
+ if (info->gatt[0] != SBA_AGPGART_COOKIE) {
+ info->gatt = NULL;
+ info->gatt_entries = 0;
+ printk(KERN_ERR DRVPFX "No reserved IO PDIR entry found; "
+ "GART disabled\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
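+/*
+ * Hand-rolled PCI capability walk over the LBA's memory-mapped register
+ * space; the AGP bridge here is represented by a fake pci_dev, so the
+ * regular pci_find_capability() helper can't be used.
+ */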
+static int __init
+lba_find_capability(int cap)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+ u16 status;
+ u8 pos, id;
+ int ttl = 48;
+
+ status = readw(info->lba_regs + PCI_STATUS);
+ if (!(status & PCI_STATUS_CAP_LIST))
+ return 0;
+ pos = readb(info->lba_regs + PCI_CAPABILITY_LIST);
+ while (ttl-- && pos >= 0x40) {
+ pos &= ~3;
+ id = readb(info->lba_regs + pos + PCI_CAP_LIST_ID);
+ if (id == 0xff)
+ break;
+ if (id == cap)
+ return pos;
+ pos = readb(info->lba_regs + pos + PCI_CAP_LIST_NEXT);
+ }
+ return 0;
+}
+
+static int __init
+agp_lba_init(void __iomem *lba_hpa)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+ int cap;
+
+ info->lba_regs = lba_hpa;
+ info->lba_cap_offset = lba_find_capability(PCI_CAP_ID_AGP);
+
+ cap = readl(lba_hpa + info->lba_cap_offset) & 0xff;
+ if (cap != PCI_CAP_ID_AGP) {
+ printk(KERN_ERR DRVPFX "Invalid capability ID 0x%02x at 0x%x\n",
+ cap, info->lba_cap_offset);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __init
+parisc_agp_setup(void __iomem *ioc_hpa, void __iomem *lba_hpa)
+{
+ struct pci_dev *fake_bridge_dev = NULL;
+ struct agp_bridge_data *bridge;
+ int error = 0;
+
+ fake_bridge_dev = pci_alloc_dev(NULL);
+ if (!fake_bridge_dev) {
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ error = agp_ioc_init(ioc_hpa);
+ if (error)
+ goto fail;
+
+ error = agp_lba_init(lba_hpa);
+ if (error)
+ goto fail;
+
+ bridge = agp_alloc_bridge();
+ if (!bridge) {
+ error = -ENOMEM;
+ goto fail;
+ }
+ bridge->driver = &parisc_agp_driver;
+
+ fake_bridge_dev->vendor = PCI_VENDOR_ID_HP;
+ fake_bridge_dev->device = PCI_DEVICE_ID_HP_PCIX_LBA;
+ bridge->dev = fake_bridge_dev;
+
+ error = agp_add_bridge(bridge);
+ if (error)
+ goto fail;
+ return 0;
+
+fail:
+ kfree(fake_bridge_dev);
+ return error;
+}
+
+static int __init
+find_quicksilver(struct device *dev, void *data)
+{
+ struct parisc_device **lba = data;
+ struct parisc_device *padev = to_parisc_device(dev);
+
+ if (IS_QUICKSILVER(padev))
+ *lba = padev;
+
+ return 0;
+}
+
+static int __init
+parisc_agp_init(void)
+{
+ int err = -1;
+ struct parisc_device *sba = NULL, *lba = NULL;
+ struct lba_device *lbadev = NULL;
+
+ if (!sba_list)
+ goto out;
+
+ /* Find our parent Pluto */
+ sba = sba_list->dev;
+ if (!IS_PLUTO(sba)) {
+ printk(KERN_INFO DRVPFX "No Pluto found, so no AGPGART for you.\n");
+ goto out;
+ }
+
+ /* Now search our Pluto for our precious AGP device... */
+ device_for_each_child(&sba->dev, &lba, find_quicksilver);
+
+ if (!lba) {
+ printk(KERN_INFO DRVPFX "No AGP devices found.\n");
+ goto out;
+ }
+
+ lbadev = parisc_get_drvdata(lba);
+
+ /* w00t, let's go find our cookies... */
+ parisc_agp_setup(sba_list->ioc[0].ioc_hpa, lbadev->hba.base_addr);
+
+ return 0;
+
+out:
+ return err;
+}
+
+module_init(parisc_agp_init);
+
+MODULE_AUTHOR("Kyle McMartin <kyle@parisc-linux.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
new file mode 100644
index 0000000000..484bb101c5
--- /dev/null
+++ b/drivers/char/agp/sis-agp.c
@@ -0,0 +1,436 @@
+/*
+ * SiS AGPGART routines.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/delay.h>
+#include "agp.h"
+
+#define SIS_ATTBASE 0x90
+#define SIS_APSIZE 0x94
+#define SIS_TLBCNTRL 0x97
+#define SIS_TLBFLUSH 0x98
+
+#define PCI_DEVICE_ID_SI_662 0x0662
+#define PCI_DEVICE_ID_SI_671 0x0671
+
+static bool agp_sis_force_delay = 0;
+static int agp_sis_agp_spec = -1;
+
+static int sis_fetch_size(void)
+{
+ u8 temp_size;
+ int i;
+ struct aper_size_info_8 *values;
+
+ pci_read_config_byte(agp_bridge->dev, SIS_APSIZE, &temp_size);
+ values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if ((temp_size == values[i].size_value) ||
+ ((temp_size & ~(0x07)) ==
+ (values[i].size_value & ~(0x07)))) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static void sis_tlbflush(struct agp_memory *mem)
+{
+ pci_write_config_byte(agp_bridge->dev, SIS_TLBFLUSH, 0x02);
+}
+
+static int sis_configure(void)
+{
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+ pci_write_config_byte(agp_bridge->dev, SIS_TLBCNTRL, 0x05);
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
+ pci_write_config_dword(agp_bridge->dev, SIS_ATTBASE,
+ agp_bridge->gatt_bus_addr);
+ pci_write_config_byte(agp_bridge->dev, SIS_APSIZE,
+ current_size->size_value);
+ return 0;
+}
+
+static void sis_cleanup(void)
+{
+ struct aper_size_info_8 *previous_size;
+
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ pci_write_config_byte(agp_bridge->dev, SIS_APSIZE,
+ (previous_size->size_value & ~(0x03)));
+}
+
+static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ struct pci_dev *device = NULL;
+ u32 command;
+ int rate;
+
+ dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
+ agp_bridge->major_version, agp_bridge->minor_version);
+
+ pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command);
+ command = agp_collect_device_status(bridge, mode, command);
+ command |= AGPSTAT_AGP_ENABLE;
+ rate = (command & 0x7) << 2;
+
+ for_each_pci_dev(device) {
+ u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
+ if (!agp)
+ continue;
+
+ dev_info(&agp_bridge->dev->dev, "putting AGP V3 device at %s into %dx mode\n",
+ pci_name(device), rate);
+
+ pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command);
+
+ /*
+ * Weird: on some SiS chipsets any rate change in the target's
+ * command register triggers a 5 ms screwup during which the master
+ * cannot be configured.
+ */
+ if (device->device == bridge->dev->device) {
+ dev_info(&agp_bridge->dev->dev, "SiS delay workaround: giving bridge time to recover\n");
+ msleep(10);
+ }
+ }
+}
+
+static const struct aper_size_info_8 sis_generic_sizes[7] =
+{
+ {256, 65536, 6, 99},
+ {128, 32768, 5, 83},
+ {64, 16384, 4, 67},
+ {32, 8192, 3, 51},
+ {16, 4096, 2, 35},
+ {8, 2048, 1, 19},
+ {4, 1024, 0, 3}
+};
+
+static struct agp_bridge_driver sis_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = sis_generic_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = sis_configure,
+ .fetch_size = sis_fetch_size,
+ .cleanup = sis_cleanup,
+ .tlb_flush = sis_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+// chipsets that require the 'delay hack'
+static int sis_broken_chipsets[] = {
+ PCI_DEVICE_ID_SI_648,
+ PCI_DEVICE_ID_SI_746,
+ 0 // terminator
+};
+
+static void sis_get_driver(struct agp_bridge_data *bridge)
+{
+ int i;
+
+ for (i=0; sis_broken_chipsets[i]!=0; ++i)
+ if (bridge->dev->device==sis_broken_chipsets[i])
+ break;
+
+ if (sis_broken_chipsets[i] || agp_sis_force_delay)
+ sis_driver.agp_enable=sis_delayed_enable;
+
+ // SiS chipsets that indicate less than AGP 3.5
+ // are not actually fully AGP 3 compliant
+ if ((agp_bridge->major_version == 3 && agp_bridge->minor_version >= 5
+ && agp_sis_agp_spec!=0) || agp_sis_agp_spec==1) {
+ sis_driver.aperture_sizes = agp3_generic_sizes;
+ sis_driver.size_type = U16_APER_SIZE;
+ sis_driver.num_aperture_sizes = AGP_GENERIC_SIZES_ENTRIES;
+ sis_driver.configure = agp3_generic_configure;
+ sis_driver.fetch_size = agp3_generic_fetch_size;
+ sis_driver.cleanup = agp3_generic_cleanup;
+ sis_driver.tlb_flush = agp3_generic_tlbflush;
+ }
+}
+
+
+static int agp_sis_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+
+ dev_info(&pdev->dev, "SiS chipset [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver = &sis_driver;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ get_agp_version(bridge);
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
+ sis_get_driver(bridge);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void agp_sis_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static int agp_sis_resume(__attribute__((unused)) struct device *dev)
+{
+ return sis_driver.configure();
+}
+
+static const struct pci_device_id agp_sis_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_5591,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_530,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_540,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_550,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_620,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_630,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_635,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_645,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_646,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_648,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_650,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_651,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_655,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_661,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_662,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_671,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_730,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_735,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_740,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_741,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_745,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_746,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_sis_pci_table);
+
+static DEFINE_SIMPLE_DEV_PM_OPS(agp_sis_pm_ops, NULL, agp_sis_resume);
+
+static struct pci_driver agp_sis_pci_driver = {
+ .name = "agpgart-sis",
+ .id_table = agp_sis_pci_table,
+ .probe = agp_sis_probe,
+ .remove = agp_sis_remove,
+ .driver.pm = &agp_sis_pm_ops,
+};
+
+static int __init agp_sis_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_sis_pci_driver);
+}
+
+static void __exit agp_sis_cleanup(void)
+{
+ pci_unregister_driver(&agp_sis_pci_driver);
+}
+
+module_init(agp_sis_init);
+module_exit(agp_sis_cleanup);
+
+module_param(agp_sis_force_delay, bool, 0);
+MODULE_PARM_DESC(agp_sis_force_delay,"forces sis delay hack");
+module_param(agp_sis_agp_spec, int, 0);
+MODULE_PARM_DESC(agp_sis_agp_spec,"0=force sis init, 1=force generic agp3 init, default: autodetect");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
new file mode 100644
index 0000000000..b91da5998d
--- /dev/null
+++ b/drivers/char/agp/sworks-agp.c
@@ -0,0 +1,568 @@
+/*
+ * Serverworks AGPGART routines.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/agp_backend.h>
+#include <asm/set_memory.h>
+#include "agp.h"
+
+#define SVWRKS_COMMAND 0x04
+#define SVWRKS_APSIZE 0x10
+#define SVWRKS_MMBASE 0x14
+#define SVWRKS_CACHING 0x4b
+#define SVWRKS_AGP_ENABLE 0x60
+#define SVWRKS_FEATURE 0x68
+
+#define SVWRKS_SIZE_MASK 0xfe000000
+
+/* Memory mapped registers */
+#define SVWRKS_GART_CACHE 0x02
+#define SVWRKS_GATTBASE 0x04
+#define SVWRKS_TLBFLUSH 0x10
+#define SVWRKS_POSTFLUSH 0x14
+#define SVWRKS_DIRFLUSH 0x0c
+
+
+struct serverworks_page_map {
+ unsigned long *real;
+ unsigned long __iomem *remapped;
+};
+
+static struct _serverworks_private {
+ struct pci_dev *svrwrks_dev; /* device one */
+ volatile u8 __iomem *registers;
+ struct serverworks_page_map **gatt_pages;
+ int num_tables;
+ struct serverworks_page_map scratch_dir;
+
+ int gart_addr_ofs;
+ int mm_addr_ofs;
+} serverworks_private;
+
+static int serverworks_create_page_map(struct serverworks_page_map *page_map)
+{
+ int i;
+
+ page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+ if (page_map->real == NULL) {
+ return -ENOMEM;
+ }
+
+ set_memory_uc((unsigned long)page_map->real, 1);
+ page_map->remapped = page_map->real;
+
+ for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
+ writel(agp_bridge->scratch_page, page_map->remapped+i);
+ /* Red Pen: Everyone else does pci posting flush here */
+
+ return 0;
+}
+
+static void serverworks_free_page_map(struct serverworks_page_map *page_map)
+{
+ set_memory_wb((unsigned long)page_map->real, 1);
+ free_page((unsigned long) page_map->real);
+}
+
+static void serverworks_free_gatt_pages(void)
+{
+ int i;
+ struct serverworks_page_map **tables;
+ struct serverworks_page_map *entry;
+
+ tables = serverworks_private.gatt_pages;
+ for (i = 0; i < serverworks_private.num_tables; i++) {
+ entry = tables[i];
+ if (entry != NULL) {
+ if (entry->real != NULL) {
+ serverworks_free_page_map(entry);
+ }
+ kfree(entry);
+ }
+ }
+ kfree(tables);
+}
+
+static int serverworks_create_gatt_pages(int nr_tables)
+{
+ struct serverworks_page_map **tables;
+ struct serverworks_page_map *entry;
+ int retval = 0;
+ int i;
+
+ tables = kcalloc(nr_tables + 1, sizeof(struct serverworks_page_map *),
+ GFP_KERNEL);
+ if (tables == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_tables; i++) {
+ entry = kzalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
+ if (entry == NULL) {
+ retval = -ENOMEM;
+ break;
+ }
+ tables[i] = entry;
+ retval = serverworks_create_page_map(entry);
+ if (retval != 0) break;
+ }
+ serverworks_private.num_tables = nr_tables;
+ serverworks_private.gatt_pages = tables;
+
+ if (retval != 0) serverworks_free_gatt_pages();
+
+ return retval;
+}
+
+#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
+ GET_PAGE_DIR_IDX(addr)]->remapped)
+
+#ifndef GET_PAGE_DIR_OFF
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#endif
+
+#ifndef GET_PAGE_DIR_IDX
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
+#endif
+
+#ifndef GET_GATT_OFF
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#endif
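+
+/*
+ * The ServerWorks GART is a two-level table: bits 31:22 of an aperture
+ * address select a page-directory entry pointing at a 4k GATT page, and
+ * bits 21:12 select the entry within that page.
+ */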
+
+static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct aper_size_info_lvl2 *value;
+ struct serverworks_page_map page_dir;
+ int retval;
+ u32 temp;
+ int i;
+
+ value = A_SIZE_LVL2(agp_bridge->current_size);
+ retval = serverworks_create_page_map(&page_dir);
+ if (retval != 0) {
+ return retval;
+ }
+ retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
+ if (retval != 0) {
+ serverworks_free_page_map(&page_dir);
+ return retval;
+ }
+ /* Create a fake scratch directory */
+ for (i = 0; i < 1024; i++) {
+ writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
+ writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
+ }
+
+ retval = serverworks_create_gatt_pages(value->num_entries / 1024);
+ if (retval != 0) {
+ serverworks_free_page_map(&page_dir);
+ serverworks_free_page_map(&serverworks_private.scratch_dir);
+ return retval;
+ }
+
+ agp_bridge->gatt_table_real = (u32 *)page_dir.real;
+ agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
+ agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
+
+ /* Get the address for the gart region.
+ * This is a bus address even on the Alpha, because it's
+ * used to program the AGP master, not the CPU.
+ */
+
+ pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* Calculate the agp offset */
+ for (i = 0; i < value->num_entries / 1024; i++)
+ writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
+
+ return 0;
+}
+
+static int serverworks_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct serverworks_page_map page_dir;
+
+ page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
+ page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
+
+ serverworks_free_gatt_pages();
+ serverworks_free_page_map(&page_dir);
+ serverworks_free_page_map(&serverworks_private.scratch_dir);
+ return 0;
+}
+
+static int serverworks_fetch_size(void)
+{
+ int i;
+ u32 temp;
+ u32 temp2;
+ struct aper_size_info_lvl2 *values;
+
+ values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
+ pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
+ pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,
+ SVWRKS_SIZE_MASK);
+ pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp2);
+ pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,temp);
+ temp2 &= SVWRKS_SIZE_MASK;
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp2 == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This routine could be implemented by taking the addresses written to
+ * the GATT and flushing them individually. However, it currently just
+ * flushes the whole table, which is probably more efficient since
+ * agp_memory blocks can span a large number of entries.
+ */
+static void serverworks_tlbflush(struct agp_memory *temp)
+{
+ unsigned long timeout;
+
+ writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH);
+ timeout = jiffies + 3*HZ;
+ while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) {
+ cpu_relax();
+ if (time_after(jiffies, timeout)) {
+ dev_err(&serverworks_private.svrwrks_dev->dev,
+ "TLB post flush took more than 3 seconds\n");
+ break;
+ }
+ }
+
+ writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH);
+ timeout = jiffies + 3*HZ;
+ while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) {
+ cpu_relax();
+ if (time_after(jiffies, timeout)) {
+ dev_err(&serverworks_private.svrwrks_dev->dev,
+ "TLB Dir flush took more than 3 seconds\n");
+ break;
+ }
+ }
+}
+
+static int serverworks_configure(void)
+{
+ u32 temp;
+ u8 enable_reg;
+ u16 cap_reg;
+
+ /* Get the memory mapped registers */
+ pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
+ temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
+ if (!serverworks_private.registers) {
+ dev_err(&agp_bridge->dev->dev, "can't ioremap(%#x)\n", temp);
+ return -ENOMEM;
+ }
+
+ writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE);
+ readb(serverworks_private.registers+SVWRKS_GART_CACHE); /* PCI Posting. */
+
+ writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE);
+ readl(serverworks_private.registers+SVWRKS_GATTBASE); /* PCI Posting. */
+
+ cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND);
+ cap_reg &= ~0x0007;
+ cap_reg |= 0x4;
+ writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND);
+ readw(serverworks_private.registers+SVWRKS_COMMAND);
+
+ pci_read_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, &enable_reg);
+ enable_reg |= 0x1; /* Agp Enable bit */
+ pci_write_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, enable_reg);
+ serverworks_tlbflush(NULL);
+
+ agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP);
+
+ /* Fill in the mode register */
+ pci_read_config_dword(serverworks_private.svrwrks_dev,
+ agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);
+
+ pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg);
+ enable_reg &= ~0x3;
+ pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg);
+
+ pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg);
+ enable_reg |= (1<<6);
+ pci_write_config_byte(agp_bridge->dev,SVWRKS_FEATURE, enable_reg);
+
+ return 0;
+}
+
+static void serverworks_cleanup(void)
+{
+ iounmap((void __iomem *) serverworks_private.registers);
+}
+
+static int serverworks_insert_memory(struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ int i, j, num_entries;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+ if ((pg_start + mem->page_count) > num_entries) {
+ return -EINVAL;
+ }
+
+ j = pg_start;
+ while (j < (pg_start + mem->page_count)) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = SVRWRKS_GET_GATT(addr);
+ if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
+ return -EBUSY;
+ j++;
+ }
+
+ if (!mem->is_flushed) {
+ global_cache_flush();
+ mem->is_flushed = true;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = SVRWRKS_GET_GATT(addr);
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]), mem->type),
+ cur_gatt+GET_GATT_OFF(addr));
+ }
+ serverworks_tlbflush(mem);
+ return 0;
+}
+
+static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+
+ global_cache_flush();
+ serverworks_tlbflush(mem);
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = SVRWRKS_GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ }
+
+ serverworks_tlbflush(mem);
+ return 0;
+}
+
+static const struct gatt_mask serverworks_masks[] =
+{
+ {.mask = 1, .type = 0}
+};
+
+static const struct aper_size_info_lvl2 serverworks_sizes[7] =
+{
+ {2048, 524288, 0x80000000},
+ {1024, 262144, 0xc0000000},
+ {512, 131072, 0xe0000000},
+ {256, 65536, 0xf0000000},
+ {128, 32768, 0xf8000000},
+ {64, 16384, 0xfc000000},
+ {32, 8192, 0xfe000000}
+};
+
+static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ u32 command;
+
+ pci_read_config_dword(serverworks_private.svrwrks_dev,
+ bridge->capndx + PCI_AGP_STATUS,
+ &command);
+
+ command = agp_collect_device_status(bridge, mode, command);
+
+ command &= ~0x10; /* disable FW */
+ command &= ~0x08;
+
+ command |= 0x100;
+
+ pci_write_config_dword(serverworks_private.svrwrks_dev,
+ bridge->capndx + PCI_AGP_COMMAND,
+ command);
+
+ agp_device_command(command, false);
+}
+
+static const struct agp_bridge_driver sworks_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = serverworks_sizes,
+ .size_type = LVL2_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = serverworks_configure,
+ .fetch_size = serverworks_fetch_size,
+ .cleanup = serverworks_cleanup,
+ .tlb_flush = serverworks_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = serverworks_masks,
+ .agp_enable = serverworks_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = serverworks_create_gatt_table,
+ .free_gatt_table = serverworks_free_gatt_table,
+ .insert_memory = serverworks_insert_memory,
+ .remove_memory = serverworks_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static int agp_serverworks_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ struct pci_dev *bridge_dev;
+ u32 temp, temp2;
+ u8 cap_ptr = 0;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+
+ switch (pdev->device) {
+ case 0x0006:
+ dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n");
+ return -ENODEV;
+
+ case PCI_DEVICE_ID_SERVERWORKS_HE:
+ case PCI_DEVICE_ID_SERVERWORKS_LE:
+ case 0x0007:
+ break;
+
+ default:
+ if (cap_ptr)
+ dev_err(&pdev->dev, "unsupported Serverworks chipset "
+ "[%04x/%04x]\n", pdev->vendor, pdev->device);
+ return -ENODEV;
+ }
+
+ /* Everything is on func 1 here so we are hardcoding function one */
+ bridge_dev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ (unsigned int)pdev->bus->number,
+ PCI_DEVFN(0, 1));
+ if (!bridge_dev) {
+ dev_info(&pdev->dev, "can't find secondary device\n");
+ return -ENODEV;
+ }
+
+ serverworks_private.svrwrks_dev = bridge_dev;
+ serverworks_private.gart_addr_ofs = 0x10;
+
+ pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
+ if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
+ if (temp2 != 0) {
+ dev_info(&pdev->dev, "64 bit aperture address, "
+ "but top bits are not zero; disabling AGP\n");
+ return -ENODEV;
+ }
+ serverworks_private.mm_addr_ofs = 0x18;
+ } else
+ serverworks_private.mm_addr_ofs = 0x14;
+
+ pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
+ if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ pci_read_config_dword(pdev,
+ serverworks_private.mm_addr_ofs + 4, &temp2);
+ if (temp2 != 0) {
+ dev_info(&pdev->dev, "64 bit MMIO address, but top "
+ "bits are not zero; disabling AGP\n");
+ return -ENODEV;
+ }
+ }
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver = &sworks_driver;
+ bridge->dev_private_data = &serverworks_private;
+ bridge->dev = pci_dev_get(pdev);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void agp_serverworks_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ pci_dev_put(bridge->dev);
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+ pci_dev_put(serverworks_private.svrwrks_dev);
+ serverworks_private.svrwrks_dev = NULL;
+}
+
+static struct pci_device_id agp_serverworks_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SERVERWORKS,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);
+
+static struct pci_driver agp_serverworks_pci_driver = {
+ .name = "agpgart-serverworks",
+ .id_table = agp_serverworks_pci_table,
+ .probe = agp_serverworks_probe,
+ .remove = agp_serverworks_remove,
+};
+
+static int __init agp_serverworks_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_serverworks_pci_driver);
+}
+
+static void __exit agp_serverworks_cleanup(void)
+{
+ pci_unregister_driver(&agp_serverworks_pci_driver);
+}
+
+module_init(agp_serverworks_init);
+module_exit(agp_serverworks_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
new file mode 100644
index 0000000000..84411b13c4
--- /dev/null
+++ b/drivers/char/agp/uninorth-agp.c
@@ -0,0 +1,729 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UniNorth AGPGART routines.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <asm/uninorth.h>
+#include <asm/prom.h>
+#include <asm/pmac_feature.h>
+#include "agp.h"
+
+/*
+ * NOTES on uninorth3 (G5 AGP) support:
+ *
+ * It may also be possible to use a bigger cache line size for AGP
+ * (see pmac_pci.c and look for "cache line"). This still needs to be
+ * investigated by someone.
+ *
+ * The PAGE size is hardcoded, but this may change; see asm/page.h.
+ *
+ * Jerome Glisse <j.glisse@gmail.com>
+ */
+static int uninorth_rev;
+static int is_u3;
+static u32 scratch_value;
+
+#define DEFAULT_APERTURE_SIZE 256
+#define DEFAULT_APERTURE_STRING "256"
+static char *aperture = NULL;
+
+static int uninorth_fetch_size(void)
+{
+ int i, size = 0;
+ struct aper_size_info_32 *values =
+ A_SIZE_32(agp_bridge->driver->aperture_sizes);
+
+ if (aperture) {
+ char *save = aperture;
+
+ size = memparse(aperture, &aperture) >> 20;
+ aperture = save;
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++)
+ if (size == values[i].size)
+ break;
+
+ if (i == agp_bridge->driver->num_aperture_sizes) {
+ dev_err(&agp_bridge->dev->dev, "invalid aperture size, "
+ "using default\n");
+ size = 0;
+ aperture = NULL;
+ }
+ }
+
+ if (!size) {
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++)
+ if (values[i].size == DEFAULT_APERTURE_SIZE)
+ break;
+ }
+
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *)(values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+}
+
+static void uninorth_tlbflush(struct agp_memory *mem)
+{
+ u32 ctrl = UNI_N_CFG_GART_ENABLE;
+
+ if (is_u3)
+ ctrl |= U3_N_CFG_GART_PERFRD;
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+ ctrl | UNI_N_CFG_GART_INVAL);
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, ctrl);
+
+ if (!mem && uninorth_rev <= 0x30) {
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+ ctrl | UNI_N_CFG_GART_2xRESET);
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+ ctrl);
+ }
+}
+
+static void uninorth_cleanup(void)
+{
+ u32 tmp;
+
+ pci_read_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, &tmp);
+ if (!(tmp & UNI_N_CFG_GART_ENABLE))
+ return;
+ tmp |= UNI_N_CFG_GART_INVAL;
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, tmp);
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, 0);
+
+ if (uninorth_rev <= 0x30) {
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+ UNI_N_CFG_GART_2xRESET);
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+ 0);
+ }
+}
+
+static int uninorth_configure(void)
+{
+ struct aper_size_info_32 *current_size;
+
+ current_size = A_SIZE_32(agp_bridge->current_size);
+
+ dev_info(&agp_bridge->dev->dev, "configuring for size idx: %d\n",
+ current_size->size_value);
+
+ /* aperture size and gatt addr */
+ pci_write_config_dword(agp_bridge->dev,
+ UNI_N_CFG_GART_BASE,
+ (agp_bridge->gatt_bus_addr & 0xfffff000)
+ | current_size->size_value);
+
+ /* HACK ALERT
+ * UniNorth seems to be buggy enough that it doesn't handle the AGP
+ * aperture properly unless it is mapped at bus physical address 0.
+ */
+ agp_bridge->gart_bus_addr = 0;
+#ifdef CONFIG_PPC64
+ /* Assume U3 or later on PPC64 systems */
+ /* high 4 bits of GART physical address go in UNI_N_CFG_AGP_BASE */
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_AGP_BASE,
+ (agp_bridge->gatt_bus_addr >> 32) & 0xf);
+#else
+ pci_write_config_dword(agp_bridge->dev,
+ UNI_N_CFG_AGP_BASE, agp_bridge->gart_bus_addr);
+#endif
+
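+ /*
+ * U3 also wants the physical page number of the scratch page programmed
+ * as a GART "dummy page" (an assumption based on the register name:
+ * accesses through unpopulated entries are presumably directed there).
+ */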
+ if (is_u3) {
+ pci_write_config_dword(agp_bridge->dev,
+ UNI_N_CFG_GART_DUMMY_PAGE,
+ page_to_phys(agp_bridge->scratch_page_page) >> 12);
+ }
+
+ return 0;
+}
+
+static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i, num_entries;
+ void *temp;
+ u32 *gp;
+ int mask_type;
+
+ if (type != mem->type)
+ return -EINVAL;
+
+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+ if (mask_type != 0) {
+ /* We know nothing of memory types */
+ return -EINVAL;
+ }
+
+ if (mem->page_count == 0)
+ return 0;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_32(temp)->num_entries;
+
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ gp = (u32 *) &agp_bridge->gatt_table[pg_start];
+ for (i = 0; i < mem->page_count; ++i) {
+ if (gp[i] != scratch_value) {
+ dev_info(&agp_bridge->dev->dev,
+ "uninorth_insert_memory: entry 0x%x occupied (%x)\n",
+ i, gp[i]);
+ return -EBUSY;
+ }
+ }
+
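+ /*
+ * GART entry formats differ by generation: U3 stores the physical page
+ * number with the valid bit at 0x80000000 in native byte order, while
+ * older UniNorth stores a little-endian physical address with the valid
+ * bit in bit 0.
+ */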
+ for (i = 0; i < mem->page_count; i++) {
+ if (is_u3)
+ gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL;
+ else
+ gp[i] = cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) |
+ 0x1UL);
+ flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])),
+ (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000);
+ }
+ mb();
+ uninorth_tlbflush(mem);
+
+ return 0;
+}
+
+static int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ size_t i;
+ u32 *gp;
+ int mask_type;
+
+ if (type != mem->type)
+ return -EINVAL;
+
+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+ if (mask_type != 0) {
+ /* We know nothing of memory types */
+ return -EINVAL;
+ }
+
+ if (mem->page_count == 0)
+ return 0;
+
+ gp = (u32 *) &agp_bridge->gatt_table[pg_start];
+ for (i = 0; i < mem->page_count; ++i) {
+ gp[i] = scratch_value;
+ }
+ mb();
+ uninorth_tlbflush(mem);
+
+ return 0;
+}
+
+static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ u32 command, scratch, status;
+ int timeout;
+
+ pci_read_config_dword(bridge->dev,
+ bridge->capndx + PCI_AGP_STATUS,
+ &status);
+
+ command = agp_collect_device_status(bridge, mode, status);
+ command |= PCI_AGP_COMMAND_AGP;
+
+ if (uninorth_rev == 0x21) {
+ /*
+ * Darwin disables AGP 4x on this revision, so we
+ * may assume it's broken. This is an AGP2 controller.
+ */
+ command &= ~AGPSTAT2_4X;
+ }
+
+ if ((uninorth_rev >= 0x30) && (uninorth_rev <= 0x33)) {
+ /*
+ * We need to set REQ_DEPTH to 7 for U3 versions 1.0, 2.1,
+ * 2.2 and 2.3; Darwin does the same.
+ */
+ if ((command >> AGPSTAT_RQ_DEPTH_SHIFT) > 7)
+ command = (command & ~AGPSTAT_RQ_DEPTH)
+ | (7 << AGPSTAT_RQ_DEPTH_SHIFT);
+ }
+
+ uninorth_tlbflush(NULL);
+
+ timeout = 0;
+ do {
+ pci_write_config_dword(bridge->dev,
+ bridge->capndx + PCI_AGP_COMMAND,
+ command);
+ pci_read_config_dword(bridge->dev,
+ bridge->capndx + PCI_AGP_COMMAND,
+ &scratch);
+ } while ((scratch & PCI_AGP_COMMAND_AGP) == 0 && ++timeout < 1000);
+ if ((scratch & PCI_AGP_COMMAND_AGP) == 0)
+ dev_err(&bridge->dev->dev, "can't write UniNorth AGP "
+ "command register\n");
+
+ if (uninorth_rev >= 0x30) {
+ /* This is an AGP V3 */
+ agp_device_command(command, (status & AGPSTAT_MODE_3_0) != 0);
+ } else {
+ /* AGP V2 */
+ agp_device_command(command, false);
+ }
+
+ uninorth_tlbflush(NULL);
+}
+
+#ifdef CONFIG_PM
+/*
+ * These Power Management routines are _not_ called by the normal PCI PM layer,
+ * but directly by the video driver through function pointers in the device
+ * tree.
+ */
+static int agp_uninorth_suspend(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge;
+ u32 cmd;
+ u8 agp;
+ struct pci_dev *device = NULL;
+
+ bridge = agp_find_bridge(pdev);
+ if (bridge == NULL)
+ return -ENODEV;
+
+ /* Only one suspend supported */
+ if (bridge->dev_private_data)
+ return 0;
+
+ /* turn off AGP on the video chip, if it was enabled */
+ for_each_pci_dev(device) {
+ /* Don't touch the bridge yet, device first */
+ if (device == pdev)
+ continue;
+ /* Only deal with devices on the same bus here; no Mac has a P2P
+ * bridge on the AGP port, and mucking around the entire PCI
+ * tree is a source of problems on some machines because of a bug
+ * in some versions of pci_find_capability() when hitting a dead
+ * device.
+ */
+ if (device->bus != pdev->bus)
+ continue;
+ agp = pci_find_capability(device, PCI_CAP_ID_AGP);
+ if (!agp)
+ continue;
+ pci_read_config_dword(device, agp + PCI_AGP_COMMAND, &cmd);
+ if (!(cmd & PCI_AGP_COMMAND_AGP))
+ continue;
+ dev_info(&pdev->dev, "disabling AGP on device %s\n",
+ pci_name(device));
+ cmd &= ~PCI_AGP_COMMAND_AGP;
+ pci_write_config_dword(device, agp + PCI_AGP_COMMAND, cmd);
+ }
+
+ /* turn off AGP on the bridge */
+ agp = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ pci_read_config_dword(pdev, agp + PCI_AGP_COMMAND, &cmd);
+ bridge->dev_private_data = (void *)(long)cmd;
+ if (cmd & PCI_AGP_COMMAND_AGP) {
+ dev_info(&pdev->dev, "disabling AGP on bridge\n");
+ cmd &= ~PCI_AGP_COMMAND_AGP;
+ pci_write_config_dword(pdev, agp + PCI_AGP_COMMAND, cmd);
+ }
+ /* turn off the GART */
+ uninorth_cleanup();
+
+ return 0;
+}
+
+static int agp_uninorth_resume(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge;
+ u32 command;
+
+ bridge = agp_find_bridge(pdev);
+ if (bridge == NULL)
+ return -ENODEV;
+
+ command = (long)bridge->dev_private_data;
+ bridge->dev_private_data = NULL;
+ if (!(command & PCI_AGP_COMMAND_AGP))
+ return 0;
+
+ uninorth_agp_enable(bridge, command);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct {
+ struct page **pages_arr;
+} uninorth_priv;
+
+static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ char *table;
+ char *table_end;
+ int size;
+ int page_order;
+ int num_entries;
+ int i;
+ void *temp;
+ struct page *page;
+
+ /* We can't handle 2 level gatt's */
+ if (bridge->driver->size_type == LVL2_APER_SIZE)
+ return -EINVAL;
+
+ table = NULL;
+ i = bridge->aperture_size_idx;
+ temp = bridge->current_size;
+ size = page_order = num_entries = 0;
+
+ do {
+ size = A_SIZE_32(temp)->size;
+ page_order = A_SIZE_32(temp)->page_order;
+ num_entries = A_SIZE_32(temp)->num_entries;
+
+ table = (char *) __get_free_pages(GFP_KERNEL, page_order);
+
+ if (table == NULL) {
+ i++;
+ bridge->current_size = A_IDX32(bridge);
+ } else {
+ bridge->aperture_size_idx = i;
+ }
+ } while (!table && (i < bridge->driver->num_aperture_sizes));
+
+ if (table == NULL)
+ return -ENOMEM;
+
+ uninorth_priv.pages_arr = kmalloc_array(1 << page_order,
+ sizeof(struct page *),
+ GFP_KERNEL);
+ if (uninorth_priv.pages_arr == NULL)
+ goto enomem;
+
+ table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+ for (page = virt_to_page(table), i = 0; page <= virt_to_page(table_end);
+ page++, i++) {
+ SetPageReserved(page);
+ uninorth_priv.pages_arr[i] = page;
+ }
+
+ bridge->gatt_table_real = (u32 *) table;
+ /* Need to clear out any dirty data still sitting in caches */
+ flush_dcache_range((unsigned long)table,
+ (unsigned long)table_end + 1);
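+ /* Map the table uncached: the GART does not appear to snoop the CPU
+ * caches (hence the explicit dcache flushes here and in insert_memory). */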
+ bridge->gatt_table = vmap(uninorth_priv.pages_arr, (1 << page_order), 0, PAGE_KERNEL_NCG);
+
+ if (bridge->gatt_table == NULL)
+ goto enomem;
+
+ bridge->gatt_bus_addr = virt_to_phys(table);
+
+ if (is_u3)
+ scratch_value = (page_to_phys(agp_bridge->scratch_page_page) >> PAGE_SHIFT) | 0x80000000UL;
+ else
+ scratch_value = cpu_to_le32((page_to_phys(agp_bridge->scratch_page_page) & 0xFFFFF000UL) |
+ 0x1UL);
+ for (i = 0; i < num_entries; i++)
+ bridge->gatt_table[i] = scratch_value;
+
+ return 0;
+
+enomem:
+ kfree(uninorth_priv.pages_arr);
+ if (table)
+ free_pages((unsigned long)table, page_order);
+ return -ENOMEM;
+}
+
+static int uninorth_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ char *table, *table_end;
+ void *temp;
+ struct page *page;
+
+ temp = bridge->current_size;
+ page_order = A_SIZE_32(temp)->page_order;
+
+ /* Do not worry about freeing memory, because if this is
+ * called, then all agp memory is deallocated and removed
+ * from the table.
+ */
+
+ vunmap(bridge->gatt_table);
+ kfree(uninorth_priv.pages_arr);
+ table = (char *) bridge->gatt_table_real;
+ table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+ ClearPageReserved(page);
+
+ free_pages((unsigned long) bridge->gatt_table_real, page_order);
+
+ return 0;
+}
+
+static void null_cache_flush(void)
+{
+ mb();
+}
+
+/* Setup function */
+
+static const struct aper_size_info_32 uninorth_sizes[] =
+{
+ {256, 65536, 6, 64},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 16},
+ {32, 8192, 3, 8},
+ {16, 4096, 2, 4},
+ {8, 2048, 1, 2},
+ {4, 1024, 0, 1}
+};
+
+/*
+ * Not sure that the U3 supports such high aperture sizes, but it
+ * would be strange if it did not :)
+ */
+static const struct aper_size_info_32 u3_sizes[] =
+{
+ {512, 131072, 7, 128},
+ {256, 65536, 6, 64},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 16},
+ {32, 8192, 3, 8},
+ {16, 4096, 2, 4},
+ {8, 2048, 1, 2},
+ {4, 1024, 0, 1}
+};
+
+const struct agp_bridge_driver uninorth_agp_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = (void *)uninorth_sizes,
+ .size_type = U32_APER_SIZE,
+ .num_aperture_sizes = ARRAY_SIZE(uninorth_sizes),
+ .configure = uninorth_configure,
+ .fetch_size = uninorth_fetch_size,
+ .cleanup = uninorth_cleanup,
+ .tlb_flush = uninorth_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .cache_flush = null_cache_flush,
+ .agp_enable = uninorth_agp_enable,
+ .create_gatt_table = uninorth_create_gatt_table,
+ .free_gatt_table = uninorth_free_gatt_table,
+ .insert_memory = uninorth_insert_memory,
+ .remove_memory = uninorth_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+ .cant_use_aperture = true,
+ .needs_scratch_page = true,
+};
+
+const struct agp_bridge_driver u3_agp_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = (void *)u3_sizes,
+ .size_type = U32_APER_SIZE,
+ .num_aperture_sizes = ARRAY_SIZE(u3_sizes),
+ .configure = uninorth_configure,
+ .fetch_size = uninorth_fetch_size,
+ .cleanup = uninorth_cleanup,
+ .tlb_flush = uninorth_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .cache_flush = null_cache_flush,
+ .agp_enable = uninorth_agp_enable,
+ .create_gatt_table = uninorth_create_gatt_table,
+ .free_gatt_table = uninorth_free_gatt_table,
+ .insert_memory = uninorth_insert_memory,
+ .remove_memory = uninorth_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+ .cant_use_aperture = true,
+ .needs_scratch_page = true,
+};
+
+static struct agp_device_ids uninorth_agp_device_ids[] = {
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP,
+ .chipset_name = "UniNorth",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP_P,
+ .chipset_name = "UniNorth/Pangea",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP15,
+ .chipset_name = "UniNorth 1.5",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP2,
+ .chipset_name = "UniNorth 2",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_U3_AGP,
+ .chipset_name = "U3",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_U3L_AGP,
+ .chipset_name = "U3L",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_U3H_AGP,
+ .chipset_name = "U3H",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_IPID2_AGP,
+ .chipset_name = "UniNorth/Intrepid2",
+ },
+};
+
+static int agp_uninorth_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_device_ids *devs = uninorth_agp_device_ids;
+ struct agp_bridge_data *bridge;
+ struct device_node *uninorth_node;
+ u8 cap_ptr;
+ int j;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (cap_ptr == 0)
+ return -ENODEV;
+
+ /* probe for known chipsets */
+ for (j = 0; devs[j].chipset_name != NULL; ++j) {
+ if (pdev->device == devs[j].device_id) {
+ dev_info(&pdev->dev, "Apple %s chipset\n",
+ devs[j].chipset_name);
+ goto found;
+ }
+ }
+
+ dev_err(&pdev->dev, "unsupported Apple chipset [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
+ return -ENODEV;
+
+ found:
+ /* Set revision to 0 if we could not read it. */
+ uninorth_rev = 0;
+ is_u3 = 0;
+ /* Locate core99 Uni-N */
+ uninorth_node = of_find_node_by_name(NULL, "uni-n");
+ /* Locate G5 u3 */
+ if (uninorth_node == NULL) {
+ is_u3 = 1;
+ uninorth_node = of_find_node_by_name(NULL, "u3");
+ }
+ if (uninorth_node) {
+ const int *revprop = of_get_property(uninorth_node,
+ "device-rev", NULL);
+ if (revprop != NULL)
+ uninorth_rev = *revprop & 0x3f;
+ of_node_put(uninorth_node);
+ }
+
+#ifdef CONFIG_PM
+ /* Inform platform of our suspend/resume caps */
+ pmac_register_agp_pm(pdev, agp_uninorth_suspend, agp_uninorth_resume);
+#endif
+
+ /* Allocate & setup our driver */
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ if (is_u3)
+ bridge->driver = &u3_agp_driver;
+ else
+ bridge->driver = &uninorth_agp_driver;
+
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+ bridge->flags = AGP_ERRATA_FASTWRITES;
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev, cap_ptr+PCI_AGP_STATUS, &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void agp_uninorth_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+#ifdef CONFIG_PM
+ /* Inform platform of our suspend/resume caps */
+ pmac_register_agp_pm(pdev, NULL, NULL);
+#endif
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static const struct pci_device_id agp_uninorth_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_APPLE,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_uninorth_pci_table);
+
+static struct pci_driver agp_uninorth_pci_driver = {
+ .name = "agpgart-uninorth",
+ .id_table = agp_uninorth_pci_table,
+ .probe = agp_uninorth_probe,
+ .remove = agp_uninorth_remove,
+};
+
+static int __init agp_uninorth_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_uninorth_pci_driver);
+}
+
+static void __exit agp_uninorth_cleanup(void)
+{
+ pci_unregister_driver(&agp_uninorth_pci_driver);
+}
+
+module_init(agp_uninorth_init);
+module_exit(agp_uninorth_cleanup);
+
+module_param(aperture, charp, 0);
+MODULE_PARM_DESC(aperture,
+ "Aperture size, must be power of two between 4MB and an\n"
+ "\t\tupper limit specific to the UniNorth revision.\n"
+ "\t\tDefault: " DEFAULT_APERTURE_STRING "M");
+
+MODULE_AUTHOR("Ben Herrenschmidt & Paul Mackerras");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
new file mode 100644
index 0000000000..bc5140af2d
--- /dev/null
+++ b/drivers/char/agp/via-agp.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VIA AGPGART routines.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+static const struct pci_device_id agp_via_pci_table[];
+
+#define VIA_GARTCTRL 0x80
+#define VIA_APSIZE 0x84
+#define VIA_ATTBASE 0x88
+
+#define VIA_AGP3_GARTCTRL 0x90
+#define VIA_AGP3_APSIZE 0x94
+#define VIA_AGP3_ATTBASE 0x98
+#define VIA_AGPSEL 0xfd
+
+static int via_fetch_size(void)
+{
+ int i;
+ u8 temp;
+ struct aper_size_info_8 *values;
+
+ values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+ pci_read_config_byte(agp_bridge->dev, VIA_APSIZE, &temp);
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+ printk(KERN_ERR PFX "Unknown aperture size from AGP bridge (0x%x)\n", temp);
+ return 0;
+}
+
+
+static int via_configure(void)
+{
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, VIA_APSIZE,
+ current_size->size_value);
+ /* address to map to */
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
+
+ /* GART control register */
+ pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, 0x0000000f);
+
+ /* attbase - aperture GATT base */
+ pci_write_config_dword(agp_bridge->dev, VIA_ATTBASE,
+ (agp_bridge->gatt_bus_addr & 0xfffff000) | 3);
+ return 0;
+}
+
+
+static void via_cleanup(void)
+{
+ struct aper_size_info_8 *previous_size;
+
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ pci_write_config_byte(agp_bridge->dev, VIA_APSIZE,
+ previous_size->size_value);
+ /* Do not disable by writing 0 to VIA_ATTBASE, it screws things up
+ * during reinitialization.
+ */
+}
+
+
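+/* Flush the GART TLB by pulsing bit 7 of the GART control register. */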
+static void via_tlbflush(struct agp_memory *mem)
+{
+ u32 temp;
+
+ pci_read_config_dword(agp_bridge->dev, VIA_GARTCTRL, &temp);
+ temp |= (1<<7);
+ pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp);
+ temp &= ~(1<<7);
+ pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp);
+}
+
+
+static const struct aper_size_info_8 via_generic_sizes[9] =
+{
+ {256, 65536, 6, 0},
+ {128, 32768, 5, 128},
+ {64, 16384, 4, 192},
+ {32, 8192, 3, 224},
+ {16, 4096, 2, 240},
+ {8, 2048, 1, 248},
+ {4, 1024, 0, 252},
+ {2, 512, 0, 254},
+ {1, 256, 0, 255}
+};
+
+
+static int via_fetch_size_agp3(void)
+{
+ int i;
+ u16 temp;
+ struct aper_size_info_16 *values;
+
+ values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
+ pci_read_config_word(agp_bridge->dev, VIA_AGP3_APSIZE, &temp);
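+ /* Only the low 12 bits of the AGP3 APSIZE register encode the size. */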
+ temp &= 0xfff;
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+ return 0;
+}
+
+
+static int via_configure_agp3(void)
+{
+ u32 temp;
+
+ /* address to map to */
+ agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+ AGP_APERTURE_BAR);
+
+ /* attbase - aperture GATT base */
+ pci_write_config_dword(agp_bridge->dev, VIA_AGP3_ATTBASE,
+ agp_bridge->gatt_bus_addr & 0xfffff000);
+
+ /* 1. Enable GTLB in RX90<7>, all AGP aperture access needs to fetch
+ * translation table first.
+ * 2. Enable AGP aperture in RX91<0>. This bit controls the enabling of the
+ * graphics AGP aperture for the AGP3.0 port.
+ */
+ pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp | (3<<7));
+ return 0;
+}
+
+
+static void via_cleanup_agp3(void)
+{
+ struct aper_size_info_16 *previous_size;
+
+ previous_size = A_SIZE_16(agp_bridge->previous_size);
+ pci_write_config_byte(agp_bridge->dev, VIA_APSIZE, previous_size->size_value);
+}
+
+
+static void via_tlbflush_agp3(struct agp_memory *mem)
+{
+ u32 temp;
+
+ pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp & ~(1<<7));
+ pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp);
+}
+
+
+static const struct agp_bridge_driver via_agp3_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = agp3_generic_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 10,
+ .needs_scratch_page = true,
+ .configure = via_configure_agp3,
+ .fetch_size = via_fetch_size_agp3,
+ .cleanup = via_cleanup_agp3,
+ .tlb_flush = via_tlbflush_agp3,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver via_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = via_generic_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 9,
+ .needs_scratch_page = true,
+ .configure = via_configure,
+ .fetch_size = via_fetch_size,
+ .cleanup = via_cleanup,
+ .tlb_flush = via_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static struct agp_device_ids via_agp_device_ids[] =
+{
+ {
+ .device_id = PCI_DEVICE_ID_VIA_82C597_0,
+ .chipset_name = "Apollo VP3",
+ },
+
+ {
+ .device_id = PCI_DEVICE_ID_VIA_82C598_0,
+ .chipset_name = "Apollo MVP3",
+ },
+
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8501_0,
+ .chipset_name = "Apollo MVP4",
+ },
+
+ /* VT8601 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8601_0,
+ .chipset_name = "Apollo ProMedia/PLE133Ta",
+ },
+
+ /* VT82C693A / VT28C694T */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_82C691_0,
+ .chipset_name = "Apollo Pro 133",
+ },
+
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8371_0,
+ .chipset_name = "KX133",
+ },
+
+ /* VT8633 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8633_0,
+ .chipset_name = "Pro 266",
+ },
+
+ {
+ .device_id = PCI_DEVICE_ID_VIA_XN266,
+ .chipset_name = "Apollo Pro266",
+ },
+
+ /* VT8361 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8361,
+ .chipset_name = "KLE133",
+ },
+
+ /* VT8365 / VT8362 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8363_0,
+ .chipset_name = "Twister-K/KT133x/KM133",
+ },
+
+ /* VT8753A */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8753_0,
+ .chipset_name = "P4X266",
+ },
+
+ /* VT8366 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8367_0,
+ .chipset_name = "KT266/KY266x/KT333",
+ },
+
+ /* VT8633 (for CuMine/ Celeron) */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8653_0,
+ .chipset_name = "Pro266T",
+ },
+
+ /* KM266 / PM266 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_XM266,
+ .chipset_name = "PM266/KM266",
+ },
+
+ /* CLE266 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_862X_0,
+ .chipset_name = "CLE266",
+ },
+
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8377_0,
+ .chipset_name = "KT400/KT400A/KT600",
+ },
+
+ /* VT8604 / VT8605 / VT8603
+ * (Apollo Pro133A chipset with S3 Savage4) */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8605_0,
+ .chipset_name = "ProSavage PM133/PL133/PN133"
+ },
+
+ /* P4M266x/P4N266 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8703_51_0,
+ .chipset_name = "P4M266x/P4N266",
+ },
+
+ /* VT8754 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8754C_0,
+ .chipset_name = "PT800",
+ },
+
+ /* P4X600 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8763_0,
+ .chipset_name = "P4X600"
+ },
+
+ /* KM400 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8378_0,
+ .chipset_name = "KM400/KM400A",
+ },
+
+ /* PT880 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_PT880,
+ .chipset_name = "PT880",
+ },
+
+ /* PT880 Ultra */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_PT880ULTRA,
+ .chipset_name = "PT880 Ultra",
+ },
+
+ /* PT890 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8783_0,
+ .chipset_name = "PT890",
+ },
+
+ /* PM800/PN800/PM880/PN880 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_PX8X0_0,
+ .chipset_name = "PM800/PN800/PM880/PN880",
+ },
+ /* KT880 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_3269_0,
+ .chipset_name = "KT880",
+ },
+ /* KTxxx/Px8xx */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_83_87XX_1,
+ .chipset_name = "VT83xx/VT87xx/KTxxx/Px8xx",
+ },
+ /* P4M800 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_3296_0,
+ .chipset_name = "P4M800",
+ },
+ /* P4M800CE */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_P4M800CE,
+ .chipset_name = "VT3314",
+ },
+ /* VT3324 / CX700 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_VT3324,
+ .chipset_name = "CX700",
+ },
+ /* VT3336 - this is a chipset for AMD Athlon/K8 CPUs. Due to the K8's
+ * unique architecture, the AGP resource and behavior differ from
+ * traditional AGP, which resides only in the chipset. AGP is used
+ * by the 3D driver, which wasn't available for the VT3336 and VT3364
+ * generation until now. Unfortunately, testing shows that VT3364 works
+ * but VT3336 doesn't. - explanation from VIA; just leave this
+ * as a placeholder to avoid future patches adding it back in.
+ */
+#if 0
+ {
+ .device_id = PCI_DEVICE_ID_VIA_VT3336,
+ .chipset_name = "VT3336",
+ },
+#endif
+ /* P4M890 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_P4M890,
+ .chipset_name = "P4M890",
+ },
+ /* P4M900 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_VT3364,
+ .chipset_name = "P4M900",
+ },
+ { }, /* dummy final entry, always present */
+};
+
+
+/*
+ * VIA's AGP3 chipsets do magic to make the AGP bridge compliant
+ * with the same standards version as the graphics card.
+ */
+static void check_via_agp3 (struct agp_bridge_data *bridge)
+{
+ u8 reg;
+
+ pci_read_config_byte(bridge->dev, VIA_AGPSEL, &reg);
+ /* Check AGP 2.0 compatibility mode. */
+ if ((reg & (1<<1))==0)
+ bridge->driver = &via_agp3_driver;
+}
+
+
+static int agp_via_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct agp_device_ids *devs = via_agp_device_ids;
+ struct agp_bridge_data *bridge;
+ int j = 0;
+ u8 cap_ptr;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
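+ /* agp_via_pci_table is kept in the same order as via_agp_device_ids,
+ * so the index of the matched PCI ID also selects the chipset name.
+ */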
+ j = ent - agp_via_pci_table;
+ printk (KERN_INFO PFX "Detected VIA %s chipset\n", devs[j].chipset_name);
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+ bridge->driver = &via_driver;
+
+ /*
+ * Garg, there are KT400s with KT266 IDs.
+ */
+ if (pdev->device == PCI_DEVICE_ID_VIA_8367_0) {
+ /* Is there a KT400 subsystem ? */
+ if (pdev->subsystem_device == PCI_DEVICE_ID_VIA_8377_0) {
+ printk(KERN_INFO PFX "Found KT400 in disguise as a KT266.\n");
+ check_via_agp3(bridge);
+ }
+ }
+
+ /* If this is an AGP3 bridge, check which mode its in and adjust. */
+ get_agp_version(bridge);
+ if (bridge->major_version >= 3)
+ check_via_agp3(bridge);
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void agp_via_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static int agp_via_resume(struct device *dev)
+{
+ struct agp_bridge_data *bridge = dev_get_drvdata(dev);
+
+ if (bridge->driver == &via_agp3_driver)
+ return via_configure_agp3();
+ else if (bridge->driver == &via_driver)
+ return via_configure();
+
+ return 0;
+}
+
+/* must be the same order as name table above */
+static const struct pci_device_id agp_via_pci_table[] = {
+#define ID(x) \
+ { \
+ .class = (PCI_CLASS_BRIDGE_HOST << 8), \
+ .class_mask = ~0, \
+ .vendor = PCI_VENDOR_ID_VIA, \
+ .device = x, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ }
+ ID(PCI_DEVICE_ID_VIA_82C597_0),
+ ID(PCI_DEVICE_ID_VIA_82C598_0),
+ ID(PCI_DEVICE_ID_VIA_8501_0),
+ ID(PCI_DEVICE_ID_VIA_8601_0),
+ ID(PCI_DEVICE_ID_VIA_82C691_0),
+ ID(PCI_DEVICE_ID_VIA_8371_0),
+ ID(PCI_DEVICE_ID_VIA_8633_0),
+ ID(PCI_DEVICE_ID_VIA_XN266),
+ ID(PCI_DEVICE_ID_VIA_8361),
+ ID(PCI_DEVICE_ID_VIA_8363_0),
+ ID(PCI_DEVICE_ID_VIA_8753_0),
+ ID(PCI_DEVICE_ID_VIA_8367_0),
+ ID(PCI_DEVICE_ID_VIA_8653_0),
+ ID(PCI_DEVICE_ID_VIA_XM266),
+ ID(PCI_DEVICE_ID_VIA_862X_0),
+ ID(PCI_DEVICE_ID_VIA_8377_0),
+ ID(PCI_DEVICE_ID_VIA_8605_0),
+ ID(PCI_DEVICE_ID_VIA_8703_51_0),
+ ID(PCI_DEVICE_ID_VIA_8754C_0),
+ ID(PCI_DEVICE_ID_VIA_8763_0),
+ ID(PCI_DEVICE_ID_VIA_8378_0),
+ ID(PCI_DEVICE_ID_VIA_PT880),
+ ID(PCI_DEVICE_ID_VIA_PT880ULTRA),
+ ID(PCI_DEVICE_ID_VIA_8783_0),
+ ID(PCI_DEVICE_ID_VIA_PX8X0_0),
+ ID(PCI_DEVICE_ID_VIA_3269_0),
+ ID(PCI_DEVICE_ID_VIA_83_87XX_1),
+ ID(PCI_DEVICE_ID_VIA_3296_0),
+ ID(PCI_DEVICE_ID_VIA_P4M800CE),
+ ID(PCI_DEVICE_ID_VIA_VT3324),
+ ID(PCI_DEVICE_ID_VIA_P4M890),
+ ID(PCI_DEVICE_ID_VIA_VT3364),
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_via_pci_table);
+
+static DEFINE_SIMPLE_DEV_PM_OPS(agp_via_pm_ops, NULL, agp_via_resume);
+
+static struct pci_driver agp_via_pci_driver = {
+ .name = "agpgart-via",
+ .id_table = agp_via_pci_table,
+ .probe = agp_via_probe,
+ .remove = agp_via_remove,
+ .driver.pm = &agp_via_pm_ops,
+};
+
+
+static int __init agp_via_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_via_pci_driver);
+}
+
+static void __exit agp_via_cleanup(void)
+{
+ pci_unregister_driver(&agp_via_pci_driver);
+}
+
+module_init(agp_via_init);
+module_exit(agp_via_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dave Jones");
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
new file mode 100644
index 0000000000..e795390b07
--- /dev/null
+++ b/drivers/char/apm-emulation.c
@@ -0,0 +1,726 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * bios-less APM driver for ARM Linux
+ * Jamey Hicks <jamey@crl.dec.com>
+ * adapted from the APM BIOS driver for Linux by Stephen Rothwell (sfr@linuxcare.com)
+ *
+ * APM 1.2 Reference:
+ * Intel Corporation, Microsoft Corporation. Advanced Power Management
+ * (APM) BIOS Interface Specification, Revision 1.2, February 1996.
+ *
+ * This document is available from Microsoft at:
+ * http://www.microsoft.com/whdc/archive/amp_12.mspx
+ */
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
+#include <linux/apm_bios.h>
+#include <linux/capability.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <linux/apm-emulation.h>
+#include <linux/freezer.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+
+/*
+ * One option can be changed at boot time as follows:
+ * apm=on/off enable/disable APM
+ */
+
+/*
+ * Maximum number of events stored
+ */
+#define APM_MAX_EVENTS 16
+
+struct apm_queue {
+ unsigned int event_head;
+ unsigned int event_tail;
+ apm_event_t events[APM_MAX_EVENTS];
+};
+
+/*
+ * thread states (for threads using a writable /dev/apm_bios fd):
+ *
+ * SUSPEND_NONE: nothing happening
+ * SUSPEND_PENDING: suspend event queued for thread and pending to be read
+ * SUSPEND_READ: suspend event read, pending acknowledgement
+ * SUSPEND_ACKED: acknowledgement received from thread (via ioctl),
+ * waiting for resume
+ * SUSPEND_ACKTO: acknowledgement timeout
+ * SUSPEND_DONE: thread had acked suspend and is now notified of
+ * resume
+ *
+ * SUSPEND_WAIT: this thread invoked suspend and is waiting for resume
+ *
+ * A thread migrates in one of three paths:
+ * NONE -1-> PENDING -2-> READ -3-> ACKED -4-> DONE -5-> NONE
+ * -6-> ACKTO -7-> NONE
+ * NONE -8-> WAIT -9-> NONE
+ *
+ * While in PENDING or READ, the thread is accounted for in the
+ * suspend_acks_pending counter.
+ *
+ * The transitions are invoked as follows:
+ * 1: suspend event is signalled from the core PM code
+ * 2: the suspend event is read from the fd by the userspace thread
+ * 3: userspace thread issues the APM_IOC_SUSPEND ioctl (as ack)
+ * 4: core PM code signals that we have resumed
+ * 5: APM_IOC_SUSPEND ioctl returns
+ *
+ * 6: the notifier invoked from the core PM code times out waiting
+ * for all relevant threads to enter the ACKED state and puts those
+ * that haven't into ACKTO
+ * 7: those threads issue APM_IOC_SUSPEND ioctl too late,
+ * get an error
+ *
+ * 8: userspace thread issues the APM_IOC_SUSPEND ioctl (to suspend),
+ * ioctl code invokes pm_suspend()
+ * 9: pm_suspend() returns indicating resume
+ */
+enum apm_suspend_state {
+ SUSPEND_NONE,
+ SUSPEND_PENDING,
+ SUSPEND_READ,
+ SUSPEND_ACKED,
+ SUSPEND_ACKTO,
+ SUSPEND_WAIT,
+ SUSPEND_DONE,
+};
+
+/*
+ * The per-file APM data
+ */
+struct apm_user {
+ struct list_head list;
+
+ unsigned int suser: 1;
+ unsigned int writer: 1;
+ unsigned int reader: 1;
+
+ int suspend_result;
+ enum apm_suspend_state suspend_state;
+
+ struct apm_queue queue;
+};
+
+/*
+ * Local variables
+ */
+static atomic_t suspend_acks_pending = ATOMIC_INIT(0);
+static atomic_t userspace_notification_inhibit = ATOMIC_INIT(0);
+static int apm_disabled;
+static struct task_struct *kapmd_tsk;
+
+static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
+static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
+
+/*
+ * This is a list of everyone who has opened /dev/apm_bios
+ */
+static DECLARE_RWSEM(user_list_lock);
+static LIST_HEAD(apm_user_list);
+
+/*
+ * kapmd info. kapmd provides us a process context to handle
+ * "APM" events within - specifically necessary if we're going
+ * to be suspending the system.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
+static DEFINE_SPINLOCK(kapmd_queue_lock);
+static struct apm_queue kapmd_queue;
+
+static DEFINE_MUTEX(state_lock);
+
+static const char driver_version[] = "1.13"; /* no spaces */
+
+
+
+/*
+ * Compatibility cruft until the IPAQ people move over to the new
+ * interface.
+ */
+static void __apm_get_power_status(struct apm_power_info *info)
+{
+}
+
+/*
+ * This allows machines to provide their own "apm get power status" function.
+ */
+void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
+EXPORT_SYMBOL(apm_get_power_status);
+
+
+/*
+ * APM event queue management.
+ */
+static inline int queue_empty(struct apm_queue *q)
+{
+ return q->event_head == q->event_tail;
+}
+
+static inline apm_event_t queue_get_event(struct apm_queue *q)
+{
+ q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
+ return q->events[q->event_tail];
+}
+
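+/*
+ * Add an event to the circular buffer. If the queue is full, the oldest
+ * event is dropped (the tail is advanced) so the newest events are kept.
+ */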
+static void queue_add_event(struct apm_queue *q, apm_event_t event)
+{
+ q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
+ if (q->event_head == q->event_tail) {
+ static int notified;
+
+ if (notified++ == 0)
+ printk(KERN_ERR "apm: an event queue overflowed\n");
+ q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
+ }
+ q->events[q->event_head] = event;
+}
+
+static void queue_event(apm_event_t event)
+{
+ struct apm_user *as;
+
+ down_read(&user_list_lock);
+ list_for_each_entry(as, &apm_user_list, list) {
+ if (as->reader)
+ queue_add_event(&as->queue, event);
+ }
+ up_read(&user_list_lock);
+ wake_up_interruptible(&apm_waitqueue);
+}
+
+static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
+{
+ struct apm_user *as = fp->private_data;
+ apm_event_t event;
+ int i = count, ret = 0;
+
+ if (count < sizeof(apm_event_t))
+ return -EINVAL;
+
+ if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));
+
+ while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
+ event = queue_get_event(&as->queue);
+
+ ret = -EFAULT;
+ if (copy_to_user(buf, &event, sizeof(event)))
+ break;
+
+ mutex_lock(&state_lock);
+ if (as->suspend_state == SUSPEND_PENDING &&
+ (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND))
+ as->suspend_state = SUSPEND_READ;
+ mutex_unlock(&state_lock);
+
+ buf += sizeof(event);
+ i -= sizeof(event);
+ }
+
+ if (i < count)
+ ret = count - i;
+
+ return ret;
+}
+
+static __poll_t apm_poll(struct file *fp, poll_table * wait)
+{
+ struct apm_user *as = fp->private_data;
+
+ poll_wait(fp, &apm_waitqueue, wait);
+ return queue_empty(&as->queue) ? 0 : EPOLLIN | EPOLLRDNORM;
+}
+
+/*
+ * apm_ioctl - handle APM ioctl
+ *
+ * APM_IOC_SUSPEND
+ * This IOCTL is overloaded, and performs two functions. It is used to:
+ * - initiate a suspend
+ * - acknowledge a suspend read from /dev/apm_bios.
+ * Only when everyone who has opened /dev/apm_bios with write permission
+ * has acknowledged does the actual suspend happen.
+ */
+static long
+apm_ioctl(struct file *filp, u_int cmd, u_long arg)
+{
+ struct apm_user *as = filp->private_data;
+ int err = -EINVAL;
+
+ if (!as->suser || !as->writer)
+ return -EPERM;
+
+ switch (cmd) {
+ case APM_IOC_SUSPEND:
+ mutex_lock(&state_lock);
+
+ as->suspend_result = -EINTR;
+
+ switch (as->suspend_state) {
+ case SUSPEND_READ:
+ /*
+ * If we read a suspend command from /dev/apm_bios,
+ * then the corresponding APM_IOC_SUSPEND ioctl is
+ * interpreted as an acknowledge.
+ */
+ as->suspend_state = SUSPEND_ACKED;
+ atomic_dec(&suspend_acks_pending);
+ mutex_unlock(&state_lock);
+
+ /*
+ * suspend_acks_pending changed, the notifier needs to
+ * be woken up for this
+ */
+ wake_up(&apm_suspend_waitqueue);
+
+ /*
+ * Wait for the suspend/resume to complete. If there
+ * are pending acknowledges, we wait here for them.
+ * wait_event_freezable() is interruptible and pending
+ * signal can cause busy looping. We aren't doing
+ * anything critical, chill a bit on each iteration.
+ */
+ while (wait_event_freezable(apm_suspend_waitqueue,
+ as->suspend_state != SUSPEND_ACKED))
+ msleep(10);
+ break;
+ case SUSPEND_ACKTO:
+ as->suspend_result = -ETIMEDOUT;
+ mutex_unlock(&state_lock);
+ break;
+ default:
+ as->suspend_state = SUSPEND_WAIT;
+ mutex_unlock(&state_lock);
+
+ /*
+ * Otherwise it is a request to suspend the system.
+ * Just invoke pm_suspend(), we'll handle it from
+ * there via the notifier.
+ */
+ as->suspend_result = pm_suspend(PM_SUSPEND_MEM);
+ }
+
+ mutex_lock(&state_lock);
+ err = as->suspend_result;
+ as->suspend_state = SUSPEND_NONE;
+ mutex_unlock(&state_lock);
+ break;
+ }
+
+ return err;
+}
+
+static int apm_release(struct inode * inode, struct file * filp)
+{
+ struct apm_user *as = filp->private_data;
+
+ filp->private_data = NULL;
+
+ down_write(&user_list_lock);
+ list_del(&as->list);
+ up_write(&user_list_lock);
+
+ /*
+ * We are now unhooked from the chain. As far as new
+ * events are concerned, we no longer exist.
+ */
+ mutex_lock(&state_lock);
+ if (as->suspend_state == SUSPEND_PENDING ||
+ as->suspend_state == SUSPEND_READ)
+ atomic_dec(&suspend_acks_pending);
+ mutex_unlock(&state_lock);
+
+ wake_up(&apm_suspend_waitqueue);
+
+ kfree(as);
+ return 0;
+}
+
+static int apm_open(struct inode * inode, struct file * filp)
+{
+ struct apm_user *as;
+
+ as = kzalloc(sizeof(*as), GFP_KERNEL);
+ if (as) {
+ /*
+ * XXX - this is a tiny bit broken, when we consider BSD
+ * process accounting. If the device is opened by root, we
+ * instantly flag that we used superuser privs. Who knows,
+ * we might close the device immediately without doing a
+ * privileged operation -- cevans
+ */
+ as->suser = capable(CAP_SYS_ADMIN);
+ as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE;
+ as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ;
+
+ down_write(&user_list_lock);
+ list_add(&as->list, &apm_user_list);
+ up_write(&user_list_lock);
+
+ filp->private_data = as;
+ }
+
+ return as ? 0 : -ENOMEM;
+}
+
+static const struct file_operations apm_bios_fops = {
+ .owner = THIS_MODULE,
+ .read = apm_read,
+ .poll = apm_poll,
+ .unlocked_ioctl = apm_ioctl,
+ .open = apm_open,
+ .release = apm_release,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice apm_device = {
+ .minor = APM_MINOR_DEV,
+ .name = "apm_bios",
+ .fops = &apm_bios_fops
+};
+
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Arguments, with symbols from linux/apm_bios.h.
+ *
+ * 0) Linux driver version (this will change if format changes)
+ * 1) APM BIOS Version. Usually 1.0, 1.1 or 1.2.
+ * 2) APM flags from APM Installation Check (0x00):
+ * bit 0: APM_16_BIT_SUPPORT
+ * bit 1: APM_32_BIT_SUPPORT
+ * bit 2: APM_IDLE_SLOWS_CLOCK
+ * bit 3: APM_BIOS_DISABLED
+ * bit 4: APM_BIOS_DISENGAGED
+ * 3) AC line status
+ * 0x00: Off-line
+ * 0x01: On-line
+ * 0x02: On backup power (BIOS >= 1.1 only)
+ * 0xff: Unknown
+ * 4) Battery status
+ * 0x00: High
+ * 0x01: Low
+ * 0x02: Critical
+ * 0x03: Charging
+ * 0x04: Selected battery not present (BIOS >= 1.2 only)
+ * 0xff: Unknown
+ * 5) Battery flag
+ * bit 0: High
+ * bit 1: Low
+ * bit 2: Critical
+ * bit 3: Charging
+ * bit 7: No system battery
+ * 0xff: Unknown
+ * 6) Remaining battery life (percentage of charge):
+ * 0-100: valid
+ * -1: Unknown
+ * 7) Remaining battery life (time units):
+ * Number of remaining minutes or seconds
+ * -1: Unknown
+ * 8) min = minutes; sec = seconds
+ */
+static int proc_apm_show(struct seq_file *m, void *v)
+{
+ struct apm_power_info info;
+ char *units;
+
+ info.ac_line_status = 0xff;
+ info.battery_status = 0xff;
+ info.battery_flag = 0xff;
+ info.battery_life = -1;
+ info.time = -1;
+ info.units = -1;
+
+ if (apm_get_power_status)
+ apm_get_power_status(&info);
+
+ switch (info.units) {
+ default: units = "?"; break;
+ case 0: units = "min"; break;
+ case 1: units = "sec"; break;
+ }
+
+ seq_printf(m, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
+ driver_version, APM_32_BIT_SUPPORT,
+ info.ac_line_status, info.battery_status,
+ info.battery_flag, info.battery_life,
+ info.time, units);
+
+ return 0;
+}
+#endif
+
+static int kapmd(void *arg)
+{
+ do {
+ apm_event_t event;
+
+ wait_event_interruptible(kapmd_wait,
+ !queue_empty(&kapmd_queue) || kthread_should_stop());
+
+ if (kthread_should_stop())
+ break;
+
+ spin_lock_irq(&kapmd_queue_lock);
+ event = 0;
+ if (!queue_empty(&kapmd_queue))
+ event = queue_get_event(&kapmd_queue);
+ spin_unlock_irq(&kapmd_queue_lock);
+
+ switch (event) {
+ case 0:
+ break;
+
+ case APM_LOW_BATTERY:
+ case APM_POWER_STATUS_CHANGE:
+ queue_event(event);
+ break;
+
+ case APM_USER_SUSPEND:
+ case APM_SYS_SUSPEND:
+ pm_suspend(PM_SUSPEND_MEM);
+ break;
+
+ case APM_CRITICAL_SUSPEND:
+ atomic_inc(&userspace_notification_inhibit);
+ pm_suspend(PM_SUSPEND_MEM);
+ atomic_dec(&userspace_notification_inhibit);
+ break;
+ }
+ } while (1);
+
+ return 0;
+}
+
+static int apm_suspend_notifier(struct notifier_block *nb,
+ unsigned long event,
+ void *dummy)
+{
+ struct apm_user *as;
+ int err;
+ unsigned long apm_event;
+
+ /* short-cut emergency suspends */
+ if (atomic_read(&userspace_notification_inhibit))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ apm_event = (event == PM_SUSPEND_PREPARE) ?
+ APM_USER_SUSPEND : APM_USER_HIBERNATION;
+ /*
+ * Queue an event to all "writer" users that we want
+ * to suspend and need their ack.
+ */
+ mutex_lock(&state_lock);
+ down_read(&user_list_lock);
+
+ list_for_each_entry(as, &apm_user_list, list) {
+ if (as->suspend_state != SUSPEND_WAIT && as->reader &&
+ as->writer && as->suser) {
+ as->suspend_state = SUSPEND_PENDING;
+ atomic_inc(&suspend_acks_pending);
+ queue_add_event(&as->queue, apm_event);
+ }
+ }
+
+ up_read(&user_list_lock);
+ mutex_unlock(&state_lock);
+ wake_up_interruptible(&apm_waitqueue);
+
+ /*
+ * Wait for the suspend_acks_pending variable to drop to
+ * zero, meaning everybody acked the suspend event (or the
+ * process was killed).
+ *
+ * If the app won't answer within a short while we assume it
+ * locked up and ignore it.
+ */
+ err = wait_event_interruptible_timeout(
+ apm_suspend_waitqueue,
+ atomic_read(&suspend_acks_pending) == 0,
+ 5*HZ);
+
+ /* timed out */
+ if (err == 0) {
+ /*
+ * Move anybody who timed out to "ack timeout" state.
+ *
+ * Userspace could do the ACK right after we time out
+ * but before we enter the locked section here; that's
+ * fine.
+ */
+ mutex_lock(&state_lock);
+ down_read(&user_list_lock);
+ list_for_each_entry(as, &apm_user_list, list) {
+ if (as->suspend_state == SUSPEND_PENDING ||
+ as->suspend_state == SUSPEND_READ) {
+ as->suspend_state = SUSPEND_ACKTO;
+ atomic_dec(&suspend_acks_pending);
+ }
+ }
+ up_read(&user_list_lock);
+ mutex_unlock(&state_lock);
+ }
+
+ /* let suspend proceed */
+ if (err >= 0)
+ return NOTIFY_OK;
+
+ /* interrupted by signal */
+ return notifier_from_errno(err);
+
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ apm_event = (event == PM_POST_SUSPEND) ?
+ APM_NORMAL_RESUME : APM_HIBERNATION_RESUME;
+ /*
+ * Anyone on the APM queues will think we're still suspended.
+ * Send a message so everyone knows we're now awake again.
+ */
+ queue_event(apm_event);
+
+ /*
+ * Finally, wake up anyone who is sleeping on the suspend.
+ */
+ mutex_lock(&state_lock);
+ down_read(&user_list_lock);
+ list_for_each_entry(as, &apm_user_list, list) {
+ if (as->suspend_state == SUSPEND_ACKED) {
+ /*
+ * TODO: maybe grab error code, needs core
+ * changes to push the error to the notifier
+ * chain (could use the second parameter if
+ * implemented)
+ */
+ as->suspend_result = 0;
+ as->suspend_state = SUSPEND_DONE;
+ }
+ }
+ up_read(&user_list_lock);
+ mutex_unlock(&state_lock);
+
+ wake_up(&apm_suspend_waitqueue);
+ return NOTIFY_OK;
+
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static struct notifier_block apm_notif_block = {
+ .notifier_call = apm_suspend_notifier,
+};
+
+static int __init apm_init(void)
+{
+ int ret;
+
+ if (apm_disabled) {
+ printk(KERN_NOTICE "apm: disabled on user request.\n");
+ return -ENODEV;
+ }
+
+ kapmd_tsk = kthread_create(kapmd, NULL, "kapmd");
+ if (IS_ERR(kapmd_tsk)) {
+ ret = PTR_ERR(kapmd_tsk);
+ kapmd_tsk = NULL;
+ goto out;
+ }
+ wake_up_process(kapmd_tsk);
+
+#ifdef CONFIG_PROC_FS
+ proc_create_single("apm", 0, NULL, proc_apm_show);
+#endif
+
+ ret = misc_register(&apm_device);
+ if (ret)
+ goto out_stop;
+
+ ret = register_pm_notifier(&apm_notif_block);
+ if (ret)
+ goto out_unregister;
+
+ return 0;
+
+ out_unregister:
+ misc_deregister(&apm_device);
+ out_stop:
+ remove_proc_entry("apm", NULL);
+ kthread_stop(kapmd_tsk);
+ out:
+ return ret;
+}
+
+static void __exit apm_exit(void)
+{
+ unregister_pm_notifier(&apm_notif_block);
+ misc_deregister(&apm_device);
+ remove_proc_entry("apm", NULL);
+
+ kthread_stop(kapmd_tsk);
+}
+
+module_init(apm_init);
+module_exit(apm_exit);
+
+MODULE_AUTHOR("Stephen Rothwell");
+MODULE_DESCRIPTION("Advanced Power Management");
+MODULE_LICENSE("GPL");
+
+#ifndef MODULE
+static int __init apm_setup(char *str)
+{
+ while ((str != NULL) && (*str != '\0')) {
+ if (strncmp(str, "off", 3) == 0)
+ apm_disabled = 1;
+ if (strncmp(str, "on", 2) == 0)
+ apm_disabled = 0;
+ str = strchr(str, ',');
+ if (str != NULL)
+ str += strspn(str, ", \t");
+ }
+ return 1;
+}
+
+__setup("apm=", apm_setup);
+#endif
+
+/**
+ * apm_queue_event - queue an APM event for kapmd
+ * @event: APM event
+ *
+ * Queue an APM event for kapmd to process and ultimately take the
+ * appropriate action. Only a subset of events are handled:
+ * %APM_LOW_BATTERY
+ * %APM_POWER_STATUS_CHANGE
+ * %APM_USER_SUSPEND
+ * %APM_SYS_SUSPEND
+ * %APM_CRITICAL_SUSPEND
+ */
+void apm_queue_event(apm_event_t event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kapmd_queue_lock, flags);
+ queue_add_event(&kapmd_queue, event);
+ spin_unlock_irqrestore(&kapmd_queue_lock, flags);
+
+ wake_up_interruptible(&kapmd_wait);
+}
+EXPORT_SYMBOL(apm_queue_event);
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
new file mode 100644
index 0000000000..69314532f3
--- /dev/null
+++ b/drivers/char/applicom.c
@@ -0,0 +1,855 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Derived from Applicom driver ac.c for SCO Unix */
+/* Ported by David Woodhouse, Axiom (Cambridge) Ltd. */
+/* dwmw2@infradead.org 30/8/98 */
+/* $Id: ac.c,v 1.30 2000/03/22 16:03:57 dwmw2 Exp $ */
+/* This module is for Linux 2.1 and 2.2 series kernels. */
+/*****************************************************************************/
+/* J PAGET 18/02/94 version V2.4.2: ioctl with code 2 resets the interrupts, */
+/* so that the board is reset correctly after an unclean exit */
+/* J PAGET 02/05/94 version V2.4.3: in the interrupt handler, LoopCount */
+/* was not initialised to 0. */
+/* F LAFORSE 04/07/95 version V2.6.0: dummy read after each card access */
+/* to release the bus */
+/* J.PAGET 19/11/95 version V2.6.1: the number, address and irq are no longer */
+/* configured and passed as arguments to acinit, but are probed on the bus to */
+/* adapt to the number of cards present on it. IOCTL code 6 reported V2.4.3 */
+/* F.LAFORSE 28/11/95: creation of acXX.o files with the different base */
+/* addresses of the cards, more complete IOCTL 6 */
+/* J.PAGET 19/08/96: copy of version V2.6 to V2.8.0 with no code change */
+/* other than the text V2.6.1 becoming V2.8.0 */
+/*****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/miscdevice.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/nospec.h>
+
+#include <asm/io.h>
+#include <linux/uaccess.h>
+
+#include "applicom.h"
+
+
+/* NOTE: We use for loops with {write,read}b() instead of
+ memcpy_{from,to}io throughout this driver. This is because
+ the board doesn't correctly handle word accesses - only
+ bytes.
+*/
+
+
+#undef DEBUG
+
+#define MAX_BOARD 8 /* maximum number of PC boards possible */
+#define MAX_ISA_BOARD 4
+#define LEN_RAM_IO 0x800
+
+#ifndef PCI_VENDOR_ID_APPLICOM
+#define PCI_VENDOR_ID_APPLICOM 0x1389
+#define PCI_DEVICE_ID_APPLICOM_PCIGENERIC 0x0001
+#define PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN 0x0002
+#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003
+#endif
+
+static DEFINE_MUTEX(ac_mutex);
+static char *applicom_pci_devnames[] = {
+ "PCI board",
+ "PCI2000IBS / PCI2000CAN",
+ "PCI2000PFB"
+};
+
+static const struct pci_device_id applicom_pci_tbl[] = {
+ { PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCIGENERIC) },
+ { PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN) },
+ { PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000PFB) },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, applicom_pci_tbl);
+
+MODULE_AUTHOR("David Woodhouse & Applicom International");
+MODULE_DESCRIPTION("Driver for Applicom Profibus card");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(AC_MINOR);
+
+static struct applicom_board {
+ unsigned long PhysIO;
+ void __iomem *RamIO;
+ wait_queue_head_t FlagSleepSend;
+ long irq;
+ spinlock_t mutex;
+} apbs[MAX_BOARD];
+
+static unsigned int irq; /* interrupt number IRQ */
+static unsigned long mem; /* physical segment of board */
+
+module_param_hw(irq, uint, irq, 0);
+MODULE_PARM_DESC(irq, "IRQ of the Applicom board");
+module_param_hw(mem, ulong, iomem, 0);
+MODULE_PARM_DESC(mem, "Shared Memory Address of Applicom board");
+
+static unsigned int numboards; /* number of installed boards */
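+/* sink for the dummy reads done after each board access to release the bus */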
+static volatile unsigned char Dummy;
+static DECLARE_WAIT_QUEUE_HEAD(FlagSleepRec);
+static unsigned int WriteErrorCount; /* number of write errors */
+static unsigned int ReadErrorCount; /* number of read errors */
+static unsigned int DeviceErrorCount; /* number of device errors */
+
+static ssize_t ac_read (struct file *, char __user *, size_t, loff_t *);
+static ssize_t ac_write (struct file *, const char __user *, size_t, loff_t *);
+static long ac_ioctl(struct file *, unsigned int, unsigned long);
+static irqreturn_t ac_interrupt(int, void *);
+
+static const struct file_operations ac_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = ac_read,
+ .write = ac_write,
+ .unlocked_ioctl = ac_ioctl,
+};
+
+static struct miscdevice ac_miscdev = {
+ AC_MINOR,
+ "ac",
+ &ac_fops
+};
+
+static int dummy; /* dev_id for request_irq() */
+
+static int ac_register_board(unsigned long physloc, void __iomem *loc,
+ unsigned char boardno)
+{
+ volatile unsigned char byte_reset_it;
+
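+ /* Check for the board signature (00 55 AA FF) in the end-of-test area;
+ * bail out if it is not present.
+ */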
+ if((readb(loc + CONF_END_TEST) != 0x00) ||
+ (readb(loc + CONF_END_TEST + 1) != 0x55) ||
+ (readb(loc + CONF_END_TEST + 2) != 0xAA) ||
+ (readb(loc + CONF_END_TEST + 3) != 0xFF))
+ return 0;
+
+ if (!boardno)
+ boardno = readb(loc + NUMCARD_OWNER_TO_PC);
+
+ if (!boardno || boardno > MAX_BOARD) {
+ printk(KERN_WARNING "Board #%d (at 0x%lx) is out of range (1 <= x <= %d).\n",
+ boardno, physloc, MAX_BOARD);
+ return 0;
+ }
+
+ if (apbs[boardno - 1].RamIO) {
+ printk(KERN_WARNING "Board #%d (at 0x%lx) conflicts with previous board #%d (at 0x%lx)\n",
+ boardno, physloc, boardno, apbs[boardno-1].PhysIO);
+ return 0;
+ }
+
+ boardno--;
+
+ apbs[boardno].PhysIO = physloc;
+ apbs[boardno].RamIO = loc;
+ init_waitqueue_head(&apbs[boardno].FlagSleepSend);
+ spin_lock_init(&apbs[boardno].mutex);
+ byte_reset_it = readb(loc + RAM_IT_TO_PC);
+
+ numboards++;
+ return boardno + 1;
+}
+
+static void __exit applicom_exit(void)
+{
+ unsigned int i;
+
+ misc_deregister(&ac_miscdev);
+
+ for (i = 0; i < MAX_BOARD; i++) {
+
+ if (!apbs[i].RamIO)
+ continue;
+
+ if (apbs[i].irq)
+ free_irq(apbs[i].irq, &dummy);
+
+ iounmap(apbs[i].RamIO);
+ }
+}
+
+static int __init applicom_init(void)
+{
+ int i, numisa = 0;
+ struct pci_dev *dev = NULL;
+ void __iomem *RamIO;
+ int boardno, ret;
+
+ printk(KERN_INFO "Applicom driver: $Id: ac.c,v 1.30 2000/03/22 16:03:57 dwmw2 Exp $\n");
+
+ /* No mem and irq given - check for a PCI card */
+
+ while ( (dev = pci_get_class(PCI_CLASS_OTHERS << 16, dev))) {
+
+ if (!pci_match_id(applicom_pci_tbl, dev))
+ continue;
+
+ if (pci_enable_device(dev)) {
+ pci_dev_put(dev);
+ return -EIO;
+ }
+
+ RamIO = ioremap(pci_resource_start(dev, 0), LEN_RAM_IO);
+
+ if (!RamIO) {
+ printk(KERN_INFO "ac.o: Failed to ioremap PCI memory "
+ "space at 0x%llx\n",
+ (unsigned long long)pci_resource_start(dev, 0));
+ pci_disable_device(dev);
+ pci_dev_put(dev);
+ return -EIO;
+ }
+
+ printk(KERN_INFO "Applicom %s found at mem 0x%llx, irq %d\n",
+ applicom_pci_devnames[dev->device-1],
+ (unsigned long long)pci_resource_start(dev, 0),
+ dev->irq);
+
+ boardno = ac_register_board(pci_resource_start(dev, 0),
+ RamIO, 0);
+ if (!boardno) {
+ printk(KERN_INFO "ac.o: PCI Applicom device doesn't have correct signature.\n");
+ iounmap(RamIO);
+ pci_disable_device(dev);
+ continue;
+ }
+
+ if (request_irq(dev->irq, &ac_interrupt, IRQF_SHARED, "Applicom PCI", &dummy)) {
+ printk(KERN_INFO "Could not allocate IRQ %d for PCI Applicom device.\n", dev->irq);
+ iounmap(RamIO);
+ pci_disable_device(dev);
+ apbs[boardno - 1].RamIO = NULL;
+ continue;
+ }
+
+ /* Enable interrupts. */
+
+ writeb(0x40, apbs[boardno - 1].RamIO + RAM_IT_FROM_PC);
+
+ apbs[boardno - 1].irq = dev->irq;
+ }
+
+ /* Finished with PCI cards. If none registered,
+ * and there was no mem/irq specified, exit */
+
+ if (!mem || !irq) {
+ if (numboards)
+ goto fin;
+ else {
+ printk(KERN_INFO "ac.o: No PCI boards found.\n");
+ printk(KERN_INFO "ac.o: For an ISA board you must supply memory and irq parameters.\n");
+ return -ENXIO;
+ }
+ }
+
+ /* Now try the specified ISA cards */
+
+ for (i = 0; i < MAX_ISA_BOARD; i++) {
+ RamIO = ioremap(mem + (LEN_RAM_IO * i), LEN_RAM_IO);
+
+ if (!RamIO) {
+ printk(KERN_INFO "ac.o: Failed to ioremap the ISA card's memory space (slot #%d)\n", i + 1);
+ continue;
+ }
+
+ if (!(boardno = ac_register_board((unsigned long)mem+ (LEN_RAM_IO*i),
+ RamIO,i+1))) {
+ iounmap(RamIO);
+ continue;
+ }
+
+ printk(KERN_NOTICE "Applicom ISA card found at mem 0x%lx, irq %d\n", mem + (LEN_RAM_IO*i), irq);
+
+ if (!numisa) {
+ if (request_irq(irq, &ac_interrupt, IRQF_SHARED, "Applicom ISA", &dummy)) {
+ printk(KERN_WARNING "Could not allocate IRQ %d for ISA Applicom device.\n", irq);
+ iounmap(RamIO);
+ apbs[boardno - 1].RamIO = NULL;
+ }
+ else
+ apbs[boardno - 1].irq = irq;
+ }
+ else
+ apbs[boardno - 1].irq = 0;
+
+ numisa++;
+ }
+
+ if (!numisa)
+ printk(KERN_WARNING "ac.o: No valid ISA Applicom boards found "
+ "at mem 0x%lx\n", mem);
+
+ fin:
+ init_waitqueue_head(&FlagSleepRec);
+
+ WriteErrorCount = 0;
+ ReadErrorCount = 0;
+ DeviceErrorCount = 0;
+
+ if (numboards) {
+ ret = misc_register(&ac_miscdev);
+ if (ret) {
+ printk(KERN_WARNING "ac.o: Unable to register misc device\n");
+ goto out;
+ }
+ for (i = 0; i < MAX_BOARD; i++) {
+ int serial;
+ char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1];
+
+ if (!apbs[i].RamIO)
+ continue;
+
+ for (serial = 0; serial < SERIAL_NUMBER - TYPE_CARD; serial++)
+ boardname[serial] = readb(apbs[i].RamIO + TYPE_CARD + serial);
+
+ boardname[serial] = 0;
+
+
+ printk(KERN_INFO "Applicom board %d: %s, PROM V%d.%d",
+ i+1, boardname,
+ (int)(readb(apbs[i].RamIO + VERS) >> 4),
+ (int)(readb(apbs[i].RamIO + VERS) & 0xF));
+
+ serial = (readb(apbs[i].RamIO + SERIAL_NUMBER) << 16) +
+ (readb(apbs[i].RamIO + SERIAL_NUMBER + 1) << 8) +
+ (readb(apbs[i].RamIO + SERIAL_NUMBER + 2) );
+
+ if (serial != 0)
+ printk(" S/N %d\n", serial);
+ else
+ printk("\n");
+ }
+ return 0;
+ }
+
+ else
+ return -ENXIO;
+
+out:
+ for (i = 0; i < MAX_BOARD; i++) {
+ if (!apbs[i].RamIO)
+ continue;
+ if (apbs[i].irq)
+ free_irq(apbs[i].irq, &dummy);
+ iounmap(apbs[i].RamIO);
+ }
+ return ret;
+}
+
+module_init(applicom_init);
+module_exit(applicom_exit);
+
+
+static ssize_t ac_write(struct file *file, const char __user *buf, size_t count, loff_t * ppos)
+{
+ unsigned int NumCard; /* Board number 1 -> 8 */
+ unsigned int IndexCard; /* Index board number 0 -> 7 */
+ unsigned char TicCard; /* Board TIC to send */
+ unsigned long flags; /* Current priority */
+ struct st_ram_io st_loc;
+ struct mailbox tmpmailbox;
+#ifdef DEBUG
+ int c;
+#endif
+ DECLARE_WAITQUEUE(wait, current);
+
+ if (count != sizeof(struct st_ram_io) + sizeof(struct mailbox)) {
+ static int warncount = 5;
+ if (warncount) {
+ printk(KERN_INFO "Hmmm. write() of Applicom card, length %zd != expected %zd\n",
+ count, sizeof(struct st_ram_io) + sizeof(struct mailbox));
+ warncount--;
+ }
+ return -EINVAL;
+ }
+
+ if(copy_from_user(&st_loc, buf, sizeof(struct st_ram_io)))
+ return -EFAULT;
+
+ if(copy_from_user(&tmpmailbox, &buf[sizeof(struct st_ram_io)],
+ sizeof(struct mailbox)))
+ return -EFAULT;
+
+ NumCard = st_loc.num_card; /* board number to send */
+ TicCard = st_loc.tic_des_from_pc; /* tic number to send */
+ IndexCard = NumCard - 1;
+
+ if (IndexCard >= MAX_BOARD)
+ return -EINVAL;
+ IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+ if (!apbs[IndexCard].RamIO)
+ return -EINVAL;
+
+#ifdef DEBUG
+ printk("Write to applicom card #%d. struct st_ram_io follows:",
+ IndexCard+1);
+
+ for (c = 0; c < sizeof(struct st_ram_io);) {
+
+ printk("\n%5.5X: %2.2X", c, ((unsigned char *) &st_loc)[c]);
+
+ for (c++; c % 8 && c < sizeof(struct st_ram_io); c++) {
+ printk(" %2.2X", ((unsigned char *) &st_loc)[c]);
+ }
+ }
+
+ printk("\nstruct mailbox follows:");
+
+ for (c = 0; c < sizeof(struct mailbox);) {
+ printk("\n%5.5X: %2.2X", c, ((unsigned char *) &tmpmailbox)[c]);
+
+ for (c++; c % 8 && c < sizeof(struct mailbox); c++) {
+ printk(" %2.2X", ((unsigned char *) &tmpmailbox)[c]);
+ }
+ }
+
+ printk("\n");
+#endif
+
+ spin_lock_irqsave(&apbs[IndexCard].mutex, flags);
+
+ /* Check that the ready byte is sane */
+ if(readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY) > 2) {
+ Dummy = readb(apbs[IndexCard].RamIO + VERS);
+ spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
+ printk(KERN_WARNING "APPLICOM driver write error board %d, DataFromPcReady = %d\n",
+ IndexCard,(int)readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY));
+ DeviceErrorCount++;
+ return -EIO;
+ }
+
+ /* Place ourselves on the wait queue */
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&apbs[IndexCard].FlagSleepSend, &wait);
+
+ /* Check whether the card is ready for us */
+ while (readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY) != 0) {
+ Dummy = readb(apbs[IndexCard].RamIO + VERS);
+ /* It's busy. Sleep. */
+
+ spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
+ schedule();
+ if (signal_pending(current)) {
+ remove_wait_queue(&apbs[IndexCard].FlagSleepSend,
+ &wait);
+ return -EINTR;
+ }
+ spin_lock_irqsave(&apbs[IndexCard].mutex, flags);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+
+ /* We may not have actually slept */
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&apbs[IndexCard].FlagSleepSend, &wait);
+
+ writeb(1, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
+
+ /* Which is best - lock down the pages with rawio and then
+ copy directly, or use bounce buffers? For now we do the latter
+ because it works with 2.2 still */
+ {
+ unsigned char *from = (unsigned char *) &tmpmailbox;
+ void __iomem *to = apbs[IndexCard].RamIO + RAM_FROM_PC;
+ int c;
+
+ for (c = 0; c < sizeof(struct mailbox); c++)
+ writeb(*(from++), to++);
+ }
+
+ writeb(0x20, apbs[IndexCard].RamIO + TIC_OWNER_FROM_PC);
+ writeb(0xff, apbs[IndexCard].RamIO + NUMCARD_OWNER_FROM_PC);
+ writeb(TicCard, apbs[IndexCard].RamIO + TIC_DES_FROM_PC);
+ writeb(NumCard, apbs[IndexCard].RamIO + NUMCARD_DES_FROM_PC);
+ writeb(2, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
+ writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
+ Dummy = readb(apbs[IndexCard].RamIO + VERS);
+ spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
+ return 0;
+}
+
+static int do_ac_read(int IndexCard, char __user *buf,
+ struct st_ram_io *st_loc, struct mailbox *mailbox)
+{
+ void __iomem *from = apbs[IndexCard].RamIO + RAM_TO_PC;
+ unsigned char *to = (unsigned char *)mailbox;
+#ifdef DEBUG
+ int c;
+#endif
+
+ st_loc->tic_owner_to_pc = readb(apbs[IndexCard].RamIO + TIC_OWNER_TO_PC);
+ st_loc->numcard_owner_to_pc = readb(apbs[IndexCard].RamIO + NUMCARD_OWNER_TO_PC);
+
+
+ {
+ int c;
+
+ for (c = 0; c < sizeof(struct mailbox); c++)
+ *(to++) = readb(from++);
+ }
+ writeb(1, apbs[IndexCard].RamIO + ACK_FROM_PC_READY);
+ writeb(1, apbs[IndexCard].RamIO + TYP_ACK_FROM_PC);
+ writeb(IndexCard+1, apbs[IndexCard].RamIO + NUMCARD_ACK_FROM_PC);
+ writeb(readb(apbs[IndexCard].RamIO + TIC_OWNER_TO_PC),
+ apbs[IndexCard].RamIO + TIC_ACK_FROM_PC);
+ writeb(2, apbs[IndexCard].RamIO + ACK_FROM_PC_READY);
+ writeb(0, apbs[IndexCard].RamIO + DATA_TO_PC_READY);
+ writeb(2, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
+ Dummy = readb(apbs[IndexCard].RamIO + VERS);
+
+#ifdef DEBUG
+ printk("Read from applicom card #%d. struct st_ram_io follows:", NumCard);
+
+ for (c = 0; c < sizeof(struct st_ram_io);) {
+ printk("\n%5.5X: %2.2X", c, ((unsigned char *)st_loc)[c]);
+
+ for (c++; c % 8 && c < sizeof(struct st_ram_io); c++) {
+ printk(" %2.2X", ((unsigned char *)st_loc)[c]);
+ }
+ }
+
+ printk("\nstruct mailbox follows:");
+
+ for (c = 0; c < sizeof(struct mailbox);) {
+ printk("\n%5.5X: %2.2X", c, ((unsigned char *)mailbox)[c]);
+
+ for (c++; c % 8 && c < sizeof(struct mailbox); c++) {
+ printk(" %2.2X", ((unsigned char *)mailbox)[c]);
+ }
+ }
+ printk("\n");
+#endif
+ return (sizeof(struct st_ram_io) + sizeof(struct mailbox));
+}
+
+static ssize_t ac_read (struct file *filp, char __user *buf, size_t count, loff_t *ptr)
+{
+ unsigned long flags;
+ unsigned int i;
+ unsigned char tmp;
+ int ret = 0;
+ DECLARE_WAITQUEUE(wait, current);
+#ifdef DEBUG
+ int loopcount=0;
+#endif
+ /* No need to ratelimit this. Only root can trigger it anyway */
+ if (count != sizeof(struct st_ram_io) + sizeof(struct mailbox)) {
+ printk( KERN_WARNING "Hmmm. read() of Applicom card, length %zd != expected %zd\n",
+ count,sizeof(struct st_ram_io) + sizeof(struct mailbox));
+ return -EINVAL;
+ }
+
+ while(1) {
+ /* Stick ourselves on the wait queue */
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&FlagSleepRec, &wait);
+
+ /* Scan each board, looking for one which has a packet for us */
+ for (i=0; i < MAX_BOARD; i++) {
+ if (!apbs[i].RamIO)
+ continue;
+ spin_lock_irqsave(&apbs[i].mutex, flags);
+
+ tmp = readb(apbs[i].RamIO + DATA_TO_PC_READY);
+
+ if (tmp == 2) {
+ struct st_ram_io st_loc;
+ struct mailbox mailbox;
+
+ /* Got a packet for us */
+ memset(&st_loc, 0, sizeof(st_loc));
+ ret = do_ac_read(i, buf, &st_loc, &mailbox);
+ spin_unlock_irqrestore(&apbs[i].mutex, flags);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&FlagSleepRec, &wait);
+
+ if (copy_to_user(buf, &st_loc, sizeof(st_loc)))
+ return -EFAULT;
+ if (copy_to_user(buf + sizeof(st_loc), &mailbox, sizeof(mailbox)))
+ return -EFAULT;
+ return tmp;
+ }
+
+ if (tmp > 2) {
+ /* Got an error */
+ Dummy = readb(apbs[i].RamIO + VERS);
+
+ spin_unlock_irqrestore(&apbs[i].mutex, flags);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&FlagSleepRec, &wait);
+
+ printk(KERN_WARNING "APPLICOM driver read error board %d, DataToPcReady = %d\n",
+ i,(int)readb(apbs[i].RamIO + DATA_TO_PC_READY));
+ DeviceErrorCount++;
+ return -EIO;
+ }
+
+ /* Nothing for us. Try the next board */
+ Dummy = readb(apbs[i].RamIO + VERS);
+ spin_unlock_irqrestore(&apbs[i].mutex, flags);
+
+ } /* per board */
+
+ /* OK - No boards had data for us. Sleep now */
+
+ schedule();
+ remove_wait_queue(&FlagSleepRec, &wait);
+
+ if (signal_pending(current))
+ return -EINTR;
+
+#ifdef DEBUG
+ if (loopcount++ > 2) {
+ printk(KERN_DEBUG "Looping in ac_read. loopcount %d\n", loopcount);
+ }
+#endif
+ }
+}
+
+static irqreturn_t ac_interrupt(int vec, void *dev_instance)
+{
+ unsigned int i;
+ unsigned int FlagInt;
+ unsigned int LoopCount;
+ int handled = 0;
+
+ // printk("Applicom interrupt on IRQ %d occurred\n", vec);
+
+ LoopCount = 0;
+
+ do {
+ FlagInt = 0;
+ for (i = 0; i < MAX_BOARD; i++) {
+
+ /* Skip if this board doesn't exist */
+ if (!apbs[i].RamIO)
+ continue;
+
+ spin_lock(&apbs[i].mutex);
+
+ /* Skip if this board doesn't want attention */
+ if(readb(apbs[i].RamIO + RAM_IT_TO_PC) == 0) {
+ spin_unlock(&apbs[i].mutex);
+ continue;
+ }
+
+ handled = 1;
+ FlagInt = 1;
+ writeb(0, apbs[i].RamIO + RAM_IT_TO_PC);
+
+ if (readb(apbs[i].RamIO + DATA_TO_PC_READY) > 2) {
+ printk(KERN_WARNING "APPLICOM driver interrupt err board %d, DataToPcReady = %d\n",
+ i+1,(int)readb(apbs[i].RamIO + DATA_TO_PC_READY));
+ DeviceErrorCount++;
+ }
+
+ if((readb(apbs[i].RamIO + DATA_FROM_PC_READY) > 2) &&
+ (readb(apbs[i].RamIO + DATA_FROM_PC_READY) != 6)) {
+
+ printk(KERN_WARNING "APPLICOM driver interrupt err board %d, DataFromPcReady = %d\n",
+ i+1,(int)readb(apbs[i].RamIO + DATA_FROM_PC_READY));
+ DeviceErrorCount++;
+ }
+
+ if (readb(apbs[i].RamIO + DATA_TO_PC_READY) == 2) { /* mailbox sent by the card ? */
+ if (waitqueue_active(&FlagSleepRec)) {
+ wake_up_interruptible(&FlagSleepRec);
+ }
+ }
+
+ if (readb(apbs[i].RamIO + DATA_FROM_PC_READY) == 0) { /* ram i/o free for write by pc ? */
+ if (waitqueue_active(&apbs[i].FlagSleepSend)) { /* process sleep during read ? */
+ wake_up_interruptible(&apbs[i].FlagSleepSend);
+ }
+ }
+ Dummy = readb(apbs[i].RamIO + VERS);
+
+ if(readb(apbs[i].RamIO + RAM_IT_TO_PC)) {
+ /* There's another int waiting on this card */
+ spin_unlock(&apbs[i].mutex);
+ i--;
+ } else {
+ spin_unlock(&apbs[i].mutex);
+ }
+ }
+ if (FlagInt)
+ LoopCount = 0;
+ else
+ LoopCount++;
+ } while(LoopCount < 2);
+ return IRQ_RETVAL(handled);
+}
+
+
+
+static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+
+{				/* @ ADG or ATO depending on the case */
+ int i;
+ unsigned char IndexCard;
+ void __iomem *pmem;
+ int ret = 0;
+ static int warncount = 10;
+ volatile unsigned char byte_reset_it;
+ struct st_ram_io *adgl;
+ void __user *argp = (void __user *)arg;
+
+ /* In general, the device is only openable by root anyway, so we're not
+ particularly concerned that bogus ioctls can flood the console. */
+
+ adgl = memdup_user(argp, sizeof(struct st_ram_io));
+ if (IS_ERR(adgl))
+ return PTR_ERR(adgl);
+
+ mutex_lock(&ac_mutex);
+ IndexCard = adgl->num_card-1;
+
+ if (cmd != 6 && IndexCard >= MAX_BOARD)
+ goto err;
+ IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+ if (cmd != 6 && !apbs[IndexCard].RamIO)
+ goto err;
+
+ switch (cmd) {
+
+ case 0:
+ pmem = apbs[IndexCard].RamIO;
+ for (i = 0; i < sizeof(struct st_ram_io); i++)
+ ((unsigned char *)adgl)[i]=readb(pmem++);
+ if (copy_to_user(argp, adgl, sizeof(struct st_ram_io)))
+ ret = -EFAULT;
+ break;
+ case 1:
+ pmem = apbs[IndexCard].RamIO + CONF_END_TEST;
+ for (i = 0; i < 4; i++)
+ adgl->conf_end_test[i] = readb(pmem++);
+ for (i = 0; i < 2; i++)
+ adgl->error_code[i] = readb(pmem++);
+ for (i = 0; i < 4; i++)
+ adgl->parameter_error[i] = readb(pmem++);
+ pmem = apbs[IndexCard].RamIO + VERS;
+ adgl->vers = readb(pmem);
+ pmem = apbs[IndexCard].RamIO + TYPE_CARD;
+ for (i = 0; i < 20; i++)
+ adgl->reserv1[i] = readb(pmem++);
+ *(int *)&adgl->reserv1[20] =
+ (readb(apbs[IndexCard].RamIO + SERIAL_NUMBER) << 16) +
+ (readb(apbs[IndexCard].RamIO + SERIAL_NUMBER + 1) << 8) +
+ (readb(apbs[IndexCard].RamIO + SERIAL_NUMBER + 2) );
+
+ if (copy_to_user(argp, adgl, sizeof(struct st_ram_io)))
+ ret = -EFAULT;
+ break;
+ case 2:
+ pmem = apbs[IndexCard].RamIO + CONF_END_TEST;
+ for (i = 0; i < 10; i++)
+ writeb(0xff, pmem++);
+ writeb(adgl->data_from_pc_ready,
+ apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
+
+ writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
+
+ for (i = 0; i < MAX_BOARD; i++) {
+ if (apbs[i].RamIO) {
+ byte_reset_it = readb(apbs[i].RamIO + RAM_IT_TO_PC);
+ }
+ }
+ break;
+ case 3:
+ pmem = apbs[IndexCard].RamIO + TIC_DES_FROM_PC;
+ writeb(adgl->tic_des_from_pc, pmem);
+ break;
+ case 4:
+ pmem = apbs[IndexCard].RamIO + TIC_OWNER_TO_PC;
+ adgl->tic_owner_to_pc = readb(pmem++);
+ adgl->numcard_owner_to_pc = readb(pmem);
+ if (copy_to_user(argp, adgl,sizeof(struct st_ram_io)))
+ ret = -EFAULT;
+ break;
+ case 5:
+ writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_OWNER_TO_PC);
+ writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_DES_FROM_PC);
+ writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_ACK_FROM_PC);
+ writeb(4, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
+ writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
+ break;
+ case 6:
+ printk(KERN_INFO "APPLICOM driver release .... V2.8.0 ($Revision: 1.30 $)\n");
+ printk(KERN_INFO "Number of installed boards . %d\n", (int) numboards);
+ printk(KERN_INFO "Segment of board ........... %X\n", (int) mem);
+ printk(KERN_INFO "Interrupt IRQ number ....... %d\n", (int) irq);
+ for (i = 0; i < MAX_BOARD; i++) {
+ int serial;
+ char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1];
+
+ if (!apbs[i].RamIO)
+ continue;
+
+ for (serial = 0; serial < SERIAL_NUMBER - TYPE_CARD; serial++)
+ boardname[serial] = readb(apbs[i].RamIO + TYPE_CARD + serial);
+ boardname[serial] = 0;
+
+ printk(KERN_INFO "Prom version board %d ....... V%d.%d %s",
+ i+1,
+ (int)(readb(apbs[i].RamIO + VERS) >> 4),
+ (int)(readb(apbs[i].RamIO + VERS) & 0xF),
+ boardname);
+
+
+ serial = (readb(apbs[i].RamIO + SERIAL_NUMBER) << 16) +
+ (readb(apbs[i].RamIO + SERIAL_NUMBER + 1) << 8) +
+ (readb(apbs[i].RamIO + SERIAL_NUMBER + 2) );
+
+ if (serial != 0)
+ printk(" S/N %d\n", serial);
+ else
+ printk("\n");
+ }
+ if (DeviceErrorCount != 0)
+ printk(KERN_INFO "DeviceErrorCount ........... %d\n", DeviceErrorCount);
+ if (ReadErrorCount != 0)
+ printk(KERN_INFO "ReadErrorCount ............. %d\n", ReadErrorCount);
+ if (WriteErrorCount != 0)
+ printk(KERN_INFO "WriteErrorCount ............ %d\n", WriteErrorCount);
+ if (waitqueue_active(&FlagSleepRec))
+ printk(KERN_INFO "Process in read pending\n");
+ for (i = 0; i < MAX_BOARD; i++) {
+ if (apbs[i].RamIO && waitqueue_active(&apbs[i].FlagSleepSend))
+ printk(KERN_INFO "Process in write pending board %d\n",i+1);
+ }
+ break;
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+ Dummy = readb(apbs[IndexCard].RamIO + VERS);
+ kfree(adgl);
+ mutex_unlock(&ac_mutex);
+ return ret;
+
+err:
+ if (warncount) {
+ pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
+ (int)IndexCard + 1);
+ warncount--;
+ }
+ kfree(adgl);
+ mutex_unlock(&ac_mutex);
+ return -EINVAL;
+
+}
+
diff --git a/drivers/char/applicom.h b/drivers/char/applicom.h
new file mode 100644
index 0000000000..282e08f159
--- /dev/null
+++ b/drivers/char/applicom.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* $Id: applicom.h,v 1.2 1999/08/28 15:09:49 dwmw2 Exp $ */
+
+
+#ifndef __LINUX_APPLICOM_H__
+#define __LINUX_APPLICOM_H__
+
+
+#define DATA_TO_PC_READY 0x00
+#define TIC_OWNER_TO_PC 0x01
+#define NUMCARD_OWNER_TO_PC 0x02
+#define TIC_DES_TO_PC 0x03
+#define NUMCARD_DES_TO_PC 0x04
+#define DATA_FROM_PC_READY 0x05
+#define TIC_OWNER_FROM_PC 0x06
+#define NUMCARD_OWNER_FROM_PC 0x07
+#define TIC_DES_FROM_PC 0x08
+#define NUMCARD_DES_FROM_PC 0x09
+#define ACK_FROM_PC_READY 0x0E
+#define TIC_ACK_FROM_PC 0x0F
+#define NUMCARD_ACK_FROM_PC 0x010
+#define TYP_ACK_FROM_PC 0x011
+#define CONF_END_TEST 0x012
+#define ERROR_CODE 0x016
+#define PARAMETER_ERROR 0x018
+#define VERS 0x01E
+#define RAM_TO_PC 0x040
+#define RAM_FROM_PC 0x0170
+#define TYPE_CARD 0x03C0
+#define SERIAL_NUMBER 0x03DA
+#define RAM_IT_FROM_PC 0x03FE
+#define RAM_IT_TO_PC 0x03FF
+
+struct mailbox{
+ u16 stjb_codef; /* offset 00 */
+ s16 stjb_status; /* offset 02 */
+ u16 stjb_ticuser_root; /* offset 04 */
+ u8 stjb_piduser[4]; /* offset 06 */
+ u16 stjb_mode; /* offset 0A */
+ u16 stjb_time; /* offset 0C */
+ u16 stjb_stop; /* offset 0E */
+ u16 stjb_nfonc; /* offset 10 */
+ u16 stjb_ncard; /* offset 12 */
+ u16 stjb_nchan; /* offset 14 */
+ u16 stjb_nes; /* offset 16 */
+ u16 stjb_nb; /* offset 18 */
+ u16 stjb_typvar; /* offset 1A */
+ u32 stjb_adr; /* offset 1C */
+ u16 stjb_ticuser_dispcyc; /* offset 20 */
+ u16 stjb_ticuser_protocol; /* offset 22 */
+ u8 stjb_filler[12]; /* offset 24 */
+ u8 stjb_data[256]; /* offset 30 */
+ };
+
+struct st_ram_io
+{
+ unsigned char data_to_pc_ready;
+ unsigned char tic_owner_to_pc;
+ unsigned char numcard_owner_to_pc;
+ unsigned char tic_des_to_pc;
+ unsigned char numcard_des_to_pc;
+ unsigned char data_from_pc_ready;
+ unsigned char tic_owner_from_pc;
+ unsigned char numcard_owner_from_pc;
+ unsigned char tic_des_from_pc;
+ unsigned char numcard_des_from_pc;
+ unsigned char ack_to_pc_ready;
+ unsigned char tic_ack_to_pc;
+ unsigned char numcard_ack_to_pc;
+ unsigned char typ_ack_to_pc;
+ unsigned char ack_from_pc_ready;
+ unsigned char tic_ack_from_pc;
+ unsigned char numcard_ack_from_pc;
+ unsigned char typ_ack_from_pc;
+ unsigned char conf_end_test[4];
+ unsigned char error_code[2];
+ unsigned char parameter_error[4];
+ unsigned char time_base;
+ unsigned char nul_inc;
+ unsigned char vers;
+ unsigned char num_card;
+ unsigned char reserv1[32];
+};
+
+
+#endif /* __LINUX_APPLICOM_H__ */
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
new file mode 100644
index 0000000000..70d31aed90
--- /dev/null
+++ b/drivers/char/bsr.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* IBM POWER Barrier Synchronization Register Driver
+ *
+ * Copyright IBM Corporation 2008
+ *
+ * Author: Sonny Rao <sonnyrao@us.ibm.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+/*
+ This driver exposes a special register which can be used for fast
+ synchronization across a large SMP machine. The hardware is exposed
+ as an array of bytes where each process will write to one of the bytes to
+ indicate it has finished the current stage and this update is broadcast to
+ all processors without having to bounce a cacheline between them. In
+ POWER5 and POWER6 there is one of these registers per SMP, but it is
+ presented in two forms; first, it is given as a whole and then as a number
+ of smaller registers which alias to parts of the single whole register.
+ This can potentially allow multiple groups of processes to each have their
+ own private synchronization device.
+
+ Note that this hardware *must* be written to using *only* single byte writes.
+ It may be read using 1, 2, 4, or 8 byte loads, which must be aligned since
+ this region is treated as cache-inhibited. Processes should also use a
+ full sync before and after writing to the BSR to ensure all stores and
+ the BSR update have made it to all chips in the system.
+*/
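+
+/*
+ Illustrative only, not part of this driver: a user-space consumer of one
+ of the /dev/bsr* nodes created below would typically mmap() the device
+ and obey the single-byte-write rule described above. A minimal sketch
+ (device name and slot indices are hypothetical, and the memory barriers
+ required by the note above are omitted for brevity):
+
+	int fd = open("/dev/bsr64_0", O_RDWR);
+	unsigned char *bsr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+				  MAP_SHARED, fd, 0);
+	bsr[my_slot] = 1;			// single-byte store only
+	while (bsr[other_slot] == 0)		// aligned loads of any width
+		;
+*/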
+
+/* This is an arbitrary number; up to Power6 it's been 17 or fewer */
+#define BSR_MAX_DEVS (32)
+
+struct bsr_dev {
+ u64 bsr_addr; /* Real address */
+ u64 bsr_len; /* length of mem region we can map */
+ unsigned bsr_bytes; /* size of the BSR reg itself */
+ unsigned bsr_stride; /* interval at which BSR repeats in the page */
+ unsigned bsr_type; /* maps to enum below */
+ unsigned bsr_num; /* bsr id number for its type */
+ int bsr_minor;
+
+ struct list_head bsr_list;
+
+ dev_t bsr_dev;
+ struct cdev bsr_cdev;
+ struct device *bsr_device;
+ char bsr_name[32];
+
+};
+
+static unsigned total_bsr_devs;
+static LIST_HEAD(bsr_devs);
+static int bsr_major;
+
+enum {
+ BSR_8 = 0,
+ BSR_16 = 1,
+ BSR_64 = 2,
+ BSR_128 = 3,
+ BSR_4096 = 4,
+ BSR_UNKNOWN = 5,
+ BSR_MAX = 6,
+};
+
+static unsigned bsr_types[BSR_MAX];
+
+static ssize_t
+bsr_size_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
+ return sprintf(buf, "%u\n", bsr_dev->bsr_bytes);
+}
+static DEVICE_ATTR_RO(bsr_size);
+
+static ssize_t
+bsr_stride_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
+ return sprintf(buf, "%u\n", bsr_dev->bsr_stride);
+}
+static DEVICE_ATTR_RO(bsr_stride);
+
+static ssize_t
+bsr_length_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
+ return sprintf(buf, "%llu\n", bsr_dev->bsr_len);
+}
+static DEVICE_ATTR_RO(bsr_length);
+
+static struct attribute *bsr_dev_attrs[] = {
+ &dev_attr_bsr_size.attr,
+ &dev_attr_bsr_stride.attr,
+ &dev_attr_bsr_length.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(bsr_dev);
+
+static const struct class bsr_class = {
+ .name = "bsr",
+ .dev_groups = bsr_dev_groups,
+};
+
+static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long size = vma->vm_end - vma->vm_start;
+ struct bsr_dev *dev = filp->private_data;
+ int ret;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ /* check for the case of a small BSR device and map one 4k page for it*/
+ if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE)
+ ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
+ vma->vm_page_prot);
+ else if (size <= dev->bsr_len)
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ dev->bsr_addr >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ else
+ return -EINVAL;
+
+ if (ret)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int bsr_open(struct inode *inode, struct file *filp)
+{
+ struct cdev *cdev = inode->i_cdev;
+ struct bsr_dev *dev = container_of(cdev, struct bsr_dev, bsr_cdev);
+
+ filp->private_data = dev;
+ return 0;
+}
+
+static const struct file_operations bsr_fops = {
+ .owner = THIS_MODULE,
+ .mmap = bsr_mmap,
+ .open = bsr_open,
+ .llseek = noop_llseek,
+};
+
+static void bsr_cleanup_devs(void)
+{
+ struct bsr_dev *cur, *n;
+
+ list_for_each_entry_safe(cur, n, &bsr_devs, bsr_list) {
+ if (cur->bsr_device) {
+ cdev_del(&cur->bsr_cdev);
+ device_del(cur->bsr_device);
+ }
+ list_del(&cur->bsr_list);
+ kfree(cur);
+ }
+}
+
+static int bsr_add_node(struct device_node *bn)
+{
+ int bsr_stride_len, bsr_bytes_len, num_bsr_devs;
+ const u32 *bsr_stride;
+ const u32 *bsr_bytes;
+ unsigned i;
+ int ret = -ENODEV;
+
+ bsr_stride = of_get_property(bn, "ibm,lock-stride", &bsr_stride_len);
+ bsr_bytes = of_get_property(bn, "ibm,#lock-bytes", &bsr_bytes_len);
+
+ if (!bsr_stride || !bsr_bytes ||
+ (bsr_stride_len != bsr_bytes_len)) {
+ printk(KERN_ERR "bsr of-node has missing/incorrect property\n");
+ return ret;
+ }
+
+ num_bsr_devs = bsr_bytes_len / sizeof(u32);
+
+ for (i = 0 ; i < num_bsr_devs; i++) {
+ struct bsr_dev *cur = kzalloc(sizeof(struct bsr_dev),
+ GFP_KERNEL);
+ struct resource res;
+ int result;
+
+ if (!cur) {
+ printk(KERN_ERR "Unable to alloc bsr dev\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ result = of_address_to_resource(bn, i, &res);
+ if (result < 0) {
+ printk(KERN_ERR "bsr of-node has invalid reg property, skipping\n");
+ kfree(cur);
+ continue;
+ }
+
+ cur->bsr_minor = i + total_bsr_devs;
+ cur->bsr_addr = res.start;
+ cur->bsr_len = resource_size(&res);
+ cur->bsr_bytes = bsr_bytes[i];
+ cur->bsr_stride = bsr_stride[i];
+ cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs);
+
+		/* if we have a bsr_len of > 4k and less than PAGE_SIZE (64k pages) */
+ /* we can only map 4k of it, so only advertise the 4k in sysfs */
+ if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
+ cur->bsr_len = 4096;
+
+ switch(cur->bsr_bytes) {
+ case 8:
+ cur->bsr_type = BSR_8;
+ break;
+ case 16:
+ cur->bsr_type = BSR_16;
+ break;
+ case 64:
+ cur->bsr_type = BSR_64;
+ break;
+ case 128:
+ cur->bsr_type = BSR_128;
+ break;
+ case 4096:
+ cur->bsr_type = BSR_4096;
+ break;
+ default:
+ cur->bsr_type = BSR_UNKNOWN;
+ }
+
+ cur->bsr_num = bsr_types[cur->bsr_type];
+ snprintf(cur->bsr_name, 32, "bsr%d_%d",
+ cur->bsr_bytes, cur->bsr_num);
+
+ cdev_init(&cur->bsr_cdev, &bsr_fops);
+ result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1);
+ if (result) {
+ kfree(cur);
+ goto out_err;
+ }
+
+ cur->bsr_device = device_create(&bsr_class, NULL, cur->bsr_dev,
+ cur, "%s", cur->bsr_name);
+ if (IS_ERR(cur->bsr_device)) {
+ printk(KERN_ERR "device_create failed for %s\n",
+ cur->bsr_name);
+ cdev_del(&cur->bsr_cdev);
+ kfree(cur);
+ goto out_err;
+ }
+
+ bsr_types[cur->bsr_type] = cur->bsr_num + 1;
+ list_add_tail(&cur->bsr_list, &bsr_devs);
+ }
+
+ total_bsr_devs += num_bsr_devs;
+
+ return 0;
+
+ out_err:
+
+ bsr_cleanup_devs();
+ return ret;
+}
+
+static int bsr_create_devs(struct device_node *bn)
+{
+ int ret;
+
+ while (bn) {
+ ret = bsr_add_node(bn);
+ if (ret) {
+ of_node_put(bn);
+ return ret;
+ }
+ bn = of_find_compatible_node(bn, NULL, "ibm,bsr");
+ }
+ return 0;
+}
+
+static int __init bsr_init(void)
+{
+ struct device_node *np;
+ dev_t bsr_dev;
+ int ret = -ENODEV;
+
+ np = of_find_compatible_node(NULL, NULL, "ibm,bsr");
+ if (!np)
+ goto out_err;
+
+ ret = class_register(&bsr_class);
+ if (ret)
+ goto out_err_1;
+
+ ret = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr");
+ bsr_major = MAJOR(bsr_dev);
+ if (ret < 0) {
+ printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n");
+ goto out_err_2;
+ }
+
+ ret = bsr_create_devs(np);
+ if (ret < 0) {
+ np = NULL;
+ goto out_err_3;
+ }
+
+ return 0;
+
+ out_err_3:
+ unregister_chrdev_region(bsr_dev, BSR_MAX_DEVS);
+
+ out_err_2:
+ class_unregister(&bsr_class);
+
+ out_err_1:
+ of_node_put(np);
+
+ out_err:
+
+ return ret;
+}
+
+static void __exit bsr_exit(void)
+{
+
+ bsr_cleanup_devs();
+
+ class_unregister(&bsr_class);
+
+ if (bsr_major)
+ unregister_chrdev_region(MKDEV(bsr_major, 0), BSR_MAX_DEVS);
+}
+
+module_init(bsr_init);
+module_exit(bsr_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sonny Rao <sonnyrao@us.ibm.com>");
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
new file mode 100644
index 0000000000..cf89a96311
--- /dev/null
+++ b/drivers/char/ds1620.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/char/ds1620.c: Dallas Semiconductors DS1620
+ * thermometer driver (as used in the Rebel.com NetWinder)
+ */
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/capability.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <linux/uaccess.h>
+#include <asm/therm.h>
+
+#ifdef CONFIG_PROC_FS
+/* define for /proc interface */
+#define THERM_USE_PROC
+#endif
+
+/* Definitions for DS1620 chip */
+#define THERM_START_CONVERT 0xee
+#define THERM_RESET 0xaf
+#define THERM_READ_CONFIG 0xac
+#define THERM_READ_TEMP 0xaa
+#define THERM_READ_TL 0xa2
+#define THERM_READ_TH 0xa1
+#define THERM_WRITE_CONFIG 0x0c
+#define THERM_WRITE_TL 0x02
+#define THERM_WRITE_TH 0x01
+
+#define CFG_CPU 2
+#define CFG_1SHOT 1
+
+static DEFINE_MUTEX(ds1620_mutex);
+static const char *fan_state[] = { "off", "on", "on (hardwired)" };
+
+/*
+ * Start of NetWinder specifics
+ * Note! We have to hold the gpio lock with IRQs disabled over the
+ * whole of our transaction to the Dallas chip, since there is a
+ * chance that the WaveArtist driver could touch these bits to
+ * enable or disable the speaker.
+ */
+extern unsigned int system_rev;
+
+static inline void netwinder_ds1620_set_clk(int clk)
+{
+ nw_gpio_modify_op(GPIO_DSCLK, clk ? GPIO_DSCLK : 0);
+}
+
+static inline void netwinder_ds1620_set_data(int dat)
+{
+ nw_gpio_modify_op(GPIO_DATA, dat ? GPIO_DATA : 0);
+}
+
+static inline int netwinder_ds1620_get_data(void)
+{
+ return nw_gpio_read() & GPIO_DATA;
+}
+
+static inline void netwinder_ds1620_set_data_dir(int dir)
+{
+ nw_gpio_modify_io(GPIO_DATA, dir ? GPIO_DATA : 0);
+}
+
+static inline void netwinder_ds1620_reset(void)
+{
+ nw_cpld_modify(CPLD_DS_ENABLE, 0);
+ nw_cpld_modify(CPLD_DS_ENABLE, CPLD_DS_ENABLE);
+}
+
+static inline void netwinder_lock(unsigned long *flags)
+{
+ raw_spin_lock_irqsave(&nw_gpio_lock, *flags);
+}
+
+static inline void netwinder_unlock(unsigned long *flags)
+{
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, *flags);
+}
+
+static inline void netwinder_set_fan(int i)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+ nw_gpio_modify_op(GPIO_FAN, i ? GPIO_FAN : 0);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+}
+
+static inline int netwinder_get_fan(void)
+{
+ if ((system_rev & 0xf000) == 0x4000)
+ return FAN_ALWAYS_ON;
+
+ return (nw_gpio_read() & GPIO_FAN) ? FAN_ON : FAN_OFF;
+}
+
+/*
+ * End of NetWinder specifics
+ */
+
+static void ds1620_send_bits(int nr, int value)
+{
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ netwinder_ds1620_set_data(value & 1);
+ netwinder_ds1620_set_clk(0);
+ udelay(1);
+ netwinder_ds1620_set_clk(1);
+ udelay(1);
+
+ value >>= 1;
+ }
+}
+
+static unsigned int ds1620_recv_bits(int nr)
+{
+ unsigned int value = 0, mask = 1;
+ int i;
+
+ netwinder_ds1620_set_data(0);
+
+ for (i = 0; i < nr; i++) {
+ netwinder_ds1620_set_clk(0);
+ udelay(1);
+
+ if (netwinder_ds1620_get_data())
+ value |= mask;
+
+ mask <<= 1;
+
+ netwinder_ds1620_set_clk(1);
+ udelay(1);
+ }
+
+ return value;
+}
+
+static void ds1620_out(int cmd, int bits, int value)
+{
+ unsigned long flags;
+
+ netwinder_lock(&flags);
+ netwinder_ds1620_set_clk(1);
+ netwinder_ds1620_set_data_dir(0);
+ netwinder_ds1620_reset();
+
+ udelay(1);
+
+ ds1620_send_bits(8, cmd);
+ if (bits)
+ ds1620_send_bits(bits, value);
+
+ udelay(1);
+
+ netwinder_ds1620_reset();
+ netwinder_unlock(&flags);
+
+ msleep(20);
+}
+
+static unsigned int ds1620_in(int cmd, int bits)
+{
+ unsigned long flags;
+ unsigned int value;
+
+ netwinder_lock(&flags);
+ netwinder_ds1620_set_clk(1);
+ netwinder_ds1620_set_data_dir(0);
+ netwinder_ds1620_reset();
+
+ udelay(1);
+
+ ds1620_send_bits(8, cmd);
+
+ netwinder_ds1620_set_data_dir(1);
+ value = ds1620_recv_bits(bits);
+
+ netwinder_ds1620_reset();
+ netwinder_unlock(&flags);
+
+ return value;
+}
+
+static int cvt_9_to_int(unsigned int val)
+{
+ if (val & 0x100)
+ val |= 0xfffffe00;
+
+ return val;
+}
+
+static void ds1620_write_state(struct therm *therm)
+{
+ ds1620_out(THERM_WRITE_CONFIG, 8, CFG_CPU);
+ ds1620_out(THERM_WRITE_TL, 9, therm->lo);
+ ds1620_out(THERM_WRITE_TH, 9, therm->hi);
+ ds1620_out(THERM_START_CONVERT, 0, 0);
+}
+
+static void ds1620_read_state(struct therm *therm)
+{
+ therm->lo = cvt_9_to_int(ds1620_in(THERM_READ_TL, 9));
+ therm->hi = cvt_9_to_int(ds1620_in(THERM_READ_TH, 9));
+}
+
+static int ds1620_open(struct inode *inode, struct file *file)
+{
+ return stream_open(inode, file);
+}
+
+static ssize_t
+ds1620_read(struct file *file, char __user *buf, size_t count, loff_t *ptr)
+{
+ signed int cur_temp;
+ signed char cur_temp_degF;
+
+ cur_temp = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9)) >> 1;
+
+ /* convert to Fahrenheit, as per wdt.c */
+ cur_temp_degF = (cur_temp * 9) / 5 + 32;
+
+ if (copy_to_user(buf, &cur_temp_degF, 1))
+ return -EFAULT;
+
+ return 1;
+}
+
+static int
+ds1620_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct therm therm;
+ union {
+ struct therm __user *therm;
+ int __user *i;
+ } uarg;
+ int i;
+
+ uarg.i = (int __user *)arg;
+
+ switch(cmd) {
+ case CMD_SET_THERMOSTATE:
+ case CMD_SET_THERMOSTATE2:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (cmd == CMD_SET_THERMOSTATE) {
+ if (get_user(therm.hi, uarg.i))
+ return -EFAULT;
+ therm.lo = therm.hi - 3;
+ } else {
+ if (copy_from_user(&therm, uarg.therm, sizeof(therm)))
+ return -EFAULT;
+ }
+
+ therm.lo <<= 1;
+ therm.hi <<= 1;
+
+ ds1620_write_state(&therm);
+ break;
+
+ case CMD_GET_THERMOSTATE:
+ case CMD_GET_THERMOSTATE2:
+ ds1620_read_state(&therm);
+
+ therm.lo >>= 1;
+ therm.hi >>= 1;
+
+ if (cmd == CMD_GET_THERMOSTATE) {
+ if (put_user(therm.hi, uarg.i))
+ return -EFAULT;
+ } else {
+ if (copy_to_user(uarg.therm, &therm, sizeof(therm)))
+ return -EFAULT;
+ }
+ break;
+
+ case CMD_GET_TEMPERATURE:
+ case CMD_GET_TEMPERATURE2:
+ i = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9));
+
+ if (cmd == CMD_GET_TEMPERATURE)
+ i >>= 1;
+
+ return put_user(i, uarg.i) ? -EFAULT : 0;
+
+ case CMD_GET_STATUS:
+ i = ds1620_in(THERM_READ_CONFIG, 8) & 0xe3;
+
+ return put_user(i, uarg.i) ? -EFAULT : 0;
+
+ case CMD_GET_FAN:
+ i = netwinder_get_fan();
+
+ return put_user(i, uarg.i) ? -EFAULT : 0;
+
+ case CMD_SET_FAN:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (get_user(i, uarg.i))
+ return -EFAULT;
+
+ netwinder_set_fan(i);
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return 0;
+}
+
+static long
+ds1620_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ mutex_lock(&ds1620_mutex);
+ ret = ds1620_ioctl(file, cmd, arg);
+ mutex_unlock(&ds1620_mutex);
+
+ return ret;
+}
+
+#ifdef THERM_USE_PROC
+static int ds1620_proc_therm_show(struct seq_file *m, void *v)
+{
+ struct therm th;
+ int temp;
+
+ ds1620_read_state(&th);
+ temp = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9));
+
+ seq_printf(m, "Thermostat: HI %i.%i, LOW %i.%i; temperature: %i.%i C, fan %s\n",
+ th.hi >> 1, th.hi & 1 ? 5 : 0,
+ th.lo >> 1, th.lo & 1 ? 5 : 0,
+ temp >> 1, temp & 1 ? 5 : 0,
+ fan_state[netwinder_get_fan()]);
+ return 0;
+}
+#endif
+
+static const struct file_operations ds1620_fops = {
+ .owner = THIS_MODULE,
+ .open = ds1620_open,
+ .read = ds1620_read,
+ .unlocked_ioctl = ds1620_unlocked_ioctl,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice ds1620_miscdev = {
+ TEMP_MINOR,
+ "temp",
+ &ds1620_fops
+};
+
+static int __init ds1620_init(void)
+{
+ int ret;
+ struct therm th, th_start;
+
+ if (!machine_is_netwinder())
+ return -ENODEV;
+
+ ds1620_out(THERM_RESET, 0, 0);
+ ds1620_out(THERM_WRITE_CONFIG, 8, CFG_CPU);
+ ds1620_out(THERM_START_CONVERT, 0, 0);
+
+ /*
+ * Trigger the fan to start by setting
+ * temperature high point low. This kicks
+ * the fan into action.
+ */
+ ds1620_read_state(&th);
+ th_start.lo = 0;
+ th_start.hi = 1;
+ ds1620_write_state(&th_start);
+
+ msleep(2000);
+
+ ds1620_write_state(&th);
+
+ ret = misc_register(&ds1620_miscdev);
+ if (ret < 0)
+ return ret;
+
+#ifdef THERM_USE_PROC
+ if (!proc_create_single("therm", 0, NULL, ds1620_proc_therm_show))
+ printk(KERN_ERR "therm: unable to register /proc/therm\n");
+#endif
+
+ ds1620_read_state(&th);
+ ret = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9));
+
+ printk(KERN_INFO "Thermostat: high %i.%i, low %i.%i, "
+ "current %i.%i C, fan %s.\n",
+ th.hi >> 1, th.hi & 1 ? 5 : 0,
+ th.lo >> 1, th.lo & 1 ? 5 : 0,
+ ret >> 1, ret & 1 ? 5 : 0,
+ fan_state[netwinder_get_fan()]);
+
+ return 0;
+}
+
+static void __exit ds1620_exit(void)
+{
+#ifdef THERM_USE_PROC
+ remove_proc_entry("therm", NULL);
+#endif
+ misc_deregister(&ds1620_miscdev);
+}
+
+module_init(ds1620_init);
+module_exit(ds1620_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
new file mode 100644
index 0000000000..bda27e595d
--- /dev/null
+++ b/drivers/char/dsp56k.c
@@ -0,0 +1,534 @@
+/*
+ * The DSP56001 Device Driver, saviour of the Free World(tm)
+ *
+ * Authors: Fredrik Noring <noring@nocrew.org>
+ * lars brinkhoff <lars@nocrew.org>
+ * Tomas Berndtsson <tomas@nocrew.org>
+ *
+ * First version May 1996
+ *
+ * History:
+ * 97-01-29 Tomas Berndtsson,
+ * Integrated with Linux 2.1.21 kernel sources.
+ * 97-02-15 Tomas Berndtsson,
+ * Fixed for kernel 2.1.26
+ *
+ * BUGS:
+ * Hmm... there must be something here :)
+ *
+ * Copyright (C) 1996,1997 Fredrik Noring, lars brinkhoff & Tomas Berndtsson
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/major.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/delay.h> /* guess what */
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h> /* For put_user and get_user */
+
+#include <asm/atarihw.h>
+#include <asm/traps.h>
+
+#include <asm/dsp56k.h>
+
+/* minor devices */
+#define DSP56K_DEV_56001 0 /* The only device so far */
+
+#define TIMEOUT 10 /* Host port timeout in number of tries */
+#define MAXIO 2048 /* Maximum number of words before sleep */
+#define DSP56K_MAX_BINARY_LENGTH (3*64*1024)
+
+#define DSP56K_TX_INT_ON dsp56k_host_interface.icr |= DSP56K_ICR_TREQ
+#define DSP56K_RX_INT_ON dsp56k_host_interface.icr |= DSP56K_ICR_RREQ
+#define DSP56K_TX_INT_OFF dsp56k_host_interface.icr &= ~DSP56K_ICR_TREQ
+#define DSP56K_RX_INT_OFF dsp56k_host_interface.icr &= ~DSP56K_ICR_RREQ
+
+#define DSP56K_TRANSMIT (dsp56k_host_interface.isr & DSP56K_ISR_TXDE)
+#define DSP56K_RECEIVE (dsp56k_host_interface.isr & DSP56K_ISR_RXDF)
+
+#define handshake(count, maxio, timeout, ENABLE, f) \
+{ \
+ long i, t, m; \
+ while (count > 0) { \
+ m = min_t(unsigned long, count, maxio); \
+ for (i = 0; i < m; i++) { \
+ for (t = 0; t < timeout && !ENABLE; t++) \
+ msleep(20); \
+ if(!ENABLE) \
+ return -EIO; \
+ f; \
+ } \
+ count -= m; \
+ if (m == maxio) msleep(20); \
+ } \
+}
+
+#define tx_wait(n) \
+{ \
+ int t; \
+ for(t = 0; t < n && !DSP56K_TRANSMIT; t++) \
+ msleep(10); \
+ if(!DSP56K_TRANSMIT) { \
+ return -EIO; \
+ } \
+}
+
+#define rx_wait(n) \
+{ \
+ int t; \
+ for(t = 0; t < n && !DSP56K_RECEIVE; t++) \
+ msleep(10); \
+ if(!DSP56K_RECEIVE) { \
+ return -EIO; \
+ } \
+}
+
+static DEFINE_MUTEX(dsp56k_mutex);
+static struct dsp56k_device {
+ unsigned long in_use;
+ long maxio, timeout;
+ int tx_wsize, rx_wsize;
+} dsp56k;
+
+static const struct class dsp56k_class = {
+ .name = "dsp56k",
+};
+
+static int dsp56k_reset(void)
+{
+ u_char status;
+
+ /* Power down the DSP */
+ sound_ym.rd_data_reg_sel = 14;
+ status = sound_ym.rd_data_reg_sel & 0xef;
+ sound_ym.wd_data = status;
+ sound_ym.wd_data = status | 0x10;
+
+ udelay(10);
+
+ /* Power up the DSP */
+ sound_ym.rd_data_reg_sel = 14;
+ sound_ym.wd_data = sound_ym.rd_data_reg_sel & 0xef;
+
+ return 0;
+}
+
+static int dsp56k_upload(u_char __user *bin, int len)
+{
+ struct platform_device *pdev;
+ const struct firmware *fw;
+ const char fw_name[] = "dsp56k/bootstrap.bin";
+ int err;
+ int i;
+
+ dsp56k_reset();
+
+ pdev = platform_device_register_simple("dsp56k", 0, NULL, 0);
+ if (IS_ERR(pdev)) {
+ printk(KERN_ERR "Failed to register device for \"%s\"\n",
+ fw_name);
+ return -EINVAL;
+ }
+ err = request_firmware(&fw, fw_name, &pdev->dev);
+ platform_device_unregister(pdev);
+ if (err) {
+ printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
+ fw_name, err);
+ return err;
+ }
+ if (fw->size % 3) {
+ printk(KERN_ERR "Bogus length %d in image \"%s\"\n",
+ fw->size, fw_name);
+ release_firmware(fw);
+ return -EINVAL;
+ }
+ for (i = 0; i < fw->size; i = i + 3) {
+ /* tx_wait(10); */
+ dsp56k_host_interface.data.b[1] = fw->data[i];
+ dsp56k_host_interface.data.b[2] = fw->data[i + 1];
+ dsp56k_host_interface.data.b[3] = fw->data[i + 2];
+ }
+ release_firmware(fw);
+ for (; i < 512; i++) {
+ /* tx_wait(10); */
+ dsp56k_host_interface.data.b[1] = 0;
+ dsp56k_host_interface.data.b[2] = 0;
+ dsp56k_host_interface.data.b[3] = 0;
+ }
+
+ for (i = 0; i < len; i++) {
+ tx_wait(10);
+ get_user(dsp56k_host_interface.data.b[1], bin++);
+ get_user(dsp56k_host_interface.data.b[2], bin++);
+ get_user(dsp56k_host_interface.data.b[3], bin++);
+ }
+
+ tx_wait(10);
+ dsp56k_host_interface.data.l = 3; /* Magic execute */
+
+ return 0;
+}
+
+static ssize_t dsp56k_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct inode *inode = file_inode(file);
+ int dev = iminor(inode) & 0x0f;
+
+ switch(dev)
+ {
+ case DSP56K_DEV_56001:
+ {
+
+ long n;
+
+ /* Don't do anything if nothing is to be done */
+ if (!count) return 0;
+
+ n = 0;
+ switch (dsp56k.rx_wsize) {
+ case 1: /* 8 bit */
+ {
+ handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
+ put_user(dsp56k_host_interface.data.b[3], buf+n++));
+ return n;
+ }
+ case 2: /* 16 bit */
+ {
+ short __user *data;
+
+ count /= 2;
+ data = (short __user *) buf;
+ handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
+ put_user(dsp56k_host_interface.data.w[1], data+n++));
+ return 2*n;
+ }
+ case 3: /* 24 bit */
+ {
+ count /= 3;
+ handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
+ put_user(dsp56k_host_interface.data.b[1], buf+n++);
+ put_user(dsp56k_host_interface.data.b[2], buf+n++);
+ put_user(dsp56k_host_interface.data.b[3], buf+n++));
+ return 3*n;
+ }
+ case 4: /* 32 bit */
+ {
+ long __user *data;
+
+ count /= 4;
+ data = (long __user *) buf;
+ handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
+ put_user(dsp56k_host_interface.data.l, data+n++));
+ return 4*n;
+ }
+ }
+ return -EFAULT;
+ }
+
+ default:
+ printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev);
+ return -ENXIO;
+ }
+}
+
+static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct inode *inode = file_inode(file);
+ int dev = iminor(inode) & 0x0f;
+
+ switch(dev)
+ {
+ case DSP56K_DEV_56001:
+ {
+ long n;
+
+ /* Don't do anything if nothing is to be done */
+ if (!count) return 0;
+
+ n = 0;
+ switch (dsp56k.tx_wsize) {
+ case 1: /* 8 bit */
+ {
+ handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
+ get_user(dsp56k_host_interface.data.b[3], buf+n++));
+ return n;
+ }
+ case 2: /* 16 bit */
+ {
+ const short __user *data;
+
+ count /= 2;
+ data = (const short __user *)buf;
+ handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
+ get_user(dsp56k_host_interface.data.w[1], data+n++));
+ return 2*n;
+ }
+ case 3: /* 24 bit */
+ {
+ count /= 3;
+ handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
+ get_user(dsp56k_host_interface.data.b[1], buf+n++);
+ get_user(dsp56k_host_interface.data.b[2], buf+n++);
+ get_user(dsp56k_host_interface.data.b[3], buf+n++));
+ return 3*n;
+ }
+ case 4: /* 32 bit */
+ {
+ const long __user *data;
+
+ count /= 4;
+ data = (const long __user *)buf;
+ handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
+ get_user(dsp56k_host_interface.data.l, data+n++));
+ return 4*n;
+ }
+ }
+
+ return -EFAULT;
+ }
+ default:
+ printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev);
+ return -ENXIO;
+ }
+}
+
+static long dsp56k_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int dev = iminor(file_inode(file)) & 0x0f;
+ void __user *argp = (void __user *)arg;
+
+ switch(dev)
+ {
+ case DSP56K_DEV_56001:
+
+ switch(cmd) {
+ case DSP56K_UPLOAD:
+ {
+ char __user *bin;
+ int r, len;
+ struct dsp56k_upload __user *binary = argp;
+
+ if(get_user(len, &binary->len) < 0)
+ return -EFAULT;
+ if(get_user(bin, &binary->bin) < 0)
+ return -EFAULT;
+
+ if (len <= 0) {
+ return -EINVAL; /* nothing to upload?!? */
+ }
+ if (len > DSP56K_MAX_BINARY_LENGTH) {
+ return -EINVAL;
+ }
+ mutex_lock(&dsp56k_mutex);
+ r = dsp56k_upload(bin, len);
+ mutex_unlock(&dsp56k_mutex);
+ if (r < 0) {
+ return r;
+ }
+
+ break;
+ }
+ case DSP56K_SET_TX_WSIZE:
+ if (arg > 4 || arg < 1)
+ return -EINVAL;
+ mutex_lock(&dsp56k_mutex);
+ dsp56k.tx_wsize = (int) arg;
+ mutex_unlock(&dsp56k_mutex);
+ break;
+ case DSP56K_SET_RX_WSIZE:
+ if (arg > 4 || arg < 1)
+ return -EINVAL;
+ mutex_lock(&dsp56k_mutex);
+ dsp56k.rx_wsize = (int) arg;
+ mutex_unlock(&dsp56k_mutex);
+ break;
+ case DSP56K_HOST_FLAGS:
+ {
+ int dir, out, status;
+ struct dsp56k_host_flags __user *hf = argp;
+
+ if(get_user(dir, &hf->dir) < 0)
+ return -EFAULT;
+ if(get_user(out, &hf->out) < 0)
+ return -EFAULT;
+
+ mutex_lock(&dsp56k_mutex);
+ if ((dir & 0x1) && (out & 0x1))
+ dsp56k_host_interface.icr |= DSP56K_ICR_HF0;
+ else if (dir & 0x1)
+ dsp56k_host_interface.icr &= ~DSP56K_ICR_HF0;
+ if ((dir & 0x2) && (out & 0x2))
+ dsp56k_host_interface.icr |= DSP56K_ICR_HF1;
+ else if (dir & 0x2)
+ dsp56k_host_interface.icr &= ~DSP56K_ICR_HF1;
+
+ status = 0;
+ if (dsp56k_host_interface.icr & DSP56K_ICR_HF0) status |= 0x1;
+ if (dsp56k_host_interface.icr & DSP56K_ICR_HF1) status |= 0x2;
+ if (dsp56k_host_interface.isr & DSP56K_ISR_HF2) status |= 0x4;
+ if (dsp56k_host_interface.isr & DSP56K_ISR_HF3) status |= 0x8;
+ mutex_unlock(&dsp56k_mutex);
+ return put_user(status, &hf->status);
+ }
+ case DSP56K_HOST_CMD:
+ if (arg > 31)
+ return -EINVAL;
+ mutex_lock(&dsp56k_mutex);
+ dsp56k_host_interface.cvr = (u_char)((arg & DSP56K_CVR_HV_MASK) |
+ DSP56K_CVR_HC);
+ mutex_unlock(&dsp56k_mutex);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+
+ default:
+ printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev);
+ return -ENXIO;
+ }
+}
+
+/* As of 2.1.26 this should be dsp56k_poll,
+ * but how do I then check device minor number?
+ * Do I need this function at all???
+ */
+#if 0
+static __poll_t dsp56k_poll(struct file *file, poll_table *wait)
+{
+ int dev = iminor(file_inode(file)) & 0x0f;
+
+ switch(dev)
+ {
+ case DSP56K_DEV_56001:
+ /* poll_wait(file, ???, wait); */
+ return EPOLLIN | EPOLLRDNORM | EPOLLOUT;
+
+ default:
+ printk("DSP56k driver: Unknown minor device: %d\n", dev);
+ return 0;
+ }
+}
+#endif
+
+static int dsp56k_open(struct inode *inode, struct file *file)
+{
+ int dev = iminor(inode) & 0x0f;
+ int ret = 0;
+
+ mutex_lock(&dsp56k_mutex);
+ switch(dev)
+ {
+ case DSP56K_DEV_56001:
+
+ if (test_and_set_bit(0, &dsp56k.in_use)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ dsp56k.timeout = TIMEOUT;
+ dsp56k.maxio = MAXIO;
+ dsp56k.rx_wsize = dsp56k.tx_wsize = 4;
+
+ DSP56K_TX_INT_OFF;
+ DSP56K_RX_INT_OFF;
+
+ /* Zero host flags */
+ dsp56k_host_interface.icr &= ~DSP56K_ICR_HF0;
+ dsp56k_host_interface.icr &= ~DSP56K_ICR_HF1;
+
+ break;
+
+ default:
+ ret = -ENODEV;
+ }
+out:
+ mutex_unlock(&dsp56k_mutex);
+ return ret;
+}
+
+static int dsp56k_release(struct inode *inode, struct file *file)
+{
+ int dev = iminor(inode) & 0x0f;
+
+ switch(dev)
+ {
+ case DSP56K_DEV_56001:
+ clear_bit(0, &dsp56k.in_use);
+ break;
+ default:
+ printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static const struct file_operations dsp56k_fops = {
+ .owner = THIS_MODULE,
+ .read = dsp56k_read,
+ .write = dsp56k_write,
+ .unlocked_ioctl = dsp56k_ioctl,
+ .open = dsp56k_open,
+ .release = dsp56k_release,
+ .llseek = noop_llseek,
+};
+
+
+/****** Init and module functions ******/
+
+static const char banner[] __initconst = KERN_INFO "DSP56k driver installed\n";
+
+static int __init dsp56k_init_driver(void)
+{
+ int err;
+
+ if(!MACH_IS_ATARI || !ATARIHW_PRESENT(DSP56K)) {
+ printk("DSP56k driver: Hardware not present\n");
+ return -ENODEV;
+ }
+
+ if(register_chrdev(DSP56K_MAJOR, "dsp56k", &dsp56k_fops)) {
+ printk("DSP56k driver: Unable to register driver\n");
+ return -ENODEV;
+ }
+ err = class_register(&dsp56k_class);
+ if (err)
+ goto out_chrdev;
+ device_create(&dsp56k_class, NULL, MKDEV(DSP56K_MAJOR, 0), NULL,
+ "dsp56k");
+
+ printk(banner);
+ goto out;
+
+out_chrdev:
+ unregister_chrdev(DSP56K_MAJOR, "dsp56k");
+out:
+ return err;
+}
+module_init(dsp56k_init_driver);
+
+static void __exit dsp56k_cleanup_driver(void)
+{
+ device_destroy(&dsp56k_class, MKDEV(DSP56K_MAJOR, 0));
+ class_unregister(&dsp56k_class);
+ unregister_chrdev(DSP56K_MAJOR, "dsp56k");
+}
+module_exit(dsp56k_cleanup_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("dsp56k/bootstrap.bin");
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
new file mode 100644
index 0000000000..6946c1cad9
--- /dev/null
+++ b/drivers/char/dtlk.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* -*- linux-c -*-
+ * dtlk.c - DoubleTalk PC driver for Linux
+ *
+ * Original author: Chris Pallotta <chris@allmedia.com>
+ * Current maintainer: Jim Van Zandt <jrv@vanzandt.mv.com>
+ *
+ * 2000-03-18 Jim Van Zandt: Fix polling.
+ * Eliminate dtlk_timer_active flag and separate dtlk_stop_timer
+ * function. Don't restart timer in dtlk_timer_tick. Restart timer
+ * in dtlk_poll after every poll. dtlk_poll returns mask (duh).
+ * Eliminate unused function dtlk_write_byte. Misc. code cleanups.
+ */
+
+/* This driver is for the DoubleTalk PC, a speech synthesizer
+ manufactured by RC Systems (http://www.rcsys.com/). It was written
+ based on documentation in their User's Manual file and Developer's
+ Tools disk.
+
+ The DoubleTalk PC contains four voice synthesizers: text-to-speech
+ (TTS), linear predictive coding (LPC), PCM/ADPCM, and CVSD. It
+ also has a tone generator. Output data for LPC are written to the
+ LPC port, and output data for the other modes are written to the
+ TTS port.
+
+ Two kinds of data can be read from the DoubleTalk: status
+ information (in response to the "\001?" interrogation command) is
+ read from the TTS port, and index markers (which mark the progress
+ of the speech) are read from the LPC port. Not all models of the
+ DoubleTalk PC implement index markers. Both the TTS and LPC ports
+ can also display status flags.
+
+ The DoubleTalk PC generates no interrupts.
+
+ These characteristics are mapped into the Unix stream I/O model as
+ follows:
+
+ "write" sends bytes to the TTS port. It is the responsibility of
+ the user program to switch modes among TTS, PCM/ADPCM, and CVSD.
+ This driver was written for use with the text-to-speech
+ synthesizer. If LPC output is needed some day, other minor device
+ numbers can be used to select among output modes.
+
+ "read" gets index markers from the LPC port. If the device does
+ not implement index markers, the read will fail with error EINVAL.
+
+ Status information is available using the DTLK_INTERROGATE ioctl.
+
+ */
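+
+/* Illustrative sketch, not part of the driver: user space can fetch the
+   status block through the DTLK_INTERROGATE ioctl roughly as follows
+   (the device node name is an assumption):
+
+	struct dtlk_settings st;
+	int fd = open("/dev/dtlk", O_RDWR);
+
+	if (fd >= 0 && ioctl(fd, DTLK_INTERROGATE, &st) == 0)
+		printf("ROM %s, serial %u\n", st.rom_version, st.serial_number);
+ */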
+
+#include <linux/module.h>
+
+#define KERNEL
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/errno.h> /* for -EBUSY */
+#include <linux/ioport.h> /* for request_region */
+#include <linux/delay.h> /* for loops_per_jiffy */
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <asm/io.h> /* for inb_p, outb_p, inb, outb, etc. */
+#include <linux/uaccess.h> /* for get_user, etc. */
+#include <linux/wait.h> /* for wait_queue */
+#include <linux/init.h> /* for __init, module_{init,exit} */
+#include <linux/poll.h> /* for EPOLLIN, etc. */
+#include <linux/dtlk.h> /* local header file for DoubleTalk values */
+
+#ifdef TRACING
+#define TRACE_TEXT(str) printk(str);
+#define TRACE_RET printk(")")
+#else /* !TRACING */
+#define TRACE_TEXT(str) ((void) 0)
+#define TRACE_RET ((void) 0)
+#endif /* TRACING */
+
+static DEFINE_MUTEX(dtlk_mutex);
+static void dtlk_timer_tick(struct timer_list *unused);
+
+static int dtlk_major;
+static int dtlk_port_lpc;
+static int dtlk_port_tts;
+static int dtlk_busy;
+static int dtlk_has_indexing;
+static unsigned int dtlk_portlist[] =
+{0x25e, 0x29e, 0x2de, 0x31e, 0x35e, 0x39e, 0};
+static wait_queue_head_t dtlk_process_list;
+static DEFINE_TIMER(dtlk_timer, dtlk_timer_tick);
+
+/* prototypes for file_operations struct */
+static ssize_t dtlk_read(struct file *, char __user *,
+ size_t nbytes, loff_t * ppos);
+static ssize_t dtlk_write(struct file *, const char __user *,
+ size_t nbytes, loff_t * ppos);
+static __poll_t dtlk_poll(struct file *, poll_table *);
+static int dtlk_open(struct inode *, struct file *);
+static int dtlk_release(struct inode *, struct file *);
+static long dtlk_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+
+static const struct file_operations dtlk_fops =
+{
+ .owner = THIS_MODULE,
+ .read = dtlk_read,
+ .write = dtlk_write,
+ .poll = dtlk_poll,
+ .unlocked_ioctl = dtlk_ioctl,
+ .open = dtlk_open,
+ .release = dtlk_release,
+ .llseek = no_llseek,
+};
+
+/* local prototypes */
+static int dtlk_dev_probe(void);
+static struct dtlk_settings *dtlk_interrogate(void);
+static int dtlk_readable(void);
+static char dtlk_read_lpc(void);
+static char dtlk_read_tts(void);
+static int dtlk_writeable(void);
+static char dtlk_write_bytes(const char *buf, int n);
+static char dtlk_write_tts(char);
+/*
+ static void dtlk_handle_error(char, char, unsigned int);
+ */
+
+static ssize_t dtlk_read(struct file *file, char __user *buf,
+ size_t count, loff_t * ppos)
+{
+ unsigned int minor = iminor(file_inode(file));
+ char ch;
+ int i = 0, retries;
+
+ TRACE_TEXT("(dtlk_read");
+ /* printk("DoubleTalk PC - dtlk_read()\n"); */
+
+ if (minor != DTLK_MINOR || !dtlk_has_indexing)
+ return -EINVAL;
+
+ for (retries = 0; retries < loops_per_jiffy; retries++) {
+ while (i < count && dtlk_readable()) {
+ ch = dtlk_read_lpc();
+ /* printk("dtlk_read() reads 0x%02x\n", ch); */
+ if (put_user(ch, buf++))
+ return -EFAULT;
+ i++;
+ }
+ if (i)
+ return i;
+ if (file->f_flags & O_NONBLOCK)
+ break;
+ msleep_interruptible(100);
+ }
+ if (retries == loops_per_jiffy)
+ printk(KERN_ERR "dtlk_read times out\n");
+ TRACE_RET;
+ return -EAGAIN;
+}
+
+static ssize_t dtlk_write(struct file *file, const char __user *buf,
+ size_t count, loff_t * ppos)
+{
+ int i = 0, retries = 0, ch;
+
+ TRACE_TEXT("(dtlk_write");
+#ifdef TRACING
+ printk(" \"");
+ {
+ int i, ch;
+ for (i = 0; i < count; i++) {
+ if (get_user(ch, buf + i))
+ return -EFAULT;
+ if (' ' <= ch && ch <= '~')
+ printk("%c", ch);
+ else
+ printk("\\%03o", ch);
+ }
+ printk("\"");
+ }
+#endif
+
+ if (iminor(file_inode(file)) != DTLK_MINOR)
+ return -EINVAL;
+
+ while (1) {
+ while (i < count && !get_user(ch, buf) &&
+ (ch == DTLK_CLEAR || dtlk_writeable())) {
+ dtlk_write_tts(ch);
+ buf++;
+ i++;
+ if (i % 5 == 0)
+ /* We yield our time until scheduled
+ again. This reduces the transfer
+ rate to 500 bytes/sec, but that's
+ still enough to keep up with the
+ speech synthesizer. */
+ msleep_interruptible(1);
+ else {
+ /* the RDY bit goes zero 2-3 usec
+ after writing, and goes 1 again
+ 180-190 usec later. Here, we wait
+ up to 250 usec for the RDY bit to
+ go nonzero. */
+ for (retries = 0;
+ retries < loops_per_jiffy / (4000/HZ);
+ retries++)
+ if (inb_p(dtlk_port_tts) &
+ TTS_WRITABLE)
+ break;
+ }
+ retries = 0;
+ }
+ if (i == count)
+ return i;
+ if (file->f_flags & O_NONBLOCK)
+ break;
+
+ msleep_interruptible(1);
+
+ if (++retries > 10 * HZ) { /* wait no more than 10 sec
+ from last write */
+ printk("dtlk: write timeout. "
+ "inb_p(dtlk_port_tts) = 0x%02x\n",
+ inb_p(dtlk_port_tts));
+ TRACE_RET;
+ return -EBUSY;
+ }
+ }
+ TRACE_RET;
+ return -EAGAIN;
+}
+
+static __poll_t dtlk_poll(struct file *file, poll_table * wait)
+{
+ __poll_t mask = 0;
+ unsigned long expires;
+
+ TRACE_TEXT(" dtlk_poll");
+ /*
+ static long int j;
+ printk(".");
+ printk("<%ld>", jiffies-j);
+ j=jiffies;
+ */
+ poll_wait(file, &dtlk_process_list, wait);
+
+ if (dtlk_has_indexing && dtlk_readable()) {
+ del_timer(&dtlk_timer);
+ mask = EPOLLIN | EPOLLRDNORM;
+ }
+ if (dtlk_writeable()) {
+ del_timer(&dtlk_timer);
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ }
+ /* there are no exception conditions */
+
+ /* There won't be any interrupts, so we set a timer instead. */
+ expires = jiffies + 3*HZ / 100;
+ mod_timer(&dtlk_timer, expires);
+
+ return mask;
+}
+
+static void dtlk_timer_tick(struct timer_list *unused)
+{
+ TRACE_TEXT(" dtlk_timer_tick");
+ wake_up_interruptible(&dtlk_process_list);
+}
+
+static long dtlk_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ char __user *argp = (char __user *)arg;
+ struct dtlk_settings *sp;
+ char portval;
+ TRACE_TEXT(" dtlk_ioctl");
+
+ switch (cmd) {
+
+ case DTLK_INTERROGATE:
+ mutex_lock(&dtlk_mutex);
+ sp = dtlk_interrogate();
+ mutex_unlock(&dtlk_mutex);
+ if (copy_to_user(argp, sp, sizeof(struct dtlk_settings)))
+ return -EINVAL;
+ return 0;
+
+ case DTLK_STATUS:
+ portval = inb_p(dtlk_port_tts);
+ return put_user(portval, argp);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Note that nobody ever sets dtlk_busy... */
+static int dtlk_open(struct inode *inode, struct file *file)
+{
+ TRACE_TEXT("(dtlk_open");
+
+ switch (iminor(inode)) {
+ case DTLK_MINOR:
+ if (dtlk_busy)
+ return -EBUSY;
+ return stream_open(inode, file);
+
+ default:
+ return -ENXIO;
+ }
+}
+
+static int dtlk_release(struct inode *inode, struct file *file)
+{
+ TRACE_TEXT("(dtlk_release");
+
+ switch (iminor(inode)) {
+ case DTLK_MINOR:
+ break;
+
+ default:
+ break;
+ }
+ TRACE_RET;
+
+ del_timer_sync(&dtlk_timer);
+
+ return 0;
+}
+
+static int __init dtlk_init(void)
+{
+ int err;
+
+ dtlk_port_lpc = 0;
+ dtlk_port_tts = 0;
+ dtlk_busy = 0;
+ dtlk_major = register_chrdev(0, "dtlk", &dtlk_fops);
+ if (dtlk_major < 0) {
+ printk(KERN_ERR "DoubleTalk PC - cannot register device\n");
+ return dtlk_major;
+ }
+ err = dtlk_dev_probe();
+ if (err) {
+ unregister_chrdev(dtlk_major, "dtlk");
+ return err;
+ }
+ printk(", MAJOR %d\n", dtlk_major);
+
+ init_waitqueue_head(&dtlk_process_list);
+
+ return 0;
+}
+
+static void __exit dtlk_cleanup (void)
+{
+ dtlk_write_bytes("goodbye", 8);
+ msleep_interruptible(500); /* nap 0.50 sec but
+ could be awakened
+ earlier by
+ signals... */
+
+ dtlk_write_tts(DTLK_CLEAR);
+ unregister_chrdev(dtlk_major, "dtlk");
+ release_region(dtlk_port_lpc, DTLK_IO_EXTENT);
+}
+
+module_init(dtlk_init);
+module_exit(dtlk_cleanup);
+
+/* ------------------------------------------------------------------------ */
+
+static int dtlk_readable(void)
+{
+#ifdef TRACING
+ printk(" dtlk_readable=%u@%u", inb_p(dtlk_port_lpc) != 0x7f, jiffies);
+#endif
+ return inb_p(dtlk_port_lpc) != 0x7f;
+}
+
+static int dtlk_writeable(void)
+{
+ /* TRACE_TEXT(" dtlk_writeable"); */
+#ifdef TRACINGMORE
+ printk(" dtlk_writeable=%u", (inb_p(dtlk_port_tts) & TTS_WRITABLE)!=0);
+#endif
+ return inb_p(dtlk_port_tts) & TTS_WRITABLE;
+}
+
+static int __init dtlk_dev_probe(void)
+{
+ unsigned int testval = 0;
+ int i = 0;
+ struct dtlk_settings *sp;
+
+ if (dtlk_port_lpc | dtlk_port_tts)
+ return -EBUSY;
+
+ for (i = 0; dtlk_portlist[i]; i++) {
+#if 0
+ printk("DoubleTalk PC - Port %03x = %04x\n",
+ dtlk_portlist[i], (testval = inw_p(dtlk_portlist[i])));
+#endif
+
+ if (!request_region(dtlk_portlist[i], DTLK_IO_EXTENT,
+ "dtlk"))
+ continue;
+ testval = inw_p(dtlk_portlist[i]);
+ if ((testval &= 0xfbff) == 0x107f) {
+ dtlk_port_lpc = dtlk_portlist[i];
+ dtlk_port_tts = dtlk_port_lpc + 1;
+
+ sp = dtlk_interrogate();
+ printk("DoubleTalk PC at %03x-%03x, "
+ "ROM version %s, serial number %u",
+ dtlk_portlist[i], dtlk_portlist[i] +
+ DTLK_IO_EXTENT - 1,
+ sp->rom_version, sp->serial_number);
+
+ /* put LPC port into known state, so
+ dtlk_readable() gives valid result */
+ outb_p(0xff, dtlk_port_lpc);
+
+ /* INIT string and index marker */
+ dtlk_write_bytes("\036\1@\0\0012I\r", 8);
+ /* posting an index takes 18 msec. Here, we
+ wait up to 100 msec to see whether it
+ appears. */
+ msleep_interruptible(100);
+ dtlk_has_indexing = dtlk_readable();
+#ifdef TRACING
+ printk(", indexing %d\n", dtlk_has_indexing);
+#endif
+#ifdef INSCOPE
+ {
+/* This macro records ten samples read from the LPC port, for later display */
+#define LOOK \
+for (i = 0; i < 10; i++) \
+ { \
+ buffer[b++] = inb_p(dtlk_port_lpc); \
+ __delay(loops_per_jiffy/(1000000/HZ)); \
+ }
+ char buffer[1000];
+ int b = 0, i, j;
+
+ LOOK
+ outb_p(0xff, dtlk_port_lpc);
+ buffer[b++] = 0;
+ LOOK
+ dtlk_write_bytes("\0012I\r", 4);
+ buffer[b++] = 0;
+ __delay(50 * loops_per_jiffy / (1000/HZ));
+ outb_p(0xff, dtlk_port_lpc);
+ buffer[b++] = 0;
+ LOOK
+
+ printk("\n");
+ for (j = 0; j < b; j++)
+ printk(" %02x", buffer[j]);
+ printk("\n");
+ }
+#endif /* INSCOPE */
+
+#ifdef OUTSCOPE
+ {
+/* This macro records ten samples read from the TTS port, for later display */
+#define LOOK \
+for (i = 0; i < 10; i++) \
+ { \
+ buffer[b++] = inb_p(dtlk_port_tts); \
+ __delay(loops_per_jiffy/(1000000/HZ)); /* 1 us */ \
+ }
+ char buffer[1000];
+ int b = 0, i, j;
+
+ mdelay(10); /* 10 ms */
+ LOOK
+ outb_p(0x03, dtlk_port_tts);
+ buffer[b++] = 0;
+ LOOK
+ LOOK
+
+ printk("\n");
+ for (j = 0; j < b; j++)
+ printk(" %02x", buffer[j]);
+ printk("\n");
+ }
+#endif /* OUTSCOPE */
+
+ dtlk_write_bytes("Double Talk found", 18);
+
+ return 0;
+ }
+ release_region(dtlk_portlist[i], DTLK_IO_EXTENT);
+ }
+
+ printk(KERN_INFO "DoubleTalk PC - not found\n");
+ return -ENODEV;
+}
+
+/*
+ static void dtlk_handle_error(char op, char rc, unsigned int minor)
+ {
+ printk(KERN_INFO"\nDoubleTalk PC - MINOR: %d, OPCODE: %d, ERROR: %d\n",
+ minor, op, rc);
+ return;
+ }
+ */
+
+/* interrogate the DoubleTalk PC and return its settings */
+static struct dtlk_settings *dtlk_interrogate(void)
+{
+ unsigned char *t;
+ static char buf[sizeof(struct dtlk_settings) + 1];
+ int total, i;
+ static struct dtlk_settings status;
+ TRACE_TEXT("(dtlk_interrogate");
+ dtlk_write_bytes("\030\001?", 3);
+ for (total = 0, i = 0; i < 50; i++) {
+ buf[total] = dtlk_read_tts();
+ if (total > 2 && buf[total] == 0x7f)
+ break;
+ if (total < sizeof(struct dtlk_settings))
+ total++;
+ }
+ /*
+ if (i==50) printk("interrogate() read overrun\n");
+ for (i=0; i<sizeof(buf); i++)
+ printk(" %02x", buf[i]);
+ printk("\n");
+ */
+ t = buf;
+ status.serial_number = t[0] + t[1] * 256; /* serial number is
+ little endian */
+ t += 2;
+
+ i = 0;
+ while (*t != '\r') {
+ status.rom_version[i] = *t;
+ if (i < sizeof(status.rom_version) - 1)
+ i++;
+ t++;
+ }
+ status.rom_version[i] = 0;
+ t++;
+
+ status.mode = *t++;
+ status.punc_level = *t++;
+ status.formant_freq = *t++;
+ status.pitch = *t++;
+ status.speed = *t++;
+ status.volume = *t++;
+ status.tone = *t++;
+ status.expression = *t++;
+ status.ext_dict_loaded = *t++;
+ status.ext_dict_status = *t++;
+ status.free_ram = *t++;
+ status.articulation = *t++;
+ status.reverb = *t++;
+ status.eob = *t++;
+ status.has_indexing = dtlk_has_indexing;
+ TRACE_RET;
+ return &status;
+}
+
+static char dtlk_read_tts(void)
+{
+ int portval, retries = 0;
+ char ch;
+ TRACE_TEXT("(dtlk_read_tts");
+
+ /* verify DT is ready, read char, wait for ACK */
+ do {
+ portval = inb_p(dtlk_port_tts);
+ } while ((portval & TTS_READABLE) == 0 &&
+ retries++ < DTLK_MAX_RETRIES);
+ if (retries > DTLK_MAX_RETRIES)
+ printk(KERN_ERR "dtlk_read_tts() timeout\n");
+
+ ch = inb_p(dtlk_port_tts); /* input from TTS port */
+ ch &= 0x7f;
+ outb_p(ch, dtlk_port_tts);
+
+ retries = 0;
+ do {
+ portval = inb_p(dtlk_port_tts);
+ } while ((portval & TTS_READABLE) != 0 &&
+ retries++ < DTLK_MAX_RETRIES);
+ if (retries > DTLK_MAX_RETRIES)
+ printk(KERN_ERR "dtlk_read_tts() timeout\n");
+
+ TRACE_RET;
+ return ch;
+}
+
+static char dtlk_read_lpc(void)
+{
+ int retries = 0;
+ char ch;
+ TRACE_TEXT("(dtlk_read_lpc");
+
+ /* no need to test -- this is only called when the port is readable */
+
+ ch = inb_p(dtlk_port_lpc); /* input from LPC port */
+
+ outb_p(0xff, dtlk_port_lpc);
+
+ /* acknowledging a read takes 3-4
+ usec. Here, we wait up to 20 usec
+ for the acknowledgement */
+ retries = (loops_per_jiffy * 20) / (1000000/HZ);
+ while (inb_p(dtlk_port_lpc) != 0x7f && --retries > 0);
+ if (retries == 0)
+ printk(KERN_ERR "dtlk_read_lpc() timeout\n");
+
+ TRACE_RET;
+ return ch;
+}
+
+/* write n bytes to tts port */
+static char dtlk_write_bytes(const char *buf, int n)
+{
+ char val = 0;
+ /* printk("dtlk_write_bytes(\"%-*s\", %d)\n", n, buf, n); */
+ TRACE_TEXT("(dtlk_write_bytes");
+ while (n-- > 0)
+ val = dtlk_write_tts(*buf++);
+ TRACE_RET;
+ return val;
+}
+
+static char dtlk_write_tts(char ch)
+{
+ int retries = 0;
+#ifdef TRACINGMORE
+ printk(" dtlk_write_tts(");
+ if (' ' <= ch && ch <= '~')
+ printk("'%c'", ch);
+ else
+ printk("0x%02x", ch);
+#endif
+ if (ch != DTLK_CLEAR) /* no flow control for CLEAR command */
+ while ((inb_p(dtlk_port_tts) & TTS_WRITABLE) == 0 &&
+ retries++ < DTLK_MAX_RETRIES) /* DT ready? */
+ ;
+ if (retries > DTLK_MAX_RETRIES)
+ printk(KERN_ERR "dtlk_write_tts() timeout\n");
+
+ outb_p(ch, dtlk_port_tts); /* output to TTS port */
+ /* the RDY bit goes zero 2-3 usec after writing, and goes
+ 1 again 180-190 usec later. Here, we wait up to 10
+ usec for the RDY bit to go zero. */
+ for (retries = 0; retries < loops_per_jiffy / (100000/HZ); retries++)
+ if ((inb_p(dtlk_port_tts) & TTS_WRITABLE) == 0)
+ break;
+
+#ifdef TRACINGMORE
+ printk(")\n");
+#endif
+ return 0;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
new file mode 100644
index 0000000000..4181bcc1c7
--- /dev/null
+++ b/drivers/char/hangcheck-timer.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * hangcheck-timer.c
+ *
+ * Driver for a little io fencing timer.
+ *
+ * Copyright (C) 2002, 2003 Oracle. All rights reserved.
+ *
+ * Author: Joel Becker <joel.becker@oracle.com>
+ */
+
+/*
+ * The hangcheck-timer driver uses the TSC to catch delays that
+ * jiffies does not notice. A timer is set. When the timer fires, it
+ * checks whether it was delayed and if that delay exceeds a given
+ * margin of error. The hangcheck_tick module parameter takes the timer
+ * duration in seconds. The hangcheck_margin parameter defines the
+ * margin of error, in seconds. The defaults are 180 seconds for the
+ * timer and 60 seconds for the margin of error. IOW, a timer is set
+ * for 180 seconds. When the timer fires, the callback checks the
+ * actual duration that the timer waited. If the duration exceeds the
+ * allotted time and margin (here 180 + 60, or 240 seconds), the machine
+ * is restarted. A healthy machine will have the duration match the
+ * expected timeout very closely.
+ */
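+
+/*
+ * A rough worked example (illustrative numbers): with a 180 second tick
+ * and a 60 second margin, the allowed delay is 240 * 10^9 ns. If the
+ * callback then measures, say, 250 * 10^9 ns since it was last
+ * scheduled, the margin is exceeded and the configured action (task
+ * dump and/or reboot) is taken; anything up to 240 * 10^9 ns passes.
+ */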
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/sysrq.h>
+#include <linux/timer.h>
+#include <linux/hrtimer.h>
+
+#define VERSION_STR "0.9.1"
+
+#define DEFAULT_IOFENCE_MARGIN 60 /* Default fudge factor, in seconds */
+#define DEFAULT_IOFENCE_TICK 180 /* Default timer timeout, in seconds */
+
+static int hangcheck_tick = DEFAULT_IOFENCE_TICK;
+static int hangcheck_margin = DEFAULT_IOFENCE_MARGIN;
+static int hangcheck_reboot; /* Defaults to not reboot */
+static int hangcheck_dump_tasks; /* Defaults to not dumping SysRQ T */
+
+/* options - modular */
+module_param(hangcheck_tick, int, 0);
+MODULE_PARM_DESC(hangcheck_tick, "Timer delay.");
+module_param(hangcheck_margin, int, 0);
+MODULE_PARM_DESC(hangcheck_margin, "If the hangcheck timer has been delayed more than hangcheck_margin seconds, the driver will fire.");
+module_param(hangcheck_reboot, int, 0);
+MODULE_PARM_DESC(hangcheck_reboot, "If nonzero, the machine will reboot when the timer margin is exceeded.");
+module_param(hangcheck_dump_tasks, int, 0);
+MODULE_PARM_DESC(hangcheck_dump_tasks, "If nonzero, the machine will dump the system task state when the timer margin is exceeded.");
+
+MODULE_AUTHOR("Oracle");
+MODULE_DESCRIPTION("Hangcheck-timer detects when the system has gone out to lunch past a certain margin.");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VERSION_STR);
+
+/* options - nonmodular */
+#ifndef MODULE
+
+static int __init hangcheck_parse_tick(char *str)
+{
+ int par;
+ if (get_option(&str,&par))
+ hangcheck_tick = par;
+ return 1;
+}
+
+static int __init hangcheck_parse_margin(char *str)
+{
+ int par;
+ if (get_option(&str,&par))
+ hangcheck_margin = par;
+ return 1;
+}
+
+static int __init hangcheck_parse_reboot(char *str)
+{
+ int par;
+ if (get_option(&str,&par))
+ hangcheck_reboot = par;
+ return 1;
+}
+
+static int __init hangcheck_parse_dump_tasks(char *str)
+{
+ int par;
+ if (get_option(&str,&par))
+ hangcheck_dump_tasks = par;
+ return 1;
+}
+
+__setup("hcheck_tick", hangcheck_parse_tick);
+__setup("hcheck_margin", hangcheck_parse_margin);
+__setup("hcheck_reboot", hangcheck_parse_reboot);
+__setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
+#endif /* not MODULE */
+
+#define TIMER_FREQ 1000000000ULL
+
+/* Last time scheduled */
+static unsigned long long hangcheck_tsc, hangcheck_tsc_margin;
+
+static void hangcheck_fire(struct timer_list *);
+
+static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire);
+
+static void hangcheck_fire(struct timer_list *unused)
+{
+ unsigned long long cur_tsc, tsc_diff;
+
+ cur_tsc = ktime_get_ns();
+
+ if (cur_tsc > hangcheck_tsc)
+ tsc_diff = cur_tsc - hangcheck_tsc;
+ else
+ tsc_diff = (cur_tsc + (~0ULL - hangcheck_tsc)); /* counter wrapped */
+
+ if (tsc_diff > hangcheck_tsc_margin) {
+ if (hangcheck_dump_tasks) {
+ printk(KERN_CRIT "Hangcheck: Task state:\n");
+#ifdef CONFIG_MAGIC_SYSRQ
+ handle_sysrq('t');
+#endif /* CONFIG_MAGIC_SYSRQ */
+ }
+ if (hangcheck_reboot) {
+ printk(KERN_CRIT "Hangcheck: hangcheck is restarting the machine.\n");
+ emergency_restart();
+ } else {
+ printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n");
+ }
+ }
+#if 0
+ /*
+ * Enable to investigate delays in detail
+ */
+ printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n",
+ tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
+#endif
+ mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
+ hangcheck_tsc = ktime_get_ns();
+}
+
+
+static int __init hangcheck_init(void)
+{
+ printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
+ VERSION_STR, hangcheck_tick, hangcheck_margin);
+ hangcheck_tsc_margin =
+ (unsigned long long)hangcheck_margin + hangcheck_tick;
+ hangcheck_tsc_margin *= TIMER_FREQ;
+
+ hangcheck_tsc = ktime_get_ns();
+ mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
+
+ return 0;
+}
+
+
+static void __exit hangcheck_exit(void)
+{
+ del_timer_sync(&hangcheck_ticktock);
+ printk("Hangcheck: Stopped hangcheck timer.\n");
+}
+
+module_init(hangcheck_init);
+module_exit(hangcheck_exit);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
new file mode 100644
index 0000000000..ee71376f17
--- /dev/null
+++ b/drivers/char/hpet.c
@@ -0,0 +1,1047 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel & MS High Precision Event Timer Implementation.
+ *
+ * Copyright (C) 2003 Intel Corporation
+ * Venki Pallipadi
+ * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
+ * Bob Picco <robert.picco@hp.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/major.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/poll.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/sysctl.h>
+#include <linux/wait.h>
+#include <linux/sched/signal.h>
+#include <linux/bcd.h>
+#include <linux/seq_file.h>
+#include <linux/bitops.h>
+#include <linux/compat.h>
+#include <linux/clocksource.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+#include <linux/hpet.h>
+#include <asm/current.h>
+#include <asm/irq.h>
+#include <asm/div64.h>
+
+/*
+ * The High Precision Event Timer driver.
+ * This driver is closely modelled after the rtc.c driver.
+ * See HPET spec revision 1.
+ */
+#define HPET_USER_FREQ (64)
+#define HPET_DRIFT (500)
+
+#define HPET_RANGE_SIZE 1024 /* from HPET spec */
+
+
+/* WARNING -- don't get confused. These macros are never used
+ * to write the (single) counter, and rarely to read it.
+ * They're badly named; to fix, someday.
+ */
+#if BITS_PER_LONG == 64
+#define write_counter(V, MC) writeq(V, MC)
+#define read_counter(MC) readq(MC)
+#else
+#define write_counter(V, MC) writel(V, MC)
+#define read_counter(MC) readl(MC)
+#endif
+
+static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */
+static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
+
+/* This clocksource driver currently only works on ia64 */
+#ifdef CONFIG_IA64
+static void __iomem *hpet_mctr;
+
+static u64 read_hpet(struct clocksource *cs)
+{
+ return (u64)read_counter((void __iomem *)hpet_mctr);
+}
+
+static struct clocksource clocksource_hpet = {
+ .name = "hpet",
+ .rating = 250,
+ .read = read_hpet,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+static struct clocksource *hpet_clocksource;
+#endif
+
+/* A lock for concurrent access by app and isr hpet activity. */
+static DEFINE_SPINLOCK(hpet_lock);
+
+#define HPET_DEV_NAME (7)
+
+struct hpet_dev {
+ struct hpets *hd_hpets;
+ struct hpet __iomem *hd_hpet;
+ struct hpet_timer __iomem *hd_timer;
+ unsigned long hd_ireqfreq;
+ unsigned long hd_irqdata;
+ wait_queue_head_t hd_waitqueue;
+ struct fasync_struct *hd_async_queue;
+ unsigned int hd_flags;
+ unsigned int hd_irq;
+ unsigned int hd_hdwirq;
+ char hd_name[HPET_DEV_NAME];
+};
+
+struct hpets {
+ struct hpets *hp_next;
+ struct hpet __iomem *hp_hpet;
+ unsigned long hp_hpet_phys;
+ struct clocksource *hp_clocksource;
+ unsigned long long hp_tick_freq;
+ unsigned long hp_delta;
+ unsigned int hp_ntimer;
+ unsigned int hp_which;
+ struct hpet_dev hp_dev[];
+};
+
+static struct hpets *hpets;
+
+#define HPET_OPEN 0x0001
+#define HPET_IE 0x0002 /* interrupt enabled */
+#define HPET_PERIODIC 0x0004
+#define HPET_SHARED_IRQ 0x0008
+
+static irqreturn_t hpet_interrupt(int irq, void *data)
+{
+ struct hpet_dev *devp;
+ unsigned long isr;
+
+ devp = data;
+ isr = 1 << (devp - devp->hd_hpets->hp_dev);
+
+ if ((devp->hd_flags & HPET_SHARED_IRQ) &&
+ !(isr & readl(&devp->hd_hpet->hpet_isr)))
+ return IRQ_NONE;
+
+ spin_lock(&hpet_lock);
+ devp->hd_irqdata++;
+
+ /*
+ * For non-periodic timers, increment the accumulator.
+ * This has the effect of treating non-periodic like periodic.
+ */
+ if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
+ unsigned long t, mc, base, k;
+ struct hpet __iomem *hpet = devp->hd_hpet;
+ struct hpets *hpetp = devp->hd_hpets;
+
+ t = devp->hd_ireqfreq;
+ read_counter(&devp->hd_timer->hpet_compare);
+ mc = read_counter(&hpet->hpet_mc);
+ /* The time for the next interrupt would logically be t + m,
+ * however, if we are very unlucky and the interrupt is delayed
+ * for longer than t then we will completely miss the next
+ * interrupt if we set t + m and an application will hang.
+ * Therefore we need to make a more complex computation assuming
+ * that there exists a k for which the following is true:
+ * k * t + base < mc + delta
+ * (k + 1) * t + base > mc + delta
+ * where t is the interval in hpet ticks for the given freq,
+ * base is the theoretical start value 0 < base < t,
+ * mc is the main counter value at the time of the interrupt,
+ * delta is the time it takes to write a value to the
+ * comparator.
+ * k may then be computed as (mc - base + delta) / t .
+ */
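+ /* Illustrative numbers: with t = 1000, mc = 10500 and delta = 20,
+ * base = 500 and k = (10500 - 500 + 20) / 1000 = 10, so the
+ * comparator is programmed to (10 + 1) * 1000 + 500 = 11500, the
+ * first such t-aligned value safely past mc + delta.
+ */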
+ base = mc % t;
+ k = (mc - base + hpetp->hp_delta) / t;
+ write_counter(t * (k + 1) + base,
+ &devp->hd_timer->hpet_compare);
+ }
+
+ if (devp->hd_flags & HPET_SHARED_IRQ)
+ writel(isr, &devp->hd_hpet->hpet_isr);
+ spin_unlock(&hpet_lock);
+
+ wake_up_interruptible(&devp->hd_waitqueue);
+
+ kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN);
+
+ return IRQ_HANDLED;
+}
+
+static void hpet_timer_set_irq(struct hpet_dev *devp)
+{
+ unsigned long v;
+ int irq, gsi;
+ struct hpet_timer __iomem *timer;
+
+ spin_lock_irq(&hpet_lock);
+ if (devp->hd_hdwirq) {
+ spin_unlock_irq(&hpet_lock);
+ return;
+ }
+
+ timer = devp->hd_timer;
+
+ /* we prefer level triggered mode */
+ v = readl(&timer->hpet_config);
+ if (!(v & Tn_INT_TYPE_CNF_MASK)) {
+ v |= Tn_INT_TYPE_CNF_MASK;
+ writel(v, &timer->hpet_config);
+ }
+ spin_unlock_irq(&hpet_lock);
+
+ v = (readq(&timer->hpet_config) & Tn_INT_ROUTE_CAP_MASK) >>
+ Tn_INT_ROUTE_CAP_SHIFT;
+
+ /*
+ * In PIC mode, skip IRQ0-4, IRQ6-9 and IRQ12-15, which are always used
+ * by legacy devices. In IO APIC mode, we skip all the legacy IRQs.
+ */
+ if (acpi_irq_model == ACPI_IRQ_MODEL_PIC)
+ v &= ~0xf3df;
+ else
+ v &= ~0xffff;
+
+ for_each_set_bit(irq, &v, HPET_MAX_IRQ) {
+ if (irq >= nr_irqs) {
+ irq = HPET_MAX_IRQ;
+ break;
+ }
+
+ gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,
+ ACPI_ACTIVE_LOW);
+ if (gsi > 0)
+ break;
+
+ /* FIXME: Setup interrupt source table */
+ }
+
+ if (irq < HPET_MAX_IRQ) {
+ spin_lock_irq(&hpet_lock);
+ v = readl(&timer->hpet_config);
+ v |= irq << Tn_INT_ROUTE_CNF_SHIFT;
+ writel(v, &timer->hpet_config);
+ devp->hd_hdwirq = gsi;
+ spin_unlock_irq(&hpet_lock);
+ }
+ return;
+}
+
+static int hpet_open(struct inode *inode, struct file *file)
+{
+ struct hpet_dev *devp;
+ struct hpets *hpetp;
+ int i;
+
+ if (file->f_mode & FMODE_WRITE)
+ return -EINVAL;
+
+ mutex_lock(&hpet_mutex);
+ spin_lock_irq(&hpet_lock);
+
+ for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
+ for (i = 0; i < hpetp->hp_ntimer; i++)
+ if (hpetp->hp_dev[i].hd_flags & HPET_OPEN) {
+ continue;
+ } else {
+ devp = &hpetp->hp_dev[i];
+ break;
+ }
+
+ if (!devp) {
+ spin_unlock_irq(&hpet_lock);
+ mutex_unlock(&hpet_mutex);
+ return -EBUSY;
+ }
+
+ file->private_data = devp;
+ devp->hd_irqdata = 0;
+ devp->hd_flags |= HPET_OPEN;
+ spin_unlock_irq(&hpet_lock);
+ mutex_unlock(&hpet_mutex);
+
+ hpet_timer_set_irq(devp);
+
+ return 0;
+}
+
+static ssize_t
+hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long data;
+ ssize_t retval;
+ struct hpet_dev *devp;
+
+ devp = file->private_data;
+ if (!devp->hd_ireqfreq)
+ return -EIO;
+
+ if (count < sizeof(unsigned long))
+ return -EINVAL;
+
+ add_wait_queue(&devp->hd_waitqueue, &wait);
+
+ for ( ; ; ) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ spin_lock_irq(&hpet_lock);
+ data = devp->hd_irqdata;
+ devp->hd_irqdata = 0;
+ spin_unlock_irq(&hpet_lock);
+
+ if (data) {
+ break;
+ } else if (file->f_flags & O_NONBLOCK) {
+ retval = -EAGAIN;
+ goto out;
+ } else if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ goto out;
+ }
+ schedule();
+ }
+
+ retval = put_user(data, (unsigned long __user *)buf);
+ if (!retval)
+ retval = sizeof(unsigned long);
+out:
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&devp->hd_waitqueue, &wait);
+
+ return retval;
+}
+
+static __poll_t hpet_poll(struct file *file, poll_table * wait)
+{
+ unsigned long v;
+ struct hpet_dev *devp;
+
+ devp = file->private_data;
+
+ if (!devp->hd_ireqfreq)
+ return 0;
+
+ poll_wait(file, &devp->hd_waitqueue, wait);
+
+ spin_lock_irq(&hpet_lock);
+ v = devp->hd_irqdata;
+ spin_unlock_irq(&hpet_lock);
+
+ if (v != 0)
+ return EPOLLIN | EPOLLRDNORM;
+
+ return 0;
+}
+
+#ifdef CONFIG_HPET_MMAP
+#ifdef CONFIG_HPET_MMAP_DEFAULT
+static int hpet_mmap_enabled = 1;
+#else
+static int hpet_mmap_enabled = 0;
+#endif
+
+static __init int hpet_mmap_enable(char *str)
+{
+ get_option(&str, &hpet_mmap_enabled);
+ pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
+ return 1;
+}
+__setup("hpet_mmap=", hpet_mmap_enable);
+
+static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct hpet_dev *devp;
+ unsigned long addr;
+
+ if (!hpet_mmap_enabled)
+ return -EACCES;
+
+ devp = file->private_data;
+ addr = devp->hd_hpets->hp_hpet_phys;
+
+ if (addr & (PAGE_SIZE - 1))
+ return -ENOSYS;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ return vm_iomap_memory(vma, addr, PAGE_SIZE);
+}
+#else
+static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ return -ENOSYS;
+}
+#endif
+
+static int hpet_fasync(int fd, struct file *file, int on)
+{
+ struct hpet_dev *devp;
+
+ devp = file->private_data;
+
+ if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0)
+ return 0;
+ else
+ return -EIO;
+}
+
+static int hpet_release(struct inode *inode, struct file *file)
+{
+ struct hpet_dev *devp;
+ struct hpet_timer __iomem *timer;
+ int irq = 0;
+
+ devp = file->private_data;
+ timer = devp->hd_timer;
+
+ spin_lock_irq(&hpet_lock);
+
+ writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
+ &timer->hpet_config);
+
+ irq = devp->hd_irq;
+ devp->hd_irq = 0;
+
+ devp->hd_ireqfreq = 0;
+
+ if (devp->hd_flags & HPET_PERIODIC
+ && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
+ unsigned long v;
+
+ v = readq(&timer->hpet_config);
+ v ^= Tn_TYPE_CNF_MASK;
+ writeq(v, &timer->hpet_config);
+ }
+
+ devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC);
+ spin_unlock_irq(&hpet_lock);
+
+ if (irq)
+ free_irq(irq, devp);
+
+ file->private_data = NULL;
+ return 0;
+}
+
+static int hpet_ioctl_ieon(struct hpet_dev *devp)
+{
+ struct hpet_timer __iomem *timer;
+ struct hpet __iomem *hpet;
+ struct hpets *hpetp;
+ int irq;
+ unsigned long g, v, t, m;
+ unsigned long flags, isr;
+
+ timer = devp->hd_timer;
+ hpet = devp->hd_hpet;
+ hpetp = devp->hd_hpets;
+
+ if (!devp->hd_ireqfreq)
+ return -EIO;
+
+ spin_lock_irq(&hpet_lock);
+
+ if (devp->hd_flags & HPET_IE) {
+ spin_unlock_irq(&hpet_lock);
+ return -EBUSY;
+ }
+
+ devp->hd_flags |= HPET_IE;
+
+ if (readl(&timer->hpet_config) & Tn_INT_TYPE_CNF_MASK)
+ devp->hd_flags |= HPET_SHARED_IRQ;
+ spin_unlock_irq(&hpet_lock);
+
+ irq = devp->hd_hdwirq;
+
+ if (irq) {
+ unsigned long irq_flags;
+
+ if (devp->hd_flags & HPET_SHARED_IRQ) {
+ /*
+ * To prevent the interrupt handler from seeing an
+ * unwanted interrupt status bit, program the timer
+ * so that it will not fire in the near future ...
+ */
+ writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
+ &timer->hpet_config);
+ write_counter(read_counter(&hpet->hpet_mc),
+ &timer->hpet_compare);
+ /* ... and clear any left-over status. */
+ isr = 1 << (devp - devp->hd_hpets->hp_dev);
+ writel(isr, &hpet->hpet_isr);
+ }
+
+ sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
+ irq_flags = devp->hd_flags & HPET_SHARED_IRQ ? IRQF_SHARED : 0;
+ if (request_irq(irq, hpet_interrupt, irq_flags,
+ devp->hd_name, (void *)devp)) {
+ printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
+ irq = 0;
+ }
+ }
+
+ if (irq == 0) {
+ spin_lock_irq(&hpet_lock);
+ devp->hd_flags ^= HPET_IE;
+ spin_unlock_irq(&hpet_lock);
+ return -EIO;
+ }
+
+ devp->hd_irq = irq;
+ t = devp->hd_ireqfreq;
+ v = readq(&timer->hpet_config);
+
+ /* 64-bit comparators are not yet supported through the ioctls,
+ * so force this into 32-bit mode if it supports both modes
+ */
+ g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK;
+
+ if (devp->hd_flags & HPET_PERIODIC) {
+ g |= Tn_TYPE_CNF_MASK;
+ v |= Tn_TYPE_CNF_MASK | Tn_VAL_SET_CNF_MASK;
+ writeq(v, &timer->hpet_config);
+ local_irq_save(flags);
+
+ /*
+ * NOTE: First we modify the hidden accumulator
+ * register supported by periodic-capable comparators.
+ * We never want to modify the (single) counter; that
+ * would affect all the comparators. The value written
+ * is the counter value when the first interrupt is due.
+ */
+ m = read_counter(&hpet->hpet_mc);
+ write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
+ /*
+ * Then we modify the comparator, indicating the period
+ * for subsequent interrupt.
+ */
+ write_counter(t, &timer->hpet_compare);
+ } else {
+ local_irq_save(flags);
+ m = read_counter(&hpet->hpet_mc);
+ write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
+ }
+
+ if (devp->hd_flags & HPET_SHARED_IRQ) {
+ isr = 1 << (devp - devp->hd_hpets->hp_dev);
+ writel(isr, &hpet->hpet_isr);
+ }
+ writeq(g, &timer->hpet_config);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+/* converts Hz to number of timer ticks */
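+/* e.g. with a (typical) 14318180 Hz counter and dis = 1000 Hz this
+ * rounds to (14318180 + 500) / 1000 = 14318 ticks per period */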
+static inline unsigned long hpet_time_div(struct hpets *hpets,
+ unsigned long dis)
+{
+ unsigned long long m;
+
+ m = hpets->hp_tick_freq + (dis >> 1);
+ return div64_ul(m, dis);
+}
+
+static int
+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
+ struct hpet_info *info)
+{
+ struct hpet_timer __iomem *timer;
+ struct hpets *hpetp;
+ int err;
+ unsigned long v;
+
+ switch (cmd) {
+ case HPET_IE_OFF:
+ case HPET_INFO:
+ case HPET_EPI:
+ case HPET_DPI:
+ case HPET_IRQFREQ:
+ timer = devp->hd_timer;
+ hpetp = devp->hd_hpets;
+ break;
+ case HPET_IE_ON:
+ return hpet_ioctl_ieon(devp);
+ default:
+ return -EINVAL;
+ }
+
+ err = 0;
+
+ switch (cmd) {
+ case HPET_IE_OFF:
+ if ((devp->hd_flags & HPET_IE) == 0)
+ break;
+ v = readq(&timer->hpet_config);
+ v &= ~Tn_INT_ENB_CNF_MASK;
+ writeq(v, &timer->hpet_config);
+ if (devp->hd_irq) {
+ free_irq(devp->hd_irq, devp);
+ devp->hd_irq = 0;
+ }
+ devp->hd_flags ^= HPET_IE;
+ break;
+ case HPET_INFO:
+ {
+ memset(info, 0, sizeof(*info));
+ if (devp->hd_ireqfreq)
+ info->hi_ireqfreq =
+ hpet_time_div(hpetp, devp->hd_ireqfreq);
+ info->hi_flags =
+ readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
+ info->hi_hpet = hpetp->hp_which;
+ info->hi_timer = devp - hpetp->hp_dev;
+ break;
+ }
+ case HPET_EPI:
+ v = readq(&timer->hpet_config);
+ if ((v & Tn_PER_INT_CAP_MASK) == 0) {
+ err = -ENXIO;
+ break;
+ }
+ devp->hd_flags |= HPET_PERIODIC;
+ break;
+ case HPET_DPI:
+ v = readq(&timer->hpet_config);
+ if ((v & Tn_PER_INT_CAP_MASK) == 0) {
+ err = -ENXIO;
+ break;
+ }
+ if (devp->hd_flags & HPET_PERIODIC &&
+ readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
+ v = readq(&timer->hpet_config);
+ v ^= Tn_TYPE_CNF_MASK;
+ writeq(v, &timer->hpet_config);
+ }
+ devp->hd_flags &= ~HPET_PERIODIC;
+ break;
+ case HPET_IRQFREQ:
+ if ((arg > hpet_max_freq) &&
+ !capable(CAP_SYS_RESOURCE)) {
+ err = -EACCES;
+ break;
+ }
+
+ if (!arg) {
+ err = -EINVAL;
+ break;
+ }
+
+ devp->hd_ireqfreq = hpet_time_div(hpetp, arg);
+ }
+
+ return err;
+}
+
+static long
+hpet_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct hpet_info info;
+ int err;
+
+ mutex_lock(&hpet_mutex);
+ err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
+ mutex_unlock(&hpet_mutex);
+
+ if ((cmd == HPET_INFO) && !err &&
+ (copy_to_user((void __user *)arg, &info, sizeof(info))))
+ err = -EFAULT;
+
+ return err;
+}
+
+#ifdef CONFIG_COMPAT
+struct compat_hpet_info {
+ compat_ulong_t hi_ireqfreq; /* Hz */
+ compat_ulong_t hi_flags; /* information */
+ unsigned short hi_hpet;
+ unsigned short hi_timer;
+};
+
+static long
+hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct hpet_info info;
+ int err;
+
+ mutex_lock(&hpet_mutex);
+ err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
+ mutex_unlock(&hpet_mutex);
+
+ if ((cmd == HPET_INFO) && !err) {
+ struct compat_hpet_info __user *u = compat_ptr(arg);
+ if (put_user(info.hi_ireqfreq, &u->hi_ireqfreq) ||
+ put_user(info.hi_flags, &u->hi_flags) ||
+ put_user(info.hi_hpet, &u->hi_hpet) ||
+ put_user(info.hi_timer, &u->hi_timer))
+ err = -EFAULT;
+ }
+
+ return err;
+}
+#endif
+
+static const struct file_operations hpet_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = hpet_read,
+ .poll = hpet_poll,
+ .unlocked_ioctl = hpet_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = hpet_compat_ioctl,
+#endif
+ .open = hpet_open,
+ .release = hpet_release,
+ .fasync = hpet_fasync,
+ .mmap = hpet_mmap,
+};
+
+static int hpet_is_known(struct hpet_data *hdp)
+{
+ struct hpets *hpetp;
+
+ for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
+ if (hpetp->hp_hpet_phys == hdp->hd_phys_address)
+ return 1;
+
+ return 0;
+}
+
+static struct ctl_table hpet_table[] = {
+ {
+ .procname = "max-user-freq",
+ .data = &hpet_max_freq,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {}
+};
+
+static struct ctl_table_header *sysctl_header;
+
+/*
+ * Adjustment used when arming the timer with its initial
+ * conditions, i.e. the main counter ticks that elapse before
+ * interrupts are enabled.
+ */
+#define TICK_CALIBRATE (1000UL)
+
+static unsigned long __hpet_calibrate(struct hpets *hpetp)
+{
+ struct hpet_timer __iomem *timer = NULL;
+ unsigned long t, m, count, i, flags, start;
+ struct hpet_dev *devp;
+ int j;
+ struct hpet __iomem *hpet;
+
+ for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++)
+ if ((devp->hd_flags & HPET_OPEN) == 0) {
+ timer = devp->hd_timer;
+ break;
+ }
+
+ if (!timer)
+ return 0;
+
+ hpet = hpetp->hp_hpet;
+ t = read_counter(&timer->hpet_compare);
+
+ i = 0;
+ count = hpet_time_div(hpetp, TICK_CALIBRATE);
+
+ local_irq_save(flags);
+
+ start = read_counter(&hpet->hpet_mc);
+
+ do {
+ m = read_counter(&hpet->hpet_mc);
+ write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
+ } while (i++, (m - start) < count);
+
+ local_irq_restore(flags);
+
+ return (m - start) / i;
+}
+
+static unsigned long hpet_calibrate(struct hpets *hpetp)
+{
+ unsigned long ret = ~0UL;
+ unsigned long tmp;
+
+ /*
+ * Try to calibrate until the return value settles to a stable, small
+ * value. If an SMI interrupts the calibration loop, that pass returns
+ * a large value; iterating until the value stops shrinking avoids its
+ * impact.
+ */
+ for ( ; ; ) {
+ tmp = __hpet_calibrate(hpetp);
+ if (ret <= tmp)
+ break;
+ ret = tmp;
+ }
+
+ return ret;
+}
+
+int hpet_alloc(struct hpet_data *hdp)
+{
+ u64 cap, mcfg;
+ struct hpet_dev *devp;
+ u32 i, ntimer;
+ struct hpets *hpetp;
+ struct hpet __iomem *hpet;
+ static struct hpets *last;
+ unsigned long period;
+ unsigned long long temp;
+ u32 remainder;
+
+ /*
+ * hpet_alloc can be called by platform dependent code.
+ * If platform dependent code has allocated the hpet that
+ * ACPI has also reported, then we catch it here.
+ */
+ if (hpet_is_known(hdp)) {
+ printk(KERN_DEBUG "%s: duplicate HPET ignored\n",
+ __func__);
+ return 0;
+ }
+
+ hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs),
+ GFP_KERNEL);
+
+ if (!hpetp)
+ return -ENOMEM;
+
+ hpetp->hp_which = hpet_nhpet++;
+ hpetp->hp_hpet = hdp->hd_address;
+ hpetp->hp_hpet_phys = hdp->hd_phys_address;
+
+ hpetp->hp_ntimer = hdp->hd_nirqs;
+
+ for (i = 0; i < hdp->hd_nirqs; i++)
+ hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
+
+ hpet = hpetp->hp_hpet;
+
+ cap = readq(&hpet->hpet_cap);
+
+ ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1;
+
+ if (hpetp->hp_ntimer != ntimer) {
+ printk(KERN_WARNING "hpet: number irqs doesn't agree"
+ " with number of timers\n");
+ kfree(hpetp);
+ return -ENODEV;
+ }
+
+ if (last)
+ last->hp_next = hpetp;
+ else
+ hpets = hpetp;
+
+ last = hpetp;
+
+ period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >>
+ HPET_COUNTER_CLK_PERIOD_SHIFT; /* fs, 10^-15 */
+ temp = 1000000000000000uLL; /* 10^15 femtoseconds per second */
+ temp += period >> 1; /* round */
+ do_div(temp, period);
+ hpetp->hp_tick_freq = temp; /* ticks per second */
+
+ printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
+ hpetp->hp_which, hdp->hd_phys_address,
+ hpetp->hp_ntimer > 1 ? "s" : "");
+ for (i = 0; i < hpetp->hp_ntimer; i++)
+ printk(KERN_CONT "%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
+ printk(KERN_CONT "\n");
+
+ temp = hpetp->hp_tick_freq;
+ remainder = do_div(temp, 1000000);
+ printk(KERN_INFO
+ "hpet%u: %u comparators, %d-bit %u.%06u MHz counter\n",
+ hpetp->hp_which, hpetp->hp_ntimer,
+ cap & HPET_COUNTER_SIZE_MASK ? 64 : 32,
+ (unsigned) temp, remainder);
+
+ mcfg = readq(&hpet->hpet_config);
+ if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) {
+ write_counter(0L, &hpet->hpet_mc);
+ mcfg |= HPET_ENABLE_CNF_MASK;
+ writeq(mcfg, &hpet->hpet_config);
+ }
+
+ for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; i++, devp++) {
+ struct hpet_timer __iomem *timer;
+
+ timer = &hpet->hpet_timers[devp - hpetp->hp_dev];
+
+ devp->hd_hpets = hpetp;
+ devp->hd_hpet = hpet;
+ devp->hd_timer = timer;
+
+ /*
+ * If the timer was reserved by platform code,
+ * then make timer unavailable for opens.
+ */
+ if (hdp->hd_state & (1 << i)) {
+ devp->hd_flags = HPET_OPEN;
+ continue;
+ }
+
+ init_waitqueue_head(&devp->hd_waitqueue);
+ }
+
+ hpetp->hp_delta = hpet_calibrate(hpetp);
+
+/* This clocksource driver currently only works on ia64 */
+#ifdef CONFIG_IA64
+ if (!hpet_clocksource) {
+ hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
+ clocksource_hpet.archdata.fsys_mmio = hpet_mctr;
+ clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq);
+ hpetp->hp_clocksource = &clocksource_hpet;
+ hpet_clocksource = &clocksource_hpet;
+ }
+#endif
+
+ return 0;
+}
+
+static acpi_status hpet_resources(struct acpi_resource *res, void *data)
+{
+ struct hpet_data *hdp;
+ acpi_status status;
+ struct acpi_resource_address64 addr;
+
+ hdp = data;
+
+ status = acpi_resource_to_address64(res, &addr);
+
+ if (ACPI_SUCCESS(status)) {
+ hdp->hd_phys_address = addr.address.minimum;
+ hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length);
+ if (!hdp->hd_address)
+ return AE_ERROR;
+
+ if (hpet_is_known(hdp)) {
+ iounmap(hdp->hd_address);
+ return AE_ALREADY_EXISTS;
+ }
+ } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
+ struct acpi_resource_fixed_memory32 *fixmem32;
+
+ fixmem32 = &res->data.fixed_memory32;
+
+ hdp->hd_phys_address = fixmem32->address;
+ hdp->hd_address = ioremap(fixmem32->address,
+ HPET_RANGE_SIZE);
+ if (!hdp->hd_address)
+ return AE_ERROR;
+
+ if (hpet_is_known(hdp)) {
+ iounmap(hdp->hd_address);
+ return AE_ALREADY_EXISTS;
+ }
+ } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
+ struct acpi_resource_extended_irq *irqp;
+ int i, irq;
+
+ irqp = &res->data.extended_irq;
+
+ for (i = 0; i < irqp->interrupt_count; i++) {
+ if (hdp->hd_nirqs >= HPET_MAX_TIMERS)
+ break;
+
+ irq = acpi_register_gsi(NULL, irqp->interrupts[i],
+ irqp->triggering,
+ irqp->polarity);
+ if (irq < 0)
+ return AE_ERROR;
+
+ hdp->hd_irq[hdp->hd_nirqs] = irq;
+ hdp->hd_nirqs++;
+ }
+ }
+
+ return AE_OK;
+}
+
+static int hpet_acpi_add(struct acpi_device *device)
+{
+ acpi_status result;
+ struct hpet_data data;
+
+ memset(&data, 0, sizeof(data));
+
+ result =
+ acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+ hpet_resources, &data);
+
+ if (ACPI_FAILURE(result))
+ return -ENODEV;
+
+ if (!data.hd_address || !data.hd_nirqs) {
+ if (data.hd_address)
+ iounmap(data.hd_address);
+ printk("%s: no address or irqs in _CRS\n", __func__);
+ return -ENODEV;
+ }
+
+ return hpet_alloc(&data);
+}
+
+static const struct acpi_device_id hpet_device_ids[] = {
+ {"PNP0103", 0},
+ {"", 0},
+};
+
+static struct acpi_driver hpet_acpi_driver = {
+ .name = "hpet",
+ .ids = hpet_device_ids,
+ .ops = {
+ .add = hpet_acpi_add,
+ },
+};
+
+static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };
+
+static int __init hpet_init(void)
+{
+ int result;
+
+ result = misc_register(&hpet_misc);
+ if (result < 0)
+ return -ENODEV;
+
+ sysctl_header = register_sysctl("dev/hpet", hpet_table);
+
+ result = acpi_bus_register_driver(&hpet_acpi_driver);
+ if (result < 0) {
+ if (sysctl_header)
+ unregister_sysctl_table(sysctl_header);
+ misc_deregister(&hpet_misc);
+ return result;
+ }
+
+ return 0;
+}
+device_initcall(hpet_init);
+
+/*
+MODULE_AUTHOR("Bob Picco <Robert.Picco@hp.com>");
+MODULE_LICENSE("GPL");
+*/
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
new file mode 100644
index 0000000000..8de74dcfa1
--- /dev/null
+++ b/drivers/char/hw_random/Kconfig
@@ -0,0 +1,591 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Hardware Random Number Generator (RNG) configuration
+#
+
+menuconfig HW_RANDOM
+ tristate "Hardware Random Number Generator Core support"
+ default m
+ help
+ Hardware Random Number Generator Core infrastructure.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rng-core. This provides a device
+ that's usually called /dev/hwrng, and which exposes one
+ of possibly several hardware random number generators.
+
+ These hardware random number generators do feed into the
+ kernel's random number generator entropy pool.
+
+ If unsure, say Y.
+
+if HW_RANDOM
+
+config HW_RANDOM_TIMERIOMEM
+ tristate "Timer IOMEM HW Random Number Generator support"
+ depends on HAS_IOMEM
+ help
+ This driver provides kernel-side support for a generic Random
+ Number Generator used by reading a 'dumb' iomem address that
+ is to be read no faster than, for example, once a second;
+ the default FPGA bitstream on the TS-7800 has such functionality.
+
+ To compile this driver as a module, choose M here: the
+ module will be called timeriomem-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_INTEL
+ tristate "Intel HW Random Number Generator support"
+ depends on (X86 || IA64 || COMPILE_TEST) && PCI
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Intel i8xx-based motherboards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called intel-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_AMD
+ tristate "AMD HW Random Number Generator support"
+ depends on (X86 || PPC_MAPLE || COMPILE_TEST)
+ depends on PCI && HAS_IOPORT_MAP
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on AMD 76x-based motherboards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called amd-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_ATMEL
+ tristate "Atmel Random Number Generator support"
+ depends on (ARCH_AT91 || COMPILE_TEST)
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Atmel AT91 devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called atmel-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_BA431
+ tristate "Silex Insight BA431 Random Number Generator support"
+ depends on HAS_IOMEM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware based on Silex Insight BA431 IP.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ba431-rng.
+
+config HW_RANDOM_BCM2835
+ tristate "Broadcom BCM2835/BCM63xx Random Number Generator support"
+ depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \
+ ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on the Broadcom BCM2835 and BCM63xx SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bcm2835-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_IPROC_RNG200
+ tristate "Broadcom iProc/STB RNG200 support"
+ depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the RNG200
+ hardware found on the Broadcom iProc and STB SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called iproc-rng200.
+
+ If unsure, say Y.
+
+config HW_RANDOM_GEODE
+ tristate "AMD Geode HW Random Number Generator support"
+ depends on (X86_32 || COMPILE_TEST)
+ depends on PCI
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on the AMD Geode LX.
+
+ To compile this driver as a module, choose M here: the
+ module will be called geode-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_N2RNG
+ tristate "Niagara2 Random Number Generator support"
+ depends on SPARC64
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Niagara2 cpus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called n2-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_VIA
+ tristate "VIA HW Random Number Generator support"
+ depends on X86
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on VIA based motherboards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called via-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_IXP4XX
+ tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support"
+ depends on ARCH_IXP4XX || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Pseudo-Random
+ Number Generator hardware found on the Intel IXP45x/46x NPU.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ixp4xx-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_OMAP
+ tristate "OMAP Random Number Generator support"
+ depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU || ARCH_K3 || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on OMAP16xx, OMAP2/3/4/5, AM33xx/AM43xx
+ multimedia processors, and Marvell Armada 7k/8k SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called omap-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_OMAP3_ROM
+ tristate "OMAP3 ROM Random Number Generator support"
+ depends on ARCH_OMAP3 || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on OMAP34xx processors.
+
+ To compile this driver as a module, choose M here: the
+ module will be called omap3-rom-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_OCTEON
+ tristate "Octeon Random Number Generator support"
+ depends on CAVIUM_OCTEON_SOC
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Octeon processors.
+
+ To compile this driver as a module, choose M here: the
+ module will be called octeon-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_PASEMI
+ tristate "PA Semi HW Random Number Generator support"
+ depends on PPC_PASEMI || (PPC && COMPILE_TEST)
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on PA Semi PWRficient SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pasemi-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_VIRTIO
+ tristate "VirtIO Random Number Generator support"
+ depends on VIRTIO
+ help
+ This driver provides kernel-side support for the virtual Random Number
+ Generator hardware.
+
+ To compile this driver as a module, choose M here: the
+ module will be called virtio-rng. If unsure, say N.
+
+config HW_RANDOM_MXC_RNGA
+ tristate "Freescale i.MX RNGA Random Number Generator"
+ depends on SOC_IMX31 || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Freescale i.MX processors.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mxc-rnga.
+
+ If unsure, say Y.
+
+config HW_RANDOM_IMX_RNGC
+ tristate "Freescale i.MX RNGC Random Number Generator"
+ depends on HAS_IOMEM
+ depends on SOC_IMX25 || SOC_IMX6SL || SOC_IMX6SLL || SOC_IMX6UL || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator Version C hardware found on some Freescale i.MX
+ processors. Version B is also supported by this driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx-rngc.
+
+ If unsure, say Y.
+
+config HW_RANDOM_INGENIC_RNG
+ tristate "Ingenic Random Number Generator support"
+ depends on MACH_JZ4780 || MACH_X1000 || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number Generator
+ hardware found in the Ingenic JZ4780 and X1000 SoCs. The MIPS Creator
+ CI20 uses the JZ4780 SoC, and the YSH & ATIL CU1000-Neo uses the X1000 SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ingenic-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_INGENIC_TRNG
+ tristate "Ingenic True Random Number Generator support"
+ depends on MACH_X1830 || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the True Random Number Generator
+ hardware found in the Ingenic X1830 SoC. The YSH & ATIL CU1830-Neo uses the X1830 SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ingenic-trng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_NOMADIK
+ tristate "ST-Ericsson Nomadik Random Number Generator support"
+ depends on ARCH_NOMADIK || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on ST-Ericsson SoCs (8815 and 8500).
+
+ To compile this driver as a module, choose M here: the
+ module will be called nomadik-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_PSERIES
+ tristate "pSeries HW Random Number Generator support"
+ depends on PPC64 && IBMVIO
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on POWER7+ machines and above.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pseries-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_POWERNV
+ tristate "PowerNV Random Number Generator support"
+ depends on PPC_POWERNV
+ default HW_RANDOM
+ help
+ This is the driver for the Random Number Generator hardware found
+ in POWER7+ and later machines on the PowerNV platform.
+
+ To compile this driver as a module, choose M here: the
+ module will be called powernv-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_HISI
+ tristate "Hisilicon Random Number Generator support"
+ depends on ARCH_HISI || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Hisilicon Hip04 and Hip05 SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hisi-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_HISTB
+ tristate "Hisilicon STB Random Number Generator support"
+ depends on ARCH_HISI || COMPILE_TEST
+ default ARCH_HISI
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Hisilicon Hi37xx SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called histb-rng.
+
+config HW_RANDOM_ST
+ tristate "ST Microelectronics HW Random Number Generator support"
+ depends on ARCH_STI || COMPILE_TEST
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on STi series of SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called st-rng.
+
+config HW_RANDOM_XGENE
+ tristate "APM X-Gene True Random Number Generator (TRNG) support"
+ depends on ARCH_XGENE || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on APM X-Gene SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xgene_rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_STM32
+ tristate "STMicroelectronics STM32 random number generator"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on HAS_IOMEM
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on STM32 microcontrollers.
+
+ To compile this driver as a module, choose M here: the
+ module will be called stm32-rng.
+
+ If unsure, say N.
+
+config HW_RANDOM_PIC32
+ tristate "Microchip PIC32 Random Number Generator support"
+ depends on MACH_PIC32 || COMPILE_TEST
+ default HW_RANDOM if MACH_PIC32
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on a PIC32.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pic32-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_POLARFIRE_SOC
+ tristate "Microchip PolarFire SoC Random Number Generator support"
+ depends on HW_RANDOM && POLARFIRE_SOC_SYS_CTRL
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on PolarFire SoC (MPFS).
+
+ To compile this driver as a module, choose M here. The
+ module will be called mfps_rng.
+
+ If unsure, say N.
+
+
+config HW_RANDOM_MESON
+ tristate "Amlogic Meson Random Number Generator support"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ default HW_RANDOM if ARCH_MESON
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Amlogic Meson SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called meson-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_CAVIUM
+ tristate "Cavium ThunderX Random Number Generator support"
+ depends on PCI
+ depends on ARCH_THUNDER || (ARM64 && COMPILE_TEST)
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Cavium SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cavium_rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_MTK
+ tristate "Mediatek Random Number Generator support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ default HW_RANDOM if ARCH_MEDIATEK
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Mediatek SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mtk-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_S390
+ tristate "S390 True Random Number Generator support"
+ depends on S390
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the True
+ Random Number Generator available as CPACF extension
+ on modern s390 hardware platforms.
+
+ To compile this driver as a module, choose M here: the
+ module will be called s390-trng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_EXYNOS
+ tristate "Samsung Exynos True Random Number Generator support"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ depends on HAS_IOMEM
+ default HW_RANDOM if ARCH_EXYNOS
+ help
+ This driver provides support for the True Random Number
+ Generator available in Exynos SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called exynos-trng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_OPTEE
+ tristate "OP-TEE based Random Number Generator support"
+ depends on OPTEE
+ default HW_RANDOM
+ help
+ This driver provides support for OP-TEE based Random Number
+ Generator on ARM SoCs where hardware entropy sources are not
+ accessible to normal world (Linux).
+
+ To compile this driver as a module, choose M here: the module
+ will be called optee-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_NPCM
+ tristate "NPCM Random Number Generator support"
+ depends on ARCH_NPCM || COMPILE_TEST
+ depends on HAS_IOMEM
+ default HW_RANDOM if ARCH_NPCM
+ help
+ This driver provides support for the Random Number
+ Generator hardware available in Nuvoton NPCM SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called npcm-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_KEYSTONE
+ depends on ARCH_KEYSTONE || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ default HW_RANDOM
+ tristate "TI Keystone NETCP SA Hardware random number generator"
+ help
+ This option enables Keystone's hardware random generator.
+
+config HW_RANDOM_CCTRNG
+ tristate "Arm CryptoCell True Random Number Generator support"
+ depends on HAS_IOMEM && OF
+ help
+ Say 'Y' to enable the True Random Number Generator driver for the
+ Arm TrustZone CryptoCell family of processors.
+ Currently the CryptoCell 713 and 703 are supported.
+ The driver is supported only on SoCs where a Trusted Execution
+ Environment is not in use.
+ Choose 'M' to compile this driver as a module. The module
+ will be called cctrng.
+ If unsure, say 'N'.
+
+config HW_RANDOM_XIPHERA
+ tristate "Xiphera FPGA based True Random Number Generator support"
+ depends on HAS_IOMEM
+ help
+ This driver provides kernel-side support for Xiphera True Random
+ Number Generator Intellectual Property Core.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xiphera-trng.
+
+config HW_RANDOM_ARM_SMCCC_TRNG
+ tristate "Arm SMCCC TRNG firmware interface support"
+ depends on HAVE_ARM_SMCCC_DISCOVERY
+ default HW_RANDOM
+ help
+ Say 'Y' to enable the True Random Number Generator driver using
+ the Arm SMCCC TRNG firmware interface. This reads entropy from
+ higher exception levels (firmware, hypervisor). Uses SMCCC for
+ communicating with the firmware:
+ https://developer.arm.com/documentation/den0098/latest/
+
+ To compile this driver as a module, choose M here: the
+ module will be called arm_smccc_trng.
+
+config HW_RANDOM_CN10K
+ tristate "Marvell CN10K Random Number Generator support"
+ depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST))
+ default HW_RANDOM
+ help
+ This driver provides support for the True Random Number
+ generator available in Marvell CN10K SoCs.
+
+ To compile this driver as a module, choose M here.
+ The module will be called cn10k_rng. If unsure, say Y.
+
+config HW_RANDOM_JH7110
+ tristate "StarFive JH7110 Random Number Generator support"
+ depends on SOC_STARFIVE || COMPILE_TEST
+ help
+ This driver provides support for the True Random Number
+ Generator in StarFive JH7110 SoCs.
+
+ To compile this driver as a module, choose M here.
+ The module will be called jh7110-trng.
+
+endif # HW_RANDOM
+
+config UML_RANDOM
+ depends on UML
+ select HW_RANDOM
+ tristate "UML Random Number Generator support"
+ help
+ This option enables UML's "hardware" random number generator. It
+ attaches itself to the host's /dev/random, supplying as much entropy
+ as the host has, rather than the small amount the UML gets from its
+ own drivers. It registers itself as an rng-core driver, thus providing
+ a device which is usually called /dev/hwrng. This hardware random
+ number generator does feed into the kernel's random number generator
+ entropy pool.
+
+ If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
new file mode 100644
index 0000000000..32549a1186
--- /dev/null
+++ b/drivers/char/hw_random/Makefile
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for HW Random Number Generator (RNG) device drivers.
+#
+
+obj-$(CONFIG_HW_RANDOM) += rng-core.o
+rng-core-y := core.o
+obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
+obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
+obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
+obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
+obj-$(CONFIG_HW_RANDOM_BA431) += ba431-rng.o
+obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
+obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
+n2-rng-y := n2-drv.o n2-asm.o
+obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
+obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-trng.o
+obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
+obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
+obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
+obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
+obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
+obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
+obj-$(CONFIG_HW_RANDOM_IMX_RNGC) += imx-rngc.o
+obj-$(CONFIG_HW_RANDOM_INGENIC_RNG) += ingenic-rng.o
+obj-$(CONFIG_HW_RANDOM_INGENIC_TRNG) += ingenic-trng.o
+obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
+obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
+obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
+obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
+obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
+obj-$(CONFIG_HW_RANDOM_HISTB) += histb-rng.o
+obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
+obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
+obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
+obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
+obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
+obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
+obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
+obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
+obj-$(CONFIG_HW_RANDOM_MTK) += mtk-rng.o
+obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
+obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o
+obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o
+obj-$(CONFIG_HW_RANDOM_NPCM) += npcm-rng.o
+obj-$(CONFIG_HW_RANDOM_CCTRNG) += cctrng.o
+obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o
+obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o
+obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o
+obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o
+obj-$(CONFIG_HW_RANDOM_JH7110) += jh7110-trng.o
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
new file mode 100644
index 0000000000..86162a1368
--- /dev/null
+++ b/drivers/char/hw_random/amd-rng.c
@@ -0,0 +1,220 @@
+/*
+ * RNG driver for AMD RNGs
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * with the majority of the code coming from:
+ *
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#define DRV_NAME "AMD768-HWRNG"
+
+#define RNGDATA 0x00
+#define RNGDONE 0x04
+#define PMBASE_OFFSET 0xF0
+#define PMBASE_SIZE 8
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE. We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static const struct pci_device_id pci_tbl[] = {
+ { PCI_VDEVICE(AMD, 0x7443), 0, },
+ { PCI_VDEVICE(AMD, 0x746b), 0, },
+ { 0, }, /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+struct amd768_priv {
+ void __iomem *iobase;
+ struct pci_dev *pcidev;
+ u32 pmbase;
+};
+
+static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ u32 *data = buf;
+ struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
+ size_t read = 0;
+ /* We will wait at maximum one time per read */
+ int timeout = max / 4 + 1;
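+ /* i.e. roughly one 128 usec sleep per 32-bit word requested */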
+
+ /*
+ * RNG data is available when RNGDONE is set to 1
+ * New random numbers are generated approximately 128 microseconds
+ * after RNGDATA is read
+ */
+ while (read < max) {
+ if (ioread32(priv->iobase + RNGDONE) == 0) {
+ if (wait) {
+ /* Delay given by datasheet */
+ usleep_range(128, 196);
+ if (timeout-- == 0)
+ return read;
+ } else {
+ return 0;
+ }
+ } else {
+ *data = ioread32(priv->iobase + RNGDATA);
+ data++;
+ read += 4;
+ }
+ }
+
+ return read;
+}
+
+static int amd_rng_init(struct hwrng *rng)
+{
+ struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
+ u8 rnen;
+
+ pci_read_config_byte(priv->pcidev, 0x40, &rnen);
+ rnen |= BIT(7); /* RNG on */
+ pci_write_config_byte(priv->pcidev, 0x40, rnen);
+
+ pci_read_config_byte(priv->pcidev, 0x41, &rnen);
+ rnen |= BIT(7); /* PMIO enable */
+ pci_write_config_byte(priv->pcidev, 0x41, rnen);
+
+ return 0;
+}
+
+static void amd_rng_cleanup(struct hwrng *rng)
+{
+ struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
+ u8 rnen;
+
+ pci_read_config_byte(priv->pcidev, 0x40, &rnen);
+ rnen &= ~BIT(7); /* RNG off */
+ pci_write_config_byte(priv->pcidev, 0x40, rnen);
+}
+
+static struct hwrng amd_rng = {
+ .name = "amd",
+ .init = amd_rng_init,
+ .cleanup = amd_rng_cleanup,
+ .read = amd_rng_read,
+};
+
+static int __init amd_rng_mod_init(void)
+{
+ int err;
+ struct pci_dev *pdev = NULL;
+ const struct pci_device_id *ent;
+ u32 pmbase;
+ struct amd768_priv *priv;
+
+ for_each_pci_dev(pdev) {
+ ent = pci_match_id(pci_tbl, pdev);
+ if (ent)
+ goto found;
+ }
+ /* Device not found. */
+ return -ENODEV;
+
+found:
+ err = pci_read_config_dword(pdev, 0x58, &pmbase);
+ if (err)
+ goto put_dev;
+
+ pmbase &= 0x0000FF00;
+ if (pmbase == 0) {
+ err = -EIO;
+ goto put_dev;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ goto put_dev;
+ }
+
+ if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
+ dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
+ pmbase + 0xF0);
+ err = -EBUSY;
+ goto out;
+ }
+
+ priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+ if (!priv->iobase) {
+ pr_err(DRV_NAME "Cannot map ioport\n");
+ err = -EINVAL;
+ goto err_iomap;
+ }
+
+ amd_rng.priv = (unsigned long)priv;
+ priv->pmbase = pmbase;
+ priv->pcidev = pdev;
+
+ pr_info(DRV_NAME " detected\n");
+ err = hwrng_register(&amd_rng);
+ if (err) {
+ pr_err(DRV_NAME " registering failed (%d)\n", err);
+ goto err_hwrng;
+ }
+ return 0;
+
+err_hwrng:
+ ioport_unmap(priv->iobase);
+err_iomap:
+ release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+out:
+ kfree(priv);
+put_dev:
+ pci_dev_put(pdev);
+ return err;
+}
+
+static void __exit amd_rng_mod_exit(void)
+{
+ struct amd768_priv *priv;
+
+ priv = (struct amd768_priv *)amd_rng.priv;
+
+ hwrng_unregister(&amd_rng);
+
+ ioport_unmap(priv->iobase);
+
+ release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+
+ pci_dev_put(priv->pcidev);
+
+ kfree(priv);
+}
+
+module_init(amd_rng_mod_init);
+module_exit(amd_rng_mod_exit);
+
+MODULE_AUTHOR("The Linux Kernel team");
+MODULE_DESCRIPTION("H/W RNG driver for AMD chipsets");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c
new file mode 100644
index 0000000000..7e954341b0
--- /dev/null
+++ b/drivers/char/hw_random/arm_smccc_trng.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Randomness driver for the ARM SMCCC TRNG Firmware Interface
+ * https://developer.arm.com/documentation/den0098/latest/
+ *
+ * Copyright (C) 2020 Arm Ltd.
+ *
+ * The ARM TRNG firmware interface specifies a protocol to read entropy
+ * from a higher exception level, to abstract from any machine specific
+ * implementations and allow easier use in hypervisors.
+ *
+ * The firmware interface is realised using the SMCCC specification.
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/hw_random.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/arm-smccc.h>
+
+#ifdef CONFIG_ARM64
+#define ARM_SMCCC_TRNG_RND ARM_SMCCC_TRNG_RND64
+#define MAX_BITS_PER_CALL (3 * 64UL)
+#else
+#define ARM_SMCCC_TRNG_RND ARM_SMCCC_TRNG_RND32
+#define MAX_BITS_PER_CALL (3 * 32UL)
+#endif
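+
+/*
+ * One RND call can return entropy in up to three result registers
+ * (a3, a2 and a1 - see copy_from_registers() below), hence at most
+ * 3 * 64 bits per call on arm64 and 3 * 32 bits elsewhere.
+ */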
+
+/* We don't want to allow the firmware to stall us forever. */
+#define SMCCC_TRNG_MAX_TRIES 20
+
+#define SMCCC_RET_TRNG_INVALID_PARAMETER -2
+#define SMCCC_RET_TRNG_NO_ENTROPY -3
+
+static int copy_from_registers(char *buf, struct arm_smccc_res *res,
+ size_t bytes)
+{
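+ /* Worked example on arm64: for bytes = 20 this copies 8 bytes from
+ * res->a3, 8 from res->a2 and the remaining 4 from res->a1.
+ */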
+ unsigned int chunk, copied;
+
+ if (bytes == 0)
+ return 0;
+
+ chunk = min(bytes, sizeof(long));
+ memcpy(buf, &res->a3, chunk);
+ copied = chunk;
+ if (copied >= bytes)
+ return copied;
+
+ chunk = min((bytes - copied), sizeof(long));
+ memcpy(&buf[copied], &res->a2, chunk);
+ copied += chunk;
+ if (copied >= bytes)
+ return copied;
+
+ chunk = min((bytes - copied), sizeof(long));
+ memcpy(&buf[copied], &res->a1, chunk);
+
+ return copied + chunk;
+}
+
+static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct arm_smccc_res res;
+ u8 *buf = data;
+ unsigned int copied = 0;
+ int tries = 0;
+
+ while (copied < max) {
+ size_t bits = min_t(size_t, (max - copied) * BITS_PER_BYTE,
+ MAX_BITS_PER_CALL);
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res);
+
+ switch ((int)res.a0) {
+ case SMCCC_RET_SUCCESS:
+ copied += copy_from_registers(buf + copied, &res,
+ bits / BITS_PER_BYTE);
+ tries = 0;
+ break;
+ case SMCCC_RET_TRNG_NO_ENTROPY:
+ if (!wait)
+ return copied;
+ tries++;
+ if (tries >= SMCCC_TRNG_MAX_TRIES)
+ return copied;
+ cond_resched();
+ break;
+ default:
+ return -EIO;
+ }
+ }
+
+ return copied;
+}
+
+static int smccc_trng_probe(struct platform_device *pdev)
+{
+ struct hwrng *trng;
+
+ trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return -ENOMEM;
+
+ trng->name = "smccc_trng";
+ trng->read = smccc_trng_read;
+
+ return devm_hwrng_register(&pdev->dev, trng);
+}
+
+static struct platform_driver smccc_trng_driver = {
+ .driver = {
+ .name = "smccc_trng",
+ },
+ .probe = smccc_trng_probe,
+};
+module_platform_driver(smccc_trng_driver);
+
+MODULE_ALIAS("platform:smccc_trng");
+MODULE_AUTHOR("Andre Przywara");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
new file mode 100644
index 0000000000..a37367ebcb
--- /dev/null
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2011 Peter Korsgaard <jacmet@sunsite.dk>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/hw_random.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define TRNG_CR 0x00
+#define TRNG_MR 0x04
+#define TRNG_ISR 0x1c
+#define TRNG_ISR_DATRDY BIT(0)
+#define TRNG_ODATA 0x50
+
+#define TRNG_KEY 0x524e4700 /* ASCII "RNG" */
+
+#define TRNG_HALFR BIT(0) /* generate RN every 168 cycles */
+
+struct atmel_trng_data {
+ bool has_half_rate;
+};
+
+struct atmel_trng {
+ struct clk *clk;
+ void __iomem *base;
+ struct hwrng rng;
+ bool has_half_rate;
+};
+
+static bool atmel_trng_wait_ready(struct atmel_trng *trng, bool wait)
+{
+ int ready;
+
+ ready = readl(trng->base + TRNG_ISR) & TRNG_ISR_DATRDY;
+ if (!ready && wait)
+ readl_poll_timeout(trng->base + TRNG_ISR, ready,
+ ready & TRNG_ISR_DATRDY, 1000, 20000);
+
+ return !!ready;
+}
+
+static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
+ bool wait)
+{
+ struct atmel_trng *trng = container_of(rng, struct atmel_trng, rng);
+ u32 *data = buf;
+ int ret;
+
+ ret = pm_runtime_get_sync((struct device *)trng->rng.priv);
+ if (ret < 0) {
+ pm_runtime_put_sync((struct device *)trng->rng.priv);
+ return ret;
+ }
+
+ ret = atmel_trng_wait_ready(trng, wait);
+ if (!ret)
+ goto out;
+
+ *data = readl(trng->base + TRNG_ODATA);
+ /*
+ * Ensure the data-ready flag is only set again AFTER the next data
+ * word is ready, in case it got set between checking ISR and reading
+ * ODATA; otherwise we would risk re-reading the same word.
+ */
+ readl(trng->base + TRNG_ISR);
+ ret = 4;
+
+out:
+ pm_runtime_mark_last_busy((struct device *)trng->rng.priv);
+ pm_runtime_put_sync_autosuspend((struct device *)trng->rng.priv);
+ return ret;
+}
+
+static int atmel_trng_init(struct atmel_trng *trng)
+{
+ unsigned long rate;
+ int ret;
+
+ ret = clk_prepare_enable(trng->clk);
+ if (ret)
+ return ret;
+
+ if (trng->has_half_rate) {
+ rate = clk_get_rate(trng->clk);
+
+ /* if peripheral clk is above 100MHz, set HALFR */
+ if (rate > 100000000)
+ writel(TRNG_HALFR, trng->base + TRNG_MR);
+ }
+
+ writel(TRNG_KEY | 1, trng->base + TRNG_CR);
+
+ return 0;
+}
+
+static void atmel_trng_cleanup(struct atmel_trng *trng)
+{
+ writel(TRNG_KEY, trng->base + TRNG_CR);
+ clk_disable_unprepare(trng->clk);
+}
+
+static int atmel_trng_probe(struct platform_device *pdev)
+{
+ struct atmel_trng *trng;
+ const struct atmel_trng_data *data;
+ int ret;
+
+ trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return -ENOMEM;
+
+ trng->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(trng->base))
+ return PTR_ERR(trng->base);
+
+ trng->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(trng->clk))
+ return PTR_ERR(trng->clk);
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -ENODEV;
+
+ trng->has_half_rate = data->has_half_rate;
+ trng->rng.name = pdev->name;
+ trng->rng.read = atmel_trng_read;
+ trng->rng.priv = (unsigned long)&pdev->dev;
+ platform_set_drvdata(pdev, trng);
+
+#ifndef CONFIG_PM
+ ret = atmel_trng_init(trng);
+ if (ret)
+ return ret;
+#endif
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+#ifndef CONFIG_PM
+ atmel_trng_cleanup(trng);
+#endif
+ }
+
+ return ret;
+}
+
+static int atmel_trng_remove(struct platform_device *pdev)
+{
+ struct atmel_trng *trng = platform_get_drvdata(pdev);
+
+ atmel_trng_cleanup(trng);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused atmel_trng_runtime_suspend(struct device *dev)
+{
+ struct atmel_trng *trng = dev_get_drvdata(dev);
+
+ atmel_trng_cleanup(trng);
+
+ return 0;
+}
+
+static int __maybe_unused atmel_trng_runtime_resume(struct device *dev)
+{
+ struct atmel_trng *trng = dev_get_drvdata(dev);
+
+ return atmel_trng_init(trng);
+}
+
+static const struct dev_pm_ops __maybe_unused atmel_trng_pm_ops = {
+ SET_RUNTIME_PM_OPS(atmel_trng_runtime_suspend,
+ atmel_trng_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static const struct atmel_trng_data at91sam9g45_config = {
+ .has_half_rate = false,
+};
+
+static const struct atmel_trng_data sam9x60_config = {
+ .has_half_rate = true,
+};
+
+static const struct of_device_id atmel_trng_dt_ids[] = {
+ {
+ .compatible = "atmel,at91sam9g45-trng",
+ .data = &at91sam9g45_config,
+ }, {
+ .compatible = "microchip,sam9x60-trng",
+ .data = &sam9x60_config,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids);
+
+static struct platform_driver atmel_trng_driver = {
+ .probe = atmel_trng_probe,
+ .remove = atmel_trng_remove,
+ .driver = {
+ .name = "atmel-trng",
+ .pm = pm_ptr(&atmel_trng_pm_ops),
+ .of_match_table = atmel_trng_dt_ids,
+ },
+};
+
+module_platform_driver(atmel_trng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
+MODULE_DESCRIPTION("Atmel true random number generator driver");
diff --git a/drivers/char/hw_random/ba431-rng.c b/drivers/char/hw_random/ba431-rng.c
new file mode 100644
index 0000000000..9de7466e68
--- /dev/null
+++ b/drivers/char/hw_random/ba431-rng.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Silex Insight
+
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#define BA431_RESET_DELAY 1 /* usec */
+#define BA431_RESET_READ_STATUS_TIMEOUT 1000 /* usec */
+#define BA431_RESET_READ_STATUS_INTERVAL 10 /* usec */
+#define BA431_READ_RETRY_INTERVAL 1 /* usec */
+
+#define BA431_REG_CTRL 0x00
+#define BA431_REG_FIFO_LEVEL 0x04
+#define BA431_REG_STATUS 0x30
+#define BA431_REG_FIFODATA 0x80
+
+#define BA431_CTRL_ENABLE BIT(0)
+#define BA431_CTRL_SOFTRESET BIT(8)
+
+#define BA431_STATUS_STATE_MASK (BIT(1) | BIT(2) | BIT(3))
+#define BA431_STATUS_STATE_OFFSET 1
+
+enum ba431_state {
+ BA431_STATE_RESET,
+ BA431_STATE_STARTUP,
+ BA431_STATE_FIFOFULLON,
+ BA431_STATE_FIFOFULLOFF,
+ BA431_STATE_RUNNING,
+ BA431_STATE_ERROR
+};
+
+struct ba431_trng {
+ struct device *dev;
+ void __iomem *base;
+ struct hwrng rng;
+ atomic_t reset_pending;
+ struct work_struct reset_work;
+};
+
+static inline u32 ba431_trng_read_reg(struct ba431_trng *ba431, u32 reg)
+{
+ return ioread32(ba431->base + reg);
+}
+
+static inline void ba431_trng_write_reg(struct ba431_trng *ba431, u32 reg,
+ u32 val)
+{
+ iowrite32(val, ba431->base + reg);
+}
+
+static inline enum ba431_state ba431_trng_get_state(struct ba431_trng *ba431)
+{
+ u32 status = ba431_trng_read_reg(ba431, BA431_REG_STATUS);
+
+ return (status & BA431_STATUS_STATE_MASK) >> BA431_STATUS_STATE_OFFSET;
+}
+
+static int ba431_trng_is_in_error(struct ba431_trng *ba431)
+{
+ enum ba431_state state = ba431_trng_get_state(ba431);
+
+ if ((state < BA431_STATE_STARTUP) ||
+ (state >= BA431_STATE_ERROR))
+ return 1;
+
+ return 0;
+}
+
+static int ba431_trng_reset(struct ba431_trng *ba431)
+{
+ int ret;
+
+ /* Disable interrupts, random generation and enable the softreset */
+ ba431_trng_write_reg(ba431, BA431_REG_CTRL, BA431_CTRL_SOFTRESET);
+ udelay(BA431_RESET_DELAY);
+ ba431_trng_write_reg(ba431, BA431_REG_CTRL, BA431_CTRL_ENABLE);
+
+ /* Wait until the state changed */
+ if (readx_poll_timeout(ba431_trng_is_in_error, ba431, ret, !ret,
+ BA431_RESET_READ_STATUS_INTERVAL,
+ BA431_RESET_READ_STATUS_TIMEOUT)) {
+ dev_err(ba431->dev, "reset failed (state: %d)\n",
+ ba431_trng_get_state(ba431));
+ return -ETIMEDOUT;
+ }
+
+ dev_info(ba431->dev, "reset done\n");
+
+ return 0;
+}
+
+static void ba431_trng_reset_work(struct work_struct *work)
+{
+ struct ba431_trng *ba431 = container_of(work, struct ba431_trng,
+ reset_work);
+ ba431_trng_reset(ba431);
+ atomic_set(&ba431->reset_pending, 0);
+}
+
+static void ba431_trng_schedule_reset(struct ba431_trng *ba431)
+{
+ if (atomic_cmpxchg(&ba431->reset_pending, 0, 1))
+ return;
+
+ schedule_work(&ba431->reset_work);
+}
+
+static int ba431_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng);
+ u32 *data = buf;
+ unsigned int level, i;
+ int n = 0;
+
+ while (max > 0) {
+ level = ba431_trng_read_reg(ba431, BA431_REG_FIFO_LEVEL);
+ if (!level) {
+ if (ba431_trng_is_in_error(ba431)) {
+ ba431_trng_schedule_reset(ba431);
+ break;
+ }
+
+ if (!wait)
+ break;
+
+ udelay(BA431_READ_RETRY_INTERVAL);
+ continue;
+ }
+
+ i = level;
+ do {
+ data[n++] = ba431_trng_read_reg(ba431,
+ BA431_REG_FIFODATA);
+ max -= sizeof(*data);
+ } while (--i && (max > 0));
+
+ if (ba431_trng_is_in_error(ba431)) {
+ n -= (level - i);
+ ba431_trng_schedule_reset(ba431);
+ break;
+ }
+ }
+
+ n *= sizeof(*data);
+ return (n || !wait) ? n : -EIO;
+}
+
+static void ba431_trng_cleanup(struct hwrng *rng)
+{
+ struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng);
+
+ ba431_trng_write_reg(ba431, BA431_REG_CTRL, 0);
+ cancel_work_sync(&ba431->reset_work);
+}
+
+static int ba431_trng_init(struct hwrng *rng)
+{
+ struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng);
+
+ return ba431_trng_reset(ba431);
+}
+
+static int ba431_trng_probe(struct platform_device *pdev)
+{
+ struct ba431_trng *ba431;
+ int ret;
+
+ ba431 = devm_kzalloc(&pdev->dev, sizeof(*ba431), GFP_KERNEL);
+ if (!ba431)
+ return -ENOMEM;
+
+ ba431->dev = &pdev->dev;
+
+ ba431->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ba431->base))
+ return PTR_ERR(ba431->base);
+
+ atomic_set(&ba431->reset_pending, 0);
+ INIT_WORK(&ba431->reset_work, ba431_trng_reset_work);
+ ba431->rng.name = pdev->name;
+ ba431->rng.init = ba431_trng_init;
+ ba431->rng.cleanup = ba431_trng_cleanup;
+ ba431->rng.read = ba431_trng_read;
+
+ ret = devm_hwrng_register(&pdev->dev, &ba431->rng);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "BA431 registration failed\n");
+
+ dev_info(&pdev->dev, "BA431 TRNG registered\n");
+
+ return 0;
+}
+
+static const struct of_device_id ba431_trng_dt_ids[] = {
+ { .compatible = "silex-insight,ba431-rng" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ba431_trng_dt_ids);
+
+static struct platform_driver ba431_trng_driver = {
+ .driver = {
+ .name = "ba431-rng",
+ .of_match_table = ba431_trng_dt_ids,
+ },
+ .probe = ba431_trng_probe,
+};
+
+module_platform_driver(ba431_trng_driver);
+
+MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>");
+MODULE_DESCRIPTION("TRNG driver for Silex Insight BA431");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
new file mode 100644
index 0000000000..4c08efe7f3
--- /dev/null
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ * Copyright (c) 2013 Lubomir Rintel
+ */
+
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+
+#define RNG_CTRL 0x0
+#define RNG_STATUS 0x4
+#define RNG_DATA 0x8
+#define RNG_INT_MASK 0x10
+
+/* enable rng */
+#define RNG_RBGEN 0x1
+
+/* the initial numbers generated are "less random" so will be discarded */
+#define RNG_WARMUP_COUNT 0x40000
+
+#define RNG_INT_OFF 0x1
+
+struct bcm2835_rng_priv {
+ struct hwrng rng;
+ void __iomem *base;
+ bool mask_interrupts;
+ struct clk *clk;
+ struct reset_control *reset;
+};
+
+static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng)
+{
+ return container_of(rng, struct bcm2835_rng_priv, rng);
+}
+
+static inline u32 rng_readl(struct bcm2835_rng_priv *priv, u32 offset)
+{
+ /* MIPS chips strapped for BE will automagically configure the
+ * peripheral registers for CPU-native byte order.
+ */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ return __raw_readl(priv->base + offset);
+ else
+ return readl(priv->base + offset);
+}
+
+static inline void rng_writel(struct bcm2835_rng_priv *priv, u32 val,
+ u32 offset)
+{
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ __raw_writel(val, priv->base + offset);
+ else
+ writel(val, priv->base + offset);
+}
+
+static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
+ bool wait)
+{
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
+ u32 max_words = max / sizeof(u32);
+ u32 num_words, count;
+
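+ /* The top byte of RNG_STATUS (hence the ">> 24") holds the number of
+ * words currently available in the FIFO.
+ */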
+ while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) {
+ if (!wait)
+ return 0;
+ hwrng_yield(rng);
+ }
+
+ num_words = rng_readl(priv, RNG_STATUS) >> 24;
+ if (num_words > max_words)
+ num_words = max_words;
+
+ for (count = 0; count < num_words; count++)
+ ((u32 *)buf)[count] = rng_readl(priv, RNG_DATA);
+
+ return num_words * sizeof(u32);
+}
+
+static int bcm2835_rng_init(struct hwrng *rng)
+{
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
+ int ret = 0;
+ u32 val;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_reset(priv->reset);
+ if (ret)
+ return ret;
+
+ if (priv->mask_interrupts) {
+ /* mask the interrupt */
+ val = rng_readl(priv, RNG_INT_MASK);
+ val |= RNG_INT_OFF;
+ rng_writel(priv, val, RNG_INT_MASK);
+ }
+
+ /* set warm-up count & enable */
+ rng_writel(priv, RNG_WARMUP_COUNT, RNG_STATUS);
+ rng_writel(priv, RNG_RBGEN, RNG_CTRL);
+
+ return ret;
+}
+
+static void bcm2835_rng_cleanup(struct hwrng *rng)
+{
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
+
+ /* disable rng hardware */
+ rng_writel(priv, 0, RNG_CTRL);
+
+ clk_disable_unprepare(priv->clk);
+}
+
+struct bcm2835_rng_of_data {
+ bool mask_interrupts;
+};
+
+static const struct bcm2835_rng_of_data nsp_rng_of_data = {
+ .mask_interrupts = true,
+};
+
+static const struct of_device_id bcm2835_rng_of_match[] = {
+ { .compatible = "brcm,bcm2835-rng"},
+ { .compatible = "brcm,bcm-nsp-rng", .data = &nsp_rng_of_data },
+ { .compatible = "brcm,bcm5301x-rng", .data = &nsp_rng_of_data },
+ { .compatible = "brcm,bcm6368-rng"},
+ {},
+};
+
+static int bcm2835_rng_probe(struct platform_device *pdev)
+{
+ const struct bcm2835_rng_of_data *of_data;
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *rng_id;
+ struct bcm2835_rng_priv *priv;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ /* map peripheral */
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ /* Clock is optional on most platforms */
+ priv->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->reset = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(priv->reset))
+ return PTR_ERR(priv->reset);
+
+ priv->rng.name = pdev->name;
+ priv->rng.init = bcm2835_rng_init;
+ priv->rng.read = bcm2835_rng_read;
+ priv->rng.cleanup = bcm2835_rng_cleanup;
+
+ if (dev_of_node(dev)) {
+ rng_id = of_match_node(bcm2835_rng_of_match, dev->of_node);
+ if (!rng_id)
+ return -EINVAL;
+
+ /* Fetch platform-specific match data, if any (e.g. whether to mask interrupts) */
+ of_data = rng_id->data;
+ if (of_data)
+ priv->mask_interrupts = of_data->mask_interrupts;
+ }
+
+ /* register driver */
+ err = devm_hwrng_register(dev, &priv->rng);
+ if (err)
+ dev_err(dev, "hwrng registration failed\n");
+ else
+ dev_info(dev, "hwrng registered\n");
+
+ return err;
+}
+
+MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
+
+static const struct platform_device_id bcm2835_rng_devtype[] = {
+ { .name = "bcm2835-rng" },
+ { .name = "bcm63xx-rng" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, bcm2835_rng_devtype);
+
+static struct platform_driver bcm2835_rng_driver = {
+ .driver = {
+ .name = "bcm2835-rng",
+ .of_match_table = bcm2835_rng_of_match,
+ },
+ .probe = bcm2835_rng_probe,
+ .id_table = bcm2835_rng_devtype,
+};
+module_platform_driver(bcm2835_rng_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("BCM2835 Random Number Generator (RNG) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
new file mode 100644
index 0000000000..c99c54cd99
--- /dev/null
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hardware Random Number Generator support.
+ * Cavium Thunder, Marvell OcteonTx/Tx2 processor families.
+ *
+ * Copyright (C) 2016 Cavium, Inc.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <asm/arch_timer.h>
+
+/* PCI device IDs */
+#define PCI_DEVID_CAVIUM_RNG_PF 0xA018
+#define PCI_DEVID_CAVIUM_RNG_VF 0xA033
+
+#define HEALTH_STATUS_REG 0x38
+
+/* RST device info */
+#define PCI_DEVICE_ID_RST_OTX2 0xA085
+#define RST_BOOT_REG 0x1600ULL
+#define CLOCK_BASE_RATE 50000000ULL
+#define MSEC_TO_NSEC(x) (x * 1000000)
+
+struct cavium_rng {
+ struct hwrng ops;
+ void __iomem *result;
+ void __iomem *pf_regbase;
+ struct pci_dev *pdev;
+ u64 clock_rate;
+ u64 prev_error;
+ u64 prev_time;
+};
+
+static inline bool is_octeontx(struct pci_dev *pdev)
+{
+ if (midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_83XX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(3, 0)) ||
+ midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_81XX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(3, 0)) ||
+ midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(3, 0)))
+ return true;
+
+ return false;
+}
+
+static u64 rng_get_coprocessor_clkrate(void)
+{
+ u64 ret = CLOCK_BASE_RATE * 16; /* Assume 800 MHz as the default */
+ struct pci_dev *pdev;
+ void __iomem *base;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_RST_OTX2, NULL);
+ if (!pdev)
+ goto error;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto error_put_pdev;
+
+ /* RST: PNR_MUL * 50 MHz gives the clock rate */
+ ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT_REG) >> 33) & 0x3F);
+
+ iounmap(base);
+
+error_put_pdev:
+ pci_dev_put(pdev);
+
+error:
+ return ret;
+}
+
+static int check_rng_health(struct cavium_rng *rng)
+{
+ u64 cur_err, cur_time;
+ u64 status, cycles;
+ u64 time_elapsed;
+
+
+ /* Skip checking health for OcteonTx */
+ if (!rng->pf_regbase)
+ return 0;
+
+ status = readq(rng->pf_regbase + HEALTH_STATUS_REG);
+ if (status & BIT_ULL(0)) {
+ dev_err(&rng->pdev->dev, "HWRNG: Startup health test failed\n");
+ return -EIO;
+ }
+
+ cycles = status >> 1;
+ if (!cycles)
+ return 0;
+
+ cur_time = arch_timer_read_counter();
+
+ /* RNM_HEALTH_STATUS[CYCLES_SINCE_HEALTH_FAILURE]
+ * Number of coprocessor cycles times 2 since the last failure.
+ * This field doesn't get cleared/updated until another failure.
+ */
+ cycles = cycles / 2;
+ cur_err = (cycles * 1000000000) / rng->clock_rate; /* In nanosec */
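+ /* Illustrative example: at the default 800 MHz coprocessor clock,
+ * 8,000,000 cycles work out to 8e6 * 1e9 / 8e8 = 10,000,000 ns,
+ * i.e. the 10 ms cutoff checked just below.
+ */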
+
+ /* Ignore errors that happened a long time ago; these
+ * are most likely false positives.
+ */
+ if (cur_err > MSEC_TO_NSEC(10)) {
+ rng->prev_error = 0;
+ rng->prev_time = 0;
+ return 0;
+ }
+
+ if (rng->prev_error) {
+ /* Calculate time elapsed since last error
+ * one tick of CNTVCT is 10 ns, since it runs at 100 MHz.
+ */
+ time_elapsed = (cur_time - rng->prev_time) * 10;
+ time_elapsed += rng->prev_error;
+
+ /* Check if current error is a new one or the old one itself.
+ * If error is a new one then consider there is a persistent
+ * issue with entropy, declare hardware failure.
+ */
+ if (cur_err < time_elapsed) {
+ dev_err(&rng->pdev->dev, "HWRNG failure detected\n");
+ rng->prev_error = cur_err;
+ rng->prev_time = cur_time;
+ return -EIO;
+ }
+ }
+
+ rng->prev_error = cur_err;
+ rng->prev_time = cur_time;
+ return 0;
+}
+
+/* Read data from the RNG unit */
+static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait)
+{
+ struct cavium_rng *p = container_of(rng, struct cavium_rng, ops);
+ unsigned int size = max;
+ int err = 0;
+
+ err = check_rng_health(p);
+ if (err)
+ return err;
+
+ while (size >= 8) {
+ *((u64 *)dat) = readq(p->result);
+ size -= 8;
+ dat += 8;
+ }
+ while (size > 0) {
+ *((u8 *)dat) = readb(p->result);
+ size--;
+ dat++;
+ }
+ return max;
+}
+
+static int cavium_map_pf_regs(struct cavium_rng *rng)
+{
+ struct pci_dev *pdev;
+
+ /* Health status is not supported on 83xx, skip mapping PF CSRs */
+ if (is_octeontx(rng->pdev)) {
+ rng->pf_regbase = NULL;
+ return 0;
+ }
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_CAVIUM_RNG_PF, NULL);
+ if (!pdev) {
+ pr_err("Cannot find RNG PF device\n");
+ return -EIO;
+ }
+
+ rng->pf_regbase = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!rng->pf_regbase) {
+ dev_err(&pdev->dev, "Failed to map PF CSR region\n");
+ pci_dev_put(pdev);
+ return -ENOMEM;
+ }
+
+ pci_dev_put(pdev);
+
+ /* Get co-processor clock rate */
+ rng->clock_rate = rng_get_coprocessor_clkrate();
+
+ return 0;
+}
+
+/* Map Cavium RNG to an HWRNG object */
+static int cavium_rng_probe_vf(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct cavium_rng *rng;
+ int ret;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ rng->pdev = pdev;
+
+ /* Map the RNG result */
+ rng->result = pcim_iomap(pdev, 0, 0);
+ if (!rng->result) {
+ dev_err(&pdev->dev, "Error iomap failed retrieving result.\n");
+ return -ENOMEM;
+ }
+
+ rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "cavium-rng-%s", dev_name(&pdev->dev));
+ if (!rng->ops.name)
+ return -ENOMEM;
+
+ rng->ops.read = cavium_rng_read;
+
+ pci_set_drvdata(pdev, rng);
+
+ /* Health status is available only at PF, hence map PF registers. */
+ ret = cavium_map_pf_regs(rng);
+ if (ret)
+ return ret;
+
+ ret = devm_hwrng_register(&pdev->dev, &rng->ops);
+ if (ret) {
+ dev_err(&pdev->dev, "Error registering device as HWRNG.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Remove the VF */
+static void cavium_rng_remove_vf(struct pci_dev *pdev)
+{
+ struct cavium_rng *rng;
+
+ rng = pci_get_drvdata(pdev);
+ iounmap(rng->pf_regbase);
+}
+
+static const struct pci_device_id cavium_rng_vf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CAVIUM_RNG_VF) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, cavium_rng_vf_id_table);
+
+static struct pci_driver cavium_rng_vf_driver = {
+ .name = "cavium_rng_vf",
+ .id_table = cavium_rng_vf_id_table,
+ .probe = cavium_rng_probe_vf,
+ .remove = cavium_rng_remove_vf,
+};
+module_pci_driver(cavium_rng_vf_driver);
+
+MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c
new file mode 100644
index 0000000000..b965792224
--- /dev/null
+++ b/drivers/char/hw_random/cavium-rng.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hardware Random Number Generator support.
+ * Cavium Thunder, Marvell OcteonTx/Tx2 processor families.
+ *
+ * Copyright (C) 2016 Cavium, Inc.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#define THUNDERX_RNM_ENT_EN 0x1
+#define THUNDERX_RNM_RNG_EN 0x2
+
+struct cavium_rng_pf {
+ void __iomem *control_status;
+};
+
+/* Enable the RNG hardware and activate the VF */
+static int cavium_rng_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct cavium_rng_pf *rng;
+ int iov_err;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ /* Map the RNG control */
+ rng->control_status = pcim_iomap(pdev, 0, 0);
+ if (!rng->control_status) {
+ dev_err(&pdev->dev,
+ "Error iomap failed retrieving control_status.\n");
+ return -ENOMEM;
+ }
+
+ /* Enable the RNG hardware and entropy source */
+ writeq(THUNDERX_RNM_RNG_EN | THUNDERX_RNM_ENT_EN,
+ rng->control_status);
+
+ pci_set_drvdata(pdev, rng);
+
+ /* Enable the Cavium RNG as a VF */
+ iov_err = pci_enable_sriov(pdev, 1);
+ if (iov_err != 0) {
+ /* Disable the RNG hardware and entropy source */
+ writeq(0, rng->control_status);
+ dev_err(&pdev->dev,
+ "Error initializing RNG virtual function,(%i).\n",
+ iov_err);
+ return iov_err;
+ }
+
+ return 0;
+}
+
+/* Disable VF and RNG Hardware */
+static void cavium_rng_remove(struct pci_dev *pdev)
+{
+ struct cavium_rng_pf *rng;
+
+ rng = pci_get_drvdata(pdev);
+
+ /* Remove the VF */
+ pci_disable_sriov(pdev);
+
+ /* Disable the RNG hardware and entropy source */
+ writeq(0, rng->control_status);
+}
+
+static const struct pci_device_id cavium_rng_pf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa018), 0, 0, 0}, /* Thunder RNM */
+ {0,},
+};
+
+MODULE_DEVICE_TABLE(pci, cavium_rng_pf_id_table);
+
+static struct pci_driver cavium_rng_pf_driver = {
+ .name = "cavium_rng_pf",
+ .id_table = cavium_rng_pf_id_table,
+ .probe = cavium_rng_probe,
+ .remove = cavium_rng_remove,
+};
+
+module_pci_driver(cavium_rng_pf_driver);
+MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
new file mode 100644
index 0000000000..1abbff04a0
--- /dev/null
+++ b/drivers/char/hw_random/cctrng.c
@@ -0,0 +1,665 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2020 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/workqueue.h>
+#include <linux/circ_buf.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include <linux/fips.h>
+
+#include "cctrng.h"
+
+#define CC_REG_LOW(name) (name ## _BIT_SHIFT)
+#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
+#define CC_GENMASK(name) GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))
+
+#define CC_REG_FLD_GET(reg_name, fld_name, reg_val) \
+ (FIELD_GET(CC_GENMASK(CC_ ## reg_name ## _ ## fld_name), reg_val))
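+/* Example: CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr) extracts the field
+ * described by CC_RNG_ISR_EHR_VALID_BIT_SHIFT/_BIT_SIZE (see cctrng.h)
+ * from an RNG_ISR register value.
+ */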
+
+#define CC_HW_RESET_LOOP_COUNT 10
+#define CC_TRNG_SUSPEND_TIMEOUT 3000
+
+/* data circular buffer in words must be:
+ * - of a power-of-2 size (limitation of circ_buf.h macros)
+ * - at least 6, the size generated in the EHR according to HW implementation
+ */
+#define CCTRNG_DATA_BUF_WORDS 32
+
+/* The timeout for the TRNG operation should be calculated with the formula:
+ * Timeout = EHR_NUM * VN_COEFF * EHR_LENGTH * SAMPLE_CNT * SCALE_VALUE
+ * where:
+ * - SAMPLE_CNT is an input value from the characterisation process
+ * - all the rest are constants
+ */
+#define EHR_NUM 1
+#define VN_COEFF 4
+#define EHR_LENGTH CC_TRNG_EHR_IN_BITS
+#define SCALE_VALUE 2
+#define CCTRNG_TIMEOUT(smpl_cnt) \
+ (EHR_NUM * VN_COEFF * EHR_LENGTH * smpl_cnt * SCALE_VALUE)
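+/* Worked example (illustrative only): for a sampling ratio of 1000,
+ * CCTRNG_TIMEOUT(1000) = 1 * 4 * 192 * 1000 * 2 = 1,536,000 cycles,
+ * since EHR_LENGTH = CC_TRNG_EHR_IN_BITS = 6 words * 32 bits = 192.
+ */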
+
+struct cctrng_drvdata {
+ struct platform_device *pdev;
+ void __iomem *cc_base;
+ struct clk *clk;
+ struct hwrng rng;
+ u32 active_rosc;
+ /* Sampling interval for each ring oscillator:
+ * count of ring oscillator cycles between consecutive bit samples.
+ * A value of 0 indicates an invalid rosc.
+ */
+ u32 smpl_ratio[CC_TRNG_NUM_OF_ROSCS];
+
+ u32 data_buf[CCTRNG_DATA_BUF_WORDS];
+ struct circ_buf circ;
+ struct work_struct compwork;
+ struct work_struct startwork;
+
+ /* pending_hw - 1 when HW is pending, 0 when it is idle */
+ atomic_t pending_hw;
+
+ /* protects against multiple concurrent consumers of data_buf */
+ spinlock_t read_lock;
+};
+
+
+/* functions for write/read CC registers */
+static inline void cc_iowrite(struct cctrng_drvdata *drvdata, u32 reg, u32 val)
+{
+ iowrite32(val, (drvdata->cc_base + reg));
+}
+static inline u32 cc_ioread(struct cctrng_drvdata *drvdata, u32 reg)
+{
+ return ioread32(drvdata->cc_base + reg);
+}
+
+
+static int cc_trng_pm_get(struct device *dev)
+{
+ int rc = 0;
+
+ rc = pm_runtime_get_sync(dev);
+
+ /* pm_runtime_get_sync() can return 1 as a valid return code */
+ return (rc == 1 ? 0 : rc);
+}
+
+static void cc_trng_pm_put_suspend(struct device *dev)
+{
+ int rc = 0;
+
+ pm_runtime_mark_last_busy(dev);
+ rc = pm_runtime_put_autosuspend(dev);
+ if (rc)
+ dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", rc);
+}
+
+static int cc_trng_pm_init(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+
+ /* must be before the enabling to avoid redundant suspending */
+ pm_runtime_set_autosuspend_delay(dev, CC_TRNG_SUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+ /* set us as active - note we won't do PM ops until cc_trng_pm_go()! */
+ return pm_runtime_set_active(dev);
+}
+
+static void cc_trng_pm_go(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+
+ /* enable the PM module */
+ pm_runtime_enable(dev);
+}
+
+static void cc_trng_pm_fini(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+
+ pm_runtime_disable(dev);
+}
+
+
+static inline int cc_trng_parse_sampling_ratio(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+ struct device_node *np = drvdata->pdev->dev.of_node;
+ int rc;
+ int i;
+ /* ret will be set to 0 if at least one rosc has (sampling ratio > 0) */
+ int ret = -EINVAL;
+
+ rc = of_property_read_u32_array(np, "arm,rosc-ratio",
+ drvdata->smpl_ratio,
+ CC_TRNG_NUM_OF_ROSCS);
+ if (rc) {
+ /* arm,rosc-ratio was not found in device tree */
+ return rc;
+ }
+
+ /* verify that at least one rosc has (sampling ratio > 0) */
+ for (i = 0; i < CC_TRNG_NUM_OF_ROSCS; ++i) {
+ dev_dbg(dev, "rosc %d sampling ratio %u",
+ i, drvdata->smpl_ratio[i]);
+
+ if (drvdata->smpl_ratio[i] > 0)
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int cc_trng_change_rosc(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+
+ dev_dbg(dev, "cctrng change rosc (was %d)\n", drvdata->active_rosc);
+ drvdata->active_rosc += 1;
+
+ while (drvdata->active_rosc < CC_TRNG_NUM_OF_ROSCS) {
+ if (drvdata->smpl_ratio[drvdata->active_rosc] > 0)
+ return 0;
+
+ drvdata->active_rosc += 1;
+ }
+ return -EINVAL;
+}
+
+
+static void cc_trng_enable_rnd_source(struct cctrng_drvdata *drvdata)
+{
+ u32 max_cycles;
+
+ /* Set watchdog threshold to maximal allowed time (in CPU cycles) */
+ max_cycles = CCTRNG_TIMEOUT(drvdata->smpl_ratio[drvdata->active_rosc]);
+ cc_iowrite(drvdata, CC_RNG_WATCHDOG_VAL_REG_OFFSET, max_cycles);
+
+ /* enable the RND source */
+ cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0x1);
+
+ /* unmask RNG interrupts */
+ cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, (u32)~CC_RNG_INT_MASK);
+}
+
+
+/* increase circular data buffer index (head/tail): round the byte count
+ * up to whole words and wrap on the power-of-2 buffer size
+ */
+static inline void circ_idx_inc(int *idx, int bytes)
+{
+ *idx += (bytes + 3) >> 2;
+ *idx &= (CCTRNG_DATA_BUF_WORDS - 1);
+}
+
+static inline size_t circ_buf_space(struct cctrng_drvdata *drvdata)
+{
+ return CIRC_SPACE(drvdata->circ.head,
+ drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
+
+}
+
+static int cctrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ /* current implementation ignores "wait" */
+
+ struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)rng->priv;
+ struct device *dev = &(drvdata->pdev->dev);
+ u32 *buf = (u32 *)drvdata->circ.buf;
+ size_t copied = 0;
+ size_t cnt_w;
+ size_t size;
+ size_t left;
+
+ if (!spin_trylock(&drvdata->read_lock)) {
+ /* concurrent consumers from data_buf cannot be served */
+ dev_dbg_ratelimited(dev, "unable to hold lock\n");
+ return 0;
+ }
+
+ /* copy till end of data buffer (without wrap back) */
+ cnt_w = CIRC_CNT_TO_END(drvdata->circ.head,
+ drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
+ size = min((cnt_w<<2), max);
+ memcpy(data, &(buf[drvdata->circ.tail]), size);
+ copied = size;
+ circ_idx_inc(&drvdata->circ.tail, size);
+ /* copy rest of data in data buffer */
+ left = max - copied;
+ if (left > 0) {
+ cnt_w = CIRC_CNT(drvdata->circ.head,
+ drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
+ size = min((cnt_w<<2), left);
+ memcpy((u8 *)data + copied, &(buf[drvdata->circ.tail]), size);
+ copied += size;
+ circ_idx_inc(&drvdata->circ.tail, size);
+ }
+
+ spin_unlock(&drvdata->read_lock);
+
+ if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
+ if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
+ /* re-check space in buffer to avoid potential race */
+ if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
+ /* increment device's usage counter */
+ int rc = cc_trng_pm_get(dev);
+
+ if (rc) {
+ dev_err(dev,
+ "cc_trng_pm_get returned %x\n",
+ rc);
+ return rc;
+ }
+
+ /* schedule execution of deferred work handler
+ * for filling of data buffer
+ */
+ schedule_work(&drvdata->startwork);
+ } else {
+ atomic_set(&drvdata->pending_hw, 0);
+ }
+ }
+ }
+
+ return copied;
+}
+
+static void cc_trng_hw_trigger(struct cctrng_drvdata *drvdata)
+{
+ u32 tmp_smpl_cnt = 0;
+ struct device *dev = &(drvdata->pdev->dev);
+
+ dev_dbg(dev, "cctrng hw trigger.\n");
+
+ /* enable the HW RND clock */
+ cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);
+
+ /* do software reset */
+ cc_iowrite(drvdata, CC_RNG_SW_RESET_REG_OFFSET, 0x1);
+ /* to verify that the reset has completed, the sample count register
+ * is written and then read back until it returns the written value
+ */
+ do {
+ /* enable the HW RND clock */
+ cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);
+
+ /* set sampling ratio (rng_clocks) between consecutive bits */
+ cc_iowrite(drvdata, CC_SAMPLE_CNT1_REG_OFFSET,
+ drvdata->smpl_ratio[drvdata->active_rosc]);
+
+ /* read the sampling ratio */
+ tmp_smpl_cnt = cc_ioread(drvdata, CC_SAMPLE_CNT1_REG_OFFSET);
+
+ } while (tmp_smpl_cnt != drvdata->smpl_ratio[drvdata->active_rosc]);
+
+ /* disable the RND source for setting new parameters in HW */
+ cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);
+
+ cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, 0xFFFFFFFF);
+
+ cc_iowrite(drvdata, CC_TRNG_CONFIG_REG_OFFSET, drvdata->active_rosc);
+
+ /* Debug Control register: set to 0 - no bypasses */
+ cc_iowrite(drvdata, CC_TRNG_DEBUG_CONTROL_REG_OFFSET, 0);
+
+ cc_trng_enable_rnd_source(drvdata);
+}
+
+static void cc_trng_compwork_handler(struct work_struct *w)
+{
+ u32 isr = 0;
+ u32 ehr_valid = 0;
+ struct cctrng_drvdata *drvdata =
+ container_of(w, struct cctrng_drvdata, compwork);
+ struct device *dev = &(drvdata->pdev->dev);
+ int i;
+
+ /* stop DMA and the RNG source */
+ cc_iowrite(drvdata, CC_RNG_DMA_ENABLE_REG_OFFSET, 0);
+ cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);
+
+ /* read RNG_ISR and check for errors */
+ isr = cc_ioread(drvdata, CC_RNG_ISR_REG_OFFSET);
+ ehr_valid = CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr);
+ dev_dbg(dev, "Got RNG_ISR=0x%08X (EHR_VALID=%u)\n", isr, ehr_valid);
+
+ if (fips_enabled && CC_REG_FLD_GET(RNG_ISR, CRNGT_ERR, isr)) {
+ fips_fail_notify();
+ /* FIPS error is fatal */
+ panic("Got HW CRNGT error while fips is enabled!\n");
+ }
+
+ /* Clear all pending RNG interrupts */
+ cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, isr);
+
+
+ if (!ehr_valid) {
+ /* in case of AUTOCORR/TIMEOUT error, try the next ROSC */
+ if (CC_REG_FLD_GET(RNG_ISR, AUTOCORR_ERR, isr) ||
+ CC_REG_FLD_GET(RNG_ISR, WATCHDOG, isr)) {
+ dev_dbg(dev, "cctrng autocorr/timeout error.\n");
+ goto next_rosc;
+ }
+
+ /* in case of VN error, ignore it */
+ }
+
+ /* read EHR data from registers */
+ for (i = 0; i < CC_TRNG_EHR_IN_WORDS; i++) {
+ /* calc word ptr in data_buf */
+ u32 *buf = (u32 *)drvdata->circ.buf;
+
+ buf[drvdata->circ.head] = cc_ioread(drvdata,
+ CC_EHR_DATA_0_REG_OFFSET + (i*sizeof(u32)));
+
+ /* EHR_DATA registers are cleared on read. If a zero value was
+ * returned, restart the entropy collection.
+ */
+ if (buf[drvdata->circ.head] == 0) {
+ dev_dbg(dev, "Got 0 value in EHR. active_rosc %u\n",
+ drvdata->active_rosc);
+ goto next_rosc;
+ }
+
+ circ_idx_inc(&drvdata->circ.head, 1<<2);
+ }
+
+ atomic_set(&drvdata->pending_hw, 0);
+
+ /* continue to fill data buffer if needed */
+ if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
+ if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
+ /* Re-enable rnd source */
+ cc_trng_enable_rnd_source(drvdata);
+ return;
+ }
+ }
+
+ cc_trng_pm_put_suspend(dev);
+
+ dev_dbg(dev, "compwork handler done\n");
+ return;
+
+next_rosc:
+ if ((circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) &&
+ (cc_trng_change_rosc(drvdata) == 0)) {
+ /* trigger trng hw with next rosc */
+ cc_trng_hw_trigger(drvdata);
+ } else {
+ atomic_set(&drvdata->pending_hw, 0);
+ cc_trng_pm_put_suspend(dev);
+ }
+}
+
+static irqreturn_t cc_isr(int irq, void *dev_id)
+{
+ struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)dev_id;
+ struct device *dev = &(drvdata->pdev->dev);
+ u32 irr;
+
+ /* if the driver is suspended, return; this is probably a shared interrupt */
+ if (pm_runtime_suspended(dev))
+ return IRQ_NONE;
+
+ /* read the interrupt status */
+ irr = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
+ dev_dbg(dev, "Got IRR=0x%08X\n", irr);
+
+ if (irr == 0) /* Probably shared interrupt line */
+ return IRQ_NONE;
+
+ /* clear interrupt - must be before processing events */
+ cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, irr);
+
+ /* RNG interrupt - most probable */
+ if (irr & CC_HOST_RNG_IRQ_MASK) {
+ /* Mask RNG interrupts - will be unmasked in deferred work */
+ cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, 0xFFFFFFFF);
+
+ /* Clear the RNG interrupt here so that it does not fire again
+ * once we unmask RNG interrupts.
+ */
+ cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET,
+ CC_HOST_RNG_IRQ_MASK);
+
+ irr &= ~CC_HOST_RNG_IRQ_MASK;
+
+ /* schedule execution of deferred work handler */
+ schedule_work(&drvdata->compwork);
+ }
+
+ if (irr) {
+ dev_dbg_ratelimited(dev,
+ "IRR includes unknown cause bits (0x%08X)\n",
+ irr);
+ /* Just warning */
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void cc_trng_startwork_handler(struct work_struct *w)
+{
+ struct cctrng_drvdata *drvdata =
+ container_of(w, struct cctrng_drvdata, startwork);
+
+ drvdata->active_rosc = 0;
+ cc_trng_hw_trigger(drvdata);
+}
+
+static int cctrng_probe(struct platform_device *pdev)
+{
+ struct cctrng_drvdata *drvdata;
+ struct device *dev = &pdev->dev;
+ int rc = 0;
+ u32 val;
+ int irq;
+
+ /* Compile time assertion checks */
+ BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
+ BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!drvdata->rng.name)
+ return -ENOMEM;
+
+ drvdata->rng.read = cctrng_read;
+ drvdata->rng.priv = (unsigned long)drvdata;
+ drvdata->rng.quality = CC_TRNG_QUALITY;
+
+ platform_set_drvdata(pdev, drvdata);
+ drvdata->pdev = pdev;
+
+ drvdata->circ.buf = (char *)drvdata->data_buf;
+
+ drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(drvdata->cc_base))
+ return dev_err_probe(dev, PTR_ERR(drvdata->cc_base), "Failed to ioremap registers");
+
+ /* Then IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ /* parse sampling rate from device tree */
+ rc = cc_trng_parse_sampling_ratio(drvdata);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get legal sampling ratio for rosc\n");
+
+ drvdata->clk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(drvdata->clk))
+ return dev_err_probe(dev, PTR_ERR(drvdata->clk),
+ "Failed to get or enable the clock\n");
+
+ INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler);
+ INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler);
+ spin_lock_init(&drvdata->read_lock);
+
+ /* register the driver isr function */
+ rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
+ if (rc)
+ return dev_err_probe(dev, rc, "Could not register to interrupt %d\n", irq);
+ dev_dbg(dev, "Registered to IRQ: %d\n", irq);
+
+ /* Clear all pending interrupts */
+ val = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
+ dev_dbg(dev, "IRR=0x%08X\n", val);
+ cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, val);
+
+ /* unmask HOST RNG interrupt */
+ cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
+ cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
+ ~CC_HOST_RNG_IRQ_MASK);
+
+ /* init PM */
+ rc = cc_trng_pm_init(drvdata);
+ if (rc)
+ return dev_err_probe(dev, rc, "cc_trng_pm_init failed\n");
+
+ /* increment device's usage counter */
+ rc = cc_trng_pm_get(dev);
+ if (rc)
+ return dev_err_probe(dev, rc, "cc_trng_pm_get returned %x\n", rc);
+
+ /* set pending_hw to ensure the HW won't be triggered from a read */
+ atomic_set(&drvdata->pending_hw, 1);
+
+ /* registration of the hwrng device */
+ rc = devm_hwrng_register(dev, &drvdata->rng);
+ if (rc) {
+ dev_err(dev, "Could not register hwrng device.\n");
+ goto post_pm_err;
+ }
+
+ /* trigger HW to start generate data */
+ drvdata->active_rosc = 0;
+ cc_trng_hw_trigger(drvdata);
+
+ /* All set, we can allow auto-suspend */
+ cc_trng_pm_go(drvdata);
+
+ dev_info(dev, "ARM cctrng device initialized\n");
+
+ return 0;
+
+post_pm_err:
+ cc_trng_pm_fini(drvdata);
+
+ return rc;
+}
+
+static int cctrng_remove(struct platform_device *pdev)
+{
+ struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ dev_dbg(dev, "Releasing cctrng resources...\n");
+
+ cc_trng_pm_fini(drvdata);
+
+ dev_info(dev, "ARM cctrng device terminated\n");
+
+ return 0;
+}
+
+static int __maybe_unused cctrng_suspend(struct device *dev)
+{
+ struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
+ cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
+ POWER_DOWN_ENABLE);
+
+ clk_disable_unprepare(drvdata->clk);
+
+ return 0;
+}
+
+static bool cctrng_wait_for_reset_completion(struct cctrng_drvdata *drvdata)
+{
+ unsigned int val;
+ unsigned int i;
+
+ for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
+ /* in cc7x3 NVM_IS_IDLE indicates that CC reset is
+ * completed and device is fully functional
+ */
+ val = cc_ioread(drvdata, CC_NVM_IS_IDLE_REG_OFFSET);
+ if (val & BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)) {
+ /* hw indicate reset completed */
+ return true;
+ }
+ /* allow other processes to be scheduled on this CPU */
+ schedule();
+ }
+ /* reset not completed */
+ return false;
+}
+
+static int __maybe_unused cctrng_resume(struct device *dev)
+{
+ struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
+ int rc;
+
+ dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
+ /* Enables the device source clk */
+ rc = clk_prepare_enable(drvdata->clk);
+ if (rc) {
+ dev_err(dev, "failed getting clock back on. We're toast.\n");
+ return rc;
+ }
+
+ /* wait for Cryptocell reset completion */
+ if (!cctrng_wait_for_reset_completion(drvdata)) {
+ dev_err(dev, "Cryptocell reset not completed");
+ return -EBUSY;
+ }
+
+ /* unmask HOST RNG interrupt */
+ cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
+ cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
+ ~CC_HOST_RNG_IRQ_MASK);
+
+ cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
+ POWER_DOWN_DISABLE);
+
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(cctrng_pm, cctrng_suspend, cctrng_resume, NULL);
+
+static const struct of_device_id arm_cctrng_dt_match[] = {
+ { .compatible = "arm,cryptocell-713-trng", },
+ { .compatible = "arm,cryptocell-703-trng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, arm_cctrng_dt_match);
+
+static struct platform_driver cctrng_driver = {
+ .driver = {
+ .name = "cctrng",
+ .of_match_table = arm_cctrng_dt_match,
+ .pm = &cctrng_pm,
+ },
+ .probe = cctrng_probe,
+ .remove = cctrng_remove,
+};
+
+module_platform_driver(cctrng_driver);
+
+/* Module description */
+MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver");
+MODULE_AUTHOR("ARM");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cctrng.h b/drivers/char/hw_random/cctrng.h
new file mode 100644
index 0000000000..1f2fde95ad
--- /dev/null
+++ b/drivers/char/hw_random/cctrng.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2020 ARM Limited or its affiliates. */
+
+#include <linux/bitops.h>
+
+#define POWER_DOWN_ENABLE 0x01
+#define POWER_DOWN_DISABLE 0x00
+
+/* hwrng quality: bits of true entropy per 1024 bits of input */
+#define CC_TRNG_QUALITY 1024
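+/* 1024 here means full credit: one bit of entropy per bit of input
+ * (see the quality scaling in hw_random/core.c).
+ */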
+
+/* CryptoCell TRNG HW definitions */
+#define CC_TRNG_NUM_OF_ROSCS 4
+/* The number of words generated in the entropy holding register (EHR)
+ * 6 words (192 bit) according to HW implementation
+ */
+#define CC_TRNG_EHR_IN_WORDS 6
+#define CC_TRNG_EHR_IN_BITS (CC_TRNG_EHR_IN_WORDS * BITS_PER_TYPE(u32))
+
+#define CC_HOST_RNG_IRQ_MASK BIT(CC_HOST_RGF_IRR_RNG_INT_BIT_SHIFT)
+
+/* RNG interrupt mask */
+#define CC_RNG_INT_MASK (BIT(CC_RNG_IMR_EHR_VALID_INT_MASK_BIT_SHIFT) | \
+ BIT(CC_RNG_IMR_AUTOCORR_ERR_INT_MASK_BIT_SHIFT) | \
+ BIT(CC_RNG_IMR_CRNGT_ERR_INT_MASK_BIT_SHIFT) | \
+ BIT(CC_RNG_IMR_VN_ERR_INT_MASK_BIT_SHIFT) | \
+ BIT(CC_RNG_IMR_WATCHDOG_INT_MASK_BIT_SHIFT))
+
+// --------------------------------------
+// BLOCK: RNG
+// --------------------------------------
+#define CC_RNG_IMR_REG_OFFSET 0x0100UL
+#define CC_RNG_IMR_EHR_VALID_INT_MASK_BIT_SHIFT 0x0UL
+#define CC_RNG_IMR_AUTOCORR_ERR_INT_MASK_BIT_SHIFT 0x1UL
+#define CC_RNG_IMR_CRNGT_ERR_INT_MASK_BIT_SHIFT 0x2UL
+#define CC_RNG_IMR_VN_ERR_INT_MASK_BIT_SHIFT 0x3UL
+#define CC_RNG_IMR_WATCHDOG_INT_MASK_BIT_SHIFT 0x4UL
+#define CC_RNG_ISR_REG_OFFSET 0x0104UL
+#define CC_RNG_ISR_EHR_VALID_BIT_SHIFT 0x0UL
+#define CC_RNG_ISR_EHR_VALID_BIT_SIZE 0x1UL
+#define CC_RNG_ISR_AUTOCORR_ERR_BIT_SHIFT 0x1UL
+#define CC_RNG_ISR_AUTOCORR_ERR_BIT_SIZE 0x1UL
+#define CC_RNG_ISR_CRNGT_ERR_BIT_SHIFT 0x2UL
+#define CC_RNG_ISR_CRNGT_ERR_BIT_SIZE 0x1UL
+#define CC_RNG_ISR_WATCHDOG_BIT_SHIFT 0x4UL
+#define CC_RNG_ISR_WATCHDOG_BIT_SIZE 0x1UL
+#define CC_RNG_ICR_REG_OFFSET 0x0108UL
+#define CC_TRNG_CONFIG_REG_OFFSET 0x010CUL
+#define CC_EHR_DATA_0_REG_OFFSET 0x0114UL
+#define CC_RND_SOURCE_ENABLE_REG_OFFSET 0x012CUL
+#define CC_SAMPLE_CNT1_REG_OFFSET 0x0130UL
+#define CC_TRNG_DEBUG_CONTROL_REG_OFFSET 0x0138UL
+#define CC_RNG_SW_RESET_REG_OFFSET 0x0140UL
+#define CC_RNG_CLK_ENABLE_REG_OFFSET 0x01C4UL
+#define CC_RNG_DMA_ENABLE_REG_OFFSET 0x01C8UL
+#define CC_RNG_WATCHDOG_VAL_REG_OFFSET 0x01D8UL
+// --------------------------------------
+// BLOCK: SEC_HOST_RGF
+// --------------------------------------
+#define CC_HOST_RGF_IRR_REG_OFFSET 0x0A00UL
+#define CC_HOST_RGF_IRR_RNG_INT_BIT_SHIFT 0xAUL
+#define CC_HOST_RGF_IMR_REG_OFFSET 0x0A04UL
+#define CC_HOST_RGF_ICR_REG_OFFSET 0x0A08UL
+
+#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0x0A78UL
+
+// --------------------------------------
+// BLOCK: NVM
+// --------------------------------------
+#define CC_NVM_IS_IDLE_REG_OFFSET 0x0F10UL
+#define CC_NVM_IS_IDLE_VALUE_BIT_SHIFT 0x0UL
+#define CC_NVM_IS_IDLE_VALUE_BIT_SIZE 0x1UL
diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c
new file mode 100644
index 0000000000..31935316a1
--- /dev/null
+++ b/drivers/char/hw_random/cn10k-rng.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K RVU Hardware Random Number Generator.
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+
+#include <linux/arm-smccc.h>
+
+/* CSRs */
+#define RNM_CTL_STATUS 0x000
+#define RNM_ENTROPY_STATUS 0x008
+#define RNM_CONST 0x030
+#define RNM_EBG_ENT 0x048
+#define RNM_PF_EBG_HEALTH 0x050
+#define RNM_PF_RANDOM 0x400
+#define RNM_TRNG_RESULT 0x408
+
+/* Extended TRNG Read and Status Registers */
+#define RNM_PF_TRNG_DAT 0x1000
+#define RNM_PF_TRNG_RES 0x1008
+
+struct cn10k_rng {
+ void __iomem *reg_base;
+ struct hwrng ops;
+ struct pci_dev *pdev;
+ /* Octeon CN10K-A A0/A1, CNF10K-A A0/A1 and CNF10K-B A0/B0
+ * do not support the extended TRNG registers
+ */
+ bool extended_trng_regs;
+};
+
+#define PLAT_OCTEONTX_RESET_RNG_EBG_HEALTH_STATE 0xc2000b0f
+
+#define PCI_SUBSYS_DEVID_CN10K_A_RNG 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A_RNG 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B_RNG 0xBC00
+
+static bool cn10k_is_extended_trng_regs_supported(struct pci_dev *pdev)
+{
+ /* CN10K-A A0/A1 */
+ if ((pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RNG) &&
+ (!pdev->revision || (pdev->revision & 0xff) == 0x50 ||
+ (pdev->revision & 0xff) == 0x51))
+ return false;
+
+ /* CNF10K-A A0 */
+ if ((pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_RNG) &&
+ (!pdev->revision || (pdev->revision & 0xff) == 0x60 ||
+ (pdev->revision & 0xff) == 0x61))
+ return false;
+
+ /* CNF10K-B A0/B0 */
+ if ((pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_B_RNG) &&
+ (!pdev->revision || (pdev->revision & 0xff) == 0x70 ||
+ (pdev->revision & 0xff) == 0x74))
+ return false;
+
+ return true;
+}
+
+static unsigned long reset_rng_health_state(struct cn10k_rng *rng)
+{
+ struct arm_smccc_res res;
+
+ /* Send SMC service call to reset EBG health state */
+ arm_smccc_smc(PLAT_OCTEONTX_RESET_RNG_EBG_HEALTH_STATE, 0, 0, 0, 0, 0, 0, 0, &res);
+ return res.a0;
+}
+
+static int check_rng_health(struct cn10k_rng *rng)
+{
+ u64 status;
+ unsigned long err;
+
+ /* Skip checking health */
+ if (!rng->reg_base)
+ return -ENODEV;
+
+ status = readq(rng->reg_base + RNM_PF_EBG_HEALTH);
+ if (status & BIT_ULL(20)) {
+ err = reset_rng_health_state(rng);
+ if (err) {
+ dev_err(&rng->pdev->dev, "HWRNG: Health test failed (status=%llx)\n",
+ status);
+ dev_err(&rng->pdev->dev, "HWRNG: error during reset (error=%lx)\n",
+ err);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+/* Returns true when valid data is available, otherwise returns false */
+static bool cn10k_read_trng(struct cn10k_rng *rng, u64 *value)
+{
+ u16 retry_count = 0;
+ u64 upper, lower;
+ u64 status;
+
+ if (rng->extended_trng_regs) {
+ do {
+ *value = readq(rng->reg_base + RNM_PF_TRNG_DAT);
+ if (*value)
+ return true;
+ status = readq(rng->reg_base + RNM_PF_TRNG_RES);
+ if (!status && (retry_count++ > 0x1000))
+ return false;
+ } while (!status);
+ }
+
+ *value = readq(rng->reg_base + RNM_PF_RANDOM);
+
+ /* HW can run out of entropy if a large amount of random data is read in
+ * quick succession. Zeros may not be real random data from the HW.
+ */
+ if (!*value) {
+ upper = readq(rng->reg_base + RNM_PF_RANDOM);
+ lower = readq(rng->reg_base + RNM_PF_RANDOM);
+ while (!(upper & 0x00000000FFFFFFFFULL))
+ upper = readq(rng->reg_base + RNM_PF_RANDOM);
+ while (!(lower & 0xFFFFFFFF00000000ULL))
+ lower = readq(rng->reg_base + RNM_PF_RANDOM);
+
+ *value = (upper & 0xFFFFFFFF00000000) | (lower & 0xFFFFFFFF);
+ }
+ return true;
+}
+
+static int cn10k_rng_read(struct hwrng *hwrng, void *data,
+ size_t max, bool wait)
+{
+ struct cn10k_rng *rng = (struct cn10k_rng *)hwrng->priv;
+ unsigned int size;
+ u8 *pos = data;
+ int err = 0;
+ u64 value;
+
+ err = check_rng_health(rng);
+ if (err)
+ return err;
+
+ size = max;
+
+ while (size >= 8) {
+ if (!cn10k_read_trng(rng, &value))
+ goto out;
+
+ *((u64 *)pos) = value;
+ size -= 8;
+ pos += 8;
+ }
+
+ if (size > 0) {
+ if (!cn10k_read_trng(rng, &value))
+ goto out;
+
+ while (size > 0) {
+ *pos = (u8)value;
+ value >>= 8;
+ size--;
+ pos++;
+ }
+ }
+
+out:
+ return max - size;
+}
+
+static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct cn10k_rng *rng;
+ int err;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ rng->pdev = pdev;
+ pci_set_drvdata(pdev, rng);
+
+ rng->reg_base = pcim_iomap(pdev, 0, 0);
+ if (!rng->reg_base)
+ return dev_err_probe(&pdev->dev, -ENOMEM, "Error while mapping CSRs, exiting\n");
+
+ rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "cn10k-rng-%s", dev_name(&pdev->dev));
+ if (!rng->ops.name)
+ return -ENOMEM;
+
+ rng->ops.read = cn10k_rng_read;
+ rng->ops.priv = (unsigned long)rng;
+
+ rng->extended_trng_regs = cn10k_is_extended_trng_regs_supported(pdev);
+
+ reset_rng_health_state(rng);
+
+ err = devm_hwrng_register(&pdev->dev, &rng->ops);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Could not register hwrng device.\n");
+
+ return 0;
+}
+
+static const struct pci_device_id cn10k_rng_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA098) }, /* RNG PF */
+ {0,},
+};
+
+MODULE_DEVICE_TABLE(pci, cn10k_rng_id_table);
+
+static struct pci_driver cn10k_rng_driver = {
+ .name = "cn10k_rng",
+ .id_table = cn10k_rng_id_table,
+ .probe = cn10k_rng_probe,
+};
+
+module_pci_driver(cn10k_rng_driver);
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_DESCRIPTION("Marvell CN10K HW RNG Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
new file mode 100644
index 0000000000..a3bbdd6e60
--- /dev/null
+++ b/drivers/char/hw_random/core.c
@@ -0,0 +1,734 @@
+/*
+ * hw_random/core.c: HWRNG core API
+ *
+ * Copyright 2006 Michael Buesch <m@bues.ch>
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * Please read Documentation/admin-guide/hw_random.rst for details on use.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/hw_random.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+
+#define RNG_MODULE_NAME "hw_random"
+
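+/* Per-read buffer size: at least 32 bytes and no smaller than a cache line. */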
+#define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES)
+
+static struct hwrng *current_rng;
+/* the current rng has been explicitly chosen by user via sysfs */
+static int cur_rng_set_by_user;
+static struct task_struct *hwrng_fill;
+/* list of registered rngs */
+static LIST_HEAD(rng_list);
+/* Protects rng_list and current_rng */
+static DEFINE_MUTEX(rng_mutex);
+/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
+static DEFINE_MUTEX(reading_mutex);
+static int data_avail;
+static u8 *rng_buffer, *rng_fillbuf;
+static unsigned short current_quality;
+static unsigned short default_quality = 1024; /* default to maximum */
+
+module_param(current_quality, ushort, 0644);
+MODULE_PARM_DESC(current_quality,
+ "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead");
+module_param(default_quality, ushort, 0644);
+MODULE_PARM_DESC(default_quality,
+ "default maximum entropy content of hwrng per 1024 bits of input");
+
+static void drop_current_rng(void);
+static int hwrng_init(struct hwrng *rng);
+static int hwrng_fillfn(void *unused);
+
+static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+ int wait);
+
+static size_t rng_buffer_size(void)
+{
+ return RNG_BUFFER_SIZE;
+}
+
+static void add_early_randomness(struct hwrng *rng)
+{
+ int bytes_read;
+
+ mutex_lock(&reading_mutex);
+ bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
+ mutex_unlock(&reading_mutex);
+ if (bytes_read > 0) {
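+		/* scale the credited entropy by the rng's per-1024-bit quality estimate */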
+ size_t entropy = bytes_read * 8 * rng->quality / 1024;
+ add_hwgenerator_randomness(rng_fillbuf, bytes_read, entropy, false);
+ }
+}
+
+static inline void cleanup_rng(struct kref *kref)
+{
+ struct hwrng *rng = container_of(kref, struct hwrng, ref);
+
+ if (rng->cleanup)
+ rng->cleanup(rng);
+
+ complete(&rng->cleanup_done);
+}
+
+static int set_current_rng(struct hwrng *rng)
+{
+ int err;
+
+ BUG_ON(!mutex_is_locked(&rng_mutex));
+
+ err = hwrng_init(rng);
+ if (err)
+ return err;
+
+ drop_current_rng();
+ current_rng = rng;
+
+ /* if necessary, start hwrng thread */
+ if (!hwrng_fill) {
+ hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
+ if (IS_ERR(hwrng_fill)) {
+ pr_err("hwrng_fill thread creation failed\n");
+ hwrng_fill = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static void drop_current_rng(void)
+{
+ BUG_ON(!mutex_is_locked(&rng_mutex));
+ if (!current_rng)
+ return;
+
+	/* drop the last reference to trigger the cleanup */
+ kref_put(&current_rng->ref, cleanup_rng);
+ current_rng = NULL;
+}
+
+/* Returns ERR_PTR(), NULL or refcounted hwrng */
+static struct hwrng *get_current_rng_nolock(void)
+{
+ if (current_rng)
+ kref_get(&current_rng->ref);
+
+ return current_rng;
+}
+
+static struct hwrng *get_current_rng(void)
+{
+ struct hwrng *rng;
+
+ if (mutex_lock_interruptible(&rng_mutex))
+ return ERR_PTR(-ERESTARTSYS);
+
+ rng = get_current_rng_nolock();
+
+ mutex_unlock(&rng_mutex);
+ return rng;
+}
+
+static void put_rng(struct hwrng *rng)
+{
+ /*
+	 * Hold rng_mutex here so we serialize in case set_current_rng()
+	 * is called on this rng again immediately.
+ */
+ mutex_lock(&rng_mutex);
+ if (rng)
+ kref_put(&rng->ref, cleanup_rng);
+ mutex_unlock(&rng_mutex);
+}
+
+static int hwrng_init(struct hwrng *rng)
+{
+ if (kref_get_unless_zero(&rng->ref))
+ goto skip_init;
+
+ if (rng->init) {
+ int ret;
+
+ ret = rng->init(rng);
+ if (ret)
+ return ret;
+ }
+
+ kref_init(&rng->ref);
+ reinit_completion(&rng->cleanup_done);
+
+skip_init:
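+	/* clamp quality to at most 1024 and to default_quality; a driver value of 0 means 1024 */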
+ rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024);
+ current_quality = rng->quality; /* obsolete */
+
+ return 0;
+}
+
+static int rng_dev_open(struct inode *inode, struct file *filp)
+{
+ /* enforce read-only access to this chrdev */
+ if ((filp->f_mode & FMODE_READ) == 0)
+ return -EINVAL;
+ if (filp->f_mode & FMODE_WRITE)
+ return -EINVAL;
+ return 0;
+}
+
+static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+ int wait) {
+ int present;
+
+ BUG_ON(!mutex_is_locked(&reading_mutex));
+ if (rng->read)
+ return rng->read(rng, (void *)buffer, size, wait);
+
+ if (rng->data_present)
+ present = rng->data_present(rng, wait);
+ else
+ present = 1;
+
+ if (present)
+ return rng->data_read(rng, (u32 *)buffer);
+
+ return 0;
+}
+
+static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ size_t size, loff_t *offp)
+{
+ u8 buffer[RNG_BUFFER_SIZE];
+ ssize_t ret = 0;
+ int err = 0;
+ int bytes_read, len;
+ struct hwrng *rng;
+
+ while (size) {
+ rng = get_current_rng();
+ if (IS_ERR(rng)) {
+ err = PTR_ERR(rng);
+ goto out;
+ }
+ if (!rng) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (mutex_lock_interruptible(&reading_mutex)) {
+ err = -ERESTARTSYS;
+ goto out_put;
+ }
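+		/* refill rng_buffer only when no bytes are left over from a previous read */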
+ if (!data_avail) {
+ bytes_read = rng_get_data(rng, rng_buffer,
+ rng_buffer_size(),
+ !(filp->f_flags & O_NONBLOCK));
+ if (bytes_read < 0) {
+ err = bytes_read;
+ goto out_unlock_reading;
+ } else if (bytes_read == 0 &&
+ (filp->f_flags & O_NONBLOCK)) {
+ err = -EAGAIN;
+ goto out_unlock_reading;
+ }
+
+ data_avail = bytes_read;
+ }
+
+ len = data_avail;
+ if (len) {
+ if (len > size)
+ len = size;
+
+ data_avail -= len;
+
+ memcpy(buffer, rng_buffer + data_avail, len);
+ }
+ mutex_unlock(&reading_mutex);
+ put_rng(rng);
+
+ if (len) {
+ if (copy_to_user(buf + ret, buffer, len)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ size -= len;
+ ret += len;
+ }
+
+ if (need_resched())
+ schedule_timeout_interruptible(1);
+
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
+ goto out;
+ }
+ }
+out:
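+	/* wipe the on-stack copy of the random data before returning */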
+ memzero_explicit(buffer, sizeof(buffer));
+ return ret ? : err;
+
+out_unlock_reading:
+ mutex_unlock(&reading_mutex);
+out_put:
+ put_rng(rng);
+ goto out;
+}
+
+static const struct file_operations rng_chrdev_ops = {
+ .owner = THIS_MODULE,
+ .open = rng_dev_open,
+ .read = rng_dev_read,
+ .llseek = noop_llseek,
+};
+
+static const struct attribute_group *rng_dev_groups[];
+
+static struct miscdevice rng_miscdev = {
+ .minor = HWRNG_MINOR,
+ .name = RNG_MODULE_NAME,
+ .nodename = "hwrng",
+ .fops = &rng_chrdev_ops,
+ .groups = rng_dev_groups,
+};
+
+static int enable_best_rng(void)
+{
+ struct hwrng *rng, *new_rng = NULL;
+ int ret = -ENODEV;
+
+ BUG_ON(!mutex_is_locked(&rng_mutex));
+
+ /* no rng to use? */
+ if (list_empty(&rng_list)) {
+ drop_current_rng();
+ cur_rng_set_by_user = 0;
+ return 0;
+ }
+
+ /* use the rng which offers the best quality */
+ list_for_each_entry(rng, &rng_list, list) {
+ if (!new_rng || rng->quality > new_rng->quality)
+ new_rng = rng;
+ }
+
+ ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
+ if (!ret)
+ cur_rng_set_by_user = 0;
+
+ return ret;
+}
+
+static ssize_t rng_current_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ int err;
+ struct hwrng *rng, *old_rng, *new_rng;
+
+ err = mutex_lock_interruptible(&rng_mutex);
+ if (err)
+ return -ERESTARTSYS;
+
+ old_rng = current_rng;
+ if (sysfs_streq(buf, "")) {
+ err = enable_best_rng();
+ } else {
+ list_for_each_entry(rng, &rng_list, list) {
+ if (sysfs_streq(rng->name, buf)) {
+ err = set_current_rng(rng);
+ if (!err)
+ cur_rng_set_by_user = 1;
+ break;
+ }
+ }
+ }
+ new_rng = get_current_rng_nolock();
+ mutex_unlock(&rng_mutex);
+
+ if (new_rng) {
+ if (new_rng != old_rng)
+ add_early_randomness(new_rng);
+ put_rng(new_rng);
+ }
+
+ return err ? : len;
+}
+
+static ssize_t rng_current_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret;
+ struct hwrng *rng;
+
+ rng = get_current_rng();
+ if (IS_ERR(rng))
+ return PTR_ERR(rng);
+
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
+ put_rng(rng);
+
+ return ret;
+}
+
+static ssize_t rng_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err;
+ struct hwrng *rng;
+
+ err = mutex_lock_interruptible(&rng_mutex);
+ if (err)
+ return -ERESTARTSYS;
+ buf[0] = '\0';
+ list_for_each_entry(rng, &rng_list, list) {
+ strlcat(buf, rng->name, PAGE_SIZE);
+ strlcat(buf, " ", PAGE_SIZE);
+ }
+ strlcat(buf, "\n", PAGE_SIZE);
+ mutex_unlock(&rng_mutex);
+
+ return strlen(buf);
+}
+
+static ssize_t rng_selected_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
+}
+
+static ssize_t rng_quality_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret;
+ struct hwrng *rng;
+
+ rng = get_current_rng();
+ if (IS_ERR(rng))
+ return PTR_ERR(rng);
+
+ if (!rng) /* no need to put_rng */
+ return -ENODEV;
+
+ ret = sysfs_emit(buf, "%hu\n", rng->quality);
+ put_rng(rng);
+
+ return ret;
+}
+
+static ssize_t rng_quality_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ u16 quality;
+ int ret = -EINVAL;
+
+ if (len < 2)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&rng_mutex);
+ if (ret)
+ return -ERESTARTSYS;
+
+ ret = kstrtou16(buf, 0, &quality);
+ if (ret || quality > 1024) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!current_rng) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ current_rng->quality = quality;
+ current_quality = quality; /* obsolete */
+
+ /* the best available RNG may have changed */
+ ret = enable_best_rng();
+
+out:
+ mutex_unlock(&rng_mutex);
+ return ret ? ret : len;
+}
+
+static DEVICE_ATTR_RW(rng_current);
+static DEVICE_ATTR_RO(rng_available);
+static DEVICE_ATTR_RO(rng_selected);
+static DEVICE_ATTR_RW(rng_quality);
+
+static struct attribute *rng_dev_attrs[] = {
+ &dev_attr_rng_current.attr,
+ &dev_attr_rng_available.attr,
+ &dev_attr_rng_selected.attr,
+ &dev_attr_rng_quality.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(rng_dev);
+
+static void __exit unregister_miscdev(void)
+{
+ misc_deregister(&rng_miscdev);
+}
+
+static int __init register_miscdev(void)
+{
+ return misc_register(&rng_miscdev);
+}
+
+static int hwrng_fillfn(void *unused)
+{
+ size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */
+ long rc;
+
+ while (!kthread_should_stop()) {
+ unsigned short quality;
+ struct hwrng *rng;
+
+ rng = get_current_rng();
+ if (IS_ERR(rng) || !rng)
+ break;
+ mutex_lock(&reading_mutex);
+ rc = rng_get_data(rng, rng_fillbuf,
+ rng_buffer_size(), 1);
+ if (current_quality != rng->quality)
+ rng->quality = current_quality; /* obsolete */
+ quality = rng->quality;
+ mutex_unlock(&reading_mutex);
+
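+		/* no data from the rng: back off for up to 10 seconds, or until it is unregistered */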
+ if (rc <= 0)
+ hwrng_msleep(rng, 10000);
+
+ put_rng(rng);
+
+ if (rc <= 0)
+ continue;
+
+ /* If we cannot credit at least one bit of entropy,
+ * keep track of the remainder for the next iteration
+ */
+ entropy = rc * quality * 8 + entropy_credit;
+ if ((entropy >> 10) == 0)
+ entropy_credit = entropy;
+
+ /* Outside lock, sure, but y'know: randomness. */
+ add_hwgenerator_randomness((void *)rng_fillbuf, rc,
+ entropy >> 10, true);
+ }
+ hwrng_fill = NULL;
+ return 0;
+}
+
+int hwrng_register(struct hwrng *rng)
+{
+ int err = -EINVAL;
+ struct hwrng *tmp;
+ bool is_new_current = false;
+
+ if (!rng->name || (!rng->data_read && !rng->read))
+ goto out;
+
+ mutex_lock(&rng_mutex);
+
+ /* Must not register two RNGs with the same name. */
+ err = -EEXIST;
+ list_for_each_entry(tmp, &rng_list, list) {
+ if (strcmp(tmp->name, rng->name) == 0)
+ goto out_unlock;
+ }
+ list_add_tail(&rng->list, &rng_list);
+
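+	/*
+	 * cleanup_done starts in the completed state so that
+	 * hwrng_unregister() does not block on an rng that was
+	 * never made current.
+	 */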
+ init_completion(&rng->cleanup_done);
+ complete(&rng->cleanup_done);
+ init_completion(&rng->dying);
+
+ if (!current_rng ||
+ (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
+ /*
+		 * Set the new rng as current: it either is the only rng,
+		 * or it provides better entropy quality and the current
+		 * rng was not explicitly chosen by userspace.
+ */
+ err = set_current_rng(rng);
+ if (err)
+ goto out_unlock;
+ /* to use current_rng in add_early_randomness() we need
+ * to take a ref
+ */
+ is_new_current = true;
+ kref_get(&rng->ref);
+ }
+ mutex_unlock(&rng_mutex);
+ if (is_new_current || !rng->init) {
+ /*
+ * Use a new device's input to add some randomness to
+ * the system. If this rng device isn't going to be
+ * used right away, its init function hasn't been
+ * called yet by set_current_rng(); so only use the
+ * randomness from devices that don't need an init callback
+ */
+ add_early_randomness(rng);
+ }
+ if (is_new_current)
+ put_rng(rng);
+ return 0;
+out_unlock:
+ mutex_unlock(&rng_mutex);
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(hwrng_register);
+
+void hwrng_unregister(struct hwrng *rng)
+{
+ struct hwrng *old_rng, *new_rng;
+ int err;
+
+ mutex_lock(&rng_mutex);
+
+ old_rng = current_rng;
+ list_del(&rng->list);
+ complete_all(&rng->dying);
+ if (current_rng == rng) {
+ err = enable_best_rng();
+ if (err) {
+ drop_current_rng();
+ cur_rng_set_by_user = 0;
+ }
+ }
+
+ new_rng = get_current_rng_nolock();
+ if (list_empty(&rng_list)) {
+ mutex_unlock(&rng_mutex);
+ if (hwrng_fill)
+ kthread_stop(hwrng_fill);
+ } else
+ mutex_unlock(&rng_mutex);
+
+ if (new_rng) {
+ if (old_rng != new_rng)
+ add_early_randomness(new_rng);
+ put_rng(new_rng);
+ }
+
+ wait_for_completion(&rng->cleanup_done);
+}
+EXPORT_SYMBOL_GPL(hwrng_unregister);
+
+static void devm_hwrng_release(struct device *dev, void *res)
+{
+ hwrng_unregister(*(struct hwrng **)res);
+}
+
+static int devm_hwrng_match(struct device *dev, void *res, void *data)
+{
+ struct hwrng **r = res;
+
+ if (WARN_ON(!r || !*r))
+ return 0;
+
+ return *r == data;
+}
+
+int devm_hwrng_register(struct device *dev, struct hwrng *rng)
+{
+ struct hwrng **ptr;
+ int error;
+
+ ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ error = hwrng_register(rng);
+ if (error) {
+ devres_free(ptr);
+ return error;
+ }
+
+ *ptr = rng;
+ devres_add(dev, ptr);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_hwrng_register);
+
+void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
+{
+ devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
+}
+EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
+
+long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
+{
+ unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+
+ return wait_for_completion_interruptible_timeout(&rng->dying, timeout);
+}
+EXPORT_SYMBOL_GPL(hwrng_msleep);
+
+long hwrng_yield(struct hwrng *rng)
+{
+ return wait_for_completion_interruptible_timeout(&rng->dying, 1);
+}
+EXPORT_SYMBOL_GPL(hwrng_yield);
+
+static int __init hwrng_modinit(void)
+{
+ int ret;
+
+ /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
+ rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
+ if (!rng_buffer)
+ return -ENOMEM;
+
+ rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
+ if (!rng_fillbuf) {
+ kfree(rng_buffer);
+ return -ENOMEM;
+ }
+
+ ret = register_miscdev();
+ if (ret) {
+ kfree(rng_fillbuf);
+ kfree(rng_buffer);
+ }
+
+ return ret;
+}
+
+static void __exit hwrng_modexit(void)
+{
+ mutex_lock(&rng_mutex);
+ BUG_ON(current_rng);
+ kfree(rng_buffer);
+ kfree(rng_fillbuf);
+ mutex_unlock(&rng_mutex);
+
+ unregister_miscdev();
+}
+
+fs_initcall(hwrng_modinit); /* depends on misc_register() */
+module_exit(hwrng_modexit);
+
+MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
new file mode 100644
index 0000000000..30207b7ac5
--- /dev/null
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RNG driver for Exynos TRNGs
+ *
+ * Author: Łukasz Stelmach <l.stelmach@samsung.com>
+ *
+ * Copyright 2017 (c) Samsung Electronics Software, Inc.
+ *
+ * Based on the Exynos PRNG driver drivers/crypto/exynos-rng by
+ * Krzysztof Kozłowski <krzk@kernel.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define EXYNOS_TRNG_CLKDIV (0x0)
+
+#define EXYNOS_TRNG_CTRL (0x20)
+#define EXYNOS_TRNG_CTRL_RNGEN BIT(31)
+
+#define EXYNOS_TRNG_POST_CTRL (0x30)
+#define EXYNOS_TRNG_ONLINE_CTRL (0x40)
+#define EXYNOS_TRNG_ONLINE_STAT (0x44)
+#define EXYNOS_TRNG_ONLINE_MAXCHI2 (0x48)
+#define EXYNOS_TRNG_FIFO_CTRL (0x50)
+#define EXYNOS_TRNG_FIFO_0 (0x80)
+#define EXYNOS_TRNG_FIFO_1 (0x84)
+#define EXYNOS_TRNG_FIFO_2 (0x88)
+#define EXYNOS_TRNG_FIFO_3 (0x8c)
+#define EXYNOS_TRNG_FIFO_4 (0x90)
+#define EXYNOS_TRNG_FIFO_5 (0x94)
+#define EXYNOS_TRNG_FIFO_6 (0x98)
+#define EXYNOS_TRNG_FIFO_7 (0x9c)
+#define EXYNOS_TRNG_FIFO_LEN (8)
+#define EXYNOS_TRNG_CLOCK_RATE (500000)
+
+
+struct exynos_trng_dev {
+ struct device *dev;
+ void __iomem *mem;
+ struct clk *clk;
+ struct hwrng rng;
+};
+
+static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max,
+ bool wait)
+{
+ struct exynos_trng_dev *trng;
+ int val;
+
+ max = min_t(size_t, max, (EXYNOS_TRNG_FIFO_LEN * 4));
+
+ trng = (struct exynos_trng_dev *)rng->priv;
+
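+	/*
+	 * Request max * 8 bits of random data; the poll below waits for
+	 * the FIFO control register to read back zero before the FIFO
+	 * contents are copied out.
+	 */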
+ writel_relaxed(max * 8, trng->mem + EXYNOS_TRNG_FIFO_CTRL);
+ val = readl_poll_timeout(trng->mem + EXYNOS_TRNG_FIFO_CTRL, val,
+ val == 0, 200, 1000000);
+ if (val < 0)
+ return val;
+
+ memcpy_fromio(data, trng->mem + EXYNOS_TRNG_FIFO_0, max);
+
+ return max;
+}
+
+static int exynos_trng_init(struct hwrng *rng)
+{
+ struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv;
+ unsigned long sss_rate;
+ u32 val;
+
+ sss_rate = clk_get_rate(trng->clk);
+
+ /*
+	 * For most TRNG circuits a clock frequency below 500 kHz
+	 * is safe.
+ */
+ val = sss_rate / (EXYNOS_TRNG_CLOCK_RATE * 2);
+ if (val > 0x7fff) {
+ dev_err(trng->dev, "clock divider too large: %d", val);
+ return -ERANGE;
+ }
+ val = val << 1;
+ writel_relaxed(val, trng->mem + EXYNOS_TRNG_CLKDIV);
+
+ /* Enable the generator. */
+ val = EXYNOS_TRNG_CTRL_RNGEN;
+ writel_relaxed(val, trng->mem + EXYNOS_TRNG_CTRL);
+
+ /*
+ * Disable post-processing. /dev/hwrng is supposed to deliver
+ * unprocessed data.
+ */
+ writel_relaxed(0, trng->mem + EXYNOS_TRNG_POST_CTRL);
+
+ return 0;
+}
+
+static int exynos_trng_probe(struct platform_device *pdev)
+{
+ struct exynos_trng_dev *trng;
+ int ret = -ENOMEM;
+
+ trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return ret;
+
+ trng->rng.name = devm_kstrdup(&pdev->dev, dev_name(&pdev->dev),
+ GFP_KERNEL);
+ if (!trng->rng.name)
+ return ret;
+
+ trng->rng.init = exynos_trng_init;
+ trng->rng.read = exynos_trng_do_read;
+ trng->rng.priv = (unsigned long) trng;
+
+ platform_set_drvdata(pdev, trng);
+ trng->dev = &pdev->dev;
+
+ trng->mem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(trng->mem))
+ return PTR_ERR(trng->mem);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not get runtime PM.\n");
+ goto err_pm_get;
+ }
+
+ trng->clk = devm_clk_get(&pdev->dev, "secss");
+ if (IS_ERR(trng->clk)) {
+ ret = PTR_ERR(trng->clk);
+ dev_err(&pdev->dev, "Could not get clock.\n");
+ goto err_clock;
+ }
+
+ ret = clk_prepare_enable(trng->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not enable the clk.\n");
+ goto err_clock;
+ }
+
+ ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register hwrng device.\n");
+ goto err_register;
+ }
+
+ dev_info(&pdev->dev, "Exynos True Random Number Generator.\n");
+
+ return 0;
+
+err_register:
+ clk_disable_unprepare(trng->clk);
+
+err_clock:
+ pm_runtime_put_noidle(&pdev->dev);
+
+err_pm_get:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int exynos_trng_remove(struct platform_device *pdev)
+{
+ struct exynos_trng_dev *trng = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(trng->clk);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int exynos_trng_suspend(struct device *dev)
+{
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int exynos_trng_resume(struct device *dev)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "Could not get runtime PM.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend,
+ exynos_trng_resume);
+
+static const struct of_device_id exynos_trng_dt_match[] = {
+ {
+ .compatible = "samsung,exynos5250-trng",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, exynos_trng_dt_match);
+
+static struct platform_driver exynos_trng_driver = {
+ .driver = {
+ .name = "exynos-trng",
+ .pm = pm_sleep_ptr(&exynos_trng_pm_ops),
+ .of_match_table = exynos_trng_dt_match,
+ },
+ .probe = exynos_trng_probe,
+ .remove = exynos_trng_remove,
+};
+
+module_platform_driver(exynos_trng_driver);
+MODULE_AUTHOR("Łukasz Stelmach");
+MODULE_DESCRIPTION("H/W TRNG driver for Exynos chips");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
new file mode 100644
index 0000000000..159baf00a8
--- /dev/null
+++ b/drivers/char/hw_random/geode-rng.c
@@ -0,0 +1,161 @@
+/*
+ * RNG driver for AMD Geode RNGs
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * with the majority of the code coming from:
+ *
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+
+#define PFX KBUILD_MODNAME ": "
+
+#define GEODE_RNG_DATA_REG 0x50
+#define GEODE_RNG_STATUS_REG 0x54
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE. We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static const struct pci_device_id pci_tbl[] = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), 0, },
+ { 0, }, /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+struct amd_geode_priv {
+ struct pci_dev *pcidev;
+ void __iomem *membase;
+};
+
+static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
+ void __iomem *mem = priv->membase;
+
+ *data = readl(mem + GEODE_RNG_DATA_REG);
+
+ return 4;
+}
+
+static int geode_rng_data_present(struct hwrng *rng, int wait)
+{
+ struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
+ void __iomem *mem = priv->membase;
+ int data, i;
+
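+	/* poll for data for up to ~200 us (20 * 10 us); bail out at once if !wait */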
+ for (i = 0; i < 20; i++) {
+ data = !!(readl(mem + GEODE_RNG_STATUS_REG));
+ if (data || !wait)
+ break;
+ udelay(10);
+ }
+ return data;
+}
+
+
+static struct hwrng geode_rng = {
+ .name = "geode",
+ .data_present = geode_rng_data_present,
+ .data_read = geode_rng_data_read,
+};
+
+
+static int __init geode_rng_init(void)
+{
+ int err = -ENODEV;
+ struct pci_dev *pdev = NULL;
+ const struct pci_device_id *ent;
+ void __iomem *mem;
+ unsigned long rng_base;
+ struct amd_geode_priv *priv;
+
+ for_each_pci_dev(pdev) {
+ ent = pci_match_id(pci_tbl, pdev);
+ if (ent)
+ goto found;
+ }
+ /* Device not found. */
+ return err;
+
+found:
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ goto put_dev;
+ }
+
+ rng_base = pci_resource_start(pdev, 0);
+ if (rng_base == 0)
+ goto free_priv;
+ err = -ENOMEM;
+ mem = ioremap(rng_base, 0x58);
+ if (!mem)
+ goto free_priv;
+
+ geode_rng.priv = (unsigned long)priv;
+ priv->membase = mem;
+ priv->pcidev = pdev;
+
+ pr_info("AMD Geode RNG detected\n");
+ err = hwrng_register(&geode_rng);
+ if (err) {
+ pr_err(PFX "RNG registering failed (%d)\n",
+ err);
+ goto err_unmap;
+ }
+ return err;
+
+err_unmap:
+ iounmap(mem);
+free_priv:
+ kfree(priv);
+put_dev:
+ pci_dev_put(pdev);
+ return err;
+}
+
+static void __exit geode_rng_exit(void)
+{
+ struct amd_geode_priv *priv;
+
+ priv = (struct amd_geode_priv *)geode_rng.priv;
+ hwrng_unregister(&geode_rng);
+ iounmap(priv->membase);
+ pci_dev_put(priv->pcidev);
+ kfree(priv);
+}
+
+module_init(geode_rng_init);
+module_exit(geode_rng_exit);
+
+MODULE_DESCRIPTION("H/W RNG driver for AMD Geode LX CPUs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/hisi-rng.c b/drivers/char/hw_random/hisi-rng.c
new file mode 100644
index 0000000000..96438f85ca
--- /dev/null
+++ b/drivers/char/hw_random/hisi-rng.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 HiSilicon Co., Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+
+#define RNG_SEED 0x0
+#define RNG_CTRL 0x4
+ #define RNG_SEED_SEL BIT(2)
+ #define RNG_RING_EN BIT(1)
+ #define RNG_EN BIT(0)
+#define RNG_RAN_NUM 0x10
+#define RNG_PHY_SEED 0x14
+
+#define to_hisi_rng(p) container_of(p, struct hisi_rng, rng)
+
+static int seed_sel;
+module_param(seed_sel, int, S_IRUGO);
+MODULE_PARM_DESC(seed_sel, "Auto-reload seed: 0, use LFSR (default); 1, use ring oscillator.");
+
+struct hisi_rng {
+ void __iomem *base;
+ struct hwrng rng;
+};
+
+static int hisi_rng_init(struct hwrng *rng)
+{
+ struct hisi_rng *hrng = to_hisi_rng(rng);
+ int val = RNG_EN;
+ u32 seed;
+
+ /* get a random number as initial seed */
+ get_random_bytes(&seed, sizeof(seed));
+
+ writel_relaxed(seed, hrng->base + RNG_SEED);
+
+	/*
+	 * The seed is reloaded periodically. There are two choices
+	 * of seed: by default the value from the LFSR is used,
+	 * otherwise the seed generated by the ring oscillator is used.
+	 */
+ if (seed_sel == 1)
+ val |= RNG_RING_EN | RNG_SEED_SEL;
+
+ writel_relaxed(val, hrng->base + RNG_CTRL);
+ return 0;
+}
+
+static void hisi_rng_cleanup(struct hwrng *rng)
+{
+ struct hisi_rng *hrng = to_hisi_rng(rng);
+
+ writel_relaxed(0, hrng->base + RNG_CTRL);
+}
+
+static int hisi_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct hisi_rng *hrng = to_hisi_rng(rng);
+ u32 *data = buf;
+
+ *data = readl_relaxed(hrng->base + RNG_RAN_NUM);
+ return 4;
+}
+
+static int hisi_rng_probe(struct platform_device *pdev)
+{
+ struct hisi_rng *rng;
+ int ret;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rng);
+
+ rng->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rng->base))
+ return PTR_ERR(rng->base);
+
+ rng->rng.name = pdev->name;
+ rng->rng.init = hisi_rng_init;
+ rng->rng.cleanup = hisi_rng_cleanup;
+ rng->rng.read = hisi_rng_read;
+
+ ret = devm_hwrng_register(&pdev->dev, &rng->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register hwrng\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id hisi_rng_dt_ids[] __maybe_unused = {
+ { .compatible = "hisilicon,hip04-rng" },
+ { .compatible = "hisilicon,hip05-rng" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hisi_rng_dt_ids);
+
+static struct platform_driver hisi_rng_driver = {
+ .probe = hisi_rng_probe,
+ .driver = {
+ .name = "hisi-rng",
+ .of_match_table = of_match_ptr(hisi_rng_dt_ids),
+ },
+};
+
+module_platform_driver(hisi_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kefeng Wang <wangkefeng.wang@huawei>");
+MODULE_DESCRIPTION("Hisilicon random number generator driver");
diff --git a/drivers/char/hw_random/histb-rng.c b/drivers/char/hw_random/histb-rng.c
new file mode 100644
index 0000000000..f652e1135e
--- /dev/null
+++ b/drivers/char/hw_random/histb-rng.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright (c) 2023 David Yang
+ */
+
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define RNG_CTRL 0x0
+#define RNG_SOURCE GENMASK(1, 0)
+#define DROP_ENABLE BIT(5)
+#define POST_PROCESS_ENABLE BIT(7)
+#define POST_PROCESS_DEPTH GENMASK(15, 8)
+#define RNG_NUMBER 0x4
+#define RNG_STAT 0x8
+#define DATA_COUNT GENMASK(2, 0) /* max 4 */
+
+struct histb_rng_priv {
+ struct hwrng rng;
+ void __iomem *base;
+};
+
+/*
+ * Observed:
+ * depth = 1 -> ~1ms
+ * depth = 255 -> ~16ms
+ */
+static int histb_rng_wait(void __iomem *base)
+{
+ u32 val;
+
+ return readl_relaxed_poll_timeout(base + RNG_STAT, val,
+ val & DATA_COUNT, 1000, 30 * 1000);
+}
+
+static void histb_rng_init(void __iomem *base, unsigned int depth)
+{
+ u32 val;
+
+ val = readl_relaxed(base + RNG_CTRL);
+
+ val &= ~RNG_SOURCE;
+ val |= 2;
+
+ val &= ~POST_PROCESS_DEPTH;
+ val |= min(depth, 0xffu) << 8;
+
+ val |= POST_PROCESS_ENABLE;
+ val |= DROP_ENABLE;
+
+ writel_relaxed(val, base + RNG_CTRL);
+}
+
+static int histb_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct histb_rng_priv *priv = container_of(rng, typeof(*priv), rng);
+ void __iomem *base = priv->base;
+
+ for (int i = 0; i < max; i += sizeof(u32)) {
+ if (!(readl_relaxed(base + RNG_STAT) & DATA_COUNT)) {
+ if (!wait)
+ return i;
+ if (histb_rng_wait(base)) {
+ pr_err("failed to generate random number, generated %d\n",
+ i);
+ return i ? i : -ETIMEDOUT;
+ }
+ }
+ *(u32 *) (data + i) = readl_relaxed(base + RNG_NUMBER);
+ }
+
+ return max;
+}
+
+static unsigned int histb_rng_get_depth(void __iomem *base)
+{
+ return (readl_relaxed(base + RNG_CTRL) & POST_PROCESS_DEPTH) >> 8;
+}
+
+static ssize_t
+depth_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct histb_rng_priv *priv = dev_get_drvdata(dev);
+ void __iomem *base = priv->base;
+
+ return sprintf(buf, "%d\n", histb_rng_get_depth(base));
+}
+
+static ssize_t
+depth_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct histb_rng_priv *priv = dev_get_drvdata(dev);
+ void __iomem *base = priv->base;
+ unsigned int depth;
+
+ if (kstrtouint(buf, 0, &depth))
+ return -ERANGE;
+
+ histb_rng_init(base, depth);
+ return count;
+}
+
+static DEVICE_ATTR_RW(depth);
+
+static struct attribute *histb_rng_attrs[] = {
+ &dev_attr_depth.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(histb_rng);
+
+static int histb_rng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct histb_rng_priv *priv;
+ void __iomem *base;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ histb_rng_init(base, 144);
+ if (histb_rng_wait(base)) {
+ dev_err(dev, "cannot bring up device\n");
+ return -ENODEV;
+ }
+
+ priv->base = base;
+ priv->rng.name = pdev->name;
+ priv->rng.read = histb_rng_read;
+ ret = devm_hwrng_register(dev, &priv->rng);
+ if (ret) {
+ dev_err(dev, "failed to register hwrng: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ dev_set_drvdata(dev, priv);
+ return 0;
+}
+
+static const struct of_device_id histb_rng_of_match[] = {
+ { .compatible = "hisilicon,histb-rng", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, histb_rng_of_match);
+
+static struct platform_driver histb_rng_driver = {
+ .probe = histb_rng_probe,
+ .driver = {
+ .name = "histb-rng",
+ .of_match_table = histb_rng_of_match,
+ .dev_groups = histb_rng_groups,
+ },
+};
+
+module_platform_driver(histb_rng_driver);
+
+MODULE_DESCRIPTION("Hisilicon STB random number generator driver");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("David Yang <mmyangfl@gmail.com>");
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
new file mode 100644
index 0000000000..e4b385b01b
--- /dev/null
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RNG driver for Freescale RNGC
+ *
+ * Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2017 Martin Kaiser <martin@kaiser.cx>
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/hw_random.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/bitfield.h>
+
+#define RNGC_VER_ID 0x0000
+#define RNGC_COMMAND 0x0004
+#define RNGC_CONTROL 0x0008
+#define RNGC_STATUS 0x000C
+#define RNGC_ERROR 0x0010
+#define RNGC_FIFO 0x0014
+
+/* the fields in the ver id register */
+#define RNG_TYPE GENMASK(31, 28)
+#define RNGC_VER_MAJ_SHIFT 8
+
+/* the rng_type field */
+#define RNGC_TYPE_RNGB 0x1
+#define RNGC_TYPE_RNGC 0x2
+
+
+#define RNGC_CMD_CLR_ERR BIT(5)
+#define RNGC_CMD_CLR_INT BIT(4)
+#define RNGC_CMD_SEED BIT(1)
+#define RNGC_CMD_SELF_TEST BIT(0)
+
+#define RNGC_CTRL_MASK_ERROR BIT(6)
+#define RNGC_CTRL_MASK_DONE BIT(5)
+#define RNGC_CTRL_AUTO_SEED BIT(4)
+
+#define RNGC_STATUS_ERROR BIT(16)
+#define RNGC_STATUS_FIFO_LEVEL_MASK GENMASK(11, 8)
+#define RNGC_STATUS_SEED_DONE BIT(5)
+#define RNGC_STATUS_ST_DONE BIT(4)
+
+#define RNGC_ERROR_STATUS_STAT_ERR 0x00000008
+
+#define RNGC_TIMEOUT 3000 /* 3 sec */
+
+
+static bool self_test = true;
+module_param(self_test, bool, 0);
+
+struct imx_rngc {
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *base;
+ struct hwrng rng;
+ struct completion rng_op_done;
+ /*
+ * err_reg is written only by the irq handler and read only
+	 * when interrupts are masked, so we need no spinlock
+ */
+ u32 err_reg;
+};
+
+
+static inline void imx_rngc_irq_mask_clear(struct imx_rngc *rngc)
+{
+ u32 ctrl, cmd;
+
+ /* mask interrupts */
+ ctrl = readl(rngc->base + RNGC_CONTROL);
+ ctrl |= RNGC_CTRL_MASK_DONE | RNGC_CTRL_MASK_ERROR;
+ writel(ctrl, rngc->base + RNGC_CONTROL);
+
+ /*
+	 * CLR_INT clears the interrupt only if there is no error.
+	 * CLR_ERR clears both the interrupt and the error register
+	 * if there is an error.
+ */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ cmd |= RNGC_CMD_CLR_INT | RNGC_CMD_CLR_ERR;
+ writel(cmd, rngc->base + RNGC_COMMAND);
+}
+
+static inline void imx_rngc_irq_unmask(struct imx_rngc *rngc)
+{
+ u32 ctrl;
+
+ ctrl = readl(rngc->base + RNGC_CONTROL);
+ ctrl &= ~(RNGC_CTRL_MASK_DONE | RNGC_CTRL_MASK_ERROR);
+ writel(ctrl, rngc->base + RNGC_CONTROL);
+}
+
+static int imx_rngc_self_test(struct imx_rngc *rngc)
+{
+ u32 cmd;
+ int ret;
+
+ imx_rngc_irq_unmask(rngc);
+
+ /* run self test */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND);
+
+ ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT));
+ imx_rngc_irq_mask_clear(rngc);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return rngc->err_reg ? -EIO : 0;
+}
+
+static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+ unsigned int status;
+ int retval = 0;
+
+ while (max >= sizeof(u32)) {
+ status = readl(rngc->base + RNGC_STATUS);
+
+ /* is there some error while reading this random number? */
+ if (status & RNGC_STATUS_ERROR)
+ break;
+
+ if (status & RNGC_STATUS_FIFO_LEVEL_MASK) {
+ /* retrieve a random number from FIFO */
+ *(u32 *)data = readl(rngc->base + RNGC_FIFO);
+
+ retval += sizeof(u32);
+ data += sizeof(u32);
+ max -= sizeof(u32);
+ }
+ }
+
+ return retval ? retval : -EIO;
+}
+
+static irqreturn_t imx_rngc_irq(int irq, void *priv)
+{
+ struct imx_rngc *rngc = (struct imx_rngc *)priv;
+ u32 status;
+
+ /*
+	 * clearing the interrupt will also clear the error register,
+	 * so read the error and status registers before clearing
+ */
+ status = readl(rngc->base + RNGC_STATUS);
+ rngc->err_reg = readl(rngc->base + RNGC_ERROR);
+
+ imx_rngc_irq_mask_clear(rngc);
+
+ if (status & (RNGC_STATUS_SEED_DONE | RNGC_STATUS_ST_DONE))
+ complete(&rngc->rng_op_done);
+
+ return IRQ_HANDLED;
+}
+
+static int imx_rngc_init(struct hwrng *rng)
+{
+ struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+ u32 cmd, ctrl;
+ int ret;
+
+ /* clear error */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ writel(cmd | RNGC_CMD_CLR_ERR, rngc->base + RNGC_COMMAND);
+
+ imx_rngc_irq_unmask(rngc);
+
+ /* create seed, repeat while there is some statistical error */
+ do {
+ /* seed creation */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND);
+
+ ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT));
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ } while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR);
+
+ if (rngc->err_reg) {
+ ret = -EIO;
+ goto err;
+ }
+
+ /*
+	 * enable automatic seeding: the rngc creates a new seed automatically
+ * after serving 2^20 random 160-bit words
+ */
+ ctrl = readl(rngc->base + RNGC_CONTROL);
+ ctrl |= RNGC_CTRL_AUTO_SEED;
+ writel(ctrl, rngc->base + RNGC_CONTROL);
+
+ /*
+	 * if initialisation was successful, we keep the interrupt
+	 * unmasked until imx_rngc_cleanup is called; if we return
+	 * an error, we mask the interrupt ourselves
+ */
+ return 0;
+
+err:
+ imx_rngc_irq_mask_clear(rngc);
+ return ret;
+}
+
+static void imx_rngc_cleanup(struct hwrng *rng)
+{
+ struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+
+ imx_rngc_irq_mask_clear(rngc);
+}
+
+static int __init imx_rngc_probe(struct platform_device *pdev)
+{
+ struct imx_rngc *rngc;
+ int ret;
+ int irq;
+ u32 ver_id;
+ u8 rng_type;
+
+ rngc = devm_kzalloc(&pdev->dev, sizeof(*rngc), GFP_KERNEL);
+ if (!rngc)
+ return -ENOMEM;
+
+ rngc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rngc->base))
+ return PTR_ERR(rngc->base);
+
+ rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(rngc->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rngc->clk), "Cannot get rng_clk\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ver_id = readl(rngc->base + RNGC_VER_ID);
+ rng_type = FIELD_GET(RNG_TYPE, ver_id);
+ /*
+ * This driver supports only RNGC and RNGB. (There's a different
+ * driver for RNGA.)
+ */
+ if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB)
+ return -ENODEV;
+
+ init_completion(&rngc->rng_op_done);
+
+ rngc->rng.name = pdev->name;
+ rngc->rng.init = imx_rngc_init;
+ rngc->rng.read = imx_rngc_read;
+ rngc->rng.cleanup = imx_rngc_cleanup;
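+	/* entropy estimate: 19 bits of entropy per 1024 bits of data read */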
+ rngc->rng.quality = 19;
+
+ rngc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, rngc);
+
+ imx_rngc_irq_mask_clear(rngc);
+
+ ret = devm_request_irq(&pdev->dev,
+ irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Can't get interrupt working.\n");
+
+ if (self_test) {
+ ret = imx_rngc_self_test(rngc);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "self test failed\n");
+ }
+
+ ret = devm_hwrng_register(&pdev->dev, &rngc->rng);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "hwrng registration failed\n");
+
+ dev_info(&pdev->dev,
+ "Freescale RNG%c registered (HW revision %d.%02d)\n",
+ rng_type == RNGC_TYPE_RNGB ? 'B' : 'C',
+ (ver_id >> RNGC_VER_MAJ_SHIFT) & 0xff, ver_id & 0xff);
+ return 0;
+}
+
+static int imx_rngc_suspend(struct device *dev)
+{
+ struct imx_rngc *rngc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(rngc->clk);
+
+ return 0;
+}
+
+static int imx_rngc_resume(struct device *dev)
+{
+ struct imx_rngc *rngc = dev_get_drvdata(dev);
+
+ clk_prepare_enable(rngc->clk);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
+
+static const struct of_device_id imx_rngc_dt_ids[] = {
+ { .compatible = "fsl,imx25-rngb" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
+
+static struct platform_driver imx_rngc_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .pm = pm_sleep_ptr(&imx_rngc_pm_ops),
+ .of_match_table = imx_rngc_dt_ids,
+ },
+};
+
+module_platform_driver_probe(imx_rngc_driver, imx_rngc_probe);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("H/W RNGC driver for i.MX");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/ingenic-rng.c b/drivers/char/hw_random/ingenic-rng.c
new file mode 100644
index 0000000000..4f18c3fa54
--- /dev/null
+++ b/drivers/char/hw_random/ingenic-rng.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ingenic Random Number Generator driver
+ * Copyright (c) 2017 PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>
+ * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* RNG register offsets */
+#define RNG_REG_ERNG_OFFSET 0x0
+#define RNG_REG_RNG_OFFSET 0x4
+
+/* bits within the ERNG register */
+#define ERNG_READY BIT(31)
+#define ERNG_ENABLE BIT(0)
+
+enum ingenic_rng_version {
+ ID_JZ4780,
+ ID_X1000,
+};
+
+/* Device associated memory */
+struct ingenic_rng {
+ enum ingenic_rng_version version;
+
+ void __iomem *base;
+ struct hwrng rng;
+};
+
+static int ingenic_rng_init(struct hwrng *rng)
+{
+ struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng);
+
+ writel(ERNG_ENABLE, priv->base + RNG_REG_ERNG_OFFSET);
+
+ return 0;
+}
+
+static void ingenic_rng_cleanup(struct hwrng *rng)
+{
+ struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng);
+
+ writel(0, priv->base + RNG_REG_ERNG_OFFSET);
+}
+
+static int ingenic_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng);
+ u32 *data = buf;
+ u32 status;
+ int ret;
+
+ if (priv->version >= ID_X1000) {
+ ret = readl_poll_timeout(priv->base + RNG_REG_ERNG_OFFSET, status,
+ status & ERNG_READY, 10, 1000);
+ if (ret == -ETIMEDOUT) {
+ pr_err("%s: Wait for RNG data ready timeout\n", __func__);
+ return ret;
+ }
+ } else {
+ /*
+		 * A delay is required so that the current RNG data is not a
+		 * bit-shifted version of the previous RNG data, which could
+		 * happen if random data is read continuously from this device.
+ */
+ udelay(20);
+ }
+
+ *data = readl(priv->base + RNG_REG_RNG_OFFSET);
+
+ return 4;
+}
+
+static int ingenic_rng_probe(struct platform_device *pdev)
+{
+ struct ingenic_rng *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base)) {
+ pr_err("%s: Failed to map RNG registers\n", __func__);
+ return PTR_ERR(priv->base);
+ }
+
+ priv->version = (enum ingenic_rng_version)(uintptr_t)of_device_get_match_data(&pdev->dev);
+
+ priv->rng.name = pdev->name;
+ priv->rng.init = ingenic_rng_init;
+ priv->rng.cleanup = ingenic_rng_cleanup;
+ priv->rng.read = ingenic_rng_read;
+
+ ret = hwrng_register(&priv->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register hwrng\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ dev_info(&pdev->dev, "Ingenic RNG driver registered\n");
+ return 0;
+}
+
+static int ingenic_rng_remove(struct platform_device *pdev)
+{
+ struct ingenic_rng *priv = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&priv->rng);
+
+ writel(0, priv->base + RNG_REG_ERNG_OFFSET);
+
+ return 0;
+}
+
+static const struct of_device_id ingenic_rng_of_match[] = {
+ { .compatible = "ingenic,jz4780-rng", .data = (void *) ID_JZ4780 },
+ { .compatible = "ingenic,x1000-rng", .data = (void *) ID_X1000 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ingenic_rng_of_match);
+
+static struct platform_driver ingenic_rng_driver = {
+ .probe = ingenic_rng_probe,
+ .remove = ingenic_rng_remove,
+ .driver = {
+ .name = "ingenic-rng",
+ .of_match_table = ingenic_rng_of_match,
+ },
+};
+
+module_platform_driver(ingenic_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>");
+MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>");
+MODULE_DESCRIPTION("Ingenic Random Number Generator driver");
diff --git a/drivers/char/hw_random/ingenic-trng.c b/drivers/char/hw_random/ingenic-trng.c
new file mode 100644
index 0000000000..1672320e7d
--- /dev/null
+++ b/drivers/char/hw_random/ingenic-trng.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ingenic True Random Number Generator driver
+ * Copyright (c) 2019 漆鹏振 (Qi Pengzhen) <aric.pzqi@ingenic.com>
+ * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* DTRNG register offsets */
+#define TRNG_REG_CFG_OFFSET 0x00
+#define TRNG_REG_RANDOMNUM_OFFSET 0x04
+#define TRNG_REG_STATUS_OFFSET 0x08
+
+/* bits within the CFG register */
+#define CFG_GEN_EN BIT(0)
+
+/* bits within the STATUS register */
+#define STATUS_RANDOM_RDY BIT(0)
+
+struct ingenic_trng {
+ void __iomem *base;
+ struct hwrng rng;
+};
+
+static int ingenic_trng_init(struct hwrng *rng)
+{
+ struct ingenic_trng *trng = container_of(rng, struct ingenic_trng, rng);
+ unsigned int ctrl;
+
+ ctrl = readl(trng->base + TRNG_REG_CFG_OFFSET);
+ ctrl |= CFG_GEN_EN;
+ writel(ctrl, trng->base + TRNG_REG_CFG_OFFSET);
+
+ return 0;
+}
+
+static void ingenic_trng_cleanup(struct hwrng *rng)
+{
+ struct ingenic_trng *trng = container_of(rng, struct ingenic_trng, rng);
+ unsigned int ctrl;
+
+ ctrl = readl(trng->base + TRNG_REG_CFG_OFFSET);
+ ctrl &= ~CFG_GEN_EN;
+ writel(ctrl, trng->base + TRNG_REG_CFG_OFFSET);
+}
+
+static int ingenic_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct ingenic_trng *trng = container_of(rng, struct ingenic_trng, rng);
+ u32 *data = buf;
+ u32 status;
+ int ret;
+
+ ret = readl_poll_timeout(trng->base + TRNG_REG_STATUS_OFFSET, status,
+ status & STATUS_RANDOM_RDY, 10, 1000);
+ if (ret == -ETIMEDOUT) {
+ pr_err("%s: Wait for DTRNG data ready timeout\n", __func__);
+ return ret;
+ }
+
+ *data = readl(trng->base + TRNG_REG_RANDOMNUM_OFFSET);
+
+ return 4;
+}
+
+static int ingenic_trng_probe(struct platform_device *pdev)
+{
+ struct ingenic_trng *trng;
+ struct clk *clk;
+ int ret;
+
+ trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return -ENOMEM;
+
+ trng->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(trng->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(trng->base),
+ "%s: Failed to map DTRNG registers\n", __func__);
+
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "%s: Cannot get and enable DTRNG clock\n", __func__);
+
+ trng->rng.name = pdev->name;
+ trng->rng.init = ingenic_trng_init;
+ trng->rng.cleanup = ingenic_trng_cleanup;
+ trng->rng.read = ingenic_trng_read;
+
+ ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register hwrng\n");
+
+ platform_set_drvdata(pdev, trng);
+
+ dev_info(&pdev->dev, "Ingenic DTRNG driver registered\n");
+ return 0;
+}
+
+static const struct of_device_id ingenic_trng_of_match[] = {
+ { .compatible = "ingenic,x1830-dtrng" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ingenic_trng_of_match);
+
+static struct platform_driver ingenic_trng_driver = {
+ .probe = ingenic_trng_probe,
+ .driver = {
+ .name = "ingenic-trng",
+ .of_match_table = ingenic_trng_of_match,
+ },
+};
+
+module_platform_driver(ingenic_trng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("漆鹏振 (Qi Pengzhen) <aric.pzqi@ingenic.com>");
+MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>");
+MODULE_DESCRIPTION("Ingenic True Random Number Generator driver");
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
new file mode 100644
index 0000000000..7b171cb3b8
--- /dev/null
+++ b/drivers/char/hw_random/intel-rng.c
@@ -0,0 +1,418 @@
+/*
+ * RNG driver for Intel RNGs
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * with the majority of the code coming from:
+ *
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/stop_machine.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+
+#define PFX KBUILD_MODNAME ": "
+
+/*
+ * RNG registers
+ */
+#define INTEL_RNG_HW_STATUS 0
+#define INTEL_RNG_PRESENT 0x40
+#define INTEL_RNG_ENABLED 0x01
+#define INTEL_RNG_STATUS 1
+#define INTEL_RNG_DATA_PRESENT 0x01
+#define INTEL_RNG_DATA 2
+
+/*
+ * Magic address at which Intel PCI bridges locate the RNG
+ */
+#define INTEL_RNG_ADDR 0xFFBC015F
+#define INTEL_RNG_ADDR_LEN 3
+
+/*
+ * LPC bridge PCI config space registers
+ */
+#define FWH_DEC_EN1_REG_OLD 0xe3
+#define FWH_DEC_EN1_REG_NEW 0xd9 /* high byte of 16-bit register */
+#define FWH_F8_EN_MASK 0x80
+
+#define BIOS_CNTL_REG_OLD 0x4e
+#define BIOS_CNTL_REG_NEW 0xdc
+#define BIOS_CNTL_WRITE_ENABLE_MASK 0x01
+#define BIOS_CNTL_LOCK_ENABLE_MASK 0x02
+
+/*
+ * Magic address at which Intel Firmware Hubs get accessed
+ */
+#define INTEL_FWH_ADDR 0xffff0000
+#define INTEL_FWH_ADDR_LEN 2
+
+/*
+ * Intel Firmware Hub command codes (write to any address inside the device)
+ */
+#define INTEL_FWH_RESET_CMD 0xff /* aka READ_ARRAY */
+#define INTEL_FWH_READ_ID_CMD 0x90
+
+/*
+ * Intel Firmware Hub Read ID command result addresses
+ */
+#define INTEL_FWH_MANUFACTURER_CODE_ADDRESS 0x000000
+#define INTEL_FWH_DEVICE_CODE_ADDRESS 0x000001
+
+/*
+ * Intel Firmware Hub Read ID command result values
+ */
+#define INTEL_FWH_MANUFACTURER_CODE 0x89
+#define INTEL_FWH_DEVICE_CODE_8M 0xac
+#define INTEL_FWH_DEVICE_CODE_4M 0xad
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE. We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static const struct pci_device_id pci_tbl[] = {
+/* AA
+ { PCI_DEVICE(0x8086, 0x2418) }, */
+ { PCI_DEVICE(0x8086, 0x2410) }, /* AA */
+/* AB
+ { PCI_DEVICE(0x8086, 0x2428) }, */
+ { PCI_DEVICE(0x8086, 0x2420) }, /* AB */
+/* ??
+ { PCI_DEVICE(0x8086, 0x2430) }, */
+/* BAM, CAM, DBM, FBM, GxM
+ { PCI_DEVICE(0x8086, 0x2448) }, */
+ { PCI_DEVICE(0x8086, 0x244c) }, /* BAM */
+ { PCI_DEVICE(0x8086, 0x248c) }, /* CAM */
+ { PCI_DEVICE(0x8086, 0x24cc) }, /* DBM */
+ { PCI_DEVICE(0x8086, 0x2641) }, /* FBM */
+ { PCI_DEVICE(0x8086, 0x27b9) }, /* GxM */
+ { PCI_DEVICE(0x8086, 0x27bd) }, /* GxM DH */
+/* BA, CA, DB, Ex, 6300, Fx, 631x/632x, Gx
+ { PCI_DEVICE(0x8086, 0x244e) }, */
+ { PCI_DEVICE(0x8086, 0x2440) }, /* BA */
+ { PCI_DEVICE(0x8086, 0x2480) }, /* CA */
+ { PCI_DEVICE(0x8086, 0x24c0) }, /* DB */
+ { PCI_DEVICE(0x8086, 0x24d0) }, /* Ex */
+ { PCI_DEVICE(0x8086, 0x25a1) }, /* 6300 */
+ { PCI_DEVICE(0x8086, 0x2640) }, /* Fx */
+ { PCI_DEVICE(0x8086, 0x2670) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2671) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2672) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2673) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2674) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2675) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2676) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2677) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2678) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2679) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x267a) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x267b) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x267c) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x267d) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x267e) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x267f) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x27b8) }, /* Gx */
+/* E
+ { PCI_DEVICE(0x8086, 0x245e) }, */
+ { PCI_DEVICE(0x8086, 0x2450) }, /* E */
+ { 0, }, /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+static __initdata int no_fwh_detect;
+module_param(no_fwh_detect, int, 0);
+MODULE_PARM_DESC(no_fwh_detect, "Skip FWH detection:\n"
+ " positive value - skip if FWH space locked read-only\n"
+ " negative value - skip always");
+
+static inline u8 hwstatus_get(void __iomem *mem)
+{
+ return readb(mem + INTEL_RNG_HW_STATUS);
+}
+
+static inline u8 hwstatus_set(void __iomem *mem,
+ u8 hw_status)
+{
+ writeb(hw_status, mem + INTEL_RNG_HW_STATUS);
+ return hwstatus_get(mem);
+}
+
+static int intel_rng_data_present(struct hwrng *rng, int wait)
+{
+ void __iomem *mem = (void __iomem *)rng->priv;
+ int data, i;
+
+ for (i = 0; i < 20; i++) {
+ data = !!(readb(mem + INTEL_RNG_STATUS) &
+ INTEL_RNG_DATA_PRESENT);
+ if (data || !wait)
+ break;
+ udelay(10);
+ }
+ return data;
+}
+
+static int intel_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ void __iomem *mem = (void __iomem *)rng->priv;
+
+ *data = readb(mem + INTEL_RNG_DATA);
+
+ return 1;
+}
+
+static int intel_rng_init(struct hwrng *rng)
+{
+ void __iomem *mem = (void __iomem *)rng->priv;
+ u8 hw_status;
+ int err = -EIO;
+
+ hw_status = hwstatus_get(mem);
+ /* turn RNG h/w on, if it's off */
+ if ((hw_status & INTEL_RNG_ENABLED) == 0)
+ hw_status = hwstatus_set(mem, hw_status | INTEL_RNG_ENABLED);
+ if ((hw_status & INTEL_RNG_ENABLED) == 0) {
+ pr_err(PFX "cannot enable RNG, aborting\n");
+ goto out;
+ }
+ err = 0;
+out:
+ return err;
+}
+
+static void intel_rng_cleanup(struct hwrng *rng)
+{
+ void __iomem *mem = (void __iomem *)rng->priv;
+ u8 hw_status;
+
+ hw_status = hwstatus_get(mem);
+ if (hw_status & INTEL_RNG_ENABLED)
+ hwstatus_set(mem, hw_status & ~INTEL_RNG_ENABLED);
+ else
+ pr_warn(PFX "unusual: RNG already disabled\n");
+}
+
+
+static struct hwrng intel_rng = {
+ .name = "intel",
+ .init = intel_rng_init,
+ .cleanup = intel_rng_cleanup,
+ .data_present = intel_rng_data_present,
+ .data_read = intel_rng_data_read,
+};
+
+struct intel_rng_hw {
+ struct pci_dev *dev;
+ void __iomem *mem;
+ u8 bios_cntl_off;
+ u8 bios_cntl_val;
+ u8 fwh_dec_en1_off;
+ u8 fwh_dec_en1_val;
+};
+
+static int __init intel_rng_hw_init(void *_intel_rng_hw)
+{
+ struct intel_rng_hw *intel_rng_hw = _intel_rng_hw;
+ u8 mfc, dvc;
+
+ /* interrupts disabled in stop_machine call */
+
+ if (!(intel_rng_hw->fwh_dec_en1_val & FWH_F8_EN_MASK))
+ pci_write_config_byte(intel_rng_hw->dev,
+ intel_rng_hw->fwh_dec_en1_off,
+ intel_rng_hw->fwh_dec_en1_val |
+ FWH_F8_EN_MASK);
+ if (!(intel_rng_hw->bios_cntl_val & BIOS_CNTL_WRITE_ENABLE_MASK))
+ pci_write_config_byte(intel_rng_hw->dev,
+ intel_rng_hw->bios_cntl_off,
+ intel_rng_hw->bios_cntl_val |
+ BIOS_CNTL_WRITE_ENABLE_MASK);
+
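+	/*
+	 * Issue the Read ID command to the Firmware Hub, latch the
+	 * manufacturer and device codes, then return the part to
+	 * read-array mode.
+	 */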
+ writeb(INTEL_FWH_RESET_CMD, intel_rng_hw->mem);
+ writeb(INTEL_FWH_READ_ID_CMD, intel_rng_hw->mem);
+ mfc = readb(intel_rng_hw->mem + INTEL_FWH_MANUFACTURER_CODE_ADDRESS);
+ dvc = readb(intel_rng_hw->mem + INTEL_FWH_DEVICE_CODE_ADDRESS);
+ writeb(INTEL_FWH_RESET_CMD, intel_rng_hw->mem);
+
+ if (!(intel_rng_hw->bios_cntl_val &
+ (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK)))
+ pci_write_config_byte(intel_rng_hw->dev,
+ intel_rng_hw->bios_cntl_off,
+ intel_rng_hw->bios_cntl_val);
+ if (!(intel_rng_hw->fwh_dec_en1_val & FWH_F8_EN_MASK))
+ pci_write_config_byte(intel_rng_hw->dev,
+ intel_rng_hw->fwh_dec_en1_off,
+ intel_rng_hw->fwh_dec_en1_val);
+
+ if (mfc != INTEL_FWH_MANUFACTURER_CODE ||
+ (dvc != INTEL_FWH_DEVICE_CODE_8M &&
+ dvc != INTEL_FWH_DEVICE_CODE_4M)) {
+ pr_notice(PFX "FWH not detected\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __init intel_init_hw_struct(struct intel_rng_hw *intel_rng_hw,
+ struct pci_dev *dev)
+{
+ intel_rng_hw->bios_cntl_val = 0xff;
+ intel_rng_hw->fwh_dec_en1_val = 0xff;
+ intel_rng_hw->dev = dev;
+
+ /* Check for Intel 82802 */
+ if (dev->device < 0x2640) {
+ intel_rng_hw->fwh_dec_en1_off = FWH_DEC_EN1_REG_OLD;
+ intel_rng_hw->bios_cntl_off = BIOS_CNTL_REG_OLD;
+ } else {
+ intel_rng_hw->fwh_dec_en1_off = FWH_DEC_EN1_REG_NEW;
+ intel_rng_hw->bios_cntl_off = BIOS_CNTL_REG_NEW;
+ }
+
+ pci_read_config_byte(dev, intel_rng_hw->fwh_dec_en1_off,
+ &intel_rng_hw->fwh_dec_en1_val);
+ pci_read_config_byte(dev, intel_rng_hw->bios_cntl_off,
+ &intel_rng_hw->bios_cntl_val);
+
+ if ((intel_rng_hw->bios_cntl_val &
+ (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK))
+ == BIOS_CNTL_LOCK_ENABLE_MASK) {
+ static __initdata /*const*/ char warning[] =
+PFX "Firmware space is locked read-only. If you can't or\n"
+PFX "don't want to disable this in firmware setup, and if\n"
+PFX "you are certain that your system has a functional\n"
+PFX "RNG, try using the 'no_fwh_detect' option.\n";
+
+ if (no_fwh_detect)
+ return -ENODEV;
+ pr_warn("%s", warning);
+ return -EBUSY;
+ }
+
+ intel_rng_hw->mem = ioremap(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN);
+ if (intel_rng_hw->mem == NULL)
+ return -EBUSY;
+
+ return 0;
+}
+
+
+static int __init intel_rng_mod_init(void)
+{
+ int err = -ENODEV;
+ int i;
+ struct pci_dev *dev = NULL;
+ void __iomem *mem;
+ u8 hw_status;
+ struct intel_rng_hw *intel_rng_hw;
+
+ for (i = 0; !dev && pci_tbl[i].vendor; ++i)
+ dev = pci_get_device(pci_tbl[i].vendor, pci_tbl[i].device,
+ NULL);
+
+ if (!dev)
+ goto out; /* Device not found. */
+
+ if (no_fwh_detect < 0) {
+ pci_dev_put(dev);
+ goto fwh_done;
+ }
+
+ intel_rng_hw = kmalloc(sizeof(*intel_rng_hw), GFP_KERNEL);
+ if (!intel_rng_hw) {
+ pci_dev_put(dev);
+ goto out;
+ }
+
+ err = intel_init_hw_struct(intel_rng_hw, dev);
+ if (err) {
+ pci_dev_put(dev);
+ kfree(intel_rng_hw);
+ if (err == -ENODEV)
+ goto fwh_done;
+ goto out;
+ }
+
+ /*
+ * Since the BIOS code/data is going to disappear from its normal
+ * location with the Read ID command, all activity on the system
+ * must be stopped until the state is back to normal.
+ *
+ * Use stop_machine because IPIs can be blocked by disabling
+ * interrupts.
+ */
+ err = stop_machine(intel_rng_hw_init, intel_rng_hw, NULL);
+ pci_dev_put(dev);
+ iounmap(intel_rng_hw->mem);
+ kfree(intel_rng_hw);
+ if (err)
+ goto out;
+
+fwh_done:
+ err = -ENOMEM;
+ mem = ioremap(INTEL_RNG_ADDR, INTEL_RNG_ADDR_LEN);
+ if (!mem)
+ goto out;
+ intel_rng.priv = (unsigned long)mem;
+
+ /* Check for Random Number Generator */
+ err = -ENODEV;
+ hw_status = hwstatus_get(mem);
+ if ((hw_status & INTEL_RNG_PRESENT) == 0) {
+ iounmap(mem);
+ goto out;
+ }
+
+ pr_info("Intel 82802 RNG detected\n");
+ err = hwrng_register(&intel_rng);
+ if (err) {
+ pr_err(PFX "RNG registering failed (%d)\n",
+ err);
+ iounmap(mem);
+ }
+out:
+ return err;
+
+}
+
+static void __exit intel_rng_mod_exit(void)
+{
+ void __iomem *mem = (void __iomem *)intel_rng.priv;
+
+ hwrng_unregister(&intel_rng);
+ iounmap(mem);
+}
+
+module_init(intel_rng_mod_init);
+module_exit(intel_rng_mod_exit);
+
+MODULE_DESCRIPTION("H/W RNG driver for Intel chipsets");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
new file mode 100644
index 0000000000..440fe28bdd
--- /dev/null
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ */
+/*
+ * DESCRIPTION: The Broadcom iProc RNG200 Driver
+ */
+
+#include <linux/hw_random.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+/* Registers */
+#define RNG_CTRL_OFFSET 0x00
+#define RNG_CTRL_RNG_RBGEN_MASK 0x00001FFF
+#define RNG_CTRL_RNG_RBGEN_ENABLE 0x00000001
+
+#define RNG_SOFT_RESET_OFFSET 0x04
+#define RNG_SOFT_RESET 0x00000001
+
+#define RBG_SOFT_RESET_OFFSET 0x08
+#define RBG_SOFT_RESET 0x00000001
+
+#define RNG_INT_STATUS_OFFSET 0x18
+#define RNG_INT_STATUS_MASTER_FAIL_LOCKOUT_IRQ_MASK 0x80000000
+#define RNG_INT_STATUS_STARTUP_TRANSITIONS_MET_IRQ_MASK 0x00020000
+#define RNG_INT_STATUS_NIST_FAIL_IRQ_MASK 0x00000020
+#define RNG_INT_STATUS_TOTAL_BITS_COUNT_IRQ_MASK 0x00000001
+
+#define RNG_FIFO_DATA_OFFSET 0x20
+
+#define RNG_FIFO_COUNT_OFFSET 0x24
+#define RNG_FIFO_COUNT_RNG_FIFO_COUNT_MASK 0x000000FF
+
+struct iproc_rng200_dev {
+ struct hwrng rng;
+ void __iomem *base;
+};
+
+#define to_rng_priv(rng) container_of(rng, struct iproc_rng200_dev, rng)
+
+static void iproc_rng200_enable_set(void __iomem *rng_base, bool enable)
+{
+ u32 val;
+
+ val = ioread32(rng_base + RNG_CTRL_OFFSET);
+ val &= ~RNG_CTRL_RNG_RBGEN_MASK;
+
+ if (enable)
+ val |= RNG_CTRL_RNG_RBGEN_ENABLE;
+
+ iowrite32(val, rng_base + RNG_CTRL_OFFSET);
+}
+
+static void iproc_rng200_restart(void __iomem *rng_base)
+{
+ uint32_t val;
+
+ iproc_rng200_enable_set(rng_base, false);
+
+ /* Clear all interrupt status */
+ iowrite32(0xFFFFFFFFUL, rng_base + RNG_INT_STATUS_OFFSET);
+
+ /* Reset RNG and RBG */
+ val = ioread32(rng_base + RBG_SOFT_RESET_OFFSET);
+ val |= RBG_SOFT_RESET;
+ iowrite32(val, rng_base + RBG_SOFT_RESET_OFFSET);
+
+ val = ioread32(rng_base + RNG_SOFT_RESET_OFFSET);
+ val |= RNG_SOFT_RESET;
+ iowrite32(val, rng_base + RNG_SOFT_RESET_OFFSET);
+
+ val = ioread32(rng_base + RNG_SOFT_RESET_OFFSET);
+ val &= ~RNG_SOFT_RESET;
+ iowrite32(val, rng_base + RNG_SOFT_RESET_OFFSET);
+
+ val = ioread32(rng_base + RBG_SOFT_RESET_OFFSET);
+ val &= ~RBG_SOFT_RESET;
+ iowrite32(val, rng_base + RBG_SOFT_RESET_OFFSET);
+
+ iproc_rng200_enable_set(rng_base, true);
+}
+
+static int iproc_rng200_read(struct hwrng *rng, void *buf, size_t max,
+ bool wait)
+{
+ struct iproc_rng200_dev *priv = to_rng_priv(rng);
+ uint32_t num_remaining = max;
+ uint32_t status;
+
+ #define MAX_RESETS_PER_READ 1
+ uint32_t num_resets = 0;
+
+ #define MAX_IDLE_TIME (1 * HZ)
+ unsigned long idle_endtime = jiffies + MAX_IDLE_TIME;
+
+ while ((num_remaining > 0) && time_before(jiffies, idle_endtime)) {
+
+ /* Is RNG sane? If not, reset it. */
+ status = ioread32(priv->base + RNG_INT_STATUS_OFFSET);
+ if ((status & (RNG_INT_STATUS_MASTER_FAIL_LOCKOUT_IRQ_MASK |
+ RNG_INT_STATUS_NIST_FAIL_IRQ_MASK)) != 0) {
+
+ if (num_resets >= MAX_RESETS_PER_READ)
+ return max - num_remaining;
+
+ iproc_rng200_restart(priv->base);
+ num_resets++;
+ }
+
+ /* Are there any random numbers available? */
+ if ((ioread32(priv->base + RNG_FIFO_COUNT_OFFSET) &
+ RNG_FIFO_COUNT_RNG_FIFO_COUNT_MASK) > 0) {
+
+ if (num_remaining >= sizeof(uint32_t)) {
+ /* Buffer has room to store entire word */
+ *(uint32_t *)buf = ioread32(priv->base +
+ RNG_FIFO_DATA_OFFSET);
+ buf += sizeof(uint32_t);
+ num_remaining -= sizeof(uint32_t);
+ } else {
+ /* Buffer can only store partial word */
+ uint32_t rnd_number = ioread32(priv->base +
+ RNG_FIFO_DATA_OFFSET);
+ memcpy(buf, &rnd_number, num_remaining);
+ buf += num_remaining;
+ num_remaining = 0;
+ }
+
+ /* Reset the IDLE timeout */
+ idle_endtime = jiffies + MAX_IDLE_TIME;
+ } else {
+ if (!wait)
+ /* Cannot wait, return immediately */
+ return max - num_remaining;
+
+ /* Can wait, give others chance to run */
+ usleep_range(min(num_remaining * 10, 500U), 500);
+ }
+ }
+
+ return max - num_remaining;
+}
+
+static int iproc_rng200_init(struct hwrng *rng)
+{
+ struct iproc_rng200_dev *priv = to_rng_priv(rng);
+
+ iproc_rng200_enable_set(priv->base, true);
+
+ return 0;
+}
+
+static void iproc_rng200_cleanup(struct hwrng *rng)
+{
+ struct iproc_rng200_dev *priv = to_rng_priv(rng);
+
+ iproc_rng200_enable_set(priv->base, false);
+}
+
+static int iproc_rng200_probe(struct platform_device *pdev)
+{
+ struct iproc_rng200_dev *priv;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Map peripheral */
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base)) {
+ dev_err(dev, "failed to remap rng regs\n");
+ return PTR_ERR(priv->base);
+ }
+
+ dev_set_drvdata(dev, priv);
+
+ priv->rng.name = "iproc-rng200";
+ priv->rng.read = iproc_rng200_read;
+ priv->rng.init = iproc_rng200_init;
+ priv->rng.cleanup = iproc_rng200_cleanup;
+
+ /* Register driver */
+ ret = devm_hwrng_register(dev, &priv->rng);
+ if (ret) {
+ dev_err(dev, "hwrng registration failed\n");
+ return ret;
+ }
+
+ dev_info(dev, "hwrng registered\n");
+
+ return 0;
+}
+
+static int __maybe_unused iproc_rng200_suspend(struct device *dev)
+{
+ struct iproc_rng200_dev *priv = dev_get_drvdata(dev);
+
+ iproc_rng200_cleanup(&priv->rng);
+
+ return 0;
+}
+
+static int __maybe_unused iproc_rng200_resume(struct device *dev)
+{
+ struct iproc_rng200_dev *priv = dev_get_drvdata(dev);
+
+ iproc_rng200_init(&priv->rng);
+
+ return 0;
+}
+
+static const struct dev_pm_ops iproc_rng200_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(iproc_rng200_suspend, iproc_rng200_resume)
+};
+
+static const struct of_device_id iproc_rng200_of_match[] = {
+ { .compatible = "brcm,bcm2711-rng200", },
+ { .compatible = "brcm,bcm7211-rng200", },
+ { .compatible = "brcm,bcm7278-rng200", },
+ { .compatible = "brcm,iproc-rng200", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, iproc_rng200_of_match);
+
+static struct platform_driver iproc_rng200_driver = {
+ .driver = {
+ .name = "iproc-rng200",
+ .of_match_table = iproc_rng200_of_match,
+ .pm = &iproc_rng200_pm_ops,
+ },
+ .probe = iproc_rng200_probe,
+};
+module_platform_driver(iproc_rng200_driver);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("iProc RNG200 Random Number Generator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/ixp4xx-rng.c b/drivers/char/hw_random/ixp4xx-rng.c
new file mode 100644
index 0000000000..7df5e9f751
--- /dev/null
+++ b/drivers/char/hw_random/ixp4xx-rng.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/char/hw_random/ixp4xx-rng.c
+ *
+ * RNG driver for Intel IXP4xx family of NPUs
+ *
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * Fixes by Michael Buesch
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/hw_random.h>
+#include <linux/of.h>
+#include <linux/soc/ixp4xx/cpu.h>
+
+#include <asm/io.h>
+
+static int ixp4xx_rng_data_read(struct hwrng *rng, u32 *buffer)
+{
+ void __iomem *rng_base = (void __iomem *)rng->priv;
+
+ *buffer = __raw_readl(rng_base);
+
+ return 4;
+}
+
+static struct hwrng ixp4xx_rng_ops = {
+ .name = "ixp4xx",
+ .data_read = ixp4xx_rng_data_read,
+};
+
+static int ixp4xx_rng_probe(struct platform_device *pdev)
+{
+ void __iomem *rng_base;
+ struct device *dev = &pdev->dev;
+
+ if (!cpu_is_ixp46x()) /* includes IXP455 */
+ return -ENOSYS;
+
+ rng_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rng_base))
+ return PTR_ERR(rng_base);
+
+ ixp4xx_rng_ops.priv = (unsigned long)rng_base;
+ return devm_hwrng_register(dev, &ixp4xx_rng_ops);
+}
+
+static const struct of_device_id ixp4xx_rng_of_match[] = {
+ {
+ .compatible = "intel,ixp46x-rng",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ixp4xx_rng_of_match);
+
+static struct platform_driver ixp4xx_rng_driver = {
+ .driver = {
+ .name = "ixp4xx-hwrandom",
+ .of_match_table = ixp4xx_rng_of_match,
+ },
+ .probe = ixp4xx_rng_probe,
+};
+module_platform_driver(ixp4xx_rng_driver);
+
+MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
+MODULE_DESCRIPTION("H/W Pseudo-Random Number Generator (RNG) driver for IXP45x/46x");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/jh7110-trng.c b/drivers/char/hw_random/jh7110-trng.c
new file mode 100644
index 0000000000..38474d48a2
--- /dev/null
+++ b/drivers/char/hw_random/jh7110-trng.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TRNG driver for the StarFive JH7110 SoC
+ *
+ * Copyright (C) 2022 StarFive Technology Co.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
+#include <linux/reset.h>
+
+/* trng register offset */
+#define STARFIVE_CTRL 0x00
+#define STARFIVE_STAT 0x04
+#define STARFIVE_MODE 0x08
+#define STARFIVE_SMODE 0x0C
+#define STARFIVE_IE 0x10
+#define STARFIVE_ISTAT 0x14
+#define STARFIVE_RAND0 0x20
+#define STARFIVE_RAND1 0x24
+#define STARFIVE_RAND2 0x28
+#define STARFIVE_RAND3 0x2C
+#define STARFIVE_RAND4 0x30
+#define STARFIVE_RAND5 0x34
+#define STARFIVE_RAND6 0x38
+#define STARFIVE_RAND7 0x3C
+#define STARFIVE_AUTO_RQSTS 0x60
+#define STARFIVE_AUTO_AGE 0x64
+
+/* CTRL CMD */
+#define STARFIVE_CTRL_EXEC_NOP 0x0
+#define STARFIVE_CTRL_GENE_RANDNUM 0x1
+#define STARFIVE_CTRL_EXEC_RANDRESEED 0x2
+
+/* STAT */
+#define STARFIVE_STAT_NONCE_MODE BIT(2)
+#define STARFIVE_STAT_R256 BIT(3)
+#define STARFIVE_STAT_MISSION_MODE BIT(8)
+#define STARFIVE_STAT_SEEDED BIT(9)
+#define STARFIVE_STAT_LAST_RESEED(x) ((x) << 16)
+#define STARFIVE_STAT_SRVC_RQST BIT(27)
+#define STARFIVE_STAT_RAND_GENERATING BIT(30)
+#define STARFIVE_STAT_RAND_SEEDING BIT(31)
+
+/* MODE */
+#define STARFIVE_MODE_R256 BIT(3)
+
+/* SMODE */
+#define STARFIVE_SMODE_NONCE_MODE BIT(2)
+#define STARFIVE_SMODE_MISSION_MODE BIT(8)
+#define STARFIVE_SMODE_MAX_REJECTS(x) ((x) << 16)
+
+/* IE */
+#define STARFIVE_IE_RAND_RDY_EN BIT(0)
+#define STARFIVE_IE_SEED_DONE_EN BIT(1)
+#define STARFIVE_IE_LFSR_LOCKUP_EN BIT(4)
+#define STARFIVE_IE_GLBL_EN BIT(31)
+
+#define STARFIVE_IE_ALL (STARFIVE_IE_GLBL_EN | \
+ STARFIVE_IE_RAND_RDY_EN | \
+ STARFIVE_IE_SEED_DONE_EN | \
+ STARFIVE_IE_LFSR_LOCKUP_EN)
+
+/* ISTAT */
+#define STARFIVE_ISTAT_RAND_RDY BIT(0)
+#define STARFIVE_ISTAT_SEED_DONE BIT(1)
+#define STARFIVE_ISTAT_LFSR_LOCKUP BIT(4)
+
+#define STARFIVE_RAND_LEN sizeof(u32)
+
+#define to_trng(p) container_of(p, struct starfive_trng, rng)
+
+enum reseed {
+ RANDOM_RESEED,
+ NONCE_RESEED,
+};
+
+enum mode {
+ PRNG_128BIT,
+ PRNG_256BIT,
+};
+
+struct starfive_trng {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *hclk;
+ struct clk *ahb;
+ struct reset_control *rst;
+ struct hwrng rng;
+ struct completion random_done;
+ struct completion reseed_done;
+ u32 mode;
+ u32 mission;
+ u32 reseed;
+ /* protects against concurrent write to ctrl register */
+ spinlock_t write_lock;
+};
+
+static u16 autoreq;
+module_param(autoreq, ushort, 0);
+MODULE_PARM_DESC(autoreq, "Auto-reseeding after the number of random number requests by the host reaches the specified counter:\n"
+ " 0 - disable counter\n"
+ " other - reload value for internal counter");
+
+static u16 autoage;
+module_param(autoage, ushort, 0);
+MODULE_PARM_DESC(autoage, "Auto-reseeding after the specified timer counts down to 0:\n"
+ " 0 - disable timer\n"
+ " other - reload value for internal timer");
+
+static inline int starfive_trng_wait_idle(struct starfive_trng *trng)
+{
+ u32 stat;
+
+ return readl_relaxed_poll_timeout(trng->base + STARFIVE_STAT, stat,
+ !(stat & (STARFIVE_STAT_RAND_GENERATING |
+ STARFIVE_STAT_RAND_SEEDING)),
+ 10, 100000);
+}
+
+static inline void starfive_trng_irq_mask_clear(struct starfive_trng *trng)
+{
+ /* clear register: ISTAT */
+ u32 data = readl(trng->base + STARFIVE_ISTAT);
+
+ writel(data, trng->base + STARFIVE_ISTAT);
+}
+
+static int starfive_trng_cmd(struct starfive_trng *trng, u32 cmd, bool wait)
+{
+ int wait_time = 1000;
+
+ /* allow up to 40 us for wait == 0 */
+ if (!wait)
+ wait_time = 40;
+
+ switch (cmd) {
+ case STARFIVE_CTRL_GENE_RANDNUM:
+ reinit_completion(&trng->random_done);
+ spin_lock_irq(&trng->write_lock);
+ writel(cmd, trng->base + STARFIVE_CTRL);
+ spin_unlock_irq(&trng->write_lock);
+ if (!wait_for_completion_timeout(&trng->random_done, usecs_to_jiffies(wait_time)))
+ return -ETIMEDOUT;
+ break;
+ case STARFIVE_CTRL_EXEC_RANDRESEED:
+ reinit_completion(&trng->reseed_done);
+ spin_lock_irq(&trng->write_lock);
+ writel(cmd, trng->base + STARFIVE_CTRL);
+ spin_unlock_irq(&trng->write_lock);
+ if (!wait_for_completion_timeout(&trng->reseed_done, usecs_to_jiffies(wait_time)))
+ return -ETIMEDOUT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int starfive_trng_init(struct hwrng *rng)
+{
+ struct starfive_trng *trng = to_trng(rng);
+ u32 mode, intr = 0;
+
+ /* setup Auto Request/Age register */
+ writel(autoage, trng->base + STARFIVE_AUTO_AGE);
+ writel(autoreq, trng->base + STARFIVE_AUTO_RQSTS);
+
+ /* clear register: ISTAT */
+ starfive_trng_irq_mask_clear(trng);
+
+ intr |= STARFIVE_IE_ALL;
+ writel(intr, trng->base + STARFIVE_IE);
+
+ mode = readl(trng->base + STARFIVE_MODE);
+
+ switch (trng->mode) {
+ case PRNG_128BIT:
+ mode &= ~STARFIVE_MODE_R256;
+ break;
+ case PRNG_256BIT:
+ mode |= STARFIVE_MODE_R256;
+ break;
+ default:
+ mode |= STARFIVE_MODE_R256;
+ break;
+ }
+
+ writel(mode, trng->base + STARFIVE_MODE);
+
+ return starfive_trng_cmd(trng, STARFIVE_CTRL_EXEC_RANDRESEED, 1);
+}
+
+static irqreturn_t starfive_trng_irq(int irq, void *priv)
+{
+ u32 status;
+ struct starfive_trng *trng = (struct starfive_trng *)priv;
+
+ status = readl(trng->base + STARFIVE_ISTAT);
+ if (status & STARFIVE_ISTAT_RAND_RDY) {
+ writel(STARFIVE_ISTAT_RAND_RDY, trng->base + STARFIVE_ISTAT);
+ complete(&trng->random_done);
+ }
+
+ if (status & STARFIVE_ISTAT_SEED_DONE) {
+ writel(STARFIVE_ISTAT_SEED_DONE, trng->base + STARFIVE_ISTAT);
+ complete(&trng->reseed_done);
+ }
+
+ if (status & STARFIVE_ISTAT_LFSR_LOCKUP) {
+ writel(STARFIVE_ISTAT_LFSR_LOCKUP, trng->base + STARFIVE_ISTAT);
+ /* SEU occurred, reseeding required */
+ spin_lock(&trng->write_lock);
+ writel(STARFIVE_CTRL_EXEC_RANDRESEED, trng->base + STARFIVE_CTRL);
+ spin_unlock(&trng->write_lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void starfive_trng_cleanup(struct hwrng *rng)
+{
+ struct starfive_trng *trng = to_trng(rng);
+
+ writel(0, trng->base + STARFIVE_CTRL);
+
+ reset_control_assert(trng->rst);
+ clk_disable_unprepare(trng->hclk);
+ clk_disable_unprepare(trng->ahb);
+}
+
+static int starfive_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct starfive_trng *trng = to_trng(rng);
+ int ret;
+
+ pm_runtime_get_sync(trng->dev);
+
+ if (trng->mode == PRNG_256BIT)
+ max = min_t(size_t, max, (STARFIVE_RAND_LEN * 8));
+ else
+ max = min_t(size_t, max, (STARFIVE_RAND_LEN * 4));
+
+ if (wait) {
+ ret = starfive_trng_wait_idle(trng);
+ if (ret)
+ return -ETIMEDOUT;
+ }
+
+ ret = starfive_trng_cmd(trng, STARFIVE_CTRL_GENE_RANDNUM, wait);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf, trng->base + STARFIVE_RAND0, max);
+
+ pm_runtime_put_sync_autosuspend(trng->dev);
+
+ return max;
+}
+
+static int starfive_trng_probe(struct platform_device *pdev)
+{
+ int ret;
+ int irq;
+ struct starfive_trng *trng;
+
+ trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, trng);
+ trng->dev = &pdev->dev;
+
+ trng->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(trng->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(trng->base),
+ "Error remapping memory for platform device.\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ init_completion(&trng->random_done);
+ init_completion(&trng->reseed_done);
+ spin_lock_init(&trng->write_lock);
+
+ ret = devm_request_irq(&pdev->dev, irq, starfive_trng_irq, 0, pdev->name,
+ (void *)trng);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register interrupt handler\n");
+
+ trng->hclk = devm_clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(trng->hclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(trng->hclk),
+ "Error getting hardware reference clock\n");
+
+ trng->ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(trng->ahb))
+ return dev_err_probe(&pdev->dev, PTR_ERR(trng->ahb),
+ "Error getting ahb reference clock\n");
+
+ trng->rst = devm_reset_control_get_shared(&pdev->dev, NULL);
+ if (IS_ERR(trng->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(trng->rst),
+ "Error getting hardware reset line\n");
+
+ clk_prepare_enable(trng->hclk);
+ clk_prepare_enable(trng->ahb);
+ reset_control_deassert(trng->rst);
+
+ trng->rng.name = dev_driver_string(&pdev->dev);
+ trng->rng.init = starfive_trng_init;
+ trng->rng.cleanup = starfive_trng_cleanup;
+ trng->rng.read = starfive_trng_read;
+
+ trng->mode = PRNG_256BIT;
+ trng->mission = 1;
+ trng->reseed = RANDOM_RESEED;
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+
+ reset_control_assert(trng->rst);
+ clk_disable_unprepare(trng->ahb);
+ clk_disable_unprepare(trng->hclk);
+
+ return dev_err_probe(&pdev->dev, ret, "Failed to register hwrng\n");
+ }
+
+ return 0;
+}
+
+static int __maybe_unused starfive_trng_suspend(struct device *dev)
+{
+ struct starfive_trng *trng = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(trng->hclk);
+ clk_disable_unprepare(trng->ahb);
+
+ return 0;
+}
+
+static int __maybe_unused starfive_trng_resume(struct device *dev)
+{
+ struct starfive_trng *trng = dev_get_drvdata(dev);
+
+ clk_prepare_enable(trng->hclk);
+ clk_prepare_enable(trng->ahb);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(starfive_trng_pm_ops, starfive_trng_suspend,
+ starfive_trng_resume);
+
+static const struct of_device_id trng_dt_ids[] __maybe_unused = {
+ { .compatible = "starfive,jh7110-trng" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, trng_dt_ids);
+
+static struct platform_driver starfive_trng_driver = {
+ .probe = starfive_trng_probe,
+ .driver = {
+ .name = "jh7110-trng",
+ .pm = &starfive_trng_pm_ops,
+ .of_match_table = of_match_ptr(trng_dt_ids),
+ },
+};
+
+module_platform_driver(starfive_trng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("StarFive True Random Number Generator");
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
new file mode 100644
index 0000000000..2f2f21f1b6
--- /dev/null
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Random Number Generator driver for the Keystone SOC
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Authors: Sandeep Nair
+ * Vitaly Andrianov
+ */
+
+#include <linux/hw_random.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/timekeeping.h>
+
+#define SA_CMD_STATUS_OFS 0x8
+
+/* TRNG enable control in SA System module */
+#define SA_CMD_STATUS_REG_TRNG_ENABLE BIT(3)
+
+/* TRNG start control in TRNG module */
+#define TRNG_CNTL_REG_TRNG_ENABLE BIT(10)
+
+/* Data ready indicator in STATUS register */
+#define TRNG_STATUS_REG_READY BIT(0)
+
+/* Data ready clear control in INTACK register */
+#define TRNG_INTACK_REG_READY BIT(0)
+
+/*
+ * Number of samples taken to gather entropy during startup.
+ * If the value is 0, the number of samples is 2^24;
+ * otherwise it is the value times 2^8.
+ */
+#define TRNG_DEF_STARTUP_CYCLES 0
+#define TRNG_CNTL_REG_STARTUP_CYCLES_SHIFT 16
+
+/*
+ * Minimum number of samples taken to regenerate entropy.
+ * If the value is 0, the number of samples is 2^24;
+ * otherwise it is the value times 2^6.
+ */
+#define TRNG_DEF_MIN_REFILL_CYCLES 1
+#define TRNG_CFG_REG_MIN_REFILL_CYCLES_SHIFT 0
+
+/*
+ * Maximum number of samples taken to regenerate entropy.
+ * If the value is 0, the number of samples is 2^24;
+ * otherwise it is the value times 2^8.
+ */
+#define TRNG_DEF_MAX_REFILL_CYCLES 0
+#define TRNG_CFG_REG_MAX_REFILL_CYCLES_SHIFT 16
+
+/* Number of CLK input cycles between samples */
+#define TRNG_DEF_CLK_DIV_CYCLES 0
+#define TRNG_CFG_REG_SAMPLE_DIV_SHIFT 8
+
+/* Maximum retries to get rng data */
+#define SA_MAX_RNG_DATA_RETRIES 5
+/* Delay between retries (in usecs) */
+#define SA_RNG_DATA_RETRY_DELAY 5
+
+struct trng_regs {
+ u32 output_l;
+ u32 output_h;
+ u32 status;
+ u32 intmask;
+ u32 intack;
+ u32 control;
+ u32 config;
+};
+
+struct ks_sa_rng {
+ struct device *dev;
+ struct hwrng rng;
+ struct clk *clk;
+ struct regmap *regmap_cfg;
+ struct trng_regs __iomem *reg_rng;
+ u64 ready_ts;
+ unsigned int refill_delay_ns;
+};
+
+static unsigned int cycles_to_ns(unsigned long clk_rate, unsigned int cycles)
+{
+ return DIV_ROUND_UP_ULL((TRNG_DEF_CLK_DIV_CYCLES + 1) * 1000000000ull *
+ cycles, clk_rate);
+}
+
+static unsigned int startup_delay_ns(unsigned long clk_rate)
+{
+ if (!TRNG_DEF_STARTUP_CYCLES)
+ return cycles_to_ns(clk_rate, BIT(24));
+ return cycles_to_ns(clk_rate, 256 * TRNG_DEF_STARTUP_CYCLES);
+}
+
+static unsigned int refill_delay_ns(unsigned long clk_rate)
+{
+ if (!TRNG_DEF_MAX_REFILL_CYCLES)
+ return cycles_to_ns(clk_rate, BIT(24));
+ return cycles_to_ns(clk_rate, 256 * TRNG_DEF_MAX_REFILL_CYCLES);
+}
+
+static int ks_sa_rng_init(struct hwrng *rng)
+{
+ u32 value;
+ struct device *dev = (struct device *)rng->priv;
+ struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+ unsigned long clk_rate = clk_get_rate(ks_sa_rng->clk);
+
+ /* Enable RNG module */
+ regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS,
+ SA_CMD_STATUS_REG_TRNG_ENABLE,
+ SA_CMD_STATUS_REG_TRNG_ENABLE);
+
+ /* Configure RNG module */
+ writel(0, &ks_sa_rng->reg_rng->control);
+ value = TRNG_DEF_STARTUP_CYCLES << TRNG_CNTL_REG_STARTUP_CYCLES_SHIFT;
+ writel(value, &ks_sa_rng->reg_rng->control);
+
+ value = (TRNG_DEF_MIN_REFILL_CYCLES <<
+ TRNG_CFG_REG_MIN_REFILL_CYCLES_SHIFT) |
+ (TRNG_DEF_MAX_REFILL_CYCLES <<
+ TRNG_CFG_REG_MAX_REFILL_CYCLES_SHIFT) |
+ (TRNG_DEF_CLK_DIV_CYCLES <<
+ TRNG_CFG_REG_SAMPLE_DIV_SHIFT);
+
+ writel(value, &ks_sa_rng->reg_rng->config);
+
+ /* Disable all interrupts from TRNG */
+ writel(0, &ks_sa_rng->reg_rng->intmask);
+
+ /* Enable RNG */
+ value = readl(&ks_sa_rng->reg_rng->control);
+ value |= TRNG_CNTL_REG_TRNG_ENABLE;
+ writel(value, &ks_sa_rng->reg_rng->control);
+
+ ks_sa_rng->refill_delay_ns = refill_delay_ns(clk_rate);
+ ks_sa_rng->ready_ts = ktime_get_ns() +
+ startup_delay_ns(clk_rate);
+
+ return 0;
+}
+
+static void ks_sa_rng_cleanup(struct hwrng *rng)
+{
+ struct device *dev = (struct device *)rng->priv;
+ struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+
+ /* Disable RNG */
+ writel(0, &ks_sa_rng->reg_rng->control);
+ regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS,
+ SA_CMD_STATUS_REG_TRNG_ENABLE, 0);
+}
+
+static int ks_sa_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ struct device *dev = (struct device *)rng->priv;
+ struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+
+ /* Read random data */
+ data[0] = readl(&ks_sa_rng->reg_rng->output_l);
+ data[1] = readl(&ks_sa_rng->reg_rng->output_h);
+
+ writel(TRNG_INTACK_REG_READY, &ks_sa_rng->reg_rng->intack);
+ ks_sa_rng->ready_ts = ktime_get_ns() + ks_sa_rng->refill_delay_ns;
+
+ return sizeof(u32) * 2;
+}
+
+static int ks_sa_rng_data_present(struct hwrng *rng, int wait)
+{
+ struct device *dev = (struct device *)rng->priv;
+ struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+ u64 now = ktime_get_ns();
+
+ u32 ready;
+ int j;
+
+ if (wait && now < ks_sa_rng->ready_ts) {
+ /* Max delay expected here is 81920000 ns */
+ unsigned long min_delay =
+ DIV_ROUND_UP((u32)(ks_sa_rng->ready_ts - now), 1000);
+
+ usleep_range(min_delay, min_delay + SA_RNG_DATA_RETRY_DELAY);
+ }
+
+ for (j = 0; j < SA_MAX_RNG_DATA_RETRIES; j++) {
+ ready = readl(&ks_sa_rng->reg_rng->status);
+ ready &= TRNG_STATUS_REG_READY;
+
+ if (ready || !wait)
+ break;
+
+ udelay(SA_RNG_DATA_RETRY_DELAY);
+ }
+
+ return ready;
+}
+
+static int ks_sa_rng_probe(struct platform_device *pdev)
+{
+ struct ks_sa_rng *ks_sa_rng;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ks_sa_rng = devm_kzalloc(dev, sizeof(*ks_sa_rng), GFP_KERNEL);
+ if (!ks_sa_rng)
+ return -ENOMEM;
+
+ ks_sa_rng->dev = dev;
+ ks_sa_rng->rng = (struct hwrng) {
+ .name = "ks_sa_hwrng",
+ .init = ks_sa_rng_init,
+ .data_read = ks_sa_rng_data_read,
+ .data_present = ks_sa_rng_data_present,
+ .cleanup = ks_sa_rng_cleanup,
+ };
+ ks_sa_rng->rng.priv = (unsigned long)dev;
+
+ ks_sa_rng->reg_rng = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ks_sa_rng->reg_rng))
+ return PTR_ERR(ks_sa_rng->reg_rng);
+
+ ks_sa_rng->regmap_cfg =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "ti,syscon-sa-cfg");
+
+ if (IS_ERR(ks_sa_rng->regmap_cfg)) {
+ dev_err(dev, "syscon_node_to_regmap failed\n");
+ return -EINVAL;
+ }
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable SA power-domain\n");
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ks_sa_rng);
+
+ return devm_hwrng_register(&pdev->dev, &ks_sa_rng->rng);
+}
+
+static int ks_sa_rng_remove(struct platform_device *pdev)
+{
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id ks_sa_rng_dt_match[] = {
+ {
+ .compatible = "ti,keystone-rng",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ks_sa_rng_dt_match);
+
+static struct platform_driver ks_sa_rng_driver = {
+ .driver = {
+ .name = "ks-sa-rng",
+ .of_match_table = ks_sa_rng_dt_match,
+ },
+ .probe = ks_sa_rng_probe,
+ .remove = ks_sa_rng_remove,
+};
+
+module_platform_driver(ks_sa_rng_driver);
+
+MODULE_DESCRIPTION("Keystone NETCP SA H/W Random Number Generator driver");
+MODULE_AUTHOR("Vitaly Andrianov <vitalya@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
new file mode 100644
index 0000000000..a4eb8e35f1
--- /dev/null
+++ b/drivers/char/hw_random/meson-rng.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Amlogic, Inc.
+ */
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+
+#define RNG_DATA 0x00
+
+struct meson_rng_data {
+ void __iomem *base;
+ struct hwrng rng;
+};
+
+static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct meson_rng_data *data =
+ container_of(rng, struct meson_rng_data, rng);
+
+ *(u32 *)buf = readl_relaxed(data->base + RNG_DATA);
+
+ return sizeof(u32);
+}
+
+static int meson_rng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct meson_rng_data *data;
+ struct clk *core_clk;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+
+ core_clk = devm_clk_get_optional_enabled(dev, "core");
+ if (IS_ERR(core_clk))
+ return dev_err_probe(dev, PTR_ERR(core_clk),
+ "Failed to get core clock\n");
+
+ data->rng.name = pdev->name;
+ data->rng.read = meson_rng_read;
+
+ return devm_hwrng_register(dev, &data->rng);
+}
+
+static const struct of_device_id meson_rng_of_match[] = {
+ { .compatible = "amlogic,meson-rng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, meson_rng_of_match);
+
+static struct platform_driver meson_rng_driver = {
+ .probe = meson_rng_probe,
+ .driver = {
+ .name = "meson-rng",
+ .of_match_table = meson_rng_of_match,
+ },
+};
+
+module_platform_driver(meson_rng_driver);
+
+MODULE_DESCRIPTION("Meson H/W Random Number Generator driver");
+MODULE_AUTHOR("Lawrence Mok <lawrence.mok@amlogic.com>");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/char/hw_random/mpfs-rng.c b/drivers/char/hw_random/mpfs-rng.c
new file mode 100644
index 0000000000..c6972734ae
--- /dev/null
+++ b/drivers/char/hw_random/mpfs-rng.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip PolarFire SoC (MPFS) hardware random driver
+ *
+ * Copyright (c) 2020-2022 Microchip Corporation. All rights reserved.
+ *
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ */
+
+#include <linux/module.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+#include <soc/microchip/mpfs.h>
+
+#define CMD_OPCODE 0x21
+#define CMD_DATA_SIZE 0U
+#define CMD_DATA NULL
+#define MBOX_OFFSET 0U
+#define RESP_OFFSET 0U
+#define RNG_RESP_BYTES 32U
+
+struct mpfs_rng {
+ struct mpfs_sys_controller *sys_controller;
+ struct hwrng rng;
+};
+
+static int mpfs_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct mpfs_rng *rng_priv = container_of(rng, struct mpfs_rng, rng);
+ u32 response_msg[RNG_RESP_BYTES / sizeof(u32)];
+ unsigned int count = 0, copy_size_bytes;
+ int ret;
+
+ struct mpfs_mss_response response = {
+ .resp_status = 0U,
+ .resp_msg = (u32 *)response_msg,
+ .resp_size = RNG_RESP_BYTES
+ };
+ struct mpfs_mss_msg msg = {
+ .cmd_opcode = CMD_OPCODE,
+ .cmd_data_size = CMD_DATA_SIZE,
+ .response = &response,
+ .cmd_data = CMD_DATA,
+ .mbox_offset = MBOX_OFFSET,
+ .resp_offset = RESP_OFFSET
+ };
+
+ while (count < max) {
+ ret = mpfs_blocking_transaction(rng_priv->sys_controller, &msg);
+ if (ret)
+ return ret;
+
+ copy_size_bytes = max - count > RNG_RESP_BYTES ? RNG_RESP_BYTES : max - count;
+ memcpy(buf + count, response_msg, copy_size_bytes);
+
+ count += copy_size_bytes;
+ if (!wait)
+ break;
+ }
+
+ return count;
+}
+
+static int mpfs_rng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mpfs_rng *rng_priv;
+ int ret;
+
+ rng_priv = devm_kzalloc(dev, sizeof(*rng_priv), GFP_KERNEL);
+ if (!rng_priv)
+ return -ENOMEM;
+
+ rng_priv->sys_controller = mpfs_sys_controller_get(&pdev->dev);
+ if (IS_ERR(rng_priv->sys_controller))
+ return dev_err_probe(dev, PTR_ERR(rng_priv->sys_controller),
+ "Failed to register system controller hwrng sub device\n");
+
+ rng_priv->rng.read = mpfs_rng_read;
+ rng_priv->rng.name = pdev->name;
+
+ platform_set_drvdata(pdev, rng_priv);
+
+ ret = devm_hwrng_register(&pdev->dev, &rng_priv->rng);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register MPFS hwrng\n");
+
+ dev_info(&pdev->dev, "Registered MPFS hwrng\n");
+
+ return 0;
+}
+
+static struct platform_driver mpfs_rng_driver = {
+ .driver = {
+ .name = "mpfs-rng",
+ },
+ .probe = mpfs_rng_probe,
+};
+module_platform_driver(mpfs_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_DESCRIPTION("PolarFire SoC (MPFS) hardware random driver");
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
new file mode 100644
index 0000000000..aa993753ab
--- /dev/null
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Mediatek Hardware Random Number Generator
+ *
+ * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
+ */
+#define MTK_RNG_DEV KBUILD_MODNAME
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+/* Runtime PM autosuspend timeout: */
+#define RNG_AUTOSUSPEND_TIMEOUT 100
+
+#define USEC_POLL 2
+#define TIMEOUT_POLL 60
+
+#define RNG_CTRL 0x00
+#define RNG_EN BIT(0)
+#define RNG_READY BIT(31)
+
+#define RNG_DATA 0x08
+
+#define to_mtk_rng(p) container_of(p, struct mtk_rng, rng)
+
+struct mtk_rng {
+ void __iomem *base;
+ struct clk *clk;
+ struct hwrng rng;
+};
+
+static int mtk_rng_init(struct hwrng *rng)
+{
+ struct mtk_rng *priv = to_mtk_rng(rng);
+ u32 val;
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
+
+ val = readl(priv->base + RNG_CTRL);
+ val |= RNG_EN;
+ writel(val, priv->base + RNG_CTRL);
+
+ return 0;
+}
+
+static void mtk_rng_cleanup(struct hwrng *rng)
+{
+ struct mtk_rng *priv = to_mtk_rng(rng);
+ u32 val;
+
+ val = readl(priv->base + RNG_CTRL);
+ val &= ~RNG_EN;
+ writel(val, priv->base + RNG_CTRL);
+
+ clk_disable_unprepare(priv->clk);
+}
+
+static bool mtk_rng_wait_ready(struct hwrng *rng, bool wait)
+{
+ struct mtk_rng *priv = to_mtk_rng(rng);
+ int ready;
+
+ ready = readl(priv->base + RNG_CTRL) & RNG_READY;
+ if (!ready && wait)
+ readl_poll_timeout_atomic(priv->base + RNG_CTRL, ready,
+ ready & RNG_READY, USEC_POLL,
+ TIMEOUT_POLL);
+ return !!(ready & RNG_READY);
+}
+
+static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct mtk_rng *priv = to_mtk_rng(rng);
+ int retval = 0;
+
+ pm_runtime_get_sync((struct device *)priv->rng.priv);
+
+ while (max >= sizeof(u32)) {
+ if (!mtk_rng_wait_ready(rng, wait))
+ break;
+
+ *(u32 *)buf = readl(priv->base + RNG_DATA);
+ retval += sizeof(u32);
+ buf += sizeof(u32);
+ max -= sizeof(u32);
+ }
+
+ pm_runtime_mark_last_busy((struct device *)priv->rng.priv);
+ pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv);
+
+ return retval || !wait ? retval : -EIO;
+}
+
+static int mtk_rng_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct mtk_rng *priv;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->rng.name = pdev->name;
+#ifndef CONFIG_PM
+ priv->rng.init = mtk_rng_init;
+ priv->rng.cleanup = mtk_rng_cleanup;
+#endif
+ priv->rng.read = mtk_rng_read;
+ priv->rng.priv = (unsigned long)&pdev->dev;
+ priv->rng.quality = 900;
+
+ priv->clk = devm_clk_get(&pdev->dev, "rng");
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ dev_err(&pdev->dev, "no clock for device: %d\n", ret);
+ return ret;
+ }
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ ret = devm_hwrng_register(&pdev->dev, &priv->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register rng device: %d\n",
+ ret);
+ return ret;
+ }
+
+ dev_set_drvdata(&pdev->dev, priv);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ dev_info(&pdev->dev, "registered RNG driver\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mtk_rng_runtime_suspend(struct device *dev)
+{
+ struct mtk_rng *priv = dev_get_drvdata(dev);
+
+ mtk_rng_cleanup(&priv->rng);
+
+ return 0;
+}
+
+static int mtk_rng_runtime_resume(struct device *dev)
+{
+ struct mtk_rng *priv = dev_get_drvdata(dev);
+
+ return mtk_rng_init(&priv->rng);
+}
+
+static const struct dev_pm_ops mtk_rng_pm_ops = {
+ SET_RUNTIME_PM_OPS(mtk_rng_runtime_suspend,
+ mtk_rng_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+#define MTK_RNG_PM_OPS (&mtk_rng_pm_ops)
+#else /* CONFIG_PM */
+#define MTK_RNG_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static const struct of_device_id mtk_rng_match[] = {
+ { .compatible = "mediatek,mt7986-rng" },
+ { .compatible = "mediatek,mt7623-rng" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_rng_match);
+
+static struct platform_driver mtk_rng_driver = {
+ .probe = mtk_rng_probe,
+ .driver = {
+ .name = MTK_RNG_DEV,
+ .pm = MTK_RNG_PM_OPS,
+ .of_match_table = mtk_rng_match,
+ },
+};
+
+module_platform_driver(mtk_rng_driver);
+
+MODULE_DESCRIPTION("Mediatek Random Number Generator Driver");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
new file mode 100644
index 0000000000..008763c988
--- /dev/null
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RNG driver for Freescale RNGA
+ *
+ * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Author: Alan Carvalho de Assis <acassis@gmail.com>
+ */
+
+/*
+ *
+ * This driver is based on other RNG drivers.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/* RNGA Registers */
+#define RNGA_CONTROL 0x00
+#define RNGA_STATUS 0x04
+#define RNGA_ENTROPY 0x08
+#define RNGA_OUTPUT_FIFO 0x0c
+#define RNGA_MODE 0x10
+#define RNGA_VERIFICATION_CONTROL 0x14
+#define RNGA_OSC_CONTROL_COUNTER 0x18
+#define RNGA_OSC1_COUNTER 0x1c
+#define RNGA_OSC2_COUNTER 0x20
+#define RNGA_OSC_COUNTER_STATUS 0x24
+
+/* RNGA Registers Range */
+#define RNG_ADDR_RANGE 0x28
+
+/* RNGA Control Register */
+#define RNGA_CONTROL_SLEEP 0x00000010
+#define RNGA_CONTROL_CLEAR_INT 0x00000008
+#define RNGA_CONTROL_MASK_INTS 0x00000004
+#define RNGA_CONTROL_HIGH_ASSURANCE 0x00000002
+#define RNGA_CONTROL_GO 0x00000001
+
+#define RNGA_STATUS_LEVEL_MASK 0x0000ff00
+
+/* RNGA Status Register */
+#define RNGA_STATUS_OSC_DEAD 0x80000000
+#define RNGA_STATUS_SLEEP 0x00000010
+#define RNGA_STATUS_ERROR_INT 0x00000008
+#define RNGA_STATUS_FIFO_UNDERFLOW 0x00000004
+#define RNGA_STATUS_LAST_READ_STATUS 0x00000002
+#define RNGA_STATUS_SECURITY_VIOLATION 0x00000001
+
+struct mxc_rng {
+ struct device *dev;
+ struct hwrng rng;
+ void __iomem *mem;
+ struct clk *clk;
+};
+
+static int mxc_rnga_data_present(struct hwrng *rng, int wait)
+{
+ int i;
+ struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng);
+
+ for (i = 0; i < 20; i++) {
+ /* how many random numbers are in FIFO? [0-16] */
+ int level = (__raw_readl(mxc_rng->mem + RNGA_STATUS) &
+ RNGA_STATUS_LEVEL_MASK) >> 8;
+ if (level || !wait)
+ return !!level;
+ udelay(10);
+ }
+ return 0;
+}
+
+static int mxc_rnga_data_read(struct hwrng *rng, u32 *data)
+{
+ int err;
+ u32 ctrl;
+ struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng);
+
+ /* retrieve a random number from FIFO */
+ *data = __raw_readl(mxc_rng->mem + RNGA_OUTPUT_FIFO);
+
+ /* some error while reading this random number? */
+ err = __raw_readl(mxc_rng->mem + RNGA_STATUS) & RNGA_STATUS_ERROR_INT;
+
+ /* if error: clear the error interrupt, but don't return a random number */
+ if (err) {
+ dev_dbg(mxc_rng->dev, "Error while reading random number!\n");
+ ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL);
+ __raw_writel(ctrl | RNGA_CONTROL_CLEAR_INT,
+ mxc_rng->mem + RNGA_CONTROL);
+ return 0;
+ } else
+ return 4;
+}
+
+static int mxc_rnga_init(struct hwrng *rng)
+{
+ u32 ctrl, osc;
+ struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng);
+
+ /* wake up */
+ ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL);
+ __raw_writel(ctrl & ~RNGA_CONTROL_SLEEP, mxc_rng->mem + RNGA_CONTROL);
+
+ /* verify if oscillator is working */
+ osc = __raw_readl(mxc_rng->mem + RNGA_STATUS);
+ if (osc & RNGA_STATUS_OSC_DEAD) {
+ dev_err(mxc_rng->dev, "RNGA Oscillator is dead!\n");
+ return -ENODEV;
+ }
+
+ /* go running */
+ ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL);
+ __raw_writel(ctrl | RNGA_CONTROL_GO, mxc_rng->mem + RNGA_CONTROL);
+
+ return 0;
+}
+
+static void mxc_rnga_cleanup(struct hwrng *rng)
+{
+ u32 ctrl;
+ struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng);
+
+ ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL);
+
+ /* stop rnga */
+ __raw_writel(ctrl & ~RNGA_CONTROL_GO, mxc_rng->mem + RNGA_CONTROL);
+}
+
+static int __init mxc_rnga_probe(struct platform_device *pdev)
+{
+ int err;
+ struct mxc_rng *mxc_rng;
+
+ mxc_rng = devm_kzalloc(&pdev->dev, sizeof(*mxc_rng), GFP_KERNEL);
+ if (!mxc_rng)
+ return -ENOMEM;
+
+ mxc_rng->dev = &pdev->dev;
+ mxc_rng->rng.name = "mxc-rnga";
+ mxc_rng->rng.init = mxc_rnga_init;
+ mxc_rng->rng.cleanup = mxc_rnga_cleanup;
+ mxc_rng->rng.data_present = mxc_rnga_data_present;
+ mxc_rng->rng.data_read = mxc_rnga_data_read;
+
+ mxc_rng->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(mxc_rng->clk)) {
+ dev_err(&pdev->dev, "Could not get rng_clk!\n");
+ return PTR_ERR(mxc_rng->clk);
+ }
+
+ err = clk_prepare_enable(mxc_rng->clk);
+ if (err)
+ return err;
+
+ mxc_rng->mem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mxc_rng->mem)) {
+ err = PTR_ERR(mxc_rng->mem);
+ goto err_ioremap;
+ }
+
+ err = hwrng_register(&mxc_rng->rng);
+ if (err) {
+ dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err);
+ goto err_ioremap;
+ }
+
+ return 0;
+
+err_ioremap:
+ clk_disable_unprepare(mxc_rng->clk);
+ return err;
+}
+
+static int __exit mxc_rnga_remove(struct platform_device *pdev)
+{
+ struct mxc_rng *mxc_rng = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&mxc_rng->rng);
+
+ clk_disable_unprepare(mxc_rng->clk);
+
+ return 0;
+}
+
+static const struct of_device_id mxc_rnga_of_match[] = {
+ { .compatible = "fsl,imx21-rnga", },
+ { .compatible = "fsl,imx31-rnga", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mxc_rnga_of_match);
+
+static struct platform_driver mxc_rnga_driver = {
+ .driver = {
+ .name = "mxc_rnga",
+ .of_match_table = mxc_rnga_of_match,
+ },
+ .remove = __exit_p(mxc_rnga_remove),
+};
+
+module_platform_driver_probe(mxc_rnga_driver, mxc_rnga_probe);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("H/W RNGA driver for i.MX");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/n2-asm.S b/drivers/char/hw_random/n2-asm.S
new file mode 100644
index 0000000000..c205df43d5
--- /dev/null
+++ b/drivers/char/hw_random/n2-asm.S
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* n2-asm.S: Niagara2 RNG hypervisor call assembler.
+ *
+ * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
+ */
+#include <linux/linkage.h>
+#include <asm/hypervisor.h>
+#include "n2rng.h"
+
+ .text
+
+ENTRY(sun4v_rng_get_diag_ctl)
+ mov HV_FAST_RNG_GET_DIAG_CTL, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_rng_get_diag_ctl)
+
+ENTRY(sun4v_rng_ctl_read_v1)
+ mov %o1, %o3
+ mov %o2, %o4
+ mov HV_FAST_RNG_CTL_READ, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o3]
+ retl
+ stx %o2, [%o4]
+ENDPROC(sun4v_rng_ctl_read_v1)
+
+ENTRY(sun4v_rng_ctl_read_v2)
+ save %sp, -192, %sp
+ mov %i0, %o0
+ mov %i1, %o1
+ mov HV_FAST_RNG_CTL_READ, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%i2]
+ stx %o2, [%i3]
+ stx %o3, [%i4]
+ stx %o4, [%i5]
+ ret
+ restore %g0, %o0, %o0
+ENDPROC(sun4v_rng_ctl_read_v2)
+
+ENTRY(sun4v_rng_ctl_write_v1)
+ mov %o3, %o4
+ mov HV_FAST_RNG_CTL_WRITE, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%o4]
+ENDPROC(sun4v_rng_ctl_write_v1)
+
+ENTRY(sun4v_rng_ctl_write_v2)
+ mov HV_FAST_RNG_CTL_WRITE, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_rng_ctl_write_v2)
+
+ENTRY(sun4v_rng_data_read_diag_v1)
+ mov %o2, %o4
+ mov HV_FAST_RNG_DATA_READ_DIAG, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%o4]
+ENDPROC(sun4v_rng_data_read_diag_v1)
+
+ENTRY(sun4v_rng_data_read_diag_v2)
+ mov %o3, %o4
+ mov HV_FAST_RNG_DATA_READ_DIAG, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%o4]
+ENDPROC(sun4v_rng_data_read_diag_v2)
+
+ENTRY(sun4v_rng_data_read)
+ mov %o1, %o4
+ mov HV_FAST_RNG_DATA_READ, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%o4]
+ENDPROC(sun4v_rng_data_read)
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
new file mode 100644
index 0000000000..73e4081464
--- /dev/null
+++ b/drivers/char/hw_random/n2-drv.c
@@ -0,0 +1,870 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* n2-drv.c: Niagara-2 RNG driver.
+ *
+ * Copyright (C) 2008, 2011 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/preempt.h>
+#include <linux/hw_random.h>
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <asm/hypervisor.h>
+
+#include "n2rng.h"
+
+#define DRV_MODULE_NAME "n2rng"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_MODULE_VERSION "0.3"
+#define DRV_MODULE_RELDATE "Jan 7, 2017"
+
+static char version[] =
+ DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("Niagara2 RNG driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+/* The Niagara2 RNG provides a 64-bit read-only random number
+ * register, plus a control register. Access to the RNG is
+ * virtualized through the hypervisor so that both guests and control
+ * nodes can access the device.
+ *
+ * The entropy source consists of raw entropy sources, each
+ * constructed from a voltage controlled oscillator whose phase is
+ * jittered by thermal noise sources.
+ *
+ * The oscillators in the three raw entropy sources run at
+ * different frequencies. Normally, all three generator outputs are
+ * gathered, xored together, and fed into a CRC circuit, the output of
+ * which is the 64-bit read-only register.
+ *
+ * Some time is needed for enough entropy to build up such that a
+ * full 64 bits of entropy are available in the register.
+ * In normal operating mode (RNG_CTL_LFSR is set), the chip implements
+ * an interlock which blocks register reads until sufficient entropy
+ * is available.
+ *
+ * A control register is provided for adjusting various aspects of RNG
+ * operation, and to enable diagnostic modes. Each of the three raw
+ * entropy sources has an enable bit (RNG_CTL_ES{1,2,3}). Also
+ * provided are fields for controlling the minimum time in cycles
+ * between read accesses to the register (RNG_CTL_WAIT, this controls
+ * the interlock described in the previous paragraph).
+ *
+ * The standard setting is to have the mode bit (RNG_CTL_LFSR) set,
+ * all three entropy sources enabled, and the interlock time set
+ * appropriately.
+ *
+ * The CRC polynomial used by the chip is:
+ *
+ * P(X) = x64 + x61 + x57 + x56 + x52 + x51 + x50 + x48 + x47 + x46 +
+ * x43 + x42 + x41 + x39 + x38 + x37 + x35 + x32 + x28 + x25 +
+ * x22 + x21 + x17 + x15 + x13 + x12 + x11 + x7 + x5 + x + 1
+ *
+ * The RNG_CTL_VCO value of each noise cell must be programmed
+ * separately. This is why 4 control register values must be provided
+ * to the hypervisor. During a write, the hypervisor writes them all,
+ * one at a time, to the actual RNG_CTL register. The first three
+ * values are used to setup the desired RNG_CTL_VCO for each entropy
+ * source, for example:
+ *
+ * control 0: (1 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES1
+ * control 1: (2 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES2
+ * control 2: (3 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES3
+ *
+ * And then the fourth value sets the final chip state and enables
+ * the desired entropy sources.
+ */
+
+static int n2rng_hv_err_trans(unsigned long hv_err)
+{
+ switch (hv_err) {
+ case HV_EOK:
+ return 0;
+ case HV_EWOULDBLOCK:
+ return -EAGAIN;
+ case HV_ENOACCESS:
+ return -EPERM;
+ case HV_EIO:
+ return -EIO;
+ case HV_EBUSY:
+ return -EBUSY;
+ case HV_EBADALIGN:
+ case HV_ENORADDR:
+ return -EFAULT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static unsigned long n2rng_generic_read_control_v2(unsigned long ra,
+ unsigned long unit)
+{
+ unsigned long hv_err, state, ticks, watchdog_delta, watchdog_status;
+ int block = 0, busy = 0;
+
+ while (1) {
+ hv_err = sun4v_rng_ctl_read_v2(ra, unit, &state,
+ &ticks,
+ &watchdog_delta,
+ &watchdog_status);
+ if (hv_err == HV_EOK)
+ break;
+
+ if (hv_err == HV_EBUSY) {
+ if (++busy >= N2RNG_BUSY_LIMIT)
+ break;
+
+ udelay(1);
+ } else if (hv_err == HV_EWOULDBLOCK) {
+ if (++block >= N2RNG_BLOCK_LIMIT)
+ break;
+
+ __delay(ticks);
+ } else
+ break;
+ }
+
+ return hv_err;
+}
+
+/* In multi-socket situations, the hypervisor might need to
+ * queue up the RNG control register write if it's for a unit
+ * that is on a cpu socket other than the one we are executing on.
+ *
+ * We poll here waiting for a successful read of that control
+ * register to make sure the write has been actually performed.
+ */
+static unsigned long n2rng_control_settle_v2(struct n2rng *np, int unit)
+{
+ unsigned long ra = __pa(&np->scratch_control[0]);
+
+ return n2rng_generic_read_control_v2(ra, unit);
+}
+
+static unsigned long n2rng_write_ctl_one(struct n2rng *np, int unit,
+ unsigned long state,
+ unsigned long control_ra,
+ unsigned long watchdog_timeout,
+ unsigned long *ticks)
+{
+ unsigned long hv_err;
+
+ if (np->hvapi_major == 1) {
+ hv_err = sun4v_rng_ctl_write_v1(control_ra, state,
+ watchdog_timeout, ticks);
+ } else {
+ hv_err = sun4v_rng_ctl_write_v2(control_ra, state,
+ watchdog_timeout, unit);
+ if (hv_err == HV_EOK)
+ hv_err = n2rng_control_settle_v2(np, unit);
+ *ticks = N2RNG_ACCUM_CYCLES_DEFAULT;
+ }
+
+ return hv_err;
+}
+
+static int n2rng_generic_read_data(unsigned long data_ra)
+{
+ unsigned long ticks, hv_err;
+ int block = 0, hcheck = 0;
+
+ while (1) {
+ hv_err = sun4v_rng_data_read(data_ra, &ticks);
+ if (hv_err == HV_EOK)
+ return 0;
+
+ if (hv_err == HV_EWOULDBLOCK) {
+ if (++block >= N2RNG_BLOCK_LIMIT)
+ return -EWOULDBLOCK;
+ __delay(ticks);
+ } else if (hv_err == HV_ENOACCESS) {
+ return -EPERM;
+ } else if (hv_err == HV_EIO) {
+ if (++hcheck >= N2RNG_HCHECK_LIMIT)
+ return -EIO;
+ udelay(10000);
+ } else
+ return -ENODEV;
+ }
+}
+
+static unsigned long n2rng_read_diag_data_one(struct n2rng *np,
+ unsigned long unit,
+ unsigned long data_ra,
+ unsigned long data_len,
+ unsigned long *ticks)
+{
+ unsigned long hv_err;
+
+ if (np->hvapi_major == 1) {
+ hv_err = sun4v_rng_data_read_diag_v1(data_ra, data_len, ticks);
+ } else {
+ hv_err = sun4v_rng_data_read_diag_v2(data_ra, data_len,
+ unit, ticks);
+ if (!*ticks)
+ *ticks = N2RNG_ACCUM_CYCLES_DEFAULT;
+ }
+ return hv_err;
+}
+
+static int n2rng_generic_read_diag_data(struct n2rng *np,
+ unsigned long unit,
+ unsigned long data_ra,
+ unsigned long data_len)
+{
+ unsigned long ticks, hv_err;
+ int block = 0;
+
+ while (1) {
+ hv_err = n2rng_read_diag_data_one(np, unit,
+ data_ra, data_len,
+ &ticks);
+ if (hv_err == HV_EOK)
+ return 0;
+
+ if (hv_err == HV_EWOULDBLOCK) {
+ if (++block >= N2RNG_BLOCK_LIMIT)
+ return -EWOULDBLOCK;
+ __delay(ticks);
+ } else if (hv_err == HV_ENOACCESS) {
+ return -EPERM;
+ } else if (hv_err == HV_EIO) {
+ return -EIO;
+ } else
+ return -ENODEV;
+ }
+}
+
+
+static int n2rng_generic_write_control(struct n2rng *np,
+ unsigned long control_ra,
+ unsigned long unit,
+ unsigned long state)
+{
+ unsigned long hv_err, ticks;
+ int block = 0, busy = 0;
+
+ while (1) {
+ hv_err = n2rng_write_ctl_one(np, unit, state, control_ra,
+ np->wd_timeo, &ticks);
+ if (hv_err == HV_EOK)
+ return 0;
+
+ if (hv_err == HV_EWOULDBLOCK) {
+ if (++block >= N2RNG_BLOCK_LIMIT)
+ return -EWOULDBLOCK;
+ __delay(ticks);
+ } else if (hv_err == HV_EBUSY) {
+ if (++busy >= N2RNG_BUSY_LIMIT)
+ return -EBUSY;
+ udelay(1);
+ } else
+ return -ENODEV;
+ }
+}
+
+/* Just try to see if we can successfully access the control register
+ * of the RNG on the domain on which we are currently executing.
+ */
+static int n2rng_try_read_ctl(struct n2rng *np)
+{
+ unsigned long hv_err;
+ unsigned long x;
+
+ if (np->hvapi_major == 1) {
+ hv_err = sun4v_rng_get_diag_ctl();
+ } else {
+ /* We purposefully give invalid arguments; HV_ENOACCESS
+ * is higher priority than the errors we'd get from
+ * these other cases, and that's the error we are
+ * truly interested in.
+ */
+ hv_err = sun4v_rng_ctl_read_v2(0UL, ~0UL, &x, &x, &x, &x);
+ switch (hv_err) {
+ case HV_EWOULDBLOCK:
+ case HV_ENOACCESS:
+ break;
+ default:
+ hv_err = HV_EOK;
+ break;
+ }
+ }
+
+ return n2rng_hv_err_trans(hv_err);
+}
+
+static u64 n2rng_control_default(struct n2rng *np, int ctl)
+{
+ u64 val = 0;
+
+ if (np->data->chip_version == 1) {
+ val = ((2 << RNG_v1_CTL_ASEL_SHIFT) |
+ (N2RNG_ACCUM_CYCLES_DEFAULT << RNG_v1_CTL_WAIT_SHIFT) |
+ RNG_CTL_LFSR);
+
+ switch (ctl) {
+ case 0:
+ val |= (1 << RNG_v1_CTL_VCO_SHIFT) | RNG_CTL_ES1;
+ break;
+ case 1:
+ val |= (2 << RNG_v1_CTL_VCO_SHIFT) | RNG_CTL_ES2;
+ break;
+ case 2:
+ val |= (3 << RNG_v1_CTL_VCO_SHIFT) | RNG_CTL_ES3;
+ break;
+ case 3:
+ val |= RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3;
+ break;
+ default:
+ break;
+ }
+
+ } else {
+ val = ((2 << RNG_v2_CTL_ASEL_SHIFT) |
+ (N2RNG_ACCUM_CYCLES_DEFAULT << RNG_v2_CTL_WAIT_SHIFT) |
+ RNG_CTL_LFSR);
+
+ switch (ctl) {
+ case 0:
+ val |= (1 << RNG_v2_CTL_VCO_SHIFT) | RNG_CTL_ES1;
+ break;
+ case 1:
+ val |= (2 << RNG_v2_CTL_VCO_SHIFT) | RNG_CTL_ES2;
+ break;
+ case 2:
+ val |= (3 << RNG_v2_CTL_VCO_SHIFT) | RNG_CTL_ES3;
+ break;
+ case 3:
+ val |= RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return val;
+}
+
+static void n2rng_control_swstate_init(struct n2rng *np)
+{
+ int i;
+
+ np->flags |= N2RNG_FLAG_CONTROL;
+
+ np->health_check_sec = N2RNG_HEALTH_CHECK_SEC_DEFAULT;
+ np->accum_cycles = N2RNG_ACCUM_CYCLES_DEFAULT;
+ np->wd_timeo = N2RNG_WD_TIMEO_DEFAULT;
+
+ for (i = 0; i < np->num_units; i++) {
+ struct n2rng_unit *up = &np->units[i];
+
+ up->control[0] = n2rng_control_default(np, 0);
+ up->control[1] = n2rng_control_default(np, 1);
+ up->control[2] = n2rng_control_default(np, 2);
+ up->control[3] = n2rng_control_default(np, 3);
+ }
+
+ np->hv_state = HV_RNG_STATE_UNCONFIGURED;
+}
+
+static int n2rng_grab_diag_control(struct n2rng *np)
+{
+ int i, busy_count, err = -ENODEV;
+
+ busy_count = 0;
+ for (i = 0; i < 100; i++) {
+ err = n2rng_try_read_ctl(np);
+ if (err != -EAGAIN)
+ break;
+
+ if (++busy_count > 100) {
+ dev_err(&np->op->dev,
+ "Grab diag control timeout.\n");
+ return -ENODEV;
+ }
+
+ udelay(1);
+ }
+
+ return err;
+}
+
+static int n2rng_init_control(struct n2rng *np)
+{
+ int err = n2rng_grab_diag_control(np);
+
+ /* Not in the control domain, that's OK we are only a consumer
+ * of the RNG data, we don't setup and program it.
+ */
+ if (err == -EPERM)
+ return 0;
+ if (err)
+ return err;
+
+ n2rng_control_swstate_init(np);
+
+ return 0;
+}
+
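+/* hwrng data_read callback. The hypervisor returns 64 bits at a
+ * time, so half of each read is cached in np->buffer and handed out
+ * on the next call. On a read error the device is marked not ready
+ * and the self-test work is rescheduled.
+ */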
+static int n2rng_data_read(struct hwrng *rng, u32 *data)
+{
+ struct n2rng *np = (struct n2rng *) rng->priv;
+ unsigned long ra = __pa(&np->test_data);
+ int len;
+
+ if (!(np->flags & N2RNG_FLAG_READY)) {
+ len = 0;
+ } else if (np->flags & N2RNG_FLAG_BUFFER_VALID) {
+ np->flags &= ~N2RNG_FLAG_BUFFER_VALID;
+ *data = np->buffer;
+ len = 4;
+ } else {
+ int err = n2rng_generic_read_data(ra);
+ if (!err) {
+ np->flags |= N2RNG_FLAG_BUFFER_VALID;
+ np->buffer = np->test_data >> 32;
+ *data = np->test_data & 0xffffffff;
+ len = 4;
+ } else {
+ dev_err(&np->op->dev, "RNG error, retesting\n");
+ np->flags &= ~N2RNG_FLAG_READY;
+ if (!(np->flags & N2RNG_FLAG_SHUTDOWN))
+ schedule_delayed_work(&np->work, 0);
+ len = 0;
+ }
+ }
+
+ return len;
+}
+
+/* On a guest node, just make sure we can read random data properly.
+ * If a control node reboots or reloads its n2rng driver, this won't
+ * work during that time. So we have to keep probing until the device
+ * becomes usable.
+ */
+static int n2rng_guest_check(struct n2rng *np)
+{
+ unsigned long ra = __pa(&np->test_data);
+
+ return n2rng_generic_read_data(ra);
+}
+
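+/* Switch the unit to 'pre_state' using 'pre_control', read diagnostic
+ * data into 'buffer', then restore 'post_control'/'post_state'
+ * regardless of whether the read succeeded.
+ */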
+static int n2rng_entropy_diag_read(struct n2rng *np, unsigned long unit,
+ u64 *pre_control, u64 pre_state,
+ u64 *buffer, unsigned long buf_len,
+ u64 *post_control, u64 post_state)
+{
+ unsigned long post_ctl_ra = __pa(post_control);
+ unsigned long pre_ctl_ra = __pa(pre_control);
+ unsigned long buffer_ra = __pa(buffer);
+ int err;
+
+ err = n2rng_generic_write_control(np, pre_ctl_ra, unit, pre_state);
+ if (err)
+ return err;
+
+ err = n2rng_generic_read_diag_data(np, unit,
+ buffer_ra, buf_len);
+
+ (void) n2rng_generic_write_control(np, post_ctl_ra, unit,
+ post_state);
+
+ return err;
+}
+
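+/* Step a Galois LFSR defined by 'poly' forward by 'count' bits:
+ * shift left and XOR in the polynomial whenever the top bit falls
+ * out. Used to generate the expected self-test sequence.
+ */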
+static u64 advance_polynomial(u64 poly, u64 val, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ int highbit_set = ((s64)val < 0);
+
+ val <<= 1;
+ if (highbit_set)
+ val ^= poly;
+ }
+
+ return val;
+}
+
+static int n2rng_test_buffer_find(struct n2rng *np, u64 val)
+{
+ int i, count = 0;
+
+ /* Purposefully skip over the first word. */
+ for (i = 1; i < SELFTEST_BUFFER_WORDS; i++) {
+ if (np->test_buffer[i] == val)
+ count++;
+ }
+ return count;
+}
+
+static void n2rng_dump_test_buffer(struct n2rng *np)
+{
+ int i;
+
+ for (i = 0; i < SELFTEST_BUFFER_WORDS; i++)
+ dev_err(&np->op->dev, "Test buffer slot %d [0x%016llx]\n",
+ i, np->test_buffer[i]);
+}
+
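+/* Compare the diagnostic buffer against the expected LFSR sequence,
+ * counting matches while advancing the expected value one step per
+ * loop. Success requires SELFTEST_MATCH_GOAL matches within
+ * SELFTEST_LOOPS_MAX steps.
+ */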
+static int n2rng_check_selftest_buffer(struct n2rng *np, unsigned long unit)
+{
+ u64 val;
+ int err, matches, limit;
+
+ switch (np->data->id) {
+ case N2_n2_rng:
+ case N2_vf_rng:
+ case N2_kt_rng:
+ case N2_m4_rng: /* yes, m4 uses the old value */
+ val = RNG_v1_SELFTEST_VAL;
+ break;
+ default:
+ val = RNG_v2_SELFTEST_VAL;
+ break;
+ }
+
+ matches = 0;
+ for (limit = 0; limit < SELFTEST_LOOPS_MAX; limit++) {
+ matches += n2rng_test_buffer_find(np, val);
+ if (matches >= SELFTEST_MATCH_GOAL)
+ break;
+ val = advance_polynomial(SELFTEST_POLY, val, 1);
+ }
+
+ err = 0;
+ if (limit >= SELFTEST_LOOPS_MAX) {
+ err = -ENODEV;
+ dev_err(&np->op->dev, "Selftest failed on unit %lu\n", unit);
+ n2rng_dump_test_buffer(np);
+ } else
+ dev_info(&np->op->dev, "Selftest passed on unit %lu\n", unit);
+
+ return err;
+}
+
+static int n2rng_control_selftest(struct n2rng *np, unsigned long unit)
+{
+ int err;
+ u64 base, base3;
+
+ switch (np->data->id) {
+ case N2_n2_rng:
+ case N2_vf_rng:
+ case N2_kt_rng:
+ base = RNG_v1_CTL_ASEL_NOOUT << RNG_v1_CTL_ASEL_SHIFT;
+ base3 = base | RNG_CTL_LFSR |
+ ((RNG_v1_SELFTEST_TICKS - 2) << RNG_v1_CTL_WAIT_SHIFT);
+ break;
+ case N2_m4_rng:
+ base = RNG_v2_CTL_ASEL_NOOUT << RNG_v2_CTL_ASEL_SHIFT;
+ base3 = base | RNG_CTL_LFSR |
+ ((RNG_v1_SELFTEST_TICKS - 2) << RNG_v2_CTL_WAIT_SHIFT);
+ break;
+ default:
+ base = RNG_v2_CTL_ASEL_NOOUT << RNG_v2_CTL_ASEL_SHIFT;
+ base3 = base | RNG_CTL_LFSR |
+ (RNG_v2_SELFTEST_TICKS << RNG_v2_CTL_WAIT_SHIFT);
+ break;
+ }
+
+ np->test_control[0] = base;
+ np->test_control[1] = base;
+ np->test_control[2] = base;
+ np->test_control[3] = base3;
+
+ err = n2rng_entropy_diag_read(np, unit, np->test_control,
+ HV_RNG_STATE_HEALTHCHECK,
+ np->test_buffer,
+ sizeof(np->test_buffer),
+ &np->units[unit].control[0],
+ np->hv_state);
+ if (err)
+ return err;
+
+ return n2rng_check_selftest_buffer(np, unit);
+}
+
+static int n2rng_control_check(struct n2rng *np)
+{
+ int i;
+
+ for (i = 0; i < np->num_units; i++) {
+ int err = n2rng_control_selftest(np, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/* The sanity checks passed, install the final configuration into the
+ * chip, it's ready to use.
+ */
+static int n2rng_control_configure_units(struct n2rng *np)
+{
+ int unit, err;
+
+ err = 0;
+ for (unit = 0; unit < np->num_units; unit++) {
+ struct n2rng_unit *up = &np->units[unit];
+ unsigned long ctl_ra = __pa(&up->control[0]);
+ int esrc;
+ u64 base, shift;
+
+ if (np->data->chip_version == 1) {
+ base = ((np->accum_cycles << RNG_v1_CTL_WAIT_SHIFT) |
+ (RNG_v1_CTL_ASEL_NOOUT << RNG_v1_CTL_ASEL_SHIFT) |
+ RNG_CTL_LFSR);
+ shift = RNG_v1_CTL_VCO_SHIFT;
+ } else {
+ base = ((np->accum_cycles << RNG_v2_CTL_WAIT_SHIFT) |
+ (RNG_v2_CTL_ASEL_NOOUT << RNG_v2_CTL_ASEL_SHIFT) |
+ RNG_CTL_LFSR);
+ shift = RNG_v2_CTL_VCO_SHIFT;
+ }
+
+		/* XXX This isn't the best. We should fetch a bunch
+		 * of words using each entropy source combined
+		 * with each VCO setting, and see which combinations
+		 * give the best random data.
+		 */
+ for (esrc = 0; esrc < 3; esrc++)
+ up->control[esrc] = base |
+ (esrc << shift) |
+ (RNG_CTL_ES1 << esrc);
+
+ up->control[3] = base |
+ (RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3);
+
+ err = n2rng_generic_write_control(np, ctl_ra, unit,
+ HV_RNG_STATE_CONFIGURED);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
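+/* Deferred probe/self-test work. Control domains run the self-test
+ * and program the final configuration; guest domains just verify
+ * that data can be read. On failure the work is rescheduled every
+ * two seconds until the retry budget runs out.
+ */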
+static void n2rng_work(struct work_struct *work)
+{
+ struct n2rng *np = container_of(work, struct n2rng, work.work);
+ int err = 0;
+ static int retries = 4;
+
+ if (!(np->flags & N2RNG_FLAG_CONTROL)) {
+ err = n2rng_guest_check(np);
+ } else {
+ preempt_disable();
+ err = n2rng_control_check(np);
+ preempt_enable();
+
+ if (!err)
+ err = n2rng_control_configure_units(np);
+ }
+
+ if (!err) {
+ np->flags |= N2RNG_FLAG_READY;
+ dev_info(&np->op->dev, "RNG ready\n");
+ }
+
+ if (--retries == 0)
+ dev_err(&np->op->dev, "Self-test retries failed, RNG not ready\n");
+ else if (err && !(np->flags & N2RNG_FLAG_SHUTDOWN))
+ schedule_delayed_work(&np->work, HZ * 2);
+}
+
+static void n2rng_driver_version(void)
+{
+ static int n2rng_version_printed;
+
+ if (n2rng_version_printed++ == 0)
+ pr_info("%s", version);
+}
+
+static const struct of_device_id n2rng_match[];
+static int n2rng_probe(struct platform_device *op)
+{
+ const struct of_device_id *match;
+ int err = -ENOMEM;
+ struct n2rng *np;
+
+ match = of_match_device(n2rng_match, &op->dev);
+ if (!match)
+ return -EINVAL;
+
+ n2rng_driver_version();
+ np = devm_kzalloc(&op->dev, sizeof(*np), GFP_KERNEL);
+ if (!np)
+ goto out;
+ np->op = op;
+ np->data = (struct n2rng_template *)match->data;
+
+ INIT_DELAYED_WORK(&np->work, n2rng_work);
+
+ if (np->data->multi_capable)
+ np->flags |= N2RNG_FLAG_MULTI;
+
+ err = -ENODEV;
+ np->hvapi_major = 2;
+ if (sun4v_hvapi_register(HV_GRP_RNG,
+ np->hvapi_major,
+ &np->hvapi_minor)) {
+ np->hvapi_major = 1;
+ if (sun4v_hvapi_register(HV_GRP_RNG,
+ np->hvapi_major,
+ &np->hvapi_minor)) {
+ dev_err(&op->dev, "Cannot register suitable "
+ "HVAPI version.\n");
+ goto out;
+ }
+ }
+
+ if (np->flags & N2RNG_FLAG_MULTI) {
+ if (np->hvapi_major < 2) {
+ dev_err(&op->dev, "multi-unit-capable RNG requires "
+ "HVAPI major version 2 or later, got %lu\n",
+ np->hvapi_major);
+ goto out_hvapi_unregister;
+ }
+ np->num_units = of_getintprop_default(op->dev.of_node,
+ "rng-#units", 0);
+ if (!np->num_units) {
+ dev_err(&op->dev, "VF RNG lacks rng-#units property\n");
+ goto out_hvapi_unregister;
+ }
+ } else {
+ np->num_units = 1;
+ }
+
+ dev_info(&op->dev, "Registered RNG HVAPI major %lu minor %lu\n",
+ np->hvapi_major, np->hvapi_minor);
+ np->units = devm_kcalloc(&op->dev, np->num_units, sizeof(*np->units),
+ GFP_KERNEL);
+ err = -ENOMEM;
+ if (!np->units)
+ goto out_hvapi_unregister;
+
+ err = n2rng_init_control(np);
+ if (err)
+ goto out_hvapi_unregister;
+
+ dev_info(&op->dev, "Found %s RNG, units: %d\n",
+ ((np->flags & N2RNG_FLAG_MULTI) ?
+ "multi-unit-capable" : "single-unit"),
+ np->num_units);
+
+ np->hwrng.name = DRV_MODULE_NAME;
+ np->hwrng.data_read = n2rng_data_read;
+ np->hwrng.priv = (unsigned long) np;
+
+ err = devm_hwrng_register(&op->dev, &np->hwrng);
+ if (err)
+ goto out_hvapi_unregister;
+
+ platform_set_drvdata(op, np);
+
+ schedule_delayed_work(&np->work, 0);
+
+ return 0;
+
+out_hvapi_unregister:
+ sun4v_hvapi_unregister(HV_GRP_RNG);
+
+out:
+ return err;
+}
+
+static int n2rng_remove(struct platform_device *op)
+{
+ struct n2rng *np = platform_get_drvdata(op);
+
+ np->flags |= N2RNG_FLAG_SHUTDOWN;
+
+ cancel_delayed_work_sync(&np->work);
+
+ sun4v_hvapi_unregister(HV_GRP_RNG);
+
+ return 0;
+}
+
+static struct n2rng_template n2_template = {
+ .id = N2_n2_rng,
+ .multi_capable = 0,
+ .chip_version = 1,
+};
+
+static struct n2rng_template vf_template = {
+ .id = N2_vf_rng,
+ .multi_capable = 1,
+ .chip_version = 1,
+};
+
+static struct n2rng_template kt_template = {
+ .id = N2_kt_rng,
+ .multi_capable = 1,
+ .chip_version = 1,
+};
+
+static struct n2rng_template m4_template = {
+ .id = N2_m4_rng,
+ .multi_capable = 1,
+ .chip_version = 2,
+};
+
+static struct n2rng_template m7_template = {
+ .id = N2_m7_rng,
+ .multi_capable = 1,
+ .chip_version = 2,
+};
+
+static const struct of_device_id n2rng_match[] = {
+ {
+ .name = "random-number-generator",
+ .compatible = "SUNW,n2-rng",
+ .data = &n2_template,
+ },
+ {
+ .name = "random-number-generator",
+ .compatible = "SUNW,vf-rng",
+ .data = &vf_template,
+ },
+ {
+ .name = "random-number-generator",
+ .compatible = "SUNW,kt-rng",
+ .data = &kt_template,
+ },
+ {
+ .name = "random-number-generator",
+ .compatible = "ORCL,m4-rng",
+ .data = &m4_template,
+ },
+ {
+ .name = "random-number-generator",
+ .compatible = "ORCL,m7-rng",
+ .data = &m7_template,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, n2rng_match);
+
+static struct platform_driver n2rng_driver = {
+ .driver = {
+ .name = "n2rng",
+ .of_match_table = n2rng_match,
+ },
+ .probe = n2rng_probe,
+ .remove = n2rng_remove,
+};
+
+module_platform_driver(n2rng_driver);
diff --git a/drivers/char/hw_random/n2rng.h b/drivers/char/hw_random/n2rng.h
new file mode 100644
index 0000000000..9a870f5dc3
--- /dev/null
+++ b/drivers/char/hw_random/n2rng.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* n2rng.h: Niagara2 RNG defines.
+ *
+ * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
+ */
+
+#ifndef _N2RNG_H
+#define _N2RNG_H
+
+/* ver1 devices - n2-rng, vf-rng, kt-rng */
+#define RNG_v1_CTL_WAIT 0x0000000001fffe00ULL /* Minimum wait time */
+#define RNG_v1_CTL_WAIT_SHIFT 9
+#define RNG_v1_CTL_BYPASS 0x0000000000000100ULL /* VCO voltage source */
+#define RNG_v1_CTL_VCO 0x00000000000000c0ULL /* VCO rate control */
+#define RNG_v1_CTL_VCO_SHIFT 6
+#define RNG_v1_CTL_ASEL 0x0000000000000030ULL /* Analog MUX select */
+#define RNG_v1_CTL_ASEL_SHIFT 4
+#define RNG_v1_CTL_ASEL_NOOUT 2
+
+/* these are the same in v2 as in v1 */
+#define RNG_CTL_LFSR 0x0000000000000008ULL /* Use LFSR or plain shift */
+#define RNG_CTL_ES3 0x0000000000000004ULL /* Enable entropy source 3 */
+#define RNG_CTL_ES2 0x0000000000000002ULL /* Enable entropy source 2 */
+#define RNG_CTL_ES1 0x0000000000000001ULL /* Enable entropy source 1 */
+
+/* ver2 devices - m4-rng, m7-rng */
+#define RNG_v2_CTL_WAIT 0x0000000007fff800ULL /* Minimum wait time */
+#define RNG_v2_CTL_WAIT_SHIFT 12
+#define RNG_v2_CTL_BYPASS 0x0000000000000400ULL /* VCO voltage source */
+#define RNG_v2_CTL_VCO 0x0000000000000300ULL /* VCO rate control */
+#define RNG_v2_CTL_VCO_SHIFT 9
+#define RNG_v2_CTL_PERF 0x0000000000000180ULL /* Perf */
+#define RNG_v2_CTL_ASEL 0x0000000000000070ULL /* Analog MUX select */
+#define RNG_v2_CTL_ASEL_SHIFT 4
+#define RNG_v2_CTL_ASEL_NOOUT 7
+
+
+#define HV_FAST_RNG_GET_DIAG_CTL 0x130
+#define HV_FAST_RNG_CTL_READ 0x131
+#define HV_FAST_RNG_CTL_WRITE 0x132
+#define HV_FAST_RNG_DATA_READ_DIAG 0x133
+#define HV_FAST_RNG_DATA_READ 0x134
+
+#define HV_RNG_STATE_UNCONFIGURED 0
+#define HV_RNG_STATE_CONFIGURED 1
+#define HV_RNG_STATE_HEALTHCHECK 2
+#define HV_RNG_STATE_ERROR 3
+
+#define HV_RNG_NUM_CONTROL 4
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_rng_get_diag_ctl(void);
+extern unsigned long sun4v_rng_ctl_read_v1(unsigned long ctl_regs_ra,
+ unsigned long *state,
+ unsigned long *tick_delta);
+extern unsigned long sun4v_rng_ctl_read_v2(unsigned long ctl_regs_ra,
+ unsigned long unit,
+ unsigned long *state,
+ unsigned long *tick_delta,
+ unsigned long *watchdog,
+ unsigned long *write_status);
+extern unsigned long sun4v_rng_ctl_write_v1(unsigned long ctl_regs_ra,
+ unsigned long state,
+ unsigned long write_timeout,
+ unsigned long *tick_delta);
+extern unsigned long sun4v_rng_ctl_write_v2(unsigned long ctl_regs_ra,
+ unsigned long state,
+ unsigned long write_timeout,
+ unsigned long unit);
+extern unsigned long sun4v_rng_data_read_diag_v1(unsigned long data_ra,
+ unsigned long len,
+ unsigned long *tick_delta);
+extern unsigned long sun4v_rng_data_read_diag_v2(unsigned long data_ra,
+ unsigned long len,
+ unsigned long unit,
+ unsigned long *tick_delta);
+extern unsigned long sun4v_rng_data_read(unsigned long data_ra,
+ unsigned long *tick_delta);
+
+enum n2rng_compat_id {
+ N2_n2_rng,
+ N2_vf_rng,
+ N2_kt_rng,
+ N2_m4_rng,
+ N2_m7_rng,
+};
+
+struct n2rng_template {
+ enum n2rng_compat_id id;
+ int multi_capable;
+ int chip_version;
+};
+
+struct n2rng_unit {
+ u64 control[HV_RNG_NUM_CONTROL];
+};
+
+struct n2rng {
+ struct platform_device *op;
+
+ unsigned long flags;
+#define N2RNG_FLAG_MULTI 0x00000001 /* Multi-unit capable RNG */
+#define N2RNG_FLAG_CONTROL 0x00000002 /* Operating in control domain */
+#define N2RNG_FLAG_READY 0x00000008 /* Ready for hw-rng layer */
+#define N2RNG_FLAG_SHUTDOWN 0x00000010 /* Driver unregistering */
+#define N2RNG_FLAG_BUFFER_VALID 0x00000020 /* u32 buffer holds valid data */
+
+ struct n2rng_template *data;
+ int num_units;
+ struct n2rng_unit *units;
+
+ struct hwrng hwrng;
+ u32 buffer;
+
+ /* Registered hypervisor group API major and minor version. */
+ unsigned long hvapi_major;
+ unsigned long hvapi_minor;
+
+ struct delayed_work work;
+
+ unsigned long hv_state; /* HV_RNG_STATE_foo */
+
+ unsigned long health_check_sec;
+ unsigned long accum_cycles;
+ unsigned long wd_timeo;
+#define N2RNG_HEALTH_CHECK_SEC_DEFAULT 0
+#define N2RNG_ACCUM_CYCLES_DEFAULT 2048
+#define N2RNG_WD_TIMEO_DEFAULT 0
+
+ u64 scratch_control[HV_RNG_NUM_CONTROL];
+
+#define RNG_v1_SELFTEST_TICKS 38859
+#define RNG_v1_SELFTEST_VAL ((u64)0xB8820C7BD387E32C)
+#define RNG_v2_SELFTEST_TICKS 64
+#define RNG_v2_SELFTEST_VAL ((u64)0xffffffffffffffff)
+#define SELFTEST_POLY ((u64)0x231DCEE91262B8A3)
+#define SELFTEST_MATCH_GOAL 6
+#define SELFTEST_LOOPS_MAX 40000
+#define SELFTEST_BUFFER_WORDS 8
+
+ u64 test_data;
+ u64 test_control[HV_RNG_NUM_CONTROL];
+ u64 test_buffer[SELFTEST_BUFFER_WORDS];
+};
+
+#define N2RNG_BLOCK_LIMIT 60000
+#define N2RNG_BUSY_LIMIT 100
+#define N2RNG_HCHECK_LIMIT 100
+
+#endif /* !(__ASSEMBLY__) */
+
+#endif /* _N2RNG_H */
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
new file mode 100644
index 0000000000..8c6a40d6ce
--- /dev/null
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Nomadik RNG support
+ * Copyright 2009 Alessandro Rubini
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ void __iomem *base = (void __iomem *)rng->priv;
+
+ /*
+ * The register is 32 bits and gives 16 random bits (low half).
+ * A subsequent read will delay the core for 400ns, so we just read
+	 * once and accept the very small (and very unlikely) delay, even if wait==0.
+ */
+ *(u16 *)data = __raw_readl(base + 8) & 0xffff;
+ return 2;
+}
+
+/* we have at most one RNG per machine, granted */
+static struct hwrng nmk_rng = {
+ .name = "nomadik",
+ .read = nmk_rng_read,
+};
+
+static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
+{
+ struct clk *rng_clk;
+ void __iomem *base;
+ int ret;
+
+ rng_clk = devm_clk_get_enabled(&dev->dev, NULL);
+ if (IS_ERR(rng_clk))
+ return dev_err_probe(&dev->dev, PTR_ERR(rng_clk), "could not get rng clock\n");
+
+ ret = amba_request_regions(dev, dev->dev.init_name);
+ if (ret)
+ return ret;
+ ret = -ENOMEM;
+ base = devm_ioremap(&dev->dev, dev->res.start,
+ resource_size(&dev->res));
+ if (!base)
+ goto out_release;
+ nmk_rng.priv = (unsigned long)base;
+ ret = devm_hwrng_register(&dev->dev, &nmk_rng);
+ if (ret)
+ goto out_release;
+ return 0;
+
+out_release:
+ amba_release_regions(dev);
+ return ret;
+}
+
+static void nmk_rng_remove(struct amba_device *dev)
+{
+ amba_release_regions(dev);
+}
+
+static const struct amba_id nmk_rng_ids[] = {
+ {
+ .id = 0x000805e1,
+ .mask = 0x000fffff, /* top bits are rev and cfg: accept all */
+ },
+ {0, 0},
+};
+
+MODULE_DEVICE_TABLE(amba, nmk_rng_ids);
+
+static struct amba_driver nmk_rng_driver = {
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "rng",
+ },
+ .probe = nmk_rng_probe,
+ .remove = nmk_rng_remove,
+ .id_table = nmk_rng_ids,
+};
+
+module_amba_driver(nmk_rng_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
new file mode 100644
index 0000000000..8a304b7542
--- /dev/null
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Nuvoton Technology corporation.
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+
+#define NPCM_RNGCS_REG 0x00 /* Control and status register */
+#define NPCM_RNGD_REG 0x04 /* Data register */
+#define NPCM_RNGMODE_REG 0x08 /* Mode register */
+
+#define NPCM_RNG_CLK_SET_62_5MHZ BIT(2) /* 60-80 MHz */
+#define NPCM_RNG_CLK_SET_25MHZ GENMASK(4, 3) /* 20-25 MHz */
+#define NPCM_RNG_DATA_VALID BIT(1)
+#define NPCM_RNG_ENABLE BIT(0)
+#define NPCM_RNG_M1ROSEL BIT(1)
+
+#define NPCM_RNG_TIMEOUT_USEC 20000
+#define NPCM_RNG_POLL_USEC 1000
+
+#define to_npcm_rng(p) container_of(p, struct npcm_rng, rng)
+
+struct npcm_rng {
+ void __iomem *base;
+ struct hwrng rng;
+ u32 clkp;
+};
+
+static int npcm_rng_init(struct hwrng *rng)
+{
+ struct npcm_rng *priv = to_npcm_rng(rng);
+
+ writel(priv->clkp | NPCM_RNG_ENABLE, priv->base + NPCM_RNGCS_REG);
+
+ return 0;
+}
+
+static void npcm_rng_cleanup(struct hwrng *rng)
+{
+ struct npcm_rng *priv = to_npcm_rng(rng);
+
+ writel(priv->clkp, priv->base + NPCM_RNGCS_REG);
+}
+
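+/* Read up to 'max' bytes, one byte at a time from the data register,
+ * polling for DATA_VALID when 'wait' is set and bailing out
+ * immediately otherwise.
+ */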
+static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct npcm_rng *priv = to_npcm_rng(rng);
+ int retval = 0;
+ int ready;
+
+ pm_runtime_get_sync((struct device *)priv->rng.priv);
+
+ while (max) {
+ if (wait) {
+ if (readb_poll_timeout(priv->base + NPCM_RNGCS_REG,
+ ready,
+ ready & NPCM_RNG_DATA_VALID,
+ NPCM_RNG_POLL_USEC,
+ NPCM_RNG_TIMEOUT_USEC))
+ break;
+ } else {
+ if ((readb(priv->base + NPCM_RNGCS_REG) &
+ NPCM_RNG_DATA_VALID) == 0)
+ break;
+ }
+
+ *(u8 *)buf = readb(priv->base + NPCM_RNGD_REG);
+ retval++;
+ buf++;
+ max--;
+ }
+
+ pm_runtime_mark_last_busy((struct device *)priv->rng.priv);
+ pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv);
+
+ return retval || !wait ? retval : -EIO;
+}
+
+static int npcm_rng_probe(struct platform_device *pdev)
+{
+ struct npcm_rng *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ dev_set_drvdata(&pdev->dev, priv);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+#ifndef CONFIG_PM
+ priv->rng.init = npcm_rng_init;
+ priv->rng.cleanup = npcm_rng_cleanup;
+#endif
+ priv->rng.name = pdev->name;
+ priv->rng.read = npcm_rng_read;
+ priv->rng.priv = (unsigned long)&pdev->dev;
+ priv->clkp = (u32)(uintptr_t)of_device_get_match_data(&pdev->dev);
+
+ writel(NPCM_RNG_M1ROSEL, priv->base + NPCM_RNGMODE_REG);
+
+ ret = devm_hwrng_register(&pdev->dev, &priv->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register rng device: %d\n",
+ ret);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int npcm_rng_remove(struct platform_device *pdev)
+{
+ struct npcm_rng *priv = platform_get_drvdata(pdev);
+
+ devm_hwrng_unregister(&pdev->dev, &priv->rng);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int npcm_rng_runtime_suspend(struct device *dev)
+{
+ struct npcm_rng *priv = dev_get_drvdata(dev);
+
+ npcm_rng_cleanup(&priv->rng);
+
+ return 0;
+}
+
+static int npcm_rng_runtime_resume(struct device *dev)
+{
+ struct npcm_rng *priv = dev_get_drvdata(dev);
+
+ return npcm_rng_init(&priv->rng);
+}
+#endif
+
+static const struct dev_pm_ops npcm_rng_pm_ops = {
+ SET_RUNTIME_PM_OPS(npcm_rng_runtime_suspend,
+ npcm_rng_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static const struct of_device_id rng_dt_id[] __maybe_unused = {
+ { .compatible = "nuvoton,npcm750-rng",
+ .data = (void *)NPCM_RNG_CLK_SET_25MHZ },
+ { .compatible = "nuvoton,npcm845-rng",
+ .data = (void *)NPCM_RNG_CLK_SET_62_5MHZ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rng_dt_id);
+
+static struct platform_driver npcm_rng_driver = {
+ .driver = {
+ .name = "npcm-rng",
+ .pm = &npcm_rng_pm_ops,
+ .of_match_table = of_match_ptr(rng_dt_id),
+ },
+ .probe = npcm_rng_probe,
+ .remove = npcm_rng_remove,
+};
+
+module_platform_driver(npcm_rng_driver);
+
+MODULE_DESCRIPTION("Nuvoton NPCM Random Number Generator Driver");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
new file mode 100644
index 0000000000..8561a09b46
--- /dev/null
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -0,0 +1,118 @@
+/*
+ * Hardware Random Number Generator support for Cavium Networks
+ * Octeon processor family.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Cavium Networks
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/gfp.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-rnm-defs.h>
+
+struct octeon_rng {
+ struct hwrng ops;
+ void __iomem *control_status;
+ void __iomem *result;
+};
+
+static int octeon_rng_init(struct hwrng *rng)
+{
+ union cvmx_rnm_ctl_status ctl;
+ struct octeon_rng *p = container_of(rng, struct octeon_rng, ops);
+
+ ctl.u64 = 0;
+ ctl.s.ent_en = 1; /* Enable the entropy source. */
+ ctl.s.rng_en = 1; /* Enable the RNG hardware. */
+ cvmx_write_csr((__force u64)p->control_status, ctl.u64);
+ return 0;
+}
+
+static void octeon_rng_cleanup(struct hwrng *rng)
+{
+ union cvmx_rnm_ctl_status ctl;
+ struct octeon_rng *p = container_of(rng, struct octeon_rng, ops);
+
+ ctl.u64 = 0;
+ /* Disable everything. */
+ cvmx_write_csr((__force u64)p->control_status, ctl.u64);
+}
+
+static int octeon_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ struct octeon_rng *p = container_of(rng, struct octeon_rng, ops);
+
+ *data = cvmx_read64_uint32((__force u64)p->result);
+ return sizeof(u32);
+}
+
+static int octeon_rng_probe(struct platform_device *pdev)
+{
+ struct resource *res_ports;
+ struct resource *res_result;
+ struct octeon_rng *rng;
+ int ret;
+ struct hwrng ops = {
+ .name = "octeon",
+ .init = octeon_rng_init,
+ .cleanup = octeon_rng_cleanup,
+ .data_read = octeon_rng_data_read
+ };
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ res_ports = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res_ports)
+ return -ENOENT;
+
+ res_result = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_result)
+ return -ENOENT;
+
+
+ rng->control_status = devm_ioremap(&pdev->dev,
+ res_ports->start,
+ sizeof(u64));
+ if (!rng->control_status)
+ return -ENOENT;
+
+ rng->result = devm_ioremap(&pdev->dev,
+ res_result->start,
+ sizeof(u64));
+ if (!rng->result)
+ return -ENOENT;
+
+ rng->ops = ops;
+
+ platform_set_drvdata(pdev, &rng->ops);
+ ret = devm_hwrng_register(&pdev->dev, &rng->ops);
+ if (ret)
+ return -ENOENT;
+
+ dev_info(&pdev->dev, "Octeon Random Number Generator\n");
+
+ return 0;
+}
+
+static struct platform_driver octeon_rng_driver = {
+ .driver = {
+ .name = "octeon_rng",
+ },
+ .probe = octeon_rng_probe,
+};
+
+module_platform_driver(octeon_rng_driver);
+
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
new file mode 100644
index 0000000000..be03f76a2a
--- /dev/null
+++ b/drivers/char/hw_random/omap-rng.c
@@ -0,0 +1,569 @@
+/*
+ * omap-rng.c - RNG driver for TI OMAP CPU family
+ *
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * Mostly based on original driver:
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#define RNG_REG_STATUS_RDY (1 << 0)
+
+#define RNG_REG_INTACK_RDY_MASK (1 << 0)
+#define RNG_REG_INTACK_SHUTDOWN_OFLO_MASK (1 << 1)
+#define RNG_SHUTDOWN_OFLO_MASK (1 << 1)
+
+#define RNG_CONTROL_STARTUP_CYCLES_SHIFT 16
+#define RNG_CONTROL_STARTUP_CYCLES_MASK (0xffff << 16)
+#define RNG_CONTROL_ENABLE_TRNG_SHIFT 10
+#define RNG_CONTROL_ENABLE_TRNG_MASK (1 << 10)
+
+#define RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT 16
+#define RNG_CONFIG_MAX_REFIL_CYCLES_MASK (0xffff << 16)
+#define RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT 0
+#define RNG_CONFIG_MIN_REFIL_CYCLES_MASK (0xff << 0)
+
+#define RNG_CONTROL_STARTUP_CYCLES 0xff
+#define RNG_CONFIG_MIN_REFIL_CYCLES 0x21
+#define RNG_CONFIG_MAX_REFIL_CYCLES 0x22
+
+#define RNG_ALARMCNT_ALARM_TH_SHIFT 0x0
+#define RNG_ALARMCNT_ALARM_TH_MASK (0xff << 0)
+#define RNG_ALARMCNT_SHUTDOWN_TH_SHIFT 16
+#define RNG_ALARMCNT_SHUTDOWN_TH_MASK (0x1f << 16)
+#define RNG_ALARM_THRESHOLD 0xff
+#define RNG_SHUTDOWN_THRESHOLD 0x4
+
+#define RNG_REG_FROENABLE_MASK 0xffffff
+#define RNG_REG_FRODETUNE_MASK 0xffffff
+
+#define OMAP2_RNG_OUTPUT_SIZE 0x4
+#define OMAP4_RNG_OUTPUT_SIZE 0x8
+#define EIP76_RNG_OUTPUT_SIZE 0x10
+
+/*
+ * EIP76 RNG takes approx. 700us to produce 16 bytes of output data
+ * as per testing results. To account for the unreliability of
+ * udelay(), we keep the timeout at 1000us.
+ */
+#define RNG_DATA_FILL_TIMEOUT 100
+
+enum {
+ RNG_OUTPUT_0_REG = 0,
+ RNG_OUTPUT_1_REG,
+ RNG_OUTPUT_2_REG,
+ RNG_OUTPUT_3_REG,
+ RNG_STATUS_REG,
+ RNG_INTMASK_REG,
+ RNG_INTACK_REG,
+ RNG_CONTROL_REG,
+ RNG_CONFIG_REG,
+ RNG_ALARMCNT_REG,
+ RNG_FROENABLE_REG,
+ RNG_FRODETUNE_REG,
+ RNG_ALARMMASK_REG,
+ RNG_ALARMSTOP_REG,
+ RNG_REV_REG,
+ RNG_SYSCONFIG_REG,
+};
+
+static const u16 reg_map_omap2[] = {
+ [RNG_OUTPUT_0_REG] = 0x0,
+ [RNG_STATUS_REG] = 0x4,
+ [RNG_CONFIG_REG] = 0x28,
+ [RNG_REV_REG] = 0x3c,
+ [RNG_SYSCONFIG_REG] = 0x40,
+};
+
+static const u16 reg_map_omap4[] = {
+ [RNG_OUTPUT_0_REG] = 0x0,
+ [RNG_OUTPUT_1_REG] = 0x4,
+ [RNG_STATUS_REG] = 0x8,
+ [RNG_INTMASK_REG] = 0xc,
+ [RNG_INTACK_REG] = 0x10,
+ [RNG_CONTROL_REG] = 0x14,
+ [RNG_CONFIG_REG] = 0x18,
+ [RNG_ALARMCNT_REG] = 0x1c,
+ [RNG_FROENABLE_REG] = 0x20,
+ [RNG_FRODETUNE_REG] = 0x24,
+ [RNG_ALARMMASK_REG] = 0x28,
+ [RNG_ALARMSTOP_REG] = 0x2c,
+ [RNG_REV_REG] = 0x1FE0,
+ [RNG_SYSCONFIG_REG] = 0x1FE4,
+};
+
+static const u16 reg_map_eip76[] = {
+ [RNG_OUTPUT_0_REG] = 0x0,
+ [RNG_OUTPUT_1_REG] = 0x4,
+ [RNG_OUTPUT_2_REG] = 0x8,
+ [RNG_OUTPUT_3_REG] = 0xc,
+ [RNG_STATUS_REG] = 0x10,
+ [RNG_INTACK_REG] = 0x10,
+ [RNG_CONTROL_REG] = 0x14,
+ [RNG_CONFIG_REG] = 0x18,
+ [RNG_ALARMCNT_REG] = 0x1c,
+ [RNG_FROENABLE_REG] = 0x20,
+ [RNG_FRODETUNE_REG] = 0x24,
+ [RNG_ALARMMASK_REG] = 0x28,
+ [RNG_ALARMSTOP_REG] = 0x2c,
+ [RNG_REV_REG] = 0x7c,
+};
+
+struct omap_rng_dev;
+/**
+ * struct omap_rng_pdata - RNG IP block-specific data
+ * @regs: Pointer to the register offsets structure.
+ * @data_size: No. of bytes in RNG output.
+ * @data_present: Callback to determine if data is available.
+ * @init: Callback for IP specific initialization sequence.
+ * @cleanup: Callback for IP specific cleanup sequence.
+ */
+struct omap_rng_pdata {
+ u16 *regs;
+ u32 data_size;
+ u32 (*data_present)(struct omap_rng_dev *priv);
+ int (*init)(struct omap_rng_dev *priv);
+ void (*cleanup)(struct omap_rng_dev *priv);
+};
+
+struct omap_rng_dev {
+ void __iomem *base;
+ struct device *dev;
+ const struct omap_rng_pdata *pdata;
+ struct hwrng rng;
+ struct clk *clk;
+ struct clk *clk_reg;
+};
+
+static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg)
+{
+ return __raw_readl(priv->base + priv->pdata->regs[reg]);
+}
+
+static inline void omap_rng_write(struct omap_rng_dev *priv, u16 reg,
+ u32 val)
+{
+ __raw_writel(val, priv->base + priv->pdata->regs[reg]);
+}
+
+
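+/* Poll for output-ready (up to RNG_DATA_FILL_TIMEOUT * 10us when
+ * 'wait' is set), copy one full block of data_size bytes from the
+ * output registers, and acknowledge the ready interrupt on IPs that
+ * have an INTACK register.
+ */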
+static int omap_rng_do_read(struct hwrng *rng, void *data, size_t max,
+ bool wait)
+{
+ struct omap_rng_dev *priv;
+ int i, present;
+
+ priv = (struct omap_rng_dev *)rng->priv;
+
+ if (max < priv->pdata->data_size)
+ return 0;
+
+ for (i = 0; i < RNG_DATA_FILL_TIMEOUT; i++) {
+ present = priv->pdata->data_present(priv);
+ if (present || !wait)
+ break;
+
+ udelay(10);
+ }
+ if (!present)
+ return 0;
+
+ memcpy_fromio(data, priv->base + priv->pdata->regs[RNG_OUTPUT_0_REG],
+ priv->pdata->data_size);
+
+ if (priv->pdata->regs[RNG_INTACK_REG])
+ omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_RDY_MASK);
+
+ return priv->pdata->data_size;
+}
+
+static int omap_rng_init(struct hwrng *rng)
+{
+ struct omap_rng_dev *priv;
+
+ priv = (struct omap_rng_dev *)rng->priv;
+ return priv->pdata->init(priv);
+}
+
+static void omap_rng_cleanup(struct hwrng *rng)
+{
+ struct omap_rng_dev *priv;
+
+ priv = (struct omap_rng_dev *)rng->priv;
+ priv->pdata->cleanup(priv);
+}
+
+
+static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv)
+{
+ return omap_rng_read(priv, RNG_STATUS_REG) ? 0 : 1;
+}
+
+static int omap2_rng_init(struct omap_rng_dev *priv)
+{
+ omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x1);
+ return 0;
+}
+
+static void omap2_rng_cleanup(struct omap_rng_dev *priv)
+{
+ omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x0);
+}
+
+static struct omap_rng_pdata omap2_rng_pdata = {
+ .regs = (u16 *)reg_map_omap2,
+ .data_size = OMAP2_RNG_OUTPUT_SIZE,
+ .data_present = omap2_rng_data_present,
+ .init = omap2_rng_init,
+ .cleanup = omap2_rng_cleanup,
+};
+
+static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv)
+{
+ return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY;
+}
+
+static int eip76_rng_init(struct omap_rng_dev *priv)
+{
+ u32 val;
+
+ /* Return if RNG is already running. */
+ if (omap_rng_read(priv, RNG_CONTROL_REG) & RNG_CONTROL_ENABLE_TRNG_MASK)
+ return 0;
+
+ /* Number of 512 bit blocks of raw Noise Source output data that must
+ * be processed by either the Conditioning Function or the
+ * SP 800-90 DRBG ‘BC_DF’ functionality to yield a ‘full entropy’
+ * output value.
+ */
+ val = 0x5 << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT;
+
+ /* Number of FRO samples that are XOR-ed together into one bit to be
+	 * shifted into the main shift register.
+ */
+ val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT;
+ omap_rng_write(priv, RNG_CONFIG_REG, val);
+
+ /* Enable all available FROs */
+ omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0);
+ omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK);
+
+ /* Enable TRNG */
+ val = RNG_CONTROL_ENABLE_TRNG_MASK;
+ omap_rng_write(priv, RNG_CONTROL_REG, val);
+
+ return 0;
+}
+
+static int omap4_rng_init(struct omap_rng_dev *priv)
+{
+ u32 val;
+
+ /* Return if RNG is already running. */
+ if (omap_rng_read(priv, RNG_CONTROL_REG) & RNG_CONTROL_ENABLE_TRNG_MASK)
+ return 0;
+
+ val = RNG_CONFIG_MIN_REFIL_CYCLES << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT;
+ val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT;
+ omap_rng_write(priv, RNG_CONFIG_REG, val);
+
+ omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0);
+ omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK);
+ val = RNG_ALARM_THRESHOLD << RNG_ALARMCNT_ALARM_TH_SHIFT;
+ val |= RNG_SHUTDOWN_THRESHOLD << RNG_ALARMCNT_SHUTDOWN_TH_SHIFT;
+ omap_rng_write(priv, RNG_ALARMCNT_REG, val);
+
+ val = RNG_CONTROL_STARTUP_CYCLES << RNG_CONTROL_STARTUP_CYCLES_SHIFT;
+ val |= RNG_CONTROL_ENABLE_TRNG_MASK;
+ omap_rng_write(priv, RNG_CONTROL_REG, val);
+
+ return 0;
+}
+
+static void omap4_rng_cleanup(struct omap_rng_dev *priv)
+{
+ int val;
+
+ val = omap_rng_read(priv, RNG_CONTROL_REG);
+ val &= ~RNG_CONTROL_ENABLE_TRNG_MASK;
+ omap_rng_write(priv, RNG_CONTROL_REG, val);
+}
+
+static irqreturn_t omap4_rng_irq(int irq, void *dev_id)
+{
+ struct omap_rng_dev *priv = dev_id;
+ u32 fro_detune, fro_enable;
+
+ /*
+	 * Interrupt raised by an FRO shutdown threshold; do the following:
+	 * 1. Clear the alarm events.
+	 * 2. De-tune the FROs which are shut down.
+	 * 3. Re-enable the shut-down FROs.
+ */
+ omap_rng_write(priv, RNG_ALARMMASK_REG, 0x0);
+ omap_rng_write(priv, RNG_ALARMSTOP_REG, 0x0);
+
+ fro_enable = omap_rng_read(priv, RNG_FROENABLE_REG);
+ fro_detune = ~fro_enable & RNG_REG_FRODETUNE_MASK;
+ fro_detune = fro_detune | omap_rng_read(priv, RNG_FRODETUNE_REG);
+ fro_enable = RNG_REG_FROENABLE_MASK;
+
+ omap_rng_write(priv, RNG_FRODETUNE_REG, fro_detune);
+ omap_rng_write(priv, RNG_FROENABLE_REG, fro_enable);
+
+ omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_SHUTDOWN_OFLO_MASK);
+
+ return IRQ_HANDLED;
+}
+
+static struct omap_rng_pdata omap4_rng_pdata = {
+ .regs = (u16 *)reg_map_omap4,
+ .data_size = OMAP4_RNG_OUTPUT_SIZE,
+ .data_present = omap4_rng_data_present,
+ .init = omap4_rng_init,
+ .cleanup = omap4_rng_cleanup,
+};
+
+static struct omap_rng_pdata eip76_rng_pdata = {
+ .regs = (u16 *)reg_map_eip76,
+ .data_size = EIP76_RNG_OUTPUT_SIZE,
+ .data_present = omap4_rng_data_present,
+ .init = eip76_rng_init,
+ .cleanup = omap4_rng_cleanup,
+};
+
+static const struct of_device_id omap_rng_of_match[] __maybe_unused = {
+ {
+ .compatible = "ti,omap2-rng",
+ .data = &omap2_rng_pdata,
+ },
+ {
+ .compatible = "ti,omap4-rng",
+ .data = &omap4_rng_pdata,
+ },
+ {
+ .compatible = "inside-secure,safexcel-eip76",
+ .data = &eip76_rng_pdata,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_rng_of_match);
+
+static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int irq, err;
+
+ priv->pdata = of_device_get_match_data(dev);
+ if (!priv->pdata)
+ return -ENODEV;
+
+
+ if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
+ of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, omap4_rng_irq,
+ IRQF_TRIGGER_NONE, dev_name(dev), priv);
+ if (err) {
+ dev_err(dev, "unable to request irq %d, err = %d\n",
+ irq, err);
+ return err;
+ }
+
+ /*
+ * On OMAP4, enabling the shutdown_oflo interrupt is
+ * done in the interrupt mask register. There is no
+ * such register on EIP76, and it's enabled by the
+		 * same bit in the control register.
+ */
+ if (priv->pdata->regs[RNG_INTMASK_REG])
+ omap_rng_write(priv, RNG_INTMASK_REG,
+ RNG_SHUTDOWN_OFLO_MASK);
+ else
+ omap_rng_write(priv, RNG_CONTROL_REG,
+ RNG_SHUTDOWN_OFLO_MASK);
+ }
+ return 0;
+}
+
+static int get_omap_rng_device_details(struct omap_rng_dev *omap_rng)
+{
+ /* Only OMAP2/3 can be non-DT */
+ omap_rng->pdata = &omap2_rng_pdata;
+ return 0;
+}
+
+static int omap_rng_probe(struct platform_device *pdev)
+{
+ struct omap_rng_dev *priv;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct omap_rng_dev), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->rng.read = omap_rng_do_read;
+ priv->rng.init = omap_rng_init;
+ priv->rng.cleanup = omap_rng_cleanup;
+ priv->rng.quality = 900;
+
+ priv->rng.priv = (unsigned long)priv;
+ platform_set_drvdata(pdev, priv);
+ priv->dev = dev;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto err_ioremap;
+ }
+
+ priv->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!priv->rng.name) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
+ goto err_ioremap;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (!IS_ERR(priv->clk)) {
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Unable to enable the clk: %d\n", ret);
+ goto err_register;
+ }
+ }
+
+ priv->clk_reg = devm_clk_get(&pdev->dev, "reg");
+ if (PTR_ERR(priv->clk_reg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (!IS_ERR(priv->clk_reg)) {
+ ret = clk_prepare_enable(priv->clk_reg);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Unable to enable the register clk: %d\n",
+ ret);
+ goto err_register;
+ }
+ }
+
+ ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
+ get_omap_rng_device_details(priv);
+ if (ret)
+ goto err_register;
+
+ ret = devm_hwrng_register(&pdev->dev, &priv->rng);
+ if (ret)
+ goto err_register;
+
+ dev_info(&pdev->dev, "Random Number Generator ver. %02x\n",
+ omap_rng_read(priv, RNG_REV_REG));
+
+ return 0;
+
+err_register:
+ priv->base = NULL;
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ clk_disable_unprepare(priv->clk_reg);
+ clk_disable_unprepare(priv->clk);
+err_ioremap:
+ dev_err(dev, "initialization failed.\n");
+ return ret;
+}
+
+static int omap_rng_remove(struct platform_device *pdev)
+{
+ struct omap_rng_dev *priv = platform_get_drvdata(pdev);
+
+
+ priv->pdata->cleanup(priv);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_reg);
+
+ return 0;
+}
+
+static int __maybe_unused omap_rng_suspend(struct device *dev)
+{
+ struct omap_rng_dev *priv = dev_get_drvdata(dev);
+
+ priv->pdata->cleanup(priv);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int __maybe_unused omap_rng_resume(struct device *dev)
+{
+ struct omap_rng_dev *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to runtime_get device: %d\n", ret);
+ return ret;
+ }
+
+ priv->pdata->init(priv);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(omap_rng_pm, omap_rng_suspend, omap_rng_resume);
+
+static struct platform_driver omap_rng_driver = {
+ .driver = {
+ .name = "omap_rng",
+ .pm = &omap_rng_pm,
+ .of_match_table = of_match_ptr(omap_rng_of_match),
+ },
+ .probe = omap_rng_probe,
+ .remove = omap_rng_remove,
+};
+
+module_platform_driver(omap_rng_driver);
+MODULE_ALIAS("platform:omap_rng");
+MODULE_AUTHOR("Deepak Saxena (and others)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
new file mode 100644
index 0000000000..18dc46b1b5
--- /dev/null
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -0,0 +1,181 @@
+/*
+ * omap3-rom-rng.c - RNG driver for TI OMAP3 CPU family
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Juha Yrjola <juha.yrjola@solidboot.com>
+ *
+ * Copyright (C) 2013 Pali Rohár <pali@kernel.org>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/hw_random.h>
+#include <linux/workqueue.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define RNG_RESET 0x01
+#define RNG_GEN_PRNG_HW_INIT 0x02
+#define RNG_GEN_HW 0x08
+
+struct omap_rom_rng {
+ struct clk *clk;
+ struct device *dev;
+ struct hwrng ops;
+ u32 (*rom_rng_call)(u32 ptr, u32 count, u32 flag);
+};
+
+static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w)
+{
+ struct omap_rom_rng *ddata;
+ u32 ptr;
+ int r;
+
+ ddata = (struct omap_rom_rng *)rng->priv;
+
+ r = pm_runtime_get_sync(ddata->dev);
+ if (r < 0) {
+ pm_runtime_put_noidle(ddata->dev);
+
+ return r;
+ }
+
+ ptr = virt_to_phys(data);
+ r = ddata->rom_rng_call(ptr, 4, RNG_GEN_HW);
+ if (r != 0)
+ r = -EINVAL;
+ else
+ r = 4;
+
+ pm_runtime_mark_last_busy(ddata->dev);
+ pm_runtime_put_autosuspend(ddata->dev);
+
+ return r;
+}
+
+static int __maybe_unused omap_rom_rng_runtime_suspend(struct device *dev)
+{
+ struct omap_rom_rng *ddata;
+ int r;
+
+ ddata = dev_get_drvdata(dev);
+
+ r = ddata->rom_rng_call(0, 0, RNG_RESET);
+ if (r != 0)
+ dev_err(dev, "reset failed: %d\n", r);
+
+ clk_disable_unprepare(ddata->clk);
+
+ return 0;
+}
+
+static int __maybe_unused omap_rom_rng_runtime_resume(struct device *dev)
+{
+ struct omap_rom_rng *ddata;
+ int r;
+
+ ddata = dev_get_drvdata(dev);
+
+ r = clk_prepare_enable(ddata->clk);
+ if (r < 0)
+ return r;
+
+ r = ddata->rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
+ if (r != 0) {
+ clk_disable_unprepare(ddata->clk);
+ dev_err(dev, "HW init failed: %d\n", r);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void omap_rom_rng_finish(void *data)
+{
+ struct omap_rom_rng *ddata = data;
+
+ pm_runtime_dont_use_autosuspend(ddata->dev);
+ pm_runtime_disable(ddata->dev);
+}
+
+static int omap3_rom_rng_probe(struct platform_device *pdev)
+{
+ struct omap_rom_rng *ddata;
+ int ret = 0;
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ ddata->dev = &pdev->dev;
+ ddata->ops.priv = (unsigned long)ddata;
+ ddata->ops.name = "omap3-rom";
+ ddata->ops.read = of_device_get_match_data(&pdev->dev);
+ ddata->ops.quality = 900;
+ if (!ddata->ops.read) {
+ dev_err(&pdev->dev, "missing rom code handler\n");
+
+ return -ENODEV;
+ }
+ dev_set_drvdata(ddata->dev, ddata);
+
+ ddata->rom_rng_call = pdev->dev.platform_data;
+ if (!ddata->rom_rng_call) {
+ dev_err(ddata->dev, "rom_rng_call is NULL\n");
+ return -EINVAL;
+ }
+
+ ddata->clk = devm_clk_get(ddata->dev, "ick");
+ if (IS_ERR(ddata->clk)) {
+ dev_err(ddata->dev, "unable to get RNG clock\n");
+ return PTR_ERR(ddata->clk);
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
+ ret = devm_add_action_or_reset(ddata->dev, omap_rom_rng_finish,
+ ddata);
+ if (ret)
+ return ret;
+
+ return devm_hwrng_register(ddata->dev, &ddata->ops);
+}
+
+static const struct of_device_id omap_rom_rng_match[] = {
+ { .compatible = "nokia,n900-rom-rng", .data = omap3_rom_rng_read, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, omap_rom_rng_match);
+
+static const struct dev_pm_ops omap_rom_rng_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap_rom_rng_runtime_suspend,
+ omap_rom_rng_runtime_resume)
+};
+
+static struct platform_driver omap3_rom_rng_driver = {
+ .driver = {
+ .name = "omap3-rom-rng",
+ .of_match_table = omap_rom_rng_match,
+ .pm = &omap_rom_rng_pm_ops,
+ },
+ .probe = omap3_rom_rng_probe,
+};
+
+module_platform_driver(omap3_rom_rng_driver);
+
+MODULE_ALIAS("platform:omap3-rom-rng");
+MODULE_AUTHOR("Juha Yrjola");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/optee-rng.c b/drivers/char/hw_random/optee-rng.c
new file mode 100644
index 0000000000..96b5d546d1
--- /dev/null
+++ b/drivers/char/hw_random/optee-rng.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2019 Linaro Ltd.
+ */
+
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/hw_random.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+
+#define DRIVER_NAME "optee-rng"
+
+#define TEE_ERROR_HEALTH_TEST_FAIL 0x00000001
+
+/*
+ * TA_CMD_GET_ENTROPY - Get Entropy from RNG
+ *
+ * param[0] (inout memref) - Entropy buffer memory reference
+ * param[1] unused
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ * TEE_ERROR_NOT_SUPPORTED - Requested entropy size greater than size of pool
+ * TEE_ERROR_HEALTH_TEST_FAIL - Continuous health testing failed
+ */
+#define TA_CMD_GET_ENTROPY 0x0
+
+/*
+ * TA_CMD_GET_RNG_INFO - Get RNG information
+ *
+ * param[0] (out value) - value.a: RNG data-rate in bytes per second
+ * value.b: Quality/Entropy per 1024 bit of data
+ * param[1] unused
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ */
+#define TA_CMD_GET_RNG_INFO 0x1
+
+#define MAX_ENTROPY_REQ_SZ (4 * 1024)
+
+/**
+ * struct optee_rng_private - OP-TEE Random Number Generator private data
+ * @dev: OP-TEE based RNG device.
+ * @ctx: OP-TEE context handler.
+ * @session_id: RNG TA session identifier.
+ * @data_rate: RNG data rate.
+ * @entropy_shm_pool: Memory pool shared with RNG device.
+ * @optee_rng: OP-TEE RNG driver structure.
+ */
+struct optee_rng_private {
+ struct device *dev;
+ struct tee_context *ctx;
+ u32 session_id;
+ u32 data_rate;
+ struct tee_shm *entropy_shm_pool;
+ struct hwrng optee_rng;
+};
+
+#define to_optee_rng_private(r) \
+ container_of(r, struct optee_rng_private, optee_rng)
+
+static size_t get_optee_rng_data(struct optee_rng_private *pvt_data,
+ void *buf, size_t req_size)
+{
+ int ret = 0;
+ u8 *rng_data = NULL;
+ size_t rng_size = 0;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ /* Invoke TA_CMD_GET_ENTROPY function of Trusted App */
+ inv_arg.func = TA_CMD_GET_ENTROPY;
+ inv_arg.session = pvt_data->session_id;
+ inv_arg.num_params = 4;
+
+ /* Fill invoke cmd params */
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
+ param[0].u.memref.shm = pvt_data->entropy_shm_pool;
+ param[0].u.memref.size = req_size;
+ param[0].u.memref.shm_offs = 0;
+
+ ret = tee_client_invoke_func(pvt_data->ctx, &inv_arg, param);
+ if ((ret < 0) || (inv_arg.ret != 0)) {
+ dev_err(pvt_data->dev, "TA_CMD_GET_ENTROPY invoke err: %x\n",
+ inv_arg.ret);
+ return 0;
+ }
+
+ rng_data = tee_shm_get_va(pvt_data->entropy_shm_pool, 0);
+ if (IS_ERR(rng_data)) {
+ dev_err(pvt_data->dev, "tee_shm_get_va failed\n");
+ return 0;
+ }
+
+ rng_size = param[0].u.memref.size;
+ memcpy(buf, rng_data, rng_size);
+
+ return rng_size;
+}
+
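+/* Pull entropy from the trusted application in chunks. When 'wait'
+ * is set and the TA advertised a data rate, allow one sleep sized by
+ * the time the remaining bytes should take before giving up.
+ */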
+static int optee_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct optee_rng_private *pvt_data = to_optee_rng_private(rng);
+ size_t read = 0, rng_size;
+ int timeout = 1;
+ u8 *data = buf;
+
+ if (max > MAX_ENTROPY_REQ_SZ)
+ max = MAX_ENTROPY_REQ_SZ;
+
+ while (read < max) {
+ rng_size = get_optee_rng_data(pvt_data, data, (max - read));
+
+ data += rng_size;
+ read += rng_size;
+
+ if (wait && pvt_data->data_rate) {
+ if ((timeout-- == 0) || (read == max))
+ return read;
+ msleep((1000 * (max - read)) / pvt_data->data_rate);
+ } else {
+ return read;
+ }
+ }
+
+ return read;
+}
+
+static int optee_rng_init(struct hwrng *rng)
+{
+ struct optee_rng_private *pvt_data = to_optee_rng_private(rng);
+ struct tee_shm *entropy_shm_pool = NULL;
+
+ entropy_shm_pool = tee_shm_alloc_kernel_buf(pvt_data->ctx,
+ MAX_ENTROPY_REQ_SZ);
+ if (IS_ERR(entropy_shm_pool)) {
+ dev_err(pvt_data->dev, "tee_shm_alloc_kernel_buf failed\n");
+ return PTR_ERR(entropy_shm_pool);
+ }
+
+ pvt_data->entropy_shm_pool = entropy_shm_pool;
+
+ return 0;
+}
+
+static void optee_rng_cleanup(struct hwrng *rng)
+{
+ struct optee_rng_private *pvt_data = to_optee_rng_private(rng);
+
+ tee_shm_free(pvt_data->entropy_shm_pool);
+}
+
+static struct optee_rng_private pvt_data = {
+ .optee_rng = {
+ .name = DRIVER_NAME,
+ .init = optee_rng_init,
+ .cleanup = optee_rng_cleanup,
+ .read = optee_rng_read,
+ }
+};
+
+static int get_optee_rng_info(struct device *dev)
+{
+ int ret = 0;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ /* Invoke TA_CMD_GET_RNG_INFO function of Trusted App */
+ inv_arg.func = TA_CMD_GET_RNG_INFO;
+ inv_arg.session = pvt_data.session_id;
+ inv_arg.num_params = 4;
+
+ /* Fill invoke cmd params */
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+ if ((ret < 0) || (inv_arg.ret != 0)) {
+ dev_err(dev, "TA_CMD_GET_RNG_INFO invoke err: %x\n",
+ inv_arg.ret);
+ return -EINVAL;
+ }
+
+ pvt_data.data_rate = param[0].u.value.a;
+ pvt_data.optee_rng.quality = param[0].u.value.b;
+
+ return 0;
+}
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ if (ver->impl_id == TEE_IMPL_ID_OPTEE)
+ return 1;
+ else
+ return 0;
+}
+
+static int optee_rng_probe(struct device *dev)
+{
+ struct tee_client_device *rng_device = to_tee_client_device(dev);
+ int ret = 0, err = -ENODEV;
+ struct tee_ioctl_open_session_arg sess_arg;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+
+ /* Open context with TEE driver */
+ pvt_data.ctx = tee_client_open_context(NULL, optee_ctx_match, NULL,
+ NULL);
+ if (IS_ERR(pvt_data.ctx))
+ return -ENODEV;
+
+ /* Open session with hwrng Trusted App */
+ export_uuid(sess_arg.uuid, &rng_device->id.uuid);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ ret = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL);
+ if ((ret < 0) || (sess_arg.ret != 0)) {
+ dev_err(dev, "tee_client_open_session failed, err: %x\n",
+ sess_arg.ret);
+ err = -EINVAL;
+ goto out_ctx;
+ }
+ pvt_data.session_id = sess_arg.session;
+
+ err = get_optee_rng_info(dev);
+ if (err)
+ goto out_sess;
+
+ err = devm_hwrng_register(dev, &pvt_data.optee_rng);
+ if (err) {
+ dev_err(dev, "hwrng registration failed (%d)\n", err);
+ goto out_sess;
+ }
+
+ pvt_data.dev = dev;
+
+ return 0;
+
+out_sess:
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+out_ctx:
+ tee_client_close_context(pvt_data.ctx);
+
+ return err;
+}
+
+static int optee_rng_remove(struct device *dev)
+{
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+ tee_client_close_context(pvt_data.ctx);
+
+ return 0;
+}
+
+static const struct tee_client_device_id optee_rng_id_table[] = {
+ {UUID_INIT(0xab7a617c, 0xb8e7, 0x4d8f,
+ 0x83, 0x01, 0xd0, 0x9b, 0x61, 0x03, 0x6b, 0x64)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(tee, optee_rng_id_table);
+
+static struct tee_client_driver optee_rng_driver = {
+ .id_table = optee_rng_id_table,
+ .driver = {
+ .name = DRIVER_NAME,
+ .bus = &tee_bus_type,
+ .probe = optee_rng_probe,
+ .remove = optee_rng_remove,
+ },
+};
+
+static int __init optee_rng_mod_init(void)
+{
+ return driver_register(&optee_rng_driver.driver);
+}
+
+static void __exit optee_rng_mod_exit(void)
+{
+ driver_unregister(&optee_rng_driver.driver);
+}
+
+module_init(optee_rng_mod_init);
+module_exit(optee_rng_mod_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sumit Garg <sumit.garg@linaro.org>");
+MODULE_DESCRIPTION("OP-TEE based random number generator driver");
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
new file mode 100644
index 0000000000..6959d6edd4
--- /dev/null
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Driver for the PWRficient onchip rng
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#define SDCRNG_CTL_REG 0x00
+#define SDCRNG_CTL_FVLD_M 0x0000f000
+#define SDCRNG_CTL_FVLD_S 12
+#define SDCRNG_CTL_KSZ 0x00000800
+#define SDCRNG_CTL_RSRC_CRG 0x00000010
+#define SDCRNG_CTL_RSRC_RRG 0x00000000
+#define SDCRNG_CTL_CE 0x00000004
+#define SDCRNG_CTL_RE 0x00000002
+#define SDCRNG_CTL_DR 0x00000001
+#define SDCRNG_CTL_SELECT_RRG_RNG (SDCRNG_CTL_RE | SDCRNG_CTL_RSRC_RRG)
+#define SDCRNG_CTL_SELECT_CRG_RNG (SDCRNG_CTL_CE | SDCRNG_CTL_RSRC_CRG)
+#define SDCRNG_VAL_REG 0x20
+
+#define MODULE_NAME "pasemi_rng"
+
+static int pasemi_rng_data_present(struct hwrng *rng, int wait)
+{
+ void __iomem *rng_regs = (void __iomem *)rng->priv;
+ int data, i;
+
+ for (i = 0; i < 20; i++) {
+ data = (in_le32(rng_regs + SDCRNG_CTL_REG)
+ & SDCRNG_CTL_FVLD_M) ? 1 : 0;
+ if (data || !wait)
+ break;
+ udelay(10);
+ }
+ return data;
+}
+
+static int pasemi_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ void __iomem *rng_regs = (void __iomem *)rng->priv;
+ *data = in_le32(rng_regs + SDCRNG_VAL_REG);
+ return 4;
+}
+
+static int pasemi_rng_init(struct hwrng *rng)
+{
+ void __iomem *rng_regs = (void __iomem *)rng->priv;
+ u32 ctl;
+
+ ctl = SDCRNG_CTL_DR | SDCRNG_CTL_SELECT_RRG_RNG | SDCRNG_CTL_KSZ;
+ out_le32(rng_regs + SDCRNG_CTL_REG, ctl);
+ out_le32(rng_regs + SDCRNG_CTL_REG, ctl & ~SDCRNG_CTL_DR);
+
+ return 0;
+}
+
+static void pasemi_rng_cleanup(struct hwrng *rng)
+{
+ void __iomem *rng_regs = (void __iomem *)rng->priv;
+ u32 ctl;
+
+ ctl = SDCRNG_CTL_RE | SDCRNG_CTL_CE;
+ out_le32(rng_regs + SDCRNG_CTL_REG,
+ in_le32(rng_regs + SDCRNG_CTL_REG) & ~ctl);
+}
+
+static struct hwrng pasemi_rng = {
+ .name = MODULE_NAME,
+ .init = pasemi_rng_init,
+ .cleanup = pasemi_rng_cleanup,
+ .data_present = pasemi_rng_data_present,
+ .data_read = pasemi_rng_data_read,
+};
+
+static int rng_probe(struct platform_device *pdev)
+{
+ void __iomem *rng_regs;
+
+ rng_regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rng_regs))
+ return PTR_ERR(rng_regs);
+
+ pasemi_rng.priv = (unsigned long)rng_regs;
+
+ pr_info("Registering PA Semi RNG\n");
+ return devm_hwrng_register(&pdev->dev, &pasemi_rng);
+}
+
+static const struct of_device_id rng_match[] = {
+ { .compatible = "1682m-rng", },
+ { .compatible = "pasemi,pwrficient-rng", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rng_match);
+
+static struct platform_driver rng_driver = {
+ .driver = {
+ .name = "pasemi-rng",
+ .of_match_table = rng_match,
+ },
+ .probe = rng_probe,
+};
+
+module_platform_driver(rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
+MODULE_DESCRIPTION("H/W RNG driver for PA Semi processor");
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
new file mode 100644
index 0000000000..888e6f5cec
--- /dev/null
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PIC32 RNG driver
+ *
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2016 Microchip Technology Inc. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define RNGCON 0x04
+#define TRNGEN BIT(8)
+#define TRNGMOD BIT(11)
+#define RNGSEED1 0x18
+#define RNGSEED2 0x1C
+#define RNGRCNT 0x20
+#define RCNT_MASK 0x7F
+
+struct pic32_rng {
+ void __iomem *base;
+ struct hwrng rng;
+};
+
+/*
+ * The TRNG can generate up to 24Mbps. This is a timeout that should be safe
+ * enough given the instructions in the loop and that the TRNG may not always
+ * be at maximum rate.
+ */
+#define RNG_TIMEOUT 500
+
+static int pic32_rng_init(struct hwrng *rng)
+{
+ struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng);
+
+ /* enable TRNG in enhanced mode */
+ writel(TRNGEN | TRNGMOD, priv->base + RNGCON);
+ return 0;
+}
+
+static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
+ bool wait)
+{
+ struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng);
+ u64 *data = buf;
+ u32 t;
+ unsigned int timeout = RNG_TIMEOUT;
+
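+ /*
+ * RNGRCNT reports how many random bits have accumulated; once a full
+ * 64-bit value is ready it is read out of the two seed registers.
+ */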
+ do {
+ t = readl(priv->base + RNGRCNT) & RCNT_MASK;
+ if (t == 64) {
+ /* TRNG value comes through the seed registers */
+ *data = ((u64)readl(priv->base + RNGSEED2) << 32) +
+ readl(priv->base + RNGSEED1);
+ return 8;
+ }
+ } while (wait && --timeout);
+
+ return -EIO;
+}
+
+static void pic32_rng_cleanup(struct hwrng *rng)
+{
+ struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng);
+
+ writel(0, priv->base + RNGCON);
+}
+
+static int pic32_rng_probe(struct platform_device *pdev)
+{
+ struct pic32_rng *priv;
+ struct clk *clk;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ priv->rng.name = pdev->name;
+ priv->rng.init = pic32_rng_init;
+ priv->rng.read = pic32_rng_read;
+ priv->rng.cleanup = pic32_rng_cleanup;
+
+ return devm_hwrng_register(&pdev->dev, &priv->rng);
+}
+
+static const struct of_device_id pic32_rng_of_match[] __maybe_unused = {
+ { .compatible = "microchip,pic32mzda-rng", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pic32_rng_of_match);
+
+static struct platform_driver pic32_rng_driver = {
+ .probe = pic32_rng_probe,
+ .driver = {
+ .name = "pic32-rng",
+ .of_match_table = pic32_rng_of_match,
+ },
+};
+
+module_platform_driver(pic32_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joshua Henderson <joshua.henderson@microchip.com>");
+MODULE_DESCRIPTION("Microchip PIC32 RNG Driver");
diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c
new file mode 100644
index 0000000000..47b88de029
--- /dev/null
+++ b/drivers/char/hw_random/powernv-rng.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2013 Michael Ellerman, Guo Chao, IBM Corp.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/hw_random.h>
+#include <asm/archrandom.h>
+
+static int powernv_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ unsigned long *buf;
+ int i, len;
+
+ /* We rely on rng_buffer_size() being >= sizeof(unsigned long) */
+ len = max / sizeof(unsigned long);
+
+ buf = (unsigned long *)data;
+
+ for (i = 0; i < len; i++)
+ pnv_get_random_long(buf++);
+
+ return len * sizeof(unsigned long);
+}
+
+static struct hwrng powernv_hwrng = {
+ .name = "powernv-rng",
+ .read = powernv_rng_read,
+};
+
+static int powernv_rng_probe(struct platform_device *pdev)
+{
+ int rc;
+
+ rc = devm_hwrng_register(&pdev->dev, &powernv_hwrng);
+ if (rc) {
+ /* We only register one device, ignore any others */
+ if (rc == -EEXIST)
+ rc = -ENODEV;
+
+ return rc;
+ }
+
+ pr_info("Registered powernv hwrng.\n");
+
+ return 0;
+}
+
+static const struct of_device_id powernv_rng_match[] = {
+ { .compatible = "ibm,power-rng",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, powernv_rng_match);
+
+static struct platform_driver powernv_rng_driver = {
+ .driver = {
+ .name = "powernv_rng",
+ .of_match_table = powernv_rng_match,
+ },
+ .probe = powernv_rng_probe,
+};
+module_platform_driver(powernv_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Bare metal HWRNG driver for POWER7+ and above");
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
new file mode 100644
index 0000000000..62bdd5af13
--- /dev/null
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2010 Michael Neuling IBM Corporation
+ *
+ * Driver for the pseries hardware RNG for POWER7+ and above
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/hw_random.h>
+#include <asm/vio.h>
+
+
+static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ u64 buffer[PLPAR_HCALL_BUFSIZE];
+ int rc;
+
+ rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
+ if (rc != H_SUCCESS) {
+ pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
+ return -EIO;
+ }
+ memcpy(data, buffer, 8);
+
+ /* The hypervisor interface returns 64 bits */
+ return 8;
+}
+
+/*
+ * pseries_rng_get_desired_dma - Return desired DMA allocation for CMO operations
+ *
+ * This is a required function for a driver to operate in a CMO environment
+ * but this device does not make use of DMA allocations, so return 0.
+ *
+ * Return value:
+ * Number of bytes of IO data the driver will need to perform well -> 0
+ */
+static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
+{
+ return 0;
+};
+
+static struct hwrng pseries_rng = {
+ .name = KBUILD_MODNAME,
+ .read = pseries_rng_read,
+};
+
+static int pseries_rng_probe(struct vio_dev *dev,
+ const struct vio_device_id *id)
+{
+ return hwrng_register(&pseries_rng);
+}
+
+static void pseries_rng_remove(struct vio_dev *dev)
+{
+ hwrng_unregister(&pseries_rng);
+}
+
+static const struct vio_device_id pseries_rng_driver_ids[] = {
+ { "ibm,random-v1", "ibm,random"},
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids);
+
+static struct vio_driver pseries_rng_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = pseries_rng_probe,
+ .remove = pseries_rng_remove,
+ .get_desired_dma = pseries_rng_get_desired_dma,
+ .id_table = pseries_rng_driver_ids
+};
+
+static int __init rng_init(void)
+{
+ pr_info("Registering IBM pSeries RNG driver\n");
+ return vio_register_driver(&pseries_rng_driver);
+}
+
+module_init(rng_init);
+
+static void __exit rng_exit(void)
+{
+ vio_unregister_driver(&pseries_rng_driver);
+}
+module_exit(rng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michael Neuling <mikey@neuling.org>");
+MODULE_DESCRIPTION("H/W RNG driver for IBM pSeries processors");
diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c
new file mode 100644
index 0000000000..d27e32e9bf
--- /dev/null
+++ b/drivers/char/hw_random/s390-trng.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * s390 TRNG device driver
+ *
+ * Driver for the TRNG (true random number generation) command
+ * available via CPACF extension MSA 7 on the s390 arch.
+ *
+ * Copyright IBM Corp. 2017
+ * Author(s): Harald Freudenberger <freude@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "trng"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/hw_random.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+#include <linux/miscdevice.h>
+#include <linux/debugfs.h>
+#include <linux/atomic.h>
+#include <linux/random.h>
+#include <linux/sched/signal.h>
+#include <asm/debug.h>
+#include <asm/cpacf.h>
+#include <asm/archrandom.h>
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 CPACF TRNG device driver");
+
+
+/* trng related debug feature things */
+
+static debug_info_t *debug_info;
+
+#define DEBUG_DBG(...) debug_sprintf_event(debug_info, 6, ##__VA_ARGS__)
+#define DEBUG_INFO(...) debug_sprintf_event(debug_info, 5, ##__VA_ARGS__)
+#define DEBUG_WARN(...) debug_sprintf_event(debug_info, 4, ##__VA_ARGS__)
+#define DEBUG_ERR(...) debug_sprintf_event(debug_info, 3, ##__VA_ARGS__)
+
+
+/* trng helpers */
+
+static atomic64_t trng_dev_counter = ATOMIC64_INIT(0);
+static atomic64_t trng_hwrng_counter = ATOMIC64_INIT(0);
+
+
+/* file io functions */
+
+static int trng_open(struct inode *inode, struct file *file)
+{
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t trng_read(struct file *file, char __user *ubuf,
+ size_t nbytes, loff_t *ppos)
+{
+ u8 buf[32];
+ u8 *p = buf;
+ unsigned int n;
+ ssize_t ret = 0;
+
+ /*
+ * use buf for requests <= sizeof(buf),
+ * otherwise allocate one page and fetch
+ * pagewise.
+ */
+
+ if (nbytes > sizeof(buf)) {
+ p = (u8 *) __get_free_page(GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ }
+
+ while (nbytes) {
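+ /* On long reads, yield the CPU and honour pending signals between chunks. */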
+ if (need_resched()) {
+ if (signal_pending(current)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ break;
+ }
+ schedule();
+ }
+ n = nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes;
+ cpacf_trng(NULL, 0, p, n);
+ atomic64_add(n, &trng_dev_counter);
+ if (copy_to_user(ubuf, p, n)) {
+ ret = -EFAULT;
+ break;
+ }
+ nbytes -= n;
+ ubuf += n;
+ ret += n;
+ }
+
+ if (p != buf)
+ free_page((unsigned long) p);
+
+ DEBUG_DBG("trng_read()=%zd\n", ret);
+ return ret;
+}
+
+
+/* sysfs */
+
+static ssize_t trng_counter_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 dev_counter = atomic64_read(&trng_dev_counter);
+ u64 hwrng_counter = atomic64_read(&trng_hwrng_counter);
+ u64 arch_counter = atomic64_read(&s390_arch_random_counter);
+
+ return sysfs_emit(buf,
+ "trng: %llu\n"
+ "hwrng: %llu\n"
+ "arch: %llu\n"
+ "total: %llu\n",
+ dev_counter, hwrng_counter, arch_counter,
+ dev_counter + hwrng_counter + arch_counter);
+}
+static DEVICE_ATTR(byte_counter, 0444, trng_counter_show, NULL);
+
+static struct attribute *trng_dev_attrs[] = {
+ &dev_attr_byte_counter.attr,
+ NULL
+};
+
+static const struct attribute_group trng_dev_attr_group = {
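+/*
+ * Hand out bytes from the local entropy buffer; once it is drained,
+ * immediately queue another request so data is ready for the next read.
+ */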
+ .attrs = trng_dev_attrs
+};
+
+static const struct attribute_group *trng_dev_attr_groups[] = {
+ &trng_dev_attr_group,
+ NULL
+};
+
+static const struct file_operations trng_fops = {
+ .owner = THIS_MODULE,
+ .open = &trng_open,
+ .release = NULL,
+ .read = &trng_read,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice trng_dev = {
+ .name = "trng",
+ .minor = MISC_DYNAMIC_MINOR,
+ .mode = 0444,
+ .fops = &trng_fops,
+ .groups = trng_dev_attr_groups,
+};
+
+
+/* hwrng_register */
+
+static inline void _trng_hwrng_read(u8 *buf, size_t len)
+{
+ cpacf_trng(NULL, 0, buf, len);
+ atomic64_add(len, &trng_hwrng_counter);
+}
+
+static int trng_hwrng_data_read(struct hwrng *rng, u32 *data)
+{
+ size_t len = sizeof(*data);
+
+ _trng_hwrng_read((u8 *) data, len);
+
+ DEBUG_DBG("trng_hwrng_data_read()=%zu\n", len);
+
+ return len;
+}
+
+static int trng_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ size_t len = max <= PAGE_SIZE ? max : PAGE_SIZE;
+
+ _trng_hwrng_read((u8 *) data, len);
+
+ DEBUG_DBG("trng_hwrng_read()=%zu\n", len);
+
+ return len;
+}
+
+/*
+ * hwrng register struct
+ * The trng is supposed to have 100% entropy, and thus we register with a very
+ * high quality value. If we ever have a better driver in the future, we should
+ * change this value again when we merge this driver.
+ */
+static struct hwrng trng_hwrng_dev = {
+ .name = "s390-trng",
+ .data_read = trng_hwrng_data_read,
+ .read = trng_hwrng_read,
+};
+
+
+/* init and exit */
+
+static void __init trng_debug_init(void)
+{
+ debug_info = debug_register("trng", 1, 1, 4 * sizeof(long));
+ debug_register_view(debug_info, &debug_sprintf_view);
+ debug_set_level(debug_info, 3);
+}
+
+static void trng_debug_exit(void)
+{
+ debug_unregister(debug_info);
+}
+
+static int __init trng_init(void)
+{
+ int ret;
+
+ trng_debug_init();
+
+ /* check if subfunction CPACF_PRNO_TRNG is available */
+ if (!cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG)) {
+ DEBUG_INFO("trng_init CPACF_PRNO_TRNG not available\n");
+ ret = -ENODEV;
+ goto out_dbg;
+ }
+
+ ret = misc_register(&trng_dev);
+ if (ret) {
+ DEBUG_WARN("trng_init misc_register() failed rc=%d\n", ret);
+ goto out_dbg;
+ }
+
+ ret = hwrng_register(&trng_hwrng_dev);
+ if (ret) {
+ DEBUG_WARN("trng_init hwrng_register() failed rc=%d\n", ret);
+ goto out_misc;
+ }
+
+ DEBUG_DBG("trng_init successful\n");
+
+ return 0;
+
+out_misc:
+ misc_deregister(&trng_dev);
+out_dbg:
+ trng_debug_exit();
+ return ret;
+}
+
+static void __exit trng_exit(void)
+{
+ hwrng_unregister(&trng_hwrng_dev);
+ misc_deregister(&trng_dev);
+ trng_debug_exit();
+}
+
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, trng_init);
+module_exit(trng_exit);
diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c
new file mode 100644
index 0000000000..6e9dfac9fc
--- /dev/null
+++ b/drivers/char/hw_random/st-rng.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ST Random Number Generator Driver for ST's Platforms
+ *
+ * Author: Pankaj Dev: <pankaj.dev@st.com>
+ * Lee Jones <lee.jones@linaro.org>
+ *
+ * Copyright (C) 2015 STMicroelectronics (R&D) Limited
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Registers */
+#define ST_RNG_STATUS_REG 0x20
+#define ST_RNG_DATA_REG 0x24
+
+/* Registers fields */
+#define ST_RNG_STATUS_BAD_SEQUENCE BIT(0)
+#define ST_RNG_STATUS_BAD_ALTERNANCE BIT(1)
+#define ST_RNG_STATUS_FIFO_FULL BIT(5)
+
+#define ST_RNG_SAMPLE_SIZE 2 /* 2 Byte (16bit) samples */
+#define ST_RNG_FIFO_DEPTH 4
+#define ST_RNG_FIFO_SIZE (ST_RNG_FIFO_DEPTH * ST_RNG_SAMPLE_SIZE)
+
+/*
+ * Samples are documented to be available every 0.667us, so in theory
+ * the 4 sample deep FIFO should take 2.668us to fill. However, during
+ * thorough testing, it became apparent that filling the FIFO actually
+ * takes closer to 12us. We then multiply by 2 in order to account for
+ * the lack of udelay()'s reliability, suggested by Russell King.
+ */
+#define ST_RNG_FILL_FIFO_TIMEOUT (12 * 2)
+
+struct st_rng_data {
+ void __iomem *base;
+ struct hwrng ops;
+};
+
+static int st_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct st_rng_data *ddata = (struct st_rng_data *)rng->priv;
+ u32 status;
+ int i;
+
+ /* Wait until the FIFO is full, bounded by ST_RNG_FILL_FIFO_TIMEOUT */
+ for (i = 0; i < ST_RNG_FILL_FIFO_TIMEOUT; i++) {
+ status = readl_relaxed(ddata->base + ST_RNG_STATUS_REG);
+ if (status & ST_RNG_STATUS_FIFO_FULL)
+ break;
+ udelay(1);
+ }
+
+ if (i == ST_RNG_FILL_FIFO_TIMEOUT)
+ return 0;
+
+ for (i = 0; i < ST_RNG_FIFO_SIZE && i < max; i += 2)
+ *(u16 *)(data + i) =
+ readl_relaxed(ddata->base + ST_RNG_DATA_REG);
+
+ return i; /* No of bytes read */
+}
+
+static int st_rng_probe(struct platform_device *pdev)
+{
+ struct st_rng_data *ddata;
+ struct clk *clk;
+ void __iomem *base;
+ int ret;
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ddata->ops.priv = (unsigned long)ddata;
+ ddata->ops.read = st_rng_read;
+ ddata->ops.name = pdev->name;
+ ddata->base = base;
+
+ ret = devm_hwrng_register(&pdev->dev, &ddata->ops);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register HW RNG\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "Successfully registered HW RNG\n");
+
+ return 0;
+}
+
+static const struct of_device_id st_rng_match[] __maybe_unused = {
+ { .compatible = "st,rng" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st_rng_match);
+
+static struct platform_driver st_rng_driver = {
+ .driver = {
+ .name = "st-hwrandom",
+ .of_match_table = of_match_ptr(st_rng_match),
+ },
+ .probe = st_rng_probe,
+};
+
+module_platform_driver(st_rng_driver);
+
+MODULE_AUTHOR("Pankaj Dev <pankaj.dev@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
new file mode 100644
index 0000000000..efb6a9f9a1
--- /dev/null
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2015, Daniel Thompson
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#define RNG_CR 0x00
+#define RNG_CR_RNGEN BIT(2)
+#define RNG_CR_CED BIT(5)
+
+#define RNG_SR 0x04
+#define RNG_SR_SEIS BIT(6)
+#define RNG_SR_CEIS BIT(5)
+#define RNG_SR_DRDY BIT(0)
+
+#define RNG_DR 0x08
+
+struct stm32_rng_private {
+ struct hwrng rng;
+ void __iomem *base;
+ struct clk *clk;
+ struct reset_control *rst;
+ bool ced;
+};
+
+static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct stm32_rng_private *priv =
+ container_of(rng, struct stm32_rng_private, rng);
+ u32 sr;
+ int retval = 0;
+
+ pm_runtime_get_sync((struct device *) priv->rng.priv);
+
+ while (max >= sizeof(u32)) {
+ sr = readl_relaxed(priv->base + RNG_SR);
+ /*
+ * Manage the timeout with a polled read; this also covers the
+ * initial delay after the RNG is enabled.
+ */
+ if (!sr && wait) {
+ int err;
+
+ err = readl_relaxed_poll_timeout_atomic(priv->base
+ + RNG_SR,
+ sr, sr,
+ 10, 50000);
+ if (err)
+ dev_err((struct device *)priv->rng.priv,
+ "%s: timeout %x!\n", __func__, sr);
+ }
+
+ /* If error detected or data not ready... */
+ if (sr != RNG_SR_DRDY) {
+ if (WARN_ONCE(sr & (RNG_SR_SEIS | RNG_SR_CEIS),
+ "bad RNG status - %x\n", sr))
+ writel_relaxed(0, priv->base + RNG_SR);
+ break;
+ }
+
+ *(u32 *)data = readl_relaxed(priv->base + RNG_DR);
+
+ retval += sizeof(u32);
+ data += sizeof(u32);
+ max -= sizeof(u32);
+ }
+
+ pm_runtime_mark_last_busy((struct device *) priv->rng.priv);
+ pm_runtime_put_sync_autosuspend((struct device *) priv->rng.priv);
+
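+ /* Return the bytes gathered; if the caller waited and we still got nothing, report an error. */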
+ return retval || !wait ? retval : -EIO;
+}
+
+static int stm32_rng_init(struct hwrng *rng)
+{
+ struct stm32_rng_private *priv =
+ container_of(rng, struct stm32_rng_private, rng);
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
+
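+ /* Enable the RNG, setting the CED bit unless "clock-error-detect" was requested in the DT. */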
+ if (priv->ced)
+ writel_relaxed(RNG_CR_RNGEN, priv->base + RNG_CR);
+ else
+ writel_relaxed(RNG_CR_RNGEN | RNG_CR_CED,
+ priv->base + RNG_CR);
+
+ /* clear error indicators */
+ writel_relaxed(0, priv->base + RNG_SR);
+
+ return 0;
+}
+
+static void stm32_rng_cleanup(struct hwrng *rng)
+{
+ struct stm32_rng_private *priv =
+ container_of(rng, struct stm32_rng_private, rng);
+
+ writel_relaxed(0, priv->base + RNG_CR);
+ clk_disable_unprepare(priv->clk);
+}
+
+static int stm32_rng_probe(struct platform_device *ofdev)
+{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = ofdev->dev.of_node;
+ struct stm32_rng_private *priv;
+ struct resource res;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(struct stm32_rng_private), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ err = of_address_to_resource(np, 0, &res);
+ if (err)
+ return err;
+
+ priv->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(&ofdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
+ if (!IS_ERR(priv->rst)) {
+ reset_control_assert(priv->rst);
+ udelay(2);
+ reset_control_deassert(priv->rst);
+ }
+
+ priv->ced = of_property_read_bool(np, "clock-error-detect");
+
+ dev_set_drvdata(dev, priv);
+
+ priv->rng.name = dev_driver_string(dev);
+#ifndef CONFIG_PM
+ priv->rng.init = stm32_rng_init;
+ priv->rng.cleanup = stm32_rng_cleanup;
+#endif
+ priv->rng.read = stm32_rng_read;
+ priv->rng.priv = (unsigned long) dev;
+ priv->rng.quality = 900;
+
+ pm_runtime_set_autosuspend_delay(dev, 100);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+
+ return devm_hwrng_register(dev, &priv->rng);
+}
+
+static int stm32_rng_remove(struct platform_device *ofdev)
+{
+ pm_runtime_disable(&ofdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int stm32_rng_runtime_suspend(struct device *dev)
+{
+ struct stm32_rng_private *priv = dev_get_drvdata(dev);
+
+ stm32_rng_cleanup(&priv->rng);
+
+ return 0;
+}
+
+static int stm32_rng_runtime_resume(struct device *dev)
+{
+ struct stm32_rng_private *priv = dev_get_drvdata(dev);
+
+ return stm32_rng_init(&priv->rng);
+}
+#endif
+
+static const struct dev_pm_ops stm32_rng_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32_rng_runtime_suspend,
+ stm32_rng_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+
+static const struct of_device_id stm32_rng_match[] = {
+ {
+ .compatible = "st,stm32-rng",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_rng_match);
+
+static struct platform_driver stm32_rng_driver = {
+ .driver = {
+ .name = "stm32-rng",
+ .pm = &stm32_rng_pm_ops,
+ .of_match_table = stm32_rng_match,
+ },
+ .probe = stm32_rng_probe,
+ .remove = stm32_rng_remove,
+};
+
+module_platform_driver(stm32_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Thompson <daniel.thompson@linaro.org>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 RNG device driver");
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
new file mode 100644
index 0000000000..3db9d868ef
--- /dev/null
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * drivers/char/hw_random/timeriomem-rng.c
+ *
+ * Copyright (C) 2009 Alexander Clouter <alex@digriz.org.uk>
+ *
+ * Derived from drivers/char/hw_random/omap-rng.c
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * Overview:
+ * This driver is useful for platforms that have an IO range that provides
+ * periodic random data from a single IO memory address. All the platform
+ * has to do is provide the address and 'wait time' that new data becomes
+ * available.
+ *
+ * TODO: add support for reading sizes other than 32bits and masking
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/hrtimer.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/timeriomem-rng.h>
+
+struct timeriomem_rng_private {
+ void __iomem *io_base;
+ ktime_t period;
+ unsigned int present:1;
+
+ struct hrtimer timer;
+ struct completion completion;
+
+ struct hwrng rng_ops;
+};
+
+static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
+ size_t max, bool wait)
+{
+ struct timeriomem_rng_private *priv =
+ container_of(hwrng, struct timeriomem_rng_private, rng_ops);
+ int retval = 0;
+ int period_us = ktime_to_us(priv->period);
+
+ /*
+ * There may not have been enough time for new data to be generated
+ * since the last request. If the caller doesn't want to wait, let them
+ * bail out. Otherwise, wait for the completion. If the new data has
+ * already been generated, the completion should already be available.
+ */
+ if (!wait && !priv->present)
+ return 0;
+
+ wait_for_completion(&priv->completion);
+
+ do {
+ /*
+ * After the first read, all additional reads will need to wait
+ * for the RNG to generate new data. Since the period can have
+ * a wide range of values (1us to 1s have been observed), allow
+ * for 1% tolerance in the sleep time rather than a fixed value.
+ */
+ if (retval > 0)
+ usleep_range(period_us,
+ period_us + max(1, period_us / 100));
+
+ *(u32 *)data = readl(priv->io_base);
+ retval += sizeof(u32);
+ data += sizeof(u32);
+ max -= sizeof(u32);
+ } while (wait && max > sizeof(u32));
+
+ /*
+ * Block any new callers until the RNG has had time to generate new
+ * data.
+ */
+ priv->present = 0;
+ reinit_completion(&priv->completion);
+ hrtimer_forward_now(&priv->timer, priv->period);
+ hrtimer_restart(&priv->timer);
+
+ return retval;
+}
+
+static enum hrtimer_restart timeriomem_rng_trigger(struct hrtimer *timer)
+{
+ struct timeriomem_rng_private *priv
+ = container_of(timer, struct timeriomem_rng_private, timer);
+
+ priv->present = 1;
+ complete(&priv->completion);
+
+ return HRTIMER_NORESTART;
+}
+
+static int timeriomem_rng_probe(struct platform_device *pdev)
+{
+ struct timeriomem_rng_data *pdata = pdev->dev.platform_data;
+ struct timeriomem_rng_private *priv;
+ struct resource *res;
+ int err = 0;
+ int period;
+
+ if (!pdev->dev.of_node && !pdata) {
+ dev_err(&pdev->dev, "timeriomem_rng_data is missing\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for the device structure (and zero it) */
+ priv = devm_kzalloc(&pdev->dev,
+ sizeof(struct timeriomem_rng_private), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->io_base))
+ return PTR_ERR(priv->io_base);
+
+ if (res->start % 4 != 0 || resource_size(res) < 4) {
+ dev_err(&pdev->dev,
+ "address must be at least four bytes wide and 32-bit aligned\n");
+ return -EINVAL;
+ }
+
+ if (pdev->dev.of_node) {
+ int i;
+
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "period", &i))
+ period = i;
+ else {
+ dev_err(&pdev->dev, "missing period\n");
+ return -EINVAL;
+ }
+
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "quality", &i))
+ priv->rng_ops.quality = i;
+ } else {
+ period = pdata->period;
+ priv->rng_ops.quality = pdata->quality;
+ }
+
+ priv->period = ns_to_ktime(period * NSEC_PER_USEC);
+ init_completion(&priv->completion);
+ hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ priv->timer.function = timeriomem_rng_trigger;
+
+ priv->rng_ops.name = dev_name(&pdev->dev);
+ priv->rng_ops.read = timeriomem_rng_read;
+
+ /* Assume random data is already available. */
+ priv->present = 1;
+ complete(&priv->completion);
+
+ err = devm_hwrng_register(&pdev->dev, &priv->rng_ops);
+ if (err) {
+ dev_err(&pdev->dev, "problem registering\n");
+ return err;
+ }
+
+ dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
+ priv->io_base, period);
+
+ return 0;
+}
+
+static int timeriomem_rng_remove(struct platform_device *pdev)
+{
+ struct timeriomem_rng_private *priv = platform_get_drvdata(pdev);
+
+ hrtimer_cancel(&priv->timer);
+
+ return 0;
+}
+
+static const struct of_device_id timeriomem_rng_match[] = {
+ { .compatible = "timeriomem_rng" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, timeriomem_rng_match);
+
+static struct platform_driver timeriomem_rng_driver = {
+ .driver = {
+ .name = "timeriomem_rng",
+ .of_match_table = timeriomem_rng_match,
+ },
+ .probe = timeriomem_rng_probe,
+ .remove = timeriomem_rng_remove,
+};
+
+module_platform_driver(timeriomem_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
+MODULE_DESCRIPTION("Timer IOMEM H/W RNG driver");
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
new file mode 100644
index 0000000000..a9a0a3b09c
--- /dev/null
+++ b/drivers/char/hw_random/via-rng.c
@@ -0,0 +1,227 @@
+/*
+ * RNG driver for VIA RNGs
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * with the majority of the code coming from:
+ *
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <crypto/padlock.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <asm/cpu_device_id.h>
+#include <asm/io.h>
+#include <asm/msr.h>
+#include <asm/cpufeature.h>
+#include <asm/fpu/api.h>
+
+
+
+
+enum {
+ VIA_STRFILT_CNT_SHIFT = 16,
+ VIA_STRFILT_FAIL = (1 << 15),
+ VIA_STRFILT_ENABLE = (1 << 14),
+ VIA_RAWBITS_ENABLE = (1 << 13),
+ VIA_RNG_ENABLE = (1 << 6),
+ VIA_NOISESRC1 = (1 << 8),
+ VIA_NOISESRC2 = (1 << 9),
+ VIA_XSTORE_CNT_MASK = 0x0F,
+
+ VIA_RNG_CHUNK_8 = 0x00, /* 64 rand bits, 64 stored bits */
+ VIA_RNG_CHUNK_4 = 0x01, /* 32 rand bits, 32 stored bits */
+ VIA_RNG_CHUNK_4_MASK = 0xFFFFFFFF,
+ VIA_RNG_CHUNK_2 = 0x02, /* 16 rand bits, 32 stored bits */
+ VIA_RNG_CHUNK_2_MASK = 0xFFFF,
+ VIA_RNG_CHUNK_1 = 0x03, /* 8 rand bits, 32 stored bits */
+ VIA_RNG_CHUNK_1_MASK = 0xFF,
+};
+
+/*
+ * Investigate using the 'rep' prefix to obtain 32 bits of random data
+ * in one insn. The upside is potentially better performance. The
+ * downside is that the instruction becomes no longer atomic. Due to
+ * this, just like familiar issues with /dev/random itself, the worst
+ * case of a 'rep xstore' could potentially pause a cpu for an
+ * unreasonably long time. In practice, this condition would likely
+ * only occur when the hardware is failing. (or so we hope :))
+ *
+ * Another possible performance boost may come from simply buffering
+ * until we have 4 bytes, thus returning a u32 at a time,
+ * instead of the current u8-at-a-time.
+ *
+ * Padlock instructions can generate a spurious DNA fault, but the
+ * kernel doesn't use CR0.TS, so this doesn't matter.
+ */
+
+static inline u32 xstore(u32 *addr, u32 edx_in)
+{
+ u32 eax_out;
+
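+ /*
+ * xstore stores random data at the address in %edi; %edx selects the
+ * chunk size and the returned %eax carries the status word, whose low
+ * bits (VIA_XSTORE_CNT_MASK) give the number of bytes stored.
+ */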
+ asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
+ : "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr));
+
+ return eax_out;
+}
+
+static int via_rng_data_present(struct hwrng *rng, int wait)
+{
+ char buf[16 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+ ((aligned(STACK_ALIGN)));
+ u32 *via_rng_datum = (u32 *)PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+ u32 bytes_out;
+ int i;
+
+ /* We choose the recommended 1-byte-per-instruction RNG rate,
+ * for greater randomness at the expense of speed. Larger
+ * values 2, 4, or 8 bytes-per-instruction yield greater
+ * speed at lesser randomness.
+ *
+ * If you change this to another VIA_CHUNK_n, you must also
+ * change the ->n_bytes values in rng_vendor_ops[] tables.
+ * VIA_CHUNK_8 requires further code changes.
+ *
+ * A copy of MSR_VIA_RNG is placed in eax_out when xstore
+ * completes.
+ */
+
+ for (i = 0; i < 20; i++) {
+ *via_rng_datum = 0; /* paranoia, not really necessary */
+ bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1);
+ bytes_out &= VIA_XSTORE_CNT_MASK;
+ if (bytes_out || !wait)
+ break;
+ udelay(10);
+ }
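+ /* Stash the fetched word in ->priv so via_rng_data_read() can return it. */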
+ rng->priv = *via_rng_datum;
+ return bytes_out ? 1 : 0;
+}
+
+static int via_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ u32 via_rng_datum = (u32)rng->priv;
+
+ *data = via_rng_datum;
+
+ return 1;
+}
+
+static int via_rng_init(struct hwrng *rng)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ u32 lo, hi, old_lo;
+
+ /* VIA Nano CPUs don't have the MSR_VIA_RNG anymore. The RNG
+ * is always enabled if CPUID rng_en is set, and there is no
+ * longer any RNG configuration in this register.
+ */
+ if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || (c->x86 > 6)) {
+ if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
+ pr_err(PFX "can't enable hardware RNG "
+ "if XSTORE is not enabled\n");
+ return -ENODEV;
+ }
+ return 0;
+ }
+
+ /* Control the RNG via MSR. Tread lightly and pay very close
+ * attention to values written, as the reserved fields
+ * are documented to be "undefined and unpredictable"; but it
+ * does not say to write them as zero, so I make a guess that
+ * we restore the values we find in the register.
+ */
+ rdmsr(MSR_VIA_RNG, lo, hi);
+
+ old_lo = lo;
+ lo &= ~(0x7f << VIA_STRFILT_CNT_SHIFT);
+ lo &= ~VIA_XSTORE_CNT_MASK;
+ lo &= ~(VIA_STRFILT_ENABLE | VIA_STRFILT_FAIL | VIA_RAWBITS_ENABLE);
+ lo |= VIA_RNG_ENABLE;
+ lo |= VIA_NOISESRC1;
+
+ /* Enable secondary noise source on CPUs where it is present. */
+
+ /* Nehemiah stepping 8 and higher */
+ if ((c->x86_model == 9) && (c->x86_stepping > 7))
+ lo |= VIA_NOISESRC2;
+
+ /* Esther */
+ if (c->x86_model >= 10)
+ lo |= VIA_NOISESRC2;
+
+ if (lo != old_lo)
+ wrmsr(MSR_VIA_RNG, lo, hi);
+
+ /* perhaps-unnecessary sanity check; remove after testing if
+ unneeded */
+ rdmsr(MSR_VIA_RNG, lo, hi);
+ if ((lo & VIA_RNG_ENABLE) == 0) {
+ pr_err(PFX "cannot enable VIA C3 RNG, aborting\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+
+static struct hwrng via_rng = {
+ .name = "via",
+ .init = via_rng_init,
+ .data_present = via_rng_data_present,
+ .data_read = via_rng_data_read,
+};
+
+
+static int __init via_rng_mod_init(void)
+{
+ int err;
+
+ if (!boot_cpu_has(X86_FEATURE_XSTORE))
+ return -ENODEV;
+
+ pr_info("VIA RNG detected\n");
+ err = hwrng_register(&via_rng);
+ if (err) {
+ pr_err(PFX "RNG registering failed (%d)\n",
+ err);
+ goto out;
+ }
+out:
+ return err;
+}
+module_init(via_rng_mod_init);
+
+static void __exit via_rng_mod_exit(void)
+{
+ hwrng_unregister(&via_rng);
+}
+module_exit(via_rng_mod_exit);
+
+static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
+ X86_MATCH_FEATURE(X86_FEATURE_XSTORE, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id);
+
+MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
new file mode 100644
index 0000000000..e41a84e6b4
--- /dev/null
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Randomness driver for virtio
+ * Copyright (C) 2007, 2008 Rusty Russell IBM Corporation
+ */
+
+#include <asm/barrier.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+#include <linux/virtio.h>
+#include <linux/virtio_rng.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+static DEFINE_IDA(rng_index_ida);
+
+struct virtrng_info {
+ struct hwrng hwrng;
+ struct virtqueue *vq;
+ char name[25];
+ int index;
+ bool hwrng_register_done;
+ bool hwrng_removed;
+ /* data transfer */
+ struct completion have_data;
+ unsigned int data_avail;
+ unsigned int data_idx;
+ /* minimal size returned by rng_buffer_size() */
+#if SMP_CACHE_BYTES < 32
+ u8 data[32];
+#else
+ u8 data[SMP_CACHE_BYTES];
+#endif
+};
+
+static void random_recv_done(struct virtqueue *vq)
+{
+ struct virtrng_info *vi = vq->vdev->priv;
+ unsigned int len;
+
+ /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
+ if (!virtqueue_get_buf(vi->vq, &len))
+ return;
+
+ smp_store_release(&vi->data_avail, len);
+ complete(&vi->have_data);
+}
+
+static void request_entropy(struct virtrng_info *vi)
+{
+ struct scatterlist sg;
+
+ reinit_completion(&vi->have_data);
+ vi->data_idx = 0;
+
+ sg_init_one(&sg, vi->data, sizeof(vi->data));
+
+ /* There should always be room for one buffer. */
+ virtqueue_add_inbuf(vi->vq, &sg, 1, vi->data, GFP_KERNEL);
+
+ virtqueue_kick(vi->vq);
+}
+
+static unsigned int copy_data(struct virtrng_info *vi, void *buf,
+ unsigned int size)
+{
+ size = min_t(unsigned int, size, vi->data_avail);
+ memcpy(buf, vi->data + vi->data_idx, size);
+ vi->data_idx += size;
+ vi->data_avail -= size;
+ if (vi->data_avail == 0)
+ request_entropy(vi);
+ return size;
+}
+
+static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
+{
+ int ret;
+ struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
+ unsigned int chunk;
+ size_t read;
+
+ if (vi->hwrng_removed)
+ return -ENODEV;
+
+ read = 0;
+
+ /* copy available data */
+ if (smp_load_acquire(&vi->data_avail)) {
+ chunk = copy_data(vi, buf, size);
+ size -= chunk;
+ read += chunk;
+ }
+
+ if (!wait)
+ return read;
+
+ /* We have already copied available entropy,
+ * so either size is 0 or data_avail is 0
+ */
+ while (size != 0) {
+ /* data_avail is 0 but a request is pending */
+ ret = wait_for_completion_killable(&vi->have_data);
+ if (ret < 0)
+ return ret;
+ /* if vi->data_avail is 0, we have been interrupted
+ * by a cleanup, but buffer stays in the queue
+ */
+ if (vi->data_avail == 0)
+ return read;
+
+ chunk = copy_data(vi, buf + read, size);
+ size -= chunk;
+ read += chunk;
+ }
+
+ return read;
+}
+
+static void virtio_cleanup(struct hwrng *rng)
+{
+ struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
+
+ complete(&vi->have_data);
+}
+
+static int probe_common(struct virtio_device *vdev)
+{
+ int err, index;
+ struct virtrng_info *vi = NULL;
+
+ vi = kzalloc(sizeof(struct virtrng_info), GFP_KERNEL);
+ if (!vi)
+ return -ENOMEM;
+
+ vi->index = index = ida_simple_get(&rng_index_ida, 0, 0, GFP_KERNEL);
+ if (index < 0) {
+ err = index;
+ goto err_ida;
+ }
+ sprintf(vi->name, "virtio_rng.%d", index);
+ init_completion(&vi->have_data);
+
+ vi->hwrng = (struct hwrng) {
+ .read = virtio_read,
+ .cleanup = virtio_cleanup,
+ .priv = (unsigned long)vi,
+ .name = vi->name,
+ };
+ vdev->priv = vi;
+
+ /* We expect a single virtqueue. */
+ vi->vq = virtio_find_single_vq(vdev, random_recv_done, "input");
+ if (IS_ERR(vi->vq)) {
+ err = PTR_ERR(vi->vq);
+ goto err_find;
+ }
+
+ virtio_device_ready(vdev);
+
+ /* we always have a pending entropy request */
+ request_entropy(vi);
+
+ return 0;
+
+err_find:
+ ida_simple_remove(&rng_index_ida, index);
+err_ida:
+ kfree(vi);
+ return err;
+}
+
+static void remove_common(struct virtio_device *vdev)
+{
+ struct virtrng_info *vi = vdev->priv;
+
+ vi->hwrng_removed = true;
+ vi->data_avail = 0;
+ vi->data_idx = 0;
+ complete(&vi->have_data);
+ if (vi->hwrng_register_done)
+ hwrng_unregister(&vi->hwrng);
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+ ida_simple_remove(&rng_index_ida, vi->index);
+ kfree(vi);
+}
+
+static int virtrng_probe(struct virtio_device *vdev)
+{
+ return probe_common(vdev);
+}
+
+static void virtrng_remove(struct virtio_device *vdev)
+{
+ remove_common(vdev);
+}
+
+static void virtrng_scan(struct virtio_device *vdev)
+{
+ struct virtrng_info *vi = vdev->priv;
+ int err;
+
+ err = hwrng_register(&vi->hwrng);
+ if (!err)
+ vi->hwrng_register_done = true;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtrng_freeze(struct virtio_device *vdev)
+{
+ remove_common(vdev);
+ return 0;
+}
+
+static int virtrng_restore(struct virtio_device *vdev)
+{
+ int err;
+
+ err = probe_common(vdev);
+ if (!err) {
+ struct virtrng_info *vi = vdev->priv;
+
+ /*
+ * Set hwrng_removed to ensure that virtio_read()
+ * does not block waiting for data before the
+ * registration is complete.
+ */
+ vi->hwrng_removed = true;
+ err = hwrng_register(&vi->hwrng);
+ if (!err) {
+ vi->hwrng_register_done = true;
+ vi->hwrng_removed = false;
+ }
+ }
+
+ return err;
+}
+#endif
+
+static const struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct virtio_driver virtio_rng_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtrng_probe,
+ .remove = virtrng_remove,
+ .scan = virtrng_scan,
+#ifdef CONFIG_PM_SLEEP
+ .freeze = virtrng_freeze,
+ .restore = virtrng_restore,
+#endif
+};
+
+module_virtio_driver(virtio_rng_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio random number driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
new file mode 100644
index 0000000000..99f4e86ac3
--- /dev/null
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * APM X-Gene SoC RNG Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Rameshwar Prasad Sahu <rsahu@apm.com>
+ * Shamal Winchurkar <swinchurkar@apm.com>
+ * Feng Kan <fkan@apm.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/timer.h>
+
+#define RNG_MAX_DATUM 4
+#define MAX_TRY 100
+#define XGENE_RNG_RETRY_COUNT 20
+#define XGENE_RNG_RETRY_INTERVAL 10
+
+/* RNG Registers */
+#define RNG_INOUT_0 0x00
+#define RNG_INTR_STS_ACK 0x10
+#define RNG_CONTROL 0x14
+#define RNG_CONFIG 0x18
+#define RNG_ALARMCNT 0x1c
+#define RNG_FROENABLE 0x20
+#define RNG_FRODETUNE 0x24
+#define RNG_ALARMMASK 0x28
+#define RNG_ALARMSTOP 0x2c
+#define RNG_OPTIONS 0x78
+#define RNG_EIP_REV 0x7c
+
+#define MONOBIT_FAIL_MASK BIT(7)
+#define POKER_FAIL_MASK BIT(6)
+#define LONG_RUN_FAIL_MASK BIT(5)
+#define RUN_FAIL_MASK BIT(4)
+#define NOISE_FAIL_MASK BIT(3)
+#define STUCK_OUT_MASK BIT(2)
+#define SHUTDOWN_OFLO_MASK BIT(1)
+#define READY_MASK BIT(0)
+
+#define MAJOR_HW_REV_RD(src) (((src) & 0x0f000000) >> 24)
+#define MINOR_HW_REV_RD(src) (((src) & 0x00f00000) >> 20)
+#define HW_PATCH_LEVEL_RD(src) (((src) & 0x000f0000) >> 16)
+#define MAX_REFILL_CYCLES_SET(dst, src) \
+ ((dst & ~0xffff0000) | (((u32)src << 16) & 0xffff0000))
+#define MIN_REFILL_CYCLES_SET(dst, src) \
+ ((dst & ~0x000000ff) | (((u32)src) & 0x000000ff))
+#define ALARM_THRESHOLD_SET(dst, src) \
+ ((dst & ~0x000000ff) | (((u32)src) & 0x000000ff))
+#define ENABLE_RNG_SET(dst, src) \
+ ((dst & ~BIT(10)) | (((u32)src << 10) & BIT(10)))
+#define REGSPEC_TEST_MODE_SET(dst, src) \
+ ((dst & ~BIT(8)) | (((u32)src << 8) & BIT(8)))
+#define MONOBIT_FAIL_MASK_SET(dst, src) \
+ ((dst & ~BIT(7)) | (((u32)src << 7) & BIT(7)))
+#define POKER_FAIL_MASK_SET(dst, src) \
+ ((dst & ~BIT(6)) | (((u32)src << 6) & BIT(6)))
+#define LONG_RUN_FAIL_MASK_SET(dst, src) \
+ ((dst & ~BIT(5)) | (((u32)src << 5) & BIT(5)))
+#define RUN_FAIL_MASK_SET(dst, src) \
+ ((dst & ~BIT(4)) | (((u32)src << 4) & BIT(4)))
+#define NOISE_FAIL_MASK_SET(dst, src) \
+ ((dst & ~BIT(3)) | (((u32)src << 3) & BIT(3)))
+#define STUCK_OUT_MASK_SET(dst, src) \
+ ((dst & ~BIT(2)) | (((u32)src << 2) & BIT(2)))
+#define SHUTDOWN_OFLO_MASK_SET(dst, src) \
+ ((dst & ~BIT(1)) | (((u32)src << 1) & BIT(1)))
+
+struct xgene_rng_dev {
+ u32 irq;
+ void __iomem *csr_base;
+ u32 revision;
+ u32 datum_size;
+ u32 failure_cnt; /* Failure count last minute */
+ unsigned long failure_ts; /* First failure timestamp */
+ struct timer_list failure_timer;
+ struct device *dev;
+};
+
+static void xgene_rng_expired_timer(struct timer_list *t)
+{
+ struct xgene_rng_dev *ctx = from_timer(ctx, t, failure_timer);
+
+ /* Clear failure counter as timer expired */
+ disable_irq(ctx->irq);
+ ctx->failure_cnt = 0;
+ del_timer(&ctx->failure_timer);
+ enable_irq(ctx->irq);
+}
+
+static void xgene_rng_start_timer(struct xgene_rng_dev *ctx)
+{
+ ctx->failure_timer.expires = jiffies + 120 * HZ;
+ add_timer(&ctx->failure_timer);
+}
+
+/*
+ * Initialize or reinit free running oscillators (FROs)
+ */
+static void xgene_rng_init_fro(struct xgene_rng_dev *ctx, u32 fro_val)
+{
+ writel(fro_val, ctx->csr_base + RNG_FRODETUNE);
+ writel(0x00000000, ctx->csr_base + RNG_ALARMMASK);
+ writel(0x00000000, ctx->csr_base + RNG_ALARMSTOP);
+ writel(0xFFFFFFFF, ctx->csr_base + RNG_FROENABLE);
+}
+
+static void xgene_rng_chk_overflow(struct xgene_rng_dev *ctx)
+{
+ u32 val;
+
+ val = readl(ctx->csr_base + RNG_INTR_STS_ACK);
+ if (val & MONOBIT_FAIL_MASK)
+ /*
+ * LFSR detected an out-of-bounds number of 1s after
+ * checking 20,000 bits (test T1 as specified in the
+ * AIS-31 standard)
+ */
+ dev_err(ctx->dev, "test monobit failure error 0x%08X\n", val);
+ if (val & POKER_FAIL_MASK)
+ /*
+ * LFSR detected an out-of-bounds value in at least one
+ * of the 16 poker_count_X counters or an out of bounds sum
+ * of squares value after checking 20,000 bits (test T2 as
+ * specified in the AIS-31 standard)
+ */
+ dev_err(ctx->dev, "test poker failure error 0x%08X\n", val);
+ if (val & LONG_RUN_FAIL_MASK)
+ /*
+ * LFSR detected a sequence of 34 identical bits
+ * (test T4 as specified in the AIS-31 standard)
+ */
+ dev_err(ctx->dev, "test long run failure error 0x%08X\n", val);
+ if (val & RUN_FAIL_MASK)
+ /*
+ * LFSR detected an out-of-bounds value for at least one
+ * of the running counters after checking 20,000 bits
+ * (test T3 as specified in the AIS-31 standard)
+ */
+ dev_err(ctx->dev, "test run failure error 0x%08X\n", val);
+ if (val & NOISE_FAIL_MASK)
+ /* LFSR detected a sequence of 48 identical bits */
+ dev_err(ctx->dev, "noise failure error 0x%08X\n", val);
+ if (val & STUCK_OUT_MASK)
+ /*
+ * Detected output data registers generated same value twice
+ * in a row
+ */
+ dev_err(ctx->dev, "stuck out failure error 0x%08X\n", val);
+
+ if (val & SHUTDOWN_OFLO_MASK) {
+ u32 frostopped;
+
+ /* FROs shut down after a second error event. Try to recover. */
+ if (++ctx->failure_cnt == 1) {
+ /* 1st time, just recover */
+ ctx->failure_ts = jiffies;
+ frostopped = readl(ctx->csr_base + RNG_ALARMSTOP);
+ xgene_rng_init_fro(ctx, frostopped);
+
+ /*
+ * We must start a timer to clear out this error
+ * in case the system timer wraps around
+ */
+ xgene_rng_start_timer(ctx);
+ } else {
+ /* 2nd failure in less than 1 minute? */
+ if (time_after(ctx->failure_ts + 60 * HZ, jiffies)) {
+ dev_err(ctx->dev,
+ "FRO shutdown failure error 0x%08X\n",
+ val);
+ } else {
+ /* 2nd failure more than 1 minute after the first, recover */
+ ctx->failure_ts = jiffies;
+ ctx->failure_cnt = 1;
+ /*
+ * We must start a timer to clear out this
+ * error in case the system timer wraps
+ * around
+ */
+ xgene_rng_start_timer(ctx);
+ }
+ frostopped = readl(ctx->csr_base + RNG_ALARMSTOP);
+ xgene_rng_init_fro(ctx, frostopped);
+ }
+ }
+ /* Clear them all */
+ writel(val, ctx->csr_base + RNG_INTR_STS_ACK);
+}
+
+static irqreturn_t xgene_rng_irq_handler(int irq, void *id)
+{
+ struct xgene_rng_dev *ctx = id;
+
+ /* RNG Alarm Counter overflow */
+ xgene_rng_chk_overflow(ctx);
+
+ return IRQ_HANDLED;
+}
+
+static int xgene_rng_data_present(struct hwrng *rng, int wait)
+{
+ struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+ u32 i, val = 0;
+
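+ /* Poll the interrupt status register for the READY bit, retrying while the caller is willing to wait. */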
+ for (i = 0; i < XGENE_RNG_RETRY_COUNT; i++) {
+ val = readl(ctx->csr_base + RNG_INTR_STS_ACK);
+ if ((val & READY_MASK) || !wait)
+ break;
+ udelay(XGENE_RNG_RETRY_INTERVAL);
+ }
+
+ return (val & READY_MASK);
+}
+
+static int xgene_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+ int i;
+
+ for (i = 0; i < ctx->datum_size; i++)
+ data[i] = readl(ctx->csr_base + RNG_INOUT_0 + i * 4);
+
+ /* Clear ready bit to start next transaction */
+ writel(READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK);
+
+ return ctx->datum_size << 2;
+}
+
+static void xgene_rng_init_internal(struct xgene_rng_dev *ctx)
+{
+ u32 val;
+
+ writel(0x00000000, ctx->csr_base + RNG_CONTROL);
+
+ val = MAX_REFILL_CYCLES_SET(0, 10);
+ val = MIN_REFILL_CYCLES_SET(val, 10);
+ writel(val, ctx->csr_base + RNG_CONFIG);
+
+ val = ALARM_THRESHOLD_SET(0, 0xFF);
+ writel(val, ctx->csr_base + RNG_ALARMCNT);
+
+ xgene_rng_init_fro(ctx, 0);
+
+ writel(MONOBIT_FAIL_MASK |
+ POKER_FAIL_MASK |
+ LONG_RUN_FAIL_MASK |
+ RUN_FAIL_MASK |
+ NOISE_FAIL_MASK |
+ STUCK_OUT_MASK |
+ SHUTDOWN_OFLO_MASK |
+ READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK);
+
+ val = ENABLE_RNG_SET(0, 1);
+ val = MONOBIT_FAIL_MASK_SET(val, 1);
+ val = POKER_FAIL_MASK_SET(val, 1);
+ val = LONG_RUN_FAIL_MASK_SET(val, 1);
+ val = RUN_FAIL_MASK_SET(val, 1);
+ val = NOISE_FAIL_MASK_SET(val, 1);
+ val = STUCK_OUT_MASK_SET(val, 1);
+ val = SHUTDOWN_OFLO_MASK_SET(val, 1);
+ writel(val, ctx->csr_base + RNG_CONTROL);
+}
+
+static int xgene_rng_init(struct hwrng *rng)
+{
+ struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+
+ ctx->failure_cnt = 0;
+ timer_setup(&ctx->failure_timer, xgene_rng_expired_timer, 0);
+
+ ctx->revision = readl(ctx->csr_base + RNG_EIP_REV);
+
+ dev_dbg(ctx->dev, "Rev %d.%d.%d\n",
+ MAJOR_HW_REV_RD(ctx->revision),
+ MINOR_HW_REV_RD(ctx->revision),
+ HW_PATCH_LEVEL_RD(ctx->revision));
+
+ dev_dbg(ctx->dev, "Options 0x%08X",
+ readl(ctx->csr_base + RNG_OPTIONS));
+
+ xgene_rng_init_internal(ctx);
+
+ ctx->datum_size = RNG_MAX_DATUM;
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_rng_acpi_match[] = {
+ { "APMC0D18", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, xgene_rng_acpi_match);
+#endif
+
+static struct hwrng xgene_rng_func = {
+ .name = "xgene-rng",
+ .init = xgene_rng_init,
+ .data_present = xgene_rng_data_present,
+ .data_read = xgene_rng_data_read,
+};
+
+static int xgene_rng_probe(struct platform_device *pdev)
+{
+ struct xgene_rng_dev *ctx;
+ struct clk *clk;
+ int rc = 0;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ctx);
+
+ ctx->csr_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->csr_base))
+ return PTR_ERR(ctx->csr_base);
+
+ rc = platform_get_irq(pdev, 0);
+ if (rc < 0)
+ return rc;
+ ctx->irq = rc;
+
+ dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d",
+ ctx->csr_base, ctx->irq);
+
+ rc = devm_request_irq(&pdev->dev, ctx->irq, xgene_rng_irq_handler, 0,
+ dev_name(&pdev->dev), ctx);
+ if (rc)
+ return dev_err_probe(&pdev->dev, rc, "Could not request RNG alarm IRQ\n");
+
+ /* Enable IP clock */
+ clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk), "Couldn't get the clock for RNG\n");
+
+ xgene_rng_func.priv = (unsigned long) ctx;
+
+ rc = devm_hwrng_register(&pdev->dev, &xgene_rng_func);
+ if (rc)
+ return dev_err_probe(&pdev->dev, rc, "RNG registering failed\n");
+
+ rc = device_init_wakeup(&pdev->dev, 1);
+ if (rc)
+ return dev_err_probe(&pdev->dev, rc, "RNG device_init_wakeup failed\n");
+
+ return 0;
+}
+
+static int xgene_rng_remove(struct platform_device *pdev)
+{
+ int rc;
+
+ rc = device_init_wakeup(&pdev->dev, 0);
+ if (rc)
+ dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc);
+
+ return 0;
+}
+
+static const struct of_device_id xgene_rng_of_match[] = {
+ { .compatible = "apm,xgene-rng" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, xgene_rng_of_match);
+
+static struct platform_driver xgene_rng_driver = {
+ .probe = xgene_rng_probe,
+ .remove = xgene_rng_remove,
+ .driver = {
+ .name = "xgene-rng",
+ .of_match_table = xgene_rng_of_match,
+ .acpi_match_table = ACPI_PTR(xgene_rng_acpi_match),
+ },
+};
+
+module_platform_driver(xgene_rng_driver);
+MODULE_DESCRIPTION("APM X-Gene RNG driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/xiphera-trng.c b/drivers/char/hw_random/xiphera-trng.c
new file mode 100644
index 0000000000..2c586d1fe8
--- /dev/null
+++ b/drivers/char/hw_random/xiphera-trng.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2020 Xiphera Ltd. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#define CONTROL_REG 0x00000000
+#define STATUS_REG 0x00000004
+#define RAND_REG 0x00000000
+
+#define HOST_TO_TRNG_RESET 0x00000001
+#define HOST_TO_TRNG_RELEASE_RESET 0x00000002
+#define HOST_TO_TRNG_ENABLE 0x80000000
+#define HOST_TO_TRNG_ZEROIZE 0x80000004
+#define HOST_TO_TRNG_ACK_ZEROIZE 0x80000008
+#define HOST_TO_TRNG_READ 0x8000000F
+
+/* trng statuses */
+#define TRNG_ACK_RESET 0x000000AC
+#define TRNG_SUCCESSFUL_STARTUP 0x00000057
+#define TRNG_FAILED_STARTUP 0x000000FA
+#define TRNG_NEW_RAND_AVAILABLE 0x000000ED
+
+struct xiphera_trng {
+ void __iomem *mem;
+ struct hwrng rng;
+};
+
+static int xiphera_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct xiphera_trng *trng = container_of(rng, struct xiphera_trng, rng);
+ int ret = 0;
+
+ while (max >= sizeof(u32)) {
+ /* check for data */
+ if (readl(trng->mem + STATUS_REG) == TRNG_NEW_RAND_AVAILABLE) {
+ *(u32 *)buf = readl(trng->mem + RAND_REG);
+ /*
+ * Inform the trng of the read
+ * and re-enable it to produce a new random number
+ */
+ writel(HOST_TO_TRNG_READ, trng->mem + CONTROL_REG);
+ writel(HOST_TO_TRNG_ENABLE, trng->mem + CONTROL_REG);
+ ret += sizeof(u32);
+ buf += sizeof(u32);
+ max -= sizeof(u32);
+ } else {
+ break;
+ }
+ }
+ return ret;
+}
+
+static int xiphera_trng_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct xiphera_trng *trng;
+ struct device *dev = &pdev->dev;
+
+ trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return -ENOMEM;
+
+ trng->mem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(trng->mem))
+ return PTR_ERR(trng->mem);
+
+ /*
+ * the trng needs to be reset first which might not happen in time,
+ * hence we incorporate a small delay to ensure proper behaviour
+ */
+ writel(HOST_TO_TRNG_RESET, trng->mem + CONTROL_REG);
+ usleep_range(100, 200);
+
+ if (readl(trng->mem + STATUS_REG) != TRNG_ACK_RESET) {
+ /*
+ * there is a small chance the trng is just not ready yet,
+ * so we try one more time. If the second time fails, we give up
+ */
+ usleep_range(100, 200);
+ if (readl(trng->mem + STATUS_REG) != TRNG_ACK_RESET) {
+ dev_err(dev, "failed to reset the trng ip\n");
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * once again, to ensure proper behaviour we sleep
+ * for a while after zeroizing the trng
+ */
+ writel(HOST_TO_TRNG_RELEASE_RESET, trng->mem + CONTROL_REG);
+ writel(HOST_TO_TRNG_ENABLE, trng->mem + CONTROL_REG);
+ writel(HOST_TO_TRNG_ZEROIZE, trng->mem + CONTROL_REG);
+ msleep(20);
+
+ if (readl(trng->mem + STATUS_REG) != TRNG_SUCCESSFUL_STARTUP) {
+ /* diagnose the reason for the failure */
+ if (readl(trng->mem + STATUS_REG) == TRNG_FAILED_STARTUP) {
+ dev_err(dev, "trng ip startup-tests failed\n");
+ return -ENODEV;
+ }
+ dev_err(dev, "startup-tests yielded no response\n");
+ return -ENODEV;
+ }
+
+ writel(HOST_TO_TRNG_ACK_ZEROIZE, trng->mem + CONTROL_REG);
+
+ trng->rng.name = pdev->name;
+ trng->rng.read = xiphera_trng_read;
+ trng->rng.quality = 900;
+
+ ret = devm_hwrng_register(dev, &trng->rng);
+ if (ret) {
+ dev_err(dev, "failed to register rng device: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, trng);
+
+ return 0;
+}
+
+static const struct of_device_id xiphera_trng_of_match[] = {
+ { .compatible = "xiphera,xip8001b-trng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xiphera_trng_of_match);
+
+static struct platform_driver xiphera_trng_driver = {
+ .driver = {
+ .name = "xiphera-trng",
+ .of_match_table = xiphera_trng_of_match,
+ },
+ .probe = xiphera_trng_probe,
+};
+
+module_platform_driver(xiphera_trng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Atte Tommiska");
+MODULE_DESCRIPTION("Xiphera FPGA-based true random number generator driver");
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
new file mode 100644
index 0000000000..f4adc6feb3
--- /dev/null
+++ b/drivers/char/ipmi/Kconfig
@@ -0,0 +1,190 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# IPMI device configuration
+#
+
+menuconfig IPMI_HANDLER
+ tristate 'IPMI top-level message handler'
+ depends on HAS_IOMEM
+ select IPMI_DMI_DECODE if DMI
+ help
+ This enables the central IPMI message handler, required for IPMI
+ to work.
+
+ IPMI is a standard for managing sensors (temperature,
+ voltage, etc.) in a system.
+
+ See <file:Documentation/driver-api/ipmi.rst> for more details on the driver.
+
+ If unsure, say N.
+
+config IPMI_DMI_DECODE
+ select IPMI_PLAT_DATA
+ bool
+
+config IPMI_PLAT_DATA
+ bool
+
+if IPMI_HANDLER
+
+config IPMI_PANIC_EVENT
+ bool 'Generate a panic event to all BMCs on a panic'
+ help
+ When a panic occurs, this will cause the IPMI message handler to,
+ by default, generate an IPMI event describing the panic to each
+ interface registered with the message handler. This capability is
+ always available: the ipmi_msghandler module parameter named
+ panic_op can be set to "event" to choose it at runtime; this config
+ option simply makes "event" the default value.
+
+config IPMI_PANIC_STRING
+ bool 'Generate OEM events containing the panic string'
+ depends on IPMI_PANIC_EVENT
+ help
+ When a panic occurs, this will cause the IPMI message handler to,
+ by default, generate IPMI OEM type f0 events holding the IPMB
+ address of the panic generator (byte 4 of the event), a sequence
+ number for the string (byte 5 of the event) and part of the
+ string (the rest of the event). Bytes 1, 2, and 3 are the normal
+ usage for an OEM event. You can fetch these events and use the
+ sequence numbers to piece the string together. The ipmi_msghandler
+ module parameter named panic_op can be set to "string" to choose
+ this behaviour at runtime; this config option simply makes "string"
+ the default value.
+
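+# Either default can also be overridden at module load time through the
+# ipmi_msghandler "panic_op" parameter described above, e.g. (a sketch,
+# assuming the handler is built as a module):
+#
+#   modprobe ipmi_msghandler panic_op=string
+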
+config IPMI_DEVICE_INTERFACE
+ tristate 'Device interface for IPMI'
+ help
+ This provides an IOCTL interface to the IPMI message handler so
+ userland processes may use IPMI. It supports poll() and select().
+
+config IPMI_SI
+ tristate 'IPMI System Interface handler'
+ select IPMI_PLAT_DATA
+ help
+ Provides a driver for System Interfaces (KCS, SMIC, and BT). If
+ you are using IPMI, you should probably say "y" here.
+
+config IPMI_SSIF
+ tristate 'IPMI SMBus handler (SSIF)'
+ depends on I2C
+ help
+ Provides a driver for an SMBus interface to a BMC, meaning that the
+ BMC is accessed over an I2C bus instead of a standard system
+ interface. This module requires I2C support.
+
+config IPMI_IPMB
+ tristate 'IPMI IPMB interface'
+ depends on I2C && I2C_SLAVE
+ help
+ Provides a driver for a system running right on the IPMB bus.
+ It supports normal system interface messages to a BMC on the IPMB
+ bus, and it also supports direct messaging on the bus using
+ IPMB direct messages. This module requires I2C support.
+
+config IPMI_POWERNV
+ depends on PPC_POWERNV
+ tristate 'POWERNV (OPAL firmware) IPMI interface'
+ help
+ Provides a driver for OPAL firmware-based IPMI interfaces.
+
+config IPMI_WATCHDOG
+ tristate 'IPMI Watchdog Timer'
+ help
+ This enables the IPMI watchdog timer.
+
+config IPMI_POWEROFF
+ tristate 'IPMI Poweroff'
+ help
+ This enables a function to power off the system with IPMI if
+ the IPMI management controller is capable of this.
+
+endif # IPMI_HANDLER
+
+config IPMI_KCS_BMC
+ tristate
+
+config ASPEED_KCS_IPMI_BMC
+ depends on ARCH_ASPEED || COMPILE_TEST
+ select IPMI_KCS_BMC
+ select REGMAP_MMIO
+ tristate "Aspeed KCS IPMI BMC driver"
+ help
+ Provides a driver for the KCS (Keyboard Controller Style) IPMI
+ interface found on Aspeed SOCs (AST2400 and AST2500).
+
+ The driver implements the BMC side of the KCS controller and
+ provides the BMC with access to the KCS I/O space.
+
+config NPCM7XX_KCS_IPMI_BMC
+ depends on ARCH_NPCM || COMPILE_TEST
+ select IPMI_KCS_BMC
+ select REGMAP_MMIO
+ tristate "NPCM KCS IPMI BMC driver"
+ help
+ Provides a driver for the KCS (Keyboard Controller Style) IPMI
+ interface found on Nuvoton NPCM SOCs.
+
+ The driver implements the BMC side of the KCS controller and
+ provides the BMC with access to the KCS I/O space.
+
+ This support is also available as a module. If so, the module
+ will be called kcs_bmc_npcm7xx.
+
+config IPMI_KCS_BMC_CDEV_IPMI
+ depends on IPMI_KCS_BMC
+ tristate "IPMI character device interface for BMC KCS devices"
+ help
+ Provides a BMC-side character device implementing IPMI
+ semantics for KCS IPMI devices.
+
+ Say YES if you wish to expose KCS devices on the BMC for IPMI
+ purposes.
+
+ This support is also available as a module. The module will be
+ called kcs_bmc_cdev_ipmi.
+
+config IPMI_KCS_BMC_SERIO
+ depends on IPMI_KCS_BMC && SERIO
+ tristate "SerIO adaptor for BMC KCS devices"
+ help
+ Adapts the BMC KCS device for the SerIO subsystem. This allows users
+ to take advantage of userspace interfaces provided by SerIO where
+ appropriate.
+
+ Say YES if you wish to expose KCS devices on the BMC via SerIO
+ interfaces.
+
+ This support is also available as a module. The module will be
+ called kcs_bmc_serio.
+
+config ASPEED_BT_IPMI_BMC
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on MFD_SYSCON
+ select REGMAP_MMIO
+ tristate "BT IPMI bmc driver"
+ help
+ Provides a driver for the BT (Block Transfer) IPMI interface
+ found on Aspeed SOCs (AST2400 and AST2500). The driver
+ implements the BMC side of the BT interface.
+
+config SSIF_IPMI_BMC
+ tristate "SSIF IPMI BMC driver"
+ depends on I2C && I2C_SLAVE
+ help
+ This enables the IPMI SMBus system interface (SSIF) at the
+ management (BMC) side.
+
+ The driver implements the BMC side of the SMBus system
+ interface (SSIF).
+
+config IPMB_DEVICE_INTERFACE
+ tristate 'IPMB Interface handler'
+ depends on I2C
+ depends on I2C_SLAVE
+ help
+ Provides a driver for a device (Satellite MC) to
+ receive requests and send responses back to the BMC via
+ the IPMB interface. This module requires I2C support.
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
new file mode 100644
index 0000000000..cb6138b8de
--- /dev/null
+++ b/drivers/char/ipmi/Makefile
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the ipmi drivers.
+#
+
+ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o \
+ ipmi_si_hotmod.o ipmi_si_hardcode.o ipmi_si_platform.o \
+ ipmi_si_port_io.o ipmi_si_mem_io.o
+ifdef CONFIG_PCI
+ipmi_si-y += ipmi_si_pci.o
+endif
+ifdef CONFIG_PARISC
+ipmi_si-y += ipmi_si_parisc.o
+endif
+
+obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
+obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
+obj-$(CONFIG_IPMI_SI) += ipmi_si.o
+obj-$(CONFIG_IPMI_DMI_DECODE) += ipmi_dmi.o
+obj-$(CONFIG_IPMI_PLAT_DATA) += ipmi_plat_data.o
+obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
+obj-$(CONFIG_IPMI_IPMB) += ipmi_ipmb.o
+obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
+obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
+obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
+obj-$(CONFIG_IPMI_KCS_BMC) += kcs_bmc.o
+obj-$(CONFIG_IPMI_KCS_BMC_SERIO) += kcs_bmc_serio.o
+obj-$(CONFIG_IPMI_KCS_BMC_CDEV_IPMI) += kcs_bmc_cdev_ipmi.o
+obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o
+obj-$(CONFIG_ASPEED_KCS_IPMI_BMC) += kcs_bmc_aspeed.o
+obj-$(CONFIG_NPCM7XX_KCS_IPMI_BMC) += kcs_bmc_npcm7xx.o
+obj-$(CONFIG_IPMB_DEVICE_INTERFACE) += ipmb_dev_int.o
+obj-$(CONFIG_SSIF_IPMI_BMC) += ssif_bmc.o
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
new file mode 100644
index 0000000000..7450904e33
--- /dev/null
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2015-2016, IBM Corporation.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bt-bmc.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+
+/*
+ * This is a BMC device used to communicate with the host
+ */
+#define DEVICE_NAME "ipmi-bt-host"
+
+#define BT_IO_BASE 0xe4
+#define BT_IRQ 10
+
+#define BT_CR0 0x0
+#define BT_CR0_IO_BASE 16
+#define BT_CR0_IRQ 12
+#define BT_CR0_EN_CLR_SLV_RDP 0x8
+#define BT_CR0_EN_CLR_SLV_WRP 0x4
+#define BT_CR0_ENABLE_IBT 0x1
+#define BT_CR1 0x4
+#define BT_CR1_IRQ_H2B 0x01
+#define BT_CR1_IRQ_HBUSY 0x40
+#define BT_CR2 0x8
+#define BT_CR2_IRQ_H2B 0x01
+#define BT_CR2_IRQ_HBUSY 0x40
+#define BT_CR3 0xc
+#define BT_CTRL 0x10
+#define BT_CTRL_B_BUSY 0x80
+#define BT_CTRL_H_BUSY 0x40
+#define BT_CTRL_OEM0 0x20
+#define BT_CTRL_SMS_ATN 0x10
+#define BT_CTRL_B2H_ATN 0x08
+#define BT_CTRL_H2B_ATN 0x04
+#define BT_CTRL_CLR_RD_PTR 0x02
+#define BT_CTRL_CLR_WR_PTR 0x01
+#define BT_BMC2HOST 0x14
+#define BT_INTMASK 0x18
+#define BT_INTMASK_B2H_IRQEN 0x01
+#define BT_INTMASK_B2H_IRQ 0x02
+#define BT_INTMASK_BMC_HWRST 0x80
+
+#define BT_BMC_BUFFER_SIZE 256
+
+struct bt_bmc {
+ struct device dev;
+ struct miscdevice miscdev;
+ void __iomem *base;
+ int irq;
+ wait_queue_head_t queue;
+ struct timer_list poll_timer;
+ struct mutex mutex;
+};
+
+static atomic_t open_count = ATOMIC_INIT(0);
+
+static u8 bt_inb(struct bt_bmc *bt_bmc, int reg)
+{
+ return readb(bt_bmc->base + reg);
+}
+
+static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg)
+{
+ writeb(data, bt_bmc->base + reg);
+}
+
+static void clr_rd_ptr(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_CLR_RD_PTR, BT_CTRL);
+}
+
+static void clr_wr_ptr(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_CLR_WR_PTR, BT_CTRL);
+}
+
+static void clr_h2b_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_H2B_ATN, BT_CTRL);
+}
+
+static void set_b_busy(struct bt_bmc *bt_bmc)
+{
+ if (!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY))
+ bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void clr_b_busy(struct bt_bmc *bt_bmc)
+{
+ if (bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY)
+ bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void set_b2h_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_B2H_ATN, BT_CTRL);
+}
+
+static u8 bt_read(struct bt_bmc *bt_bmc)
+{
+ return bt_inb(bt_bmc, BT_BMC2HOST);
+}
+
+static ssize_t bt_readn(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ buf[i] = bt_read(bt_bmc);
+ return n;
+}
+
+static void bt_write(struct bt_bmc *bt_bmc, u8 c)
+{
+ bt_outb(bt_bmc, c, BT_BMC2HOST);
+}
+
+static ssize_t bt_writen(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ bt_write(bt_bmc, buf[i]);
+ return n;
+}
+
+static void set_sms_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_SMS_ATN, BT_CTRL);
+}
+
+static struct bt_bmc *file_bt_bmc(struct file *file)
+{
+ return container_of(file->private_data, struct bt_bmc, miscdev);
+}
+
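+/* Only one process may have the device open at a time. */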
+static int bt_bmc_open(struct inode *inode, struct file *file)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ if (atomic_inc_return(&open_count) == 1) {
+ clr_b_busy(bt_bmc);
+ return 0;
+ }
+
+ atomic_dec(&open_count);
+ return -EBUSY;
+}
+
+/*
+ * The BT (Block Transfer) interface means that entire messages are
+ * buffered by the host before a notification is sent to the BMC that
+ * there is data to be read. The first byte is the length and the
+ * message data follows. The read operation just tries to capture the
+ * whole message before returning it to userspace.
+ *
+ * BT Message format :
+ *
+ * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5:N
+ * Length NetFn/LUN Seq Cmd Data
+ *
+ */
+static ssize_t bt_bmc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ u8 len;
+ int len_byte = 1;
+ u8 kbuffer[BT_BMC_BUFFER_SIZE];
+ ssize_t ret = 0;
+ ssize_t nread;
+
+ WARN_ON(*ppos);
+
+ if (wait_event_interruptible(bt_bmc->queue,
+ bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))
+ return -ERESTARTSYS;
+
+ mutex_lock(&bt_bmc->mutex);
+
+ if (unlikely(!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ set_b_busy(bt_bmc);
+ clr_h2b_atn(bt_bmc);
+ clr_rd_ptr(bt_bmc);
+
+ /*
+ * The BT frames start with the message length, which does not
+ * include the length byte.
+ */
+ kbuffer[0] = bt_read(bt_bmc);
+ len = kbuffer[0];
+
+ /* We pass the length back to userspace as well */
+ if (len + 1 > count)
+ len = count - 1;
+
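+ /*
+ * Copy the message out to userspace in kbuffer-sized chunks. The first
+ * chunk is prefixed with the length byte already held in kbuffer[0]
+ * (len_byte == 1); after that len_byte drops to 0 and only payload
+ * bytes are copied.
+ */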
+ while (len) {
+ nread = min_t(ssize_t, len, sizeof(kbuffer) - len_byte);
+
+ bt_readn(bt_bmc, kbuffer + len_byte, nread);
+
+ if (copy_to_user(buf, kbuffer, nread + len_byte)) {
+ ret = -EFAULT;
+ break;
+ }
+ len -= nread;
+ buf += nread + len_byte;
+ ret += nread + len_byte;
+ len_byte = 0;
+ }
+
+ clr_b_busy(bt_bmc);
+
+out_unlock:
+ mutex_unlock(&bt_bmc->mutex);
+ return ret;
+}
+
+/*
+ * BT Message response format :
+ *
+ * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5 Byte 6:N
+ * Length NetFn/LUN Seq Cmd Code Data
+ */
+static ssize_t bt_bmc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ u8 kbuffer[BT_BMC_BUFFER_SIZE];
+ ssize_t ret = 0;
+ ssize_t nwritten;
+
+ /*
+ * a response must be at least the 5-byte minimum size
+ */
+ if (count < 5)
+ return -EINVAL;
+
+ WARN_ON(*ppos);
+
+ /*
+ * There's no interrupt for clearing bmc busy so we have to
+ * poll
+ */
+ if (wait_event_interruptible(bt_bmc->queue,
+ !(bt_inb(bt_bmc, BT_CTRL) &
+ (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))))
+ return -ERESTARTSYS;
+
+ mutex_lock(&bt_bmc->mutex);
+
+ if (unlikely(bt_inb(bt_bmc, BT_CTRL) &
+ (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ clr_wr_ptr(bt_bmc);
+
+ while (count) {
+ nwritten = min_t(ssize_t, count, sizeof(kbuffer));
+ if (copy_from_user(&kbuffer, buf, nwritten)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ bt_writen(bt_bmc, kbuffer, nwritten);
+
+ count -= nwritten;
+ buf += nwritten;
+ ret += nwritten;
+ }
+
+ set_b2h_atn(bt_bmc);
+
+out_unlock:
+ mutex_unlock(&bt_bmc->mutex);
+ return ret;
+}
+
+static long bt_bmc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long param)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ switch (cmd) {
+ case BT_BMC_IOCTL_SMS_ATN:
+ set_sms_atn(bt_bmc);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int bt_bmc_release(struct inode *inode, struct file *file)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ atomic_dec(&open_count);
+ set_b_busy(bt_bmc);
+ return 0;
+}
+
+static __poll_t bt_bmc_poll(struct file *file, poll_table *wait)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ __poll_t mask = 0;
+ u8 ctrl;
+
+ poll_wait(file, &bt_bmc->queue, wait);
+
+ ctrl = bt_inb(bt_bmc, BT_CTRL);
+
+ if (ctrl & BT_CTRL_H2B_ATN)
+ mask |= EPOLLIN;
+
+ if (!(ctrl & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN)))
+ mask |= EPOLLOUT;
+
+ return mask;
+}
+
+static const struct file_operations bt_bmc_fops = {
+ .owner = THIS_MODULE,
+ .open = bt_bmc_open,
+ .read = bt_bmc_read,
+ .write = bt_bmc_write,
+ .release = bt_bmc_release,
+ .poll = bt_bmc_poll,
+ .unlocked_ioctl = bt_bmc_ioctl,
+};
+
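+/*
+ * Fallback used when no IRQ is available: re-arm every 500ms and wake any
+ * waiters so they re-check the BT status bits.
+ */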
+static void poll_timer(struct timer_list *t)
+{
+ struct bt_bmc *bt_bmc = from_timer(bt_bmc, t, poll_timer);
+
+ bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
+ wake_up(&bt_bmc->queue);
+ add_timer(&bt_bmc->poll_timer);
+}
+
+static irqreturn_t bt_bmc_irq(int irq, void *arg)
+{
+ struct bt_bmc *bt_bmc = arg;
+ u32 reg;
+
+ reg = readl(bt_bmc->base + BT_CR2);
+
+ reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY;
+ if (!reg)
+ return IRQ_NONE;
+
+ /* ack pending IRQs */
+ writel(reg, bt_bmc->base + BT_CR2);
+
+ wake_up(&bt_bmc->queue);
+ return IRQ_HANDLED;
+}
+
+static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int rc;
+ u32 reg;
+
+ bt_bmc->irq = platform_get_irq_optional(pdev, 0);
+ if (bt_bmc->irq < 0)
+ return bt_bmc->irq;
+
+ rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED,
+ DEVICE_NAME, bt_bmc);
+ if (rc < 0) {
+ dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq);
+ bt_bmc->irq = rc;
+ return rc;
+ }
+
+ /*
+ * Enable BMC interrupts for the H2B and HBUSY bits:
+ * H2B will be asserted when the host has data for us; HBUSY
+ * will be cleared (along with B2H) when we can write the next
+ * message to the BT buffer
+ */
+ reg = readl(bt_bmc->base + BT_CR1);
+ reg |= BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY;
+ writel(reg, bt_bmc->base + BT_CR1);
+
+ return 0;
+}
+
+static int bt_bmc_probe(struct platform_device *pdev)
+{
+ struct bt_bmc *bt_bmc;
+ struct device *dev;
+ int rc;
+
+ dev = &pdev->dev;
+ dev_info(dev, "Found bt bmc device\n");
+
+ bt_bmc = devm_kzalloc(dev, sizeof(*bt_bmc), GFP_KERNEL);
+ if (!bt_bmc)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, bt_bmc);
+
+ bt_bmc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bt_bmc->base))
+ return PTR_ERR(bt_bmc->base);
+
+ mutex_init(&bt_bmc->mutex);
+ init_waitqueue_head(&bt_bmc->queue);
+
+ bt_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
+ bt_bmc->miscdev.name = DEVICE_NAME;
+ bt_bmc->miscdev.fops = &bt_bmc_fops;
+ bt_bmc->miscdev.parent = dev;
+ rc = misc_register(&bt_bmc->miscdev);
+ if (rc) {
+ dev_err(dev, "Unable to register misc device\n");
+ return rc;
+ }
+
+ bt_bmc_config_irq(bt_bmc, pdev);
+
+ if (bt_bmc->irq >= 0) {
+ dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
+ } else {
+ dev_info(dev, "No IRQ; using timer\n");
+ timer_setup(&bt_bmc->poll_timer, poll_timer, 0);
+ bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10);
+ add_timer(&bt_bmc->poll_timer);
+ }
+
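+ /*
+ * Program the host-side I/O address (BT_IO_BASE) and IRQ number
+ * (BT_IRQ) into CR0 and enable the iBT interface.
+ */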
+ writel((BT_IO_BASE << BT_CR0_IO_BASE) |
+ (BT_IRQ << BT_CR0_IRQ) |
+ BT_CR0_EN_CLR_SLV_RDP |
+ BT_CR0_EN_CLR_SLV_WRP |
+ BT_CR0_ENABLE_IBT,
+ bt_bmc->base + BT_CR0);
+
+ clr_b_busy(bt_bmc);
+
+ return 0;
+}
+
+static int bt_bmc_remove(struct platform_device *pdev)
+{
+ struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&bt_bmc->miscdev);
+ if (bt_bmc->irq < 0)
+ del_timer_sync(&bt_bmc->poll_timer);
+ return 0;
+}
+
+static const struct of_device_id bt_bmc_match[] = {
+ { .compatible = "aspeed,ast2400-ibt-bmc" },
+ { .compatible = "aspeed,ast2500-ibt-bmc" },
+ { .compatible = "aspeed,ast2600-ibt-bmc" },
+ { },
+};
+
+static struct platform_driver bt_bmc_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = bt_bmc_match,
+ },
+ .probe = bt_bmc_probe,
+ .remove = bt_bmc_remove,
+};
+
+module_platform_driver(bt_bmc_driver);
+
+MODULE_DEVICE_TABLE(of, bt_bmc_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>");
+MODULE_DESCRIPTION("Linux device interface to the IPMI BT interface");
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
new file mode 100644
index 0000000000..49100845fc
--- /dev/null
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * IPMB driver to receive a request and send a response
+ *
+ * Copyright (C) 2019 Mellanox Technologies, Ltd.
+ *
+ * This was inspired by Brendan Higgins' ipmi-bmc-bt-i2c driver.
+ */
+
+#include <linux/acpi.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#define MAX_MSG_LEN 240
+#define IPMB_REQUEST_LEN_MIN 7
+#define NETFN_RSP_BIT_MASK 0x4
+#define REQUEST_QUEUE_MAX_LEN 256
+
+#define IPMB_MSG_LEN_IDX 0
+#define RQ_SA_8BIT_IDX 1
+#define NETFN_LUN_IDX 2
+
+#define GET_7BIT_ADDR(addr_8bit) (addr_8bit >> 1)
+#define GET_8BIT_ADDR(addr_7bit) ((addr_7bit << 1) & 0xff)
+
+#define IPMB_MSG_PAYLOAD_LEN_MAX (MAX_MSG_LEN - IPMB_REQUEST_LEN_MIN - 1)
+
+#define SMBUS_MSG_HEADER_LENGTH 2
+#define SMBUS_MSG_IDX_OFFSET (SMBUS_MSG_HEADER_LENGTH + 1)
+
+struct ipmb_msg {
+ u8 len;
+ u8 rs_sa;
+ u8 netfn_rs_lun;
+ u8 checksum1;
+ u8 rq_sa;
+ u8 rq_seq_rq_lun;
+ u8 cmd;
+ u8 payload[IPMB_MSG_PAYLOAD_LEN_MAX];
+ /* checksum2 is included in payload */
+} __packed;
+
+struct ipmb_request_elem {
+ struct list_head list;
+ struct ipmb_msg request;
+};
+
+struct ipmb_dev {
+ struct i2c_client *client;
+ struct miscdevice miscdev;
+ struct ipmb_msg request;
+ struct list_head request_queue;
+ atomic_t request_queue_len;
+ size_t msg_idx;
+ spinlock_t lock;
+ wait_queue_head_t wait_queue;
+ struct mutex file_mutex;
+ bool is_i2c_protocol;
+};
+
+static inline struct ipmb_dev *to_ipmb_dev(struct file *file)
+{
+ return container_of(file->private_data, struct ipmb_dev, miscdev);
+}
+
+static ssize_t ipmb_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
+ struct ipmb_request_elem *queue_elem;
+ struct ipmb_msg msg;
+ ssize_t ret = 0;
+
+ memset(&msg, 0, sizeof(msg));
+
+ spin_lock_irq(&ipmb_dev->lock);
+
+ while (list_empty(&ipmb_dev->request_queue)) {
+ spin_unlock_irq(&ipmb_dev->lock);
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(ipmb_dev->wait_queue,
+ !list_empty(&ipmb_dev->request_queue));
+ if (ret)
+ return ret;
+
+ spin_lock_irq(&ipmb_dev->lock);
+ }
+
+ queue_elem = list_first_entry(&ipmb_dev->request_queue,
+ struct ipmb_request_elem, list);
+ memcpy(&msg, &queue_elem->request, sizeof(msg));
+ list_del(&queue_elem->list);
+ kfree(queue_elem);
+ atomic_dec(&ipmb_dev->request_queue_len);
+
+ spin_unlock_irq(&ipmb_dev->lock);
+
+ count = min_t(size_t, count, msg.len + 1);
+ if (copy_to_user(buf, &msg, count))
+ ret = -EFAULT;
+
+ return ret < 0 ? ret : count;
+}
+
+static int ipmb_i2c_write(struct i2c_client *client, u8 *msg, u8 addr)
+{
+ struct i2c_msg i2c_msg;
+
+ /*
+ * subtract 1 byte (rq_sa) from the length of the msg passed to
+ * raw i2c_transfer
+ */
+ i2c_msg.len = msg[IPMB_MSG_LEN_IDX] - 1;
+
+ /* Assign message to buffer except first 2 bytes (length and address) */
+ i2c_msg.buf = msg + 2;
+
+ i2c_msg.addr = addr;
+ i2c_msg.flags = client->flags & I2C_CLIENT_PEC;
+
+ return i2c_transfer(client->adapter, &i2c_msg, 1);
+}
+
+static ssize_t ipmb_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
+ u8 rq_sa, netf_rq_lun, msg_len;
+ struct i2c_client *temp_client;
+ u8 msg[MAX_MSG_LEN];
+ ssize_t ret;
+
+ if (count > sizeof(msg))
+ return -EINVAL;
+
+ if (copy_from_user(&msg, buf, count))
+ return -EFAULT;
+
+ if (count < msg[0])
+ return -EINVAL;
+
+ rq_sa = GET_7BIT_ADDR(msg[RQ_SA_8BIT_IDX]);
+ netf_rq_lun = msg[NETFN_LUN_IDX];
+
+ /* Check i2c block transfer vs smbus */
+ if (ipmb_dev->is_i2c_protocol) {
+ ret = ipmb_i2c_write(ipmb_dev->client, msg, rq_sa);
+ return (ret == 1) ? count : ret;
+ }
+
+ /*
+ * subtract rq_sa and netf_rq_lun from the length of the msg. Fill the
+ * temporary client. Note that its use is an exception for IPMI.
+ */
+ msg_len = msg[IPMB_MSG_LEN_IDX] - SMBUS_MSG_HEADER_LENGTH;
+ temp_client = kmemdup(ipmb_dev->client, sizeof(*temp_client), GFP_KERNEL);
+ if (!temp_client)
+ return -ENOMEM;
+
+ temp_client->addr = rq_sa;
+
+ ret = i2c_smbus_write_block_data(temp_client, netf_rq_lun, msg_len,
+ msg + SMBUS_MSG_IDX_OFFSET);
+ kfree(temp_client);
+
+ return ret < 0 ? ret : count;
+}
+
+static __poll_t ipmb_poll(struct file *file, poll_table *wait)
+{
+ struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
+ __poll_t mask = EPOLLOUT;
+
+ mutex_lock(&ipmb_dev->file_mutex);
+ poll_wait(file, &ipmb_dev->wait_queue, wait);
+
+ if (atomic_read(&ipmb_dev->request_queue_len))
+ mask |= EPOLLIN;
+ mutex_unlock(&ipmb_dev->file_mutex);
+
+ return mask;
+}
+
+static const struct file_operations ipmb_fops = {
+ .owner = THIS_MODULE,
+ .read = ipmb_read,
+ .write = ipmb_write,
+ .poll = ipmb_poll,
+};
+
+/* Called with ipmb_dev->lock held. */
+static void ipmb_handle_request(struct ipmb_dev *ipmb_dev)
+{
+ struct ipmb_request_elem *queue_elem;
+
+ if (atomic_read(&ipmb_dev->request_queue_len) >=
+ REQUEST_QUEUE_MAX_LEN)
+ return;
+
+ queue_elem = kmalloc(sizeof(*queue_elem), GFP_ATOMIC);
+ if (!queue_elem)
+ return;
+
+ memcpy(&queue_elem->request, &ipmb_dev->request,
+ sizeof(struct ipmb_msg));
+ list_add(&queue_elem->list, &ipmb_dev->request_queue);
+ atomic_inc(&ipmb_dev->request_queue_len);
+ wake_up_all(&ipmb_dev->wait_queue);
+}
+
+static u8 ipmb_verify_checksum1(struct ipmb_dev *ipmb_dev, u8 rs_sa)
+{
+ /* The 8 lsb of the sum is 0 when the checksum is valid */
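+ /*
+ * e.g. rs_sa 0x20 with netfn_rs_lun 0x18 has a valid checksum1 of 0xc8,
+ * since 0x20 + 0x18 + 0xc8 == 0x100 and the low byte of that sum is 0.
+ */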
+ return (rs_sa + ipmb_dev->request.netfn_rs_lun +
+ ipmb_dev->request.checksum1);
+}
+
+/*
+ * Verify that the message has a proper IPMB header of at least the
+ * minimum length and a correct checksum byte.
+ */
+static bool is_ipmb_msg(struct ipmb_dev *ipmb_dev, u8 rs_sa)
+{
+ if ((ipmb_dev->msg_idx >= IPMB_REQUEST_LEN_MIN) &&
+ (!ipmb_verify_checksum1(ipmb_dev, rs_sa)))
+ return true;
+
+ return false;
+}
+
+/*
+ * The IPMB protocol only supports I2C Writes so there is no need
+ * to support I2C_SLAVE_READ* events.
+ * This i2c callback function only monitors IPMB request messages
+ * and adds them to a queue, so that they can later be consumed by
+ * userspace through ipmb_read().
+ */
+static int ipmb_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ struct ipmb_dev *ipmb_dev = i2c_get_clientdata(client);
+ u8 *buf = (u8 *)&ipmb_dev->request;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipmb_dev->lock, flags);
+ switch (event) {
+ case I2C_SLAVE_WRITE_REQUESTED:
+ memset(&ipmb_dev->request, 0, sizeof(ipmb_dev->request));
+ ipmb_dev->msg_idx = 0;
+
+ /*
+ * At index 0, ipmb_msg stores the length of msg,
+ * skip it for now.
+ * The len will be populated once the whole
+ * buf is populated.
+ *
+ * The I2C bus driver's responsibility is to pass the
+ * data bytes to the backend driver; it does not
+ * forward the i2c slave address.
+ * Since the first byte in the IPMB message is the
+ * address of the responder, it is the responsibility
+ * of the IPMB driver to format the message properly.
+ * So this driver prepends the address of the responder
+ * to the received i2c data before the request message
+ * is handled in userland.
+ */
+ buf[++ipmb_dev->msg_idx] = GET_8BIT_ADDR(client->addr);
+ break;
+
+ case I2C_SLAVE_WRITE_RECEIVED:
+ if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg) - 1)
+ break;
+
+ buf[++ipmb_dev->msg_idx] = *val;
+ break;
+
+ case I2C_SLAVE_STOP:
+ ipmb_dev->request.len = ipmb_dev->msg_idx;
+ if (is_ipmb_msg(ipmb_dev, GET_8BIT_ADDR(client->addr)))
+ ipmb_handle_request(ipmb_dev);
+ break;
+
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&ipmb_dev->lock, flags);
+
+ return 0;
+}
+
+static int ipmb_probe(struct i2c_client *client)
+{
+ struct ipmb_dev *ipmb_dev;
+ int ret;
+
+ ipmb_dev = devm_kzalloc(&client->dev, sizeof(*ipmb_dev),
+ GFP_KERNEL);
+ if (!ipmb_dev)
+ return -ENOMEM;
+
+ spin_lock_init(&ipmb_dev->lock);
+ init_waitqueue_head(&ipmb_dev->wait_queue);
+ atomic_set(&ipmb_dev->request_queue_len, 0);
+ INIT_LIST_HEAD(&ipmb_dev->request_queue);
+
+ mutex_init(&ipmb_dev->file_mutex);
+
+ ipmb_dev->miscdev.minor = MISC_DYNAMIC_MINOR;
+
+ ipmb_dev->miscdev.name = devm_kasprintf(&client->dev, GFP_KERNEL,
+ "%s%d", "ipmb-",
+ client->adapter->nr);
+ ipmb_dev->miscdev.fops = &ipmb_fops;
+ ipmb_dev->miscdev.parent = &client->dev;
+ ret = misc_register(&ipmb_dev->miscdev);
+ if (ret)
+ return ret;
+
+ ipmb_dev->is_i2c_protocol
+ = device_property_read_bool(&client->dev, "i2c-protocol");
+
+ ipmb_dev->client = client;
+ i2c_set_clientdata(client, ipmb_dev);
+ ret = i2c_slave_register(client, ipmb_slave_cb);
+ if (ret) {
+ misc_deregister(&ipmb_dev->miscdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ipmb_remove(struct i2c_client *client)
+{
+ struct ipmb_dev *ipmb_dev = i2c_get_clientdata(client);
+
+ i2c_slave_unregister(client);
+ misc_deregister(&ipmb_dev->miscdev);
+}
+
+static const struct i2c_device_id ipmb_id[] = {
+ { "ipmb-dev", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ipmb_id);
+
+static const struct acpi_device_id acpi_ipmb_id[] = {
+ { "IPMB0001", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, acpi_ipmb_id);
+
+static struct i2c_driver ipmb_driver = {
+ .driver = {
+ .name = "ipmb-dev",
+ .acpi_match_table = ACPI_PTR(acpi_ipmb_id),
+ },
+ .probe = ipmb_probe,
+ .remove = ipmb_remove,
+ .id_table = ipmb_id,
+};
+module_i2c_driver(ipmb_driver);
+
+MODULE_AUTHOR("Mellanox Technologies");
+MODULE_DESCRIPTION("IPMB driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
new file mode 100644
index 0000000000..f41f78972b
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_bt_sm.c
+ *
+ * The state machine for an Open IPMI BT sub-driver under ipmi_si.c, part
+ * of the driver architecture at http://sourceforge.net/projects/openipmi
+ *
+ * Author: Rocky Craig <first.last@hp.com>
+ */
+
+#define DEBUG /* So dev_dbg() is always available. */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi_msgdefs.h> /* for completion codes */
+#include "ipmi_si_sm.h"
+
+#define BT_DEBUG_OFF 0 /* Used in production */
+#define BT_DEBUG_ENABLE 1 /* Generic messages */
+#define BT_DEBUG_MSG 2 /* Prints all request/response buffers */
+#define BT_DEBUG_STATES 4 /* Verbose look at state changes */
+/*
+ * BT_DEBUG_OFF must be zero to correspond to the default uninitialized
+ * value
+ */
+
+static int bt_debug; /* 0 == BT_DEBUG_OFF */
+
+module_param(bt_debug, int, 0644);
+MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
+
+/*
+ * Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
+ * and 64 byte buffers. However, one HP implementation wants 255 bytes of
+ * buffer (with a documented message of 160 bytes) so go for the max.
+ * Since the Open IPMI architecture is single-message oriented at this
+ * stage, the queue depth of BT is of no concern.
+ */
+
+#define BT_NORMAL_TIMEOUT 5 /* seconds */
+#define BT_NORMAL_RETRY_LIMIT 2
+#define BT_RESET_DELAY 6 /* seconds after warm reset */
+
+/*
+ * States are written in chronological order and usually cover
+ * multiple rows of the state table discussion in the IPMI spec.
+ */
+
+enum bt_states {
+ BT_STATE_IDLE = 0, /* Order is critical in this list */
+ BT_STATE_XACTION_START,
+ BT_STATE_WRITE_BYTES,
+ BT_STATE_WRITE_CONSUME,
+ BT_STATE_READ_WAIT,
+ BT_STATE_CLEAR_B2H,
+ BT_STATE_READ_BYTES,
+ BT_STATE_RESET1, /* These must come last */
+ BT_STATE_RESET2,
+ BT_STATE_RESET3,
+ BT_STATE_RESTART,
+ BT_STATE_PRINTME,
+ BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */
+};
+
+/*
+ * Macros seen at the end of state "case" blocks. They help with legibility
+ * and debugging.
+ */
+
+#define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; }
+
+#define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; }
+
+struct si_sm_data {
+ enum bt_states state;
+ unsigned char seq; /* BT sequence number */
+ struct si_sm_io *io;
+ unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
+ int write_count;
+ unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
+ int read_count;
+ int truncated;
+ long timeout; /* microseconds countdown */
+ int error_retries; /* end of "common" fields */
+ int nonzero_status; /* hung BMCs stay all 0 */
+ enum bt_states complete; /* to divert the state machine */
+ long BT_CAP_req2rsp;
+ int BT_CAP_retries; /* Recommended retries */
+};
+
+#define BT_CLR_WR_PTR 0x01 /* See IPMI 1.5 table 11.6.4 */
+#define BT_CLR_RD_PTR 0x02
+#define BT_H2B_ATN 0x04
+#define BT_B2H_ATN 0x08
+#define BT_SMS_ATN 0x10
+#define BT_OEM0 0x20
+#define BT_H_BUSY 0x40
+#define BT_B_BUSY 0x80
+
+/*
+ * Some bits are toggled on each write: write once to set it, once
+ * more to clear it; writing a zero does nothing. To absolutely
+ * clear it, check its state and write if set. This avoids the "get
+ * current then use as mask" scheme to modify one bit. Note that the
+ * variable "bt" is hardcoded into these macros.
+ */
+
+#define BT_STATUS bt->io->inputb(bt->io, 0)
+#define BT_CONTROL(x) bt->io->outputb(bt->io, 0, x)
+
+#define BMC2HOST bt->io->inputb(bt->io, 1)
+#define HOST2BMC(x) bt->io->outputb(bt->io, 1, x)
+
+#define BT_INTMASK_R bt->io->inputb(bt->io, 2)
+#define BT_INTMASK_W(x) bt->io->outputb(bt->io, 2, x)
+
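+/*
+ * A sketch of the toggle idiom described above, used throughout this file:
+ * to make sure H_BUSY ends up cleared, only write the bit if it is
+ * currently set (writing while it is clear would set it instead):
+ *
+ *	if (BT_STATUS & BT_H_BUSY)
+ *		BT_CONTROL(BT_H_BUSY);
+ */
+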
+/*
+ * Convenience routines for debugging. These are not multi-open safe!
+ * Note the macros have hardcoded variables in them.
+ */
+
+static char *state2txt(unsigned char state)
+{
+ switch (state) {
+ case BT_STATE_IDLE: return("IDLE");
+ case BT_STATE_XACTION_START: return("XACTION");
+ case BT_STATE_WRITE_BYTES: return("WR_BYTES");
+ case BT_STATE_WRITE_CONSUME: return("WR_CONSUME");
+ case BT_STATE_READ_WAIT: return("RD_WAIT");
+ case BT_STATE_CLEAR_B2H: return("CLEAR_B2H");
+ case BT_STATE_READ_BYTES: return("RD_BYTES");
+ case BT_STATE_RESET1: return("RESET1");
+ case BT_STATE_RESET2: return("RESET2");
+ case BT_STATE_RESET3: return("RESET3");
+ case BT_STATE_RESTART: return("RESTART");
+ case BT_STATE_LONG_BUSY: return("LONG_BUSY");
+ }
+ return("BAD STATE");
+}
+#define STATE2TXT state2txt(bt->state)
+
+static char *status2txt(unsigned char status)
+{
+ /*
+ * This cannot be called by two threads at the same time and
+ * the buffer is always consumed immediately, so the static is
+ * safe to use.
+ */
+ static char buf[40];
+
+ strcpy(buf, "[ ");
+ if (status & BT_B_BUSY)
+ strcat(buf, "B_BUSY ");
+ if (status & BT_H_BUSY)
+ strcat(buf, "H_BUSY ");
+ if (status & BT_OEM0)
+ strcat(buf, "OEM0 ");
+ if (status & BT_SMS_ATN)
+ strcat(buf, "SMS ");
+ if (status & BT_B2H_ATN)
+ strcat(buf, "B2H ");
+ if (status & BT_H2B_ATN)
+ strcat(buf, "H2B ");
+ strcat(buf, "]");
+ return buf;
+}
+#define STATUS2TXT status2txt(status)
+
+/* called externally at insmod time, and internally on cleanup */
+
+static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
+{
+ memset(bt, 0, sizeof(struct si_sm_data));
+ if (bt->io != io) {
+ /* external: one-time only things */
+ bt->io = io;
+ bt->seq = 0;
+ }
+ bt->state = BT_STATE_IDLE; /* start here */
+ bt->complete = BT_STATE_IDLE; /* end here */
+ bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC;
+ bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
+ return 3; /* We claim 3 bytes of space; ought to check SPMI table */
+}
+
+/* Jam a completion code (probably an error) into a response */
+
+static void force_result(struct si_sm_data *bt, unsigned char completion_code)
+{
+ bt->read_data[0] = 4; /* # following bytes */
+ bt->read_data[1] = bt->write_data[1] | 4; /* Odd NetFn/LUN */
+ bt->read_data[2] = bt->write_data[2]; /* seq (ignored) */
+ bt->read_data[3] = bt->write_data[3]; /* Command */
+ bt->read_data[4] = completion_code;
+ bt->read_count = 5;
+}
+
+/* The upper state machine starts here */
+
+static int bt_start_transaction(struct si_sm_data *bt,
+ unsigned char *data,
+ unsigned int size)
+{
+ unsigned int i;
+
+ if (size < 2)
+ return IPMI_REQ_LEN_INVALID_ERR;
+ if (size > IPMI_MAX_MSG_LENGTH)
+ return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+ if (bt->state == BT_STATE_LONG_BUSY)
+ return IPMI_NODE_BUSY_ERR;
+
+ if (bt->state != BT_STATE_IDLE) {
+ dev_warn(bt->io->dev, "BT in invalid state %d\n", bt->state);
+ return IPMI_NOT_IN_MY_STATE_ERR;
+ }
+
+ if (bt_debug & BT_DEBUG_MSG) {
+ dev_dbg(bt->io->dev, "+++++++++++++++++ New command\n");
+ dev_dbg(bt->io->dev, "NetFn/LUN CMD [%d data]:", size - 2);
+ for (i = 0; i < size; i ++)
+ pr_cont(" %02x", data[i]);
+ pr_cont("\n");
+ }
+ bt->write_data[0] = size + 1; /* all data plus seq byte */
+ bt->write_data[1] = *data; /* NetFn/LUN */
+ bt->write_data[2] = bt->seq++;
+ memcpy(bt->write_data + 3, data + 1, size - 1);
+ bt->write_count = size + 2;
+ bt->error_retries = 0;
+ bt->nonzero_status = 0;
+ bt->truncated = 0;
+ bt->state = BT_STATE_XACTION_START;
+ bt->timeout = bt->BT_CAP_req2rsp;
+ force_result(bt, IPMI_ERR_UNSPECIFIED);
+ return 0;
+}
+
+/*
+ * After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE
+ * it calls this. Strip out the length and seq bytes.
+ */
+
+static int bt_get_result(struct si_sm_data *bt,
+ unsigned char *data,
+ unsigned int length)
+{
+ int i, msg_len;
+
+ msg_len = bt->read_count - 2; /* account for length & seq */
+ if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) {
+ force_result(bt, IPMI_ERR_UNSPECIFIED);
+ msg_len = 3;
+ }
+ data[0] = bt->read_data[1];
+ data[1] = bt->read_data[3];
+ if (length < msg_len || bt->truncated) {
+ data[2] = IPMI_ERR_MSG_TRUNCATED;
+ msg_len = 3;
+ } else
+ memcpy(data + 2, bt->read_data + 4, msg_len - 2);
+
+ if (bt_debug & BT_DEBUG_MSG) {
+ dev_dbg(bt->io->dev, "result %d bytes:", msg_len);
+ for (i = 0; i < msg_len; i++)
+ pr_cont(" %02x", data[i]);
+ pr_cont("\n");
+ }
+ return msg_len;
+}
+
+/* This bit's functionality is optional */
+#define BT_BMC_HWRST 0x80
+
+static void reset_flags(struct si_sm_data *bt)
+{
+ if (bt_debug)
+ dev_dbg(bt->io->dev, "flag reset %s\n", status2txt(BT_STATUS));
+ if (BT_STATUS & BT_H_BUSY)
+ BT_CONTROL(BT_H_BUSY); /* force clear */
+ BT_CONTROL(BT_CLR_WR_PTR); /* always reset */
+ BT_CONTROL(BT_SMS_ATN); /* always clear */
+ BT_INTMASK_W(BT_BMC_HWRST);
+}
+
+/*
+ * Get rid of an unwanted/stale response. This should only be needed for
+ * BMCs that support multiple outstanding requests.
+ */
+
+static void drain_BMC2HOST(struct si_sm_data *bt)
+{
+ int i, size;
+
+ if (!(BT_STATUS & BT_B2H_ATN)) /* Not signalling a response */
+ return;
+
+ BT_CONTROL(BT_H_BUSY); /* now set */
+ BT_CONTROL(BT_B2H_ATN); /* always clear */
+ BT_STATUS; /* pause */
+ BT_CONTROL(BT_B2H_ATN); /* some BMCs are stubborn */
+ BT_CONTROL(BT_CLR_RD_PTR); /* always reset */
+ if (bt_debug)
+ dev_dbg(bt->io->dev, "stale response %s; ",
+ status2txt(BT_STATUS));
+ size = BMC2HOST;
+ for (i = 0; i < size ; i++)
+ BMC2HOST;
+ BT_CONTROL(BT_H_BUSY); /* now clear */
+ if (bt_debug)
+ pr_cont("drained %d bytes\n", size + 1);
+}
+
+static inline void write_all_bytes(struct si_sm_data *bt)
+{
+ int i;
+
+ if (bt_debug & BT_DEBUG_MSG) {
+ dev_dbg(bt->io->dev, "write %d bytes seq=0x%02X",
+ bt->write_count, bt->seq);
+ for (i = 0; i < bt->write_count; i++)
+ pr_cont(" %02x", bt->write_data[i]);
+ pr_cont("\n");
+ }
+ for (i = 0; i < bt->write_count; i++)
+ HOST2BMC(bt->write_data[i]);
+}
+
+static inline int read_all_bytes(struct si_sm_data *bt)
+{
+ unsigned int i;
+
+ /*
+ * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
+ * Keep layout of first four bytes aligned with write_data[]
+ */
+
+ bt->read_data[0] = BMC2HOST;
+ bt->read_count = bt->read_data[0];
+
+ if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) {
+ if (bt_debug & BT_DEBUG_MSG)
+ dev_dbg(bt->io->dev,
+ "bad raw rsp len=%d\n", bt->read_count);
+ bt->truncated = 1;
+ return 1; /* let next XACTION START clean it up */
+ }
+ for (i = 1; i <= bt->read_count; i++)
+ bt->read_data[i] = BMC2HOST;
+ bt->read_count++; /* Account internally for length byte */
+
+ if (bt_debug & BT_DEBUG_MSG) {
+ int max = bt->read_count;
+
+ dev_dbg(bt->io->dev,
+ "got %d bytes seq=0x%02X", max, bt->read_data[2]);
+ if (max > 16)
+ max = 16;
+ for (i = 0; i < max; i++)
+ pr_cont(" %02x", bt->read_data[i]);
+ pr_cont("%s\n", bt->read_count == max ? "" : " ...");
+ }
+
+ /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */
+ if ((bt->read_data[3] == bt->write_data[3]) &&
+ (bt->read_data[2] == bt->write_data[2]) &&
+ ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8)))
+ return 1;
+
+ if (bt_debug & BT_DEBUG_MSG)
+ dev_dbg(bt->io->dev,
+ "IPMI BT: bad packet: want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n",
+ bt->write_data[1] | 0x04, bt->write_data[2],
+ bt->write_data[3],
+ bt->read_data[1], bt->read_data[2], bt->read_data[3]);
+ return 0;
+}
+
+/* Restart if retries are left, or return an error completion code */
+
+static enum si_sm_result error_recovery(struct si_sm_data *bt,
+ unsigned char status,
+ unsigned char cCode)
+{
+ char *reason;
+
+ bt->timeout = bt->BT_CAP_req2rsp;
+
+ switch (cCode) {
+ case IPMI_TIMEOUT_ERR:
+ reason = "timeout";
+ break;
+ default:
+ reason = "internal error";
+ break;
+ }
+
+ dev_warn(bt->io->dev, "IPMI BT: %s in %s %s ", /* open-ended line */
+ reason, STATE2TXT, STATUS2TXT);
+
+ /*
+ * Per the IPMI spec, retries are based on the sequence number
+ * known only to this module, so manage a restart here.
+ */
+ (bt->error_retries)++;
+ if (bt->error_retries < bt->BT_CAP_retries) {
+ pr_cont("%d retries left\n",
+ bt->BT_CAP_retries - bt->error_retries);
+ bt->state = BT_STATE_RESTART;
+ return SI_SM_CALL_WITHOUT_DELAY;
+ }
+
+ dev_warn(bt->io->dev, "failed %d retries, sending error response\n",
+ bt->BT_CAP_retries);
+ if (!bt->nonzero_status)
+ dev_err(bt->io->dev, "stuck, try power cycle\n");
+
+ /* this is most likely during insmod */
+ else if (bt->seq <= (unsigned char)(bt->BT_CAP_retries & 0xFF)) {
+ dev_warn(bt->io->dev, "BT reset (takes 5 secs)\n");
+ bt->state = BT_STATE_RESET1;
+ return SI_SM_CALL_WITHOUT_DELAY;
+ }
+
+ /*
+ * Concoct a useful error message, set up the next state, and
+ * be done with this sequence.
+ */
+
+ bt->state = BT_STATE_IDLE;
+ switch (cCode) {
+ case IPMI_TIMEOUT_ERR:
+ if (status & BT_B_BUSY) {
+ cCode = IPMI_NODE_BUSY_ERR;
+ bt->state = BT_STATE_LONG_BUSY;
+ }
+ break;
+ default:
+ break;
+ }
+ force_result(bt, cCode);
+ return SI_SM_TRANSACTION_COMPLETE;
+}
+
+/* Check status and (usually) take action and change this state machine. */
+
+static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
+{
+ unsigned char status;
+ static enum bt_states last_printed = BT_STATE_PRINTME;
+ int i;
+
+ status = BT_STATUS;
+ bt->nonzero_status |= status;
+ if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) {
+ dev_dbg(bt->io->dev, "BT: %s %s TO=%ld - %ld\n",
+ STATE2TXT,
+ STATUS2TXT,
+ bt->timeout,
+ time);
+ last_printed = bt->state;
+ }
+
+ /*
+ * Commands that time out may still (eventually) provide a response.
+ * This stale response will get in the way of a new response so remove
+ * it if possible (hopefully during IDLE). Even if it comes up later
+ * it will be rejected by its (now-forgotten) seq number.
+ */
+
+ if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
+ drain_BMC2HOST(bt);
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ }
+
+ if ((bt->state != BT_STATE_IDLE) &&
+ (bt->state < BT_STATE_PRINTME)) {
+ /* check timeout */
+ bt->timeout -= time;
+ if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
+ return error_recovery(bt,
+ status,
+ IPMI_TIMEOUT_ERR);
+ }
+
+ switch (bt->state) {
+
+ /*
+ * Idle state first checks for asynchronous messages from another
+ * channel, then does some opportunistic housekeeping.
+ */
+
+ case BT_STATE_IDLE:
+ if (status & BT_SMS_ATN) {
+ BT_CONTROL(BT_SMS_ATN); /* clear it */
+ return SI_SM_ATTN;
+ }
+
+ if (status & BT_H_BUSY) /* clear a leftover H_BUSY */
+ BT_CONTROL(BT_H_BUSY);
+
+ BT_SI_SM_RETURN(SI_SM_IDLE);
+
+ case BT_STATE_XACTION_START:
+ if (status & (BT_B_BUSY | BT_H2B_ATN))
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ if (BT_STATUS & BT_H_BUSY)
+ BT_CONTROL(BT_H_BUSY); /* force clear */
+ BT_STATE_CHANGE(BT_STATE_WRITE_BYTES,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ case BT_STATE_WRITE_BYTES:
+ if (status & BT_H_BUSY)
+ BT_CONTROL(BT_H_BUSY); /* clear */
+ BT_CONTROL(BT_CLR_WR_PTR);
+ write_all_bytes(bt);
+ BT_CONTROL(BT_H2B_ATN); /* can clear too fast to catch */
+ BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ case BT_STATE_WRITE_CONSUME:
+ if (status & (BT_B_BUSY | BT_H2B_ATN))
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ /* Spinning hard can suppress B2H_ATN and force a timeout */
+
+ case BT_STATE_READ_WAIT:
+ if (!(status & BT_B2H_ATN))
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ BT_CONTROL(BT_H_BUSY); /* set */
+
+ /*
+ * Uncached, ordered writes should just proceed serially but
+ * some BMCs don't clear B2H_ATN with one hit. Fast-path a
+ * workaround without too much penalty to the general case.
+ */
+
+ BT_CONTROL(BT_B2H_ATN); /* clear it to ACK the BMC */
+ BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ case BT_STATE_CLEAR_B2H:
+ if (status & BT_B2H_ATN) {
+ /* keep hitting it */
+ BT_CONTROL(BT_B2H_ATN);
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ }
+ BT_STATE_CHANGE(BT_STATE_READ_BYTES,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ case BT_STATE_READ_BYTES:
+ if (!(status & BT_H_BUSY))
+ /* check in case of retry */
+ BT_CONTROL(BT_H_BUSY);
+ BT_CONTROL(BT_CLR_RD_PTR); /* start of BMC2HOST buffer */
+ i = read_all_bytes(bt); /* true == packet seq match */
+ BT_CONTROL(BT_H_BUSY); /* NOW clear */
+ if (!i) /* Not my message */
+ BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+ SI_SM_CALL_WITHOUT_DELAY);
+ bt->state = bt->complete;
+ return bt->state == BT_STATE_IDLE ? /* where to next? */
+ SI_SM_TRANSACTION_COMPLETE : /* normal */
+ SI_SM_CALL_WITHOUT_DELAY; /* Startup magic */
+
+ case BT_STATE_LONG_BUSY: /* For example: after FW update */
+ if (!(status & BT_B_BUSY)) {
+ reset_flags(bt); /* next state is now IDLE */
+ bt_init_data(bt, bt->io);
+ }
+ return SI_SM_CALL_WITH_DELAY; /* No repeat printing */
+
+ case BT_STATE_RESET1:
+ reset_flags(bt);
+ drain_BMC2HOST(bt);
+ BT_STATE_CHANGE(BT_STATE_RESET2,
+ SI_SM_CALL_WITH_DELAY);
+
+ case BT_STATE_RESET2: /* Send a soft reset */
+ BT_CONTROL(BT_CLR_WR_PTR);
+ HOST2BMC(3); /* number of bytes following */
+ HOST2BMC(0x18); /* NetFn/LUN == Application, LUN 0 */
+ HOST2BMC(42); /* Sequence number */
+ HOST2BMC(3); /* Cmd == Soft reset */
+ BT_CONTROL(BT_H2B_ATN);
+ bt->timeout = BT_RESET_DELAY * USEC_PER_SEC;
+ BT_STATE_CHANGE(BT_STATE_RESET3,
+ SI_SM_CALL_WITH_DELAY);
+
+ case BT_STATE_RESET3: /* Hold off everything for a bit */
+ if (bt->timeout > 0)
+ return SI_SM_CALL_WITH_DELAY;
+ drain_BMC2HOST(bt);
+ BT_STATE_CHANGE(BT_STATE_RESTART,
+ SI_SM_CALL_WITH_DELAY);
+
+ case BT_STATE_RESTART: /* don't reset retries or seq! */
+ bt->read_count = 0;
+ bt->nonzero_status = 0;
+ bt->timeout = bt->BT_CAP_req2rsp;
+ BT_STATE_CHANGE(BT_STATE_XACTION_START,
+ SI_SM_CALL_WITH_DELAY);
+
+ default: /* should never occur */
+ return error_recovery(bt,
+ status,
+ IPMI_ERR_UNSPECIFIED);
+ }
+ return SI_SM_CALL_WITH_DELAY;
+}
+
+static int bt_detect(struct si_sm_data *bt)
+{
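+ /* NetFn/LUN 0x18 (App request) + cmd 0x36: "Get BT Interface Capabilities" */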
+ unsigned char GetBT_CAP[] = { 0x18, 0x36 };
+ unsigned char BT_CAP[8];
+ enum si_sm_result smi_result;
+ int rv;
+
+ /*
+ * It's impossible for the BT status and interrupt registers to be
+ * all 1's, (assuming a properly functioning, self-initialized BMC)
+ * but that's what you get from reading a bogus address, so we
+ * test that first. The calling routine uses negative logic.
+ */
+
+ if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
+ return 1;
+ reset_flags(bt);
+
+ /*
+ * Try getting the BT capabilities here.
+ */
+ rv = bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
+ if (rv) {
+ dev_warn(bt->io->dev,
+ "Can't start capabilities transaction: %d\n", rv);
+ goto out_no_bt_cap;
+ }
+
+ smi_result = SI_SM_CALL_WITHOUT_DELAY;
+ for (;;) {
+ if (smi_result == SI_SM_CALL_WITH_DELAY ||
+ smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
+ schedule_timeout_uninterruptible(1);
+ smi_result = bt_event(bt, jiffies_to_usecs(1));
+ } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+ smi_result = bt_event(bt, 0);
+ } else
+ break;
+ }
+
+ rv = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
+ bt_init_data(bt, bt->io);
+ if (rv < 8) {
+ dev_warn(bt->io->dev, "bt cap response too short: %d\n", rv);
+ goto out_no_bt_cap;
+ }
+
+ if (BT_CAP[2]) {
+ dev_warn(bt->io->dev, "Error fetching bt cap: %x\n", BT_CAP[2]);
+out_no_bt_cap:
+ dev_warn(bt->io->dev, "using default values\n");
+ } else {
+ bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
+ bt->BT_CAP_retries = BT_CAP[7];
+ }
+
+ dev_info(bt->io->dev, "req2rsp=%ld secs retries=%d\n",
+ bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
+
+ return 0;
+}
+
+static void bt_cleanup(struct si_sm_data *bt)
+{
+}
+
+static int bt_size(void)
+{
+ return sizeof(struct si_sm_data);
+}
+
+const struct si_sm_handlers bt_smi_handlers = {
+ .init_data = bt_init_data,
+ .start_transaction = bt_start_transaction,
+ .get_result = bt_get_result,
+ .event = bt_event,
+ .detect = bt_detect,
+ .cleanup = bt_cleanup,
+ .size = bt_size,
+};
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
new file mode 100644
index 0000000000..332082e02e
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -0,0 +1,910 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_devintf.c
+ *
+ * Linux device interface for the IPMI message handler.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/ipmi.h>
+#include <linux/mutex.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/compat.h>
+
+struct ipmi_file_private
+{
+ struct ipmi_user *user;
+ spinlock_t recv_msg_lock;
+ struct list_head recv_msgs;
+ struct fasync_struct *fasync_queue;
+ wait_queue_head_t wait;
+ struct mutex recv_mutex;
+ int default_retries;
+ unsigned int default_retry_time_ms;
+};
+
+static void file_receive_handler(struct ipmi_recv_msg *msg,
+ void *handler_data)
+{
+ struct ipmi_file_private *priv = handler_data;
+ int was_empty;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->recv_msg_lock, flags);
+ was_empty = list_empty(&priv->recv_msgs);
+ list_add_tail(&msg->link, &priv->recv_msgs);
+ spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+
+ if (was_empty) {
+ wake_up_interruptible(&priv->wait);
+ kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
+ }
+}
+
+static __poll_t ipmi_poll(struct file *file, poll_table *wait)
+{
+ struct ipmi_file_private *priv = file->private_data;
+ __poll_t mask = 0;
+ unsigned long flags;
+
+ poll_wait(file, &priv->wait, wait);
+
+ spin_lock_irqsave(&priv->recv_msg_lock, flags);
+
+ if (!list_empty(&priv->recv_msgs))
+ mask |= (EPOLLIN | EPOLLRDNORM);
+
+ spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+
+ return mask;
+}
+
+static int ipmi_fasync(int fd, struct file *file, int on)
+{
+ struct ipmi_file_private *priv = file->private_data;
+
+ return fasync_helper(fd, file, on, &priv->fasync_queue);
+}
+
+static const struct ipmi_user_hndl ipmi_hndlrs =
+{
+ .ipmi_recv_hndl = file_receive_handler,
+};
+
+static int ipmi_open(struct inode *inode, struct file *file)
+{
+ int if_num = iminor(inode);
+ int rv;
+ struct ipmi_file_private *priv;
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ rv = ipmi_create_user(if_num,
+ &ipmi_hndlrs,
+ priv,
+ &priv->user);
+ if (rv) {
+ kfree(priv);
+ goto out;
+ }
+
+ file->private_data = priv;
+
+ spin_lock_init(&priv->recv_msg_lock);
+ INIT_LIST_HEAD(&priv->recv_msgs);
+ init_waitqueue_head(&priv->wait);
+ priv->fasync_queue = NULL;
+ mutex_init(&priv->recv_mutex);
+
+ /* Use the low-level defaults. */
+ priv->default_retries = -1;
+ priv->default_retry_time_ms = 0;
+
+out:
+ return rv;
+}
+
+static int ipmi_release(struct inode *inode, struct file *file)
+{
+ struct ipmi_file_private *priv = file->private_data;
+ int rv;
+ struct ipmi_recv_msg *msg, *next;
+
+ rv = ipmi_destroy_user(priv->user);
+ if (rv)
+ return rv;
+
+ list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
+ ipmi_free_recv_msg(msg);
+
+ kfree(priv);
+
+ return 0;
+}
+
+static int handle_send_req(struct ipmi_user *user,
+ struct ipmi_req *req,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ int rv;
+ struct ipmi_addr addr;
+ struct kernel_ipmi_msg msg;
+
+ if (req->addr_len > sizeof(struct ipmi_addr))
+ return -EINVAL;
+
+ if (copy_from_user(&addr, req->addr, req->addr_len))
+ return -EFAULT;
+
+ msg.netfn = req->msg.netfn;
+ msg.cmd = req->msg.cmd;
+ msg.data_len = req->msg.data_len;
+ msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!msg.data)
+ return -ENOMEM;
+
+ /* From here on we cannot return directly; error exits must jump
+ to "out" so msg.data gets freed. */
+
+ rv = ipmi_validate_addr(&addr, req->addr_len);
+ if (rv)
+ goto out;
+
+ if (req->msg.data != NULL) {
+ if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
+ rv = -EMSGSIZE;
+ goto out;
+ }
+
+ if (copy_from_user(msg.data,
+ req->msg.data,
+ req->msg.data_len)) {
+ rv = -EFAULT;
+ goto out;
+ }
+ } else {
+ msg.data_len = 0;
+ }
+
+ rv = ipmi_request_settime(user,
+ &addr,
+ req->msgid,
+ &msg,
+ NULL,
+ 0,
+ retries,
+ retry_time_ms);
+ out:
+ kfree(msg.data);
+ return rv;
+}
+
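+/*
+ * Pull one message off the receive queue and copy it to userspace. With
+ * "trunc" set, an oversized payload is cut down to fit and -EMSGSIZE is
+ * returned after the data is delivered; otherwise the message is put back
+ * on the queue and the call fails.
+ */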
+static int handle_recv(struct ipmi_file_private *priv,
+ bool trunc, struct ipmi_recv *rsp,
+ int (*copyout)(struct ipmi_recv *, void __user *),
+ void __user *to)
+{
+ int addr_len;
+ struct list_head *entry;
+ struct ipmi_recv_msg *msg;
+ unsigned long flags;
+ int rv = 0, rv2 = 0;
+
+ /* We claim a mutex because we don't want two
+ users getting something from the queue at a time.
+ Since we have to release the spinlock before we can
+ copy the data to the user, it's possible another
+ user will grab something from the queue, too. Then
+ the messages might get out of order if something
+ fails and the message gets put back onto the
+ queue. This mutex prevents that problem. */
+ mutex_lock(&priv->recv_mutex);
+
+ /* Grab the message off the list. */
+ spin_lock_irqsave(&priv->recv_msg_lock, flags);
+ if (list_empty(&(priv->recv_msgs))) {
+ spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+ rv = -EAGAIN;
+ goto recv_err;
+ }
+ entry = priv->recv_msgs.next;
+ msg = list_entry(entry, struct ipmi_recv_msg, link);
+ list_del(entry);
+ spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+
+ addr_len = ipmi_addr_length(msg->addr.addr_type);
+ if (rsp->addr_len < addr_len) {
+ rv = -EINVAL;
+ goto recv_putback_on_err;
+ }
+
+ if (copy_to_user(rsp->addr, &msg->addr, addr_len)) {
+ rv = -EFAULT;
+ goto recv_putback_on_err;
+ }
+ rsp->addr_len = addr_len;
+
+ rsp->recv_type = msg->recv_type;
+ rsp->msgid = msg->msgid;
+ rsp->msg.netfn = msg->msg.netfn;
+ rsp->msg.cmd = msg->msg.cmd;
+
+ if (msg->msg.data_len > 0) {
+ if (rsp->msg.data_len < msg->msg.data_len) {
+ if (trunc) {
+ rv2 = -EMSGSIZE;
+ msg->msg.data_len = rsp->msg.data_len;
+ } else {
+ rv = -EMSGSIZE;
+ goto recv_putback_on_err;
+ }
+ }
+
+ if (copy_to_user(rsp->msg.data,
+ msg->msg.data,
+ msg->msg.data_len)) {
+ rv = -EFAULT;
+ goto recv_putback_on_err;
+ }
+ rsp->msg.data_len = msg->msg.data_len;
+ } else {
+ rsp->msg.data_len = 0;
+ }
+
+ rv = copyout(rsp, to);
+ if (rv)
+ goto recv_putback_on_err;
+
+ mutex_unlock(&priv->recv_mutex);
+ ipmi_free_recv_msg(msg);
+ return rv2;
+
+recv_putback_on_err:
+ /* If we got an error, put the message back onto
+ the head of the queue. */
+ spin_lock_irqsave(&priv->recv_msg_lock, flags);
+ list_add(entry, &priv->recv_msgs);
+ spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+recv_err:
+ mutex_unlock(&priv->recv_mutex);
+ return rv;
+}
+
+static int copyout_recv(struct ipmi_recv *rsp, void __user *to)
+{
+ return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ? -EFAULT : 0;
+}
+
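+/*
+ * A minimal user-space usage sketch (illustrative; the device node name
+ * depends on udev): open /dev/ipmi0, fill a struct ipmi_req with a valid
+ * address, netfn/cmd and data, issue IPMICTL_SEND_COMMAND, poll() the fd
+ * until it is readable, then fetch the reply with IPMICTL_RECEIVE_MSG_TRUNC.
+ */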
+static long ipmi_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long data)
+{
+ int rv = -EINVAL;
+ struct ipmi_file_private *priv = file->private_data;
+ void __user *arg = (void __user *)data;
+
+ switch (cmd)
+ {
+ case IPMICTL_SEND_COMMAND:
+ {
+ struct ipmi_req req;
+ int retries;
+ unsigned int retry_time_ms;
+
+ if (copy_from_user(&req, arg, sizeof(req))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ mutex_lock(&priv->recv_mutex);
+ retries = priv->default_retries;
+ retry_time_ms = priv->default_retry_time_ms;
+ mutex_unlock(&priv->recv_mutex);
+
+ rv = handle_send_req(priv->user, &req, retries, retry_time_ms);
+ break;
+ }
+
+ case IPMICTL_SEND_COMMAND_SETTIME:
+ {
+ struct ipmi_req_settime req;
+
+ if (copy_from_user(&req, arg, sizeof(req))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = handle_send_req(priv->user,
+ &req.req,
+ req.retries,
+ req.retry_time_ms);
+ break;
+ }
+
+ case IPMICTL_RECEIVE_MSG:
+ case IPMICTL_RECEIVE_MSG_TRUNC:
+ {
+ struct ipmi_recv rsp;
+
+ if (copy_from_user(&rsp, arg, sizeof(rsp)))
+ rv = -EFAULT;
+ else
+ rv = handle_recv(priv, cmd == IPMICTL_RECEIVE_MSG_TRUNC,
+ &rsp, copyout_recv, arg);
+ break;
+ }
+
+ case IPMICTL_REGISTER_FOR_CMD:
+ {
+ struct ipmi_cmdspec val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
+ IPMI_CHAN_ALL);
+ break;
+ }
+
+ case IPMICTL_UNREGISTER_FOR_CMD:
+ {
+ struct ipmi_cmdspec val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
+ IPMI_CHAN_ALL);
+ break;
+ }
+
+ case IPMICTL_REGISTER_FOR_CMD_CHANS:
+ {
+ struct ipmi_cmdspec_chans val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
+ val.chans);
+ break;
+ }
+
+ case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
+ {
+ struct ipmi_cmdspec_chans val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
+ val.chans);
+ break;
+ }
+
+ case IPMICTL_SET_GETS_EVENTS_CMD:
+ {
+ int val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_set_gets_events(priv->user, val);
+ break;
+ }
+
+ /* The next four are legacy, not per-channel. */
+ case IPMICTL_SET_MY_ADDRESS_CMD:
+ {
+ unsigned int val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_set_my_address(priv->user, 0, val);
+ break;
+ }
+
+ case IPMICTL_GET_MY_ADDRESS_CMD:
+ {
+ unsigned int val;
+ unsigned char rval;
+
+ rv = ipmi_get_my_address(priv->user, 0, &rval);
+ if (rv)
+ break;
+
+ val = rval;
+
+ if (copy_to_user(arg, &val, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ case IPMICTL_SET_MY_LUN_CMD:
+ {
+ unsigned int val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_set_my_LUN(priv->user, 0, val);
+ break;
+ }
+
+ case IPMICTL_GET_MY_LUN_CMD:
+ {
+ unsigned int val;
+ unsigned char rval;
+
+ rv = ipmi_get_my_LUN(priv->user, 0, &rval);
+ if (rv)
+ break;
+
+ val = rval;
+
+ if (copy_to_user(arg, &val, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
+ {
+ struct ipmi_channel_lun_address_set val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ return ipmi_set_my_address(priv->user, val.channel, val.value);
+ }
+
+ case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
+ {
+ struct ipmi_channel_lun_address_set val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
+ if (rv)
+ break;
+
+ if (copy_to_user(arg, &val, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
+ {
+ struct ipmi_channel_lun_address_set val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
+ break;
+ }
+
+ case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
+ {
+ struct ipmi_channel_lun_address_set val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
+ if (rv)
+ break;
+
+ if (copy_to_user(arg, &val, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ case IPMICTL_SET_TIMING_PARMS_CMD:
+ {
+ struct ipmi_timing_parms parms;
+
+ if (copy_from_user(&parms, arg, sizeof(parms))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ mutex_lock(&priv->recv_mutex);
+ priv->default_retries = parms.retries;
+ priv->default_retry_time_ms = parms.retry_time_ms;
+ mutex_unlock(&priv->recv_mutex);
+ rv = 0;
+ break;
+ }
+
+ case IPMICTL_GET_TIMING_PARMS_CMD:
+ {
+ struct ipmi_timing_parms parms;
+
+ mutex_lock(&priv->recv_mutex);
+ parms.retries = priv->default_retries;
+ parms.retry_time_ms = priv->default_retry_time_ms;
+ mutex_unlock(&priv->recv_mutex);
+
+ if (copy_to_user(arg, &parms, sizeof(parms))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = 0;
+ break;
+ }
+
+ case IPMICTL_GET_MAINTENANCE_MODE_CMD:
+ {
+ int mode;
+
+ mode = ipmi_get_maintenance_mode(priv->user);
+ if (copy_to_user(arg, &mode, sizeof(mode))) {
+ rv = -EFAULT;
+ break;
+ }
+ rv = 0;
+ break;
+ }
+
+ case IPMICTL_SET_MAINTENANCE_MODE_CMD:
+ {
+ int mode;
+
+ if (copy_from_user(&mode, arg, sizeof(mode))) {
+ rv = -EFAULT;
+ break;
+ }
+ rv = ipmi_set_maintenance_mode(priv->user, mode);
+ break;
+ }
+
+ default:
+ rv = -ENOTTY;
+ break;
+ }
+
+ return rv;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * The following code supports 32-bit compatible ioctls on 64-bit
+ * kernels, allowing 32-bit applications to run against the 64-bit
+ * kernel.
+ */
+#define COMPAT_IPMICTL_SEND_COMMAND \
+ _IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
+#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME \
+ _IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
+#define COMPAT_IPMICTL_RECEIVE_MSG \
+ _IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
+#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC \
+ _IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)
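+/*
+ * These command values differ from the native IPMICTL_* values only
+ * because _IOR()/_IOWR() encode the sizeof() of the argument type,
+ * and the compat structures are smaller (32-bit pointers and longs).
+ * The magic number and command numbers themselves are unchanged.
+ */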
+
+struct compat_ipmi_msg {
+ u8 netfn;
+ u8 cmd;
+ u16 data_len;
+ compat_uptr_t data;
+};
+
+struct compat_ipmi_req {
+ compat_uptr_t addr;
+ compat_uint_t addr_len;
+ compat_long_t msgid;
+ struct compat_ipmi_msg msg;
+};
+
+struct compat_ipmi_recv {
+ compat_int_t recv_type;
+ compat_uptr_t addr;
+ compat_uint_t addr_len;
+ compat_long_t msgid;
+ struct compat_ipmi_msg msg;
+};
+
+struct compat_ipmi_req_settime {
+ struct compat_ipmi_req req;
+ compat_int_t retries;
+ compat_uint_t retry_time_ms;
+};
+
+/*
+ * Define some helper functions for copying IPMI data
+ */
+static void get_compat_ipmi_msg(struct ipmi_msg *p64,
+ struct compat_ipmi_msg *p32)
+{
+ p64->netfn = p32->netfn;
+ p64->cmd = p32->cmd;
+ p64->data_len = p32->data_len;
+ p64->data = compat_ptr(p32->data);
+}
+
+static void get_compat_ipmi_req(struct ipmi_req *p64,
+ struct compat_ipmi_req *p32)
+{
+ p64->addr = compat_ptr(p32->addr);
+ p64->addr_len = p32->addr_len;
+ p64->msgid = p32->msgid;
+ get_compat_ipmi_msg(&p64->msg, &p32->msg);
+}
+
+static void get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
+ struct compat_ipmi_req_settime *p32)
+{
+ get_compat_ipmi_req(&p64->req, &p32->req);
+ p64->retries = p32->retries;
+ p64->retry_time_ms = p32->retry_time_ms;
+}
+
+static void get_compat_ipmi_recv(struct ipmi_recv *p64,
+ struct compat_ipmi_recv *p32)
+{
+ memset(p64, 0, sizeof(struct ipmi_recv));
+ p64->recv_type = p32->recv_type;
+ p64->addr = compat_ptr(p32->addr);
+ p64->addr_len = p32->addr_len;
+ p64->msgid = p32->msgid;
+ get_compat_ipmi_msg(&p64->msg, &p32->msg);
+}
+
+static int copyout_recv32(struct ipmi_recv *p64, void __user *to)
+{
+ struct compat_ipmi_recv v32;
+ memset(&v32, 0, sizeof(struct compat_ipmi_recv));
+ v32.recv_type = p64->recv_type;
+ v32.addr = ptr_to_compat(p64->addr);
+ v32.addr_len = p64->addr_len;
+ v32.msgid = p64->msgid;
+ v32.msg.netfn = p64->msg.netfn;
+ v32.msg.cmd = p64->msg.cmd;
+ v32.msg.data_len = p64->msg.data_len;
+ v32.msg.data = ptr_to_compat(p64->msg.data);
+ return copy_to_user(to, &v32, sizeof(v32)) ? -EFAULT : 0;
+}
+
+/*
+ * Handle compatibility ioctls
+ */
+static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ struct ipmi_file_private *priv = filep->private_data;
+
+ switch(cmd) {
+ case COMPAT_IPMICTL_SEND_COMMAND:
+ {
+ struct ipmi_req rp;
+ struct compat_ipmi_req r32;
+ int retries;
+ unsigned int retry_time_ms;
+
+ if (copy_from_user(&r32, compat_ptr(arg), sizeof(r32)))
+ return -EFAULT;
+
+ get_compat_ipmi_req(&rp, &r32);
+
+ mutex_lock(&priv->recv_mutex);
+ retries = priv->default_retries;
+ retry_time_ms = priv->default_retry_time_ms;
+ mutex_unlock(&priv->recv_mutex);
+
+ return handle_send_req(priv->user, &rp,
+ retries, retry_time_ms);
+ }
+ case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
+ {
+ struct ipmi_req_settime sp;
+ struct compat_ipmi_req_settime sp32;
+
+ if (copy_from_user(&sp32, compat_ptr(arg), sizeof(sp32)))
+ return -EFAULT;
+
+ get_compat_ipmi_req_settime(&sp, &sp32);
+
+ return handle_send_req(priv->user, &sp.req,
+ sp.retries, sp.retry_time_ms);
+ }
+ case COMPAT_IPMICTL_RECEIVE_MSG:
+ case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
+ {
+ struct ipmi_recv recv64;
+ struct compat_ipmi_recv recv32;
+
+ if (copy_from_user(&recv32, compat_ptr(arg), sizeof(recv32)))
+ return -EFAULT;
+
+ get_compat_ipmi_recv(&recv64, &recv32);
+
+ return handle_recv(priv,
+ cmd == COMPAT_IPMICTL_RECEIVE_MSG_TRUNC,
+ &recv64, copyout_recv32, compat_ptr(arg));
+ }
+ default:
+ return ipmi_ioctl(filep, cmd, arg);
+ }
+}
+#endif
+
+static const struct file_operations ipmi_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ipmi_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_ipmi_ioctl,
+#endif
+ .open = ipmi_open,
+ .release = ipmi_release,
+ .fasync = ipmi_fasync,
+ .poll = ipmi_poll,
+ .llseek = noop_llseek,
+};
+
+#define DEVICE_NAME "ipmidev"
+
+static int ipmi_major;
+module_param(ipmi_major, int, 0);
+MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
+ " default, or if you set it to zero, it will choose the next"
+ " available device. Setting it to -1 will disable the"
+ " interface. Other values will set the major device number"
+ " to that value.");
+
+/* Keep track of the devices that are registered. */
+struct ipmi_reg_list {
+ dev_t dev;
+ struct list_head link;
+};
+static LIST_HEAD(reg_list);
+static DEFINE_MUTEX(reg_list_mutex);
+
+static const struct class ipmi_class = {
+ .name = "ipmi",
+};
+
+static void ipmi_new_smi(int if_num, struct device *device)
+{
+ dev_t dev = MKDEV(ipmi_major, if_num);
+ struct ipmi_reg_list *entry;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ pr_err("ipmi_devintf: Unable to create the ipmi class device link\n");
+ return;
+ }
+ entry->dev = dev;
+
+ mutex_lock(&reg_list_mutex);
+ device_create(&ipmi_class, device, dev, NULL, "ipmi%d", if_num);
+ list_add(&entry->link, &reg_list);
+ mutex_unlock(&reg_list_mutex);
+}
+
+static void ipmi_smi_gone(int if_num)
+{
+ dev_t dev = MKDEV(ipmi_major, if_num);
+ struct ipmi_reg_list *entry;
+
+ mutex_lock(&reg_list_mutex);
+ list_for_each_entry(entry, &reg_list, link) {
+ if (entry->dev == dev) {
+ list_del(&entry->link);
+ kfree(entry);
+ break;
+ }
+ }
+ device_destroy(&ipmi_class, dev);
+ mutex_unlock(&reg_list_mutex);
+}
+
+static struct ipmi_smi_watcher smi_watcher =
+{
+ .owner = THIS_MODULE,
+ .new_smi = ipmi_new_smi,
+ .smi_gone = ipmi_smi_gone,
+};
+
+static int __init init_ipmi_devintf(void)
+{
+ int rv;
+
+ if (ipmi_major < 0)
+ return -EINVAL;
+
+ pr_info("ipmi device interface\n");
+
+ rv = class_register(&ipmi_class);
+ if (rv)
+ return rv;
+
+ rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
+ if (rv < 0) {
+ class_unregister(&ipmi_class);
+ pr_err("ipmi: can't get major %d\n", ipmi_major);
+ return rv;
+ }
+
+ if (ipmi_major == 0) {
+ ipmi_major = rv;
+ }
+
+ rv = ipmi_smi_watcher_register(&smi_watcher);
+ if (rv) {
+ unregister_chrdev(ipmi_major, DEVICE_NAME);
+ class_unregister(&ipmi_class);
+ pr_warn("ipmi: can't register smi watcher\n");
+ return rv;
+ }
+
+ return 0;
+}
+module_init(init_ipmi_devintf);
+
+static void __exit cleanup_ipmi(void)
+{
+ struct ipmi_reg_list *entry, *entry2;
+ mutex_lock(&reg_list_mutex);
+ list_for_each_entry_safe(entry, entry2, &reg_list, link) {
+ list_del(&entry->link);
+ device_destroy(&ipmi_class, entry->dev);
+ kfree(entry);
+ }
+ mutex_unlock(&reg_list_mutex);
+ class_unregister(&ipmi_class);
+ ipmi_smi_watcher_unregister(&smi_watcher);
+ unregister_chrdev(ipmi_major, DEVICE_NAME);
+}
+module_exit(cleanup_ipmi);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c
new file mode 100644
index 0000000000..bbf7029e22
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_dmi.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * A hack to create a platform device from a DMI entry. This will
+ * allow autoloading of the IPMI driver based on SMBIOS entries.
+ */
+
+#define pr_fmt(fmt) "%s" fmt, "ipmi:dmi: "
+#define dev_fmt pr_fmt
+
+#include <linux/ipmi.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include "ipmi_dmi.h"
+#include "ipmi_plat_data.h"
+
+#define IPMI_DMI_TYPE_KCS 0x01
+#define IPMI_DMI_TYPE_SMIC 0x02
+#define IPMI_DMI_TYPE_BT 0x03
+#define IPMI_DMI_TYPE_SSIF 0x04
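+/* These match the Interface Type values in the SMBIOS type 38 record. */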
+
+struct ipmi_dmi_info {
+ enum si_type si_type;
+ unsigned int space; /* addr space for si, intf# for ssif */
+ unsigned long addr;
+ u8 slave_addr;
+ struct ipmi_dmi_info *next;
+};
+
+static struct ipmi_dmi_info *ipmi_dmi_infos;
+
+static int ipmi_dmi_nr __initdata;
+
+static void __init dmi_add_platform_ipmi(unsigned long base_addr,
+ unsigned int space,
+ u8 slave_addr,
+ int irq,
+ int offset,
+ int type)
+{
+ const char *name;
+ struct ipmi_dmi_info *info;
+ struct ipmi_plat_data p;
+
+ memset(&p, 0, sizeof(p));
+
+ name = "dmi-ipmi-si";
+ p.iftype = IPMI_PLAT_IF_SI;
+ switch (type) {
+ case IPMI_DMI_TYPE_SSIF:
+ name = "dmi-ipmi-ssif";
+ p.iftype = IPMI_PLAT_IF_SSIF;
+ p.type = SI_TYPE_INVALID;
+ break;
+ case IPMI_DMI_TYPE_BT:
+ p.type = SI_BT;
+ break;
+ case IPMI_DMI_TYPE_KCS:
+ p.type = SI_KCS;
+ break;
+ case IPMI_DMI_TYPE_SMIC:
+ p.type = SI_SMIC;
+ break;
+ default:
+ pr_err("Invalid IPMI type: %d\n", type);
+ return;
+ }
+
+ p.addr = base_addr;
+ p.space = space;
+ p.regspacing = offset;
+ p.irq = irq;
+ p.slave_addr = slave_addr;
+ p.addr_source = SI_SMBIOS;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ pr_warn("Could not allocate dmi info\n");
+ } else {
+ info->si_type = p.type;
+ info->space = space;
+ info->addr = base_addr;
+ info->slave_addr = slave_addr;
+ info->next = ipmi_dmi_infos;
+ ipmi_dmi_infos = info;
+ }
+
+ if (ipmi_platform_add(name, ipmi_dmi_nr, &p))
+ ipmi_dmi_nr++;
+}
+
+/*
+ * Look up the slave address for a given interface. This is here
+ * because ACPI doesn't have a slave address while SMBIOS does, but we
+ * prefer using ACPI so the ACPI code can use the IPMI namespace.
+ * This function allows an ACPI-specified IPMI device to look up the
+ * slave address from the DMI table.
+ */
+int ipmi_dmi_get_slave_addr(enum si_type si_type, unsigned int space,
+ unsigned long base_addr)
+{
+ struct ipmi_dmi_info *info = ipmi_dmi_infos;
+
+ while (info) {
+ if (info->si_type == si_type &&
+ info->space == space &&
+ info->addr == base_addr)
+ return info->slave_addr;
+ info = info->next;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_dmi_get_slave_addr);
+
+#define DMI_IPMI_MIN_LENGTH 0x10
+#define DMI_IPMI_VER2_LENGTH 0x12
+#define DMI_IPMI_TYPE 4
+#define DMI_IPMI_SLAVEADDR 6
+#define DMI_IPMI_ADDR 8
+#define DMI_IPMI_ACCESS 0x10
+#define DMI_IPMI_IRQ 0x11
+#define DMI_IPMI_IO_MASK 0xfffe
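+/*
+ * The DMI_IPMI_* values above are byte offsets into the raw SMBIOS
+ * type 38 (IPMI Device Information) record: interface type at offset
+ * 4, slave address at 6, base address at 8, base address modifier /
+ * interrupt info at 0x10, and interrupt number at 0x11.
+ */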
+
+static void __init dmi_decode_ipmi(const struct dmi_header *dm)
+{
+ const u8 *data = (const u8 *) dm;
+ int space = IPMI_IO_ADDR_SPACE;
+ unsigned long base_addr;
+ u8 len = dm->length;
+ u8 slave_addr;
+ int irq = 0, offset = 0;
+ int type;
+
+ if (len < DMI_IPMI_MIN_LENGTH)
+ return;
+
+ type = data[DMI_IPMI_TYPE];
+ slave_addr = data[DMI_IPMI_SLAVEADDR];
+
+ memcpy(&base_addr, data + DMI_IPMI_ADDR, sizeof(unsigned long));
+ if (!base_addr) {
+ pr_err("Base address is zero, assuming no IPMI interface\n");
+ return;
+ }
+ if (len >= DMI_IPMI_VER2_LENGTH) {
+ if (type == IPMI_DMI_TYPE_SSIF) {
+ space = 0; /* Match I2C interface 0. */
+ base_addr = data[DMI_IPMI_ADDR] >> 1;
+ if (base_addr == 0) {
+ /*
+ * Some broken systems put the I2C address in
+ * the slave address field. We try to
+ * accommodate them here.
+ */
+ base_addr = data[DMI_IPMI_SLAVEADDR] >> 1;
+ slave_addr = 0;
+ }
+ } else {
+ if (base_addr & 1) {
+ /* I/O */
+ base_addr &= DMI_IPMI_IO_MASK;
+ } else {
+ /* Memory */
+ space = IPMI_MEM_ADDR_SPACE;
+ }
+
+ /*
+ * If bit 4 of byte 0x10 is set, then the lsb
+ * for the address is odd.
+ */
+ base_addr |= (data[DMI_IPMI_ACCESS] >> 4) & 1;
+
+ irq = data[DMI_IPMI_IRQ];
+
+ /*
+ * The top two bits of byte 0x10 hold the
+ * register spacing.
+ */
+ switch ((data[DMI_IPMI_ACCESS] >> 6) & 3) {
+ case 0: /* Byte boundaries */
+ offset = 1;
+ break;
+ case 1: /* 32-bit boundaries */
+ offset = 4;
+ break;
+ case 2: /* 16-byte boundaries */
+ offset = 16;
+ break;
+ default:
+ pr_err("Invalid offset: 0\n");
+ return;
+ }
+ }
+ } else {
+ /* Old DMI spec. */
+ /*
+ * Note that technically, the lower bit of the base
+ * address should be 1 if the address is I/O and 0 if
+ * the address is in memory. So many systems get that
+ * wrong (and all that I have seen are I/O) so we just
+ * ignore that bit and assume I/O. Systems that use
+ * memory should use the newer spec, anyway.
+ */
+ base_addr = base_addr & DMI_IPMI_IO_MASK;
+ offset = 1;
+ }
+
+ dmi_add_platform_ipmi(base_addr, space, slave_addr, irq,
+ offset, type);
+}
+
+static int __init scan_for_dmi_ipmi(void)
+{
+ const struct dmi_device *dev = NULL;
+
+ while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev)))
+ dmi_decode_ipmi((const struct dmi_header *) dev->device_data);
+
+ return 0;
+}
+subsys_initcall(scan_for_dmi_ipmi);
diff --git a/drivers/char/ipmi/ipmi_dmi.h b/drivers/char/ipmi/ipmi_dmi.h
new file mode 100644
index 0000000000..e16a9dbdcc
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_dmi.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * DMI defines for use by IPMI
+ */
+#include "ipmi_si.h"
+
+#ifdef CONFIG_IPMI_DMI_DECODE
+int ipmi_dmi_get_slave_addr(enum si_type si_type, unsigned int space,
+ unsigned long base_addr);
+#endif
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
new file mode 100644
index 0000000000..4e335832fc
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -0,0 +1,583 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Driver to talk to a remote management controller on IPMB.
+ */
+
+#include <linux/acpi.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/ipmi_msgdefs.h>
+#include <linux/ipmi_smi.h>
+
+#define DEVICE_NAME "ipmi-ipmb"
+
+static int bmcaddr = 0x20;
+module_param(bmcaddr, int, 0644);
+MODULE_PARM_DESC(bmcaddr, "Address to use for BMC.");
+
+static unsigned int retry_time_ms = 250;
+module_param(retry_time_ms, uint, 0644);
+MODULE_PARM_DESC(retry_time_ms, "Timeout time between retries, in milliseconds.");
+
+static unsigned int max_retries = 1;
+module_param(max_retries, uint, 0644);
+MODULE_PARM_DESC(max_retries, "Max resends of a command before timing out.");
+
+/* Add room for the two slave addresses, two checksums, and rqSeq. */
+#define IPMB_MAX_MSG_LEN (IPMI_MAX_MSG_LENGTH + 5)
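+/*
+ * For reference, an IPMB frame as seen on the wire is roughly:
+ *
+ *   rsSA | netFn/rsLUN | cksum1 | rqSA | rqSeq/rqLUN | cmd | data... | cksum2
+ *
+ * which is where the five extra bytes (two slave addresses, two
+ * checksums, and rqSeq) on top of a plain IPMI message come from.
+ */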
+
+struct ipmi_ipmb_dev {
+ struct ipmi_smi *intf;
+ struct i2c_client *client;
+ struct i2c_client *slave;
+
+ struct ipmi_smi_handlers handlers;
+
+ bool ready;
+
+ u8 curr_seq;
+
+ u8 bmcaddr;
+ u32 retry_time_ms;
+ u32 max_retries;
+
+ struct ipmi_smi_msg *next_msg;
+ struct ipmi_smi_msg *working_msg;
+
+ /* Transmit thread. */
+ struct task_struct *thread;
+ struct semaphore wake_thread;
+ struct semaphore got_rsp;
+ spinlock_t lock;
+ bool stopping;
+
+ u8 xmitmsg[IPMB_MAX_MSG_LEN];
+ unsigned int xmitlen;
+
+ u8 rcvmsg[IPMB_MAX_MSG_LEN];
+ unsigned int rcvlen;
+ bool overrun;
+};
+
+static bool valid_ipmb(struct ipmi_ipmb_dev *iidev)
+{
+ u8 *msg = iidev->rcvmsg;
+ u8 netfn;
+
+ if (iidev->overrun)
+ return false;
+
+ /* Minimum message size. */
+ if (iidev->rcvlen < 7)
+ return false;
+
+ /* Is it a response? */
+ netfn = msg[1] >> 2;
+ if (netfn & 1) {
+ /* Response messages have an added completion code. */
+ if (iidev->rcvlen < 8)
+ return false;
+ }
+
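+	/*
+	 * ipmb_checksum() over a span that includes its trailing checksum
+	 * byte sums to zero when the data is intact (the checksums are
+	 * 2's complement), so a non-zero result means corruption.
+	 */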
+ if (ipmb_checksum(msg, 3) != 0)
+ return false;
+ if (ipmb_checksum(msg + 3, iidev->rcvlen - 3) != 0)
+ return false;
+
+ return true;
+}
+
+static void ipmi_ipmb_check_msg_done(struct ipmi_ipmb_dev *iidev)
+{
+ struct ipmi_smi_msg *imsg = NULL;
+ u8 *msg = iidev->rcvmsg;
+ bool is_cmd;
+ unsigned long flags;
+
+ if (iidev->rcvlen == 0)
+ return;
+ if (!valid_ipmb(iidev))
+ goto done;
+
+ is_cmd = ((msg[1] >> 2) & 1) == 0;
+
+ if (is_cmd) {
+ /* Ignore commands until we are up. */
+ if (!iidev->ready)
+ goto done;
+
+ /* It's a command, allocate a message for it. */
+ imsg = ipmi_alloc_smi_msg();
+ if (!imsg)
+ goto done;
+ imsg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT;
+ imsg->data_size = 0;
+ } else {
+ spin_lock_irqsave(&iidev->lock, flags);
+ if (iidev->working_msg) {
+ u8 seq = msg[4] >> 2;
+ bool xmit_rsp = (iidev->working_msg->data[0] >> 2) & 1;
+
+ /*
+ * Responses should carry the sequence we sent
+ * them with. If it's a transmitted response,
+ * ignore it. And if the message hasn't been
+ * transmitted, ignore it.
+ */
+ if (!xmit_rsp && seq == iidev->curr_seq) {
+ iidev->curr_seq = (iidev->curr_seq + 1) & 0x3f;
+
+ imsg = iidev->working_msg;
+ iidev->working_msg = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&iidev->lock, flags);
+ }
+
+ if (!imsg)
+ goto done;
+
+ if (imsg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ imsg->rsp[0] = msg[1]; /* NetFn/LUN */
+ /*
+ * Keep the source address, rqSeq. Drop the trailing
+ * checksum.
+ */
+ memcpy(imsg->rsp + 1, msg + 3, iidev->rcvlen - 4);
+ imsg->rsp_size = iidev->rcvlen - 3;
+ } else {
+ imsg->rsp[0] = msg[1]; /* NetFn/LUN */
+ /*
+ * Skip the source address, rqSeq. Drop the trailing
+ * checksum.
+ */
+ memcpy(imsg->rsp + 1, msg + 5, iidev->rcvlen - 6);
+ imsg->rsp_size = iidev->rcvlen - 5;
+ }
+ ipmi_smi_msg_received(iidev->intf, imsg);
+ if (!is_cmd)
+ up(&iidev->got_rsp);
+
+done:
+ iidev->overrun = false;
+ iidev->rcvlen = 0;
+}
+
+/*
+ * The IPMB protocol only supports i2c writes so there is no need to
+ * support I2C_SLAVE_READ* events, except to know if the other end has
+ * issued a read without going to stop mode.
+ */
+static int ipmi_ipmb_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
+
+ switch (event) {
+ case I2C_SLAVE_WRITE_REQUESTED:
+ ipmi_ipmb_check_msg_done(iidev);
+ /*
+ * First byte is the slave address, to ease the checksum
+ * calculation.
+ */
+ iidev->rcvmsg[0] = client->addr << 1;
+ iidev->rcvlen = 1;
+ break;
+
+ case I2C_SLAVE_WRITE_RECEIVED:
+ if (iidev->rcvlen >= sizeof(iidev->rcvmsg))
+ iidev->overrun = true;
+ else
+ iidev->rcvmsg[iidev->rcvlen++] = *val;
+ break;
+
+ case I2C_SLAVE_READ_REQUESTED:
+ case I2C_SLAVE_STOP:
+ ipmi_ipmb_check_msg_done(iidev);
+ break;
+
+ case I2C_SLAVE_READ_PROCESSED:
+ break;
+ }
+
+ return 0;
+}
+
+static void ipmi_ipmb_send_response(struct ipmi_ipmb_dev *iidev,
+ struct ipmi_smi_msg *msg, u8 cc)
+{
+ if ((msg->data[0] >> 2) & 1) {
+ /*
+ * It's a response being sent, we need to return a
+ * response to the response. Fake a send msg command
+ * response with channel 0. This will always be ipmb
+ * direct.
+ */
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST | 1) << 2;
+ msg->data[3] = IPMI_SEND_MSG_CMD;
+ msg->data[4] = cc;
+ msg->data_size = 5;
+ }
+ msg->rsp[0] = msg->data[0] | (1 << 2);
+ if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = msg->data[2];
+ msg->rsp[3] = msg->data[3];
+ msg->rsp[4] = cc;
+ msg->rsp_size = 5;
+ } else {
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = cc;
+ msg->rsp_size = 3;
+ }
+ ipmi_smi_msg_received(iidev->intf, msg);
+}
+
+static void ipmi_ipmb_format_for_xmit(struct ipmi_ipmb_dev *iidev,
+ struct ipmi_smi_msg *msg)
+{
+ if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ iidev->xmitmsg[0] = msg->data[1];
+ iidev->xmitmsg[1] = msg->data[0];
+ memcpy(iidev->xmitmsg + 4, msg->data + 2, msg->data_size - 2);
+ iidev->xmitlen = msg->data_size + 2;
+ } else {
+ iidev->xmitmsg[0] = iidev->bmcaddr;
+ iidev->xmitmsg[1] = msg->data[0];
+ iidev->xmitmsg[4] = 0;
+ memcpy(iidev->xmitmsg + 5, msg->data + 1, msg->data_size - 1);
+ iidev->xmitlen = msg->data_size + 4;
+ }
+ iidev->xmitmsg[3] = iidev->slave->addr << 1;
+ if (((msg->data[0] >> 2) & 1) == 0)
+ /* If it's a command, put in our own sequence number. */
+ iidev->xmitmsg[4] = ((iidev->xmitmsg[4] & 0x03) |
+ (iidev->curr_seq << 2));
+
+ /* Now add on the final checksums. */
+ iidev->xmitmsg[2] = ipmb_checksum(iidev->xmitmsg, 2);
+ iidev->xmitmsg[iidev->xmitlen] =
+ ipmb_checksum(iidev->xmitmsg + 3, iidev->xmitlen - 3);
+ iidev->xmitlen++;
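+	/*
+	 * xmitmsg[] now holds a complete IPMB frame: destination address,
+	 * netFn/LUN, header checksum, our own slave address, rqSeq/LUN,
+	 * the command and data, and the trailing checksum computed above.
+	 */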
+}
+
+static int ipmi_ipmb_thread(void *data)
+{
+ struct ipmi_ipmb_dev *iidev = data;
+
+ while (!kthread_should_stop()) {
+ long ret;
+ struct i2c_msg i2c_msg;
+ struct ipmi_smi_msg *msg = NULL;
+ unsigned long flags;
+ unsigned int retries = 0;
+
+ /* Wait for a message to send */
+ ret = down_interruptible(&iidev->wake_thread);
+ if (iidev->stopping)
+ break;
+ if (ret)
+ continue;
+
+ spin_lock_irqsave(&iidev->lock, flags);
+ if (iidev->next_msg) {
+ msg = iidev->next_msg;
+ iidev->next_msg = NULL;
+ }
+ spin_unlock_irqrestore(&iidev->lock, flags);
+ if (!msg)
+ continue;
+
+ ipmi_ipmb_format_for_xmit(iidev, msg);
+
+retry:
+ i2c_msg.len = iidev->xmitlen - 1;
+ if (i2c_msg.len > 32) {
+ ipmi_ipmb_send_response(iidev, msg,
+ IPMI_REQ_LEN_EXCEEDED_ERR);
+ continue;
+ }
+
+ i2c_msg.addr = iidev->xmitmsg[0] >> 1;
+ i2c_msg.flags = 0;
+ i2c_msg.buf = iidev->xmitmsg + 1;
+
+ /* Rely on i2c_transfer for a barrier. */
+ iidev->working_msg = msg;
+
+ ret = i2c_transfer(iidev->client->adapter, &i2c_msg, 1);
+
+ if ((msg->data[0] >> 2) & 1) {
+ /*
+ * It's a response, nothing will be returned
+ * by the other end.
+ */
+
+ iidev->working_msg = NULL;
+ ipmi_ipmb_send_response(iidev, msg,
+ ret < 0 ? IPMI_BUS_ERR : 0);
+ continue;
+ }
+ if (ret < 0) {
+ iidev->working_msg = NULL;
+ ipmi_ipmb_send_response(iidev, msg, IPMI_BUS_ERR);
+ continue;
+ }
+
+ /* A command was sent, wait for its response. */
+ ret = down_timeout(&iidev->got_rsp,
+ msecs_to_jiffies(iidev->retry_time_ms));
+
+ /*
+ * Grab the message if we can. If the handler hasn't
+ * already handled it, the message will still be there.
+ */
+ spin_lock_irqsave(&iidev->lock, flags);
+ msg = iidev->working_msg;
+ iidev->working_msg = NULL;
+ spin_unlock_irqrestore(&iidev->lock, flags);
+
+ if (!msg && ret) {
+ /*
+ * If working_msg is not set and we timed out,
+			 * that means the message was grabbed by
+ * check_msg_done before we could grab it
+ * here. Wait again for check_msg_done to up
+ * the semaphore.
+ */
+ down(&iidev->got_rsp);
+ } else if (msg && ++retries <= iidev->max_retries) {
+ spin_lock_irqsave(&iidev->lock, flags);
+ iidev->working_msg = msg;
+ spin_unlock_irqrestore(&iidev->lock, flags);
+ goto retry;
+ }
+
+ if (msg)
+ ipmi_ipmb_send_response(iidev, msg, IPMI_TIMEOUT_ERR);
+ }
+
+ if (iidev->next_msg)
+ /* Return an unspecified error. */
+ ipmi_ipmb_send_response(iidev, iidev->next_msg, 0xff);
+
+ return 0;
+}
+
+static int ipmi_ipmb_start_processing(void *send_info,
+ struct ipmi_smi *new_intf)
+{
+ struct ipmi_ipmb_dev *iidev = send_info;
+
+ iidev->intf = new_intf;
+ iidev->ready = true;
+ return 0;
+}
+
+static void ipmi_ipmb_stop_thread(struct ipmi_ipmb_dev *iidev)
+{
+ if (iidev->thread) {
+ struct task_struct *t = iidev->thread;
+
+ iidev->thread = NULL;
+ iidev->stopping = true;
+ up(&iidev->wake_thread);
+ up(&iidev->got_rsp);
+ kthread_stop(t);
+ }
+}
+
+static void ipmi_ipmb_shutdown(void *send_info)
+{
+ struct ipmi_ipmb_dev *iidev = send_info;
+
+ ipmi_ipmb_stop_thread(iidev);
+}
+
+static void ipmi_ipmb_sender(void *send_info,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_ipmb_dev *iidev = send_info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iidev->lock, flags);
+ BUG_ON(iidev->next_msg);
+
+ iidev->next_msg = msg;
+ spin_unlock_irqrestore(&iidev->lock, flags);
+
+ up(&iidev->wake_thread);
+}
+
+static void ipmi_ipmb_request_events(void *send_info)
+{
+ /* We don't fetch events here. */
+}
+
+static void ipmi_ipmb_cleanup(struct ipmi_ipmb_dev *iidev)
+{
+ if (iidev->slave) {
+ i2c_slave_unregister(iidev->slave);
+ if (iidev->slave != iidev->client)
+ i2c_unregister_device(iidev->slave);
+ }
+ iidev->slave = NULL;
+ iidev->client = NULL;
+ ipmi_ipmb_stop_thread(iidev);
+}
+
+static void ipmi_ipmb_remove(struct i2c_client *client)
+{
+ struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
+
+ ipmi_ipmb_cleanup(iidev);
+ ipmi_unregister_smi(iidev->intf);
+}
+
+static int ipmi_ipmb_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct ipmi_ipmb_dev *iidev;
+ struct device_node *slave_np;
+ struct i2c_adapter *slave_adap = NULL;
+ struct i2c_client *slave = NULL;
+ int rv;
+
+ iidev = devm_kzalloc(&client->dev, sizeof(*iidev), GFP_KERNEL);
+ if (!iidev)
+ return -ENOMEM;
+
+ if (of_property_read_u8(dev->of_node, "bmcaddr", &iidev->bmcaddr) != 0)
+ iidev->bmcaddr = bmcaddr;
+ if (iidev->bmcaddr == 0 || iidev->bmcaddr & 1) {
+ /* Can't have the write bit set. */
+ dev_notice(&client->dev,
+ "Invalid bmc address value %2.2x\n", iidev->bmcaddr);
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(dev->of_node, "retry-time",
+ &iidev->retry_time_ms) != 0)
+ iidev->retry_time_ms = retry_time_ms;
+
+ if (of_property_read_u32(dev->of_node, "max-retries",
+ &iidev->max_retries) != 0)
+ iidev->max_retries = max_retries;
+
+ slave_np = of_parse_phandle(dev->of_node, "slave-dev", 0);
+ if (slave_np) {
+ slave_adap = of_get_i2c_adapter_by_node(slave_np);
+ of_node_put(slave_np);
+ if (!slave_adap) {
+ dev_notice(&client->dev,
+ "Could not find slave adapter\n");
+ return -EINVAL;
+ }
+ }
+
+ iidev->client = client;
+
+ if (slave_adap) {
+ struct i2c_board_info binfo;
+
+ memset(&binfo, 0, sizeof(binfo));
+ strscpy(binfo.type, "ipmb-slave", I2C_NAME_SIZE);
+ binfo.addr = client->addr;
+ binfo.flags = I2C_CLIENT_SLAVE;
+ slave = i2c_new_client_device(slave_adap, &binfo);
+ i2c_put_adapter(slave_adap);
+ if (IS_ERR(slave)) {
+ rv = PTR_ERR(slave);
+ dev_notice(&client->dev,
+ "Could not allocate slave device: %d\n", rv);
+ return rv;
+ }
+ i2c_set_clientdata(slave, iidev);
+ } else {
+ slave = client;
+ }
+ i2c_set_clientdata(client, iidev);
+ slave->flags |= I2C_CLIENT_SLAVE;
+
+ rv = i2c_slave_register(slave, ipmi_ipmb_slave_cb);
+ if (rv)
+ goto out_err;
+ iidev->slave = slave;
+ slave = NULL;
+
+ iidev->handlers.flags = IPMI_SMI_CAN_HANDLE_IPMB_DIRECT;
+ iidev->handlers.start_processing = ipmi_ipmb_start_processing;
+ iidev->handlers.shutdown = ipmi_ipmb_shutdown;
+ iidev->handlers.sender = ipmi_ipmb_sender;
+ iidev->handlers.request_events = ipmi_ipmb_request_events;
+
+ spin_lock_init(&iidev->lock);
+ sema_init(&iidev->wake_thread, 0);
+ sema_init(&iidev->got_rsp, 0);
+
+ iidev->thread = kthread_run(ipmi_ipmb_thread, iidev,
+ "kipmb%4.4x", client->addr);
+ if (IS_ERR(iidev->thread)) {
+ rv = PTR_ERR(iidev->thread);
+ dev_notice(&client->dev,
+ "Could not start kernel thread: error %d\n", rv);
+ goto out_err;
+ }
+
+ rv = ipmi_register_smi(&iidev->handlers,
+ iidev,
+ &client->dev,
+ iidev->bmcaddr);
+ if (rv)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ if (slave && slave != client)
+ i2c_unregister_device(slave);
+ ipmi_ipmb_cleanup(iidev);
+ return rv;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_ipmi_ipmb_match[] = {
+ { .type = "ipmi", .compatible = DEVICE_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_ipmi_ipmb_match);
+#else
+#define of_ipmi_ipmb_match NULL
+#endif
+
+static const struct i2c_device_id ipmi_ipmb_id[] = {
+ { DEVICE_NAME, 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ipmi_ipmb_id);
+
+static struct i2c_driver ipmi_ipmb_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = of_ipmi_ipmb_match,
+ },
+ .probe = ipmi_ipmb_probe,
+ .remove = ipmi_ipmb_remove,
+ .id_table = ipmi_ipmb_id,
+};
+module_i2c_driver(ipmi_ipmb_driver);
+
+MODULE_AUTHOR("Corey Minyard");
+MODULE_DESCRIPTION("IPMI IPMB driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
new file mode 100644
index 0000000000..ecfcb50302
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_kcs_sm.c
+ *
+ * State machine for handling IPMI KCS interfaces.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ */
+
+/*
+ * This state machine is taken from the state machine in the IPMI spec,
+ * pretty much verbatim. If you have questions about the states, see
+ * that document.
+ */
+
+#define DEBUG /* So dev_dbg() is always available. */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/ipmi_msgdefs.h> /* for completion codes */
+#include "ipmi_si_sm.h"
+
+/* kcs_debug is a bit-field
+ * KCS_DEBUG_ENABLE - turned on for now
+ * KCS_DEBUG_MSG - commands and their responses
+ * KCS_DEBUG_STATES - state machine
+ */
+#define KCS_DEBUG_STATES 4
+#define KCS_DEBUG_MSG 2
+#define KCS_DEBUG_ENABLE 1
+
+static int kcs_debug;
+module_param(kcs_debug, int, 0644);
+MODULE_PARM_DESC(kcs_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
+
+/* The states the KCS driver may be in. */
+enum kcs_states {
+ /* The KCS interface is currently doing nothing. */
+ KCS_IDLE,
+
+ /*
+ * We are starting an operation. The data is in the output
+ * buffer, but nothing has been done to the interface yet. This
+ * was added to the state machine in the spec to wait for the
+ * initial IBF.
+ */
+ KCS_START_OP,
+
+ /* We have written a write cmd to the interface. */
+ KCS_WAIT_WRITE_START,
+
+ /* We are writing bytes to the interface. */
+ KCS_WAIT_WRITE,
+
+ /*
+ * We have written the write end cmd to the interface, and
+ * still need to write the last byte.
+ */
+ KCS_WAIT_WRITE_END,
+
+ /* We are waiting to read data from the interface. */
+ KCS_WAIT_READ,
+
+ /*
+ * State to transition to the error handler, this was added to
+ * the state machine in the spec to be sure IBF was there.
+ */
+ KCS_ERROR0,
+
+ /*
+ * First stage error handler, wait for the interface to
+ * respond.
+ */
+ KCS_ERROR1,
+
+ /*
+ * The abort cmd has been written, wait for the interface to
+ * respond.
+ */
+ KCS_ERROR2,
+
+ /*
+ * We wrote some data to the interface, wait for it to switch
+ * to read mode.
+ */
+ KCS_ERROR3,
+
+ /* The hardware failed to follow the state machine. */
+ KCS_HOSED
+};
+
+#define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH
+#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
+
+/* Timeouts in microseconds (the error0 OBF wait below is in jiffies). */
+#define IBF_RETRY_TIMEOUT (5*USEC_PER_SEC)
+#define OBF_RETRY_TIMEOUT (5*USEC_PER_SEC)
+#define MAX_ERROR_RETRIES 10
+#define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
+
+struct si_sm_data {
+ enum kcs_states state;
+ struct si_sm_io *io;
+ unsigned char write_data[MAX_KCS_WRITE_SIZE];
+ int write_pos;
+ int write_count;
+ int orig_write_count;
+ unsigned char read_data[MAX_KCS_READ_SIZE];
+ int read_pos;
+ int truncated;
+
+ unsigned int error_retries;
+ long ibf_timeout;
+ long obf_timeout;
+ unsigned long error0_timeout;
+};
+
+static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
+ struct si_sm_io *io, enum kcs_states state)
+{
+ kcs->state = state;
+ kcs->io = io;
+ kcs->write_pos = 0;
+ kcs->write_count = 0;
+ kcs->orig_write_count = 0;
+ kcs->read_pos = 0;
+ kcs->error_retries = 0;
+ kcs->truncated = 0;
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+
+	/*
+	 * Reserve 2 I/O bytes: the data register at offset 0 and the
+	 * status/command register at offset 1.
+	 */
+ return 2;
+}
+
+static unsigned int init_kcs_data(struct si_sm_data *kcs,
+ struct si_sm_io *io)
+{
+ return init_kcs_data_with_state(kcs, io, KCS_IDLE);
+}
+
+static inline unsigned char read_status(struct si_sm_data *kcs)
+{
+ return kcs->io->inputb(kcs->io, 1);
+}
+
+static inline unsigned char read_data(struct si_sm_data *kcs)
+{
+ return kcs->io->inputb(kcs->io, 0);
+}
+
+static inline void write_cmd(struct si_sm_data *kcs, unsigned char data)
+{
+ kcs->io->outputb(kcs->io, 1, data);
+}
+
+static inline void write_data(struct si_sm_data *kcs, unsigned char data)
+{
+ kcs->io->outputb(kcs->io, 0, data);
+}
+
+/* Control codes. */
+#define KCS_GET_STATUS_ABORT 0x60
+#define KCS_WRITE_START 0x61
+#define KCS_WRITE_END 0x62
+#define KCS_READ_BYTE 0x68
+
+/* Status bits. */
+#define GET_STATUS_STATE(status) (((status) >> 6) & 0x03)
+#define KCS_IDLE_STATE 0
+#define KCS_READ_STATE 1
+#define KCS_WRITE_STATE 2
+#define KCS_ERROR_STATE 3
+#define GET_STATUS_ATN(status) ((status) & 0x04)
+#define GET_STATUS_IBF(status) ((status) & 0x02)
+#define GET_STATUS_OBF(status) ((status) & 0x01)
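+/*
+ * Layout of the KCS status register, per the IPMI spec: bit 0 is OBF
+ * (output buffer full), bit 1 is IBF (input buffer full), bit 2 is
+ * SMS_ATN, and bits 7:6 hold the interface state (idle, read, write,
+ * or error) extracted by GET_STATUS_STATE() above.
+ */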
+
+
+static inline void write_next_byte(struct si_sm_data *kcs)
+{
+ write_data(kcs, kcs->write_data[kcs->write_pos]);
+ (kcs->write_pos)++;
+ (kcs->write_count)--;
+}
+
+static inline void start_error_recovery(struct si_sm_data *kcs, char *reason)
+{
+ (kcs->error_retries)++;
+ if (kcs->error_retries > MAX_ERROR_RETRIES) {
+ if (kcs_debug & KCS_DEBUG_ENABLE)
+ dev_dbg(kcs->io->dev, "ipmi_kcs_sm: kcs hosed: %s\n",
+ reason);
+ kcs->state = KCS_HOSED;
+ } else {
+ kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES;
+ kcs->state = KCS_ERROR0;
+ }
+}
+
+static inline void read_next_byte(struct si_sm_data *kcs)
+{
+ if (kcs->read_pos >= MAX_KCS_READ_SIZE) {
+ /* Throw the data away and mark it truncated. */
+ read_data(kcs);
+ kcs->truncated = 1;
+ } else {
+ kcs->read_data[kcs->read_pos] = read_data(kcs);
+ (kcs->read_pos)++;
+ }
+ write_data(kcs, KCS_READ_BYTE);
+}
+
+static inline int check_ibf(struct si_sm_data *kcs, unsigned char status,
+ long time)
+{
+ if (GET_STATUS_IBF(status)) {
+ kcs->ibf_timeout -= time;
+ if (kcs->ibf_timeout < 0) {
+ start_error_recovery(kcs, "IBF not ready in time");
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ return 1;
+ }
+ return 0;
+ }
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ return 1;
+}
+
+static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
+ long time)
+{
+ if (!GET_STATUS_OBF(status)) {
+ kcs->obf_timeout -= time;
+ if (kcs->obf_timeout < 0) {
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+ start_error_recovery(kcs, "OBF not ready in time");
+ return 1;
+ }
+ return 0;
+ }
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+ return 1;
+}
+
+static void clear_obf(struct si_sm_data *kcs, unsigned char status)
+{
+ if (GET_STATUS_OBF(status))
+ read_data(kcs);
+}
+
+static void restart_kcs_transaction(struct si_sm_data *kcs)
+{
+ kcs->write_count = kcs->orig_write_count;
+ kcs->write_pos = 0;
+ kcs->read_pos = 0;
+ kcs->state = KCS_WAIT_WRITE_START;
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+ write_cmd(kcs, KCS_WRITE_START);
+}
+
+static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
+ unsigned int size)
+{
+ unsigned int i;
+
+ if (size < 2)
+ return IPMI_REQ_LEN_INVALID_ERR;
+ if (size > MAX_KCS_WRITE_SIZE)
+ return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+ if (kcs->state != KCS_IDLE) {
+ dev_warn(kcs->io->dev, "KCS in invalid state %d\n", kcs->state);
+ return IPMI_NOT_IN_MY_STATE_ERR;
+ }
+
+ if (kcs_debug & KCS_DEBUG_MSG) {
+ dev_dbg(kcs->io->dev, "%s -", __func__);
+ for (i = 0; i < size; i++)
+ pr_cont(" %02x", data[i]);
+ pr_cont("\n");
+ }
+ kcs->error_retries = 0;
+ memcpy(kcs->write_data, data, size);
+ kcs->write_count = size;
+ kcs->orig_write_count = size;
+ kcs->write_pos = 0;
+ kcs->read_pos = 0;
+ kcs->state = KCS_START_OP;
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+ return 0;
+}
+
+static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data,
+ unsigned int length)
+{
+ if (length < kcs->read_pos) {
+ kcs->read_pos = length;
+ kcs->truncated = 1;
+ }
+
+ memcpy(data, kcs->read_data, kcs->read_pos);
+
+ if ((length >= 3) && (kcs->read_pos < 3)) {
+ /* Guarantee that we return at least 3 bytes, with an
+ error in the third byte if it is too short. */
+ data[2] = IPMI_ERR_UNSPECIFIED;
+ kcs->read_pos = 3;
+ }
+ if (kcs->truncated) {
+ /*
+ * Report a truncated error. We might overwrite
+ * another error, but that's too bad, the user needs
+ * to know it was truncated.
+ */
+ data[2] = IPMI_ERR_MSG_TRUNCATED;
+ kcs->truncated = 0;
+ }
+
+ return kcs->read_pos;
+}
+
+/*
+ * This implements the state machine defined in the IPMI manual, see
+ * that for details on how this works. Divide that flowchart into
+ * sections delimited by "Wait for IBF" and this will become clear.
+ */
+static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
+{
+ unsigned char status;
+ unsigned char state;
+
+ status = read_status(kcs);
+
+ if (kcs_debug & KCS_DEBUG_STATES)
+ dev_dbg(kcs->io->dev,
+ "KCS: State = %d, %x\n", kcs->state, status);
+
+ /* All states wait for ibf, so just do it here. */
+ if (!check_ibf(kcs, status, time))
+ return SI_SM_CALL_WITH_DELAY;
+
+ /* Just about everything looks at the KCS state, so grab that, too. */
+ state = GET_STATUS_STATE(status);
+
+ switch (kcs->state) {
+ case KCS_IDLE:
+		/* If there's an interrupt source, turn it off. */
+ clear_obf(kcs, status);
+
+ if (GET_STATUS_ATN(status))
+ return SI_SM_ATTN;
+ else
+ return SI_SM_IDLE;
+
+ case KCS_START_OP:
+ if (state != KCS_IDLE_STATE) {
+ start_error_recovery(kcs,
+ "State machine not idle at start");
+ break;
+ }
+
+ clear_obf(kcs, status);
+ write_cmd(kcs, KCS_WRITE_START);
+ kcs->state = KCS_WAIT_WRITE_START;
+ break;
+
+ case KCS_WAIT_WRITE_START:
+ if (state != KCS_WRITE_STATE) {
+ start_error_recovery(
+ kcs,
+ "Not in write state at write start");
+ break;
+ }
+ read_data(kcs);
+ if (kcs->write_count == 1) {
+ write_cmd(kcs, KCS_WRITE_END);
+ kcs->state = KCS_WAIT_WRITE_END;
+ } else {
+ write_next_byte(kcs);
+ kcs->state = KCS_WAIT_WRITE;
+ }
+ break;
+
+ case KCS_WAIT_WRITE:
+ if (state != KCS_WRITE_STATE) {
+ start_error_recovery(kcs,
+ "Not in write state for write");
+ break;
+ }
+ clear_obf(kcs, status);
+ if (kcs->write_count == 1) {
+ write_cmd(kcs, KCS_WRITE_END);
+ kcs->state = KCS_WAIT_WRITE_END;
+ } else {
+ write_next_byte(kcs);
+ }
+ break;
+
+ case KCS_WAIT_WRITE_END:
+ if (state != KCS_WRITE_STATE) {
+ start_error_recovery(kcs,
+ "Not in write state"
+ " for write end");
+ break;
+ }
+ clear_obf(kcs, status);
+ write_next_byte(kcs);
+ kcs->state = KCS_WAIT_READ;
+ break;
+
+ case KCS_WAIT_READ:
+ if ((state != KCS_READ_STATE) && (state != KCS_IDLE_STATE)) {
+ start_error_recovery(
+ kcs,
+ "Not in read or idle in read state");
+ break;
+ }
+
+ if (state == KCS_READ_STATE) {
+ if (!check_obf(kcs, status, time))
+ return SI_SM_CALL_WITH_DELAY;
+ read_next_byte(kcs);
+ } else {
+ /*
+ * We don't implement this exactly like the state
+ * machine in the spec. Some broken hardware
+ * does not write the final dummy byte to the
+ * read register. Thus obf will never go high
+ * here. We just go straight to idle, and we
+ * handle clearing out obf in idle state if it
+ * happens to come in.
+ */
+ clear_obf(kcs, status);
+ kcs->orig_write_count = 0;
+ kcs->state = KCS_IDLE;
+ return SI_SM_TRANSACTION_COMPLETE;
+ }
+ break;
+
+ case KCS_ERROR0:
+ clear_obf(kcs, status);
+ status = read_status(kcs);
+ if (GET_STATUS_OBF(status))
+ /* controller isn't responding */
+ if (time_before(jiffies, kcs->error0_timeout))
+ return SI_SM_CALL_WITH_TICK_DELAY;
+ write_cmd(kcs, KCS_GET_STATUS_ABORT);
+ kcs->state = KCS_ERROR1;
+ break;
+
+ case KCS_ERROR1:
+ clear_obf(kcs, status);
+ write_data(kcs, 0);
+ kcs->state = KCS_ERROR2;
+ break;
+
+ case KCS_ERROR2:
+ if (state != KCS_READ_STATE) {
+ start_error_recovery(kcs,
+ "Not in read state for error2");
+ break;
+ }
+ if (!check_obf(kcs, status, time))
+ return SI_SM_CALL_WITH_DELAY;
+
+ clear_obf(kcs, status);
+ write_data(kcs, KCS_READ_BYTE);
+ kcs->state = KCS_ERROR3;
+ break;
+
+ case KCS_ERROR3:
+ if (state != KCS_IDLE_STATE) {
+ start_error_recovery(kcs,
+ "Not in idle state for error3");
+ break;
+ }
+
+ if (!check_obf(kcs, status, time))
+ return SI_SM_CALL_WITH_DELAY;
+
+ clear_obf(kcs, status);
+ if (kcs->orig_write_count) {
+ restart_kcs_transaction(kcs);
+ } else {
+ kcs->state = KCS_IDLE;
+ return SI_SM_TRANSACTION_COMPLETE;
+ }
+ break;
+
+ case KCS_HOSED:
+ break;
+ }
+
+ if (kcs->state == KCS_HOSED) {
+ init_kcs_data_with_state(kcs, kcs->io, KCS_ERROR0);
+ return SI_SM_HOSED;
+ }
+
+ return SI_SM_CALL_WITHOUT_DELAY;
+}
+
+static int kcs_size(void)
+{
+ return sizeof(struct si_sm_data);
+}
+
+static int kcs_detect(struct si_sm_data *kcs)
+{
+ /*
+ * It's impossible for the KCS status register to be all 1's,
+ * (assuming a properly functioning, self-initialized BMC)
+ * but that's what you get from reading a bogus address, so we
+ * test that first.
+ */
+ if (read_status(kcs) == 0xff)
+ return 1;
+
+ return 0;
+}
+
+static void kcs_cleanup(struct si_sm_data *kcs)
+{
+}
+
+const struct si_sm_handlers kcs_smi_handlers = {
+ .init_data = init_kcs_data,
+ .start_transaction = start_kcs_transaction,
+ .get_result = get_kcs_result,
+ .event = kcs_event,
+ .detect = kcs_detect,
+ .cleanup = kcs_cleanup,
+ .size = kcs_size,
+};
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
new file mode 100644
index 0000000000..186f1fee75
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -0,0 +1,5573 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_msghandler.c
+ *
+ * Incoming and outgoing message routing for an IPMI interface.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ */
+
+#define pr_fmt(fmt) "IPMI message handler: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/panic_notifier.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/workqueue.h>
+#include <linux/uuid.h>
+#include <linux/nospec.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+
+#define IPMI_DRIVER_VERSION "39.2"
+
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
+static int ipmi_init_msghandler(void);
+static void smi_recv_tasklet(struct tasklet_struct *t);
+static void handle_new_recv_msgs(struct ipmi_smi *intf);
+static void need_waiter(struct ipmi_smi *intf);
+static int handle_one_recv_msg(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg);
+
+static bool initialized;
+static bool drvregistered;
+
+/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
+enum ipmi_panic_event_op {
+ IPMI_SEND_PANIC_EVENT_NONE,
+ IPMI_SEND_PANIC_EVENT,
+ IPMI_SEND_PANIC_EVENT_STRING,
+ IPMI_SEND_PANIC_EVENT_MAX
+};
+
+/* Indices in this array should be mapped to enum ipmi_panic_event_op */
+static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL };
+
+#ifdef CONFIG_IPMI_PANIC_STRING
+#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
+#elif defined(CONFIG_IPMI_PANIC_EVENT)
+#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
+#else
+#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
+#endif
+
+static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
+
+static int panic_op_write_handler(const char *val,
+ const struct kernel_param *kp)
+{
+ char valcp[16];
+ int e;
+
+ strscpy(valcp, val, sizeof(valcp));
+ e = match_string(ipmi_panic_event_str, -1, strstrip(valcp));
+ if (e < 0)
+ return e;
+
+ ipmi_send_panic_event = e;
+ return 0;
+}
+
+static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
+{
+ const char *event_str;
+
+ if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX)
+ event_str = "???";
+ else
+ event_str = ipmi_panic_event_str[ipmi_send_panic_event];
+
+ return sprintf(buffer, "%s\n", event_str);
+}
+
+static const struct kernel_param_ops panic_op_ops = {
+ .set = panic_op_write_handler,
+ .get = panic_op_read_handler
+};
+module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
+MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
+
+
+#define MAX_EVENTS_IN_QUEUE 25
+
+/* Remain in auto-maintenance mode for this amount of time (in ms). */
+static unsigned long maintenance_mode_timeout_ms = 30000;
+module_param(maintenance_mode_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(maintenance_mode_timeout_ms,
+ "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");
+
+/*
+ * Don't let a message sit in a queue forever, always time it with at least
+ * the max message timer. This is in milliseconds.
+ */
+#define MAX_MSG_TIMEOUT 60000
+
+/*
+ * Timeout times below are in milliseconds, and are done off a 1
+ * second timer. So setting the value to 1000 would mean anything
+ * between 0 and 1000ms. So really the only reasonable minimum
+ * setting is 2000ms, which is between 1 and 2 seconds.
+ */
+
+/* The default timeout for message retries. */
+static unsigned long default_retry_ms = 2000;
+module_param(default_retry_ms, ulong, 0644);
+MODULE_PARM_DESC(default_retry_ms,
+ "The time (milliseconds) between retry sends");
+
+/* The default timeout for maintenance mode message retries. */
+static unsigned long default_maintenance_retry_ms = 3000;
+module_param(default_maintenance_retry_ms, ulong, 0644);
+MODULE_PARM_DESC(default_maintenance_retry_ms,
+ "The time (milliseconds) between retry sends in maintenance mode");
+
+/* The default maximum number of retries */
+static unsigned int default_max_retries = 4;
+module_param(default_max_retries, uint, 0644);
+MODULE_PARM_DESC(default_max_retries,
+		 "The default maximum number of times a message is retried before it times out");
+
+/* The default maximum number of users that may register. */
+static unsigned int max_users = 30;
+module_param(max_users, uint, 0644);
+MODULE_PARM_DESC(max_users,
+ "The most users that may use the IPMI stack at one time.");
+
+/* The default maximum number of messages a user may have outstanding. */
+static unsigned int max_msgs_per_user = 100;
+module_param(max_msgs_per_user, uint, 0644);
+MODULE_PARM_DESC(max_msgs_per_user,
+		 "The most messages a user may have outstanding.");
+
+/* Call every ~1000 ms. */
+#define IPMI_TIMEOUT_TIME 1000
+
+/* How many jiffies does it take to get to the timeout time. */
+#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
+
+/*
+ * Request events from the queue every second (this is the number of
+ * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
+ * future, IPMI will add a way to know immediately if an event is in
+ * the queue and this silliness can go away.
+ */
+#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
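+/*
+ * With the values above this works out to 1, i.e. with the default
+ * timing an event fetch is attempted on every ~1 second timeout tick.
+ */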
+
+/* How long should we cache dynamic device IDs? */
+#define IPMI_DYN_DEV_ID_EXPIRY (10 * HZ)
+
+/*
+ * The main "user" data structure.
+ */
+struct ipmi_user {
+ struct list_head link;
+
+ /*
+ * Set to NULL when the user is destroyed, a pointer to myself
+ * so srcu_dereference can be used on it.
+ */
+ struct ipmi_user *self;
+ struct srcu_struct release_barrier;
+
+ struct kref refcount;
+
+ /* The upper layer that handles receive messages. */
+ const struct ipmi_user_hndl *handler;
+ void *handler_data;
+
+ /* The interface this user is bound to. */
+ struct ipmi_smi *intf;
+
+ /* Does this interface receive IPMI events? */
+ bool gets_events;
+
+ atomic_t nr_msgs;
+
+ /* Free must run in process context for RCU cleanup. */
+ struct work_struct remove_work;
+};
+
+static struct workqueue_struct *remove_work_wq;
+
+static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
+ __acquires(user->release_barrier)
+{
+ struct ipmi_user *ruser;
+
+ *index = srcu_read_lock(&user->release_barrier);
+ ruser = srcu_dereference(user->self, &user->release_barrier);
+ if (!ruser)
+ srcu_read_unlock(&user->release_barrier, *index);
+ return ruser;
+}
+
+static void release_ipmi_user(struct ipmi_user *user, int index)
+{
+ srcu_read_unlock(&user->release_barrier, index);
+}
+
+struct cmd_rcvr {
+ struct list_head link;
+
+ struct ipmi_user *user;
+ unsigned char netfn;
+ unsigned char cmd;
+ unsigned int chans;
+
+ /*
+	 * This is used to form a linked list during mass deletion.
+ * Since this is in an RCU list, we cannot use the link above
+ * or change any data until the RCU period completes. So we
+ * use this next variable during mass deletion so we can have
+ * a list and don't have to wait and restart the search on
+ * every individual deletion of a command.
+ */
+ struct cmd_rcvr *next;
+};
+
+struct seq_table {
+ unsigned int inuse : 1;
+ unsigned int broadcast : 1;
+
+ unsigned long timeout;
+ unsigned long orig_timeout;
+ unsigned int retries_left;
+
+ /*
+ * To verify on an incoming send message response that this is
+ * the message that the response is for, we keep a sequence id
+ * and increment it every time we send a message.
+ */
+ long seqid;
+
+ /*
+ * This is held so we can properly respond to the message on a
+ * timeout, and it is used to hold the temporary data for
+ * retransmission, too.
+ */
+ struct ipmi_recv_msg *recv_msg;
+};
+
+/*
+ * Store the information in a msgid (long) to allow us to find a
+ * sequence table entry from the msgid.
+ */
+#define STORE_SEQ_IN_MSGID(seq, seqid) \
+ ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
+
+#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
+ do { \
+ seq = (((msgid) >> 26) & 0x3f); \
+ seqid = ((msgid) & 0x3ffffff); \
+ } while (0)
+
+#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
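+/*
+ * Example: seq 5 with seqid 100 packs into a msgid of
+ * (5 << 26) | 100 = 0x14000064; unpacking that msgid yields
+ * seq = 5 and seqid = 100 again.
+ */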
+
+#define IPMI_MAX_CHANNELS 16
+struct ipmi_channel {
+ unsigned char medium;
+ unsigned char protocol;
+};
+
+struct ipmi_channel_set {
+ struct ipmi_channel c[IPMI_MAX_CHANNELS];
+};
+
+struct ipmi_my_addrinfo {
+ /*
+ * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
+ * but may be changed by the user.
+ */
+ unsigned char address;
+
+ /*
+ * My LUN. This should generally stay the SMS LUN, but just in
+ * case...
+ */
+ unsigned char lun;
+};
+
+/*
+ * Note that the product id, manufacturer id, guid, and device id are
+ * immutable in this structure, so dyn_mutex is not required for
+ * accessing those. If those change on a BMC, a new BMC is allocated.
+ */
+struct bmc_device {
+ struct platform_device pdev;
+ struct list_head intfs; /* Interfaces on this BMC. */
+ struct ipmi_device_id id;
+ struct ipmi_device_id fetch_id;
+ int dyn_id_set;
+ unsigned long dyn_id_expiry;
+ struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */
+ guid_t guid;
+ guid_t fetch_guid;
+ int dyn_guid_set;
+ struct kref usecount;
+ struct work_struct remove_work;
+ unsigned char cc; /* completion code */
+};
+#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
+
+static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
+ struct ipmi_device_id *id,
+ bool *guid_set, guid_t *guid);
+
+/*
+ * Various statistics for IPMI, these index stats[] in the ipmi_smi
+ * structure.
+ */
+enum ipmi_stat_indexes {
+ /* Commands we got from the user that were invalid. */
+ IPMI_STAT_sent_invalid_commands = 0,
+
+ /* Commands we sent to the MC. */
+ IPMI_STAT_sent_local_commands,
+
+ /* Responses from the MC that were delivered to a user. */
+ IPMI_STAT_handled_local_responses,
+
+ /* Responses from the MC that were not delivered to a user. */
+ IPMI_STAT_unhandled_local_responses,
+
+ /* Commands we sent out to the IPMB bus. */
+ IPMI_STAT_sent_ipmb_commands,
+
+ /* Commands sent on the IPMB that had errors on the SEND CMD */
+ IPMI_STAT_sent_ipmb_command_errs,
+
+ /* Each retransmit increments this count. */
+ IPMI_STAT_retransmitted_ipmb_commands,
+
+ /*
+ * When a message times out (runs out of retransmits) this is
+ * incremented.
+ */
+ IPMI_STAT_timed_out_ipmb_commands,
+
+ /*
+ * This is like above, but for broadcasts. Broadcasts are
+ * *not* included in the above count (they are expected to
+ * time out).
+ */
+ IPMI_STAT_timed_out_ipmb_broadcasts,
+
+ /* Responses I have sent to the IPMB bus. */
+ IPMI_STAT_sent_ipmb_responses,
+
+ /* The response was delivered to the user. */
+ IPMI_STAT_handled_ipmb_responses,
+
+ /* The response had invalid data in it. */
+ IPMI_STAT_invalid_ipmb_responses,
+
+ /* The response didn't have anyone waiting for it. */
+ IPMI_STAT_unhandled_ipmb_responses,
+
+ /* Commands we sent out to the IPMB bus. */
+ IPMI_STAT_sent_lan_commands,
+
+ /* Commands sent on the IPMB that had errors on the SEND CMD */
+ IPMI_STAT_sent_lan_command_errs,
+
+ /* Each retransmit increments this count. */
+ IPMI_STAT_retransmitted_lan_commands,
+
+ /*
+ * When a message times out (runs out of retransmits) this is
+ * incremented.
+ */
+ IPMI_STAT_timed_out_lan_commands,
+
+ /* Responses I have sent to the IPMB bus. */
+ IPMI_STAT_sent_lan_responses,
+
+ /* The response was delivered to the user. */
+ IPMI_STAT_handled_lan_responses,
+
+ /* The response had invalid data in it. */
+ IPMI_STAT_invalid_lan_responses,
+
+ /* The response didn't have anyone waiting for it. */
+ IPMI_STAT_unhandled_lan_responses,
+
+ /* The command was delivered to the user. */
+ IPMI_STAT_handled_commands,
+
+ /* The command had invalid data in it. */
+ IPMI_STAT_invalid_commands,
+
+ /* The command didn't have anyone waiting for it. */
+ IPMI_STAT_unhandled_commands,
+
+ /* Invalid data in an event. */
+ IPMI_STAT_invalid_events,
+
+ /* Events that were received with the proper format. */
+ IPMI_STAT_events,
+
+ /* Retransmissions on IPMB that failed. */
+ IPMI_STAT_dropped_rexmit_ipmb_commands,
+
+ /* Retransmissions on LAN that failed. */
+ IPMI_STAT_dropped_rexmit_lan_commands,
+
+ /* This *must* remain last, add new values above this. */
+ IPMI_NUM_STATS
+};
+
+
+#define IPMI_IPMB_NUM_SEQ 64
+struct ipmi_smi {
+ struct module *owner;
+
+ /* What interface number are we? */
+ int intf_num;
+
+ struct kref refcount;
+
+ /* Set when the interface is being unregistered. */
+ bool in_shutdown;
+
+ /* Used for a list of interfaces. */
+ struct list_head link;
+
+ /*
+ * The list of upper layers that are using me. seq_lock write
+ * protects this. Read protection is with srcu.
+ */
+ struct list_head users;
+ struct srcu_struct users_srcu;
+ atomic_t nr_users;
+ struct device_attribute nr_users_devattr;
+ struct device_attribute nr_msgs_devattr;
+
+
+ /* Used for wake ups at startup. */
+ wait_queue_head_t waitq;
+
+ /*
+ * Prevents the interface from being unregistered when the
+ * interface is used by being looked up through the BMC
+ * structure.
+ */
+ struct mutex bmc_reg_mutex;
+
+ struct bmc_device tmp_bmc;
+ struct bmc_device *bmc;
+ bool bmc_registered;
+ struct list_head bmc_link;
+ char *my_dev_name;
+ bool in_bmc_register; /* Handle recursive situations. Yuck. */
+ struct work_struct bmc_reg_work;
+
+ const struct ipmi_smi_handlers *handlers;
+ void *send_info;
+
+ /* Driver-model device for the system interface. */
+ struct device *si_dev;
+
+ /*
+ * A table of sequence numbers for this interface. We use the
+ * sequence numbers for IPMB messages that go out of the
+ * interface to match them up with their responses. A routine
+ * is called periodically to time the items in this list.
+ */
+ spinlock_t seq_lock;
+ struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
+ int curr_seq;
+
+ /*
+ * Messages queued for delivery. If delivery fails (out of memory
+	 * for instance), they will stay in here to be processed later in a
+ * periodic timer interrupt. The tasklet is for handling received
+ * messages directly from the handler.
+ */
+ spinlock_t waiting_rcv_msgs_lock;
+ struct list_head waiting_rcv_msgs;
+ atomic_t watchdog_pretimeouts_to_deliver;
+ struct tasklet_struct recv_tasklet;
+
+ spinlock_t xmit_msgs_lock;
+ struct list_head xmit_msgs;
+ struct ipmi_smi_msg *curr_msg;
+ struct list_head hp_xmit_msgs;
+
+ /*
+ * The list of command receivers that are registered for commands
+ * on this interface.
+ */
+ struct mutex cmd_rcvrs_mutex;
+ struct list_head cmd_rcvrs;
+
+ /*
+	 * Events that were queued because no one was there to receive
+ * them.
+ */
+ spinlock_t events_lock; /* For dealing with event stuff. */
+ struct list_head waiting_events;
+ unsigned int waiting_events_count; /* How many events in queue? */
+ char delivering_events;
+ char event_msg_printed;
+
+ /* How many users are waiting for events? */
+ atomic_t event_waiters;
+ unsigned int ticks_to_req_ev;
+
+ spinlock_t watch_lock; /* For dealing with watch stuff below. */
+
+ /* How many users are waiting for commands? */
+ unsigned int command_waiters;
+
+ /* How many users are waiting for watchdogs? */
+ unsigned int watchdog_waiters;
+
+ /* How many users are waiting for message responses? */
+ unsigned int response_waiters;
+
+ /*
+ * Tells what the lower layer has last been asked to watch for,
+ * messages and/or watchdogs. Protected by watch_lock.
+ */
+ unsigned int last_watch_mask;
+
+ /*
+	 * The event receiver for my BMC; only really used at panic
+ * shutdown as a place to store this.
+ */
+ unsigned char event_receiver;
+ unsigned char event_receiver_lun;
+ unsigned char local_sel_device;
+ unsigned char local_event_generator;
+
+ /* For handling of maintenance mode. */
+ int maintenance_mode;
+ bool maintenance_mode_enable;
+ int auto_maintenance_timeout;
+ spinlock_t maintenance_mode_lock; /* Used in a timer... */
+
+ /*
+ * If we are doing maintenance on something on IPMB, extend
+ * the timeout time to avoid timeouts writing firmware and
+ * such.
+ */
+ int ipmb_maintenance_mode_timeout;
+
+ /*
+	 * A cheap hack: if this is non-null and a message to an
+ * interface comes in with a NULL user, call this routine with
+ * it. Note that the message will still be freed by the
+ * caller. This only works on the system interface.
+ *
+ * Protected by bmc_reg_mutex.
+ */
+ void (*null_user_handler)(struct ipmi_smi *intf,
+ struct ipmi_recv_msg *msg);
+
+ /*
+ * When we are scanning the channels for an SMI, this will
+ * tell which channel we are scanning.
+ */
+ int curr_channel;
+
+ /* Channel information */
+ struct ipmi_channel_set *channel_list;
+ unsigned int curr_working_cset; /* First index into the following. */
+ struct ipmi_channel_set wchannels[2];
+ struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
+ bool channels_ready;
+
+ atomic_t stats[IPMI_NUM_STATS];
+
+ /*
+	 * run_to_completion duplicate of the smb_info, smi_info
+	 * and ipmi_serial_info structures.  Used to decrease the number
+	 * of parameters passed by "low" level IPMI code.
+ */
+ int run_to_completion;
+};
+#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
+
+static void __get_guid(struct ipmi_smi *intf);
+static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
+static int __ipmi_bmc_register(struct ipmi_smi *intf,
+ struct ipmi_device_id *id,
+ bool guid_set, guid_t *guid, int intf_num);
+static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
+
+
+/*
+ * The driver model view of the IPMI messaging driver.
+ */
+static struct platform_driver ipmidriver = {
+ .driver = {
+ .name = "ipmi",
+ .bus = &platform_bus_type
+ }
+};
+/*
+ * This mutex keeps us from adding the same BMC twice.
+ */
+static DEFINE_MUTEX(ipmidriver_mutex);
+
+static LIST_HEAD(ipmi_interfaces);
+static DEFINE_MUTEX(ipmi_interfaces_mutex);
+#define ipmi_interfaces_mutex_held() \
+ lockdep_is_held(&ipmi_interfaces_mutex)
+static struct srcu_struct ipmi_interfaces_srcu;
+
+/*
+ * List of watchers that want to know when smi's are added and deleted.
+ */
+static LIST_HEAD(smi_watchers);
+static DEFINE_MUTEX(smi_watchers_mutex);
+
+#define ipmi_inc_stat(intf, stat) \
+ atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
+#define ipmi_get_stat(intf, stat) \
+ ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
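+/*
+ * The stat name is pasted onto IPMI_STAT_, so for example
+ * ipmi_inc_stat(intf, sent_ipmb_commands) bumps
+ * intf->stats[IPMI_STAT_sent_ipmb_commands].
+ */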
+
+static const char * const addr_src_to_str[] = {
+ "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
+ "device-tree", "platform"
+};
+
+const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
+{
+ if (src >= SI_LAST)
+ src = 0; /* Invalid */
+ return addr_src_to_str[src];
+}
+EXPORT_SYMBOL(ipmi_addr_src_to_str);
+
+static int is_lan_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_LAN_ADDR_TYPE;
+}
+
+static int is_ipmb_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
+}
+
+static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
+}
+
+static int is_ipmb_direct_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE;
+}
+
+static void free_recv_msg_list(struct list_head *q)
+{
+ struct ipmi_recv_msg *msg, *msg2;
+
+ list_for_each_entry_safe(msg, msg2, q, link) {
+ list_del(&msg->link);
+ ipmi_free_recv_msg(msg);
+ }
+}
+
+static void free_smi_msg_list(struct list_head *q)
+{
+ struct ipmi_smi_msg *msg, *msg2;
+
+ list_for_each_entry_safe(msg, msg2, q, link) {
+ list_del(&msg->link);
+ ipmi_free_smi_msg(msg);
+ }
+}
+
+static void clean_up_interface_data(struct ipmi_smi *intf)
+{
+ int i;
+ struct cmd_rcvr *rcvr, *rcvr2;
+ struct list_head list;
+
+ tasklet_kill(&intf->recv_tasklet);
+
+ free_smi_msg_list(&intf->waiting_rcv_msgs);
+ free_recv_msg_list(&intf->waiting_events);
+
+ /*
+ * Wholesale remove all the entries from the list in the
+ * interface and wait for RCU to know that none are in use.
+ */
+ mutex_lock(&intf->cmd_rcvrs_mutex);
+ INIT_LIST_HEAD(&list);
+ list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
+
+ list_for_each_entry_safe(rcvr, rcvr2, &list, link)
+ kfree(rcvr);
+
+ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+ if ((intf->seq_table[i].inuse)
+ && (intf->seq_table[i].recv_msg))
+ ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
+ }
+}
+
+static void intf_free(struct kref *ref)
+{
+ struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
+
+ clean_up_interface_data(intf);
+ kfree(intf);
+}
+
+int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
+{
+ struct ipmi_smi *intf;
+ int index, rv;
+
+ /*
+	 * Make sure the driver is actually initialized; this handles
+ * problems with initialization order.
+ */
+ rv = ipmi_init_msghandler();
+ if (rv)
+ return rv;
+
+ mutex_lock(&smi_watchers_mutex);
+
+ list_add(&watcher->link, &smi_watchers);
+
+ index = srcu_read_lock(&ipmi_interfaces_srcu);
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
+ lockdep_is_held(&smi_watchers_mutex)) {
+ int intf_num = READ_ONCE(intf->intf_num);
+
+ if (intf_num == -1)
+ continue;
+ watcher->new_smi(intf_num, intf->si_dev);
+ }
+ srcu_read_unlock(&ipmi_interfaces_srcu, index);
+
+ mutex_unlock(&smi_watchers_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_smi_watcher_register);
+
+int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
+{
+ mutex_lock(&smi_watchers_mutex);
+ list_del(&watcher->link);
+ mutex_unlock(&smi_watchers_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
+
+/*
+ * Takes and releases smi_watchers_mutex itself, so it must not be
+ * called with that mutex already held.
+ */
+static void
+call_smi_watchers(int i, struct device *dev)
+{
+ struct ipmi_smi_watcher *w;
+
+ mutex_lock(&smi_watchers_mutex);
+ list_for_each_entry(w, &smi_watchers, link) {
+ if (try_module_get(w->owner)) {
+ w->new_smi(i, dev);
+ module_put(w->owner);
+ }
+ }
+ mutex_unlock(&smi_watchers_mutex);
+}
+
+static int
+ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
+{
+ if (addr1->addr_type != addr2->addr_type)
+ return 0;
+
+ if (addr1->channel != addr2->channel)
+ return 0;
+
+ if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+ struct ipmi_system_interface_addr *smi_addr1
+ = (struct ipmi_system_interface_addr *) addr1;
+ struct ipmi_system_interface_addr *smi_addr2
+ = (struct ipmi_system_interface_addr *) addr2;
+ return (smi_addr1->lun == smi_addr2->lun);
+ }
+
+ if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
+ struct ipmi_ipmb_addr *ipmb_addr1
+ = (struct ipmi_ipmb_addr *) addr1;
+ struct ipmi_ipmb_addr *ipmb_addr2
+ = (struct ipmi_ipmb_addr *) addr2;
+
+ return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
+ && (ipmb_addr1->lun == ipmb_addr2->lun));
+ }
+
+ if (is_ipmb_direct_addr(addr1)) {
+ struct ipmi_ipmb_direct_addr *daddr1
+ = (struct ipmi_ipmb_direct_addr *) addr1;
+ struct ipmi_ipmb_direct_addr *daddr2
+ = (struct ipmi_ipmb_direct_addr *) addr2;
+
+ return daddr1->slave_addr == daddr2->slave_addr &&
+ daddr1->rq_lun == daddr2->rq_lun &&
+ daddr1->rs_lun == daddr2->rs_lun;
+ }
+
+ if (is_lan_addr(addr1)) {
+ struct ipmi_lan_addr *lan_addr1
+ = (struct ipmi_lan_addr *) addr1;
+ struct ipmi_lan_addr *lan_addr2
+ = (struct ipmi_lan_addr *) addr2;
+
+ return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
+ && (lan_addr1->local_SWID == lan_addr2->local_SWID)
+ && (lan_addr1->session_handle
+ == lan_addr2->session_handle)
+ && (lan_addr1->lun == lan_addr2->lun));
+ }
+
+ return 1;
+}
+
+int ipmi_validate_addr(struct ipmi_addr *addr, int len)
+{
+ if (len < sizeof(struct ipmi_system_interface_addr))
+ return -EINVAL;
+
+ if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+ if (addr->channel != IPMI_BMC_CHANNEL)
+ return -EINVAL;
+ return 0;
+ }
+
+ if ((addr->channel == IPMI_BMC_CHANNEL)
+ || (addr->channel >= IPMI_MAX_CHANNELS)
+ || (addr->channel < 0))
+ return -EINVAL;
+
+ if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
+ if (len < sizeof(struct ipmi_ipmb_addr))
+ return -EINVAL;
+ return 0;
+ }
+
+ if (is_ipmb_direct_addr(addr)) {
+ struct ipmi_ipmb_direct_addr *daddr = (void *) addr;
+
+ if (addr->channel != 0)
+ return -EINVAL;
+ if (len < sizeof(struct ipmi_ipmb_direct_addr))
+ return -EINVAL;
+
+ if (daddr->slave_addr & 0x01)
+ return -EINVAL;
+ if (daddr->rq_lun >= 4)
+ return -EINVAL;
+ if (daddr->rs_lun >= 4)
+ return -EINVAL;
+ return 0;
+ }
+
+ if (is_lan_addr(addr)) {
+ if (len < sizeof(struct ipmi_lan_addr))
+ return -EINVAL;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ipmi_validate_addr);
+
+unsigned int ipmi_addr_length(int addr_type)
+{
+ if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ return sizeof(struct ipmi_system_interface_addr);
+
+ if ((addr_type == IPMI_IPMB_ADDR_TYPE)
+ || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
+ return sizeof(struct ipmi_ipmb_addr);
+
+ if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
+ return sizeof(struct ipmi_ipmb_direct_addr);
+
+ if (addr_type == IPMI_LAN_ADDR_TYPE)
+ return sizeof(struct ipmi_lan_addr);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_addr_length);
+
+static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
+{
+ int rv = 0;
+
+ if (!msg->user) {
+ /* Special handling for NULL users. */
+ if (intf->null_user_handler) {
+ intf->null_user_handler(intf, msg);
+ } else {
+ /* No handler, so give up. */
+ rv = -EINVAL;
+ }
+ ipmi_free_recv_msg(msg);
+ } else if (oops_in_progress) {
+ /*
+		 * If we are running in the panic context, calling the
+		 * receive handler doesn't have much meaning and carries a
+		 * deadlock risk, so simply skip it in that case.
+ */
+ ipmi_free_recv_msg(msg);
+ atomic_dec(&msg->user->nr_msgs);
+ } else {
+ int index;
+ struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
+
+ if (user) {
+ atomic_dec(&user->nr_msgs);
+ user->handler->ipmi_recv_hndl(msg, user->handler_data);
+ release_ipmi_user(user, index);
+ } else {
+ /* User went away, give up. */
+ ipmi_free_recv_msg(msg);
+ rv = -EINVAL;
+ }
+ }
+
+ return rv;
+}
+
+static void deliver_local_response(struct ipmi_smi *intf,
+ struct ipmi_recv_msg *msg)
+{
+ if (deliver_response(intf, msg))
+ ipmi_inc_stat(intf, unhandled_local_responses);
+ else
+ ipmi_inc_stat(intf, handled_local_responses);
+}
+
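+/*
+ * Turn the queued request into a one-byte response carrying only the
+ * given completion code (the netfn is flipped to its response pair)
+ * and deliver it locally, so the requester still gets an answer for a
+ * message that failed.
+ */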
+static void deliver_err_response(struct ipmi_smi *intf,
+ struct ipmi_recv_msg *msg, int err)
+{
+ msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ msg->msg_data[0] = err;
+ msg->msg.netfn |= 1; /* Convert to a response. */
+ msg->msg.data_len = 1;
+ msg->msg.data = msg->msg_data;
+ deliver_local_response(intf, msg);
+}
+
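+/*
+ * The *_waiters counters below act as per-class reference counts:
+ * smi_add_watch() bumps the counter for each class in the given flags
+ * and smi_remove_watch() drops it.  The lower layer is only told, via
+ * set_need_watch(), when the resulting mask of non-zero classes
+ * actually changes.
+ */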
+static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
+{
+ unsigned long iflags;
+
+ if (!intf->handlers->set_need_watch)
+ return;
+
+ spin_lock_irqsave(&intf->watch_lock, iflags);
+ if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
+ intf->response_waiters++;
+
+ if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
+ intf->watchdog_waiters++;
+
+ if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
+ intf->command_waiters++;
+
+ if ((intf->last_watch_mask & flags) != flags) {
+ intf->last_watch_mask |= flags;
+ intf->handlers->set_need_watch(intf->send_info,
+ intf->last_watch_mask);
+ }
+ spin_unlock_irqrestore(&intf->watch_lock, iflags);
+}
+
+static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
+{
+ unsigned long iflags;
+
+ if (!intf->handlers->set_need_watch)
+ return;
+
+ spin_lock_irqsave(&intf->watch_lock, iflags);
+ if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
+ intf->response_waiters--;
+
+ if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
+ intf->watchdog_waiters--;
+
+ if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
+ intf->command_waiters--;
+
+ flags = 0;
+ if (intf->response_waiters)
+ flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
+ if (intf->watchdog_waiters)
+ flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
+ if (intf->command_waiters)
+ flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;
+
+ if (intf->last_watch_mask != flags) {
+ intf->last_watch_mask = flags;
+ intf->handlers->set_need_watch(intf->send_info,
+ intf->last_watch_mask);
+ }
+ spin_unlock_irqrestore(&intf->watch_lock, iflags);
+}
+
+/*
+ * Find the next sequence number not being used and add the given
+ * message with the given timeout to the sequence table. This must be
+ * called with the interface's seq_lock held.
+ */
+static int intf_next_seq(struct ipmi_smi *intf,
+ struct ipmi_recv_msg *recv_msg,
+ unsigned long timeout,
+ int retries,
+ int broadcast,
+ unsigned char *seq,
+ long *seqid)
+{
+ int rv = 0;
+ unsigned int i;
+
+ if (timeout == 0)
+ timeout = default_retry_ms;
+ if (retries < 0)
+ retries = default_max_retries;
+
+ for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
+ i = (i+1)%IPMI_IPMB_NUM_SEQ) {
+ if (!intf->seq_table[i].inuse)
+ break;
+ }
+
+ if (!intf->seq_table[i].inuse) {
+ intf->seq_table[i].recv_msg = recv_msg;
+
+ /*
+		 * Start with the maximum timeout; when the send response
+ * comes in we will start the real timer.
+ */
+ intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
+ intf->seq_table[i].orig_timeout = timeout;
+ intf->seq_table[i].retries_left = retries;
+ intf->seq_table[i].broadcast = broadcast;
+ intf->seq_table[i].inuse = 1;
+ intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
+ *seq = i;
+ *seqid = intf->seq_table[i].seqid;
+ intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
+ smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
+ need_waiter(intf);
+ } else {
+ rv = -EAGAIN;
+ }
+
+ return rv;
+}
+
+/*
+ * Return the receive message for the given sequence number and
+ * release the sequence number so it can be reused. Some other data
+ * is passed in to be sure the message matches up correctly (to help
+ * guard against messages coming in after their timeout and the
+ * sequence number being reused).
+ */
+static int intf_find_seq(struct ipmi_smi *intf,
+ unsigned char seq,
+ short channel,
+ unsigned char cmd,
+ unsigned char netfn,
+ struct ipmi_addr *addr,
+ struct ipmi_recv_msg **recv_msg)
+{
+ int rv = -ENODEV;
+ unsigned long flags;
+
+ if (seq >= IPMI_IPMB_NUM_SEQ)
+ return -EINVAL;
+
+ spin_lock_irqsave(&intf->seq_lock, flags);
+ if (intf->seq_table[seq].inuse) {
+ struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
+
+ if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
+ && (msg->msg.netfn == netfn)
+ && (ipmi_addr_equal(addr, &msg->addr))) {
+ *recv_msg = msg;
+ intf->seq_table[seq].inuse = 0;
+ smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
+ rv = 0;
+ }
+ }
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+ return rv;
+}
+
+
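+/*
+ * For outgoing commands the sequence table slot (seq) and its
+ * per-slot generation counter (seqid) are packed into the SMI
+ * message's msgid with STORE_SEQ_IN_MSGID(), so that the routines
+ * below can recover them with GET_SEQ_FROM_MSGID(), find the right
+ * slot, and detect stale slot reuse.
+ */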
+/* Start the timer for a specific sequence table entry. */
+static int intf_start_seq_timer(struct ipmi_smi *intf,
+ long msgid)
+{
+ int rv = -ENODEV;
+ unsigned long flags;
+ unsigned char seq;
+ unsigned long seqid;
+
+
+ GET_SEQ_FROM_MSGID(msgid, seq, seqid);
+
+ spin_lock_irqsave(&intf->seq_lock, flags);
+ /*
+ * We do this verification because the user can be deleted
+ * while a message is outstanding.
+ */
+ if ((intf->seq_table[seq].inuse)
+ && (intf->seq_table[seq].seqid == seqid)) {
+ struct seq_table *ent = &intf->seq_table[seq];
+ ent->timeout = ent->orig_timeout;
+ rv = 0;
+ }
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+ return rv;
+}
+
+/* Got an error for the send message for a specific sequence number. */
+static int intf_err_seq(struct ipmi_smi *intf,
+ long msgid,
+ unsigned int err)
+{
+ int rv = -ENODEV;
+ unsigned long flags;
+ unsigned char seq;
+ unsigned long seqid;
+ struct ipmi_recv_msg *msg = NULL;
+
+
+ GET_SEQ_FROM_MSGID(msgid, seq, seqid);
+
+ spin_lock_irqsave(&intf->seq_lock, flags);
+ /*
+ * We do this verification because the user can be deleted
+ * while a message is outstanding.
+ */
+ if ((intf->seq_table[seq].inuse)
+ && (intf->seq_table[seq].seqid == seqid)) {
+ struct seq_table *ent = &intf->seq_table[seq];
+
+ ent->inuse = 0;
+ smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
+ msg = ent->recv_msg;
+ rv = 0;
+ }
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+ if (msg)
+ deliver_err_response(intf, msg, err);
+
+ return rv;
+}
+
+static void free_user_work(struct work_struct *work)
+{
+ struct ipmi_user *user = container_of(work, struct ipmi_user,
+ remove_work);
+
+ cleanup_srcu_struct(&user->release_barrier);
+ vfree(user);
+}
+
+int ipmi_create_user(unsigned int if_num,
+ const struct ipmi_user_hndl *handler,
+ void *handler_data,
+ struct ipmi_user **user)
+{
+ unsigned long flags;
+ struct ipmi_user *new_user;
+ int rv, index;
+ struct ipmi_smi *intf;
+
+ /*
+ * There is no module usecount here, because it's not
+ * required. Since this can only be used by and called from
+ * other modules, they will implicitly use this module, and
+ * thus this can't be removed unless the other modules are
+ * removed.
+ */
+
+ if (handler == NULL)
+ return -EINVAL;
+
+ /*
+	 * Make sure the driver is actually initialized; this handles
+ * problems with initialization order.
+ */
+ rv = ipmi_init_msghandler();
+ if (rv)
+ return rv;
+
+ new_user = vzalloc(sizeof(*new_user));
+ if (!new_user)
+ return -ENOMEM;
+
+ index = srcu_read_lock(&ipmi_interfaces_srcu);
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (intf->intf_num == if_num)
+ goto found;
+ }
+ /* Not found, return an error */
+ rv = -EINVAL;
+ goto out_kfree;
+
+ found:
+ if (atomic_add_return(1, &intf->nr_users) > max_users) {
+ rv = -EBUSY;
+ goto out_kfree;
+ }
+
+ INIT_WORK(&new_user->remove_work, free_user_work);
+
+ rv = init_srcu_struct(&new_user->release_barrier);
+ if (rv)
+ goto out_kfree;
+
+ if (!try_module_get(intf->owner)) {
+ rv = -ENODEV;
+ goto out_kfree;
+ }
+
+ /* Note that each existing user holds a refcount to the interface. */
+ kref_get(&intf->refcount);
+
+ atomic_set(&new_user->nr_msgs, 0);
+ kref_init(&new_user->refcount);
+ new_user->handler = handler;
+ new_user->handler_data = handler_data;
+ new_user->intf = intf;
+ new_user->gets_events = false;
+
+ rcu_assign_pointer(new_user->self, new_user);
+ spin_lock_irqsave(&intf->seq_lock, flags);
+ list_add_rcu(&new_user->link, &intf->users);
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+ if (handler->ipmi_watchdog_pretimeout)
+ /* User wants pretimeouts, so make sure to watch for them. */
+ smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
+ srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ *user = new_user;
+ return 0;
+
+out_kfree:
+ atomic_dec(&intf->nr_users);
+ srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ vfree(new_user);
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_create_user);
+
+int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
+{
+ int rv, index;
+ struct ipmi_smi *intf;
+
+ index = srcu_read_lock(&ipmi_interfaces_srcu);
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (intf->intf_num == if_num)
+ goto found;
+ }
+ srcu_read_unlock(&ipmi_interfaces_srcu, index);
+
+ /* Not found, return an error */
+ return -EINVAL;
+
+found:
+ if (!intf->handlers->get_smi_info)
+ rv = -ENOTTY;
+ else
+ rv = intf->handlers->get_smi_info(intf->send_info, data);
+ srcu_read_unlock(&ipmi_interfaces_srcu, index);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_get_smi_info);
+
+static void free_user(struct kref *ref)
+{
+ struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
+
+ /* SRCU cleanup must happen in task context. */
+ queue_work(remove_work_wq, &user->remove_work);
+}
+
+static void _ipmi_destroy_user(struct ipmi_user *user)
+{
+ struct ipmi_smi *intf = user->intf;
+ int i;
+ unsigned long flags;
+ struct cmd_rcvr *rcvr;
+ struct cmd_rcvr *rcvrs = NULL;
+ struct module *owner;
+
+ if (!acquire_ipmi_user(user, &i)) {
+ /*
+		 * The user has already been cleaned up; just make sure
+ * nothing is using it and return.
+ */
+ synchronize_srcu(&user->release_barrier);
+ return;
+ }
+
+ rcu_assign_pointer(user->self, NULL);
+ release_ipmi_user(user, i);
+
+ synchronize_srcu(&user->release_barrier);
+
+ if (user->handler->shutdown)
+ user->handler->shutdown(user->handler_data);
+
+ if (user->handler->ipmi_watchdog_pretimeout)
+ smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
+
+ if (user->gets_events)
+ atomic_dec(&intf->event_waiters);
+
+ /* Remove the user from the interface's sequence table. */
+ spin_lock_irqsave(&intf->seq_lock, flags);
+ list_del_rcu(&user->link);
+ atomic_dec(&intf->nr_users);
+
+ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+ if (intf->seq_table[i].inuse
+ && (intf->seq_table[i].recv_msg->user == user)) {
+ intf->seq_table[i].inuse = 0;
+ smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
+ ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
+ }
+ }
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+ /*
+ * Remove the user from the command receiver's table. First
+ * we build a list of everything (not using the standard link,
+ * since other things may be using it till we do
+	 * synchronize_rcu()), then free everything in that list.
+ */
+ mutex_lock(&intf->cmd_rcvrs_mutex);
+ list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
+ lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
+ if (rcvr->user == user) {
+ list_del_rcu(&rcvr->link);
+ rcvr->next = rcvrs;
+ rcvrs = rcvr;
+ }
+ }
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
+ synchronize_rcu();
+ while (rcvrs) {
+ rcvr = rcvrs;
+ rcvrs = rcvr->next;
+ kfree(rcvr);
+ }
+
+ owner = intf->owner;
+ kref_put(&intf->refcount, intf_free);
+ module_put(owner);
+}
+
+int ipmi_destroy_user(struct ipmi_user *user)
+{
+ _ipmi_destroy_user(user);
+
+ kref_put(&user->refcount, free_user);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_destroy_user);
+
+int ipmi_get_version(struct ipmi_user *user,
+ unsigned char *major,
+ unsigned char *minor)
+{
+ struct ipmi_device_id id;
+ int rv, index;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
+ if (!rv) {
+ *major = ipmi_version_major(&id);
+ *minor = ipmi_version_minor(&id);
+ }
+ release_ipmi_user(user, index);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_get_version);
+
+int ipmi_set_my_address(struct ipmi_user *user,
+ unsigned int channel,
+ unsigned char address)
+{
+ int index, rv = 0;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ if (channel >= IPMI_MAX_CHANNELS) {
+ rv = -EINVAL;
+ } else {
+ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
+ user->intf->addrinfo[channel].address = address;
+ }
+ release_ipmi_user(user, index);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_set_my_address);
+
+int ipmi_get_my_address(struct ipmi_user *user,
+ unsigned int channel,
+ unsigned char *address)
+{
+ int index, rv = 0;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ if (channel >= IPMI_MAX_CHANNELS) {
+ rv = -EINVAL;
+ } else {
+ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
+ *address = user->intf->addrinfo[channel].address;
+ }
+ release_ipmi_user(user, index);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_get_my_address);
+
+int ipmi_set_my_LUN(struct ipmi_user *user,
+ unsigned int channel,
+ unsigned char LUN)
+{
+ int index, rv = 0;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ if (channel >= IPMI_MAX_CHANNELS) {
+ rv = -EINVAL;
+ } else {
+ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
+ user->intf->addrinfo[channel].lun = LUN & 0x3;
+ }
+ release_ipmi_user(user, index);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_set_my_LUN);
+
+int ipmi_get_my_LUN(struct ipmi_user *user,
+ unsigned int channel,
+ unsigned char *address)
+{
+ int index, rv = 0;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ if (channel >= IPMI_MAX_CHANNELS) {
+ rv = -EINVAL;
+ } else {
+ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
+ *address = user->intf->addrinfo[channel].lun;
+ }
+ release_ipmi_user(user, index);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_get_my_LUN);
+
+int ipmi_get_maintenance_mode(struct ipmi_user *user)
+{
+ int mode, index;
+ unsigned long flags;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
+ mode = user->intf->maintenance_mode;
+ spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
+ release_ipmi_user(user, index);
+
+ return mode;
+}
+EXPORT_SYMBOL(ipmi_get_maintenance_mode);
+
+static void maintenance_mode_update(struct ipmi_smi *intf)
+{
+ if (intf->handlers->set_maintenance_mode)
+ intf->handlers->set_maintenance_mode(
+ intf->send_info, intf->maintenance_mode_enable);
+}
+
+int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
+{
+ int rv = 0, index;
+ unsigned long flags;
+ struct ipmi_smi *intf = user->intf;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+ if (intf->maintenance_mode != mode) {
+ switch (mode) {
+ case IPMI_MAINTENANCE_MODE_AUTO:
+ intf->maintenance_mode_enable
+ = (intf->auto_maintenance_timeout > 0);
+ break;
+
+ case IPMI_MAINTENANCE_MODE_OFF:
+ intf->maintenance_mode_enable = false;
+ break;
+
+ case IPMI_MAINTENANCE_MODE_ON:
+ intf->maintenance_mode_enable = true;
+ break;
+
+ default:
+ rv = -EINVAL;
+ goto out_unlock;
+ }
+ intf->maintenance_mode = mode;
+
+ maintenance_mode_update(intf);
+ }
+ out_unlock:
+ spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
+ release_ipmi_user(user, index);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_set_maintenance_mode);
+
+int ipmi_set_gets_events(struct ipmi_user *user, bool val)
+{
+ unsigned long flags;
+ struct ipmi_smi *intf = user->intf;
+ struct ipmi_recv_msg *msg, *msg2;
+ struct list_head msgs;
+ int index;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&msgs);
+
+ spin_lock_irqsave(&intf->events_lock, flags);
+ if (user->gets_events == val)
+ goto out;
+
+ user->gets_events = val;
+
+ if (val) {
+ if (atomic_inc_return(&intf->event_waiters) == 1)
+ need_waiter(intf);
+ } else {
+ atomic_dec(&intf->event_waiters);
+ }
+
+ if (intf->delivering_events)
+ /*
+ * Another thread is delivering events for this, so
+ * let it handle any new events.
+ */
+ goto out;
+
+ /* Deliver any queued events. */
+ while (user->gets_events && !list_empty(&intf->waiting_events)) {
+ list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
+ list_move_tail(&msg->link, &msgs);
+ intf->waiting_events_count = 0;
+ if (intf->event_msg_printed) {
+ dev_warn(intf->si_dev, "Event queue no longer full\n");
+ intf->event_msg_printed = 0;
+ }
+
+ intf->delivering_events = 1;
+ spin_unlock_irqrestore(&intf->events_lock, flags);
+
+ list_for_each_entry_safe(msg, msg2, &msgs, link) {
+ msg->user = user;
+ kref_get(&user->refcount);
+ deliver_local_response(intf, msg);
+ }
+
+ spin_lock_irqsave(&intf->events_lock, flags);
+ intf->delivering_events = 0;
+ }
+
+ out:
+ spin_unlock_irqrestore(&intf->events_lock, flags);
+ release_ipmi_user(user, index);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_set_gets_events);
+
+static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
+ unsigned char netfn,
+ unsigned char cmd,
+ unsigned char chan)
+{
+ struct cmd_rcvr *rcvr;
+
+ list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
+ lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
+ if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
+ && (rcvr->chans & (1 << chan)))
+ return rcvr;
+ }
+ return NULL;
+}
+
+static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
+ unsigned char netfn,
+ unsigned char cmd,
+ unsigned int chans)
+{
+ struct cmd_rcvr *rcvr;
+
+ list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
+ lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
+ if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
+ && (rcvr->chans & chans))
+ return 0;
+ }
+ return 1;
+}
+
+int ipmi_register_for_cmd(struct ipmi_user *user,
+ unsigned char netfn,
+ unsigned char cmd,
+ unsigned int chans)
+{
+ struct ipmi_smi *intf = user->intf;
+ struct cmd_rcvr *rcvr;
+ int rv = 0, index;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
+ if (!rcvr) {
+ rv = -ENOMEM;
+ goto out_release;
+ }
+ rcvr->cmd = cmd;
+ rcvr->netfn = netfn;
+ rcvr->chans = chans;
+ rcvr->user = user;
+
+ mutex_lock(&intf->cmd_rcvrs_mutex);
+ /* Make sure the command/netfn is not already registered. */
+ if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
+ rv = -EBUSY;
+ goto out_unlock;
+ }
+
+ smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
+
+ list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
+
+out_unlock:
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
+ if (rv)
+ kfree(rcvr);
+out_release:
+ release_ipmi_user(user, index);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_register_for_cmd);
+
+int ipmi_unregister_for_cmd(struct ipmi_user *user,
+ unsigned char netfn,
+ unsigned char cmd,
+ unsigned int chans)
+{
+ struct ipmi_smi *intf = user->intf;
+ struct cmd_rcvr *rcvr;
+ struct cmd_rcvr *rcvrs = NULL;
+ int i, rv = -ENOENT, index;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ mutex_lock(&intf->cmd_rcvrs_mutex);
+ for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
+ if (((1 << i) & chans) == 0)
+ continue;
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
+ if (rcvr == NULL)
+ continue;
+ if (rcvr->user == user) {
+ rv = 0;
+ rcvr->chans &= ~chans;
+ if (rcvr->chans == 0) {
+ list_del_rcu(&rcvr->link);
+ rcvr->next = rcvrs;
+ rcvrs = rcvr;
+ }
+ }
+ }
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
+ synchronize_rcu();
+ release_ipmi_user(user, index);
+ while (rcvrs) {
+ smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
+ rcvr = rcvrs;
+ rcvrs = rcvr->next;
+ kfree(rcvr);
+ }
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_unregister_for_cmd);
+
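+/*
+ * IPMB checksums are two's-complement: the returned byte is chosen so
+ * that the sum of all covered bytes plus the checksum is 0 (mod 256).
+ * For example, for the two bytes 0x20 0x18 the sum is 0x38, so the
+ * checksum is 0xc8.
+ */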
+unsigned char
+ipmb_checksum(unsigned char *data, int size)
+{
+ unsigned char csum = 0;
+
+ for (; size > 0; size--, data++)
+ csum += *data;
+
+ return -csum;
+}
+EXPORT_SYMBOL(ipmb_checksum);
+
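+/*
+ * Wrap an IPMB message in a Send Message command.  The resulting SMI
+ * message is laid out as follows (i is 1 for a broadcast, which
+ * inserts a leading zero byte before the slave address, otherwise 0):
+ *
+ *   data[0]    NetFn App request, LUN 0
+ *   data[1]    Send Message command
+ *   data[2]    channel
+ *   data[i+3]  responder slave address
+ *   data[i+4]  netfn << 2 | responder LUN
+ *   data[i+5]  checksum over the previous two bytes
+ *   data[i+6]  requester (source) slave address
+ *   data[i+7]  ipmb_seq << 2 | source LUN
+ *   data[i+8]  command, followed by the data and a final checksum
+ */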
+static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_ipmb_addr *ipmb_addr,
+ long msgid,
+ unsigned char ipmb_seq,
+ int broadcast,
+ unsigned char source_address,
+ unsigned char source_lun)
+{
+ int i = broadcast;
+
+ /* Format the IPMB header data. */
+ smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_msg->data[1] = IPMI_SEND_MSG_CMD;
+ smi_msg->data[2] = ipmb_addr->channel;
+ if (broadcast)
+ smi_msg->data[3] = 0;
+ smi_msg->data[i+3] = ipmb_addr->slave_addr;
+ smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
+ smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
+ smi_msg->data[i+6] = source_address;
+ smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
+ smi_msg->data[i+8] = msg->cmd;
+
+ /* Now tack on the data to the message. */
+ if (msg->data_len > 0)
+ memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
+ smi_msg->data_size = msg->data_len + 9;
+
+ /* Now calculate the checksum and tack it on. */
+ smi_msg->data[i+smi_msg->data_size]
+ = ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
+
+ /*
+ * Add on the checksum size and the offset from the
+ * broadcast.
+ */
+ smi_msg->data_size += 1 + i;
+
+ smi_msg->msgid = msgid;
+}
+
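+/*
+ * Wrap a LAN-bridged message in a Send Message command: channel and
+ * session handle come first, then the remote SWID / netfn / checksum /
+ * local SWID / seq / command header, the data, and a trailing
+ * checksum.
+ */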
+static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_lan_addr *lan_addr,
+ long msgid,
+ unsigned char ipmb_seq,
+ unsigned char source_lun)
+{
+ /* Format the IPMB header data. */
+ smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_msg->data[1] = IPMI_SEND_MSG_CMD;
+ smi_msg->data[2] = lan_addr->channel;
+ smi_msg->data[3] = lan_addr->session_handle;
+ smi_msg->data[4] = lan_addr->remote_SWID;
+ smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
+ smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
+ smi_msg->data[7] = lan_addr->local_SWID;
+ smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
+ smi_msg->data[9] = msg->cmd;
+
+ /* Now tack on the data to the message. */
+ if (msg->data_len > 0)
+ memcpy(&smi_msg->data[10], msg->data, msg->data_len);
+ smi_msg->data_size = msg->data_len + 10;
+
+ /* Now calculate the checksum and tack it on. */
+ smi_msg->data[smi_msg->data_size]
+ = ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);
+
+ /*
+	 * Add on the checksum size.
+ */
+ smi_msg->data_size += 1;
+
+ smi_msg->msgid = msgid;
+}
+
+static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *smi_msg,
+ int priority)
+{
+ if (intf->curr_msg) {
+ if (priority > 0)
+ list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
+ else
+ list_add_tail(&smi_msg->link, &intf->xmit_msgs);
+ smi_msg = NULL;
+ } else {
+ intf->curr_msg = smi_msg;
+ }
+
+ return smi_msg;
+}
+
+static void smi_send(struct ipmi_smi *intf,
+ const struct ipmi_smi_handlers *handlers,
+ struct ipmi_smi_msg *smi_msg, int priority)
+{
+ int run_to_completion = intf->run_to_completion;
+ unsigned long flags = 0;
+
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+ smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+
+ if (smi_msg)
+ handlers->sender(intf->send_info, smi_msg);
+}
+
+static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
+{
+ return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
+ && ((msg->cmd == IPMI_COLD_RESET_CMD)
+ || (msg->cmd == IPMI_WARM_RESET_CMD)))
+ || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
+}
+
+static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_smi_msg *smi_msg,
+ struct ipmi_recv_msg *recv_msg,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ struct ipmi_system_interface_addr *smi_addr;
+
+ if (msg->netfn & 1)
+ /* Responses are not allowed to the SMI. */
+ return -EINVAL;
+
+ smi_addr = (struct ipmi_system_interface_addr *) addr;
+ if (smi_addr->lun > 3) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
+
+ if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
+ && ((msg->cmd == IPMI_SEND_MSG_CMD)
+ || (msg->cmd == IPMI_GET_MSG_CMD)
+ || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
+ /*
+ * We don't let the user do these, since we manage
+ * the sequence numbers.
+ */
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ if (is_maintenance_mode_cmd(msg)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+ intf->auto_maintenance_timeout
+ = maintenance_mode_timeout_ms;
+ if (!intf->maintenance_mode
+ && !intf->maintenance_mode_enable) {
+ intf->maintenance_mode_enable = true;
+ maintenance_mode_update(intf);
+ }
+ spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+ flags);
+ }
+
+ if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EMSGSIZE;
+ }
+
+ smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
+ smi_msg->data[1] = msg->cmd;
+ smi_msg->msgid = msgid;
+ smi_msg->user_data = recv_msg;
+ if (msg->data_len > 0)
+ memcpy(&smi_msg->data[2], msg->data, msg->data_len);
+ smi_msg->data_size = msg->data_len + 2;
+ ipmi_inc_stat(intf, sent_local_commands);
+
+ return 0;
+}
+
+static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_smi_msg *smi_msg,
+ struct ipmi_recv_msg *recv_msg,
+ unsigned char source_address,
+ unsigned char source_lun,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ struct ipmi_ipmb_addr *ipmb_addr;
+ unsigned char ipmb_seq;
+ long seqid;
+ int broadcast = 0;
+ struct ipmi_channel *chans;
+ int rv = 0;
+
+ if (addr->channel >= IPMI_MAX_CHANNELS) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ chans = READ_ONCE(intf->channel_list)->c;
+
+ if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
+ /*
+ * Broadcasts add a zero at the beginning of the
+		 * message, but are otherwise the same as an IPMB
+ * address.
+ */
+ addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+ broadcast = 1;
+ retries = 0; /* Don't retry broadcasts. */
+ }
+
+ /*
+ * 9 for the header and 1 for the checksum, plus
+ * possibly one for the broadcast.
+ */
+ if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EMSGSIZE;
+ }
+
+ ipmb_addr = (struct ipmi_ipmb_addr *) addr;
+ if (ipmb_addr->lun > 3) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
+
+ if (recv_msg->msg.netfn & 0x1) {
+ /*
+ * It's a response, so use the user's sequence
+ * from msgid.
+ */
+ ipmi_inc_stat(intf, sent_ipmb_responses);
+ format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
+ msgid, broadcast,
+ source_address, source_lun);
+
+ /*
+ * Save the receive message so we can use it
+ * to deliver the response.
+ */
+ smi_msg->user_data = recv_msg;
+ } else {
+ /* It's a command, so get a sequence for it. */
+ unsigned long flags;
+
+ spin_lock_irqsave(&intf->seq_lock, flags);
+
+ if (is_maintenance_mode_cmd(msg))
+ intf->ipmb_maintenance_mode_timeout =
+ maintenance_mode_timeout_ms;
+
+ if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
+ /* Different default in maintenance mode */
+ retry_time_ms = default_maintenance_retry_ms;
+
+ /*
+		 * Create a sequence number, using the supplied timeout
+		 * and retry count (or the defaults if none were given).
+ */
+ rv = intf_next_seq(intf,
+ recv_msg,
+ retry_time_ms,
+ retries,
+ broadcast,
+ &ipmb_seq,
+ &seqid);
+ if (rv)
+ /*
+ * We have used up all the sequence numbers,
+ * probably, so abort.
+ */
+ goto out_err;
+
+ ipmi_inc_stat(intf, sent_ipmb_commands);
+
+ /*
+ * Store the sequence number in the message,
+ * so that when the send message response
+ * comes back we can start the timer.
+ */
+ format_ipmb_msg(smi_msg, msg, ipmb_addr,
+ STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
+ ipmb_seq, broadcast,
+ source_address, source_lun);
+
+ /*
+ * Copy the message into the recv message data, so we
+ * can retransmit it later if necessary.
+ */
+ memcpy(recv_msg->msg_data, smi_msg->data,
+ smi_msg->data_size);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = smi_msg->data_size;
+
+ /*
+ * We don't unlock until here, because we need
+ * to copy the completed message into the
+ * recv_msg before we release the lock.
+ * Otherwise, race conditions may bite us. I
+ * know that's pretty paranoid, but I prefer
+ * to be correct.
+ */
+out_err:
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+ }
+
+ return rv;
+}
+
+static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_smi_msg *smi_msg,
+ struct ipmi_recv_msg *recv_msg,
+ unsigned char source_lun)
+{
+ struct ipmi_ipmb_direct_addr *daddr;
+ bool is_cmd = !(recv_msg->msg.netfn & 0x1);
+
+ if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT))
+ return -EAFNOSUPPORT;
+
+ /* Responses must have a completion code. */
+ if (!is_cmd && msg->data_len < 1) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EMSGSIZE;
+ }
+
+ daddr = (struct ipmi_ipmb_direct_addr *) addr;
+ if (daddr->rq_lun > 3 || daddr->rs_lun > 3) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT;
+ smi_msg->msgid = msgid;
+
+ if (is_cmd) {
+ smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun;
+ smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun;
+ } else {
+ smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun;
+ smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun;
+ }
+ smi_msg->data[1] = daddr->slave_addr;
+ smi_msg->data[3] = msg->cmd;
+
+ memcpy(smi_msg->data + 4, msg->data, msg->data_len);
+ smi_msg->data_size = msg->data_len + 4;
+
+ smi_msg->user_data = recv_msg;
+
+ return 0;
+}
+
+static int i_ipmi_req_lan(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_smi_msg *smi_msg,
+ struct ipmi_recv_msg *recv_msg,
+ unsigned char source_lun,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ struct ipmi_lan_addr *lan_addr;
+ unsigned char ipmb_seq;
+ long seqid;
+ struct ipmi_channel *chans;
+ int rv = 0;
+
+ if (addr->channel >= IPMI_MAX_CHANNELS) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ chans = READ_ONCE(intf->channel_list)->c;
+
+ if ((chans[addr->channel].medium
+ != IPMI_CHANNEL_MEDIUM_8023LAN)
+ && (chans[addr->channel].medium
+ != IPMI_CHANNEL_MEDIUM_ASYNC)) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ /* 11 for the header and 1 for the checksum. */
+ if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EMSGSIZE;
+ }
+
+ lan_addr = (struct ipmi_lan_addr *) addr;
+ if (lan_addr->lun > 3) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
+
+ if (recv_msg->msg.netfn & 0x1) {
+ /*
+ * It's a response, so use the user's sequence
+ * from msgid.
+ */
+ ipmi_inc_stat(intf, sent_lan_responses);
+ format_lan_msg(smi_msg, msg, lan_addr, msgid,
+ msgid, source_lun);
+
+ /*
+ * Save the receive message so we can use it
+ * to deliver the response.
+ */
+ smi_msg->user_data = recv_msg;
+ } else {
+ /* It's a command, so get a sequence for it. */
+ unsigned long flags;
+
+ spin_lock_irqsave(&intf->seq_lock, flags);
+
+ /*
+		 * Create a sequence number, using the supplied timeout
+		 * and retry count (or the defaults if none were given).
+ */
+ rv = intf_next_seq(intf,
+ recv_msg,
+ retry_time_ms,
+ retries,
+ 0,
+ &ipmb_seq,
+ &seqid);
+ if (rv)
+ /*
+ * We have used up all the sequence numbers,
+ * probably, so abort.
+ */
+ goto out_err;
+
+ ipmi_inc_stat(intf, sent_lan_commands);
+
+ /*
+ * Store the sequence number in the message,
+ * so that when the send message response
+ * comes back we can start the timer.
+ */
+ format_lan_msg(smi_msg, msg, lan_addr,
+ STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
+ ipmb_seq, source_lun);
+
+ /*
+ * Copy the message into the recv message data, so we
+ * can retransmit it later if necessary.
+ */
+ memcpy(recv_msg->msg_data, smi_msg->data,
+ smi_msg->data_size);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = smi_msg->data_size;
+
+ /*
+ * We don't unlock until here, because we need
+ * to copy the completed message into the
+ * recv_msg before we release the lock.
+ * Otherwise, race conditions may bite us. I
+ * know that's pretty paranoid, but I prefer
+ * to be correct.
+ */
+out_err:
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+ }
+
+ return rv;
+}
+
+/*
+ * Separate from ipmi_request so that the user does not have to be
+ * supplied in certain circumstances (mainly at panic time). If
+ * messages are supplied, they will be freed, even if an error
+ * occurs.
+ */
+static int i_ipmi_request(struct ipmi_user *user,
+ struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ void *user_msg_data,
+ void *supplied_smi,
+ struct ipmi_recv_msg *supplied_recv,
+ int priority,
+ unsigned char source_address,
+ unsigned char source_lun,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ struct ipmi_smi_msg *smi_msg;
+ struct ipmi_recv_msg *recv_msg;
+ int rv = 0;
+
+ if (user) {
+ if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
+ /* Decrement will happen at the end of the routine. */
+ rv = -EBUSY;
+ goto out;
+ }
+ }
+
+ if (supplied_recv)
+ recv_msg = supplied_recv;
+ else {
+ recv_msg = ipmi_alloc_recv_msg();
+ if (recv_msg == NULL) {
+ rv = -ENOMEM;
+ goto out;
+ }
+ }
+ recv_msg->user_msg_data = user_msg_data;
+
+ if (supplied_smi)
+ smi_msg = supplied_smi;
+ else {
+ smi_msg = ipmi_alloc_smi_msg();
+ if (smi_msg == NULL) {
+ if (!supplied_recv)
+ ipmi_free_recv_msg(recv_msg);
+ rv = -ENOMEM;
+ goto out;
+ }
+ }
+
+ rcu_read_lock();
+ if (intf->in_shutdown) {
+ rv = -ENODEV;
+ goto out_err;
+ }
+
+ recv_msg->user = user;
+ if (user)
+ /* The put happens when the message is freed. */
+ kref_get(&user->refcount);
+ recv_msg->msgid = msgid;
+ /*
+ * Store the message to send in the receive message so timeout
+ * responses can get the proper response data.
+ */
+ recv_msg->msg = *msg;
+
+ if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+ rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
+ recv_msg, retries, retry_time_ms);
+ } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
+ rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
+ source_address, source_lun,
+ retries, retry_time_ms);
+ } else if (is_ipmb_direct_addr(addr)) {
+ rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg,
+ recv_msg, source_lun);
+ } else if (is_lan_addr(addr)) {
+ rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
+ source_lun, retries, retry_time_ms);
+ } else {
+ /* Unknown address type. */
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ rv = -EINVAL;
+ }
+
+ if (rv) {
+out_err:
+ ipmi_free_smi_msg(smi_msg);
+ ipmi_free_recv_msg(recv_msg);
+ } else {
+ dev_dbg(intf->si_dev, "Send: %*ph\n",
+ smi_msg->data_size, smi_msg->data);
+
+ smi_send(intf, intf->handlers, smi_msg, priority);
+ }
+ rcu_read_unlock();
+
+out:
+ if (rv && user)
+ atomic_dec(&user->nr_msgs);
+ return rv;
+}
+
+static int check_addr(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ unsigned char *saddr,
+ unsigned char *lun)
+{
+ if (addr->channel >= IPMI_MAX_CHANNELS)
+ return -EINVAL;
+ addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
+ *lun = intf->addrinfo[addr->channel].lun;
+ *saddr = intf->addrinfo[addr->channel].address;
+ return 0;
+}
+
+int ipmi_request_settime(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ void *user_msg_data,
+ int priority,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ unsigned char saddr = 0, lun = 0;
+ int rv, index;
+
+ if (!user)
+ return -EINVAL;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ rv = check_addr(user->intf, addr, &saddr, &lun);
+ if (!rv)
+ rv = i_ipmi_request(user,
+ user->intf,
+ addr,
+ msgid,
+ msg,
+ user_msg_data,
+ NULL, NULL,
+ priority,
+ saddr,
+ lun,
+ retries,
+ retry_time_ms);
+
+ release_ipmi_user(user, index);
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_request_settime);
+
+int ipmi_request_supply_msgs(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ void *user_msg_data,
+ void *supplied_smi,
+ struct ipmi_recv_msg *supplied_recv,
+ int priority)
+{
+ unsigned char saddr = 0, lun = 0;
+ int rv, index;
+
+ if (!user)
+ return -EINVAL;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ rv = check_addr(user->intf, addr, &saddr, &lun);
+ if (!rv)
+ rv = i_ipmi_request(user,
+ user->intf,
+ addr,
+ msgid,
+ msg,
+ user_msg_data,
+ supplied_smi,
+ supplied_recv,
+ priority,
+ saddr,
+ lun,
+ -1, 0);
+
+ release_ipmi_user(user, index);
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_request_supply_msgs);
+
+static void bmc_device_id_handler(struct ipmi_smi *intf,
+ struct ipmi_recv_msg *msg)
+{
+ int rv;
+
+ if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
+ || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
+ dev_warn(intf->si_dev,
+ "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
+ msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
+ return;
+ }
+
+ if (msg->msg.data[0]) {
+ dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n",
+ msg->msg.data[0]);
+ intf->bmc->dyn_id_set = 0;
+ goto out;
+ }
+
+ rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
+ msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
+ if (rv) {
+ dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
+		/* Record the completion code on error. */
+ intf->bmc->cc = msg->msg.data[0];
+ intf->bmc->dyn_id_set = 0;
+ } else {
+ /*
+ * Make sure the id data is available before setting
+ * dyn_id_set.
+ */
+ smp_wmb();
+ intf->bmc->dyn_id_set = 1;
+ }
+out:
+ wake_up(&intf->waitq);
+}
+
+static int
+send_get_device_id_cmd(struct ipmi_smi *intf)
+{
+ struct ipmi_system_interface_addr si;
+ struct kernel_ipmi_msg msg;
+
+ si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si.channel = IPMI_BMC_CHANNEL;
+ si.lun = 0;
+
+ msg.netfn = IPMI_NETFN_APP_REQUEST;
+ msg.cmd = IPMI_GET_DEVICE_ID_CMD;
+ msg.data = NULL;
+ msg.data_len = 0;
+
+ return i_ipmi_request(NULL,
+ intf,
+ (struct ipmi_addr *) &si,
+ 0,
+ &msg,
+ intf,
+ NULL,
+ NULL,
+ 0,
+ intf->addrinfo[0].address,
+ intf->addrinfo[0].lun,
+ -1, 0);
+}
+
+static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
+{
+ int rv;
+ unsigned int retry_count = 0;
+
+ intf->null_user_handler = bmc_device_id_handler;
+
+retry:
+ bmc->cc = 0;
+ bmc->dyn_id_set = 2;
+
+ rv = send_get_device_id_cmd(intf);
+ if (rv)
+ goto out_reset_handler;
+
+ wait_event(intf->waitq, bmc->dyn_id_set != 2);
+
+ if (!bmc->dyn_id_set) {
+ if (bmc->cc != IPMI_CC_NO_ERROR &&
+ ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
+ msleep(500);
+ dev_warn(intf->si_dev,
+ "BMC returned 0x%2.2x, retry get bmc device id\n",
+ bmc->cc);
+ goto retry;
+ }
+
+ rv = -EIO; /* Something went wrong in the fetch. */
+ }
+
+ /* dyn_id_set makes the id data available. */
+ smp_rmb();
+
+out_reset_handler:
+ intf->null_user_handler = NULL;
+
+ return rv;
+}
+
+/*
+ * Fetch the device id for the bmc/interface. You must pass in either
+ * bmc or intf; this code will get the other one. If the data has
+ * been recently fetched, this will just use the cached data. Otherwise
+ * it will run a new fetch.
+ *
+ * Except for the first time this is called (in ipmi_add_smi()),
+ * this will always return good data.
+ */
+static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
+ struct ipmi_device_id *id,
+ bool *guid_set, guid_t *guid, int intf_num)
+{
+ int rv = 0;
+ int prev_dyn_id_set, prev_guid_set;
+ bool intf_set = intf != NULL;
+
+ if (!intf) {
+ mutex_lock(&bmc->dyn_mutex);
+retry_bmc_lock:
+ if (list_empty(&bmc->intfs)) {
+ mutex_unlock(&bmc->dyn_mutex);
+ return -ENOENT;
+ }
+ intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
+ bmc_link);
+ kref_get(&intf->refcount);
+ mutex_unlock(&bmc->dyn_mutex);
+ mutex_lock(&intf->bmc_reg_mutex);
+ mutex_lock(&bmc->dyn_mutex);
+ if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
+ bmc_link)) {
+ mutex_unlock(&intf->bmc_reg_mutex);
+ kref_put(&intf->refcount, intf_free);
+ goto retry_bmc_lock;
+ }
+ } else {
+ mutex_lock(&intf->bmc_reg_mutex);
+ bmc = intf->bmc;
+ mutex_lock(&bmc->dyn_mutex);
+ kref_get(&intf->refcount);
+ }
+
+ /* If we have a valid and current ID, just return that. */
+ if (intf->in_bmc_register ||
+ (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
+ goto out_noprocessing;
+
+ prev_guid_set = bmc->dyn_guid_set;
+ __get_guid(intf);
+
+ prev_dyn_id_set = bmc->dyn_id_set;
+ rv = __get_device_id(intf, bmc);
+ if (rv)
+ goto out;
+
+ /*
+ * The guid, device id, manufacturer id, and product id should
+	 * not change on a BMC. If they do we have to do some dancing.
+ */
+ if (!intf->bmc_registered
+ || (!prev_guid_set && bmc->dyn_guid_set)
+ || (!prev_dyn_id_set && bmc->dyn_id_set)
+ || (prev_guid_set && bmc->dyn_guid_set
+ && !guid_equal(&bmc->guid, &bmc->fetch_guid))
+ || bmc->id.device_id != bmc->fetch_id.device_id
+ || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
+ || bmc->id.product_id != bmc->fetch_id.product_id) {
+ struct ipmi_device_id id = bmc->fetch_id;
+ int guid_set = bmc->dyn_guid_set;
+ guid_t guid;
+
+ guid = bmc->fetch_guid;
+ mutex_unlock(&bmc->dyn_mutex);
+
+ __ipmi_bmc_unregister(intf);
+ /* Fill in the temporary BMC for good measure. */
+ intf->bmc->id = id;
+ intf->bmc->dyn_guid_set = guid_set;
+ intf->bmc->guid = guid;
+ if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
+ need_waiter(intf); /* Retry later on an error. */
+ else
+ __scan_channels(intf, &id);
+
+
+ if (!intf_set) {
+ /*
+ * We weren't given the interface on the
+ * command line, so restart the operation on
+ * the next interface for the BMC.
+ */
+ mutex_unlock(&intf->bmc_reg_mutex);
+ mutex_lock(&bmc->dyn_mutex);
+ goto retry_bmc_lock;
+ }
+
+ /* We have a new BMC, set it up. */
+ bmc = intf->bmc;
+ mutex_lock(&bmc->dyn_mutex);
+ goto out_noprocessing;
+ } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
+		/* Version info changed, scan the channels again. */
+ __scan_channels(intf, &bmc->fetch_id);
+
+ bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
+
+out:
+ if (rv && prev_dyn_id_set) {
+ rv = 0; /* Ignore failures if we have previous data. */
+ bmc->dyn_id_set = prev_dyn_id_set;
+ }
+ if (!rv) {
+ bmc->id = bmc->fetch_id;
+ if (bmc->dyn_guid_set)
+ bmc->guid = bmc->fetch_guid;
+ else if (prev_guid_set)
+ /*
+ * The guid used to be valid and it failed to fetch,
+ * just use the cached value.
+ */
+ bmc->dyn_guid_set = prev_guid_set;
+ }
+out_noprocessing:
+ if (!rv) {
+ if (id)
+ *id = bmc->id;
+
+ if (guid_set)
+ *guid_set = bmc->dyn_guid_set;
+
+ if (guid && bmc->dyn_guid_set)
+ *guid = bmc->guid;
+ }
+
+ mutex_unlock(&bmc->dyn_mutex);
+ mutex_unlock(&intf->bmc_reg_mutex);
+
+ kref_put(&intf->refcount, intf_free);
+ return rv;
+}
+
+static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
+ struct ipmi_device_id *id,
+ bool *guid_set, guid_t *guid)
+{
+ return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
+}
+
+static ssize_t device_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "%u\n", id.device_id);
+}
+static DEVICE_ATTR_RO(device_id);
+
+static ssize_t provides_device_sdrs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7);
+}
+static DEVICE_ATTR_RO(provides_device_sdrs);
+
+static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F);
+}
+static DEVICE_ATTR_RO(revision);
+
+static ssize_t firmware_revision_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1,
+ id.firmware_revision_2);
+}
+static DEVICE_ATTR_RO(firmware_revision);
+
+static ssize_t ipmi_version_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "%u.%u\n",
+ ipmi_version_major(&id),
+ ipmi_version_minor(&id));
+}
+static DEVICE_ATTR_RO(ipmi_version);
+
+static ssize_t add_dev_support_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "0x%02x\n", id.additional_device_support);
+}
+static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
+ NULL);
+
+static ssize_t manufacturer_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id);
+}
+static DEVICE_ATTR_RO(manufacturer_id);
+
+static ssize_t product_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "0x%4.4x\n", id.product_id);
+}
+static DEVICE_ATTR_RO(product_id);
+
+static ssize_t aux_firmware_rev_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ id.aux_firmware_revision[3],
+ id.aux_firmware_revision[2],
+ id.aux_firmware_revision[1],
+ id.aux_firmware_revision[0]);
+}
+static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
+
+static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ bool guid_set;
+ guid_t guid;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
+ if (rv)
+ return rv;
+ if (!guid_set)
+ return -ENOENT;
+
+ return sysfs_emit(buf, "%pUl\n", &guid);
+}
+static DEVICE_ATTR_RO(guid);
+
+static struct attribute *bmc_dev_attrs[] = {
+ &dev_attr_device_id.attr,
+ &dev_attr_provides_device_sdrs.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_firmware_revision.attr,
+ &dev_attr_ipmi_version.attr,
+ &dev_attr_additional_device_support.attr,
+ &dev_attr_manufacturer_id.attr,
+ &dev_attr_product_id.attr,
+ &dev_attr_aux_firmware_revision.attr,
+ &dev_attr_guid.attr,
+ NULL
+};
+
+static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct bmc_device *bmc = to_bmc_device(dev);
+ umode_t mode = attr->mode;
+ int rv;
+
+ if (attr == &dev_attr_aux_firmware_revision.attr) {
+ struct ipmi_device_id id;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ return (!rv && id.aux_firmware_revision_set) ? mode : 0;
+ }
+ if (attr == &dev_attr_guid.attr) {
+ bool guid_set;
+
+ rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
+ return (!rv && guid_set) ? mode : 0;
+ }
+ return mode;
+}
+
+static const struct attribute_group bmc_dev_attr_group = {
+ .attrs = bmc_dev_attrs,
+ .is_visible = bmc_dev_attr_is_visible,
+};
+
+static const struct attribute_group *bmc_dev_attr_groups[] = {
+ &bmc_dev_attr_group,
+ NULL
+};
+
+static const struct device_type bmc_device_type = {
+ .groups = bmc_dev_attr_groups,
+};
+
+static int __find_bmc_guid(struct device *dev, const void *data)
+{
+ const guid_t *guid = data;
+ struct bmc_device *bmc;
+ int rv;
+
+ if (dev->type != &bmc_device_type)
+ return 0;
+
+ bmc = to_bmc_device(dev);
+ rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
+ if (rv)
+ rv = kref_get_unless_zero(&bmc->usecount);
+ return rv;
+}
+
+/*
+ * Returns with the bmc's usecount incremented, if it is non-NULL.
+ */
+static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
+ guid_t *guid)
+{
+ struct device *dev;
+ struct bmc_device *bmc = NULL;
+
+ dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
+ if (dev) {
+ bmc = to_bmc_device(dev);
+ put_device(dev);
+ }
+ return bmc;
+}
+
+struct prod_dev_id {
+ unsigned int product_id;
+ unsigned char device_id;
+};
+
+static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
+{
+ const struct prod_dev_id *cid = data;
+ struct bmc_device *bmc;
+ int rv;
+
+ if (dev->type != &bmc_device_type)
+ return 0;
+
+ bmc = to_bmc_device(dev);
+ rv = (bmc->id.product_id == cid->product_id
+ && bmc->id.device_id == cid->device_id);
+ if (rv)
+ rv = kref_get_unless_zero(&bmc->usecount);
+ return rv;
+}
+
+/*
+ * Returns with the bmc's usecount incremented, if it is non-NULL.
+ */
+static struct bmc_device *ipmi_find_bmc_prod_dev_id(
+ struct device_driver *drv,
+ unsigned int product_id, unsigned char device_id)
+{
+ struct prod_dev_id id = {
+ .product_id = product_id,
+ .device_id = device_id,
+ };
+ struct device *dev;
+ struct bmc_device *bmc = NULL;
+
+ dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
+ if (dev) {
+ bmc = to_bmc_device(dev);
+ put_device(dev);
+ }
+ return bmc;
+}
+
+static DEFINE_IDA(ipmi_bmc_ida);
+
+static void
+release_bmc_device(struct device *dev)
+{
+ kfree(to_bmc_device(dev));
+}
+
+static void cleanup_bmc_work(struct work_struct *work)
+{
+ struct bmc_device *bmc = container_of(work, struct bmc_device,
+ remove_work);
+ int id = bmc->pdev.id; /* Unregister overwrites id */
+
+ platform_device_unregister(&bmc->pdev);
+ ida_simple_remove(&ipmi_bmc_ida, id);
+}
+
+static void
+cleanup_bmc_device(struct kref *ref)
+{
+ struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
+
+ /*
+ * Remove the platform device in a work queue to avoid issues
+ * with removing the device attributes while reading a device
+ * attribute.
+ */
+ queue_work(remove_work_wq, &bmc->remove_work);
+}
+
+/*
+ * Must be called with intf->bmc_reg_mutex held.
+ */
+static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
+{
+ struct bmc_device *bmc = intf->bmc;
+
+ if (!intf->bmc_registered)
+ return;
+
+ sysfs_remove_link(&intf->si_dev->kobj, "bmc");
+ sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
+ kfree(intf->my_dev_name);
+ intf->my_dev_name = NULL;
+
+ mutex_lock(&bmc->dyn_mutex);
+ list_del(&intf->bmc_link);
+ mutex_unlock(&bmc->dyn_mutex);
+ intf->bmc = &intf->tmp_bmc;
+ kref_put(&bmc->usecount, cleanup_bmc_device);
+ intf->bmc_registered = false;
+}
+
+static void ipmi_bmc_unregister(struct ipmi_smi *intf)
+{
+ mutex_lock(&intf->bmc_reg_mutex);
+ __ipmi_bmc_unregister(intf);
+ mutex_unlock(&intf->bmc_reg_mutex);
+}
+
+/*
+ * Must be called with intf->bmc_reg_mutex held.
+ */
+static int __ipmi_bmc_register(struct ipmi_smi *intf,
+ struct ipmi_device_id *id,
+ bool guid_set, guid_t *guid, int intf_num)
+{
+ int rv;
+ struct bmc_device *bmc;
+ struct bmc_device *old_bmc;
+
+ /*
+ * platform_device_register() can cause bmc_reg_mutex to
+ * be claimed because of the is_visible functions of
+ * the attributes. Eliminate possible recursion and
+ * release the lock.
+ */
+ intf->in_bmc_register = true;
+ mutex_unlock(&intf->bmc_reg_mutex);
+
+ /*
+ * Try to find whether a bmc_device struct representing
+ * the interfaced BMC already exists.
+ */
+ mutex_lock(&ipmidriver_mutex);
+ if (guid_set)
+ old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
+ else
+ old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
+ id->product_id,
+ id->device_id);
+
+ /*
+ * If a bmc_device already exists, reuse it; otherwise
+ * allocate and register a new BMC device.
+ */
+ if (old_bmc) {
+ bmc = old_bmc;
+ /*
+ * Note: old_bmc already has usecount incremented by
+ * the BMC find functions.
+ */
+ intf->bmc = old_bmc;
+ mutex_lock(&bmc->dyn_mutex);
+ list_add_tail(&intf->bmc_link, &bmc->intfs);
+ mutex_unlock(&bmc->dyn_mutex);
+
+ dev_info(intf->si_dev,
+ "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+ bmc->id.manufacturer_id,
+ bmc->id.product_id,
+ bmc->id.device_id);
+ } else {
+ bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
+ if (!bmc) {
+ rv = -ENOMEM;
+ goto out;
+ }
+ INIT_LIST_HEAD(&bmc->intfs);
+ mutex_init(&bmc->dyn_mutex);
+ INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
+
+ bmc->id = *id;
+ bmc->dyn_id_set = 1;
+ bmc->dyn_guid_set = guid_set;
+ bmc->guid = *guid;
+ bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
+
+ bmc->pdev.name = "ipmi_bmc";
+
+ rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
+ if (rv < 0) {
+ kfree(bmc);
+ goto out;
+ }
+
+ bmc->pdev.dev.driver = &ipmidriver.driver;
+ bmc->pdev.id = rv;
+ bmc->pdev.dev.release = release_bmc_device;
+ bmc->pdev.dev.type = &bmc_device_type;
+ kref_init(&bmc->usecount);
+
+ intf->bmc = bmc;
+ mutex_lock(&bmc->dyn_mutex);
+ list_add_tail(&intf->bmc_link, &bmc->intfs);
+ mutex_unlock(&bmc->dyn_mutex);
+
+ rv = platform_device_register(&bmc->pdev);
+ if (rv) {
+ dev_err(intf->si_dev,
+ "Unable to register bmc device: %d\n",
+ rv);
+ goto out_list_del;
+ }
+
+ dev_info(intf->si_dev,
+ "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+ bmc->id.manufacturer_id,
+ bmc->id.product_id,
+ bmc->id.device_id);
+ }
+
+ /*
+ * Create symlinks from the system interface device to the
+ * BMC device and back.
+ */
+ rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
+ if (rv) {
+ dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
+ goto out_put_bmc;
+ }
+
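+ /*
+ * During initial registration intf->intf_num is not yet valid, so
+ * the caller passes the interface number in explicitly.
+ */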
+ if (intf_num == -1)
+ intf_num = intf->intf_num;
+ intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
+ if (!intf->my_dev_name) {
+ rv = -ENOMEM;
+ dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
+ rv);
+ goto out_unlink1;
+ }
+
+ rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
+ intf->my_dev_name);
+ if (rv) {
+ dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
+ rv);
+ goto out_free_my_dev_name;
+ }
+
+ intf->bmc_registered = true;
+
+out:
+ mutex_unlock(&ipmidriver_mutex);
+ mutex_lock(&intf->bmc_reg_mutex);
+ intf->in_bmc_register = false;
+ return rv;
+
+
+out_free_my_dev_name:
+ kfree(intf->my_dev_name);
+ intf->my_dev_name = NULL;
+
+out_unlink1:
+ sysfs_remove_link(&intf->si_dev->kobj, "bmc");
+
+out_put_bmc:
+ mutex_lock(&bmc->dyn_mutex);
+ list_del(&intf->bmc_link);
+ mutex_unlock(&bmc->dyn_mutex);
+ intf->bmc = &intf->tmp_bmc;
+ kref_put(&bmc->usecount, cleanup_bmc_device);
+ goto out;
+
+out_list_del:
+ mutex_lock(&bmc->dyn_mutex);
+ list_del(&intf->bmc_link);
+ mutex_unlock(&bmc->dyn_mutex);
+ intf->bmc = &intf->tmp_bmc;
+ put_device(&bmc->pdev.dev);
+ goto out;
+}
+
+static int
+send_guid_cmd(struct ipmi_smi *intf, int chan)
+{
+ struct kernel_ipmi_msg msg;
+ struct ipmi_system_interface_addr si;
+
+ si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si.channel = IPMI_BMC_CHANNEL;
+ si.lun = 0;
+
+ msg.netfn = IPMI_NETFN_APP_REQUEST;
+ msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
+ msg.data = NULL;
+ msg.data_len = 0;
+ return i_ipmi_request(NULL,
+ intf,
+ (struct ipmi_addr *) &si,
+ 0,
+ &msg,
+ intf,
+ NULL,
+ NULL,
+ 0,
+ intf->addrinfo[0].address,
+ intf->addrinfo[0].lun,
+ -1, 0);
+}
+
+static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
+{
+ struct bmc_device *bmc = intf->bmc;
+
+ if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
+ || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
+ /* Not for me */
+ return;
+
+ if (msg->msg.data[0] != 0) {
+ /* Error from getting the GUID, the BMC doesn't have one. */
+ bmc->dyn_guid_set = 0;
+ goto out;
+ }
+
+ if (msg->msg.data_len < UUID_SIZE + 1) {
+ bmc->dyn_guid_set = 0;
+ dev_warn(intf->si_dev,
+ "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n",
+ msg->msg.data_len, UUID_SIZE + 1);
+ goto out;
+ }
+
+ import_guid(&bmc->fetch_guid, msg->msg.data + 1);
+ /*
+ * Make sure the guid data is available before setting
+ * dyn_guid_set.
+ */
+ smp_wmb();
+ bmc->dyn_guid_set = 1;
+ out:
+ wake_up(&intf->waitq);
+}
+
+static void __get_guid(struct ipmi_smi *intf)
+{
+ int rv;
+ struct bmc_device *bmc = intf->bmc;
+
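+ /* 2 marks the fetch as in progress; guid_handler sets it to 0 or 1. */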
+ bmc->dyn_guid_set = 2;
+ intf->null_user_handler = guid_handler;
+ rv = send_guid_cmd(intf, 0);
+ if (rv)
+ /* Send failed, no GUID available. */
+ bmc->dyn_guid_set = 0;
+ else
+ wait_event(intf->waitq, bmc->dyn_guid_set != 2);
+
+ /* dyn_guid_set makes the guid data available. */
+ smp_rmb();
+
+ intf->null_user_handler = NULL;
+}
+
+static int
+send_channel_info_cmd(struct ipmi_smi *intf, int chan)
+{
+ struct kernel_ipmi_msg msg;
+ unsigned char data[1];
+ struct ipmi_system_interface_addr si;
+
+ si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si.channel = IPMI_BMC_CHANNEL;
+ si.lun = 0;
+
+ msg.netfn = IPMI_NETFN_APP_REQUEST;
+ msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
+ msg.data = data;
+ msg.data_len = 1;
+ data[0] = chan;
+ return i_ipmi_request(NULL,
+ intf,
+ (struct ipmi_addr *) &si,
+ 0,
+ &msg,
+ intf,
+ NULL,
+ NULL,
+ 0,
+ intf->addrinfo[0].address,
+ intf->addrinfo[0].lun,
+ -1, 0);
+}
+
+static void
+channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
+{
+ int rv = 0;
+ int ch;
+ unsigned int set = intf->curr_working_cset;
+ struct ipmi_channel *chans;
+
+ if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
+ && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
+ /* It's the one we want */
+ if (msg->msg.data[0] != 0) {
+ /* Got an error from the channel, just go on. */
+ if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
+ /*
+ * If the MC does not support this
+ * command, that is legal.  We just
+ * assume it has a single IPMB channel
+ * at channel zero.
+ */
+ intf->wchannels[set].c[0].medium
+ = IPMI_CHANNEL_MEDIUM_IPMB;
+ intf->wchannels[set].c[0].protocol
+ = IPMI_CHANNEL_PROTOCOL_IPMB;
+
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
+ wake_up(&intf->waitq);
+ goto out;
+ }
+ goto next_channel;
+ }
+ if (msg->msg.data_len < 4) {
+ /* Message not big enough, just go on. */
+ goto next_channel;
+ }
+ ch = intf->curr_channel;
+ chans = intf->wchannels[set].c;
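+ /*
+ * In the Get Channel Info response, data[2] holds the channel
+ * medium type and data[3] the channel protocol type.
+ */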
+ chans[ch].medium = msg->msg.data[2] & 0x7f;
+ chans[ch].protocol = msg->msg.data[3] & 0x1f;
+
+ next_channel:
+ intf->curr_channel++;
+ if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
+ wake_up(&intf->waitq);
+ } else {
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
+ rv = send_channel_info_cmd(intf, intf->curr_channel);
+ }
+
+ if (rv) {
+ /* Got an error somehow, just give up. */
+ dev_warn(intf->si_dev,
+ "Error sending channel information for channel %d: %d\n",
+ intf->curr_channel, rv);
+
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
+ wake_up(&intf->waitq);
+ }
+ }
+ out:
+ return;
+}
+
+/*
+ * Must be holding intf->bmc_reg_mutex to call this.
+ */
+static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
+{
+ int rv;
+
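+ /*
+ * Only IPMI 1.5 and later are asked for channel info; older
+ * BMCs just get the single assumed IPMB channel below.
+ */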
+ if (ipmi_version_major(id) > 1
+ || (ipmi_version_major(id) == 1
+ && ipmi_version_minor(id) >= 5)) {
+ unsigned int set;
+
+ /*
+ * Start scanning the channels to see what is
+ * available.
+ */
+ set = !intf->curr_working_cset;
+ intf->curr_working_cset = set;
+ memset(&intf->wchannels[set], 0,
+ sizeof(struct ipmi_channel_set));
+
+ intf->null_user_handler = channel_handler;
+ intf->curr_channel = 0;
+ rv = send_channel_info_cmd(intf, 0);
+ if (rv) {
+ dev_warn(intf->si_dev,
+ "Error sending channel information for channel 0, %d\n",
+ rv);
+ intf->null_user_handler = NULL;
+ return -EIO;
+ }
+
+ /* Wait for the channel info to be read. */
+ wait_event(intf->waitq, intf->channels_ready);
+ intf->null_user_handler = NULL;
+ } else {
+ unsigned int set = intf->curr_working_cset;
+
+ /* Assume a single IPMB channel at zero. */
+ intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
+ intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
+ }
+
+ return 0;
+}
+
+static void ipmi_poll(struct ipmi_smi *intf)
+{
+ if (intf->handlers->poll)
+ intf->handlers->poll(intf->send_info);
+ /* In case something came in */
+ handle_new_recv_msgs(intf);
+}
+
+void ipmi_poll_interface(struct ipmi_user *user)
+{
+ ipmi_poll(user->intf);
+}
+EXPORT_SYMBOL(ipmi_poll_interface);
+
+static ssize_t nr_users_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipmi_smi *intf = container_of(attr,
+ struct ipmi_smi, nr_users_devattr);
+
+ return sysfs_emit(buf, "%d\n", atomic_read(&intf->nr_users));
+}
+static DEVICE_ATTR_RO(nr_users);
+
+static ssize_t nr_msgs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipmi_smi *intf = container_of(attr,
+ struct ipmi_smi, nr_msgs_devattr);
+ struct ipmi_user *user;
+ int index;
+ unsigned int count = 0;
+
+ index = srcu_read_lock(&intf->users_srcu);
+ list_for_each_entry_rcu(user, &intf->users, link)
+ count += atomic_read(&user->nr_msgs);
+ srcu_read_unlock(&intf->users_srcu, index);
+
+ return sysfs_emit(buf, "%u\n", count);
+}
+static DEVICE_ATTR_RO(nr_msgs);
+
+static void redo_bmc_reg(struct work_struct *work)
+{
+ struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
+ bmc_reg_work);
+
+ if (!intf->in_shutdown)
+ bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
+
+ kref_put(&intf->refcount, intf_free);
+}
+
+int ipmi_add_smi(struct module *owner,
+ const struct ipmi_smi_handlers *handlers,
+ void *send_info,
+ struct device *si_dev,
+ unsigned char slave_addr)
+{
+ int i, j;
+ int rv;
+ struct ipmi_smi *intf, *tintf;
+ struct list_head *link;
+ struct ipmi_device_id id;
+
+ /*
+ * Make sure the driver is actually initialized; this handles
+ * problems with initialization order.
+ */
+ rv = ipmi_init_msghandler();
+ if (rv)
+ return rv;
+
+ intf = kzalloc(sizeof(*intf), GFP_KERNEL);
+ if (!intf)
+ return -ENOMEM;
+
+ rv = init_srcu_struct(&intf->users_srcu);
+ if (rv) {
+ kfree(intf);
+ return rv;
+ }
+
+ intf->owner = owner;
+ intf->bmc = &intf->tmp_bmc;
+ INIT_LIST_HEAD(&intf->bmc->intfs);
+ mutex_init(&intf->bmc->dyn_mutex);
+ INIT_LIST_HEAD(&intf->bmc_link);
+ mutex_init(&intf->bmc_reg_mutex);
+ intf->intf_num = -1; /* Mark it invalid for now. */
+ kref_init(&intf->refcount);
+ INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
+ intf->si_dev = si_dev;
+ for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
+ intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
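+ /* LUN 2 is the SMS message LUN defined by the IPMI spec. */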
+ intf->addrinfo[j].lun = 2;
+ }
+ if (slave_addr != 0)
+ intf->addrinfo[0].address = slave_addr;
+ INIT_LIST_HEAD(&intf->users);
+ atomic_set(&intf->nr_users, 0);
+ intf->handlers = handlers;
+ intf->send_info = send_info;
+ spin_lock_init(&intf->seq_lock);
+ for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
+ intf->seq_table[j].inuse = 0;
+ intf->seq_table[j].seqid = 0;
+ }
+ intf->curr_seq = 0;
+ spin_lock_init(&intf->waiting_rcv_msgs_lock);
+ INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
+ tasklet_setup(&intf->recv_tasklet,
+ smi_recv_tasklet);
+ atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
+ spin_lock_init(&intf->xmit_msgs_lock);
+ INIT_LIST_HEAD(&intf->xmit_msgs);
+ INIT_LIST_HEAD(&intf->hp_xmit_msgs);
+ spin_lock_init(&intf->events_lock);
+ spin_lock_init(&intf->watch_lock);
+ atomic_set(&intf->event_waiters, 0);
+ intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+ INIT_LIST_HEAD(&intf->waiting_events);
+ intf->waiting_events_count = 0;
+ mutex_init(&intf->cmd_rcvrs_mutex);
+ spin_lock_init(&intf->maintenance_mode_lock);
+ INIT_LIST_HEAD(&intf->cmd_rcvrs);
+ init_waitqueue_head(&intf->waitq);
+ for (i = 0; i < IPMI_NUM_STATS; i++)
+ atomic_set(&intf->stats[i], 0);
+
+ mutex_lock(&ipmi_interfaces_mutex);
+ /* Look for a hole in the numbers. */
+ i = 0;
+ link = &ipmi_interfaces;
+ list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
+ ipmi_interfaces_mutex_held()) {
+ if (tintf->intf_num != i) {
+ link = &tintf->link;
+ break;
+ }
+ i++;
+ }
+ /* Add the new interface in numeric order. */
+ if (i == 0)
+ list_add_rcu(&intf->link, &ipmi_interfaces);
+ else
+ list_add_tail_rcu(&intf->link, link);
+
+ rv = handlers->start_processing(send_info, intf);
+ if (rv)
+ goto out_err;
+
+ rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
+ if (rv) {
+ dev_err(si_dev, "Unable to get the device id: %d\n", rv);
+ goto out_err_started;
+ }
+
+ mutex_lock(&intf->bmc_reg_mutex);
+ rv = __scan_channels(intf, &id);
+ mutex_unlock(&intf->bmc_reg_mutex);
+ if (rv)
+ goto out_err_bmc_reg;
+
+ intf->nr_users_devattr = dev_attr_nr_users;
+ sysfs_attr_init(&intf->nr_users_devattr.attr);
+ rv = device_create_file(intf->si_dev, &intf->nr_users_devattr);
+ if (rv)
+ goto out_err_bmc_reg;
+
+ intf->nr_msgs_devattr = dev_attr_nr_msgs;
+ sysfs_attr_init(&intf->nr_msgs_devattr.attr);
+ rv = device_create_file(intf->si_dev, &intf->nr_msgs_devattr);
+ if (rv) {
+ device_remove_file(intf->si_dev, &intf->nr_users_devattr);
+ goto out_err_bmc_reg;
+ }
+
+ /*
+ * Keep memory order straight for RCU readers. Make
+ * sure everything else is committed to memory before
+ * setting intf_num to mark the interface valid.
+ */
+ smp_wmb();
+ intf->intf_num = i;
+ mutex_unlock(&ipmi_interfaces_mutex);
+
+ /* After this point the interface is legal to use. */
+ call_smi_watchers(i, intf->si_dev);
+
+ return 0;
+
+ out_err_bmc_reg:
+ ipmi_bmc_unregister(intf);
+ out_err_started:
+ if (intf->handlers->shutdown)
+ intf->handlers->shutdown(intf->send_info);
+ out_err:
+ list_del_rcu(&intf->link);
+ mutex_unlock(&ipmi_interfaces_mutex);
+ synchronize_srcu(&ipmi_interfaces_srcu);
+ cleanup_srcu_struct(&intf->users_srcu);
+ kref_put(&intf->refcount, intf_free);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_add_smi);
+
+static void deliver_smi_err_response(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg,
+ unsigned char err)
+{
+ int rv;
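+
+ /* Setting bit 2 turns the request NetFN into the response NetFN. */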
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = err;
+ msg->rsp_size = 3;
+
+ /* This will never requeue, but it may ask us to free the message. */
+ rv = handle_one_recv_msg(intf, msg);
+ if (rv == 0)
+ ipmi_free_smi_msg(msg);
+}
+
+static void cleanup_smi_msgs(struct ipmi_smi *intf)
+{
+ int i;
+ struct seq_table *ent;
+ struct ipmi_smi_msg *msg;
+ struct list_head *entry;
+ struct list_head tmplist;
+
+ /* Clear out our transmit queues and hold the messages. */
+ INIT_LIST_HEAD(&tmplist);
+ list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
+ list_splice_tail(&intf->xmit_msgs, &tmplist);
+
+ /* Current message first, to preserve order */
+ while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
+ /* Wait for the message to clear out. */
+ schedule_timeout(1);
+ }
+
+ /* No need for locks, the interface is down. */
+
+ /*
+ * Return errors for all pending messages in queue and in the
+ * tables waiting for remote responses.
+ */
+ while (!list_empty(&tmplist)) {
+ entry = tmplist.next;
+ list_del(entry);
+ msg = list_entry(entry, struct ipmi_smi_msg, link);
+ deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
+ }
+
+ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+ ent = &intf->seq_table[i];
+ if (!ent->inuse)
+ continue;
+ deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
+ }
+}
+
+void ipmi_unregister_smi(struct ipmi_smi *intf)
+{
+ struct ipmi_smi_watcher *w;
+ int intf_num, index;
+
+ if (!intf)
+ return;
+ intf_num = intf->intf_num;
+ mutex_lock(&ipmi_interfaces_mutex);
+ intf->intf_num = -1;
+ intf->in_shutdown = true;
+ list_del_rcu(&intf->link);
+ mutex_unlock(&ipmi_interfaces_mutex);
+ synchronize_srcu(&ipmi_interfaces_srcu);
+
+ /* At this point no users can be added to the interface. */
+
+ device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
+ device_remove_file(intf->si_dev, &intf->nr_users_devattr);
+
+ /*
+ * Call all the watcher interfaces to tell them that
+ * an interface is going away.
+ */
+ mutex_lock(&smi_watchers_mutex);
+ list_for_each_entry(w, &smi_watchers, link)
+ w->smi_gone(intf_num);
+ mutex_unlock(&smi_watchers_mutex);
+
+ index = srcu_read_lock(&intf->users_srcu);
+ while (!list_empty(&intf->users)) {
+ struct ipmi_user *user =
+ container_of(list_next_rcu(&intf->users),
+ struct ipmi_user, link);
+
+ _ipmi_destroy_user(user);
+ }
+ srcu_read_unlock(&intf->users_srcu, index);
+
+ if (intf->handlers->shutdown)
+ intf->handlers->shutdown(intf->send_info);
+
+ cleanup_smi_msgs(intf);
+
+ ipmi_bmc_unregister(intf);
+
+ cleanup_srcu_struct(&intf->users_srcu);
+ kref_put(&intf->refcount, intf_free);
+}
+EXPORT_SYMBOL(ipmi_unregister_smi);
+
+static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_ipmb_addr ipmb_addr;
+ struct ipmi_recv_msg *recv_msg;
+
+ /*
+ * This is 11, not 10, because the response must contain a
+ * completion code.
+ */
+ if (msg->rsp_size < 11) {
+ /* Message not big enough, just ignore it. */
+ ipmi_inc_stat(intf, invalid_ipmb_responses);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr.slave_addr = msg->rsp[6];
+ ipmb_addr.channel = msg->rsp[3] & 0x0f;
+ ipmb_addr.lun = msg->rsp[7] & 3;
+
+ /*
+ * It's a response from a remote entity. Look up the sequence
+ * number and handle the response.
+ */
+ if (intf_find_seq(intf,
+ msg->rsp[7] >> 2,
+ msg->rsp[3] & 0x0f,
+ msg->rsp[8],
+ (msg->rsp[4] >> 2) & (~1),
+ (struct ipmi_addr *) &ipmb_addr,
+ &recv_msg)) {
+ /*
+ * We were unable to find the sequence number,
+ * so just nuke the message.
+ */
+ ipmi_inc_stat(intf, unhandled_ipmb_responses);
+ return 0;
+ }
+
+ memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
+ /*
+ * The other fields matched, so no need to set them, except
+ * for netfn, which needs to be the response that was
+ * returned, not the request value.
+ */
+ recv_msg->msg.netfn = msg->rsp[4] >> 2;
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 10;
+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_ipmb_responses);
+ else
+ ipmi_inc_stat(intf, handled_ipmb_responses);
+
+ return 0;
+}
+
+static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ unsigned char netfn;
+ unsigned char cmd;
+ unsigned char chan;
+ struct ipmi_user *user = NULL;
+ struct ipmi_ipmb_addr *ipmb_addr;
+ struct ipmi_recv_msg *recv_msg;
+
+ if (msg->rsp_size < 10) {
+ /* Message not big enough, just ignore it. */
+ ipmi_inc_stat(intf, invalid_commands);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ netfn = msg->rsp[4] >> 2;
+ cmd = msg->rsp[8];
+ chan = msg->rsp[3] & 0xf;
+
+ rcu_read_lock();
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+ if (rcvr) {
+ user = rcvr->user;
+ kref_get(&user->refcount);
+ } else
+ user = NULL;
+ rcu_read_unlock();
+
+ if (user == NULL) {
+ /* We didn't find a user, deliver an error response. */
+ ipmi_inc_stat(intf, unhandled_commands);
+
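+ /*
+ * Build a Send Message command that returns an "invalid command"
+ * completion code to the original requester.
+ */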
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_SEND_MSG_CMD;
+ msg->data[2] = msg->rsp[3];
+ msg->data[3] = msg->rsp[6];
+ msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
+ msg->data[5] = ipmb_checksum(&msg->data[3], 2);
+ msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
+ /* rqseq/lun */
+ msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
+ msg->data[8] = msg->rsp[8]; /* cmd */
+ msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
+ msg->data[10] = ipmb_checksum(&msg->data[6], 4);
+ msg->data_size = 11;
+
+ dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
+ msg->data_size, msg->data);
+
+ rcu_read_lock();
+ if (!intf->in_shutdown) {
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value
+ * that causes it to not be freed or
+ * queued.
+ */
+ rv = -1;
+ }
+ rcu_read_unlock();
+ } else {
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ kref_put(&user->refcount, free_user);
+ } else {
+ /* Extract the source address from the data. */
+ ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
+ ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr->slave_addr = msg->rsp[6];
+ ipmb_addr->lun = msg->rsp[7] & 3;
+ ipmb_addr->channel = msg->rsp[3] & 0xf;
+
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->user = user;
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[7] >> 2;
+ recv_msg->msg.netfn = msg->rsp[4] >> 2;
+ recv_msg->msg.cmd = msg->rsp[8];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ /*
+ * We chop off 10, not 9 bytes because the checksum
+ * at the end also needs to be removed.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 10;
+ memcpy(recv_msg->msg_data, &msg->rsp[9],
+ msg->rsp_size - 10);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ }
+ }
+
+ return rv;
+}
+
+static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ struct ipmi_user *user = NULL;
+ struct ipmi_ipmb_direct_addr *daddr;
+ struct ipmi_recv_msg *recv_msg;
+ unsigned char netfn = msg->rsp[0] >> 2;
+ unsigned char cmd = msg->rsp[3];
+
+ rcu_read_lock();
+ /* We always use channel 0 for direct messages. */
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
+ if (rcvr) {
+ user = rcvr->user;
+ kref_get(&user->refcount);
+ } else
+ user = NULL;
+ rcu_read_unlock();
+
+ if (user == NULL) {
+ /* We didn't find a user, deliver an error response. */
+ ipmi_inc_stat(intf, unhandled_commands);
+
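+ /*
+ * Build a direct IPMB response carrying an "invalid command"
+ * completion code back to the requester.
+ */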
+ msg->data[0] = (netfn + 1) << 2;
+ msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */
+ msg->data[1] = msg->rsp[1]; /* Addr */
+ msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */
+ msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */
+ msg->data[3] = cmd;
+ msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
+ msg->data_size = 5;
+
+ rcu_read_lock();
+ if (!intf->in_shutdown) {
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value
+ * that causes it to not be freed or
+ * queued.
+ */
+ rv = -1;
+ }
+ rcu_read_unlock();
+ } else {
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ kref_put(&user->refcount, free_user);
+ } else {
+ /* Extract the source address from the data. */
+ daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
+ daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+ daddr->channel = 0;
+ daddr->slave_addr = msg->rsp[1];
+ daddr->rs_lun = msg->rsp[0] & 3;
+ daddr->rq_lun = msg->rsp[2] & 3;
+
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->user = user;
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = (msg->rsp[2] >> 2);
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[3];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, msg->rsp + 4,
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ }
+ }
+
+ return rv;
+}
+
+static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_recv_msg *recv_msg;
+ struct ipmi_ipmb_direct_addr *daddr;
+
+ recv_msg = msg->user_data;
+ if (recv_msg == NULL) {
+ dev_warn(intf->si_dev,
+ "IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
+ return 0;
+ }
+
+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ recv_msg->msgid = msg->msgid;
+ daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr;
+ daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+ daddr->channel = 0;
+ daddr->slave_addr = msg->rsp[1];
+ daddr->rq_lun = msg->rsp[0] & 3;
+ daddr->rs_lun = msg->rsp[2] & 3;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[3];
+ memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ deliver_local_response(intf, recv_msg);
+
+ return 0;
+}
+
+static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_lan_addr lan_addr;
+ struct ipmi_recv_msg *recv_msg;
+
+
+ /*
+ * This is 13, not 12, because the response must contain a
+ * completion code.
+ */
+ if (msg->rsp_size < 13) {
+ /* Message not big enough, just ignore it. */
+ ipmi_inc_stat(intf, invalid_lan_responses);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
+ lan_addr.session_handle = msg->rsp[4];
+ lan_addr.remote_SWID = msg->rsp[8];
+ lan_addr.local_SWID = msg->rsp[5];
+ lan_addr.channel = msg->rsp[3] & 0x0f;
+ lan_addr.privilege = msg->rsp[3] >> 4;
+ lan_addr.lun = msg->rsp[9] & 3;
+
+ /*
+ * It's a response from a remote entity. Look up the sequence
+ * number and handle the response.
+ */
+ if (intf_find_seq(intf,
+ msg->rsp[9] >> 2,
+ msg->rsp[3] & 0x0f,
+ msg->rsp[10],
+ (msg->rsp[6] >> 2) & (~1),
+ (struct ipmi_addr *) &lan_addr,
+ &recv_msg)) {
+ /*
+ * We were unable to find the sequence number,
+ * so just nuke the message.
+ */
+ ipmi_inc_stat(intf, unhandled_lan_responses);
+ return 0;
+ }
+
+ memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
+ /*
+ * The other fields matched, so no need to set them, except
+ * for netfn, which needs to be the response that was
+ * returned, not the request value.
+ */
+ recv_msg->msg.netfn = msg->rsp[6] >> 2;
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 12;
+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_lan_responses);
+ else
+ ipmi_inc_stat(intf, handled_lan_responses);
+
+ return 0;
+}
+
+static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ unsigned char netfn;
+ unsigned char cmd;
+ unsigned char chan;
+ struct ipmi_user *user = NULL;
+ struct ipmi_lan_addr *lan_addr;
+ struct ipmi_recv_msg *recv_msg;
+
+ if (msg->rsp_size < 12) {
+ /* Message not big enough, just ignore it. */
+ ipmi_inc_stat(intf, invalid_commands);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ netfn = msg->rsp[6] >> 2;
+ cmd = msg->rsp[10];
+ chan = msg->rsp[3] & 0xf;
+
+ rcu_read_lock();
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+ if (rcvr) {
+ user = rcvr->user;
+ kref_get(&user->refcount);
+ } else
+ user = NULL;
+ rcu_read_unlock();
+
+ if (user == NULL) {
+ /* We didn't find a user, just give up. */
+ ipmi_inc_stat(intf, unhandled_commands);
+
+ /*
+ * Don't do anything with these messages, just allow
+ * them to be freed.
+ */
+ rv = 0;
+ } else {
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling later.
+ */
+ rv = 1;
+ kref_put(&user->refcount, free_user);
+ } else {
+ /* Extract the source address from the data. */
+ lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+ lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
+ lan_addr->session_handle = msg->rsp[4];
+ lan_addr->remote_SWID = msg->rsp[8];
+ lan_addr->local_SWID = msg->rsp[5];
+ lan_addr->lun = msg->rsp[9] & 3;
+ lan_addr->channel = msg->rsp[3] & 0xf;
+ lan_addr->privilege = msg->rsp[3] >> 4;
+
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->user = user;
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[9] >> 2;
+ recv_msg->msg.netfn = msg->rsp[6] >> 2;
+ recv_msg->msg.cmd = msg->rsp[10];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ /*
+ * We chop off 12, not 11 bytes because the checksum
+ * at the end also needs to be removed.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 12;
+ memcpy(recv_msg->msg_data, &msg->rsp[11],
+ msg->rsp_size - 12);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ }
+ }
+
+ return rv;
+}
+
+/*
+ * This routine will handle "Get Message" command responses with
+ * channels that use an OEM Medium. The message format belongs to
+ * the OEM. See IPMI 2.0 specification, Chapter 6 and
+ * Chapter 22, sections 22.6 and 22.24 for more details.
+ */
+static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ unsigned char netfn;
+ unsigned char cmd;
+ unsigned char chan;
+ struct ipmi_user *user = NULL;
+ struct ipmi_system_interface_addr *smi_addr;
+ struct ipmi_recv_msg *recv_msg;
+
+ /*
+ * We expect the OEM SW to perform error checking
+ * so we just do some basic sanity checks
+ */
+ if (msg->rsp_size < 4) {
+ /* Message not big enough, just ignore it. */
+ ipmi_inc_stat(intf, invalid_commands);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ /*
+ * This is an OEM message, so the OEM needs to know how to
+ * handle the message.  We do no interpretation.
+ */
+ netfn = msg->rsp[0] >> 2;
+ cmd = msg->rsp[1];
+ chan = msg->rsp[3] & 0xf;
+
+ rcu_read_lock();
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+ if (rcvr) {
+ user = rcvr->user;
+ kref_get(&user->refcount);
+ } else
+ user = NULL;
+ rcu_read_unlock();
+
+ if (user == NULL) {
+ /* We didn't find a user, just give up. */
+ ipmi_inc_stat(intf, unhandled_commands);
+
+ /*
+ * Don't do anything with these messages, just allow
+ * them to be freed.
+ */
+
+ rv = 0;
+ } else {
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ kref_put(&user->refcount, free_user);
+ } else {
+ /*
+ * OEM messages are expected to be delivered via
+ * the system interface to SMS software.  We might
+ * need to revisit this depending on OEM
+ * requirements.
+ */
+ smi_addr = ((struct ipmi_system_interface_addr *)
+ &recv_msg->addr);
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+
+ recv_msg->user = user;
+ recv_msg->user_msg_data = NULL;
+ recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ /*
+ * The message starts at byte 4, which follows the
+ * channel byte in the "Get Message" command response.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, &msg->rsp[4],
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ }
+ }
+
+ return rv;
+}
+
+static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_system_interface_addr *smi_addr;
+
+ recv_msg->msgid = 0;
+ smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+ recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 3;
+}
+
+static int handle_read_event_rsp(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_recv_msg *recv_msg, *recv_msg2;
+ struct list_head msgs;
+ struct ipmi_user *user;
+ int rv = 0, deliver_count = 0, index;
+ unsigned long flags;
+
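+ /*
+ * rsp[0..2] are the NetFN, command, and completion code; a full
+ * IPMB event adds a 16-byte event message, 19 bytes in all.
+ */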
+ if (msg->rsp_size < 19) {
+ /* Message is too small to be an IPMB event. */
+ ipmi_inc_stat(intf, invalid_events);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the event, just ignore it. */
+ return 0;
+ }
+
+ INIT_LIST_HEAD(&msgs);
+
+ spin_lock_irqsave(&intf->events_lock, flags);
+
+ ipmi_inc_stat(intf, events);
+
+ /*
+ * Allocate and fill in one message for every user that is
+ * getting events.
+ */
+ index = srcu_read_lock(&intf->users_srcu);
+ list_for_each_entry_rcu(user, &intf->users, link) {
+ if (!user->gets_events)
+ continue;
+
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ rcu_read_unlock();
+ list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
+ link) {
+ list_del(&recv_msg->link);
+ ipmi_free_recv_msg(recv_msg);
+ }
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ goto out;
+ }
+
+ deliver_count++;
+
+ copy_event_into_recv_msg(recv_msg, msg);
+ recv_msg->user = user;
+ kref_get(&user->refcount);
+ list_add_tail(&recv_msg->link, &msgs);
+ }
+ srcu_read_unlock(&intf->users_srcu, index);
+
+ if (deliver_count) {
+ /* Now deliver all the messages. */
+ list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
+ list_del(&recv_msg->link);
+ deliver_local_response(intf, recv_msg);
+ }
+ } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
+ /*
+ * No one to receive the message; queue it unless there
+ * are already too many things in the queue.
+ */
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ goto out;
+ }
+
+ copy_event_into_recv_msg(recv_msg, msg);
+ list_add_tail(&recv_msg->link, &intf->waiting_events);
+ intf->waiting_events_count++;
+ } else if (!intf->event_msg_printed) {
+ /*
+ * There are too many things in the queue, discard this
+ * message.
+ */
+ dev_warn(intf->si_dev,
+ "Event queue full, discarding incoming events\n");
+ intf->event_msg_printed = 1;
+ }
+
+ out:
+ spin_unlock_irqrestore(&intf->events_lock, flags);
+
+ return rv;
+}
+
+static int handle_bmc_rsp(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_recv_msg *recv_msg;
+ struct ipmi_system_interface_addr *smi_addr;
+
+ recv_msg = msg->user_data;
+ if (recv_msg == NULL) {
+ dev_warn(intf->si_dev,
+ "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
+ return 0;
+ }
+
+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ recv_msg->msgid = msg->msgid;
+ smi_addr = ((struct ipmi_system_interface_addr *)
+ &recv_msg->addr);
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 2;
+ deliver_local_response(intf, recv_msg);
+
+ return 0;
+}
+
+/*
+ * Handle a received message. Return 1 if the message should be requeued,
+ * 0 if the message should be freed, or -1 if the message should not
+ * be freed or requeued.
+ */
+static int handle_one_recv_msg(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ int requeue = 0;
+ int chan;
+ unsigned char cc;
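+ /* For IPMB direct messages, an even NetFN (low bit clear) is a command. */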
+ bool is_cmd = !((msg->rsp[0] >> 2) & 1);
+
+ dev_dbg(intf->si_dev, "Recv: %*ph\n", msg->rsp_size, msg->rsp);
+
+ if (msg->rsp_size < 2) {
+ /* Message is too small to be correct. */
+ dev_warn(intf->si_dev,
+ "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
+ (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
+
+return_unspecified:
+ /* Generate an error response for the message. */
+ msg->rsp[0] = msg->data[0] | (1 << 2);
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+ msg->rsp_size = 3;
+ } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ /* commands must have at least 4 bytes, responses 5. */
+ if (is_cmd && (msg->rsp_size < 4)) {
+ ipmi_inc_stat(intf, invalid_commands);
+ goto out;
+ }
+ if (!is_cmd && (msg->rsp_size < 5)) {
+ ipmi_inc_stat(intf, invalid_ipmb_responses);
+ /* Construct a valid error response. */
+ msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
+ msg->rsp[0] |= (1 << 2); /* Make it a response */
+ msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
+ msg->rsp[1] = msg->data[1]; /* Addr */
+ msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
+ msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
+ msg->rsp[3] = msg->data[3]; /* Cmd */
+ msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
+ msg->rsp_size = 5;
+ }
+ } else if ((msg->data_size >= 2)
+ && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
+ && (msg->data[1] == IPMI_SEND_MSG_CMD)
+ && (msg->user_data == NULL)) {
+
+ if (intf->in_shutdown)
+ goto out;
+
+ /*
+ * This is the local response to a command send, start
+ * the timer for these. The user_data will not be
+ * NULL if this is a response send, and we will let
+ * response sends just go through.
+ */
+
+ /*
+ * Check for errors, if we get certain errors (ones
+ * that mean basically we can try again later), we
+ * ignore them and start the timer. Otherwise we
+ * report the error immediately.
+ */
+ if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
+ && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
+ && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
+ && (msg->rsp[2] != IPMI_BUS_ERR)
+ && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
+ int ch = msg->rsp[3] & 0xf;
+ struct ipmi_channel *chans;
+
+ /* Got an error sending the message, handle it. */
+
+ chans = READ_ONCE(intf->channel_list)->c;
+ if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
+ || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
+ ipmi_inc_stat(intf, sent_lan_command_errs);
+ else
+ ipmi_inc_stat(intf, sent_ipmb_command_errs);
+ intf_err_seq(intf, msg->msgid, msg->rsp[2]);
+ } else
+ /* The message was sent, start the timer. */
+ intf_start_seq_timer(intf, msg->msgid);
+ requeue = 0;
+ goto out;
+ } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
+ || (msg->rsp[1] != msg->data[1])) {
+ /*
+ * The NetFN and Command in the response is not even
+ * marginally correct.
+ */
+ dev_warn(intf->si_dev,
+ "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
+ (msg->data[0] >> 2) | 1, msg->data[1],
+ msg->rsp[0] >> 2, msg->rsp[1]);
+
+ goto return_unspecified;
+ }
+
+ if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ if ((msg->data[0] >> 2) & 1) {
+ /* It's a response to a sent response. */
+ chan = 0;
+ cc = msg->rsp[4];
+ goto process_response_response;
+ }
+ if (is_cmd)
+ requeue = handle_ipmb_direct_rcv_cmd(intf, msg);
+ else
+ requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
+ } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+ && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
+ && (msg->user_data != NULL)) {
+ /*
+ * It's a response to a response we sent. For this we
+ * deliver a send message response to the user.
+ */
+ struct ipmi_recv_msg *recv_msg;
+
+ chan = msg->data[2] & 0x0f;
+ if (chan >= IPMI_MAX_CHANNELS)
+ /* Invalid channel number */
+ goto out;
+ cc = msg->rsp[2];
+
+process_response_response:
+ recv_msg = msg->user_data;
+
+ requeue = 0;
+ if (!recv_msg)
+ goto out;
+
+ recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg_data[0] = cc;
+ recv_msg->msg.data_len = 1;
+ deliver_local_response(intf, recv_msg);
+ } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+ && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
+ struct ipmi_channel *chans;
+
+ /* It's from the receive queue. */
+ chan = msg->rsp[3] & 0xf;
+ if (chan >= IPMI_MAX_CHANNELS) {
+ /* Invalid channel number */
+ requeue = 0;
+ goto out;
+ }
+
+ /*
+ * We need to make sure the channels have been initialized.
+ * The channel scan code sets "channels_ready" once the
+ * channel list is usable (or a single IPMB channel has
+ * been assumed).
+ */
+ if (!intf->channels_ready) {
+ requeue = 0; /* Throw the message away */
+ goto out;
+ }
+
+ chans = READ_ONCE(intf->channel_list)->c;
+
+ switch (chans[chan].medium) {
+ case IPMI_CHANNEL_MEDIUM_IPMB:
+ if (msg->rsp[4] & 0x04) {
+ /*
+ * It's a response, so find the
+ * requesting message and send it up.
+ */
+ requeue = handle_ipmb_get_msg_rsp(intf, msg);
+ } else {
+ /*
+ * It's a command to the SMS from some other
+ * entity. Handle that.
+ */
+ requeue = handle_ipmb_get_msg_cmd(intf, msg);
+ }
+ break;
+
+ case IPMI_CHANNEL_MEDIUM_8023LAN:
+ case IPMI_CHANNEL_MEDIUM_ASYNC:
+ if (msg->rsp[6] & 0x04) {
+ /*
+ * It's a response, so find the
+ * requesting message and send it up.
+ */
+ requeue = handle_lan_get_msg_rsp(intf, msg);
+ } else {
+ /*
+ * It's a command to the SMS from some other
+ * entity. Handle that.
+ */
+ requeue = handle_lan_get_msg_cmd(intf, msg);
+ }
+ break;
+
+ default:
+ /*
+ * Check for OEM channels.  Clients had better
+ * register for these commands.
+ */
+ if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
+ && (chans[chan].medium
+ <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
+ requeue = handle_oem_get_msg_cmd(intf, msg);
+ } else {
+ /*
+ * We don't handle the channel type, so just
+ * free the message.
+ */
+ requeue = 0;
+ }
+ }
+
+ } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+ && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
+ /* It's an asynchronous event. */
+ requeue = handle_read_event_rsp(intf, msg);
+ } else {
+ /* It's a response from the local BMC. */
+ requeue = handle_bmc_rsp(intf, msg);
+ }
+
+ out:
+ return requeue;
+}
+
+/*
+ * If there are messages in the queue or pretimeouts, handle them.
+ */
+static void handle_new_recv_msgs(struct ipmi_smi *intf)
+{
+ struct ipmi_smi_msg *smi_msg;
+ unsigned long flags = 0;
+ int rv;
+ int run_to_completion = intf->run_to_completion;
+
+ /* See if any waiting messages need to be processed. */
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+ while (!list_empty(&intf->waiting_rcv_msgs)) {
+ smi_msg = list_entry(intf->waiting_rcv_msgs.next,
+ struct ipmi_smi_msg, link);
+ list_del(&smi_msg->link);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+ flags);
+ rv = handle_one_recv_msg(intf, smi_msg);
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+ if (rv > 0) {
+ /*
+ * To preserve message order, quit if we
+ * can't handle a message. Add the message
+ * back at the head; this is safe because this
+ * tasklet is the only thing that pulls the
+ * messages.
+ */
+ list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
+ break;
+ } else {
+ if (rv == 0)
+ /* Message handled */
+ ipmi_free_smi_msg(smi_msg);
+ /* If rv < 0, fatal error, del but don't free. */
+ }
+ }
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
+
+ /*
+ * If the pretimeout count is non-zero, decrement one from it and
+ * deliver pretimeouts to all the users.
+ */
+ if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
+ struct ipmi_user *user;
+ int index;
+
+ index = srcu_read_lock(&intf->users_srcu);
+ list_for_each_entry_rcu(user, &intf->users, link) {
+ if (user->handler->ipmi_watchdog_pretimeout)
+ user->handler->ipmi_watchdog_pretimeout(
+ user->handler_data);
+ }
+ srcu_read_unlock(&intf->users_srcu, index);
+ }
+}
+
+static void smi_recv_tasklet(struct tasklet_struct *t)
+{
+ unsigned long flags = 0; /* keep us warning-free. */
+ struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
+ int run_to_completion = intf->run_to_completion;
+ struct ipmi_smi_msg *newmsg = NULL;
+
+ /*
+ * Start the next message if available.
+ *
+ * Do this here, not in the actual receiver, because we could
+ * deadlock: the lower layer is allowed to hold locks while
+ * calling message delivery.
+ */
+
+ rcu_read_lock();
+
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+ if (intf->curr_msg == NULL && !intf->in_shutdown) {
+ struct list_head *entry = NULL;
+
+ /* Pick the high priority queue first. */
+ if (!list_empty(&intf->hp_xmit_msgs))
+ entry = intf->hp_xmit_msgs.next;
+ else if (!list_empty(&intf->xmit_msgs))
+ entry = intf->xmit_msgs.next;
+
+ if (entry) {
+ list_del(entry);
+ newmsg = list_entry(entry, struct ipmi_smi_msg, link);
+ intf->curr_msg = newmsg;
+ }
+ }
+
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+ if (newmsg)
+ intf->handlers->sender(intf->send_info, newmsg);
+
+ rcu_read_unlock();
+
+ handle_new_recv_msgs(intf);
+}
+
+/* Handle a new message from the lower layer. */
+void ipmi_smi_msg_received(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ unsigned long flags = 0; /* keep us warning-free. */
+ int run_to_completion = intf->run_to_completion;
+
+ /*
+ * To preserve message order, we keep a queue and deliver from
+ * a tasklet.
+ */
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+ list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+ flags);
+
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+ /*
+ * We can get an asynchronous event or receive message in addition
+ * to commands we send.
+ */
+ if (msg == intf->curr_msg)
+ intf->curr_msg = NULL;
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+
+ if (run_to_completion)
+ smi_recv_tasklet(&intf->recv_tasklet);
+ else
+ tasklet_schedule(&intf->recv_tasklet);
+}
+EXPORT_SYMBOL(ipmi_smi_msg_received);
+
+void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
+{
+ if (intf->in_shutdown)
+ return;
+
+ atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
+ tasklet_schedule(&intf->recv_tasklet);
+}
+EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
+
+static struct ipmi_smi_msg *
+smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
+ unsigned char seq, long seqid)
+{
+ struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
+ if (!smi_msg)
+ /*
+ * If we can't allocate the message, then just return; we
+ * get 4 retries, so this should be OK.
+ */
+ return NULL;
+
+ memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
+ smi_msg->data_size = recv_msg->msg.data_len;
+ smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
+
+ dev_dbg(intf->si_dev, "Resend: %*ph\n",
+ smi_msg->data_size, smi_msg->data);
+
+ return smi_msg;
+}
+
+static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
+ struct list_head *timeouts,
+ unsigned long timeout_period,
+ int slot, unsigned long *flags,
+ bool *need_timer)
+{
+ struct ipmi_recv_msg *msg;
+
+ if (intf->in_shutdown)
+ return;
+
+ if (!ent->inuse)
+ return;
+
+ if (timeout_period < ent->timeout) {
+ ent->timeout -= timeout_period;
+ *need_timer = true;
+ return;
+ }
+
+ if (ent->retries_left == 0) {
+ /* The message has used all its retries. */
+ ent->inuse = 0;
+ smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
+ msg = ent->recv_msg;
+ list_add_tail(&msg->link, timeouts);
+ if (ent->broadcast)
+ ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
+ else if (is_lan_addr(&ent->recv_msg->addr))
+ ipmi_inc_stat(intf, timed_out_lan_commands);
+ else
+ ipmi_inc_stat(intf, timed_out_ipmb_commands);
+ } else {
+ struct ipmi_smi_msg *smi_msg;
+ /* More retries, send again. */
+
+ *need_timer = true;
+
+ /*
+ * Start with the max timer, set to normal timer after
+ * the message is sent.
+ */
+ ent->timeout = MAX_MSG_TIMEOUT;
+ ent->retries_left--;
+ smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
+ ent->seqid);
+ if (!smi_msg) {
+ if (is_lan_addr(&ent->recv_msg->addr))
+ ipmi_inc_stat(intf,
+ dropped_rexmit_lan_commands);
+ else
+ ipmi_inc_stat(intf,
+ dropped_rexmit_ipmb_commands);
+ return;
+ }
+
+ spin_unlock_irqrestore(&intf->seq_lock, *flags);
+
+ /*
+ * Send the new message. We send with a zero
+ * priority. It timed out, I doubt time is that
+ * critical now, and high priority messages are really
+ * only for messages to the local MC, which don't get
+ * resent.
+ */
+ if (intf->handlers) {
+ if (is_lan_addr(&ent->recv_msg->addr))
+ ipmi_inc_stat(intf,
+ retransmitted_lan_commands);
+ else
+ ipmi_inc_stat(intf,
+ retransmitted_ipmb_commands);
+
+ smi_send(intf, intf->handlers, smi_msg, 0);
+ } else
+ ipmi_free_smi_msg(smi_msg);
+
+ spin_lock_irqsave(&intf->seq_lock, *flags);
+ }
+}
+
+static bool ipmi_timeout_handler(struct ipmi_smi *intf,
+ unsigned long timeout_period)
+{
+ struct list_head timeouts;
+ struct ipmi_recv_msg *msg, *msg2;
+ unsigned long flags;
+ int i;
+ bool need_timer = false;
+
+ if (!intf->bmc_registered) {
+ kref_get(&intf->refcount);
+ if (!schedule_work(&intf->bmc_reg_work)) {
+ kref_put(&intf->refcount, intf_free);
+ need_timer = true;
+ }
+ }
+
+ /*
+ * Go through the seq table and find any messages that
+ * have timed out, putting them in the timeouts
+ * list.
+ */
+ INIT_LIST_HEAD(&timeouts);
+ spin_lock_irqsave(&intf->seq_lock, flags);
+ if (intf->ipmb_maintenance_mode_timeout) {
+ if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
+ intf->ipmb_maintenance_mode_timeout = 0;
+ else
+ intf->ipmb_maintenance_mode_timeout -= timeout_period;
+ }
+ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
+ check_msg_timeout(intf, &intf->seq_table[i],
+ &timeouts, timeout_period, i,
+ &flags, &need_timer);
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+ list_for_each_entry_safe(msg, msg2, &timeouts, link)
+ deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
+
+ /*
+ * Maintenance mode handling. Check the timeout
+ * optimistically before we claim the lock. It may
+ * mean a timeout gets missed occasionally, but that
+ * only means the timeout gets extended by one period
+ * in that case. No big deal, and it avoids the lock
+ * most of the time.
+ */
+ if (intf->auto_maintenance_timeout > 0) {
+ spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+ if (intf->auto_maintenance_timeout > 0) {
+ intf->auto_maintenance_timeout
+ -= timeout_period;
+ if (!intf->maintenance_mode
+ && (intf->auto_maintenance_timeout <= 0)) {
+ intf->maintenance_mode_enable = false;
+ maintenance_mode_update(intf);
+ }
+ }
+ spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+ flags);
+ }
+
+ tasklet_schedule(&intf->recv_tasklet);
+
+ return need_timer;
+}
+
+static void ipmi_request_event(struct ipmi_smi *intf)
+{
+ /* No event requests when in maintenance mode. */
+ if (intf->maintenance_mode_enable)
+ return;
+
+ if (!intf->in_shutdown)
+ intf->handlers->request_events(intf->send_info);
+}
+
+static struct timer_list ipmi_timer;
+
+static atomic_t stop_operation;
+
+static void ipmi_timeout(struct timer_list *unused)
+{
+ struct ipmi_smi *intf;
+ bool need_timer = false;
+ int index;
+
+ if (atomic_read(&stop_operation))
+ return;
+
+ index = srcu_read_lock(&ipmi_interfaces_srcu);
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (atomic_read(&intf->event_waiters)) {
+ intf->ticks_to_req_ev--;
+ if (intf->ticks_to_req_ev == 0) {
+ ipmi_request_event(intf);
+ intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+ }
+ need_timer = true;
+ }
+
+ need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
+ }
+ srcu_read_unlock(&ipmi_interfaces_srcu, index);
+
+ if (need_timer)
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+}
+
+static void need_waiter(struct ipmi_smi *intf)
+{
+ /* Racy, but worst case we start the timer twice. */
+ if (!timer_pending(&ipmi_timer))
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+}
+
+static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
+static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
+
+static void free_smi_msg(struct ipmi_smi_msg *msg)
+{
+ atomic_dec(&smi_msg_inuse_count);
+ /* Try to keep as much stuff out of the panic path as possible. */
+ if (!oops_in_progress)
+ kfree(msg);
+}
+
+struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
+{
+ struct ipmi_smi_msg *rv;
+ rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
+ if (rv) {
+ rv->done = free_smi_msg;
+ rv->user_data = NULL;
+ rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
+ atomic_inc(&smi_msg_inuse_count);
+ }
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_alloc_smi_msg);
+
+static void free_recv_msg(struct ipmi_recv_msg *msg)
+{
+ atomic_dec(&recv_msg_inuse_count);
+ /* Try to keep as much stuff out of the panic path as possible. */
+ if (!oops_in_progress)
+ kfree(msg);
+}
+
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
+{
+ struct ipmi_recv_msg *rv;
+
+ rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
+ if (rv) {
+ rv->user = NULL;
+ rv->done = free_recv_msg;
+ atomic_inc(&recv_msg_inuse_count);
+ }
+ return rv;
+}
+
+void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
+{
+ if (msg->user && !oops_in_progress)
+ kref_put(&msg->user->refcount, free_user);
+ msg->done(msg);
+}
+EXPORT_SYMBOL(ipmi_free_recv_msg);
+
+static atomic_t panic_done_count = ATOMIC_INIT(0);
+
+static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
+{
+ atomic_dec(&panic_done_count);
+}
+
+static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
+{
+ atomic_dec(&panic_done_count);
+}
+
+/*
+ * Inside a panic, send a message and wait for a response.
+ */
+static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
+{
+ struct ipmi_smi_msg smi_msg;
+ struct ipmi_recv_msg recv_msg;
+ int rv;
+
+ smi_msg.done = dummy_smi_done_handler;
+ recv_msg.done = dummy_recv_done_handler;
+ atomic_add(2, &panic_done_count);
+ rv = i_ipmi_request(NULL,
+ intf,
+ addr,
+ 0,
+ msg,
+ intf,
+ &smi_msg,
+ &recv_msg,
+ 0,
+ intf->addrinfo[0].address,
+ intf->addrinfo[0].lun,
+ 0, 1); /* Don't retry, and don't wait. */
+ if (rv)
+ atomic_sub(2, &panic_done_count);
+ else if (intf->handlers->flush_messages)
+ intf->handlers->flush_messages(intf->send_info);
+
+ while (atomic_read(&panic_done_count) != 0)
+ ipmi_poll(intf);
+}
+
+static void event_receiver_fetcher(struct ipmi_smi *intf,
+ struct ipmi_recv_msg *msg)
+{
+ if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
+ && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
+ && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
+ /* A get event receiver command, save it. */
+ intf->event_receiver = msg->msg.data[1];
+ intf->event_receiver_lun = msg->msg.data[2] & 0x3;
+ }
+}
+
+static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
+{
+ if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
+ && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
+ && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
+ /*
+ * A get device id command, save if we are an event
+ * receiver or generator.
+ */
+ intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
+ intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
+ }
+}
+
+static void send_panic_events(struct ipmi_smi *intf, char *str)
+{
+ struct kernel_ipmi_msg msg;
+ unsigned char data[16];
+ struct ipmi_system_interface_addr *si;
+ struct ipmi_addr addr;
+ char *p = str;
+ struct ipmi_ipmb_addr *ipmb;
+ int j;
+
+ if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
+ return;
+
+ si = (struct ipmi_system_interface_addr *) &addr;
+ si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si->channel = IPMI_BMC_CHANNEL;
+ si->lun = 0;
+
+ /* Fill in an event telling that we have failed. */
+ msg.netfn = 0x04; /* Sensor or Event. */
+ msg.cmd = 2; /* Platform event command. */
+ msg.data = data;
+ msg.data_len = 8;
+ data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
+ data[1] = 0x03; /* This is for IPMI 1.0. */
+ data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
+ data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
+ data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
+
+ /*
+ * Put a few breadcrumbs in. Hopefully later we can add more things
+ * to make the panic events more useful.
+ */
+ if (str) {
+ data[3] = str[0];
+ data[6] = str[1];
+ data[7] = str[2];
+ }
+
+ /* Send the event announcing the panic. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
+
+ /*
+	 * On every interface, dump a bunch of OEM events holding the
+ * string.
+ */
+ if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
+ return;
+
+ /*
+	 * intf_num is used as a marker to tell if the
+ * interface is valid. Thus we need a read barrier to
+ * make sure data fetched before checking intf_num
+ * won't be used.
+ */
+ smp_rmb();
+
+ /*
+ * First job here is to figure out where to send the
+ * OEM events. There's no way in IPMI to send OEM
+ * events using an event send command, so we have to
+ * find the SEL to put them in and stick them in
+ * there.
+ */
+
+ /* Get capabilities from the get device id. */
+ intf->local_sel_device = 0;
+ intf->local_event_generator = 0;
+ intf->event_receiver = 0;
+
+ /* Request the device info from the local MC. */
+ msg.netfn = IPMI_NETFN_APP_REQUEST;
+ msg.cmd = IPMI_GET_DEVICE_ID_CMD;
+ msg.data = NULL;
+ msg.data_len = 0;
+ intf->null_user_handler = device_id_fetcher;
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
+
+ if (intf->local_event_generator) {
+ /* Request the event receiver from the local MC. */
+ msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
+ msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
+ msg.data = NULL;
+ msg.data_len = 0;
+ intf->null_user_handler = event_receiver_fetcher;
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
+ }
+ intf->null_user_handler = NULL;
+
+ /*
+ * Validate the event receiver. The low bit must not
+ * be 1 (it must be a valid IPMB address), it cannot
+ * be zero, and it must not be my address.
+ */
+ if (((intf->event_receiver & 1) == 0)
+ && (intf->event_receiver != 0)
+ && (intf->event_receiver != intf->addrinfo[0].address)) {
+ /*
+ * The event receiver is valid, send an IPMB
+ * message.
+ */
+ ipmb = (struct ipmi_ipmb_addr *) &addr;
+ ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb->channel = 0; /* FIXME - is this right? */
+ ipmb->lun = intf->event_receiver_lun;
+ ipmb->slave_addr = intf->event_receiver;
+ } else if (intf->local_sel_device) {
+ /*
+ * The event receiver was not valid (or was
+ * me), but I am an SEL device, just dump it
+ * in my SEL.
+ */
+ si = (struct ipmi_system_interface_addr *) &addr;
+ si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si->channel = IPMI_BMC_CHANNEL;
+ si->lun = 0;
+ } else
+		return; /* Nowhere to send the event. */
+
+ msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
+ msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
+ msg.data = data;
+ msg.data_len = 16;
+
+ j = 0;
+ while (*p) {
+ int size = strlen(p);
+
+ if (size > 11)
+ size = 11;
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0xf0; /* OEM event without timestamp. */
+ data[3] = intf->addrinfo[0].address;
+ data[4] = j++; /* sequence # */
+ /*
+ * Always give 11 bytes, so strncpy will fill
+ * it with zeroes for me.
+ */
+ strncpy(data+5, p, 11);
+ p += size;
+
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
+ }
+}
+
+static int has_panicked;
+
+static int panic_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
+{
+ struct ipmi_smi *intf;
+ struct ipmi_user *user;
+
+ if (has_panicked)
+ return NOTIFY_DONE;
+ has_panicked = 1;
+
+ /* For every registered interface, set it to run to completion. */
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (!intf->handlers || intf->intf_num == -1)
+ /* Interface is not ready. */
+ continue;
+
+ if (!intf->handlers->poll)
+ continue;
+
+ /*
+ * If we were interrupted while locking xmit_msgs_lock or
+ * waiting_rcv_msgs_lock, the corresponding list may be
+ * corrupted. In this case, drop items on the list for
+ * the safety.
+ */
+ if (!spin_trylock(&intf->xmit_msgs_lock)) {
+ INIT_LIST_HEAD(&intf->xmit_msgs);
+ INIT_LIST_HEAD(&intf->hp_xmit_msgs);
+ } else
+ spin_unlock(&intf->xmit_msgs_lock);
+
+ if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
+ INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
+ else
+ spin_unlock(&intf->waiting_rcv_msgs_lock);
+
+ intf->run_to_completion = 1;
+ if (intf->handlers->set_run_to_completion)
+ intf->handlers->set_run_to_completion(intf->send_info,
+ 1);
+
+ list_for_each_entry_rcu(user, &intf->users, link) {
+ if (user->handler->ipmi_panic_handler)
+ user->handler->ipmi_panic_handler(
+ user->handler_data);
+ }
+
+ send_panic_events(intf, ptr);
+ }
+
+ return NOTIFY_DONE;
+}
+
+/* Must be called with ipmi_interfaces_mutex held. */
+static int ipmi_register_driver(void)
+{
+ int rv;
+
+ if (drvregistered)
+ return 0;
+
+ rv = driver_register(&ipmidriver.driver);
+ if (rv)
+ pr_err("Could not register IPMI driver\n");
+ else
+ drvregistered = true;
+ return rv;
+}
+
+static struct notifier_block panic_block = {
+ .notifier_call = panic_event,
+ .next = NULL,
+ .priority = 200 /* priority: INT_MAX >= x >= 0 */
+};
+
+static int ipmi_init_msghandler(void)
+{
+ int rv;
+
+ mutex_lock(&ipmi_interfaces_mutex);
+ rv = ipmi_register_driver();
+ if (rv)
+ goto out;
+ if (initialized)
+ goto out;
+
+ rv = init_srcu_struct(&ipmi_interfaces_srcu);
+ if (rv)
+ goto out;
+
+ remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
+ if (!remove_work_wq) {
+ pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
+ rv = -ENOMEM;
+ goto out_wq;
+ }
+
+ timer_setup(&ipmi_timer, ipmi_timeout, 0);
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
+ initialized = true;
+
+out_wq:
+ if (rv)
+ cleanup_srcu_struct(&ipmi_interfaces_srcu);
+out:
+ mutex_unlock(&ipmi_interfaces_mutex);
+ return rv;
+}
+
+static int __init ipmi_init_msghandler_mod(void)
+{
+ int rv;
+
+ pr_info("version " IPMI_DRIVER_VERSION "\n");
+
+ mutex_lock(&ipmi_interfaces_mutex);
+ rv = ipmi_register_driver();
+ mutex_unlock(&ipmi_interfaces_mutex);
+
+ return rv;
+}
+
+static void __exit cleanup_ipmi(void)
+{
+ int count;
+
+ if (initialized) {
+ destroy_workqueue(remove_work_wq);
+
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &panic_block);
+
+ /*
+ * This can't be called if any interfaces exist, so no worry
+ * about shutting down the interfaces.
+ */
+
+ /*
+ * Tell the timer to stop, then wait for it to stop. This
+ * avoids problems with race conditions removing the timer
+ * here.
+ */
+ atomic_set(&stop_operation, 1);
+ del_timer_sync(&ipmi_timer);
+
+ initialized = false;
+
+ /* Check for buffer leaks. */
+ count = atomic_read(&smi_msg_inuse_count);
+ if (count != 0)
+ pr_warn("SMI message count %d at exit\n", count);
+ count = atomic_read(&recv_msg_inuse_count);
+ if (count != 0)
+ pr_warn("recv message count %d at exit\n", count);
+
+ cleanup_srcu_struct(&ipmi_interfaces_srcu);
+ }
+ if (drvregistered)
+ driver_unregister(&ipmidriver.driver);
+}
+module_exit(cleanup_ipmi);
+
+module_init(ipmi_init_msghandler_mod);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
+MODULE_VERSION(IPMI_DRIVER_VERSION);
+MODULE_SOFTDEP("post: ipmi_devintf");
diff --git a/drivers/char/ipmi/ipmi_plat_data.c b/drivers/char/ipmi/ipmi_plat_data.c
new file mode 100644
index 0000000000..747b51ae01
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_plat_data.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Add an IPMI platform device.
+ */
+
+#include <linux/platform_device.h>
+#include "ipmi_plat_data.h"
+#include "ipmi_si.h"
+
+struct platform_device *ipmi_platform_add(const char *name, unsigned int inst,
+ struct ipmi_plat_data *p)
+{
+ struct platform_device *pdev;
+ unsigned int num_r = 1, size = 0, pidx = 0;
+ struct resource r[4];
+ struct property_entry pr[6];
+ u32 flags;
+ int rv;
+
+ memset(pr, 0, sizeof(pr));
+ memset(r, 0, sizeof(r));
+
+ if (p->iftype == IPMI_PLAT_IF_SI) {
+ if (p->type == SI_BT)
+ size = 3;
+ else if (p->type != SI_TYPE_INVALID)
+ size = 2;
+
+ if (p->regsize == 0)
+ p->regsize = DEFAULT_REGSIZE;
+ if (p->regspacing == 0)
+ p->regspacing = p->regsize;
+
+ pr[pidx++] = PROPERTY_ENTRY_U8("ipmi-type", p->type);
+ } else if (p->iftype == IPMI_PLAT_IF_SSIF) {
+ pr[pidx++] = PROPERTY_ENTRY_U16("i2c-addr", p->addr);
+ }
+
+ if (p->slave_addr)
+ pr[pidx++] = PROPERTY_ENTRY_U8("slave-addr", p->slave_addr);
+ pr[pidx++] = PROPERTY_ENTRY_U8("addr-source", p->addr_source);
+ if (p->regshift)
+ pr[pidx++] = PROPERTY_ENTRY_U8("reg-shift", p->regshift);
+ pr[pidx++] = PROPERTY_ENTRY_U8("reg-size", p->regsize);
+ /* Last entry must be left NULL to terminate it. */
+
+ pdev = platform_device_alloc(name, inst);
+ if (!pdev) {
+ pr_err("Error allocating IPMI platform device %s.%d\n",
+ name, inst);
+ return NULL;
+ }
+
+ if (size == 0)
+ /* An invalid or SSIF interface, no resources. */
+ goto add_properties;
+
+ /*
+ * Register spacing is derived from the resources in
+ * the IPMI platform code.
+ */
+
+ if (p->space == IPMI_IO_ADDR_SPACE)
+ flags = IORESOURCE_IO;
+ else
+ flags = IORESOURCE_MEM;
+
+ r[0].start = p->addr;
+ r[0].end = r[0].start + p->regsize - 1;
+ r[0].name = "IPMI Address 1";
+ r[0].flags = flags;
+
+ if (size > 1) {
+ r[1].start = r[0].start + p->regspacing;
+ r[1].end = r[1].start + p->regsize - 1;
+ r[1].name = "IPMI Address 2";
+ r[1].flags = flags;
+ num_r++;
+ }
+
+ if (size > 2) {
+ r[2].start = r[1].start + p->regspacing;
+ r[2].end = r[2].start + p->regsize - 1;
+ r[2].name = "IPMI Address 3";
+ r[2].flags = flags;
+ num_r++;
+ }
+
+ if (p->irq) {
+ r[num_r].start = p->irq;
+ r[num_r].end = p->irq;
+ r[num_r].name = "IPMI IRQ";
+ r[num_r].flags = IORESOURCE_IRQ;
+ num_r++;
+ }
+
+ rv = platform_device_add_resources(pdev, r, num_r);
+ if (rv) {
+ dev_err(&pdev->dev,
+ "Unable to add hard-code resources: %d\n", rv);
+ goto err;
+ }
+ add_properties:
+ rv = device_create_managed_software_node(&pdev->dev, pr, NULL);
+ if (rv) {
+ dev_err(&pdev->dev,
+ "Unable to add hard-code properties: %d\n", rv);
+ goto err;
+ }
+
+ rv = platform_device_add(pdev);
+ if (rv) {
+ dev_err(&pdev->dev,
+ "Unable to add hard-code device: %d\n", rv);
+ goto err;
+ }
+ return pdev;
+
+err:
+ platform_device_put(pdev);
+ return NULL;
+}
+EXPORT_SYMBOL(ipmi_platform_add);
diff --git a/drivers/char/ipmi/ipmi_plat_data.h b/drivers/char/ipmi/ipmi_plat_data.h
new file mode 100644
index 0000000000..9ba744ea95
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_plat_data.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+/*
+ * Generic code to add IPMI platform devices.
+ */
+
+#include <linux/ipmi.h>
+
+enum ipmi_plat_interface_type { IPMI_PLAT_IF_SI, IPMI_PLAT_IF_SSIF };
+
+struct ipmi_plat_data {
+ enum ipmi_plat_interface_type iftype;
+ unsigned int type; /* si_type for si, SI_INVALID for others */
+ unsigned int space; /* addr_space for si, intf# for ssif. */
+ unsigned long addr;
+ unsigned int regspacing;
+ unsigned int regsize;
+ unsigned int regshift;
+ unsigned int irq;
+ unsigned int slave_addr;
+ enum ipmi_addr_src addr_source;
+};
+
+struct platform_device *ipmi_platform_add(const char *name, unsigned int inst,
+ struct ipmi_plat_data *p);
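+
+/*
+ * Illustrative sketch only, not part of the interface proper: assuming the
+ * caller also includes ipmi_si.h (for SI_KCS and IPMI_IO_ADDR_SPACE), a
+ * discovery backend could describe a hypothetical KCS interface at I/O
+ * port 0xca2 roughly like this:
+ *
+ *	struct ipmi_plat_data p = {
+ *		.iftype = IPMI_PLAT_IF_SI,
+ *		.type = SI_KCS,
+ *		.space = IPMI_IO_ADDR_SPACE,
+ *		.addr = 0xca2,
+ *		.regsize = 1,
+ *		.regspacing = 1,
+ *		.addr_source = SI_HARDCODED,
+ *	};
+ *	ipmi_platform_add("example-ipmi-si", 0, &p);
+ *
+ * The device name "example-ipmi-si" is made up for the example.
+ */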
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
new file mode 100644
index 0000000000..da22a8cbe6
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PowerNV OPAL IPMI driver
+ *
+ * Copyright 2014 IBM Corp.
+ */
+
+#define pr_fmt(fmt) "ipmi-powernv: " fmt
+
+#include <linux/ipmi_smi.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/opal.h>
+
+
+struct ipmi_smi_powernv {
+ u64 interface_id;
+ struct ipmi_smi *intf;
+ unsigned int irq;
+
+	/*
+	 * We assume that there can only be one outstanding request, so we
+	 * keep the pending message in cur_msg. cur_msg (and consequently
+	 * opal_msg, which is in use while cur_msg is set) is protected from
+	 * concurrent updates through the send & recv calls by msg_lock.
+	 */
+ spinlock_t msg_lock;
+ struct ipmi_smi_msg *cur_msg;
+ struct opal_ipmi_msg *opal_msg;
+};
+
+static int ipmi_powernv_start_processing(void *send_info, struct ipmi_smi *intf)
+{
+ struct ipmi_smi_powernv *smi = send_info;
+
+ smi->intf = intf;
+ return 0;
+}
+
+static void send_error_reply(struct ipmi_smi_powernv *smi,
+ struct ipmi_smi_msg *msg, u8 completion_code)
+{
+ msg->rsp[0] = msg->data[0] | 0x4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = completion_code;
+ msg->rsp_size = 3;
+ ipmi_smi_msg_received(smi->intf, msg);
+}
+
+static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
+{
+ struct ipmi_smi_powernv *smi = send_info;
+ struct opal_ipmi_msg *opal_msg;
+ unsigned long flags;
+ int comp, rc;
+ size_t size;
+
+ /* ensure data_len will fit in the opal_ipmi_msg buffer... */
+ if (msg->data_size > IPMI_MAX_MSG_LENGTH) {
+ comp = IPMI_REQ_LEN_EXCEEDED_ERR;
+ goto err;
+ }
+
+ /* ... and that we at least have netfn and cmd bytes */
+ if (msg->data_size < 2) {
+ comp = IPMI_REQ_LEN_INVALID_ERR;
+ goto err;
+ }
+
+ spin_lock_irqsave(&smi->msg_lock, flags);
+
+ if (smi->cur_msg) {
+ comp = IPMI_NODE_BUSY_ERR;
+ goto err_unlock;
+ }
+
+ /* format our data for the OPAL API */
+ opal_msg = smi->opal_msg;
+ opal_msg->version = OPAL_IPMI_MSG_FORMAT_VERSION_1;
+ opal_msg->netfn = msg->data[0];
+ opal_msg->cmd = msg->data[1];
+ if (msg->data_size > 2)
+ memcpy(opal_msg->data, msg->data + 2, msg->data_size - 2);
+
+ /* data_size already includes the netfn and cmd bytes */
+ size = sizeof(*opal_msg) + msg->data_size - 2;
+
+ pr_devel("%s: opal_ipmi_send(0x%llx, %p, %ld)\n", __func__,
+ smi->interface_id, opal_msg, size);
+ rc = opal_ipmi_send(smi->interface_id, opal_msg, size);
+ pr_devel("%s: -> %d\n", __func__, rc);
+
+ if (!rc) {
+ smi->cur_msg = msg;
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ return;
+ }
+
+ comp = IPMI_ERR_UNSPECIFIED;
+err_unlock:
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+err:
+ send_error_reply(smi, msg, comp);
+}
+
+static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi)
+{
+ struct opal_ipmi_msg *opal_msg;
+ struct ipmi_smi_msg *msg;
+ unsigned long flags;
+ uint64_t size;
+ int rc;
+
+ pr_devel("%s: opal_ipmi_recv(%llx, msg, sz)\n", __func__,
+ smi->interface_id);
+
+ spin_lock_irqsave(&smi->msg_lock, flags);
+
+ if (!smi->cur_msg) {
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ pr_warn("no current message?\n");
+ return 0;
+ }
+
+ msg = smi->cur_msg;
+ opal_msg = smi->opal_msg;
+
+ size = cpu_to_be64(sizeof(*opal_msg) + IPMI_MAX_MSG_LENGTH);
+
+ rc = opal_ipmi_recv(smi->interface_id,
+ opal_msg,
+ &size);
+ size = be64_to_cpu(size);
+ pr_devel("%s: -> %d (size %lld)\n", __func__,
+ rc, rc == 0 ? size : 0);
+ if (rc) {
+		/* If this came via the poll and the response was not yet ready */
+ if (rc == OPAL_EMPTY) {
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ return 0;
+ }
+
+ smi->cur_msg = NULL;
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ send_error_reply(smi, msg, IPMI_ERR_UNSPECIFIED);
+ return 0;
+ }
+
+ if (size < sizeof(*opal_msg)) {
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ pr_warn("unexpected IPMI message size %lld\n", size);
+ return 0;
+ }
+
+ if (opal_msg->version != OPAL_IPMI_MSG_FORMAT_VERSION_1) {
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ pr_warn("unexpected IPMI message format (version %d)\n",
+ opal_msg->version);
+ return 0;
+ }
+
+ msg->rsp[0] = opal_msg->netfn;
+ msg->rsp[1] = opal_msg->cmd;
+ if (size > sizeof(*opal_msg))
+ memcpy(&msg->rsp[2], opal_msg->data, size - sizeof(*opal_msg));
+ msg->rsp_size = 2 + size - sizeof(*opal_msg);
+
+ smi->cur_msg = NULL;
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ ipmi_smi_msg_received(smi->intf, msg);
+ return 0;
+}
+
+static void ipmi_powernv_request_events(void *send_info)
+{
+}
+
+static void ipmi_powernv_set_run_to_completion(void *send_info,
+ bool run_to_completion)
+{
+}
+
+static void ipmi_powernv_poll(void *send_info)
+{
+ struct ipmi_smi_powernv *smi = send_info;
+
+ ipmi_powernv_recv(smi);
+}
+
+static const struct ipmi_smi_handlers ipmi_powernv_smi_handlers = {
+ .owner = THIS_MODULE,
+ .start_processing = ipmi_powernv_start_processing,
+ .sender = ipmi_powernv_send,
+ .request_events = ipmi_powernv_request_events,
+ .set_run_to_completion = ipmi_powernv_set_run_to_completion,
+ .poll = ipmi_powernv_poll,
+};
+
+static irqreturn_t ipmi_opal_event(int irq, void *data)
+{
+ struct ipmi_smi_powernv *smi = data;
+
+ ipmi_powernv_recv(smi);
+ return IRQ_HANDLED;
+}
+
+static int ipmi_powernv_probe(struct platform_device *pdev)
+{
+ struct ipmi_smi_powernv *ipmi;
+ struct device *dev;
+ u32 prop;
+ int rc;
+
+ if (!pdev || !pdev->dev.of_node)
+ return -ENODEV;
+
+ dev = &pdev->dev;
+
+ ipmi = devm_kzalloc(dev, sizeof(*ipmi), GFP_KERNEL);
+ if (!ipmi)
+ return -ENOMEM;
+
+ spin_lock_init(&ipmi->msg_lock);
+
+ rc = of_property_read_u32(dev->of_node, "ibm,ipmi-interface-id",
+ &prop);
+ if (rc) {
+ dev_warn(dev, "No interface ID property\n");
+ goto err_free;
+ }
+ ipmi->interface_id = prop;
+
+ rc = of_property_read_u32(dev->of_node, "interrupts", &prop);
+ if (rc) {
+ dev_warn(dev, "No interrupts property\n");
+ goto err_free;
+ }
+
+ ipmi->irq = irq_of_parse_and_map(dev->of_node, 0);
+ if (!ipmi->irq) {
+ dev_info(dev, "Unable to map irq from device tree\n");
+ ipmi->irq = opal_event_request(prop);
+ }
+
+ rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
+ "opal-ipmi", ipmi);
+ if (rc) {
+ dev_warn(dev, "Unable to request irq\n");
+ goto err_dispose;
+ }
+
+ ipmi->opal_msg = devm_kmalloc(dev,
+ sizeof(*ipmi->opal_msg) + IPMI_MAX_MSG_LENGTH,
+ GFP_KERNEL);
+ if (!ipmi->opal_msg) {
+ rc = -ENOMEM;
+ goto err_unregister;
+ }
+
+ rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi, dev, 0);
+ if (rc) {
+ dev_warn(dev, "IPMI SMI registration failed (%d)\n", rc);
+ goto err_free_msg;
+ }
+
+ dev_set_drvdata(dev, ipmi);
+ return 0;
+
+err_free_msg:
+ devm_kfree(dev, ipmi->opal_msg);
+err_unregister:
+ free_irq(ipmi->irq, ipmi);
+err_dispose:
+ irq_dispose_mapping(ipmi->irq);
+err_free:
+ devm_kfree(dev, ipmi);
+ return rc;
+}
+
+static int ipmi_powernv_remove(struct platform_device *pdev)
+{
+ struct ipmi_smi_powernv *smi = dev_get_drvdata(&pdev->dev);
+
+ ipmi_unregister_smi(smi->intf);
+ free_irq(smi->irq, smi);
+ irq_dispose_mapping(smi->irq);
+
+ return 0;
+}
+
+static const struct of_device_id ipmi_powernv_match[] = {
+ { .compatible = "ibm,opal-ipmi" },
+ { },
+};
+
+
+static struct platform_driver powernv_ipmi_driver = {
+ .driver = {
+ .name = "ipmi-powernv",
+ .of_match_table = ipmi_powernv_match,
+ },
+ .probe = ipmi_powernv_probe,
+ .remove = ipmi_powernv_remove,
+};
+
+
+module_platform_driver(powernv_ipmi_driver);
+
+MODULE_DEVICE_TABLE(of, ipmi_powernv_match);
+MODULE_DESCRIPTION("powernv IPMI driver");
+MODULE_AUTHOR("Jeremy Kerr <jk@ozlabs.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
new file mode 100644
index 0000000000..870659d91d
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_poweroff.c
+ *
+ * MontaVista IPMI Poweroff extension to sys_reboot
+ *
+ * Author: MontaVista Software, Inc.
+ * Steven Dake <sdake@mvista.com>
+ * Corey Minyard <cminyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002,2004 MontaVista Software Inc.
+ */
+
+#define pr_fmt(fmt) "IPMI poweroff: " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/pm.h>
+#include <linux/kdev_t.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+
+static void ipmi_po_smi_gone(int if_num);
+static void ipmi_po_new_smi(int if_num, struct device *device);
+
+/* Definitions for controlling power off (if the system supports it). It
+ * conveniently matches the IPMI chassis control values. */
+#define IPMI_CHASSIS_POWER_DOWN 0 /* power down, the default. */
+#define IPMI_CHASSIS_POWER_CYCLE 0x02 /* power cycle */
+
+/* the IPMI data command */
+static int poweroff_powercycle;
+
+/* Which interface to use, -1 means the first we see. */
+static int ifnum_to_use = -1;
+
+/* Our local state. */
+static int ready;
+static struct ipmi_user *ipmi_user;
+static int ipmi_ifnum;
+static void (*specific_poweroff_func)(struct ipmi_user *user);
+
+/* Holds the old poweroff function so we can restore it on removal. */
+static void (*old_poweroff_func)(void);
+
+static int set_param_ifnum(const char *val, const struct kernel_param *kp)
+{
+ int rv = param_set_int(val, kp);
+ if (rv)
+ return rv;
+ if ((ifnum_to_use < 0) || (ifnum_to_use == ipmi_ifnum))
+ return 0;
+
+ ipmi_po_smi_gone(ipmi_ifnum);
+ ipmi_po_new_smi(ifnum_to_use, NULL);
+ return 0;
+}
+
+module_param_call(ifnum_to_use, set_param_ifnum, param_get_int,
+ &ifnum_to_use, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the "
+		 "powerdown. Setting to -1 defaults to the first registered "
+		 "interface");
+
+/* parameter definition to allow user to flag power cycle */
+module_param(poweroff_powercycle, int, 0644);
+MODULE_PARM_DESC(poweroff_powercycle,
+ " Set to non-zero to enable power cycle instead of power"
+ " down. Power cycle is contingent on hardware support,"
+ " otherwise it defaults back to power down.");
+
+/* Stuff from the get device id command. */
+static unsigned int mfg_id;
+static unsigned int prod_id;
+static unsigned char capabilities;
+static unsigned char ipmi_version;
+
+/*
+ * We use our own messages for this operation; we don't let the system
+ * allocate them, since we may be in a panic situation. The whole
+ * thing is single-threaded, anyway, so multiple messages are not
+ * required.
+ */
+static atomic_t dummy_count = ATOMIC_INIT(0);
+static void dummy_smi_free(struct ipmi_smi_msg *msg)
+{
+ atomic_dec(&dummy_count);
+}
+static void dummy_recv_free(struct ipmi_recv_msg *msg)
+{
+ atomic_dec(&dummy_count);
+}
+static struct ipmi_smi_msg halt_smi_msg = INIT_IPMI_SMI_MSG(dummy_smi_free);
+static struct ipmi_recv_msg halt_recv_msg = INIT_IPMI_RECV_MSG(dummy_recv_free);
+
+
+/*
+ * Code to send a message and wait for the response.
+ */
+
+static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data)
+{
+ struct completion *comp = recv_msg->user_msg_data;
+
+ if (comp)
+ complete(comp);
+}
+
+static const struct ipmi_user_hndl ipmi_poweroff_handler = {
+ .ipmi_recv_hndl = receive_handler
+};
+
+
+static int ipmi_request_wait_for_response(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *send_msg)
+{
+ int rv;
+ struct completion comp;
+
+ init_completion(&comp);
+
+ rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, &comp,
+ &halt_smi_msg, &halt_recv_msg, 0);
+ if (rv)
+ return rv;
+
+ wait_for_completion(&comp);
+
+ return halt_recv_msg.msg.data[0];
+}
+
+/* Wait for message to complete, spinning. */
+static int ipmi_request_in_rc_mode(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *send_msg)
+{
+ int rv;
+
+ atomic_set(&dummy_count, 2);
+ rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
+ &halt_smi_msg, &halt_recv_msg, 0);
+ if (rv) {
+ atomic_set(&dummy_count, 0);
+ return rv;
+ }
+
+ /*
+ * Spin until our message is done.
+ */
+ while (atomic_read(&dummy_count) > 0) {
+ ipmi_poll_interface(user);
+ cpu_relax();
+ }
+
+ return halt_recv_msg.msg.data[0];
+}
+
+/*
+ * ATCA Support
+ */
+
+#define IPMI_NETFN_ATCA 0x2c
+#define IPMI_ATCA_SET_POWER_CMD 0x11
+#define IPMI_ATCA_GET_ADDR_INFO_CMD 0x01
+#define IPMI_PICMG_ID 0
+
+#define IPMI_NETFN_OEM 0x2e
+#define IPMI_ATCA_PPS_GRACEFUL_RESTART 0x11
+#define IPMI_ATCA_PPS_IANA "\x00\x40\x0A"
+#define IPMI_MOTOROLA_MANUFACTURER_ID 0x0000A1
+#define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID 0x0051
+
+static void (*atca_oem_poweroff_hook)(struct ipmi_user *user);
+
+static void pps_poweroff_atca(struct ipmi_user *user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ pr_info("PPS powerdown hook used\n");
+
+ send_msg.netfn = IPMI_NETFN_OEM;
+ send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
+ send_msg.data = IPMI_ATCA_PPS_IANA;
+ send_msg.data_len = 3;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE)
+ pr_err("Unable to send ATCA, IPMI error 0x%x\n", rv);
+
+ return;
+}
+
+static int ipmi_atca_detect(struct ipmi_user *user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ unsigned char data[1];
+
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ /*
+ * Use get address info to check and see if we are ATCA
+ */
+ send_msg.netfn = IPMI_NETFN_ATCA;
+ send_msg.cmd = IPMI_ATCA_GET_ADDR_INFO_CMD;
+ data[0] = IPMI_PICMG_ID;
+ send_msg.data = data;
+ send_msg.data_len = sizeof(data);
+ rv = ipmi_request_wait_for_response(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+
+ pr_info("ATCA Detect mfg 0x%X prod 0x%X\n", mfg_id, prod_id);
+ if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
+ && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
+ pr_info("Installing Pigeon Point Systems Poweroff Hook\n");
+ atca_oem_poweroff_hook = pps_poweroff_atca;
+ }
+ return !rv;
+}
+
+static void ipmi_poweroff_atca(struct ipmi_user *user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ unsigned char data[4];
+
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ pr_info("Powering down via ATCA power command\n");
+
+ /*
+ * Power down
+ */
+ send_msg.netfn = IPMI_NETFN_ATCA;
+ send_msg.cmd = IPMI_ATCA_SET_POWER_CMD;
+ data[0] = IPMI_PICMG_ID;
+ data[1] = 0; /* FRU id */
+ data[2] = 0; /* Power Level */
+ data[3] = 0; /* Don't change saved presets */
+ send_msg.data = data;
+ send_msg.data_len = sizeof(data);
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ /*
+ * At this point, the system may be shutting down, and most
+	 * serial drivers (if used) will have interrupts turned off, so it
+	 * may be better to ignore the IPMI_UNKNOWN_ERR_COMPLETION_CODE
+	 * return code.
+ */
+ if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+ pr_err("Unable to send ATCA powerdown message, IPMI error 0x%x\n",
+ rv);
+ goto out;
+ }
+
+ if (atca_oem_poweroff_hook)
+ atca_oem_poweroff_hook(user);
+ out:
+ return;
+}
+
+/*
+ * CPI1 Support
+ */
+
+#define IPMI_NETFN_OEM_1 0xf8
+#define OEM_GRP_CMD_SET_RESET_STATE 0x84
+#define OEM_GRP_CMD_SET_POWER_STATE 0x82
+#define IPMI_NETFN_OEM_8 0xf8
+#define OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL 0x80
+#define OEM_GRP_CMD_GET_SLOT_GA 0xa3
+#define IPMI_NETFN_SENSOR_EVT 0x10
+#define IPMI_CMD_GET_EVENT_RECEIVER 0x01
+
+#define IPMI_CPI1_PRODUCT_ID 0x000157
+#define IPMI_CPI1_MANUFACTURER_ID 0x0108
+
+static int ipmi_cpi1_detect(struct ipmi_user *user)
+{
+ return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID)
+ && (prod_id == IPMI_CPI1_PRODUCT_ID));
+}
+
+static void ipmi_poweroff_cpi1(struct ipmi_user *user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct ipmi_ipmb_addr ipmb_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ unsigned char data[1];
+ int slot;
+ unsigned char hotswap_ipmb;
+ unsigned char aer_addr;
+ unsigned char aer_lun;
+
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ pr_info("Powering down via CPI1 power command\n");
+
+ /*
+ * Get IPMI ipmb address
+ */
+ send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
+ send_msg.cmd = OEM_GRP_CMD_GET_SLOT_GA;
+ send_msg.data = NULL;
+ send_msg.data_len = 0;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv)
+ goto out;
+ slot = halt_recv_msg.msg.data[1];
+ hotswap_ipmb = (slot > 9) ? (0xb0 + 2 * slot) : (0xae + 2 * slot);
+
+ /*
+ * Get active event receiver
+ */
+ send_msg.netfn = IPMI_NETFN_SENSOR_EVT >> 2;
+ send_msg.cmd = IPMI_CMD_GET_EVENT_RECEIVER;
+ send_msg.data = NULL;
+ send_msg.data_len = 0;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv)
+ goto out;
+ aer_addr = halt_recv_msg.msg.data[1];
+ aer_lun = halt_recv_msg.msg.data[2];
+
+ /*
+ * Setup IPMB address target instead of local target
+ */
+ ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr.channel = 0;
+ ipmb_addr.slave_addr = aer_addr;
+ ipmb_addr.lun = aer_lun;
+
+ /*
+ * Send request hotswap control to remove blade from dpv
+ */
+ send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
+ send_msg.cmd = OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL;
+ send_msg.data = &hotswap_ipmb;
+ send_msg.data_len = 1;
+ ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &ipmb_addr,
+ &send_msg);
+
+ /*
+ * Set reset asserted
+ */
+ send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
+ send_msg.cmd = OEM_GRP_CMD_SET_RESET_STATE;
+ send_msg.data = data;
+ data[0] = 1; /* Reset asserted state */
+ send_msg.data_len = 1;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv)
+ goto out;
+
+ /*
+ * Power down
+ */
+ send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
+ send_msg.cmd = OEM_GRP_CMD_SET_POWER_STATE;
+ send_msg.data = data;
+ data[0] = 1; /* Power down state */
+ send_msg.data_len = 1;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv)
+ goto out;
+
+ out:
+ return;
+}
+
+/*
+ * ipmi_dell_chassis_detect()
+ * Dell systems with IPMI < 1.5 don't set the chassis capability bit
+ * but they can handle a chassis poweroff or powercycle command.
+ */
+
+#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
+static int ipmi_dell_chassis_detect(struct ipmi_user *user)
+{
+ const char ipmi_version_major = ipmi_version & 0xF;
+ const char ipmi_version_minor = (ipmi_version >> 4) & 0xF;
+ const char mfr[3] = DELL_IANA_MFR_ID;
+ if (!memcmp(mfr, &mfg_id, sizeof(mfr)) &&
+ ipmi_version_major <= 1 &&
+ ipmi_version_minor < 5)
+ return 1;
+ return 0;
+}
+
+/*
+ * ipmi_hp_chassis_detect()
+ * HP PA-RISC servers rp3410/rp3440, the C8000 workstation and the rx2600 and
+ * zx6000 machines support IPMI vers 1 and don't set the chassis capability bit
+ * but they can handle a chassis poweroff or powercycle command.
+ */
+
+#define HP_IANA_MFR_ID 0x0b
+#define HP_BMC_PROD_ID 0x8201
+static int ipmi_hp_chassis_detect(struct ipmi_user *user)
+{
+ if (mfg_id == HP_IANA_MFR_ID
+ && prod_id == HP_BMC_PROD_ID
+ && ipmi_version == 1)
+ return 1;
+ return 0;
+}
+
+/*
+ * Standard chassis support
+ */
+
+#define IPMI_NETFN_CHASSIS_REQUEST 0
+#define IPMI_CHASSIS_CONTROL_CMD 0x02
+
+static int ipmi_chassis_detect(struct ipmi_user *user)
+{
+ /* Chassis support, use it. */
+ return (capabilities & 0x80);
+}
+
+static void ipmi_poweroff_chassis(struct ipmi_user *user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ unsigned char data[1];
+
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ powercyclefailed:
+ pr_info("Powering %s via IPMI chassis control command\n",
+ (poweroff_powercycle ? "cycle" : "down"));
+
+ /*
+ * Power down
+ */
+ send_msg.netfn = IPMI_NETFN_CHASSIS_REQUEST;
+ send_msg.cmd = IPMI_CHASSIS_CONTROL_CMD;
+ if (poweroff_powercycle)
+ data[0] = IPMI_CHASSIS_POWER_CYCLE;
+ else
+ data[0] = IPMI_CHASSIS_POWER_DOWN;
+ send_msg.data = data;
+ send_msg.data_len = sizeof(data);
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv) {
+ if (poweroff_powercycle) {
+ /* power cycle failed, default to power down */
+ pr_err("Unable to send chassis power cycle message, IPMI error 0x%x\n",
+ rv);
+ poweroff_powercycle = 0;
+ goto powercyclefailed;
+ }
+
+ pr_err("Unable to send chassis power down message, IPMI error 0x%x\n",
+ rv);
+ }
+}
+
+
+/* Table of possible power off functions. */
+struct poweroff_function {
+ char *platform_type;
+ int (*detect)(struct ipmi_user *user);
+ void (*poweroff_func)(struct ipmi_user *user);
+};
+
+static struct poweroff_function poweroff_functions[] = {
+ { .platform_type = "ATCA",
+ .detect = ipmi_atca_detect,
+ .poweroff_func = ipmi_poweroff_atca },
+ { .platform_type = "CPI1",
+ .detect = ipmi_cpi1_detect,
+ .poweroff_func = ipmi_poweroff_cpi1 },
+ { .platform_type = "chassis",
+ .detect = ipmi_dell_chassis_detect,
+ .poweroff_func = ipmi_poweroff_chassis },
+ { .platform_type = "chassis",
+ .detect = ipmi_hp_chassis_detect,
+ .poweroff_func = ipmi_poweroff_chassis },
+ /* Chassis should generally be last, other things should override
+ it. */
+ { .platform_type = "chassis",
+ .detect = ipmi_chassis_detect,
+ .poweroff_func = ipmi_poweroff_chassis },
+};
+#define NUM_PO_FUNCS ARRAY_SIZE(poweroff_functions)
+
+
+/* Called on a powerdown request. */
+static void ipmi_poweroff_function(void)
+{
+ if (!ready)
+ return;
+
+ /* Use run-to-completion mode, since interrupts may be off. */
+ specific_poweroff_func(ipmi_user);
+}
+
+/* Wait for an IPMI interface to be installed, the first one installed
+ will be grabbed by this code and used to perform the powerdown. */
+static void ipmi_po_new_smi(int if_num, struct device *device)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ int i;
+
+ if (ready)
+ return;
+
+ if ((ifnum_to_use >= 0) && (ifnum_to_use != if_num))
+ return;
+
+ rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL,
+ &ipmi_user);
+ if (rv) {
+ pr_err("could not create IPMI user, error %d\n", rv);
+ return;
+ }
+
+ ipmi_ifnum = if_num;
+
+ /*
+	 * Do a get device id and store some results, since this is
+ * used by several functions.
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ send_msg.netfn = IPMI_NETFN_APP_REQUEST;
+ send_msg.cmd = IPMI_GET_DEVICE_ID_CMD;
+ send_msg.data = NULL;
+ send_msg.data_len = 0;
+ rv = ipmi_request_wait_for_response(ipmi_user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv) {
+ pr_err("Unable to send IPMI get device id info, IPMI error 0x%x\n",
+ rv);
+ goto out_err;
+ }
+
+ if (halt_recv_msg.msg.data_len < 12) {
+ pr_err("(chassis) IPMI get device id info too short, was %d bytes, needed %d bytes\n",
+ halt_recv_msg.msg.data_len, 12);
+ goto out_err;
+ }
+
+ mfg_id = (halt_recv_msg.msg.data[7]
+ | (halt_recv_msg.msg.data[8] << 8)
+ | (halt_recv_msg.msg.data[9] << 16));
+ prod_id = (halt_recv_msg.msg.data[10]
+ | (halt_recv_msg.msg.data[11] << 8));
+ capabilities = halt_recv_msg.msg.data[6];
+ ipmi_version = halt_recv_msg.msg.data[5];
+
+
+ /* Scan for a poweroff method */
+ for (i = 0; i < NUM_PO_FUNCS; i++) {
+ if (poweroff_functions[i].detect(ipmi_user))
+ goto found;
+ }
+
+ out_err:
+ pr_err("Unable to find a poweroff function that will work, giving up\n");
+ ipmi_destroy_user(ipmi_user);
+ return;
+
+ found:
+ pr_info("Found a %s style poweroff function\n",
+ poweroff_functions[i].platform_type);
+ specific_poweroff_func = poweroff_functions[i].poweroff_func;
+ old_poweroff_func = pm_power_off;
+ pm_power_off = ipmi_poweroff_function;
+ ready = 1;
+}
+
+static void ipmi_po_smi_gone(int if_num)
+{
+ if (!ready)
+ return;
+
+ if (ipmi_ifnum != if_num)
+ return;
+
+ ready = 0;
+ ipmi_destroy_user(ipmi_user);
+ pm_power_off = old_poweroff_func;
+}
+
+static struct ipmi_smi_watcher smi_watcher = {
+ .owner = THIS_MODULE,
+ .new_smi = ipmi_po_new_smi,
+ .smi_gone = ipmi_po_smi_gone
+};
+
+
+#ifdef CONFIG_PROC_FS
+#include <linux/sysctl.h>
+
+static struct ctl_table ipmi_table[] = {
+ { .procname = "poweroff_powercycle",
+ .data = &poweroff_powercycle,
+ .maxlen = sizeof(poweroff_powercycle),
+ .mode = 0644,
+ .proc_handler = proc_dointvec },
+ { }
+};
+
+static struct ctl_table_header *ipmi_table_header;
+#endif /* CONFIG_PROC_FS */
+
+/*
+ * Startup and shutdown functions.
+ */
+static int __init ipmi_poweroff_init(void)
+{
+ int rv;
+
+ pr_info("Copyright (C) 2004 MontaVista Software - IPMI Powerdown via sys_reboot\n");
+
+ if (poweroff_powercycle)
+ pr_info("Power cycle is enabled\n");
+
+#ifdef CONFIG_PROC_FS
+ ipmi_table_header = register_sysctl("dev/ipmi", ipmi_table);
+ if (!ipmi_table_header) {
+ pr_err("Unable to register powercycle sysctl\n");
+ rv = -ENOMEM;
+ goto out_err;
+ }
+#endif
+
+ rv = ipmi_smi_watcher_register(&smi_watcher);
+
+#ifdef CONFIG_PROC_FS
+ if (rv) {
+ unregister_sysctl_table(ipmi_table_header);
+ pr_err("Unable to register SMI watcher: %d\n", rv);
+ goto out_err;
+ }
+
+ out_err:
+#endif
+ return rv;
+}
+
+#ifdef MODULE
+static void __exit ipmi_poweroff_cleanup(void)
+{
+ int rv;
+
+#ifdef CONFIG_PROC_FS
+ unregister_sysctl_table(ipmi_table_header);
+#endif
+
+ ipmi_smi_watcher_unregister(&smi_watcher);
+
+ if (ready) {
+ rv = ipmi_destroy_user(ipmi_user);
+ if (rv)
+ pr_err("could not cleanup the IPMI user: 0x%x\n", rv);
+ pm_power_off = old_poweroff_func;
+ }
+}
+module_exit(ipmi_poweroff_cleanup);
+#endif
+
+module_init(ipmi_poweroff_init);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("IPMI Poweroff extension to sys_reboot");
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
new file mode 100644
index 0000000000..a7ead2a4c7
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ipmi_si.h
+ *
+ * Interface from the device-specific interfaces (OF, DMI, ACPI, PCI,
+ * etc) to the base ipmi system interface code.
+ */
+
+#ifndef __IPMI_SI_H__
+#define __IPMI_SI_H__
+
+#include <linux/ipmi.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#define SI_DEVICE_NAME "ipmi_si"
+
+#define DEFAULT_REGSPACING 1
+#define DEFAULT_REGSIZE 1
+
+/* Numbers in this enumerator should be mapped to si_to_str[] */
+enum si_type {
+ SI_TYPE_INVALID, SI_KCS, SI_SMIC, SI_BT, SI_TYPE_MAX
+};
+
+/* Array is defined in the ipmi_si_intf.c */
+extern const char *const si_to_str[];
+
+enum ipmi_addr_space {
+ IPMI_IO_ADDR_SPACE, IPMI_MEM_ADDR_SPACE
+};
+
+/*
+ * The structure for doing I/O in the state machine. The state
+ * machine doesn't have the actual I/O routines, they are done through
+ * this interface.
+ */
+struct si_sm_io {
+ unsigned char (*inputb)(const struct si_sm_io *io, unsigned int offset);
+ void (*outputb)(const struct si_sm_io *io,
+ unsigned int offset,
+ unsigned char b);
+
+ /*
+ * Generic info used by the actual handling routines, the
+ * state machine shouldn't touch these.
+ */
+ void __iomem *addr;
+ unsigned int regspacing;
+ unsigned int regsize;
+ unsigned int regshift;
+ enum ipmi_addr_space addr_space;
+ unsigned long addr_data;
+ enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
+ union ipmi_smi_info_union addr_info;
+
+ int (*io_setup)(struct si_sm_io *info);
+ void (*io_cleanup)(struct si_sm_io *info);
+ unsigned int io_size;
+
+ int irq;
+ int (*irq_setup)(struct si_sm_io *io);
+ void *irq_handler_data;
+ void (*irq_cleanup)(struct si_sm_io *io);
+
+ u8 slave_addr;
+ enum si_type si_type;
+ struct device *dev;
+};
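+
+/*
+ * Illustrative sketch only; the real accessors live in the bus-specific
+ * files (for example ipmi_si_port_io.c). A port-I/O backend would supply
+ * inputb/outputb along these lines:
+ *
+ *	static unsigned char port_inb(const struct si_sm_io *io,
+ *				      unsigned int offset)
+ *	{
+ *		return inb(io->addr_data + (offset * io->regspacing));
+ *	}
+ *
+ *	static void port_outb(const struct si_sm_io *io, unsigned int offset,
+ *			      unsigned char b)
+ *	{
+ *		outb(b, io->addr_data + (offset * io->regspacing));
+ *	}
+ */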
+
+int ipmi_si_add_smi(struct si_sm_io *io);
+irqreturn_t ipmi_si_irq_handler(int irq, void *data);
+void ipmi_irq_start_cleanup(struct si_sm_io *io);
+int ipmi_std_irq_setup(struct si_sm_io *io);
+void ipmi_irq_finish_setup(struct si_sm_io *io);
+void ipmi_si_remove_by_dev(struct device *dev);
+struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
+ unsigned long addr);
+void ipmi_hardcode_init(void);
+void ipmi_si_hardcode_exit(void);
+void ipmi_si_hotmod_exit(void);
+int ipmi_si_hardcode_match(int addr_space, unsigned long addr);
+void ipmi_si_platform_init(void);
+void ipmi_si_platform_shutdown(void);
+void ipmi_remove_platform_device_by_name(char *name);
+
+extern struct platform_driver ipmi_platform_driver;
+
+#ifdef CONFIG_PCI
+void ipmi_si_pci_init(void);
+void ipmi_si_pci_shutdown(void);
+#else
+static inline void ipmi_si_pci_init(void) { }
+static inline void ipmi_si_pci_shutdown(void) { }
+#endif
+#ifdef CONFIG_PARISC
+void ipmi_si_parisc_init(void);
+void ipmi_si_parisc_shutdown(void);
+#else
+static inline void ipmi_si_parisc_init(void) { }
+static inline void ipmi_si_parisc_shutdown(void) { }
+#endif
+
+int ipmi_si_port_setup(struct si_sm_io *io);
+int ipmi_si_mem_setup(struct si_sm_io *io);
+
+#endif /* __IPMI_SI_H__ */
diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c
new file mode 100644
index 0000000000..ed5e91b1e0
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_hardcode.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#define pr_fmt(fmt) "ipmi_hardcode: " fmt
+
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include "ipmi_si.h"
+#include "ipmi_plat_data.h"
+
+/*
+ * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
+ * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_PARMS.
+ */
+
+#define SI_MAX_PARMS 4
+
+#define MAX_SI_TYPE_STR 30
+static char si_type_str[MAX_SI_TYPE_STR] __initdata;
+static unsigned long addrs[SI_MAX_PARMS];
+static unsigned int num_addrs;
+static unsigned int ports[SI_MAX_PARMS];
+static unsigned int num_ports;
+static int irqs[SI_MAX_PARMS] __initdata;
+static unsigned int num_irqs __initdata;
+static int regspacings[SI_MAX_PARMS] __initdata;
+static unsigned int num_regspacings __initdata;
+static int regsizes[SI_MAX_PARMS] __initdata;
+static unsigned int num_regsizes __initdata;
+static int regshifts[SI_MAX_PARMS] __initdata;
+static unsigned int num_regshifts __initdata;
+static int slave_addrs[SI_MAX_PARMS] __initdata;
+static unsigned int num_slave_addrs __initdata;
+
+module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
+MODULE_PARM_DESC(type,
+ "Defines the type of each interface, each interface separated by commas. The types are 'kcs', 'smic', and 'bt'. For example si_type=kcs,bt will set the first interface to kcs and the second to bt");
+module_param_hw_array(addrs, ulong, iomem, &num_addrs, 0);
+MODULE_PARM_DESC(addrs,
+ "Sets the memory address of each interface, the addresses separated by commas. Only use if an interface is in memory. Otherwise, set it to zero or leave it blank.");
+module_param_hw_array(ports, uint, ioport, &num_ports, 0);
+MODULE_PARM_DESC(ports,
+ "Sets the port address of each interface, the addresses separated by commas. Only use if an interface is a port. Otherwise, set it to zero or leave it blank.");
+module_param_hw_array(irqs, int, irq, &num_irqs, 0);
+MODULE_PARM_DESC(irqs,
+ "Sets the interrupt of each interface, the addresses separated by commas. Only use if an interface has an interrupt. Otherwise, set it to zero or leave it blank.");
+module_param_hw_array(regspacings, int, other, &num_regspacings, 0);
+MODULE_PARM_DESC(regspacings,
+ "The number of bytes between the start address and each successive register used by the interface. For instance, if the start address is 0xca2 and the spacing is 2, then the second address is at 0xca4. Defaults to 1.");
+module_param_hw_array(regsizes, int, other, &num_regsizes, 0);
+MODULE_PARM_DESC(regsizes,
+		 "The size of the specific IPMI register in bytes. This should generally be 1, 2, 4, or 8 for an 8-bit, 16-bit, 32-bit, or 64-bit register. Use this if the 8-bit IPMI register has to be read from a larger register.");
+module_param_hw_array(regshifts, int, other, &num_regshifts, 0);
+MODULE_PARM_DESC(regshifts,
+		 "The amount to shift the data read from the IPMI register, in bits. For instance, if the data is read from a 32-bit word and the IPMI data is in bits 8-15, then the shift would be 8");
+module_param_hw_array(slave_addrs, int, other, &num_slave_addrs, 0);
+MODULE_PARM_DESC(slave_addrs,
+ "Set the default IPMB slave address for the controller. Normally this is 0x20, but can be overridden by this parm. This is an array indexed by interface number.");
+
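+/*
+ * Purely illustrative example (the addresses are hypothetical): hardcoding
+ * a KCS interface at I/O port 0xca2 and a BT interface at memory address
+ * 0xf0000000 could look like:
+ *
+ *	modprobe ipmi_si type=kcs,bt ports=0xca2,0 addrs=0,0xf0000000
+ */
+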
+static void __init ipmi_hardcode_init_one(const char *si_type_str,
+ unsigned int i,
+ unsigned long addr,
+ enum ipmi_addr_space addr_space)
+{
+ struct ipmi_plat_data p;
+ int t;
+
+ memset(&p, 0, sizeof(p));
+
+ p.iftype = IPMI_PLAT_IF_SI;
+ if (!si_type_str || !*si_type_str) {
+ p.type = SI_KCS;
+ } else {
+ t = match_string(si_to_str, -1, si_type_str);
+ if (t < 0) {
+ pr_warn("Interface type specified for interface %d, was invalid: %s\n",
+ i, si_type_str);
+ return;
+ }
+ p.type = t;
+ }
+
+ p.regsize = regsizes[i];
+ p.slave_addr = slave_addrs[i];
+ p.addr_source = SI_HARDCODED;
+ p.regshift = regshifts[i];
+ p.regsize = regsizes[i];
+ p.addr = addr;
+ p.space = addr_space;
+
+ ipmi_platform_add("hardcode-ipmi-si", i, &p);
+}
+
+void __init ipmi_hardcode_init(void)
+{
+ unsigned int i;
+ char *str;
+ char *si_type[SI_MAX_PARMS];
+
+ memset(si_type, 0, sizeof(si_type));
+
+ /* Parse out the si_type string into its components. */
+ str = si_type_str;
+ if (*str != '\0') {
+ for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
+ si_type[i] = str;
+ str = strchr(str, ',');
+ if (str) {
+ *str = '\0';
+ str++;
+ } else {
+ break;
+ }
+ }
+ }
+
+ for (i = 0; i < SI_MAX_PARMS; i++) {
+ if (i < num_ports && ports[i])
+ ipmi_hardcode_init_one(si_type[i], i, ports[i],
+ IPMI_IO_ADDR_SPACE);
+ if (i < num_addrs && addrs[i])
+ ipmi_hardcode_init_one(si_type[i], i, addrs[i],
+ IPMI_MEM_ADDR_SPACE);
+ }
+}
+
+
+void ipmi_si_hardcode_exit(void)
+{
+ ipmi_remove_platform_device_by_name("hardcode-ipmi-si");
+}
+
+/*
+ * Returns true if the given address exists as a hardcoded address,
+ * false if not.
+ */
+int ipmi_si_hardcode_match(int addr_space, unsigned long addr)
+{
+ unsigned int i;
+
+ if (addr_space == IPMI_IO_ADDR_SPACE) {
+ for (i = 0; i < num_ports; i++) {
+ if (ports[i] == addr)
+ return 1;
+ }
+ } else {
+ for (i = 0; i < num_addrs; i++) {
+ if (addrs[i] == addr)
+ return 1;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/char/ipmi/ipmi_si_hotmod.c b/drivers/char/ipmi/ipmi_si_hotmod.c
new file mode 100644
index 0000000000..6b12a83ccd
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_hotmod.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_si_hotmod.c
+ *
+ * Handling for dynamically adding/removing IPMI devices through
+ * a module parameter (and thus sysfs).
+ */
+
+#define pr_fmt(fmt) "ipmi_hotmod: " fmt
+
+#include <linux/moduleparam.h>
+#include <linux/ipmi.h>
+#include <linux/atomic.h>
+#include "ipmi_si.h"
+#include "ipmi_plat_data.h"
+
+static int hotmod_handler(const char *val, const struct kernel_param *kp);
+
+module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
+MODULE_PARM_DESC(hotmod,
+ "Add and remove interfaces. See Documentation/driver-api/ipmi.rst in the kernel sources for the gory details.");
+
+/*
+ * Parms come in as <op1>[:op2[:op3...]]. ops are:
+ * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
+ * Options are:
+ * rsp=<regspacing>
+ * rsi=<regsize>
+ * rsh=<regshift>
+ * irq=<irq>
+ * ipmb=<ipmb addr>
+ */
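+/*
+ * Illustrative only (port and IRQ values are hypothetical): adding a KCS
+ * interface at I/O port 0xca2 and later removing it might look like:
+ *
+ *	echo "add,kcs,i/o,0xca2,rsp=2,irq=9" > /sys/module/ipmi_si/parameters/hotmod
+ *	echo "remove,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
+ */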
+enum hotmod_op { HM_ADD, HM_REMOVE };
+struct hotmod_vals {
+ const char *name;
+ const int val;
+};
+
+static const struct hotmod_vals hotmod_ops[] = {
+ { "add", HM_ADD },
+ { "remove", HM_REMOVE },
+ { NULL }
+};
+
+static const struct hotmod_vals hotmod_si[] = {
+ { "kcs", SI_KCS },
+ { "smic", SI_SMIC },
+ { "bt", SI_BT },
+ { NULL }
+};
+
+static const struct hotmod_vals hotmod_as[] = {
+ { "mem", IPMI_MEM_ADDR_SPACE },
+ { "i/o", IPMI_IO_ADDR_SPACE },
+ { NULL }
+};
+
+static int parse_str(const struct hotmod_vals *v, unsigned int *val, char *name,
+ const char **curr)
+{
+ char *s;
+ int i;
+
+ s = strchr(*curr, ',');
+ if (!s) {
+ pr_warn("No hotmod %s given\n", name);
+ return -EINVAL;
+ }
+ *s = '\0';
+ s++;
+ for (i = 0; v[i].name; i++) {
+ if (strcmp(*curr, v[i].name) == 0) {
+ *val = v[i].val;
+ *curr = s;
+ return 0;
+ }
+ }
+
+ pr_warn("Invalid hotmod %s '%s'\n", name, *curr);
+ return -EINVAL;
+}
+
+static int check_hotmod_int_op(const char *curr, const char *option,
+ const char *name, unsigned int *val)
+{
+ char *n;
+
+ if (strcmp(curr, name) == 0) {
+ if (!option) {
+ pr_warn("No option given for '%s'\n", curr);
+ return -EINVAL;
+ }
+ *val = simple_strtoul(option, &n, 0);
+ if ((*n != '\0') || (*option == '\0')) {
+ pr_warn("Bad option given for '%s'\n", curr);
+ return -EINVAL;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+static int parse_hotmod_str(const char *curr, enum hotmod_op *op,
+ struct ipmi_plat_data *h)
+{
+ char *s, *o;
+ int rv;
+ unsigned int ival;
+
+ h->iftype = IPMI_PLAT_IF_SI;
+ rv = parse_str(hotmod_ops, &ival, "operation", &curr);
+ if (rv)
+ return rv;
+ *op = ival;
+
+ rv = parse_str(hotmod_si, &ival, "interface type", &curr);
+ if (rv)
+ return rv;
+ h->type = ival;
+
+ rv = parse_str(hotmod_as, &ival, "address space", &curr);
+ if (rv)
+ return rv;
+ h->space = ival;
+
+ s = strchr(curr, ',');
+ if (s) {
+ *s = '\0';
+ s++;
+ }
+ rv = kstrtoul(curr, 0, &h->addr);
+ if (rv) {
+ pr_warn("Invalid hotmod address '%s': %d\n", curr, rv);
+ return rv;
+ }
+
+ while (s) {
+ curr = s;
+ s = strchr(curr, ',');
+ if (s) {
+ *s = '\0';
+ s++;
+ }
+ o = strchr(curr, '=');
+ if (o) {
+ *o = '\0';
+ o++;
+ }
+ rv = check_hotmod_int_op(curr, o, "rsp", &h->regspacing);
+ if (rv < 0)
+ return rv;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "rsi", &h->regsize);
+ if (rv < 0)
+ return rv;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "rsh", &h->regshift);
+ if (rv < 0)
+ return rv;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "irq", &h->irq);
+ if (rv < 0)
+ return rv;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "ipmb", &h->slave_addr);
+ if (rv < 0)
+ return rv;
+ else if (rv)
+ continue;
+
+ pr_warn("Invalid hotmod option '%s'\n", curr);
+ return -EINVAL;
+ }
+
+ h->addr_source = SI_HOTMOD;
+ return 0;
+}
+
+static atomic_t hotmod_nr;
+
+static int hotmod_handler(const char *val, const struct kernel_param *kp)
+{
+ int rv;
+ struct ipmi_plat_data h;
+ char *str, *curr, *next;
+
+ str = kstrdup(val, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+
+ /* Kill any trailing spaces, as we can get a "\n" from echo. */
+ for (curr = strstrip(str); curr; curr = next) {
+ enum hotmod_op op;
+
+ next = strchr(curr, ':');
+ if (next) {
+ *next = '\0';
+ next++;
+ }
+
+ memset(&h, 0, sizeof(h));
+ rv = parse_hotmod_str(curr, &op, &h);
+ if (rv)
+ goto out;
+
+ if (op == HM_ADD) {
+ ipmi_platform_add("hotmod-ipmi-si",
+ atomic_inc_return(&hotmod_nr),
+ &h);
+ } else {
+ struct device *dev;
+
+ dev = ipmi_si_remove_by_data(h.space, h.type, h.addr);
+ if (dev && dev_is_platform(dev)) {
+ struct platform_device *pdev;
+
+ pdev = to_platform_device(dev);
+ if (strcmp(pdev->name, "hotmod-ipmi-si") == 0)
+ platform_device_unregister(pdev);
+ }
+ put_device(dev);
+ }
+ }
+ rv = strlen(val);
+out:
+ kfree(str);
+ return rv;
+}
+
+void ipmi_si_hotmod_exit(void)
+{
+ ipmi_remove_platform_device_by_name("hotmod-ipmi-si");
+}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
new file mode 100644
index 0000000000..5cd031f3fc
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -0,0 +1,2307 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_si.c
+ *
+ * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
+ * BT).
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
+ */
+
+/*
+ * This file holds the "policy" for the interface to the SMI state
+ * machine. It does the configuration, handles timers and interrupts,
+ * and drives the real SMI state machine.
+ */
+
+#define pr_fmt(fmt) "ipmi_si: " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <asm/irq.h>
+#include <linux/interrupt.h>
+#include <linux/rcupdate.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include "ipmi_si.h"
+#include "ipmi_si_sm.h"
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+/* Measure times between events in the driver. */
+#undef DEBUG_TIMING
+
+/* Call every 10 ms. */
+#define SI_TIMEOUT_TIME_USEC 10000
+#define SI_USEC_PER_JIFFY (1000000/HZ)
+#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
+#define SI_SHORT_TIMEOUT_USEC	250 /* .25ms when the SM requests a
+ short timeout */
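+/*
+ * With HZ=250, for instance, SI_USEC_PER_JIFFY is 4000 and
+ * SI_TIMEOUT_JIFFIES works out to 2; with HZ=1000 it is 10.
+ */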
+
+enum si_intf_state {
+ SI_NORMAL,
+ SI_GETTING_FLAGS,
+ SI_GETTING_EVENTS,
+ SI_CLEARING_FLAGS,
+ SI_GETTING_MESSAGES,
+ SI_CHECKING_ENABLES,
+ SI_SETTING_ENABLES
+ /* FIXME - add watchdog stuff. */
+};
+
+/* Some BT-specific defines we need here. */
+#define IPMI_BT_INTMASK_REG 2
+#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
+#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
+
+/* 'invalid' to allow a firmware-specified interface to be disabled */
+const char *const si_to_str[] = { "invalid", "kcs", "smic", "bt", NULL };
+
+static bool initialized;
+
+/*
+ * Indexes into stats[] in smi_info below.
+ */
+enum si_stat_indexes {
+ /*
+ * Number of times the driver requested a timer while an operation
+ * was in progress.
+ */
+ SI_STAT_short_timeouts = 0,
+
+ /*
+ * Number of times the driver requested a timer while nothing was in
+ * progress.
+ */
+ SI_STAT_long_timeouts,
+
+ /* Number of times the interface was idle while being polled. */
+ SI_STAT_idles,
+
+ /* Number of interrupts the driver handled. */
+ SI_STAT_interrupts,
+
+	/* Number of times the driver got an ATTN from the hardware. */
+ SI_STAT_attentions,
+
+ /* Number of times the driver requested flags from the hardware. */
+ SI_STAT_flag_fetches,
+
+ /* Number of times the hardware didn't follow the state machine. */
+ SI_STAT_hosed_count,
+
+ /* Number of completed messages. */
+ SI_STAT_complete_transactions,
+
+ /* Number of IPMI events received from the hardware. */
+ SI_STAT_events,
+
+ /* Number of watchdog pretimeouts. */
+ SI_STAT_watchdog_pretimeouts,
+
+ /* Number of asynchronous messages received. */
+ SI_STAT_incoming_messages,
+
+
+ /* This *must* remain last, add new values above this. */
+ SI_NUM_STATS
+};
+
+struct smi_info {
+ int si_num;
+ struct ipmi_smi *intf;
+ struct si_sm_data *si_sm;
+ const struct si_sm_handlers *handlers;
+ spinlock_t si_lock;
+ struct ipmi_smi_msg *waiting_msg;
+ struct ipmi_smi_msg *curr_msg;
+ enum si_intf_state si_state;
+
+ /*
+ * Used to handle the various types of I/O that can occur with
+ * IPMI
+ */
+ struct si_sm_io io;
+
+ /*
+ * Per-OEM handler, called from handle_flags(). Returns 1
+ * when handle_flags() needs to be re-run or 0 indicating it
+ * set si_state itself.
+ */
+ int (*oem_data_avail_handler)(struct smi_info *smi_info);
+
+ /*
+ * Flags from the last GET_MSG_FLAGS command, used when an ATTN
+ * is set to hold the flags until we are done handling everything
+ * from the flags.
+ */
+#define RECEIVE_MSG_AVAIL 0x01
+#define EVENT_MSG_BUFFER_FULL 0x02
+#define WDT_PRE_TIMEOUT_INT 0x08
+#define OEM0_DATA_AVAIL 0x20
+#define OEM1_DATA_AVAIL 0x40
+#define OEM2_DATA_AVAIL 0x80
+#define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
+ OEM1_DATA_AVAIL | \
+ OEM2_DATA_AVAIL)
+ unsigned char msg_flags;
+
+ /* Does the BMC have an event buffer? */
+ bool has_event_buffer;
+
+ /*
+ * If set to true, this will request events the next time the
+ * state machine is idle.
+ */
+ atomic_t req_events;
+
+ /*
+ * If true, run the state machine to completion on every send
+ * call. Generally used after a panic to make sure stuff goes
+ * out.
+ */
+ bool run_to_completion;
+
+ /* The timer for this si. */
+ struct timer_list si_timer;
+
+	/* This flag is set if the timer can be set */
+ bool timer_can_start;
+
+	/* This flag is set if the timer is running (timer_pending() isn't enough) */
+ bool timer_running;
+
+ /* The time (in jiffies) the last timeout occurred at. */
+ unsigned long last_timeout_jiffies;
+
+	/* Are we waiting for events, pretimeouts, or received msgs? */
+ atomic_t need_watch;
+
+ /*
+ * The driver will disable interrupts when it gets into a
+ * situation where it cannot handle messages due to lack of
+ * memory. Once that situation clears up, it will re-enable
+ * interrupts.
+ */
+ bool interrupt_disabled;
+
+ /*
+ * Does the BMC support events?
+ */
+ bool supports_event_msg_buff;
+
+ /*
+	 * Can we disable the receive irq bit in the global enables?
+	 * There are currently two forms of brokenness: some
+ * systems cannot disable the bit (which is technically within
+ * the spec but a bad idea) and some systems have the bit
+ * forced to zero even though interrupts work (which is
+ * clearly outside the spec). The next bool tells which form
+ * of brokenness is present.
+ */
+ bool cannot_disable_irq;
+
+ /*
+ * Some systems are broken and cannot set the irq enable
+ * bit, even if they support interrupts.
+ */
+ bool irq_enable_broken;
+
+ /* Is the driver in maintenance mode? */
+ bool in_maintenance_mode;
+
+ /*
+ * Did we get an attention that we did not handle?
+ */
+ bool got_attn;
+
+ /* From the get device id response... */
+ struct ipmi_device_id device_id;
+
+ /* Have we added the device group to the device? */
+ bool dev_group_added;
+
+ /* Counters and things for the proc filesystem. */
+ atomic_t stats[SI_NUM_STATS];
+
+ struct task_struct *thread;
+
+ struct list_head link;
+};
+
+#define smi_inc_stat(smi, stat) \
+ atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
+#define smi_get_stat(smi, stat) \
+ ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
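+/*
+ * For example, smi_inc_stat(smi, attentions) expands to
+ * atomic_inc(&smi->stats[SI_STAT_attentions]).
+ */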
+
+#define IPMI_MAX_INTFS 4
+static int force_kipmid[IPMI_MAX_INTFS];
+static int num_force_kipmid;
+
+static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
+static int num_max_busy_us;
+
+static bool unload_when_empty = true;
+
+static int try_smi_init(struct smi_info *smi);
+static void cleanup_one_si(struct smi_info *smi_info);
+static void cleanup_ipmi_si(void);
+
+#ifdef DEBUG_TIMING
+void debug_timestamp(struct smi_info *smi_info, char *msg)
+{
+ struct timespec64 t;
+
+ ktime_get_ts64(&t);
+ dev_dbg(smi_info->io.dev, "**%s: %lld.%9.9ld\n",
+ msg, t.tv_sec, t.tv_nsec);
+}
+#else
+#define debug_timestamp(smi_info, x)
+#endif
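+/*
+ * Define DEBUG_TIMING above to get timestamped traces (e.g.
+ * "**Enqueue: <sec>.<nsec>") via dev_dbg() at various points in the driver.
+ */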
+
+static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
+static int register_xaction_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&xaction_notifier_list, nb);
+}
+
+static void deliver_recv_msg(struct smi_info *smi_info,
+ struct ipmi_smi_msg *msg)
+{
+ /* Deliver the message to the upper layer. */
+ ipmi_smi_msg_received(smi_info->intf, msg);
+}
+
+static void return_hosed_msg(struct smi_info *smi_info, int cCode)
+{
+ struct ipmi_smi_msg *msg = smi_info->curr_msg;
+
+ if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
+ cCode = IPMI_ERR_UNSPECIFIED;
+ /* else use it as is */
+
+ /* Make it a response */
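+	/*
+	 * data[0] holds (netfn << 2) | LUN; ORing in 4 sets the low bit
+	 * of the netfn, turning the request netfn into the response netfn.
+	 */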
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = cCode;
+ msg->rsp_size = 3;
+
+ smi_info->curr_msg = NULL;
+ deliver_recv_msg(smi_info, msg);
+}
+
+static enum si_sm_result start_next_msg(struct smi_info *smi_info)
+{
+ int rv;
+
+ if (!smi_info->waiting_msg) {
+ smi_info->curr_msg = NULL;
+ rv = SI_SM_IDLE;
+ } else {
+ int err;
+
+ smi_info->curr_msg = smi_info->waiting_msg;
+ smi_info->waiting_msg = NULL;
+ debug_timestamp(smi_info, "Start2");
+ err = atomic_notifier_call_chain(&xaction_notifier_list,
+ 0, smi_info);
+ if (err & NOTIFY_STOP_MASK) {
+ rv = SI_SM_CALL_WITHOUT_DELAY;
+ goto out;
+ }
+ err = smi_info->handlers->start_transaction(
+ smi_info->si_sm,
+ smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ if (err)
+ return_hosed_msg(smi_info, err);
+
+ rv = SI_SM_CALL_WITHOUT_DELAY;
+ }
+out:
+ return rv;
+}
+
+static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+{
+ if (!smi_info->timer_can_start)
+ return;
+ smi_info->last_timeout_jiffies = jiffies;
+ mod_timer(&smi_info->si_timer, new_val);
+ smi_info->timer_running = true;
+}
+
+/*
+ * Start a new message and (re)start the timer and thread.
+ */
+static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
+ unsigned int size)
+{
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+ if (smi_info->thread)
+ wake_up_process(smi_info->thread);
+
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
+}
+
+static void start_check_enables(struct smi_info *smi_info)
+{
+ unsigned char msg[2];
+
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+
+ start_new_msg(smi_info, msg, 2);
+ smi_info->si_state = SI_CHECKING_ENABLES;
+}
+
+static void start_clear_flags(struct smi_info *smi_info)
+{
+ unsigned char msg[3];
+
+ /* Make sure the watchdog pre-timeout flag is not set at startup. */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+ msg[2] = WDT_PRE_TIMEOUT_INT;
+
+ start_new_msg(smi_info, msg, 3);
+ smi_info->si_state = SI_CLEARING_FLAGS;
+}
+
+static void start_getting_msg_queue(struct smi_info *smi_info)
+{
+ smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
+ smi_info->curr_msg->data_size = 2;
+
+ start_new_msg(smi_info, smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_MESSAGES;
+}
+
+static void start_getting_events(struct smi_info *smi_info)
+{
+ smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+ smi_info->curr_msg->data_size = 2;
+
+ start_new_msg(smi_info, smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_EVENTS;
+}
+
+/*
+ * When we have a situation where we run out of memory and cannot
+ * allocate messages, we just leave them in the BMC and run the system
+ * polled until we can allocate some memory. Once we have some
+ * memory, we will re-enable the interrupt.
+ *
+ * Note that we cannot just use disable_irq(), since the interrupt may
+ * be shared.
+ */
+static inline bool disable_si_irq(struct smi_info *smi_info)
+{
+ if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
+ smi_info->interrupt_disabled = true;
+ start_check_enables(smi_info);
+ return true;
+ }
+ return false;
+}
+
+static inline bool enable_si_irq(struct smi_info *smi_info)
+{
+ if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
+ smi_info->interrupt_disabled = false;
+ start_check_enables(smi_info);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Allocate a message. If unable to allocate, start the interrupt
+ * disable process and return NULL. If able to allocate but
+ * interrupts are disabled, free the message and return NULL after
+ * starting the interrupt enable process.
+ */
+static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
+{
+ struct ipmi_smi_msg *msg;
+
+ msg = ipmi_alloc_smi_msg();
+ if (!msg) {
+ if (!disable_si_irq(smi_info))
+ smi_info->si_state = SI_NORMAL;
+ } else if (enable_si_irq(smi_info)) {
+ ipmi_free_smi_msg(msg);
+ msg = NULL;
+ }
+ return msg;
+}
+
+static void handle_flags(struct smi_info *smi_info)
+{
+retry:
+ if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
+ /* Watchdog pre-timeout */
+ smi_inc_stat(smi_info, watchdog_pretimeouts);
+
+ start_clear_flags(smi_info);
+ smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
+ ipmi_smi_watchdog_pretimeout(smi_info->intf);
+ } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
+ /* Messages available. */
+ smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+ if (!smi_info->curr_msg)
+ return;
+
+ start_getting_msg_queue(smi_info);
+ } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
+ /* Events available. */
+ smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+ if (!smi_info->curr_msg)
+ return;
+
+ start_getting_events(smi_info);
+ } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
+ smi_info->oem_data_avail_handler) {
+ if (smi_info->oem_data_avail_handler(smi_info))
+ goto retry;
+ } else
+ smi_info->si_state = SI_NORMAL;
+}
+
+/*
+ * Global enables we care about.
+ */
+#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
+ IPMI_BMC_EVT_MSG_INTR)
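+/*
+ * Only these enable bits are managed by the driver; the SI_CHECKING_ENABLES
+ * handling below preserves whatever other bits the BMC reports.
+ */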
+
+static u8 current_global_enables(struct smi_info *smi_info, u8 base,
+ bool *irq_on)
+{
+ u8 enables = 0;
+
+ if (smi_info->supports_event_msg_buff)
+ enables |= IPMI_BMC_EVT_MSG_BUFF;
+
+ if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
+ smi_info->cannot_disable_irq) &&
+ !smi_info->irq_enable_broken)
+ enables |= IPMI_BMC_RCV_MSG_INTR;
+
+ if (smi_info->supports_event_msg_buff &&
+ smi_info->io.irq && !smi_info->interrupt_disabled &&
+ !smi_info->irq_enable_broken)
+ enables |= IPMI_BMC_EVT_MSG_INTR;
+
+ *irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);
+
+ return enables;
+}
+
+static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
+{
+ u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);
+
+ irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;
+
+ if ((bool)irqstate == irq_on)
+ return;
+
+ if (irq_on)
+ smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
+ IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+ else
+ smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
+}
+
+static void handle_transaction_done(struct smi_info *smi_info)
+{
+ struct ipmi_smi_msg *msg;
+
+ debug_timestamp(smi_info, "Done");
+ switch (smi_info->si_state) {
+ case SI_NORMAL:
+ if (!smi_info->curr_msg)
+ break;
+
+ smi_info->curr_msg->rsp_size
+ = smi_info->handlers->get_result(
+ smi_info->si_sm,
+ smi_info->curr_msg->rsp,
+ IPMI_MAX_MSG_LENGTH);
+
+ /*
+		 * Do this here because deliver_recv_msg() releases the
+ * lock, and a new message can be put in during the
+ * time the lock is released.
+ */
+ msg = smi_info->curr_msg;
+ smi_info->curr_msg = NULL;
+ deliver_recv_msg(smi_info, msg);
+ break;
+
+ case SI_GETTING_FLAGS:
+ {
+ unsigned char msg[4];
+ unsigned int len;
+
+ /* We got the flags from the SMI, now handle them. */
+ len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+ if (msg[2] != 0) {
+ /* Error fetching flags, just give up for now. */
+ smi_info->si_state = SI_NORMAL;
+ } else if (len < 4) {
+ /*
+ * Hmm, no flags. That's technically illegal, but
+ * don't use uninitialized data.
+ */
+ smi_info->si_state = SI_NORMAL;
+ } else {
+ smi_info->msg_flags = msg[3];
+ handle_flags(smi_info);
+ }
+ break;
+ }
+
+ case SI_CLEARING_FLAGS:
+ {
+ unsigned char msg[3];
+
+ /* We cleared the flags. */
+ smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
+ if (msg[2] != 0) {
+ /* Error clearing flags */
+ dev_warn_ratelimited(smi_info->io.dev,
+ "Error clearing flags: %2.2x\n", msg[2]);
+ }
+ smi_info->si_state = SI_NORMAL;
+ break;
+ }
+
+ case SI_GETTING_EVENTS:
+ {
+ smi_info->curr_msg->rsp_size
+ = smi_info->handlers->get_result(
+ smi_info->si_sm,
+ smi_info->curr_msg->rsp,
+ IPMI_MAX_MSG_LENGTH);
+
+ /*
+		 * Do this here because deliver_recv_msg() releases the
+ * lock, and a new message can be put in during the
+ * time the lock is released.
+ */
+ msg = smi_info->curr_msg;
+ smi_info->curr_msg = NULL;
+ if (msg->rsp[2] != 0) {
+ /* Error getting event, probably done. */
+ msg->done(msg);
+
+ /* Take off the event flag. */
+ smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+ handle_flags(smi_info);
+ } else {
+ smi_inc_stat(smi_info, events);
+
+ /*
+ * Do this before we deliver the message
+ * because delivering the message releases the
+ * lock and something else can mess with the
+ * state.
+ */
+ handle_flags(smi_info);
+
+ deliver_recv_msg(smi_info, msg);
+ }
+ break;
+ }
+
+ case SI_GETTING_MESSAGES:
+ {
+ smi_info->curr_msg->rsp_size
+ = smi_info->handlers->get_result(
+ smi_info->si_sm,
+ smi_info->curr_msg->rsp,
+ IPMI_MAX_MSG_LENGTH);
+
+ /*
+		 * Do this here because deliver_recv_msg() releases the
+ * lock, and a new message can be put in during the
+ * time the lock is released.
+ */
+ msg = smi_info->curr_msg;
+ smi_info->curr_msg = NULL;
+ if (msg->rsp[2] != 0) {
+			/* Error getting message, probably done. */
+ msg->done(msg);
+
+ /* Take off the msg flag. */
+ smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+ handle_flags(smi_info);
+ } else {
+ smi_inc_stat(smi_info, incoming_messages);
+
+ /*
+ * Do this before we deliver the message
+ * because delivering the message releases the
+ * lock and something else can mess with the
+ * state.
+ */
+ handle_flags(smi_info);
+
+ deliver_recv_msg(smi_info, msg);
+ }
+ break;
+ }
+
+ case SI_CHECKING_ENABLES:
+ {
+ unsigned char msg[4];
+ u8 enables;
+ bool irq_on;
+
+ /* We got the flags from the SMI, now handle them. */
+ smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+ if (msg[2] != 0) {
+ dev_warn_ratelimited(smi_info->io.dev,
+ "Couldn't get irq info: %x,\n"
+ "Maybe ok, but ipmi might run very slowly.\n",
+ msg[2]);
+ smi_info->si_state = SI_NORMAL;
+ break;
+ }
+ enables = current_global_enables(smi_info, 0, &irq_on);
+ if (smi_info->io.si_type == SI_BT)
+ /* BT has its own interrupt enable bit. */
+ check_bt_irq(smi_info, irq_on);
+ if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
+ /* Enables are not correct, fix them. */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm, msg, 3);
+ smi_info->si_state = SI_SETTING_ENABLES;
+ } else if (smi_info->supports_event_msg_buff) {
+ smi_info->curr_msg = ipmi_alloc_smi_msg();
+ if (!smi_info->curr_msg) {
+ smi_info->si_state = SI_NORMAL;
+ break;
+ }
+ start_getting_events(smi_info);
+ } else {
+ smi_info->si_state = SI_NORMAL;
+ }
+ break;
+ }
+
+ case SI_SETTING_ENABLES:
+ {
+ unsigned char msg[4];
+
+ smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+ if (msg[2] != 0)
+ dev_warn_ratelimited(smi_info->io.dev,
+ "Could not set the global enables: 0x%x.\n",
+ msg[2]);
+
+ if (smi_info->supports_event_msg_buff) {
+ smi_info->curr_msg = ipmi_alloc_smi_msg();
+ if (!smi_info->curr_msg) {
+ smi_info->si_state = SI_NORMAL;
+ break;
+ }
+ start_getting_events(smi_info);
+ } else {
+ smi_info->si_state = SI_NORMAL;
+ }
+ break;
+ }
+ }
+}
+
+/*
+ * Called on timeouts and events. Timeouts should pass the elapsed
+ * time, interrupts should pass in zero. Must be called with
+ * si_lock held and interrupts disabled.
+ */
+static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
+ int time)
+{
+ enum si_sm_result si_sm_result;
+
+restart:
+ /*
+ * There used to be a loop here that waited a little while
+ * (around 25us) before giving up. That turned out to be
+ * pointless, the minimum delays I was seeing were in the 300us
+ * range, which is far too long to wait in an interrupt. So
+ * we just run until the state machine tells us something
+ * happened or it needs a delay.
+ */
+ si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
+ time = 0;
+ while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
+ si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+
+ if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
+ smi_inc_stat(smi_info, complete_transactions);
+
+ handle_transaction_done(smi_info);
+ goto restart;
+ } else if (si_sm_result == SI_SM_HOSED) {
+ smi_inc_stat(smi_info, hosed_count);
+
+ /*
+		 * Do this before return_hosed_msg(), because that
+ * releases the lock.
+ */
+ smi_info->si_state = SI_NORMAL;
+ if (smi_info->curr_msg != NULL) {
+ /*
+ * If we were handling a user message, format
+ * a response to send to the upper layer to
+ * tell it about the error.
+ */
+ return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
+ }
+ goto restart;
+ }
+
+ /*
+ * We prefer handling attn over new messages. But don't do
+ * this if there is not yet an upper layer to handle anything.
+ */
+ if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
+ unsigned char msg[2];
+
+ if (smi_info->si_state != SI_NORMAL) {
+ /*
+ * We got an ATTN, but we are doing something else.
+ * Handle the ATTN later.
+ */
+ smi_info->got_attn = true;
+ } else {
+ smi_info->got_attn = false;
+ smi_inc_stat(smi_info, attentions);
+
+ /*
+			 * Got an attn, send down a get message flags to see
+ * what's causing it. It would be better to handle
+ * this in the upper layer, but due to the way
+ * interrupts work with the SMI, that's not really
+ * possible.
+ */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+ start_new_msg(smi_info, msg, 2);
+ smi_info->si_state = SI_GETTING_FLAGS;
+ goto restart;
+ }
+ }
+
+ /* If we are currently idle, try to start the next message. */
+ if (si_sm_result == SI_SM_IDLE) {
+ smi_inc_stat(smi_info, idles);
+
+ si_sm_result = start_next_msg(smi_info);
+ if (si_sm_result != SI_SM_IDLE)
+ goto restart;
+ }
+
+ if ((si_sm_result == SI_SM_IDLE)
+ && (atomic_read(&smi_info->req_events))) {
+ /*
+ * We are idle and the upper layer requested that I fetch
+ * events, so do so.
+ */
+ atomic_set(&smi_info->req_events, 0);
+
+ /*
+ * Take this opportunity to check the interrupt and
+ * message enable state for the BMC. The BMC can be
+ * asynchronously reset, and may thus get interrupts
+		 * disabled and messages disabled.
+ */
+ if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
+ start_check_enables(smi_info);
+ } else {
+ smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+ if (!smi_info->curr_msg)
+ goto out;
+
+ start_getting_events(smi_info);
+ }
+ goto restart;
+ }
+
+ if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
+		/* OK if it fails, the timer will just go off. */
+ if (del_timer(&smi_info->si_timer))
+ smi_info->timer_running = false;
+ }
+
+out:
+ return si_sm_result;
+}
+
+static void check_start_timer_thread(struct smi_info *smi_info)
+{
+ if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+ if (smi_info->thread)
+ wake_up_process(smi_info->thread);
+
+ start_next_msg(smi_info);
+ smi_event_handler(smi_info, 0);
+ }
+}
+
+static void flush_messages(void *send_info)
+{
+ struct smi_info *smi_info = send_info;
+ enum si_sm_result result;
+
+ /*
+ * Currently, this function is called only in run-to-completion
+ * mode. This means we are single-threaded, no need for locks.
+ */
+ result = smi_event_handler(smi_info, 0);
+ while (result != SI_SM_IDLE) {
+ udelay(SI_SHORT_TIMEOUT_USEC);
+ result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
+ }
+}
+
+static void sender(void *send_info,
+ struct ipmi_smi_msg *msg)
+{
+ struct smi_info *smi_info = send_info;
+ unsigned long flags;
+
+ debug_timestamp(smi_info, "Enqueue");
+
+ if (smi_info->run_to_completion) {
+ /*
+ * If we are running to completion, start it. Upper
+ * layer will call flush_messages to clear it out.
+ */
+ smi_info->waiting_msg = msg;
+ return;
+ }
+
+ spin_lock_irqsave(&smi_info->si_lock, flags);
+ /*
+ * The following two lines don't need to be under the lock for
+ * the lock's sake, but they do need SMP memory barriers to
+ * avoid getting things out of order. We are already claiming
+ * the lock, anyway, so just do it under the lock to avoid the
+ * ordering problem.
+ */
+ BUG_ON(smi_info->waiting_msg);
+ smi_info->waiting_msg = msg;
+ check_start_timer_thread(smi_info);
+ spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
+static void set_run_to_completion(void *send_info, bool i_run_to_completion)
+{
+ struct smi_info *smi_info = send_info;
+
+ smi_info->run_to_completion = i_run_to_completion;
+ if (i_run_to_completion)
+ flush_messages(smi_info);
+}
+
+/*
+ * Use -1 as a special constant to tell that we are spinning in kipmid
+ * looking for something and not delaying between checks
+ */
+#define IPMI_TIME_NOT_BUSY ns_to_ktime(-1ull)
+static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result,
+ const struct smi_info *smi_info,
+ ktime_t *busy_until)
+{
+ unsigned int max_busy_us = 0;
+
+ if (smi_info->si_num < num_max_busy_us)
+ max_busy_us = kipmid_max_busy_us[smi_info->si_num];
+ if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
+ *busy_until = IPMI_TIME_NOT_BUSY;
+ else if (*busy_until == IPMI_TIME_NOT_BUSY) {
+ *busy_until = ktime_get() + max_busy_us * NSEC_PER_USEC;
+ } else {
+ if (unlikely(ktime_get() > *busy_until)) {
+ *busy_until = IPMI_TIME_NOT_BUSY;
+ return false;
+ }
+ }
+ return true;
+}
+
+
+/*
+ * A busy-waiting loop for speeding up IPMI operation.
+ *
+ * Lousy hardware makes this hard. This is only enabled for systems
+ * that are not BT and do not have interrupts. It starts spinning
+ * when an operation is complete or until max_busy tells it to stop
+ * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
+ * Documentation/driver-api/ipmi.rst for details.
+ */
+static int ipmi_thread(void *data)
+{
+ struct smi_info *smi_info = data;
+ unsigned long flags;
+ enum si_sm_result smi_result;
+ ktime_t busy_until = IPMI_TIME_NOT_BUSY;
+
+ set_user_nice(current, MAX_NICE);
+ while (!kthread_should_stop()) {
+ int busy_wait;
+
+ spin_lock_irqsave(&(smi_info->si_lock), flags);
+ smi_result = smi_event_handler(smi_info, 0);
+
+ /*
+ * If the driver is doing something, there is a possible
+		 * race with the timer. If the timer handler sees idle,
+ * and the thread here sees something else, the timer
+ * handler won't restart the timer even though it is
+ * required. So start it here if necessary.
+ */
+ if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+ busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
+ &busy_until);
+ if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+ ; /* do nothing */
+ } else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
+ /*
+ * In maintenance mode we run as fast as
+ * possible to allow firmware updates to
+ * complete as fast as possible, but normally
+ * don't bang on the scheduler.
+ */
+ if (smi_info->in_maintenance_mode)
+ schedule();
+ else
+ usleep_range(100, 200);
+ } else if (smi_result == SI_SM_IDLE) {
+ if (atomic_read(&smi_info->need_watch)) {
+ schedule_timeout_interruptible(100);
+ } else {
+ /* Wait to be woken up when we are needed. */
+ __set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+ } else {
+ schedule_timeout_interruptible(1);
+ }
+ }
+ return 0;
+}
+
+
+static void poll(void *send_info)
+{
+ struct smi_info *smi_info = send_info;
+ unsigned long flags = 0;
+ bool run_to_completion = smi_info->run_to_completion;
+
+ /*
+ * Make sure there is some delay in the poll loop so we can
+ * drive time forward and timeout things.
+ */
+ udelay(10);
+ if (!run_to_completion)
+ spin_lock_irqsave(&smi_info->si_lock, flags);
+ smi_event_handler(smi_info, 10);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
+static void request_events(void *send_info)
+{
+ struct smi_info *smi_info = send_info;
+
+ if (!smi_info->has_event_buffer)
+ return;
+
+ atomic_set(&smi_info->req_events, 1);
+}
+
+static void set_need_watch(void *send_info, unsigned int watch_mask)
+{
+ struct smi_info *smi_info = send_info;
+ unsigned long flags;
+ int enable;
+
+ enable = !!watch_mask;
+
+ atomic_set(&smi_info->need_watch, enable);
+ spin_lock_irqsave(&smi_info->si_lock, flags);
+ check_start_timer_thread(smi_info);
+ spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
+static void smi_timeout(struct timer_list *t)
+{
+ struct smi_info *smi_info = from_timer(smi_info, t, si_timer);
+ enum si_sm_result smi_result;
+ unsigned long flags;
+ unsigned long jiffies_now;
+ long time_diff;
+ long timeout;
+
+ spin_lock_irqsave(&(smi_info->si_lock), flags);
+ debug_timestamp(smi_info, "Timer");
+
+ jiffies_now = jiffies;
+ time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
+ * SI_USEC_PER_JIFFY);
+ smi_result = smi_event_handler(smi_info, time_diff);
+
+ if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
+ /* Running with interrupts, only do long timeouts. */
+ timeout = jiffies + SI_TIMEOUT_JIFFIES;
+ smi_inc_stat(smi_info, long_timeouts);
+ goto do_mod_timer;
+ }
+
+ /*
+ * If the state machine asks for a short delay, then shorten
+ * the timer timeout.
+ */
+ if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ smi_inc_stat(smi_info, short_timeouts);
+ timeout = jiffies + 1;
+ } else {
+ smi_inc_stat(smi_info, long_timeouts);
+ timeout = jiffies + SI_TIMEOUT_JIFFIES;
+ }
+
+do_mod_timer:
+ if (smi_result != SI_SM_IDLE)
+ smi_mod_timer(smi_info, timeout);
+ else
+ smi_info->timer_running = false;
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+}
+
+irqreturn_t ipmi_si_irq_handler(int irq, void *data)
+{
+ struct smi_info *smi_info = data;
+ unsigned long flags;
+
+ if (smi_info->io.si_type == SI_BT)
+ /* We need to clear the IRQ flag for the BT interface. */
+ smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
+ IPMI_BT_INTMASK_CLEAR_IRQ_BIT
+ | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+
+ spin_lock_irqsave(&(smi_info->si_lock), flags);
+
+ smi_inc_stat(smi_info, interrupts);
+
+ debug_timestamp(smi_info, "Interrupt");
+
+ smi_event_handler(smi_info, 0);
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+ return IRQ_HANDLED;
+}
+
+static int smi_start_processing(void *send_info,
+ struct ipmi_smi *intf)
+{
+ struct smi_info *new_smi = send_info;
+ int enable = 0;
+
+ new_smi->intf = intf;
+
+ /* Set up the timer that drives the interface. */
+ timer_setup(&new_smi->si_timer, smi_timeout, 0);
+ new_smi->timer_can_start = true;
+ smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
+
+ /* Try to claim any interrupts. */
+ if (new_smi->io.irq_setup) {
+ new_smi->io.irq_handler_data = new_smi;
+ new_smi->io.irq_setup(&new_smi->io);
+ }
+
+ /*
+ * Check if the user forcefully enabled the daemon.
+ */
+ if (new_smi->si_num < num_force_kipmid)
+ enable = force_kipmid[new_smi->si_num];
+ /*
+ * The BT interface is efficient enough to not need a thread,
+ * and there is no need for a thread if we have interrupts.
+ */
+ else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
+ enable = 1;
+
+ if (enable) {
+ new_smi->thread = kthread_run(ipmi_thread, new_smi,
+ "kipmi%d", new_smi->si_num);
+ if (IS_ERR(new_smi->thread)) {
+ dev_notice(new_smi->io.dev,
+ "Could not start kernel thread due to error %ld, only using timers to drive the interface\n",
+ PTR_ERR(new_smi->thread));
+ new_smi->thread = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
+{
+ struct smi_info *smi = send_info;
+
+ data->addr_src = smi->io.addr_source;
+ data->dev = smi->io.dev;
+ data->addr_info = smi->io.addr_info;
+ get_device(smi->io.dev);
+
+ return 0;
+}
+
+static void set_maintenance_mode(void *send_info, bool enable)
+{
+ struct smi_info *smi_info = send_info;
+
+ if (!enable)
+ atomic_set(&smi_info->req_events, 0);
+ smi_info->in_maintenance_mode = enable;
+}
+
+static void shutdown_smi(void *send_info);
+static const struct ipmi_smi_handlers handlers = {
+ .owner = THIS_MODULE,
+ .start_processing = smi_start_processing,
+ .shutdown = shutdown_smi,
+ .get_smi_info = get_smi_info,
+ .sender = sender,
+ .request_events = request_events,
+ .set_need_watch = set_need_watch,
+ .set_maintenance_mode = set_maintenance_mode,
+ .set_run_to_completion = set_run_to_completion,
+ .flush_messages = flush_messages,
+ .poll = poll,
+};
+
+static LIST_HEAD(smi_infos);
+static DEFINE_MUTEX(smi_infos_lock);
+static int smi_num; /* Used to sequence the SMIs */
+
+static const char * const addr_space_to_str[] = { "i/o", "mem" };
+
+module_param_array(force_kipmid, int, &num_force_kipmid, 0);
+MODULE_PARM_DESC(force_kipmid,
+		 "Force the kipmi daemon to be enabled (1) or disabled (0). Normally the IPMI driver auto-detects this, but the value may be overridden by this parm.");
+module_param(unload_when_empty, bool, 0);
+MODULE_PARM_DESC(unload_when_empty,
+ "Unload the module if no interfaces are specified or found, default is 1. Setting to 0 is useful for hot add of devices using hotmod.");
+module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
+MODULE_PARM_DESC(kipmid_max_busy_us,
+ "Max time (in microseconds) to busy-wait for IPMI data before sleeping. 0 (default) means to wait forever. Set to 100-500 if kipmid is using up a lot of CPU time.");
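+/*
+ * For instance, booting with ipmi_si.kipmid_max_busy_us=100,200 would let
+ * kipmid busy-wait up to 100 us on the first interface and 200 us on the
+ * second before yielding; suitable values are workload dependent.
+ */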
+
+void ipmi_irq_finish_setup(struct si_sm_io *io)
+{
+ if (io->si_type == SI_BT)
+ /* Enable the interrupt in the BT interface. */
+ io->outputb(io, IPMI_BT_INTMASK_REG,
+ IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+}
+
+void ipmi_irq_start_cleanup(struct si_sm_io *io)
+{
+ if (io->si_type == SI_BT)
+ /* Disable the interrupt in the BT interface. */
+ io->outputb(io, IPMI_BT_INTMASK_REG, 0);
+}
+
+static void std_irq_cleanup(struct si_sm_io *io)
+{
+ ipmi_irq_start_cleanup(io);
+ free_irq(io->irq, io->irq_handler_data);
+}
+
+int ipmi_std_irq_setup(struct si_sm_io *io)
+{
+ int rv;
+
+ if (!io->irq)
+ return 0;
+
+ rv = request_irq(io->irq,
+ ipmi_si_irq_handler,
+ IRQF_SHARED,
+ SI_DEVICE_NAME,
+ io->irq_handler_data);
+ if (rv) {
+ dev_warn(io->dev, "%s unable to claim interrupt %d, running polled\n",
+ SI_DEVICE_NAME, io->irq);
+ io->irq = 0;
+ } else {
+ io->irq_cleanup = std_irq_cleanup;
+ ipmi_irq_finish_setup(io);
+ dev_info(io->dev, "Using irq %d\n", io->irq);
+ }
+
+ return rv;
+}
+
+static int wait_for_msg_done(struct smi_info *smi_info)
+{
+ enum si_sm_result smi_result;
+
+ smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
+ for (;;) {
+ if (smi_result == SI_SM_CALL_WITH_DELAY ||
+ smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
+ schedule_timeout_uninterruptible(1);
+ smi_result = smi_info->handlers->event(
+ smi_info->si_sm, jiffies_to_usecs(1));
+ } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+ smi_result = smi_info->handlers->event(
+ smi_info->si_sm, 0);
+ } else
+ break;
+ }
+ if (smi_result == SI_SM_HOSED)
+ /*
+ * We couldn't get the state machine to run, so whatever's at
+ * the port is probably not an IPMI SMI interface.
+ */
+ return -ENODEV;
+
+ return 0;
+}
+
+static int try_get_dev_id(struct smi_info *smi_info)
+{
+ unsigned char msg[2];
+ unsigned char *resp;
+ unsigned long resp_len;
+ int rv = 0;
+ unsigned int retry_count = 0;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ /*
+ * Do a Get Device ID command, since it comes back with some
+ * useful info.
+ */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_DEVICE_ID_CMD;
+
+retry:
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv)
+ goto out;
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ /* Check and record info from the get device id, in case we need it. */
+ rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
+ resp + 2, resp_len - 2, &smi_info->device_id);
+ if (rv) {
+ /* record completion code */
+ unsigned char cc = *(resp + 2);
+
+ if (cc != IPMI_CC_NO_ERROR &&
+ ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
+ dev_warn_ratelimited(smi_info->io.dev,
+ "BMC returned 0x%2.2x, retry get bmc device id\n",
+ cc);
+ goto retry;
+ }
+ }
+
+out:
+ kfree(resp);
+ return rv;
+}
+
+static int get_global_enables(struct smi_info *smi_info, u8 *enables)
+{
+ unsigned char msg[3];
+ unsigned char *resp;
+ unsigned long resp_len;
+ int rv;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv) {
+ dev_warn(smi_info->io.dev,
+ "Error getting response from get global enables command: %d\n",
+ rv);
+ goto out;
+ }
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ if (resp_len < 4 ||
+ resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+ resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
+ resp[2] != 0) {
+ dev_warn(smi_info->io.dev,
+ "Invalid return from get global enables command: %ld %x %x %x\n",
+ resp_len, resp[0], resp[1], resp[2]);
+ rv = -EINVAL;
+ goto out;
+ } else {
+ *enables = resp[3];
+ }
+
+out:
+ kfree(resp);
+ return rv;
+}
+
+/*
+ * Returns 1 if it gets an error from the command.
+ */
+static int set_global_enables(struct smi_info *smi_info, u8 enables)
+{
+ unsigned char msg[3];
+ unsigned char *resp;
+ unsigned long resp_len;
+ int rv;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = enables;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv) {
+ dev_warn(smi_info->io.dev,
+ "Error getting response from set global enables command: %d\n",
+ rv);
+ goto out;
+ }
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ if (resp_len < 3 ||
+ resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+ resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
+ dev_warn(smi_info->io.dev,
+ "Invalid return from set global enables command: %ld %x %x\n",
+ resp_len, resp[0], resp[1]);
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (resp[2] != 0)
+ rv = 1;
+
+out:
+ kfree(resp);
+ return rv;
+}
+
+/*
+ * Some BMCs do not support clearing the receive irq bit in the global
+ * enables (even if they don't support interrupts on the BMC). Check
+ * for this and handle it properly.
+ */
+static void check_clr_rcv_irq(struct smi_info *smi_info)
+{
+ u8 enables = 0;
+ int rv;
+
+ rv = get_global_enables(smi_info, &enables);
+ if (!rv) {
+ if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
+ /* Already clear, should work ok. */
+ return;
+
+ enables &= ~IPMI_BMC_RCV_MSG_INTR;
+ rv = set_global_enables(smi_info, enables);
+ }
+
+ if (rv < 0) {
+ dev_err(smi_info->io.dev,
+ "Cannot check clearing the rcv irq: %d\n", rv);
+ return;
+ }
+
+ if (rv) {
+ /*
+		 * An error when writing the global enables with the
+		 * receive irq bit cleared means clearing the bit is
+		 * not supported.
+ */
+ dev_warn(smi_info->io.dev,
+ "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
+ smi_info->cannot_disable_irq = true;
+ }
+}
+
+/*
+ * Some BMCs do not support setting the interrupt bits in the global
+ * enables even if they support interrupts. Clearly bad, but we can
+ * compensate.
+ */
+static void check_set_rcv_irq(struct smi_info *smi_info)
+{
+ u8 enables = 0;
+ int rv;
+
+ if (!smi_info->io.irq)
+ return;
+
+ rv = get_global_enables(smi_info, &enables);
+ if (!rv) {
+ enables |= IPMI_BMC_RCV_MSG_INTR;
+ rv = set_global_enables(smi_info, enables);
+ }
+
+ if (rv < 0) {
+ dev_err(smi_info->io.dev,
+ "Cannot check setting the rcv irq: %d\n", rv);
+ return;
+ }
+
+ if (rv) {
+ /*
+		 * An error when setting the receive irq bit in the global
+		 * enables means setting the bit is not supported.
+ */
+ dev_warn(smi_info->io.dev,
+ "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
+ smi_info->cannot_disable_irq = true;
+ smi_info->irq_enable_broken = true;
+ }
+}
+
+static int try_enable_event_buffer(struct smi_info *smi_info)
+{
+ unsigned char msg[3];
+ unsigned char *resp;
+ unsigned long resp_len;
+ int rv = 0;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv) {
+ pr_warn("Error getting response from get global enables command, the event buffer is not enabled\n");
+ goto out;
+ }
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ if (resp_len < 4 ||
+ resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+ resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
+ resp[2] != 0) {
+ pr_warn("Invalid return from get global enables command, cannot enable the event buffer\n");
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
+ /* buffer is already enabled, nothing to do. */
+ smi_info->supports_event_msg_buff = true;
+ goto out;
+ }
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv) {
+		pr_warn("Error getting response from set global enables command, the event buffer is not enabled\n");
+ goto out;
+ }
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ if (resp_len < 3 ||
+ resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+ resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
+		pr_warn("Invalid return from set global enables command, cannot enable the event buffer\n");
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (resp[2] != 0)
+ /*
+ * An error when setting the event buffer bit means
+ * that the event buffer is not supported.
+ */
+ rv = -ENOENT;
+ else
+ smi_info->supports_event_msg_buff = true;
+
+out:
+ kfree(resp);
+ return rv;
+}
+
+#define IPMI_SI_ATTR(name) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct smi_info *smi_info = dev_get_drvdata(dev); \
+ \
+ return sysfs_emit(buf, "%u\n", smi_get_stat(smi_info, name)); \
+} \
+static DEVICE_ATTR_RO(name)
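+/*
+ * For example, IPMI_SI_ATTR(idles) generates idles_show() and the matching
+ * dev_attr_idles referenced in ipmi_si_dev_attrs[] below.
+ */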
+
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smi_info *smi_info = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_type]);
+}
+static DEVICE_ATTR_RO(type);
+
+static ssize_t interrupts_enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smi_info *smi_info = dev_get_drvdata(dev);
+ int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;
+
+ return sysfs_emit(buf, "%d\n", enabled);
+}
+static DEVICE_ATTR_RO(interrupts_enabled);
+
+IPMI_SI_ATTR(short_timeouts);
+IPMI_SI_ATTR(long_timeouts);
+IPMI_SI_ATTR(idles);
+IPMI_SI_ATTR(interrupts);
+IPMI_SI_ATTR(attentions);
+IPMI_SI_ATTR(flag_fetches);
+IPMI_SI_ATTR(hosed_count);
+IPMI_SI_ATTR(complete_transactions);
+IPMI_SI_ATTR(events);
+IPMI_SI_ATTR(watchdog_pretimeouts);
+IPMI_SI_ATTR(incoming_messages);
+
+static ssize_t params_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smi_info *smi_info = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf,
+ "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
+ si_to_str[smi_info->io.si_type],
+ addr_space_to_str[smi_info->io.addr_space],
+ smi_info->io.addr_data,
+ smi_info->io.regspacing,
+ smi_info->io.regsize,
+ smi_info->io.regshift,
+ smi_info->io.irq,
+ smi_info->io.slave_addr);
+}
+static DEVICE_ATTR_RO(params);
+
+static struct attribute *ipmi_si_dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_interrupts_enabled.attr,
+ &dev_attr_short_timeouts.attr,
+ &dev_attr_long_timeouts.attr,
+ &dev_attr_idles.attr,
+ &dev_attr_interrupts.attr,
+ &dev_attr_attentions.attr,
+ &dev_attr_flag_fetches.attr,
+ &dev_attr_hosed_count.attr,
+ &dev_attr_complete_transactions.attr,
+ &dev_attr_events.attr,
+ &dev_attr_watchdog_pretimeouts.attr,
+ &dev_attr_incoming_messages.attr,
+ &dev_attr_params.attr,
+ NULL
+};
+
+static const struct attribute_group ipmi_si_dev_attr_group = {
+ .attrs = ipmi_si_dev_attrs,
+};
+
+/*
+ * oem_data_avail_to_receive_msg_avail
+ * @info - smi_info structure with msg_flags set
+ *
+ * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
+ * Returns 1 indicating need to re-run handle_flags().
+ */
+static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
+{
+ smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
+ RECEIVE_MSG_AVAIL);
+ return 1;
+}
+
+/*
+ * setup_dell_poweredge_oem_data_handler
+ * @info - smi_info.device_id must be populated
+ *
+ * Systems that match, but have firmware version < 1.40 may assert
+ * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
+ * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
+ * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
+ * as RECEIVE_MSG_AVAIL instead.
+ *
+ * As Dell has no plans to release IPMI 1.5 firmware that *ever*
+ * assert the OEM[012] bits, and if it did, the driver would have to
+ * change to handle that properly, we don't actually check for the
+ * firmware version.
+ * Device ID = 0x20 BMC on PowerEdge 8G servers
+ * Device Revision = 0x80
+ * Firmware Revision1 = 0x01 BMC version 1.40
+ * Firmware Revision2 = 0x40 BCD encoded
+ * IPMI Version = 0x51 IPMI 1.5
+ * Manufacturer ID = A2 02 00 Dell IANA
+ *
+ * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
+ * OEM0_DATA_AVAIL, which needs to be treated as RECEIVE_MSG_AVAIL.
+ *
+ */
+#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
+#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
+#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
+#define DELL_IANA_MFR_ID 0x0002a2
+static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
+{
+ struct ipmi_device_id *id = &smi_info->device_id;
+ if (id->manufacturer_id == DELL_IANA_MFR_ID) {
+ if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
+ id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
+ id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
+ smi_info->oem_data_avail_handler =
+ oem_data_avail_to_receive_msg_avail;
+ } else if (ipmi_version_major(id) < 1 ||
+ (ipmi_version_major(id) == 1 &&
+ ipmi_version_minor(id) < 5)) {
+ smi_info->oem_data_avail_handler =
+ oem_data_avail_to_receive_msg_avail;
+ }
+ }
+}
+
+#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
+static void return_hosed_msg_badsize(struct smi_info *smi_info)
+{
+ struct ipmi_smi_msg *msg = smi_info->curr_msg;
+
+ /* Make it a response */
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
+ msg->rsp_size = 3;
+ smi_info->curr_msg = NULL;
+ deliver_recv_msg(smi_info, msg);
+}
+
+/*
+ * dell_poweredge_bt_xaction_handler
+ * @info - smi_info.device_id must be populated
+ *
+ * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
+ * not respond to a Get SDR command if the length of the data
+ * requested is exactly 0x3A, which leads to command timeouts and no
+ * data returned. This intercepts such commands, and causes userspace
+ * callers to try again with a different-sized buffer, which succeeds.
+ */
+
+#define STORAGE_NETFN 0x0A
+#define STORAGE_CMD_GET_SDR 0x23
+static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
+ unsigned long unused,
+ void *in)
+{
+ struct smi_info *smi_info = in;
+ unsigned char *data = smi_info->curr_msg->data;
+ unsigned int size = smi_info->curr_msg->data_size;
+ if (size >= 8 &&
+ (data[0]>>2) == STORAGE_NETFN &&
+ data[1] == STORAGE_CMD_GET_SDR &&
+ data[7] == 0x3A) {
+ return_hosed_msg_badsize(smi_info);
+ return NOTIFY_STOP;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block dell_poweredge_bt_xaction_notifier = {
+ .notifier_call = dell_poweredge_bt_xaction_handler,
+};
+
+/*
+ * setup_dell_poweredge_bt_xaction_handler
+ * @info - smi_info.device_id must be filled in already
+ *
+ * Registers dell_poweredge_bt_xaction_notifier on the transaction
+ * notifier chain when we know this workaround is needed.
+ */
+static void
+setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
+{
+ struct ipmi_device_id *id = &smi_info->device_id;
+ if (id->manufacturer_id == DELL_IANA_MFR_ID &&
+ smi_info->io.si_type == SI_BT)
+ register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
+}
+
+/*
+ * setup_oem_data_handler
+ * @info - smi_info.device_id must be filled in already
+ *
+ * Fills in smi_info.oem_data_avail_handler
+ * when we know what function to use there.
+ */
+
+static void setup_oem_data_handler(struct smi_info *smi_info)
+{
+ setup_dell_poweredge_oem_data_handler(smi_info);
+}
+
+static void setup_xaction_handlers(struct smi_info *smi_info)
+{
+ setup_dell_poweredge_bt_xaction_handler(smi_info);
+}
+
+static void check_for_broken_irqs(struct smi_info *smi_info)
+{
+ check_clr_rcv_irq(smi_info);
+ check_set_rcv_irq(smi_info);
+}
+
+static inline void stop_timer_and_thread(struct smi_info *smi_info)
+{
+ if (smi_info->thread != NULL) {
+ kthread_stop(smi_info->thread);
+ smi_info->thread = NULL;
+ }
+
+ smi_info->timer_can_start = false;
+ del_timer_sync(&smi_info->si_timer);
+}
+
+static struct smi_info *find_dup_si(struct smi_info *info)
+{
+ struct smi_info *e;
+
+ list_for_each_entry(e, &smi_infos, link) {
+ if (e->io.addr_space != info->io.addr_space)
+ continue;
+ if (e->io.addr_data == info->io.addr_data) {
+ /*
+ * This is a cheap hack, ACPI doesn't have a defined
+ * slave address but SMBIOS does. Pick it up from
+ * any source that has it available.
+ */
+ if (info->io.slave_addr && !e->io.slave_addr)
+ e->io.slave_addr = info->io.slave_addr;
+ return e;
+ }
+ }
+
+ return NULL;
+}
+
+int ipmi_si_add_smi(struct si_sm_io *io)
+{
+ int rv = 0;
+ struct smi_info *new_smi, *dup;
+
+ /*
+ * If the user gave us a hard-coded device at the same
+ * address, they presumably want us to use it and not what is
+ * in the firmware.
+ */
+ if (io->addr_source != SI_HARDCODED && io->addr_source != SI_HOTMOD &&
+ ipmi_si_hardcode_match(io->addr_space, io->addr_data)) {
+ dev_info(io->dev,
+ "Hard-coded device at this address already exists");
+ return -ENODEV;
+ }
+
+ if (!io->io_setup) {
+ if (io->addr_space == IPMI_IO_ADDR_SPACE) {
+ io->io_setup = ipmi_si_port_setup;
+ } else if (io->addr_space == IPMI_MEM_ADDR_SPACE) {
+ io->io_setup = ipmi_si_mem_setup;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
+ if (!new_smi)
+ return -ENOMEM;
+ spin_lock_init(&new_smi->si_lock);
+
+ new_smi->io = *io;
+
+ mutex_lock(&smi_infos_lock);
+ dup = find_dup_si(new_smi);
+ if (dup) {
+ if (new_smi->io.addr_source == SI_ACPI &&
+ dup->io.addr_source == SI_SMBIOS) {
+ /* We prefer ACPI over SMBIOS. */
+ dev_info(dup->io.dev,
+ "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
+ si_to_str[new_smi->io.si_type]);
+ cleanup_one_si(dup);
+ } else {
+ dev_info(new_smi->io.dev,
+ "%s-specified %s state machine: duplicate\n",
+ ipmi_addr_src_to_str(new_smi->io.addr_source),
+ si_to_str[new_smi->io.si_type]);
+ rv = -EBUSY;
+ kfree(new_smi);
+ goto out_err;
+ }
+ }
+
+ pr_info("Adding %s-specified %s state machine\n",
+ ipmi_addr_src_to_str(new_smi->io.addr_source),
+ si_to_str[new_smi->io.si_type]);
+
+ list_add_tail(&new_smi->link, &smi_infos);
+
+ if (initialized)
+ rv = try_smi_init(new_smi);
+out_err:
+ mutex_unlock(&smi_infos_lock);
+ return rv;
+}
+
+/*
+ * Try to start up an interface. Must be called with smi_infos_lock
+ * held, primarily to keep smi_num consistent; we only want to do these
+ * one at a time.
+ */
+static int try_smi_init(struct smi_info *new_smi)
+{
+ int rv = 0;
+ int i;
+
+ pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
+ ipmi_addr_src_to_str(new_smi->io.addr_source),
+ si_to_str[new_smi->io.si_type],
+ addr_space_to_str[new_smi->io.addr_space],
+ new_smi->io.addr_data,
+ new_smi->io.slave_addr, new_smi->io.irq);
+
+ switch (new_smi->io.si_type) {
+ case SI_KCS:
+ new_smi->handlers = &kcs_smi_handlers;
+ break;
+
+ case SI_SMIC:
+ new_smi->handlers = &smic_smi_handlers;
+ break;
+
+ case SI_BT:
+ new_smi->handlers = &bt_smi_handlers;
+ break;
+
+ default:
+ /* No support for anything else yet. */
+ rv = -EIO;
+ goto out_err;
+ }
+
+ new_smi->si_num = smi_num;
+
+ /* Do this early so it's available for logs. */
+ if (!new_smi->io.dev) {
+ pr_err("IPMI interface added with no device\n");
+ rv = -EIO;
+ goto out_err;
+ }
+
+ /* Allocate the state machine's data and initialize it. */
+ new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
+ if (!new_smi->si_sm) {
+ rv = -ENOMEM;
+ goto out_err;
+ }
+ new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
+ &new_smi->io);
+
+ /* Now that we know the I/O size, we can set up the I/O. */
+ rv = new_smi->io.io_setup(&new_smi->io);
+ if (rv) {
+ dev_err(new_smi->io.dev, "Could not set up I/O space\n");
+ goto out_err;
+ }
+
+ /* Do low-level detection first. */
+ if (new_smi->handlers->detect(new_smi->si_sm)) {
+ if (new_smi->io.addr_source)
+ dev_err(new_smi->io.dev,
+ "Interface detection failed\n");
+ rv = -ENODEV;
+ goto out_err;
+ }
+
+ /*
+ * Attempt a get device id command. If it fails, we probably
+ * don't have a BMC here.
+ */
+ rv = try_get_dev_id(new_smi);
+ if (rv) {
+ if (new_smi->io.addr_source)
+ dev_err(new_smi->io.dev,
+ "There appears to be no BMC at this location\n");
+ goto out_err;
+ }
+
+ setup_oem_data_handler(new_smi);
+ setup_xaction_handlers(new_smi);
+ check_for_broken_irqs(new_smi);
+
+ new_smi->waiting_msg = NULL;
+ new_smi->curr_msg = NULL;
+ atomic_set(&new_smi->req_events, 0);
+ new_smi->run_to_completion = false;
+ for (i = 0; i < SI_NUM_STATS; i++)
+ atomic_set(&new_smi->stats[i], 0);
+
+ new_smi->interrupt_disabled = true;
+ atomic_set(&new_smi->need_watch, 0);
+
+ rv = try_enable_event_buffer(new_smi);
+ if (rv == 0)
+ new_smi->has_event_buffer = true;
+
+ /*
+ * Start clearing the flags before we enable interrupts or the
+ * timer to avoid racing with the timer.
+ */
+ start_clear_flags(new_smi);
+
+ /*
+ * IRQ is defined to be set when non-zero. req_events will
+ * cause a global flags check that will enable interrupts.
+ */
+ if (new_smi->io.irq) {
+ new_smi->interrupt_disabled = false;
+ atomic_set(&new_smi->req_events, 1);
+ }
+
+ dev_set_drvdata(new_smi->io.dev, new_smi);
+ rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
+ if (rv) {
+ dev_err(new_smi->io.dev,
+ "Unable to add device attributes: error %d\n",
+ rv);
+ goto out_err;
+ }
+ new_smi->dev_group_added = true;
+
+ rv = ipmi_register_smi(&handlers,
+ new_smi,
+ new_smi->io.dev,
+ new_smi->io.slave_addr);
+ if (rv) {
+ dev_err(new_smi->io.dev,
+ "Unable to register device: error %d\n",
+ rv);
+ goto out_err;
+ }
+
+ /* Don't increment till we know we have succeeded. */
+ smi_num++;
+
+ dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
+ si_to_str[new_smi->io.si_type]);
+
+ WARN_ON(new_smi->io.dev->init_name != NULL);
+
+ out_err:
+ if (rv && new_smi->io.io_cleanup) {
+ new_smi->io.io_cleanup(&new_smi->io);
+ new_smi->io.io_cleanup = NULL;
+ }
+
+ if (rv && new_smi->si_sm) {
+ kfree(new_smi->si_sm);
+ new_smi->si_sm = NULL;
+ }
+
+ return rv;
+}
+
+static int __init init_ipmi_si(void)
+{
+ struct smi_info *e;
+ enum ipmi_addr_src type = SI_INVALID;
+
+ if (initialized)
+ return 0;
+
+ ipmi_hardcode_init();
+
+ pr_info("IPMI System Interface driver\n");
+
+ ipmi_si_platform_init();
+
+ ipmi_si_pci_init();
+
+ ipmi_si_parisc_init();
+
+ /*
+ * We prefer devices with interrupts, but in the case of a machine
+ * with multiple BMCs we assume that there will be several instances
+ * of a given type, so if we succeed in registering a type then also
+ * try to register everything else of the same type.
+ */
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry(e, &smi_infos, link) {
+ /*
+ * Try to register a device if it has an IRQ and we either
+ * haven't successfully registered a device yet or this
+ * device has the same type as one we successfully registered.
+ */
+ if (e->io.irq && (!type || e->io.addr_source == type)) {
+ if (!try_smi_init(e)) {
+ type = e->io.addr_source;
+ }
+ }
+ }
+
+ /* type will only have been set if we successfully registered an si */
+ if (type)
+ goto skip_fallback_noirq;
+
+ /* Fall back to the preferred device */
+
+ list_for_each_entry(e, &smi_infos, link) {
+ if (!e->io.irq && (!type || e->io.addr_source == type)) {
+ if (!try_smi_init(e)) {
+ type = e->io.addr_source;
+ }
+ }
+ }
+
+skip_fallback_noirq:
+ initialized = true;
+ mutex_unlock(&smi_infos_lock);
+
+ if (type)
+ return 0;
+
+ mutex_lock(&smi_infos_lock);
+ if (unload_when_empty && list_empty(&smi_infos)) {
+ mutex_unlock(&smi_infos_lock);
+ cleanup_ipmi_si();
+ pr_warn("Unable to find any System Interface(s)\n");
+ return -ENODEV;
+ } else {
+ mutex_unlock(&smi_infos_lock);
+ return 0;
+ }
+}
+module_init(init_ipmi_si);
+
+static void wait_msg_processed(struct smi_info *smi_info)
+{
+ unsigned long jiffies_now;
+ long time_diff;
+
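+ /*
+ * Drive the state machine until the pending message completes, feeding
+ * it the elapsed time in microseconds (the jiffies delta scaled by
+ * SI_USEC_PER_JIFFY).
+ */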
+ while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
+ jiffies_now = jiffies;
+ time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
+ * SI_USEC_PER_JIFFY);
+ smi_event_handler(smi_info, time_diff);
+ schedule_timeout_uninterruptible(1);
+ }
+}
+
+static void shutdown_smi(void *send_info)
+{
+ struct smi_info *smi_info = send_info;
+
+ if (smi_info->dev_group_added) {
+ device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group);
+ smi_info->dev_group_added = false;
+ }
+ if (smi_info->io.dev)
+ dev_set_drvdata(smi_info->io.dev, NULL);
+
+ /*
+ * Make sure that interrupts, the timer and the thread are
+ * stopped and will not run again.
+ */
+ smi_info->interrupt_disabled = true;
+ if (smi_info->io.irq_cleanup) {
+ smi_info->io.irq_cleanup(&smi_info->io);
+ smi_info->io.irq_cleanup = NULL;
+ }
+ stop_timer_and_thread(smi_info);
+
+ /*
+ * Wait until we know that we are out of any interrupt
+ * handlers that might have been running before we freed the
+ * interrupt.
+ */
+ synchronize_rcu();
+
+ /*
+ * Timeouts are stopped; now make sure the interrupts are off
+ * in the BMC. Note that timers and CPU interrupts are off,
+ * so no need for locks.
+ */
+ wait_msg_processed(smi_info);
+
+ if (smi_info->handlers)
+ disable_si_irq(smi_info);
+
+ wait_msg_processed(smi_info);
+
+ if (smi_info->handlers)
+ smi_info->handlers->cleanup(smi_info->si_sm);
+
+ if (smi_info->io.io_cleanup) {
+ smi_info->io.io_cleanup(&smi_info->io);
+ smi_info->io.io_cleanup = NULL;
+ }
+
+ kfree(smi_info->si_sm);
+ smi_info->si_sm = NULL;
+
+ smi_info->intf = NULL;
+}
+
+/*
+ * Must be called with smi_infos_lock held, to serialize the
+ * smi_info->intf check.
+ */
+static void cleanup_one_si(struct smi_info *smi_info)
+{
+ if (!smi_info)
+ return;
+
+ list_del(&smi_info->link);
+ ipmi_unregister_smi(smi_info->intf);
+ kfree(smi_info);
+}
+
+void ipmi_si_remove_by_dev(struct device *dev)
+{
+ struct smi_info *e;
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry(e, &smi_infos, link) {
+ if (e->io.dev == dev) {
+ cleanup_one_si(e);
+ break;
+ }
+ }
+ mutex_unlock(&smi_infos_lock);
+}
+
+struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
+ unsigned long addr)
+{
+ /* remove */
+ struct smi_info *e, *tmp_e;
+ struct device *dev = NULL;
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
+ if (e->io.addr_space != addr_space)
+ continue;
+ if (e->io.si_type != si_type)
+ continue;
+ if (e->io.addr_data == addr) {
+ dev = get_device(e->io.dev);
+ cleanup_one_si(e);
+ }
+ }
+ mutex_unlock(&smi_infos_lock);
+
+ return dev;
+}
+
+static void cleanup_ipmi_si(void)
+{
+ struct smi_info *e, *tmp_e;
+
+ if (!initialized)
+ return;
+
+ ipmi_si_pci_shutdown();
+
+ ipmi_si_parisc_shutdown();
+
+ ipmi_si_platform_shutdown();
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
+ cleanup_one_si(e);
+ mutex_unlock(&smi_infos_lock);
+
+ ipmi_si_hardcode_exit();
+ ipmi_si_hotmod_exit();
+}
+module_exit(cleanup_ipmi_si);
+
+MODULE_ALIAS("platform:dmi-ipmi-si");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");
diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c
new file mode 100644
index 0000000000..86b92e93a7
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_mem_io.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/io.h>
+#include "ipmi_si.h"
+
+static unsigned char intf_mem_inb(const struct si_sm_io *io,
+ unsigned int offset)
+{
+ return readb((io->addr)+(offset * io->regspacing));
+}
+
+static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writeb(b, (io->addr)+(offset * io->regspacing));
+}
+
+static unsigned char intf_mem_inw(const struct si_sm_io *io,
+ unsigned int offset)
+{
+ return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+
+static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+
+static unsigned char intf_mem_inl(const struct si_sm_io *io,
+ unsigned int offset)
+{
+ return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+
+static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+
+#ifdef readq
+static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset)
+{
+ return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+
+static void mem_outq(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writeq((u64)b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+#endif
+
+static void mem_region_cleanup(struct si_sm_io *io, int num)
+{
+ unsigned long addr = io->addr_data;
+ int idx;
+
+ for (idx = 0; idx < num; idx++)
+ release_mem_region(addr + idx * io->regspacing,
+ io->regsize);
+}
+
+static void mem_cleanup(struct si_sm_io *io)
+{
+ if (io->addr) {
+ iounmap(io->addr);
+ mem_region_cleanup(io, io->io_size);
+ }
+}
+
+int ipmi_si_mem_setup(struct si_sm_io *io)
+{
+ unsigned long addr = io->addr_data;
+ int mapsize, idx;
+
+ if (!addr)
+ return -ENODEV;
+
+ /*
+ * Figure out the actual readb/readw/readl/etc routine to use based
+ * upon the register size.
+ */
+ switch (io->regsize) {
+ case 1:
+ io->inputb = intf_mem_inb;
+ io->outputb = intf_mem_outb;
+ break;
+ case 2:
+ io->inputb = intf_mem_inw;
+ io->outputb = intf_mem_outw;
+ break;
+ case 4:
+ io->inputb = intf_mem_inl;
+ io->outputb = intf_mem_outl;
+ break;
+#ifdef readq
+ case 8:
+ io->inputb = mem_inq;
+ io->outputb = mem_outq;
+ break;
+#endif
+ default:
+ dev_warn(io->dev, "Invalid register size: %d\n",
+ io->regsize);
+ return -EINVAL;
+ }
+
+ /*
+ * Some BIOSes reserve disjoint memory regions in their ACPI
+ * tables. This causes problems when trying to request the
+ * entire region. Therefore we must request each register
+ * separately.
+ */
+ for (idx = 0; idx < io->io_size; idx++) {
+ if (request_mem_region(addr + idx * io->regspacing,
+ io->regsize, SI_DEVICE_NAME) == NULL) {
+ /* Undo allocations */
+ mem_region_cleanup(io, idx);
+ return -EIO;
+ }
+ }
+
+ /*
+ * Calculate the total amount of memory to claim. This is an
+ * unusual looking calculation, but it avoids claiming any
+ * more memory than it has to. It will claim everything
+ * between the first address to the end of the last full
+ * register.
+ */
+ mapsize = ((io->io_size * io->regspacing)
+ - (io->regspacing - io->regsize));
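+ /*
+ * For example, with io_size = 2, regspacing = 4 and regsize = 1 this
+ * maps 2 * 4 - (4 - 1) = 5 bytes instead of the full 8.
+ */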
+ io->addr = ioremap(addr, mapsize);
+ if (io->addr == NULL) {
+ mem_region_cleanup(io, io->io_size);
+ return -EIO;
+ }
+
+ io->io_cleanup = mem_cleanup;
+
+ return 0;
+}
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c
new file mode 100644
index 0000000000..2be2967f6b
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_parisc.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <asm/hardware.h> /* for register_parisc_driver() stuff */
+#include <asm/parisc-device.h>
+#include "ipmi_si.h"
+
+static bool parisc_registered;
+
+static int __init ipmi_parisc_probe(struct parisc_device *dev)
+{
+ struct si_sm_io io;
+
+ memset(&io, 0, sizeof(io));
+
+ io.si_type = SI_KCS;
+ io.addr_source = SI_DEVICETREE;
+ io.addr_space = IPMI_MEM_ADDR_SPACE;
+ io.addr_data = dev->hpa.start;
+ io.regsize = 1;
+ io.regspacing = 1;
+ io.regshift = 0;
+ io.irq = 0; /* no interrupt */
+ io.irq_setup = NULL;
+ io.dev = &dev->dev;
+
+ dev_dbg(&dev->dev, "addr 0x%lx\n", io.addr_data);
+
+ return ipmi_si_add_smi(&io);
+}
+
+static void __exit ipmi_parisc_remove(struct parisc_device *dev)
+{
+ ipmi_si_remove_by_dev(&dev->dev);
+}
+
+static const struct parisc_device_id ipmi_parisc_tbl[] __initconst = {
+ { HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(parisc, ipmi_parisc_tbl);
+
+static struct parisc_driver ipmi_parisc_driver __refdata = {
+ .name = "ipmi",
+ .id_table = ipmi_parisc_tbl,
+ .probe = ipmi_parisc_probe,
+ .remove = __exit_p(ipmi_parisc_remove),
+};
+
+void ipmi_si_parisc_init(void)
+{
+ register_parisc_driver(&ipmi_parisc_driver);
+ parisc_registered = true;
+}
+
+void ipmi_si_parisc_shutdown(void)
+{
+ if (parisc_registered)
+ unregister_parisc_driver(&ipmi_parisc_driver);
+}
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
new file mode 100644
index 0000000000..74fa205586
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_si_pci.c
+ *
+ * Handling for IPMI devices on the PCI bus.
+ */
+
+#define pr_fmt(fmt) "ipmi_pci: " fmt
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "ipmi_si.h"
+
+static bool pci_registered;
+
+static bool si_trypci = true;
+
+module_param_named(trypci, si_trypci, bool, 0);
+MODULE_PARM_DESC(trypci,
+ "Setting this to zero will disable the default scan of the interfaces identified via pci");
+
+#define PCI_DEVICE_ID_HP_MMC 0x121A
+
+static int ipmi_pci_probe_regspacing(struct si_sm_io *io)
+{
+ if (io->si_type == SI_KCS) {
+ unsigned char status;
+ int regspacing;
+
+ io->regsize = DEFAULT_REGSIZE;
+ io->regshift = 0;
+
+ /* detect 1, 4, 16byte spacing */
+ for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
+ io->regspacing = regspacing;
+ if (io->io_setup(io)) {
+ dev_err(io->dev, "Could not setup I/O space\n");
+ return DEFAULT_REGSPACING;
+ }
+ /* write invalid cmd */
+ io->outputb(io, 1, 0x10);
+ /* read status back */
+ status = io->inputb(io, 1);
+ io->io_cleanup(io);
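+ /*
+ * A nonzero status read back at this spacing suggests the
+ * write actually reached the KCS registers.
+ */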
+ if (status)
+ return regspacing;
+ regspacing *= 4;
+ }
+ }
+ return DEFAULT_REGSPACING;
+}
+
+static struct pci_device_id ipmi_pci_blacklist[] = {
+ /*
+ * This is a "Virtual IPMI device", whatever that is. It appears
+ * as a KCS device by the class, but it is not one.
+ */
+ { PCI_VDEVICE(REALTEK, 0x816c) },
+ { 0, }
+};
+
+static int ipmi_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rv;
+ struct si_sm_io io;
+
+ if (pci_match_id(ipmi_pci_blacklist, pdev))
+ return -ENODEV;
+
+ memset(&io, 0, sizeof(io));
+ io.addr_source = SI_PCI;
+ dev_info(&pdev->dev, "probing via PCI");
+
+ switch (pdev->class) {
+ case PCI_CLASS_SERIAL_IPMI_SMIC:
+ io.si_type = SI_SMIC;
+ break;
+
+ case PCI_CLASS_SERIAL_IPMI_KCS:
+ io.si_type = SI_KCS;
+ break;
+
+ case PCI_CLASS_SERIAL_IPMI_BT:
+ io.si_type = SI_BT;
+ break;
+
+ default:
+ dev_info(&pdev->dev, "Unknown IPMI class: %x\n", pdev->class);
+ return -ENOMEM;
+ }
+
+ rv = pcim_enable_device(pdev);
+ if (rv) {
+ dev_err(&pdev->dev, "couldn't enable PCI device\n");
+ return rv;
+ }
+
+ if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
+ io.addr_space = IPMI_IO_ADDR_SPACE;
+ io.io_setup = ipmi_si_port_setup;
+ } else {
+ io.addr_space = IPMI_MEM_ADDR_SPACE;
+ io.io_setup = ipmi_si_mem_setup;
+ }
+ io.addr_data = pci_resource_start(pdev, 0);
+
+ io.dev = &pdev->dev;
+
+ io.regspacing = ipmi_pci_probe_regspacing(&io);
+ io.regsize = DEFAULT_REGSIZE;
+ io.regshift = 0;
+
+ io.irq = pdev->irq;
+ if (io.irq)
+ io.irq_setup = ipmi_std_irq_setup;
+
+ dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
+ &pdev->resource[0], io.regsize, io.regspacing, io.irq);
+
+ return ipmi_si_add_smi(&io);
+}
+
+static void ipmi_pci_remove(struct pci_dev *pdev)
+{
+ ipmi_si_remove_by_dev(&pdev->dev);
+}
+
+static const struct pci_device_id ipmi_pci_devices[] = {
+ { PCI_VDEVICE(HP, PCI_DEVICE_ID_HP_MMC) },
+ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_IPMI_SMIC, ~0) },
+ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_IPMI_KCS, ~0) },
+ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_IPMI_BT, ~0) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
+
+static struct pci_driver ipmi_pci_driver = {
+ .name = SI_DEVICE_NAME,
+ .id_table = ipmi_pci_devices,
+ .probe = ipmi_pci_probe,
+ .remove = ipmi_pci_remove,
+};
+
+void ipmi_si_pci_init(void)
+{
+ if (si_trypci) {
+ int rv = pci_register_driver(&ipmi_pci_driver);
+ if (rv)
+ pr_err("Unable to register PCI driver: %d\n", rv);
+ else
+ pci_registered = true;
+ }
+}
+
+void ipmi_si_pci_shutdown(void)
+{
+ if (pci_registered)
+ pci_unregister_driver(&ipmi_pci_driver);
+}
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
new file mode 100644
index 0000000000..c3d8ac7873
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_si_platform.c
+ *
+ * Handling for platform devices in IPMI (ACPI, OF, and things
+ * coming from the platform).
+ */
+
+#define pr_fmt(fmt) "ipmi_platform: " fmt
+#define dev_fmt pr_fmt
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/acpi.h>
+#include "ipmi_si.h"
+#include "ipmi_dmi.h"
+
+static bool platform_registered;
+static bool si_tryplatform = true;
+#ifdef CONFIG_ACPI
+static bool si_tryacpi = true;
+#endif
+#ifdef CONFIG_OF
+static bool si_tryopenfirmware = true;
+#endif
+#ifdef CONFIG_DMI
+static bool si_trydmi = true;
+#else
+static bool si_trydmi = false;
+#endif
+
+module_param_named(tryplatform, si_tryplatform, bool, 0);
+MODULE_PARM_DESC(tryplatform,
+ "Setting this to zero will disable the default scan of the interfaces identified via platform interfaces besides ACPI, OpenFirmware, and DMI");
+#ifdef CONFIG_ACPI
+module_param_named(tryacpi, si_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi,
+ "Setting this to zero will disable the default scan of the interfaces identified via ACPI");
+#endif
+#ifdef CONFIG_OF
+module_param_named(tryopenfirmware, si_tryopenfirmware, bool, 0);
+MODULE_PARM_DESC(tryopenfirmware,
+ "Setting this to zero will disable the default scan of the interfaces identified via OpenFirmware");
+#endif
+#ifdef CONFIG_DMI
+module_param_named(trydmi, si_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi,
+ "Setting this to zero will disable the default scan of the interfaces identified via DMI");
+#endif
+
+#ifdef CONFIG_ACPI
+/* For GPE-type interrupts. */
+static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
+ u32 gpe_number, void *context)
+{
+ struct si_sm_io *io = context;
+
+ ipmi_si_irq_handler(io->irq, io->irq_handler_data);
+ return ACPI_INTERRUPT_HANDLED;
+}
+
+static void acpi_gpe_irq_cleanup(struct si_sm_io *io)
+{
+ if (!io->irq)
+ return;
+
+ ipmi_irq_start_cleanup(io);
+ acpi_remove_gpe_handler(NULL, io->irq, &ipmi_acpi_gpe);
+}
+
+static int acpi_gpe_irq_setup(struct si_sm_io *io)
+{
+ acpi_status status;
+
+ if (!io->irq)
+ return 0;
+
+ status = acpi_install_gpe_handler(NULL,
+ io->irq,
+ ACPI_GPE_LEVEL_TRIGGERED,
+ &ipmi_acpi_gpe,
+ io);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(io->dev,
+ "Unable to claim ACPI GPE %d, running polled\n",
+ io->irq);
+ io->irq = 0;
+ return -EINVAL;
+ }
+
+ io->irq_cleanup = acpi_gpe_irq_cleanup;
+ ipmi_irq_finish_setup(io);
+ dev_info(io->dev, "Using ACPI GPE %d\n", io->irq);
+ return 0;
+}
+#endif
+
+static void ipmi_set_addr_data_and_space(struct resource *r, struct si_sm_io *io)
+{
+ if (resource_type(r) == IORESOURCE_IO)
+ io->addr_space = IPMI_IO_ADDR_SPACE;
+ else
+ io->addr_space = IPMI_MEM_ADDR_SPACE;
+ io->addr_data = r->start;
+}
+
+static struct resource *
+ipmi_get_info_from_resources(struct platform_device *pdev,
+ struct si_sm_io *io)
+{
+ struct resource *res, *res_second;
+
+ res = platform_get_mem_or_io(pdev, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no I/O or memory address\n");
+ return NULL;
+ }
+ ipmi_set_addr_data_and_space(res, io);
+
+ io->regspacing = DEFAULT_REGSPACING;
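+ /*
+ * If a second resource of the same type follows, the gap between the
+ * two start addresses gives the register spacing.
+ */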
+ res_second = platform_get_mem_or_io(pdev, 1);
+ if (res_second && resource_type(res_second) == resource_type(res)) {
+ if (res_second->start > io->addr_data)
+ io->regspacing = res_second->start - io->addr_data;
+ }
+
+ return res;
+}
+
+static int platform_ipmi_probe(struct platform_device *pdev)
+{
+ struct si_sm_io io;
+ u8 type, slave_addr, addr_source, regsize, regshift;
+ int rv;
+
+ rv = device_property_read_u8(&pdev->dev, "addr-source", &addr_source);
+ if (rv)
+ addr_source = SI_PLATFORM;
+ if (addr_source >= SI_LAST)
+ return -EINVAL;
+
+ if (addr_source == SI_SMBIOS) {
+ if (!si_trydmi)
+ return -ENODEV;
+ } else if (addr_source != SI_HARDCODED) {
+ if (!si_tryplatform)
+ return -ENODEV;
+ }
+
+ rv = device_property_read_u8(&pdev->dev, "ipmi-type", &type);
+ if (rv)
+ return -ENODEV;
+
+ memset(&io, 0, sizeof(io));
+ io.addr_source = addr_source;
+ dev_info(&pdev->dev, "probing via %s\n",
+ ipmi_addr_src_to_str(addr_source));
+
+ switch (type) {
+ case SI_KCS:
+ case SI_SMIC:
+ case SI_BT:
+ io.si_type = type;
+ break;
+ case SI_TYPE_INVALID: /* User disabled this in hardcode. */
+ return -ENODEV;
+ default:
+ dev_err(&pdev->dev, "ipmi-type property is invalid\n");
+ return -EINVAL;
+ }
+
+ io.regsize = DEFAULT_REGSIZE;
+ rv = device_property_read_u8(&pdev->dev, "reg-size", &regsize);
+ if (!rv)
+ io.regsize = regsize;
+
+ io.regshift = 0;
+ rv = device_property_read_u8(&pdev->dev, "reg-shift", &regshift);
+ if (!rv)
+ io.regshift = regshift;
+
+ if (!ipmi_get_info_from_resources(pdev, &io))
+ return -EINVAL;
+
+ rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr);
+ if (rv)
+ io.slave_addr = 0x20;
+ else
+ io.slave_addr = slave_addr;
+
+ io.irq = platform_get_irq_optional(pdev, 0);
+ if (io.irq > 0)
+ io.irq_setup = ipmi_std_irq_setup;
+ else
+ io.irq = 0;
+
+ io.dev = &pdev->dev;
+
+ pr_info("ipmi_si: %s: %s %#lx regsize %d spacing %d irq %d\n",
+ ipmi_addr_src_to_str(addr_source),
+ (io.addr_space == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ io.addr_data, io.regsize, io.regspacing, io.irq);
+
+ ipmi_si_add_smi(&io);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_ipmi_match[] = {
+ { .type = "ipmi", .compatible = "ipmi-kcs",
+ .data = (void *)(unsigned long) SI_KCS },
+ { .type = "ipmi", .compatible = "ipmi-smic",
+ .data = (void *)(unsigned long) SI_SMIC },
+ { .type = "ipmi", .compatible = "ipmi-bt",
+ .data = (void *)(unsigned long) SI_BT },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_ipmi_match);
+
+static int of_ipmi_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct si_sm_io io;
+ struct resource resource;
+ const __be32 *regsize, *regspacing, *regshift;
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+ int proplen;
+
+ if (!si_tryopenfirmware)
+ return -ENODEV;
+
+ dev_info(&pdev->dev, "probing via device tree\n");
+
+ match = of_match_device(of_ipmi_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ if (!of_device_is_available(np))
+ return -EINVAL;
+
+ ret = of_address_to_resource(np, 0, &resource);
+ if (ret) {
+ dev_warn(&pdev->dev, "invalid address from OF\n");
+ return ret;
+ }
+
+ regsize = of_get_property(np, "reg-size", &proplen);
+ if (regsize && proplen != 4) {
+ dev_warn(&pdev->dev, "invalid regsize from OF\n");
+ return -EINVAL;
+ }
+
+ regspacing = of_get_property(np, "reg-spacing", &proplen);
+ if (regspacing && proplen != 4) {
+ dev_warn(&pdev->dev, "invalid regspacing from OF\n");
+ return -EINVAL;
+ }
+
+ regshift = of_get_property(np, "reg-shift", &proplen);
+ if (regshift && proplen != 4) {
+ dev_warn(&pdev->dev, "invalid regshift from OF\n");
+ return -EINVAL;
+ }
+
+ memset(&io, 0, sizeof(io));
+ io.si_type = (unsigned long) match->data;
+ io.addr_source = SI_DEVICETREE;
+ io.irq_setup = ipmi_std_irq_setup;
+
+ ipmi_set_addr_data_and_space(&resource, &io);
+
+ io.regsize = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE;
+ io.regspacing = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING;
+ io.regshift = regshift ? be32_to_cpup(regshift) : 0;
+
+ io.irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ io.dev = &pdev->dev;
+
+ dev_dbg(&pdev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
+ io.addr_data, io.regsize, io.regspacing, io.irq);
+
+ return ipmi_si_add_smi(&io);
+}
+#else
+#define of_ipmi_match NULL
+static int of_ipmi_probe(struct platform_device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+#ifdef CONFIG_ACPI
+static int find_slave_address(struct si_sm_io *io, int slave_addr)
+{
+#ifdef CONFIG_IPMI_DMI_DECODE
+ if (!slave_addr)
+ slave_addr = ipmi_dmi_get_slave_addr(io->si_type,
+ io->addr_space,
+ io->addr_data);
+#endif
+
+ return slave_addr;
+}
+
+static int acpi_ipmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct si_sm_io io;
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long tmp;
+ struct resource *res;
+
+ if (!si_tryacpi)
+ return -ENODEV;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
+ return -ENODEV;
+
+ memset(&io, 0, sizeof(io));
+ io.addr_source = SI_ACPI;
+ dev_info(dev, "probing via ACPI\n");
+
+ io.addr_info.acpi_info.acpi_handle = handle;
+
+ /* _IFT tells us the interface type: KCS, BT, etc */
+ status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "Could not find ACPI IPMI interface type\n");
+ return -EINVAL;
+ }
+
+ switch (tmp) {
+ case 1:
+ io.si_type = SI_KCS;
+ break;
+ case 2:
+ io.si_type = SI_SMIC;
+ break;
+ case 3:
+ io.si_type = SI_BT;
+ break;
+ case 4: /* SSIF, just ignore */
+ return -ENODEV;
+ default:
+ dev_info(dev, "unknown IPMI type %lld\n", tmp);
+ return -EINVAL;
+ }
+
+ io.dev = dev;
+ io.regsize = DEFAULT_REGSIZE;
+ io.regshift = 0;
+
+ res = ipmi_get_info_from_resources(pdev, &io);
+ if (!res)
+ return -EINVAL;
+
+ /* If _GPE exists, use it; otherwise use standard interrupts */
+ status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
+ if (ACPI_SUCCESS(status)) {
+ io.irq = tmp;
+ io.irq_setup = acpi_gpe_irq_setup;
+ } else {
+ int irq = platform_get_irq_optional(pdev, 0);
+
+ if (irq > 0) {
+ io.irq = irq;
+ io.irq_setup = ipmi_std_irq_setup;
+ }
+ }
+
+ io.slave_addr = find_slave_address(&io, io.slave_addr);
+
+ dev_info(dev, "%pR regsize %d spacing %d irq %d\n",
+ res, io.regsize, io.regspacing, io.irq);
+
+ request_module_nowait("acpi_ipmi");
+
+ return ipmi_si_add_smi(&io);
+}
+
+static const struct acpi_device_id acpi_ipmi_match[] = {
+ { "IPI0001", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, acpi_ipmi_match);
+#else
+static int acpi_ipmi_probe(struct platform_device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+static int ipmi_probe(struct platform_device *pdev)
+{
+ if (pdev->dev.of_node && of_ipmi_probe(pdev) == 0)
+ return 0;
+
+ if (acpi_ipmi_probe(pdev) == 0)
+ return 0;
+
+ return platform_ipmi_probe(pdev);
+}
+
+static int ipmi_remove(struct platform_device *pdev)
+{
+ ipmi_si_remove_by_dev(&pdev->dev);
+
+ return 0;
+}
+
+static int pdev_match_name(struct device *dev, const void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ const char *name = data;
+
+ return strcmp(pdev->name, name) == 0;
+}
+
+void ipmi_remove_platform_device_by_name(char *name)
+{
+ struct device *dev;
+
+ while ((dev = bus_find_device(&platform_bus_type, NULL, name,
+ pdev_match_name))) {
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+ put_device(dev);
+ }
+}
+
+static const struct platform_device_id si_plat_ids[] = {
+ { "dmi-ipmi-si", 0 },
+ { "hardcode-ipmi-si", 0 },
+ { "hotmod-ipmi-si", 0 },
+ { }
+};
+
+struct platform_driver ipmi_platform_driver = {
+ .driver = {
+ .name = SI_DEVICE_NAME,
+ .of_match_table = of_ipmi_match,
+ .acpi_match_table = ACPI_PTR(acpi_ipmi_match),
+ },
+ .probe = ipmi_probe,
+ .remove = ipmi_remove,
+ .id_table = si_plat_ids
+};
+
+void ipmi_si_platform_init(void)
+{
+ int rv = platform_driver_register(&ipmi_platform_driver);
+ if (rv)
+ pr_err("Unable to register driver: %d\n", rv);
+ else
+ platform_registered = true;
+}
+
+void ipmi_si_platform_shutdown(void)
+{
+ if (platform_registered)
+ platform_driver_unregister(&ipmi_platform_driver);
+}
diff --git a/drivers/char/ipmi/ipmi_si_port_io.c b/drivers/char/ipmi/ipmi_si_port_io.c
new file mode 100644
index 0000000000..7d66f68eb1
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_port_io.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/io.h>
+#include "ipmi_si.h"
+
+static unsigned char port_inb(const struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int addr = io->addr_data;
+
+ return inb(addr + (offset * io->regspacing));
+}
+
+static void port_outb(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int addr = io->addr_data;
+
+ outb(b, addr + (offset * io->regspacing));
+}
+
+static unsigned char port_inw(const struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int addr = io->addr_data;
+
+ return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
+}
+
+static void port_outw(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int addr = io->addr_data;
+
+ outw(b << io->regshift, addr + (offset * io->regspacing));
+}
+
+static unsigned char port_inl(const struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int addr = io->addr_data;
+
+ return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
+}
+
+static void port_outl(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int addr = io->addr_data;
+
+ outl(b << io->regshift, addr+(offset * io->regspacing));
+}
+
+static void port_cleanup(struct si_sm_io *io)
+{
+ unsigned int addr = io->addr_data;
+ int idx;
+
+ if (addr) {
+ for (idx = 0; idx < io->io_size; idx++)
+ release_region(addr + idx * io->regspacing,
+ io->regsize);
+ }
+}
+
+int ipmi_si_port_setup(struct si_sm_io *io)
+{
+ unsigned int addr = io->addr_data;
+ int idx;
+
+ if (!addr)
+ return -ENODEV;
+
+ /*
+ * Figure out the actual inb/inw/inl/etc routine to use based
+ * upon the register size.
+ */
+ switch (io->regsize) {
+ case 1:
+ io->inputb = port_inb;
+ io->outputb = port_outb;
+ break;
+ case 2:
+ io->inputb = port_inw;
+ io->outputb = port_outw;
+ break;
+ case 4:
+ io->inputb = port_inl;
+ io->outputb = port_outl;
+ break;
+ default:
+ dev_warn(io->dev, "Invalid register size: %d\n",
+ io->regsize);
+ return -EINVAL;
+ }
+
+ /*
+ * Some BIOSes reserve disjoint I/O regions in their ACPI
+ * tables. This causes problems when trying to register the
+ * entire I/O region. Therefore we must register each I/O
+ * port separately.
+ */
+ for (idx = 0; idx < io->io_size; idx++) {
+ if (request_region(addr + idx * io->regspacing,
+ io->regsize, SI_DEVICE_NAME) == NULL) {
+ /* Undo allocations */
+ while (idx--)
+ release_region(addr + idx * io->regspacing,
+ io->regsize);
+ return -EIO;
+ }
+ }
+
+ io->io_cleanup = port_cleanup;
+
+ return 0;
+}
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h
new file mode 100644
index 0000000000..c3cdbcab0f
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_sm.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ipmi_si_sm.h
+ *
+ * State machine interface for low-level IPMI system management
+ * interface state machines. This code is the interface between
+ * the ipmi_smi code (that handles the policy of a KCS, SMIC, or
+ * BT interface) and the actual low-level state machine.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ */
+
+#ifndef __IPMI_SI_SM_H__
+#define __IPMI_SI_SM_H__
+
+#include "ipmi_si.h"
+
+/*
+ * This is defined by the state machines themselves, it is an opaque
+ * data type for them to use.
+ */
+struct si_sm_data;
+
+/* Results of SMI events. */
+enum si_sm_result {
+ SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */
+ SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */
+ SI_SM_CALL_WITH_TICK_DELAY,/* Delay >=1 tick before calling again. */
+ SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */
+ SI_SM_IDLE, /* The SM is in idle state. */
+ SI_SM_HOSED, /* The hardware violated the state machine. */
+
+ /*
+ * The hardware is asserting attn and the state machine is
+ * idle.
+ */
+ SI_SM_ATTN
+};
+
+/* Handlers for the SMI state machine. */
+struct si_sm_handlers {
+ /*
+ * Put the version number of the state machine here so the
+ * upper layer can print it.
+ */
+ char *version;
+
+ /*
+ * Initialize the data and return the amount of I/O space to
+ * reserve for the space.
+ */
+ unsigned int (*init_data)(struct si_sm_data *smi,
+ struct si_sm_io *io);
+
+ /*
+ * Start a new transaction in the state machine. This will
+ * return -2 if the state machine is not idle, -1 if the size
+ * is invalid (too large or too small), or 0 if the transaction
+ * is successfully completed.
+ */
+ int (*start_transaction)(struct si_sm_data *smi,
+ unsigned char *data, unsigned int size);
+
+ /*
+ * Return the results after the transaction. This will return
+ * -1 if the buffer is too small, zero if no transaction is
+ * present, or the actual length of the result data.
+ */
+ int (*get_result)(struct si_sm_data *smi,
+ unsigned char *data, unsigned int length);
+
+ /*
+ * Call this periodically (for a polled interface) or upon
+ * receiving an interrupt (for an interrupt-driven interface).
+ * If interrupt driven, you should probably poll this
+ * periodically when not in idle state. This should be called
+ * with the time that passed since the last call, if it is
+ * significant. Time is in microseconds.
+ */
+ enum si_sm_result (*event)(struct si_sm_data *smi, long time);
+
+ /*
+ * Attempt to detect an SMI. Returns 0 on success or nonzero
+ * on failure.
+ */
+ int (*detect)(struct si_sm_data *smi);
+
+ /* The interface is shutting down, so clean it up. */
+ void (*cleanup)(struct si_sm_data *smi);
+
+ /* Return the size of the SMI structure in bytes. */
+ int (*size)(void);
+};
+
+/* Current state machines that we can use. */
+extern const struct si_sm_handlers kcs_smi_handlers;
+extern const struct si_sm_handlers smic_smi_handlers;
+extern const struct si_sm_handlers bt_smi_handlers;
+
+#endif /* __IPMI_SI_SM_H__ */
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
new file mode 100644
index 0000000000..bfea500d6f
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_smic_sm.c
+ *
+ * The state-machine driver for an IPMI SMIC driver
+ *
+ * It started as a copy of Corey Minyard's driver for the KCS interface
+ * and the kernel patch "mmcdev-patch-245" by HP
+ *
+ * modified by: Hannes Schulz <schulz@schwaar.com>
+ * ipmi@schwaar.com
+ *
+ *
+ * Corey Minyard's driver for the KCS interface has the following
+ * copyright notice:
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * the kernel patch "mmcdev-patch-245" by HP has the following
+ * copyright notice:
+ * (c) Copyright 2001 Grant Grundler (c) Copyright
+ * 2001 Hewlett-Packard Company
+ */
+
+#define DEBUG /* So dev_dbg() is always available. */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi_msgdefs.h> /* for completion codes */
+#include "ipmi_si_sm.h"
+
+/*
+ * smic_debug is a bit-field
+ * SMIC_DEBUG_ENABLE - turned on for now
+ * SMIC_DEBUG_MSG - commands and their responses
+ * SMIC_DEBUG_STATES - state machine
+ */
+#define SMIC_DEBUG_STATES 4
+#define SMIC_DEBUG_MSG 2
+#define SMIC_DEBUG_ENABLE 1
+
+static int smic_debug = 1;
+module_param(smic_debug, int, 0644);
+MODULE_PARM_DESC(smic_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
+
+enum smic_states {
+ SMIC_IDLE,
+ SMIC_START_OP,
+ SMIC_OP_OK,
+ SMIC_WRITE_START,
+ SMIC_WRITE_NEXT,
+ SMIC_WRITE_END,
+ SMIC_WRITE2READ,
+ SMIC_READ_START,
+ SMIC_READ_NEXT,
+ SMIC_READ_END,
+ SMIC_HOSED
+};
+
+#define MAX_SMIC_READ_SIZE 80
+#define MAX_SMIC_WRITE_SIZE 80
+#define SMIC_MAX_ERROR_RETRIES 3
+
+/* Timeouts in microseconds. */
+#define SMIC_RETRY_TIMEOUT (2*USEC_PER_SEC)
+
+/* SMIC Flags Register Bits */
+#define SMIC_RX_DATA_READY 0x80
+#define SMIC_TX_DATA_READY 0x40
+
+/*
+ * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by
+ * a few systems, and then only by Systems Management
+ * Interrupts, not by the OS. Always ignore these bits.
+ */
+#define SMIC_SMI 0x10
+#define SMIC_EVM_DATA_AVAIL 0x08
+#define SMIC_SMS_DATA_AVAIL 0x04
+#define SMIC_FLAG_BSY 0x01
+
+/* SMIC Error Codes */
+#define EC_NO_ERROR 0x00
+#define EC_ABORTED 0x01
+#define EC_ILLEGAL_CONTROL 0x02
+#define EC_NO_RESPONSE 0x03
+#define EC_ILLEGAL_COMMAND 0x04
+#define EC_BUFFER_FULL 0x05
+
+struct si_sm_data {
+ enum smic_states state;
+ struct si_sm_io *io;
+ unsigned char write_data[MAX_SMIC_WRITE_SIZE];
+ int write_pos;
+ int write_count;
+ int orig_write_count;
+ unsigned char read_data[MAX_SMIC_READ_SIZE];
+ int read_pos;
+ int truncated;
+ unsigned int error_retries;
+ long smic_timeout;
+};
+
+static unsigned int init_smic_data(struct si_sm_data *smic,
+ struct si_sm_io *io)
+{
+ smic->state = SMIC_IDLE;
+ smic->io = io;
+ smic->write_pos = 0;
+ smic->write_count = 0;
+ smic->orig_write_count = 0;
+ smic->read_pos = 0;
+ smic->error_retries = 0;
+ smic->truncated = 0;
+ smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+
+ /* We use 3 bytes of I/O. */
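+ /* Offset 0 is data, offset 1 is control/status, offset 2 is flags. */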
+ return 3;
+}
+
+static int start_smic_transaction(struct si_sm_data *smic,
+ unsigned char *data, unsigned int size)
+{
+ unsigned int i;
+
+ if (size < 2)
+ return IPMI_REQ_LEN_INVALID_ERR;
+ if (size > MAX_SMIC_WRITE_SIZE)
+ return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+ if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED)) {
+ dev_warn(smic->io->dev,
+ "SMIC in invalid state %d\n", smic->state);
+ return IPMI_NOT_IN_MY_STATE_ERR;
+ }
+
+ if (smic_debug & SMIC_DEBUG_MSG) {
+ dev_dbg(smic->io->dev, "%s -", __func__);
+ for (i = 0; i < size; i++)
+ pr_cont(" %02x", data[i]);
+ pr_cont("\n");
+ }
+ smic->error_retries = 0;
+ memcpy(smic->write_data, data, size);
+ smic->write_count = size;
+ smic->orig_write_count = size;
+ smic->write_pos = 0;
+ smic->read_pos = 0;
+ smic->state = SMIC_START_OP;
+ smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+ return 0;
+}
+
+static int smic_get_result(struct si_sm_data *smic,
+ unsigned char *data, unsigned int length)
+{
+ int i;
+
+ if (smic_debug & SMIC_DEBUG_MSG) {
+ dev_dbg(smic->io->dev, "smic_get result -");
+ for (i = 0; i < smic->read_pos; i++)
+ pr_cont(" %02x", smic->read_data[i]);
+ pr_cont("\n");
+ }
+ if (length < smic->read_pos) {
+ smic->read_pos = length;
+ smic->truncated = 1;
+ }
+ memcpy(data, smic->read_data, smic->read_pos);
+
+ if ((length >= 3) && (smic->read_pos < 3)) {
+ data[2] = IPMI_ERR_UNSPECIFIED;
+ smic->read_pos = 3;
+ }
+ if (smic->truncated) {
+ data[2] = IPMI_ERR_MSG_TRUNCATED;
+ smic->truncated = 0;
+ }
+ return smic->read_pos;
+}
+
+static inline unsigned char read_smic_flags(struct si_sm_data *smic)
+{
+ return smic->io->inputb(smic->io, 2);
+}
+
+static inline unsigned char read_smic_status(struct si_sm_data *smic)
+{
+ return smic->io->inputb(smic->io, 1);
+}
+
+static inline unsigned char read_smic_data(struct si_sm_data *smic)
+{
+ return smic->io->inputb(smic->io, 0);
+}
+
+static inline void write_smic_flags(struct si_sm_data *smic,
+ unsigned char flags)
+{
+ smic->io->outputb(smic->io, 2, flags);
+}
+
+static inline void write_smic_control(struct si_sm_data *smic,
+ unsigned char control)
+{
+ smic->io->outputb(smic->io, 1, control);
+}
+
+static inline void write_si_sm_data(struct si_sm_data *smic,
+ unsigned char data)
+{
+ smic->io->outputb(smic->io, 0, data);
+}
+
+static inline void start_error_recovery(struct si_sm_data *smic, char *reason)
+{
+ (smic->error_retries)++;
+ if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) {
+ if (smic_debug & SMIC_DEBUG_ENABLE)
+ pr_warn("ipmi_smic_drv: smic hosed: %s\n", reason);
+ smic->state = SMIC_HOSED;
+ } else {
+ smic->write_count = smic->orig_write_count;
+ smic->write_pos = 0;
+ smic->read_pos = 0;
+ smic->state = SMIC_START_OP;
+ smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+ }
+}
+
+static inline void write_next_byte(struct si_sm_data *smic)
+{
+ write_si_sm_data(smic, smic->write_data[smic->write_pos]);
+ (smic->write_pos)++;
+ (smic->write_count)--;
+}
+
+static inline void read_next_byte(struct si_sm_data *smic)
+{
+ if (smic->read_pos >= MAX_SMIC_READ_SIZE) {
+ read_smic_data(smic);
+ smic->truncated = 1;
+ } else {
+ smic->read_data[smic->read_pos] = read_smic_data(smic);
+ smic->read_pos++;
+ }
+}
+
+/* SMIC Control/Status Code Components */
+#define SMIC_GET_STATUS 0x00 /* Control form's name */
+#define SMIC_READY 0x00 /* Status form's name */
+#define SMIC_WR_START 0x01 /* Unified Control/Status names... */
+#define SMIC_WR_NEXT 0x02
+#define SMIC_WR_END 0x03
+#define SMIC_RD_START 0x04
+#define SMIC_RD_NEXT 0x05
+#define SMIC_RD_END 0x06
+#define SMIC_CODE_MASK 0x0f
+
+#define SMIC_CONTROL 0x00
+#define SMIC_STATUS 0x80
+#define SMIC_CS_MASK 0x80
+
+#define SMIC_SMS 0x40
+#define SMIC_SMM 0x60
+#define SMIC_STREAM_MASK 0x60
+
+/* SMIC Control Codes */
+#define SMIC_CC_SMS_GET_STATUS (SMIC_CONTROL|SMIC_SMS|SMIC_GET_STATUS)
+#define SMIC_CC_SMS_WR_START (SMIC_CONTROL|SMIC_SMS|SMIC_WR_START)
+#define SMIC_CC_SMS_WR_NEXT (SMIC_CONTROL|SMIC_SMS|SMIC_WR_NEXT)
+#define SMIC_CC_SMS_WR_END (SMIC_CONTROL|SMIC_SMS|SMIC_WR_END)
+#define SMIC_CC_SMS_RD_START (SMIC_CONTROL|SMIC_SMS|SMIC_RD_START)
+#define SMIC_CC_SMS_RD_NEXT (SMIC_CONTROL|SMIC_SMS|SMIC_RD_NEXT)
+#define SMIC_CC_SMS_RD_END (SMIC_CONTROL|SMIC_SMS|SMIC_RD_END)
+
+#define SMIC_CC_SMM_GET_STATUS (SMIC_CONTROL|SMIC_SMM|SMIC_GET_STATUS)
+#define SMIC_CC_SMM_WR_START (SMIC_CONTROL|SMIC_SMM|SMIC_WR_START)
+#define SMIC_CC_SMM_WR_NEXT (SMIC_CONTROL|SMIC_SMM|SMIC_WR_NEXT)
+#define SMIC_CC_SMM_WR_END (SMIC_CONTROL|SMIC_SMM|SMIC_WR_END)
+#define SMIC_CC_SMM_RD_START (SMIC_CONTROL|SMIC_SMM|SMIC_RD_START)
+#define SMIC_CC_SMM_RD_NEXT (SMIC_CONTROL|SMIC_SMM|SMIC_RD_NEXT)
+#define SMIC_CC_SMM_RD_END (SMIC_CONTROL|SMIC_SMM|SMIC_RD_END)
+
+/* SMIC Status Codes */
+#define SMIC_SC_SMS_READY (SMIC_STATUS|SMIC_SMS|SMIC_READY)
+#define SMIC_SC_SMS_WR_START (SMIC_STATUS|SMIC_SMS|SMIC_WR_START)
+#define SMIC_SC_SMS_WR_NEXT (SMIC_STATUS|SMIC_SMS|SMIC_WR_NEXT)
+#define SMIC_SC_SMS_WR_END (SMIC_STATUS|SMIC_SMS|SMIC_WR_END)
+#define SMIC_SC_SMS_RD_START (SMIC_STATUS|SMIC_SMS|SMIC_RD_START)
+#define SMIC_SC_SMS_RD_NEXT (SMIC_STATUS|SMIC_SMS|SMIC_RD_NEXT)
+#define SMIC_SC_SMS_RD_END (SMIC_STATUS|SMIC_SMS|SMIC_RD_END)
+
+#define SMIC_SC_SMM_READY (SMIC_STATUS|SMIC_SMM|SMIC_READY)
+#define SMIC_SC_SMM_WR_START (SMIC_STATUS|SMIC_SMM|SMIC_WR_START)
+#define SMIC_SC_SMM_WR_NEXT (SMIC_STATUS|SMIC_SMM|SMIC_WR_NEXT)
+#define SMIC_SC_SMM_WR_END (SMIC_STATUS|SMIC_SMM|SMIC_WR_END)
+#define SMIC_SC_SMM_RD_START (SMIC_STATUS|SMIC_SMM|SMIC_RD_START)
+#define SMIC_SC_SMM_RD_NEXT (SMIC_STATUS|SMIC_SMM|SMIC_RD_NEXT)
+#define SMIC_SC_SMM_RD_END (SMIC_STATUS|SMIC_SMM|SMIC_RD_END)
+
+/* these are the control/status codes we actually use
+ SMIC_CC_SMS_GET_STATUS 0x40
+ SMIC_CC_SMS_WR_START 0x41
+ SMIC_CC_SMS_WR_NEXT 0x42
+ SMIC_CC_SMS_WR_END 0x43
+ SMIC_CC_SMS_RD_START 0x44
+ SMIC_CC_SMS_RD_NEXT 0x45
+ SMIC_CC_SMS_RD_END 0x46
+
+ SMIC_SC_SMS_READY 0xC0
+ SMIC_SC_SMS_WR_START 0xC1
+ SMIC_SC_SMS_WR_NEXT 0xC2
+ SMIC_SC_SMS_WR_END 0xC3
+ SMIC_SC_SMS_RD_START 0xC4
+ SMIC_SC_SMS_RD_NEXT 0xC5
+ SMIC_SC_SMS_RD_END 0xC6
+ */
+
+static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
+{
+ unsigned char status;
+ unsigned char flags;
+ unsigned char data;
+
+ if (smic->state == SMIC_HOSED) {
+ init_smic_data(smic, smic->io);
+ return SI_SM_HOSED;
+ }
+ if (smic->state != SMIC_IDLE) {
+ if (smic_debug & SMIC_DEBUG_STATES)
+ dev_dbg(smic->io->dev,
+ "%s - smic->smic_timeout = %ld, time = %ld\n",
+ __func__, smic->smic_timeout, time);
+ /*
+ * FIXME: smic_event is sometimes called with time >
+ * SMIC_RETRY_TIMEOUT
+ */
+ if (time < SMIC_RETRY_TIMEOUT) {
+ smic->smic_timeout -= time;
+ if (smic->smic_timeout < 0) {
+ start_error_recovery(smic, "smic timed out.");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ }
+ }
+ flags = read_smic_flags(smic);
+ if (flags & SMIC_FLAG_BSY)
+ return SI_SM_CALL_WITH_DELAY;
+
+ status = read_smic_status(smic);
+ if (smic_debug & SMIC_DEBUG_STATES)
+ dev_dbg(smic->io->dev,
+ "%s - state = %d, flags = 0x%02x, status = 0x%02x\n",
+ __func__, smic->state, flags, status);
+
+ switch (smic->state) {
+ case SMIC_IDLE:
+ /* in IDLE we check for available messages */
+ if (flags & SMIC_SMS_DATA_AVAIL)
+ return SI_SM_ATTN;
+ return SI_SM_IDLE;
+
+ case SMIC_START_OP:
+ /* sanity check whether smic is really idle */
+ write_smic_control(smic, SMIC_CC_SMS_GET_STATUS);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_OP_OK;
+ break;
+
+ case SMIC_OP_OK:
+ if (status != SMIC_SC_SMS_READY) {
+ /* this should not happen */
+ start_error_recovery(smic,
+ "state = SMIC_OP_OK,"
+ " status != SMIC_SC_SMS_READY");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ /* OK so far; smic is idle let us start ... */
+ write_smic_control(smic, SMIC_CC_SMS_WR_START);
+ write_next_byte(smic);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_WRITE_START;
+ break;
+
+ case SMIC_WRITE_START:
+ if (status != SMIC_SC_SMS_WR_START) {
+ start_error_recovery(smic,
+ "state = SMIC_WRITE_START, "
+ "status != SMIC_SC_SMS_WR_START");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ /*
+ * we must not issue WR_(NEXT|END) unless
+ * TX_DATA_READY is set
+ */
+ if (flags & SMIC_TX_DATA_READY) {
+ if (smic->write_count == 1) {
+ /* last byte */
+ write_smic_control(smic, SMIC_CC_SMS_WR_END);
+ smic->state = SMIC_WRITE_END;
+ } else {
+ write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
+ smic->state = SMIC_WRITE_NEXT;
+ }
+ write_next_byte(smic);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ } else
+ return SI_SM_CALL_WITH_DELAY;
+ break;
+
+ case SMIC_WRITE_NEXT:
+ if (status != SMIC_SC_SMS_WR_NEXT) {
+ start_error_recovery(smic,
+ "state = SMIC_WRITE_NEXT, "
+ "status != SMIC_SC_SMS_WR_NEXT");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ /* this is the same code as in SMIC_WRITE_START */
+ if (flags & SMIC_TX_DATA_READY) {
+ if (smic->write_count == 1) {
+ write_smic_control(smic, SMIC_CC_SMS_WR_END);
+ smic->state = SMIC_WRITE_END;
+ } else {
+ write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
+ smic->state = SMIC_WRITE_NEXT;
+ }
+ write_next_byte(smic);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ } else
+ return SI_SM_CALL_WITH_DELAY;
+ break;
+
+ case SMIC_WRITE_END:
+ if (status != SMIC_SC_SMS_WR_END) {
+ start_error_recovery(smic,
+ "state = SMIC_WRITE_END, "
+ "status != SMIC_SC_SMS_WR_END");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ /* data register holds an error code */
+ data = read_smic_data(smic);
+ if (data != 0) {
+ if (smic_debug & SMIC_DEBUG_ENABLE)
+ dev_dbg(smic->io->dev,
+ "SMIC_WRITE_END: data = %02x\n",
+ data);
+ start_error_recovery(smic,
+ "state = SMIC_WRITE_END, "
+ "data != SUCCESS");
+ return SI_SM_CALL_WITH_DELAY;
+ } else
+ smic->state = SMIC_WRITE2READ;
+ break;
+
+ case SMIC_WRITE2READ:
+ /*
+ * we must wait for RX_DATA_READY to be set before we
+ * can continue
+ */
+ if (flags & SMIC_RX_DATA_READY) {
+ write_smic_control(smic, SMIC_CC_SMS_RD_START);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_READ_START;
+ } else
+ return SI_SM_CALL_WITH_DELAY;
+ break;
+
+ case SMIC_READ_START:
+ if (status != SMIC_SC_SMS_RD_START) {
+ start_error_recovery(smic,
+ "state = SMIC_READ_START, "
+ "status != SMIC_SC_SMS_RD_START");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ if (flags & SMIC_RX_DATA_READY) {
+ read_next_byte(smic);
+ write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_READ_NEXT;
+ } else
+ return SI_SM_CALL_WITH_DELAY;
+ break;
+
+ case SMIC_READ_NEXT:
+ switch (status) {
+ /*
+ * smic tells us that this is the last byte to be read
+ * --> clean up
+ */
+ case SMIC_SC_SMS_RD_END:
+ read_next_byte(smic);
+ write_smic_control(smic, SMIC_CC_SMS_RD_END);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_READ_END;
+ break;
+ case SMIC_SC_SMS_RD_NEXT:
+ if (flags & SMIC_RX_DATA_READY) {
+ read_next_byte(smic);
+ write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_READ_NEXT;
+ } else
+ return SI_SM_CALL_WITH_DELAY;
+ break;
+ default:
+ start_error_recovery(
+ smic,
+ "state = SMIC_READ_NEXT, "
+ "status != SMIC_SC_SMS_RD_(NEXT|END)");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ break;
+
+ case SMIC_READ_END:
+ if (status != SMIC_SC_SMS_READY) {
+ start_error_recovery(smic,
+ "state = SMIC_READ_END, "
+ "status != SMIC_SC_SMS_READY");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ data = read_smic_data(smic);
+ /* data register holds an error code */
+ if (data != 0) {
+ if (smic_debug & SMIC_DEBUG_ENABLE)
+ dev_dbg(smic->io->dev,
+ "SMIC_READ_END: data = %02x\n",
+ data);
+ start_error_recovery(smic,
+ "state = SMIC_READ_END, "
+ "data != SUCCESS");
+ return SI_SM_CALL_WITH_DELAY;
+ } else {
+ smic->state = SMIC_IDLE;
+ return SI_SM_TRANSACTION_COMPLETE;
+ }
+
+ case SMIC_HOSED:
+ init_smic_data(smic, smic->io);
+ return SI_SM_HOSED;
+
+ default:
+ if (smic_debug & SMIC_DEBUG_ENABLE) {
+ dev_dbg(smic->io->dev,
+ "smic->state = %d\n", smic->state);
+ start_error_recovery(smic, "state = UNKNOWN");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ }
+ smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+ return SI_SM_CALL_WITHOUT_DELAY;
+}
+
+static int smic_detect(struct si_sm_data *smic)
+{
+ /*
+ * It's impossible for the SMIC flags register to be all 1's
+ * (assuming a properly functioning, self-initialized BMC),
+ * but that's what you get from reading a bogus address, so we
+ * test that first.
+ */
+ if (read_smic_flags(smic) == 0xff)
+ return 1;
+
+ return 0;
+}
+
+static void smic_cleanup(struct si_sm_data *smic)
+{
+}
+
+static int smic_size(void)
+{
+ return sizeof(struct si_sm_data);
+}
+
+const struct si_sm_handlers smic_smi_handlers = {
+ .init_data = init_smic_data,
+ .start_transaction = start_smic_transaction,
+ .get_result = smic_get_result,
+ .event = smic_event,
+ .detect = smic_detect,
+ .cleanup = smic_cleanup,
+ .size = smic_size,
+};
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
new file mode 100644
index 0000000000..df8dd50b4c
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -0,0 +1,2162 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_ssif.c
+ *
+ * The interface to the IPMI driver for SMBus access to a SMBus
+ * compliant device. Called SSIF by the IPMI spec.
+ *
+ * Author: Intel Corporation
+ * Todd Davis <todd.c.davis@intel.com>
+ *
+ * Rewritten by Corey Minyard <minyard@acm.org> to support the
+ * non-blocking I2C interface, add support for multi-part
+ * transactions, add PEC support, and general cleanup.
+ *
+ * Copyright 2003 Intel Corporation
+ * Copyright 2005 MontaVista Software
+ */
+
+/*
+ * This file holds the "policy" for the interface to the SSIF state
+ * machine. It does the configuration, handles timers and interrupts,
+ * and drives the real SSIF state machine.
+ */
+
+#define pr_fmt(fmt) "ipmi_ssif: " fmt
+#define dev_fmt(fmt) "ipmi_ssif: " fmt
+
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/ipmi_smi.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/kthread.h>
+#include <linux/acpi.h>
+#include <linux/ctype.h>
+#include <linux/time64.h>
+#include "ipmi_dmi.h"
+
+#define DEVICE_NAME "ipmi_ssif"
+
+#define IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD 0x57
+
+#define SSIF_IPMI_REQUEST 2
+#define SSIF_IPMI_MULTI_PART_REQUEST_START 6
+#define SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE 7
+#define SSIF_IPMI_MULTI_PART_REQUEST_END 8
+#define SSIF_IPMI_RESPONSE 3
+#define SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE 9
+
+/*
+ * ssif_debug is a bit-field
+ * SSIF_DEBUG_MSG - commands and their responses
+ * SSIF_DEBUG_STATE - message states
+ * SSIF_DEBUG_TIMING - measure times between events in the driver
+ */
+#define SSIF_DEBUG_TIMING 4
+#define SSIF_DEBUG_STATE 2
+#define SSIF_DEBUG_MSG 1
+#define SSIF_NODEBUG 0
+#define SSIF_DEFAULT_DEBUG (SSIF_NODEBUG)
+
+/*
+ * Timer values
+ */
+#define SSIF_MSG_USEC 60000 /* 60ms between message tries (T3). */
+#define SSIF_REQ_RETRY_USEC 60000 /* 60ms between send retries (T6). */
+#define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */
+
+/* How many times do we retry sending/receiving the message. */
+#define SSIF_SEND_RETRIES 5
+#define SSIF_RECV_RETRIES 250
+
+#define SSIF_MSG_MSEC (SSIF_MSG_USEC / 1000)
+#define SSIF_REQ_RETRY_MSEC (SSIF_REQ_RETRY_USEC / 1000)
+#define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC)
+#define SSIF_REQ_RETRY_JIFFIES ((SSIF_REQ_RETRY_USEC * 1000) / TICK_NSEC)
+#define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
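+/*
+ * The *_JIFFIES values convert the microsecond timeouts above to jiffies:
+ * multiplying by 1000 gives nanoseconds, which are then divided by
+ * TICK_NSEC (nanoseconds per jiffy).
+ */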
+
+/*
+ * Timeout for the watch, only used for the get-flags timer.
+ */
+#define SSIF_WATCH_MSG_TIMEOUT msecs_to_jiffies(10)
+#define SSIF_WATCH_WATCHDOG_TIMEOUT msecs_to_jiffies(250)
+
+enum ssif_intf_state {
+ SSIF_IDLE,
+ SSIF_GETTING_FLAGS,
+ SSIF_GETTING_EVENTS,
+ SSIF_CLEARING_FLAGS,
+ SSIF_GETTING_MESSAGES,
+ /* FIXME - add watchdog stuff. */
+};
+
+#define IS_SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_IDLE \
+ && (ssif)->curr_msg == NULL)
+
+/*
+ * Indexes into stats[] in ssif_info below.
+ */
+enum ssif_stat_indexes {
+ /* Number of total messages sent. */
+ SSIF_STAT_sent_messages = 0,
+
+ /*
+ * Number of message parts sent. Messages may be broken into
+ * parts if they are long.
+ */
+ SSIF_STAT_sent_messages_parts,
+
+ /*
+ * Number of time a message was retried.
+ */
+ SSIF_STAT_send_retries,
+
+ /*
+ * Number of times the send of a message failed.
+ */
+ SSIF_STAT_send_errors,
+
+ /*
+ * Number of message responses received.
+ */
+ SSIF_STAT_received_messages,
+
+ /*
+ * Number of message fragments received.
+ */
+ SSIF_STAT_received_message_parts,
+
+ /*
+ * Number of times the receive of a message was retried.
+ */
+ SSIF_STAT_receive_retries,
+
+ /*
+ * Number of errors receiving messages.
+ */
+ SSIF_STAT_receive_errors,
+
+ /*
+ * Number of times a flag fetch was requested.
+ */
+ SSIF_STAT_flag_fetches,
+
+ /*
+ * Number of times the hardware didn't follow the state machine.
+ */
+ SSIF_STAT_hosed,
+
+ /*
+ * Number of received events.
+ */
+ SSIF_STAT_events,
+
+ /* Number of asynchronous messages received. */
+ SSIF_STAT_incoming_messages,
+
+ /* Number of watchdog pretimeouts. */
+ SSIF_STAT_watchdog_pretimeouts,
+
+ /* Number of alerts received. */
+ SSIF_STAT_alerts,
+
+ /* Always add statistics before this value, it must be last. */
+ SSIF_NUM_STATS
+};
+
+struct ssif_addr_info {
+ struct i2c_board_info binfo;
+ char *adapter_name;
+ int debug;
+ int slave_addr;
+ enum ipmi_addr_src addr_src;
+ union ipmi_smi_info_union addr_info;
+ struct device *dev;
+ struct i2c_client *client;
+
+ struct mutex clients_mutex;
+ struct list_head clients;
+
+ struct list_head link;
+};
+
+struct ssif_info;
+
+typedef void (*ssif_i2c_done)(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len);
+
+struct ssif_info {
+ struct ipmi_smi *intf;
+ spinlock_t lock;
+ struct ipmi_smi_msg *waiting_msg;
+ struct ipmi_smi_msg *curr_msg;
+ enum ssif_intf_state ssif_state;
+ unsigned long ssif_debug;
+
+ struct ipmi_smi_handlers handlers;
+
+ enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
+ union ipmi_smi_info_union addr_info;
+
+ /*
+ * Flags from the last GET_MSG_FLAGS command, used when an ATTN
+ * is set to hold the flags until we are done handling everything
+ * from the flags.
+ */
+#define RECEIVE_MSG_AVAIL 0x01
+#define EVENT_MSG_BUFFER_FULL 0x02
+#define WDT_PRE_TIMEOUT_INT 0x08
+ unsigned char msg_flags;
+
+ u8 global_enables;
+ bool has_event_buffer;
+ bool supports_alert;
+
+ /*
+ * Used to tell what we should do with alerts. If we are
+ * waiting on a response, read the data immediately.
+ */
+ bool got_alert;
+ bool waiting_alert;
+
+ /* Used to inform the timeout that it should do a resend. */
+ bool do_resend;
+
+ /*
+ * If set to true, this will request events the next time the
+ * state machine is idle.
+ */
+ bool req_events;
+
+ /*
+ * If set to true, this will request flags the next time the
+ * state machine is idle.
+ */
+ bool req_flags;
+
+ /* Used for sending/receiving data. +1 for the length. */
+ unsigned char data[IPMI_MAX_MSG_LENGTH + 1];
+ unsigned int data_len;
+
+ /* Temp receive buffer, gets copied into data. */
+ unsigned char recv[I2C_SMBUS_BLOCK_MAX];
+
+ struct i2c_client *client;
+ ssif_i2c_done done_handler;
+
+ /* Thread interface handling */
+ struct task_struct *thread;
+ struct completion wake_thread;
+ bool stopping;
+ int i2c_read_write;
+ int i2c_command;
+ unsigned char *i2c_data;
+ unsigned int i2c_size;
+
+ struct timer_list retry_timer;
+ int retries_left;
+
+ long watch_timeout; /* Timeout for flags check, 0 if off. */
+ struct timer_list watch_timer; /* Flag fetch timer. */
+
+ /* Info from SSIF cmd */
+ unsigned char max_xmit_msg_size;
+ unsigned char max_recv_msg_size;
+ bool cmd8_works; /* See test_multipart_messages() for details. */
+ unsigned int multi_support;
+ int supports_pec;
+
+#define SSIF_NO_MULTI 0
+#define SSIF_MULTI_2_PART 1
+#define SSIF_MULTI_n_PART 2
+ unsigned char *multi_data;
+ unsigned int multi_len;
+ unsigned int multi_pos;
+
+ atomic_t stats[SSIF_NUM_STATS];
+};
+
+#define ssif_inc_stat(ssif, stat) \
+ atomic_inc(&(ssif)->stats[SSIF_STAT_ ## stat])
+#define ssif_get_stat(ssif, stat) \
+ ((unsigned int) atomic_read(&(ssif)->stats[SSIF_STAT_ ## stat]))
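+/*
+ * Illustrative note (not part of the original driver): these helpers
+ * paste the short stat name onto the enum prefix, so for example
+ * ssif_inc_stat(info, sent_messages) expands to
+ * atomic_inc(&(info)->stats[SSIF_STAT_sent_messages]).
+ */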
+
+static bool initialized;
+static bool platform_registered;
+
+static void return_hosed_msg(struct ssif_info *ssif_info,
+ struct ipmi_smi_msg *msg);
+static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags);
+static int start_send(struct ssif_info *ssif_info,
+ unsigned char *data,
+ unsigned int len);
+
+static unsigned long *ipmi_ssif_lock_cond(struct ssif_info *ssif_info,
+ unsigned long *flags)
+ __acquires(&ssif_info->lock)
+{
+ spin_lock_irqsave(&ssif_info->lock, *flags);
+ return flags;
+}
+
+static void ipmi_ssif_unlock_cond(struct ssif_info *ssif_info,
+ unsigned long *flags)
+ __releases(&ssif_info->lock)
+{
+ spin_unlock_irqrestore(&ssif_info->lock, *flags);
+}
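+/*
+ * Typical locking idiom in this file (illustrative): callers declare
+ * "unsigned long oflags, *flags;" and bracket critical sections with
+ *   flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ *   ...
+ *   ipmi_ssif_unlock_cond(ssif_info, flags);
+ */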
+
+static void deliver_recv_msg(struct ssif_info *ssif_info,
+ struct ipmi_smi_msg *msg)
+{
+ if (msg->rsp_size < 0) {
+ return_hosed_msg(ssif_info, msg);
+ dev_err(&ssif_info->client->dev,
+ "%s: Malformed message: rsp_size = %d\n",
+ __func__, msg->rsp_size);
+ } else {
+ ipmi_smi_msg_received(ssif_info->intf, msg);
+ }
+}
+
+static void return_hosed_msg(struct ssif_info *ssif_info,
+ struct ipmi_smi_msg *msg)
+{
+ ssif_inc_stat(ssif_info, hosed);
+
+ /* Make it a response */
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = 0xFF; /* Unknown error. */
+ msg->rsp_size = 3;
+
+ deliver_recv_msg(ssif_info, msg);
+}
+
+/*
+ * Must be called with the message lock held. This will release the
+ * message lock. Note that the caller will check IS_SSIF_IDLE and
+ * start a new operation, so there is no need to check for new
+ * messages to start in here.
+ */
+static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ unsigned char msg[3];
+
+ ssif_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
+ ssif_info->ssif_state = SSIF_CLEARING_FLAGS;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ /* Make sure the watchdog pre-timeout flag is not set at startup. */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+ msg[2] = WDT_PRE_TIMEOUT_INT;
+
+ if (start_send(ssif_info, msg, 3) != 0) {
+ /* Error, just go to normal state. */
+ ssif_info->ssif_state = SSIF_IDLE;
+ }
+}
+
+static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ unsigned char mb[2];
+
+ ssif_info->req_flags = false;
+ ssif_info->ssif_state = SSIF_GETTING_FLAGS;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ mb[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ mb[1] = IPMI_GET_MSG_FLAGS_CMD;
+ if (start_send(ssif_info, mb, 2) != 0)
+ ssif_info->ssif_state = SSIF_IDLE;
+}
+
+static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
+ struct ipmi_smi_msg *msg)
+{
+ if (start_send(ssif_info, msg->data, msg->data_size) != 0) {
+ unsigned long oflags;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ ssif_info->curr_msg = NULL;
+ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ ipmi_free_smi_msg(msg);
+ }
+}
+
+static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ struct ipmi_smi_msg *msg;
+
+ ssif_info->req_events = false;
+
+ msg = ipmi_alloc_smi_msg();
+ if (!msg) {
+ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ return;
+ }
+
+ ssif_info->curr_msg = msg;
+ ssif_info->ssif_state = SSIF_GETTING_EVENTS;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+ msg->data_size = 2;
+
+ check_start_send(ssif_info, flags, msg);
+}
+
+static void start_recv_msg_fetch(struct ssif_info *ssif_info,
+ unsigned long *flags)
+{
+ struct ipmi_smi_msg *msg;
+
+ msg = ipmi_alloc_smi_msg();
+ if (!msg) {
+ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ return;
+ }
+
+ ssif_info->curr_msg = msg;
+ ssif_info->ssif_state = SSIF_GETTING_MESSAGES;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_GET_MSG_CMD;
+ msg->data_size = 2;
+
+ check_start_send(ssif_info, flags, msg);
+}
+
+/*
+ * Must be called with the message lock held. This will release the
+ * message lock. Note that the caller will check IS_SSIF_IDLE and
+ * start a new operation, so there is no need to check for new
+ * messages to start in here.
+ */
+static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ if (ssif_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
+ /* Watchdog pre-timeout */
+ ssif_inc_stat(ssif_info, watchdog_pretimeouts);
+ start_clear_flags(ssif_info, flags);
+ ipmi_smi_watchdog_pretimeout(ssif_info->intf);
+ } else if (ssif_info->msg_flags & RECEIVE_MSG_AVAIL)
+ /* Messages available. */
+ start_recv_msg_fetch(ssif_info, flags);
+ else if (ssif_info->msg_flags & EVENT_MSG_BUFFER_FULL)
+ /* Events available. */
+ start_event_fetch(ssif_info, flags);
+ else {
+ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ }
+}
+
+static int ipmi_ssif_thread(void *data)
+{
+ struct ssif_info *ssif_info = data;
+
+ while (!kthread_should_stop()) {
+ int result;
+
+ /* Wait for something to do */
+ result = wait_for_completion_interruptible(
+ &ssif_info->wake_thread);
+ if (ssif_info->stopping)
+ break;
+ if (result == -ERESTARTSYS)
+ continue;
+ init_completion(&ssif_info->wake_thread);
+
+ if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) {
+ result = i2c_smbus_write_block_data(
+ ssif_info->client, ssif_info->i2c_command,
+ ssif_info->i2c_data[0],
+ ssif_info->i2c_data + 1);
+ ssif_info->done_handler(ssif_info, result, NULL, 0);
+ } else {
+ result = i2c_smbus_read_block_data(
+ ssif_info->client, ssif_info->i2c_command,
+ ssif_info->i2c_data);
+ if (result < 0)
+ ssif_info->done_handler(ssif_info, result,
+ NULL, 0);
+ else
+ ssif_info->done_handler(ssif_info, 0,
+ ssif_info->i2c_data,
+ result);
+ }
+ }
+
+ return 0;
+}
+
+static void ssif_i2c_send(struct ssif_info *ssif_info,
+ ssif_i2c_done handler,
+ int read_write, int command,
+ unsigned char *data, unsigned int size)
+{
+ ssif_info->done_handler = handler;
+
+ ssif_info->i2c_read_write = read_write;
+ ssif_info->i2c_command = command;
+ ssif_info->i2c_data = data;
+ ssif_info->i2c_size = size;
+ complete(&ssif_info->wake_thread);
+}
+
+
+static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len);
+
+static void start_get(struct ssif_info *ssif_info)
+{
+ ssif_info->multi_pos = 0;
+
+ ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+ SSIF_IPMI_RESPONSE,
+ ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+}
+
+static void start_resend(struct ssif_info *ssif_info);
+
+static void retry_timeout(struct timer_list *t)
+{
+ struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
+ unsigned long oflags, *flags;
+ bool waiting, resend;
+
+ if (ssif_info->stopping)
+ return;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ resend = ssif_info->do_resend;
+ ssif_info->do_resend = false;
+ waiting = ssif_info->waiting_alert;
+ ssif_info->waiting_alert = false;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ if (waiting)
+ start_get(ssif_info);
+ if (resend) {
+ start_resend(ssif_info);
+ ssif_inc_stat(ssif_info, send_retries);
+ }
+}
+
+static void watch_timeout(struct timer_list *t)
+{
+ struct ssif_info *ssif_info = from_timer(ssif_info, t, watch_timer);
+ unsigned long oflags, *flags;
+
+ if (ssif_info->stopping)
+ return;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ if (ssif_info->watch_timeout) {
+ mod_timer(&ssif_info->watch_timer,
+ jiffies + ssif_info->watch_timeout);
+ if (IS_SSIF_IDLE(ssif_info)) {
+ start_flag_fetch(ssif_info, flags); /* Releases lock */
+ return;
+ }
+ ssif_info->req_flags = true;
+ }
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+}
+
+static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
+ unsigned int data)
+{
+ struct ssif_info *ssif_info = i2c_get_clientdata(client);
+ unsigned long oflags, *flags;
+ bool do_get = false;
+
+ if (type != I2C_PROTOCOL_SMBUS_ALERT)
+ return;
+
+ ssif_inc_stat(ssif_info, alerts);
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ if (ssif_info->waiting_alert) {
+ ssif_info->waiting_alert = false;
+ del_timer(&ssif_info->retry_timer);
+ do_get = true;
+ } else if (ssif_info->curr_msg) {
+ ssif_info->got_alert = true;
+ }
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ if (do_get)
+ start_get(ssif_info);
+}
+
+static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len)
+{
+ struct ipmi_smi_msg *msg;
+ unsigned long oflags, *flags;
+
+ /*
+ * We are single-threaded here, so no need for a lock until we
+ * start messing with driver states or the queues.
+ */
+
+ if (result < 0) {
+ ssif_info->retries_left--;
+ if (ssif_info->retries_left > 0) {
+ ssif_inc_stat(ssif_info, receive_retries);
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ ssif_info->waiting_alert = true;
+ if (!ssif_info->stopping)
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_MSG_JIFFIES);
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ return;
+ }
+
+ ssif_inc_stat(ssif_info, receive_errors);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ dev_dbg(&ssif_info->client->dev,
+ "%s: Error %d\n", __func__, result);
+ len = 0;
+ goto continue_op;
+ }
+
+ if ((len > 1) && (ssif_info->multi_pos == 0)
+ && (data[0] == 0x00) && (data[1] == 0x01)) {
+ /* Start of multi-part read. Start the next transaction. */
+ int i;
+
+ ssif_inc_stat(ssif_info, received_message_parts);
+
+ /* Remove the multi-part read marker. */
+ len -= 2;
+ data += 2;
+ for (i = 0; i < len; i++)
+ ssif_info->data[i] = data[i];
+ ssif_info->multi_len = len;
+ ssif_info->multi_pos = 1;
+
+ ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+ SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
+ ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+ return;
+ } else if (ssif_info->multi_pos) {
+ /* Middle of multi-part read. Start the next transaction. */
+ int i;
+ unsigned char blocknum;
+
+ if (len == 0) {
+ result = -EIO;
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ dev_dbg(&ssif_info->client->dev,
+ "Middle message with no data\n");
+
+ goto continue_op;
+ }
+
+ blocknum = data[0];
+ len--;
+ data++;
+
+ if (blocknum != 0xff && len != 31) {
+ /* All blocks but the last must have 31 data bytes. */
+ result = -EIO;
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ dev_dbg(&ssif_info->client->dev,
+ "Received middle message <31\n");
+
+ goto continue_op;
+ }
+
+ if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
+ /* Received message too big, abort the operation. */
+ result = -E2BIG;
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ dev_dbg(&ssif_info->client->dev,
+ "Received message too big\n");
+
+ goto continue_op;
+ }
+
+ for (i = 0; i < len; i++)
+ ssif_info->data[i + ssif_info->multi_len] = data[i];
+ ssif_info->multi_len += len;
+ if (blocknum == 0xff) {
+ /* End of read */
+ len = ssif_info->multi_len;
+ data = ssif_info->data;
+ } else if (blocknum + 1 != ssif_info->multi_pos) {
+ /*
+ * Out of sequence block, just abort. Block
+ * numbers start at zero for the second block,
+ * but multi_pos starts at one, so the +1.
+ */
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ dev_dbg(&ssif_info->client->dev,
+ "Received message out of sequence, expected %u, got %u\n",
+ ssif_info->multi_pos - 1, blocknum);
+ result = -EIO;
+ } else {
+ ssif_inc_stat(ssif_info, received_message_parts);
+
+ ssif_info->multi_pos++;
+
+ ssif_i2c_send(ssif_info, msg_done_handler,
+ I2C_SMBUS_READ,
+ SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
+ ssif_info->recv,
+ I2C_SMBUS_BLOCK_DATA);
+ return;
+ }
+ }
+
+ continue_op:
+ if (result < 0) {
+ ssif_inc_stat(ssif_info, receive_errors);
+ } else {
+ ssif_inc_stat(ssif_info, received_messages);
+ ssif_inc_stat(ssif_info, received_message_parts);
+ }
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
+ dev_dbg(&ssif_info->client->dev,
+ "DONE 1: state = %d, result=%d\n",
+ ssif_info->ssif_state, result);
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ msg = ssif_info->curr_msg;
+ if (msg) {
+ if (data) {
+ if (len > IPMI_MAX_MSG_LENGTH)
+ len = IPMI_MAX_MSG_LENGTH;
+ memcpy(msg->rsp, data, len);
+ } else {
+ len = 0;
+ }
+ msg->rsp_size = len;
+ ssif_info->curr_msg = NULL;
+ }
+
+ switch (ssif_info->ssif_state) {
+ case SSIF_IDLE:
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ if (!msg)
+ break;
+
+ if (result < 0)
+ return_hosed_msg(ssif_info, msg);
+ else
+ deliver_recv_msg(ssif_info, msg);
+ break;
+
+ case SSIF_GETTING_FLAGS:
+ /* We got the flags from the SSIF, now handle them. */
+ if ((result < 0) || (len < 4) || (data[2] != 0)) {
+ /*
+ * Error fetching flags, or invalid length,
+ * just give up for now.
+ */
+ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ dev_warn(&ssif_info->client->dev,
+ "Error getting flags: %d %d, %x\n",
+ result, len, (len >= 3) ? data[2] : 0);
+ } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
+ /*
+ * Recv error response, give up.
+ */
+ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ dev_warn(&ssif_info->client->dev,
+ "Invalid response getting flags: %x %x\n",
+ data[0], data[1]);
+ } else {
+ ssif_inc_stat(ssif_info, flag_fetches);
+ ssif_info->msg_flags = data[3];
+ handle_flags(ssif_info, flags);
+ }
+ break;
+
+ case SSIF_CLEARING_FLAGS:
+ /* We cleared the flags. */
+ if ((result < 0) || (len < 3) || (data[2] != 0)) {
+ /* Error clearing flags */
+ dev_warn(&ssif_info->client->dev,
+ "Error clearing flags: %d %d, %x\n",
+ result, len, (len >= 3) ? data[2] : 0);
+ } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
+ dev_warn(&ssif_info->client->dev,
+ "Invalid response clearing flags: %x %x\n",
+ data[0], data[1]);
+ }
+ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ break;
+
+ case SSIF_GETTING_EVENTS:
+ if (!msg) {
+ /* Should never happen, but just in case. */
+ dev_warn(&ssif_info->client->dev,
+ "No message set while getting events\n");
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ break;
+ }
+
+ if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
+ /* Error getting event, probably done. */
+ msg->done(msg);
+
+ /* Take off the event flag. */
+ ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+ handle_flags(ssif_info, flags);
+ } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ || msg->rsp[1] != IPMI_READ_EVENT_MSG_BUFFER_CMD) {
+ dev_warn(&ssif_info->client->dev,
+ "Invalid response getting events: %x %x\n",
+ msg->rsp[0], msg->rsp[1]);
+ msg->done(msg);
+ /* Take off the event flag. */
+ ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+ handle_flags(ssif_info, flags);
+ } else {
+ handle_flags(ssif_info, flags);
+ ssif_inc_stat(ssif_info, events);
+ deliver_recv_msg(ssif_info, msg);
+ }
+ break;
+
+ case SSIF_GETTING_MESSAGES:
+ if (!msg) {
+ /* Should never happen, but just in case. */
+ dev_warn(&ssif_info->client->dev,
+ "No message set while getting messages\n");
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ break;
+ }
+
+ if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
+ /* Error getting message, probably done. */
+ msg->done(msg);
+
+ /* Take off the msg flag. */
+ ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+ handle_flags(ssif_info, flags);
+ } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ || msg->rsp[1] != IPMI_GET_MSG_CMD) {
+ dev_warn(&ssif_info->client->dev,
+ "Invalid response clearing flags: %x %x\n",
+ msg->rsp[0], msg->rsp[1]);
+ msg->done(msg);
+
+ /* Take off the msg flag. */
+ ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+ handle_flags(ssif_info, flags);
+ } else {
+ ssif_inc_stat(ssif_info, incoming_messages);
+ handle_flags(ssif_info, flags);
+ deliver_recv_msg(ssif_info, msg);
+ }
+ break;
+
+ default:
+ /* Should never happen, but just in case. */
+ dev_warn(&ssif_info->client->dev,
+ "Invalid state in message done handling: %d\n",
+ ssif_info->ssif_state);
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ }
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ if (IS_SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
+ if (ssif_info->req_events)
+ start_event_fetch(ssif_info, flags);
+ else if (ssif_info->req_flags)
+ start_flag_fetch(ssif_info, flags);
+ else
+ start_next_msg(ssif_info, flags);
+ } else
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
+ dev_dbg(&ssif_info->client->dev,
+ "DONE 2: state = %d.\n", ssif_info->ssif_state);
+}
+
+static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len)
+{
+ /* We are single-threaded here, so no need for a lock. */
+ if (result < 0) {
+ ssif_info->retries_left--;
+ if (ssif_info->retries_left > 0) {
+ /*
+ * Wait the retry timeout time per the spec,
+ * then redo the send.
+ */
+ ssif_info->do_resend = true;
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_REQ_RETRY_JIFFIES);
+ return;
+ }
+
+ ssif_inc_stat(ssif_info, send_errors);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ dev_dbg(&ssif_info->client->dev,
+ "%s: Out of retries\n", __func__);
+
+ msg_done_handler(ssif_info, -EIO, NULL, 0);
+ return;
+ }
+
+ if (ssif_info->multi_data) {
+ /*
+ * In the middle of a multi-data write. See the comment
+ * in the SSIF_MULTI_n_PART case in the probe function
+ * for details on the intricacies of this.
+ */
+ int left, to_write;
+ unsigned char *data_to_send;
+ unsigned char cmd;
+
+ ssif_inc_stat(ssif_info, sent_messages_parts);
+
+ left = ssif_info->multi_len - ssif_info->multi_pos;
+ to_write = left;
+ if (to_write > 32)
+ to_write = 32;
+ /* Length byte. */
+ ssif_info->multi_data[ssif_info->multi_pos] = to_write;
+ data_to_send = ssif_info->multi_data + ssif_info->multi_pos;
+ ssif_info->multi_pos += to_write;
+ cmd = SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE;
+ if (ssif_info->cmd8_works) {
+ if (left == to_write) {
+ cmd = SSIF_IPMI_MULTI_PART_REQUEST_END;
+ ssif_info->multi_data = NULL;
+ }
+ } else if (to_write < 32) {
+ ssif_info->multi_data = NULL;
+ }
+
+ ssif_i2c_send(ssif_info, msg_written_handler,
+ I2C_SMBUS_WRITE, cmd,
+ data_to_send, I2C_SMBUS_BLOCK_DATA);
+ } else {
+ /* Ready to request the result. */
+ unsigned long oflags, *flags;
+
+ ssif_inc_stat(ssif_info, sent_messages);
+ ssif_inc_stat(ssif_info, sent_messages_parts);
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ if (ssif_info->got_alert) {
+ /* The result is already ready, just start it. */
+ ssif_info->got_alert = false;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ start_get(ssif_info);
+ } else {
+ /* Wait a short time, then request the response. */
+ ssif_info->waiting_alert = true;
+ ssif_info->retries_left = SSIF_RECV_RETRIES;
+ if (!ssif_info->stopping)
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_MSG_PART_JIFFIES);
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ }
+ }
+}
+
+static void start_resend(struct ssif_info *ssif_info)
+{
+ int command;
+
+ ssif_info->got_alert = false;
+
+ if (ssif_info->data_len > 32) {
+ command = SSIF_IPMI_MULTI_PART_REQUEST_START;
+ ssif_info->multi_data = ssif_info->data;
+ ssif_info->multi_len = ssif_info->data_len;
+ /*
+ * Subtle thing, this is 32, not 33, because we will
+ * overwrite the thing at position 32 (which was just
+ * transmitted) with the new length.
+ */
+ ssif_info->multi_pos = 32;
+ ssif_info->data[0] = 32;
+ } else {
+ ssif_info->multi_data = NULL;
+ command = SSIF_IPMI_REQUEST;
+ ssif_info->data[0] = ssif_info->data_len;
+ }
+
+ ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
+ command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
+}
+
+static int start_send(struct ssif_info *ssif_info,
+ unsigned char *data,
+ unsigned int len)
+{
+ if (len > IPMI_MAX_MSG_LENGTH)
+ return -E2BIG;
+ if (len > ssif_info->max_xmit_msg_size)
+ return -E2BIG;
+
+ ssif_info->retries_left = SSIF_SEND_RETRIES;
+ memcpy(ssif_info->data + 1, data, len);
+ ssif_info->data_len = len;
+ start_resend(ssif_info);
+ return 0;
+}
+
+/* Must be called with the message lock held. */
+static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ struct ipmi_smi_msg *msg;
+ unsigned long oflags;
+
+ restart:
+ if (!IS_SSIF_IDLE(ssif_info)) {
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ return;
+ }
+
+ if (!ssif_info->waiting_msg) {
+ ssif_info->curr_msg = NULL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ } else {
+ int rv;
+
+ ssif_info->curr_msg = ssif_info->waiting_msg;
+ ssif_info->waiting_msg = NULL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ rv = start_send(ssif_info,
+ ssif_info->curr_msg->data,
+ ssif_info->curr_msg->data_size);
+ if (rv) {
+ msg = ssif_info->curr_msg;
+ ssif_info->curr_msg = NULL;
+ return_hosed_msg(ssif_info, msg);
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ goto restart;
+ }
+ }
+}
+
+static void sender(void *send_info,
+ struct ipmi_smi_msg *msg)
+{
+ struct ssif_info *ssif_info = send_info;
+ unsigned long oflags, *flags;
+
+ BUG_ON(ssif_info->waiting_msg);
+ ssif_info->waiting_msg = msg;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ start_next_msg(ssif_info, flags);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_TIMING) {
+ struct timespec64 t;
+
+ ktime_get_real_ts64(&t);
+ dev_dbg(&ssif_info->client->dev,
+ "**Enqueue %02x %02x: %lld.%6.6ld\n",
+ msg->data[0], msg->data[1],
+ (long long)t.tv_sec, (long)t.tv_nsec / NSEC_PER_USEC);
+ }
+}
+
+static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
+{
+ struct ssif_info *ssif_info = send_info;
+
+ data->addr_src = ssif_info->addr_source;
+ data->dev = &ssif_info->client->dev;
+ data->addr_info = ssif_info->addr_info;
+ get_device(data->dev);
+
+ return 0;
+}
+
+/*
+ * Upper layer wants us to request events.
+ */
+static void request_events(void *send_info)
+{
+ struct ssif_info *ssif_info = send_info;
+ unsigned long oflags, *flags;
+
+ if (!ssif_info->has_event_buffer)
+ return;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ ssif_info->req_events = true;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+}
+
+/*
+ * Upper layer is changing the flag saying whether we need to request
+ * flags periodically or not.
+ */
+static void ssif_set_need_watch(void *send_info, unsigned int watch_mask)
+{
+ struct ssif_info *ssif_info = send_info;
+ unsigned long oflags, *flags;
+ long timeout = 0;
+
+ if (watch_mask & IPMI_WATCH_MASK_CHECK_MESSAGES)
+ timeout = SSIF_WATCH_MSG_TIMEOUT;
+ else if (watch_mask)
+ timeout = SSIF_WATCH_WATCHDOG_TIMEOUT;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ if (timeout != ssif_info->watch_timeout) {
+ ssif_info->watch_timeout = timeout;
+ if (ssif_info->watch_timeout)
+ mod_timer(&ssif_info->watch_timer,
+ jiffies + ssif_info->watch_timeout);
+ }
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+}
+
+static int ssif_start_processing(void *send_info,
+ struct ipmi_smi *intf)
+{
+ struct ssif_info *ssif_info = send_info;
+
+ ssif_info->intf = intf;
+
+ return 0;
+}
+
+#define MAX_SSIF_BMCS 4
+
+static unsigned short addr[MAX_SSIF_BMCS];
+static int num_addrs;
+module_param_array(addr, ushort, &num_addrs, 0);
+MODULE_PARM_DESC(addr, "The addresses to scan for IPMI BMCs on the SSIFs.");
+
+static char *adapter_name[MAX_SSIF_BMCS];
+static int num_adapter_names;
+module_param_array(adapter_name, charp, &num_adapter_names, 0);
+MODULE_PARM_DESC(adapter_name, "The string name of the I2C device that has the BMC. By default all devices are scanned.");
+
+static int slave_addrs[MAX_SSIF_BMCS];
+static int num_slave_addrs;
+module_param_array(slave_addrs, int, &num_slave_addrs, 0);
+MODULE_PARM_DESC(slave_addrs,
+ "The default IPMB slave address for the controller.");
+
+static bool alerts_broken;
+module_param(alerts_broken, bool, 0);
+MODULE_PARM_DESC(alerts_broken, "Don't enable alerts for the controller.");
+
+/*
+ * Bit 0 enables message debugging, bit 1 enables state debugging, and
+ * bit 2 enables timing debugging. This is an array indexed by
+ * interface number.
+ */
+static int dbg[MAX_SSIF_BMCS];
+static int num_dbg;
+module_param_array(dbg, int, &num_dbg, 0);
+MODULE_PARM_DESC(dbg, "Turn on debugging.");
+
+static bool ssif_dbg_probe;
+module_param_named(dbg_probe, ssif_dbg_probe, bool, 0);
+MODULE_PARM_DESC(dbg_probe, "Enable debugging of probing of adapters.");
+
+static bool ssif_tryacpi = true;
+module_param_named(tryacpi, ssif_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the default scan of the interfaces identified via ACPI");
+
+static bool ssif_trydmi = true;
+module_param_named(trydmi, ssif_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the default scan of the interfaces identified via DMI (SMBIOS)");
+
+static DEFINE_MUTEX(ssif_infos_mutex);
+static LIST_HEAD(ssif_infos);
+
+#define IPMI_SSIF_ATTR(name) \
+static ssize_t ipmi_##name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct ssif_info *ssif_info = dev_get_drvdata(dev); \
+ \
+ return sysfs_emit(buf, "%u\n", ssif_get_stat(ssif_info, name));\
+} \
+static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL)
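+/*
+ * Illustrative note (not part of the original driver): for example,
+ * IPMI_SSIF_ATTR(sent_messages) defines ipmi_sent_messages_show() and
+ * dev_attr_sent_messages, which is referenced in ipmi_ssif_dev_attrs[]
+ * below.
+ */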
+
+static ssize_t ipmi_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "ssif\n");
+}
+static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL);
+
+IPMI_SSIF_ATTR(sent_messages);
+IPMI_SSIF_ATTR(sent_messages_parts);
+IPMI_SSIF_ATTR(send_retries);
+IPMI_SSIF_ATTR(send_errors);
+IPMI_SSIF_ATTR(received_messages);
+IPMI_SSIF_ATTR(received_message_parts);
+IPMI_SSIF_ATTR(receive_retries);
+IPMI_SSIF_ATTR(receive_errors);
+IPMI_SSIF_ATTR(flag_fetches);
+IPMI_SSIF_ATTR(hosed);
+IPMI_SSIF_ATTR(events);
+IPMI_SSIF_ATTR(watchdog_pretimeouts);
+IPMI_SSIF_ATTR(alerts);
+
+static struct attribute *ipmi_ssif_dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_sent_messages.attr,
+ &dev_attr_sent_messages_parts.attr,
+ &dev_attr_send_retries.attr,
+ &dev_attr_send_errors.attr,
+ &dev_attr_received_messages.attr,
+ &dev_attr_received_message_parts.attr,
+ &dev_attr_receive_retries.attr,
+ &dev_attr_receive_errors.attr,
+ &dev_attr_flag_fetches.attr,
+ &dev_attr_hosed.attr,
+ &dev_attr_events.attr,
+ &dev_attr_watchdog_pretimeouts.attr,
+ &dev_attr_alerts.attr,
+ NULL
+};
+
+static const struct attribute_group ipmi_ssif_dev_attr_group = {
+ .attrs = ipmi_ssif_dev_attrs,
+};
+
+static void shutdown_ssif(void *send_info)
+{
+ struct ssif_info *ssif_info = send_info;
+
+ device_remove_group(&ssif_info->client->dev, &ipmi_ssif_dev_attr_group);
+ dev_set_drvdata(&ssif_info->client->dev, NULL);
+
+ /* make sure the driver is not looking for flags any more. */
+ while (ssif_info->ssif_state != SSIF_IDLE)
+ schedule_timeout(1);
+
+ ssif_info->stopping = true;
+ del_timer_sync(&ssif_info->watch_timer);
+ del_timer_sync(&ssif_info->retry_timer);
+ if (ssif_info->thread) {
+ complete(&ssif_info->wake_thread);
+ kthread_stop(ssif_info->thread);
+ }
+}
+
+static void ssif_remove(struct i2c_client *client)
+{
+ struct ssif_info *ssif_info = i2c_get_clientdata(client);
+ struct ssif_addr_info *addr_info;
+
+ /*
+ * After this point, we won't deliver anything asynchronously
+ * to the message handler. We can unregister ourselves.
+ */
+ ipmi_unregister_smi(ssif_info->intf);
+
+ list_for_each_entry(addr_info, &ssif_infos, link) {
+ if (addr_info->client == client) {
+ addr_info->client = NULL;
+ break;
+ }
+ }
+
+ kfree(ssif_info);
+}
+
+static int read_response(struct i2c_client *client, unsigned char *resp)
+{
+ int ret = -ENODEV, retry_cnt = SSIF_RECV_RETRIES;
+
+ while (retry_cnt > 0) {
+ ret = i2c_smbus_read_block_data(client, SSIF_IPMI_RESPONSE,
+ resp);
+ if (ret > 0)
+ break;
+ msleep(SSIF_MSG_MSEC);
+ retry_cnt--;
+ if (retry_cnt <= 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
+ int *resp_len, unsigned char *resp)
+{
+ int retry_cnt;
+ int ret;
+
+ retry_cnt = SSIF_SEND_RETRIES;
+ retry1:
+ ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg);
+ if (ret) {
+ retry_cnt--;
+ if (retry_cnt > 0) {
+ msleep(SSIF_REQ_RETRY_MSEC);
+ goto retry1;
+ }
+ return -ENODEV;
+ }
+
+ ret = read_response(client, resp);
+ if (ret > 0) {
+ /* Validate that the response is correct. */
+ if (ret < 3 ||
+ (resp[0] != (msg[0] | (1 << 2))) ||
+ (resp[1] != msg[1]))
+ ret = -EINVAL;
+ else if (ret > IPMI_MAX_MSG_LENGTH) {
+ ret = -E2BIG;
+ } else {
+ *resp_len = ret;
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+ unsigned char *resp;
+ unsigned char msg[3];
+ int rv;
+ int len;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ /* Do a Get Device ID command, since it is required. */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_DEVICE_ID_CMD;
+ rv = do_cmd(client, 2, msg, &len, resp);
+ if (rv)
+ rv = -ENODEV;
+ else
+ strscpy(info->type, DEVICE_NAME, I2C_NAME_SIZE);
+ kfree(resp);
+ return rv;
+}
+
+static int strcmp_nospace(char *s1, char *s2)
+{
+ while (*s1 && *s2) {
+ while (isspace(*s1))
+ s1++;
+ while (isspace(*s2))
+ s2++;
+ if (*s1 > *s2)
+ return 1;
+ if (*s1 < *s2)
+ return -1;
+ s1++;
+ s2++;
+ }
+ return 0;
+}
+
+static struct ssif_addr_info *ssif_info_find(unsigned short addr,
+ char *adapter_name,
+ bool match_null_name)
+{
+ struct ssif_addr_info *info, *found = NULL;
+
+restart:
+ list_for_each_entry(info, &ssif_infos, link) {
+ if (info->binfo.addr == addr) {
+ if (info->addr_src == SI_SMBIOS && !info->adapter_name)
+ info->adapter_name = kstrdup(adapter_name,
+ GFP_KERNEL);
+
+ if (info->adapter_name || adapter_name) {
+ if (!info->adapter_name != !adapter_name) {
+ /* One is NULL and one is not */
+ continue;
+ }
+ if (adapter_name &&
+ strcmp_nospace(info->adapter_name,
+ adapter_name))
+ /* Names do not match */
+ continue;
+ }
+ found = info;
+ break;
+ }
+ }
+
+ if (!found && match_null_name) {
+ /* Try to get an exact match first, then try with a NULL name */
+ adapter_name = NULL;
+ match_null_name = false;
+ goto restart;
+ }
+
+ return found;
+}
+
+static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
+{
+#ifdef CONFIG_ACPI
+ acpi_handle acpi_handle;
+
+ acpi_handle = ACPI_HANDLE(dev);
+ if (acpi_handle) {
+ ssif_info->addr_source = SI_ACPI;
+ ssif_info->addr_info.acpi_info.acpi_handle = acpi_handle;
+ request_module_nowait("acpi_ipmi");
+ return true;
+ }
+#endif
+ return false;
+}
+
+static int find_slave_address(struct i2c_client *client, int slave_addr)
+{
+#ifdef CONFIG_IPMI_DMI_DECODE
+ if (!slave_addr)
+ slave_addr = ipmi_dmi_get_slave_addr(
+ SI_TYPE_INVALID,
+ i2c_adapter_id(client->adapter),
+ client->addr);
+#endif
+
+ return slave_addr;
+}
+
+static int start_multipart_test(struct i2c_client *client,
+ unsigned char *msg, bool do_middle)
+{
+ int retry_cnt = SSIF_SEND_RETRIES, ret;
+
+retry_write:
+ ret = i2c_smbus_write_block_data(client,
+ SSIF_IPMI_MULTI_PART_REQUEST_START,
+ 32, msg);
+ if (ret) {
+ retry_cnt--;
+ if (retry_cnt > 0) {
+ msleep(SSIF_REQ_RETRY_MSEC);
+ goto retry_write;
+ }
+ dev_err(&client->dev, "Could not write multi-part start, though the BMC said it could handle it. Just limit sends to one part.\n");
+ return ret;
+ }
+
+ if (!do_middle)
+ return 0;
+
+ ret = i2c_smbus_write_block_data(client,
+ SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
+ 32, msg + 32);
+ if (ret) {
+ dev_err(&client->dev, "Could not write multi-part middle, though the BMC said it could handle it. Just limit sends to one part.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void test_multipart_messages(struct i2c_client *client,
+ struct ssif_info *ssif_info,
+ unsigned char *resp)
+{
+ unsigned char msg[65];
+ int ret;
+ bool do_middle;
+
+ if (ssif_info->max_xmit_msg_size <= 32)
+ return;
+
+ do_middle = ssif_info->max_xmit_msg_size > 63;
+
+ memset(msg, 0, sizeof(msg));
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_DEVICE_ID_CMD;
+
+ /*
+ * The specification is all messed up dealing with sending
+ * multi-part messages. Per what the specification says, it
+ * is impossible to send a message that is a multiple of 32
+ * bytes, except for 32 itself. It talks about a "start"
+ * transaction (cmd=6) that must be 32 bytes, "middle"
+ * transaction (cmd=7) that must be 32 bytes, and an "end"
+ * transaction. The "end" transaction is shown as cmd=7 in
+ * the text, but if that's the case there is no way to
+ * differentiate between a middle and end part except the
+ * length being less than 32. But there is a table at the far
+ * end of the section (that I had never noticed until someone
+ * pointed it out to me) that mentions it as cmd=8.
+ *
+ * After some thought, I think the example is wrong and the
+ * end transaction should be cmd=8. But some systems don't
+ * implement cmd=8, they use a zero-length end transaction,
+ * even though that violates the SMBus specification.
+ *
+ * So, to work around this, this code tests if cmd=8 works.
+ * If it does, then we use that. If not, it tests zero-
+ * byte end transactions. If that works, good. If not,
+ * we only allow 63-byte transactions max.
+ */
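+ /*
+ * Illustrative example (assuming a 70-byte request on a BMC where
+ * cmd=8 works): the message goes out as a 32-byte start (cmd 6), a
+ * 32-byte middle (cmd 7), and a 6-byte end (cmd 8). On BMCs without
+ * cmd=8, the end is instead a short (or zero-length) cmd 7 transaction.
+ */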
+
+ ret = start_multipart_test(client, msg, do_middle);
+ if (ret)
+ goto out_no_multi_part;
+
+ ret = i2c_smbus_write_block_data(client,
+ SSIF_IPMI_MULTI_PART_REQUEST_END,
+ 1, msg + 64);
+
+ if (!ret)
+ ret = read_response(client, resp);
+
+ if (ret > 0) {
+ /* End transactions work, we are good. */
+ ssif_info->cmd8_works = true;
+ return;
+ }
+
+ ret = start_multipart_test(client, msg, do_middle);
+ if (ret) {
+ dev_err(&client->dev, "Second multipart test failed.\n");
+ goto out_no_multi_part;
+ }
+
+ ret = i2c_smbus_write_block_data(client,
+ SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
+ 0, msg + 64);
+ if (!ret)
+ ret = read_response(client, resp);
+ if (ret > 0)
+ /* Zero-size end parts work, use those. */
+ return;
+
+ /* Limit to 63 bytes and use a short middle command to mark the end. */
+ if (ssif_info->max_xmit_msg_size > 63)
+ ssif_info->max_xmit_msg_size = 63;
+ return;
+
+out_no_multi_part:
+ ssif_info->max_xmit_msg_size = 32;
+ return;
+}
+
+/*
+ * Global enables we care about.
+ */
+#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
+ IPMI_BMC_EVT_MSG_INTR)
+
+static void ssif_remove_dup(struct i2c_client *client)
+{
+ struct ssif_info *ssif_info = i2c_get_clientdata(client);
+
+ ipmi_unregister_smi(ssif_info->intf);
+ kfree(ssif_info);
+}
+
+static int ssif_add_infos(struct i2c_client *client)
+{
+ struct ssif_addr_info *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ info->addr_src = SI_ACPI;
+ info->client = client;
+ info->adapter_name = kstrdup(client->adapter->name, GFP_KERNEL);
+ if (!info->adapter_name) {
+ kfree(info);
+ return -ENOMEM;
+ }
+
+ info->binfo.addr = client->addr;
+ list_add_tail(&info->link, &ssif_infos);
+ return 0;
+}
+
+/*
+ * Prefer ACPI over SMBIOS, if both are available.
+ * So if we get an ACPI interface and have already registered a SMBIOS
+ * interface at the same address, remove the SMBIOS and add the ACPI one.
+ */
+static int ssif_check_and_remove(struct i2c_client *client,
+ struct ssif_info *ssif_info)
+{
+ struct ssif_addr_info *info;
+
+ list_for_each_entry(info, &ssif_infos, link) {
+ if (!info->client)
+ return 0;
+ if (!strcmp(info->adapter_name, client->adapter->name) &&
+ info->binfo.addr == client->addr) {
+ if (info->addr_src == SI_ACPI)
+ return -EEXIST;
+
+ if (ssif_info->addr_source == SI_ACPI &&
+ info->addr_src == SI_SMBIOS) {
+ dev_info(&client->dev,
+ "Removing %s-specified SSIF interface in favor of ACPI\n",
+ ipmi_addr_src_to_str(info->addr_src));
+ ssif_remove_dup(info->client);
+ return 0;
+ }
+ }
+ }
+ return 0;
+}
+
+static int ssif_probe(struct i2c_client *client)
+{
+ unsigned char msg[3];
+ unsigned char *resp;
+ struct ssif_info *ssif_info;
+ int rv = 0;
+ int len = 0;
+ int i;
+ u8 slave_addr = 0;
+ struct ssif_addr_info *addr_info = NULL;
+
+ mutex_lock(&ssif_infos_mutex);
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp) {
+ mutex_unlock(&ssif_infos_mutex);
+ return -ENOMEM;
+ }
+
+ ssif_info = kzalloc(sizeof(*ssif_info), GFP_KERNEL);
+ if (!ssif_info) {
+ kfree(resp);
+ mutex_unlock(&ssif_infos_mutex);
+ return -ENOMEM;
+ }
+
+ if (!check_acpi(ssif_info, &client->dev)) {
+ addr_info = ssif_info_find(client->addr, client->adapter->name,
+ true);
+ if (!addr_info) {
+ /* Must have come in through sysfs. */
+ ssif_info->addr_source = SI_HOTMOD;
+ } else {
+ ssif_info->addr_source = addr_info->addr_src;
+ ssif_info->ssif_debug = addr_info->debug;
+ ssif_info->addr_info = addr_info->addr_info;
+ addr_info->client = client;
+ slave_addr = addr_info->slave_addr;
+ }
+ }
+
+ ssif_info->client = client;
+ i2c_set_clientdata(client, ssif_info);
+
+ rv = ssif_check_and_remove(client, ssif_info);
+ /* If rv is 0 and addr source is not SI_ACPI, continue probing */
+ if (!rv && ssif_info->addr_source == SI_ACPI) {
+ rv = ssif_add_infos(client);
+ if (rv) {
+ dev_err(&client->dev, "Out of memory!, exiting ..\n");
+ goto out;
+ }
+ } else if (rv) {
+ dev_err(&client->dev, "Not probing, Interface already present\n");
+ goto out;
+ }
+
+ slave_addr = find_slave_address(client, slave_addr);
+
+ dev_info(&client->dev,
+ "Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n",
+ ipmi_addr_src_to_str(ssif_info->addr_source),
+ client->addr, client->adapter->name, slave_addr);
+
+ /* Now check for system interface capabilities */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD;
+ msg[2] = 0; /* SSIF */
+ rv = do_cmd(client, 3, msg, &len, resp);
+ if (!rv && (len >= 3) && (resp[2] == 0)) {
+ if (len < 7) {
+ if (ssif_dbg_probe)
+ dev_dbg(&ssif_info->client->dev,
+ "SSIF info too short: %d\n", len);
+ goto no_support;
+ }
+
+ /* Got a good SSIF response, handle it. */
+ ssif_info->max_xmit_msg_size = resp[5];
+ ssif_info->max_recv_msg_size = resp[6];
+ ssif_info->multi_support = (resp[4] >> 6) & 0x3;
+ ssif_info->supports_pec = (resp[4] >> 3) & 0x1;
+
+ /* Sanitize the data */
+ switch (ssif_info->multi_support) {
+ case SSIF_NO_MULTI:
+ if (ssif_info->max_xmit_msg_size > 32)
+ ssif_info->max_xmit_msg_size = 32;
+ if (ssif_info->max_recv_msg_size > 32)
+ ssif_info->max_recv_msg_size = 32;
+ break;
+
+ case SSIF_MULTI_2_PART:
+ if (ssif_info->max_xmit_msg_size > 63)
+ ssif_info->max_xmit_msg_size = 63;
+ if (ssif_info->max_recv_msg_size > 62)
+ ssif_info->max_recv_msg_size = 62;
+ break;
+
+ case SSIF_MULTI_n_PART:
+ /* We take whatever size given, but do some testing. */
+ break;
+
+ default:
+ /* Data is not sane, just give up. */
+ goto no_support;
+ }
+ } else {
+ no_support:
+ /* Assume no multi-part or PEC support */
+ dev_info(&ssif_info->client->dev,
+ "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n",
+ rv, len, resp[2]);
+
+ ssif_info->max_xmit_msg_size = 32;
+ ssif_info->max_recv_msg_size = 32;
+ ssif_info->multi_support = SSIF_NO_MULTI;
+ ssif_info->supports_pec = 0;
+ }
+
+ test_multipart_messages(client, ssif_info, resp);
+
+ /* Make sure the NMI timeout is cleared. */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+ msg[2] = WDT_PRE_TIMEOUT_INT;
+ rv = do_cmd(client, 3, msg, &len, resp);
+ if (rv || (len < 3) || (resp[2] != 0))
+ dev_warn(&ssif_info->client->dev,
+ "Unable to clear message flags: %d %d %2.2x\n",
+ rv, len, resp[2]);
+
+ /* Attempt to enable the event buffer. */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+ rv = do_cmd(client, 2, msg, &len, resp);
+ if (rv || (len < 4) || (resp[2] != 0)) {
+ dev_warn(&ssif_info->client->dev,
+ "Error getting global enables: %d %d %2.2x\n",
+ rv, len, resp[2]);
+ rv = 0; /* Not fatal */
+ goto found;
+ }
+
+ ssif_info->global_enables = resp[3];
+
+ if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
+ ssif_info->has_event_buffer = true;
+ /* buffer is already enabled, nothing to do. */
+ goto found;
+ }
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = ssif_info->global_enables | IPMI_BMC_EVT_MSG_BUFF;
+ rv = do_cmd(client, 3, msg, &len, resp);
+ if (rv || (len < 2)) {
+ dev_warn(&ssif_info->client->dev,
+ "Error setting global enables: %d %d %2.2x\n",
+ rv, len, resp[2]);
+ rv = 0; /* Not fatal */
+ goto found;
+ }
+
+ if (resp[2] == 0) {
+ /* A successful return means the event buffer is supported. */
+ ssif_info->has_event_buffer = true;
+ ssif_info->global_enables |= IPMI_BMC_EVT_MSG_BUFF;
+ }
+
+ /* Some systems don't behave well if you enable alerts. */
+ if (alerts_broken)
+ goto found;
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = ssif_info->global_enables | IPMI_BMC_RCV_MSG_INTR;
+ rv = do_cmd(client, 3, msg, &len, resp);
+ if (rv || (len < 2)) {
+ dev_warn(&ssif_info->client->dev,
+ "Error setting global enables: %d %d %2.2x\n",
+ rv, len, resp[2]);
+ rv = 0; /* Not fatal */
+ goto found;
+ }
+
+ if (resp[2] == 0) {
+ /* A successful return means the alert is supported. */
+ ssif_info->supports_alert = true;
+ ssif_info->global_enables |= IPMI_BMC_RCV_MSG_INTR;
+ }
+
+ found:
+ if (ssif_dbg_probe) {
+ dev_dbg(&ssif_info->client->dev,
+ "%s: i2c_probe found device at i2c address %x\n",
+ __func__, client->addr);
+ }
+
+ spin_lock_init(&ssif_info->lock);
+ ssif_info->ssif_state = SSIF_IDLE;
+ timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
+ timer_setup(&ssif_info->watch_timer, watch_timeout, 0);
+
+ for (i = 0; i < SSIF_NUM_STATS; i++)
+ atomic_set(&ssif_info->stats[i], 0);
+
+ if (ssif_info->supports_pec)
+ ssif_info->client->flags |= I2C_CLIENT_PEC;
+
+ ssif_info->handlers.owner = THIS_MODULE;
+ ssif_info->handlers.start_processing = ssif_start_processing;
+ ssif_info->handlers.shutdown = shutdown_ssif;
+ ssif_info->handlers.get_smi_info = get_smi_info;
+ ssif_info->handlers.sender = sender;
+ ssif_info->handlers.request_events = request_events;
+ ssif_info->handlers.set_need_watch = ssif_set_need_watch;
+
+ {
+ unsigned int thread_num;
+
+ thread_num = ((i2c_adapter_id(ssif_info->client->adapter)
+ << 8) |
+ ssif_info->client->addr);
+ init_completion(&ssif_info->wake_thread);
+ ssif_info->thread = kthread_run(ipmi_ssif_thread, ssif_info,
+ "kssif%4.4x", thread_num);
+ if (IS_ERR(ssif_info->thread)) {
+ rv = PTR_ERR(ssif_info->thread);
+ dev_notice(&ssif_info->client->dev,
+ "Could not start kernel thread: error %d\n",
+ rv);
+ goto out;
+ }
+ }
+
+ dev_set_drvdata(&ssif_info->client->dev, ssif_info);
+ rv = device_add_group(&ssif_info->client->dev,
+ &ipmi_ssif_dev_attr_group);
+ if (rv) {
+ dev_err(&ssif_info->client->dev,
+ "Unable to add device attributes: error %d\n",
+ rv);
+ goto out;
+ }
+
+ rv = ipmi_register_smi(&ssif_info->handlers,
+ ssif_info,
+ &ssif_info->client->dev,
+ slave_addr);
+ if (rv) {
+ dev_err(&ssif_info->client->dev,
+ "Unable to register device: error %d\n", rv);
+ goto out_remove_attr;
+ }
+
+ out:
+ if (rv) {
+ if (addr_info)
+ addr_info->client = NULL;
+
+ dev_err(&ssif_info->client->dev,
+ "Unable to start IPMI SSIF: %d\n", rv);
+ i2c_set_clientdata(client, NULL);
+ kfree(ssif_info);
+ }
+ kfree(resp);
+ mutex_unlock(&ssif_infos_mutex);
+ return rv;
+
+out_remove_attr:
+ device_remove_group(&ssif_info->client->dev, &ipmi_ssif_dev_attr_group);
+ dev_set_drvdata(&ssif_info->client->dev, NULL);
+ goto out;
+}
+
+static int new_ssif_client(int addr, char *adapter_name,
+ int debug, int slave_addr,
+ enum ipmi_addr_src addr_src,
+ struct device *dev)
+{
+ struct ssif_addr_info *addr_info;
+ int rv = 0;
+
+ mutex_lock(&ssif_infos_mutex);
+ if (ssif_info_find(addr, adapter_name, false)) {
+ rv = -EEXIST;
+ goto out_unlock;
+ }
+
+ addr_info = kzalloc(sizeof(*addr_info), GFP_KERNEL);
+ if (!addr_info) {
+ rv = -ENOMEM;
+ goto out_unlock;
+ }
+
+ if (adapter_name) {
+ addr_info->adapter_name = kstrdup(adapter_name, GFP_KERNEL);
+ if (!addr_info->adapter_name) {
+ kfree(addr_info);
+ rv = -ENOMEM;
+ goto out_unlock;
+ }
+ }
+
+ strncpy(addr_info->binfo.type, DEVICE_NAME,
+ sizeof(addr_info->binfo.type));
+ addr_info->binfo.addr = addr;
+ addr_info->binfo.platform_data = addr_info;
+ addr_info->debug = debug;
+ addr_info->slave_addr = slave_addr;
+ addr_info->addr_src = addr_src;
+ addr_info->dev = dev;
+
+ if (dev)
+ dev_set_drvdata(dev, addr_info);
+
+ list_add_tail(&addr_info->link, &ssif_infos);
+
+ /* Address list will get it */
+
+out_unlock:
+ mutex_unlock(&ssif_infos_mutex);
+ return rv;
+}
+
+static void free_ssif_clients(void)
+{
+ struct ssif_addr_info *info, *tmp;
+
+ mutex_lock(&ssif_infos_mutex);
+ list_for_each_entry_safe(info, tmp, &ssif_infos, link) {
+ list_del(&info->link);
+ kfree(info->adapter_name);
+ kfree(info);
+ }
+ mutex_unlock(&ssif_infos_mutex);
+}
+
+static unsigned short *ssif_address_list(void)
+{
+ struct ssif_addr_info *info;
+ unsigned int count = 0, i = 0;
+ unsigned short *address_list;
+
+ list_for_each_entry(info, &ssif_infos, link)
+ count++;
+
+ address_list = kcalloc(count + 1, sizeof(*address_list),
+ GFP_KERNEL);
+ if (!address_list)
+ return NULL;
+
+ list_for_each_entry(info, &ssif_infos, link) {
+ unsigned short addr = info->binfo.addr;
+ int j;
+
+ for (j = 0; j < i; j++) {
+ if (address_list[j] == addr)
+ /* Found a dup. */
+ break;
+ }
+ if (j == i) /* Didn't find it in the list. */
+ address_list[i++] = addr;
+ }
+ address_list[i] = I2C_CLIENT_END;
+
+ return address_list;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ssif_acpi_match[] = {
+ { "IPI0001", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ssif_acpi_match);
+#endif
+
+#ifdef CONFIG_DMI
+static int dmi_ipmi_probe(struct platform_device *pdev)
+{
+ u8 slave_addr = 0;
+ u16 i2c_addr;
+ int rv;
+
+ if (!ssif_trydmi)
+ return -ENODEV;
+
+ rv = device_property_read_u16(&pdev->dev, "i2c-addr", &i2c_addr);
+ if (rv) {
+ dev_warn(&pdev->dev, "No i2c-addr property\n");
+ return -ENODEV;
+ }
+
+ rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr);
+ if (rv)
+ slave_addr = 0x20;
+
+ return new_ssif_client(i2c_addr, NULL, 0,
+ slave_addr, SI_SMBIOS, &pdev->dev);
+}
+#else
+static int dmi_ipmi_probe(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+#endif
+
+static const struct i2c_device_id ssif_id[] = {
+ { DEVICE_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ssif_id);
+
+static struct i2c_driver ssif_i2c_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = DEVICE_NAME
+ },
+ .probe = ssif_probe,
+ .remove = ssif_remove,
+ .alert = ssif_alert,
+ .id_table = ssif_id,
+ .detect = ssif_detect
+};
+
+static int ssif_platform_probe(struct platform_device *dev)
+{
+ return dmi_ipmi_probe(dev);
+}
+
+static int ssif_platform_remove(struct platform_device *dev)
+{
+ struct ssif_addr_info *addr_info = dev_get_drvdata(&dev->dev);
+
+ mutex_lock(&ssif_infos_mutex);
+ list_del(&addr_info->link);
+ kfree(addr_info);
+ mutex_unlock(&ssif_infos_mutex);
+ return 0;
+}
+
+static const struct platform_device_id ssif_plat_ids[] = {
+ { "dmi-ipmi-ssif", 0 },
+ { }
+};
+
+static struct platform_driver ipmi_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ },
+ .probe = ssif_platform_probe,
+ .remove = ssif_platform_remove,
+ .id_table = ssif_plat_ids
+};
+
+static int __init init_ipmi_ssif(void)
+{
+ int i;
+ int rv;
+
+ if (initialized)
+ return 0;
+
+ pr_info("IPMI SSIF Interface driver\n");
+
+ /* build list for i2c from addr list */
+ for (i = 0; i < num_addrs; i++) {
+ rv = new_ssif_client(addr[i], adapter_name[i],
+ dbg[i], slave_addrs[i],
+ SI_HARDCODED, NULL);
+ if (rv)
+ pr_err("Couldn't add hardcoded device at addr 0x%x\n",
+ addr[i]);
+ }
+
+ if (ssif_tryacpi)
+ ssif_i2c_driver.driver.acpi_match_table =
+ ACPI_PTR(ssif_acpi_match);
+
+ if (ssif_trydmi) {
+ rv = platform_driver_register(&ipmi_driver);
+ if (rv)
+ pr_err("Unable to register driver: %d\n", rv);
+ else
+ platform_registered = true;
+ }
+
+ ssif_i2c_driver.address_list = ssif_address_list();
+
+ rv = i2c_add_driver(&ssif_i2c_driver);
+ if (!rv)
+ initialized = true;
+
+ return rv;
+}
+module_init(init_ipmi_ssif);
+
+static void __exit cleanup_ipmi_ssif(void)
+{
+ if (!initialized)
+ return;
+
+ initialized = false;
+
+ i2c_del_driver(&ssif_i2c_driver);
+
+ kfree(ssif_i2c_driver.address_list);
+
+ if (ssif_trydmi && platform_registered)
+ platform_driver_unregister(&ipmi_driver);
+
+ free_ssif_clients();
+}
+module_exit(cleanup_ipmi_ssif);
+
+MODULE_ALIAS("platform:dmi-ipmi-ssif");
+MODULE_AUTHOR("Todd C Davis <todd.c.davis@intel.com>, Corey Minyard <minyard@acm.org>");
+MODULE_DESCRIPTION("IPMI driver for management controllers on a SMBus");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
new file mode 100644
index 0000000000..9a45925748
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -0,0 +1,1344 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_watchdog.c
+ *
+ * A watchdog timer based upon the IPMI interface.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ */
+
+#define pr_fmt(fmt) "IPMI Watchdog: " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include <linux/mutex.h>
+#include <linux/watchdog.h>
+#include <linux/miscdevice.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/kdebug.h>
+#include <linux/kstrtox.h>
+#include <linux/rwsem.h>
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <linux/notifier.h>
+#include <linux/nmi.h>
+#include <linux/reboot.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/sched/signal.h>
+
+#ifdef CONFIG_X86
+/*
+ * This is ugly, but I've determined that x86 is the only architecture
+ * that can reasonably support the IPMI NMI watchdog timeout at this
+ * time. If another architecture adds this capability somehow, it
+ * will have to be a somewhat different mechanism and I have no idea
+ * how it will work. So in the unlikely event that another
+ * architecture supports this, we can figure out a good generic
+ * mechanism for it at that time.
+ */
+#include <asm/kdebug.h>
+#include <asm/nmi.h>
+#define HAVE_DIE_NMI
+#endif
+
+/*
+ * The IPMI command/response information for the watchdog timer.
+ */
+
+/* values for byte 1 of the set command, byte 2 of the get response. */
+#define WDOG_DONT_LOG (1 << 7)
+#define WDOG_DONT_STOP_ON_SET (1 << 6)
+#define WDOG_SET_TIMER_USE(byte, use) \
+ byte = ((byte) & 0xf8) | ((use) & 0x7)
+#define WDOG_GET_TIMER_USE(byte) ((byte) & 0x7)
+#define WDOG_TIMER_USE_BIOS_FRB2 1
+#define WDOG_TIMER_USE_BIOS_POST 2
+#define WDOG_TIMER_USE_OS_LOAD 3
+#define WDOG_TIMER_USE_SMS_OS 4
+#define WDOG_TIMER_USE_OEM 5
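+/*
+ * Worked example (illustrative): with byte = 0xc0 and use =
+ * WDOG_TIMER_USE_SMS_OS (4), WDOG_SET_TIMER_USE(byte, use) gives
+ * (0xc0 & 0xf8) | (4 & 0x7) = 0xc4, and WDOG_GET_TIMER_USE(0xc4) = 4.
+ */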
+
+/* values for byte 2 of the set command, byte 3 of the get response. */
+#define WDOG_SET_PRETIMEOUT_ACT(byte, use) \
+ byte = ((byte) & 0x8f) | (((use) & 0x7) << 4)
+#define WDOG_GET_PRETIMEOUT_ACT(byte) (((byte) >> 4) & 0x7)
+#define WDOG_PRETIMEOUT_NONE 0
+#define WDOG_PRETIMEOUT_SMI 1
+#define WDOG_PRETIMEOUT_NMI 2
+#define WDOG_PRETIMEOUT_MSG_INT 3
+
+/* Operations that can be performed on a pretimeout. */
+#define WDOG_PREOP_NONE 0
+#define WDOG_PREOP_PANIC 1
+/* Cause data to be available to read. Doesn't work in NMI mode. */
+#define WDOG_PREOP_GIVE_DATA 2
+
+/* Actions to perform on a full timeout. */
+#define WDOG_SET_TIMEOUT_ACT(byte, use) \
+ byte = ((byte) & 0xf8) | ((use) & 0x7)
+#define WDOG_GET_TIMEOUT_ACT(byte) ((byte) & 0x7)
+#define WDOG_TIMEOUT_NONE 0
+#define WDOG_TIMEOUT_RESET 1
+#define WDOG_TIMEOUT_POWER_DOWN 2
+#define WDOG_TIMEOUT_POWER_CYCLE 3
+
+/*
+ * Byte 3 of the get command, byte 4 of the get response is the
+ * pre-timeout in seconds.
+ */
+
+/* Bits for setting byte 4 of the set command, byte 5 of the get response. */
+#define WDOG_EXPIRE_CLEAR_BIOS_FRB2 (1 << 1)
+#define WDOG_EXPIRE_CLEAR_BIOS_POST (1 << 2)
+#define WDOG_EXPIRE_CLEAR_OS_LOAD (1 << 3)
+#define WDOG_EXPIRE_CLEAR_SMS_OS (1 << 4)
+#define WDOG_EXPIRE_CLEAR_OEM (1 << 5)
+
+/*
+ * Setting/getting the watchdog timer value. This is for bytes 5 and
+ * 6 (the timeout time) of the set command, and bytes 6 and 7 (the
+ * timeout time) and 8 and 9 (the current countdown value) of the
+ * response. The timeout value is given in seconds (in the command it
+ * is 100ms intervals).
+ */
+#define WDOG_SET_TIMEOUT(byte1, byte2, val) \
+ (byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8)
+#define WDOG_GET_TIMEOUT(byte1, byte2) \
+ (((byte1) | ((byte2) << 8)) / 10)
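+/*
+ * Worked example (illustrative): a 10 second timeout is 100 intervals of
+ * 100ms, so WDOG_SET_TIMEOUT(b1, b2, 10) stores b1 = 0x64 and b2 = 0x00,
+ * and WDOG_GET_TIMEOUT(0x64, 0x00) returns 100 / 10 = 10 seconds.
+ */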
+
+#define IPMI_WDOG_RESET_TIMER 0x22
+#define IPMI_WDOG_SET_TIMER 0x24
+#define IPMI_WDOG_GET_TIMER 0x25
+
+#define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80
+
+static DEFINE_MUTEX(ipmi_watchdog_mutex);
+static bool nowayout = WATCHDOG_NOWAYOUT;
+
+static struct ipmi_user *watchdog_user;
+static int watchdog_ifnum;
+
+/* Default the timeout to 10 seconds. */
+static int timeout = 10;
+
+/* The pre-timeout is disabled by default. */
+static int pretimeout;
+
+/* Default timeout to set on panic */
+static int panic_wdt_timeout = 255;
+
+/* Default action is to reset the board on a timeout. */
+static unsigned char action_val = WDOG_TIMEOUT_RESET;
+
+static char action[16] = "reset";
+
+static unsigned char preaction_val = WDOG_PRETIMEOUT_NONE;
+
+static char preaction[16] = "pre_none";
+
+static unsigned char preop_val = WDOG_PREOP_NONE;
+
+static char preop[16] = "preop_none";
+static DEFINE_SPINLOCK(ipmi_read_lock);
+static char data_to_read;
+static DECLARE_WAIT_QUEUE_HEAD(read_q);
+static struct fasync_struct *fasync_q;
+static atomic_t pretimeout_since_last_heartbeat;
+static char expect_close;
+
+static int ifnum_to_use = -1;
+
+/* Parameters to ipmi_set_timeout */
+#define IPMI_SET_TIMEOUT_NO_HB 0
+#define IPMI_SET_TIMEOUT_HB_IF_NECESSARY 1
+#define IPMI_SET_TIMEOUT_FORCE_HB 2
+
+static int ipmi_set_timeout(int do_heartbeat);
+static void ipmi_register_watchdog(int ipmi_intf);
+static void ipmi_unregister_watchdog(int ipmi_intf);
+
+/*
+ * If true, the driver will start running as soon as it is configured
+ * and ready.
+ */
+static int start_now;
+
+static int set_param_timeout(const char *val, const struct kernel_param *kp)
+{
+ char *endp;
+ int l;
+ int rv = 0;
+
+ if (!val)
+ return -EINVAL;
+ l = simple_strtoul(val, &endp, 0);
+ if (endp == val)
+ return -EINVAL;
+
+ *((int *)kp->arg) = l;
+ if (watchdog_user)
+ rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+ return rv;
+}
+
+static const struct kernel_param_ops param_ops_timeout = {
+ .set = set_param_timeout,
+ .get = param_get_int,
+};
+#define param_check_timeout param_check_int
+
+typedef int (*action_fn)(const char *intval, char *outval);
+
+static int action_op(const char *inval, char *outval);
+static int preaction_op(const char *inval, char *outval);
+static int preop_op(const char *inval, char *outval);
+static void check_parms(void);
+
+static int set_param_str(const char *val, const struct kernel_param *kp)
+{
+ action_fn fn = (action_fn) kp->arg;
+ int rv = 0;
+ char valcp[16];
+ char *s;
+
+ strscpy(valcp, val, 16);
+
+ s = strstrip(valcp);
+
+ rv = fn(s, NULL);
+ if (rv)
+ goto out;
+
+ check_parms();
+ if (watchdog_user)
+ rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+ out:
+ return rv;
+}
+
+static int get_param_str(char *buffer, const struct kernel_param *kp)
+{
+ action_fn fn = (action_fn) kp->arg;
+ int rv, len;
+
+ rv = fn(NULL, buffer);
+ if (rv)
+ return rv;
+
+ len = strlen(buffer);
+ buffer[len++] = '\n';
+ buffer[len] = 0;
+
+ return len;
+}
+
+
+static int set_param_wdog_ifnum(const char *val, const struct kernel_param *kp)
+{
+ int rv = param_set_int(val, kp);
+ if (rv)
+ return rv;
+ if ((ifnum_to_use < 0) || (ifnum_to_use == watchdog_ifnum))
+ return 0;
+
+ ipmi_unregister_watchdog(watchdog_ifnum);
+ ipmi_register_watchdog(ifnum_to_use);
+ return 0;
+}
+
+static const struct kernel_param_ops param_ops_wdog_ifnum = {
+ .set = set_param_wdog_ifnum,
+ .get = param_get_int,
+};
+
+#define param_check_wdog_ifnum param_check_int
+
+static const struct kernel_param_ops param_ops_str = {
+ .set = set_param_str,
+ .get = get_param_str,
+};
+
+module_param(ifnum_to_use, wdog_ifnum, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog "
+ "timer. Setting to -1 defaults to the first registered "
+ "interface");
+
+module_param(timeout, timeout, 0644);
+MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
+
+module_param(pretimeout, timeout, 0644);
+MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
+
+module_param(panic_wdt_timeout, timeout, 0644);
+MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds.");
+
+module_param_cb(action, &param_ops_str, action_op, 0644);
+MODULE_PARM_DESC(action, "Timeout action. One of: "
+ "reset, none, power_cycle, power_off.");
+
+module_param_cb(preaction, &param_ops_str, preaction_op, 0644);
+MODULE_PARM_DESC(preaction, "Pretimeout action. One of: "
+ "pre_none, pre_smi, pre_nmi, pre_int.");
+
+module_param_cb(preop, &param_ops_str, preop_op, 0644);
+MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: "
+ "preop_none, preop_panic, preop_give_data.");
+
+module_param(start_now, int, 0444);
+MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as "
+ "soon as the driver is loaded.");
+
+module_param(nowayout, bool, 0644);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=CONFIG_WATCHDOG_NOWAYOUT)");
+
+/* Default state of the timer. */
+static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+
+/* Is someone using the watchdog? Only one user is allowed. */
+static unsigned long ipmi_wdog_open;
+
+/*
+ * If set to 1, the heartbeat command will set the state to reset and
+ * start the timer. The timer doesn't normally run when the driver is
+ * first opened until the first heartbeat is sent; this variable is
+ * used to accomplish that.
+ */
+static int ipmi_start_timer_on_heartbeat;
+
+/* IPMI version of the BMC. */
+static unsigned char ipmi_version_major;
+static unsigned char ipmi_version_minor;
+
+/* If a pretimeout occurs, this is used to allow only one panic to happen. */
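+/* It starts at -1 so only the first atomic_inc_and_test() returns true. */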
+static atomic_t preop_panic_excl = ATOMIC_INIT(-1);
+
+#ifdef HAVE_DIE_NMI
+static int testing_nmi;
+static int nmi_handler_registered;
+#endif
+
+static int __ipmi_heartbeat(void);
+
+/*
+ * We use a mutex to make sure that only one thing can send a set
+ * timeout message at one time. The mutex is claimed when a message is sent
+ * and freed when both the send and receive messages are free.
+ */
+static atomic_t msg_tofree = ATOMIC_INIT(0);
+static DECLARE_COMPLETION(msg_wait);
+static void msg_free_smi(struct ipmi_smi_msg *msg)
+{
+ if (atomic_dec_and_test(&msg_tofree)) {
+ if (!oops_in_progress)
+ complete(&msg_wait);
+ }
+}
+static void msg_free_recv(struct ipmi_recv_msg *msg)
+{
+ if (atomic_dec_and_test(&msg_tofree)) {
+ if (!oops_in_progress)
+ complete(&msg_wait);
+ }
+}
+static struct ipmi_smi_msg smi_msg = INIT_IPMI_SMI_MSG(msg_free_smi);
+static struct ipmi_recv_msg recv_msg = INIT_IPMI_RECV_MSG(msg_free_recv);
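+
+/*
+ * msg_tofree is set to 2 before each request that uses these buffers
+ * (one count for smi_msg, one for recv_msg); each free callback drops
+ * a count, and msg_wait completes once both buffers have been returned
+ * (unless an oops is in progress).
+ */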
+
+static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
+ struct ipmi_recv_msg *recv_msg,
+ int *send_heartbeat_now)
+{
+ struct kernel_ipmi_msg msg;
+ unsigned char data[6];
+ int rv;
+ struct ipmi_system_interface_addr addr;
+ int hbnow = 0;
+
+
+ data[0] = 0;
+ WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
+
+ if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+ if ((ipmi_version_major > 1) ||
+ ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
+ /* This is an IPMI 1.5-only feature. */
+ data[0] |= WDOG_DONT_STOP_ON_SET;
+ } else {
+ /*
+ * In IPMI 1.0, setting the timer stops the watchdog, so we
+ * need to start it back up again.
+ */
+ hbnow = 1;
+ }
+ }
+
+ data[1] = 0;
+ WDOG_SET_TIMEOUT_ACT(data[1], ipmi_watchdog_state);
+ if ((pretimeout > 0) && (ipmi_watchdog_state != WDOG_TIMEOUT_NONE)) {
+ WDOG_SET_PRETIMEOUT_ACT(data[1], preaction_val);
+ data[2] = pretimeout;
+ } else {
+ WDOG_SET_PRETIMEOUT_ACT(data[1], WDOG_PRETIMEOUT_NONE);
+ data[2] = 0; /* No pretimeout. */
+ }
+ data[3] = 0;
+ WDOG_SET_TIMEOUT(data[4], data[5], timeout);
+
+ addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ addr.channel = IPMI_BMC_CHANNEL;
+ addr.lun = 0;
+
+ msg.netfn = 0x06;
+ msg.cmd = IPMI_WDOG_SET_TIMER;
+ msg.data = data;
+ msg.data_len = sizeof(data);
+ rv = ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ smi_msg,
+ recv_msg,
+ 1);
+ if (rv)
+ pr_warn("set timeout error: %d\n", rv);
+ else if (send_heartbeat_now)
+ *send_heartbeat_now = hbnow;
+
+ return rv;
+}
+
+static int _ipmi_set_timeout(int do_heartbeat)
+{
+ int send_heartbeat_now;
+ int rv;
+
+ if (!watchdog_user)
+ return -ENODEV;
+
+ atomic_set(&msg_tofree, 2);
+
+ rv = __ipmi_set_timeout(&smi_msg,
+ &recv_msg,
+ &send_heartbeat_now);
+ if (rv) {
+ atomic_set(&msg_tofree, 0);
+ return rv;
+ }
+
+ wait_for_completion(&msg_wait);
+
+ if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
+ || ((send_heartbeat_now)
+ && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
+ rv = __ipmi_heartbeat();
+
+ return rv;
+}
+
+static int ipmi_set_timeout(int do_heartbeat)
+{
+ int rv;
+
+ mutex_lock(&ipmi_watchdog_mutex);
+ rv = _ipmi_set_timeout(do_heartbeat);
+ mutex_unlock(&ipmi_watchdog_mutex);
+
+ return rv;
+}
+
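+/*
+ * panic_done_count counts the panic-time message buffers still owned by
+ * the lower layer: each request adds 2 (SMI + receive message) and the
+ * free callbacks decrement it. It is drained by polling the interface,
+ * since nothing else runs at panic/halt time.
+ */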
+static atomic_t panic_done_count = ATOMIC_INIT(0);
+
+static void panic_smi_free(struct ipmi_smi_msg *msg)
+{
+ atomic_dec(&panic_done_count);
+}
+static void panic_recv_free(struct ipmi_recv_msg *msg)
+{
+ atomic_dec(&panic_done_count);
+}
+
+static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg =
+ INIT_IPMI_SMI_MSG(panic_smi_free);
+static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg =
+ INIT_IPMI_RECV_MSG(panic_recv_free);
+
+static void panic_halt_ipmi_heartbeat(void)
+{
+ struct kernel_ipmi_msg msg;
+ struct ipmi_system_interface_addr addr;
+ int rv;
+
+ /*
+ * Don't send a timer reset if the timer is turned off; that
+ * would re-enable the watchdog.
+ */
+ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
+ return;
+
+ addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ addr.channel = IPMI_BMC_CHANNEL;
+ addr.lun = 0;
+
+ msg.netfn = 0x06;
+ msg.cmd = IPMI_WDOG_RESET_TIMER;
+ msg.data = NULL;
+ msg.data_len = 0;
+ atomic_add(2, &panic_done_count);
+ rv = ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ &panic_halt_heartbeat_smi_msg,
+ &panic_halt_heartbeat_recv_msg,
+ 1);
+ if (rv)
+ atomic_sub(2, &panic_done_count);
+}
+
+static struct ipmi_smi_msg panic_halt_smi_msg =
+ INIT_IPMI_SMI_MSG(panic_smi_free);
+static struct ipmi_recv_msg panic_halt_recv_msg =
+ INIT_IPMI_RECV_MSG(panic_recv_free);
+
+/*
+ * Special call, doesn't claim any locks. This is only to be called
+ * at panic or halt time, in run-to-completion mode, when the caller
+ * is the only CPU and the only thing still running is these IPMI
+ * calls.
+ */
+static void panic_halt_ipmi_set_timeout(void)
+{
+ int send_heartbeat_now;
+ int rv;
+
+ /* Wait for the messages to be free. */
+ while (atomic_read(&panic_done_count) != 0)
+ ipmi_poll_interface(watchdog_user);
+ atomic_add(2, &panic_done_count);
+ rv = __ipmi_set_timeout(&panic_halt_smi_msg,
+ &panic_halt_recv_msg,
+ &send_heartbeat_now);
+ if (rv) {
+ atomic_sub(2, &panic_done_count);
+ pr_warn("Unable to extend the watchdog timeout\n");
+ } else {
+ if (send_heartbeat_now)
+ panic_halt_ipmi_heartbeat();
+ }
+ while (atomic_read(&panic_done_count) != 0)
+ ipmi_poll_interface(watchdog_user);
+}
+
+static int __ipmi_heartbeat(void)
+{
+ struct kernel_ipmi_msg msg;
+ int rv;
+ struct ipmi_system_interface_addr addr;
+ int timeout_retries = 0;
+
+restart:
+ /*
+ * Don't send a timer reset if the timer is turned off; that
+ * would re-enable the watchdog.
+ */
+ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
+ return 0;
+
+ atomic_set(&msg_tofree, 2);
+
+ addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ addr.channel = IPMI_BMC_CHANNEL;
+ addr.lun = 0;
+
+ msg.netfn = 0x06;
+ msg.cmd = IPMI_WDOG_RESET_TIMER;
+ msg.data = NULL;
+ msg.data_len = 0;
+ rv = ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ &smi_msg,
+ &recv_msg,
+ 1);
+ if (rv) {
+ atomic_set(&msg_tofree, 0);
+ pr_warn("heartbeat send failure: %d\n", rv);
+ return rv;
+ }
+
+ /* Wait for the heartbeat to be sent. */
+ wait_for_completion(&msg_wait);
+
+ if (recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) {
+ timeout_retries++;
+ if (timeout_retries > 3) {
+ pr_err("Unable to restore the IPMI watchdog's settings, giving up\n");
+ rv = -EIO;
+ goto out;
+ }
+
+ /*
+ * The timer was not initialized, that means the BMC was
+ * probably reset and lost the watchdog information. Attempt
+ * to restore the timer's info. Note that we still hold
+ * the heartbeat lock, to keep a heartbeat from happening
+ * in this process, so we must say no heartbeat to avoid a
+ * deadlock on this mutex.
+ */
+ rv = _ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ if (rv) {
+ pr_err("Unable to send the command to set the watchdog's settings, giving up\n");
+ goto out;
+ }
+
+ /* Might need a heartbeat send, go ahead and do it. */
+ goto restart;
+ } else if (recv_msg.msg.data[0] != 0) {
+ /*
+ * Got an error in the heartbeat response. It was already
+ * reported in ipmi_wdog_msg_handler, but we should return
+ * an error here.
+ */
+ rv = -EINVAL;
+ }
+
+out:
+ return rv;
+}
+
+static int _ipmi_heartbeat(void)
+{
+ int rv;
+
+ if (!watchdog_user)
+ return -ENODEV;
+
+ if (ipmi_start_timer_on_heartbeat) {
+ ipmi_start_timer_on_heartbeat = 0;
+ ipmi_watchdog_state = action_val;
+ rv = _ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+ } else if (atomic_cmpxchg(&pretimeout_since_last_heartbeat, 1, 0)) {
+ /*
+ * A pretimeout occurred, make sure we set the timeout.
+ * We don't want to set the action, though, we want to
+ * leave that alone (thus it can't be combined with the
+ * above operation).
+ */
+ rv = _ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+ } else {
+ rv = __ipmi_heartbeat();
+ }
+
+ return rv;
+}
+
+static int ipmi_heartbeat(void)
+{
+ int rv;
+
+ mutex_lock(&ipmi_watchdog_mutex);
+ rv = _ipmi_heartbeat();
+ mutex_unlock(&ipmi_watchdog_mutex);
+
+ return rv;
+}
+
+static const struct watchdog_info ident = {
+ .options = 0, /* WDIOF_SETTIMEOUT, */
+ .firmware_version = 1,
+ .identity = "IPMI"
+};
+
+static int ipmi_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int i;
+ int val;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ i = copy_to_user(argp, &ident, sizeof(ident));
+ return i ? -EFAULT : 0;
+
+ case WDIOC_SETTIMEOUT:
+ i = copy_from_user(&val, argp, sizeof(int));
+ if (i)
+ return -EFAULT;
+ timeout = val;
+ return _ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+ case WDIOC_GETTIMEOUT:
+ i = copy_to_user(argp, &timeout, sizeof(timeout));
+ if (i)
+ return -EFAULT;
+ return 0;
+
+ case WDIOC_SETPRETIMEOUT:
+ i = copy_from_user(&val, argp, sizeof(int));
+ if (i)
+ return -EFAULT;
+ pretimeout = val;
+ return _ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+ case WDIOC_GETPRETIMEOUT:
+ i = copy_to_user(argp, &pretimeout, sizeof(pretimeout));
+ if (i)
+ return -EFAULT;
+ return 0;
+
+ case WDIOC_KEEPALIVE:
+ return _ipmi_heartbeat();
+
+ case WDIOC_SETOPTIONS:
+ i = copy_from_user(&val, argp, sizeof(int));
+ if (i)
+ return -EFAULT;
+ if (val & WDIOS_DISABLECARD) {
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ _ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ ipmi_start_timer_on_heartbeat = 0;
+ }
+
+ if (val & WDIOS_ENABLECARD) {
+ ipmi_watchdog_state = action_val;
+ _ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+ }
+ return 0;
+
+ case WDIOC_GETSTATUS:
+ val = 0;
+ i = copy_to_user(argp, &val, sizeof(val));
+ if (i)
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static long ipmi_unlocked_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ mutex_lock(&ipmi_watchdog_mutex);
+ ret = ipmi_ioctl(file, cmd, arg);
+ mutex_unlock(&ipmi_watchdog_mutex);
+
+ return ret;
+}
+
+static ssize_t ipmi_write(struct file *file,
+ const char __user *buf,
+ size_t len,
+ loff_t *ppos)
+{
+ int rv;
+
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ /* In case it was set long ago */
+ expect_close = 0;
+
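+ /*
+ * Standard watchdog "magic close": writing a 'V' sets
+ * expect_close so the subsequent release stops the timer
+ * cleanly instead of warning (see ipmi_close()).
+ */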
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, buf + i))
+ return -EFAULT;
+ if (c == 'V')
+ expect_close = 42;
+ }
+ }
+ rv = ipmi_heartbeat();
+ if (rv)
+ return rv;
+ }
+ return len;
+}
+
+static ssize_t ipmi_read(struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int rv = 0;
+ wait_queue_entry_t wait;
+
+ if (count <= 0)
+ return 0;
+
+ /*
+ * Reading returns if the pretimeout has gone off, and it only does
+ * it once per pretimeout.
+ */
+ spin_lock_irq(&ipmi_read_lock);
+ if (!data_to_read) {
+ if (file->f_flags & O_NONBLOCK) {
+ rv = -EAGAIN;
+ goto out;
+ }
+
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&read_q, &wait);
+ while (!data_to_read && !signal_pending(current)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irq(&ipmi_read_lock);
+ schedule();
+ spin_lock_irq(&ipmi_read_lock);
+ }
+ remove_wait_queue(&read_q, &wait);
+
+ if (signal_pending(current)) {
+ rv = -ERESTARTSYS;
+ goto out;
+ }
+ }
+ data_to_read = 0;
+
+ out:
+ spin_unlock_irq(&ipmi_read_lock);
+
+ if (rv == 0) {
+ if (copy_to_user(buf, &data_to_read, 1))
+ rv = -EFAULT;
+ else
+ rv = 1;
+ }
+
+ return rv;
+}
+
+static int ipmi_open(struct inode *ino, struct file *filep)
+{
+ switch (iminor(ino)) {
+ case WATCHDOG_MINOR:
+ if (test_and_set_bit(0, &ipmi_wdog_open))
+ return -EBUSY;
+
+
+ /*
+ * Don't start the timer now, let it start on the
+ * first heartbeat.
+ */
+ ipmi_start_timer_on_heartbeat = 1;
+ return stream_open(ino, filep);
+
+ default:
+ return (-ENODEV);
+ }
+}
+
+static __poll_t ipmi_poll(struct file *file, poll_table *wait)
+{
+ __poll_t mask = 0;
+
+ poll_wait(file, &read_q, wait);
+
+ spin_lock_irq(&ipmi_read_lock);
+ if (data_to_read)
+ mask |= (EPOLLIN | EPOLLRDNORM);
+ spin_unlock_irq(&ipmi_read_lock);
+
+ return mask;
+}
+
+static int ipmi_fasync(int fd, struct file *file, int on)
+{
+ int result;
+
+ result = fasync_helper(fd, file, on, &fasync_q);
+
+ return (result);
+}
+
+static int ipmi_close(struct inode *ino, struct file *filep)
+{
+ if (iminor(ino) == WATCHDOG_MINOR) {
+ if (expect_close == 42) {
+ mutex_lock(&ipmi_watchdog_mutex);
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ _ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ mutex_unlock(&ipmi_watchdog_mutex);
+ } else {
+ pr_crit("Unexpected close, not stopping watchdog!\n");
+ ipmi_heartbeat();
+ }
+ clear_bit(0, &ipmi_wdog_open);
+ }
+
+ expect_close = 0;
+
+ return 0;
+}
+
+static const struct file_operations ipmi_wdog_fops = {
+ .owner = THIS_MODULE,
+ .read = ipmi_read,
+ .poll = ipmi_poll,
+ .write = ipmi_write,
+ .unlocked_ioctl = ipmi_unlocked_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .open = ipmi_open,
+ .release = ipmi_close,
+ .fasync = ipmi_fasync,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice ipmi_wdog_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &ipmi_wdog_fops
+};
+
+static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
+ void *handler_data)
+{
+ if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
+ msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
+ pr_info("response: The IPMI controller appears to have been reset, will attempt to reinitialize the watchdog timer\n");
+ else if (msg->msg.data[0] != 0)
+ pr_err("response: Error %x on cmd %x\n",
+ msg->msg.data[0],
+ msg->msg.cmd);
+
+ ipmi_free_recv_msg(msg);
+}
+
+static void ipmi_wdog_pretimeout_handler(void *handler_data)
+{
+ if (preaction_val != WDOG_PRETIMEOUT_NONE) {
+ if (preop_val == WDOG_PREOP_PANIC) {
+ if (atomic_inc_and_test(&preop_panic_excl))
+ panic("Watchdog pre-timeout");
+ } else if (preop_val == WDOG_PREOP_GIVE_DATA) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipmi_read_lock, flags);
+ data_to_read = 1;
+ wake_up_interruptible(&read_q);
+ kill_fasync(&fasync_q, SIGIO, POLL_IN);
+ spin_unlock_irqrestore(&ipmi_read_lock, flags);
+ }
+ }
+
+ /*
+ * On some machines, the heartbeat will give an error and not
+ * work unless we re-enable the timer. So do so.
+ */
+ atomic_set(&pretimeout_since_last_heartbeat, 1);
+}
+
+static void ipmi_wdog_panic_handler(void *user_data)
+{
+ static int panic_event_handled;
+
+ /*
+ * On a panic, if we have a panic timeout, make sure to extend
+ * the watchdog timer to a reasonable value to complete the
+ * panic, if the watchdog timer is running. Plus the
+ * pretimeout is meaningless at panic time.
+ */
+ if (watchdog_user && !panic_event_handled &&
+ ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+ /* Make sure we do this only once. */
+ panic_event_handled = 1;
+
+ timeout = panic_wdt_timeout;
+ pretimeout = 0;
+ panic_halt_ipmi_set_timeout();
+ }
+}
+
+static const struct ipmi_user_hndl ipmi_hndlrs = {
+ .ipmi_recv_hndl = ipmi_wdog_msg_handler,
+ .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler,
+ .ipmi_panic_handler = ipmi_wdog_panic_handler
+};
+
+static void ipmi_register_watchdog(int ipmi_intf)
+{
+ int rv = -EBUSY;
+
+ if (watchdog_user)
+ goto out;
+
+ if ((ifnum_to_use >= 0) && (ifnum_to_use != ipmi_intf))
+ goto out;
+
+ watchdog_ifnum = ipmi_intf;
+
+ rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user);
+ if (rv < 0) {
+ pr_crit("Unable to register with ipmi\n");
+ goto out;
+ }
+
+ rv = ipmi_get_version(watchdog_user,
+ &ipmi_version_major,
+ &ipmi_version_minor);
+ if (rv) {
+ pr_warn("Unable to get IPMI version, assuming 1.0\n");
+ ipmi_version_major = 1;
+ ipmi_version_minor = 0;
+ }
+
+ rv = misc_register(&ipmi_wdog_miscdev);
+ if (rv < 0) {
+ ipmi_destroy_user(watchdog_user);
+ watchdog_user = NULL;
+ pr_crit("Unable to register misc device\n");
+ }
+
+#ifdef HAVE_DIE_NMI
+ if (nmi_handler_registered) {
+ int old_pretimeout = pretimeout;
+ int old_timeout = timeout;
+ int old_preop_val = preop_val;
+
+ /*
+ * Set the pretimeout to go off in a second and give
+ * ourselves plenty of time to stop the timer.
+ */
+ ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
+ preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */
+ pretimeout = 99;
+ timeout = 100;
+
+ testing_nmi = 1;
+
+ rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+ if (rv) {
+ pr_warn("Error starting timer to test NMI: 0x%x. The NMI pretimeout will likely not work\n",
+ rv);
+ rv = 0;
+ goto out_restore;
+ }
+
+ msleep(1500);
+
+ if (testing_nmi != 2) {
+ pr_warn("IPMI NMI didn't seem to occur. The NMI pretimeout will likely not work\n");
+ }
+ out_restore:
+ testing_nmi = 0;
+ preop_val = old_preop_val;
+ pretimeout = old_pretimeout;
+ timeout = old_timeout;
+ }
+#endif
+
+ out:
+ if ((start_now) && (rv == 0)) {
+ /* Run from startup, so start the timer now. */
+ start_now = 0; /* Disable this function after first startup. */
+ ipmi_watchdog_state = action_val;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+ pr_info("Starting now!\n");
+ } else {
+ /* Stop the timer now. */
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ }
+}
+
+static void ipmi_unregister_watchdog(int ipmi_intf)
+{
+ int rv;
+ struct ipmi_user *loc_user = watchdog_user;
+
+ if (!loc_user)
+ return;
+
+ if (watchdog_ifnum != ipmi_intf)
+ return;
+
+ /* Make sure no one can call us any more. */
+ misc_deregister(&ipmi_wdog_miscdev);
+
+ watchdog_user = NULL;
+
+ /*
+ * Wait to make sure the message makes it out. The lower layer has
+ * pointers to our buffers, so we want to make sure they are done before
+ * we release our memory.
+ */
+ while (atomic_read(&msg_tofree))
+ msg_free_smi(NULL);
+
+ mutex_lock(&ipmi_watchdog_mutex);
+
+ /* Disconnect from IPMI. */
+ rv = ipmi_destroy_user(loc_user);
+ if (rv)
+ pr_warn("error unlinking from IPMI: %d\n", rv);
+
+ /* If it comes back, restart it properly. */
+ ipmi_start_timer_on_heartbeat = 1;
+
+ mutex_unlock(&ipmi_watchdog_mutex);
+}
+
+#ifdef HAVE_DIE_NMI
+static int
+ipmi_nmi(unsigned int val, struct pt_regs *regs)
+{
+ /*
+ * If we get here, it's an NMI that's not a memory or I/O
+ * error. We can't truly tell if it's from IPMI or not
+ * without sending a message, and sending a message is almost
+ * impossible because of locking.
+ */
+
+ if (testing_nmi) {
+ testing_nmi = 2;
+ return NMI_HANDLED;
+ }
+
+ /* If we are not expecting a timeout, ignore it. */
+ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
+ return NMI_DONE;
+
+ if (preaction_val != WDOG_PRETIMEOUT_NMI)
+ return NMI_DONE;
+
+ /*
+ * If no one else handled the NMI, we assume it was the IPMI
+ * watchdog.
+ */
+ if (preop_val == WDOG_PREOP_PANIC) {
+ /*
+ * On some machines, the heartbeat will give an error and not
+ * work unless we re-enable the timer. So do so.
+ */
+ atomic_set(&pretimeout_since_last_heartbeat, 1);
+ if (atomic_inc_and_test(&preop_panic_excl))
+ nmi_panic(regs, "pre-timeout");
+ }
+
+ return NMI_HANDLED;
+}
+#endif
+
+static int wdog_reboot_handler(struct notifier_block *this,
+ unsigned long code,
+ void *unused)
+{
+ static int reboot_event_handled;
+
+ if ((watchdog_user) && (!reboot_event_handled)) {
+ /* Make sure we only do this once. */
+ reboot_event_handled = 1;
+
+ if (code == SYS_POWER_OFF || code == SYS_HALT) {
+ /* Disable the WDT if we are shutting down. */
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+ /*
+ * Set a long timer to let the reboot happen or reset if it
+ * hangs, but only if the watchdog timer was already running.
+ */
+ if (timeout < 120)
+ timeout = 120;
+ pretimeout = 0;
+ ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ }
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block wdog_reboot_notifier = {
+ .notifier_call = wdog_reboot_handler,
+ .next = NULL,
+ .priority = 0
+};
+
+static void ipmi_new_smi(int if_num, struct device *device)
+{
+ ipmi_register_watchdog(if_num);
+}
+
+static void ipmi_smi_gone(int if_num)
+{
+ ipmi_unregister_watchdog(if_num);
+}
+
+static struct ipmi_smi_watcher smi_watcher = {
+ .owner = THIS_MODULE,
+ .new_smi = ipmi_new_smi,
+ .smi_gone = ipmi_smi_gone
+};
+
+static int action_op(const char *inval, char *outval)
+{
+ if (outval)
+ strcpy(outval, action);
+
+ if (!inval)
+ return 0;
+
+ if (strcmp(inval, "reset") == 0)
+ action_val = WDOG_TIMEOUT_RESET;
+ else if (strcmp(inval, "none") == 0)
+ action_val = WDOG_TIMEOUT_NONE;
+ else if (strcmp(inval, "power_cycle") == 0)
+ action_val = WDOG_TIMEOUT_POWER_CYCLE;
+ else if (strcmp(inval, "power_off") == 0)
+ action_val = WDOG_TIMEOUT_POWER_DOWN;
+ else
+ return -EINVAL;
+ strcpy(action, inval);
+ return 0;
+}
+
+static int preaction_op(const char *inval, char *outval)
+{
+ if (outval)
+ strcpy(outval, preaction);
+
+ if (!inval)
+ return 0;
+
+ if (strcmp(inval, "pre_none") == 0)
+ preaction_val = WDOG_PRETIMEOUT_NONE;
+ else if (strcmp(inval, "pre_smi") == 0)
+ preaction_val = WDOG_PRETIMEOUT_SMI;
+#ifdef HAVE_DIE_NMI
+ else if (strcmp(inval, "pre_nmi") == 0)
+ preaction_val = WDOG_PRETIMEOUT_NMI;
+#endif
+ else if (strcmp(inval, "pre_int") == 0)
+ preaction_val = WDOG_PRETIMEOUT_MSG_INT;
+ else
+ return -EINVAL;
+ strcpy(preaction, inval);
+ return 0;
+}
+
+static int preop_op(const char *inval, char *outval)
+{
+ if (outval)
+ strcpy(outval, preop);
+
+ if (!inval)
+ return 0;
+
+ if (strcmp(inval, "preop_none") == 0)
+ preop_val = WDOG_PREOP_NONE;
+ else if (strcmp(inval, "preop_panic") == 0)
+ preop_val = WDOG_PREOP_PANIC;
+ else if (strcmp(inval, "preop_give_data") == 0)
+ preop_val = WDOG_PREOP_GIVE_DATA;
+ else
+ return -EINVAL;
+ strcpy(preop, inval);
+ return 0;
+}
+
+static void check_parms(void)
+{
+#ifdef HAVE_DIE_NMI
+ int do_nmi = 0;
+ int rv;
+
+ if (preaction_val == WDOG_PRETIMEOUT_NMI) {
+ do_nmi = 1;
+ if (preop_val == WDOG_PREOP_GIVE_DATA) {
+ pr_warn("Pretimeout op is to give data but NMI pretimeout is enabled, setting pretimeout op to none\n");
+ preop_op("preop_none", NULL);
+ do_nmi = 0;
+ }
+ }
+ if (do_nmi && !nmi_handler_registered) {
+ rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0,
+ "ipmi");
+ if (rv) {
+ pr_warn("Can't register nmi handler\n");
+ return;
+ } else
+ nmi_handler_registered = 1;
+ } else if (!do_nmi && nmi_handler_registered) {
+ unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
+ nmi_handler_registered = 0;
+ }
+#endif
+}
+
+static int __init ipmi_wdog_init(void)
+{
+ int rv;
+
+ if (action_op(action, NULL)) {
+ action_op("reset", NULL);
+ pr_info("Unknown action '%s', defaulting to reset\n", action);
+ }
+
+ if (preaction_op(preaction, NULL)) {
+ preaction_op("pre_none", NULL);
+ pr_info("Unknown preaction '%s', defaulting to none\n",
+ preaction);
+ }
+
+ if (preop_op(preop, NULL)) {
+ preop_op("preop_none", NULL);
+ pr_info("Unknown preop '%s', defaulting to none\n", preop);
+ }
+
+ check_parms();
+
+ register_reboot_notifier(&wdog_reboot_notifier);
+
+ rv = ipmi_smi_watcher_register(&smi_watcher);
+ if (rv) {
+#ifdef HAVE_DIE_NMI
+ if (nmi_handler_registered)
+ unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
+#endif
+ unregister_reboot_notifier(&wdog_reboot_notifier);
+ pr_warn("can't register smi watcher\n");
+ return rv;
+ }
+
+ pr_info("driver initialized\n");
+
+ return 0;
+}
+
+static void __exit ipmi_wdog_exit(void)
+{
+ ipmi_smi_watcher_unregister(&smi_watcher);
+ ipmi_unregister_watchdog(watchdog_ifnum);
+
+#ifdef HAVE_DIE_NMI
+ if (nmi_handler_registered)
+ unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
+#endif
+
+ unregister_reboot_notifier(&wdog_reboot_notifier);
+}
+module_exit(ipmi_wdog_exit);
+module_init(ipmi_wdog_init);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("watchdog timer based upon the IPMI interface.");
diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c
new file mode 100644
index 0000000000..8b1161d519
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, Intel Corporation.
+ * Copyright (c) 2021, IBM Corp.
+ */
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#include "kcs_bmc.h"
+
+/* Implement both the device and client interfaces here */
+#include "kcs_bmc_device.h"
+#include "kcs_bmc_client.h"
+
+/* Record registered devices and drivers */
+static DEFINE_MUTEX(kcs_bmc_lock);
+static LIST_HEAD(kcs_bmc_devices);
+static LIST_HEAD(kcs_bmc_drivers);
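+
+/*
+ * Devices and client drivers may register in either order: each side
+ * walks the other's list under kcs_bmc_lock, so a chardev is created
+ * for every (device, driver) pair regardless of probe order.
+ */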
+
+/* Consumer data access */
+
+u8 kcs_bmc_read_data(struct kcs_bmc_device *kcs_bmc)
+{
+ return kcs_bmc->ops->io_inputb(kcs_bmc, kcs_bmc->ioreg.idr);
+}
+EXPORT_SYMBOL(kcs_bmc_read_data);
+
+void kcs_bmc_write_data(struct kcs_bmc_device *kcs_bmc, u8 data)
+{
+ kcs_bmc->ops->io_outputb(kcs_bmc, kcs_bmc->ioreg.odr, data);
+}
+EXPORT_SYMBOL(kcs_bmc_write_data);
+
+u8 kcs_bmc_read_status(struct kcs_bmc_device *kcs_bmc)
+{
+ return kcs_bmc->ops->io_inputb(kcs_bmc, kcs_bmc->ioreg.str);
+}
+EXPORT_SYMBOL(kcs_bmc_read_status);
+
+void kcs_bmc_write_status(struct kcs_bmc_device *kcs_bmc, u8 data)
+{
+ kcs_bmc->ops->io_outputb(kcs_bmc, kcs_bmc->ioreg.str, data);
+}
+EXPORT_SYMBOL(kcs_bmc_write_status);
+
+void kcs_bmc_update_status(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 val)
+{
+ kcs_bmc->ops->io_updateb(kcs_bmc, kcs_bmc->ioreg.str, mask, val);
+}
+EXPORT_SYMBOL(kcs_bmc_update_status);
+
+irqreturn_t kcs_bmc_handle_event(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_client *client;
+ irqreturn_t rc = IRQ_NONE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kcs_bmc->lock, flags);
+ client = kcs_bmc->client;
+ if (client)
+ rc = client->ops->event(client);
+ spin_unlock_irqrestore(&kcs_bmc->lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL(kcs_bmc_handle_event);
+
+int kcs_bmc_enable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client)
+{
+ int rc;
+
+ spin_lock_irq(&kcs_bmc->lock);
+ if (kcs_bmc->client) {
+ rc = -EBUSY;
+ } else {
+ u8 mask = KCS_BMC_EVENT_TYPE_IBF;
+
+ kcs_bmc->client = client;
+ kcs_bmc_update_event_mask(kcs_bmc, mask, mask);
+ rc = 0;
+ }
+ spin_unlock_irq(&kcs_bmc->lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(kcs_bmc_enable_device);
+
+void kcs_bmc_disable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client)
+{
+ spin_lock_irq(&kcs_bmc->lock);
+ if (client == kcs_bmc->client) {
+ u8 mask = KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE;
+
+ kcs_bmc_update_event_mask(kcs_bmc, mask, 0);
+ kcs_bmc->client = NULL;
+ }
+ spin_unlock_irq(&kcs_bmc->lock);
+}
+EXPORT_SYMBOL(kcs_bmc_disable_device);
+
+int kcs_bmc_add_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_driver *drv;
+ int error = 0;
+ int rc;
+
+ spin_lock_init(&kcs_bmc->lock);
+ kcs_bmc->client = NULL;
+
+ mutex_lock(&kcs_bmc_lock);
+ list_add(&kcs_bmc->entry, &kcs_bmc_devices);
+ list_for_each_entry(drv, &kcs_bmc_drivers, entry) {
+ rc = drv->ops->add_device(kcs_bmc);
+ if (!rc)
+ continue;
+
+ dev_err(kcs_bmc->dev, "Failed to add chardev for KCS channel %d: %d",
+ kcs_bmc->channel, rc);
+ error = rc;
+ }
+ mutex_unlock(&kcs_bmc_lock);
+
+ return error;
+}
+EXPORT_SYMBOL(kcs_bmc_add_device);
+
+void kcs_bmc_remove_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_driver *drv;
+ int rc;
+
+ mutex_lock(&kcs_bmc_lock);
+ list_del(&kcs_bmc->entry);
+ list_for_each_entry(drv, &kcs_bmc_drivers, entry) {
+ rc = drv->ops->remove_device(kcs_bmc);
+ if (rc)
+ dev_err(kcs_bmc->dev, "Failed to remove chardev for KCS channel %d: %d",
+ kcs_bmc->channel, rc);
+ }
+ mutex_unlock(&kcs_bmc_lock);
+}
+EXPORT_SYMBOL(kcs_bmc_remove_device);
+
+void kcs_bmc_register_driver(struct kcs_bmc_driver *drv)
+{
+ struct kcs_bmc_device *kcs_bmc;
+ int rc;
+
+ mutex_lock(&kcs_bmc_lock);
+ list_add(&drv->entry, &kcs_bmc_drivers);
+ list_for_each_entry(kcs_bmc, &kcs_bmc_devices, entry) {
+ rc = drv->ops->add_device(kcs_bmc);
+ if (rc)
+ dev_err(kcs_bmc->dev, "Failed to add driver for KCS channel %d: %d",
+ kcs_bmc->channel, rc);
+ }
+ mutex_unlock(&kcs_bmc_lock);
+}
+EXPORT_SYMBOL(kcs_bmc_register_driver);
+
+void kcs_bmc_unregister_driver(struct kcs_bmc_driver *drv)
+{
+ struct kcs_bmc_device *kcs_bmc;
+ int rc;
+
+ mutex_lock(&kcs_bmc_lock);
+ list_del(&drv->entry);
+ list_for_each_entry(kcs_bmc, &kcs_bmc_devices, entry) {
+ rc = drv->ops->remove_device(kcs_bmc);
+ if (rc)
+ dev_err(kcs_bmc->dev, "Failed to remove driver for KCS channel %d: %d",
+ kcs_bmc->channel, rc);
+ }
+ mutex_unlock(&kcs_bmc_lock);
+}
+EXPORT_SYMBOL(kcs_bmc_unregister_driver);
+
+void kcs_bmc_update_event_mask(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 events)
+{
+ kcs_bmc->ops->irq_mask_update(kcs_bmc, mask, events);
+}
+EXPORT_SYMBOL(kcs_bmc_update_event_mask);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_DESCRIPTION("KCS BMC to handle the IPMI request from system software");
diff --git a/drivers/char/ipmi/kcs_bmc.h b/drivers/char/ipmi/kcs_bmc.h
new file mode 100644
index 0000000000..fa408b802c
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2018, Intel Corporation.
+ */
+
+#ifndef __KCS_BMC_H__
+#define __KCS_BMC_H__
+
+#include <linux/list.h>
+
+#define KCS_BMC_EVENT_TYPE_OBE BIT(0)
+#define KCS_BMC_EVENT_TYPE_IBF BIT(1)
+
+#define KCS_BMC_STR_OBF BIT(0)
+#define KCS_BMC_STR_IBF BIT(1)
+#define KCS_BMC_STR_CMD_DAT BIT(3)
+
+/* IPMI 2.0 - 9.5, KCS Interface Registers
+ * @idr: Input Data Register
+ * @odr: Output Data Register
+ * @str: Status Register
+ */
+struct kcs_ioreg {
+ u32 idr;
+ u32 odr;
+ u32 str;
+};
+
+struct kcs_bmc_device_ops;
+struct kcs_bmc_client;
+
+struct kcs_bmc_device {
+ struct list_head entry;
+
+ struct device *dev;
+ u32 channel;
+
+ struct kcs_ioreg ioreg;
+
+ const struct kcs_bmc_device_ops *ops;
+
+ spinlock_t lock;
+ struct kcs_bmc_client *client;
+};
+
+#endif /* __KCS_BMC_H__ */
diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
new file mode 100644
index 0000000000..72640da553
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
@@ -0,0 +1,684 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, Intel Corporation.
+ */
+
+#define pr_fmt(fmt) "aspeed-kcs-bmc: " fmt
+
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/regmap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include "kcs_bmc_device.h"
+
+
+#define DEVICE_NAME "ast-kcs-bmc"
+
+#define KCS_CHANNEL_MAX 4
+
+/*
+ * Field class descriptions
+ *
+ * LPCyE Enable LPC channel y
+ * IBFIEy Input Buffer Full IRQ Enable for LPC channel y
+ * IRQxEy Assert SerIRQ x for LPC channel y (Deprecated, use IDyIRQX, IRQXEy)
+ * IDyIRQX Use the specified 4-bit SerIRQ for LPC channel y
+ * SELyIRQX SerIRQ polarity for LPC channel y (low: 0, high: 1)
+ * IRQXEy Assert the SerIRQ specified in IDyIRQX for LPC channel y
+ */
+
+#define LPC_TYIRQX_LOW 0b00
+#define LPC_TYIRQX_HIGH 0b01
+#define LPC_TYIRQX_RSVD 0b10
+#define LPC_TYIRQX_RISING 0b11
+
+#define LPC_HICR0 0x000
+#define LPC_HICR0_LPC3E BIT(7)
+#define LPC_HICR0_LPC2E BIT(6)
+#define LPC_HICR0_LPC1E BIT(5)
+#define LPC_HICR2 0x008
+#define LPC_HICR2_IBFIE3 BIT(3)
+#define LPC_HICR2_IBFIE2 BIT(2)
+#define LPC_HICR2_IBFIE1 BIT(1)
+#define LPC_HICR4 0x010
+#define LPC_HICR4_LADR12AS BIT(7)
+#define LPC_HICR4_KCSENBL BIT(2)
+#define LPC_SIRQCR0 0x070
+/* IRQ{12,1}E1 are deprecated as of AST2600 A3 but necessary for prior chips */
+#define LPC_SIRQCR0_IRQ12E1 BIT(1)
+#define LPC_SIRQCR0_IRQ1E1 BIT(0)
+#define LPC_HICR5 0x080
+#define LPC_HICR5_ID3IRQX_MASK GENMASK(23, 20)
+#define LPC_HICR5_ID3IRQX_SHIFT 20
+#define LPC_HICR5_ID2IRQX_MASK GENMASK(19, 16)
+#define LPC_HICR5_ID2IRQX_SHIFT 16
+#define LPC_HICR5_SEL3IRQX BIT(15)
+#define LPC_HICR5_IRQXE3 BIT(14)
+#define LPC_HICR5_SEL2IRQX BIT(13)
+#define LPC_HICR5_IRQXE2 BIT(12)
+#define LPC_LADR3H 0x014
+#define LPC_LADR3L 0x018
+#define LPC_LADR12H 0x01C
+#define LPC_LADR12L 0x020
+#define LPC_IDR1 0x024
+#define LPC_IDR2 0x028
+#define LPC_IDR3 0x02C
+#define LPC_ODR1 0x030
+#define LPC_ODR2 0x034
+#define LPC_ODR3 0x038
+#define LPC_STR1 0x03C
+#define LPC_STR2 0x040
+#define LPC_STR3 0x044
+#define LPC_HICRB 0x100
+#define LPC_HICRB_EN16LADR2 BIT(5)
+#define LPC_HICRB_EN16LADR1 BIT(4)
+#define LPC_HICRB_IBFIE4 BIT(1)
+#define LPC_HICRB_LPC4E BIT(0)
+#define LPC_HICRC 0x104
+#define LPC_HICRC_ID4IRQX_MASK GENMASK(7, 4)
+#define LPC_HICRC_ID4IRQX_SHIFT 4
+#define LPC_HICRC_TY4IRQX_MASK GENMASK(3, 2)
+#define LPC_HICRC_TY4IRQX_SHIFT 2
+#define LPC_HICRC_OBF4_AUTO_CLR BIT(1)
+#define LPC_HICRC_IRQXE4 BIT(0)
+#define LPC_LADR4 0x110
+#define LPC_IDR4 0x114
+#define LPC_ODR4 0x118
+#define LPC_STR4 0x11C
+#define LPC_LSADR12 0x120
+#define LPC_LSADR12_LSADR2_MASK GENMASK(31, 16)
+#define LPC_LSADR12_LSADR2_SHIFT 16
+#define LPC_LSADR12_LSADR1_MASK GENMASK(15, 0)
+#define LPC_LSADR12_LSADR1_SHIFT 0
+
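+/*
+ * The controller has no "output buffer empty" interrupt, so OBE events
+ * are emulated by polling the status register at this interval (see
+ * aspeed_kcs_check_obe()).
+ */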
+#define OBE_POLL_PERIOD (HZ / 2)
+
+enum aspeed_kcs_irq_mode {
+ aspeed_kcs_irq_none,
+ aspeed_kcs_irq_serirq,
+};
+
+struct aspeed_kcs_bmc {
+ struct kcs_bmc_device kcs_bmc;
+
+ struct regmap *map;
+
+ struct {
+ enum aspeed_kcs_irq_mode mode;
+ int id;
+ } upstream_irq;
+
+ struct {
+ spinlock_t lock;
+ bool remove;
+ struct timer_list timer;
+ } obe;
+};
+
+static inline struct aspeed_kcs_bmc *to_aspeed_kcs_bmc(struct kcs_bmc_device *kcs_bmc)
+{
+ return container_of(kcs_bmc, struct aspeed_kcs_bmc, kcs_bmc);
+}
+
+static u8 aspeed_kcs_inb(struct kcs_bmc_device *kcs_bmc, u32 reg)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
+ u32 val = 0;
+ int rc;
+
+ rc = regmap_read(priv->map, reg, &val);
+ WARN(rc != 0, "regmap_read() failed: %d\n", rc);
+
+ return rc == 0 ? (u8) val : 0;
+}
+
+static void aspeed_kcs_outb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 data)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
+ int rc;
+
+ rc = regmap_write(priv->map, reg, data);
+ WARN(rc != 0, "regmap_write() failed: %d\n", rc);
+
+ /* Trigger the upstream IRQ on ODR writes, if enabled */
+
+ switch (reg) {
+ case LPC_ODR1:
+ case LPC_ODR2:
+ case LPC_ODR3:
+ case LPC_ODR4:
+ break;
+ default:
+ return;
+ }
+
+ if (priv->upstream_irq.mode != aspeed_kcs_irq_serirq)
+ return;
+
+ switch (kcs_bmc->channel) {
+ case 1:
+ switch (priv->upstream_irq.id) {
+ case 12:
+ regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ12E1,
+ LPC_SIRQCR0_IRQ12E1);
+ break;
+ case 1:
+ regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ1E1,
+ LPC_SIRQCR0_IRQ1E1);
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE2, LPC_HICR5_IRQXE2);
+ break;
+ case 3:
+ regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE3, LPC_HICR5_IRQXE3);
+ break;
+ case 4:
+ regmap_update_bits(priv->map, LPC_HICRC, LPC_HICRC_IRQXE4, LPC_HICRC_IRQXE4);
+ break;
+ default:
+ break;
+ }
+}
+
+static void aspeed_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 val)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
+ int rc;
+
+ rc = regmap_update_bits(priv->map, reg, mask, val);
+ WARN(rc != 0, "regmap_update_bits() failed: %d\n", rc);
+}
+
+/*
+ * We use D for Data and C for Cmd/Status; the default rules are:
+ *
+ * 1. Only the D address is given:
+ * A. KCS1/KCS2 (D/C: X/X+4)
+ * D/C: CA0h/CA4h
+ * D/C: CA8h/CACh
+ * B. KCS3 (D/C: XX2/XX3h)
+ * D/C: CA2h/CA3h
+ * C. KCS4 (D/C: X/X+1)
+ * D/C: CA4h/CA5h
+ *
+ * 2. Both the D/C addresses are given:
+ * A. KCS1/KCS2/KCS4 (D/C: X/Y)
+ * D/C: CA0h/CA1h
+ * D/C: CA8h/CA9h
+ * D/C: CA4h/CA5h
+ * B. KCS3 (D/C: XX2/XX3h)
+ * D/C: CA2h/CA3h
+ */
+static int aspeed_kcs_set_address(struct kcs_bmc_device *kcs_bmc, u32 addrs[2], int nr_addrs)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
+
+ if (WARN_ON(nr_addrs < 1 || nr_addrs > 2))
+ return -EINVAL;
+
+ switch (priv->kcs_bmc.channel) {
+ case 1:
+ regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, 0);
+ regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
+ regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
+ if (nr_addrs == 2) {
+ regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR1_MASK,
+ addrs[1] << LPC_LSADR12_LSADR1_SHIFT);
+
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR1,
+ LPC_HICRB_EN16LADR1);
+ }
+ break;
+
+ case 2:
+ regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, LPC_HICR4_LADR12AS);
+ regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
+ regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
+ if (nr_addrs == 2) {
+ regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR2_MASK,
+ addrs[1] << LPC_LSADR12_LSADR2_SHIFT);
+
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR2,
+ LPC_HICRB_EN16LADR2);
+ }
+ break;
+
+ case 3:
+ if (nr_addrs == 2) {
+ dev_err(priv->kcs_bmc.dev,
+ "Channel 3 only supports inferred status IO address\n");
+ return -EINVAL;
+ }
+
+ regmap_write(priv->map, LPC_LADR3H, addrs[0] >> 8);
+ regmap_write(priv->map, LPC_LADR3L, addrs[0] & 0xFF);
+ break;
+
+ case 4:
+ if (nr_addrs == 1)
+ regmap_write(priv->map, LPC_LADR4, ((addrs[0] + 1) << 16) | addrs[0]);
+ else
+ regmap_write(priv->map, LPC_LADR4, (addrs[1] << 16) | addrs[0]);
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int aspeed_kcs_map_serirq_type(u32 dt_type)
+{
+ switch (dt_type) {
+ case IRQ_TYPE_EDGE_RISING:
+ return LPC_TYIRQX_RISING;
+ case IRQ_TYPE_LEVEL_HIGH:
+ return LPC_TYIRQX_HIGH;
+ case IRQ_TYPE_LEVEL_LOW:
+ return LPC_TYIRQX_LOW;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int aspeed_kcs_config_upstream_irq(struct aspeed_kcs_bmc *priv, u32 id, u32 dt_type)
+{
+ unsigned int mask, val, hw_type;
+ int ret;
+
+ if (id > 15)
+ return -EINVAL;
+
+ ret = aspeed_kcs_map_serirq_type(dt_type);
+ if (ret < 0)
+ return ret;
+ hw_type = ret;
+
+ priv->upstream_irq.mode = aspeed_kcs_irq_serirq;
+ priv->upstream_irq.id = id;
+
+ switch (priv->kcs_bmc.channel) {
+ case 1:
+ /* Needs IRQxE1 rather than (ID1IRQX, SEL1IRQX, IRQXE1) before AST2600 A3 */
+ break;
+ case 2:
+ if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH))
+ return -EINVAL;
+
+ mask = LPC_HICR5_SEL2IRQX | LPC_HICR5_ID2IRQX_MASK;
+ val = (id << LPC_HICR5_ID2IRQX_SHIFT);
+ val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL2IRQX : 0;
+ regmap_update_bits(priv->map, LPC_HICR5, mask, val);
+
+ break;
+ case 3:
+ if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH))
+ return -EINVAL;
+
+ mask = LPC_HICR5_SEL3IRQX | LPC_HICR5_ID3IRQX_MASK;
+ val = (id << LPC_HICR5_ID3IRQX_SHIFT);
+ val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL3IRQX : 0;
+ regmap_update_bits(priv->map, LPC_HICR5, mask, val);
+
+ break;
+ case 4:
+ mask = LPC_HICRC_ID4IRQX_MASK | LPC_HICRC_TY4IRQX_MASK | LPC_HICRC_OBF4_AUTO_CLR;
+ val = (id << LPC_HICRC_ID4IRQX_SHIFT) | (hw_type << LPC_HICRC_TY4IRQX_SHIFT);
+ regmap_update_bits(priv->map, LPC_HICRC, mask, val);
+ break;
+ default:
+ dev_warn(priv->kcs_bmc.dev,
+ "SerIRQ configuration not supported on KCS channel %d\n",
+ priv->kcs_bmc.channel);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void aspeed_kcs_enable_channel(struct kcs_bmc_device *kcs_bmc, bool enable)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
+
+ switch (kcs_bmc->channel) {
+ case 1:
+ regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC1E, enable * LPC_HICR0_LPC1E);
+ return;
+ case 2:
+ regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC2E, enable * LPC_HICR0_LPC2E);
+ return;
+ case 3:
+ regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC3E, enable * LPC_HICR0_LPC3E);
+ regmap_update_bits(priv->map, LPC_HICR4,
+ LPC_HICR4_KCSENBL, enable * LPC_HICR4_KCSENBL);
+ return;
+ case 4:
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_LPC4E, enable * LPC_HICRB_LPC4E);
+ return;
+ default:
+ pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel);
+ return;
+ }
+}
+
+static void aspeed_kcs_check_obe(struct timer_list *timer)
+{
+ struct aspeed_kcs_bmc *priv = container_of(timer, struct aspeed_kcs_bmc, obe.timer);
+ unsigned long flags;
+ u8 str;
+
+ spin_lock_irqsave(&priv->obe.lock, flags);
+ if (priv->obe.remove) {
+ spin_unlock_irqrestore(&priv->obe.lock, flags);
+ return;
+ }
+
+ str = aspeed_kcs_inb(&priv->kcs_bmc, priv->kcs_bmc.ioreg.str);
+ if (str & KCS_BMC_STR_OBF) {
+ mod_timer(timer, jiffies + OBE_POLL_PERIOD);
+ spin_unlock_irqrestore(&priv->obe.lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&priv->obe.lock, flags);
+
+ kcs_bmc_handle_event(&priv->kcs_bmc);
+}
+
+static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
+ int rc;
+ u8 str;
+
+ /* We don't have an OBE IRQ, emulate it */
+ if (mask & KCS_BMC_EVENT_TYPE_OBE) {
+ if (KCS_BMC_EVENT_TYPE_OBE & state) {
+ /*
+ * Given we don't have an OBE IRQ, delay by polling briefly to see if we can
+ * observe such an event before returning to the caller. This is not
+ * incorrect because OBF may have already become clear before enabling the
+ * IRQ if we had one, under which circumstance no event will be propagated
+ * anyway.
+ *
+ * The onus is on the client to perform a race-free check that it hasn't
+ * missed the event.
+ */
+ rc = read_poll_timeout_atomic(aspeed_kcs_inb, str,
+ !(str & KCS_BMC_STR_OBF), 1, 100, false,
+ &priv->kcs_bmc, priv->kcs_bmc.ioreg.str);
+ /* Time for the slow path? */
+ if (rc == -ETIMEDOUT)
+ mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD);
+ } else {
+ del_timer(&priv->obe.timer);
+ }
+ }
+
+ if (mask & KCS_BMC_EVENT_TYPE_IBF) {
+ const bool enable = !!(state & KCS_BMC_EVENT_TYPE_IBF);
+
+ switch (kcs_bmc->channel) {
+ case 1:
+ regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE1,
+ enable * LPC_HICR2_IBFIE1);
+ return;
+ case 2:
+ regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE2,
+ enable * LPC_HICR2_IBFIE2);
+ return;
+ case 3:
+ regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE3,
+ enable * LPC_HICR2_IBFIE3);
+ return;
+ case 4:
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_IBFIE4,
+ enable * LPC_HICRB_IBFIE4);
+ return;
+ default:
+ pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel);
+ return;
+ }
+ }
+}
+
+static const struct kcs_bmc_device_ops aspeed_kcs_ops = {
+ .irq_mask_update = aspeed_kcs_irq_mask_update,
+ .io_inputb = aspeed_kcs_inb,
+ .io_outputb = aspeed_kcs_outb,
+ .io_updateb = aspeed_kcs_updateb,
+};
+
+static irqreturn_t aspeed_kcs_irq(int irq, void *arg)
+{
+ struct kcs_bmc_device *kcs_bmc = arg;
+
+ return kcs_bmc_handle_event(kcs_bmc);
+}
+
+static int aspeed_kcs_config_downstream_irq(struct kcs_bmc_device *kcs_bmc,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ return devm_request_irq(dev, irq, aspeed_kcs_irq, IRQF_SHARED,
+ dev_name(dev), kcs_bmc);
+}
+
+static const struct kcs_ioreg ast_kcs_bmc_ioregs[KCS_CHANNEL_MAX] = {
+ { .idr = LPC_IDR1, .odr = LPC_ODR1, .str = LPC_STR1 },
+ { .idr = LPC_IDR2, .odr = LPC_ODR2, .str = LPC_STR2 },
+ { .idr = LPC_IDR3, .odr = LPC_ODR3, .str = LPC_STR3 },
+ { .idr = LPC_IDR4, .odr = LPC_ODR4, .str = LPC_STR4 },
+};
+
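+/*
+ * The KCS channel number is inferred from the devicetree "reg" offsets
+ * by matching the IDR/ODR/STR triplet against the table above; channels
+ * are numbered from 1.
+ */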
+static int aspeed_kcs_of_get_channel(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct kcs_ioreg ioreg;
+ const __be32 *reg;
+ int i;
+
+ np = pdev->dev.of_node;
+
+ /* Don't translate addresses, we want offsets for the regmaps */
+ reg = of_get_address(np, 0, NULL, NULL);
+ if (!reg)
+ return -EINVAL;
+ ioreg.idr = be32_to_cpup(reg);
+
+ reg = of_get_address(np, 1, NULL, NULL);
+ if (!reg)
+ return -EINVAL;
+ ioreg.odr = be32_to_cpup(reg);
+
+ reg = of_get_address(np, 2, NULL, NULL);
+ if (!reg)
+ return -EINVAL;
+ ioreg.str = be32_to_cpup(reg);
+
+ for (i = 0; i < ARRAY_SIZE(ast_kcs_bmc_ioregs); i++) {
+ if (!memcmp(&ast_kcs_bmc_ioregs[i], &ioreg, sizeof(ioreg)))
+ return i + 1;
+ }
+ return -EINVAL;
+}
+
+static int
+aspeed_kcs_of_get_io_address(struct platform_device *pdev, u32 addrs[2])
+{
+ int rc;
+
+ rc = of_property_read_variable_u32_array(pdev->dev.of_node,
+ "aspeed,lpc-io-reg",
+ addrs, 1, 2);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "No valid 'aspeed,lpc-io-reg' configured\n");
+ return rc;
+ }
+
+ if (addrs[0] > 0xffff) {
+ dev_err(&pdev->dev, "Invalid data address in 'aspeed,lpc-io-reg'\n");
+ return -EINVAL;
+ }
+
+ if (rc == 2 && addrs[1] > 0xffff) {
+ dev_err(&pdev->dev, "Invalid status address in 'aspeed,lpc-io-reg'\n");
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int aspeed_kcs_probe(struct platform_device *pdev)
+{
+ struct kcs_bmc_device *kcs_bmc;
+ struct aspeed_kcs_bmc *priv;
+ struct device_node *np;
+ bool have_upstream_irq;
+ u32 upstream_irq[2];
+ int rc, channel;
+ int nr_addrs;
+ u32 addrs[2];
+
+ np = pdev->dev.of_node->parent;
+ if (!of_device_is_compatible(np, "aspeed,ast2400-lpc-v2") &&
+ !of_device_is_compatible(np, "aspeed,ast2500-lpc-v2") &&
+ !of_device_is_compatible(np, "aspeed,ast2600-lpc-v2")) {
+ dev_err(&pdev->dev, "unsupported LPC device binding\n");
+ return -ENODEV;
+ }
+
+ channel = aspeed_kcs_of_get_channel(pdev);
+ if (channel < 0)
+ return channel;
+
+ nr_addrs = aspeed_kcs_of_get_io_address(pdev, addrs);
+ if (nr_addrs < 0)
+ return nr_addrs;
+
+ np = pdev->dev.of_node;
+ rc = of_property_read_u32_array(np, "aspeed,lpc-interrupts", upstream_irq, 2);
+ if (rc && rc != -EINVAL)
+ return -EINVAL;
+
+ have_upstream_irq = !rc;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ kcs_bmc = &priv->kcs_bmc;
+ kcs_bmc->dev = &pdev->dev;
+ kcs_bmc->channel = channel;
+ kcs_bmc->ioreg = ast_kcs_bmc_ioregs[channel - 1];
+ kcs_bmc->ops = &aspeed_kcs_ops;
+
+ priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(priv->map)) {
+ dev_err(&pdev->dev, "Couldn't get regmap\n");
+ return -ENODEV;
+ }
+
+ spin_lock_init(&priv->obe.lock);
+ priv->obe.remove = false;
+ timer_setup(&priv->obe.timer, aspeed_kcs_check_obe, 0);
+
+ rc = aspeed_kcs_set_address(kcs_bmc, addrs, nr_addrs);
+ if (rc)
+ return rc;
+
+ /* Host to BMC IRQ */
+ rc = aspeed_kcs_config_downstream_irq(kcs_bmc, pdev);
+ if (rc)
+ return rc;
+
+ /* BMC to Host IRQ */
+ if (have_upstream_irq) {
+ rc = aspeed_kcs_config_upstream_irq(priv, upstream_irq[0], upstream_irq[1]);
+ if (rc < 0)
+ return rc;
+ } else {
+ priv->upstream_irq.mode = aspeed_kcs_irq_none;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
+ aspeed_kcs_enable_channel(kcs_bmc, true);
+
+ rc = kcs_bmc_add_device(&priv->kcs_bmc);
+ if (rc) {
+ dev_warn(&pdev->dev, "Failed to register channel %d: %d\n", kcs_bmc->channel, rc);
+ return rc;
+ }
+
+ dev_info(&pdev->dev, "Initialised channel %d at 0x%x\n",
+ kcs_bmc->channel, addrs[0]);
+
+ return 0;
+}
+
+static int aspeed_kcs_remove(struct platform_device *pdev)
+{
+ struct aspeed_kcs_bmc *priv = platform_get_drvdata(pdev);
+ struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc;
+
+ kcs_bmc_remove_device(kcs_bmc);
+
+ aspeed_kcs_enable_channel(kcs_bmc, false);
+ aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
+
+ /* Make sure it's properly dead */
+ spin_lock_irq(&priv->obe.lock);
+ priv->obe.remove = true;
+ spin_unlock_irq(&priv->obe.lock);
+ del_timer_sync(&priv->obe.timer);
+
+ return 0;
+}
+
+static const struct of_device_id ast_kcs_bmc_match[] = {
+ { .compatible = "aspeed,ast2400-kcs-bmc-v2" },
+ { .compatible = "aspeed,ast2500-kcs-bmc-v2" },
+ { .compatible = "aspeed,ast2600-kcs-bmc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ast_kcs_bmc_match);
+
+static struct platform_driver ast_kcs_bmc_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = ast_kcs_bmc_match,
+ },
+ .probe = aspeed_kcs_probe,
+ .remove = aspeed_kcs_remove,
+};
+module_platform_driver(ast_kcs_bmc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_DESCRIPTION("Aspeed device interface to the KCS BMC device");
diff --git a/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c b/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
new file mode 100644
index 0000000000..cf670e8919
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, Intel Corporation.
+ */
+
+#define pr_fmt(fmt) "kcs-bmc: " fmt
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/ipmi_bmc.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "kcs_bmc_client.h"
+
+/* Different phases of the KCS BMC module.
+ * KCS_PHASE_IDLE:
+ * BMC should not be expecting nor sending any data.
+ * KCS_PHASE_WRITE_START:
+ * BMC is receiving a WRITE_START command from system software.
+ * KCS_PHASE_WRITE_DATA:
+ * BMC is receiving a data byte from system software.
+ * KCS_PHASE_WRITE_END_CMD:
+ * BMC is waiting for the last data byte from system software.
+ * KCS_PHASE_WRITE_DONE:
+ * BMC has received the whole request from system software.
+ * KCS_PHASE_WAIT_READ:
+ * BMC is waiting for the response from the upper IPMI service.
+ * KCS_PHASE_READ:
+ * BMC is transferring the response to system software.
+ * KCS_PHASE_ABORT_ERROR1:
+ * BMC is waiting for an error status request from system software.
+ * KCS_PHASE_ABORT_ERROR2:
+ * BMC is waiting for idle status after an error from system software.
+ * KCS_PHASE_ERROR:
+ * BMC has detected a protocol violation at the interface level.
+ */
+enum kcs_ipmi_phases {
+ KCS_PHASE_IDLE,
+
+ KCS_PHASE_WRITE_START,
+ KCS_PHASE_WRITE_DATA,
+ KCS_PHASE_WRITE_END_CMD,
+ KCS_PHASE_WRITE_DONE,
+
+ KCS_PHASE_WAIT_READ,
+ KCS_PHASE_READ,
+
+ KCS_PHASE_ABORT_ERROR1,
+ KCS_PHASE_ABORT_ERROR2,
+ KCS_PHASE_ERROR
+};
+
+/* IPMI 2.0 - Table 9-4, KCS Interface Status Codes */
+enum kcs_ipmi_errors {
+ KCS_NO_ERROR = 0x00,
+ KCS_ABORTED_BY_COMMAND = 0x01,
+ KCS_ILLEGAL_CONTROL_CODE = 0x02,
+ KCS_LENGTH_ERROR = 0x06,
+ KCS_UNSPECIFIED_ERROR = 0xFF
+};
+
+struct kcs_bmc_ipmi {
+ struct list_head entry;
+
+ struct kcs_bmc_client client;
+
+ spinlock_t lock;
+
+ enum kcs_ipmi_phases phase;
+ enum kcs_ipmi_errors error;
+
+ wait_queue_head_t queue;
+ bool data_in_avail;
+ int data_in_idx;
+ u8 *data_in;
+
+ int data_out_idx;
+ int data_out_len;
+ u8 *data_out;
+
+ struct mutex mutex;
+ u8 *kbuffer;
+
+ struct miscdevice miscdev;
+};
+
+#define DEVICE_NAME "ipmi-kcs"
+
+#define KCS_MSG_BUFSIZ 1000
+
+#define KCS_ZERO_DATA 0
+
+/* IPMI 2.0 - Table 9-1, KCS Interface Status Register Bits */
+#define KCS_STATUS_STATE(state) ((state) << 6)
+#define KCS_STATUS_STATE_MASK GENMASK(7, 6)
+#define KCS_STATUS_CMD_DAT BIT(3)
+#define KCS_STATUS_SMS_ATN BIT(2)
+#define KCS_STATUS_IBF BIT(1)
+#define KCS_STATUS_OBF BIT(0)
+
+/* IPMI 2.0 - Table 9-2, KCS Interface State Bits */
+enum kcs_states {
+ IDLE_STATE = 0,
+ READ_STATE = 1,
+ WRITE_STATE = 2,
+ ERROR_STATE = 3,
+};
+
+/* IPMI 2.0 - Table 9-3, KCS Interface Control Codes */
+#define KCS_CMD_GET_STATUS_ABORT 0x60
+#define KCS_CMD_WRITE_START 0x61
+#define KCS_CMD_WRITE_END 0x62
+#define KCS_CMD_READ_BYTE 0x68
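+
+/*
+ * A normal transaction: the host sends KCS_CMD_WRITE_START, the request
+ * bytes, then KCS_CMD_WRITE_END followed by the final byte; once
+ * userspace has supplied the response, the host collects it one byte at
+ * a time by writing KCS_CMD_READ_BYTE to the data register.
+ */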
+
+static inline void set_state(struct kcs_bmc_ipmi *priv, u8 state)
+{
+ kcs_bmc_update_status(priv->client.dev, KCS_STATUS_STATE_MASK, KCS_STATUS_STATE(state));
+}
+
+static void kcs_bmc_ipmi_force_abort(struct kcs_bmc_ipmi *priv)
+{
+ set_state(priv, ERROR_STATE);
+ kcs_bmc_read_data(priv->client.dev);
+ kcs_bmc_write_data(priv->client.dev, KCS_ZERO_DATA);
+
+ priv->phase = KCS_PHASE_ERROR;
+ priv->data_in_avail = false;
+ priv->data_in_idx = 0;
+}
+
+static void kcs_bmc_ipmi_handle_data(struct kcs_bmc_ipmi *priv)
+{
+ struct kcs_bmc_device *dev;
+ u8 data;
+
+ dev = priv->client.dev;
+
+ switch (priv->phase) {
+ case KCS_PHASE_WRITE_START:
+ priv->phase = KCS_PHASE_WRITE_DATA;
+ fallthrough;
+
+ case KCS_PHASE_WRITE_DATA:
+ if (priv->data_in_idx < KCS_MSG_BUFSIZ) {
+ set_state(priv, WRITE_STATE);
+ kcs_bmc_write_data(dev, KCS_ZERO_DATA);
+ priv->data_in[priv->data_in_idx++] = kcs_bmc_read_data(dev);
+ } else {
+ kcs_bmc_ipmi_force_abort(priv);
+ priv->error = KCS_LENGTH_ERROR;
+ }
+ break;
+
+ case KCS_PHASE_WRITE_END_CMD:
+ if (priv->data_in_idx < KCS_MSG_BUFSIZ) {
+ set_state(priv, READ_STATE);
+ priv->data_in[priv->data_in_idx++] = kcs_bmc_read_data(dev);
+ priv->phase = KCS_PHASE_WRITE_DONE;
+ priv->data_in_avail = true;
+ wake_up_interruptible(&priv->queue);
+ } else {
+ kcs_bmc_ipmi_force_abort(priv);
+ priv->error = KCS_LENGTH_ERROR;
+ }
+ break;
+
+ case KCS_PHASE_READ:
+ if (priv->data_out_idx == priv->data_out_len)
+ set_state(priv, IDLE_STATE);
+
+ data = kcs_bmc_read_data(dev);
+ if (data != KCS_CMD_READ_BYTE) {
+ set_state(priv, ERROR_STATE);
+ kcs_bmc_write_data(dev, KCS_ZERO_DATA);
+ break;
+ }
+
+ if (priv->data_out_idx == priv->data_out_len) {
+ kcs_bmc_write_data(dev, KCS_ZERO_DATA);
+ priv->phase = KCS_PHASE_IDLE;
+ break;
+ }
+
+ kcs_bmc_write_data(dev, priv->data_out[priv->data_out_idx++]);
+ break;
+
+ case KCS_PHASE_ABORT_ERROR1:
+ set_state(priv, READ_STATE);
+ kcs_bmc_read_data(dev);
+ kcs_bmc_write_data(dev, priv->error);
+ priv->phase = KCS_PHASE_ABORT_ERROR2;
+ break;
+
+ case KCS_PHASE_ABORT_ERROR2:
+ set_state(priv, IDLE_STATE);
+ kcs_bmc_read_data(dev);
+ kcs_bmc_write_data(dev, KCS_ZERO_DATA);
+ priv->phase = KCS_PHASE_IDLE;
+ break;
+
+ default:
+ kcs_bmc_ipmi_force_abort(priv);
+ break;
+ }
+}
+
+static void kcs_bmc_ipmi_handle_cmd(struct kcs_bmc_ipmi *priv)
+{
+ u8 cmd;
+
+ set_state(priv, WRITE_STATE);
+ kcs_bmc_write_data(priv->client.dev, KCS_ZERO_DATA);
+
+ cmd = kcs_bmc_read_data(priv->client.dev);
+ switch (cmd) {
+ case KCS_CMD_WRITE_START:
+ priv->phase = KCS_PHASE_WRITE_START;
+ priv->error = KCS_NO_ERROR;
+ priv->data_in_avail = false;
+ priv->data_in_idx = 0;
+ break;
+
+ case KCS_CMD_WRITE_END:
+ if (priv->phase != KCS_PHASE_WRITE_DATA) {
+ kcs_bmc_ipmi_force_abort(priv);
+ break;
+ }
+
+ priv->phase = KCS_PHASE_WRITE_END_CMD;
+ break;
+
+ case KCS_CMD_GET_STATUS_ABORT:
+ if (priv->error == KCS_NO_ERROR)
+ priv->error = KCS_ABORTED_BY_COMMAND;
+
+ priv->phase = KCS_PHASE_ABORT_ERROR1;
+ priv->data_in_avail = false;
+ priv->data_in_idx = 0;
+ break;
+
+ default:
+ kcs_bmc_ipmi_force_abort(priv);
+ priv->error = KCS_ILLEGAL_CONTROL_CODE;
+ break;
+ }
+}
+
+static inline struct kcs_bmc_ipmi *client_to_kcs_bmc_ipmi(struct kcs_bmc_client *client)
+{
+ return container_of(client, struct kcs_bmc_ipmi, client);
+}
+
+static irqreturn_t kcs_bmc_ipmi_event(struct kcs_bmc_client *client)
+{
+ struct kcs_bmc_ipmi *priv;
+ u8 status;
+ int ret;
+
+ priv = client_to_kcs_bmc_ipmi(client);
+ if (!priv)
+ return IRQ_NONE;
+
+ spin_lock(&priv->lock);
+
+ status = kcs_bmc_read_status(client->dev);
+ if (status & KCS_STATUS_IBF) {
+ if (status & KCS_STATUS_CMD_DAT)
+ kcs_bmc_ipmi_handle_cmd(priv);
+ else
+ kcs_bmc_ipmi_handle_data(priv);
+
+ ret = IRQ_HANDLED;
+ } else {
+ ret = IRQ_NONE;
+ }
+
+ spin_unlock(&priv->lock);
+
+ return ret;
+}
+
+static const struct kcs_bmc_client_ops kcs_bmc_ipmi_client_ops = {
+ .event = kcs_bmc_ipmi_event,
+};
+
+static inline struct kcs_bmc_ipmi *to_kcs_bmc(struct file *filp)
+{
+ return container_of(filp->private_data, struct kcs_bmc_ipmi, miscdev);
+}
+
+static int kcs_bmc_ipmi_open(struct inode *inode, struct file *filp)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+
+ return kcs_bmc_enable_device(priv->client.dev, &priv->client);
+}
+
+static __poll_t kcs_bmc_ipmi_poll(struct file *filp, poll_table *wait)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+ __poll_t mask = 0;
+
+ poll_wait(filp, &priv->queue, wait);
+
+ spin_lock_irq(&priv->lock);
+ if (priv->data_in_avail)
+ mask |= EPOLLIN;
+ spin_unlock_irq(&priv->lock);
+
+ return mask;
+}
+
+static ssize_t kcs_bmc_ipmi_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+ bool data_avail;
+ size_t data_len;
+ ssize_t ret;
+
+ if (!(filp->f_flags & O_NONBLOCK))
+ wait_event_interruptible(priv->queue,
+ priv->data_in_avail);
+
+ mutex_lock(&priv->mutex);
+
+ spin_lock_irq(&priv->lock);
+ data_avail = priv->data_in_avail;
+ if (data_avail) {
+ data_len = priv->data_in_idx;
+ memcpy(priv->kbuffer, priv->data_in, data_len);
+ }
+ spin_unlock_irq(&priv->lock);
+
+ if (!data_avail) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ if (count < data_len) {
+ pr_err("channel=%u with too large data : %zu\n",
+ priv->client.dev->channel, data_len);
+
+ spin_lock_irq(&priv->lock);
+ kcs_bmc_ipmi_force_abort(priv);
+ spin_unlock_irq(&priv->lock);
+
+ ret = -EOVERFLOW;
+ goto out_unlock;
+ }
+
+ if (copy_to_user(buf, priv->kbuffer, data_len)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ ret = data_len;
+
+ spin_lock_irq(&priv->lock);
+ if (priv->phase == KCS_PHASE_WRITE_DONE) {
+ priv->phase = KCS_PHASE_WAIT_READ;
+ priv->data_in_avail = false;
+ priv->data_in_idx = 0;
+ } else {
+ ret = -EAGAIN;
+ }
+ spin_unlock_irq(&priv->lock);
+
+out_unlock:
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static ssize_t kcs_bmc_ipmi_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+ ssize_t ret;
+
+ /* The minimum response size is 3 bytes: netfn + cmd + completion code */
+ if (count < 3 || count > KCS_MSG_BUFSIZ)
+ return -EINVAL;
+
+ mutex_lock(&priv->mutex);
+
+ if (copy_from_user(priv->kbuffer, buf, count)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ spin_lock_irq(&priv->lock);
+ if (priv->phase == KCS_PHASE_WAIT_READ) {
+ priv->phase = KCS_PHASE_READ;
+ priv->data_out_idx = 1;
+ priv->data_out_len = count;
+ memcpy(priv->data_out, priv->kbuffer, count);
+ kcs_bmc_write_data(priv->client.dev, priv->data_out[0]);
+ ret = count;
+ } else {
+ ret = -EINVAL;
+ }
+ spin_unlock_irq(&priv->lock);
+
+out_unlock:
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static long kcs_bmc_ipmi_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+ long ret = 0;
+
+ spin_lock_irq(&priv->lock);
+
+ switch (cmd) {
+ case IPMI_BMC_IOCTL_SET_SMS_ATN:
+ kcs_bmc_update_status(priv->client.dev, KCS_STATUS_SMS_ATN, KCS_STATUS_SMS_ATN);
+ break;
+
+ case IPMI_BMC_IOCTL_CLEAR_SMS_ATN:
+ kcs_bmc_update_status(priv->client.dev, KCS_STATUS_SMS_ATN, 0);
+ break;
+
+ case IPMI_BMC_IOCTL_FORCE_ABORT:
+ kcs_bmc_ipmi_force_abort(priv);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+ return ret;
+}
+
+static int kcs_bmc_ipmi_release(struct inode *inode, struct file *filp)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+
+ kcs_bmc_ipmi_force_abort(priv);
+ kcs_bmc_disable_device(priv->client.dev, &priv->client);
+
+ return 0;
+}
+
+static const struct file_operations kcs_bmc_ipmi_fops = {
+ .owner = THIS_MODULE,
+ .open = kcs_bmc_ipmi_open,
+ .read = kcs_bmc_ipmi_read,
+ .write = kcs_bmc_ipmi_write,
+ .release = kcs_bmc_ipmi_release,
+ .poll = kcs_bmc_ipmi_poll,
+ .unlocked_ioctl = kcs_bmc_ipmi_ioctl,
+};
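+
+/*
+ * Sketch of the expected userspace flow (an assumption drawn from the fops
+ * above; the device node, e.g. /dev/ipmi-kcs1, takes its name from
+ * DEVICE_NAME plus the channel number): open() enables the interface, poll()
+ * signals EPOLLIN when a request is pending, read() returns the request
+ * bytes, and write() supplies a response of at least three bytes
+ * (netfn + cmd + completion code).  The SMS_ATN status bit can be raised or
+ * cleared with the IPMI_BMC_IOCTL_SET_SMS_ATN/_CLEAR_SMS_ATN ioctls.
+ */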
+
+static DEFINE_SPINLOCK(kcs_bmc_ipmi_instances_lock);
+static LIST_HEAD(kcs_bmc_ipmi_instances);
+
+static int kcs_bmc_ipmi_add_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_ipmi *priv;
+ int rc;
+
+ priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+ mutex_init(&priv->mutex);
+
+ init_waitqueue_head(&priv->queue);
+
+ priv->client.dev = kcs_bmc;
+ priv->client.ops = &kcs_bmc_ipmi_client_ops;
+ priv->data_in = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+ priv->data_out = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+ priv->kbuffer = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+
+ priv->miscdev.minor = MISC_DYNAMIC_MINOR;
+ priv->miscdev.name = devm_kasprintf(kcs_bmc->dev, GFP_KERNEL, "%s%u", DEVICE_NAME,
+ kcs_bmc->channel);
+ if (!priv->data_in || !priv->data_out || !priv->kbuffer || !priv->miscdev.name)
+ return -EINVAL;
+
+ priv->miscdev.fops = &kcs_bmc_ipmi_fops;
+
+ rc = misc_register(&priv->miscdev);
+ if (rc) {
+ dev_err(kcs_bmc->dev, "Unable to register device: %d\n", rc);
+ return rc;
+ }
+
+ spin_lock_irq(&kcs_bmc_ipmi_instances_lock);
+ list_add(&priv->entry, &kcs_bmc_ipmi_instances);
+ spin_unlock_irq(&kcs_bmc_ipmi_instances_lock);
+
+ dev_info(kcs_bmc->dev, "Initialised IPMI client for channel %d", kcs_bmc->channel);
+
+ return 0;
+}
+
+static int kcs_bmc_ipmi_remove_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_ipmi *priv = NULL, *pos;
+
+ spin_lock_irq(&kcs_bmc_ipmi_instances_lock);
+ list_for_each_entry(pos, &kcs_bmc_ipmi_instances, entry) {
+ if (pos->client.dev == kcs_bmc) {
+ priv = pos;
+ list_del(&pos->entry);
+ break;
+ }
+ }
+ spin_unlock_irq(&kcs_bmc_ipmi_instances_lock);
+
+ if (!priv)
+ return -ENODEV;
+
+ misc_deregister(&priv->miscdev);
+ kcs_bmc_disable_device(priv->client.dev, &priv->client);
+ devm_kfree(kcs_bmc->dev, priv->kbuffer);
+ devm_kfree(kcs_bmc->dev, priv->data_out);
+ devm_kfree(kcs_bmc->dev, priv->data_in);
+ devm_kfree(kcs_bmc->dev, priv);
+
+ return 0;
+}
+
+static const struct kcs_bmc_driver_ops kcs_bmc_ipmi_driver_ops = {
+ .add_device = kcs_bmc_ipmi_add_device,
+ .remove_device = kcs_bmc_ipmi_remove_device,
+};
+
+static struct kcs_bmc_driver kcs_bmc_ipmi_driver = {
+ .ops = &kcs_bmc_ipmi_driver_ops,
+};
+
+static int __init kcs_bmc_ipmi_init(void)
+{
+ kcs_bmc_register_driver(&kcs_bmc_ipmi_driver);
+
+ return 0;
+}
+module_init(kcs_bmc_ipmi_init);
+
+static void __exit kcs_bmc_ipmi_exit(void)
+{
+ kcs_bmc_unregister_driver(&kcs_bmc_ipmi_driver);
+}
+module_exit(kcs_bmc_ipmi_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_DESCRIPTION("KCS BMC to handle the IPMI request from system software");
diff --git a/drivers/char/ipmi/kcs_bmc_client.h b/drivers/char/ipmi/kcs_bmc_client.h
new file mode 100644
index 0000000000..6fdcde0a71
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_client.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, IBM Corp. */
+
+#ifndef __KCS_BMC_CONSUMER_H__
+#define __KCS_BMC_CONSUMER_H__
+
+#include <linux/irqreturn.h>
+
+#include "kcs_bmc.h"
+
+struct kcs_bmc_driver_ops {
+ int (*add_device)(struct kcs_bmc_device *kcs_bmc);
+ int (*remove_device)(struct kcs_bmc_device *kcs_bmc);
+};
+
+struct kcs_bmc_driver {
+ struct list_head entry;
+
+ const struct kcs_bmc_driver_ops *ops;
+};
+
+struct kcs_bmc_client_ops {
+ irqreturn_t (*event)(struct kcs_bmc_client *client);
+};
+
+struct kcs_bmc_client {
+ const struct kcs_bmc_client_ops *ops;
+
+ struct kcs_bmc_device *dev;
+};
+
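+/*
+ * A client (see kcs_bmc_cdev_ipmi.c and kcs_bmc_serio.c in this patch for
+ * concrete examples) embeds a struct kcs_bmc_client, points .dev at the
+ * struct kcs_bmc_device it drives and .ops at its event handler, then calls
+ * kcs_bmc_enable_device() to start receiving events and
+ * kcs_bmc_disable_device() to stop.
+ */
+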
+void kcs_bmc_register_driver(struct kcs_bmc_driver *drv);
+void kcs_bmc_unregister_driver(struct kcs_bmc_driver *drv);
+
+int kcs_bmc_enable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client);
+void kcs_bmc_disable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client);
+
+void kcs_bmc_update_event_mask(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 events);
+
+u8 kcs_bmc_read_data(struct kcs_bmc_device *kcs_bmc);
+void kcs_bmc_write_data(struct kcs_bmc_device *kcs_bmc, u8 data);
+u8 kcs_bmc_read_status(struct kcs_bmc_device *kcs_bmc);
+void kcs_bmc_write_status(struct kcs_bmc_device *kcs_bmc, u8 data);
+void kcs_bmc_update_status(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 val);
+#endif
diff --git a/drivers/char/ipmi/kcs_bmc_device.h b/drivers/char/ipmi/kcs_bmc_device.h
new file mode 100644
index 0000000000..17c572f25c
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_device.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, IBM Corp. */
+
+#ifndef __KCS_BMC_DEVICE_H__
+#define __KCS_BMC_DEVICE_H__
+
+#include <linux/irqreturn.h>
+
+#include "kcs_bmc.h"
+
+struct kcs_bmc_device_ops {
+ void (*irq_mask_update)(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 enable);
+ u8 (*io_inputb)(struct kcs_bmc_device *kcs_bmc, u32 reg);
+ void (*io_outputb)(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 b);
+ void (*io_updateb)(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 b);
+};
+
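+/*
+ * A device driver (kcs_bmc_npcm7xx.c in this patch is one example) fills in a
+ * struct kcs_bmc_device with its channel, I/O register offsets and these ops,
+ * registers it with kcs_bmc_add_device(), and forwards its interrupt to
+ * kcs_bmc_handle_event().
+ */
+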
+irqreturn_t kcs_bmc_handle_event(struct kcs_bmc_device *kcs_bmc);
+int kcs_bmc_add_device(struct kcs_bmc_device *kcs_bmc);
+void kcs_bmc_remove_device(struct kcs_bmc_device *kcs_bmc);
+
+#endif
diff --git a/drivers/char/ipmi/kcs_bmc_npcm7xx.c b/drivers/char/ipmi/kcs_bmc_npcm7xx.c
new file mode 100644
index 0000000000..7961fec564
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_npcm7xx.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, Nuvoton Corporation.
+ * Copyright (c) 2018, Intel Corporation.
+ */
+
+#define pr_fmt(fmt) "nuvoton-kcs-bmc: " fmt
+
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "kcs_bmc_device.h"
+
+#define DEVICE_NAME "npcm-kcs-bmc"
+#define KCS_CHANNEL_MAX 3
+
+#define KCS1ST 0x0C
+#define KCS2ST 0x1E
+#define KCS3ST 0x30
+
+#define KCS1DO 0x0E
+#define KCS2DO 0x20
+#define KCS3DO 0x32
+
+#define KCS1DI 0x10
+#define KCS2DI 0x22
+#define KCS3DI 0x34
+
+#define KCS1CTL 0x18
+#define KCS2CTL 0x2A
+#define KCS3CTL 0x3C
+#define KCS_CTL_IBFIE BIT(0)
+#define KCS_CTL_OBEIE BIT(1)
+
+#define KCS1IE 0x1C
+#define KCS2IE 0x2E
+#define KCS3IE 0x40
+#define KCS_IE_IRQE BIT(0)
+#define KCS_IE_HIRQE BIT(3)
+
+/*
+ * 7.2.4 Core KCS Registers
+ * Registers in this module are 8 bits. An 8-bit register must be accessed
+ * by an 8-bit read or write.
+ *
+ * sts: KCS Channel n Status Register (KCSnST).
+ * dob: KCS Channel n Data Out Buffer Register (KCSnDO).
+ * dib: KCS Channel n Data In Buffer Register (KCSnDI).
+ * ctl: KCS Channel n Control Register (KCSnCTL).
+ * ie : KCS Channel n Interrupt Enable Register (KCSnIE).
+ */
+struct npcm7xx_kcs_reg {
+ u32 sts;
+ u32 dob;
+ u32 dib;
+ u32 ctl;
+ u32 ie;
+};
+
+struct npcm7xx_kcs_bmc {
+ struct kcs_bmc_device kcs_bmc;
+
+ struct regmap *map;
+
+ const struct npcm7xx_kcs_reg *reg;
+};
+
+static const struct npcm7xx_kcs_reg npcm7xx_kcs_reg_tbl[KCS_CHANNEL_MAX] = {
+ { .sts = KCS1ST, .dob = KCS1DO, .dib = KCS1DI, .ctl = KCS1CTL, .ie = KCS1IE },
+ { .sts = KCS2ST, .dob = KCS2DO, .dib = KCS2DI, .ctl = KCS2CTL, .ie = KCS2IE },
+ { .sts = KCS3ST, .dob = KCS3DO, .dib = KCS3DI, .ctl = KCS3CTL, .ie = KCS3IE },
+};
+
+static inline struct npcm7xx_kcs_bmc *to_npcm7xx_kcs_bmc(struct kcs_bmc_device *kcs_bmc)
+{
+ return container_of(kcs_bmc, struct npcm7xx_kcs_bmc, kcs_bmc);
+}
+
+static u8 npcm7xx_kcs_inb(struct kcs_bmc_device *kcs_bmc, u32 reg)
+{
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
+ u32 val = 0;
+ int rc;
+
+ rc = regmap_read(priv->map, reg, &val);
+ WARN(rc != 0, "regmap_read() failed: %d\n", rc);
+
+ return rc == 0 ? (u8)val : 0;
+}
+
+static void npcm7xx_kcs_outb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 data)
+{
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
+ int rc;
+
+ rc = regmap_write(priv->map, reg, data);
+ WARN(rc != 0, "regmap_write() failed: %d\n", rc);
+}
+
+static void npcm7xx_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 data)
+{
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
+ int rc;
+
+ rc = regmap_update_bits(priv->map, reg, mask, data);
+ WARN(rc != 0, "regmap_update_bits() failed: %d\n", rc);
+}
+
+static void npcm7xx_kcs_enable_channel(struct kcs_bmc_device *kcs_bmc, bool enable)
+{
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
+
+ regmap_update_bits(priv->map, priv->reg->ie, KCS_IE_IRQE | KCS_IE_HIRQE,
+ enable ? KCS_IE_IRQE | KCS_IE_HIRQE : 0);
+}
+
+static void npcm7xx_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state)
+{
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
+
+ if (mask & KCS_BMC_EVENT_TYPE_OBE)
+ regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_OBEIE,
+ !!(state & KCS_BMC_EVENT_TYPE_OBE) * KCS_CTL_OBEIE);
+
+ if (mask & KCS_BMC_EVENT_TYPE_IBF)
+ regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_IBFIE,
+ !!(state & KCS_BMC_EVENT_TYPE_IBF) * KCS_CTL_IBFIE);
+}
+
+static irqreturn_t npcm7xx_kcs_irq(int irq, void *arg)
+{
+ struct kcs_bmc_device *kcs_bmc = arg;
+
+ return kcs_bmc_handle_event(kcs_bmc);
+}
+
+static int npcm7xx_kcs_config_irq(struct kcs_bmc_device *kcs_bmc,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ return devm_request_irq(dev, irq, npcm7xx_kcs_irq, IRQF_SHARED,
+ dev_name(dev), kcs_bmc);
+}
+
+static const struct kcs_bmc_device_ops npcm7xx_kcs_ops = {
+ .irq_mask_update = npcm7xx_kcs_irq_mask_update,
+ .io_inputb = npcm7xx_kcs_inb,
+ .io_outputb = npcm7xx_kcs_outb,
+ .io_updateb = npcm7xx_kcs_updateb,
+};
+
+static int npcm7xx_kcs_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct npcm7xx_kcs_bmc *priv;
+ struct kcs_bmc_device *kcs_bmc;
+ u32 chan;
+ int rc;
+
+ rc = of_property_read_u32(dev->of_node, "kcs_chan", &chan);
+ if (rc != 0 || chan == 0 || chan > KCS_CHANNEL_MAX) {
+ dev_err(dev, "no valid 'kcs_chan' configured\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->map = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(priv->map)) {
+ dev_err(dev, "Couldn't get regmap\n");
+ return -ENODEV;
+ }
+ priv->reg = &npcm7xx_kcs_reg_tbl[chan - 1];
+
+ kcs_bmc = &priv->kcs_bmc;
+ kcs_bmc->dev = &pdev->dev;
+ kcs_bmc->channel = chan;
+ kcs_bmc->ioreg.idr = priv->reg->dib;
+ kcs_bmc->ioreg.odr = priv->reg->dob;
+ kcs_bmc->ioreg.str = priv->reg->sts;
+ kcs_bmc->ops = &npcm7xx_kcs_ops;
+
+ platform_set_drvdata(pdev, priv);
+
+ rc = npcm7xx_kcs_config_irq(kcs_bmc, pdev);
+ if (rc)
+ return rc;
+
+ npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
+ npcm7xx_kcs_enable_channel(kcs_bmc, true);
+
+ rc = kcs_bmc_add_device(kcs_bmc);
+ if (rc) {
+ dev_warn(&pdev->dev, "Failed to register channel %d: %d\n", kcs_bmc->channel, rc);
+ return rc;
+ }
+
+ pr_info("channel=%u idr=0x%x odr=0x%x str=0x%x\n",
+ chan,
+ kcs_bmc->ioreg.idr, kcs_bmc->ioreg.odr, kcs_bmc->ioreg.str);
+
+ return 0;
+}
+
+static int npcm7xx_kcs_remove(struct platform_device *pdev)
+{
+ struct npcm7xx_kcs_bmc *priv = platform_get_drvdata(pdev);
+ struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc;
+
+ kcs_bmc_remove_device(kcs_bmc);
+
+ npcm7xx_kcs_enable_channel(kcs_bmc, false);
+ npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
+
+ return 0;
+}
+
+static const struct of_device_id npcm_kcs_bmc_match[] = {
+ { .compatible = "nuvoton,npcm750-kcs-bmc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, npcm_kcs_bmc_match);
+
+static struct platform_driver npcm_kcs_bmc_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = npcm_kcs_bmc_match,
+ },
+ .probe = npcm7xx_kcs_probe,
+ .remove = npcm7xx_kcs_remove,
+};
+module_platform_driver(npcm_kcs_bmc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Avi Fishman <avifishman70@gmail.com>");
+MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_DESCRIPTION("NPCM7xx device interface to the KCS BMC device");
diff --git a/drivers/char/ipmi/kcs_bmc_serio.c b/drivers/char/ipmi/kcs_bmc_serio.c
new file mode 100644
index 0000000000..1793358be7
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_serio.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (c) 2021 IBM Corp. */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sched/signal.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+
+#include "kcs_bmc_client.h"
+
+struct kcs_bmc_serio {
+ struct list_head entry;
+
+ struct kcs_bmc_client client;
+ struct serio *port;
+
+ spinlock_t lock;
+};
+
+static inline struct kcs_bmc_serio *client_to_kcs_bmc_serio(struct kcs_bmc_client *client)
+{
+ return container_of(client, struct kcs_bmc_serio, client);
+}
+
+static irqreturn_t kcs_bmc_serio_event(struct kcs_bmc_client *client)
+{
+ struct kcs_bmc_serio *priv;
+ u8 handled = IRQ_NONE;
+ u8 status;
+
+ priv = client_to_kcs_bmc_serio(client);
+
+ spin_lock(&priv->lock);
+
+ status = kcs_bmc_read_status(client->dev);
+
+ if (status & KCS_BMC_STR_IBF)
+ handled = serio_interrupt(priv->port, kcs_bmc_read_data(client->dev), 0);
+
+ spin_unlock(&priv->lock);
+
+ return handled;
+}
+
+static const struct kcs_bmc_client_ops kcs_bmc_serio_client_ops = {
+ .event = kcs_bmc_serio_event,
+};
+
+static int kcs_bmc_serio_open(struct serio *port)
+{
+ struct kcs_bmc_serio *priv = port->port_data;
+
+ return kcs_bmc_enable_device(priv->client.dev, &priv->client);
+}
+
+static void kcs_bmc_serio_close(struct serio *port)
+{
+ struct kcs_bmc_serio *priv = port->port_data;
+
+ kcs_bmc_disable_device(priv->client.dev, &priv->client);
+}
+
+static DEFINE_SPINLOCK(kcs_bmc_serio_instances_lock);
+static LIST_HEAD(kcs_bmc_serio_instances);
+
+static int kcs_bmc_serio_add_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_serio *priv;
+ struct serio *port;
+
+ priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Use kzalloc() as the allocation is cleaned up with kfree() via serio_unregister_port() */
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->id.type = SERIO_8042;
+ port->open = kcs_bmc_serio_open;
+ port->close = kcs_bmc_serio_close;
+ port->port_data = priv;
+ port->dev.parent = kcs_bmc->dev;
+
+ spin_lock_init(&priv->lock);
+ priv->port = port;
+ priv->client.dev = kcs_bmc;
+ priv->client.ops = &kcs_bmc_serio_client_ops;
+
+ spin_lock_irq(&kcs_bmc_serio_instances_lock);
+ list_add(&priv->entry, &kcs_bmc_serio_instances);
+ spin_unlock_irq(&kcs_bmc_serio_instances_lock);
+
+ serio_register_port(port);
+
+ dev_info(kcs_bmc->dev, "Initialised serio client for channel %d", kcs_bmc->channel);
+
+ return 0;
+}
+
+static int kcs_bmc_serio_remove_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_serio *priv = NULL, *pos;
+
+ spin_lock_irq(&kcs_bmc_serio_instances_lock);
+ list_for_each_entry(pos, &kcs_bmc_serio_instances, entry) {
+ if (pos->client.dev == kcs_bmc) {
+ priv = pos;
+ list_del(&pos->entry);
+ break;
+ }
+ }
+ spin_unlock_irq(&kcs_bmc_serio_instances_lock);
+
+ if (!priv)
+ return -ENODEV;
+
+ /* kfree()s priv->port via put_device() */
+ serio_unregister_port(priv->port);
+
+ /* Ensure the IBF IRQ is disabled if we were the active client */
+ kcs_bmc_disable_device(kcs_bmc, &priv->client);
+
+ devm_kfree(priv->client.dev->dev, priv);
+
+ return 0;
+}
+
+static const struct kcs_bmc_driver_ops kcs_bmc_serio_driver_ops = {
+ .add_device = kcs_bmc_serio_add_device,
+ .remove_device = kcs_bmc_serio_remove_device,
+};
+
+static struct kcs_bmc_driver kcs_bmc_serio_driver = {
+ .ops = &kcs_bmc_serio_driver_ops,
+};
+
+static int __init kcs_bmc_serio_init(void)
+{
+ kcs_bmc_register_driver(&kcs_bmc_serio_driver);
+
+ return 0;
+}
+module_init(kcs_bmc_serio_init);
+
+static void __exit kcs_bmc_serio_exit(void)
+{
+ kcs_bmc_unregister_driver(&kcs_bmc_serio_driver);
+}
+module_exit(kcs_bmc_serio_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_DESCRIPTION("Adapter driver for serio access to BMC KCS devices");
diff --git a/drivers/char/ipmi/ssif_bmc.c b/drivers/char/ipmi/ssif_bmc.c
new file mode 100644
index 0000000000..56346fb328
--- /dev/null
+++ b/drivers/char/ipmi/ssif_bmc.c
@@ -0,0 +1,873 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * The driver for BMC side of SSIF interface
+ *
+ * Copyright (c) 2022, Ampere Computing LLC
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/ipmi_ssif_bmc.h>
+
+#define DEVICE_NAME "ipmi-ssif-host"
+
+#define GET_8BIT_ADDR(addr_7bit) (((addr_7bit) << 1) & 0xff)
+
+/* A standard SMBus Transaction is limited to 32 data bytes */
+#define MAX_PAYLOAD_PER_TRANSACTION 32
+/* Transaction includes the address, the command, the length and the PEC byte */
+#define MAX_TRANSACTION (MAX_PAYLOAD_PER_TRANSACTION + 4)
+
+#define MAX_IPMI_DATA_PER_START_TRANSACTION 30
+#define MAX_IPMI_DATA_PER_MIDDLE_TRANSACTION 31
+
+#define SSIF_IPMI_SINGLEPART_WRITE 0x2
+#define SSIF_IPMI_SINGLEPART_READ 0x3
+#define SSIF_IPMI_MULTIPART_WRITE_START 0x6
+#define SSIF_IPMI_MULTIPART_WRITE_MIDDLE 0x7
+#define SSIF_IPMI_MULTIPART_WRITE_END 0x8
+#define SSIF_IPMI_MULTIPART_READ_START 0x3
+#define SSIF_IPMI_MULTIPART_READ_MIDDLE 0x9
+
+/*
+ * IPMI 2.0 Spec, section 12.7 SSIF Timing,
+ * Request-to-Response Time is T6max(250ms) - T1max(20ms) - 3ms = 227ms
+ * Recover ssif_bmc from the busy state if no response arrives within 500ms.
+ */
+#define RESPONSE_TIMEOUT 500 /* ms */
+
+struct ssif_part_buffer {
+ u8 address;
+ u8 smbus_cmd;
+ u8 length;
+ u8 payload[MAX_PAYLOAD_PER_TRANSACTION];
+ u8 pec;
+ u8 index;
+};
+
+/*
+ * SSIF internal states:
+ * SSIF_READY 0x00 : Ready state
+ * SSIF_START 0x01 : Start smbus transaction
+ * SSIF_SMBUS_CMD 0x02 : Received SMBus command
+ * SSIF_REQ_RECVING 0x03 : Receiving request
+ * SSIF_RES_SENDING 0x04 : Sending response
+ * SSIF_ABORTING 0x05 : Aborting state
+ */
+enum ssif_state {
+ SSIF_READY,
+ SSIF_START,
+ SSIF_SMBUS_CMD,
+ SSIF_REQ_RECVING,
+ SSIF_RES_SENDING,
+ SSIF_ABORTING,
+ SSIF_STATE_MAX
+};
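+
+/*
+ * Typical transitions as driven by the slave callbacks below (illustrative):
+ * a write request moves SSIF_READY -> SSIF_START -> SSIF_SMBUS_CMD ->
+ * SSIF_REQ_RECVING and back to SSIF_READY on the stop condition, a read
+ * (response) transfer moves SSIF_SMBUS_CMD -> SSIF_RES_SENDING and back to
+ * SSIF_READY on stop, and unexpected events land in SSIF_ABORTING.
+ */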
+
+struct ssif_bmc_ctx {
+ struct i2c_client *client;
+ struct miscdevice miscdev;
+ int msg_idx;
+ bool pec_support;
+ /* ssif bmc spinlock */
+ spinlock_t lock;
+ wait_queue_head_t wait_queue;
+ u8 running;
+ enum ssif_state state;
+ /* Timeout waiting for response */
+ struct timer_list response_timer;
+ bool response_timer_inited;
+ /* Flag set when the response fits in a single-part read transaction */
+ bool is_singlepart_read;
+ u8 nbytes_processed;
+ u8 remain_len;
+ u8 recv_len;
+ /* Block Number of a Multi-part Read Transaction */
+ u8 block_num;
+ bool request_available;
+ bool response_in_progress;
+ bool busy;
+ bool aborting;
+ /* Buffer for SSIF transaction part */
+ struct ssif_part_buffer part_buf;
+ struct ipmi_ssif_msg response;
+ struct ipmi_ssif_msg request;
+};
+
+static inline struct ssif_bmc_ctx *to_ssif_bmc(struct file *file)
+{
+ return container_of(file->private_data, struct ssif_bmc_ctx, miscdev);
+}
+
+static const char *state_to_string(enum ssif_state state)
+{
+ switch (state) {
+ case SSIF_READY:
+ return "SSIF_READY";
+ case SSIF_START:
+ return "SSIF_START";
+ case SSIF_SMBUS_CMD:
+ return "SSIF_SMBUS_CMD";
+ case SSIF_REQ_RECVING:
+ return "SSIF_REQ_RECVING";
+ case SSIF_RES_SENDING:
+ return "SSIF_RES_SENDING";
+ case SSIF_ABORTING:
+ return "SSIF_ABORTING";
+ default:
+ return "SSIF_STATE_UNKNOWN";
+ }
+}
+
+/* Handle SSIF message that will be sent to user */
+static ssize_t ssif_bmc_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ struct ssif_bmc_ctx *ssif_bmc = to_ssif_bmc(file);
+ struct ipmi_ssif_msg msg;
+ unsigned long flags;
+ ssize_t ret;
+
+ spin_lock_irqsave(&ssif_bmc->lock, flags);
+ while (!ssif_bmc->request_available) {
+ spin_unlock_irqrestore(&ssif_bmc->lock, flags);
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ ret = wait_event_interruptible(ssif_bmc->wait_queue,
+ ssif_bmc->request_available);
+ if (ret)
+ return ret;
+ spin_lock_irqsave(&ssif_bmc->lock, flags);
+ }
+
+ if (count < min_t(ssize_t,
+ sizeof_field(struct ipmi_ssif_msg, len) + ssif_bmc->request.len,
+ sizeof(struct ipmi_ssif_msg))) {
+ spin_unlock_irqrestore(&ssif_bmc->lock, flags);
+ ret = -EINVAL;
+ } else {
+ count = min_t(ssize_t,
+ sizeof_field(struct ipmi_ssif_msg, len) + ssif_bmc->request.len,
+ sizeof(struct ipmi_ssif_msg));
+ memcpy(&msg, &ssif_bmc->request, count);
+ ssif_bmc->request_available = false;
+ spin_unlock_irqrestore(&ssif_bmc->lock, flags);
+
+ ret = copy_to_user(buf, &msg, count);
+ }
+
+ return (ret < 0) ? ret : count;
+}
+
+/* Handle SSIF message that is written by user */
+static ssize_t ssif_bmc_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct ssif_bmc_ctx *ssif_bmc = to_ssif_bmc(file);
+ struct ipmi_ssif_msg msg;
+ unsigned long flags;
+ ssize_t ret;
+
+ if (count > sizeof(struct ipmi_ssif_msg))
+ return -EINVAL;
+
+ if (copy_from_user(&msg, buf, count))
+ return -EFAULT;
+
+ if (!msg.len || count < sizeof_field(struct ipmi_ssif_msg, len) + msg.len)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ssif_bmc->lock, flags);
+ while (ssif_bmc->response_in_progress) {
+ spin_unlock_irqrestore(&ssif_bmc->lock, flags);
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ ret = wait_event_interruptible(ssif_bmc->wait_queue,
+ !ssif_bmc->response_in_progress);
+ if (ret)
+ return ret;
+ spin_lock_irqsave(&ssif_bmc->lock, flags);
+ }
+
+ /*
+ * The write must complete before the response timeout fires; otherwise
+ * the response has already been aborted and we wait for the next request.
+ * Return -EINVAL if the response was aborted.
+ */
+ ret = (ssif_bmc->response_timer_inited) ? 0 : -EINVAL;
+ if (ret)
+ goto exit;
+
+ del_timer(&ssif_bmc->response_timer);
+ ssif_bmc->response_timer_inited = false;
+
+ memcpy(&ssif_bmc->response, &msg, count);
+ ssif_bmc->is_singlepart_read = (msg.len <= MAX_PAYLOAD_PER_TRANSACTION);
+
+ ssif_bmc->response_in_progress = true;
+
+ /* ssif_bmc not busy */
+ ssif_bmc->busy = false;
+
+ /* Clean old request buffer */
+ memset(&ssif_bmc->request, 0, sizeof(struct ipmi_ssif_msg));
+exit:
+ spin_unlock_irqrestore(&ssif_bmc->lock, flags);
+
+ return (ret < 0) ? ret : count;
+}
+
+static int ssif_bmc_open(struct inode *inode, struct file *file)
+{
+ struct ssif_bmc_ctx *ssif_bmc = to_ssif_bmc(file);
+ int ret = 0;
+
+ spin_lock_irq(&ssif_bmc->lock);
+ if (!ssif_bmc->running)
+ ssif_bmc->running = 1;
+ else
+ ret = -EBUSY;
+ spin_unlock_irq(&ssif_bmc->lock);
+
+ return ret;
+}
+
+static __poll_t ssif_bmc_poll(struct file *file, poll_table *wait)
+{
+ struct ssif_bmc_ctx *ssif_bmc = to_ssif_bmc(file);
+ __poll_t mask = 0;
+
+ poll_wait(file, &ssif_bmc->wait_queue, wait);
+
+ spin_lock_irq(&ssif_bmc->lock);
+ /* The request is available, userspace application can get the request */
+ if (ssif_bmc->request_available)
+ mask |= EPOLLIN;
+
+ spin_unlock_irq(&ssif_bmc->lock);
+
+ return mask;
+}
+
+static int ssif_bmc_release(struct inode *inode, struct file *file)
+{
+ struct ssif_bmc_ctx *ssif_bmc = to_ssif_bmc(file);
+
+ spin_lock_irq(&ssif_bmc->lock);
+ ssif_bmc->running = 0;
+ spin_unlock_irq(&ssif_bmc->lock);
+
+ return 0;
+}
+
+/*
+ * System calls to device interface for user apps
+ */
+static const struct file_operations ssif_bmc_fops = {
+ .owner = THIS_MODULE,
+ .open = ssif_bmc_open,
+ .read = ssif_bmc_read,
+ .write = ssif_bmc_write,
+ .release = ssif_bmc_release,
+ .poll = ssif_bmc_poll,
+};
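+
+/*
+ * Sketch of the expected userspace flow (an assumption drawn from the fops
+ * above; the device node is /dev/ipmi-ssif-host): poll() for EPOLLIN, read()
+ * the pending request as a struct ipmi_ssif_msg, and write() the response as
+ * another struct ipmi_ssif_msg before RESPONSE_TIMEOUT expires; a write that
+ * arrives after the timeout is rejected with -EINVAL because the transaction
+ * has already been aborted.
+ */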
+
+/* Called with ssif_bmc->lock held. */
+static void complete_response(struct ssif_bmc_ctx *ssif_bmc)
+{
+ /* Invalidate response in buffer to denote it having been sent. */
+ ssif_bmc->response.len = 0;
+ ssif_bmc->response_in_progress = false;
+ ssif_bmc->nbytes_processed = 0;
+ ssif_bmc->remain_len = 0;
+ ssif_bmc->busy = false;
+ memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer));
+ wake_up_all(&ssif_bmc->wait_queue);
+}
+
+static void response_timeout(struct timer_list *t)
+{
+ struct ssif_bmc_ctx *ssif_bmc = from_timer(ssif_bmc, t, response_timer);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ssif_bmc->lock, flags);
+
+ /* Do nothing if the response is in progress */
+ if (!ssif_bmc->response_in_progress) {
+ /* Recover ssif_bmc from busy */
+ ssif_bmc->busy = false;
+ ssif_bmc->response_timer_inited = false;
+ /* Set aborting flag */
+ ssif_bmc->aborting = true;
+ }
+
+ spin_unlock_irqrestore(&ssif_bmc->lock, flags);
+}
+
+/* Called with ssif_bmc->lock held. */
+static void handle_request(struct ssif_bmc_ctx *ssif_bmc)
+{
+ /* set ssif_bmc to busy waiting for response */
+ ssif_bmc->busy = true;
+ /* Request message is available to process */
+ ssif_bmc->request_available = true;
+ /* Clean old response buffer */
+ memset(&ssif_bmc->response, 0, sizeof(struct ipmi_ssif_msg));
+ /* This is a new request; wake up any waiters. */
+ wake_up_all(&ssif_bmc->wait_queue);
+
+ /* Arm the timer to recover the slave from the busy state if no response arrives */
+ if (!ssif_bmc->response_timer_inited) {
+ timer_setup(&ssif_bmc->response_timer, response_timeout, 0);
+ ssif_bmc->response_timer_inited = true;
+ }
+ mod_timer(&ssif_bmc->response_timer, jiffies + msecs_to_jiffies(RESPONSE_TIMEOUT));
+}
+
+static void calculate_response_part_pec(struct ssif_part_buffer *part)
+{
+ u8 addr = part->address;
+
+ /* PEC - Start Read Address */
+ part->pec = i2c_smbus_pec(0, &addr, 1);
+ /* PEC - SSIF Command */
+ part->pec = i2c_smbus_pec(part->pec, &part->smbus_cmd, 1);
+ /* PEC - Restart Write Address */
+ addr = addr | 0x01;
+ part->pec = i2c_smbus_pec(part->pec, &addr, 1);
+ part->pec = i2c_smbus_pec(part->pec, &part->length, 1);
+ if (part->length)
+ part->pec = i2c_smbus_pec(part->pec, part->payload, part->length);
+}
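+
+/*
+ * For illustration (hypothetical address): with a 7-bit slave address of
+ * 0x10, the PEC computed above is the SMBus CRC-8 over the byte sequence
+ * { 0x20, smbus_cmd, 0x21, length, payload[0..length-1] }, i.e. the 8-bit
+ * address, the SSIF command, the address with the R/W bit set, the byte
+ * count and the payload, exactly as chained through i2c_smbus_pec().
+ */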
+
+static void set_singlepart_response_buffer(struct ssif_bmc_ctx *ssif_bmc)
+{
+ struct ssif_part_buffer *part = &ssif_bmc->part_buf;
+
+ part->address = GET_8BIT_ADDR(ssif_bmc->client->addr);
+ part->length = (u8)ssif_bmc->response.len;
+
+ /* Clear the rest to 0 */
+ memset(part->payload + part->length, 0, MAX_PAYLOAD_PER_TRANSACTION - part->length);
+ memcpy(&part->payload[0], &ssif_bmc->response.payload[0], part->length);
+}
+
+static void set_multipart_response_buffer(struct ssif_bmc_ctx *ssif_bmc)
+{
+ struct ssif_part_buffer *part = &ssif_bmc->part_buf;
+ u8 part_len = 0;
+
+ part->address = GET_8BIT_ADDR(ssif_bmc->client->addr);
+ switch (part->smbus_cmd) {
+ case SSIF_IPMI_MULTIPART_READ_START:
+ /*
+ * The Read Start length is 32 bytes: the first 30 bytes of the
+ * IPMI response plus the two start-flag bytes 0x00 and 0x01.
+ */
+ ssif_bmc->nbytes_processed = 0;
+ ssif_bmc->block_num = 0;
+ part->length = MAX_PAYLOAD_PER_TRANSACTION;
+ part_len = MAX_IPMI_DATA_PER_START_TRANSACTION;
+ ssif_bmc->remain_len = ssif_bmc->response.len - part_len;
+
+ part->payload[0] = 0x00; /* Start Flag */
+ part->payload[1] = 0x01; /* Start Flag */
+
+ memcpy(&part->payload[2], &ssif_bmc->response.payload[0], part_len);
+ break;
+
+ case SSIF_IPMI_MULTIPART_READ_MIDDLE:
+ /*
+ * READ Middle and READ End messages can carry up to 31 bytes of
+ * IPMI data plus a block number byte.
+ */
+ if (ssif_bmc->remain_len <= MAX_IPMI_DATA_PER_MIDDLE_TRANSACTION) {
+ /*
+ * This is the READ End message.
+ * The returned length is the remaining response data length
+ * plus the block number byte.
+ * Block number 0xFF indicates that this is the last message.
+ */
+ /* Clean the buffer */
+ memset(&part->payload[0], 0, MAX_PAYLOAD_PER_TRANSACTION);
+ part->length = ssif_bmc->remain_len + 1;
+ part_len = ssif_bmc->remain_len;
+ ssif_bmc->block_num = 0xFF;
+ part->payload[0] = ssif_bmc->block_num;
+ } else {
+ /*
+ * This is a READ Middle message.
+ * The returned length is the maximum SMBus transfer length,
+ * and the block number byte is incremented for the next part.
+ */
+ part->length = MAX_PAYLOAD_PER_TRANSACTION;
+ part_len = MAX_IPMI_DATA_PER_MIDDLE_TRANSACTION;
+ part->payload[0] = ssif_bmc->block_num;
+ ssif_bmc->block_num++;
+ }
+
+ ssif_bmc->remain_len -= part_len;
+ memcpy(&part->payload[1], ssif_bmc->response.payload + ssif_bmc->nbytes_processed,
+ part_len);
+ break;
+
+ default:
+ /* Do not expect to go to this case */
+ dev_err(&ssif_bmc->client->dev, "%s: Unexpected SMBus command 0x%x\n",
+ __func__, part->smbus_cmd);
+ break;
+ }
+
+ ssif_bmc->nbytes_processed += part_len;
+}
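+
+/*
+ * Worked example (hypothetical response length): a 75-byte IPMI response is
+ * returned as a Read Start part carrying the 0x00, 0x01 start flags plus the
+ * first 30 bytes, a Read Middle part carrying block number 0 plus the next
+ * 31 bytes, and a Read End part carrying block number 0xFF plus the remaining
+ * 14 bytes (length field 15).
+ */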
+
+static bool supported_read_cmd(u8 cmd)
+{
+ if (cmd == SSIF_IPMI_SINGLEPART_READ ||
+ cmd == SSIF_IPMI_MULTIPART_READ_START ||
+ cmd == SSIF_IPMI_MULTIPART_READ_MIDDLE)
+ return true;
+
+ return false;
+}
+
+static bool supported_write_cmd(u8 cmd)
+{
+ if (cmd == SSIF_IPMI_SINGLEPART_WRITE ||
+ cmd == SSIF_IPMI_MULTIPART_WRITE_START ||
+ cmd == SSIF_IPMI_MULTIPART_WRITE_MIDDLE ||
+ cmd == SSIF_IPMI_MULTIPART_WRITE_END)
+ return true;
+
+ return false;
+}
+
+/* Process the IPMI response that will be read by master */
+static void handle_read_processed(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+{
+ struct ssif_part_buffer *part = &ssif_bmc->part_buf;
+
+ /* part->index starts from 0 */
+ if (part->index < part->length)
+ *val = part->payload[part->index];
+ else if (part->index == part->length && ssif_bmc->pec_support)
+ *val = part->pec;
+ else
+ *val = 0;
+
+ part->index++;
+}
+
+static void handle_write_received(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+{
+ /*
+ * msg_idx must be 1 when we first enter the SSIF_REQ_RECVING state,
+ * and it never exceeds MAX_TRANSACTION (36) bytes: the 32-byte maximum
+ * payload plus the address, the command, the length and the PEC.
+ */
+ if (ssif_bmc->msg_idx < 1 || ssif_bmc->msg_idx > MAX_TRANSACTION)
+ return;
+
+ if (ssif_bmc->msg_idx == 1) {
+ ssif_bmc->part_buf.length = *val;
+ ssif_bmc->part_buf.index = 0;
+ } else {
+ ssif_bmc->part_buf.payload[ssif_bmc->part_buf.index++] = *val;
+ }
+
+ ssif_bmc->msg_idx++;
+}
+
+static bool validate_request_part(struct ssif_bmc_ctx *ssif_bmc)
+{
+ struct ssif_part_buffer *part = &ssif_bmc->part_buf;
+ bool ret = true;
+ u8 cpec;
+ u8 addr;
+
+ if (part->index == part->length) {
+ /* PEC is not included */
+ ssif_bmc->pec_support = false;
+ ret = true;
+ goto exit;
+ }
+
+ if (part->index != part->length + 1) {
+ ret = false;
+ goto exit;
+ }
+
+ /* PEC is included */
+ ssif_bmc->pec_support = true;
+ part->pec = part->payload[part->length];
+ addr = GET_8BIT_ADDR(ssif_bmc->client->addr);
+ cpec = i2c_smbus_pec(0, &addr, 1);
+ cpec = i2c_smbus_pec(cpec, &part->smbus_cmd, 1);
+ cpec = i2c_smbus_pec(cpec, &part->length, 1);
+ /*
+ * The SMBus specification does not allow the length (byte count)
+ * in the Write-Block protocol to be zero, so it is illegal for the
+ * last Middle transaction in the sequence to carry 32 bytes while
+ * the End transaction has a length of 0.
+ * Some hosts may still do this, so guard against it here rather
+ * than let the ssif_bmc driver break.
+ */
+ if (part->length)
+ cpec = i2c_smbus_pec(cpec, part->payload, part->length);
+
+ if (cpec != part->pec)
+ ret = false;
+
+exit:
+ return ret;
+}
+
+static void process_request_part(struct ssif_bmc_ctx *ssif_bmc)
+{
+ struct ssif_part_buffer *part = &ssif_bmc->part_buf;
+ unsigned int len;
+
+ switch (part->smbus_cmd) {
+ case SSIF_IPMI_SINGLEPART_WRITE:
+ /* Save the whole part into the request */
+ ssif_bmc->request.len = part->length;
+ memcpy(ssif_bmc->request.payload, part->payload, part->length);
+
+ break;
+ case SSIF_IPMI_MULTIPART_WRITE_START:
+ ssif_bmc->request.len = 0;
+
+ fallthrough;
+ case SSIF_IPMI_MULTIPART_WRITE_MIDDLE:
+ case SSIF_IPMI_MULTIPART_WRITE_END:
+ len = ssif_bmc->request.len + part->length;
+ /* Bounds check: do not allow the request length to exceed 254 bytes */
+ if (len > IPMI_SSIF_PAYLOAD_MAX) {
+ dev_warn(&ssif_bmc->client->dev,
+ "Warn: Request exceeded 254 bytes, aborting");
+ /* Request too long, aborting */
+ ssif_bmc->aborting = true;
+ } else {
+ memcpy(ssif_bmc->request.payload + ssif_bmc->request.len,
+ part->payload, part->length);
+ ssif_bmc->request.len += part->length;
+ }
+ break;
+ default:
+ /* Do not expect to go to this case */
+ dev_err(&ssif_bmc->client->dev, "%s: Unexpected SMBus command 0x%x\n",
+ __func__, part->smbus_cmd);
+ break;
+ }
+}
+
+static void process_smbus_cmd(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+{
+ /* SMBUS command can vary (single or multi-part) */
+ ssif_bmc->part_buf.smbus_cmd = *val;
+ ssif_bmc->msg_idx = 1;
+ memset(&ssif_bmc->part_buf.payload[0], 0, MAX_PAYLOAD_PER_TRANSACTION);
+
+ if (*val == SSIF_IPMI_SINGLEPART_WRITE || *val == SSIF_IPMI_MULTIPART_WRITE_START) {
+ /*
+ * The response may not arrive in time, causing the host SSIF driver
+ * to time out and resend a new request. In that case, check for a
+ * pending response and clear it.
+ */
+ if (ssif_bmc->response_in_progress)
+ complete_response(ssif_bmc);
+
+ /* This is a new request; clear the aborting flag if it was set */
+ if (ssif_bmc->aborting)
+ ssif_bmc->aborting = false;
+ }
+}
+
+static void on_read_requested_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+{
+ if (ssif_bmc->state == SSIF_READY ||
+ ssif_bmc->state == SSIF_START ||
+ ssif_bmc->state == SSIF_REQ_RECVING ||
+ ssif_bmc->state == SSIF_RES_SENDING) {
+ dev_warn(&ssif_bmc->client->dev,
+ "Warn: %s unexpected READ REQUESTED in state=%s\n",
+ __func__, state_to_string(ssif_bmc->state));
+ ssif_bmc->state = SSIF_ABORTING;
+ *val = 0;
+ return;
+
+ } else if (ssif_bmc->state == SSIF_SMBUS_CMD) {
+ if (!supported_read_cmd(ssif_bmc->part_buf.smbus_cmd)) {
+ dev_warn(&ssif_bmc->client->dev, "Warn: Unknown SMBus read command=0x%x",
+ ssif_bmc->part_buf.smbus_cmd);
+ ssif_bmc->aborting = true;
+ }
+
+ if (ssif_bmc->aborting)
+ ssif_bmc->state = SSIF_ABORTING;
+ else
+ ssif_bmc->state = SSIF_RES_SENDING;
+ }
+
+ ssif_bmc->msg_idx = 0;
+
+ /* Send 0 if there is nothing to send */
+ if (!ssif_bmc->response_in_progress || ssif_bmc->state == SSIF_ABORTING) {
+ *val = 0;
+ return;
+ }
+
+ if (ssif_bmc->is_singlepart_read)
+ set_singlepart_response_buffer(ssif_bmc);
+ else
+ set_multipart_response_buffer(ssif_bmc);
+
+ calculate_response_part_pec(&ssif_bmc->part_buf);
+ ssif_bmc->part_buf.index = 0;
+ *val = ssif_bmc->part_buf.length;
+}
+
+static void on_read_processed_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+{
+ if (ssif_bmc->state == SSIF_READY ||
+ ssif_bmc->state == SSIF_START ||
+ ssif_bmc->state == SSIF_REQ_RECVING ||
+ ssif_bmc->state == SSIF_SMBUS_CMD) {
+ dev_warn(&ssif_bmc->client->dev,
+ "Warn: %s unexpected READ PROCESSED in state=%s\n",
+ __func__, state_to_string(ssif_bmc->state));
+ ssif_bmc->state = SSIF_ABORTING;
+ *val = 0;
+ return;
+ }
+
+ /* Send 0 if there is nothing to send */
+ if (!ssif_bmc->response_in_progress || ssif_bmc->state == SSIF_ABORTING) {
+ *val = 0;
+ return;
+ }
+
+ handle_read_processed(ssif_bmc, val);
+}
+
+static void on_write_requested_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+{
+ if (ssif_bmc->state == SSIF_READY || ssif_bmc->state == SSIF_SMBUS_CMD) {
+ ssif_bmc->state = SSIF_START;
+
+ } else if (ssif_bmc->state == SSIF_START ||
+ ssif_bmc->state == SSIF_REQ_RECVING ||
+ ssif_bmc->state == SSIF_RES_SENDING) {
+ dev_warn(&ssif_bmc->client->dev,
+ "Warn: %s unexpected WRITE REQUEST in state=%s\n",
+ __func__, state_to_string(ssif_bmc->state));
+ ssif_bmc->state = SSIF_ABORTING;
+ return;
+ }
+
+ ssif_bmc->msg_idx = 0;
+ ssif_bmc->part_buf.address = *val;
+}
+
+static void on_write_received_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+{
+ if (ssif_bmc->state == SSIF_READY ||
+ ssif_bmc->state == SSIF_RES_SENDING) {
+ dev_warn(&ssif_bmc->client->dev,
+ "Warn: %s unexpected WRITE RECEIVED in state=%s\n",
+ __func__, state_to_string(ssif_bmc->state));
+ ssif_bmc->state = SSIF_ABORTING;
+
+ } else if (ssif_bmc->state == SSIF_START) {
+ ssif_bmc->state = SSIF_SMBUS_CMD;
+
+ } else if (ssif_bmc->state == SSIF_SMBUS_CMD) {
+ if (!supported_write_cmd(ssif_bmc->part_buf.smbus_cmd)) {
+ dev_warn(&ssif_bmc->client->dev, "Warn: Unknown SMBus write command=0x%x",
+ ssif_bmc->part_buf.smbus_cmd);
+ ssif_bmc->aborting = true;
+ }
+
+ if (ssif_bmc->aborting)
+ ssif_bmc->state = SSIF_ABORTING;
+ else
+ ssif_bmc->state = SSIF_REQ_RECVING;
+ }
+
+ /* Dispatch the received byte according to the current state */
+ if (ssif_bmc->state == SSIF_REQ_RECVING)
+ handle_write_received(ssif_bmc, val);
+ else if (ssif_bmc->state == SSIF_SMBUS_CMD)
+ process_smbus_cmd(ssif_bmc, val);
+}
+
+static void on_stop_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+{
+ if (ssif_bmc->state == SSIF_READY ||
+ ssif_bmc->state == SSIF_START ||
+ ssif_bmc->state == SSIF_SMBUS_CMD ||
+ ssif_bmc->state == SSIF_ABORTING) {
+ dev_warn(&ssif_bmc->client->dev,
+ "Warn: %s unexpected SLAVE STOP in state=%s\n",
+ __func__, state_to_string(ssif_bmc->state));
+ ssif_bmc->state = SSIF_READY;
+
+ } else if (ssif_bmc->state == SSIF_REQ_RECVING) {
+ if (validate_request_part(ssif_bmc)) {
+ process_request_part(ssif_bmc);
+ if (ssif_bmc->part_buf.smbus_cmd == SSIF_IPMI_SINGLEPART_WRITE ||
+ ssif_bmc->part_buf.smbus_cmd == SSIF_IPMI_MULTIPART_WRITE_END)
+ handle_request(ssif_bmc);
+ ssif_bmc->state = SSIF_READY;
+ } else {
+ /*
+ * A BMC that receives an invalid request drops the data for the write
+ * transaction and for any further transactions (read or write) until
+ * the next valid read or write Start transaction is received.
+ */
+ dev_err(&ssif_bmc->client->dev, "Error: invalid pec\n");
+ ssif_bmc->aborting = true;
+ }
+ } else if (ssif_bmc->state == SSIF_RES_SENDING) {
+ if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF)
+ /* Invalidate response buffer to denote it is sent */
+ complete_response(ssif_bmc);
+ ssif_bmc->state = SSIF_READY;
+ }
+
+ /* Reset message index */
+ ssif_bmc->msg_idx = 0;
+}
+
+/*
+ * Callback function to handle I2C slave events
+ */
+static int ssif_bmc_cb(struct i2c_client *client, enum i2c_slave_event event, u8 *val)
+{
+ unsigned long flags;
+ struct ssif_bmc_ctx *ssif_bmc = i2c_get_clientdata(client);
+ int ret = 0;
+
+ spin_lock_irqsave(&ssif_bmc->lock, flags);
+
+ switch (event) {
+ case I2C_SLAVE_READ_REQUESTED:
+ on_read_requested_event(ssif_bmc, val);
+ break;
+
+ case I2C_SLAVE_WRITE_REQUESTED:
+ on_write_requested_event(ssif_bmc, val);
+ break;
+
+ case I2C_SLAVE_READ_PROCESSED:
+ on_read_processed_event(ssif_bmc, val);
+ break;
+
+ case I2C_SLAVE_WRITE_RECEIVED:
+ on_write_received_event(ssif_bmc, val);
+ break;
+
+ case I2C_SLAVE_STOP:
+ on_stop_event(ssif_bmc, val);
+ break;
+
+ default:
+ dev_warn(&ssif_bmc->client->dev, "Warn: Unknown i2c slave event\n");
+ break;
+ }
+
+ if (!ssif_bmc->aborting && ssif_bmc->busy)
+ ret = -EBUSY;
+
+ spin_unlock_irqrestore(&ssif_bmc->lock, flags);
+
+ return ret;
+}
+
+static int ssif_bmc_probe(struct i2c_client *client)
+{
+ struct ssif_bmc_ctx *ssif_bmc;
+ int ret;
+
+ ssif_bmc = devm_kzalloc(&client->dev, sizeof(*ssif_bmc), GFP_KERNEL);
+ if (!ssif_bmc)
+ return -ENOMEM;
+
+ spin_lock_init(&ssif_bmc->lock);
+
+ init_waitqueue_head(&ssif_bmc->wait_queue);
+ ssif_bmc->request_available = false;
+ ssif_bmc->response_in_progress = false;
+ ssif_bmc->busy = false;
+ ssif_bmc->response_timer_inited = false;
+
+ /* Register misc device interface */
+ ssif_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
+ ssif_bmc->miscdev.name = DEVICE_NAME;
+ ssif_bmc->miscdev.fops = &ssif_bmc_fops;
+ ssif_bmc->miscdev.parent = &client->dev;
+ ret = misc_register(&ssif_bmc->miscdev);
+ if (ret)
+ return ret;
+
+ ssif_bmc->client = client;
+ ssif_bmc->client->flags |= I2C_CLIENT_SLAVE;
+
+ /* Register I2C slave */
+ i2c_set_clientdata(client, ssif_bmc);
+ ret = i2c_slave_register(client, ssif_bmc_cb);
+ if (ret)
+ misc_deregister(&ssif_bmc->miscdev);
+
+ return ret;
+}
+
+static void ssif_bmc_remove(struct i2c_client *client)
+{
+ struct ssif_bmc_ctx *ssif_bmc = i2c_get_clientdata(client);
+
+ i2c_slave_unregister(client);
+ misc_deregister(&ssif_bmc->miscdev);
+}
+
+static const struct of_device_id ssif_bmc_match[] = {
+ { .compatible = "ssif-bmc" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ssif_bmc_match);
+
+static const struct i2c_device_id ssif_bmc_id[] = {
+ { DEVICE_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, ssif_bmc_id);
+
+static struct i2c_driver ssif_bmc_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = ssif_bmc_match,
+ },
+ .probe = ssif_bmc_probe,
+ .remove = ssif_bmc_remove,
+ .id_table = ssif_bmc_id,
+};
+
+module_i2c_driver(ssif_bmc_driver);
+
+MODULE_AUTHOR("Quan Nguyen <quan@os.amperecomputing.com>");
+MODULE_AUTHOR("Chuong Tran <chuong@os.amperecomputing.com>");
+MODULE_DESCRIPTION("Linux device driver of the BMC IPMI SSIF interface.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
new file mode 100644
index 0000000000..2f171d14b9
--- /dev/null
+++ b/drivers/char/lp.c
@@ -0,0 +1,1126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic parallel printer driver
+ *
+ * Copyright (C) 1992 by Jim Weigand and Linus Torvalds
+ * Copyright (C) 1992,1993 by Michael K. Johnson
+ * - Thanks much to Gunter Windau for pointing out to me where the error
+ * checking ought to be.
+ * Copyright (C) 1993 by Nigel Gamble (added interrupt code)
+ * Copyright (C) 1994 by Alan Cox (Modularised it)
+ * LPCAREFUL, LPABORT, LPGETSTATUS added by Chris Metcalf, metcalf@lcs.mit.edu
+ * Statistics and support for slow printers by Rob Janssen, rob@knoware.nl
+ * "lp=" command line parameters added by Grant Guenther, grant@torque.net
+ * lp_read (Status readback) support added by Carsten Gross,
+ * carsten@sol.wohnheim.uni-ulm.de
+ * Support for parport by Philip Blundell <philb@gnu.org>
+ * Parport sharing hacking by Andrea Arcangeli
+ * Fixed kernel_(to/from)_user memory copy to check for errors
+ * by Riccardo Facchetti <fizban@tin.it>
+ * 22-JAN-1998 Added support for devfs Richard Gooch <rgooch@atnf.csiro.au>
+ * Redesigned interrupt handling to handle printers with a buggy handshake
+ * by Andrea Arcangeli, 11 May 1998
+ * Full efficient handling of printers with a buggy irq handshake (now I have
+ * understood the meaning of the strange handshake). This is done by sending new
+ * characters if the interrupt has just happened, even if the printer says it is
+ * still BUSY. This is needed at least with the Epson Stylus Color. To enable
+ * the new TRUST_IRQ mode read the `LP OPTIMIZATION' section below...
+ * Fixed the irq on the rising edge of the strobe case.
+ * Obsoleted the CAREFUL flag since a printer that doesn't work with
+ * CAREFUL will block a bit later in lp_check_status().
+ * Andrea Arcangeli, 15 Oct 1998
+ * Obsoleted and removed all the lowlevel stuff implemented in the last
+ * month to use the IEEE1284 functions (that handle the _new_ compatibility
+ * mode fine).
+ */
+
+/* This driver should, in theory, work with any parallel port that has an
+ * appropriate low-level driver; all I/O is done through the parport
+ * abstraction layer.
+ *
+ * If this driver is built into the kernel, you can configure it using the
+ * kernel command-line. For example:
+ *
+ * lp=parport1,none,parport2 (bind lp0 to parport1, disable lp1 and
+ * bind lp2 to parport2)
+ *
+ * lp=auto (assign lp devices to all ports that
+ * have printers attached, as determined
+ * by the IEEE-1284 autoprobe)
+ *
+ * lp=reset (reset the printer during
+ * initialisation)
+ *
+ * lp=off (disable the printer driver entirely)
+ *
+ * If the driver is loaded as a module, similar functionality is available
+ * using module parameters. The equivalent of the above commands would be:
+ *
+ * # insmod lp.o parport=1,none,2
+ *
+ * # insmod lp.o parport=auto
+ *
+ * # insmod lp.o reset=1
+ */
+
+/* COMPATIBILITY WITH OLD KERNELS
+ *
+ * Under Linux 2.0 and previous versions, lp devices were bound to ports at
+ * particular I/O addresses, as follows:
+ *
+ * lp0 0x3bc
+ * lp1 0x378
+ * lp2 0x278
+ *
+ * The new driver, by default, binds lp devices to parport devices as it
+ * finds them. This means that if you only have one port, it will be bound
+ * to lp0 regardless of its I/O address. If you need the old behaviour, you
+ * can force it using the parameters described above.
+ */
+
+/*
+ * The new interrupt handling code takes care of the buggy handshake
+ * of some HP and Epson printers:
+ * ___
+ * ACK _______________ ___________
+ * |__|
+ * ____
+ * BUSY _________ _______
+ * |____________|
+ *
+ * I discovered this using the printer scanner that you can find at:
+ *
+ * ftp://e-mind.com/pub/linux/pscan/
+ *
+ * 11 May 98, Andrea Arcangeli
+ *
+ * My printer scanner, run on an Epson Stylus Color, shows that such printers
+ * generate the irq on the _rising_ edge of STROBE. Now lp handles
+ * this case fine too.
+ *
+ * 15 Oct 1998, Andrea Arcangeli
+ *
+ * The so-called `buggy' handshake is really the well-documented
+ * IEEE1284 compatibility mode handshake. They changed the well-known
+ * Centronics handshake to ack in the middle of busy, expecting not to
+ * break drivers or legacy applications, but they broke Linux lp
+ * until I fixed it by reverse engineering the protocol by hand some
+ * months ago...
+ *
+ * 14 Dec 1998, Andrea Arcangeli
+ *
+ * Copyright (C) 2000 by Tim Waugh (added LPSETTIMEOUT ioctl)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/fcntl.h>
+#include <linux/delay.h>
+#include <linux/poll.h>
+#include <linux/console.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/compat.h>
+
+#include <linux/parport.h>
+#undef LP_STATS
+#include <linux/lp.h>
+
+#include <asm/irq.h>
+#include <linux/uaccess.h>
+
+/* if you have more than 8 printers, remember to increase LP_NO */
+#define LP_NO 8
+
+static DEFINE_MUTEX(lp_mutex);
+static struct lp_struct lp_table[LP_NO];
+static int port_num[LP_NO];
+
+static unsigned int lp_count = 0;
+static const struct class lp_class = {
+ .name = "printer",
+};
+
+#ifdef CONFIG_LP_CONSOLE
+static struct parport *console_registered;
+#endif /* CONFIG_LP_CONSOLE */
+
+#undef LP_DEBUG
+
+/* Bits used to manage claiming the parport device */
+#define LP_PREEMPT_REQUEST 1
+#define LP_PARPORT_CLAIMED 2
+
+/* --- low-level port access ----------------------------------- */
+
+#define r_dtr(x) (parport_read_data(lp_table[(x)].dev->port))
+#define r_str(x) (parport_read_status(lp_table[(x)].dev->port))
+#define w_ctr(x,y) do { parport_write_control(lp_table[(x)].dev->port, (y)); } while (0)
+#define w_dtr(x,y) do { parport_write_data(lp_table[(x)].dev->port, (y)); } while (0)
+
+/* Claim the parport or block trying unless we've already claimed it */
+static void lp_claim_parport_or_block(struct lp_struct *this_lp)
+{
+ if (!test_and_set_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) {
+ parport_claim_or_block(this_lp->dev);
+ }
+}
+
+/* Release the parport if we have previously claimed it */
+static void lp_release_parport(struct lp_struct *this_lp)
+{
+ if (test_and_clear_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) {
+ parport_release(this_lp->dev);
+ }
+}
+
+
+
+static int lp_preempt(void *handle)
+{
+ struct lp_struct *this_lp = (struct lp_struct *)handle;
+ set_bit(LP_PREEMPT_REQUEST, &this_lp->bits);
+ return 1;
+}
+
+
+/*
+ * Try to negotiate to a new mode; if unsuccessful negotiate to
+ * compatibility mode. Return the mode we ended up in.
+ */
+static int lp_negotiate(struct parport *port, int mode)
+{
+ if (parport_negotiate(port, mode) != 0) {
+ mode = IEEE1284_MODE_COMPAT;
+ parport_negotiate(port, mode);
+ }
+
+ return mode;
+}
+
+static int lp_reset(int minor)
+{
+ int retval;
+ lp_claim_parport_or_block(&lp_table[minor]);
+ w_ctr(minor, LP_PSELECP);
+ udelay(LP_DELAY);
+ w_ctr(minor, LP_PSELECP | LP_PINITP);
+ retval = r_str(minor);
+ lp_release_parport(&lp_table[minor]);
+ return retval;
+}
+
+static void lp_error(int minor)
+{
+ DEFINE_WAIT(wait);
+ int polling;
+
+ if (LP_F(minor) & LP_ABORT)
+ return;
+
+ polling = lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE;
+ if (polling)
+ lp_release_parport(&lp_table[minor]);
+ prepare_to_wait(&lp_table[minor].waitq, &wait, TASK_INTERRUPTIBLE);
+ schedule_timeout(LP_TIMEOUT_POLLED);
+ finish_wait(&lp_table[minor].waitq, &wait);
+ if (polling)
+ lp_claim_parport_or_block(&lp_table[minor]);
+ else
+ parport_yield_blocking(lp_table[minor].dev);
+}
+
+static int lp_check_status(int minor)
+{
+ int error = 0;
+ unsigned int last = lp_table[minor].last_error;
+ unsigned char status = r_str(minor);
+ if ((status & LP_PERRORP) && !(LP_F(minor) & LP_CAREFUL))
+ /* No error. */
+ last = 0;
+ else if ((status & LP_POUTPA)) {
+ if (last != LP_POUTPA) {
+ last = LP_POUTPA;
+ printk(KERN_INFO "lp%d out of paper\n", minor);
+ }
+ error = -ENOSPC;
+ } else if (!(status & LP_PSELECD)) {
+ if (last != LP_PSELECD) {
+ last = LP_PSELECD;
+ printk(KERN_INFO "lp%d off-line\n", minor);
+ }
+ error = -EIO;
+ } else if (!(status & LP_PERRORP)) {
+ if (last != LP_PERRORP) {
+ last = LP_PERRORP;
+ printk(KERN_INFO "lp%d on fire\n", minor);
+ }
+ error = -EIO;
+ } else {
+ last = 0; /* Come here if LP_CAREFUL is set and no
+ errors are reported. */
+ }
+
+ lp_table[minor].last_error = last;
+
+ if (last != 0)
+ lp_error(minor);
+
+ return error;
+}
+
+static int lp_wait_ready(int minor, int nonblock)
+{
+ int error = 0;
+
+ /* If we're not in compatibility mode, we're ready now! */
+ if (lp_table[minor].current_mode != IEEE1284_MODE_COMPAT) {
+ return 0;
+ }
+
+ do {
+ error = lp_check_status(minor);
+ if (error && (nonblock || (LP_F(minor) & LP_ABORT)))
+ break;
+ if (signal_pending(current)) {
+ error = -EINTR;
+ break;
+ }
+ } while (error);
+ return error;
+}
+
+static ssize_t lp_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int minor = iminor(file_inode(file));
+ struct parport *port = lp_table[minor].dev->port;
+ char *kbuf = lp_table[minor].lp_buffer;
+ ssize_t retv = 0;
+ ssize_t written;
+ size_t copy_size = count;
+ int nonblock = ((file->f_flags & O_NONBLOCK) ||
+ (LP_F(minor) & LP_ABORT));
+
+#ifdef LP_STATS
+ if (time_after(jiffies, lp_table[minor].lastcall + LP_TIME(minor)))
+ lp_table[minor].runchars = 0;
+
+ lp_table[minor].lastcall = jiffies;
+#endif
+
+ /* Need to copy the data from user-space. */
+ if (copy_size > LP_BUFFER_SIZE)
+ copy_size = LP_BUFFER_SIZE;
+
+ if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
+ return -EINTR;
+
+ if (copy_from_user(kbuf, buf, copy_size)) {
+ retv = -EFAULT;
+ goto out_unlock;
+ }
+
+ /* Claim the parport or sleep until it becomes available */
+ lp_claim_parport_or_block(&lp_table[minor]);
+ /* Go to the proper mode. */
+ lp_table[minor].current_mode = lp_negotiate(port,
+ lp_table[minor].best_mode);
+
+ parport_set_timeout(lp_table[minor].dev,
+ (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK
+ : lp_table[minor].timeout));
+
+ if ((retv = lp_wait_ready(minor, nonblock)) == 0)
+ do {
+ /* Write the data. */
+ written = parport_write(port, kbuf, copy_size);
+ if (written > 0) {
+ copy_size -= written;
+ count -= written;
+ buf += written;
+ retv += written;
+ }
+
+ if (signal_pending(current)) {
+ if (retv == 0)
+ retv = -EINTR;
+
+ break;
+ }
+
+ if (copy_size > 0) {
+ /* incomplete write -> check error ! */
+ int error;
+
+ parport_negotiate(lp_table[minor].dev->port,
+ IEEE1284_MODE_COMPAT);
+ lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
+
+ error = lp_wait_ready(minor, nonblock);
+
+ if (error) {
+ if (retv == 0)
+ retv = error;
+ break;
+ } else if (nonblock) {
+ if (retv == 0)
+ retv = -EAGAIN;
+ break;
+ }
+
+ parport_yield_blocking(lp_table[minor].dev);
+ lp_table[minor].current_mode
+ = lp_negotiate(port,
+ lp_table[minor].best_mode);
+
+ } else if (need_resched())
+ schedule();
+
+ if (count) {
+ copy_size = count;
+ if (copy_size > LP_BUFFER_SIZE)
+ copy_size = LP_BUFFER_SIZE;
+
+ if (copy_from_user(kbuf, buf, copy_size)) {
+ if (retv == 0)
+ retv = -EFAULT;
+ break;
+ }
+ }
+ } while (count > 0);
+
+ if (test_and_clear_bit(LP_PREEMPT_REQUEST,
+ &lp_table[minor].bits)) {
+ printk(KERN_INFO "lp%d releasing parport\n", minor);
+ parport_negotiate(lp_table[minor].dev->port,
+ IEEE1284_MODE_COMPAT);
+ lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
+ lp_release_parport(&lp_table[minor]);
+ }
+out_unlock:
+ mutex_unlock(&lp_table[minor].port_mutex);
+
+ return retv;
+}
+
+#ifdef CONFIG_PARPORT_1284
+
+/* Status readback conforming to ieee1284 */
+static ssize_t lp_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ DEFINE_WAIT(wait);
+	unsigned int minor = iminor(file_inode(file));
+ struct parport *port = lp_table[minor].dev->port;
+ ssize_t retval = 0;
+ char *kbuf = lp_table[minor].lp_buffer;
+ int nonblock = ((file->f_flags & O_NONBLOCK) ||
+ (LP_F(minor) & LP_ABORT));
+
+ if (count > LP_BUFFER_SIZE)
+ count = LP_BUFFER_SIZE;
+
+ if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
+ return -EINTR;
+
+ lp_claim_parport_or_block(&lp_table[minor]);
+
+ parport_set_timeout(lp_table[minor].dev,
+ (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK
+ : lp_table[minor].timeout));
+
+ parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
+ if (parport_negotiate(lp_table[minor].dev->port,
+ IEEE1284_MODE_NIBBLE)) {
+ retval = -EIO;
+ goto out;
+ }
+
+ while (retval == 0) {
+ retval = parport_read(port, kbuf, count);
+
+ if (retval > 0)
+ break;
+
+ if (nonblock) {
+ retval = -EAGAIN;
+ break;
+ }
+
+ /* Wait for data. */
+
+ if (lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE) {
+ parport_negotiate(lp_table[minor].dev->port,
+ IEEE1284_MODE_COMPAT);
+ lp_error(minor);
+ if (parport_negotiate(lp_table[minor].dev->port,
+ IEEE1284_MODE_NIBBLE)) {
+ retval = -EIO;
+ goto out;
+ }
+ } else {
+ prepare_to_wait(&lp_table[minor].waitq, &wait, TASK_INTERRUPTIBLE);
+ schedule_timeout(LP_TIMEOUT_POLLED);
+ finish_wait(&lp_table[minor].waitq, &wait);
+ }
+
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+
+ cond_resched();
+ }
+ parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
+ out:
+ lp_release_parport(&lp_table[minor]);
+
+ if (retval > 0 && copy_to_user(buf, kbuf, retval))
+ retval = -EFAULT;
+
+ mutex_unlock(&lp_table[minor].port_mutex);
+
+ return retval;
+}
+
+#endif /* IEEE 1284 support */
+
+static int lp_open(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ int ret = 0;
+
+ mutex_lock(&lp_mutex);
+ if (minor >= LP_NO) {
+ ret = -ENXIO;
+ goto out;
+ }
+ if ((LP_F(minor) & LP_EXIST) == 0) {
+ ret = -ENXIO;
+ goto out;
+ }
+ if (test_and_set_bit(LP_BUSY_BIT_POS, &LP_F(minor))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ /* If ABORTOPEN is set and the printer is offline or out of paper,
+ we may still want to open it to perform ioctl()s. Therefore we
+ have commandeered O_NONBLOCK, even though it is being used in
+ a non-standard manner. This is strictly a Linux hack, and
+ should most likely only ever be used by the tunelp application. */
+ if ((LP_F(minor) & LP_ABORTOPEN) && !(file->f_flags & O_NONBLOCK)) {
+ int status;
+ lp_claim_parport_or_block(&lp_table[minor]);
+ status = r_str(minor);
+ lp_release_parport(&lp_table[minor]);
+ if (status & LP_POUTPA) {
+ printk(KERN_INFO "lp%d out of paper\n", minor);
+ LP_F(minor) &= ~LP_BUSY;
+ ret = -ENOSPC;
+ goto out;
+ } else if (!(status & LP_PSELECD)) {
+ printk(KERN_INFO "lp%d off-line\n", minor);
+ LP_F(minor) &= ~LP_BUSY;
+ ret = -EIO;
+ goto out;
+ } else if (!(status & LP_PERRORP)) {
+ printk(KERN_ERR "lp%d printer error\n", minor);
+ LP_F(minor) &= ~LP_BUSY;
+ ret = -EIO;
+ goto out;
+ }
+ }
+ lp_table[minor].lp_buffer = kmalloc(LP_BUFFER_SIZE, GFP_KERNEL);
+ if (!lp_table[minor].lp_buffer) {
+ LP_F(minor) &= ~LP_BUSY;
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* Determine if the peripheral supports ECP mode */
+ lp_claim_parport_or_block(&lp_table[minor]);
+ if ((lp_table[minor].dev->port->modes & PARPORT_MODE_ECP) &&
+ !parport_negotiate(lp_table[minor].dev->port,
+ IEEE1284_MODE_ECP)) {
+ printk(KERN_INFO "lp%d: ECP mode\n", minor);
+ lp_table[minor].best_mode = IEEE1284_MODE_ECP;
+ } else {
+ lp_table[minor].best_mode = IEEE1284_MODE_COMPAT;
+ }
+ /* Leave peripheral in compatibility mode */
+ parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
+ lp_release_parport(&lp_table[minor]);
+ lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
+out:
+ mutex_unlock(&lp_mutex);
+ return ret;
+}
+
+static int lp_release(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+
+ lp_claim_parport_or_block(&lp_table[minor]);
+ parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
+ lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
+ lp_release_parport(&lp_table[minor]);
+ kfree(lp_table[minor].lp_buffer);
+ lp_table[minor].lp_buffer = NULL;
+ LP_F(minor) &= ~LP_BUSY;
+ return 0;
+}
+
+static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
+ unsigned long arg, void __user *argp)
+{
+ int status;
+ int retval = 0;
+
+#ifdef LP_DEBUG
+ printk(KERN_DEBUG "lp%d ioctl, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg);
+#endif
+ if (minor >= LP_NO)
+ return -ENODEV;
+ if ((LP_F(minor) & LP_EXIST) == 0)
+ return -ENODEV;
+ switch (cmd) {
+ case LPTIME:
+ if (arg > UINT_MAX / HZ)
+ return -EINVAL;
+ LP_TIME(minor) = arg * HZ/100;
+ break;
+ case LPCHAR:
+ LP_CHAR(minor) = arg;
+ break;
+ case LPABORT:
+ if (arg)
+ LP_F(minor) |= LP_ABORT;
+ else
+ LP_F(minor) &= ~LP_ABORT;
+ break;
+ case LPABORTOPEN:
+ if (arg)
+ LP_F(minor) |= LP_ABORTOPEN;
+ else
+ LP_F(minor) &= ~LP_ABORTOPEN;
+ break;
+ case LPCAREFUL:
+ if (arg)
+ LP_F(minor) |= LP_CAREFUL;
+ else
+ LP_F(minor) &= ~LP_CAREFUL;
+ break;
+ case LPWAIT:
+ LP_WAIT(minor) = arg;
+ break;
+ case LPSETIRQ:
+ return -EINVAL;
+ case LPGETIRQ:
+ if (copy_to_user(argp, &LP_IRQ(minor),
+ sizeof(int)))
+ return -EFAULT;
+ break;
+ case LPGETSTATUS:
+ if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
+ return -EINTR;
+ lp_claim_parport_or_block(&lp_table[minor]);
+ status = r_str(minor);
+ lp_release_parport(&lp_table[minor]);
+ mutex_unlock(&lp_table[minor].port_mutex);
+
+ if (copy_to_user(argp, &status, sizeof(int)))
+ return -EFAULT;
+ break;
+ case LPRESET:
+ lp_reset(minor);
+ break;
+#ifdef LP_STATS
+ case LPGETSTATS:
+ if (copy_to_user(argp, &LP_STAT(minor),
+ sizeof(struct lp_stats)))
+ return -EFAULT;
+ if (capable(CAP_SYS_ADMIN))
+ memset(&LP_STAT(minor), 0,
+ sizeof(struct lp_stats));
+ break;
+#endif
+ case LPGETFLAGS:
+ status = LP_F(minor);
+ if (copy_to_user(argp, &status, sizeof(int)))
+ return -EFAULT;
+ break;
+
+ default:
+ retval = -EINVAL;
+ }
+ return retval;
+}
+
+static int lp_set_timeout(unsigned int minor, s64 tv_sec, long tv_usec)
+{
+ long to_jiffies;
+
+ /* Convert to jiffies, place in lp_table */
+ if (tv_sec < 0 || tv_usec < 0)
+ return -EINVAL;
+
+ /*
+ * we used to not check, so let's not make this fatal,
+ * but deal with user space passing a 32-bit tv_nsec in
+ * a 64-bit field, capping the timeout to 1 second
+ * worth of microseconds, and capping the total at
+ * MAX_JIFFY_OFFSET.
+ */
+ if (tv_usec > 999999)
+ tv_usec = 999999;
+
+ if (tv_sec >= MAX_SEC_IN_JIFFIES - 1) {
+ to_jiffies = MAX_JIFFY_OFFSET;
+ } else {
+ to_jiffies = DIV_ROUND_UP(tv_usec, 1000000/HZ);
+ to_jiffies += tv_sec * (long) HZ;
+ }
+
+ if (to_jiffies <= 0) {
+ return -EINVAL;
+ }
+ lp_table[minor].timeout = to_jiffies;
+ return 0;
+}
+
+static int lp_set_timeout32(unsigned int minor, void __user *arg)
+{
+ s32 karg[2];
+
+ if (copy_from_user(karg, arg, sizeof(karg)))
+ return -EFAULT;
+
+ return lp_set_timeout(minor, karg[0], karg[1]);
+}
+
+static int lp_set_timeout64(unsigned int minor, void __user *arg)
+{
+ s64 karg[2];
+
+ if (copy_from_user(karg, arg, sizeof(karg)))
+ return -EFAULT;
+
+ /* sparc64 suseconds_t is 32-bit only */
+ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
+ karg[1] >>= 32;
+
+ return lp_set_timeout(minor, karg[0], karg[1]);
+}
+
+static long lp_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned int minor;
+ int ret;
+
+ minor = iminor(file_inode(file));
+ mutex_lock(&lp_mutex);
+ switch (cmd) {
+ case LPSETTIMEOUT_OLD:
+ if (BITS_PER_LONG == 32) {
+ ret = lp_set_timeout32(minor, (void __user *)arg);
+ break;
+ }
+ fallthrough; /* for 64-bit */
+ case LPSETTIMEOUT_NEW:
+ ret = lp_set_timeout64(minor, (void __user *)arg);
+ break;
+ default:
+ ret = lp_do_ioctl(minor, cmd, arg, (void __user *)arg);
+ break;
+ }
+ mutex_unlock(&lp_mutex);
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long lp_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned int minor;
+ int ret;
+
+ minor = iminor(file_inode(file));
+ mutex_lock(&lp_mutex);
+ switch (cmd) {
+ case LPSETTIMEOUT_OLD:
+ if (!COMPAT_USE_64BIT_TIME) {
+ ret = lp_set_timeout32(minor, (void __user *)arg);
+ break;
+ }
+ fallthrough; /* for x32 mode */
+ case LPSETTIMEOUT_NEW:
+ ret = lp_set_timeout64(minor, (void __user *)arg);
+ break;
+#ifdef LP_STATS
+ case LPGETSTATS:
+ /* FIXME: add an implementation if you set LP_STATS */
+ ret = -EINVAL;
+ break;
+#endif
+ default:
+ ret = lp_do_ioctl(minor, cmd, arg, compat_ptr(arg));
+ break;
+ }
+ mutex_unlock(&lp_mutex);
+
+ return ret;
+}
+#endif
+
+static const struct file_operations lp_fops = {
+ .owner = THIS_MODULE,
+ .write = lp_write,
+ .unlocked_ioctl = lp_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = lp_compat_ioctl,
+#endif
+ .open = lp_open,
+ .release = lp_release,
+#ifdef CONFIG_PARPORT_1284
+ .read = lp_read,
+#endif
+ .llseek = noop_llseek,
+};
+
+/* --- support for console on the line printer ----------------- */
+
+#ifdef CONFIG_LP_CONSOLE
+
+#define CONSOLE_LP 0
+
+/* If the printer is out of paper, we can either lose the messages or
+ * stall until the printer is happy again. Define CONSOLE_LP_STRICT
+ * non-zero to get the latter behaviour. */
+#define CONSOLE_LP_STRICT 1
+
+/* The console must be locked when we get here. */
+
+static void lp_console_write(struct console *co, const char *s,
+ unsigned count)
+{
+ struct pardevice *dev = lp_table[CONSOLE_LP].dev;
+ struct parport *port = dev->port;
+ ssize_t written;
+
+ if (parport_claim(dev))
+ /* Nothing we can do. */
+ return;
+
+ parport_set_timeout(dev, 0);
+
+ /* Go to compatibility mode. */
+ parport_negotiate(port, IEEE1284_MODE_COMPAT);
+
+ do {
+ /* Write the data, converting LF->CRLF as we go. */
+ ssize_t canwrite = count;
+ char *lf = memchr(s, '\n', count);
+ if (lf)
+ canwrite = lf - s;
+
+ if (canwrite > 0) {
+ written = parport_write(port, s, canwrite);
+
+ if (written <= 0)
+ continue;
+
+ s += written;
+ count -= written;
+ canwrite -= written;
+ }
+
+ if (lf && canwrite <= 0) {
+ const char *crlf = "\r\n";
+ int i = 2;
+
+ /* Dodge the original '\n', and put '\r\n' instead. */
+ s++;
+ count--;
+ do {
+ written = parport_write(port, crlf, i);
+ if (written > 0) {
+ i -= written;
+ crlf += written;
+ }
+ } while (i > 0 && (CONSOLE_LP_STRICT || written > 0));
+ }
+ } while (count > 0 && (CONSOLE_LP_STRICT || written > 0));
+
+ parport_release(dev);
+}
+
+static struct console lpcons = {
+ .name = "lp",
+ .write = lp_console_write,
+ .flags = CON_PRINTBUFFER,
+};
+
+#endif /* console on line printer */
+
+/* --- initialisation code ------------------------------------- */
+
+static int parport_nr[LP_NO] = { [0 ... LP_NO-1] = LP_PARPORT_UNSPEC };
+static char *parport[LP_NO];
+static bool reset;
+
+module_param_array(parport, charp, NULL, 0);
+module_param(reset, bool, 0);
+
+#ifndef MODULE
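+/*
+ * Parse the "lp=" boot option: "0" disables the driver, "parportN" binds
+ * the next device to a specific port, "auto" attaches only to ports that
+ * probe as printers, "none" skips a device slot, and "reset" resets each
+ * printer when it is attached.
+ */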
+static int __init lp_setup(char *str)
+{
+ static int parport_ptr;
+ int x;
+
+ if (get_option(&str, &x)) {
+ if (x == 0) {
+ /* disable driver on "lp=" or "lp=0" */
+ parport_nr[0] = LP_PARPORT_OFF;
+ } else {
+ printk(KERN_WARNING "warning: 'lp=0x%x' is deprecated, ignored\n", x);
+ return 0;
+ }
+ } else if (!strncmp(str, "parport", 7)) {
+ int n = simple_strtoul(str+7, NULL, 10);
+ if (parport_ptr < LP_NO)
+ parport_nr[parport_ptr++] = n;
+ else
+ printk(KERN_INFO "lp: too many ports, %s ignored.\n",
+ str);
+ } else if (!strcmp(str, "auto")) {
+ parport_nr[0] = LP_PARPORT_AUTO;
+ } else if (!strcmp(str, "none")) {
+ if (parport_ptr < LP_NO)
+ parport_nr[parport_ptr++] = LP_PARPORT_NONE;
+ else
+ printk(KERN_INFO "lp: too many ports, %s ignored.\n",
+ str);
+ } else if (!strcmp(str, "reset")) {
+ reset = true;
+ }
+ return 1;
+}
+#endif
+
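+/*
+ * Bind lp device <nr> to a parport: register the pardevice, create the
+ * device node, optionally reset the printer and, for lp0, try to set up
+ * the line printer console.
+ */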
+static int lp_register(int nr, struct parport *port)
+{
+ struct pardev_cb ppdev_cb;
+
+ memset(&ppdev_cb, 0, sizeof(ppdev_cb));
+ ppdev_cb.preempt = lp_preempt;
+ ppdev_cb.private = &lp_table[nr];
+ lp_table[nr].dev = parport_register_dev_model(port, "lp",
+ &ppdev_cb, nr);
+ if (lp_table[nr].dev == NULL)
+ return 1;
+ lp_table[nr].flags |= LP_EXIST;
+
+ if (reset)
+ lp_reset(nr);
+
+ device_create(&lp_class, port->dev, MKDEV(LP_MAJOR, nr), NULL,
+ "lp%d", nr);
+
+ printk(KERN_INFO "lp%d: using %s (%s).\n", nr, port->name,
+ (port->irq == PARPORT_IRQ_NONE)?"polling":"interrupt-driven");
+
+#ifdef CONFIG_LP_CONSOLE
+ if (!nr) {
+ if (port->modes & PARPORT_MODE_SAFEININT) {
+ register_console(&lpcons);
+ console_registered = port;
+ printk(KERN_INFO "lp%d: console ready\n", CONSOLE_LP);
+ } else
+ printk(KERN_ERR "lp%d: cannot run console on %s\n",
+ CONSOLE_LP, port->name);
+ }
+#endif
+ port_num[nr] = port->number;
+
+ return 0;
+}
+
+static void lp_attach(struct parport *port)
+{
+ unsigned int i;
+
+ switch (parport_nr[0]) {
+ case LP_PARPORT_UNSPEC:
+ case LP_PARPORT_AUTO:
+ if (parport_nr[0] == LP_PARPORT_AUTO &&
+ port->probe_info[0].class != PARPORT_CLASS_PRINTER)
+ return;
+ if (lp_count == LP_NO) {
+ printk(KERN_INFO "lp: ignoring parallel port (max. %d)\n",LP_NO);
+ return;
+ }
+ for (i = 0; i < LP_NO; i++)
+ if (port_num[i] == -1)
+ break;
+
+ if (!lp_register(i, port))
+ lp_count++;
+ break;
+
+ default:
+ for (i = 0; i < LP_NO; i++) {
+ if (port->number == parport_nr[i]) {
+ if (!lp_register(i, port))
+ lp_count++;
+ break;
+ }
+ }
+ break;
+ }
+}
+
+static void lp_detach(struct parport *port)
+{
+ int n;
+
+ /* Write this some day. */
+#ifdef CONFIG_LP_CONSOLE
+ if (console_registered == port) {
+ unregister_console(&lpcons);
+ console_registered = NULL;
+ }
+#endif /* CONFIG_LP_CONSOLE */
+
+ for (n = 0; n < LP_NO; n++) {
+ if (port_num[n] == port->number) {
+ port_num[n] = -1;
+ lp_count--;
+ device_destroy(&lp_class, MKDEV(LP_MAJOR, n));
+ parport_unregister_device(lp_table[n].dev);
+ }
+ }
+}
+
+static struct parport_driver lp_driver = {
+ .name = "lp",
+ .match_port = lp_attach,
+ .detach = lp_detach,
+ .devmodel = true,
+};
+
+static int __init lp_init(void)
+{
+ int i, err;
+
+ if (parport_nr[0] == LP_PARPORT_OFF)
+ return 0;
+
+ for (i = 0; i < LP_NO; i++) {
+ lp_table[i].dev = NULL;
+ lp_table[i].flags = 0;
+ lp_table[i].chars = LP_INIT_CHAR;
+ lp_table[i].time = LP_INIT_TIME;
+ lp_table[i].wait = LP_INIT_WAIT;
+ lp_table[i].lp_buffer = NULL;
+#ifdef LP_STATS
+ lp_table[i].lastcall = 0;
+ lp_table[i].runchars = 0;
+ memset(&lp_table[i].stats, 0, sizeof(struct lp_stats));
+#endif
+ lp_table[i].last_error = 0;
+ init_waitqueue_head(&lp_table[i].waitq);
+ init_waitqueue_head(&lp_table[i].dataq);
+ mutex_init(&lp_table[i].port_mutex);
+ lp_table[i].timeout = 10 * HZ;
+ port_num[i] = -1;
+ }
+
+ if (register_chrdev(LP_MAJOR, "lp", &lp_fops)) {
+ printk(KERN_ERR "lp: unable to get major %d\n", LP_MAJOR);
+ return -EIO;
+ }
+
+ err = class_register(&lp_class);
+ if (err)
+ goto out_reg;
+
+ if (parport_register_driver(&lp_driver)) {
+ printk(KERN_ERR "lp: unable to register with parport\n");
+ err = -EIO;
+ goto out_class;
+ }
+
+ if (!lp_count) {
+ printk(KERN_INFO "lp: driver loaded but no devices found\n");
+#ifndef CONFIG_PARPORT_1284
+ if (parport_nr[0] == LP_PARPORT_AUTO)
+ printk(KERN_INFO "lp: (is IEEE 1284 support enabled?)\n");
+#endif
+ }
+
+ return 0;
+
+out_class:
+ class_unregister(&lp_class);
+out_reg:
+ unregister_chrdev(LP_MAJOR, "lp");
+ return err;
+}
+
+static int __init lp_init_module(void)
+{
+ if (parport[0]) {
+ /* The user gave some parameters. Let's see what they were. */
+ if (!strncmp(parport[0], "auto", 4))
+ parport_nr[0] = LP_PARPORT_AUTO;
+ else {
+ int n;
+ for (n = 0; n < LP_NO && parport[n]; n++) {
+ if (!strncmp(parport[n], "none", 4))
+ parport_nr[n] = LP_PARPORT_NONE;
+ else {
+ char *ep;
+ unsigned long r = simple_strtoul(parport[n], &ep, 0);
+ if (ep != parport[n])
+ parport_nr[n] = r;
+ else {
+ printk(KERN_ERR "lp: bad port specifier `%s'\n", parport[n]);
+ return -ENODEV;
+ }
+ }
+ }
+ }
+ }
+
+ return lp_init();
+}
+
+static void lp_cleanup_module(void)
+{
+ parport_unregister_driver(&lp_driver);
+
+#ifdef CONFIG_LP_CONSOLE
+ unregister_console(&lpcons);
+#endif
+
+ unregister_chrdev(LP_MAJOR, "lp");
+ class_unregister(&lp_class);
+}
+
+__setup("lp=", lp_setup);
+module_init(lp_init_module);
+module_exit(lp_cleanup_module);
+
+MODULE_ALIAS_CHARDEV_MAJOR(LP_MAJOR);
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
new file mode 100644
index 0000000000..1052b0f2d4
--- /dev/null
+++ b/drivers/char/mem.c
@@ -0,0 +1,783 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/drivers/char/mem.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * Added devfs support.
+ * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
+ * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/random.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/capability.h>
+#include <linux/ptrace.h>
+#include <linux/device.h>
+#include <linux/highmem.h>
+#include <linux/backing-dev.h>
+#include <linux/shmem_fs.h>
+#include <linux/splice.h>
+#include <linux/pfn.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/security.h>
+
+#ifdef CONFIG_IA64
+# include <linux/efi.h>
+#endif
+
+#define DEVMEM_MINOR 1
+#define DEVPORT_MINOR 4
+
+static inline unsigned long size_inside_page(unsigned long start,
+ unsigned long size)
+{
+ unsigned long sz;
+
+ sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
+
+ return min(sz, size);
+}
+
+#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
+static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
+{
+ return addr + count <= __pa(high_memory);
+}
+
+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
+{
+ return 1;
+}
+#endif
+
+#ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+ return devmem_is_allowed(pfn);
+}
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+ u64 from = ((u64)pfn) << PAGE_SHIFT;
+ u64 to = from + size;
+ u64 cursor = from;
+
+ while (cursor < to) {
+ if (!devmem_is_allowed(pfn))
+ return 0;
+ cursor += PAGE_SIZE;
+ pfn++;
+ }
+ return 1;
+}
+#else
+static inline int page_is_allowed(unsigned long pfn)
+{
+ return 1;
+}
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+ return 1;
+}
+#endif
+
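+/* Reschedule if necessary and tell the caller to stop once a signal is pending. */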
+static inline bool should_stop_iteration(void)
+{
+ if (need_resched())
+ cond_resched();
+ return signal_pending(current);
+}
+
+/*
+ * This function reads the *physical* memory. The f_pos points directly to the
+ * memory location.
+ */
+static ssize_t read_mem(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ phys_addr_t p = *ppos;
+ ssize_t read, sz;
+ void *ptr;
+ char *bounce;
+ int err;
+
+ if (p != *ppos)
+ return 0;
+
+ if (!valid_phys_addr_range(p, count))
+ return -EFAULT;
+ read = 0;
+#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
+ /* we don't have page 0 mapped on sparc and m68k.. */
+ if (p < PAGE_SIZE) {
+ sz = size_inside_page(p, count);
+ if (sz > 0) {
+ if (clear_user(buf, sz))
+ return -EFAULT;
+ buf += sz;
+ p += sz;
+ count -= sz;
+ read += sz;
+ }
+ }
+#endif
+
+ bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!bounce)
+ return -ENOMEM;
+
+ while (count > 0) {
+ unsigned long remaining;
+ int allowed, probe;
+
+ sz = size_inside_page(p, count);
+
+ err = -EPERM;
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
+ goto failed;
+
+ err = -EFAULT;
+ if (allowed == 2) {
+ /* Show zeros for restricted memory. */
+ remaining = clear_user(buf, sz);
+ } else {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr)
+ goto failed;
+
+ probe = copy_from_kernel_nofault(bounce, ptr, sz);
+ unxlate_dev_mem_ptr(p, ptr);
+ if (probe)
+ goto failed;
+
+ remaining = copy_to_user(buf, bounce, sz);
+ }
+
+ if (remaining)
+ goto failed;
+
+ buf += sz;
+ p += sz;
+ count -= sz;
+ read += sz;
+ if (should_stop_iteration())
+ break;
+ }
+ kfree(bounce);
+
+ *ppos += read;
+ return read;
+
+failed:
+ kfree(bounce);
+ return err;
+}
+
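+/*
+ * This function writes to the *physical* memory; as in read_mem(), the
+ * f_pos points directly to the memory location. Pages marked as restricted
+ * are silently skipped.
+ */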
+static ssize_t write_mem(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ phys_addr_t p = *ppos;
+ ssize_t written, sz;
+ unsigned long copied;
+ void *ptr;
+
+ if (p != *ppos)
+ return -EFBIG;
+
+ if (!valid_phys_addr_range(p, count))
+ return -EFAULT;
+
+ written = 0;
+
+#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
+ /* we don't have page 0 mapped on sparc and m68k.. */
+ if (p < PAGE_SIZE) {
+ sz = size_inside_page(p, count);
+ /* Hmm. Do something? */
+ buf += sz;
+ p += sz;
+ count -= sz;
+ written += sz;
+ }
+#endif
+
+ while (count > 0) {
+ int allowed;
+
+ sz = size_inside_page(p, count);
+
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
+ return -EPERM;
+
+ /* Skip actual writing when a page is marked as restricted. */
+ if (allowed == 1) {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr) {
+ if (written)
+ break;
+ return -EFAULT;
+ }
+
+ copied = copy_from_user(ptr, buf, sz);
+ unxlate_dev_mem_ptr(p, ptr);
+ if (copied) {
+ written += sz - copied;
+ if (written)
+ break;
+ return -EFAULT;
+ }
+ }
+
+ buf += sz;
+ p += sz;
+ count -= sz;
+ written += sz;
+ if (should_stop_iteration())
+ break;
+ }
+
+ *ppos += written;
+ return written;
+}
+
+int __weak phys_mem_access_prot_allowed(struct file *file,
+ unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
+{
+ return 1;
+}
+
+#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
+
+/*
+ * Architectures vary in how they handle caching for addresses
+ * outside of main memory.
+ *
+ */
+#ifdef pgprot_noncached
+static int uncached_access(struct file *file, phys_addr_t addr)
+{
+#if defined(CONFIG_IA64)
+ /*
+ * On ia64, we ignore O_DSYNC because we cannot tolerate memory
+ * attribute aliases.
+ */
+ return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
+#else
+ /*
+	 * Accessing memory above the top of what the kernel knows about, or
+	 * through a file pointer that was marked O_DSYNC, will be done
+	 * non-cached.
+ */
+ if (file->f_flags & O_DSYNC)
+ return 1;
+ return addr >= __pa(high_memory);
+#endif
+}
+#endif
+
+static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot)
+{
+#ifdef pgprot_noncached
+ phys_addr_t offset = pfn << PAGE_SHIFT;
+
+ if (uncached_access(file, offset))
+ return pgprot_noncached(vma_prot);
+#endif
+ return vma_prot;
+}
+#endif
+
+#ifndef CONFIG_MMU
+static unsigned long get_unmapped_area_mem(struct file *file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ if (!valid_mmap_phys_addr_range(pgoff, len))
+ return (unsigned long) -EINVAL;
+ return pgoff << PAGE_SHIFT;
+}
+
+/* permit direct mmap, for read, write or exec */
+static unsigned memory_mmap_capabilities(struct file *file)
+{
+ return NOMMU_MAP_DIRECT |
+ NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
+}
+
+static unsigned zero_mmap_capabilities(struct file *file)
+{
+ return NOMMU_MAP_COPY;
+}
+
+/* can't do an in-place private mapping if there's no MMU */
+static inline int private_mapping_ok(struct vm_area_struct *vma)
+{
+ return is_nommu_shared_mapping(vma->vm_flags);
+}
+#else
+
+static inline int private_mapping_ok(struct vm_area_struct *vma)
+{
+ return 1;
+}
+#endif
+
+static const struct vm_operations_struct mmap_mem_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys
+#endif
+};
+
+static int mmap_mem(struct file *file, struct vm_area_struct *vma)
+{
+ size_t size = vma->vm_end - vma->vm_start;
+ phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+
+ /* Does it even fit in phys_addr_t? */
+ if (offset >> PAGE_SHIFT != vma->vm_pgoff)
+ return -EINVAL;
+
+ /* It's illegal to wrap around the end of the physical address space. */
+ if (offset + (phys_addr_t)size - 1 < offset)
+ return -EINVAL;
+
+ if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
+ return -EINVAL;
+
+ if (!private_mapping_ok(vma))
+ return -ENOSYS;
+
+ if (!range_is_allowed(vma->vm_pgoff, size))
+ return -EPERM;
+
+ if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
+ &vma->vm_page_prot))
+ return -EINVAL;
+
+ vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
+ size,
+ vma->vm_page_prot);
+
+ vma->vm_ops = &mmap_mem_ops;
+
+ /* Remap-pfn-range will mark the range VM_IO */
+ if (remap_pfn_range(vma,
+ vma->vm_start,
+ vma->vm_pgoff,
+ size,
+ vma->vm_page_prot)) {
+ return -EAGAIN;
+ }
+ return 0;
+}
+
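+/* /dev/port: f_pos is the I/O port number; reads and writes transfer one byte at a time. */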
+static ssize_t read_port(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long i = *ppos;
+ char __user *tmp = buf;
+
+ if (!access_ok(buf, count))
+ return -EFAULT;
+ while (count-- > 0 && i < 65536) {
+ if (__put_user(inb(i), tmp) < 0)
+ return -EFAULT;
+ i++;
+ tmp++;
+ }
+ *ppos = i;
+ return tmp-buf;
+}
+
+static ssize_t write_port(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long i = *ppos;
+ const char __user *tmp = buf;
+
+ if (!access_ok(buf, count))
+ return -EFAULT;
+ while (count-- > 0 && i < 65536) {
+ char c;
+
+ if (__get_user(c, tmp)) {
+ if (tmp > buf)
+ break;
+ return -EFAULT;
+ }
+ outb(c, i);
+ i++;
+ tmp++;
+ }
+ *ppos = i;
+ return tmp-buf;
+}
+
+static ssize_t read_null(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t write_null(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return count;
+}
+
+static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
+{
+ return 0;
+}
+
+static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
+{
+ size_t count = iov_iter_count(from);
+ iov_iter_advance(from, count);
+ return count;
+}
+
+static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
+ struct splice_desc *sd)
+{
+ return sd->len;
+}
+
+static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags)
+{
+ return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
+}
+
+static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
+{
+ return 0;
+}
+
+static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
+{
+ size_t written = 0;
+
+ while (iov_iter_count(iter)) {
+ size_t chunk = iov_iter_count(iter), n;
+
+ if (chunk > PAGE_SIZE)
+ chunk = PAGE_SIZE; /* Just for latency reasons */
+ n = iov_iter_zero(chunk, iter);
+ if (!n && iov_iter_count(iter))
+ return written ? written : -EFAULT;
+ written += n;
+ if (signal_pending(current))
+ return written ? written : -ERESTARTSYS;
+ if (!need_resched())
+ continue;
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ return written ? written : -EAGAIN;
+ cond_resched();
+ }
+ return written;
+}
+
+static ssize_t read_zero(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ size_t cleared = 0;
+
+ while (count) {
+ size_t chunk = min_t(size_t, count, PAGE_SIZE);
+ size_t left;
+
+ left = clear_user(buf + cleared, chunk);
+ if (unlikely(left)) {
+ cleared += (chunk - left);
+ if (!cleared)
+ return -EFAULT;
+ break;
+ }
+ cleared += chunk;
+ count -= chunk;
+
+ if (signal_pending(current))
+ break;
+ cond_resched();
+ }
+
+ return cleared;
+}
+
+static int mmap_zero(struct file *file, struct vm_area_struct *vma)
+{
+#ifndef CONFIG_MMU
+ return -ENOSYS;
+#endif
+ if (vma->vm_flags & VM_SHARED)
+ return shmem_zero_setup(vma);
+ vma_set_anonymous(vma);
+ return 0;
+}
+
+static unsigned long get_unmapped_area_zero(struct file *file,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+#ifdef CONFIG_MMU
+ if (flags & MAP_SHARED) {
+ /*
+ * mmap_zero() will call shmem_zero_setup() to create a file,
+ * so use shmem's get_unmapped_area in case it can be huge;
+ * and pass NULL for file as in mmap.c's get_unmapped_area(),
+ * so as not to confuse shmem with our handle on "/dev/zero".
+ */
+ return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
+ }
+
+ /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
+ return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+#else
+ return -ENOSYS;
+#endif
+}
+
+static ssize_t write_full(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENOSPC;
+}
+
+/*
+ * Special lseek() function for /dev/null and /dev/zero. Most notably, you
+ * can fopen() both devices with "a" now. This was previously impossible.
+ * -- SRB.
+ */
+static loff_t null_lseek(struct file *file, loff_t offset, int orig)
+{
+ return file->f_pos = 0;
+}
+
+/*
+ * The memory devices use the full 32/64 bits of the offset, and so we cannot
+ * check against negative addresses: they are ok. The return value is weird,
+ * though, in that case (0).
+ *
+ * Also note that seeking relative to the "end of file" isn't supported:
+ * it has no meaning, so it returns -EINVAL.
+ */
+static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
+{
+ loff_t ret;
+
+ inode_lock(file_inode(file));
+ switch (orig) {
+ case SEEK_CUR:
+ offset += file->f_pos;
+ fallthrough;
+ case SEEK_SET:
+ /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
+ if ((unsigned long long)offset >= -MAX_ERRNO) {
+ ret = -EOVERFLOW;
+ break;
+ }
+ file->f_pos = offset;
+ ret = file->f_pos;
+ force_successful_syscall_return();
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ inode_unlock(file_inode(file));
+ return ret;
+}
+
+static int open_port(struct inode *inode, struct file *filp)
+{
+ int rc;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ rc = security_locked_down(LOCKDOWN_DEV_MEM);
+ if (rc)
+ return rc;
+
+ if (iminor(inode) != DEVMEM_MINOR)
+ return 0;
+
+ /*
+ * Use a unified address space to have a single point to manage
+ * revocations when drivers want to take over a /dev/mem mapped
+ * range.
+ */
+ filp->f_mapping = iomem_get_mapping();
+
+ return 0;
+}
+
+#define zero_lseek null_lseek
+#define full_lseek null_lseek
+#define write_zero write_null
+#define write_iter_zero write_iter_null
+#define open_mem open_port
+
+static const struct file_operations __maybe_unused mem_fops = {
+ .llseek = memory_lseek,
+ .read = read_mem,
+ .write = write_mem,
+ .mmap = mmap_mem,
+ .open = open_mem,
+#ifndef CONFIG_MMU
+ .get_unmapped_area = get_unmapped_area_mem,
+ .mmap_capabilities = memory_mmap_capabilities,
+#endif
+};
+
+static const struct file_operations null_fops = {
+ .llseek = null_lseek,
+ .read = read_null,
+ .write = write_null,
+ .read_iter = read_iter_null,
+ .write_iter = write_iter_null,
+ .splice_write = splice_write_null,
+ .uring_cmd = uring_cmd_null,
+};
+
+static const struct file_operations __maybe_unused port_fops = {
+ .llseek = memory_lseek,
+ .read = read_port,
+ .write = write_port,
+ .open = open_port,
+};
+
+static const struct file_operations zero_fops = {
+ .llseek = zero_lseek,
+ .write = write_zero,
+ .read_iter = read_iter_zero,
+ .read = read_zero,
+ .write_iter = write_iter_zero,
+ .mmap = mmap_zero,
+ .get_unmapped_area = get_unmapped_area_zero,
+#ifndef CONFIG_MMU
+ .mmap_capabilities = zero_mmap_capabilities,
+#endif
+};
+
+static const struct file_operations full_fops = {
+ .llseek = full_lseek,
+ .read_iter = read_iter_zero,
+ .write = write_full,
+};
+
+static const struct memdev {
+ const char *name;
+ const struct file_operations *fops;
+ fmode_t fmode;
+ umode_t mode;
+} devlist[] = {
+#ifdef CONFIG_DEVMEM
+ [DEVMEM_MINOR] = { "mem", &mem_fops, FMODE_UNSIGNED_OFFSET, 0 },
+#endif
+ [3] = { "null", &null_fops, FMODE_NOWAIT, 0666 },
+#ifdef CONFIG_DEVPORT
+ [4] = { "port", &port_fops, 0, 0 },
+#endif
+ [5] = { "zero", &zero_fops, FMODE_NOWAIT, 0666 },
+ [7] = { "full", &full_fops, 0, 0666 },
+ [8] = { "random", &random_fops, FMODE_NOWAIT, 0666 },
+ [9] = { "urandom", &urandom_fops, FMODE_NOWAIT, 0666 },
+#ifdef CONFIG_PRINTK
+ [11] = { "kmsg", &kmsg_fops, 0, 0644 },
+#endif
+};
+
+static int memory_open(struct inode *inode, struct file *filp)
+{
+ int minor;
+ const struct memdev *dev;
+
+ minor = iminor(inode);
+ if (minor >= ARRAY_SIZE(devlist))
+ return -ENXIO;
+
+ dev = &devlist[minor];
+ if (!dev->fops)
+ return -ENXIO;
+
+ filp->f_op = dev->fops;
+ filp->f_mode |= dev->fmode;
+
+ if (dev->fops->open)
+ return dev->fops->open(inode, filp);
+
+ return 0;
+}
+
+static const struct file_operations memory_fops = {
+ .open = memory_open,
+ .llseek = noop_llseek,
+};
+
+static char *mem_devnode(const struct device *dev, umode_t *mode)
+{
+ if (mode && devlist[MINOR(dev->devt)].mode)
+ *mode = devlist[MINOR(dev->devt)].mode;
+ return NULL;
+}
+
+static const struct class mem_class = {
+ .name = "mem",
+ .devnode = mem_devnode,
+};
+
+static int __init chr_dev_init(void)
+{
+ int retval;
+ int minor;
+
+ if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
+ printk("unable to get major %d for memory devs\n", MEM_MAJOR);
+
+ retval = class_register(&mem_class);
+ if (retval)
+ return retval;
+
+ for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
+ if (!devlist[minor].name)
+ continue;
+
+ /*
+ * Create /dev/port?
+ */
+ if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
+ continue;
+
+ device_create(&mem_class, NULL, MKDEV(MEM_MAJOR, minor),
+ NULL, devlist[minor].name);
+ }
+
+ return tty_init();
+}
+
+fs_initcall(chr_dev_init);
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
new file mode 100644
index 0000000000..541edc26ec
--- /dev/null
+++ b/drivers/char/misc.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/drivers/char/misc.c
+ *
+ * Generic misc open routine by Johan Myreen
+ *
+ * Based on code from Linus
+ *
+ * Teemu Rantanen's Microsoft Busmouse support and Derrick Cole's
+ * changes incorporated into 0.97pl4
+ * by Peter Cervasio (pete%q106fm.uucp@wupost.wustl.edu) (08SEP92)
+ * See busmouse.c for particulars.
+ *
+ * Made things a lot more modular - easy to compile in just one or two
+ * of the misc drivers, as they are now completely independent. Linus.
+ *
+ * Support for loadable modules. 8-Sep-95 Philip Blundell <pjb27@cam.ac.uk>
+ *
+ * Fixed a failing symbol register to free the device registration
+ * Alan Cox <alan@lxorguk.ukuu.org.uk> 21-Jan-96
+ *
+ * Dynamic minors and /proc/mice by Alessandro Rubini. 26-Mar-96
+ *
+ * Renamed to misc and miscdevice to be more accurate. Alan Cox 26-Mar-96
+ *
+ * Handling of mouse minor numbers for kerneld:
+ * Idea by Jacques Gelinas <jack@solucorp.qc.ca>,
+ * adapted by Bjorn Ekwall <bj0rn@blox.se>
+ * corrected by Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * Changes for kmod (from kerneld):
+ * Cyrus Durgin <cider@speakeasy.org>
+ *
+ * Added devfs support. Richard Gooch <rgooch@atnf.csiro.au> 10-Jan-1998
+ */
+
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/mutex.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/stat.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/tty.h>
+#include <linux/kmod.h>
+#include <linux/gfp.h>
+
+/*
+ * Head entry for the doubly linked miscdevice list
+ */
+static LIST_HEAD(misc_list);
+static DEFINE_MUTEX(misc_mtx);
+
+/*
+ * Assigned numbers, used for dynamic minors
+ */
+#define DYNAMIC_MINORS 128 /* like dynamic majors */
+static DEFINE_IDA(misc_minors_ida);
+
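+/*
+ * Dynamic minors are handed out from the top of the 0..DYNAMIC_MINORS-1
+ * range first; once that range is exhausted we fall back to minors above
+ * MISC_DYNAMIC_MINOR.
+ */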
+static int misc_minor_alloc(void)
+{
+ int ret;
+
+ ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL);
+ if (ret >= 0) {
+ ret = DYNAMIC_MINORS - ret - 1;
+ } else {
+ ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1,
+ MINORMASK, GFP_KERNEL);
+ }
+ return ret;
+}
+
+static void misc_minor_free(int minor)
+{
+ if (minor < DYNAMIC_MINORS)
+ ida_free(&misc_minors_ida, DYNAMIC_MINORS - minor - 1);
+ else if (minor > MISC_DYNAMIC_MINOR)
+ ida_free(&misc_minors_ida, minor);
+}
+
+#ifdef CONFIG_PROC_FS
+static void *misc_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ mutex_lock(&misc_mtx);
+ return seq_list_start(&misc_list, *pos);
+}
+
+static void *misc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ return seq_list_next(v, &misc_list, pos);
+}
+
+static void misc_seq_stop(struct seq_file *seq, void *v)
+{
+ mutex_unlock(&misc_mtx);
+}
+
+static int misc_seq_show(struct seq_file *seq, void *v)
+{
+ const struct miscdevice *p = list_entry(v, struct miscdevice, list);
+
+ seq_printf(seq, "%3i %s\n", p->minor, p->name ? p->name : "");
+ return 0;
+}
+
+
+static const struct seq_operations misc_seq_ops = {
+ .start = misc_seq_start,
+ .next = misc_seq_next,
+ .stop = misc_seq_stop,
+ .show = misc_seq_show,
+};
+#endif
+
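+/*
+ * Find the miscdevice registered for this minor, loading its module on
+ * demand, and chain the open through the device's own file operations.
+ */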
+static int misc_open(struct inode *inode, struct file *file)
+{
+ int minor = iminor(inode);
+ struct miscdevice *c = NULL, *iter;
+ int err = -ENODEV;
+ const struct file_operations *new_fops = NULL;
+
+ mutex_lock(&misc_mtx);
+
+ list_for_each_entry(iter, &misc_list, list) {
+ if (iter->minor != minor)
+ continue;
+ c = iter;
+ new_fops = fops_get(iter->fops);
+ break;
+ }
+
+ if (!new_fops) {
+ mutex_unlock(&misc_mtx);
+ request_module("char-major-%d-%d", MISC_MAJOR, minor);
+ mutex_lock(&misc_mtx);
+
+ list_for_each_entry(iter, &misc_list, list) {
+ if (iter->minor != minor)
+ continue;
+ c = iter;
+ new_fops = fops_get(iter->fops);
+ break;
+ }
+ if (!new_fops)
+ goto fail;
+ }
+
+ /*
+ * Place the miscdevice in the file's
+ * private_data so it can be used by the
+ * file operations, including f_op->open below
+ */
+ file->private_data = c;
+
+ err = 0;
+ replace_fops(file, new_fops);
+ if (file->f_op->open)
+ err = file->f_op->open(inode, file);
+fail:
+ mutex_unlock(&misc_mtx);
+ return err;
+}
+
+static char *misc_devnode(const struct device *dev, umode_t *mode)
+{
+ const struct miscdevice *c = dev_get_drvdata(dev);
+
+ if (mode && c->mode)
+ *mode = c->mode;
+ if (c->nodename)
+ return kstrdup(c->nodename, GFP_KERNEL);
+ return NULL;
+}
+
+static const struct class misc_class = {
+ .name = "misc",
+ .devnode = misc_devnode,
+};
+
+static const struct file_operations misc_fops = {
+ .owner = THIS_MODULE,
+ .open = misc_open,
+ .llseek = noop_llseek,
+};
+
+/**
+ * misc_register - register a miscellaneous device
+ * @misc: device structure
+ *
+ * Register a miscellaneous device with the kernel. If the minor
+ * number is set to %MISC_DYNAMIC_MINOR a minor number is assigned
+ * and placed in the minor field of the structure. For other cases
+ * the minor number requested is used.
+ *
+ * The structure passed is linked into the kernel and may not be
+ * destroyed until it has been unregistered. By default, an open()
+ * syscall to the device sets file->private_data to point to the
+ * structure. Drivers don't need open in fops for this.
+ *
+ * A zero is returned on success and a negative errno code for
+ * failure.
+ */
+
+int misc_register(struct miscdevice *misc)
+{
+ dev_t dev;
+ int err = 0;
+ bool is_dynamic = (misc->minor == MISC_DYNAMIC_MINOR);
+
+ INIT_LIST_HEAD(&misc->list);
+
+ mutex_lock(&misc_mtx);
+
+ if (is_dynamic) {
+ int i = misc_minor_alloc();
+
+ if (i < 0) {
+ err = -EBUSY;
+ goto out;
+ }
+ misc->minor = i;
+ } else {
+ struct miscdevice *c;
+
+ list_for_each_entry(c, &misc_list, list) {
+ if (c->minor == misc->minor) {
+ err = -EBUSY;
+ goto out;
+ }
+ }
+ }
+
+ dev = MKDEV(MISC_MAJOR, misc->minor);
+
+ misc->this_device =
+ device_create_with_groups(&misc_class, misc->parent, dev,
+ misc, misc->groups, "%s", misc->name);
+ if (IS_ERR(misc->this_device)) {
+ if (is_dynamic) {
+ misc_minor_free(misc->minor);
+ misc->minor = MISC_DYNAMIC_MINOR;
+ }
+ err = PTR_ERR(misc->this_device);
+ goto out;
+ }
+
+ /*
+ * Add it to the front, so that later devices can "override"
+ * earlier defaults
+ */
+ list_add(&misc->list, &misc_list);
+ out:
+ mutex_unlock(&misc_mtx);
+ return err;
+}
+EXPORT_SYMBOL(misc_register);
+
+/**
+ * misc_deregister - unregister a miscellaneous device
+ * @misc: device to unregister
+ *
+ * Unregister a miscellaneous device that was previously
+ * successfully registered with misc_register().
+ */
+
+void misc_deregister(struct miscdevice *misc)
+{
+ if (WARN_ON(list_empty(&misc->list)))
+ return;
+
+ mutex_lock(&misc_mtx);
+ list_del(&misc->list);
+ device_destroy(&misc_class, MKDEV(MISC_MAJOR, misc->minor));
+ misc_minor_free(misc->minor);
+ mutex_unlock(&misc_mtx);
+}
+EXPORT_SYMBOL(misc_deregister);
+
+static int __init misc_init(void)
+{
+ int err;
+ struct proc_dir_entry *ret;
+
+ ret = proc_create_seq("misc", 0, NULL, &misc_seq_ops);
+ err = class_register(&misc_class);
+ if (err)
+ goto fail_remove;
+
+ err = -EIO;
+ if (register_chrdev(MISC_MAJOR, "misc", &misc_fops))
+ goto fail_printk;
+ return 0;
+
+fail_printk:
+ pr_err("unable to get major %d for misc devices\n", MISC_MAJOR);
+ class_unregister(&misc_class);
+fail_remove:
+ if (ret)
+ remove_proc_entry("misc", NULL);
+ return err;
+}
+subsys_initcall(misc_init);
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
new file mode 100644
index 0000000000..b35f651837
--- /dev/null
+++ b/drivers/char/mspec.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights
+ * reserved.
+ */
+
+/*
+ * SN Platform Special Memory (mspec) Support
+ *
+ * This driver exports the SN special memory (mspec) facility to user
+ * processes.
+ * There are two types of memory made available through this driver:
+ * uncached and cached.
+ *
+ * Uncached memory is used for the memory write-combining feature of the
+ * ia64 cpu.
+ *
+ * Cached memory is used for areas that are accessed as cached addresses on
+ * our partition and as uncached addresses from other partitions.
+ * Due to a design constraint of the SN2 Shub, you can not have processors
+ * on the same FSB perform both a cached and uncached reference to the
+ * same cache line. These special memory cached regions prevent the
+ * kernel from ever dropping in a TLB entry and therefore prevent the
+ * processor from ever speculating a cache line from this page.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/numa.h>
+#include <linux/refcount.h>
+#include <asm/page.h>
+#include <linux/atomic.h>
+#include <asm/tlbflush.h>
+#include <asm/uncached.h>
+
+
+#define CACHED_ID "Cached,"
+#define UNCACHED_ID "Uncached"
+#define REVISION "4.0"
+#define MSPEC_BASENAME "mspec"
+
+/*
+ * Page types allocated by the device.
+ */
+enum mspec_page_type {
+ MSPEC_CACHED = 2,
+ MSPEC_UNCACHED
+};
+
+/*
+ * One of these structures is allocated when an mspec region is mmaped. The
+ * structure is pointed to by the vma->vm_private_data field in the vma struct.
+ * This structure is used to record the addresses of the mspec pages.
+ * This structure is shared by all vma's that are split off from the
+ * original vma when split_vma()'s are done.
+ *
+ * The refcnt is incremented atomically because mm->mmap_lock does not
+ * protect in fork case where multiple tasks share the vma_data.
+ */
+struct vma_data {
+ refcount_t refcnt; /* Number of vmas sharing the data. */
+ spinlock_t lock; /* Serialize access to this structure. */
+ int count; /* Number of pages allocated. */
+ enum mspec_page_type type; /* Type of pages allocated. */
+ unsigned long vm_start; /* Original (unsplit) base. */
+ unsigned long vm_end; /* Original (unsplit) end. */
+ unsigned long maddr[]; /* Array of MSPEC addresses. */
+};
+
+/*
+ * mspec_open
+ *
+ * Called when a device mapping is created by a means other than mmap
+ * (via fork, munmap, etc.). Increments the reference count on the
+ * underlying mspec data so it is not freed prematurely.
+ */
+static void
+mspec_open(struct vm_area_struct *vma)
+{
+ struct vma_data *vdata;
+
+ vdata = vma->vm_private_data;
+ refcount_inc(&vdata->refcnt);
+}
+
+/*
+ * mspec_close
+ *
+ * Called when unmapping a device mapping. Frees all mspec pages
+ * belonging to all the vma's sharing this vma_data structure.
+ */
+static void
+mspec_close(struct vm_area_struct *vma)
+{
+ struct vma_data *vdata;
+ int index, last_index;
+ unsigned long my_page;
+
+ vdata = vma->vm_private_data;
+
+ if (!refcount_dec_and_test(&vdata->refcnt))
+ return;
+
+ last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+ for (index = 0; index < last_index; index++) {
+ if (vdata->maddr[index] == 0)
+ continue;
+ /*
+ * Clear the page before sticking it back
+ * into the pool.
+ */
+ my_page = vdata->maddr[index];
+ vdata->maddr[index] = 0;
+ memset((char *)my_page, 0, PAGE_SIZE);
+ uncached_free_page(my_page, 1);
+ }
+
+ kvfree(vdata);
+}
+
+/*
+ * mspec_fault
+ *
+ * Creates a mspec page and maps it to user space.
+ */
+static vm_fault_t
+mspec_fault(struct vm_fault *vmf)
+{
+ unsigned long paddr, maddr;
+ unsigned long pfn;
+ pgoff_t index = vmf->pgoff;
+ struct vma_data *vdata = vmf->vma->vm_private_data;
+
+ maddr = (volatile unsigned long) vdata->maddr[index];
+ if (maddr == 0) {
+ maddr = uncached_alloc_page(numa_node_id(), 1);
+ if (maddr == 0)
+ return VM_FAULT_OOM;
+
+ spin_lock(&vdata->lock);
+ if (vdata->maddr[index] == 0) {
+ vdata->count++;
+ vdata->maddr[index] = maddr;
+ } else {
+ uncached_free_page(maddr, 1);
+ maddr = vdata->maddr[index];
+ }
+ spin_unlock(&vdata->lock);
+ }
+
+ paddr = maddr & ~__IA64_UNCACHED_OFFSET;
+ pfn = paddr >> PAGE_SHIFT;
+
+ return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
+}
+
+static const struct vm_operations_struct mspec_vm_ops = {
+ .open = mspec_open,
+ .close = mspec_close,
+ .fault = mspec_fault,
+};
+
+/*
+ * mspec_mmap
+ *
+ * Called when mmapping the device. Initializes the vma with a fault handler
+ * and private data structure necessary to allocate, track, and free the
+ * underlying pages.
+ */
+static int
+mspec_mmap(struct file *file, struct vm_area_struct *vma,
+ enum mspec_page_type type)
+{
+ struct vma_data *vdata;
+ int pages, vdata_size;
+
+ if (vma->vm_pgoff != 0)
+ return -EINVAL;
+
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+
+ if ((vma->vm_flags & VM_WRITE) == 0)
+ return -EPERM;
+
+ pages = vma_pages(vma);
+ vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
+ vdata = kvzalloc(vdata_size, GFP_KERNEL);
+ if (!vdata)
+ return -ENOMEM;
+
+ vdata->vm_start = vma->vm_start;
+ vdata->vm_end = vma->vm_end;
+ vdata->type = type;
+ spin_lock_init(&vdata->lock);
+ refcount_set(&vdata->refcnt, 1);
+ vma->vm_private_data = vdata;
+
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+ if (vdata->type == MSPEC_UNCACHED)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_ops = &mspec_vm_ops;
+
+ return 0;
+}
+
+static int
+cached_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ return mspec_mmap(file, vma, MSPEC_CACHED);
+}
+
+static int
+uncached_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ return mspec_mmap(file, vma, MSPEC_UNCACHED);
+}
+
+static const struct file_operations cached_fops = {
+ .owner = THIS_MODULE,
+ .mmap = cached_mmap,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice cached_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "mspec_cached",
+ .fops = &cached_fops
+};
+
+static const struct file_operations uncached_fops = {
+ .owner = THIS_MODULE,
+ .mmap = uncached_mmap,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice uncached_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "mspec_uncached",
+ .fops = &uncached_fops
+};
+
+/*
+ * mspec_init
+ *
+ * Called at boot time to initialize the mspec facility.
+ */
+static int __init
+mspec_init(void)
+{
+ int ret;
+
+ ret = misc_register(&cached_miscdev);
+ if (ret) {
+ printk(KERN_ERR "%s: failed to register device %i\n",
+ CACHED_ID, ret);
+ return ret;
+ }
+ ret = misc_register(&uncached_miscdev);
+ if (ret) {
+ printk(KERN_ERR "%s: failed to register device %i\n",
+ UNCACHED_ID, ret);
+ misc_deregister(&cached_miscdev);
+ return ret;
+ }
+
+ printk(KERN_INFO "%s %s initialized devices: %s %s\n",
+ MSPEC_BASENAME, REVISION, CACHED_ID, UNCACHED_ID);
+
+ return 0;
+}
+
+static void __exit
+mspec_exit(void)
+{
+ misc_deregister(&uncached_miscdev);
+ misc_deregister(&cached_miscdev);
+}
+
+module_init(mspec_init);
+module_exit(mspec_exit);
+
+MODULE_AUTHOR("Silicon Graphics, Inc. <linux-altix@sgi.com>");
+MODULE_DESCRIPTION("Driver for SGI SN special memory operations");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/mwave/3780i.c b/drivers/char/mwave/3780i.c
new file mode 100644
index 0000000000..4a8937f805
--- /dev/null
+++ b/drivers/char/mwave/3780i.c
@@ -0,0 +1,738 @@
+/*
+*
+* 3780i.c -- helper routines for the 3780i DSP
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#include <linux/kernel.h>
+#include <linux/unistd.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+#include <linux/sched.h> /* cond_resched() */
+
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <asm/irq.h>
+#include "smapi.h"
+#include "mwavedd.h"
+#include "3780i.h"
+
+static DEFINE_SPINLOCK(dsp_lock);
+
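+/* Throttle successive MSA accesses: give up the CPU and add a short delay. */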
+static void PaceMsaAccess(unsigned short usDspBaseIO)
+{
+ cond_resched();
+ udelay(100);
+ cond_resched();
+}
+
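+/*
+ * Read a 16-bit MSA configuration word: latch the 32-bit address into the
+ * address registers, then read back the data register.
+ */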
+unsigned short dsp3780I_ReadMsaCfg(unsigned short usDspBaseIO,
+ unsigned long ulMsaAddr)
+{
+ unsigned long flags;
+ unsigned short val;
+
+ PRINTK_3(TRACE_3780I,
+ "3780i::dsp3780I_ReadMsaCfg entry usDspBaseIO %x ulMsaAddr %lx\n",
+ usDspBaseIO, ulMsaAddr);
+
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr);
+ OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16));
+ val = InWordDsp(DSP_MsaDataDSISHigh);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ PRINTK_2(TRACE_3780I, "3780i::dsp3780I_ReadMsaCfg exit val %x\n", val);
+
+ return val;
+}
+
+void dsp3780I_WriteMsaCfg(unsigned short usDspBaseIO,
+ unsigned long ulMsaAddr, unsigned short usValue)
+{
+ unsigned long flags;
+
+ PRINTK_4(TRACE_3780I,
+ "3780i::dsp3780i_WriteMsaCfg entry usDspBaseIO %x ulMsaAddr %lx usValue %x\n",
+ usDspBaseIO, ulMsaAddr, usValue);
+
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr);
+ OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16));
+ OutWordDsp(DSP_MsaDataDSISHigh, usValue);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+}
+
+static void dsp3780I_WriteGenCfg(unsigned short usDspBaseIO, unsigned uIndex,
+ unsigned char ucValue)
+{
+ DSP_ISA_SLAVE_CONTROL rSlaveControl;
+ DSP_ISA_SLAVE_CONTROL rSlaveControl_Save;
+
+
+ PRINTK_4(TRACE_3780I,
+ "3780i::dsp3780i_WriteGenCfg entry usDspBaseIO %x uIndex %x ucValue %x\n",
+ usDspBaseIO, uIndex, ucValue);
+
+ MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl);
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780i_WriteGenCfg rSlaveControl %x\n",
+ MKBYTE(rSlaveControl));
+
+ rSlaveControl_Save = rSlaveControl;
+ rSlaveControl.ConfigMode = true;
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780i_WriteGenCfg entry rSlaveControl+ConfigMode %x\n",
+ MKBYTE(rSlaveControl));
+
+ OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl));
+ OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex);
+ OutByteDsp(DSP_ConfigData, ucValue);
+ OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl_Save));
+
+ PRINTK_1(TRACE_3780I, "3780i::dsp3780i_WriteGenCfg exit\n");
+
+
+}
+
+#if 0
+unsigned char dsp3780I_ReadGenCfg(unsigned short usDspBaseIO,
+ unsigned uIndex)
+{
+ DSP_ISA_SLAVE_CONTROL rSlaveControl;
+ DSP_ISA_SLAVE_CONTROL rSlaveControl_Save;
+ unsigned char ucValue;
+
+
+ PRINTK_3(TRACE_3780I,
+ "3780i::dsp3780i_ReadGenCfg entry usDspBaseIO %x uIndex %x\n",
+ usDspBaseIO, uIndex);
+
+ MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl);
+ rSlaveControl_Save = rSlaveControl;
+ rSlaveControl.ConfigMode = true;
+ OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl));
+ OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex);
+ ucValue = InByteDsp(DSP_ConfigData);
+ OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl_Save));
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780i_ReadGenCfg exit ucValue %x\n", ucValue);
+
+
+ return ucValue;
+}
+#endif /* 0 */
+
+int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
+ unsigned short *pIrqMap,
+ unsigned short *pDmaMap)
+{
+ unsigned long flags;
+ unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+ int i;
+ DSP_UART_CFG_1 rUartCfg1;
+ DSP_UART_CFG_2 rUartCfg2;
+ DSP_HBRIDGE_CFG_1 rHBridgeCfg1;
+ DSP_HBRIDGE_CFG_2 rHBridgeCfg2;
+ DSP_BUSMASTER_CFG_1 rBusmasterCfg1;
+ DSP_BUSMASTER_CFG_2 rBusmasterCfg2;
+ DSP_ISA_PROT_CFG rIsaProtCfg;
+ DSP_POWER_MGMT_CFG rPowerMgmtCfg;
+ DSP_HBUS_TIMER_CFG rHBusTimerCfg;
+ DSP_LBUS_TIMEOUT_DISABLE rLBusTimeoutDisable;
+ DSP_CHIP_RESET rChipReset;
+ DSP_CLOCK_CONTROL_1 rClockControl1;
+ DSP_CLOCK_CONTROL_2 rClockControl2;
+ DSP_ISA_SLAVE_CONTROL rSlaveControl;
+ DSP_HBRIDGE_CONTROL rHBridgeControl;
+ unsigned short ChipID = 0;
+ unsigned short tval;
+
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780I_EnableDSP entry pSettings->bDSPEnabled %x\n",
+ pSettings->bDSPEnabled);
+
+
+ if (!pSettings->bDSPEnabled) {
+ PRINTK_ERROR( KERN_ERR "3780i::dsp3780I_EnableDSP: Error: DSP not enabled. Aborting.\n" );
+ return -EIO;
+ }
+
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780i_EnableDSP entry pSettings->bModemEnabled %x\n",
+ pSettings->bModemEnabled);
+
+ if (pSettings->bModemEnabled) {
+ rUartCfg1.Reserved = rUartCfg2.Reserved = 0;
+ rUartCfg1.IrqActiveLow = pSettings->bUartIrqActiveLow;
+ rUartCfg1.IrqPulse = pSettings->bUartIrqPulse;
+ rUartCfg1.Irq =
+ (unsigned char) pIrqMap[pSettings->usUartIrq];
+ switch (pSettings->usUartBaseIO) {
+ case 0x03F8:
+ rUartCfg1.BaseIO = 0;
+ break;
+ case 0x02F8:
+ rUartCfg1.BaseIO = 1;
+ break;
+ case 0x03E8:
+ rUartCfg1.BaseIO = 2;
+ break;
+ case 0x02E8:
+ rUartCfg1.BaseIO = 3;
+ break;
+ }
+ rUartCfg2.Enable = true;
+ }
+
+ rHBridgeCfg1.Reserved = rHBridgeCfg2.Reserved = 0;
+ rHBridgeCfg1.IrqActiveLow = pSettings->bDspIrqActiveLow;
+ rHBridgeCfg1.IrqPulse = pSettings->bDspIrqPulse;
+ rHBridgeCfg1.Irq = (unsigned char) pIrqMap[pSettings->usDspIrq];
+ rHBridgeCfg1.AccessMode = 1;
+ rHBridgeCfg2.Enable = true;
+
+
+ rBusmasterCfg2.Reserved = 0;
+ rBusmasterCfg1.Dma = (unsigned char) pDmaMap[pSettings->usDspDma];
+ rBusmasterCfg1.NumTransfers =
+ (unsigned char) pSettings->usNumTransfers;
+ rBusmasterCfg1.ReRequest = (unsigned char) pSettings->usReRequest;
+ rBusmasterCfg1.MEMCS16 = pSettings->bEnableMEMCS16;
+ rBusmasterCfg2.IsaMemCmdWidth =
+ (unsigned char) pSettings->usIsaMemCmdWidth;
+
+
+ rIsaProtCfg.Reserved = 0;
+ rIsaProtCfg.GateIOCHRDY = pSettings->bGateIOCHRDY;
+
+ rPowerMgmtCfg.Reserved = 0;
+ rPowerMgmtCfg.Enable = pSettings->bEnablePwrMgmt;
+
+ rHBusTimerCfg.LoadValue =
+ (unsigned char) pSettings->usHBusTimerLoadValue;
+
+ rLBusTimeoutDisable.Reserved = 0;
+ rLBusTimeoutDisable.DisableTimeout =
+ pSettings->bDisableLBusTimeout;
+
+ MKWORD(rChipReset) = ~pSettings->usChipletEnable;
+
+ rClockControl1.Reserved1 = rClockControl1.Reserved2 = 0;
+ rClockControl1.N_Divisor = pSettings->usN_Divisor;
+ rClockControl1.M_Multiplier = pSettings->usM_Multiplier;
+
+ rClockControl2.Reserved = 0;
+ rClockControl2.PllBypass = pSettings->bPllBypass;
+
+ /* Issue a soft reset to the chip */
+ /* Note: Since we may be coming in with 3780i clocks suspended, we must keep
+ * soft-reset active for 10ms.
+ */
+ rSlaveControl.ClockControl = 0;
+ rSlaveControl.SoftReset = true;
+ rSlaveControl.ConfigMode = false;
+ rSlaveControl.Reserved = 0;
+
+ PRINTK_4(TRACE_3780I,
+ "3780i::dsp3780i_EnableDSP usDspBaseIO %x index %x taddr %x\n",
+ usDspBaseIO, DSP_IsaSlaveControl,
+ usDspBaseIO + DSP_IsaSlaveControl);
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780i_EnableDSP rSlaveControl %x\n",
+ MKWORD(rSlaveControl));
+
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl));
+ MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl);
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780i_EnableDSP rSlaveControl 2 %x\n", tval);
+
+
+ for (i = 0; i < 11; i++)
+ udelay(2000);
+
+ rSlaveControl.SoftReset = false;
+ OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl));
+
+ MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl);
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780i_EnableDSP rSlaveControl 3 %x\n", tval);
+
+
+ /* Program our general configuration registers */
+ WriteGenCfg(DSP_HBridgeCfg1Index, MKBYTE(rHBridgeCfg1));
+ WriteGenCfg(DSP_HBridgeCfg2Index, MKBYTE(rHBridgeCfg2));
+ WriteGenCfg(DSP_BusMasterCfg1Index, MKBYTE(rBusmasterCfg1));
+ WriteGenCfg(DSP_BusMasterCfg2Index, MKBYTE(rBusmasterCfg2));
+ WriteGenCfg(DSP_IsaProtCfgIndex, MKBYTE(rIsaProtCfg));
+ WriteGenCfg(DSP_PowerMgCfgIndex, MKBYTE(rPowerMgmtCfg));
+ WriteGenCfg(DSP_HBusTimerCfgIndex, MKBYTE(rHBusTimerCfg));
+
+ if (pSettings->bModemEnabled) {
+ WriteGenCfg(DSP_UartCfg1Index, MKBYTE(rUartCfg1));
+ WriteGenCfg(DSP_UartCfg2Index, MKBYTE(rUartCfg2));
+ }
+
+
+ rHBridgeControl.EnableDspInt = false;
+ rHBridgeControl.MemAutoInc = true;
+ rHBridgeControl.IoAutoInc = false;
+ rHBridgeControl.DiagnosticMode = false;
+
+ PRINTK_3(TRACE_3780I,
+ "3780i::dsp3780i_EnableDSP DSP_HBridgeControl %x rHBridgeControl %x\n",
+ DSP_HBridgeControl, MKWORD(rHBridgeControl));
+
+ OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ WriteMsaCfg(DSP_LBusTimeoutDisable, MKWORD(rLBusTimeoutDisable));
+ WriteMsaCfg(DSP_ClockControl_1, MKWORD(rClockControl1));
+ WriteMsaCfg(DSP_ClockControl_2, MKWORD(rClockControl2));
+ WriteMsaCfg(DSP_ChipReset, MKWORD(rChipReset));
+
+ ChipID = ReadMsaCfg(DSP_ChipID);
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780I_EnableDSP exiting bRC=true, ChipID %x\n",
+ ChipID);
+
+ return 0;
+}
+
+int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings)
+{
+ unsigned long flags;
+ unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+ DSP_ISA_SLAVE_CONTROL rSlaveControl;
+
+
+ PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP entry\n");
+
+ rSlaveControl.ClockControl = 0;
+ rSlaveControl.SoftReset = true;
+ rSlaveControl.ConfigMode = false;
+ rSlaveControl.Reserved = 0;
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl));
+
+ udelay(5);
+
+ rSlaveControl.ClockControl = 1;
+ OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ udelay(5);
+
+
+ PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP exit\n");
+
+ return 0;
+}
+
+int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings)
+{
+ unsigned long flags;
+ unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+ DSP_BOOT_DOMAIN rBootDomain;
+ DSP_HBRIDGE_CONTROL rHBridgeControl;
+
+
+ PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset entry\n");
+
+ spin_lock_irqsave(&dsp_lock, flags);
+ /* Mask DSP to PC interrupt */
+ MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
+
+ PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rHBridgeControl %x\n",
+ MKWORD(rHBridgeControl));
+
+ rHBridgeControl.EnableDspInt = false;
+ OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ /* Reset the core via the boot domain register */
+ rBootDomain.ResetCore = true;
+ rBootDomain.Halt = true;
+ rBootDomain.NMI = true;
+ rBootDomain.Reserved = 0;
+
+ PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rBootDomain %x\n",
+ MKWORD(rBootDomain));
+
+ WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
+
+ /* Reset all the chiplets and then reactivate them */
+ WriteMsaCfg(DSP_ChipReset, 0xFFFF);
+ udelay(5);
+ WriteMsaCfg(DSP_ChipReset,
+ (unsigned short) (~pSettings->usChipletEnable));
+
+
+ PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset exit bRC=0\n");
+
+ return 0;
+}
+
+
+int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings)
+{
+ unsigned long flags;
+ unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+ DSP_BOOT_DOMAIN rBootDomain;
+ DSP_HBRIDGE_CONTROL rHBridgeControl;
+
+
+ PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run entry\n");
+
+
+ /* Transition the core to a running state */
+ rBootDomain.ResetCore = true;
+ rBootDomain.Halt = false;
+ rBootDomain.NMI = true;
+ rBootDomain.Reserved = 0;
+ WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
+
+ udelay(5);
+
+ rBootDomain.ResetCore = false;
+ WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
+ udelay(5);
+
+ rBootDomain.NMI = false;
+ WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
+ udelay(5);
+
+ /* Enable DSP to PC interrupt */
+ spin_lock_irqsave(&dsp_lock, flags);
+ MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
+ rHBridgeControl.EnableDspInt = true;
+
+ PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Run rHBridgeControl %x\n",
+ MKWORD(rHBridgeControl));
+
+ OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+
+ PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run exit bRC=true\n");
+
+ return 0;
+}
+
+
+int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+ unsigned uCount, unsigned long ulDSPAddr)
+{
+ unsigned long flags;
+ unsigned short __user *pusBuffer = pvBuffer;
+ unsigned short val;
+
+
+ PRINTK_5(TRACE_3780I,
+ "3780i::dsp3780I_ReadDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
+ usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
+
+
+ /* Set the initial MSA address. No adjustments need to be made to data store addresses */
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
+ OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ /* Transfer the memory block */
+ while (uCount-- != 0) {
+ spin_lock_irqsave(&dsp_lock, flags);
+ val = InWordDsp(DSP_MsaDataDSISHigh);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ if(put_user(val, pusBuffer++))
+ return -EFAULT;
+
+ PRINTK_3(TRACE_3780I,
+ "3780I::dsp3780I_ReadDStore uCount %x val %x\n",
+ uCount, val);
+
+ PaceMsaAccess(usDspBaseIO);
+ }
+
+
+ PRINTK_1(TRACE_3780I,
+ "3780I::dsp3780I_ReadDStore exit bRC=true\n");
+
+ return 0;
+}
+
+int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO,
+ void __user *pvBuffer, unsigned uCount,
+ unsigned long ulDSPAddr)
+{
+ unsigned long flags;
+ unsigned short __user *pusBuffer = pvBuffer;
+ unsigned short val;
+
+
+ PRINTK_5(TRACE_3780I,
+ "3780i::dsp3780I_ReadAndClearDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
+ usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
+
+
+ /* Set the initial MSA address. No adjustments need to be made to data store addresses */
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
+ OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ /* Transfer the memory block */
+ while (uCount-- != 0) {
+ spin_lock_irqsave(&dsp_lock, flags);
+ val = InWordDsp(DSP_ReadAndClear);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ if(put_user(val, pusBuffer++))
+ return -EFAULT;
+
+ PRINTK_3(TRACE_3780I,
+ "3780I::dsp3780I_ReadAndClearDStore uCount %x val %x\n",
+ uCount, val);
+
+ PaceMsaAccess(usDspBaseIO);
+ }
+
+
+ PRINTK_1(TRACE_3780I,
+ "3780I::dsp3780I_ReadAndClearDStore exit bRC=true\n");
+
+ return 0;
+}
+
+
+int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+ unsigned uCount, unsigned long ulDSPAddr)
+{
+ unsigned long flags;
+ unsigned short __user *pusBuffer = pvBuffer;
+
+
+ PRINTK_5(TRACE_3780I,
+ "3780i::dsp3780I_WriteDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
+ usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
+
+
+ /* Set the initial MSA address. No adjustments need to be made to data store addresses */
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
+ OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ /* Transfer the memory block */
+ while (uCount-- != 0) {
+ unsigned short val;
+ if(get_user(val, pusBuffer++))
+ return -EFAULT;
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaDataDSISHigh, val);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ PRINTK_3(TRACE_3780I,
+ "3780I::dsp3780I_WriteDStore uCount %x val %x\n",
+ uCount, val);
+
+ PaceMsaAccess(usDspBaseIO);
+ }
+
+
+ PRINTK_1(TRACE_3780I,
+ "3780I::dsp3780I_WriteDStore exit bRC=true\n");
+
+ return 0;
+}
+
+
+int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+ unsigned uCount, unsigned long ulDSPAddr)
+{
+ unsigned long flags;
+ unsigned short __user *pusBuffer = pvBuffer;
+
+ PRINTK_5(TRACE_3780I,
+ "3780i::dsp3780I_ReadIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
+ usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
+
+ /*
+ * Set the initial MSA address. To convert from an instruction store
+ * address to an MSA address, shift the address two bits to the left
+ * and set bit 22.
+ */
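+ /* e.g. i-store address 0x100 becomes MSA address (0x100 << 2) | (1 << 22) = 0x400400 */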
+ ulDSPAddr = (ulDSPAddr << 2) | (1 << 22);
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
+ OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ /* Transfer the memory block */
+ while (uCount-- != 0) {
+ unsigned short val_lo, val_hi;
+ spin_lock_irqsave(&dsp_lock, flags);
+ val_lo = InWordDsp(DSP_MsaDataISLow);
+ val_hi = InWordDsp(DSP_MsaDataDSISHigh);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ if(put_user(val_lo, pusBuffer++))
+ return -EFAULT;
+ if(put_user(val_hi, pusBuffer++))
+ return -EFAULT;
+
+ PRINTK_4(TRACE_3780I,
+ "3780I::dsp3780I_ReadIStore uCount %x val_lo %x val_hi %x\n",
+ uCount, val_lo, val_hi);
+
+ PaceMsaAccess(usDspBaseIO);
+
+ }
+
+ PRINTK_1(TRACE_3780I,
+ "3780I::dsp3780I_ReadIStore exit bRC=true\n");
+
+ return 0;
+}
+
+
+int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+ unsigned uCount, unsigned long ulDSPAddr)
+{
+ unsigned long flags;
+ unsigned short __user *pusBuffer = pvBuffer;
+
+ PRINTK_5(TRACE_3780I,
+ "3780i::dsp3780I_WriteIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
+ usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
+
+
+ /*
+ * Set the initial MSA address. To convert from an instruction store
+ * address to an MSA address, shift the address two bits to the left
+ * and set bit 22.
+ */
+ ulDSPAddr = (ulDSPAddr << 2) | (1 << 22);
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
+ OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ /* Transfer the memory block */
+ while (uCount-- != 0) {
+ unsigned short val_lo, val_hi;
+ if(get_user(val_lo, pusBuffer++))
+ return -EFAULT;
+ if(get_user(val_hi, pusBuffer++))
+ return -EFAULT;
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaDataISLow, val_lo);
+ OutWordDsp(DSP_MsaDataDSISHigh, val_hi);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ PRINTK_4(TRACE_3780I,
+ "3780I::dsp3780I_WriteIStore uCount %x val_lo %x val_hi %x\n",
+ uCount, val_lo, val_hi);
+
+ PaceMsaAccess(usDspBaseIO);
+
+ }
+
+ PRINTK_1(TRACE_3780I,
+ "3780I::dsp3780I_WriteIStore exit bRC=true\n");
+
+ return 0;
+}
+
+
+int dsp3780I_GetIPCSource(unsigned short usDspBaseIO,
+ unsigned short *pusIPCSource)
+{
+ unsigned long flags;
+ DSP_HBRIDGE_CONTROL rHBridgeControl;
+ unsigned short temp;
+
+
+ PRINTK_3(TRACE_3780I,
+ "3780i::dsp3780I_GetIPCSource entry usDspBaseIO %x pusIPCSource %p\n",
+ usDspBaseIO, pusIPCSource);
+
+ /*
+ * Disable DSP to PC interrupts, read the interrupt register,
+ * clear the pending IPC bits, and reenable DSP to PC interrupts
+ */
+ spin_lock_irqsave(&dsp_lock, flags);
+ MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
+ rHBridgeControl.EnableDspInt = false;
+ OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+
+ *pusIPCSource = InWordDsp(DSP_Interrupt);
+ temp = (unsigned short) ~(*pusIPCSource);
+
+ PRINTK_3(TRACE_3780I,
+ "3780i::dsp3780I_GetIPCSource, usIPCSource %x ~ %x\n",
+ *pusIPCSource, temp);
+
+ OutWordDsp(DSP_Interrupt, (unsigned short) ~(*pusIPCSource));
+
+ rHBridgeControl.EnableDspInt = true;
+ OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780I_GetIPCSource exit usIPCSource %x\n",
+ *pusIPCSource);
+
+ return 0;
+}
diff --git a/drivers/char/mwave/3780i.h b/drivers/char/mwave/3780i.h
new file mode 100644
index 0000000000..95164246af
--- /dev/null
+++ b/drivers/char/mwave/3780i.h
@@ -0,0 +1,358 @@
+/*
+*
+* 3780i.h -- declarations for 3780i.c
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#ifndef _LINUX_3780I_H
+#define _LINUX_3780I_H
+
+#include <asm/io.h>
+
+/* DSP I/O port offsets and definitions */
+#define DSP_IsaSlaveControl 0x0000 /* ISA slave control register */
+#define DSP_IsaSlaveStatus 0x0001 /* ISA slave status register */
+#define DSP_ConfigAddress 0x0002 /* General config address register */
+#define DSP_ConfigData 0x0003 /* General config data register */
+#define DSP_HBridgeControl 0x0002 /* HBridge control register */
+#define DSP_MsaAddrLow 0x0004 /* MSP System Address, low word */
+#define DSP_MsaAddrHigh 0x0006 /* MSP System Address, high word */
+#define DSP_MsaDataDSISHigh 0x0008 /* MSA data register: d-store word or high byte of i-store */
+#define DSP_MsaDataISLow 0x000A /* MSA data register: low word of i-store */
+#define DSP_ReadAndClear 0x000C /* MSA read and clear data register */
+#define DSP_Interrupt 0x000E /* Interrupt register (IPC source) */
+
+typedef struct {
+ unsigned char ClockControl:1; /* RW: Clock control: 0=normal, 1=stop 3780i clocks */
+ unsigned char SoftReset:1; /* RW: Soft reset 0=normal, 1=soft reset active */
+ unsigned char ConfigMode:1; /* RW: Configuration mode, 0=normal, 1=config mode */
+ unsigned short Reserved:13; /* 0: Reserved */
+} DSP_ISA_SLAVE_CONTROL;
+
+
+typedef struct {
+ unsigned short EnableDspInt:1; /* RW: Enable DSP to X86 ISA interrupt 0=mask it, 1=enable it */
+ unsigned short MemAutoInc:1; /* RW: Memory address auto increment, 0=disable, 1=enable */
+ unsigned short IoAutoInc:1; /* RW: I/O address auto increment, 0=disable, 1=enable */
+ unsigned short DiagnosticMode:1; /* RW: Diagnostic mode 0=normal, 1=diagnostic mode */
+ unsigned short IsaPacingTimer:12; /* R: ISA access pacing timer: count of core cycles stolen */
+} DSP_HBRIDGE_CONTROL;
+
+
+/* DSP register indexes used with the configuration register address (index) register */
+#define DSP_UartCfg1Index 0x0003 /* UART config register 1 */
+#define DSP_UartCfg2Index 0x0004 /* UART config register 2 */
+#define DSP_HBridgeCfg1Index 0x0007 /* HBridge config register 1 */
+#define DSP_HBridgeCfg2Index 0x0008 /* HBridge config register 2 */
+#define DSP_BusMasterCfg1Index 0x0009 /* ISA bus master config register 1 */
+#define DSP_BusMasterCfg2Index 0x000A /* ISA bus master config register 2 */
+#define DSP_IsaProtCfgIndex 0x000F /* ISA protocol control register */
+#define DSP_PowerMgCfgIndex 0x0010 /* Low power suspend/resume enable */
+#define DSP_HBusTimerCfgIndex 0x0011 /* HBUS timer load value */
+
+typedef struct {
+ unsigned char IrqActiveLow:1; /* RW: IRQ active high or low: 0=high, 1=low */
+ unsigned char IrqPulse:1; /* RW: IRQ pulse or level: 0=level, 1=pulse */
+ unsigned char Irq:3; /* RW: IRQ selection */
+ unsigned char BaseIO:2; /* RW: Base I/O selection */
+ unsigned char Reserved:1; /* 0: Reserved */
+} DSP_UART_CFG_1;
+
+typedef struct {
+ unsigned char Enable:1; /* RW: Enable I/O and IRQ: 0=false, 1=true */
+ unsigned char Reserved:7; /* 0: Reserved */
+} DSP_UART_CFG_2;
+
+typedef struct {
+ unsigned char IrqActiveLow:1; /* RW: IRQ active high=0 or low=1 */
+ unsigned char IrqPulse:1; /* RW: IRQ pulse=1 or level=0 */
+ unsigned char Irq:3; /* RW: IRQ selection */
+ unsigned char AccessMode:1; /* RW: 16-bit register access method 0=byte, 1=word */
+ unsigned char Reserved:2; /* 0: Reserved */
+} DSP_HBRIDGE_CFG_1;
+
+typedef struct {
+ unsigned char Enable:1; /* RW: enable I/O and IRQ: 0=false, 1=true */
+ unsigned char Reserved:7; /* 0: Reserved */
+} DSP_HBRIDGE_CFG_2;
+
+
+typedef struct {
+ unsigned char Dma:3; /* RW: DMA channel selection */
+ unsigned char NumTransfers:2; /* RW: Maximum # of transfers once granted the ISA bus */
+ unsigned char ReRequest:2; /* RW: Minimum delay between releasing the ISA bus and requesting it again */
+ unsigned char MEMCS16:1; /* RW: ISA signal MEMCS16: 0=disabled, 1=enabled */
+} DSP_BUSMASTER_CFG_1;
+
+typedef struct {
+ unsigned char IsaMemCmdWidth:2; /* RW: ISA memory command width */
+ unsigned char Reserved:6; /* 0: Reserved */
+} DSP_BUSMASTER_CFG_2;
+
+
+typedef struct {
+ unsigned char GateIOCHRDY:1; /* RW: Enable IOCHRDY gating: 0=false, 1=true */
+ unsigned char Reserved:7; /* 0: Reserved */
+} DSP_ISA_PROT_CFG;
+
+typedef struct {
+ unsigned char Enable:1; /* RW: Enable low power suspend/resume 0=false, 1=true */
+ unsigned char Reserved:7; /* 0: Reserved */
+} DSP_POWER_MGMT_CFG;
+
+typedef struct {
+ unsigned char LoadValue:8; /* RW: HBUS timer load value */
+} DSP_HBUS_TIMER_CFG;
+
+
+
+/* DSP registers that exist in MSA I/O space */
+#define DSP_ChipID 0x80000000
+#define DSP_MspBootDomain 0x80000580
+#define DSP_LBusTimeoutDisable 0x80000580
+#define DSP_ClockControl_1 0x8000058A
+#define DSP_ClockControl_2 0x8000058C
+#define DSP_ChipReset 0x80000588
+#define DSP_GpioModeControl_15_8 0x80000082
+#define DSP_GpioDriverEnable_15_8 0x80000076
+#define DSP_GpioOutputData_15_8 0x80000072
+
+typedef struct {
+ unsigned short NMI:1; /* RW: non-maskable interrupt */
+ unsigned short Halt:1; /* RW: Halt MSP clock */
+ unsigned short ResetCore:1; /* RW: Reset MSP core interface */
+ unsigned short Reserved:13; /* 0: Reserved */
+} DSP_BOOT_DOMAIN;
+
+typedef struct {
+ unsigned short DisableTimeout:1; /* RW: Disable LBus timeout */
+ unsigned short Reserved:15; /* 0: Reserved */
+} DSP_LBUS_TIMEOUT_DISABLE;
+
+typedef struct {
+ unsigned short Memory:1; /* RW: Reset memory interface */
+ unsigned short SerialPort1:1; /* RW: Reset serial port 1 interface */
+ unsigned short SerialPort2:1; /* RW: Reset serial port 2 interface */
+ unsigned short SerialPort3:1; /* RW: Reset serial port 3 interface */
+ unsigned short Gpio:1; /* RW: Reset GPIO interface */
+ unsigned short Dma:1; /* RW: Reset DMA interface */
+ unsigned short SoundBlaster:1; /* RW: Reset soundblaster interface */
+ unsigned short Uart:1; /* RW: Reset UART interface */
+ unsigned short Midi:1; /* RW: Reset MIDI interface */
+ unsigned short IsaMaster:1; /* RW: Reset ISA master interface */
+ unsigned short Reserved:6; /* 0: Reserved */
+} DSP_CHIP_RESET;
+
+typedef struct {
+ unsigned short N_Divisor:6; /* RW: (N) PLL output clock divisor */
+ unsigned short Reserved1:2; /* 0: reserved */
+ unsigned short M_Multiplier:6; /* RW: (M) PLL feedback clock multiplier */
+ unsigned short Reserved2:2; /* 0: reserved */
+} DSP_CLOCK_CONTROL_1;
+
+typedef struct {
+ unsigned short PllBypass:1; /* RW: PLL Bypass */
+ unsigned short Reserved:15; /* 0: Reserved */
+} DSP_CLOCK_CONTROL_2;
+
+typedef struct {
+ unsigned short Latch8:1;
+ unsigned short Latch9:1;
+ unsigned short Latch10:1;
+ unsigned short Latch11:1;
+ unsigned short Latch12:1;
+ unsigned short Latch13:1;
+ unsigned short Latch14:1;
+ unsigned short Latch15:1;
+ unsigned short Mask8:1;
+ unsigned short Mask9:1;
+ unsigned short Mask10:1;
+ unsigned short Mask11:1;
+ unsigned short Mask12:1;
+ unsigned short Mask13:1;
+ unsigned short Mask14:1;
+ unsigned short Mask15:1;
+} DSP_GPIO_OUTPUT_DATA_15_8;
+
+typedef struct {
+ unsigned short Enable8:1;
+ unsigned short Enable9:1;
+ unsigned short Enable10:1;
+ unsigned short Enable11:1;
+ unsigned short Enable12:1;
+ unsigned short Enable13:1;
+ unsigned short Enable14:1;
+ unsigned short Enable15:1;
+ unsigned short Mask8:1;
+ unsigned short Mask9:1;
+ unsigned short Mask10:1;
+ unsigned short Mask11:1;
+ unsigned short Mask12:1;
+ unsigned short Mask13:1;
+ unsigned short Mask14:1;
+ unsigned short Mask15:1;
+} DSP_GPIO_DRIVER_ENABLE_15_8;
+
+typedef struct {
+ unsigned short GpioMode8:2;
+ unsigned short GpioMode9:2;
+ unsigned short GpioMode10:2;
+ unsigned short GpioMode11:2;
+ unsigned short GpioMode12:2;
+ unsigned short GpioMode13:2;
+ unsigned short GpioMode14:2;
+ unsigned short GpioMode15:2;
+} DSP_GPIO_MODE_15_8;
+
+/* Component masks that are defined in dspmgr.h */
+#define MW_ADC_MASK 0x0001
+#define MW_AIC2_MASK 0x0006
+#define MW_MIDI_MASK 0x0008
+#define MW_CDDAC_MASK 0x8001
+#define MW_AIC1_MASK 0xE006
+#define MW_UART_MASK 0xE00A
+#define MW_ACI_MASK 0xE00B
+
+/*
+* Definition of 3780i configuration structure. Unless otherwise stated,
+* these values are provided as input to the 3780i support layer. At present,
+* the only values maintained by the 3780i support layer are the saved UART
+* registers.
+*/
+typedef struct _DSP_3780I_CONFIG_SETTINGS {
+
+ /* Location of base configuration register */
+ unsigned short usBaseConfigIO;
+
+ /* Enables for various DSP components */
+ int bDSPEnabled;
+ int bModemEnabled;
+ int bInterruptClaimed;
+
+ /* IRQ, DMA, and Base I/O addresses for various DSP components */
+ unsigned short usDspIrq;
+ unsigned short usDspDma;
+ unsigned short usDspBaseIO;
+ unsigned short usUartIrq;
+ unsigned short usUartBaseIO;
+
+ /* IRQ modes for various DSP components */
+ int bDspIrqActiveLow;
+ int bUartIrqActiveLow;
+ int bDspIrqPulse;
+ int bUartIrqPulse;
+
+ /* Card abilities */
+ unsigned uIps;
+ unsigned uDStoreSize;
+ unsigned uIStoreSize;
+ unsigned uDmaBandwidth;
+
+ /* Adapter specific 3780i settings */
+ unsigned short usNumTransfers;
+ unsigned short usReRequest;
+ int bEnableMEMCS16;
+ unsigned short usIsaMemCmdWidth;
+ int bGateIOCHRDY;
+ int bEnablePwrMgmt;
+ unsigned short usHBusTimerLoadValue;
+ int bDisableLBusTimeout;
+ unsigned short usN_Divisor;
+ unsigned short usM_Multiplier;
+ int bPllBypass;
+ unsigned short usChipletEnable; /* Used with the chip reset register to enable specific chiplets */
+
+ /* Saved UART registers. These are maintained by the 3780i support layer. */
+ int bUartSaved; /* True after a successful save of the UART registers */
+ unsigned char ucIER; /* Interrupt enable register */
+ unsigned char ucFCR; /* FIFO control register */
+ unsigned char ucLCR; /* Line control register */
+ unsigned char ucMCR; /* Modem control register */
+ unsigned char ucSCR; /* Scratch register */
+ unsigned char ucDLL; /* Divisor latch, low byte */
+ unsigned char ucDLM; /* Divisor latch, high byte */
+} DSP_3780I_CONFIG_SETTINGS;
+
+
+/* 3780i support functions */
+int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
+ unsigned short *pIrqMap,
+ unsigned short *pDmaMap);
+int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings);
+int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings);
+int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings);
+int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+ unsigned uCount, unsigned long ulDSPAddr);
+int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO,
+ void __user *pvBuffer, unsigned uCount,
+ unsigned long ulDSPAddr);
+int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+ unsigned uCount, unsigned long ulDSPAddr);
+int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+ unsigned uCount, unsigned long ulDSPAddr);
+int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+ unsigned uCount, unsigned long ulDSPAddr);
+unsigned short dsp3780I_ReadMsaCfg(unsigned short usDspBaseIO,
+ unsigned long ulMsaAddr);
+void dsp3780I_WriteMsaCfg(unsigned short usDspBaseIO,
+ unsigned long ulMsaAddr, unsigned short usValue);
+int dsp3780I_GetIPCSource(unsigned short usDspBaseIO,
+ unsigned short *pusIPCSource);
+
+/* I/O port access macros */
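+/* MKWORD/MKBYTE reinterpret a bitfield structure as a raw 16-bit word or 8-bit byte */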
+#define MKWORD(var) (*((unsigned short *)(&var)))
+#define MKBYTE(var) (*((unsigned char *)(&var)))
+
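+/*
+* The configuration and I/O helpers below expand against a local variable
+* named usDspBaseIO, which must be in scope at each call site.
+*/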
+#define WriteMsaCfg(addr,value) dsp3780I_WriteMsaCfg(usDspBaseIO,addr,value)
+#define ReadMsaCfg(addr) dsp3780I_ReadMsaCfg(usDspBaseIO,addr)
+#define WriteGenCfg(index,value) dsp3780I_WriteGenCfg(usDspBaseIO,index,value)
+#define ReadGenCfg(index) dsp3780I_ReadGenCfg(usDspBaseIO,index)
+
+#define InWordDsp(index) inw(usDspBaseIO+index)
+#define InByteDsp(index) inb(usDspBaseIO+index)
+#define OutWordDsp(index,value) outw(value,usDspBaseIO+index)
+#define OutByteDsp(index,value) outb(value,usDspBaseIO+index)
+
+#endif
diff --git a/drivers/char/mwave/Makefile b/drivers/char/mwave/Makefile
new file mode 100644
index 0000000000..a24fe96e3c
--- /dev/null
+++ b/drivers/char/mwave/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for ACP Modem (Mwave).
+#
+# See the README file in this directory for more info. <paulsch@us.ibm.com>
+#
+
+obj-$(CONFIG_MWAVE) += mwave.o
+
+mwave-y := mwavedd.o smapi.o tp3780i.o 3780i.o
+
+# To have the mwave driver disable other uarts if necessary
+# ccflags-y := -DMWAVE_FUTZ_WITH_OTHER_DEVICES
+
+# To compile in lots (~20 KiB) of run-time enableable printk()s for debugging:
+ccflags-y += -DMW_TRACE
diff --git a/drivers/char/mwave/README b/drivers/char/mwave/README
new file mode 100644
index 0000000000..c2a58f428b
--- /dev/null
+++ b/drivers/char/mwave/README
@@ -0,0 +1,47 @@
+Module options
+--------------
+
+The mwave module takes the following options. Note that these options
+are not saved by the BIOS and so do not persist after unload and reload.
+
+ mwave_debug=value, where value is bitwise OR of trace flags:
+ 0x0001 mwavedd api tracing
+ 0x0002 smapi api tracing
+ 0x0004 3780i tracing
+ 0x0008 tp3780i tracing
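+
+ For example, mwave_debug=0x0005 (0x0001 | 0x0004) enables both mwavedd
+ api tracing and 3780i tracing.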
+
+ Tracing only occurs if the driver has been compiled with the
+ MW_TRACE macro #defined (i.e. ccflags-y += -DMW_TRACE
+ in the Makefile).
+
+ mwave_3780i_irq=5/7/10/11/15
+ If the DSP IRQ has not been set up and stored in the BIOS by the
+ ThinkPad configuration utility, this parameter allows the
+ IRQ used by the DSP to be configured.
+
+ mwave_3780i_io=0x130/0x350/0x0070/0xDB0
+ If the DSP I/O range has not been set up and stored in the BIOS by
+ the ThinkPad configuration utility, this parameter allows the
+ I/O range used by the DSP to be configured.
+
+ mwave_uart_irq=3/4
+ If the mwave's UART IRQ has not been set up and stored in the BIOS
+ by the ThinkPad configuration utility, this parameter allows the
+ IRQ used by the mwave UART to be configured.
+
+ mwave_uart_io=0x3f8/0x2f8/0x3E8/0x2E8
+ If the UART I/O range has not been set up and stored in the BIOS by
+ the ThinkPad configuration utility, this parameter allows the
+ I/O range used by the mwave UART to be configured.
+
+Example to enable the 3780i DSP using ttyS1 resources:
+
+ insmod mwave mwave_3780i_irq=10 mwave_3780i_io=0x0130 mwave_uart_irq=3 mwave_uart_io=0x2f8
+
+Accessing the driver
+--------------------
+
+You must also create a node for the driver:
+ mkdir -p /dev/modems
+ mknod --mode=660 /dev/modems/mwave c 10 219
+
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
new file mode 100644
index 0000000000..11272d605e
--- /dev/null
+++ b/drivers/char/mwave/mwavedd.c
@@ -0,0 +1,703 @@
+/*
+*
+* mwavedd.c -- mwave device driver
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/major.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+#include <linux/serial.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/serial_8250.h>
+#include <linux/nospec.h>
+#include "smapi.h"
+#include "mwavedd.h"
+#include "3780i.h"
+#include "tp3780i.h"
+
+MODULE_DESCRIPTION("3780i Advanced Communications Processor (Mwave) driver");
+MODULE_AUTHOR("Mike Sullivan and Paul Schroeder");
+MODULE_LICENSE("GPL");
+
+/*
+* These parameters support the setting of MWave resources. Note that no
+* checks are made against other devices (i.e. superio) for conflicts.
+* We'll rely on users running the tpctl utility to do that for now.
+*/
+static DEFINE_MUTEX(mwave_mutex);
+int mwave_debug = 0;
+int mwave_3780i_irq = 0;
+int mwave_3780i_io = 0;
+int mwave_uart_irq = 0;
+int mwave_uart_io = 0;
+module_param(mwave_debug, int, 0);
+module_param_hw(mwave_3780i_irq, int, irq, 0);
+module_param_hw(mwave_3780i_io, int, ioport, 0);
+module_param_hw(mwave_uart_irq, int, irq, 0);
+module_param_hw(mwave_uart_io, int, ioport, 0);
+
+static int mwave_open(struct inode *inode, struct file *file);
+static int mwave_close(struct inode *inode, struct file *file);
+static long mwave_ioctl(struct file *filp, unsigned int iocmd,
+ unsigned long ioarg);
+
+MWAVE_DEVICE_DATA mwave_s_mdd;
+
+static int mwave_open(struct inode *inode, struct file *file)
+{
+ unsigned int retval = 0;
+
+ PRINTK_3(TRACE_MWAVE,
+ "mwavedd::mwave_open, entry inode %p file %p\n",
+ inode, file);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_open, exit return retval %x\n", retval);
+
+ return retval;
+}
+
+static int mwave_close(struct inode *inode, struct file *file)
+{
+ unsigned int retval = 0;
+
+ PRINTK_3(TRACE_MWAVE,
+ "mwavedd::mwave_close, entry inode %p file %p\n",
+ inode, file);
+
+ PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_close, exit retval %x\n",
+ retval);
+
+ return retval;
+}
+
+static long mwave_ioctl(struct file *file, unsigned int iocmd,
+ unsigned long ioarg)
+{
+ unsigned int retval = 0;
+ pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+ void __user *arg = (void __user *)ioarg;
+
+ PRINTK_4(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl, entry file %p cmd %x arg %x\n",
+ file, iocmd, (int) ioarg);
+
+ switch (iocmd) {
+
+ case IOCTL_MW_RESET:
+ PRINTK_1(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl, IOCTL_MW_RESET"
+ " calling tp3780I_ResetDSP\n");
+ mutex_lock(&mwave_mutex);
+ retval = tp3780I_ResetDSP(&pDrvData->rBDData);
+ mutex_unlock(&mwave_mutex);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl, IOCTL_MW_RESET"
+ " retval %x from tp3780I_ResetDSP\n",
+ retval);
+ break;
+
+ case IOCTL_MW_RUN:
+ PRINTK_1(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl, IOCTL_MW_RUN"
+ " calling tp3780I_StartDSP\n");
+ mutex_lock(&mwave_mutex);
+ retval = tp3780I_StartDSP(&pDrvData->rBDData);
+ mutex_unlock(&mwave_mutex);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl, IOCTL_MW_RUN"
+ " retval %x from tp3780I_StartDSP\n",
+ retval);
+ break;
+
+ case IOCTL_MW_DSP_ABILITIES: {
+ MW_ABILITIES rAbilities;
+
+ PRINTK_1(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl,"
+ " IOCTL_MW_DSP_ABILITIES calling"
+ " tp3780I_QueryAbilities\n");
+ mutex_lock(&mwave_mutex);
+ retval = tp3780I_QueryAbilities(&pDrvData->rBDData,
+ &rAbilities);
+ mutex_unlock(&mwave_mutex);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
+ " retval %x from tp3780I_QueryAbilities\n",
+ retval);
+ if (retval == 0) {
+ if( copy_to_user(arg, &rAbilities,
+ sizeof(MW_ABILITIES)) )
+ return -EFAULT;
+ }
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
+ " exit retval %x\n",
+ retval);
+ }
+ break;
+
+ case IOCTL_MW_READ_DATA:
+ case IOCTL_MW_READCLEAR_DATA: {
+ MW_READWRITE rReadData;
+ unsigned short __user *pusBuffer = NULL;
+
+ if( copy_from_user(&rReadData, arg,
+ sizeof(MW_READWRITE)) )
+ return -EFAULT;
+ pusBuffer = (unsigned short __user *) (rReadData.pBuf);
+
+ PRINTK_4(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_READ_DATA,"
+ " size %lx, ioarg %lx pusBuffer %p\n",
+ rReadData.ulDataLength, ioarg, pusBuffer);
+ mutex_lock(&mwave_mutex);
+ retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
+ iocmd,
+ pusBuffer,
+ rReadData.ulDataLength,
+ rReadData.usDspAddress);
+ mutex_unlock(&mwave_mutex);
+ }
+ break;
+
+ case IOCTL_MW_READ_INST: {
+ MW_READWRITE rReadData;
+ unsigned short __user *pusBuffer = NULL;
+
+ if( copy_from_user(&rReadData, arg,
+ sizeof(MW_READWRITE)) )
+ return -EFAULT;
+ pusBuffer = (unsigned short __user *) (rReadData.pBuf);
+
+ PRINTK_4(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_READ_INST,"
+ " size %lx, ioarg %lx pusBuffer %p\n",
+ rReadData.ulDataLength / 2, ioarg,
+ pusBuffer);
+ mutex_lock(&mwave_mutex);
+ retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
+ iocmd, pusBuffer,
+ rReadData.ulDataLength / 2,
+ rReadData.usDspAddress);
+ mutex_unlock(&mwave_mutex);
+ }
+ break;
+
+ case IOCTL_MW_WRITE_DATA: {
+ MW_READWRITE rWriteData;
+ unsigned short __user *pusBuffer = NULL;
+
+ if( copy_from_user(&rWriteData, arg,
+ sizeof(MW_READWRITE)) )
+ return -EFAULT;
+ pusBuffer = (unsigned short __user *) (rWriteData.pBuf);
+
+ PRINTK_4(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_WRITE_DATA,"
+ " size %lx, ioarg %lx pusBuffer %p\n",
+ rWriteData.ulDataLength, ioarg,
+ pusBuffer);
+ mutex_lock(&mwave_mutex);
+ retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
+ iocmd, pusBuffer,
+ rWriteData.ulDataLength,
+ rWriteData.usDspAddress);
+ mutex_unlock(&mwave_mutex);
+ }
+ break;
+
+ case IOCTL_MW_WRITE_INST: {
+ MW_READWRITE rWriteData;
+ unsigned short __user *pusBuffer = NULL;
+
+ if( copy_from_user(&rWriteData, arg,
+ sizeof(MW_READWRITE)) )
+ return -EFAULT;
+ pusBuffer = (unsigned short __user *)(rWriteData.pBuf);
+
+ PRINTK_4(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_WRITE_INST,"
+ " size %lx, ioarg %lx pusBuffer %p\n",
+ rWriteData.ulDataLength, ioarg,
+ pusBuffer);
+ mutex_lock(&mwave_mutex);
+ retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData,
+ iocmd, pusBuffer,
+ rWriteData.ulDataLength,
+ rWriteData.usDspAddress);
+ mutex_unlock(&mwave_mutex);
+ }
+ break;
+
+ case IOCTL_MW_REGISTER_IPC: {
+ unsigned int ipcnum = (unsigned int) ioarg;
+
+ if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd::mwave_ioctl:"
+ " IOCTL_MW_REGISTER_IPC:"
+ " Error: Invalid ipcnum %x\n",
+ ipcnum);
+ return -EINVAL;
+ }
+ ipcnum = array_index_nospec(ipcnum,
+ ARRAY_SIZE(pDrvData->IPCs));
+ PRINTK_3(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
+ " ipcnum %x entry usIntCount %x\n",
+ ipcnum,
+ pDrvData->IPCs[ipcnum].usIntCount);
+
+ mutex_lock(&mwave_mutex);
+ pDrvData->IPCs[ipcnum].bIsHere = false;
+ pDrvData->IPCs[ipcnum].bIsEnabled = true;
+ mutex_unlock(&mwave_mutex);
+
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
+ " ipcnum %x exit\n",
+ ipcnum);
+ }
+ break;
+
+ case IOCTL_MW_GET_IPC: {
+ unsigned int ipcnum = (unsigned int) ioarg;
+
+ if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd::mwave_ioctl:"
+ " IOCTL_MW_GET_IPC: Error:"
+ " Invalid ipcnum %x\n", ipcnum);
+ return -EINVAL;
+ }
+ ipcnum = array_index_nospec(ipcnum,
+ ARRAY_SIZE(pDrvData->IPCs));
+ PRINTK_3(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
+ " ipcnum %x, usIntCount %x\n",
+ ipcnum,
+ pDrvData->IPCs[ipcnum].usIntCount);
+
+ mutex_lock(&mwave_mutex);
+ if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
+ DECLARE_WAITQUEUE(wait, current);
+
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl, thread for"
+ " ipc %x going to sleep\n",
+ ipcnum);
+ add_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
+ pDrvData->IPCs[ipcnum].bIsHere = true;
+ set_current_state(TASK_INTERRUPTIBLE);
+ /* check whether an event was signalled by */
+ /* the interrupt handler while we were gone */
+ if (pDrvData->IPCs[ipcnum].usIntCount == 1) { /* first int has occurred (race condition) */
+ pDrvData->IPCs[ipcnum].usIntCount = 2; /* first int has been handled */
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl"
+ " IOCTL_MW_GET_IPC ipcnum %x"
+ " handling first int\n",
+ ipcnum);
+ } else { /* either 1st int has not yet occurred, or we have already handled the first int */
+ schedule();
+ if (pDrvData->IPCs[ipcnum].usIntCount == 1) {
+ pDrvData->IPCs[ipcnum].usIntCount = 2;
+ }
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl"
+ " IOCTL_MW_GET_IPC ipcnum %x"
+ " woke up and returning to"
+ " application\n",
+ ipcnum);
+ }
+ pDrvData->IPCs[ipcnum].bIsHere = false;
+ remove_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
+ set_current_state(TASK_RUNNING);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC,"
+ " returning thread for ipc %x"
+ " processing\n",
+ ipcnum);
+ }
+ mutex_unlock(&mwave_mutex);
+ }
+ break;
+
+ case IOCTL_MW_UNREGISTER_IPC: {
+ unsigned int ipcnum = (unsigned int) ioarg;
+
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_UNREGISTER_IPC"
+ " ipcnum %x\n",
+ ipcnum);
+ if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd::mwave_ioctl:"
+ " IOCTL_MW_UNREGISTER_IPC:"
+ " Error: Invalid ipcnum %x\n",
+ ipcnum);
+ return -EINVAL;
+ }
+ ipcnum = array_index_nospec(ipcnum,
+ ARRAY_SIZE(pDrvData->IPCs));
+ mutex_lock(&mwave_mutex);
+ if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
+ pDrvData->IPCs[ipcnum].bIsEnabled = false;
+ if (pDrvData->IPCs[ipcnum].bIsHere == true) {
+ wake_up_interruptible(&pDrvData->IPCs[ipcnum].ipc_wait_queue);
+ }
+ }
+ mutex_unlock(&mwave_mutex);
+ }
+ break;
+
+ default:
+ return -ENOTTY;
+ } /* switch */
+
+ PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval);
+
+ return retval;
+}
+
+
+static ssize_t mwave_read(struct file *file, char __user *buf, size_t count,
+ loff_t * ppos)
+{
+ PRINTK_5(TRACE_MWAVE,
+ "mwavedd::mwave_read entry file %p, buf %p, count %zx ppos %p\n",
+ file, buf, count, ppos);
+
+ return -EINVAL;
+}
+
+
+static ssize_t mwave_write(struct file *file, const char __user *buf,
+ size_t count, loff_t * ppos)
+{
+ PRINTK_5(TRACE_MWAVE,
+ "mwavedd::mwave_write entry file %p, buf %p,"
+ " count %zx ppos %p\n",
+ file, buf, count, ppos);
+
+ return -EINVAL;
+}
+
+
+static int register_serial_portandirq(unsigned int port, int irq)
+{
+ struct uart_8250_port uart;
+
+ switch ( port ) {
+ case 0x3f8:
+ case 0x2f8:
+ case 0x3e8:
+ case 0x2e8:
+ /* OK */
+ break;
+ default:
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd::register_serial_portandirq:"
+ " Error: Illegal port %x\n", port );
+ return -1;
+ } /* switch */
+ /* port is okay */
+
+ switch ( irq ) {
+ case 3:
+ case 4:
+ case 5:
+ case 7:
+ /* OK */
+ break;
+ default:
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd::register_serial_portandirq:"
+ " Error: Illegal irq %x\n", irq );
+ return -1;
+ } /* switch */
+ /* irq is okay */
+
+ memset(&uart, 0, sizeof(uart));
+
+ uart.port.uartclk = 1843200;
+ uart.port.iobase = port;
+ uart.port.irq = irq;
+ uart.port.iotype = UPIO_PORT;
+ uart.port.flags = UPF_SHARE_IRQ;
+ return serial8250_register_8250_port(&uart);
+}
+
+
+static const struct file_operations mwave_fops = {
+ .owner = THIS_MODULE,
+ .read = mwave_read,
+ .write = mwave_write,
+ .unlocked_ioctl = mwave_ioctl,
+ .open = mwave_open,
+ .release = mwave_close,
+ .llseek = default_llseek,
+};
+
+
+static struct miscdevice mwave_misc_dev = { MWAVE_MINOR, "mwave", &mwave_fops };
+
+#if 0 /* totally b0rked */
+/*
+ * sysfs support <paulsch@us.ibm.com>
+ */
+
+struct device mwave_device;
+
+/* Prevent code redundancy, create a macro for mwave_show_* functions. */
+#define mwave_show_function(attr_name, format_string, field) \
+static ssize_t mwave_show_##attr_name(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ DSP_3780I_CONFIG_SETTINGS *pSettings = \
+ &mwave_s_mdd.rBDData.rDspSettings; \
+ return sprintf(buf, format_string, pSettings->field); \
+}
+
+/* All of our attributes are read attributes. */
+#define mwave_dev_rd_attr(attr_name, format_string, field) \
+ mwave_show_function(attr_name, format_string, field) \
+static DEVICE_ATTR(attr_name, S_IRUGO, mwave_show_##attr_name, NULL)
+
+mwave_dev_rd_attr (3780i_dma, "%i\n", usDspDma);
+mwave_dev_rd_attr (3780i_irq, "%i\n", usDspIrq);
+mwave_dev_rd_attr (3780i_io, "%#.4x\n", usDspBaseIO);
+mwave_dev_rd_attr (uart_irq, "%i\n", usUartIrq);
+mwave_dev_rd_attr (uart_io, "%#.4x\n", usUartBaseIO);
+
+static struct device_attribute * const mwave_dev_attrs[] = {
+ &dev_attr_3780i_dma,
+ &dev_attr_3780i_irq,
+ &dev_attr_3780i_io,
+ &dev_attr_uart_irq,
+ &dev_attr_uart_io,
+};
+#endif
+
+/*
+* mwave_init is called on module load
+*
+* mwave_exit is called on module unload
+* mwave_exit is also used to clean up after an aborted mwave_init
+*/
+static void mwave_exit(void)
+{
+ pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+
+ PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit entry\n");
+
+#if 0
+ for (i = 0; i < pDrvData->nr_registered_attrs; i++)
+ device_remove_file(&mwave_device, mwave_dev_attrs[i]);
+ pDrvData->nr_registered_attrs = 0;
+
+ if (pDrvData->device_registered) {
+ device_unregister(&mwave_device);
+ pDrvData->device_registered = false;
+ }
+#endif
+
+ if ( pDrvData->sLine >= 0 ) {
+ serial8250_unregister_port(pDrvData->sLine);
+ }
+ if (pDrvData->bMwaveDevRegistered) {
+ misc_deregister(&mwave_misc_dev);
+ }
+ if (pDrvData->bDSPEnabled) {
+ tp3780I_DisableDSP(&pDrvData->rBDData);
+ }
+ if (pDrvData->bResourcesClaimed) {
+ tp3780I_ReleaseResources(&pDrvData->rBDData);
+ }
+ if (pDrvData->bBDInitialized) {
+ tp3780I_Cleanup(&pDrvData->rBDData);
+ }
+
+ PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit exit\n");
+}
+
+module_exit(mwave_exit);
+
+static int __init mwave_init(void)
+{
+ int i;
+ int retval = 0;
+ pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+
+ PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_init entry\n");
+
+ memset(&mwave_s_mdd, 0, sizeof(MWAVE_DEVICE_DATA));
+
+ pDrvData->bBDInitialized = false;
+ pDrvData->bResourcesClaimed = false;
+ pDrvData->bDSPEnabled = false;
+ pDrvData->bDSPReset = false;
+ pDrvData->bMwaveDevRegistered = false;
+ pDrvData->sLine = -1;
+
+ for (i = 0; i < ARRAY_SIZE(pDrvData->IPCs); i++) {
+ pDrvData->IPCs[i].bIsEnabled = false;
+ pDrvData->IPCs[i].bIsHere = false;
+ pDrvData->IPCs[i].usIntCount = 0; /* no ints received yet */
+ init_waitqueue_head(&pDrvData->IPCs[i].ipc_wait_queue);
+ }
+
+ retval = tp3780I_InitializeBoardData(&pDrvData->rBDData);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_init, return from tp3780I_InitializeBoardData"
+ " retval %x\n",
+ retval);
+ if (retval) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd::mwave_init: Error:"
+ " Failed to initialize board data\n");
+ goto cleanup_error;
+ }
+ pDrvData->bBDInitialized = true;
+
+ retval = tp3780I_CalcResources(&pDrvData->rBDData);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_init, return from tp3780I_CalcResources"
+ " retval %x\n",
+ retval);
+ if (retval) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd:mwave_init: Error:"
+ " Failed to calculate resources\n");
+ goto cleanup_error;
+ }
+
+ retval = tp3780I_ClaimResources(&pDrvData->rBDData);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_init, return from tp3780I_ClaimResources"
+ " retval %x\n",
+ retval);
+ if (retval) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd:mwave_init: Error:"
+ " Failed to claim resources\n");
+ goto cleanup_error;
+ }
+ pDrvData->bResourcesClaimed = true;
+
+ retval = tp3780I_EnableDSP(&pDrvData->rBDData);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_init, return from tp3780I_EnableDSP"
+ " retval %x\n",
+ retval);
+ if (retval) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd:mwave_init: Error:"
+ " Failed to enable DSP\n");
+ goto cleanup_error;
+ }
+ pDrvData->bDSPEnabled = true;
+
+ if (misc_register(&mwave_misc_dev) < 0) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd:mwave_init: Error:"
+ " Failed to register misc device\n");
+ goto cleanup_error;
+ }
+ pDrvData->bMwaveDevRegistered = true;
+
+ pDrvData->sLine = register_serial_portandirq(
+ pDrvData->rBDData.rDspSettings.usUartBaseIO,
+ pDrvData->rBDData.rDspSettings.usUartIrq
+ );
+ if (pDrvData->sLine < 0) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd:mwave_init: Error:"
+ " Failed to register serial driver\n");
+ goto cleanup_error;
+ }
+ /* uart is registered */
+
+#if 0
+ /* sysfs */
+ memset(&mwave_device, 0, sizeof (struct device));
+ dev_set_name(&mwave_device, "mwave");
+
+ if (device_register(&mwave_device))
+ goto cleanup_error;
+ pDrvData->device_registered = true;
+ for (i = 0; i < ARRAY_SIZE(mwave_dev_attrs); i++) {
+ if(device_create_file(&mwave_device, mwave_dev_attrs[i])) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd:mwave_init: Error:"
+ " Failed to create sysfs file %s\n",
+ mwave_dev_attrs[i]->attr.name);
+ goto cleanup_error;
+ }
+ pDrvData->nr_registered_attrs++;
+ }
+#endif
+
+ /* SUCCESS! */
+ return 0;
+
+cleanup_error:
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd::mwave_init: Error:"
+ " Failed to initialize\n");
+ mwave_exit(); /* clean up */
+
+ return -EIO;
+}
+
+module_init(mwave_init);
+
diff --git a/drivers/char/mwave/mwavedd.h b/drivers/char/mwave/mwavedd.h
new file mode 100644
index 0000000000..21cb09c7be
--- /dev/null
+++ b/drivers/char/mwave/mwavedd.h
@@ -0,0 +1,152 @@
+/*
+*
+* mwavedd.h -- declarations for mwave device driver
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#ifndef _LINUX_MWAVEDD_H
+#define _LINUX_MWAVEDD_H
+#include "3780i.h"
+#include "tp3780i.h"
+#include "smapi.h"
+#include "mwavepub.h"
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+extern int mwave_debug;
+extern int mwave_3780i_irq;
+extern int mwave_3780i_io;
+extern int mwave_uart_irq;
+extern int mwave_uart_io;
+
+#define PRINTK_ERROR printk
+#define KERN_ERR_MWAVE KERN_ERR "mwave: "
+
+#define TRACE_MWAVE 0x0001
+#define TRACE_SMAPI 0x0002
+#define TRACE_3780I 0x0004
+#define TRACE_TP3780I 0x0008
+
+#ifdef MW_TRACE
+#define PRINTK_1(f,s) \
+ if (f & (mwave_debug)) { \
+ printk(s); \
+ }
+
+#define PRINTK_2(f,s,v1) \
+ if (f & (mwave_debug)) { \
+ printk(s,v1); \
+ }
+
+#define PRINTK_3(f,s,v1,v2) \
+ if (f & (mwave_debug)) { \
+ printk(s,v1,v2); \
+ }
+
+#define PRINTK_4(f,s,v1,v2,v3) \
+ if (f & (mwave_debug)) { \
+ printk(s,v1,v2,v3); \
+ }
+
+#define PRINTK_5(f,s,v1,v2,v3,v4) \
+ if (f & (mwave_debug)) { \
+ printk(s,v1,v2,v3,v4); \
+ }
+
+#define PRINTK_6(f,s,v1,v2,v3,v4,v5) \
+ if (f & (mwave_debug)) { \
+ printk(s,v1,v2,v3,v4,v5); \
+ }
+
+#define PRINTK_7(f,s,v1,v2,v3,v4,v5,v6) \
+ if (f & (mwave_debug)) { \
+ printk(s,v1,v2,v3,v4,v5,v6); \
+ }
+
+#define PRINTK_8(f,s,v1,v2,v3,v4,v5,v6,v7) \
+ if (f & (mwave_debug)) { \
+ printk(s,v1,v2,v3,v4,v5,v6,v7); \
+ }
+
+#else
+#define PRINTK_1(f,s)
+#define PRINTK_2(f,s,v1)
+#define PRINTK_3(f,s,v1,v2)
+#define PRINTK_4(f,s,v1,v2,v3)
+#define PRINTK_5(f,s,v1,v2,v3,v4)
+#define PRINTK_6(f,s,v1,v2,v3,v4,v5)
+#define PRINTK_7(f,s,v1,v2,v3,v4,v5,v6)
+#define PRINTK_8(f,s,v1,v2,v3,v4,v5,v6,v7)
+#endif
+
+
+typedef struct _MWAVE_IPC {
+ unsigned short usIntCount; /* 0=none, 1=first, 2=greater than 1st */
+ bool bIsEnabled;
+ bool bIsHere;
+ /* entry spin lock */
+ wait_queue_head_t ipc_wait_queue;
+} MWAVE_IPC;
+
+typedef struct _MWAVE_DEVICE_DATA {
+ THINKPAD_BD_DATA rBDData; /* board driver's data area */
+ unsigned long ulIPCSource_ISR; /* IPC source bits for recently processed intr, set during ISR processing */
+ unsigned long ulIPCSource_DPC; /* IPC source bits for recently processed intr, set during DPC processing */
+ bool bBDInitialized;
+ bool bResourcesClaimed;
+ bool bDSPEnabled;
+ bool bDSPReset;
+ MWAVE_IPC IPCs[16];
+ bool bMwaveDevRegistered;
+ short sLine;
+ int nr_registered_attrs;
+ int device_registered;
+
+} MWAVE_DEVICE_DATA, *pMWAVE_DEVICE_DATA;
+
+extern MWAVE_DEVICE_DATA mwave_s_mdd;
+
+#endif
diff --git a/drivers/char/mwave/mwavepub.h b/drivers/char/mwave/mwavepub.h
new file mode 100644
index 0000000000..60c961ae23
--- /dev/null
+++ b/drivers/char/mwave/mwavepub.h
@@ -0,0 +1,89 @@
+/*
+*
+* mwavepub.h -- PUBLIC declarations for the mwave driver
+* and applications using it
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#ifndef _LINUX_MWAVEPUB_H
+#define _LINUX_MWAVEPUB_H
+
+#include <linux/miscdevice.h>
+
+
+typedef struct _MW_ABILITIES {
+ unsigned long instr_per_sec;
+ unsigned long data_size;
+ unsigned long inst_size;
+ unsigned long bus_dma_bw;
+ unsigned short uart_enable;
+ short component_count;
+ unsigned long component_list[7];
+ char mwave_os_name[16];
+ char bios_task_name[16];
+} MW_ABILITIES, *pMW_ABILITIES;
+
+
+typedef struct _MW_READWRITE {
+ unsigned short usDspAddress; /* The dsp address */
+ unsigned long ulDataLength; /* The size in bytes of the data or user buffer */
+	void __user *pBuf;	/* Input: variable-sized buffer */
+} MW_READWRITE, *pMW_READWRITE;
+
+#define IOCTL_MW_RESET _IO(MWAVE_MINOR,1)
+#define IOCTL_MW_RUN _IO(MWAVE_MINOR,2)
+#define IOCTL_MW_DSP_ABILITIES _IOR(MWAVE_MINOR,3,MW_ABILITIES)
+#define IOCTL_MW_READ_DATA _IOR(MWAVE_MINOR,4,MW_READWRITE)
+#define IOCTL_MW_READCLEAR_DATA _IOR(MWAVE_MINOR,5,MW_READWRITE)
+#define IOCTL_MW_READ_INST _IOR(MWAVE_MINOR,6,MW_READWRITE)
+#define IOCTL_MW_WRITE_DATA _IOW(MWAVE_MINOR,7,MW_READWRITE)
+#define IOCTL_MW_WRITE_INST _IOW(MWAVE_MINOR,8,MW_READWRITE)
+#define IOCTL_MW_REGISTER_IPC _IOW(MWAVE_MINOR,9,int)
+#define IOCTL_MW_UNREGISTER_IPC _IOW(MWAVE_MINOR,10,int)
+#define IOCTL_MW_GET_IPC _IOW(MWAVE_MINOR,11,int)
+#define IOCTL_MW_TRACE _IOR(MWAVE_MINOR,12,MW_READWRITE)
+
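+/*
+ * These ioctls form the user-space interface of the mwave misc device:
+ * MW_RESET and MW_RUN control the DSP, MW_DSP_ABILITIES returns an
+ * MW_ABILITIES block, the READ/READCLEAR/WRITE calls transfer data or
+ * instruction store contents described by an MW_READWRITE (DSP address,
+ * byte count, user buffer), and the IPC ioctls manage the 16 interrupt
+ * notification channels declared in mwavedd.h.
+ */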
+
+#endif
diff --git a/drivers/char/mwave/smapi.c b/drivers/char/mwave/smapi.c
new file mode 100644
index 0000000000..f8d79d393b
--- /dev/null
+++ b/drivers/char/mwave/smapi.c
@@ -0,0 +1,572 @@
+/*
+*
+* smapi.c -- SMAPI interface routines
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#include <linux/kernel.h>
+#include <linux/mc146818rtc.h> /* CMOS defines */
+#include "smapi.h"
+#include "mwavedd.h"
+
+static unsigned short g_usSmapiPort = 0;
+
+
+static int smapi_request(unsigned short inBX, unsigned short inCX,
+ unsigned short inDI, unsigned short inSI,
+ unsigned short *outAX, unsigned short *outBX,
+ unsigned short *outCX, unsigned short *outDX,
+ unsigned short *outDI, unsigned short *outSI)
+{
+ unsigned short myoutAX = 2, *pmyoutAX = &myoutAX;
+ unsigned short myoutBX = 3, *pmyoutBX = &myoutBX;
+ unsigned short myoutCX = 4, *pmyoutCX = &myoutCX;
+ unsigned short myoutDX = 5, *pmyoutDX = &myoutDX;
+ unsigned short myoutDI = 6, *pmyoutDI = &myoutDI;
+ unsigned short myoutSI = 7, *pmyoutSI = &myoutSI;
+ unsigned short usSmapiOK = -EIO, *pusSmapiOK = &usSmapiOK;
+ unsigned int inBXCX = (inBX << 16) | inCX;
+ unsigned int inDISI = (inDI << 16) | inSI;
+ int retval = 0;
+
+ PRINTK_5(TRACE_SMAPI, "inBX %x inCX %x inDI %x inSI %x\n",
+ inBX, inCX, inDI, inSI);
+
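+	/*
+	 * Invoke the SMAPI BIOS: function code 0x5380 is loaded into AX, the
+	 * caller's arguments into BX/CX and DI/SI, and the call is triggered
+	 * by OUT instructions to the SMAPI control port (g_usSmapiPort, read
+	 * from CMOS in smapi_init()) and to port 0x4F.  On return, AH == 0
+	 * indicates success; only then are the result registers copied out
+	 * and usSmapiOK set to 1.
+	 */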
+ __asm__ __volatile__("movw $0x5380,%%ax\n\t"
+ "movl %7,%%ebx\n\t"
+ "shrl $16, %%ebx\n\t"
+ "movw %7,%%cx\n\t"
+ "movl %8,%%edi\n\t"
+ "shrl $16,%%edi\n\t"
+ "movw %8,%%si\n\t"
+ "movw %9,%%dx\n\t"
+ "out %%al,%%dx\n\t"
+ "out %%al,$0x4F\n\t"
+ "cmpb $0x53,%%ah\n\t"
+ "je 2f\n\t"
+ "1:\n\t"
+ "orb %%ah,%%ah\n\t"
+ "jnz 2f\n\t"
+ "movw %%ax,%0\n\t"
+ "movw %%bx,%1\n\t"
+ "movw %%cx,%2\n\t"
+ "movw %%dx,%3\n\t"
+ "movw %%di,%4\n\t"
+ "movw %%si,%5\n\t"
+ "movw $1,%6\n\t"
+ "2:\n\t":"=m"(*(unsigned short *) pmyoutAX),
+ "=m"(*(unsigned short *) pmyoutBX),
+ "=m"(*(unsigned short *) pmyoutCX),
+ "=m"(*(unsigned short *) pmyoutDX),
+ "=m"(*(unsigned short *) pmyoutDI),
+ "=m"(*(unsigned short *) pmyoutSI),
+ "=m"(*(unsigned short *) pusSmapiOK)
+ :"m"(inBXCX), "m"(inDISI), "m"(g_usSmapiPort)
+ :"%eax", "%ebx", "%ecx", "%edx", "%edi",
+ "%esi");
+
+ PRINTK_8(TRACE_SMAPI,
+ "myoutAX %x myoutBX %x myoutCX %x myoutDX %x myoutDI %x myoutSI %x usSmapiOK %x\n",
+ myoutAX, myoutBX, myoutCX, myoutDX, myoutDI, myoutSI,
+ usSmapiOK);
+ *outAX = myoutAX;
+ *outBX = myoutBX;
+ *outCX = myoutCX;
+ *outDX = myoutDX;
+ *outDI = myoutDI;
+ *outSI = myoutSI;
+
+ retval = (usSmapiOK == 1) ? 0 : -EIO;
+ PRINTK_2(TRACE_SMAPI, "smapi::smapi_request exit retval %x\n", retval);
+ return retval;
+}
+
+
+int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
+{
+ int bRC;
+ unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
+ static const unsigned short ausDspBases[] = {
+ 0x0030, 0x4E30, 0x8E30, 0xCE30,
+ 0x0130, 0x0350, 0x0070, 0x0DB0 };
+ static const unsigned short ausUartBases[] = {
+ 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
+
+ PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg entry\n");
+
+ bRC = smapi_request(0x1802, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Error: Could not get DSP Settings. Aborting.\n");
+ return bRC;
+ }
+
+ PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n");
+
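+	/*
+	 * SMAPI call 0x1802 returns the DSP configuration packed in the
+	 * output registers: BX bit 8 = DSP present, CX bit 0 = DSP enabled,
+	 * SI low byte = IRQ, SI high byte = DMA channel, and the DI low byte
+	 * is an index into the table of legal base I/O addresses above.
+	 */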
+ pSettings->bDSPPresent = ((usBX & 0x0100) != 0);
+ pSettings->bDSPEnabled = ((usCX & 0x0001) != 0);
+ pSettings->usDspIRQ = usSI & 0x00FF;
+ pSettings->usDspDMA = (usSI & 0xFF00) >> 8;
+ if ((usDI & 0x00FF) < ARRAY_SIZE(ausDspBases)) {
+ pSettings->usDspBaseIO = ausDspBases[usDI & 0x00FF];
+ } else {
+ pSettings->usDspBaseIO = 0;
+ }
+ PRINTK_6(TRACE_SMAPI,
+ "smapi::smapi_query_DSP_cfg get DSP Settings bDSPPresent %x bDSPEnabled %x usDspIRQ %x usDspDMA %x usDspBaseIO %x\n",
+ pSettings->bDSPPresent, pSettings->bDSPEnabled,
+ pSettings->usDspIRQ, pSettings->usDspDMA,
+ pSettings->usDspBaseIO);
+
+ /* check for illegal values */
+ if ( pSettings->usDspBaseIO == 0 )
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP base I/O address is 0\n");
+ if ( pSettings->usDspIRQ == 0 )
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP IRQ line is 0\n");
+
+ bRC = smapi_request(0x1804, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) {
+ PRINTK_ERROR("smapi::smapi_query_DSP_cfg: Error: Could not get DSP modem settings. Aborting.\n");
+ return bRC;
+ }
+
+ PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n");
+
+ pSettings->bModemEnabled = ((usCX & 0x0001) != 0);
+ pSettings->usUartIRQ = usSI & 0x000F;
+ if (((usSI & 0xFF00) >> 8) < ARRAY_SIZE(ausUartBases)) {
+ pSettings->usUartBaseIO = ausUartBases[(usSI & 0xFF00) >> 8];
+ } else {
+ pSettings->usUartBaseIO = 0;
+ }
+
+ PRINTK_4(TRACE_SMAPI,
+ "smapi::smapi_query_DSP_cfg get DSP modem settings bModemEnabled %x usUartIRQ %x usUartBaseIO %x\n",
+ pSettings->bModemEnabled,
+ pSettings->usUartIRQ,
+ pSettings->usUartBaseIO);
+
+ /* check for illegal values */
+ if ( pSettings->usUartBaseIO == 0 )
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART base I/O address is 0\n");
+ if ( pSettings->usUartIRQ == 0 )
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART IRQ line is 0\n");
+
+ PRINTK_2(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg exit bRC %x\n", bRC);
+
+ return bRC;
+}
+
+
+int smapi_set_DSP_cfg(void)
+{
+ int bRC = -EIO;
+ int i;
+ unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
+ static const unsigned short ausDspBases[] = {
+ 0x0030, 0x4E30, 0x8E30, 0xCE30,
+ 0x0130, 0x0350, 0x0070, 0x0DB0 };
+ static const unsigned short ausUartBases[] = {
+ 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
+ static const unsigned short ausDspIrqs[] = {
+ 5, 7, 10, 11, 15 };
+ static const unsigned short ausUartIrqs[] = {
+ 3, 4 };
+
+ unsigned short dspio_index = 0, uartio_index = 0;
+
+ PRINTK_5(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg entry mwave_3780i_irq %x mwave_3780i_io %x mwave_uart_irq %x mwave_uart_io %x\n",
+ mwave_3780i_irq, mwave_3780i_io, mwave_uart_irq, mwave_uart_io);
+
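+	/*
+	 * Validate any module-parameter overrides against the tables of legal
+	 * DSP/UART base addresses and IRQs, then, if UART resources were
+	 * requested, scan serial ports A and B and the IR port for conflicts.
+	 * A conflict is either reported as an error or, when
+	 * MWAVE_FUTZ_WITH_OTHER_DEVICES is defined, resolved by disabling the
+	 * conflicting device through SMAPI.  Finally the DSP (0x1803) and
+	 * UART (0x1805) settings are written back.
+	 */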
+ if (mwave_3780i_io) {
+ for (i = 0; i < ARRAY_SIZE(ausDspBases); i++) {
+ if (mwave_3780i_io == ausDspBases[i])
+ break;
+ }
+ if (i == ARRAY_SIZE(ausDspBases)) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_io address %x. Aborting.\n", mwave_3780i_io);
+ return bRC;
+ }
+ dspio_index = i;
+ }
+
+ if (mwave_3780i_irq) {
+ for (i = 0; i < ARRAY_SIZE(ausDspIrqs); i++) {
+ if (mwave_3780i_irq == ausDspIrqs[i])
+ break;
+ }
+ if (i == ARRAY_SIZE(ausDspIrqs)) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_irq %x. Aborting.\n", mwave_3780i_irq);
+ return bRC;
+ }
+ }
+
+ if (mwave_uart_io) {
+ for (i = 0; i < ARRAY_SIZE(ausUartBases); i++) {
+ if (mwave_uart_io == ausUartBases[i])
+ break;
+ }
+ if (i == ARRAY_SIZE(ausUartBases)) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_io address %x. Aborting.\n", mwave_uart_io);
+ return bRC;
+ }
+ uartio_index = i;
+ }
+
+
+ if (mwave_uart_irq) {
+ for (i = 0; i < ARRAY_SIZE(ausUartIrqs); i++) {
+ if (mwave_uart_irq == ausUartIrqs[i])
+ break;
+ }
+ if (i == ARRAY_SIZE(ausUartIrqs)) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_irq %x. Aborting.\n", mwave_uart_irq);
+ return bRC;
+ }
+ }
+
+ if (mwave_uart_irq || mwave_uart_io) {
+
+ /* Check serial port A */
+ bRC = smapi_request(0x1402, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ /* bRC == 0 */
+ if (usBX & 0x0100) { /* serial port A is present */
+ if (usCX & 1) { /* serial port is enabled */
+ if ((usSI & 0xFF) == mwave_uart_irq) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
+#else
+ PRINTK_3(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_1(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg Disabling conflicting serial port\n");
+ bRC = smapi_request(0x1403, 0x0100, 0, usSI,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1402, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+#else
+ goto exit_conflict;
+#endif
+ } else {
+ if ((usSI >> 8) == uartio_index) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
+#else
+ PRINTK_3(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_1(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg Disabling conflicting serial port A\n");
+ bRC = smapi_request (0x1403, 0x0100, 0, usSI,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request (0x1402, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+#else
+ goto exit_conflict;
+#endif
+ }
+ }
+ }
+ }
+
+ /* Check serial port B */
+ bRC = smapi_request(0x1404, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ /* bRC == 0 */
+ if (usBX & 0x0100) { /* serial port B is present */
+ if (usCX & 1) { /* serial port is enabled */
+ if ((usSI & 0xFF) == mwave_uart_irq) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
+#else
+ PRINTK_3(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_1(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n");
+ bRC = smapi_request(0x1405, 0x0100, 0, usSI,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1404, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+#else
+ goto exit_conflict;
+#endif
+ } else {
+ if ((usSI >> 8) == uartio_index) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
+#else
+ PRINTK_3(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_1 (TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n");
+ bRC = smapi_request (0x1405, 0x0100, 0, usSI,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request (0x1404, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+#else
+ goto exit_conflict;
+#endif
+ }
+ }
+ }
+ }
+
+ /* Check IR port */
+ bRC = smapi_request(0x1700, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1704, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ /* bRC == 0 */
+ if ((usCX & 0xff) != 0xff) { /* IR port not disabled */
+ if ((usCX & 0xff) == mwave_uart_irq) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq);
+#else
+ PRINTK_3(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_1(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n");
+ bRC = smapi_request(0x1701, 0x0100, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1700, 0, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1705, 0x01ff, 0, usSI,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1704, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+#else
+ goto exit_conflict;
+#endif
+ } else {
+ if ((usSI & 0xff) == uartio_index) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]);
+#else
+ PRINTK_3(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_1(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n");
+ bRC = smapi_request(0x1701, 0x0100, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1700, 0, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1705, 0x01ff, 0, usSI,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1704, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+#else
+ goto exit_conflict;
+#endif
+ }
+ }
+ }
+ }
+
+ bRC = smapi_request(0x1802, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+
+ if (mwave_3780i_io) {
+ usDI = dspio_index;
+ }
+ if (mwave_3780i_irq) {
+ usSI = (usSI & 0xff00) | mwave_3780i_irq;
+ }
+
+ bRC = smapi_request(0x1803, 0x0101, usDI, usSI,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+
+ bRC = smapi_request(0x1804, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+
+ if (mwave_uart_io) {
+ usSI = (usSI & 0x00ff) | (uartio_index << 8);
+ }
+ if (mwave_uart_irq) {
+ usSI = (usSI & 0xff00) | mwave_uart_irq;
+ }
+ bRC = smapi_request(0x1805, 0x0101, 0, usSI,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+
+ bRC = smapi_request(0x1802, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+
+ bRC = smapi_request(0x1804, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+
+/* normal exit: */
+ PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg exit\n");
+ return 0;
+
+exit_conflict:
+ /* Message has already been printed */
+ return -EIO;
+
+exit_smapi_request_error:
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg exit on smapi_request error bRC %x\n", bRC);
+ return bRC;
+}
+
+
+int smapi_set_DSP_power_state(bool bOn)
+{
+ int bRC;
+ unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
+ unsigned short usPowerFunction;
+
+ PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state entry bOn %x\n", bOn);
+
+ usPowerFunction = (bOn) ? 1 : 0;
+
+ bRC = smapi_request(0x4901, 0x0000, 0, usPowerFunction,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+
+ PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state exit bRC %x\n", bRC);
+
+ return bRC;
+}
+
+#if 0
+static int SmapiQuerySystemID(void)
+{
+ int bRC = -EIO;
+ unsigned short usAX = 0xffff, usBX = 0xffff, usCX = 0xffff,
+ usDX = 0xffff, usDI = 0xffff, usSI = 0xffff;
+
+	printk("smapi::SmapiQuerySystemID entry\n");
+ bRC = smapi_request(0x0000, 0, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+
+ if (bRC == 0) {
+ printk("AX=%x, BX=%x, CX=%x, DX=%x, DI=%x, SI=%x\n",
+ usAX, usBX, usCX, usDX, usDI, usSI);
+ } else {
+ printk("smapi::SmapiQuerySystemID smapi_request error\n");
+ }
+
+ return bRC;
+}
+#endif /* 0 */
+
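+/*
+ * smapi_init() checks the 16-bit SMAPI signature stored in CMOS bytes
+ * 0x7C/0x7D (0x5349) and, if it matches, reads the SMAPI control port from
+ * CMOS bytes 0x7E/0x7F into g_usSmapiPort for later smapi_request() calls.
+ */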
+int smapi_init(void)
+{
+ int retval = -EIO;
+ unsigned short usSmapiID = 0;
+ unsigned long flags;
+
+ PRINTK_1(TRACE_SMAPI, "smapi::smapi_init entry\n");
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ usSmapiID = CMOS_READ(0x7C);
+ usSmapiID |= (CMOS_READ(0x7D) << 8);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ PRINTK_2(TRACE_SMAPI, "smapi::smapi_init usSmapiID %x\n", usSmapiID);
+
+ if (usSmapiID == 0x5349) {
+ spin_lock_irqsave(&rtc_lock, flags);
+ g_usSmapiPort = CMOS_READ(0x7E);
+ g_usSmapiPort |= (CMOS_READ(0x7F) << 8);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ if (g_usSmapiPort == 0) {
+ PRINTK_ERROR("smapi::smapi_init, ERROR unable to read from SMAPI port\n");
+ } else {
+ PRINTK_2(TRACE_SMAPI,
+ "smapi::smapi_init, exit true g_usSmapiPort %x\n",
+ g_usSmapiPort);
+ retval = 0;
+ //SmapiQuerySystemID();
+ }
+ } else {
+ PRINTK_ERROR("smapi::smapi_init, ERROR invalid usSmapiID\n");
+ retval = -ENXIO;
+ }
+
+ return retval;
+}
diff --git a/drivers/char/mwave/smapi.h b/drivers/char/mwave/smapi.h
new file mode 100644
index 0000000000..ebc206b000
--- /dev/null
+++ b/drivers/char/mwave/smapi.h
@@ -0,0 +1,76 @@
+/*
+*
+* smapi.h -- declarations for SMAPI interface routines
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#ifndef _LINUX_SMAPI_H
+#define _LINUX_SMAPI_H
+
+typedef struct {
+ int bDSPPresent;
+ int bDSPEnabled;
+ int bModemEnabled;
+ int bMIDIEnabled;
+ int bSblstEnabled;
+ unsigned short usDspIRQ;
+ unsigned short usDspDMA;
+ unsigned short usDspBaseIO;
+ unsigned short usUartIRQ;
+ unsigned short usUartBaseIO;
+ unsigned short usMidiIRQ;
+ unsigned short usMidiBaseIO;
+ unsigned short usSndblstIRQ;
+ unsigned short usSndblstDMA;
+ unsigned short usSndblstBaseIO;
+} SMAPI_DSP_SETTINGS;
+
+int smapi_init(void);
+int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings);
+int smapi_set_DSP_cfg(void);
+int smapi_set_DSP_power_state(bool bOn);
+
+
+#endif
diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
new file mode 100644
index 0000000000..83eaffeb22
--- /dev/null
+++ b/drivers/char/mwave/tp3780i.c
@@ -0,0 +1,574 @@
+/*
+*
+* tp3780i.c -- board driver for 3780i on ThinkPads
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include "smapi.h"
+#include "mwavedd.h"
+#include "tp3780i.h"
+#include "3780i.h"
+#include "mwavepub.h"
+
+static unsigned short s_ausThinkpadIrqToField[16] =
+ { 0xFFFF, 0xFFFF, 0xFFFF, 0x0001, 0x0002, 0x0003, 0xFFFF, 0x0004,
+ 0xFFFF, 0xFFFF, 0x0005, 0x0006, 0xFFFF, 0xFFFF, 0xFFFF, 0x0007 };
+static unsigned short s_ausThinkpadDmaToField[8] =
+ { 0x0001, 0x0002, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0003, 0x0004 };
+static unsigned short s_numIrqs = 16, s_numDmas = 8;
+
+
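+/*
+ * EnableSRAM() switches MSA GPIO bit 10 to an output driven low.  Judging by
+ * the function name, this is the line that enables the DSP's SRAM on these
+ * ThinkPads.
+ */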
+static void EnableSRAM(THINKPAD_BD_DATA * pBDData)
+{
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+ DSP_GPIO_OUTPUT_DATA_15_8 rGpioOutputData;
+ DSP_GPIO_DRIVER_ENABLE_15_8 rGpioDriverEnable;
+ DSP_GPIO_MODE_15_8 rGpioMode;
+
+ PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM, entry\n");
+
+ MKWORD(rGpioMode) = ReadMsaCfg(DSP_GpioModeControl_15_8);
+ rGpioMode.GpioMode10 = 0;
+ WriteMsaCfg(DSP_GpioModeControl_15_8, MKWORD(rGpioMode));
+
+ MKWORD(rGpioDriverEnable) = 0;
+ rGpioDriverEnable.Enable10 = true;
+ rGpioDriverEnable.Mask10 = true;
+ WriteMsaCfg(DSP_GpioDriverEnable_15_8, MKWORD(rGpioDriverEnable));
+
+ MKWORD(rGpioOutputData) = 0;
+ rGpioOutputData.Latch10 = 0;
+ rGpioOutputData.Mask10 = true;
+ WriteMsaCfg(DSP_GpioOutputData_15_8, MKWORD(rGpioOutputData));
+
+ PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM exit\n");
+}
+
+
+static irqreturn_t UartInterrupt(int irq, void *dev_id)
+{
+ PRINTK_3(TRACE_TP3780I,
+ "tp3780i::UartInterrupt entry irq %x dev_id %p\n", irq, dev_id);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t DspInterrupt(int irq, void *dev_id)
+{
+ pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pDrvData->rBDData.rDspSettings;
+ unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+ unsigned short usIPCSource = 0, usIsolationMask, usPCNum;
+
+ PRINTK_3(TRACE_TP3780I,
+ "tp3780i::DspInterrupt entry irq %x dev_id %p\n", irq, dev_id);
+
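+	/*
+	 * Read the pending IPC source bits from the DSP and walk them with a
+	 * one-bit isolation mask: for each bit set, mark the corresponding
+	 * IPC as having an interrupt outstanding and wake any enabled waiter
+	 * on its wait queue.
+	 */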
+ if (dsp3780I_GetIPCSource(usDspBaseIO, &usIPCSource) == 0) {
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::DspInterrupt, return from dsp3780i_GetIPCSource, usIPCSource %x\n",
+ usIPCSource);
+ usIsolationMask = 1;
+ for (usPCNum = 1; usPCNum <= 16; usPCNum++) {
+ if (usIPCSource & usIsolationMask) {
+ usIPCSource &= ~usIsolationMask;
+ PRINTK_3(TRACE_TP3780I,
+ "tp3780i::DspInterrupt usPCNum %x usIPCSource %x\n",
+ usPCNum, usIPCSource);
+ if (pDrvData->IPCs[usPCNum - 1].usIntCount == 0) {
+ pDrvData->IPCs[usPCNum - 1].usIntCount = 1;
+ }
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::DspInterrupt usIntCount %x\n",
+ pDrvData->IPCs[usPCNum - 1].usIntCount);
+ if (pDrvData->IPCs[usPCNum - 1].bIsEnabled == true) {
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::DspInterrupt, waking up usPCNum %x\n",
+ usPCNum - 1);
+ wake_up_interruptible(&pDrvData->IPCs[usPCNum - 1].ipc_wait_queue);
+ } else {
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::DspInterrupt, no one waiting for IPC %x\n",
+ usPCNum - 1);
+ }
+ }
+ if (usIPCSource == 0)
+ break;
+ /* try next IPC */
+ usIsolationMask = usIsolationMask << 1;
+ }
+ } else {
+ PRINTK_1(TRACE_TP3780I,
+ "tp3780i::DspInterrupt, return false from dsp3780i_GetIPCSource\n");
+ }
+ PRINTK_1(TRACE_TP3780I, "tp3780i::DspInterrupt exit\n");
+ return IRQ_HANDLED;
+}
+
+
+int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData)
+{
+ int retval = 0;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData entry pBDData %p\n", pBDData);
+
+ pBDData->bDSPEnabled = false;
+ pSettings->bInterruptClaimed = false;
+
+ retval = smapi_init();
+ if (retval) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_InitializeBoardData: Error: SMAPI is not available on this machine\n");
+ } else {
+ if (mwave_3780i_irq || mwave_3780i_io || mwave_uart_irq || mwave_uart_io) {
+ retval = smapi_set_DSP_cfg();
+ }
+ }
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData exit retval %x\n", retval);
+
+ return retval;
+}
+
+void tp3780I_Cleanup(THINKPAD_BD_DATA *pBDData)
+{
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::tp3780I_Cleanup entry and exit pBDData %p\n", pBDData);
+}
+
+int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData)
+{
+ SMAPI_DSP_SETTINGS rSmapiInfo;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::tp3780I_CalcResources entry pBDData %p\n", pBDData);
+
+ if (smapi_query_DSP_cfg(&rSmapiInfo)) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Could not query DSP config. Aborting.\n");
+ return -EIO;
+ }
+
+ /* Sanity check */
+ if (
+ ( rSmapiInfo.usDspIRQ == 0 )
+ || ( rSmapiInfo.usDspBaseIO == 0 )
+ || ( rSmapiInfo.usUartIRQ == 0 )
+ || ( rSmapiInfo.usUartBaseIO == 0 )
+ ) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Illegal resource setting. Aborting.\n");
+ return -EIO;
+ }
+
+ pSettings->bDSPEnabled = (rSmapiInfo.bDSPEnabled && rSmapiInfo.bDSPPresent);
+ pSettings->bModemEnabled = rSmapiInfo.bModemEnabled;
+ pSettings->usDspIrq = rSmapiInfo.usDspIRQ;
+ pSettings->usDspDma = rSmapiInfo.usDspDMA;
+ pSettings->usDspBaseIO = rSmapiInfo.usDspBaseIO;
+ pSettings->usUartIrq = rSmapiInfo.usUartIRQ;
+ pSettings->usUartBaseIO = rSmapiInfo.usUartBaseIO;
+
+ pSettings->uDStoreSize = TP_ABILITIES_DATA_SIZE;
+ pSettings->uIStoreSize = TP_ABILITIES_INST_SIZE;
+ pSettings->uIps = TP_ABILITIES_INTS_PER_SEC;
+
+ if (pSettings->bDSPEnabled && pSettings->bModemEnabled && pSettings->usDspIrq == pSettings->usUartIrq) {
+ pBDData->bShareDspIrq = pBDData->bShareUartIrq = 1;
+ } else {
+ pBDData->bShareDspIrq = pBDData->bShareUartIrq = 0;
+ }
+
+ PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_CalcResources exit\n");
+
+ return 0;
+}
+
+
+int tp3780I_ClaimResources(THINKPAD_BD_DATA * pBDData)
+{
+ int retval = 0;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ struct resource *pres;
+
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::tp3780I_ClaimResources entry pBDData %p\n", pBDData);
+
+ pres = request_region(pSettings->usDspBaseIO, 16, "mwave_3780i");
+ if ( pres == NULL ) retval = -EIO;
+
+ if (retval) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_ClaimResources: Error: Could not claim I/O region starting at %x\n", pSettings->usDspBaseIO);
+ retval = -EIO;
+ }
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ClaimResources exit retval %x\n", retval);
+
+ return retval;
+}
+
+int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData)
+{
+ int retval = 0;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::tp3780I_ReleaseResources entry pBDData %p\n", pBDData);
+
+ release_region(pSettings->usDspBaseIO & (~3), 16);
+
+ if (pSettings->bInterruptClaimed) {
+ free_irq(pSettings->usDspIrq, NULL);
+ pSettings->bInterruptClaimed = false;
+ }
+
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::tp3780I_ReleaseResources exit retval %x\n", retval);
+
+ return retval;
+}
+
+
+
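+/*
+ * tp3780I_EnableDSP() sanity-checks the IRQ, DMA and base I/O settings
+ * against the ThinkPad capability tables, probes the UART IRQ with a
+ * transient request_irq()/free_irq() pair, claims the DSP IRQ, power-cycles
+ * the DSP through SMAPI, programs the 3780i configuration and finally
+ * enables the SRAM.
+ */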
+int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
+{
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ bool bDSPPoweredUp = false, bInterruptAllocated = false;
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP entry pBDData %p\n", pBDData);
+
+ if (pBDData->bDSPEnabled) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: DSP already enabled!\n");
+ goto exit_cleanup;
+ }
+
+ if (!pSettings->bDSPEnabled) {
+		PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: pSettings->bDSPEnabled not set\n");
+ goto exit_cleanup;
+ }
+
+ if (
+ (pSettings->usDspIrq >= s_numIrqs)
+ || (pSettings->usDspDma >= s_numDmas)
+ || (s_ausThinkpadIrqToField[pSettings->usDspIrq] == 0xFFFF)
+ || (s_ausThinkpadDmaToField[pSettings->usDspDma] == 0xFFFF)
+ ) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: invalid irq %x\n", pSettings->usDspIrq);
+ goto exit_cleanup;
+ }
+
+ if (
+ ((pSettings->usDspBaseIO & 0xF00F) != 0)
+ || (pSettings->usDspBaseIO & 0x0FF0) == 0
+ ) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid DSP base I/O address %x\n", pSettings->usDspBaseIO);
+ goto exit_cleanup;
+ }
+
+ if (pSettings->bModemEnabled) {
+ if (
+ pSettings->usUartIrq >= s_numIrqs
+ || s_ausThinkpadIrqToField[pSettings->usUartIrq] == 0xFFFF
+ ) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid UART IRQ %x\n", pSettings->usUartIrq);
+ goto exit_cleanup;
+ }
+ switch (pSettings->usUartBaseIO) {
+ case 0x03F8:
+ case 0x02F8:
+ case 0x03E8:
+ case 0x02E8:
+ break;
+
+ default:
+ PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Invalid UART base I/O address %x\n", pSettings->usUartBaseIO);
+ goto exit_cleanup;
+ }
+ }
+
+ pSettings->bDspIrqActiveLow = pSettings->bDspIrqPulse = true;
+ pSettings->bUartIrqActiveLow = pSettings->bUartIrqPulse = true;
+
+ if (pBDData->bShareDspIrq) {
+ pSettings->bDspIrqActiveLow = false;
+ }
+ if (pBDData->bShareUartIrq) {
+ pSettings->bUartIrqActiveLow = false;
+ }
+
+ pSettings->usNumTransfers = TP_CFG_NumTransfers;
+ pSettings->usReRequest = TP_CFG_RerequestTimer;
+ pSettings->bEnableMEMCS16 = TP_CFG_MEMCS16;
+ pSettings->usIsaMemCmdWidth = TP_CFG_IsaMemCmdWidth;
+ pSettings->bGateIOCHRDY = TP_CFG_GateIOCHRDY;
+ pSettings->bEnablePwrMgmt = TP_CFG_EnablePwrMgmt;
+ pSettings->usHBusTimerLoadValue = TP_CFG_HBusTimerValue;
+ pSettings->bDisableLBusTimeout = TP_CFG_DisableLBusTimeout;
+ pSettings->usN_Divisor = TP_CFG_N_Divisor;
+ pSettings->usM_Multiplier = TP_CFG_M_Multiplier;
+ pSettings->bPllBypass = TP_CFG_PllBypass;
+ pSettings->usChipletEnable = TP_CFG_ChipletEnable;
+
+ if (request_irq(pSettings->usUartIrq, &UartInterrupt, 0, "mwave_uart", NULL)) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Could not get UART IRQ %x\n", pSettings->usUartIrq);
+ goto exit_cleanup;
+ } else { /* no conflict just release */
+ free_irq(pSettings->usUartIrq, NULL);
+ }
+
+ if (request_irq(pSettings->usDspIrq, &DspInterrupt, 0, "mwave_3780i", NULL)) {
+ PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Could not get 3780i IRQ %x\n", pSettings->usDspIrq);
+ goto exit_cleanup;
+ } else {
+ PRINTK_3(TRACE_TP3780I,
+ "tp3780i::tp3780I_EnableDSP, got interrupt %x bShareDspIrq %x\n",
+ pSettings->usDspIrq, pBDData->bShareDspIrq);
+ bInterruptAllocated = true;
+ pSettings->bInterruptClaimed = true;
+ }
+
+ smapi_set_DSP_power_state(false);
+ if (smapi_set_DSP_power_state(true)) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: smapi_set_DSP_power_state(true) failed\n");
+ goto exit_cleanup;
+ } else {
+ bDSPPoweredUp = true;
+ }
+
+ if (dsp3780I_EnableDSP(pSettings, s_ausThinkpadIrqToField, s_ausThinkpadDmaToField)) {
+		PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: dsp3780I_EnableDSP() failed\n");
+ goto exit_cleanup;
+ }
+
+ EnableSRAM(pBDData);
+
+ pBDData->bDSPEnabled = true;
+
+ PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP exit\n");
+
+ return 0;
+
+exit_cleanup:
+ PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Cleaning up\n");
+ if (bDSPPoweredUp)
+ smapi_set_DSP_power_state(false);
+ if (bInterruptAllocated) {
+ free_irq(pSettings->usDspIrq, NULL);
+ pSettings->bInterruptClaimed = false;
+ }
+ return -EIO;
+}
+
+
+int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData)
+{
+ int retval = 0;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP entry pBDData %p\n", pBDData);
+
+ if (pBDData->bDSPEnabled) {
+ dsp3780I_DisableDSP(&pBDData->rDspSettings);
+ if (pSettings->bInterruptClaimed) {
+ free_irq(pSettings->usDspIrq, NULL);
+ pSettings->bInterruptClaimed = false;
+ }
+ smapi_set_DSP_power_state(false);
+ pBDData->bDSPEnabled = false;
+ }
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP exit retval %x\n", retval);
+
+ return retval;
+}
+
+
+int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData)
+{
+ int retval = 0;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ResetDSP entry pBDData %p\n",
+ pBDData);
+
+ if (dsp3780I_Reset(pSettings) == 0) {
+ EnableSRAM(pBDData);
+ } else {
+ retval = -EIO;
+ }
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ResetDSP exit retval %x\n", retval);
+
+ return retval;
+}
+
+
+int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData)
+{
+ int retval = 0;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP entry pBDData %p\n", pBDData);
+
+ if (dsp3780I_Run(pSettings) == 0) {
+ // @BUG @TBD EnableSRAM(pBDData);
+ } else {
+ retval = -EIO;
+ }
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP exit retval %x\n", retval);
+
+ return retval;
+}
+
+
+int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities)
+{
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData);
+
+ memset(pAbilities, 0, sizeof(*pAbilities));
+ /* fill out standard constant fields */
+ pAbilities->instr_per_sec = pBDData->rDspSettings.uIps;
+ pAbilities->data_size = pBDData->rDspSettings.uDStoreSize;
+ pAbilities->inst_size = pBDData->rDspSettings.uIStoreSize;
+ pAbilities->bus_dma_bw = pBDData->rDspSettings.uDmaBandwidth;
+
+ /* fill out dynamically determined fields */
+ pAbilities->component_list[0] = 0x00010000 | MW_ADC_MASK;
+ pAbilities->component_list[1] = 0x00010000 | MW_ACI_MASK;
+ pAbilities->component_list[2] = 0x00010000 | MW_AIC1_MASK;
+ pAbilities->component_list[3] = 0x00010000 | MW_AIC2_MASK;
+ pAbilities->component_list[4] = 0x00010000 | MW_CDDAC_MASK;
+ pAbilities->component_list[5] = 0x00010000 | MW_MIDI_MASK;
+ pAbilities->component_list[6] = 0x00010000 | MW_UART_MASK;
+ pAbilities->component_count = 7;
+
+ /* Fill out Mwave OS and BIOS task names */
+
+ memcpy(pAbilities->mwave_os_name, TP_ABILITIES_MWAVEOS_NAME,
+ sizeof(TP_ABILITIES_MWAVEOS_NAME));
+ memcpy(pAbilities->bios_task_name, TP_ABILITIES_BIOSTASK_NAME,
+ sizeof(TP_ABILITIES_BIOSTASK_NAME));
+
+ PRINTK_1(TRACE_TP3780I,
+ "tp3780i::tp3780I_QueryAbilities exit retval=SUCCESSFUL\n");
+
+ return 0;
+}
+
+int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+ void __user *pvBuffer, unsigned int uCount,
+ unsigned long ulDSPAddr)
+{
+ int retval = 0;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+ bool bRC = 0;
+
+ PRINTK_6(TRACE_TP3780I,
+ "tp3780i::tp3780I_ReadWriteDspDStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n",
+ pBDData, uOpcode, pvBuffer, uCount, ulDSPAddr);
+
+ if (pBDData->bDSPEnabled) {
+ switch (uOpcode) {
+ case IOCTL_MW_READ_DATA:
+ bRC = dsp3780I_ReadDStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr);
+ break;
+
+ case IOCTL_MW_READCLEAR_DATA:
+ bRC = dsp3780I_ReadAndClearDStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr);
+ break;
+
+ case IOCTL_MW_WRITE_DATA:
+ bRC = dsp3780I_WriteDStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr);
+ break;
+ }
+ }
+
+ retval = (bRC) ? -EIO : 0;
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ReadWriteDspDStore exit retval %x\n", retval);
+
+ return retval;
+}
+
+
+int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+ void __user *pvBuffer, unsigned int uCount,
+ unsigned long ulDSPAddr)
+{
+ int retval = 0;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+ bool bRC = 0;
+
+ PRINTK_6(TRACE_TP3780I,
+ "tp3780i::tp3780I_ReadWriteDspIStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n",
+ pBDData, uOpcode, pvBuffer, uCount, ulDSPAddr);
+
+ if (pBDData->bDSPEnabled) {
+ switch (uOpcode) {
+ case IOCTL_MW_READ_INST:
+ bRC = dsp3780I_ReadIStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr);
+ break;
+
+ case IOCTL_MW_WRITE_INST:
+ bRC = dsp3780I_WriteIStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr);
+ break;
+ }
+ }
+
+ retval = (bRC) ? -EIO : 0;
+
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::tp3780I_ReadWriteDspIStore exit retval %x\n", retval);
+
+ return retval;
+}
+
diff --git a/drivers/char/mwave/tp3780i.h b/drivers/char/mwave/tp3780i.h
new file mode 100644
index 0000000000..8bd976d42f
--- /dev/null
+++ b/drivers/char/mwave/tp3780i.h
@@ -0,0 +1,103 @@
+/*
+*
+* tp3780i.h -- declarations for tp3780i.c
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#ifndef _LINUX_TP3780I_H
+#define _LINUX_TP3780I_H
+
+#include <asm/io.h>
+#include "mwavepub.h"
+
+
+/* DSP abilities constants for 3780i based Thinkpads */
+#define TP_ABILITIES_INTS_PER_SEC 39160800
+#define TP_ABILITIES_DATA_SIZE 32768
+#define TP_ABILITIES_INST_SIZE 32768
+#define TP_ABILITIES_MWAVEOS_NAME "mwaveos0700.dsp"
+#define TP_ABILITIES_BIOSTASK_NAME "mwbio701.dsp"
+
+
+/* DSP configuration values for 3780i based Thinkpads */
+#define TP_CFG_NumTransfers 3 /* 16 transfers */
+#define TP_CFG_RerequestTimer 1 /* 2 usec */
+#define TP_CFG_MEMCS16 0 /* Disabled, 16-bit memory assumed */
+#define TP_CFG_IsaMemCmdWidth 3 /* 295 nsec (16-bit) */
+#define TP_CFG_GateIOCHRDY 0 /* No IOCHRDY gating */
+#define TP_CFG_EnablePwrMgmt 1		/* Enable low power suspend/resume */
+#define TP_CFG_HBusTimerValue 255 /* HBus timer load value */
+#define TP_CFG_DisableLBusTimeout 0 /* Enable LBus timeout */
+#define TP_CFG_N_Divisor 32 /* Clock = 39.1608 Mhz */
+#define TP_CFG_M_Multiplier 37 /* " */
+#define TP_CFG_PllBypass 0 /* don't bypass */
+#define TP_CFG_ChipletEnable 0xFFFF /* Enable all chiplets */
+
+typedef struct {
+ int bDSPEnabled;
+ int bShareDspIrq;
+ int bShareUartIrq;
+ DSP_3780I_CONFIG_SETTINGS rDspSettings;
+} THINKPAD_BD_DATA;
+
+int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData);
+int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData);
+int tp3780I_ClaimResources(THINKPAD_BD_DATA * pBDData);
+int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData);
+int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData);
+int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData);
+int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData);
+int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData);
+int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities);
+void tp3780I_Cleanup(THINKPAD_BD_DATA *pBDData);
+int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+ void __user *pvBuffer, unsigned int uCount,
+ unsigned long ulDSPAddr);
+int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+ void __user *pvBuffer, unsigned int uCount,
+ unsigned long ulDSPAddr);
+
+
+#endif
diff --git a/drivers/char/nsc_gpio.c b/drivers/char/nsc_gpio.c
new file mode 100644
index 0000000000..da930c72bc
--- /dev/null
+++ b/drivers/char/nsc_gpio.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* linux/drivers/char/nsc_gpio.c
+
+ National Semiconductor common GPIO device-file/VFS methods.
+ Allows a user space process to control the GPIO pins.
+
+ Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
+ Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com>
+*/
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/nsc_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+
+#define NAME "nsc_gpio"
+
+void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index)
+{
+ /* retrieve current config w/o changing it */
+ u32 config = amp->gpio_config(index, ~0, 0);
+
+	/* user requested via 'v' command, so it's INFO */
+ dev_info(amp->dev, "io%02u: 0x%04x %s %s %s %s %s %s %s\tio:%d/%d\n",
+ index, config,
+ (config & 1) ? "OE" : "TS", /* output-enabled/tristate */
+ (config & 2) ? "PP" : "OD", /* push pull / open drain */
+ (config & 4) ? "PUE" : "PUD", /* pull up enabled/disabled */
+ (config & 8) ? "LOCKED" : "", /* locked / unlocked */
+ (config & 16) ? "LEVEL" : "EDGE",/* level/edge input */
+ (config & 32) ? "HI" : "LO", /* trigger on rise/fall edge */
+ (config & 64) ? "DEBOUNCE" : "", /* debounce */
+
+ amp->gpio_get(index), amp->gpio_current(index));
+}
+
+ssize_t nsc_gpio_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ unsigned m = iminor(file_inode(file));
+ struct nsc_gpio_ops *amp = file->private_data;
+ struct device *dev = amp->dev;
+ size_t i;
+ int err = 0;
+
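+	/*
+	 * Each character written is a command for GPIO pin <minor>:
+	 * '0'/'1' drive the pin low/high, 'O'/'o' enable/disable the output
+	 * driver, 'T'/'t' select push-pull/open-drain, 'P'/'p' enable/disable
+	 * the pull-up, and 'v' dumps the current pin configuration.
+	 */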
+ for (i = 0; i < len; ++i) {
+ char c;
+ if (get_user(c, data + i))
+ return -EFAULT;
+ switch (c) {
+ case '0':
+ amp->gpio_set(m, 0);
+ break;
+ case '1':
+ amp->gpio_set(m, 1);
+ break;
+ case 'O':
+ dev_dbg(dev, "GPIO%d output enabled\n", m);
+ amp->gpio_config(m, ~1, 1);
+ break;
+ case 'o':
+ dev_dbg(dev, "GPIO%d output disabled\n", m);
+ amp->gpio_config(m, ~1, 0);
+ break;
+ case 'T':
+ dev_dbg(dev, "GPIO%d output is push pull\n", m);
+ amp->gpio_config(m, ~2, 2);
+ break;
+ case 't':
+ dev_dbg(dev, "GPIO%d output is open drain\n", m);
+ amp->gpio_config(m, ~2, 0);
+ break;
+ case 'P':
+ dev_dbg(dev, "GPIO%d pull up enabled\n", m);
+ amp->gpio_config(m, ~4, 4);
+ break;
+ case 'p':
+ dev_dbg(dev, "GPIO%d pull up disabled\n", m);
+ amp->gpio_config(m, ~4, 0);
+ break;
+ case 'v':
+ /* View Current pin settings */
+ amp->gpio_dump(amp, m);
+ break;
+ case '\n':
+ /* end of settings string, do nothing */
+ break;
+ default:
+ dev_err(dev, "io%2d bad setting: chr<0x%2x>\n",
+ m, (int)c);
+ err++;
+ }
+ }
+ if (err)
+ return -EINVAL; /* full string handled, report error */
+
+ return len;
+}
+
+ssize_t nsc_gpio_read(struct file *file, char __user * buf,
+ size_t len, loff_t * ppos)
+{
+ unsigned m = iminor(file_inode(file));
+ int value;
+ struct nsc_gpio_ops *amp = file->private_data;
+
+ value = amp->gpio_get(m);
+ if (put_user(value ? '1' : '0', buf))
+ return -EFAULT;
+
+ return 1;
+}
+
+/* common file-ops routines for both scx200_gpio and pc87360_gpio */
+EXPORT_SYMBOL(nsc_gpio_write);
+EXPORT_SYMBOL(nsc_gpio_read);
+EXPORT_SYMBOL(nsc_gpio_dump);
+
+static int __init nsc_gpio_init(void)
+{
+ printk(KERN_DEBUG NAME " initializing\n");
+ return 0;
+}
+
+static void __exit nsc_gpio_cleanup(void)
+{
+ printk(KERN_DEBUG NAME " cleanup\n");
+}
+
+module_init(nsc_gpio_init);
+module_exit(nsc_gpio_cleanup);
+
+MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
+MODULE_DESCRIPTION("NatSemi GPIO Common Methods");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
new file mode 100644
index 0000000000..e9f694b368
--- /dev/null
+++ b/drivers/char/nvram.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CMOS/NV-RAM driver for Linux
+ *
+ * Copyright (C) 1997 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
+ * idea by and with help from Richard Jelinek <rj@suse.de>
+ * Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
+ *
+ * This driver allows you to access the contents of the non-volatile memory in
+ * the mc146818rtc.h real-time clock. This chip is built into all PCs and into
+ * many Atari machines. In the former it's called "CMOS-RAM", in the latter
+ * "NVRAM" (NV stands for non-volatile).
+ *
+ * The data are supplied as a (seekable) character device, /dev/nvram. The
+ * size of this file is dependent on the controller. The usual size is 114,
+ * the number of freely available bytes in the memory (i.e., not used by the
+ * RTC itself).
+ *
+ * Checksums over the NVRAM contents are managed by this driver. In case of a
+ * bad checksum, reads and writes return -EIO. The checksum can be initialized
+ * to a sane state either by ioctl(NVRAM_INIT) (clear whole NVRAM) or
+ * ioctl(NVRAM_SETCKS) (doesn't change contents, just makes checksum valid
+ * again; use with care!)
+ *
+ * 1.1 Cesar Barros: SMP locking fixes
+ * added changelog
+ * 1.2 Erik Gilling: Cobalt Networks support
+ * Tim Hockin: general cleanup, Cobalt support
+ * 1.3 Wim Van Sebroeck: convert PRINT_PROC to seq_file
+ */
+
+#define NVRAM_VERSION "1.3"
+
+#include <linux/module.h>
+#include <linux/nvram.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/mc146818rtc.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+
+#ifdef CONFIG_PPC
+#include <asm/nvram.h>
+#endif
+
+static DEFINE_MUTEX(nvram_mutex);
+static DEFINE_SPINLOCK(nvram_state_lock);
+static int nvram_open_cnt; /* #times opened */
+static int nvram_open_mode; /* special open modes */
+static ssize_t nvram_size;
+#define NVRAM_WRITE 1 /* opened for writing (exclusive) */
+#define NVRAM_EXCL 2 /* opened with O_EXCL */
+
+#ifdef CONFIG_X86
+/*
+ * These functions are provided to be called internally or by other parts of
+ * the kernel. It's up to the caller to ensure correct checksum before reading
+ * or after writing (needs to be done only once).
+ *
+ * It is worth noting that these functions all access bytes of general
+ * purpose memory in the NVRAM - that is to say, they all add the
+ * NVRAM_FIRST_BYTE offset. Pass them offsets into NVRAM as if you did not
+ * know about the RTC cruft.
+ */
+
+#define NVRAM_BYTES (128 - NVRAM_FIRST_BYTE)
+
+/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
+ * rtc_lock held. Due to the index-port/data-port design of the RTC, we
+ * don't want two different things trying to get to it at once. (e.g. the
+ * periodic 11 min sync from kernel/time/ntp.c vs. this driver.)
+ */
+
+static unsigned char __nvram_read_byte(int i)
+{
+ return CMOS_READ(NVRAM_FIRST_BYTE + i);
+}
+
+static unsigned char pc_nvram_read_byte(int i)
+{
+ unsigned long flags;
+ unsigned char c;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ c = __nvram_read_byte(i);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return c;
+}
+
+/* This races nicely with trying to read with checksum checking (nvram_read) */
+static void __nvram_write_byte(unsigned char c, int i)
+{
+ CMOS_WRITE(c, NVRAM_FIRST_BYTE + i);
+}
+
+static void pc_nvram_write_byte(unsigned char c, int i)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ __nvram_write_byte(c, i);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+}
+
+/* On PCs, the checksum is built only over bytes 2..31 */
+#define PC_CKS_RANGE_START 2
+#define PC_CKS_RANGE_END 31
+#define PC_CKS_LOC 32
+
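+/*
+ * The checksum is a 16-bit sum of bytes 2..31, stored big-endian at offsets
+ * 32 and 33 (all offsets relative to NVRAM_FIRST_BYTE).
+ */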
+static int __nvram_check_checksum(void)
+{
+ int i;
+ unsigned short sum = 0;
+ unsigned short expect;
+
+ for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
+ sum += __nvram_read_byte(i);
+ expect = __nvram_read_byte(PC_CKS_LOC)<<8 |
+ __nvram_read_byte(PC_CKS_LOC+1);
+ return (sum & 0xffff) == expect;
+}
+
+static void __nvram_set_checksum(void)
+{
+ int i;
+ unsigned short sum = 0;
+
+ for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
+ sum += __nvram_read_byte(i);
+ __nvram_write_byte(sum >> 8, PC_CKS_LOC);
+ __nvram_write_byte(sum & 0xff, PC_CKS_LOC + 1);
+}
+
+static long pc_nvram_set_checksum(void)
+{
+ spin_lock_irq(&rtc_lock);
+ __nvram_set_checksum();
+ spin_unlock_irq(&rtc_lock);
+ return 0;
+}
+
+static long pc_nvram_initialize(void)
+{
+ ssize_t i;
+
+ spin_lock_irq(&rtc_lock);
+ for (i = 0; i < NVRAM_BYTES; ++i)
+ __nvram_write_byte(0, i);
+ __nvram_set_checksum();
+ spin_unlock_irq(&rtc_lock);
+ return 0;
+}
+
+static ssize_t pc_nvram_get_size(void)
+{
+ return NVRAM_BYTES;
+}
+
+static ssize_t pc_nvram_read(char *buf, size_t count, loff_t *ppos)
+{
+ char *p = buf;
+ loff_t i;
+
+ spin_lock_irq(&rtc_lock);
+ if (!__nvram_check_checksum()) {
+ spin_unlock_irq(&rtc_lock);
+ return -EIO;
+ }
+ for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
+ *p = __nvram_read_byte(i);
+ spin_unlock_irq(&rtc_lock);
+
+ *ppos = i;
+ return p - buf;
+}
+
+static ssize_t pc_nvram_write(char *buf, size_t count, loff_t *ppos)
+{
+ char *p = buf;
+ loff_t i;
+
+ spin_lock_irq(&rtc_lock);
+ if (!__nvram_check_checksum()) {
+ spin_unlock_irq(&rtc_lock);
+ return -EIO;
+ }
+ for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
+ __nvram_write_byte(*p, i);
+ __nvram_set_checksum();
+ spin_unlock_irq(&rtc_lock);
+
+ *ppos = i;
+ return p - buf;
+}
+
+const struct nvram_ops arch_nvram_ops = {
+ .read = pc_nvram_read,
+ .write = pc_nvram_write,
+ .read_byte = pc_nvram_read_byte,
+ .write_byte = pc_nvram_write_byte,
+ .get_size = pc_nvram_get_size,
+ .set_checksum = pc_nvram_set_checksum,
+ .initialize = pc_nvram_initialize,
+};
+EXPORT_SYMBOL(arch_nvram_ops);
+#endif /* CONFIG_X86 */
+
+/*
+ * These are the file operation functions for user access to /dev/nvram
+ */
+
+static loff_t nvram_misc_llseek(struct file *file, loff_t offset, int origin)
+{
+ return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE,
+ nvram_size);
+}
+
+static ssize_t nvram_misc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *tmp;
+ ssize_t ret;
+
+
+ if (*ppos >= nvram_size)
+ return 0;
+
+ count = min_t(size_t, count, nvram_size - *ppos);
+ count = min_t(size_t, count, PAGE_SIZE);
+
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = nvram_read(tmp, count, ppos);
+ if (ret <= 0)
+ goto out;
+
+ if (copy_to_user(buf, tmp, ret)) {
+ *ppos -= ret;
+ ret = -EFAULT;
+ }
+
+out:
+ kfree(tmp);
+ return ret;
+}
+
+static ssize_t nvram_misc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *tmp;
+ ssize_t ret;
+
+ if (*ppos >= nvram_size)
+ return 0;
+
+ count = min_t(size_t, count, nvram_size - *ppos);
+ count = min_t(size_t, count, PAGE_SIZE);
+
+ tmp = memdup_user(buf, count);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ ret = nvram_write(tmp, count, ppos);
+ kfree(tmp);
+ return ret;
+}
+
+static long nvram_misc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long ret = -ENOTTY;
+
+ switch (cmd) {
+#ifdef CONFIG_PPC
+ case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
+ pr_warn("nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
+ fallthrough;
+ case IOC_NVRAM_GET_OFFSET:
+ ret = -EINVAL;
+#ifdef CONFIG_PPC_PMAC
+ if (machine_is(powermac)) {
+ int part, offset;
+
+ if (copy_from_user(&part, (void __user *)arg,
+ sizeof(part)) != 0)
+ return -EFAULT;
+ if (part < pmac_nvram_OF || part > pmac_nvram_NR)
+ return -EINVAL;
+ offset = pmac_get_partition(part);
+ if (offset < 0)
+ return -EINVAL;
+ if (copy_to_user((void __user *)arg,
+ &offset, sizeof(offset)) != 0)
+ return -EFAULT;
+ ret = 0;
+ }
+#endif
+ break;
+#ifdef CONFIG_PPC32
+ case IOC_NVRAM_SYNC:
+ if (ppc_md.nvram_sync != NULL) {
+ mutex_lock(&nvram_mutex);
+ ppc_md.nvram_sync();
+ mutex_unlock(&nvram_mutex);
+ }
+ ret = 0;
+ break;
+#endif
+#elif defined(CONFIG_X86) || defined(CONFIG_M68K)
+ case NVRAM_INIT:
+ /* initialize NVRAM contents and checksum */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (arch_nvram_ops.initialize != NULL) {
+ mutex_lock(&nvram_mutex);
+ ret = arch_nvram_ops.initialize();
+ mutex_unlock(&nvram_mutex);
+ }
+ break;
+ case NVRAM_SETCKS:
+ /* just set checksum, contents unchanged (maybe useful after
+ * checksum garbaged somehow...) */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (arch_nvram_ops.set_checksum != NULL) {
+ mutex_lock(&nvram_mutex);
+ ret = arch_nvram_ops.set_checksum();
+ mutex_unlock(&nvram_mutex);
+ }
+ break;
+#endif /* CONFIG_X86 || CONFIG_M68K */
+ }
+ return ret;
+}
+
+static int nvram_misc_open(struct inode *inode, struct file *file)
+{
+ spin_lock(&nvram_state_lock);
+
+ /* Prevent multiple readers/writers if desired. */
+ if ((nvram_open_cnt && (file->f_flags & O_EXCL)) ||
+ (nvram_open_mode & NVRAM_EXCL)) {
+ spin_unlock(&nvram_state_lock);
+ return -EBUSY;
+ }
+
+#if defined(CONFIG_X86) || defined(CONFIG_M68K)
+ /* Prevent multiple writers if the set_checksum ioctl is implemented. */
+ if ((arch_nvram_ops.set_checksum != NULL) &&
+ (file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE)) {
+ spin_unlock(&nvram_state_lock);
+ return -EBUSY;
+ }
+#endif
+
+ if (file->f_flags & O_EXCL)
+ nvram_open_mode |= NVRAM_EXCL;
+ if (file->f_mode & FMODE_WRITE)
+ nvram_open_mode |= NVRAM_WRITE;
+ nvram_open_cnt++;
+
+ spin_unlock(&nvram_state_lock);
+
+ return 0;
+}
+
+static int nvram_misc_release(struct inode *inode, struct file *file)
+{
+ spin_lock(&nvram_state_lock);
+
+ nvram_open_cnt--;
+
+ /* if only one instance is open, clear the EXCL bit */
+ if (nvram_open_mode & NVRAM_EXCL)
+ nvram_open_mode &= ~NVRAM_EXCL;
+ if (file->f_mode & FMODE_WRITE)
+ nvram_open_mode &= ~NVRAM_WRITE;
+
+ spin_unlock(&nvram_state_lock);
+
+ return 0;
+}
+
+#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS)
+static const char * const floppy_types[] = {
+ "none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M",
+ "3.5'' 2.88M", "3.5'' 2.88M"
+};
+
+static const char * const gfx_types[] = {
+ "EGA, VGA, ... (with BIOS)",
+ "CGA (40 cols)",
+ "CGA (80 cols)",
+ "monochrome",
+};
+
+static void pc_nvram_proc_read(unsigned char *nvram, struct seq_file *seq,
+ void *offset)
+{
+ int checksum;
+ int type;
+
+ spin_lock_irq(&rtc_lock);
+ checksum = __nvram_check_checksum();
+ spin_unlock_irq(&rtc_lock);
+
+ seq_printf(seq, "Checksum status: %svalid\n", checksum ? "" : "not ");
+
+ seq_printf(seq, "# floppies : %d\n",
+ (nvram[6] & 1) ? (nvram[6] >> 6) + 1 : 0);
+ seq_printf(seq, "Floppy 0 type : ");
+ type = nvram[2] >> 4;
+ if (type < ARRAY_SIZE(floppy_types))
+ seq_printf(seq, "%s\n", floppy_types[type]);
+ else
+ seq_printf(seq, "%d (unknown)\n", type);
+ seq_printf(seq, "Floppy 1 type : ");
+ type = nvram[2] & 0x0f;
+ if (type < ARRAY_SIZE(floppy_types))
+ seq_printf(seq, "%s\n", floppy_types[type]);
+ else
+ seq_printf(seq, "%d (unknown)\n", type);
+
+ seq_printf(seq, "HD 0 type : ");
+ type = nvram[4] >> 4;
+ if (type)
+ seq_printf(seq, "%02x\n", type == 0x0f ? nvram[11] : type);
+ else
+ seq_printf(seq, "none\n");
+
+ seq_printf(seq, "HD 1 type : ");
+ type = nvram[4] & 0x0f;
+ if (type)
+ seq_printf(seq, "%02x\n", type == 0x0f ? nvram[12] : type);
+ else
+ seq_printf(seq, "none\n");
+
+ seq_printf(seq, "HD type 48 data: %d/%d/%d C/H/S, precomp %d, lz %d\n",
+ nvram[18] | (nvram[19] << 8),
+ nvram[20], nvram[25],
+ nvram[21] | (nvram[22] << 8), nvram[23] | (nvram[24] << 8));
+ seq_printf(seq, "HD type 49 data: %d/%d/%d C/H/S, precomp %d, lz %d\n",
+ nvram[39] | (nvram[40] << 8),
+ nvram[41], nvram[46],
+ nvram[42] | (nvram[43] << 8), nvram[44] | (nvram[45] << 8));
+
+ seq_printf(seq, "DOS base memory: %d kB\n", nvram[7] | (nvram[8] << 8));
+ seq_printf(seq, "Extended memory: %d kB (configured), %d kB (tested)\n",
+ nvram[9] | (nvram[10] << 8), nvram[34] | (nvram[35] << 8));
+
+ seq_printf(seq, "Gfx adapter : %s\n",
+ gfx_types[(nvram[6] >> 4) & 3]);
+
+ seq_printf(seq, "FPU : %sinstalled\n",
+ (nvram[6] & 2) ? "" : "not ");
+
+ return;
+}
+
+static int nvram_proc_read(struct seq_file *seq, void *offset)
+{
+ unsigned char contents[NVRAM_BYTES];
+ int i = 0;
+
+ spin_lock_irq(&rtc_lock);
+ for (i = 0; i < NVRAM_BYTES; ++i)
+ contents[i] = __nvram_read_byte(i);
+ spin_unlock_irq(&rtc_lock);
+
+ pc_nvram_proc_read(contents, seq, offset);
+
+ return 0;
+}
+#endif /* CONFIG_X86 && CONFIG_PROC_FS */
+
+static const struct file_operations nvram_misc_fops = {
+ .owner = THIS_MODULE,
+ .llseek = nvram_misc_llseek,
+ .read = nvram_misc_read,
+ .write = nvram_misc_write,
+ .unlocked_ioctl = nvram_misc_ioctl,
+ .open = nvram_misc_open,
+ .release = nvram_misc_release,
+};
+
+static struct miscdevice nvram_misc = {
+ NVRAM_MINOR,
+ "nvram",
+ &nvram_misc_fops,
+};
+
+static int __init nvram_module_init(void)
+{
+ int ret;
+
+ nvram_size = nvram_get_size();
+ if (nvram_size < 0)
+ return nvram_size;
+
+ ret = misc_register(&nvram_misc);
+ if (ret) {
+ pr_err("nvram: can't misc_register on minor=%d\n", NVRAM_MINOR);
+ return ret;
+ }
+
+#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS)
+ if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read)) {
+ pr_err("nvram: can't create /proc/driver/nvram\n");
+ misc_deregister(&nvram_misc);
+ return -ENOMEM;
+ }
+#endif
+
+ pr_info("Non-volatile memory driver v" NVRAM_VERSION "\n");
+ return 0;
+}
+
+static void __exit nvram_module_exit(void)
+{
+#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS)
+ remove_proc_entry("driver/nvram", NULL);
+#endif
+ misc_deregister(&nvram_misc);
+}
+
+module_init(nvram_module_init);
+module_exit(nvram_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(NVRAM_MINOR);
+MODULE_ALIAS("devname:nvram");
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
new file mode 100644
index 0000000000..ea378c0ed5
--- /dev/null
+++ b/drivers/char/nwbutton.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NetWinder Button Driver-
+ * Copyright (C) Alex Holden <alex@linuxhacker.org> 1998, 1999.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+#include <linux/uaccess.h>
+#include <asm/irq.h>
+#include <asm/mach-types.h>
+
+#define __NWBUTTON_C /* Tell the header file who we are */
+#include "nwbutton.h"
+
+static void button_sequence_finished(struct timer_list *unused);
+
+static int button_press_count; /* The count of button presses */
+/* Times for the end of a sequence */
+static DEFINE_TIMER(button_timer, button_sequence_finished);
+static DECLARE_WAIT_QUEUE_HEAD(button_wait_queue); /* Used for blocking read */
+static char button_output_buffer[32]; /* Stores data to write out of device */
+static int bcount; /* The number of bytes in the buffer */
+static int bdelay = BUTTON_DELAY; /* The delay, in jiffies */
+static struct button_callback button_callback_list[32]; /* The callback list */
+static int callback_count; /* The number of callbacks registered */
+static int reboot_count = NUM_PRESSES_REBOOT; /* Number of presses to reboot */
+
+/*
+ * This function is called by other drivers to register a callback function
+ * to be called when a particular number of button presses occurs.
+ * The callback list is a static array of 32 entries (I somehow doubt many
+ * people are ever going to want to register more than 32 different actions
+ * to be performed by the kernel on different numbers of button presses ;).
+ * However, if an attempt is made to register a 33rd entry (perhaps a stuck
+ * loop somewhere registering the same entry over and over?), it will fail to
+ * do so and return -ENOMEM. If an attempt is made to register a null pointer,
+ * it will fail to do so and return -EINVAL.
+ * Because callbacks can be unregistered at random the list can become
+ * fragmented, so we need to search through the list until we find the first
+ * free entry.
+ *
+ * FIXME: Has anyone spotted any locking functions in this code recently ??
+ */
+
+int button_add_callback (void (*callback) (void), int count)
+{
+ int lp = 0;
+ if (callback_count == 32) {
+ return -ENOMEM;
+ }
+ if (!callback) {
+ return -EINVAL;
+ }
+ callback_count++;
+ for (; (button_callback_list [lp].callback); lp++);
+ button_callback_list [lp].callback = callback;
+ button_callback_list [lp].count = count;
+ return 0;
+}
+
+/*
+ * This function is called by other drivers to deregister a callback function.
+ * If you attempt to unregister a callback which does not exist, it will fail
+ * with -EINVAL. If there is more than one entry with the same address,
+ * because it searches the list from end to beginning, it will unregister the
+ * last one to be registered first (FILO- First In Last Out).
+ * Note that this is not necessarily true if the entries are not submitted
+ * at the same time, because another driver could have unregistered a callback
+ * between the submissions creating a gap earlier in the list, which would
+ * be filled first at submission time.
+ */
+
+int button_del_callback (void (*callback) (void))
+{
+ int lp = 31;
+ if (!callback) {
+ return -EINVAL;
+ }
+ while (lp >= 0) {
+ if ((button_callback_list [lp].callback) == callback) {
+ button_callback_list [lp].callback = NULL;
+ button_callback_list [lp].count = 0;
+ callback_count--;
+ return 0;
+ }
+ lp--;
+ }
+ return -EINVAL;
+}
+
+/*
+ * This function is called by button_sequence_finished to search through the
+ * list of callback functions, and call any of them whose count argument
+ * matches the current count of button presses. It starts at the beginning
+ * of the list and works up to the end. It will refuse to follow a null
+ * pointer (which should never happen anyway).
+ */
+
+static void button_consume_callbacks (int bpcount)
+{
+ int lp = 0;
+ for (; lp <= 31; lp++) {
+ if ((button_callback_list [lp].count) == bpcount) {
+ if (button_callback_list [lp].callback) {
+ button_callback_list[lp].callback();
+ }
+ }
+ }
+}
+
+/*
+ * This function is called when the button_timer times out.
+ * ie. When you don't press the button for bdelay jiffies, this is taken to
+ * mean you have ended the sequence of key presses, and this function is
+ * called to wind things up (write the press_count out to /dev/button, call
+ * any matching registered function callbacks, initiate reboot, etc.).
+ */
+
+static void button_sequence_finished(struct timer_list *unused)
+{
+ if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) &&
+ button_press_count == reboot_count)
+ kill_cad_pid(SIGINT, 1); /* Ask init to reboot us */
+ button_consume_callbacks (button_press_count);
+ bcount = sprintf (button_output_buffer, "%d\n", button_press_count);
+ button_press_count = 0; /* Reset the button press counter */
+ wake_up_interruptible (&button_wait_queue);
+}
+
+/*
+ * This handler is called when the orange button is pressed (GPIO 10 of the
+ * SuperIO chip, which maps to logical IRQ 26). If the press_count is 0,
+ * this is the first press, so it starts a timer and increments the counter.
+ * If it is higher than 0, it deletes the old timer, starts a new one, and
+ * increments the counter.
+ */
+
+static irqreturn_t button_handler (int irq, void *dev_id)
+{
+ button_press_count++;
+ mod_timer(&button_timer, jiffies + bdelay);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * This function is called when a user space program attempts to read
+ * /dev/nwbutton. It puts the device to sleep on the wait queue until
+ * button_sequence_finished writes some data to the buffer and flushes
+ * the queue, at which point it writes the data out to the device and
+ * returns the number of characters it has written. This function is
+ * reentrant, so that many processes can be attempting to read from the
+ * device at any one time.
+ */
+
+static int button_read (struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ DEFINE_WAIT(wait);
+ prepare_to_wait(&button_wait_queue, &wait, TASK_INTERRUPTIBLE);
+ schedule();
+ finish_wait(&button_wait_queue, &wait);
+ return (copy_to_user (buffer, &button_output_buffer, bcount))
+ ? -EFAULT : bcount;
+}
+
+/*
+ * This structure is the file operations structure, which specifies what
+ * callbacks functions the kernel should call when a user mode process
+ * attempts to perform these operations on the device.
+ */
+
+static const struct file_operations button_fops = {
+ .owner = THIS_MODULE,
+ .read = button_read,
+ .llseek = noop_llseek,
+};
+
+/*
+ * This structure is the misc device structure, which specifies the minor
+ * device number (158 in this case), the name of the device (for /proc/misc),
+ * and the address of the above file operations structure.
+ */
+
+static struct miscdevice button_misc_device = {
+ BUTTON_MINOR,
+ "nwbutton",
+ &button_fops,
+};
+
+/*
+ * This function is called to initialise the driver, either from misc.c at
+ * bootup if the driver is compiled into the kernel, or from init_module
+ * below at module insert time. It attempts to register the device node
+ * and the IRQ and fails with a warning message if either fails, though
+ * neither ever should because the device number and IRQ are unique to
+ * this driver.
+ */
+
+static int __init nwbutton_init(void)
+{
+ if (!machine_is_netwinder())
+ return -ENODEV;
+
+ printk (KERN_INFO "NetWinder Button Driver Version %s (C) Alex Holden "
+ "<alex@linuxhacker.org> 1998.\n", VERSION);
+
+ if (misc_register (&button_misc_device)) {
+ printk (KERN_WARNING "nwbutton: Couldn't register device 10, "
+ "%d.\n", BUTTON_MINOR);
+ return -EBUSY;
+ }
+
+ if (request_irq (IRQ_NETWINDER_BUTTON, button_handler, 0,
+ "nwbutton", NULL)) {
+ printk (KERN_WARNING "nwbutton: IRQ %d is not free.\n",
+ IRQ_NETWINDER_BUTTON);
+ misc_deregister (&button_misc_device);
+ return -EIO;
+ }
+ return 0;
+}
+
+static void __exit nwbutton_exit (void)
+{
+ free_irq (IRQ_NETWINDER_BUTTON, NULL);
+ misc_deregister (&button_misc_device);
+}
+
+
+MODULE_AUTHOR("Alex Holden");
+MODULE_LICENSE("GPL");
+
+module_init(nwbutton_init);
+module_exit(nwbutton_exit);
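
For illustration only (not part of the patch): per the driver comments above, a read on the button device blocks until a press sequence ends and then returns the press count as ASCII text. A minimal user-space consumer, assuming the node is /dev/nwbutton (misc minor BUTTON_MINOR), could look like this:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd = open("/dev/nwbutton", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/nwbutton");
		return 1;
	}
	for (;;) {
		/* Blocks until bdelay jiffies pass with no press, then the
		 * driver hands back the count as text, e.g. "2\n". */
		n = read(fd, buf, sizeof(buf) - 1);
		if (n <= 0)
			break;
		buf[n] = '\0';
		printf("presses in sequence: %s", buf);
	}
	close(fd);
	return 0;
}
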
diff --git a/drivers/char/nwbutton.h b/drivers/char/nwbutton.h
new file mode 100644
index 0000000000..f2b9fdc1f9
--- /dev/null
+++ b/drivers/char/nwbutton.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NWBUTTON_H
+#define __NWBUTTON_H
+
+/*
+ * NetWinder Button Driver-
+ * Copyright (C) Alex Holden <alex@linuxhacker.org> 1998, 1999.
+ */
+
+#ifdef __NWBUTTON_C /* Actually compiling the driver itself */
+
+/* Various defines: */
+
+#define NUM_PRESSES_REBOOT 2 /* How many presses to activate shutdown */
+#define BUTTON_DELAY 30 /* How many jiffies for sequence to end */
+#define VERSION "0.3" /* Driver version number */
+
+/* Structure definitions: */
+
+struct button_callback {
+ void (*callback) (void);
+ int count;
+};
+
+/* Function prototypes: */
+
+static void button_sequence_finished(struct timer_list *unused);
+static irqreturn_t button_handler (int irq, void *dev_id);
+int button_init (void);
+int button_add_callback (void (*callback) (void), int count);
+int button_del_callback (void (*callback) (void));
+static void button_consume_callbacks (int bpcount);
+
+#else /* Not compiling the driver itself */
+
+extern int button_add_callback (void (*callback) (void), int count);
+extern int button_del_callback (void (*callback) (void));
+
+#endif /* __NWBUTTON_C */
+#endif /* __NWBUTTON_H */
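
For illustration only (not part of the patch): a hypothetical in-kernel consumer of the callback API declared above. The names three_press_action/button_consumer_* are invented for this sketch, and since button_add_callback()/button_del_callback() are not EXPORT_SYMBOL'd here, such a consumer would have to be built into the same kernel image rather than loaded as a separate module.

#include <linux/module.h>
#include <linux/printk.h>

#include "nwbutton.h"		/* button_add_callback(), button_del_callback() */

static void three_press_action(void)
{
	pr_info("orange button pressed three times\n");
}

static int __init button_consumer_init(void)
{
	/* Run three_press_action() when exactly three presses are seen;
	 * returns -ENOMEM if all 32 callback slots are already taken. */
	return button_add_callback(three_press_action, 3);
}

static void __exit button_consumer_exit(void)
{
	button_del_callback(three_press_action);
}

module_init(button_consumer_init);
module_exit(button_consumer_exit);
MODULE_LICENSE("GPL");
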
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
new file mode 100644
index 0000000000..0973c2c2b0
--- /dev/null
+++ b/drivers/char/nwflash.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Flash memory interface rev.5 driver for the Intel
+ * Flash chips used on the NetWinder.
+ *
+ * 20/08/2000 RMK use __ioremap to map flash into virtual memory
+ * make a few more places use "volatile"
+ * 22/05/2001 RMK - Lock read against write
+ * - merge printk level changes (with mods) from Alan Cox.
+ * - use *ppos as the file position, not file->f_pos.
+ * - fix check for out of range pos and r/w size
+ *
+ * Please note that we are tampering with the only flash chip in the
+ * machine, which contains the bootup code. We therefore have the
+ * power to convert these machines into doorstops...
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/jiffies.h>
+
+#include <asm/hardware/dec21285.h>
+#include <asm/io.h>
+#include <asm/mach-types.h>
+#include <linux/uaccess.h>
+
+/*****************************************************************************/
+#include <asm/nwflash.h>
+
+#define NWFLASH_VERSION "6.4"
+
+static DEFINE_MUTEX(flash_mutex);
+static void kick_open(void);
+static int get_flash_id(void);
+static int erase_block(int nBlock);
+static int write_block(unsigned long p, const char __user *buf, int count);
+
+#define KFLASH_SIZE 1024*1024 //1 Meg
+#define KFLASH_SIZE4 4*1024*1024 //4 Meg
+#define KFLASH_ID 0x89A6 //Intel flash
+#define KFLASH_ID4 0xB0D4 //Intel flash 4Meg
+
+static bool flashdebug; //if set - we will display progress msgs
+
+static int gbWriteEnable;
+static int gbWriteBase64Enable;
+static volatile unsigned char *FLASH_BASE;
+static int gbFlashSize = KFLASH_SIZE;
+static DEFINE_MUTEX(nwflash_mutex);
+
+static int get_flash_id(void)
+{
+ volatile unsigned int c1, c2;
+
+ /*
+ * try to get flash chip ID
+ */
+ kick_open();
+ c2 = inb(0x80);
+ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x90;
+ udelay(15);
+ c1 = *(volatile unsigned char *) FLASH_BASE;
+ c2 = inb(0x80);
+
+ /*
+ * on 4 Meg flash the second byte is actually at offset 2...
+ */
+ if (c1 == 0xB0)
+ c2 = *(volatile unsigned char *) (FLASH_BASE + 2);
+ else
+ c2 = *(volatile unsigned char *) (FLASH_BASE + 1);
+
+ c2 += (c1 << 8);
+
+ /*
+ * set it back to read mode
+ */
+ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF;
+
+ if (c2 == KFLASH_ID4)
+ gbFlashSize = KFLASH_SIZE4;
+
+ return c2;
+}
+
+static long flash_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ mutex_lock(&flash_mutex);
+ switch (cmd) {
+ case CMD_WRITE_DISABLE:
+ gbWriteBase64Enable = 0;
+ gbWriteEnable = 0;
+ break;
+
+ case CMD_WRITE_ENABLE:
+ gbWriteEnable = 1;
+ break;
+
+ case CMD_WRITE_BASE64K_ENABLE:
+ gbWriteBase64Enable = 1;
+ break;
+
+ default:
+ gbWriteBase64Enable = 0;
+ gbWriteEnable = 0;
+ mutex_unlock(&flash_mutex);
+ return -EINVAL;
+ }
+ mutex_unlock(&flash_mutex);
+ return 0;
+}
+
+static ssize_t flash_read(struct file *file, char __user *buf, size_t size,
+ loff_t *ppos)
+{
+ ssize_t ret;
+
+ if (flashdebug)
+ printk(KERN_DEBUG "flash_read: flash_read: offset=0x%llx, "
+ "buffer=%p, count=0x%zx.\n", *ppos, buf, size);
+ /*
+ * We now lock against reads and writes. --rmk
+ */
+ if (mutex_lock_interruptible(&nwflash_mutex))
+ return -ERESTARTSYS;
+
+ ret = simple_read_from_buffer(buf, size, ppos, (void *)FLASH_BASE, gbFlashSize);
+ mutex_unlock(&nwflash_mutex);
+
+ return ret;
+}
+
+static ssize_t flash_write(struct file *file, const char __user *buf,
+ size_t size, loff_t * ppos)
+{
+ unsigned long p = *ppos;
+ unsigned int count = size;
+ int written;
+ int nBlock, temp, rc;
+ int i, j;
+
+ if (flashdebug)
+ printk("flash_write: offset=0x%lX, buffer=0x%p, count=0x%X.\n",
+ p, buf, count);
+
+ if (!gbWriteEnable)
+ return -EINVAL;
+
+ if (p < 64 * 1024 && (!gbWriteBase64Enable))
+ return -EINVAL;
+
+ /*
+ * check for out of range pos or count
+ */
+ if (p >= gbFlashSize)
+ return count ? -ENXIO : 0;
+
+ if (count > gbFlashSize - p)
+ count = gbFlashSize - p;
+
+ if (!access_ok(buf, count))
+ return -EFAULT;
+
+ /*
+ * We now lock against reads and writes. --rmk
+ */
+ if (mutex_lock_interruptible(&nwflash_mutex))
+ return -ERESTARTSYS;
+
+ written = 0;
+
+ nBlock = (int) p >> 16; //block # of 64K bytes
+
+ /*
+ * # of 64K blocks to erase and write
+ */
+ temp = ((int) (p + count) >> 16) - nBlock + 1;
+
+ /*
+ * write ends at exactly 64k boundary?
+ */
+ if (((int) (p + count) & 0xFFFF) == 0)
+ temp -= 1;
+
+ if (flashdebug)
+ printk(KERN_DEBUG "flash_write: writing %d block(s) "
+ "starting at %d.\n", temp, nBlock);
+
+ for (; temp; temp--, nBlock++) {
+ if (flashdebug)
+ printk(KERN_DEBUG "flash_write: erasing block %d.\n", nBlock);
+
+ /*
+ * first we have to erase the block(s), where we will write...
+ */
+ i = 0;
+ j = 0;
+ RetryBlock:
+ do {
+ rc = erase_block(nBlock);
+ i++;
+ } while (rc && i < 10);
+
+ if (rc) {
+ printk(KERN_ERR "flash_write: erase error %x\n", rc);
+ break;
+ }
+ if (flashdebug)
+ printk(KERN_DEBUG "flash_write: writing offset %lX, "
+ "from buf %p, bytes left %X.\n", p, buf,
+ count - written);
+
+ /*
+ * write_block will limit write to space left in this block
+ */
+ rc = write_block(p, buf, count - written);
+ j++;
+
+ /*
+ * if somehow write verify failed? Can't happen??
+ */
+ if (!rc) {
+ /*
+ * retry up to 10 times
+ */
+ if (j < 10)
+ goto RetryBlock;
+ else
+ /*
+ * else quit with error...
+ */
+ rc = -1;
+
+ }
+ if (rc < 0) {
+ printk(KERN_ERR "flash_write: write error %X\n", rc);
+ break;
+ }
+ p += rc;
+ buf += rc;
+ written += rc;
+ *ppos += rc;
+
+ if (flashdebug)
+ printk(KERN_DEBUG "flash_write: written 0x%X bytes OK.\n", written);
+ }
+
+ mutex_unlock(&nwflash_mutex);
+
+ return written;
+}
+
+
+/*
+ * The memory devices use the full 32/64 bits of the offset, and so we cannot
+ * check against negative addresses: they are ok. The return value is weird,
+ * though, in that case (0).
+ *
+ * also note that seeking relative to the "end of file" isn't supported:
+ * it has no meaning, so it returns -EINVAL.
+ */
+static loff_t flash_llseek(struct file *file, loff_t offset, int orig)
+{
+ loff_t ret;
+
+ mutex_lock(&flash_mutex);
+ if (flashdebug)
+ printk(KERN_DEBUG "flash_llseek: offset=0x%X, orig=0x%X.\n",
+ (unsigned int) offset, orig);
+
+ ret = no_seek_end_llseek_size(file, offset, orig, gbFlashSize);
+ mutex_unlock(&flash_mutex);
+ return ret;
+}
+
+
+/*
+ * assume that main Write routine did the parameter checking...
+ * so just go ahead and erase what was requested!
+ */
+
+static int erase_block(int nBlock)
+{
+ volatile unsigned int c1;
+ volatile unsigned char *pWritePtr;
+ unsigned long timeout;
+ int temp, temp1;
+
+ /*
+ * reset footbridge to the correct offset 0 (...0..3)
+ */
+ *CSR_ROMWRITEREG = 0;
+
+ /*
+ * dummy ROM read
+ */
+ c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000);
+
+ kick_open();
+ /*
+ * reset status if old errors
+ */
+ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;
+
+ /*
+ * erase a block...
+ * aim at the middle of a current block...
+ */
+ pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + 0x8000 + (nBlock << 16)));
+ /*
+ * dummy read
+ */
+ c1 = *pWritePtr;
+
+ kick_open();
+ /*
+ * erase
+ */
+ *(volatile unsigned char *) pWritePtr = 0x20;
+
+ /*
+ * confirm
+ */
+ *(volatile unsigned char *) pWritePtr = 0xD0;
+
+ /*
+ * wait 10 ms
+ */
+ msleep(10);
+
+ /*
+ * wait while erasing in process (up to 10 sec)
+ */
+ timeout = jiffies + 10 * HZ;
+ c1 = 0;
+ while (!(c1 & 0x80) && time_before(jiffies, timeout)) {
+ msleep(10);
+ /*
+ * read any address
+ */
+ c1 = *(volatile unsigned char *) (pWritePtr);
+ // printk("Flash_erase: status=%X.\n",c1);
+ }
+
+ /*
+ * set flash for normal read access
+ */
+ kick_open();
+// *(volatile unsigned char*)(FLASH_BASE+0x8000) = 0xFF;
+ *(volatile unsigned char *) pWritePtr = 0xFF; //back to normal operation
+
+ /*
+ * check if erase errors were reported
+ */
+ if (c1 & 0x20) {
+ printk(KERN_ERR "flash_erase: err at %p\n", pWritePtr);
+
+ /*
+ * reset error
+ */
+ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;
+ return -2;
+ }
+
+ /*
+ * just to make sure - verify if erased OK...
+ */
+ msleep(10);
+
+ pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + (nBlock << 16)));
+
+ for (temp = 0; temp < 16 * 1024; temp++, pWritePtr += 4) {
+ if ((temp1 = *(volatile unsigned int *) pWritePtr) != 0xFFFFFFFF) {
+ printk(KERN_ERR "flash_erase: verify err at %p = %X\n",
+ pWritePtr, temp1);
+ return -1;
+ }
+ }
+
+ return 0;
+
+}
+
+/*
+ * write_block will limit number of bytes written to the space in this block
+ */
+static int write_block(unsigned long p, const char __user *buf, int count)
+{
+ volatile unsigned int c1;
+ volatile unsigned int c2;
+ unsigned char *pWritePtr;
+ unsigned int uAddress;
+ unsigned int offset;
+ unsigned long timeout;
+ unsigned long timeout1;
+
+ pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p));
+
+ /*
+ * check if write will end in this block....
+ */
+ offset = p & 0xFFFF;
+
+ if (offset + count > 0x10000)
+ count = 0x10000 - offset;
+
+ /*
+ * wait up to 30 sec for this block
+ */
+ timeout = jiffies + 30 * HZ;
+
+ for (offset = 0; offset < count; offset++, pWritePtr++) {
+ uAddress = (unsigned int) pWritePtr;
+ uAddress &= 0xFFFFFFFC;
+ if (__get_user(c2, buf + offset))
+ return -EFAULT;
+
+ WriteRetry:
+ /*
+ * dummy read
+ */
+ c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000);
+
+ /*
+ * kick open the write gate
+ */
+ kick_open();
+
+ /*
+ * program footbridge to the correct offset...0..3
+ */
+ *CSR_ROMWRITEREG = (unsigned int) pWritePtr & 3;
+
+ /*
+ * write cmd
+ */
+ *(volatile unsigned char *) (uAddress) = 0x40;
+
+ /*
+ * data to write
+ */
+ *(volatile unsigned char *) (uAddress) = c2;
+
+ /*
+ * get status
+ */
+ *(volatile unsigned char *) (FLASH_BASE + 0x10000) = 0x70;
+
+ c1 = 0;
+
+ /*
+ * wait up to 1 sec for this byte
+ */
+ timeout1 = jiffies + 1 * HZ;
+
+ /*
+ * while not ready...
+ */
+ while (!(c1 & 0x80) && time_before(jiffies, timeout1))
+ c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000);
+
+ /*
+ * if timeout getting status
+ */
+ if (time_after_eq(jiffies, timeout1)) {
+ kick_open();
+ /*
+ * reset err
+ */
+ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;
+
+ goto WriteRetry;
+ }
+ /*
+ * switch on read access, as a default flash operation mode
+ */
+ kick_open();
+ /*
+ * read access
+ */
+ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF;
+
+ /*
+ * if hardware reports an error writing, and not timeout -
+ * reset the chip and retry
+ */
+ if (c1 & 0x10) {
+ kick_open();
+ /*
+ * reset err
+ */
+ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;
+
+ /*
+ * before timeout?
+ */
+ if (time_before(jiffies, timeout)) {
+ if (flashdebug)
+ printk(KERN_DEBUG "write_block: Retrying write at 0x%X)n",
+ pWritePtr - FLASH_BASE);
+
+ /*
+ * wait couple ms
+ */
+ msleep(10);
+
+ goto WriteRetry;
+ } else {
+ printk(KERN_ERR "write_block: timeout at 0x%X\n",
+ pWritePtr - FLASH_BASE);
+ /*
+ * return error -2
+ */
+ return -2;
+
+ }
+ }
+ }
+
+ msleep(10);
+
+ pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p));
+
+ for (offset = 0; offset < count; offset++) {
+ char c, c1;
+ if (__get_user(c, buf))
+ return -EFAULT;
+ buf++;
+ if ((c1 = *pWritePtr++) != c) {
+ printk(KERN_ERR "write_block: verify error at 0x%X (%02X!=%02X)\n",
+ pWritePtr - FLASH_BASE, c1, c);
+ return 0;
+ }
+ }
+
+ return count;
+}
+
+
+static void kick_open(void)
+{
+ unsigned long flags;
+
+ /*
+ * we want to write a bit pattern XXX1 to Xilinx to enable
+ * the write gate, which will be open for about the next 2ms.
+ */
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+ nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+
+ /*
+ * let the ISA bus catch on...
+ */
+ udelay(25);
+}
+
+static const struct file_operations flash_fops =
+{
+ .owner = THIS_MODULE,
+ .llseek = flash_llseek,
+ .read = flash_read,
+ .write = flash_write,
+ .unlocked_ioctl = flash_ioctl,
+};
+
+static struct miscdevice flash_miscdev =
+{
+ NWFLASH_MINOR,
+ "nwflash",
+ &flash_fops
+};
+
+static int __init nwflash_init(void)
+{
+ int ret = -ENODEV;
+
+ if (machine_is_netwinder()) {
+ int id;
+
+ FLASH_BASE = ioremap(DC21285_FLASH, KFLASH_SIZE4);
+ if (!FLASH_BASE)
+ goto out;
+
+ id = get_flash_id();
+ if ((id != KFLASH_ID) && (id != KFLASH_ID4)) {
+ ret = -ENXIO;
+ iounmap((void *)FLASH_BASE);
+ printk("Flash: incorrect ID 0x%04X.\n", id);
+ goto out;
+ }
+
+ printk("Flash ROM driver v.%s, flash device ID 0x%04X, size %d Mb.\n",
+ NWFLASH_VERSION, id, gbFlashSize / (1024 * 1024));
+
+ ret = misc_register(&flash_miscdev);
+ if (ret < 0) {
+ iounmap((void *)FLASH_BASE);
+ }
+ }
+out:
+ return ret;
+}
+
+static void __exit nwflash_exit(void)
+{
+ misc_deregister(&flash_miscdev);
+ iounmap((void *)FLASH_BASE);
+}
+
+MODULE_LICENSE("GPL");
+
+module_param(flashdebug, bool, 0644);
+
+module_init(nwflash_init);
+module_exit(nwflash_exit);
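
For illustration only (not part of the patch): writes to /dev/nwflash are refused until they are enabled through the CMD_WRITE_* ioctls handled in flash_ioctl() above. This sketch assumes the CMD_* constants are available from the kernel's <asm/nwflash.h> (it is not exported as a uapi header, so a real program may need to copy the definitions), and it deliberately stays above the first 64K so CMD_WRITE_BASE64K_ENABLE is not needed.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/nwflash.h>	/* CMD_WRITE_ENABLE, CMD_WRITE_DISABLE (kernel header) */

int main(void)
{
	char buf[4096];
	int fd = open("/dev/nwflash", O_RDWR);

	memset(buf, 0xff, sizeof(buf));
	if (fd < 0) {
		perror("open /dev/nwflash");
		return 1;
	}
	/* flash_write() returns -EINVAL until writing is enabled. */
	if (ioctl(fd, CMD_WRITE_ENABLE, 0) < 0) {
		perror("CMD_WRITE_ENABLE");
		return 1;
	}
	/* Stay out of the first 64K (boot block). */
	if (lseek(fd, 0x10000, SEEK_SET) < 0 ||
	    write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
		perror("write");

	ioctl(fd, CMD_WRITE_DISABLE, 0);
	close(fd);
	return 0;
}
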
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
new file mode 100644
index 0000000000..c39a836ebd
--- /dev/null
+++ b/drivers/char/pc8736x_gpio.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* linux/drivers/char/pc8736x_gpio.c
+
+ National Semiconductor PC8736x GPIO driver. Allows a user space
+ process to play with the GPIO pins.
+
+ Copyright (c) 2005,2006 Jim Cromie <jim.cromie@gmail.com>
+
+ adapted from linux/drivers/char/scx200_gpio.c
+ Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>,
+*/
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cdev.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/mutex.h>
+#include <linux/nsc_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+
+#define DEVNAME "pc8736x_gpio"
+
+MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
+MODULE_DESCRIPTION("NatSemi/Winbond PC-8736x GPIO Pin Driver");
+MODULE_LICENSE("GPL");
+
+static int major; /* default to dynamic major */
+module_param(major, int, 0);
+MODULE_PARM_DESC(major, "Major device number");
+
+static DEFINE_MUTEX(pc8736x_gpio_config_lock);
+static unsigned pc8736x_gpio_base;
+static u8 pc8736x_gpio_shadow[4];
+
+#define SIO_BASE1 0x2E /* 1st command-reg to check */
+#define SIO_BASE2 0x4E /* alt command-reg to check */
+
+#define SIO_SID 0x20 /* SuperI/O ID Register */
+#define SIO_SID_PC87365 0xe5 /* Expected value in ID Register for PC87365 */
+#define SIO_SID_PC87366 0xe9 /* Expected value in ID Register for PC87366 */
+
+#define SIO_CF1 0x21 /* chip config, bit0 is chip enable */
+
+#define PC8736X_GPIO_RANGE 16 /* ioaddr range */
+#define PC8736X_GPIO_CT 32 /* minors matching 4 8 bit ports */
+
+#define SIO_UNIT_SEL 0x7 /* unit select reg */
+#define SIO_UNIT_ACT 0x30 /* unit enable */
+#define SIO_GPIO_UNIT 0x7 /* unit number of GPIO */
+#define SIO_VLM_UNIT 0x0D
+#define SIO_TMS_UNIT 0x0E
+
+/* config-space addrs to read/write each unit's runtime addr */
+#define SIO_BASE_HADDR 0x60
+#define SIO_BASE_LADDR 0x61
+
+/* GPIO config-space pin-control addresses */
+#define SIO_GPIO_PIN_SELECT 0xF0
+#define SIO_GPIO_PIN_CONFIG 0xF1
+#define SIO_GPIO_PIN_EVENT 0xF2
+
+static unsigned char superio_cmd = 0;
+static unsigned char selected_device = 0xFF; /* bogus start val */
+
+/* GPIO port runtime access, functionality */
+static int port_offset[] = { 0, 4, 8, 10 }; /* non-uniform offsets ! */
+/* static int event_capable[] = { 1, 1, 0, 0 }; ports 2,3 are hobbled */
+
+#define PORT_OUT 0
+#define PORT_IN 1
+#define PORT_EVT_EN 2
+#define PORT_EVT_STST 3
+
+static struct platform_device *pdev; /* use in dev_*() */
+
+static inline void superio_outb(int addr, int val)
+{
+ outb_p(addr, superio_cmd);
+ outb_p(val, superio_cmd + 1);
+}
+
+static inline int superio_inb(int addr)
+{
+ outb_p(addr, superio_cmd);
+ return inb_p(superio_cmd + 1);
+}
+
+static int pc8736x_superio_present(void)
+{
+ int id;
+
+ /* try the 2 possible values, read a hardware reg to verify */
+ superio_cmd = SIO_BASE1;
+ id = superio_inb(SIO_SID);
+ if (id == SIO_SID_PC87365 || id == SIO_SID_PC87366)
+ return superio_cmd;
+
+ superio_cmd = SIO_BASE2;
+ id = superio_inb(SIO_SID);
+ if (id == SIO_SID_PC87365 || id == SIO_SID_PC87366)
+ return superio_cmd;
+
+ return 0;
+}
+
+static void device_select(unsigned devldn)
+{
+ superio_outb(SIO_UNIT_SEL, devldn);
+ selected_device = devldn;
+}
+
+static void select_pin(unsigned iminor)
+{
+ /* select GPIO port/pin from device minor number */
+ device_select(SIO_GPIO_UNIT);
+ superio_outb(SIO_GPIO_PIN_SELECT,
+ ((iminor << 1) & 0xF0) | (iminor & 0x7));
+}
+
+static inline u32 pc8736x_gpio_configure_fn(unsigned index, u32 mask, u32 bits,
+ u32 func_slct)
+{
+ u32 config, new_config;
+
+ mutex_lock(&pc8736x_gpio_config_lock);
+
+ device_select(SIO_GPIO_UNIT);
+ select_pin(index);
+
+ /* read current config value */
+ config = superio_inb(func_slct);
+
+ /* set new config */
+ new_config = (config & mask) | bits;
+ superio_outb(func_slct, new_config);
+
+ mutex_unlock(&pc8736x_gpio_config_lock);
+
+ return config;
+}
+
+static u32 pc8736x_gpio_configure(unsigned index, u32 mask, u32 bits)
+{
+ return pc8736x_gpio_configure_fn(index, mask, bits,
+ SIO_GPIO_PIN_CONFIG);
+}
+
+static int pc8736x_gpio_get(unsigned minor)
+{
+ int port, bit, val;
+
+ port = minor >> 3;
+ bit = minor & 7;
+ val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);
+ val >>= bit;
+ val &= 1;
+
+ dev_dbg(&pdev->dev, "_gpio_get(%d from %x bit %d) == val %d\n",
+ minor, pc8736x_gpio_base + port_offset[port] + PORT_IN, bit,
+ val);
+
+ return val;
+}
+
+static void pc8736x_gpio_set(unsigned minor, int val)
+{
+ int port, bit, curval;
+
+ minor &= 0x1f;
+ port = minor >> 3;
+ bit = minor & 7;
+ curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);
+
+ dev_dbg(&pdev->dev, "addr:%x cur:%x bit-pos:%d cur-bit:%x + new:%d -> bit-new:%d\n",
+ pc8736x_gpio_base + port_offset[port] + PORT_OUT,
+ curval, bit, (curval & ~(1 << bit)), val, (val << bit));
+
+ val = (curval & ~(1 << bit)) | (val << bit);
+
+ dev_dbg(&pdev->dev, "gpio_set(minor:%d port:%d bit:%d)"
+ " %2x -> %2x\n", minor, port, bit, curval, val);
+
+ outb_p(val, pc8736x_gpio_base + port_offset[port] + PORT_OUT);
+
+ curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);
+ val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);
+
+ dev_dbg(&pdev->dev, "wrote %x, read: %x\n", curval, val);
+ pc8736x_gpio_shadow[port] = val;
+}
+
+static int pc8736x_gpio_current(unsigned minor)
+{
+ int port, bit;
+ minor &= 0x1f;
+ port = minor >> 3;
+ bit = minor & 7;
+ return ((pc8736x_gpio_shadow[port] >> bit) & 0x01);
+}
+
+static void pc8736x_gpio_change(unsigned index)
+{
+ pc8736x_gpio_set(index, !pc8736x_gpio_current(index));
+}
+
+static struct nsc_gpio_ops pc8736x_gpio_ops = {
+ .owner = THIS_MODULE,
+ .gpio_config = pc8736x_gpio_configure,
+ .gpio_dump = nsc_gpio_dump,
+ .gpio_get = pc8736x_gpio_get,
+ .gpio_set = pc8736x_gpio_set,
+ .gpio_change = pc8736x_gpio_change,
+ .gpio_current = pc8736x_gpio_current
+};
+
+static int pc8736x_gpio_open(struct inode *inode, struct file *file)
+{
+ unsigned m = iminor(inode);
+ file->private_data = &pc8736x_gpio_ops;
+
+ dev_dbg(&pdev->dev, "open %d\n", m);
+
+ if (m >= PC8736X_GPIO_CT)
+ return -EINVAL;
+ return nonseekable_open(inode, file);
+}
+
+static const struct file_operations pc8736x_gpio_fileops = {
+ .owner = THIS_MODULE,
+ .open = pc8736x_gpio_open,
+ .write = nsc_gpio_write,
+ .read = nsc_gpio_read,
+ .llseek = no_llseek,
+};
+
+static void __init pc8736x_init_shadow(void)
+{
+ int port;
+
+ /* read the current values driven on the GPIO signals */
+ for (port = 0; port < 4; ++port)
+ pc8736x_gpio_shadow[port]
+ = inb_p(pc8736x_gpio_base + port_offset[port]
+ + PORT_OUT);
+
+}
+
+static struct cdev pc8736x_gpio_cdev;
+
+static int __init pc8736x_gpio_init(void)
+{
+ int rc;
+ dev_t devid;
+
+ pdev = platform_device_alloc(DEVNAME, 0);
+ if (!pdev)
+ return -ENOMEM;
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ rc = -ENODEV;
+ goto undo_platform_dev_alloc;
+ }
+ dev_info(&pdev->dev, "NatSemi pc8736x GPIO Driver Initializing\n");
+
+ if (!pc8736x_superio_present()) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "no device found\n");
+ goto undo_platform_dev_add;
+ }
+ pc8736x_gpio_ops.dev = &pdev->dev;
+
+ /* Verify that the chip and its GPIO unit are both enabled.
+ My BIOS does this, so I take minimum action here
+ */
+ rc = superio_inb(SIO_CF1);
+ if (!(rc & 0x01)) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "device not enabled\n");
+ goto undo_platform_dev_add;
+ }
+ device_select(SIO_GPIO_UNIT);
+ if (!superio_inb(SIO_UNIT_ACT)) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "GPIO unit not enabled\n");
+ goto undo_platform_dev_add;
+ }
+
+ /* read the GPIO unit base addr that chip responds to */
+ pc8736x_gpio_base = (superio_inb(SIO_BASE_HADDR) << 8
+ | superio_inb(SIO_BASE_LADDR));
+
+ if (!request_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE, DEVNAME)) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "GPIO ioport %x busy\n",
+ pc8736x_gpio_base);
+ goto undo_platform_dev_add;
+ }
+ dev_info(&pdev->dev, "GPIO ioport %x reserved\n", pc8736x_gpio_base);
+
+ if (major) {
+ devid = MKDEV(major, 0);
+ rc = register_chrdev_region(devid, PC8736X_GPIO_CT, DEVNAME);
+ } else {
+ rc = alloc_chrdev_region(&devid, 0, PC8736X_GPIO_CT, DEVNAME);
+ major = MAJOR(devid);
+ }
+
+ if (rc < 0) {
+ dev_err(&pdev->dev, "register-chrdev failed: %d\n", rc);
+ goto undo_request_region;
+ }
+ if (!major) {
+ major = rc;
+ dev_dbg(&pdev->dev, "got dynamic major %d\n", major);
+ }
+
+ pc8736x_init_shadow();
+
+ /* ignore minor errs, and succeed */
+ cdev_init(&pc8736x_gpio_cdev, &pc8736x_gpio_fileops);
+ cdev_add(&pc8736x_gpio_cdev, devid, PC8736X_GPIO_CT);
+
+ return 0;
+
+undo_request_region:
+ release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE);
+undo_platform_dev_add:
+ platform_device_del(pdev);
+undo_platform_dev_alloc:
+ platform_device_put(pdev);
+
+ return rc;
+}
+
+static void __exit pc8736x_gpio_cleanup(void)
+{
+ dev_dbg(&pdev->dev, "cleanup\n");
+
+ cdev_del(&pc8736x_gpio_cdev);
+ unregister_chrdev_region(MKDEV(major,0), PC8736X_GPIO_CT);
+ release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE);
+
+ platform_device_unregister(pdev);
+}
+
+module_init(pc8736x_gpio_init);
+module_exit(pc8736x_gpio_cleanup);
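
For illustration only (not part of the patch): each minor of the char device registered above maps to one of the 32 GPIO pins, and reads/writes go through the shared nsc_gpio helpers, which (as assumed here) interpret the written characters '0' and '1' as "drive the pin low/high" and report the pin state back as '0'/'1' on read. There is no automatic device node, so the sketch assumes one was created by hand, e.g. mknod /dev/pc8736x_gpio.7 c <major> 7.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char val;
	int fd = open("/dev/pc8736x_gpio.7", O_RDWR);	/* minor 7 == pin 7 */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Drive the pin high... */
	if (write(fd, "1", 1) != 1)
		perror("write");
	/* ...and read back what the input register now reports. */
	if (read(fd, &val, 1) == 1)
		printf("pin 7 reads back %c\n", val);
	close(fd);
	return 0;
}
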
diff --git a/drivers/char/powernv-op-panel.c b/drivers/char/powernv-op-panel.c
new file mode 100644
index 0000000000..3c99696b14
--- /dev/null
+++ b/drivers/char/powernv-op-panel.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OPAL Operator Panel Display Driver
+ *
+ * Copyright 2016, Suraj Jitindar Singh, IBM Corporation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+
+#include <asm/opal.h>
+
+/*
+ * This driver creates a character device (/dev/op_panel) which exposes the
+ * operator panel (character LCD display) on IBM Power Systems machines
+ * with FSPs.
+ * A character buffer written to the device will be displayed on the
+ * operator panel.
+ */
+
+static DEFINE_MUTEX(oppanel_mutex);
+
+static u32 num_lines, oppanel_size;
+static oppanel_line_t *oppanel_lines;
+static char *oppanel_data;
+
+static loff_t oppanel_llseek(struct file *filp, loff_t offset, int whence)
+{
+ return fixed_size_llseek(filp, offset, whence, oppanel_size);
+}
+
+static ssize_t oppanel_read(struct file *filp, char __user *userbuf, size_t len,
+ loff_t *f_pos)
+{
+ return simple_read_from_buffer(userbuf, len, f_pos, oppanel_data,
+ oppanel_size);
+}
+
+static int __op_panel_update_display(void)
+{
+ struct opal_msg msg;
+ int rc, token;
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ if (token != -ERESTARTSYS)
+ pr_debug("Couldn't get OPAL async token [token=%d]\n",
+ token);
+ return token;
+ }
+
+ rc = opal_write_oppanel_async(token, oppanel_lines, num_lines);
+ switch (rc) {
+ case OPAL_ASYNC_COMPLETION:
+ rc = opal_async_wait_response(token, &msg);
+ if (rc) {
+ pr_debug("Failed to wait for async response [rc=%d]\n",
+ rc);
+ break;
+ }
+ rc = opal_get_async_rc(msg);
+ if (rc != OPAL_SUCCESS) {
+ pr_debug("OPAL async call returned failed [rc=%d]\n",
+ rc);
+ break;
+ }
+ break;
+ case OPAL_SUCCESS:
+ break;
+ default:
+ pr_debug("OPAL write op-panel call failed [rc=%d]\n", rc);
+ }
+
+ opal_async_release_token(token);
+ return rc;
+}
+
+static ssize_t oppanel_write(struct file *filp, const char __user *userbuf,
+ size_t len, loff_t *f_pos)
+{
+ loff_t f_pos_prev = *f_pos;
+ ssize_t ret;
+ int rc;
+
+ if (!*f_pos)
+ memset(oppanel_data, ' ', oppanel_size);
+ else if (*f_pos >= oppanel_size)
+ return -EFBIG;
+
+ ret = simple_write_to_buffer(oppanel_data, oppanel_size, f_pos, userbuf,
+ len);
+ if (ret > 0) {
+ rc = __op_panel_update_display();
+ if (rc != OPAL_SUCCESS) {
+ pr_err_ratelimited("OPAL call failed to write to op panel display [rc=%d]\n",
+ rc);
+ *f_pos = f_pos_prev;
+ return -EIO;
+ }
+ }
+ return ret;
+}
+
+static int oppanel_open(struct inode *inode, struct file *filp)
+{
+ if (!mutex_trylock(&oppanel_mutex)) {
+ pr_debug("Device Busy\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int oppanel_release(struct inode *inode, struct file *filp)
+{
+ mutex_unlock(&oppanel_mutex);
+ return 0;
+}
+
+static const struct file_operations oppanel_fops = {
+ .owner = THIS_MODULE,
+ .llseek = oppanel_llseek,
+ .read = oppanel_read,
+ .write = oppanel_write,
+ .open = oppanel_open,
+ .release = oppanel_release
+};
+
+static struct miscdevice oppanel_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "op_panel",
+ .fops = &oppanel_fops
+};
+
+static int oppanel_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 line_len;
+ int rc, i;
+
+ rc = of_property_read_u32(np, "#length", &line_len);
+ if (rc) {
+ pr_err_ratelimited("Operator panel length property not found\n");
+ return rc;
+ }
+ rc = of_property_read_u32(np, "#lines", &num_lines);
+ if (rc) {
+ pr_err_ratelimited("Operator panel lines property not found\n");
+ return rc;
+ }
+ oppanel_size = line_len * num_lines;
+
+ pr_devel("Operator panel of size %u found with %u lines of length %u\n",
+ oppanel_size, num_lines, line_len);
+
+ oppanel_data = kcalloc(oppanel_size, sizeof(*oppanel_data), GFP_KERNEL);
+ if (!oppanel_data)
+ return -ENOMEM;
+
+ oppanel_lines = kcalloc(num_lines, sizeof(oppanel_line_t), GFP_KERNEL);
+ if (!oppanel_lines) {
+ rc = -ENOMEM;
+ goto free_oppanel_data;
+ }
+
+ memset(oppanel_data, ' ', oppanel_size);
+ for (i = 0; i < num_lines; i++) {
+ oppanel_lines[i].line_len = cpu_to_be64(line_len);
+ oppanel_lines[i].line = cpu_to_be64(__pa(&oppanel_data[i *
+ line_len]));
+ }
+
+ rc = misc_register(&oppanel_dev);
+ if (rc) {
+ pr_err_ratelimited("Failed to register as misc device\n");
+ goto free_oppanel;
+ }
+
+ return 0;
+
+free_oppanel:
+ kfree(oppanel_lines);
+free_oppanel_data:
+ kfree(oppanel_data);
+ return rc;
+}
+
+static int oppanel_remove(struct platform_device *pdev)
+{
+ misc_deregister(&oppanel_dev);
+ kfree(oppanel_lines);
+ kfree(oppanel_data);
+ return 0;
+}
+
+static const struct of_device_id oppanel_match[] = {
+ { .compatible = "ibm,opal-oppanel" },
+ { },
+};
+
+static struct platform_driver oppanel_driver = {
+ .driver = {
+ .name = "powernv-op-panel",
+ .of_match_table = oppanel_match,
+ },
+ .probe = oppanel_probe,
+ .remove = oppanel_remove,
+};
+
+module_platform_driver(oppanel_driver);
+
+MODULE_DEVICE_TABLE(of, oppanel_match);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PowerNV Operator Panel LCD Display Driver");
+MODULE_AUTHOR("Suraj Jitindar Singh <sjitindarsingh@gmail.com>");
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
new file mode 100644
index 0000000000..4c188e9e47
--- /dev/null
+++ b/drivers/char/ppdev.c
@@ -0,0 +1,879 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * linux/drivers/char/ppdev.c
+ *
+ * This is the code behind /dev/parport* -- it allows a user-space
+ * application to use the parport subsystem.
+ *
+ * Copyright (C) 1998-2000, 2002 Tim Waugh <tim@cyberelk.net>
+ *
+ * A /dev/parportx device node represents an arbitrary device
+ * on port 'x'. The following operations are possible:
+ *
+ * open do nothing, set up default IEEE 1284 protocol to be COMPAT
+ * close release port and unregister device (if necessary)
+ * ioctl
+ * EXCL register device exclusively (may fail)
+ * CLAIM (register device first time) parport_claim_or_block
+ * RELEASE parport_release
+ * SETMODE set the IEEE 1284 protocol to use for read/write
+ * SETPHASE set the IEEE 1284 phase of a particular mode. Not to be
+ * confused with ioctl(fd, SETPHASER, &stun). ;-)
+ * DATADIR data_forward / data_reverse
+ * WDATA write_data
+ * RDATA read_data
+ * WCONTROL write_control
+ * RCONTROL read_control
+ * FCONTROL frob_control
+ * RSTATUS read_status
+ * NEGOT parport_negotiate
+ * YIELD parport_yield_blocking
+ * WCTLONIRQ on interrupt, set control lines
+ * CLRIRQ clear (and return) interrupt count
+ * SETTIME sets device timeout (struct timeval)
+ * GETTIME gets device timeout (struct timeval)
+ * GETMODES gets hardware supported modes (unsigned int)
+ * GETMODE gets the current IEEE1284 mode
+ * GETPHASE gets the current IEEE1284 phase
+ * GETFLAGS gets current (user-visible) flags
+ * SETFLAGS sets current (user-visible) flags
+ * read/write read or write in current IEEE 1284 protocol
+ * select wait for interrupt (in readfds)
+ *
+ * Changes:
+ * Added SETTIME/GETTIME ioctl, Fred Barnes, 1999.
+ *
+ * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 2000/08/25
+ * - On error, copy_from_user and copy_to_user do not return -EFAULT;
+ * they return the positive number of bytes *not* copied due to address
+ * space errors.
+ *
+ * Added GETMODES/GETMODE/GETPHASE ioctls, Fred Barnes <frmb2@ukc.ac.uk>, 03/01/2001.
+ * Added GETFLAGS/SETFLAGS ioctls, Fred Barnes, 04/2001
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched/signal.h>
+#include <linux/device.h>
+#include <linux/ioctl.h>
+#include <linux/parport.h>
+#include <linux/ctype.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/major.h>
+#include <linux/ppdev.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
+
+#define PP_VERSION "ppdev: user-space parallel port driver"
+#define CHRDEV "ppdev"
+
+struct pp_struct {
+ struct pardevice *pdev;
+ wait_queue_head_t irq_wait;
+ atomic_t irqc;
+ unsigned int flags;
+ int irqresponse;
+ unsigned char irqctl;
+ struct ieee1284_info state;
+ struct ieee1284_info saved_state;
+ long default_inactivity;
+ int index;
+};
+
+/* should we use PARDEVICE_MAX here? */
+static struct device *devices[PARPORT_MAX];
+
+static DEFINE_IDA(ida_index);
+
+/* pp_struct.flags bitfields */
+#define PP_CLAIMED (1<<0)
+#define PP_EXCL (1<<1)
+
+/* Other constants */
+#define PP_INTERRUPT_TIMEOUT (10 * HZ) /* 10s */
+#define PP_BUFFER_SIZE 1024
+#define PARDEVICE_MAX 8
+
+static DEFINE_MUTEX(pp_do_mutex);
+
+/* define fixed sized ioctl cmd for y2038 migration */
+#define PPGETTIME32 _IOR(PP_IOCTL, 0x95, s32[2])
+#define PPSETTIME32 _IOW(PP_IOCTL, 0x96, s32[2])
+#define PPGETTIME64 _IOR(PP_IOCTL, 0x95, s64[2])
+#define PPSETTIME64 _IOW(PP_IOCTL, 0x96, s64[2])
+
+static inline void pp_enable_irq(struct pp_struct *pp)
+{
+ struct parport *port = pp->pdev->port;
+
+ port->ops->enable_irq(port);
+}
+
+static ssize_t pp_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int minor = iminor(file_inode(file));
+ struct pp_struct *pp = file->private_data;
+ char *kbuffer;
+ ssize_t bytes_read = 0;
+ struct parport *pport;
+ int mode;
+
+ if (!(pp->flags & PP_CLAIMED)) {
+ /* Don't have the port claimed */
+ pr_debug(CHRDEV "%x: claim the port first\n", minor);
+ return -EINVAL;
+ }
+
+ /* Trivial case. */
+ if (count == 0)
+ return 0;
+
+ kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
+ if (!kbuffer)
+ return -ENOMEM;
+ pport = pp->pdev->port;
+ mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
+
+ parport_set_timeout(pp->pdev,
+ (file->f_flags & O_NONBLOCK) ?
+ PARPORT_INACTIVITY_O_NONBLOCK :
+ pp->default_inactivity);
+
+ while (bytes_read == 0) {
+ ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE);
+
+ if (mode == IEEE1284_MODE_EPP) {
+ /* various specials for EPP mode */
+ int flags = 0;
+ size_t (*fn)(struct parport *, void *, size_t, int);
+
+ if (pp->flags & PP_W91284PIC)
+ flags |= PARPORT_W91284PIC;
+ if (pp->flags & PP_FASTREAD)
+ flags |= PARPORT_EPP_FAST;
+ if (pport->ieee1284.mode & IEEE1284_ADDR)
+ fn = pport->ops->epp_read_addr;
+ else
+ fn = pport->ops->epp_read_data;
+ bytes_read = (*fn)(pport, kbuffer, need, flags);
+ } else {
+ bytes_read = parport_read(pport, kbuffer, need);
+ }
+
+ if (bytes_read != 0)
+ break;
+
+ if (file->f_flags & O_NONBLOCK) {
+ bytes_read = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ bytes_read = -ERESTARTSYS;
+ break;
+ }
+
+ cond_resched();
+ }
+
+ parport_set_timeout(pp->pdev, pp->default_inactivity);
+
+ if (bytes_read > 0 && copy_to_user(buf, kbuffer, bytes_read))
+ bytes_read = -EFAULT;
+
+ kfree(kbuffer);
+ pp_enable_irq(pp);
+ return bytes_read;
+}
+
+static ssize_t pp_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int minor = iminor(file_inode(file));
+ struct pp_struct *pp = file->private_data;
+ char *kbuffer;
+ ssize_t bytes_written = 0;
+ ssize_t wrote;
+ int mode;
+ struct parport *pport;
+
+ if (!(pp->flags & PP_CLAIMED)) {
+ /* Don't have the port claimed */
+ pr_debug(CHRDEV "%x: claim the port first\n", minor);
+ return -EINVAL;
+ }
+
+ kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
+ if (!kbuffer)
+ return -ENOMEM;
+
+ pport = pp->pdev->port;
+ mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
+
+ parport_set_timeout(pp->pdev,
+ (file->f_flags & O_NONBLOCK) ?
+ PARPORT_INACTIVITY_O_NONBLOCK :
+ pp->default_inactivity);
+
+ while (bytes_written < count) {
+ ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE);
+
+ if (copy_from_user(kbuffer, buf + bytes_written, n)) {
+ bytes_written = -EFAULT;
+ break;
+ }
+
+ if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) {
+ /* do a fast EPP write */
+ if (pport->ieee1284.mode & IEEE1284_ADDR) {
+ wrote = pport->ops->epp_write_addr(pport,
+ kbuffer, n, PARPORT_EPP_FAST);
+ } else {
+ wrote = pport->ops->epp_write_data(pport,
+ kbuffer, n, PARPORT_EPP_FAST);
+ }
+ } else {
+ wrote = parport_write(pp->pdev->port, kbuffer, n);
+ }
+
+ if (wrote <= 0) {
+ if (!bytes_written)
+ bytes_written = wrote;
+ break;
+ }
+
+ bytes_written += wrote;
+
+ if (file->f_flags & O_NONBLOCK) {
+ if (!bytes_written)
+ bytes_written = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current))
+ break;
+
+ cond_resched();
+ }
+
+ parport_set_timeout(pp->pdev, pp->default_inactivity);
+
+ kfree(kbuffer);
+ pp_enable_irq(pp);
+ return bytes_written;
+}
+
+static void pp_irq(void *private)
+{
+ struct pp_struct *pp = private;
+
+ if (pp->irqresponse) {
+ parport_write_control(pp->pdev->port, pp->irqctl);
+ pp->irqresponse = 0;
+ }
+
+ atomic_inc(&pp->irqc);
+ wake_up_interruptible(&pp->irq_wait);
+}
+
+static int register_device(int minor, struct pp_struct *pp)
+{
+ struct parport *port;
+ struct pardevice *pdev = NULL;
+ char *name;
+ struct pardev_cb ppdev_cb;
+ int rc = 0, index;
+
+ name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
+ if (name == NULL)
+ return -ENOMEM;
+
+ port = parport_find_number(minor);
+ if (!port) {
+ pr_warn("%s: no associated port!\n", name);
+ rc = -ENXIO;
+ goto err;
+ }
+
+ index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
+ memset(&ppdev_cb, 0, sizeof(ppdev_cb));
+ ppdev_cb.irq_func = pp_irq;
+ ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
+ ppdev_cb.private = pp;
+ pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
+ parport_put_port(port);
+
+ if (!pdev) {
+ pr_warn("%s: failed to register device!\n", name);
+ rc = -ENXIO;
+ ida_simple_remove(&ida_index, index);
+ goto err;
+ }
+
+ pp->pdev = pdev;
+ pp->index = index;
+ dev_dbg(&pdev->dev, "registered pardevice\n");
+err:
+ kfree(name);
+ return rc;
+}
+
+static enum ieee1284_phase init_phase(int mode)
+{
+ switch (mode & ~(IEEE1284_DEVICEID
+ | IEEE1284_ADDR)) {
+ case IEEE1284_MODE_NIBBLE:
+ case IEEE1284_MODE_BYTE:
+ return IEEE1284_PH_REV_IDLE;
+ }
+ return IEEE1284_PH_FWD_IDLE;
+}
+
+static int pp_set_timeout(struct pardevice *pdev, long tv_sec, int tv_usec)
+{
+ long to_jiffies;
+
+ if ((tv_sec < 0) || (tv_usec < 0))
+ return -EINVAL;
+
+ to_jiffies = usecs_to_jiffies(tv_usec);
+ to_jiffies += tv_sec * HZ;
+ if (to_jiffies <= 0)
+ return -EINVAL;
+
+ pdev->timeout = to_jiffies;
+ return 0;
+}
+
+static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ unsigned int minor = iminor(file_inode(file));
+ struct pp_struct *pp = file->private_data;
+ struct parport *port;
+ void __user *argp = (void __user *)arg;
+ struct ieee1284_info *info;
+ unsigned char reg;
+ unsigned char mask;
+ int mode;
+ s32 time32[2];
+ s64 time64[2];
+ struct timespec64 ts;
+ int ret;
+
+	/* First handle the commands that do not need the port to be claimed. */
+ switch (cmd) {
+ case PPCLAIM:
+ {
+ if (pp->flags & PP_CLAIMED) {
+ dev_dbg(&pp->pdev->dev, "you've already got it!\n");
+ return -EINVAL;
+ }
+
+ /* Deferred device registration. */
+ if (!pp->pdev) {
+ int err = register_device(minor, pp);
+
+ if (err)
+ return err;
+ }
+
+ ret = parport_claim_or_block(pp->pdev);
+ if (ret < 0)
+ return ret;
+
+ pp->flags |= PP_CLAIMED;
+
+ /* For interrupt-reporting to work, we need to be
+ * informed of each interrupt. */
+ pp_enable_irq(pp);
+
+ /* We may need to fix up the state machine. */
+ info = &pp->pdev->port->ieee1284;
+ pp->saved_state.mode = info->mode;
+ pp->saved_state.phase = info->phase;
+ info->mode = pp->state.mode;
+ info->phase = pp->state.phase;
+ pp->default_inactivity = parport_set_timeout(pp->pdev, 0);
+ parport_set_timeout(pp->pdev, pp->default_inactivity);
+
+ return 0;
+ }
+ case PPEXCL:
+ if (pp->pdev) {
+ dev_dbg(&pp->pdev->dev,
+ "too late for PPEXCL; already registered\n");
+ if (pp->flags & PP_EXCL)
+ /* But it's not really an error. */
+ return 0;
+ /* There's no chance of making the driver happy. */
+ return -EINVAL;
+ }
+
+ /* Just remember to register the device exclusively
+ * when we finally do the registration. */
+ pp->flags |= PP_EXCL;
+ return 0;
+ case PPSETMODE:
+ {
+ int mode;
+
+ if (copy_from_user(&mode, argp, sizeof(mode)))
+ return -EFAULT;
+ /* FIXME: validate mode */
+ pp->state.mode = mode;
+ pp->state.phase = init_phase(mode);
+
+ if (pp->flags & PP_CLAIMED) {
+ pp->pdev->port->ieee1284.mode = mode;
+ pp->pdev->port->ieee1284.phase = pp->state.phase;
+ }
+
+ return 0;
+ }
+ case PPGETMODE:
+ {
+ int mode;
+
+ if (pp->flags & PP_CLAIMED)
+ mode = pp->pdev->port->ieee1284.mode;
+ else
+ mode = pp->state.mode;
+
+ if (copy_to_user(argp, &mode, sizeof(mode)))
+ return -EFAULT;
+ return 0;
+ }
+ case PPSETPHASE:
+ {
+ int phase;
+
+ if (copy_from_user(&phase, argp, sizeof(phase)))
+ return -EFAULT;
+
+ /* FIXME: validate phase */
+ pp->state.phase = phase;
+
+ if (pp->flags & PP_CLAIMED)
+ pp->pdev->port->ieee1284.phase = phase;
+
+ return 0;
+ }
+ case PPGETPHASE:
+ {
+ int phase;
+
+ if (pp->flags & PP_CLAIMED)
+ phase = pp->pdev->port->ieee1284.phase;
+ else
+ phase = pp->state.phase;
+ if (copy_to_user(argp, &phase, sizeof(phase)))
+ return -EFAULT;
+ return 0;
+ }
+ case PPGETMODES:
+ {
+ unsigned int modes;
+
+ port = parport_find_number(minor);
+ if (!port)
+ return -ENODEV;
+
+ modes = port->modes;
+ parport_put_port(port);
+ if (copy_to_user(argp, &modes, sizeof(modes)))
+ return -EFAULT;
+ return 0;
+ }
+ case PPSETFLAGS:
+ {
+ int uflags;
+
+ if (copy_from_user(&uflags, argp, sizeof(uflags)))
+ return -EFAULT;
+ pp->flags &= ~PP_FLAGMASK;
+ pp->flags |= (uflags & PP_FLAGMASK);
+ return 0;
+ }
+ case PPGETFLAGS:
+ {
+ int uflags;
+
+ uflags = pp->flags & PP_FLAGMASK;
+ if (copy_to_user(argp, &uflags, sizeof(uflags)))
+ return -EFAULT;
+ return 0;
+ }
+ } /* end switch() */
+
+ /* Everything else requires the port to be claimed, so check
+ * that now. */
+ if ((pp->flags & PP_CLAIMED) == 0) {
+ pr_debug(CHRDEV "%x: claim the port first\n", minor);
+ return -EINVAL;
+ }
+
+ port = pp->pdev->port;
+ switch (cmd) {
+ case PPRSTATUS:
+ reg = parport_read_status(port);
+ if (copy_to_user(argp, &reg, sizeof(reg)))
+ return -EFAULT;
+ return 0;
+ case PPRDATA:
+ reg = parport_read_data(port);
+ if (copy_to_user(argp, &reg, sizeof(reg)))
+ return -EFAULT;
+ return 0;
+ case PPRCONTROL:
+ reg = parport_read_control(port);
+ if (copy_to_user(argp, &reg, sizeof(reg)))
+ return -EFAULT;
+ return 0;
+ case PPYIELD:
+ parport_yield_blocking(pp->pdev);
+ return 0;
+
+ case PPRELEASE:
+ /* Save the state machine's state. */
+ info = &pp->pdev->port->ieee1284;
+ pp->state.mode = info->mode;
+ pp->state.phase = info->phase;
+ info->mode = pp->saved_state.mode;
+ info->phase = pp->saved_state.phase;
+ parport_release(pp->pdev);
+ pp->flags &= ~PP_CLAIMED;
+ return 0;
+
+ case PPWCONTROL:
+ if (copy_from_user(&reg, argp, sizeof(reg)))
+ return -EFAULT;
+ parport_write_control(port, reg);
+ return 0;
+
+ case PPWDATA:
+ if (copy_from_user(&reg, argp, sizeof(reg)))
+ return -EFAULT;
+ parport_write_data(port, reg);
+ return 0;
+
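+	/*
+	 * PPFCONTROL takes two bytes from userspace: a mask followed by a
+	 * value, which are handed to parport_frob_control() to update the
+	 * control register.
+	 */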
+ case PPFCONTROL:
+ if (copy_from_user(&mask, argp,
+ sizeof(mask)))
+ return -EFAULT;
+ if (copy_from_user(&reg, 1 + (unsigned char __user *) arg,
+ sizeof(reg)))
+ return -EFAULT;
+ parport_frob_control(port, mask, reg);
+ return 0;
+
+ case PPDATADIR:
+ if (copy_from_user(&mode, argp, sizeof(mode)))
+ return -EFAULT;
+ if (mode)
+ port->ops->data_reverse(port);
+ else
+ port->ops->data_forward(port);
+ return 0;
+
+ case PPNEGOT:
+ if (copy_from_user(&mode, argp, sizeof(mode)))
+ return -EFAULT;
+ switch ((ret = parport_negotiate(port, mode))) {
+ case 0: break;
+ case -1: /* handshake failed, peripheral not IEEE 1284 */
+ ret = -EIO;
+ break;
+ case 1: /* handshake succeeded, peripheral rejected mode */
+ ret = -ENXIO;
+ break;
+ }
+ pp_enable_irq(pp);
+ return ret;
+
+ case PPWCTLONIRQ:
+ if (copy_from_user(&reg, argp, sizeof(reg)))
+ return -EFAULT;
+
+ /* Remember what to set the control lines to, for next
+ * time we get an interrupt. */
+ pp->irqctl = reg;
+ pp->irqresponse = 1;
+ return 0;
+
+ case PPCLRIRQ:
+ ret = atomic_read(&pp->irqc);
+ if (copy_to_user(argp, &ret, sizeof(ret)))
+ return -EFAULT;
+ atomic_sub(ret, &pp->irqc);
+ return 0;
+
+ case PPSETTIME32:
+ if (copy_from_user(time32, argp, sizeof(time32)))
+ return -EFAULT;
+
+ if ((time32[0] < 0) || (time32[1] < 0))
+ return -EINVAL;
+
+ return pp_set_timeout(pp->pdev, time32[0], time32[1]);
+
+ case PPSETTIME64:
+ if (copy_from_user(time64, argp, sizeof(time64)))
+ return -EFAULT;
+
+ if ((time64[0] < 0) || (time64[1] < 0))
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
+ time64[1] >>= 32;
+
+ return pp_set_timeout(pp->pdev, time64[0], time64[1]);
+
+ case PPGETTIME32:
+ jiffies_to_timespec64(pp->pdev->timeout, &ts);
+ time32[0] = ts.tv_sec;
+ time32[1] = ts.tv_nsec / NSEC_PER_USEC;
+
+ if (copy_to_user(argp, time32, sizeof(time32)))
+ return -EFAULT;
+
+ return 0;
+
+ case PPGETTIME64:
+ jiffies_to_timespec64(pp->pdev->timeout, &ts);
+ time64[0] = ts.tv_sec;
+ time64[1] = ts.tv_nsec / NSEC_PER_USEC;
+
+ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
+ time64[1] <<= 32;
+
+ if (copy_to_user(argp, time64, sizeof(time64)))
+ return -EFAULT;
+
+ return 0;
+
+ default:
+ dev_dbg(&pp->pdev->dev, "What? (cmd=0x%x)\n", cmd);
+ return -EINVAL;
+ }
+
+ /* Keep the compiler happy */
+ return 0;
+}
+
+static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ mutex_lock(&pp_do_mutex);
+ ret = pp_do_ioctl(file, cmd, arg);
+ mutex_unlock(&pp_do_mutex);
+ return ret;
+}
+
+static int pp_open(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ struct pp_struct *pp;
+
+ if (minor >= PARPORT_MAX)
+ return -ENXIO;
+
+ pp = kmalloc(sizeof(struct pp_struct), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+
+ pp->state.mode = IEEE1284_MODE_COMPAT;
+ pp->state.phase = init_phase(pp->state.mode);
+ pp->flags = 0;
+ pp->irqresponse = 0;
+ atomic_set(&pp->irqc, 0);
+ init_waitqueue_head(&pp->irq_wait);
+
+ /* Defer the actual device registration until the first claim.
+ * That way, we know whether or not the driver wants to have
+ * exclusive access to the port (PPEXCL).
+ */
+ pp->pdev = NULL;
+ file->private_data = pp;
+
+ return 0;
+}
+
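+/*
+ * When the file is closed, the port is negotiated back to compatibility
+ * mode if needed, released, and the pardevice unregistered, even if
+ * userspace forgot to do any of this itself.
+ */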
+static int pp_release(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ struct pp_struct *pp = file->private_data;
+ int compat_negot;
+
+ compat_negot = 0;
+ if (!(pp->flags & PP_CLAIMED) && pp->pdev &&
+ (pp->state.mode != IEEE1284_MODE_COMPAT)) {
+ struct ieee1284_info *info;
+
+ /* parport released, but not in compatibility mode */
+ parport_claim_or_block(pp->pdev);
+ pp->flags |= PP_CLAIMED;
+ info = &pp->pdev->port->ieee1284;
+ pp->saved_state.mode = info->mode;
+ pp->saved_state.phase = info->phase;
+ info->mode = pp->state.mode;
+ info->phase = pp->state.phase;
+ compat_negot = 1;
+ } else if ((pp->flags & PP_CLAIMED) && pp->pdev &&
+ (pp->pdev->port->ieee1284.mode != IEEE1284_MODE_COMPAT)) {
+ compat_negot = 2;
+ }
+ if (compat_negot) {
+ parport_negotiate(pp->pdev->port, IEEE1284_MODE_COMPAT);
+ dev_dbg(&pp->pdev->dev,
+ "negotiated back to compatibility mode because user-space forgot\n");
+ }
+
+ if ((pp->flags & PP_CLAIMED) && pp->pdev) {
+ struct ieee1284_info *info;
+
+ info = &pp->pdev->port->ieee1284;
+ pp->state.mode = info->mode;
+ pp->state.phase = info->phase;
+ info->mode = pp->saved_state.mode;
+ info->phase = pp->saved_state.phase;
+ parport_release(pp->pdev);
+ if (compat_negot != 1) {
+			pr_debug(CHRDEV "%x: released pardevice because user-space forgot\n",
+				 minor);
+ }
+ }
+
+ if (pp->pdev) {
+ parport_unregister_device(pp->pdev);
+ ida_simple_remove(&ida_index, pp->index);
+ pp->pdev = NULL;
+ pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
+ }
+
+ kfree(pp);
+
+ return 0;
+}
+
+/* No kernel lock held - fine */
+static __poll_t pp_poll(struct file *file, poll_table *wait)
+{
+ struct pp_struct *pp = file->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(file, &pp->irq_wait, wait);
+ if (atomic_read(&pp->irqc))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ return mask;
+}
+
+static const struct class ppdev_class = {
+ .name = CHRDEV,
+};
+
+static const struct file_operations pp_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = pp_read,
+ .write = pp_write,
+ .poll = pp_poll,
+ .unlocked_ioctl = pp_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .open = pp_open,
+ .release = pp_release,
+};
+
+static void pp_attach(struct parport *port)
+{
+ struct device *ret;
+
+ if (devices[port->number])
+ return;
+
+ ret = device_create(&ppdev_class, port->dev,
+ MKDEV(PP_MAJOR, port->number), NULL,
+ "parport%d", port->number);
+ if (IS_ERR(ret)) {
+ pr_err("Failed to create device parport%d\n",
+ port->number);
+ return;
+ }
+ devices[port->number] = ret;
+}
+
+static void pp_detach(struct parport *port)
+{
+ if (!devices[port->number])
+ return;
+
+ device_destroy(&ppdev_class, MKDEV(PP_MAJOR, port->number));
+ devices[port->number] = NULL;
+}
+
+static int pp_probe(struct pardevice *par_dev)
+{
+ struct device_driver *drv = par_dev->dev.driver;
+ int len = strlen(drv->name);
+
+ if (strncmp(par_dev->name, drv->name, len))
+ return -ENODEV;
+
+ return 0;
+}
+
+static struct parport_driver pp_driver = {
+ .name = CHRDEV,
+ .probe = pp_probe,
+ .match_port = pp_attach,
+ .detach = pp_detach,
+ .devmodel = true,
+};
+
+static int __init ppdev_init(void)
+{
+ int err = 0;
+
+ if (register_chrdev(PP_MAJOR, CHRDEV, &pp_fops)) {
+ pr_warn(CHRDEV ": unable to get major %d\n", PP_MAJOR);
+ return -EIO;
+ }
+ err = class_register(&ppdev_class);
+ if (err)
+ goto out_chrdev;
+
+ err = parport_register_driver(&pp_driver);
+ if (err < 0) {
+ pr_warn(CHRDEV ": unable to register with parport\n");
+ goto out_class;
+ }
+
+ pr_info(PP_VERSION "\n");
+ goto out;
+
+out_class:
+ class_unregister(&ppdev_class);
+out_chrdev:
+ unregister_chrdev(PP_MAJOR, CHRDEV);
+out:
+ return err;
+}
+
+static void __exit ppdev_cleanup(void)
+{
+ /* Clean up all parport stuff */
+ parport_unregister_driver(&pp_driver);
+ class_unregister(&ppdev_class);
+ unregister_chrdev(PP_MAJOR, CHRDEV);
+}
+
+module_init(ppdev_init);
+module_exit(ppdev_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(PP_MAJOR);
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
new file mode 100644
index 0000000000..23871cde41
--- /dev/null
+++ b/drivers/char/ps3flash.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PS3 FLASH ROM Storage Driver
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
+ */
+
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+
+#include <asm/lv1call.h>
+#include <asm/ps3stor.h>
+
+
+#define DEVICE_NAME "ps3flash"
+
+#define FLASH_BLOCK_SIZE (256*1024)
+
+
+struct ps3flash_private {
+ struct mutex mutex; /* Bounce buffer mutex */
+ u64 chunk_sectors;
+ int tag; /* Start sector of buffer, -1 if invalid */
+ bool dirty;
+};
+
+static struct ps3_storage_device *ps3flash_dev;
+
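+/*
+ * A single FLASH chunk is cached in the driver's bounce buffer:
+ * priv->tag is the start sector of the cached chunk (-1 if nothing is
+ * cached) and priv->dirty marks it as needing a writeback before the
+ * buffer can be reused for a different chunk.
+ */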
+static int ps3flash_read_write_sectors(struct ps3_storage_device *dev,
+ u64 start_sector, int write)
+{
+ struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+ u64 res = ps3stor_read_write_sectors(dev, dev->bounce_lpar,
+ start_sector, priv->chunk_sectors,
+ write);
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
+ __LINE__, write ? "write" : "read", res);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int ps3flash_writeback(struct ps3_storage_device *dev)
+{
+ struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+ int res;
+
+ if (!priv->dirty || priv->tag < 0)
+ return 0;
+
+ res = ps3flash_read_write_sectors(dev, priv->tag, 1);
+ if (res)
+ return res;
+
+ priv->dirty = false;
+ return 0;
+}
+
+static int ps3flash_fetch(struct ps3_storage_device *dev, u64 start_sector)
+{
+ struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+ int res;
+
+ if (start_sector == priv->tag)
+ return 0;
+
+ res = ps3flash_writeback(dev);
+ if (res)
+ return res;
+
+ priv->tag = -1;
+
+ res = ps3flash_read_write_sectors(dev, start_sector, 0);
+ if (res)
+ return res;
+
+ priv->tag = start_sector;
+ return 0;
+}
+
+static loff_t ps3flash_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct ps3_storage_device *dev = ps3flash_dev;
+ return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE,
+ dev->regions[dev->region_idx].size*dev->blk_size);
+}
+
+static ssize_t ps3flash_read(char __user *userbuf, void *kernelbuf,
+ size_t count, loff_t *pos)
+{
+ struct ps3_storage_device *dev = ps3flash_dev;
+ struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+ u64 size, sector, offset;
+ int res;
+ size_t remaining, n;
+ const void *src;
+
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: Reading %zu bytes at position %lld to U0x%p/K0x%p\n",
+ __func__, __LINE__, count, *pos, userbuf, kernelbuf);
+
+ size = dev->regions[dev->region_idx].size*dev->blk_size;
+ if (*pos >= size || !count)
+ return 0;
+
+ if (*pos + count > size) {
+ dev_dbg(&dev->sbd.core,
+ "%s:%u Truncating count from %zu to %llu\n", __func__,
+ __LINE__, count, size - *pos);
+ count = size - *pos;
+ }
+
+ sector = *pos / dev->bounce_size * priv->chunk_sectors;
+ offset = *pos % dev->bounce_size;
+
+ remaining = count;
+ do {
+ n = min_t(u64, remaining, dev->bounce_size - offset);
+ src = dev->bounce_buf + offset;
+
+ mutex_lock(&priv->mutex);
+
+ res = ps3flash_fetch(dev, sector);
+ if (res)
+ goto fail;
+
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: copy %lu bytes from 0x%p to U0x%p/K0x%p\n",
+ __func__, __LINE__, n, src, userbuf, kernelbuf);
+ if (userbuf) {
+ if (copy_to_user(userbuf, src, n)) {
+ res = -EFAULT;
+ goto fail;
+ }
+ userbuf += n;
+ }
+ if (kernelbuf) {
+ memcpy(kernelbuf, src, n);
+ kernelbuf += n;
+ }
+
+ mutex_unlock(&priv->mutex);
+
+ *pos += n;
+ remaining -= n;
+ sector += priv->chunk_sectors;
+ offset = 0;
+ } while (remaining > 0);
+
+ return count;
+
+fail:
+ mutex_unlock(&priv->mutex);
+ return res;
+}
+
+static ssize_t ps3flash_write(const char __user *userbuf,
+ const void *kernelbuf, size_t count, loff_t *pos)
+{
+ struct ps3_storage_device *dev = ps3flash_dev;
+ struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+ u64 size, sector, offset;
+ int res = 0;
+ size_t remaining, n;
+ void *dst;
+
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: Writing %zu bytes at position %lld from U0x%p/K0x%p\n",
+ __func__, __LINE__, count, *pos, userbuf, kernelbuf);
+
+ size = dev->regions[dev->region_idx].size*dev->blk_size;
+ if (*pos >= size || !count)
+ return 0;
+
+ if (*pos + count > size) {
+ dev_dbg(&dev->sbd.core,
+ "%s:%u Truncating count from %zu to %llu\n", __func__,
+ __LINE__, count, size - *pos);
+ count = size - *pos;
+ }
+
+ sector = *pos / dev->bounce_size * priv->chunk_sectors;
+ offset = *pos % dev->bounce_size;
+
+ remaining = count;
+ do {
+ n = min_t(u64, remaining, dev->bounce_size - offset);
+ dst = dev->bounce_buf + offset;
+
+ mutex_lock(&priv->mutex);
+
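+		/*
+		 * A partial-chunk write is a read-modify-write: fetch the
+		 * chunk into the bounce buffer first. A full-chunk write only
+		 * needs to flush whatever other dirty chunk may be cached.
+		 */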
+ if (n != dev->bounce_size)
+ res = ps3flash_fetch(dev, sector);
+ else if (sector != priv->tag)
+ res = ps3flash_writeback(dev);
+ if (res)
+ goto fail;
+
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: copy %lu bytes from U0x%p/K0x%p to 0x%p\n",
+ __func__, __LINE__, n, userbuf, kernelbuf, dst);
+ if (userbuf) {
+ if (copy_from_user(dst, userbuf, n)) {
+ res = -EFAULT;
+ goto fail;
+ }
+ userbuf += n;
+ }
+ if (kernelbuf) {
+ memcpy(dst, kernelbuf, n);
+ kernelbuf += n;
+ }
+
+ priv->tag = sector;
+ priv->dirty = true;
+
+ mutex_unlock(&priv->mutex);
+
+ *pos += n;
+ remaining -= n;
+ sector += priv->chunk_sectors;
+ offset = 0;
+ } while (remaining > 0);
+
+ return count;
+
+fail:
+ mutex_unlock(&priv->mutex);
+ return res;
+}
+
+static ssize_t ps3flash_user_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ return ps3flash_read(buf, NULL, count, pos);
+}
+
+static ssize_t ps3flash_user_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ return ps3flash_write(buf, NULL, count, pos);
+}
+
+static ssize_t ps3flash_kernel_read(void *buf, size_t count, loff_t pos)
+{
+ return ps3flash_read(NULL, buf, count, &pos);
+}
+
+static ssize_t ps3flash_kernel_write(const void *buf, size_t count,
+ loff_t pos)
+{
+ ssize_t res;
+ int wb;
+
+ res = ps3flash_write(NULL, buf, count, &pos);
+ if (res < 0)
+ return res;
+
+ /* Make kernel writes synchronous */
+ wb = ps3flash_writeback(ps3flash_dev);
+ if (wb)
+ return wb;
+
+ return res;
+}
+
+static int ps3flash_flush(struct file *file, fl_owner_t id)
+{
+ return ps3flash_writeback(ps3flash_dev);
+}
+
+static int ps3flash_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ struct inode *inode = file_inode(file);
+ int err;
+ inode_lock(inode);
+ err = ps3flash_writeback(ps3flash_dev);
+ inode_unlock(inode);
+ return err;
+}
+
+static irqreturn_t ps3flash_interrupt(int irq, void *data)
+{
+ struct ps3_storage_device *dev = data;
+ int res;
+ u64 tag, status;
+
+ res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
+
+ if (tag != dev->tag)
+ dev_err(&dev->sbd.core,
+ "%s:%u: tag mismatch, got %llx, expected %llx\n",
+ __func__, __LINE__, tag, dev->tag);
+
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n",
+ __func__, __LINE__, res, status);
+ } else {
+ dev->lv1_status = status;
+ complete(&dev->done);
+ }
+ return IRQ_HANDLED;
+}
+
+static const struct file_operations ps3flash_fops = {
+ .owner = THIS_MODULE,
+ .llseek = ps3flash_llseek,
+ .read = ps3flash_user_read,
+ .write = ps3flash_user_write,
+ .flush = ps3flash_flush,
+ .fsync = ps3flash_fsync,
+};
+
+static const struct ps3_os_area_flash_ops ps3flash_kernel_ops = {
+ .read = ps3flash_kernel_read,
+ .write = ps3flash_kernel_write,
+};
+
+static struct miscdevice ps3flash_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = DEVICE_NAME,
+ .fops = &ps3flash_fops,
+};
+
+static int ps3flash_probe(struct ps3_system_bus_device *_dev)
+{
+ struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
+ struct ps3flash_private *priv;
+ int error;
+ unsigned long tmp;
+
+ tmp = dev->regions[dev->region_idx].start*dev->blk_size;
+ if (tmp % FLASH_BLOCK_SIZE) {
+ dev_err(&dev->sbd.core,
+ "%s:%u region start %lu is not aligned\n", __func__,
+ __LINE__, tmp);
+ return -EINVAL;
+ }
+ tmp = dev->regions[dev->region_idx].size*dev->blk_size;
+ if (tmp % FLASH_BLOCK_SIZE) {
+ dev_err(&dev->sbd.core,
+ "%s:%u region size %lu is not aligned\n", __func__,
+ __LINE__, tmp);
+ return -EINVAL;
+ }
+
+ /* use static buffer, kmalloc cannot allocate 256 KiB */
+ if (!ps3flash_bounce_buffer.address)
+ return -ENODEV;
+
+ if (ps3flash_dev) {
+ dev_err(&dev->sbd.core,
+ "Only one FLASH device is supported\n");
+ return -EBUSY;
+ }
+
+ ps3flash_dev = dev;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ ps3_system_bus_set_drvdata(&dev->sbd, priv);
+ mutex_init(&priv->mutex);
+ priv->tag = -1;
+
+ dev->bounce_size = ps3flash_bounce_buffer.size;
+ dev->bounce_buf = ps3flash_bounce_buffer.address;
+ priv->chunk_sectors = dev->bounce_size / dev->blk_size;
+
+ error = ps3stor_setup(dev, ps3flash_interrupt);
+ if (error)
+ goto fail_free_priv;
+
+ ps3flash_misc.parent = &dev->sbd.core;
+ error = misc_register(&ps3flash_misc);
+ if (error) {
+ dev_err(&dev->sbd.core, "%s:%u: misc_register failed %d\n",
+ __func__, __LINE__, error);
+ goto fail_teardown;
+ }
+
+ dev_info(&dev->sbd.core, "%s:%u: registered misc device %d\n",
+ __func__, __LINE__, ps3flash_misc.minor);
+
+ ps3_os_area_flash_register(&ps3flash_kernel_ops);
+ return 0;
+
+fail_teardown:
+ ps3stor_teardown(dev);
+fail_free_priv:
+ kfree(priv);
+ ps3_system_bus_set_drvdata(&dev->sbd, NULL);
+fail:
+ ps3flash_dev = NULL;
+ return error;
+}
+
+static void ps3flash_remove(struct ps3_system_bus_device *_dev)
+{
+ struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
+
+ ps3_os_area_flash_register(NULL);
+ misc_deregister(&ps3flash_misc);
+ ps3stor_teardown(dev);
+ kfree(ps3_system_bus_get_drvdata(&dev->sbd));
+ ps3_system_bus_set_drvdata(&dev->sbd, NULL);
+ ps3flash_dev = NULL;
+}
+
+
+static struct ps3_system_bus_driver ps3flash = {
+ .match_id = PS3_MATCH_ID_STOR_FLASH,
+ .core.name = DEVICE_NAME,
+ .core.owner = THIS_MODULE,
+ .probe = ps3flash_probe,
+ .remove = ps3flash_remove,
+ .shutdown = ps3flash_remove,
+};
+
+
+static int __init ps3flash_init(void)
+{
+ return ps3_system_bus_driver_register(&ps3flash);
+}
+
+static void __exit ps3flash_exit(void)
+{
+ ps3_system_bus_driver_unregister(&ps3flash);
+}
+
+module_init(ps3flash_init);
+module_exit(ps3flash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PS3 FLASH ROM Storage Driver");
+MODULE_AUTHOR("Sony Corporation");
+MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_FLASH);
diff --git a/drivers/char/random.c b/drivers/char/random.c
new file mode 100644
index 0000000000..3cb37760df
--- /dev/null
+++ b/drivers/char/random.c
@@ -0,0 +1,1699 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
+ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
+ *
+ * This driver produces cryptographically secure pseudorandom data. It is divided
+ * into roughly six sections, each with a section header:
+ *
+ * - Initialization and readiness waiting.
+ * - Fast key erasure RNG, the "crng".
+ * - Entropy accumulation and extraction routines.
+ * - Entropy collection routines.
+ * - Userspace reader/writer interfaces.
+ * - Sysctl interface.
+ *
+ * The high level overview is that there is one input pool, into which
+ * various pieces of data are hashed. Prior to initialization, some of that
+ * data is then "credited" as having a certain number of bits of entropy.
+ * When enough bits of entropy are available, the hash is finalized and
+ * handed as a key to a stream cipher that expands it indefinitely for
+ * various consumers. This key is periodically refreshed as the various
+ * entropy collectors, described below, add data to the input pool.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/utsname.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/nodemask.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/percpu.h>
+#include <linux/ptrace.h>
+#include <linux/workqueue.h>
+#include <linux/irq.h>
+#include <linux/ratelimit.h>
+#include <linux/syscalls.h>
+#include <linux/completion.h>
+#include <linux/uuid.h>
+#include <linux/uaccess.h>
+#include <linux/suspend.h>
+#include <linux/siphash.h>
+#include <linux/sched/isolation.h>
+#include <crypto/chacha.h>
+#include <crypto/blake2s.h>
+#include <asm/archrandom.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/io.h>
+
+/*********************************************************************
+ *
+ * Initialization and readiness waiting.
+ *
+ * Much of the RNG infrastructure is devoted to various dependencies
+ * being able to wait until the RNG has collected enough entropy and
+ * is ready for safe consumption.
+ *
+ *********************************************************************/
+
+/*
+ * crng_init is protected by base_crng->lock, and only increases
+ * its value (from empty->early->ready).
+ */
+static enum {
+ CRNG_EMPTY = 0, /* Little to no entropy collected */
+ CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
+ CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */
+} crng_init __read_mostly = CRNG_EMPTY;
+static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
+#define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
+/* Various types of waiters for crng_init->CRNG_READY transition. */
+static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
+static struct fasync_struct *fasync;
+static ATOMIC_NOTIFIER_HEAD(random_ready_notifier);
+
+/* Control how we warn userspace. */
+static struct ratelimit_state urandom_warning =
+ RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
+static int ratelimit_disable __read_mostly =
+ IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
+module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
+MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
+
+/*
+ * Returns whether or not the input pool has been seeded and thus guaranteed
+ * to supply cryptographically secure random numbers. This applies to: the
+ * /dev/urandom device, the get_random_bytes function, and the get_random_{u8,
+ * u16,u32,u64,long} family of functions.
+ *
+ * Returns: true if the input pool has been seeded.
+ * false if the input pool has not been seeded.
+ */
+bool rng_is_initialized(void)
+{
+ return crng_ready();
+}
+EXPORT_SYMBOL(rng_is_initialized);
+
+static void __cold crng_set_ready(struct work_struct *work)
+{
+ static_branch_enable(&crng_is_ready);
+}
+
+/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
+static void try_to_generate_entropy(void);
+
+/*
+ * Wait for the input pool to be seeded and thus guaranteed to supply
+ * cryptographically secure random numbers. This applies to: the /dev/urandom
+ * device, the get_random_bytes function, and the get_random_{u8,u16,u32,u64,
+ * long} family of functions. Using any of these functions without first
+ * calling this function forfeits the guarantee of security.
+ *
+ * Returns: 0 if the input pool has been seeded.
+ * -ERESTARTSYS if the function was interrupted by a signal.
+ */
+int wait_for_random_bytes(void)
+{
+ while (!crng_ready()) {
+ int ret;
+
+ try_to_generate_entropy();
+ ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
+ if (ret)
+ return ret > 0 ? 0 : ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(wait_for_random_bytes);
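+
+/*
+ * Example (illustrative sketch only): an in-kernel caller that must not use
+ * unseeded randomness, with "key" being some caller-owned buffer, might do:
+ *
+ *	ret = wait_for_random_bytes();
+ *	if (ret)
+ *		return ret;
+ *	get_random_bytes(key, sizeof(key));
+ */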
+
+/*
+ * Add a callback function that will be invoked when the crng is initialised,
+ * or immediately if it already has been. Only use this if you are absolutely
+ * sure it is required. Most users should instead be able to test
+ * `rng_is_initialized()` on demand, or make use of `get_random_bytes_wait()`.
+ */
+int __cold execute_with_initialized_rng(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&random_ready_notifier.lock, flags);
+ if (crng_ready())
+ nb->notifier_call(nb, 0, NULL);
+ else
+ ret = raw_notifier_chain_register((struct raw_notifier_head *)&random_ready_notifier.head, nb);
+ spin_unlock_irqrestore(&random_ready_notifier.lock, flags);
+ return ret;
+}
+
+#define warn_unseeded_randomness() \
+ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
+ printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
+ __func__, (void *)_RET_IP_, crng_init)
+
+
+/*********************************************************************
+ *
+ * Fast key erasure RNG, the "crng".
+ *
+ * These functions expand entropy from the entropy extractor into
+ * long streams for external consumption using the "fast key erasure"
+ * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
+ *
+ * There are a few exported interfaces for use by other drivers:
+ *
+ * void get_random_bytes(void *buf, size_t len)
+ * u8 get_random_u8()
+ * u16 get_random_u16()
+ * u32 get_random_u32()
+ * u32 get_random_u32_below(u32 ceil)
+ * u32 get_random_u32_above(u32 floor)
+ * u32 get_random_u32_inclusive(u32 floor, u32 ceil)
+ * u64 get_random_u64()
+ * unsigned long get_random_long()
+ *
+ * These interfaces will return the requested number of random bytes
+ * into the given buffer or as a return value. This is equivalent to
+ * a read from /dev/urandom. The u8, u16, u32, u64, long family of
+ * functions may be higher performance for one-off random integers,
+ * because they do a bit of buffering and do not invoke reseeding
+ * until the buffer is emptied.
+ *
+ *********************************************************************/
+
+enum {
+ CRNG_RESEED_START_INTERVAL = HZ,
+ CRNG_RESEED_INTERVAL = 60 * HZ
+};
+
+static struct {
+ u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
+ unsigned long generation;
+ spinlock_t lock;
+} base_crng = {
+ .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
+};
+
+struct crng {
+ u8 key[CHACHA_KEY_SIZE];
+ unsigned long generation;
+ local_lock_t lock;
+};
+
+static DEFINE_PER_CPU(struct crng, crngs) = {
+ .generation = ULONG_MAX,
+ .lock = INIT_LOCAL_LOCK(crngs.lock),
+};
+
+/*
+ * Return the interval until the next reseeding, which is normally
+ * CRNG_RESEED_INTERVAL, but during early boot, it is at an interval
+ * proportional to the uptime.
+ */
+static unsigned int crng_reseed_interval(void)
+{
+ static bool early_boot = true;
+
+ if (unlikely(READ_ONCE(early_boot))) {
+ time64_t uptime = ktime_get_seconds();
+ if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
+ WRITE_ONCE(early_boot, false);
+ else
+ return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
+ (unsigned int)uptime / 2 * HZ);
+ }
+ return CRNG_RESEED_INTERVAL;
+}
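+
+/*
+ * For example, at 5 seconds of uptime the next reseed is scheduled about
+ * 2 seconds out (uptime / 2, in whole seconds, but never less than
+ * CRNG_RESEED_START_INTERVAL); once uptime reaches 120 seconds the
+ * interval stays at the full CRNG_RESEED_INTERVAL of 60 seconds.
+ */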
+
+/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
+static void extract_entropy(void *buf, size_t len);
+
+/* This extracts a new crng key from the input pool. */
+static void crng_reseed(struct work_struct *work)
+{
+ static DECLARE_DELAYED_WORK(next_reseed, crng_reseed);
+ unsigned long flags;
+ unsigned long next_gen;
+ u8 key[CHACHA_KEY_SIZE];
+
+ /* Immediately schedule the next reseeding, so that it fires sooner rather than later. */
+ if (likely(system_unbound_wq))
+ queue_delayed_work(system_unbound_wq, &next_reseed, crng_reseed_interval());
+
+ extract_entropy(key, sizeof(key));
+
+ /*
+ * We copy the new key into the base_crng, overwriting the old one,
+ * and update the generation counter. We avoid hitting ULONG_MAX,
+ * because the per-cpu crngs are initialized to ULONG_MAX, so this
+ * forces new CPUs that come online to always initialize.
+ */
+ spin_lock_irqsave(&base_crng.lock, flags);
+ memcpy(base_crng.key, key, sizeof(base_crng.key));
+ next_gen = base_crng.generation + 1;
+ if (next_gen == ULONG_MAX)
+ ++next_gen;
+ WRITE_ONCE(base_crng.generation, next_gen);
+ if (!static_branch_likely(&crng_is_ready))
+ crng_init = CRNG_READY;
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ memzero_explicit(key, sizeof(key));
+}
+
+/*
+ * This generates a ChaCha block using the provided key, and then
+ * immediately overwrites that key with half the block. It returns
+ * the resultant ChaCha state to the user, along with the second
+ * half of the block containing 32 bytes of random data that may
+ * be used; random_data_len may not be greater than 32.
+ *
+ * The returned ChaCha state contains within it a copy of the old
+ * key value, at index 4, so the state should always be zeroed out
+ * immediately after use in order to maintain forward secrecy.
+ * If the state cannot be erased in a timely manner, then it is
+ * safer to set the random_data parameter to &chacha_state[4] so
+ * that this function overwrites it before returning.
+ */
+static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
+ u32 chacha_state[CHACHA_STATE_WORDS],
+ u8 *random_data, size_t random_data_len)
+{
+ u8 first_block[CHACHA_BLOCK_SIZE];
+
+ BUG_ON(random_data_len > 32);
+
+ chacha_init_consts(chacha_state);
+ memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
+ memset(&chacha_state[12], 0, sizeof(u32) * 4);
+ chacha20_block(chacha_state, first_block);
+
+ memcpy(key, first_block, CHACHA_KEY_SIZE);
+ memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
+ memzero_explicit(first_block, sizeof(first_block));
+}
+
+/*
+ * This function returns a ChaCha state that you may use for generating
+ * random data. It also returns, on its own, up to 32 bytes of random data
+ * that may be used; random_data_len may not be greater than 32.
+ */
+static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
+ u8 *random_data, size_t random_data_len)
+{
+ unsigned long flags;
+ struct crng *crng;
+
+ BUG_ON(random_data_len > 32);
+
+ /*
+ * For the fast path, we check whether we're ready, unlocked first, and
+ * then re-check once locked later. In the case where we're really not
+ * ready, we do fast key erasure with the base_crng directly, extracting
+ * when crng_init is CRNG_EMPTY.
+ */
+ if (!crng_ready()) {
+ bool ready;
+
+ spin_lock_irqsave(&base_crng.lock, flags);
+ ready = crng_ready();
+ if (!ready) {
+ if (crng_init == CRNG_EMPTY)
+ extract_entropy(base_crng.key, sizeof(base_crng.key));
+ crng_fast_key_erasure(base_crng.key, chacha_state,
+ random_data, random_data_len);
+ }
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ if (!ready)
+ return;
+ }
+
+ local_lock_irqsave(&crngs.lock, flags);
+ crng = raw_cpu_ptr(&crngs);
+
+ /*
+ * If our per-cpu crng is older than the base_crng, then it means
+ * somebody reseeded the base_crng. In that case, we do fast key
+ * erasure on the base_crng, and use its output as the new key
+ * for our per-cpu crng. This brings us up to date with base_crng.
+ */
+ if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
+ spin_lock(&base_crng.lock);
+ crng_fast_key_erasure(base_crng.key, chacha_state,
+ crng->key, sizeof(crng->key));
+ crng->generation = base_crng.generation;
+ spin_unlock(&base_crng.lock);
+ }
+
+ /*
+ * Finally, when we've made it this far, our per-cpu crng has an up
+ * to date key, and we can do fast key erasure with it to produce
+ * some random data and a ChaCha state for the caller. All other
+ * branches of this function are "unlikely", so most of the time we
+ * should wind up here immediately.
+ */
+ crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
+ local_unlock_irqrestore(&crngs.lock, flags);
+}
+
+static void _get_random_bytes(void *buf, size_t len)
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u8 tmp[CHACHA_BLOCK_SIZE];
+ size_t first_block_len;
+
+ if (!len)
+ return;
+
+ first_block_len = min_t(size_t, 32, len);
+ crng_make_state(chacha_state, buf, first_block_len);
+ len -= first_block_len;
+ buf += first_block_len;
+
+ while (len) {
+ if (len < CHACHA_BLOCK_SIZE) {
+ chacha20_block(chacha_state, tmp);
+ memcpy(buf, tmp, len);
+ memzero_explicit(tmp, sizeof(tmp));
+ break;
+ }
+
+ chacha20_block(chacha_state, buf);
+ if (unlikely(chacha_state[12] == 0))
+ ++chacha_state[13];
+ len -= CHACHA_BLOCK_SIZE;
+ buf += CHACHA_BLOCK_SIZE;
+ }
+
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+}
+
+/*
+ * This returns random bytes in arbitrary quantities. The quality of the
+ * random bytes is as good as /dev/urandom's. In order to ensure that the
+ * randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
+ */
+void get_random_bytes(void *buf, size_t len)
+{
+ warn_unseeded_randomness();
+ _get_random_bytes(buf, len);
+}
+EXPORT_SYMBOL(get_random_bytes);
+
+static ssize_t get_random_bytes_user(struct iov_iter *iter)
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u8 block[CHACHA_BLOCK_SIZE];
+ size_t ret = 0, copied;
+
+ if (unlikely(!iov_iter_count(iter)))
+ return 0;
+
+ /*
+ * Immediately overwrite the ChaCha key at index 4 with random
+ * bytes, in case userspace causes copy_to_iter() below to sleep
+ * forever, so that we still retain forward secrecy in that case.
+ */
+ crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
+ /*
+ * However, if we're doing a read of len <= 32, we don't need to
+ * use chacha_state after, so we can simply return those bytes to
+ * the user directly.
+ */
+ if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
+ ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
+ goto out_zero_chacha;
+ }
+
+ for (;;) {
+ chacha20_block(chacha_state, block);
+ if (unlikely(chacha_state[12] == 0))
+ ++chacha_state[13];
+
+ copied = copy_to_iter(block, sizeof(block), iter);
+ ret += copied;
+ if (!iov_iter_count(iter) || copied != sizeof(block))
+ break;
+
+ BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
+ if (ret % PAGE_SIZE == 0) {
+ if (signal_pending(current))
+ break;
+ cond_resched();
+ }
+ }
+
+ memzero_explicit(block, sizeof(block));
+out_zero_chacha:
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+ return ret ? ret : -EFAULT;
+}
+
+/*
+ * Batched entropy returns random integers. The quality of the random
+ * number is as good as /dev/urandom's. In order to ensure that the randomness
+ * provided by this function is okay, the function wait_for_random_bytes()
+ * should be called and return 0 at least once at any point prior.
+ */
+
+#define DEFINE_BATCHED_ENTROPY(type) \
+struct batch_ ##type { \
+ /* \
+ * We make this 1.5x a ChaCha block, so that we get the \
+ * remaining 32 bytes from fast key erasure, plus one full \
+ * block from the detached ChaCha state. We can increase \
+ * the size of this later if needed so long as we keep the \
+ * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. \
+ */ \
+ type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \
+ local_lock_t lock; \
+ unsigned long generation; \
+ unsigned int position; \
+}; \
+ \
+static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \
+ .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \
+ .position = UINT_MAX \
+}; \
+ \
+type get_random_ ##type(void) \
+{ \
+ type ret; \
+ unsigned long flags; \
+ struct batch_ ##type *batch; \
+ unsigned long next_gen; \
+ \
+ warn_unseeded_randomness(); \
+ \
+ if (!crng_ready()) { \
+ _get_random_bytes(&ret, sizeof(ret)); \
+ return ret; \
+ } \
+ \
+ local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \
+ batch = raw_cpu_ptr(&batched_entropy_##type); \
+ \
+ next_gen = READ_ONCE(base_crng.generation); \
+ if (batch->position >= ARRAY_SIZE(batch->entropy) || \
+ next_gen != batch->generation) { \
+ _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
+ batch->position = 0; \
+ batch->generation = next_gen; \
+ } \
+ \
+ ret = batch->entropy[batch->position]; \
+ batch->entropy[batch->position] = 0; \
+ ++batch->position; \
+ local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \
+ return ret; \
+} \
+EXPORT_SYMBOL(get_random_ ##type);
+
+DEFINE_BATCHED_ENTROPY(u8)
+DEFINE_BATCHED_ENTROPY(u16)
+DEFINE_BATCHED_ENTROPY(u32)
+DEFINE_BATCHED_ENTROPY(u64)
+
+u32 __get_random_u32_below(u32 ceil)
+{
+ /*
+ * This is the slow path for variable ceil. It is still fast, most of
+ * the time, by doing traditional reciprocal multiplication and
+ * opportunistically comparing the lower half to ceil itself, before
+ * falling back to computing a larger bound, and then rejecting samples
+ * whose lower half would indicate a range indivisible by ceil. The use
+ * of `-ceil % ceil` is analogous to `2^32 % ceil`, but is computable
+ * in 32-bits.
+ */
+ u32 rand = get_random_u32();
+ u64 mult;
+
+ /*
+ * This function is technically undefined for ceil == 0, and in fact
+ * for the non-underscored constant version in the header, we build bug
+ * on that. But for the non-constant case, it's convenient to have that
+ * evaluate to being a straight call to get_random_u32(), so that
+ * get_random_u32_inclusive() can work over its whole range without
+ * undefined behavior.
+ */
+ if (unlikely(!ceil))
+ return rand;
+
+ mult = (u64)ceil * rand;
+ if (unlikely((u32)mult < ceil)) {
+ u32 bound = -ceil % ceil;
+ while (unlikely((u32)mult < bound))
+ mult = (u64)ceil * get_random_u32();
+ }
+ return mult >> 32;
+}
+EXPORT_SYMBOL(__get_random_u32_below);
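+
+/*
+ * Worked example: for ceil == 6, bound = -6 % 6 == 2^32 % 6 == 4, so a
+ * sample is rejected only when the low 32 bits of ceil * rand fall below
+ * 4; after rejection each of the six possible results is returned for
+ * exactly floor(2^32 / 6) values of rand, i.e. uniformly.
+ */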
+
+#ifdef CONFIG_SMP
+/*
+ * This function is called when the CPU is coming up, with entry
+ * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
+ */
+int __cold random_prepare_cpu(unsigned int cpu)
+{
+ /*
+ * When the cpu comes back online, immediately invalidate both
+ * the per-cpu crng and all batches, so that we serve fresh
+ * randomness.
+ */
+ per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
+ per_cpu_ptr(&batched_entropy_u8, cpu)->position = UINT_MAX;
+ per_cpu_ptr(&batched_entropy_u16, cpu)->position = UINT_MAX;
+ per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
+ per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
+ return 0;
+}
+#endif
+
+
+/**********************************************************************
+ *
+ * Entropy accumulation and extraction routines.
+ *
+ * Callers may add entropy via:
+ *
+ * static void mix_pool_bytes(const void *buf, size_t len)
+ *
+ * After which, if added entropy should be credited:
+ *
+ * static void credit_init_bits(size_t bits)
+ *
+ * Finally, extract entropy via:
+ *
+ * static void extract_entropy(void *buf, size_t len)
+ *
+ **********************************************************************/
+
+enum {
+ POOL_BITS = BLAKE2S_HASH_SIZE * 8,
+ POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
+ POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
+};
+
+static struct {
+ struct blake2s_state hash;
+ spinlock_t lock;
+ unsigned int init_bits;
+} input_pool = {
+ .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
+ BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
+ BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
+ .hash.outlen = BLAKE2S_HASH_SIZE,
+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
+};
+
+static void _mix_pool_bytes(const void *buf, size_t len)
+{
+ blake2s_update(&input_pool.hash, buf, len);
+}
+
+/*
+ * This function adds bytes into the input pool. It does not
+ * update the initialization bit counter; the caller should call
+ * credit_init_bits if this is appropriate.
+ */
+static void mix_pool_bytes(const void *buf, size_t len)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(buf, len);
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+}
+
+/*
+ * This is an HKDF-like construction for using the hashed collected entropy
+ * as a PRF key, that's then expanded block-by-block.
+ */
+static void extract_entropy(void *buf, size_t len)
+{
+ unsigned long flags;
+ u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
+ struct {
+ unsigned long rdseed[32 / sizeof(long)];
+ size_t counter;
+ } block;
+ size_t i, longs;
+
+ for (i = 0; i < ARRAY_SIZE(block.rdseed);) {
+ longs = arch_get_random_seed_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
+ if (longs) {
+ i += longs;
+ continue;
+ }
+ longs = arch_get_random_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
+ if (longs) {
+ i += longs;
+ continue;
+ }
+ block.rdseed[i++] = random_get_entropy();
+ }
+
+ spin_lock_irqsave(&input_pool.lock, flags);
+
+ /* seed = HASHPRF(last_key, entropy_input) */
+ blake2s_final(&input_pool.hash, seed);
+
+ /* next_key = HASHPRF(seed, RDSEED || 0) */
+ block.counter = 0;
+ blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
+ blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
+
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+ memzero_explicit(next_key, sizeof(next_key));
+
+ while (len) {
+ i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
+ /* output = HASHPRF(seed, RDSEED || ++counter) */
+ ++block.counter;
+ blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
+ len -= i;
+ buf += i;
+ }
+
+ memzero_explicit(seed, sizeof(seed));
+ memzero_explicit(&block, sizeof(block));
+}
+
+#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
+
+static void __cold _credit_init_bits(size_t bits)
+{
+ static struct execute_work set_ready;
+ unsigned int new, orig, add;
+ unsigned long flags;
+
+ if (!bits)
+ return;
+
+ add = min_t(size_t, bits, POOL_BITS);
+
+ orig = READ_ONCE(input_pool.init_bits);
+ do {
+ new = min_t(unsigned int, POOL_BITS, orig + add);
+ } while (!try_cmpxchg(&input_pool.init_bits, &orig, new));
+
+ if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
+ crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
+ if (static_key_initialized)
+ execute_in_process_context(crng_set_ready, &set_ready);
+ atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
+ wake_up_interruptible(&crng_init_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ pr_notice("crng init done\n");
+ if (urandom_warning.missed)
+ pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
+ urandom_warning.missed);
+ } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
+ spin_lock_irqsave(&base_crng.lock, flags);
+ /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
+ if (crng_init == CRNG_EMPTY) {
+ extract_entropy(base_crng.key, sizeof(base_crng.key));
+ crng_init = CRNG_EARLY;
+ }
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ }
+}
+
+
+/**********************************************************************
+ *
+ * Entropy collection routines.
+ *
+ * The following exported functions are used for pushing entropy into
+ * the above entropy accumulation routines:
+ *
+ * void add_device_randomness(const void *buf, size_t len);
+ * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after);
+ * void add_bootloader_randomness(const void *buf, size_t len);
+ * void add_vmfork_randomness(const void *unique_vm_id, size_t len);
+ * void add_interrupt_randomness(int irq);
+ * void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
+ * void add_disk_randomness(struct gendisk *disk);
+ *
+ * add_device_randomness() adds data to the input pool that
+ * is likely to differ between two devices (or possibly even per boot).
+ * This would be things like MAC addresses or serial numbers, or the
+ * read-out of the RTC. This does *not* credit any actual entropy to
+ * the pool, but it initializes the pool to different values for devices
+ * that might otherwise be identical and have very little entropy
+ * available to them (particularly common in the embedded world).
+ *
+ * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
+ * entropy as specified by the caller. If the entropy pool is full it will
+ * block until more entropy is needed.
+ *
+ * add_bootloader_randomness() is called by bootloader drivers, such as EFI
+ * and device tree, and credits its input depending on whether or not the
+ * command line option 'random.trust_bootloader' is set.
+ *
+ * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
+ * representing the current instance of a VM to the pool, without crediting,
+ * and then force-reseeds the crng so that it takes effect immediately.
+ *
+ * add_interrupt_randomness() uses the interrupt timing as random
+ * inputs to the entropy pool. Using the cycle counters and the irq source
+ * as inputs, it feeds the input pool roughly once a second or after 1024
+ * interrupts, whichever comes first, crediting about one bit of entropy
+ * for every 64 interrupts mixed in.
+ *
+ * add_input_randomness() uses the input layer interrupt timing, as well
+ * as the event type information from the hardware.
+ *
+ * add_disk_randomness() uses what amounts to the seek time of block
+ * layer request events, on a per-disk_devt basis, as input to the
+ * entropy pool. Note that high-speed solid state drives with very low
+ * seek times do not make for good sources of entropy, as their seek
+ * times are usually fairly consistent.
+ *
+ * The last two routines try to estimate how many bits of entropy
+ * to credit. They do this by keeping track of the first and second
+ * order deltas of the event timings.
+ *
+ **********************************************************************/
+
+static bool trust_cpu __initdata = true;
+static bool trust_bootloader __initdata = true;
+static int __init parse_trust_cpu(char *arg)
+{
+ return kstrtobool(arg, &trust_cpu);
+}
+static int __init parse_trust_bootloader(char *arg)
+{
+ return kstrtobool(arg, &trust_bootloader);
+}
+early_param("random.trust_cpu", parse_trust_cpu);
+early_param("random.trust_bootloader", parse_trust_bootloader);
+
+static int random_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
+{
+ unsigned long flags, entropy = random_get_entropy();
+
+ /*
+ * Encode a representation of how long the system has been suspended,
+ * in a way that is distinct from prior system suspends.
+ */
+ ktime_t stamps[] = { ktime_get(), ktime_get_boottime(), ktime_get_real() };
+
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(&action, sizeof(action));
+ _mix_pool_bytes(stamps, sizeof(stamps));
+ _mix_pool_bytes(&entropy, sizeof(entropy));
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+
+ if (crng_ready() && (action == PM_RESTORE_PREPARE ||
+ (action == PM_POST_SUSPEND && !IS_ENABLED(CONFIG_PM_AUTOSLEEP) &&
+ !IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP)))) {
+ crng_reseed(NULL);
+ pr_notice("crng reseeded on system resumption\n");
+ }
+ return 0;
+}
+
+static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification };
+
+/*
+ * This is called extremely early, before time keeping functionality is
+ * available, but arch randomness is. Interrupts are not yet enabled.
+ */
+void __init random_init_early(const char *command_line)
+{
+ unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)];
+ size_t i, longs, arch_bits;
+
+#if defined(LATENT_ENTROPY_PLUGIN)
+ static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
+ _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
+#endif
+
+ for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
+ longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
+ if (longs) {
+ _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
+ i += longs;
+ continue;
+ }
+ longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
+ if (longs) {
+ _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
+ i += longs;
+ continue;
+ }
+ arch_bits -= sizeof(*entropy) * 8;
+ ++i;
+ }
+
+ _mix_pool_bytes(init_utsname(), sizeof(*(init_utsname())));
+ _mix_pool_bytes(command_line, strlen(command_line));
+
+ /* Reseed if already seeded by earlier phases. */
+ if (crng_ready())
+ crng_reseed(NULL);
+ else if (trust_cpu)
+ _credit_init_bits(arch_bits);
+}
+
+/*
+ * This is called a little bit after the prior function, and now there is
+ * access to timestamp counters. Interrupts are not yet enabled.
+ */
+void __init random_init(void)
+{
+ unsigned long entropy = random_get_entropy();
+ ktime_t now = ktime_get_real();
+
+ _mix_pool_bytes(&now, sizeof(now));
+ _mix_pool_bytes(&entropy, sizeof(entropy));
+ add_latent_entropy();
+
+ /*
+ * If we were initialized by the cpu or bootloader before jump labels
+ * are initialized, then we should enable the static branch here, where
+ * it's guaranteed that jump labels have been initialized.
+ */
+ if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
+ crng_set_ready(NULL);
+
+ /* Reseed if already seeded by earlier phases. */
+ if (crng_ready())
+ crng_reseed(NULL);
+
+ WARN_ON(register_pm_notifier(&pm_notifier));
+
+ WARN(!entropy, "Missing cycle counter and fallback timer; RNG "
+ "entropy collection will consequently suffer.");
+}
+
+/*
+ * Add device- or boot-specific data to the input pool to help
+ * initialize it.
+ *
+ * None of this adds any entropy; it is meant to avoid the problem of
+ * the entropy pool having similar initial state across largely
+ * identical devices.
+ */
+void add_device_randomness(const void *buf, size_t len)
+{
+ unsigned long entropy = random_get_entropy();
+ unsigned long flags;
+
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(&entropy, sizeof(entropy));
+ _mix_pool_bytes(buf, len);
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+}
+EXPORT_SYMBOL(add_device_randomness);
+
+/*
+ * Interface for in-kernel drivers of true hardware RNGs. Those devices
+ * may produce endless random bits, so this function will sleep for
+ * some amount of time afterwards, if the sleep_after parameter is true.
+ */
+void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after)
+{
+ mix_pool_bytes(buf, len);
+ credit_init_bits(entropy);
+
+ /*
+ * Throttle writing to once every reseed interval, unless we're not yet
+ * initialized or no entropy is credited.
+ */
+ if (sleep_after && !kthread_should_stop() && (crng_ready() || !entropy))
+ schedule_timeout_interruptible(crng_reseed_interval());
+}
+EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
+
+/*
+ * Handle random seed passed by bootloader, and credit it depending
+ * on the command line option 'random.trust_bootloader'.
+ */
+void __init add_bootloader_randomness(const void *buf, size_t len)
+{
+ mix_pool_bytes(buf, len);
+ if (trust_bootloader)
+ credit_init_bits(len * 8);
+}
+
+#if IS_ENABLED(CONFIG_VMGENID)
+static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
+
+/*
+ * Handle a new VM ID, which is unique but not secret, so we
+ * don't credit it, but we do immediately force a reseed after so
+ * that it's used by the crng posthaste.
+ */
+void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
+{
+ add_device_randomness(unique_vm_id, len);
+ if (crng_ready()) {
+ crng_reseed(NULL);
+ pr_notice("crng reseeded due to virtual machine fork\n");
+ }
+ blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
+}
+#if IS_MODULE(CONFIG_VMGENID)
+EXPORT_SYMBOL_GPL(add_vmfork_randomness);
+#endif
+
+int __cold register_random_vmfork_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&vmfork_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
+
+int __cold unregister_random_vmfork_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&vmfork_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
+#endif
+
+struct fast_pool {
+ unsigned long pool[4];
+ unsigned long last;
+ unsigned int count;
+ struct timer_list mix;
+};
+
+static void mix_interrupt_randomness(struct timer_list *work);
+
+static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
+#ifdef CONFIG_64BIT
+#define FASTMIX_PERM SIPHASH_PERMUTATION
+ .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
+#else
+#define FASTMIX_PERM HSIPHASH_PERMUTATION
+ .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
+#endif
+ .mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
+};
+
+/*
+ * This is [Half]SipHash-1-x, starting from an empty key. Because
+ * the key is fixed, it assumes that its inputs are non-malicious,
+ * and therefore this has no security on its own. s represents the
+ * four-word SipHash state, while v represents a two-word input.
+ */
+static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
+{
+ s[3] ^= v1;
+ FASTMIX_PERM(s[0], s[1], s[2], s[3]);
+ s[0] ^= v1;
+ s[3] ^= v2;
+ FASTMIX_PERM(s[0], s[1], s[2], s[3]);
+ s[0] ^= v2;
+}
+
+#ifdef CONFIG_SMP
+/*
+ * This function is called when the CPU has just come online, with
+ * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
+ */
+int __cold random_online_cpu(unsigned int cpu)
+{
+ /*
+ * During CPU shutdown and before CPU onlining, add_interrupt_
+ * randomness() may schedule mix_interrupt_randomness(), and
+ * set the MIX_INFLIGHT flag. However, because the worker can
+ * be scheduled on a different CPU during this period, that
+ * flag will never be cleared. For that reason, we zero out
+ * the flag here, which runs just after workqueues are onlined
+ * for the CPU again. This also has the effect of setting the
+ * irq randomness count to zero so that new accumulated irqs
+ * are fresh.
+ */
+ per_cpu_ptr(&irq_randomness, cpu)->count = 0;
+ return 0;
+}
+#endif
+
+static void mix_interrupt_randomness(struct timer_list *work)
+{
+ struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
+ /*
+ * The size of the copied stack pool is explicitly 2 longs so that we
+ * only ever ingest half of the siphash output each time, retaining
+ * the other half as the next "key" that carries over. The entropy is
+ * supposed to be sufficiently dispersed between bits so on average
+ * we don't wind up "losing" some.
+ */
+ unsigned long pool[2];
+ unsigned int count;
+
+ /* Check to see if we're running on the wrong CPU due to hotplug. */
+ local_irq_disable();
+ if (fast_pool != this_cpu_ptr(&irq_randomness)) {
+ local_irq_enable();
+ return;
+ }
+
+ /*
+ * Copy the pool to the stack so that the mixer always has a
+ * consistent view, before we reenable irqs again.
+ */
+ memcpy(pool, fast_pool->pool, sizeof(pool));
+ count = fast_pool->count;
+ fast_pool->count = 0;
+ fast_pool->last = jiffies;
+ local_irq_enable();
+
+ mix_pool_bytes(pool, sizeof(pool));
+ credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));
+
+ memzero_explicit(pool, sizeof(pool));
+}
+
+void add_interrupt_randomness(int irq)
+{
+ enum { MIX_INFLIGHT = 1U << 31 };
+ unsigned long entropy = random_get_entropy();
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
+ unsigned int new_count;
+
+ fast_mix(fast_pool->pool, entropy,
+ (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
+ new_count = ++fast_pool->count;
+
+ if (new_count & MIX_INFLIGHT)
+ return;
+
+ if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
+ return;
+
+ fast_pool->count |= MIX_INFLIGHT;
+ if (!timer_pending(&fast_pool->mix)) {
+ fast_pool->mix.expires = jiffies;
+ add_timer_on(&fast_pool->mix, raw_smp_processor_id());
+ }
+}
+EXPORT_SYMBOL_GPL(add_interrupt_randomness);
+
+/* There is one of these per entropy source */
+struct timer_rand_state {
+ unsigned long last_time;
+ long last_delta, last_delta2;
+};
+
+/*
+ * This function adds entropy to the entropy "pool" by using timing
+ * delays. It uses the timer_rand_state structure to make an estimate
+ * of how many bits of entropy this call has added to the pool. The
+ * value "num" is also added to the pool; it should somehow describe
+ * the type of event that just happened.
+ */
+static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
+{
+ unsigned long entropy = random_get_entropy(), now = jiffies, flags;
+ long delta, delta2, delta3;
+ unsigned int bits;
+
+ /*
+ * If we're in a hard IRQ, add_interrupt_randomness() will be called
+ * sometime after, so mix into the fast pool.
+ */
+ if (in_hardirq()) {
+ fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
+ } else {
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(&entropy, sizeof(entropy));
+ _mix_pool_bytes(&num, sizeof(num));
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+ }
+
+ if (crng_ready())
+ return;
+
+ /*
+ * Calculate number of bits of randomness we probably added.
+ * We take into account the first, second and third-order deltas
+ * in order to make our estimate.
+ */
+ delta = now - READ_ONCE(state->last_time);
+ WRITE_ONCE(state->last_time, now);
+
+ delta2 = delta - READ_ONCE(state->last_delta);
+ WRITE_ONCE(state->last_delta, delta);
+
+ delta3 = delta2 - READ_ONCE(state->last_delta2);
+ WRITE_ONCE(state->last_delta2, delta2);
+
+ if (delta < 0)
+ delta = -delta;
+ if (delta2 < 0)
+ delta2 = -delta2;
+ if (delta3 < 0)
+ delta3 = -delta3;
+ if (delta > delta2)
+ delta = delta2;
+ if (delta > delta3)
+ delta = delta3;
+
+ /*
+ * delta is now minimum absolute delta. Round down by 1 bit
+ * on general principles, and limit entropy estimate to 11 bits.
+ */
+ bits = min(fls(delta >> 1), 11);
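+
+ /*
+ * Worked example (illustrative only): with absolute deltas of, say,
+ * 12, 5 and 9, the minimum is 5, so fls(5 >> 1) = fls(2) = 2 and
+ * 2 bits are credited here, well under the 11-bit cap.
+ */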
+
+ /*
+ * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
+ * will run after this, and it uses a different crediting scheme of 1 bit
+ * per every 64 interrupts. To make its accounting line up with the
+ * estimate made here, we bump the interrupt count by 64 for each bit
+ * estimated, and subtract one to account for the extra count that
+ * add_interrupt_randomness() itself will add.
+ */
+ if (in_hardirq())
+ this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
+ else
+ _credit_init_bits(bits);
+}
+
+void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
+{
+ static unsigned char last_value;
+ static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
+
+ /* Ignore autorepeat and the like. */
+ if (value == last_value)
+ return;
+
+ last_value = value;
+ add_timer_randomness(&input_timer_state,
+ (type << 4) ^ code ^ (code >> 4) ^ value);
+}
+EXPORT_SYMBOL_GPL(add_input_randomness);
+
+#ifdef CONFIG_BLOCK
+void add_disk_randomness(struct gendisk *disk)
+{
+ if (!disk || !disk->random)
+ return;
+ /* First major is 1, so we get >= 0x200 here. */
+ add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
+}
+EXPORT_SYMBOL_GPL(add_disk_randomness);
+
+void __cold rand_initialize_disk(struct gendisk *disk)
+{
+ struct timer_rand_state *state;
+
+ /*
+ * If kzalloc returns null, we just won't use that entropy
+ * source.
+ */
+ state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+ if (state) {
+ state->last_time = INITIAL_JIFFIES;
+ disk->random = state;
+ }
+}
+#endif
+
+struct entropy_timer_state {
+ unsigned long entropy;
+ struct timer_list timer;
+ atomic_t samples;
+ unsigned int samples_per_bit;
+};
+
+/*
+ * Each time the timer fires, we expect that we got an unpredictable jump in
+ * the cycle counter. Even if the timer is running on another CPU, the timer
+ * activity will be touching the stack of the CPU that is generating entropy.
+ *
+ * Note that we don't re-arm the timer in the timer itself - we are happy to be
+ * scheduled away, since that just makes the load more complex, but we do not
+ * want the timer to keep ticking unless the entropy loop is running.
+ *
+ * So the re-arming always happens in the entropy loop itself.
+ */
+static void __cold entropy_timer(struct timer_list *timer)
+{
+ struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer);
+ unsigned long entropy = random_get_entropy();
+
+ mix_pool_bytes(&entropy, sizeof(entropy));
+ if (atomic_inc_return(&state->samples) % state->samples_per_bit == 0)
+ credit_init_bits(1);
+}
+
+/*
+ * If we have an actual cycle counter, see if we can generate enough entropy
+ * with timing noise.
+ */
+static void __cold try_to_generate_entropy(void)
+{
+ enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 15 };
+ u8 stack_bytes[sizeof(struct entropy_timer_state) + SMP_CACHE_BYTES - 1];
+ struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES);
+ unsigned int i, num_different = 0;
+ unsigned long last = random_get_entropy();
+ int cpu = -1;
+
+ for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
+ stack->entropy = random_get_entropy();
+ if (stack->entropy != last)
+ ++num_different;
+ last = stack->entropy;
+ }
+ stack->samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
+ if (stack->samples_per_bit > MAX_SAMPLES_PER_BIT)
+ return;
+
+ atomic_set(&stack->samples, 0);
+ timer_setup_on_stack(&stack->timer, entropy_timer, 0);
+ while (!crng_ready() && !signal_pending(current)) {
+ /*
+ * Check !timer_pending() and then ensure that any previous callback has finished
+ * executing by checking try_to_del_timer_sync(), before queueing the next one.
+ */
+ if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) {
+ struct cpumask timer_cpus;
+ unsigned int num_cpus;
+
+ /*
+ * Preemption must be disabled here, both to read the current CPU number
+ * and to avoid scheduling a timer on a dead CPU.
+ */
+ preempt_disable();
+
+ /* Only schedule callbacks on timer CPUs that are online. */
+ cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
+ num_cpus = cpumask_weight(&timer_cpus);
+ /* In the very bizarre case of misconfiguration, fall back to all online CPUs. */
+ if (unlikely(num_cpus == 0)) {
+ timer_cpus = *cpu_online_mask;
+ num_cpus = cpumask_weight(&timer_cpus);
+ }
+
+ /* Basic CPU round-robin, which avoids the current CPU. */
+ do {
+ cpu = cpumask_next(cpu, &timer_cpus);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first(&timer_cpus);
+ } while (cpu == smp_processor_id() && num_cpus > 1);
+
+ /* Expiring the timer at `jiffies` means it's the next tick. */
+ stack->timer.expires = jiffies;
+
+ add_timer_on(&stack->timer, cpu);
+
+ preempt_enable();
+ }
+ mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
+ schedule();
+ stack->entropy = random_get_entropy();
+ }
+ mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
+
+ del_timer_sync(&stack->timer);
+ destroy_timer_on_stack(&stack->timer);
+}
+
+
+/**********************************************************************
+ *
+ * Userspace reader/writer interfaces.
+ *
+ * getrandom(2) is the primary modern interface into the RNG and should
+ * be used in preference to anything else.
+ *
+ * Reading from /dev/random has the same functionality as calling
+ * getrandom(2) with flags=0. In earlier versions, however, it had
+ * vastly different semantics and should therefore be avoided, to
+ * prevent backwards compatibility issues.
+ *
+ * Reading from /dev/urandom has the same functionality as calling
+ * getrandom(2) with flags=GRND_INSECURE. Because it does not block
+ * waiting for the RNG to be ready, it should not be used.
+ *
+ * Writing to either /dev/random or /dev/urandom adds entropy to
+ * the input pool but does not credit it.
+ *
+ * Polling on /dev/random indicates when the RNG is initialized, on
+ * the read side, and when it wants new entropy, on the write side.
+ *
+ * Both /dev/random and /dev/urandom have the same set of ioctls for
+ * adding entropy, getting the entropy count, zeroing the count, and
+ * reseeding the crng.
+ *
+ **********************************************************************/
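+
+/*
+ * Illustrative only (userspace usage, not part of this file): the preferred
+ * way to obtain random bytes is getrandom(2), e.g.
+ *
+ *	unsigned char key[32];
+ *	if (getrandom(key, sizeof(key), 0) != sizeof(key))
+ *		handle_error();		(hypothetical error path)
+ *
+ * This blocks only until the RNG is initialized; requests of up to 256 bytes
+ * are then neither interrupted nor short-read.
+ */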
+
+SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
+{
+ struct iov_iter iter;
+ struct iovec iov;
+ int ret;
+
+ if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
+ return -EINVAL;
+
+ /*
+ * Requesting insecure and blocking randomness at the same time makes
+ * no sense.
+ */
+ if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
+ return -EINVAL;
+
+ if (!crng_ready() && !(flags & GRND_INSECURE)) {
+ if (flags & GRND_NONBLOCK)
+ return -EAGAIN;
+ ret = wait_for_random_bytes();
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = import_single_range(ITER_DEST, ubuf, len, &iov, &iter);
+ if (unlikely(ret))
+ return ret;
+ return get_random_bytes_user(&iter);
+}
+
+static __poll_t random_poll(struct file *file, poll_table *wait)
+{
+ poll_wait(file, &crng_init_wait, wait);
+ return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
+}
+
+static ssize_t write_pool_user(struct iov_iter *iter)
+{
+ u8 block[BLAKE2S_BLOCK_SIZE];
+ ssize_t ret = 0;
+ size_t copied;
+
+ if (unlikely(!iov_iter_count(iter)))
+ return 0;
+
+ for (;;) {
+ copied = copy_from_iter(block, sizeof(block), iter);
+ ret += copied;
+ mix_pool_bytes(block, copied);
+ if (!iov_iter_count(iter) || copied != sizeof(block))
+ break;
+
+ BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
+ if (ret % PAGE_SIZE == 0) {
+ if (signal_pending(current))
+ break;
+ cond_resched();
+ }
+ }
+
+ memzero_explicit(block, sizeof(block));
+ return ret ? ret : -EFAULT;
+}
+
+static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
+{
+ return write_pool_user(iter);
+}
+
+static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
+{
+ static int maxwarn = 10;
+
+ /*
+ * Opportunistically attempt to initialize the RNG on platforms that
+ * have fast cycle counters, but don't (for now) require it to succeed.
+ */
+ if (!crng_ready())
+ try_to_generate_entropy();
+
+ if (!crng_ready()) {
+ if (!ratelimit_disable && maxwarn <= 0)
+ ++urandom_warning.missed;
+ else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
+ --maxwarn;
+ pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
+ current->comm, iov_iter_count(iter));
+ }
+ }
+
+ return get_random_bytes_user(iter);
+}
+
+static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
+{
+ int ret;
+
+ if (!crng_ready() &&
+ ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
+ (kiocb->ki_filp->f_flags & O_NONBLOCK)))
+ return -EAGAIN;
+
+ ret = wait_for_random_bytes();
+ if (ret != 0)
+ return ret;
+ return get_random_bytes_user(iter);
+}
+
+static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ int __user *p = (int __user *)arg;
+ int ent_count;
+
+ switch (cmd) {
+ case RNDGETENTCNT:
+ /* Inherently racy, no point locking. */
+ if (put_user(input_pool.init_bits, p))
+ return -EFAULT;
+ return 0;
+ case RNDADDTOENTCNT:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(ent_count, p))
+ return -EFAULT;
+ if (ent_count < 0)
+ return -EINVAL;
+ credit_init_bits(ent_count);
+ return 0;
+ case RNDADDENTROPY: {
+ struct iov_iter iter;
+ struct iovec iov;
+ ssize_t ret;
+ int len;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(ent_count, p++))
+ return -EFAULT;
+ if (ent_count < 0)
+ return -EINVAL;
+ if (get_user(len, p++))
+ return -EFAULT;
+ ret = import_single_range(ITER_SOURCE, p, len, &iov, &iter);
+ if (unlikely(ret))
+ return ret;
+ ret = write_pool_user(&iter);
+ if (unlikely(ret < 0))
+ return ret;
+ /* Since we're crediting, enforce that it was all written into the pool. */
+ if (unlikely(ret != len))
+ return -EFAULT;
+ credit_init_bits(ent_count);
+ return 0;
+ }
+ case RNDZAPENTCNT:
+ case RNDCLEARPOOL:
+ /* No longer has any effect. */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return 0;
+ case RNDRESEEDCRNG:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (!crng_ready())
+ return -ENODATA;
+ crng_reseed(NULL);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
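+
+/*
+ * Illustrative only (userspace usage, not part of this file), assuming the
+ * struct rand_pool_info layout from the uapi <linux/random.h>:
+ *
+ *	struct rand_pool_info *info = malloc(sizeof(*info) + 32);
+ *	info->entropy_count = 128;		(bits to credit)
+ *	info->buf_size = 32;			(bytes of seed that follow)
+ *	... fill info->buf with 32 bytes of seed material ...
+ *	ioctl(fd, RNDADDENTROPY, info);		(fd on /dev/random, needs CAP_SYS_ADMIN)
+ *
+ * matching the ent_count/len/data order read by the handler above.
+ */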
+
+static int random_fasync(int fd, struct file *filp, int on)
+{
+ return fasync_helper(fd, filp, on, &fasync);
+}
+
+const struct file_operations random_fops = {
+ .read_iter = random_read_iter,
+ .write_iter = random_write_iter,
+ .poll = random_poll,
+ .unlocked_ioctl = random_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .fasync = random_fasync,
+ .llseek = noop_llseek,
+ .splice_read = copy_splice_read,
+ .splice_write = iter_file_splice_write,
+};
+
+const struct file_operations urandom_fops = {
+ .read_iter = urandom_read_iter,
+ .write_iter = random_write_iter,
+ .unlocked_ioctl = random_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .fasync = random_fasync,
+ .llseek = noop_llseek,
+ .splice_read = copy_splice_read,
+ .splice_write = iter_file_splice_write,
+};
+
+
+/********************************************************************
+ *
+ * Sysctl interface.
+ *
+ * These are partly unused legacy knobs with dummy values to not break
+ * userspace and partly still useful things. They are usually accessible
+ * in /proc/sys/kernel/random/ and are as follows:
+ *
+ * - boot_id - a UUID representing the current boot.
+ *
+ * - uuid - a random UUID, different each time the file is read.
+ *
+ * - poolsize - the number of bits of entropy that the input pool can
+ * hold, tied to the POOL_BITS constant.
+ *
+ * - entropy_avail - the number of bits of entropy currently in the
+ * input pool. Always <= poolsize.
+ *
+ * - write_wakeup_threshold - the amount of entropy in the input pool
+ * below which write polls to /dev/random will unblock, requesting
+ * more entropy, tied to the POOL_READY_BITS constant. It is writable
+ * to avoid breaking old userspaces, but writing to it does not
+ * change any behavior of the RNG.
+ *
+ * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
+ * It is writable to avoid breaking old userspaces, but writing
+ * to it does not change any behavior of the RNG.
+ *
+ ********************************************************************/
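+
+/*
+ * Illustrative only: these knobs are exposed under /proc/sys/kernel/random/,
+ * so e.g. `cat /proc/sys/kernel/random/entropy_avail` reads the pool's
+ * init_bits and `cat /proc/sys/kernel/random/boot_id` prints the per-boot UUID.
+ */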
+
+#ifdef CONFIG_SYSCTL
+
+#include <linux/sysctl.h>
+
+static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
+static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
+static int sysctl_poolsize = POOL_BITS;
+static u8 sysctl_bootid[UUID_SIZE];
+
+/*
+ * This function is used to return both the boot_id UUID and a random
+ * UUID. The difference is in whether table->data is NULL: if it is,
+ * then a new UUID is generated and returned to the user.
+ */
+static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
+ size_t *lenp, loff_t *ppos)
+{
+ u8 tmp_uuid[UUID_SIZE], *uuid;
+ char uuid_string[UUID_STRING_LEN + 1];
+ struct ctl_table fake_table = {
+ .data = uuid_string,
+ .maxlen = UUID_STRING_LEN
+ };
+
+ if (write)
+ return -EPERM;
+
+ uuid = table->data;
+ if (!uuid) {
+ uuid = tmp_uuid;
+ generate_random_uuid(uuid);
+ } else {
+ static DEFINE_SPINLOCK(bootid_spinlock);
+
+ spin_lock(&bootid_spinlock);
+ if (!uuid[8])
+ generate_random_uuid(uuid);
+ spin_unlock(&bootid_spinlock);
+ }
+
+ snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
+ return proc_dostring(&fake_table, 0, buf, lenp, ppos);
+}
+
+/* The same as proc_dointvec, but writes don't change anything. */
+static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
+ size_t *lenp, loff_t *ppos)
+{
+ return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
+}
+
+static struct ctl_table random_table[] = {
+ {
+ .procname = "poolsize",
+ .data = &sysctl_poolsize,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "entropy_avail",
+ .data = &input_pool.init_bits,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "write_wakeup_threshold",
+ .data = &sysctl_random_write_wakeup_bits,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_do_rointvec,
+ },
+ {
+ .procname = "urandom_min_reseed_secs",
+ .data = &sysctl_random_min_urandom_seed,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_do_rointvec,
+ },
+ {
+ .procname = "boot_id",
+ .data = &sysctl_bootid,
+ .mode = 0444,
+ .proc_handler = proc_do_uuid,
+ },
+ {
+ .procname = "uuid",
+ .mode = 0444,
+ .proc_handler = proc_do_uuid,
+ },
+ { }
+};
+
+/*
+ * random_init() is called before sysctl_init(),
+ * so we cannot call register_sysctl_init() in random_init()
+ */
+static int __init random_sysctls_init(void)
+{
+ register_sysctl_init("kernel/random", random_table);
+ return 0;
+}
+device_initcall(random_sysctls_init);
+#endif
diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c
new file mode 100644
index 0000000000..9f701dcba9
--- /dev/null
+++ b/drivers/char/scx200_gpio.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* linux/drivers/char/scx200_gpio.c
+
+ National Semiconductor SCx200 GPIO driver. Allows a user space
+ process to play with the GPIO pins.
+
+ Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+
+#include <linux/scx200_gpio.h>
+#include <linux/nsc_gpio.h>
+
+#define DRVNAME "scx200_gpio"
+
+static struct platform_device *pdev;
+
+MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
+MODULE_DESCRIPTION("NatSemi/AMD SCx200 GPIO Pin Driver");
+MODULE_LICENSE("GPL");
+
+static int major = 0; /* default to dynamic major */
+module_param(major, int, 0);
+MODULE_PARM_DESC(major, "Major device number");
+
+#define MAX_PINS 32 /* 64 later, when known ok */
+
+struct nsc_gpio_ops scx200_gpio_ops = {
+ .owner = THIS_MODULE,
+ .gpio_config = scx200_gpio_configure,
+ .gpio_dump = nsc_gpio_dump,
+ .gpio_get = scx200_gpio_get,
+ .gpio_set = scx200_gpio_set,
+ .gpio_change = scx200_gpio_change,
+ .gpio_current = scx200_gpio_current
+};
+EXPORT_SYMBOL_GPL(scx200_gpio_ops);
+
+static int scx200_gpio_open(struct inode *inode, struct file *file)
+{
+ unsigned m = iminor(inode);
+ file->private_data = &scx200_gpio_ops;
+
+ if (m >= MAX_PINS)
+ return -EINVAL;
+ return nonseekable_open(inode, file);
+}
+
+static int scx200_gpio_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations scx200_gpio_fileops = {
+ .owner = THIS_MODULE,
+ .write = nsc_gpio_write,
+ .read = nsc_gpio_read,
+ .open = scx200_gpio_open,
+ .release = scx200_gpio_release,
+ .llseek = no_llseek,
+};
+
+static struct cdev scx200_gpio_cdev; /* use 1 cdev for all pins */
+
+static int __init scx200_gpio_init(void)
+{
+ int rc;
+ dev_t devid;
+
+ if (!scx200_gpio_present()) {
+ printk(KERN_ERR DRVNAME ": no SCx200 gpio present\n");
+ return -ENODEV;
+ }
+
+ /* support dev_dbg() with pdev->dev */
+ pdev = platform_device_alloc(DRVNAME, 0);
+ if (!pdev)
+ return -ENOMEM;
+
+ rc = platform_device_add(pdev);
+ if (rc)
+ goto undo_malloc;
+
+ /* nsc_gpio uses dev_dbg(), so needs this */
+ scx200_gpio_ops.dev = &pdev->dev;
+
+ if (major) {
+ devid = MKDEV(major, 0);
+ rc = register_chrdev_region(devid, MAX_PINS, "scx200_gpio");
+ } else {
+ rc = alloc_chrdev_region(&devid, 0, MAX_PINS, "scx200_gpio");
+ major = MAJOR(devid);
+ }
+ if (rc < 0) {
+ dev_err(&pdev->dev, "SCx200 chrdev_region err: %d\n", rc);
+ goto undo_platform_device_add;
+ }
+
+ cdev_init(&scx200_gpio_cdev, &scx200_gpio_fileops);
+ cdev_add(&scx200_gpio_cdev, devid, MAX_PINS);
+
+ return 0; /* succeed */
+
+undo_platform_device_add:
+ platform_device_del(pdev);
+undo_malloc:
+ platform_device_put(pdev);
+
+ return rc;
+}
+
+static void __exit scx200_gpio_cleanup(void)
+{
+ cdev_del(&scx200_gpio_cdev);
+ /* cdev_put(&scx200_gpio_cdev); */
+
+ unregister_chrdev_region(MKDEV(major, 0), MAX_PINS);
+ platform_device_unregister(pdev);
+}
+
+module_init(scx200_gpio_init);
+module_exit(scx200_gpio_cleanup);
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
new file mode 100644
index 0000000000..9211531689
--- /dev/null
+++ b/drivers/char/sonypi.c
@@ -0,0 +1,1548 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Sony Programmable I/O Control Device driver for VAIO
+ *
+ * Copyright (C) 2007 Mattia Dongili <malattia@linux.it>
+ *
+ * Copyright (C) 2001-2005 Stelian Pop <stelian@popies.net>
+ *
+ * Copyright (C) 2005 Narayanan R S <nars@kadamba.org>
+ *
+ * Copyright (C) 2001-2002 Alcôve <www.alcove.com>
+ *
+ * Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au>
+ *
+ * Copyright (C) 2001 Junichi Morita <jun1m@mars.dti.ne.jp>
+ *
+ * Copyright (C) 2000 Takaya Kinjo <t-kinjo@tc4.so-net.ne.jp>
+ *
+ * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com>
+ *
+ * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/input.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/kfifo.h>
+#include <linux/platform_device.h>
+#include <linux/gfp.h>
+
+#include <linux/uaccess.h>
+#include <asm/io.h>
+
+#include <linux/sonypi.h>
+
+#define SONYPI_DRIVER_VERSION "1.26"
+
+MODULE_AUTHOR("Stelian Pop <stelian@popies.net>");
+MODULE_DESCRIPTION("Sony Programmable I/O Control Device driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SONYPI_DRIVER_VERSION);
+
+static int minor = -1;
+module_param(minor, int, 0);
+MODULE_PARM_DESC(minor,
+ "minor number of the misc device, default is -1 (automatic)");
+
+static int verbose; /* = 0 */
+module_param(verbose, int, 0644);
+MODULE_PARM_DESC(verbose, "be verbose, default is 0 (no)");
+
+static int fnkeyinit; /* = 0 */
+module_param(fnkeyinit, int, 0444);
+MODULE_PARM_DESC(fnkeyinit,
+ "set this if your Fn keys do not generate any event");
+
+static int camera; /* = 0 */
+module_param(camera, int, 0444);
+MODULE_PARM_DESC(camera,
+ "set this if you have a MotionEye camera (PictureBook series)");
+
+static int compat; /* = 0 */
+module_param(compat, int, 0444);
+MODULE_PARM_DESC(compat,
+ "set this if you want to enable backward compatibility mode");
+
+static unsigned long mask = 0xffffffff;
+module_param(mask, ulong, 0644);
+MODULE_PARM_DESC(mask,
+ "set this to the mask of event you want to enable (see doc)");
+
+static int useinput = 1;
+module_param(useinput, int, 0444);
+MODULE_PARM_DESC(useinput,
+ "set this if you would like sonypi to feed events to the input subsystem");
+
+static int check_ioport = 1;
+module_param(check_ioport, int, 0444);
+MODULE_PARM_DESC(check_ioport,
+ "set this to 0 if you think the automatic ioport check for sony-laptop is wrong");
+
+#define SONYPI_DEVICE_MODEL_TYPE1 1
+#define SONYPI_DEVICE_MODEL_TYPE2 2
+#define SONYPI_DEVICE_MODEL_TYPE3 3
+
+/* type1 models use those */
+#define SONYPI_IRQ_PORT 0x8034
+#define SONYPI_IRQ_SHIFT 22
+#define SONYPI_TYPE1_BASE 0x50
+#define SONYPI_G10A (SONYPI_TYPE1_BASE+0x14)
+#define SONYPI_TYPE1_REGION_SIZE 0x08
+#define SONYPI_TYPE1_EVTYPE_OFFSET 0x04
+
+/* type2 series specifics */
+#define SONYPI_SIRQ 0x9b
+#define SONYPI_SLOB 0x9c
+#define SONYPI_SHIB 0x9d
+#define SONYPI_TYPE2_REGION_SIZE 0x20
+#define SONYPI_TYPE2_EVTYPE_OFFSET 0x12
+
+/* type3 series specifics */
+#define SONYPI_TYPE3_BASE 0x40
+#define SONYPI_TYPE3_GID2 (SONYPI_TYPE3_BASE+0x48) /* 16 bits */
+#define SONYPI_TYPE3_MISC (SONYPI_TYPE3_BASE+0x6d) /* 8 bits */
+#define SONYPI_TYPE3_REGION_SIZE 0x20
+#define SONYPI_TYPE3_EVTYPE_OFFSET 0x12
+
+/* battery / brightness addresses */
+#define SONYPI_BAT_FLAGS 0x81
+#define SONYPI_LCD_LIGHT 0x96
+#define SONYPI_BAT1_PCTRM 0xa0
+#define SONYPI_BAT1_LEFT 0xa2
+#define SONYPI_BAT1_MAXRT 0xa4
+#define SONYPI_BAT2_PCTRM 0xa8
+#define SONYPI_BAT2_LEFT 0xaa
+#define SONYPI_BAT2_MAXRT 0xac
+#define SONYPI_BAT1_MAXTK 0xb0
+#define SONYPI_BAT1_FULL 0xb2
+#define SONYPI_BAT2_MAXTK 0xb8
+#define SONYPI_BAT2_FULL 0xba
+
+/* FAN0 information (reverse engineered from ACPI tables) */
+#define SONYPI_FAN0_STATUS 0x93
+#define SONYPI_TEMP_STATUS 0xC1
+
+/* ioports used for brightness and type2 events */
+#define SONYPI_DATA_IOPORT 0x62
+#define SONYPI_CST_IOPORT 0x66
+
+/* The set of possible ioports */
+struct sonypi_ioport_list {
+ u16 port1;
+ u16 port2;
+};
+
+static struct sonypi_ioport_list sonypi_type1_ioport_list[] = {
+ { 0x10c0, 0x10c4 }, /* looks like the default on C1Vx */
+ { 0x1080, 0x1084 },
+ { 0x1090, 0x1094 },
+ { 0x10a0, 0x10a4 },
+ { 0x10b0, 0x10b4 },
+ { 0x0, 0x0 }
+};
+
+static struct sonypi_ioport_list sonypi_type2_ioport_list[] = {
+ { 0x1080, 0x1084 },
+ { 0x10a0, 0x10a4 },
+ { 0x10c0, 0x10c4 },
+ { 0x10e0, 0x10e4 },
+ { 0x0, 0x0 }
+};
+
+/* same as in type 2 models */
+static struct sonypi_ioport_list *sonypi_type3_ioport_list =
+ sonypi_type2_ioport_list;
+
+/* The set of possible interrupts */
+struct sonypi_irq_list {
+ u16 irq;
+ u16 bits;
+};
+
+static struct sonypi_irq_list sonypi_type1_irq_list[] = {
+ { 11, 0x2 }, /* IRQ 11, GO22=0,GO23=1 in AML */
+ { 10, 0x1 }, /* IRQ 10, GO22=1,GO23=0 in AML */
+ { 5, 0x0 }, /* IRQ 5, GO22=0,GO23=0 in AML */
+ { 0, 0x3 } /* no IRQ, GO22=1,GO23=1 in AML */
+};
+
+static struct sonypi_irq_list sonypi_type2_irq_list[] = {
+ { 11, 0x80 }, /* IRQ 11, 0x80 in SIRQ in AML */
+ { 10, 0x40 }, /* IRQ 10, 0x40 in SIRQ in AML */
+ { 9, 0x20 }, /* IRQ 9, 0x20 in SIRQ in AML */
+ { 6, 0x10 }, /* IRQ 6, 0x10 in SIRQ in AML */
+ { 0, 0x00 } /* no IRQ, 0x00 in SIRQ in AML */
+};
+
+/* same as in type2 models */
+static struct sonypi_irq_list *sonypi_type3_irq_list = sonypi_type2_irq_list;
+
+#define SONYPI_CAMERA_BRIGHTNESS 0
+#define SONYPI_CAMERA_CONTRAST 1
+#define SONYPI_CAMERA_HUE 2
+#define SONYPI_CAMERA_COLOR 3
+#define SONYPI_CAMERA_SHARPNESS 4
+
+#define SONYPI_CAMERA_PICTURE 5
+#define SONYPI_CAMERA_EXPOSURE_MASK 0xC
+#define SONYPI_CAMERA_WHITE_BALANCE_MASK 0x3
+#define SONYPI_CAMERA_PICTURE_MODE_MASK 0x30
+#define SONYPI_CAMERA_MUTE_MASK 0x40
+
+/* the rest don't need a loop until not 0xff */
+#define SONYPI_CAMERA_AGC 6
+#define SONYPI_CAMERA_AGC_MASK 0x30
+#define SONYPI_CAMERA_SHUTTER_MASK 0x7
+
+#define SONYPI_CAMERA_SHUTDOWN_REQUEST 7
+#define SONYPI_CAMERA_CONTROL 0x10
+
+#define SONYPI_CAMERA_STATUS 7
+#define SONYPI_CAMERA_STATUS_READY 0x2
+#define SONYPI_CAMERA_STATUS_POSITION 0x4
+
+#define SONYPI_DIRECTION_BACKWARDS 0x4
+
+#define SONYPI_CAMERA_REVISION 8
+#define SONYPI_CAMERA_ROMVERSION 9
+
+/* Event masks */
+#define SONYPI_JOGGER_MASK 0x00000001
+#define SONYPI_CAPTURE_MASK 0x00000002
+#define SONYPI_FNKEY_MASK 0x00000004
+#define SONYPI_BLUETOOTH_MASK 0x00000008
+#define SONYPI_PKEY_MASK 0x00000010
+#define SONYPI_BACK_MASK 0x00000020
+#define SONYPI_HELP_MASK 0x00000040
+#define SONYPI_LID_MASK 0x00000080
+#define SONYPI_ZOOM_MASK 0x00000100
+#define SONYPI_THUMBPHRASE_MASK 0x00000200
+#define SONYPI_MEYE_MASK 0x00000400
+#define SONYPI_MEMORYSTICK_MASK 0x00000800
+#define SONYPI_BATTERY_MASK 0x00001000
+#define SONYPI_WIRELESS_MASK 0x00002000
+
+struct sonypi_event {
+ u8 data;
+ u8 event;
+};
+
+/* The set of possible button release events */
+static struct sonypi_event sonypi_releaseev[] = {
+ { 0x00, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0, 0 }
+};
+
+/* The set of possible jogger events */
+static struct sonypi_event sonypi_joggerev[] = {
+ { 0x1f, SONYPI_EVENT_JOGDIAL_UP },
+ { 0x01, SONYPI_EVENT_JOGDIAL_DOWN },
+ { 0x5f, SONYPI_EVENT_JOGDIAL_UP_PRESSED },
+ { 0x41, SONYPI_EVENT_JOGDIAL_DOWN_PRESSED },
+ { 0x1e, SONYPI_EVENT_JOGDIAL_FAST_UP },
+ { 0x02, SONYPI_EVENT_JOGDIAL_FAST_DOWN },
+ { 0x5e, SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED },
+ { 0x42, SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED },
+ { 0x1d, SONYPI_EVENT_JOGDIAL_VFAST_UP },
+ { 0x03, SONYPI_EVENT_JOGDIAL_VFAST_DOWN },
+ { 0x5d, SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED },
+ { 0x43, SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED },
+ { 0x40, SONYPI_EVENT_JOGDIAL_PRESSED },
+ { 0, 0 }
+};
+
+/* The set of possible capture button events */
+static struct sonypi_event sonypi_captureev[] = {
+ { 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED },
+ { 0x07, SONYPI_EVENT_CAPTURE_PRESSED },
+ { 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED },
+ { 0, 0 }
+};
+
+/* The set of possible fnkeys events */
+static struct sonypi_event sonypi_fnkeyev[] = {
+ { 0x10, SONYPI_EVENT_FNKEY_ESC },
+ { 0x11, SONYPI_EVENT_FNKEY_F1 },
+ { 0x12, SONYPI_EVENT_FNKEY_F2 },
+ { 0x13, SONYPI_EVENT_FNKEY_F3 },
+ { 0x14, SONYPI_EVENT_FNKEY_F4 },
+ { 0x15, SONYPI_EVENT_FNKEY_F5 },
+ { 0x16, SONYPI_EVENT_FNKEY_F6 },
+ { 0x17, SONYPI_EVENT_FNKEY_F7 },
+ { 0x18, SONYPI_EVENT_FNKEY_F8 },
+ { 0x19, SONYPI_EVENT_FNKEY_F9 },
+ { 0x1a, SONYPI_EVENT_FNKEY_F10 },
+ { 0x1b, SONYPI_EVENT_FNKEY_F11 },
+ { 0x1c, SONYPI_EVENT_FNKEY_F12 },
+ { 0x1f, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x21, SONYPI_EVENT_FNKEY_1 },
+ { 0x22, SONYPI_EVENT_FNKEY_2 },
+ { 0x31, SONYPI_EVENT_FNKEY_D },
+ { 0x32, SONYPI_EVENT_FNKEY_E },
+ { 0x33, SONYPI_EVENT_FNKEY_F },
+ { 0x34, SONYPI_EVENT_FNKEY_S },
+ { 0x35, SONYPI_EVENT_FNKEY_B },
+ { 0x36, SONYPI_EVENT_FNKEY_ONLY },
+ { 0, 0 }
+};
+
+/* The set of possible program key events */
+static struct sonypi_event sonypi_pkeyev[] = {
+ { 0x01, SONYPI_EVENT_PKEY_P1 },
+ { 0x02, SONYPI_EVENT_PKEY_P2 },
+ { 0x04, SONYPI_EVENT_PKEY_P3 },
+ { 0x5c, SONYPI_EVENT_PKEY_P1 },
+ { 0, 0 }
+};
+
+/* The set of possible bluetooth events */
+static struct sonypi_event sonypi_blueev[] = {
+ { 0x55, SONYPI_EVENT_BLUETOOTH_PRESSED },
+ { 0x59, SONYPI_EVENT_BLUETOOTH_ON },
+ { 0x5a, SONYPI_EVENT_BLUETOOTH_OFF },
+ { 0, 0 }
+};
+
+/* The set of possible wireless events */
+static struct sonypi_event sonypi_wlessev[] = {
+ { 0x59, SONYPI_EVENT_WIRELESS_ON },
+ { 0x5a, SONYPI_EVENT_WIRELESS_OFF },
+ { 0, 0 }
+};
+
+/* The set of possible back button events */
+static struct sonypi_event sonypi_backev[] = {
+ { 0x20, SONYPI_EVENT_BACK_PRESSED },
+ { 0, 0 }
+};
+
+/* The set of possible help button events */
+static struct sonypi_event sonypi_helpev[] = {
+ { 0x3b, SONYPI_EVENT_HELP_PRESSED },
+ { 0, 0 }
+};
+
+
+/* The set of possible lid events */
+static struct sonypi_event sonypi_lidev[] = {
+ { 0x51, SONYPI_EVENT_LID_CLOSED },
+ { 0x50, SONYPI_EVENT_LID_OPENED },
+ { 0, 0 }
+};
+
+/* The set of possible zoom events */
+static struct sonypi_event sonypi_zoomev[] = {
+ { 0x39, SONYPI_EVENT_ZOOM_PRESSED },
+ { 0, 0 }
+};
+
+/* The set of possible thumbphrase events */
+static struct sonypi_event sonypi_thumbphraseev[] = {
+ { 0x3a, SONYPI_EVENT_THUMBPHRASE_PRESSED },
+ { 0, 0 }
+};
+
+/* The set of possible motioneye camera events */
+static struct sonypi_event sonypi_meyeev[] = {
+ { 0x00, SONYPI_EVENT_MEYE_FACE },
+ { 0x01, SONYPI_EVENT_MEYE_OPPOSITE },
+ { 0, 0 }
+};
+
+/* The set of possible memorystick events */
+static struct sonypi_event sonypi_memorystickev[] = {
+ { 0x53, SONYPI_EVENT_MEMORYSTICK_INSERT },
+ { 0x54, SONYPI_EVENT_MEMORYSTICK_EJECT },
+ { 0, 0 }
+};
+
+/* The set of possible battery events */
+static struct sonypi_event sonypi_batteryev[] = {
+ { 0x20, SONYPI_EVENT_BATTERY_INSERT },
+ { 0x30, SONYPI_EVENT_BATTERY_REMOVE },
+ { 0, 0 }
+};
+
+static struct sonypi_eventtypes {
+ int model;
+ u8 data;
+ unsigned long mask;
+ struct sonypi_event * events;
+} sonypi_eventtypes[] = {
+ { SONYPI_DEVICE_MODEL_TYPE1, 0, 0xffffffff, sonypi_releaseev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x70, SONYPI_MEYE_MASK, sonypi_meyeev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_LID_MASK, sonypi_lidev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev },
+
+ { SONYPI_DEVICE_MODEL_TYPE2, 0, 0xffffffff, sonypi_releaseev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x38, SONYPI_LID_MASK, sonypi_lidev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_BACK_MASK, sonypi_backev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_HELP_MASK, sonypi_helpev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
+
+ { SONYPI_DEVICE_MODEL_TYPE3, 0, 0xffffffff, sonypi_releaseev },
+ { SONYPI_DEVICE_MODEL_TYPE3, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+ { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
+ { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+ { SONYPI_DEVICE_MODEL_TYPE3, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
+ { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { 0 }
+};
+
+#define SONYPI_BUF_SIZE 128
+
+/* Correspondence table between sonypi events and input layer events */
+static struct {
+ int sonypiev;
+ int inputev;
+} sonypi_inputkeys[] = {
+ { SONYPI_EVENT_CAPTURE_PRESSED, KEY_CAMERA },
+ { SONYPI_EVENT_FNKEY_ONLY, KEY_FN },
+ { SONYPI_EVENT_FNKEY_ESC, KEY_FN_ESC },
+ { SONYPI_EVENT_FNKEY_F1, KEY_FN_F1 },
+ { SONYPI_EVENT_FNKEY_F2, KEY_FN_F2 },
+ { SONYPI_EVENT_FNKEY_F3, KEY_FN_F3 },
+ { SONYPI_EVENT_FNKEY_F4, KEY_FN_F4 },
+ { SONYPI_EVENT_FNKEY_F5, KEY_FN_F5 },
+ { SONYPI_EVENT_FNKEY_F6, KEY_FN_F6 },
+ { SONYPI_EVENT_FNKEY_F7, KEY_FN_F7 },
+ { SONYPI_EVENT_FNKEY_F8, KEY_FN_F8 },
+ { SONYPI_EVENT_FNKEY_F9, KEY_FN_F9 },
+ { SONYPI_EVENT_FNKEY_F10, KEY_FN_F10 },
+ { SONYPI_EVENT_FNKEY_F11, KEY_FN_F11 },
+ { SONYPI_EVENT_FNKEY_F12, KEY_FN_F12 },
+ { SONYPI_EVENT_FNKEY_1, KEY_FN_1 },
+ { SONYPI_EVENT_FNKEY_2, KEY_FN_2 },
+ { SONYPI_EVENT_FNKEY_D, KEY_FN_D },
+ { SONYPI_EVENT_FNKEY_E, KEY_FN_E },
+ { SONYPI_EVENT_FNKEY_F, KEY_FN_F },
+ { SONYPI_EVENT_FNKEY_S, KEY_FN_S },
+ { SONYPI_EVENT_FNKEY_B, KEY_FN_B },
+ { SONYPI_EVENT_BLUETOOTH_PRESSED, KEY_BLUE },
+ { SONYPI_EVENT_BLUETOOTH_ON, KEY_BLUE },
+ { SONYPI_EVENT_PKEY_P1, KEY_PROG1 },
+ { SONYPI_EVENT_PKEY_P2, KEY_PROG2 },
+ { SONYPI_EVENT_PKEY_P3, KEY_PROG3 },
+ { SONYPI_EVENT_BACK_PRESSED, KEY_BACK },
+ { SONYPI_EVENT_HELP_PRESSED, KEY_HELP },
+ { SONYPI_EVENT_ZOOM_PRESSED, KEY_ZOOM },
+ { SONYPI_EVENT_THUMBPHRASE_PRESSED, BTN_THUMB },
+ { 0, 0 },
+};
+
+struct sonypi_keypress {
+ struct input_dev *dev;
+ int key;
+};
+
+static struct sonypi_device {
+ struct pci_dev *dev;
+ u16 irq;
+ u16 bits;
+ u16 ioport1;
+ u16 ioport2;
+ u16 region_size;
+ u16 evtype_offset;
+ int camera_power;
+ int bluetooth_power;
+ struct mutex lock;
+ struct kfifo fifo;
+ spinlock_t fifo_lock;
+ wait_queue_head_t fifo_proc_list;
+ struct fasync_struct *fifo_async;
+ int open_count;
+ int model;
+ struct input_dev *input_jog_dev;
+ struct input_dev *input_key_dev;
+ struct work_struct input_work;
+ struct kfifo input_fifo;
+ spinlock_t input_fifo_lock;
+} sonypi_device;
+
+#define ITERATIONS_LONG 10000
+#define ITERATIONS_SHORT 10
+
+#define wait_on_command(quiet, command, iterations) { \
+ unsigned int n = iterations; \
+ while (--n && (command)) \
+ udelay(1); \
+ if (!n && (verbose || !quiet)) \
+ printk(KERN_WARNING "sonypi command failed at %s : %s (line %d)\n", __FILE__, __func__, __LINE__); \
+}
+
+#ifdef CONFIG_ACPI
+#define SONYPI_ACPI_ACTIVE (!acpi_disabled)
+#else
+#define SONYPI_ACPI_ACTIVE 0
+#endif /* CONFIG_ACPI */
+
+#ifdef CONFIG_ACPI
+static struct acpi_device *sonypi_acpi_device;
+static int acpi_driver_registered;
+#endif
+
+static int sonypi_ec_write(u8 addr, u8 value)
+{
+#ifdef CONFIG_ACPI
+ if (SONYPI_ACPI_ACTIVE)
+ return ec_write(addr, value);
+#endif
+ wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG);
+ outb_p(0x81, SONYPI_CST_IOPORT);
+ wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
+ outb_p(addr, SONYPI_DATA_IOPORT);
+ wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
+ outb_p(value, SONYPI_DATA_IOPORT);
+ wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
+ return 0;
+}
+
+static int sonypi_ec_read(u8 addr, u8 *value)
+{
+#ifdef CONFIG_ACPI
+ if (SONYPI_ACPI_ACTIVE)
+ return ec_read(addr, value);
+#endif
+ wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG);
+ outb_p(0x80, SONYPI_CST_IOPORT);
+ wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
+ outb_p(addr, SONYPI_DATA_IOPORT);
+ wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
+ *value = inb_p(SONYPI_DATA_IOPORT);
+ return 0;
+}
+
+static int ec_read16(u8 addr, u16 *value)
+{
+ u8 val_lb, val_hb;
+ if (sonypi_ec_read(addr, &val_lb))
+ return -1;
+ if (sonypi_ec_read(addr + 1, &val_hb))
+ return -1;
+ *value = val_lb | (val_hb << 8);
+ return 0;
+}
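+
+/* Illustrative only: e.g. a low byte of 0x34 and a high byte of 0x12 combine to 0x1234. */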
+
+/* Initializes the device - this comes from the AML code in the ACPI bios */
+static void sonypi_type1_srs(void)
+{
+ u32 v;
+
+ pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
+ v = (v & 0xFFFF0000) | ((u32) sonypi_device.ioport1);
+ pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);
+
+ pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
+ v = (v & 0xFFF0FFFF) |
+ (((u32) sonypi_device.ioport1 ^ sonypi_device.ioport2) << 16);
+ pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);
+
+ v = inl(SONYPI_IRQ_PORT);
+ v &= ~(((u32) 0x3) << SONYPI_IRQ_SHIFT);
+ v |= (((u32) sonypi_device.bits) << SONYPI_IRQ_SHIFT);
+ outl(v, SONYPI_IRQ_PORT);
+
+ pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
+ v = (v & 0xFF1FFFFF) | 0x00C00000;
+ pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);
+}
+
+static void sonypi_type2_srs(void)
+{
+ if (sonypi_ec_write(SONYPI_SHIB, (sonypi_device.ioport1 & 0xFF00) >> 8))
+ printk(KERN_WARNING "ec_write failed\n");
+ if (sonypi_ec_write(SONYPI_SLOB, sonypi_device.ioport1 & 0x00FF))
+ printk(KERN_WARNING "ec_write failed\n");
+ if (sonypi_ec_write(SONYPI_SIRQ, sonypi_device.bits))
+ printk(KERN_WARNING "ec_write failed\n");
+ udelay(10);
+}
+
+static void sonypi_type3_srs(void)
+{
+ u16 v16;
+ u8 v8;
+
+ /* This model type uses the same initialization of
+ * the embedded controller as the type2 models. */
+ sonypi_type2_srs();
+
+ /* Initialization of PCI config space of the LPC interface bridge. */
+ v16 = (sonypi_device.ioport1 & 0xFFF0) | 0x01;
+ pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, v16);
+ pci_read_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, &v8);
+ v8 = (v8 & 0xCF) | 0x10;
+ pci_write_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, v8);
+}
+
+/* Disables the device - this comes from the AML code in the ACPI bios */
+static void sonypi_type1_dis(void)
+{
+ u32 v;
+
+ pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
+ v = v & 0xFF3FFFFF;
+ pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);
+
+ v = inl(SONYPI_IRQ_PORT);
+ v |= (0x3 << SONYPI_IRQ_SHIFT);
+ outl(v, SONYPI_IRQ_PORT);
+}
+
+static void sonypi_type2_dis(void)
+{
+ if (sonypi_ec_write(SONYPI_SHIB, 0))
+ printk(KERN_WARNING "ec_write failed\n");
+ if (sonypi_ec_write(SONYPI_SLOB, 0))
+ printk(KERN_WARNING "ec_write failed\n");
+ if (sonypi_ec_write(SONYPI_SIRQ, 0))
+ printk(KERN_WARNING "ec_write failed\n");
+}
+
+static void sonypi_type3_dis(void)
+{
+ sonypi_type2_dis();
+ udelay(10);
+ pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, 0);
+}
+
+static u8 sonypi_call1(u8 dev)
+{
+ u8 v1, v2;
+
+ wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+ outb(dev, sonypi_device.ioport2);
+ v1 = inb_p(sonypi_device.ioport2);
+ v2 = inb_p(sonypi_device.ioport1);
+ return v2;
+}
+
+static u8 sonypi_call2(u8 dev, u8 fn)
+{
+ u8 v1;
+
+ wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+ outb(dev, sonypi_device.ioport2);
+ wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+ outb(fn, sonypi_device.ioport1);
+ v1 = inb_p(sonypi_device.ioport1);
+ return v1;
+}
+
+static u8 sonypi_call3(u8 dev, u8 fn, u8 v)
+{
+ u8 v1;
+
+ wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+ outb(dev, sonypi_device.ioport2);
+ wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+ outb(fn, sonypi_device.ioport1);
+ wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+ outb(v, sonypi_device.ioport1);
+ v1 = inb_p(sonypi_device.ioport1);
+ return v1;
+}
+
+#if 0
+/* Get brightness, hue etc. Unreliable... */
+static u8 sonypi_read(u8 fn)
+{
+ u8 v1, v2;
+ int n = 100;
+
+ while (n--) {
+ v1 = sonypi_call2(0x8f, fn);
+ v2 = sonypi_call2(0x8f, fn);
+ if (v1 == v2 && v1 != 0xff)
+ return v1;
+ }
+ return 0xff;
+}
+#endif
+
+/* Set brightness, hue etc */
+static void sonypi_set(u8 fn, u8 v)
+{
+ wait_on_command(0, sonypi_call3(0x90, fn, v), ITERATIONS_SHORT);
+}
+
+/* Tests if the camera is ready */
+static int sonypi_camera_ready(void)
+{
+ u8 v;
+
+ v = sonypi_call2(0x8f, SONYPI_CAMERA_STATUS);
+ return (v != 0xff && (v & SONYPI_CAMERA_STATUS_READY));
+}
+
+/* Turns the camera off */
+static void sonypi_camera_off(void)
+{
+ sonypi_set(SONYPI_CAMERA_PICTURE, SONYPI_CAMERA_MUTE_MASK);
+
+ if (!sonypi_device.camera_power)
+ return;
+
+ sonypi_call2(0x91, 0);
+ sonypi_device.camera_power = 0;
+}
+
+/* Turns the camera on */
+static void sonypi_camera_on(void)
+{
+ int i, j;
+
+ if (sonypi_device.camera_power)
+ return;
+
+ for (j = 5; j > 0; j--) {
+
+ while (sonypi_call2(0x91, 0x1))
+ msleep(10);
+ sonypi_call1(0x93);
+
+ for (i = 400; i > 0; i--) {
+ if (sonypi_camera_ready())
+ break;
+ msleep(10);
+ }
+ if (i)
+ break;
+ }
+
+ if (j == 0) {
+ printk(KERN_WARNING "sonypi: failed to power on camera\n");
+ return;
+ }
+
+ sonypi_set(0x10, 0x5a);
+ sonypi_device.camera_power = 1;
+}
+
+/* sets the bluetooth subsystem power state */
+static void sonypi_setbluetoothpower(u8 state)
+{
+ state = !!state;
+
+ if (sonypi_device.bluetooth_power == state)
+ return;
+
+ sonypi_call2(0x96, state);
+ sonypi_call1(0x82);
+ sonypi_device.bluetooth_power = state;
+}
+
+static void input_keyrelease(struct work_struct *work)
+{
+ struct sonypi_keypress kp;
+
+ while (kfifo_out_locked(&sonypi_device.input_fifo, (unsigned char *)&kp,
+ sizeof(kp), &sonypi_device.input_fifo_lock)
+ == sizeof(kp)) {
+ msleep(10);
+ input_report_key(kp.dev, kp.key, 0);
+ input_sync(kp.dev);
+ }
+}
+
+static void sonypi_report_input_event(u8 event)
+{
+ struct input_dev *jog_dev = sonypi_device.input_jog_dev;
+ struct input_dev *key_dev = sonypi_device.input_key_dev;
+ struct sonypi_keypress kp = { NULL };
+ int i;
+
+ switch (event) {
+ case SONYPI_EVENT_JOGDIAL_UP:
+ case SONYPI_EVENT_JOGDIAL_UP_PRESSED:
+ input_report_rel(jog_dev, REL_WHEEL, 1);
+ input_sync(jog_dev);
+ break;
+
+ case SONYPI_EVENT_JOGDIAL_DOWN:
+ case SONYPI_EVENT_JOGDIAL_DOWN_PRESSED:
+ input_report_rel(jog_dev, REL_WHEEL, -1);
+ input_sync(jog_dev);
+ break;
+
+ case SONYPI_EVENT_JOGDIAL_PRESSED:
+ kp.key = BTN_MIDDLE;
+ kp.dev = jog_dev;
+ break;
+
+ case SONYPI_EVENT_FNKEY_RELEASED:
+ /* Nothing, not all VAIOs generate this event */
+ break;
+
+ default:
+ for (i = 0; sonypi_inputkeys[i].sonypiev; i++)
+ if (event == sonypi_inputkeys[i].sonypiev) {
+ kp.dev = key_dev;
+ kp.key = sonypi_inputkeys[i].inputev;
+ break;
+ }
+ break;
+ }
+
+ if (kp.dev) {
+ input_report_key(kp.dev, kp.key, 1);
+ input_sync(kp.dev);
+ kfifo_in_locked(&sonypi_device.input_fifo,
+ (unsigned char *)&kp, sizeof(kp),
+ &sonypi_device.input_fifo_lock);
+ schedule_work(&sonypi_device.input_work);
+ }
+}
+
+/* Interrupt handler: some event is available */
+static irqreturn_t sonypi_irq(int irq, void *dev_id)
+{
+ u8 v1, v2, event = 0;
+ int i, j;
+
+ v1 = inb_p(sonypi_device.ioport1);
+ v2 = inb_p(sonypi_device.ioport1 + sonypi_device.evtype_offset);
+
+ for (i = 0; sonypi_eventtypes[i].model; i++) {
+ if (sonypi_device.model != sonypi_eventtypes[i].model)
+ continue;
+ if ((v2 & sonypi_eventtypes[i].data) !=
+ sonypi_eventtypes[i].data)
+ continue;
+ if (!(mask & sonypi_eventtypes[i].mask))
+ continue;
+ for (j = 0; sonypi_eventtypes[i].events[j].event; j++) {
+ if (v1 == sonypi_eventtypes[i].events[j].data) {
+ event = sonypi_eventtypes[i].events[j].event;
+ goto found;
+ }
+ }
+ }
+
+ if (verbose)
+ printk(KERN_WARNING
+ "sonypi: unknown event port1=0x%02x,port2=0x%02x\n",
+ v1, v2);
+ /* We need to return IRQ_HANDLED here because there *are*
+ * events belonging to the sonypi device we don't know about,
+ * but we still don't want those to pollute the logs... */
+ return IRQ_HANDLED;
+
+found:
+ if (verbose > 1)
+ printk(KERN_INFO
+ "sonypi: event port1=0x%02x,port2=0x%02x\n", v1, v2);
+
+ if (useinput)
+ sonypi_report_input_event(event);
+
+ kfifo_in_locked(&sonypi_device.fifo, (unsigned char *)&event,
+ sizeof(event), &sonypi_device.fifo_lock);
+ kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN);
+ wake_up_interruptible(&sonypi_device.fifo_proc_list);
+
+ return IRQ_HANDLED;
+}
+
+static int sonypi_misc_fasync(int fd, struct file *filp, int on)
+{
+ return fasync_helper(fd, filp, on, &sonypi_device.fifo_async);
+}
+
+static int sonypi_misc_release(struct inode *inode, struct file *file)
+{
+ mutex_lock(&sonypi_device.lock);
+ sonypi_device.open_count--;
+ mutex_unlock(&sonypi_device.lock);
+ return 0;
+}
+
+static int sonypi_misc_open(struct inode *inode, struct file *file)
+{
+ mutex_lock(&sonypi_device.lock);
+ /* Flush input queue on first open */
+ if (!sonypi_device.open_count)
+ kfifo_reset(&sonypi_device.fifo);
+ sonypi_device.open_count++;
+ mutex_unlock(&sonypi_device.lock);
+
+ return 0;
+}
+
+static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ ssize_t ret;
+ unsigned char c;
+
+ if ((kfifo_len(&sonypi_device.fifo) == 0) &&
+ (file->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(sonypi_device.fifo_proc_list,
+ kfifo_len(&sonypi_device.fifo) != 0);
+ if (ret)
+ return ret;
+
+ while (ret < count &&
+ (kfifo_out_locked(&sonypi_device.fifo, &c, sizeof(c),
+ &sonypi_device.fifo_lock) == sizeof(c))) {
+ if (put_user(c, buf++))
+ return -EFAULT;
+ ret++;
+ }
+
+ if (ret > 0) {
+ struct inode *inode = file_inode(file);
+ inode->i_atime = current_time(inode);
+ }
+
+ return ret;
+}
+
+static __poll_t sonypi_misc_poll(struct file *file, poll_table *wait)
+{
+ poll_wait(file, &sonypi_device.fifo_proc_list, wait);
+ if (kfifo_len(&sonypi_device.fifo))
+ return EPOLLIN | EPOLLRDNORM;
+ return 0;
+}
+
+static long sonypi_misc_ioctl(struct file *fp,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret = 0;
+ void __user *argp = (void __user *)arg;
+ u8 val8;
+ u16 val16;
+
+ mutex_lock(&sonypi_device.lock);
+ switch (cmd) {
+ case SONYPI_IOCGBRT:
+ if (sonypi_ec_read(SONYPI_LCD_LIGHT, &val8)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val8, sizeof(val8)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCSBRT:
+ if (copy_from_user(&val8, argp, sizeof(val8))) {
+ ret = -EFAULT;
+ break;
+ }
+ if (sonypi_ec_write(SONYPI_LCD_LIGHT, val8))
+ ret = -EIO;
+ break;
+ case SONYPI_IOCGBAT1CAP:
+ if (ec_read16(SONYPI_BAT1_FULL, &val16)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val16, sizeof(val16)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCGBAT1REM:
+ if (ec_read16(SONYPI_BAT1_LEFT, &val16)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val16, sizeof(val16)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCGBAT2CAP:
+ if (ec_read16(SONYPI_BAT2_FULL, &val16)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val16, sizeof(val16)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCGBAT2REM:
+ if (ec_read16(SONYPI_BAT2_LEFT, &val16)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val16, sizeof(val16)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCGBATFLAGS:
+ if (sonypi_ec_read(SONYPI_BAT_FLAGS, &val8)) {
+ ret = -EIO;
+ break;
+ }
+ val8 &= 0x07;
+ if (copy_to_user(argp, &val8, sizeof(val8)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCGBLUE:
+ val8 = sonypi_device.bluetooth_power;
+ if (copy_to_user(argp, &val8, sizeof(val8)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCSBLUE:
+ if (copy_from_user(&val8, argp, sizeof(val8))) {
+ ret = -EFAULT;
+ break;
+ }
+ sonypi_setbluetoothpower(val8);
+ break;
+ /* FAN Controls */
+ case SONYPI_IOCGFAN:
+ if (sonypi_ec_read(SONYPI_FAN0_STATUS, &val8)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val8, sizeof(val8)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCSFAN:
+ if (copy_from_user(&val8, argp, sizeof(val8))) {
+ ret = -EFAULT;
+ break;
+ }
+ if (sonypi_ec_write(SONYPI_FAN0_STATUS, val8))
+ ret = -EIO;
+ break;
+ /* GET Temperature (useful under APM) */
+ case SONYPI_IOCGTEMP:
+ if (sonypi_ec_read(SONYPI_TEMP_STATUS, &val8)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val8, sizeof(val8)))
+ ret = -EFAULT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&sonypi_device.lock);
+ return ret;
+}
+
+static const struct file_operations sonypi_misc_fops = {
+ .owner = THIS_MODULE,
+ .read = sonypi_misc_read,
+ .poll = sonypi_misc_poll,
+ .open = sonypi_misc_open,
+ .release = sonypi_misc_release,
+ .fasync = sonypi_misc_fasync,
+ .unlocked_ioctl = sonypi_misc_ioctl,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice sonypi_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "sonypi",
+ .fops = &sonypi_misc_fops,
+};
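+
+/*
+ * Illustrative only (userspace usage, not part of this file), assuming udev
+ * exposes the misc device as /dev/sonypi:
+ *
+ *	unsigned char ev;
+ *	int fd = open("/dev/sonypi", O_RDONLY);
+ *	while (read(fd, &ev, 1) == 1)
+ *		printf("sonypi event 0x%02x\n", ev);
+ *
+ * Each read() drains one event byte from the kfifo filled by sonypi_irq().
+ */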
+
+static void sonypi_enable(unsigned int camera_on)
+{
+ switch (sonypi_device.model) {
+ case SONYPI_DEVICE_MODEL_TYPE1:
+ sonypi_type1_srs();
+ break;
+ case SONYPI_DEVICE_MODEL_TYPE2:
+ sonypi_type2_srs();
+ break;
+ case SONYPI_DEVICE_MODEL_TYPE3:
+ sonypi_type3_srs();
+ break;
+ }
+
+ sonypi_call1(0x82);
+ sonypi_call2(0x81, 0xff);
+ sonypi_call1(compat ? 0x92 : 0x82);
+
+ /* Enable ACPI mode to get Fn key events */
+ if (!SONYPI_ACPI_ACTIVE && fnkeyinit)
+ outb(0xf0, 0xb2);
+
+ if (camera && camera_on)
+ sonypi_camera_on();
+}
+
+static int sonypi_disable(void)
+{
+ sonypi_call2(0x81, 0); /* make sure we don't get any more events */
+ if (camera)
+ sonypi_camera_off();
+
+ /* disable ACPI mode */
+ if (!SONYPI_ACPI_ACTIVE && fnkeyinit)
+ outb(0xf1, 0xb2);
+
+ switch (sonypi_device.model) {
+ case SONYPI_DEVICE_MODEL_TYPE1:
+ sonypi_type1_dis();
+ break;
+ case SONYPI_DEVICE_MODEL_TYPE2:
+ sonypi_type2_dis();
+ break;
+ case SONYPI_DEVICE_MODEL_TYPE3:
+ sonypi_type3_dis();
+ break;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static int sonypi_acpi_add(struct acpi_device *device)
+{
+ sonypi_acpi_device = device;
+ strcpy(acpi_device_name(device), "Sony laptop hotkeys");
+ strcpy(acpi_device_class(device), "sony/hotkey");
+ return 0;
+}
+
+static void sonypi_acpi_remove(struct acpi_device *device)
+{
+ sonypi_acpi_device = NULL;
+}
+
+static const struct acpi_device_id sonypi_device_ids[] = {
+ {"SNY6001", 0},
+ {"", 0},
+};
+
+static struct acpi_driver sonypi_acpi_driver = {
+ .name = "sonypi",
+ .class = "hkey",
+ .ids = sonypi_device_ids,
+ .ops = {
+ .add = sonypi_acpi_add,
+ .remove = sonypi_acpi_remove,
+ },
+};
+#endif
+
+static int sonypi_create_input_devices(struct platform_device *pdev)
+{
+ struct input_dev *jog_dev;
+ struct input_dev *key_dev;
+ int i;
+ int error;
+
+ sonypi_device.input_jog_dev = jog_dev = input_allocate_device();
+ if (!jog_dev)
+ return -ENOMEM;
+
+ jog_dev->name = "Sony Vaio Jogdial";
+ jog_dev->id.bustype = BUS_ISA;
+ jog_dev->id.vendor = PCI_VENDOR_ID_SONY;
+ jog_dev->dev.parent = &pdev->dev;
+
+ jog_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
+ jog_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_MIDDLE);
+ jog_dev->relbit[0] = BIT_MASK(REL_WHEEL);
+
+ sonypi_device.input_key_dev = key_dev = input_allocate_device();
+ if (!key_dev) {
+ error = -ENOMEM;
+ goto err_free_jogdev;
+ }
+
+ key_dev->name = "Sony Vaio Keys";
+ key_dev->id.bustype = BUS_ISA;
+ key_dev->id.vendor = PCI_VENDOR_ID_SONY;
+ key_dev->dev.parent = &pdev->dev;
+
+ /* Initialize the Input Drivers: special keys */
+ key_dev->evbit[0] = BIT_MASK(EV_KEY);
+ for (i = 0; sonypi_inputkeys[i].sonypiev; i++)
+ if (sonypi_inputkeys[i].inputev)
+ set_bit(sonypi_inputkeys[i].inputev, key_dev->keybit);
+
+ error = input_register_device(jog_dev);
+ if (error)
+ goto err_free_keydev;
+
+ error = input_register_device(key_dev);
+ if (error)
+ goto err_unregister_jogdev;
+
+ return 0;
+
+ err_unregister_jogdev:
+ input_unregister_device(jog_dev);
+ /* Set to NULL so we don't free it again below */
+ jog_dev = NULL;
+ err_free_keydev:
+ input_free_device(key_dev);
+ sonypi_device.input_key_dev = NULL;
+ err_free_jogdev:
+ input_free_device(jog_dev);
+ sonypi_device.input_jog_dev = NULL;
+
+ return error;
+}
+
+static int sonypi_setup_ioports(struct sonypi_device *dev,
+ const struct sonypi_ioport_list *ioport_list)
+{
+ /* Try to detect whether sony-laptop is being used and thus
+ * has already requested one of the known ioports.
+ * As with the deprecated check_region, this is racy, as we have
+ * multiple ioports available and one of them can be requested
+ * between this check and the subsequent request. Still, as an
+ * attempt to be more user-friendly than we currently are,
+ * this is enough.
+ */
+ const struct sonypi_ioport_list *check = ioport_list;
+ while (check_ioport && check->port1) {
+ if (!request_region(check->port1,
+ sonypi_device.region_size,
+ "Sony Programmable I/O Device Check")) {
+ printk(KERN_ERR "sonypi: ioport 0x%.4x busy, using sony-laptop? "
+ "if not use check_ioport=0\n",
+ check->port1);
+ return -EBUSY;
+ }
+ release_region(check->port1, sonypi_device.region_size);
+ check++;
+ }
+
+ while (ioport_list->port1) {
+
+ if (request_region(ioport_list->port1,
+ sonypi_device.region_size,
+ "Sony Programmable I/O Device")) {
+ dev->ioport1 = ioport_list->port1;
+ dev->ioport2 = ioport_list->port2;
+ return 0;
+ }
+ ioport_list++;
+ }
+
+ return -EBUSY;
+}
+
+static int sonypi_setup_irq(struct sonypi_device *dev,
+ const struct sonypi_irq_list *irq_list)
+{
+ while (irq_list->irq) {
+
+ if (!request_irq(irq_list->irq, sonypi_irq,
+ IRQF_SHARED, "sonypi", sonypi_irq)) {
+ dev->irq = irq_list->irq;
+ dev->bits = irq_list->bits;
+ return 0;
+ }
+ irq_list++;
+ }
+
+ return -EBUSY;
+}
+
+static void sonypi_display_info(void)
+{
+ printk(KERN_INFO "sonypi: detected type%d model, "
+ "verbose = %d, fnkeyinit = %s, camera = %s, "
+ "compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n",
+ sonypi_device.model,
+ verbose,
+ fnkeyinit ? "on" : "off",
+ camera ? "on" : "off",
+ compat ? "on" : "off",
+ mask,
+ useinput ? "on" : "off",
+ SONYPI_ACPI_ACTIVE ? "on" : "off");
+ printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n",
+ sonypi_device.irq,
+ sonypi_device.ioport1, sonypi_device.ioport2);
+
+ if (minor == -1)
+ printk(KERN_INFO "sonypi: device allocated minor is %d\n",
+ sonypi_misc_device.minor);
+}
+
+static int sonypi_probe(struct platform_device *dev)
+{
+ const struct sonypi_ioport_list *ioport_list;
+ const struct sonypi_irq_list *irq_list;
+ struct pci_dev *pcidev;
+ int error;
+
+ printk(KERN_WARNING "sonypi: please try the sony-laptop module instead "
+ "and report failures, see also "
+ "http://www.linux.it/~malattia/wiki/index.php/Sony_drivers\n");
+
+ spin_lock_init(&sonypi_device.fifo_lock);
+ error = kfifo_alloc(&sonypi_device.fifo, SONYPI_BUF_SIZE, GFP_KERNEL);
+ if (error) {
+ printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
+ return error;
+ }
+
+ init_waitqueue_head(&sonypi_device.fifo_proc_list);
+ mutex_init(&sonypi_device.lock);
+ sonypi_device.bluetooth_power = -1;
+
+ if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82371AB_3, NULL)))
+ sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE1;
+ else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_ICH6_1, NULL)))
+ sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3;
+ else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_ICH7_1, NULL)))
+ sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3;
+ else
+ sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE2;
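+
+ /* i.e. boards with a PIIX4 (82371AB) south bridge are type1, boards
+ * with an ICH6/ICH7 are type3, and everything else is assumed type2 */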
+
+ if (pcidev && pci_enable_device(pcidev)) {
+ printk(KERN_ERR "sonypi: pci_enable_device failed\n");
+ error = -EIO;
+ goto err_put_pcidev;
+ }
+
+ sonypi_device.dev = pcidev;
+
+ if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE1) {
+ ioport_list = sonypi_type1_ioport_list;
+ sonypi_device.region_size = SONYPI_TYPE1_REGION_SIZE;
+ sonypi_device.evtype_offset = SONYPI_TYPE1_EVTYPE_OFFSET;
+ irq_list = sonypi_type1_irq_list;
+ } else if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) {
+ ioport_list = sonypi_type2_ioport_list;
+ sonypi_device.region_size = SONYPI_TYPE2_REGION_SIZE;
+ sonypi_device.evtype_offset = SONYPI_TYPE2_EVTYPE_OFFSET;
+ irq_list = sonypi_type2_irq_list;
+ } else {
+ ioport_list = sonypi_type3_ioport_list;
+ sonypi_device.region_size = SONYPI_TYPE3_REGION_SIZE;
+ sonypi_device.evtype_offset = SONYPI_TYPE3_EVTYPE_OFFSET;
+ irq_list = sonypi_type3_irq_list;
+ }
+
+ error = sonypi_setup_ioports(&sonypi_device, ioport_list);
+ if (error) {
+ printk(KERN_ERR "sonypi: failed to request ioports\n");
+ goto err_disable_pcidev;
+ }
+
+ error = sonypi_setup_irq(&sonypi_device, irq_list);
+ if (error) {
+ printk(KERN_ERR "sonypi: request_irq failed\n");
+ goto err_free_ioports;
+ }
+
+ if (minor != -1)
+ sonypi_misc_device.minor = minor;
+ error = misc_register(&sonypi_misc_device);
+ if (error) {
+ printk(KERN_ERR "sonypi: misc_register failed\n");
+ goto err_free_irq;
+ }
+
+ sonypi_display_info();
+
+ if (useinput) {
+
+ error = sonypi_create_input_devices(dev);
+ if (error) {
+ printk(KERN_ERR
+ "sonypi: failed to create input devices\n");
+ goto err_miscdev_unregister;
+ }
+
+ spin_lock_init(&sonypi_device.input_fifo_lock);
+ error = kfifo_alloc(&sonypi_device.input_fifo, SONYPI_BUF_SIZE,
+ GFP_KERNEL);
+ if (error) {
+ printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
+ goto err_inpdev_unregister;
+ }
+
+ INIT_WORK(&sonypi_device.input_work, input_keyrelease);
+ }
+
+ sonypi_enable(0);
+
+ return 0;
+
+ err_inpdev_unregister:
+ input_unregister_device(sonypi_device.input_key_dev);
+ input_unregister_device(sonypi_device.input_jog_dev);
+ err_miscdev_unregister:
+ misc_deregister(&sonypi_misc_device);
+ err_free_irq:
+ free_irq(sonypi_device.irq, sonypi_irq);
+ err_free_ioports:
+ release_region(sonypi_device.ioport1, sonypi_device.region_size);
+ err_disable_pcidev:
+ if (pcidev)
+ pci_disable_device(pcidev);
+ err_put_pcidev:
+ pci_dev_put(pcidev);
+ kfifo_free(&sonypi_device.fifo);
+
+ return error;
+}
+
+static int sonypi_remove(struct platform_device *dev)
+{
+ sonypi_disable();
+
+ synchronize_irq(sonypi_device.irq);
+ flush_work(&sonypi_device.input_work);
+
+ if (useinput) {
+ input_unregister_device(sonypi_device.input_key_dev);
+ input_unregister_device(sonypi_device.input_jog_dev);
+ kfifo_free(&sonypi_device.input_fifo);
+ }
+
+ misc_deregister(&sonypi_misc_device);
+
+ free_irq(sonypi_device.irq, sonypi_irq);
+ release_region(sonypi_device.ioport1, sonypi_device.region_size);
+
+ if (sonypi_device.dev) {
+ pci_disable_device(sonypi_device.dev);
+ pci_dev_put(sonypi_device.dev);
+ }
+
+ kfifo_free(&sonypi_device.fifo);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int old_camera_power;
+
+static int sonypi_suspend(struct device *dev)
+{
+ old_camera_power = sonypi_device.camera_power;
+ sonypi_disable();
+
+ return 0;
+}
+
+static int sonypi_resume(struct device *dev)
+{
+ sonypi_enable(old_camera_power);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(sonypi_pm, sonypi_suspend, sonypi_resume);
+#define SONYPI_PM (&sonypi_pm)
+#else
+#define SONYPI_PM NULL
+#endif
+
+static void sonypi_shutdown(struct platform_device *dev)
+{
+ sonypi_disable();
+}
+
+static struct platform_driver sonypi_driver = {
+ .driver = {
+ .name = "sonypi",
+ .pm = SONYPI_PM,
+ },
+ .probe = sonypi_probe,
+ .remove = sonypi_remove,
+ .shutdown = sonypi_shutdown,
+};
+
+static struct platform_device *sonypi_platform_device;
+
+static const struct dmi_system_id sonypi_dmi_table[] __initconst = {
+ {
+ .ident = "Sony Vaio",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PCG-"),
+ },
+ },
+ {
+ .ident = "Sony Vaio",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-"),
+ },
+ },
+ { }
+};
+
+static int __init sonypi_init(void)
+{
+ int error;
+
+ printk(KERN_INFO
+ "sonypi: Sony Programmable I/O Controller Driver v%s.\n",
+ SONYPI_DRIVER_VERSION);
+
+ if (!dmi_check_system(sonypi_dmi_table))
+ return -ENODEV;
+
+ error = platform_driver_register(&sonypi_driver);
+ if (error)
+ return error;
+
+ sonypi_platform_device = platform_device_alloc("sonypi", -1);
+ if (!sonypi_platform_device) {
+ error = -ENOMEM;
+ goto err_driver_unregister;
+ }
+
+ error = platform_device_add(sonypi_platform_device);
+ if (error)
+ goto err_free_device;
+
+#ifdef CONFIG_ACPI
+ if (acpi_bus_register_driver(&sonypi_acpi_driver) >= 0)
+ acpi_driver_registered = 1;
+#endif
+
+ return 0;
+
+ err_free_device:
+ platform_device_put(sonypi_platform_device);
+ err_driver_unregister:
+ platform_driver_unregister(&sonypi_driver);
+ return error;
+}
+
+static void __exit sonypi_exit(void)
+{
+#ifdef CONFIG_ACPI
+ if (acpi_driver_registered)
+ acpi_bus_unregister_driver(&sonypi_acpi_driver);
+#endif
+ platform_device_unregister(sonypi_platform_device);
+ platform_driver_unregister(&sonypi_driver);
+ printk(KERN_INFO "sonypi: removed.\n");
+}
+
+module_init(sonypi_init);
+module_exit(sonypi_exit);
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
new file mode 100644
index 0000000000..896a3550fb
--- /dev/null
+++ b/drivers/char/tlclk.c
@@ -0,0 +1,939 @@
+/*
+ * Telecom Clock driver for Intel NetStructure(tm) MPCBL0010
+ *
+ * Copyright (C) 2005 Kontron Canada
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <sebastien.bouchard@ca.kontron.com> and the current
+ * Maintainer <mark.gross@intel.com>
+ *
+ * Description : This is the TELECOM CLOCK module driver for the
+ * Intel NetStructure MPCBL0010 ATCA computer.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <asm/io.h> /* inb/outb */
+#include <linux/uaccess.h>
+
+MODULE_AUTHOR("Sebastien Bouchard <sebastien.bouchard@ca.kontron.com>");
+MODULE_LICENSE("GPL");
+
+/*Hardware Reset of the PLL */
+#define RESET_ON 0x00
+#define RESET_OFF 0x01
+
+/* MODE SELECT */
+#define NORMAL_MODE 0x00
+#define HOLDOVER_MODE 0x10
+#define FREERUN_MODE 0x20
+
+/* FILTER SELECT */
+#define FILTER_6HZ 0x04
+#define FILTER_12HZ 0x00
+
+/* SELECT REFERENCE FREQUENCY */
+#define REF_CLK1_8kHz 0x00
+#define REF_CLK2_19_44MHz 0x02
+
+/* Select primary or secondary redundant clock */
+#define PRIMARY_CLOCK 0x00
+#define SECONDARY_CLOCK 0x01
+
+/* CLOCK TRANSMISSION DEFINE */
+#define CLK_8kHz 0xff
+#define CLK_16_384MHz 0xfb
+
+#define CLK_1_544MHz 0x00
+#define CLK_2_048MHz 0x01
+#define CLK_4_096MHz 0x02
+#define CLK_6_312MHz 0x03
+#define CLK_8_192MHz 0x04
+#define CLK_19_440MHz 0x06
+
+#define CLK_8_592MHz 0x08
+#define CLK_11_184MHz 0x09
+#define CLK_34_368MHz 0x0b
+#define CLK_44_736MHz 0x0a
+
+/* RECEIVED REFERENCE */
+#define AMC_B1 0
+#define AMC_B2 1
+
+/* HARDWARE SWITCHING DEFINE */
+#define HW_ENABLE 0x80
+#define HW_DISABLE 0x00
+
+/* HARDWARE SWITCHING MODE DEFINE */
+#define PLL_HOLDOVER 0x40
+#define LOST_CLOCK 0x00
+
+/* ALARMS DEFINE */
+#define UNLOCK_MASK 0x10
+#define HOLDOVER_MASK 0x20
+#define SEC_LOST_MASK 0x40
+#define PRI_LOST_MASK 0x80
+
+/* INTERRUPT CAUSE DEFINE */
+
+#define PRI_LOS_01_MASK 0x01
+#define PRI_LOS_10_MASK 0x02
+
+#define SEC_LOS_01_MASK 0x04
+#define SEC_LOS_10_MASK 0x08
+
+#define HOLDOVER_01_MASK 0x10
+#define HOLDOVER_10_MASK 0x20
+
+#define UNLOCK_01_MASK 0x40
+#define UNLOCK_10_MASK 0x80
+
+struct tlclk_alarms {
+ __u32 lost_clocks;
+ __u32 lost_primary_clock;
+ __u32 lost_secondary_clock;
+ __u32 primary_clock_back;
+ __u32 secondary_clock_back;
+ __u32 switchover_primary;
+ __u32 switchover_secondary;
+ __u32 pll_holdover;
+ __u32 pll_end_holdover;
+ __u32 pll_lost_sync;
+ __u32 pll_sync;
+};
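+
+/*
+ * A read() on the misc device below (typically /dev/telco_clock, named
+ * after the miscdevice) blocks until an event has been recorded and then
+ * returns one struct tlclk_alarms snapshot, e.g. (sketch, fd being an
+ * open descriptor on the device):
+ *
+ *	struct tlclk_alarms alarms;
+ *
+ *	read(fd, &alarms, sizeof(alarms));
+ */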
+/* Telecom clock I/O register definition */
+#define TLCLK_BASE 0xa08
+#define TLCLK_REG0 TLCLK_BASE
+#define TLCLK_REG1 (TLCLK_BASE+1)
+#define TLCLK_REG2 (TLCLK_BASE+2)
+#define TLCLK_REG3 (TLCLK_BASE+3)
+#define TLCLK_REG4 (TLCLK_BASE+4)
+#define TLCLK_REG5 (TLCLK_BASE+5)
+#define TLCLK_REG6 (TLCLK_BASE+6)
+#define TLCLK_REG7 (TLCLK_BASE+7)
+
+#define SET_PORT_BITS(port, mask, val) outb(((inb(port) & mask) | val), port)
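+
+/*
+ * SET_PORT_BITS does a read-modify-write of an I/O port: bits cleared in
+ * 'mask' are zeroed and 'val' is then OR-ed in.  For example (sketch), the
+ * mode_select attribute below updates only bits 5:4 of REG0:
+ *
+ *	SET_PORT_BITS(TLCLK_REG0, 0xcf, HOLDOVER_MODE);
+ */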
+
+/* 0 = Dynamic allocation of the major device number */
+#define TLCLK_MAJOR 0
+
+/* sysfs interface definition:
+Upon loading, the driver will create a sysfs directory under
+/sys/devices/platform/telco_clock.
+
+This directory exports the following interfaces. Their operation is
+documented in the MPCBL0010 TPS under the Telecom Clock API section, 11.4.
+alarms :
+current_ref :
+received_ref_clk3a :
+received_ref_clk3b :
+enable_clk3a_output :
+enable_clk3b_output :
+enable_clka0_output :
+enable_clka1_output :
+enable_clkb0_output :
+enable_clkb1_output :
+filter_select :
+hardware_switching :
+hardware_switching_mode :
+telclock_version :
+mode_select :
+refalign :
+reset :
+select_amcb1_transmit_clock :
+select_amcb2_transmit_clock :
+select_redundant_clock :
+select_ref_frequency :
+
+All sysfs interfaces are integers in hex format, i.e. echo 99 > refalign
+has the same effect as echo 0x99 > refalign.
+*/
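+
+/* Example (sketch, assuming the usual sysfs mount point):
+ *
+ *	echo 0x00 > /sys/devices/platform/telco_clock/mode_select
+ *	echo 1 > /sys/devices/platform/telco_clock/refalign
+ *
+ * puts the PLL in normal mode and pulses a reference re-alignment.
+ */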
+
+static unsigned int telclk_interrupt;
+
+static int int_events; /* Events that generated an interrupt */
+static int got_event; /* whether event processing has been done */
+
+static void switchover_timeout(struct timer_list *t);
+static struct timer_list switchover_timer;
+static unsigned long tlclk_timer_data;
+
+static struct tlclk_alarms *alarm_events;
+
+static DEFINE_SPINLOCK(event_lock);
+
+static int tlclk_major = TLCLK_MAJOR;
+
+static irqreturn_t tlclk_interrupt(int irq, void *dev_id);
+
+static DECLARE_WAIT_QUEUE_HEAD(wq);
+
+static unsigned long useflags;
+static DEFINE_MUTEX(tlclk_mutex);
+
+static int tlclk_open(struct inode *inode, struct file *filp)
+{
+ int result;
+
+ mutex_lock(&tlclk_mutex);
+ if (test_and_set_bit(0, &useflags)) {
+ result = -EBUSY;
+ /* this legacy device is always one per system and it doesn't
+ * know how to handle multiple concurrent clients.
+ */
+ goto out;
+ }
+
+ /* Make sure there is no interrupt pending while
+ * initialising the interrupt handler */
+ inb(TLCLK_REG6);
+
+ /* This device is wired through the FPGA IO space of the ATCA blade,
+ * so we can't share this IRQ */
+ result = request_irq(telclk_interrupt, &tlclk_interrupt,
+ 0, "telco_clock", tlclk_interrupt);
+ if (result == -EBUSY)
+ printk(KERN_ERR "tlclk: Interrupt can't be reserved.\n");
+ else
+ inb(TLCLK_REG6); /* Clear interrupt events */
+
+out:
+ mutex_unlock(&tlclk_mutex);
+ return result;
+}
+
+static int tlclk_release(struct inode *inode, struct file *filp)
+{
+ free_irq(telclk_interrupt, tlclk_interrupt);
+ clear_bit(0, &useflags);
+
+ return 0;
+}
+
+static ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ if (count < sizeof(struct tlclk_alarms))
+ return -EIO;
+ if (mutex_lock_interruptible(&tlclk_mutex))
+ return -EINTR;
+
+
+ wait_event_interruptible(wq, got_event);
+ if (copy_to_user(buf, alarm_events, sizeof(struct tlclk_alarms))) {
+ mutex_unlock(&tlclk_mutex);
+ return -EFAULT;
+ }
+
+ memset(alarm_events, 0, sizeof(struct tlclk_alarms));
+ got_event = 0;
+
+ mutex_unlock(&tlclk_mutex);
+ return sizeof(struct tlclk_alarms);
+}
+
+static const struct file_operations tlclk_fops = {
+ .read = tlclk_read,
+ .open = tlclk_open,
+ .release = tlclk_release,
+ .llseek = noop_llseek,
+
+};
+
+static struct miscdevice tlclk_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "telco_clock",
+ .fops = &tlclk_fops,
+};
+
+static ssize_t show_current_ref(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long ret_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&event_lock, flags);
+ ret_val = ((inb(TLCLK_REG1) & 0x08) >> 3);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return sprintf(buf, "0x%lX\n", ret_val);
+}
+
+static DEVICE_ATTR(current_ref, S_IRUGO, show_current_ref, NULL);
+
+
+static ssize_t show_telclock_version(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long ret_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&event_lock, flags);
+ ret_val = inb(TLCLK_REG5);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return sprintf(buf, "0x%lX\n", ret_val);
+}
+
+static DEVICE_ATTR(telclock_version, S_IRUGO,
+ show_telclock_version, NULL);
+
+static ssize_t show_alarms(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long ret_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&event_lock, flags);
+ ret_val = (inb(TLCLK_REG2) & 0xf0);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return sprintf(buf, "0x%lX\n", ret_val);
+}
+
+static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+
+static ssize_t store_received_ref_clk3a(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG1, 0xef, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(received_ref_clk3a, (S_IWUSR|S_IWGRP), NULL,
+ store_received_ref_clk3a);
+
+
+static ssize_t store_received_ref_clk3b(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG1, 0xdf, val << 1);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(received_ref_clk3b, (S_IWUSR|S_IWGRP), NULL,
+ store_received_ref_clk3b);
+
+
+static ssize_t store_enable_clk3b_output(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG3, 0x7f, val << 7);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clk3b_output, (S_IWUSR|S_IWGRP), NULL,
+ store_enable_clk3b_output);
+
+static ssize_t store_enable_clk3a_output(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long flags;
+ unsigned long tmp;
+ unsigned char val;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG3, 0xbf, val << 6);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clk3a_output, (S_IWUSR|S_IWGRP), NULL,
+ store_enable_clk3a_output);
+
+static ssize_t store_enable_clkb1_output(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long flags;
+ unsigned long tmp;
+ unsigned char val;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG2, 0xf7, val << 3);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clkb1_output, (S_IWUSR|S_IWGRP), NULL,
+ store_enable_clkb1_output);
+
+
+static ssize_t store_enable_clka1_output(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long flags;
+ unsigned long tmp;
+ unsigned char val;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG2, 0xfb, val << 2);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clka1_output, (S_IWUSR|S_IWGRP), NULL,
+ store_enable_clka1_output);
+
+static ssize_t store_enable_clkb0_output(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long flags;
+ unsigned long tmp;
+ unsigned char val;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG2, 0xfd, val << 1);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clkb0_output, (S_IWUSR|S_IWGRP), NULL,
+ store_enable_clkb0_output);
+
+static ssize_t store_enable_clka0_output(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long flags;
+ unsigned long tmp;
+ unsigned char val;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG2, 0xfe, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clka0_output, (S_IWUSR|S_IWGRP), NULL,
+ store_enable_clka0_output);
+
+static ssize_t store_select_amcb2_transmit_clock(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long flags;
+ unsigned long tmp;
+ unsigned char val;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) {
+ SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x28);
+ SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val);
+ } else if (val >= CLK_8_592MHz) {
+ SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38);
+ switch (val) {
+ case CLK_8_592MHz:
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
+ break;
+ case CLK_11_184MHz:
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
+ break;
+ case CLK_34_368MHz:
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
+ break;
+ case CLK_44_736MHz:
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
+ break;
+ }
+ } else {
+ SET_PORT_BITS(TLCLK_REG3, 0xc7, val << 3);
+ }
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(select_amcb2_transmit_clock, (S_IWUSR|S_IWGRP), NULL,
+ store_select_amcb2_transmit_clock);
+
+static ssize_t store_select_amcb1_transmit_clock(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) {
+ SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x5);
+ SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val);
+ } else if (val >= CLK_8_592MHz) {
+ SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x7);
+ switch (val) {
+ case CLK_8_592MHz:
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
+ break;
+ case CLK_11_184MHz:
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
+ break;
+ case CLK_34_368MHz:
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
+ break;
+ case CLK_44_736MHz:
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
+ break;
+ }
+ } else {
+ SET_PORT_BITS(TLCLK_REG3, 0xf8, val);
+ }
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(select_amcb1_transmit_clock, (S_IWUSR|S_IWGRP), NULL,
+ store_select_amcb1_transmit_clock);
+
+static ssize_t store_select_redundant_clock(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG1, 0xfe, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(select_redundant_clock, (S_IWUSR|S_IWGRP), NULL,
+ store_select_redundant_clock);
+
+static ssize_t store_select_ref_frequency(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG1, 0xfd, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(select_ref_frequency, (S_IWUSR|S_IWGRP), NULL,
+ store_select_ref_frequency);
+
+static ssize_t store_filter_select(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG0, 0xfb, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(filter_select, (S_IWUSR|S_IWGRP), NULL, store_filter_select);
+
+static ssize_t store_hardware_switching_mode(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG0, 0xbf, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(hardware_switching_mode, (S_IWUSR|S_IWGRP), NULL,
+ store_hardware_switching_mode);
+
+static ssize_t store_hardware_switching(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG0, 0x7f, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(hardware_switching, (S_IWUSR|S_IWGRP), NULL,
+ store_hardware_switching);
+
+static ssize_t store_refalign (struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
+ SET_PORT_BITS(TLCLK_REG0, 0xf7, 0x08);
+ SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(refalign, (S_IWUSR|S_IWGRP), NULL, store_refalign);
+
+static ssize_t store_mode_select (struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG0, 0xcf, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(mode_select, (S_IWUSR|S_IWGRP), NULL, store_mode_select);
+
+static ssize_t store_reset (struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG4, 0xfd, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(reset, (S_IWUSR|S_IWGRP), NULL, store_reset);
+
+static struct attribute *tlclk_sysfs_entries[] = {
+ &dev_attr_current_ref.attr,
+ &dev_attr_telclock_version.attr,
+ &dev_attr_alarms.attr,
+ &dev_attr_received_ref_clk3a.attr,
+ &dev_attr_received_ref_clk3b.attr,
+ &dev_attr_enable_clk3a_output.attr,
+ &dev_attr_enable_clk3b_output.attr,
+ &dev_attr_enable_clkb1_output.attr,
+ &dev_attr_enable_clka1_output.attr,
+ &dev_attr_enable_clkb0_output.attr,
+ &dev_attr_enable_clka0_output.attr,
+ &dev_attr_select_amcb1_transmit_clock.attr,
+ &dev_attr_select_amcb2_transmit_clock.attr,
+ &dev_attr_select_redundant_clock.attr,
+ &dev_attr_select_ref_frequency.attr,
+ &dev_attr_filter_select.attr,
+ &dev_attr_hardware_switching_mode.attr,
+ &dev_attr_hardware_switching.attr,
+ &dev_attr_refalign.attr,
+ &dev_attr_mode_select.attr,
+ &dev_attr_reset.attr,
+ NULL
+};
+
+static const struct attribute_group tlclk_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = tlclk_sysfs_entries,
+};
+
+static struct platform_device *tlclk_device;
+
+static int __init tlclk_init(void)
+{
+ int ret;
+
+ telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
+
+ alarm_events = kzalloc(sizeof(struct tlclk_alarms), GFP_KERNEL);
+ if (!alarm_events) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
+ if (ret < 0) {
+ printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
+ kfree(alarm_events);
+ return ret;
+ }
+ tlclk_major = ret;
+
+ /* Read telecom clock IRQ number (Set by BIOS) */
+ if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
+ printk(KERN_ERR "tlclk: request_region 0x%X failed.\n",
+ TLCLK_BASE);
+ ret = -EBUSY;
+ goto out2;
+ }
+
+ if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */
+ printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n",
+ telclk_interrupt);
+ ret = -ENXIO;
+ goto out3;
+ }
+
+ timer_setup(&switchover_timer, switchover_timeout, 0);
+
+ ret = misc_register(&tlclk_miscdev);
+ if (ret < 0) {
+ printk(KERN_ERR "tlclk: misc_register returns %d.\n", ret);
+ goto out3;
+ }
+
+ tlclk_device = platform_device_register_simple("telco_clock",
+ -1, NULL, 0);
+ if (IS_ERR(tlclk_device)) {
+ printk(KERN_ERR "tlclk: platform_device_register failed.\n");
+ ret = PTR_ERR(tlclk_device);
+ goto out4;
+ }
+
+ ret = sysfs_create_group(&tlclk_device->dev.kobj,
+ &tlclk_attribute_group);
+ if (ret) {
+ printk(KERN_ERR "tlclk: failed to create sysfs device attributes.\n");
+ goto out5;
+ }
+
+ return 0;
+out5:
+ platform_device_unregister(tlclk_device);
+out4:
+ misc_deregister(&tlclk_miscdev);
+out3:
+ release_region(TLCLK_BASE, 8);
+out2:
+ kfree(alarm_events);
+ unregister_chrdev(tlclk_major, "telco_clock");
+out1:
+ return ret;
+}
+
+static void __exit tlclk_cleanup(void)
+{
+ sysfs_remove_group(&tlclk_device->dev.kobj, &tlclk_attribute_group);
+ platform_device_unregister(tlclk_device);
+ misc_deregister(&tlclk_miscdev);
+ unregister_chrdev(tlclk_major, "telco_clock");
+
+ release_region(TLCLK_BASE, 8);
+ del_timer_sync(&switchover_timer);
+ kfree(alarm_events);
+
+}
+
+static void switchover_timeout(struct timer_list *unused)
+{
+ unsigned long flags = tlclk_timer_data;
+
+ if ((flags & 1)) {
+ if ((inb(TLCLK_REG1) & 0x08) != (flags & 0x08))
+ alarm_events->switchover_primary++;
+ } else {
+ if ((inb(TLCLK_REG1) & 0x08) != (flags & 0x08))
+ alarm_events->switchover_secondary++;
+ }
+
+ /* Alarm processing is done, wake up read task */
+ del_timer(&switchover_timer);
+ got_event = 1;
+ wake_up(&wq);
+}
+
+static irqreturn_t tlclk_interrupt(int irq, void *dev_id)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&event_lock, flags);
+ /* Read and clear interrupt events */
+ int_events = inb(TLCLK_REG6);
+
+ /* Primary_Los changed from 0 to 1 ? */
+ if (int_events & PRI_LOS_01_MASK) {
+ if (inb(TLCLK_REG2) & SEC_LOST_MASK)
+ alarm_events->lost_clocks++;
+ else
+ alarm_events->lost_primary_clock++;
+ }
+
+ /* Primary_Los changed from 1 to 0 ? */
+ if (int_events & PRI_LOS_10_MASK) {
+ alarm_events->primary_clock_back++;
+ SET_PORT_BITS(TLCLK_REG1, 0xFE, 1);
+ }
+ /* Secondary_Los changed from 0 to 1 ? */
+ if (int_events & SEC_LOS_01_MASK) {
+ if (inb(TLCLK_REG2) & PRI_LOST_MASK)
+ alarm_events->lost_clocks++;
+ else
+ alarm_events->lost_secondary_clock++;
+ }
+ /* Secondary_Los changed from 1 to 0 ? */
+ if (int_events & SEC_LOS_10_MASK) {
+ alarm_events->secondary_clock_back++;
+ SET_PORT_BITS(TLCLK_REG1, 0xFE, 0);
+ }
+ if (int_events & HOLDOVER_10_MASK)
+ alarm_events->pll_end_holdover++;
+
+ if (int_events & UNLOCK_01_MASK)
+ alarm_events->pll_lost_sync++;
+
+ if (int_events & UNLOCK_10_MASK)
+ alarm_events->pll_sync++;
+
+ /* Holdover changed from 0 to 1 ? */
+ if (int_events & HOLDOVER_01_MASK) {
+ alarm_events->pll_holdover++;
+
+ /* TIMEOUT in ~10ms */
+ switchover_timer.expires = jiffies + msecs_to_jiffies(10);
+ tlclk_timer_data = inb(TLCLK_REG1);
+ mod_timer(&switchover_timer, switchover_timer.expires);
+ } else {
+ got_event = 1;
+ wake_up(&wq);
+ }
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+module_init(tlclk_init);
+module_exit(tlclk_cleanup);
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
new file mode 100644
index 0000000000..776abbfd85
--- /dev/null
+++ b/drivers/char/toshiba.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* toshiba.c -- Linux driver for accessing the SMM on Toshiba laptops
+ *
+ * Copyright (c) 1996-2001 Jonathan A. Buzzard (jonathan@buzzard.org.uk)
+ *
+ * Valuable assistance and patches from:
+ * Tom May <tom@you-bastards.com>
+ * Rob Napier <rnapier@employees.org>
+ *
+ * Fn status port numbers for machine IDs courtesy of
+ * 0xfc02: Scott Eisert <scott.e@sky-eye.com>
+ * 0xfc04: Steve VanDevender <stevev@efn.org>
+ * 0xfc08: Garth Berry <garth@itsbruce.net>
+ * 0xfc0a: Egbert Eich <eich@xfree86.org>
+ * 0xfc10: Andrew Lofthouse <Andrew.Lofthouse@robins.af.mil>
+ * 0xfc11: Spencer Olson <solson@novell.com>
+ * 0xfc13: Claudius Frankewitz <kryp@gmx.de>
+ * 0xfc15: Tom May <tom@you-bastards.com>
+ * 0xfc17: Dave Konrad <konrad@xenia.it>
+ * 0xfc1a: George Betzos <betzos@engr.colostate.edu>
+ * 0xfc1b: Munemasa Wada <munemasa@jnovel.co.jp>
+ * 0xfc1d: Arthur Liu <armie@slap.mine.nu>
+ * 0xfc5a: Jacques L'helgoualc'h <lhh@free.fr>
+ * 0xfcd1: Mr. Dave Konrad <konrad@xenia.it>
+ *
+ * WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
+ *
+ * This code is covered by the GNU GPL and you are free to make any
+ * changes you wish to it under the terms of the license. However the
+ * code has the potential to render your computer and/or someone else's
+ * unusable. Please proceed with care when modifying the code.
+ *
+ * Note: Unfortunately the laptop hardware can close the System Configuration
+ * Interface of its own accord. It is therefore necessary for *all*
+ * programs using this driver to be aware that *any* SCI call can fail at
+ * *any* time. It is up to any program to be aware of this eventuality
+ * and take appropriate steps.
+ *
+ * The information used to write this driver has been obtained by reverse
+ * engineering the software supplied by Toshiba for their portable computers in
+ * strict accordance with the European Council Directive 92/250/EEC on the legal
+ * protection of computer programs, and it's implementation into English Law by
+ * the Copyright (Computer Programs) Regulations 1992 (S.I. 1992 No.3233).
+ */
+
+#define TOSH_VERSION "1.11 26/9/2001"
+#define TOSH_DEBUG 0
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/miscdevice.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/mutex.h>
+#include <linux/toshiba.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonathan Buzzard <jonathan@buzzard.org.uk>");
+MODULE_DESCRIPTION("Toshiba laptop SMM driver");
+
+static DEFINE_MUTEX(tosh_mutex);
+static int tosh_fn;
+module_param_named(fn, tosh_fn, int, 0);
+MODULE_PARM_DESC(fn, "User specified Fn key detection port");
+
+static int tosh_id;
+static int tosh_bios;
+static int tosh_date;
+static int tosh_sci;
+static int tosh_fan;
+
+static long tosh_ioctl(struct file *, unsigned int,
+ unsigned long);
+
+
+static const struct file_operations tosh_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = tosh_ioctl,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice tosh_device = {
+ TOSH_MINOR_DEV,
+ "toshiba",
+ &tosh_fops
+};
+
+/*
+ * Read the Fn key status
+ */
+#ifdef CONFIG_PROC_FS
+static int tosh_fn_status(void)
+{
+ unsigned char scan;
+ unsigned long flags;
+
+ if (tosh_fn!=0) {
+ scan = inb(tosh_fn);
+ } else {
+ local_irq_save(flags);
+ outb(0x8e, 0xe4);
+ scan = inb(0xe5);
+ local_irq_restore(flags);
+ }
+
+ return (int) scan;
+}
+#endif
+
+
+/*
+ * Emulate the HCI fan function for the Portage 610CT and the Tecra 700CS/700CDT
+ */
+static int tosh_emulate_fan(SMMRegisters *regs)
+{
+ unsigned long eax,ecx,flags;
+ unsigned char al;
+
+ eax = regs->eax & 0xff00;
+ ecx = regs->ecx & 0xffff;
+
+ /* Portage 610CT */
+
+ if (tosh_id==0xfccb) {
+ if (eax==0xfe00) {
+ /* fan status */
+ local_irq_save(flags);
+ outb(0xbe, 0xe4);
+ al = inb(0xe5);
+ local_irq_restore(flags);
+ regs->eax = 0x00;
+ regs->ecx = (unsigned int) (al & 0x01);
+ }
+ if ((eax==0xff00) && (ecx==0x0000)) {
+ /* fan off */
+ local_irq_save(flags);
+ outb(0xbe, 0xe4);
+ al = inb(0xe5);
+ outb(0xbe, 0xe4);
+ outb (al | 0x01, 0xe5);
+ local_irq_restore(flags);
+ regs->eax = 0x00;
+ regs->ecx = 0x00;
+ }
+ if ((eax==0xff00) && (ecx==0x0001)) {
+ /* fan on */
+ local_irq_save(flags);
+ outb(0xbe, 0xe4);
+ al = inb(0xe5);
+ outb(0xbe, 0xe4);
+ outb(al & 0xfe, 0xe5);
+ local_irq_restore(flags);
+ regs->eax = 0x00;
+ regs->ecx = 0x01;
+ }
+ }
+
+ /* Tecra 700CS/CDT */
+
+ if (tosh_id==0xfccc) {
+ if (eax==0xfe00) {
+ /* fan status */
+ local_irq_save(flags);
+ outb(0xe0, 0xe4);
+ al = inb(0xe5);
+ local_irq_restore(flags);
+ regs->eax = 0x00;
+ regs->ecx = al & 0x01;
+ }
+ if ((eax==0xff00) && (ecx==0x0000)) {
+ /* fan off */
+ local_irq_save(flags);
+ outb(0xe0, 0xe4);
+ al = inb(0xe5);
+ outw(0xe0 | ((al & 0xfe) << 8), 0xe4);
+ local_irq_restore(flags);
+ regs->eax = 0x00;
+ regs->ecx = 0x00;
+ }
+ if ((eax==0xff00) && (ecx==0x0001)) {
+ /* fan on */
+ local_irq_save(flags);
+ outb(0xe0, 0xe4);
+ al = inb(0xe5);
+ outw(0xe0 | ((al | 0x01) << 8), 0xe4);
+ local_irq_restore(flags);
+ regs->eax = 0x00;
+ regs->ecx = 0x01;
+ }
+ }
+
+ return 0;
+}
+
+
+/*
+ * Put the laptop into System Management Mode
+ */
+int tosh_smm(SMMRegisters *regs)
+{
+ int eax;
+
+ asm ("# load the values into the registers\n\t" \
+ "pushl %%eax\n\t" \
+ "movl 0(%%eax),%%edx\n\t" \
+ "push %%edx\n\t" \
+ "movl 4(%%eax),%%ebx\n\t" \
+ "movl 8(%%eax),%%ecx\n\t" \
+ "movl 12(%%eax),%%edx\n\t" \
+ "movl 16(%%eax),%%esi\n\t" \
+ "movl 20(%%eax),%%edi\n\t" \
+ "popl %%eax\n\t" \
+ "# call the System Management mode\n\t" \
+ "inb $0xb2,%%al\n\t"
+ "# fill out the memory with the values in the registers\n\t" \
+ "xchgl %%eax,(%%esp)\n\t"
+ "movl %%ebx,4(%%eax)\n\t" \
+ "movl %%ecx,8(%%eax)\n\t" \
+ "movl %%edx,12(%%eax)\n\t" \
+ "movl %%esi,16(%%eax)\n\t" \
+ "movl %%edi,20(%%eax)\n\t" \
+ "popl %%edx\n\t" \
+ "movl %%edx,0(%%eax)\n\t" \
+ "# setup the return value to the carry flag\n\t" \
+ "lahf\n\t" \
+ "shrl $8,%%eax\n\t" \
+ "andl $1,%%eax\n" \
+ : "=a" (eax)
+ : "a" (regs)
+ : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
+
+ return eax;
+}
+EXPORT_SYMBOL(tosh_smm);
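+/*
+ * Example (sketch): the SCI support check in tosh_probe() below is a
+ * typical caller.  Input register values are taken from the SMMRegisters
+ * structure, results are written back into it, and the carry flag is
+ * returned, so a non-zero return means the call failed:
+ *
+ *	SMMRegisters regs = { .eax = 0xf0f0, .ebx = 0x0000, .ecx = 0x0000 };
+ *	int failed = tosh_smm(&regs);
+ */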
+
+
+static long tosh_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ SMMRegisters regs;
+ SMMRegisters __user *argp = (SMMRegisters __user *)arg;
+ unsigned short ax,bx;
+ int err;
+
+ if (!argp)
+ return -EINVAL;
+
+ if (copy_from_user(&regs, argp, sizeof(SMMRegisters)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case TOSH_SMM:
+ ax = regs.eax & 0xff00;
+ bx = regs.ebx & 0xffff;
+ /* block HCI calls to read/write memory & PCI devices */
+ if (((ax==0xff00) || (ax==0xfe00)) && (bx>0x0069))
+ return -EINVAL;
+
+ /* do we need to emulate the fan? */
+ mutex_lock(&tosh_mutex);
+ if (tosh_fan==1) {
+ if (((ax==0xf300) || (ax==0xf400)) && (bx==0x0004)) {
+ err = tosh_emulate_fan(&regs);
+ mutex_unlock(&tosh_mutex);
+ break;
+ }
+ }
+ err = tosh_smm(&regs);
+ mutex_unlock(&tosh_mutex);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (copy_to_user(argp, &regs, sizeof(SMMRegisters)))
+ return -EFAULT;
+
+ return (err==0) ? 0:-EINVAL;
+}
+
+
+/*
+ * Print the information for /proc/toshiba
+ */
+#ifdef CONFIG_PROC_FS
+static int proc_toshiba_show(struct seq_file *m, void *v)
+{
+ int key;
+
+ key = tosh_fn_status();
+
+ /* Arguments
+ 0) Linux driver version (this will change if format changes)
+ 1) Machine ID
+ 2) SCI version
+ 3) BIOS version (major, minor)
+ 4) BIOS date (in SCI date format)
+ 5) Fn Key status
+ */
+ seq_printf(m, "1.1 0x%04x %d.%d %d.%d 0x%04x 0x%02x\n",
+ tosh_id,
+ (tosh_sci & 0xff00)>>8,
+ tosh_sci & 0xff,
+ (tosh_bios & 0xff00)>>8,
+ tosh_bios & 0xff,
+ tosh_date,
+ key);
+ return 0;
+}
+#endif
+
+
+/*
+ * Determine which port to use for the Fn key status
+ */
+static void tosh_set_fn_port(void)
+{
+ switch (tosh_id) {
+ case 0xfc02: case 0xfc04: case 0xfc09: case 0xfc0a: case 0xfc10:
+ case 0xfc11: case 0xfc13: case 0xfc15: case 0xfc1a: case 0xfc1b:
+ case 0xfc5a:
+ tosh_fn = 0x62;
+ break;
+ case 0xfc08: case 0xfc17: case 0xfc1d: case 0xfcd1: case 0xfce0:
+ case 0xfce2:
+ tosh_fn = 0x68;
+ break;
+ default:
+ tosh_fn = 0x00;
+ break;
+ }
+
+ return;
+}
+
+
+/*
+ * Get the machine identification number of the current model
+ */
+static int tosh_get_machine_id(void __iomem *bios)
+{
+ int id;
+ SMMRegisters regs;
+ unsigned short bx,cx;
+ unsigned long address;
+
+ id = (0x100*(int) readb(bios+0xfffe))+((int) readb(bios+0xfffa));
+
+ /* do we have an SCTTable machine identification number on our hands? */
+
+ if (id==0xfc2f) {
+
+ /* start by getting a pointer into the BIOS */
+
+ regs.eax = 0xc000;
+ regs.ebx = 0x0000;
+ regs.ecx = 0x0000;
+ tosh_smm(&regs);
+ bx = (unsigned short) (regs.ebx & 0xffff);
+
+ /* At this point in the Toshiba routines under MS Windows
+ the bx register holds 0xe6f5. However my code is producing
+ a different value! For the time being I will just fudge the
+ value. This has been verified on a Satellite Pro 430CDT,
+ Tecra 750CDT, Tecra 780DVD and Satellite 310CDT. */
+#if TOSH_DEBUG
+ pr_debug("toshiba: debugging ID ebx=0x%04x\n", regs.ebx);
+#endif
+ bx = 0xe6f5;
+
+ /* now twiddle with our pointer a bit */
+
+ address = bx;
+ cx = readw(bios + address);
+ address = 9+bx+cx;
+ cx = readw(bios + address);
+ address = 0xa+cx;
+ cx = readw(bios + address);
+
+ /* now construct our machine identification number */
+
+ id = ((cx & 0xff)<<8)+((cx & 0xff00)>>8);
+ }
+
+ return id;
+}
+
+
+/*
+ * Probe for the presence of a Toshiba laptop
+ *
+ * returns non-zero if unable to detect the presence of a Toshiba
+ * laptop; otherwise returns zero and determines the Machine ID, BIOS version and
+ * date, and SCI version.
+ */
+static int tosh_probe(void)
+{
+ int i,major,minor,day,year,month,flag;
+ unsigned char signature[7] = { 0x54,0x4f,0x53,0x48,0x49,0x42,0x41 };
+ SMMRegisters regs;
+ void __iomem *bios = ioremap(0xf0000, 0x10000);
+
+ if (!bios)
+ return -ENOMEM;
+
+ /* extra sanity check for the string "TOSHIBA" in the BIOS because
+ some machines that are not Toshibas pass the next test */
+
+ for (i=0;i<7;i++) {
+ if (readb(bios+0xe010+i)!=signature[i]) {
+ pr_err("toshiba: not a supported Toshiba laptop\n");
+ iounmap(bios);
+ return -ENODEV;
+ }
+ }
+
+ /* call the Toshiba SCI support check routine */
+
+ regs.eax = 0xf0f0;
+ regs.ebx = 0x0000;
+ regs.ecx = 0x0000;
+ flag = tosh_smm(&regs);
+
+ /* if this is not a Toshiba laptop carry flag is set and ah=0x86 */
+
+ if ((flag==1) || ((regs.eax & 0xff00)==0x8600)) {
+ pr_err("toshiba: not a supported Toshiba laptop\n");
+ iounmap(bios);
+ return -ENODEV;
+ }
+
+ /* if we get this far then we are running on a Toshiba (probably)! */
+
+ tosh_sci = regs.edx & 0xffff;
+
+ /* next get the machine ID of the current laptop */
+
+ tosh_id = tosh_get_machine_id(bios);
+
+ /* get the BIOS version */
+
+ major = readb(bios+0xe009)-'0';
+ minor = ((readb(bios+0xe00b)-'0')*10)+(readb(bios+0xe00c)-'0');
+ tosh_bios = (major*0x100)+minor;
+
+ /* get the BIOS date */
+
+ day = ((readb(bios+0xfff5)-'0')*10)+(readb(bios+0xfff6)-'0');
+ month = ((readb(bios+0xfff8)-'0')*10)+(readb(bios+0xfff9)-'0');
+ year = ((readb(bios+0xfffb)-'0')*10)+(readb(bios+0xfffc)-'0');
+ tosh_date = (((year-90) & 0x1f)<<10) | ((month & 0xf)<<6)
+ | ((day & 0x1f)<<1);
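+ /* tosh_date packs the SCI date format: bits 14:10 hold (year - 1990),
+ bits 9:6 the month and bits 5:1 the day */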
+
+
+ /* in theory we should check the ports we are going to use for the
+ Fn key detection (and the fan on the Portage 610/Tecra 700), and
+ then request them to stop other drivers using them. However, as
+ the keyboard driver grabs 0x60-0x6f and the PIC driver grabs
+ 0xa0-0xbf, we can't. We just have to live dangerously and use the
+ ports anyway, oh boy! */
+
+ /* do we need to emulate the fan? */
+
+ if ((tosh_id==0xfccb) || (tosh_id==0xfccc))
+ tosh_fan = 1;
+
+ iounmap(bios);
+
+ return 0;
+}
+
+static int __init toshiba_init(void)
+{
+ int retval;
+ /* are we running on a Toshiba laptop */
+
+ if (tosh_probe())
+ return -ENODEV;
+
+ pr_info("Toshiba System Management Mode driver v" TOSH_VERSION "\n");
+
+ /* set the port to use for Fn status if not specified as a parameter */
+ if (tosh_fn==0x00)
+ tosh_set_fn_port();
+
+ /* register the device file */
+ retval = misc_register(&tosh_device);
+ if (retval < 0)
+ return retval;
+
+#ifdef CONFIG_PROC_FS
+ {
+ struct proc_dir_entry *pde;
+
+ pde = proc_create_single("toshiba", 0, NULL, proc_toshiba_show);
+ if (!pde) {
+ misc_deregister(&tosh_device);
+ return -ENOMEM;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static void __exit toshiba_exit(void)
+{
+ remove_proc_entry("toshiba", NULL);
+ misc_deregister(&tosh_device);
+}
+
+module_init(toshiba_init);
+module_exit(toshiba_exit);
+
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
new file mode 100644
index 0000000000..927088b2c3
--- /dev/null
+++ b/drivers/char/tpm/Kconfig
@@ -0,0 +1,214 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# TPM device configuration
+#
+
+menuconfig TCG_TPM
+ tristate "TPM Hardware Support"
+ depends on HAS_IOMEM
+ imply SECURITYFS
+ select CRYPTO
+ select CRYPTO_HASH_INFO
+ help
+ If you have a TPM security chip in your system, which
+ implements the Trusted Computing Group's specification,
+ say Yes and it will be accessible from within Linux. For
+ more information see <http://www.trustedcomputinggroup.org>.
+ An implementation of the Trusted Software Stack (TSS), the
+ userspace enablement piece of the specification, can be
+ obtained at: <http://sourceforge.net/projects/trousers>. To
+ compile this driver as a module, choose M here; the module
+ will be called tpm. If unsure, say N.
+ Notes:
+ 1) For more TPM drivers enable CONFIG_PNP, CONFIG_ACPI
+ and CONFIG_PNPACPI.
+ 2) Without ACPI enabled, the BIOS event log won't be accessible,
+ which is required to validate the PCR 0-7 values.
+
+if TCG_TPM
+
+config HW_RANDOM_TPM
+ bool "TPM HW Random Number Generator support"
+ depends on TCG_TPM && HW_RANDOM && !(TCG_TPM=y && HW_RANDOM=m)
+ default y
+ help
+ This setting exposes the TPM's Random Number Generator as a hwrng
+ device. This allows the kernel to collect randomness from the TPM at
+ boot, and provides the TPM randomness via /dev/hwrng.
+
+ If unsure, say Y.
+
+config TCG_TIS_CORE
+ tristate
+ help
+ TCG TIS TPM core driver. It implements the TPM TCG TIS logic and hooks
+ into the TPM kernel APIs. Physical layers will register against it.
+
+config TCG_TIS
+ tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface"
+ depends on X86 || OF
+ select TCG_TIS_CORE
+ help
+ If you have a TPM security chip that is compliant with the
+ TCG TIS 1.2 TPM specification (TPM1.2) or the TCG PTP FIFO
+ specification (TPM2.0) say Yes and it will be accessible from
+ within Linux. To compile this driver as a module, choose M here;
+ the module will be called tpm_tis.
+
+config TCG_TIS_SPI
+ tristate "TPM Interface Specification 1.3 Interface / TPM 2.0 FIFO Interface - (SPI)"
+ depends on SPI
+ select TCG_TIS_CORE
+ help
+ If you have a TPM security chip which is connected to a regular,
+ non-tcg SPI master (i.e. most embedded platforms) that is compliant with the
+ TCG TIS 1.3 TPM specification (TPM1.2) or the TCG PTP FIFO
+ specification (TPM2.0) say Yes and it will be accessible from
+ within Linux. To compile this driver as a module, choose M here;
+ the module will be called tpm_tis_spi.
+
+config TCG_TIS_SPI_CR50
+ bool "Cr50 SPI Interface"
+ depends on TCG_TIS_SPI
+ help
+ If you have a H1 secure module running Cr50 firmware on SPI bus,
+ say Yes and it will be accessible from within Linux.
+
+config TCG_TIS_I2C
+ tristate "TPM Interface Specification 1.3 Interface / TPM 2.0 FIFO Interface - (I2C - generic)"
+ depends on I2C
+ select CRC_CCITT
+ select TCG_TIS_CORE
+ help
+ If you have a TPM security chip, compliant with the TCG TPM PTP
+ (I2C interface) specification and connected to an I2C bus master,
+ say Yes and it will be accessible from within Linux.
+ To compile this driver as a module, choose M here;
+ the module will be called tpm_tis_i2c.
+
+config TCG_TIS_SYNQUACER
+ tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface (MMIO - SynQuacer)"
+ depends on ARCH_SYNQUACER || COMPILE_TEST
+ select TCG_TIS_CORE
+ help
+ If you have a TPM security chip that is compliant with the
+ TCG TIS 1.2 TPM specification (TPM1.2) or the TCG PTP FIFO
+ specification (TPM2.0) say Yes and it will be accessible from
+ within Linux on Socionext SynQuacer platform.
+ To compile this driver as a module, choose M here;
+ the module will be called tpm_tis_synquacer.
+
+config TCG_TIS_I2C_CR50
+ tristate "TPM Interface Specification 2.0 Interface (I2C - CR50)"
+ depends on I2C
+ help
+ This is a driver for the Google cr50 I2C TPM interface which is a
+ custom microcontroller and requires a custom i2c protocol interface
+ to handle the limitations of the hardware. To compile this driver
+ as a module, choose M here; the module will be called tcg_tis_i2c_cr50.
+
+config TCG_TIS_I2C_ATMEL
+ tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)"
+ depends on I2C
+ help
+ If you have an Atmel I2C TPM security chip say Yes and it will be
+ accessible from within Linux.
+ To compile this driver as a module, choose M here; the module will
+ be called tpm_tis_i2c_atmel.
+
+config TCG_TIS_I2C_INFINEON
+ tristate "TPM Interface Specification 1.2 Interface (I2C - Infineon)"
+ depends on I2C
+ help
+ If you have a TPM security chip that is compliant with the
+ TCG TIS 1.2 TPM specification and Infineon's I2C Protocol Stack
+ Specification 0.20 say Yes and it will be accessible from within
+ Linux.
+ To compile this driver as a module, choose M here; the module
+ will be called tpm_i2c_infineon.
+
+config TCG_TIS_I2C_NUVOTON
+ tristate "TPM Interface Specification 1.2 Interface (I2C - Nuvoton)"
+ depends on I2C
+ help
+ If you have a TPM security chip with an I2C interface from
+ Nuvoton Technology Corp. say Yes and it will be accessible
+ from within Linux.
+ To compile this driver as a module, choose M here; the module
+ will be called tpm_i2c_nuvoton.
+
+config TCG_NSC
+ tristate "National Semiconductor TPM Interface"
+ depends on X86
+ help
+ If you have a TPM security chip from National Semiconductor
+ say Yes and it will be accessible from within Linux. To
+ compile this driver as a module, choose M here; the module
+ will be called tpm_nsc.
+
+config TCG_ATMEL
+ tristate "Atmel TPM Interface"
+ depends on PPC64 || HAS_IOPORT_MAP
+ help
+ If you have a TPM security chip from Atmel say Yes and it
+ will be accessible from within Linux. To compile this driver
+ as a module, choose M here; the module will be called tpm_atmel.
+
+config TCG_INFINEON
+ tristate "Infineon Technologies TPM Interface"
+ depends on PNP
+ help
+ If you have a TPM security chip from Infineon Technologies
+ (either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it
+ will be accessible from within Linux.
+ To compile this driver as a module, choose M here; the module
+ will be called tpm_infineon.
+ Further information on this driver and the supported hardware
+ can be found at http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/
+
+config TCG_IBMVTPM
+ tristate "IBM VTPM Interface"
+ depends on PPC_PSERIES
+ help
+ If you have IBM virtual TPM (VTPM) support say Yes and it
+ will be accessible from within Linux. To compile this driver
+ as a module, choose M here; the module will be called tpm_ibmvtpm.
+
+config TCG_XEN
+ tristate "XEN TPM Interface"
+ depends on TCG_TPM && XEN
+ select XEN_XENBUS_FRONTEND
+ help
+ If you want to make TPM support available to a Xen user domain,
+ say Yes and it will be accessible from within Linux. See
+ the manpages for xl, xl.conf, and docs/misc/vtpm.txt in
+ the Xen source repository for more details.
+ To compile this driver as a module, choose M here; the module
+ will be called xen-tpmfront.
+
+config TCG_CRB
+ tristate "TPM 2.0 CRB Interface"
+ depends on ACPI
+ help
+ If you have a TPM security chip that is compliant with the
+ TCG CRB 2.0 TPM specification say Yes and it will be accessible
+ from within Linux. To compile this driver as a module, choose
+ M here; the module will be called tpm_crb.
+
+config TCG_VTPM_PROXY
+ tristate "VTPM Proxy Interface"
+ depends on TCG_TPM
+ help
+ This driver proxies for an emulated TPM (vTPM) running in userspace.
+ A device /dev/vtpmx is provided that creates a device pair
+ /dev/vtpmX and a server-side file descriptor on which the vTPM
+ can receive commands.
+
+config TCG_FTPM_TEE
+ tristate "TEE based fTPM Interface"
+ depends on TEE && OPTEE
+ help
+ This driver proxies for firmware TPM running in TEE.
+
+source "drivers/char/tpm/st33zp24/Kconfig"
+endif # TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
new file mode 100644
index 0000000000..0222b1ddb3
--- /dev/null
+++ b/drivers/char/tpm/Makefile
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the kernel tpm device drivers.
+#
+obj-$(CONFIG_TCG_TPM) += tpm.o
+tpm-y := tpm-chip.o
+tpm-y += tpm-dev-common.o
+tpm-y += tpm-dev.o
+tpm-y += tpm-interface.o
+tpm-y += tpm1-cmd.o
+tpm-y += tpm2-cmd.o
+tpm-y += tpmrm-dev.o
+tpm-y += tpm2-space.o
+tpm-y += tpm-sysfs.o
+tpm-y += eventlog/common.o
+tpm-y += eventlog/tpm1.o
+tpm-y += eventlog/tpm2.o
+
+tpm-$(CONFIG_ACPI) += tpm_ppi.o eventlog/acpi.o
+tpm-$(CONFIG_EFI) += eventlog/efi.o
+tpm-$(CONFIG_OF) += eventlog/of.o
+obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
+obj-$(CONFIG_TCG_TIS) += tpm_tis.o
+obj-$(CONFIG_TCG_TIS_SYNQUACER) += tpm_tis_synquacer.o
+
+obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
+tpm_tis_spi-y := tpm_tis_spi_main.o
+tpm_tis_spi-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
+
+obj-$(CONFIG_TCG_TIS_I2C_CR50) += tpm_tis_i2c_cr50.o
+
+obj-$(CONFIG_TCG_TIS_I2C) += tpm_tis_i2c.o
+obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
+obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
+obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
+obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
+obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
+obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
+obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o
+obj-$(CONFIG_TCG_TIS_ST33ZP24) += st33zp24/
+obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o
+obj-$(CONFIG_TCG_CRB) += tpm_crb.o
+obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
+obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
new file mode 100644
index 0000000000..bd757d836c
--- /dev/null
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Authors:
+ * Seiji Munetoh <munetoh@jp.ibm.com>
+ * Stefan Berger <stefanb@us.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Access to the event log extended by the TCG BIOS of PC platform
+ */
+
+#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+struct acpi_tcpa {
+ struct acpi_table_header hdr;
+ u16 platform_class;
+ union {
+ struct client_hdr {
+ u32 log_max_len __packed;
+ u64 log_start_addr __packed;
+ } client;
+ struct server_hdr {
+ u16 reserved;
+ u64 log_max_len __packed;
+ u64 log_start_addr __packed;
+ } server;
+ };
+};
+
+/* Check that the given log is indeed a TPM2 log. */
+static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
+{
+ struct tcg_efi_specid_event_head *efispecid;
+ struct tcg_pcr_event *event_header;
+ int n;
+
+ if (len < sizeof(*event_header))
+ return false;
+ len -= sizeof(*event_header);
+ event_header = bios_event_log;
+
+ if (len < sizeof(*efispecid))
+ return false;
+ efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
+
+ n = memcmp(efispecid->signature, TCG_SPECID_SIG,
+ sizeof(TCG_SPECID_SIG));
+ return n == 0;
+}
+
+/* read binary bios log */
+int tpm_read_log_acpi(struct tpm_chip *chip)
+{
+ struct acpi_tcpa *buff;
+ acpi_status status;
+ void __iomem *virt;
+ u64 len, start;
+ struct tpm_bios_log *log;
+ struct acpi_table_tpm2 *tbl;
+ struct acpi_tpm2_phy *tpm2_phy;
+ int format;
+ int ret;
+
+ log = &chip->log;
+
+ /* Unfortunately ACPI does not associate the event log with a specific
+ * TPM, like PPI. Thus all ACPI TPMs will read the same log.
+ */
+ if (!chip->acpi_dev_handle)
+ return -ENODEV;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ status = acpi_get_table("TPM2", 1,
+ (struct acpi_table_header **)&tbl);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (tbl->header.length <
+ sizeof(*tbl) + sizeof(struct acpi_tpm2_phy)) {
+ acpi_put_table((struct acpi_table_header *)tbl);
+ return -ENODEV;
+ }
+
+ tpm2_phy = (void *)tbl + sizeof(*tbl);
+ len = tpm2_phy->log_area_minimum_length;
+
+ start = tpm2_phy->log_area_start_address;
+ if (!start || !len) {
+ acpi_put_table((struct acpi_table_header *)tbl);
+ return -ENODEV;
+ }
+
+ acpi_put_table((struct acpi_table_header *)tbl);
+ format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
+ } else {
+ /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
+ status = acpi_get_table(ACPI_SIG_TCPA, 1,
+ (struct acpi_table_header **)&buff);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ switch (buff->platform_class) {
+ case BIOS_SERVER:
+ len = buff->server.log_max_len;
+ start = buff->server.log_start_addr;
+ break;
+ case BIOS_CLIENT:
+ default:
+ len = buff->client.log_max_len;
+ start = buff->client.log_start_addr;
+ break;
+ }
+
+ acpi_put_table((struct acpi_table_header *)buff);
+ format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+ }
+
+ if (!len) {
+ dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__);
+ return -EIO;
+ }
+
+ /* malloc EventLog space */
+ log->bios_event_log = devm_kmalloc(&chip->dev, len, GFP_KERNEL);
+ if (!log->bios_event_log)
+ return -ENOMEM;
+
+ log->bios_event_log_end = log->bios_event_log + len;
+
+ ret = -EIO;
+ virt = acpi_os_map_iomem(start, len);
+ if (!virt) {
+ dev_warn(&chip->dev, "%s: Failed to map ACPI memory\n", __func__);
+ /* try EFI log next */
+ ret = -ENODEV;
+ goto err;
+ }
+
+ memcpy_fromio(log->bios_event_log, virt, len);
+
+ acpi_os_unmap_iomem(virt, len);
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2 &&
+ !tpm_is_tpm2_log(log->bios_event_log, len)) {
+ /* try EFI log next */
+ ret = -ENODEV;
+ goto err;
+ }
+
+ return format;
+
+err:
+ devm_kfree(&chip->dev, log->bios_event_log);
+ log->bios_event_log = NULL;
+ return ret;
+}
diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
new file mode 100644
index 0000000000..639c3f395a
--- /dev/null
+++ b/drivers/char/tpm/eventlog/common.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2005, 2012 IBM Corporation
+ *
+ * Authors:
+ * Kent Yoder <key@linux.vnet.ibm.com>
+ * Seiji Munetoh <munetoh@jp.ibm.com>
+ * Stefan Berger <stefanb@us.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Access to the event log created by a system's firmware / BIOS
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+static int tpm_bios_measurements_open(struct inode *inode,
+ struct file *file)
+{
+ int err;
+ struct seq_file *seq;
+ struct tpm_chip_seqops *chip_seqops;
+ const struct seq_operations *seqops;
+ struct tpm_chip *chip;
+
+ inode_lock(inode);
+ if (!inode->i_private) {
+ inode_unlock(inode);
+ return -ENODEV;
+ }
+ chip_seqops = inode->i_private;
+ seqops = chip_seqops->seqops;
+ chip = chip_seqops->chip;
+ get_device(&chip->dev);
+ inode_unlock(inode);
+
+ /* now register seq file */
+ err = seq_open(file, seqops);
+ if (!err) {
+ seq = file->private_data;
+ seq->private = chip;
+ }
+
+ return err;
+}
+
+static int tpm_bios_measurements_release(struct inode *inode,
+ struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct tpm_chip *chip = seq->private;
+
+ put_device(&chip->dev);
+
+ return seq_release(inode, file);
+}
+
+static const struct file_operations tpm_bios_measurements_ops = {
+ .owner = THIS_MODULE,
+ .open = tpm_bios_measurements_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = tpm_bios_measurements_release,
+};
+
+static int tpm_read_log(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (chip->log.bios_event_log != NULL) {
+ dev_dbg(&chip->dev,
+ "%s: ERROR - event log already initialized\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ rc = tpm_read_log_acpi(chip);
+ if (rc != -ENODEV)
+ return rc;
+
+ rc = tpm_read_log_efi(chip);
+ if (rc != -ENODEV)
+ return rc;
+
+ return tpm_read_log_of(chip);
+}
+
+/*
+ * tpm_bios_log_setup() - Read the event log from the firmware
+ * @chip: TPM chip to use.
+ *
+ * If an event log is found then the securityfs files are setup to
+ * export it to userspace, otherwise nothing is done.
+ */
+void tpm_bios_log_setup(struct tpm_chip *chip)
+{
+ const char *name = dev_name(&chip->dev);
+ unsigned int cnt;
+ int log_version;
+ int rc = 0;
+
+ if (chip->flags & TPM_CHIP_FLAG_VIRTUAL)
+ return;
+
+ rc = tpm_read_log(chip);
+ if (rc < 0)
+ return;
+ log_version = rc;
+
+ cnt = 0;
+ chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
+ /* NOTE: securityfs_create_dir can return ENODEV if securityfs is
+ * compiled out. The caller should ignore the ENODEV return code.
+ */
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ chip->bin_log_seqops.chip = chip;
+ if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
+ chip->bin_log_seqops.seqops =
+ &tpm2_binary_b_measurements_seqops;
+ else
+ chip->bin_log_seqops.seqops =
+ &tpm1_binary_b_measurements_seqops;
+
+
+ chip->bios_dir[cnt] =
+ securityfs_create_file("binary_bios_measurements",
+ 0440, chip->bios_dir[0],
+ (void *)&chip->bin_log_seqops,
+ &tpm_bios_measurements_ops);
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+
+ chip->ascii_log_seqops.chip = chip;
+ chip->ascii_log_seqops.seqops =
+ &tpm1_ascii_b_measurements_seqops;
+
+ chip->bios_dir[cnt] =
+ securityfs_create_file("ascii_bios_measurements",
+ 0440, chip->bios_dir[0],
+ (void *)&chip->ascii_log_seqops,
+ &tpm_bios_measurements_ops);
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+ }
+
+ return;
+
+err:
+ chip->bios_dir[cnt] = NULL;
+ tpm_bios_log_teardown(chip);
+ return;
+}
+
+void tpm_bios_log_teardown(struct tpm_chip *chip)
+{
+ int i;
+ struct inode *inode;
+
+ /* securityfs_remove currently doesn't take care of handling sync
+ * between removal and opening of pseudo files. To handle this, a
+ * workaround is added by making i_private = NULL here during removal
+ * and to check it during open(), both within inode_lock()/unlock().
+ * This design ensures that open() either safely gets kref or fails.
+ */
+ for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) {
+ if (chip->bios_dir[i]) {
+ inode = d_inode(chip->bios_dir[i]);
+ inode_lock(inode);
+ inode->i_private = NULL;
+ inode_unlock(inode);
+ securityfs_remove(chip->bios_dir[i]);
+ }
+ }
+}
diff --git a/drivers/char/tpm/eventlog/common.h b/drivers/char/tpm/eventlog/common.h
new file mode 100644
index 0000000000..47ff8136ce
--- /dev/null
+++ b/drivers/char/tpm/eventlog/common.h
@@ -0,0 +1,35 @@
+#ifndef __TPM_EVENTLOG_COMMON_H__
+#define __TPM_EVENTLOG_COMMON_H__
+
+#include "../tpm.h"
+
+extern const struct seq_operations tpm1_ascii_b_measurements_seqops;
+extern const struct seq_operations tpm1_binary_b_measurements_seqops;
+extern const struct seq_operations tpm2_binary_b_measurements_seqops;
+
+#if defined(CONFIG_ACPI)
+int tpm_read_log_acpi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_acpi(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+#if defined(CONFIG_OF)
+int tpm_read_log_of(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_of(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+#if defined(CONFIG_EFI)
+int tpm_read_log_efi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_efi(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif
diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
new file mode 100644
index 0000000000..4e9d7c2bf3
--- /dev/null
+++ b/drivers/char/tpm/eventlog/efi.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Google
+ *
+ * Authors:
+ * Thiebaud Weksteen <tweek@google.com>
+ */
+
+#include <linux/device.h>
+#include <linux/efi.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+/* read binary bios log from EFI configuration table */
+int tpm_read_log_efi(struct tpm_chip *chip)
+{
+
+ struct efi_tcg2_final_events_table *final_tbl = NULL;
+ int final_events_log_size = efi_tpm_final_log_size;
+ struct linux_efi_tpm_eventlog *log_tbl;
+ struct tpm_bios_log *log;
+ u32 log_size;
+ u8 tpm_log_version;
+ void *tmp;
+ int ret;
+
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+ return -ENODEV;
+
+ if (efi.tpm_log == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+ log = &chip->log;
+
+ log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl), MEMREMAP_WB);
+ if (!log_tbl) {
+ pr_err("Could not map UEFI TPM log table !\n");
+ return -ENOMEM;
+ }
+
+ log_size = log_tbl->size;
+ memunmap(log_tbl);
+
+ if (!log_size) {
+ pr_warn("UEFI TPM log area empty\n");
+ return -EIO;
+ }
+
+ log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size,
+ MEMREMAP_WB);
+ if (!log_tbl) {
+ pr_err("Could not map UEFI TPM log table payload!\n");
+ return -ENOMEM;
+ }
+
+ /* malloc EventLog space */
+ log->bios_event_log = devm_kmemdup(&chip->dev, log_tbl->log, log_size, GFP_KERNEL);
+ if (!log->bios_event_log) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ log->bios_event_log_end = log->bios_event_log + log_size;
+ tpm_log_version = log_tbl->version;
+
+ ret = tpm_log_version;
+
+ if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
+ final_events_log_size == 0 ||
+ tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
+ goto out;
+
+ final_tbl = memremap(efi.tpm_final_log,
+ sizeof(*final_tbl) + final_events_log_size,
+ MEMREMAP_WB);
+ if (!final_tbl) {
+ pr_err("Could not map UEFI TPM final log\n");
+ devm_kfree(&chip->dev, log->bios_event_log);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * The 'final events log' size excludes the 'final events preboot log'
+ * at its beginning.
+ */
+ final_events_log_size -= log_tbl->final_events_preboot_size;
+
+ /*
+ * Allocate memory for the 'combined log' where we will append the
+ * 'final events log' to.
+ */
+ tmp = devm_krealloc(&chip->dev, log->bios_event_log,
+ log_size + final_events_log_size,
+ GFP_KERNEL);
+ if (!tmp) {
+ devm_kfree(&chip->dev, log->bios_event_log);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ log->bios_event_log = tmp;
+
+ /*
+ * Append any of the 'final events log' that didn't also end up in the
+ * 'main log'. Events can be logged in both if events are generated
+ * between GetEventLog() and ExitBootServices().
+ */
+ memcpy((void *)log->bios_event_log + log_size,
+ final_tbl->events + log_tbl->final_events_preboot_size,
+ final_events_log_size);
+ /*
+ * The size of the 'combined log' is the size of the 'main log' plus
+ * the size of the 'final events log'.
+ */
+ log->bios_event_log_end = log->bios_event_log +
+ log_size + final_events_log_size;
+
+out:
+ memunmap(final_tbl);
+ memunmap(log_tbl);
+ return ret;
+}
diff --git a/drivers/char/tpm/eventlog/of.c b/drivers/char/tpm/eventlog/of.c
new file mode 100644
index 0000000000..930fe43d5d
--- /dev/null
+++ b/drivers/char/tpm/eventlog/of.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2012 IBM Corporation
+ *
+ * Author: Ashley Lai <ashleydlai@gmail.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Read the event log created by the firmware on PPC64
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+static int tpm_read_log_memory_region(struct tpm_chip *chip)
+{
+ struct device_node *node;
+ struct resource res;
+ int rc;
+
+ node = of_parse_phandle(chip->dev.parent->of_node, "memory-region", 0);
+ if (!node)
+ return -ENODEV;
+
+ rc = of_address_to_resource(node, 0, &res);
+ of_node_put(node);
+ if (rc)
+ return rc;
+
+ chip->log.bios_event_log = devm_memremap(&chip->dev, res.start, resource_size(&res),
+ MEMREMAP_WB);
+ if (IS_ERR(chip->log.bios_event_log))
+ return -ENOMEM;
+
+ chip->log.bios_event_log_end = chip->log.bios_event_log + resource_size(&res);
+
+ return chip->flags & TPM_CHIP_FLAG_TPM2 ? EFI_TCG2_EVENT_LOG_FORMAT_TCG_2 :
+ EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+}
+
+int tpm_read_log_of(struct tpm_chip *chip)
+{
+ struct device_node *np;
+ const u32 *sizep;
+ const u64 *basep;
+ struct tpm_bios_log *log;
+ u32 size;
+ u64 base;
+
+ log = &chip->log;
+ if (chip->dev.parent && chip->dev.parent->of_node)
+ np = chip->dev.parent->of_node;
+ else
+ return -ENODEV;
+
+ if (of_property_read_bool(np, "powered-while-suspended"))
+ chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED;
+
+ sizep = of_get_property(np, "linux,sml-size", NULL);
+ basep = of_get_property(np, "linux,sml-base", NULL);
+ if (sizep == NULL && basep == NULL)
+ return tpm_read_log_memory_region(chip);
+ if (sizep == NULL || basep == NULL)
+ return -EIO;
+
+ /*
+ * For both vtpm and tpm, the firmware provides the log address and the
+ * log size in big endian format. But in the vtpm case there is a method
+ * called sml-handover which is run during kernel init, even before the
+ * device tree is set up. This sml-handover function takes care of
+ * endianness and writes sml-base and sml-size in little endian format.
+ * For this reason vtpm doesn't need the conversion, but a physical tpm
+ * does.
+ */
+ if (of_property_match_string(np, "compatible", "IBM,vtpm") < 0 &&
+ of_property_match_string(np, "compatible", "IBM,vtpm20") < 0) {
+ size = be32_to_cpup((__force __be32 *)sizep);
+ base = be64_to_cpup((__force __be64 *)basep);
+ } else {
+ size = *sizep;
+ base = *basep;
+ }
+
+ if (size == 0) {
+ dev_warn(&chip->dev, "%s: Event log area empty\n", __func__);
+ return -EIO;
+ }
+
+ log->bios_event_log = devm_kmemdup(&chip->dev, __va(base), size, GFP_KERNEL);
+ if (!log->bios_event_log)
+ return -ENOMEM;
+
+ log->bios_event_log_end = log->bios_event_log + size;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
+ return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+}
diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c
new file mode 100644
index 0000000000..12ee42a31c
--- /dev/null
+++ b/drivers/char/tpm/eventlog/tpm1.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2005, 2012 IBM Corporation
+ *
+ * Authors:
+ * Kent Yoder <key@linux.vnet.ibm.com>
+ * Seiji Munetoh <munetoh@jp.ibm.com>
+ * Stefan Berger <stefanb@us.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Access to the event log created by a system's firmware / BIOS
+ */
+
+#include <linux/seq_file.h>
+#include <linux/efi.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+
+static const char* tcpa_event_type_strings[] = {
+ "PREBOOT",
+ "POST CODE",
+ "",
+ "NO ACTION",
+ "SEPARATOR",
+ "ACTION",
+ "EVENT TAG",
+ "S-CRTM Contents",
+ "S-CRTM Version",
+ "CPU Microcode",
+ "Platform Config Flags",
+ "Table of Devices",
+ "Compact Hash",
+ "IPL",
+ "IPL Partition Data",
+ "Non-Host Code",
+ "Non-Host Config",
+ "Non-Host Info"
+};
+
+static const char* tcpa_pc_event_id_strings[] = {
+ "",
+ "SMBIOS",
+ "BIS Certificate",
+ "POST BIOS ",
+ "ESCD ",
+ "CMOS",
+ "NVRAM",
+ "Option ROM",
+ "Option ROM config",
+ "",
+ "Option ROM microcode ",
+ "S-CRTM Version",
+ "S-CRTM Contents ",
+ "POST Contents ",
+ "Table of Devices",
+};
+
+/* return a pointer to the start of the pos'th entry of the TCG log */
+static void *tpm1_bios_measurements_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t i = 0;
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
+ void *addr = log->bios_event_log;
+ void *limit = log->bios_event_log_end;
+ struct tcpa_event *event;
+ u32 converted_event_size;
+ u32 converted_event_type;
+
+ /* read over *pos measurements */
+ do {
+ event = addr;
+
+ /* check if current entry is valid */
+ if (addr + sizeof(struct tcpa_event) > limit)
+ return NULL;
+
+ converted_event_size =
+ do_endian_conversion(event->event_size);
+ converted_event_type =
+ do_endian_conversion(event->event_type);
+
+ if (((converted_event_type == 0) && (converted_event_size == 0))
+ || ((addr + sizeof(struct tcpa_event) + converted_event_size)
+ > limit))
+ return NULL;
+
+ if (i++ == *pos)
+ break;
+
+ addr += (sizeof(struct tcpa_event) + converted_event_size);
+ } while (1);
+
+ return addr;
+}
+
+static void *tpm1_bios_measurements_next(struct seq_file *m, void *v,
+ loff_t *pos)
+{
+ struct tcpa_event *event = v;
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
+ void *limit = log->bios_event_log_end;
+ u32 converted_event_size;
+ u32 converted_event_type;
+
+ (*pos)++;
+ converted_event_size = do_endian_conversion(event->event_size);
+
+ v += sizeof(struct tcpa_event) + converted_event_size;
+
+ /* now check if current entry is valid */
+ if ((v + sizeof(struct tcpa_event)) > limit)
+ return NULL;
+
+ event = v;
+
+ converted_event_size = do_endian_conversion(event->event_size);
+ converted_event_type = do_endian_conversion(event->event_type);
+
+ if (((converted_event_type == 0) && (converted_event_size == 0)) ||
+ ((v + sizeof(struct tcpa_event) + converted_event_size) > limit))
+ return NULL;
+
+ return v;
+}
+
+static void tpm1_bios_measurements_stop(struct seq_file *m, void *v)
+{
+}
+
+static int get_event_name(char *dest, struct tcpa_event *event,
+ unsigned char * event_entry)
+{
+ const char *name = "";
+ /* 41 so there is room for 40 data and 1 nul */
+ char data[41] = "";
+ int i, n_len = 0, d_len = 0;
+ struct tcpa_pc_event *pc_event;
+
+ switch (do_endian_conversion(event->event_type)) {
+ case PREBOOT:
+ case POST_CODE:
+ case UNUSED:
+ case NO_ACTION:
+ case SCRTM_CONTENTS:
+ case SCRTM_VERSION:
+ case CPU_MICROCODE:
+ case PLATFORM_CONFIG_FLAGS:
+ case TABLE_OF_DEVICES:
+ case COMPACT_HASH:
+ case IPL:
+ case IPL_PARTITION_DATA:
+ case NONHOST_CODE:
+ case NONHOST_CONFIG:
+ case NONHOST_INFO:
+ name = tcpa_event_type_strings[do_endian_conversion
+ (event->event_type)];
+ n_len = strlen(name);
+ break;
+ case SEPARATOR:
+ case ACTION:
+ if (MAX_TEXT_EVENT >
+ do_endian_conversion(event->event_size)) {
+ name = event_entry;
+ n_len = do_endian_conversion(event->event_size);
+ }
+ break;
+ case EVENT_TAG:
+ pc_event = (struct tcpa_pc_event *)event_entry;
+
+ /* TODO: Raw data -> Base64 */
+
+ switch (do_endian_conversion(pc_event->event_id)) {
+ case SMBIOS:
+ case BIS_CERT:
+ case CMOS:
+ case NVRAM:
+ case OPTION_ROM_EXEC:
+ case OPTION_ROM_CONFIG:
+ case S_CRTM_VERSION:
+ name = tcpa_pc_event_id_strings[do_endian_conversion
+ (pc_event->event_id)];
+ n_len = strlen(name);
+ break;
+ /* hash data */
+ case POST_BIOS_ROM:
+ case ESCD:
+ case OPTION_ROM_MICROCODE:
+ case S_CRTM_CONTENTS:
+ case POST_CONTENTS:
+ name = tcpa_pc_event_id_strings[do_endian_conversion
+ (pc_event->event_id)];
+ n_len = strlen(name);
+ for (i = 0; i < 20; i++)
+ d_len += sprintf(&data[2*i], "%02x",
+ pc_event->event_data[i]);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return snprintf(dest, MAX_TEXT_EVENT, "[%.*s%.*s]",
+ n_len, name, d_len, data);
+
+}
+
+static int tpm1_binary_bios_measurements_show(struct seq_file *m, void *v)
+{
+ struct tcpa_event *event = v;
+ struct tcpa_event temp_event;
+ char *temp_ptr;
+ int i;
+
+ memcpy(&temp_event, event, sizeof(struct tcpa_event));
+
+ /* convert raw integers for endianness */
+ temp_event.pcr_index = do_endian_conversion(event->pcr_index);
+ temp_event.event_type = do_endian_conversion(event->event_type);
+ temp_event.event_size = do_endian_conversion(event->event_size);
+
+ temp_ptr = (char *) &temp_event;
+
+ for (i = 0; i < (sizeof(struct tcpa_event) - 1) ; i++)
+ seq_putc(m, temp_ptr[i]);
+
+ temp_ptr = (char *) v;
+
+ for (i = (sizeof(struct tcpa_event) - 1);
+ i < (sizeof(struct tcpa_event) + temp_event.event_size); i++)
+ seq_putc(m, temp_ptr[i]);
+
+ return 0;
+
+}
+
+static int tpm1_ascii_bios_measurements_show(struct seq_file *m, void *v)
+{
+ char *eventname;
+ struct tcpa_event *event = v;
+ unsigned char *event_entry =
+ (unsigned char *)(v + sizeof(struct tcpa_event));
+
+ eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
+ if (!eventname) {
+ printk(KERN_ERR "%s: ERROR - No Memory for event name\n ",
+ __func__);
+ return -EFAULT;
+ }
+
+ /* 1st: PCR */
+ seq_printf(m, "%2d ", do_endian_conversion(event->pcr_index));
+
+ /* 2nd: SHA1 */
+ seq_printf(m, "%20phN", event->pcr_value);
+
+ /* 3rd: event type identifier */
+ seq_printf(m, " %02x", do_endian_conversion(event->event_type));
+
+ get_event_name(eventname, event, event_entry);
+
+ /* 4th: eventname <= max + '\0' delimiter */
+ seq_printf(m, " %s\n", eventname);
+
+ kfree(eventname);
+ return 0;
+}
+
+const struct seq_operations tpm1_ascii_b_measurements_seqops = {
+ .start = tpm1_bios_measurements_start,
+ .next = tpm1_bios_measurements_next,
+ .stop = tpm1_bios_measurements_stop,
+ .show = tpm1_ascii_bios_measurements_show,
+};
+
+const struct seq_operations tpm1_binary_b_measurements_seqops = {
+ .start = tpm1_bios_measurements_start,
+ .next = tpm1_bios_measurements_next,
+ .stop = tpm1_bios_measurements_stop,
+ .show = tpm1_binary_bios_measurements_show,
+};
diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c
new file mode 100644
index 0000000000..37a0580098
--- /dev/null
+++ b/drivers/char/tpm/eventlog/tpm2.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2016 IBM Corporation
+ *
+ * Authors:
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Access to TPM 2.0 event log as written by Firmware.
+ * It assumes that writer of event log has followed TCG Specification
+ * for Family "2.0" and written the event data in little endian.
+ * With that, it doesn't need any endian conversion for structure
+ * content.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+/*
+ * calc_tpm2_event_size() - calculate the event size, where event
+ * is an entry in the TPM 2.0 event log. The event is of type Crypto
+ * Agile Log Entry Format as defined in TCG EFI Protocol Specification
+ * Family "2.0".
+ *
+ * @event: event whose size is to be calculated.
+ * @event_header: the first event in the event log.
+ *
+ * Returns size of the event. If it is an invalid event, returns 0.
+ */
+static size_t calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+ struct tcg_pcr_event *event_header)
+{
+ return __calc_tpm2_event_size(event, event_header, false);
+}
+
+static void *tpm2_bios_measurements_start(struct seq_file *m, loff_t *pos)
+{
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
+ void *addr = log->bios_event_log;
+ void *limit = log->bios_event_log_end;
+ struct tcg_pcr_event *event_header;
+ struct tcg_pcr_event2_head *event;
+ size_t size;
+ int i;
+
+ event_header = addr;
+ size = struct_size(event_header, event, event_header->event_size);
+
+ if (*pos == 0) {
+ if (addr + size < limit) {
+ if ((event_header->event_type == 0) &&
+ (event_header->event_size == 0))
+ return NULL;
+ return SEQ_START_TOKEN;
+ }
+ }
+
+ if (*pos > 0) {
+ addr += size;
+ event = addr;
+ size = calc_tpm2_event_size(event, event_header);
+ if ((addr + size >= limit) || (size == 0))
+ return NULL;
+ }
+
+ for (i = 0; i < (*pos - 1); i++) {
+ event = addr;
+ size = calc_tpm2_event_size(event, event_header);
+
+ if ((addr + size >= limit) || (size == 0))
+ return NULL;
+ addr += size;
+ }
+
+ return addr;
+}
+
+static void *tpm2_bios_measurements_next(struct seq_file *m, void *v,
+ loff_t *pos)
+{
+ struct tcg_pcr_event *event_header;
+ struct tcg_pcr_event2_head *event;
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
+ void *limit = log->bios_event_log_end;
+ size_t event_size;
+ void *marker;
+
+ (*pos)++;
+ event_header = log->bios_event_log;
+
+ if (v == SEQ_START_TOKEN) {
+ event_size = struct_size(event_header, event,
+ event_header->event_size);
+ marker = event_header;
+ } else {
+ event = v;
+ event_size = calc_tpm2_event_size(event, event_header);
+ if (event_size == 0)
+ return NULL;
+ marker = event;
+ }
+
+ marker = marker + event_size;
+ if (marker >= limit)
+ return NULL;
+ v = marker;
+ event = v;
+
+ event_size = calc_tpm2_event_size(event, event_header);
+ if (((v + event_size) >= limit) || (event_size == 0))
+ return NULL;
+
+ return v;
+}
+
+static void tpm2_bios_measurements_stop(struct seq_file *m, void *v)
+{
+}
+
+static int tpm2_binary_bios_measurements_show(struct seq_file *m, void *v)
+{
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
+ struct tcg_pcr_event *event_header = log->bios_event_log;
+ struct tcg_pcr_event2_head *event = v;
+ void *temp_ptr;
+ size_t size;
+
+ if (v == SEQ_START_TOKEN) {
+ size = struct_size(event_header, event,
+ event_header->event_size);
+ temp_ptr = event_header;
+
+ if (size > 0)
+ seq_write(m, temp_ptr, size);
+ } else {
+ size = calc_tpm2_event_size(event, event_header);
+ temp_ptr = event;
+ if (size > 0)
+ seq_write(m, temp_ptr, size);
+ }
+
+ return 0;
+}
+
+const struct seq_operations tpm2_binary_b_measurements_seqops = {
+ .start = tpm2_bios_measurements_start,
+ .next = tpm2_bios_measurements_next,
+ .stop = tpm2_bios_measurements_stop,
+ .show = tpm2_binary_bios_measurements_show,
+};
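
The seq_operations above back the binary_bios_measurements file that tpm_bios_log_setup() exposes through securityfs. A rough sketch of how userspace might consume it, assuming the conventional securityfs mount point /sys/kernel/security and a chip named tpm0 (both assumptions, not guarantees):

#include <stdio.h>

int main(void)
{
	/* Path assumes the usual securityfs mount point and chip name. */
	const char *path = "/sys/kernel/security/tpm0/binary_bios_measurements";
	FILE *f = fopen(path, "rb");
	unsigned char buf[4096];
	size_t n, total = 0;

	if (!f) {
		perror(path);
		return 1;
	}
	/* Read the raw log; a real consumer would parse the TCG
	 * crypto-agile event entries here.
	 */
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		total += n;
	fclose(f);
	printf("event log: %zu bytes\n", total);
	return 0;
}
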
diff --git a/drivers/char/tpm/st33zp24/Kconfig b/drivers/char/tpm/st33zp24/Kconfig
new file mode 100644
index 0000000000..601c2ae5bf
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config TCG_TIS_ST33ZP24
+ tristate
+ help
+ STMicroelectronics ST33ZP24 core driver. It implements the core
+ TPM 1.2 logic and hooks into the TPM kernel APIs. Physical layers will
+ register against it.
+
+ To compile this driver as a module, choose M here; the module will be
+ called tpm_st33zp24.
+
+config TCG_TIS_ST33ZP24_I2C
+ tristate "STMicroelectronics TPM Interface Specification 1.2 Interface (I2C)"
+ depends on I2C
+ select TCG_TIS_ST33ZP24
+ help
+ This module adds support for the STMicroelectronics TPM security chip
+ ST33ZP24 with an I2C interface.
+ To compile this driver as a module, choose M here; the module will be
+ called tpm_st33zp24_i2c.
+
+config TCG_TIS_ST33ZP24_SPI
+ tristate "STMicroelectronics TPM Interface Specification 1.2 Interface (SPI)"
+ depends on SPI
+ select TCG_TIS_ST33ZP24
+ help
+ This module adds support for the STMicroelectronics TPM security chip
+ ST33ZP24 with an SPI interface.
+ To compile this driver as a module, choose M here; the module will be
+ called tpm_st33zp24_spi.
diff --git a/drivers/char/tpm/st33zp24/Makefile b/drivers/char/tpm/st33zp24/Makefile
new file mode 100644
index 0000000000..649e41107d
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for ST33ZP24 TPM 1.2 driver
+#
+
+tpm_st33zp24-objs = st33zp24.o
+obj-$(CONFIG_TCG_TIS_ST33ZP24) += tpm_st33zp24.o
+
+tpm_st33zp24_i2c-objs = i2c.o
+obj-$(CONFIG_TCG_TIS_ST33ZP24_I2C) += tpm_st33zp24_i2c.o
+
+tpm_st33zp24_spi-objs = spi.o
+obj-$(CONFIG_TCG_TIS_ST33ZP24_SPI) += tpm_st33zp24_spi.o
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
new file mode 100644
index 0000000000..661574bb0a
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009 - 2016 STMicroelectronics
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/acpi.h>
+#include <linux/tpm.h>
+
+#include "../tpm.h"
+#include "st33zp24.h"
+
+#define TPM_DUMMY_BYTE 0xAA
+
+struct st33zp24_i2c_phy {
+ struct i2c_client *client;
+ u8 buf[ST33ZP24_BUFSIZE + 1];
+};
+
+/*
+ * write8_reg
+ * Send byte to the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: tpm_register, the tpm tis register where the data should be written
+ * @param: tpm_data, the tpm_data to write inside the tpm_register
+ * @param: tpm_size, The length of the data
+ * @return: Returns negative errno, or else the number of bytes written.
+ */
+static int write8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size)
+{
+ struct st33zp24_i2c_phy *phy = phy_id;
+
+ phy->buf[0] = tpm_register;
+ memcpy(phy->buf + 1, tpm_data, tpm_size);
+ return i2c_master_send(phy->client, phy->buf, tpm_size + 1);
+} /* write8_reg() */
+
+/*
+ * read8_reg
+ * Recv byte from the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: number of bytes read successfully, or a negative error code.
+ */
+static int read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size)
+{
+ struct st33zp24_i2c_phy *phy = phy_id;
+ u8 status = 0;
+ u8 data;
+
+ data = TPM_DUMMY_BYTE;
+ status = write8_reg(phy, tpm_register, &data, 1);
+ if (status == 2)
+ status = i2c_master_recv(phy->client, tpm_data, tpm_size);
+ return status;
+} /* read8_reg() */
+
+/*
+ * st33zp24_i2c_send
+ * Send byte to the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be written
+ * @param: tpm_data, the tpm_data to write inside the tpm_register
+ * @param: tpm_size, the length of the data
+ * @return: number of bytes written successfully, or a negative error code.
+ */
+static int st33zp24_i2c_send(void *phy_id, u8 tpm_register, u8 *tpm_data,
+ int tpm_size)
+{
+ return write8_reg(phy_id, tpm_register | TPM_WRITE_DIRECTION, tpm_data,
+ tpm_size);
+}
+
+/*
+ * st33zp24_i2c_recv
+ * Recv byte from the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: number of bytes read successfully, or a negative error code.
+ */
+static int st33zp24_i2c_recv(void *phy_id, u8 tpm_register, u8 *tpm_data,
+ int tpm_size)
+{
+ return read8_reg(phy_id, tpm_register, tpm_data, tpm_size);
+}
+
+static const struct st33zp24_phy_ops i2c_phy_ops = {
+ .send = st33zp24_i2c_send,
+ .recv = st33zp24_i2c_recv,
+};
+
+/*
+ * st33zp24_i2c_probe initialize the TPM device
+ * @param: client, the i2c_client description (TPM I2C description).
+ * @param: id, the i2c_device_id struct.
+ * @return: 0 in case of success,
+ * or a negative value describing the error.
+ */
+static int st33zp24_i2c_probe(struct i2c_client *client)
+{
+ struct st33zp24_i2c_phy *phy;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_info(&client->dev, "client not i2c capable\n");
+ return -ENODEV;
+ }
+
+ phy = devm_kzalloc(&client->dev, sizeof(struct st33zp24_i2c_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->client = client;
+
+ return st33zp24_probe(phy, &i2c_phy_ops, &client->dev, client->irq);
+}
+
+/*
+ * st33zp24_i2c_remove remove the TPM device
+ * @param: client, the i2c_client description (TPM I2C description).
+ */
+static void st33zp24_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+
+ st33zp24_remove(chip);
+}
+
+static const struct i2c_device_id st33zp24_i2c_id[] = {
+ {TPM_ST33_I2C, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, st33zp24_i2c_id);
+
+static const struct of_device_id of_st33zp24_i2c_match[] __maybe_unused = {
+ { .compatible = "st,st33zp24-i2c", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_st33zp24_i2c_match);
+
+static const struct acpi_device_id st33zp24_i2c_acpi_match[] __maybe_unused = {
+ {"SMO3324"},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, st33zp24_i2c_acpi_match);
+
+static SIMPLE_DEV_PM_OPS(st33zp24_i2c_ops, st33zp24_pm_suspend,
+ st33zp24_pm_resume);
+
+static struct i2c_driver st33zp24_i2c_driver = {
+ .driver = {
+ .name = TPM_ST33_I2C,
+ .pm = &st33zp24_i2c_ops,
+ .of_match_table = of_match_ptr(of_st33zp24_i2c_match),
+ .acpi_match_table = ACPI_PTR(st33zp24_i2c_acpi_match),
+ },
+ .probe = st33zp24_i2c_probe,
+ .remove = st33zp24_i2c_remove,
+ .id_table = st33zp24_i2c_id
+};
+
+module_i2c_driver(st33zp24_i2c_driver);
+
+MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
+MODULE_DESCRIPTION("STM TPM 1.2 I2C ST33 Driver");
+MODULE_VERSION("1.3.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
new file mode 100644
index 0000000000..f5811b301d
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * STMicroelectronics TPM SPI Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009 - 2016 STMicroelectronics
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/of.h>
+#include <linux/acpi.h>
+#include <linux/tpm.h>
+
+#include "../tpm.h"
+#include "st33zp24.h"
+
+#define TPM_DATA_FIFO 0x24
+#define TPM_INTF_CAPABILITY 0x14
+
+#define TPM_DUMMY_BYTE 0x00
+
+#define MAX_SPI_LATENCY 15
+#define LOCALITY0 0
+
+#define ST33ZP24_OK 0x5A
+#define ST33ZP24_UNDEFINED_ERR 0x80
+#define ST33ZP24_BADLOCALITY 0x81
+#define ST33ZP24_TISREGISTER_UNKNOWN 0x82
+#define ST33ZP24_LOCALITY_NOT_ACTIVATED 0x83
+#define ST33ZP24_HASH_END_BEFORE_HASH_START 0x84
+#define ST33ZP24_BAD_COMMAND_ORDER 0x85
+#define ST33ZP24_INCORECT_RECEIVED_LENGTH 0x86
+#define ST33ZP24_TPM_FIFO_OVERFLOW 0x89
+#define ST33ZP24_UNEXPECTED_READ_FIFO 0x8A
+#define ST33ZP24_UNEXPECTED_WRITE_FIFO 0x8B
+#define ST33ZP24_CMDRDY_SET_WHEN_PROCESSING_HASH_END 0x90
+#define ST33ZP24_DUMMY_BYTES 0x00
+
+/*
+ * A TPM command can be up to 2048 bytes and a TPM response up to
+ * 1024 bytes.
+ * Between command and response there are latency bytes (up to 15;
+ * on the st33zp24, 2 are usually enough).
+ *
+ * Overall, when sending a command and expecting an answer, the worst
+ * case needs 2048 bytes (for the TPM command) + 1024 bytes (for the
+ * TPM answer), plus some latency bytes before the answer is available
+ * (max 15).
+ * That gives 2048 + 1024 + 15.
+ */
+#define ST33ZP24_SPI_BUFFER_SIZE (ST33ZP24_BUFSIZE + (ST33ZP24_BUFSIZE / 2) +\
+ MAX_SPI_LATENCY)
+
+
+struct st33zp24_spi_phy {
+ struct spi_device *spi_device;
+
+ u8 tx_buf[ST33ZP24_SPI_BUFFER_SIZE];
+ u8 rx_buf[ST33ZP24_SPI_BUFFER_SIZE];
+
+ int latency;
+};
+
+static int st33zp24_status_to_errno(u8 code)
+{
+ switch (code) {
+ case ST33ZP24_OK:
+ return 0;
+ case ST33ZP24_UNDEFINED_ERR:
+ case ST33ZP24_BADLOCALITY:
+ case ST33ZP24_TISREGISTER_UNKNOWN:
+ case ST33ZP24_LOCALITY_NOT_ACTIVATED:
+ case ST33ZP24_HASH_END_BEFORE_HASH_START:
+ case ST33ZP24_BAD_COMMAND_ORDER:
+ case ST33ZP24_UNEXPECTED_READ_FIFO:
+ case ST33ZP24_UNEXPECTED_WRITE_FIFO:
+ case ST33ZP24_CMDRDY_SET_WHEN_PROCESSING_HASH_END:
+ return -EPROTO;
+ case ST33ZP24_INCORECT_RECEIVED_LENGTH:
+ case ST33ZP24_TPM_FIFO_OVERFLOW:
+ return -EMSGSIZE;
+ case ST33ZP24_DUMMY_BYTES:
+ return -ENOSYS;
+ }
+ return code;
+}
+
+/*
+ * st33zp24_spi_send
+ * Send byte to the TIS register according to the ST33ZP24 SPI protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be written
+ * @param: tpm_data, the tpm_data to write inside the tpm_register
+ * @param: tpm_size, The length of the data
+ * @return: should be zero if success else a negative error code.
+ */
+static int st33zp24_spi_send(void *phy_id, u8 tpm_register, u8 *tpm_data,
+ int tpm_size)
+{
+ int total_length = 0, ret = 0;
+ struct st33zp24_spi_phy *phy = phy_id;
+ struct spi_device *dev = phy->spi_device;
+ struct spi_transfer spi_xfer = {
+ .tx_buf = phy->tx_buf,
+ .rx_buf = phy->rx_buf,
+ };
+
+ /* Pre-Header */
+ phy->tx_buf[total_length++] = TPM_WRITE_DIRECTION | LOCALITY0;
+ phy->tx_buf[total_length++] = tpm_register;
+
+ if (tpm_size > 0 && tpm_register == TPM_DATA_FIFO) {
+ phy->tx_buf[total_length++] = tpm_size >> 8;
+ phy->tx_buf[total_length++] = tpm_size;
+ }
+
+ memcpy(&phy->tx_buf[total_length], tpm_data, tpm_size);
+ total_length += tpm_size;
+
+ memset(&phy->tx_buf[total_length], TPM_DUMMY_BYTE, phy->latency);
+
+ spi_xfer.len = total_length + phy->latency;
+
+ ret = spi_sync_transfer(dev, &spi_xfer, 1);
+ if (ret == 0)
+ ret = phy->rx_buf[total_length + phy->latency - 1];
+
+ return st33zp24_status_to_errno(ret);
+} /* st33zp24_spi_send() */
+
+/*
+ * st33zp24_spi_read8_reg
+ * Recv byte from the TIS register according to the ST33ZP24 SPI protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: should be zero on success, else a negative error code.
+ */
+static int st33zp24_spi_read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data,
+ int tpm_size)
+{
+ int total_length = 0, ret;
+ struct st33zp24_spi_phy *phy = phy_id;
+ struct spi_device *dev = phy->spi_device;
+ struct spi_transfer spi_xfer = {
+ .tx_buf = phy->tx_buf,
+ .rx_buf = phy->rx_buf,
+ };
+
+ /* Pre-Header */
+ phy->tx_buf[total_length++] = LOCALITY0;
+ phy->tx_buf[total_length++] = tpm_register;
+
+ memset(&phy->tx_buf[total_length], TPM_DUMMY_BYTE,
+ phy->latency + tpm_size);
+
+ spi_xfer.len = total_length + phy->latency + tpm_size;
+
+ /* header + status byte + size of the data + status byte */
+ ret = spi_sync_transfer(dev, &spi_xfer, 1);
+ if (tpm_size > 0 && ret == 0) {
+ ret = phy->rx_buf[total_length + phy->latency - 1];
+
+ memcpy(tpm_data, phy->rx_buf + total_length + phy->latency,
+ tpm_size);
+ }
+
+ return ret;
+} /* st33zp24_spi_read8_reg() */
+
+/*
+ * st33zp24_spi_recv
+ * Recv byte from the TIS register according to the ST33ZP24 SPI protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: number of bytes read successfully; should be tpm_size on success.
+ */
+static int st33zp24_spi_recv(void *phy_id, u8 tpm_register, u8 *tpm_data,
+ int tpm_size)
+{
+ int ret;
+
+ ret = st33zp24_spi_read8_reg(phy_id, tpm_register, tpm_data, tpm_size);
+ if (!st33zp24_status_to_errno(ret))
+ return tpm_size;
+ return ret;
+} /* st33zp24_spi_recv() */
+
+static int st33zp24_spi_evaluate_latency(void *phy_id)
+{
+ struct st33zp24_spi_phy *phy = phy_id;
+ int latency = 1, status = 0;
+ u8 data = 0;
+
+ while (!status && latency < MAX_SPI_LATENCY) {
+ phy->latency = latency;
+ status = st33zp24_spi_read8_reg(phy_id, TPM_INTF_CAPABILITY,
+ &data, 1);
+ latency++;
+ }
+ if (status < 0)
+ return status;
+ if (latency == MAX_SPI_LATENCY)
+ return -ENODEV;
+
+ return latency - 1;
+} /* st33zp24_spi_evaluate_latency() */
+
+static const struct st33zp24_phy_ops spi_phy_ops = {
+ .send = st33zp24_spi_send,
+ .recv = st33zp24_spi_recv,
+};
+
+/*
+ * st33zp24_spi_probe initialize the TPM device
+ * @param: dev, the spi_device description (TPM SPI description).
+ * @return: 0 in case of success.
+ * or a negative value describing the error.
+ */
+static int st33zp24_spi_probe(struct spi_device *dev)
+{
+ struct st33zp24_spi_phy *phy;
+
+ phy = devm_kzalloc(&dev->dev, sizeof(struct st33zp24_spi_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->spi_device = dev;
+
+ phy->latency = st33zp24_spi_evaluate_latency(phy);
+ if (phy->latency <= 0)
+ return -ENODEV;
+
+ return st33zp24_probe(phy, &spi_phy_ops, &dev->dev, dev->irq);
+}
+
+/*
+ * st33zp24_spi_remove remove the TPM device
+ * @param: dev, the spi_device description (TPM SPI description).
+ */
+static void st33zp24_spi_remove(struct spi_device *dev)
+{
+ struct tpm_chip *chip = spi_get_drvdata(dev);
+
+ st33zp24_remove(chip);
+}
+
+static const struct spi_device_id st33zp24_spi_id[] = {
+ {TPM_ST33_SPI, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, st33zp24_spi_id);
+
+static const struct of_device_id of_st33zp24_spi_match[] __maybe_unused = {
+ { .compatible = "st,st33zp24-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_st33zp24_spi_match);
+
+static const struct acpi_device_id st33zp24_spi_acpi_match[] __maybe_unused = {
+ {"SMO3324"},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, st33zp24_spi_acpi_match);
+
+static SIMPLE_DEV_PM_OPS(st33zp24_spi_ops, st33zp24_pm_suspend,
+ st33zp24_pm_resume);
+
+static struct spi_driver st33zp24_spi_driver = {
+ .driver = {
+ .name = "st33zp24-spi",
+ .pm = &st33zp24_spi_ops,
+ .of_match_table = of_match_ptr(of_st33zp24_spi_match),
+ .acpi_match_table = ACPI_PTR(st33zp24_spi_acpi_match),
+ },
+ .probe = st33zp24_spi_probe,
+ .remove = st33zp24_spi_remove,
+ .id_table = st33zp24_spi_id,
+};
+
+module_spi_driver(st33zp24_spi_driver);
+
+MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
+MODULE_DESCRIPTION("STM TPM 1.2 SPI ST33 Driver");
+MODULE_VERSION("1.3.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
new file mode 100644
index 0000000000..a5b554cd47
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * STMicroelectronics TPM Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009 - 2016 STMicroelectronics
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/freezer.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/gpio/consumer.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "../tpm.h"
+#include "st33zp24.h"
+
+#define TPM_ACCESS 0x0
+#define TPM_STS 0x18
+#define TPM_DATA_FIFO 0x24
+#define TPM_INTF_CAPABILITY 0x14
+#define TPM_INT_STATUS 0x10
+#define TPM_INT_ENABLE 0x08
+
+#define LOCALITY0 0
+
+enum st33zp24_access {
+ TPM_ACCESS_VALID = 0x80,
+ TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
+ TPM_ACCESS_REQUEST_PENDING = 0x04,
+ TPM_ACCESS_REQUEST_USE = 0x02,
+};
+
+enum st33zp24_status {
+ TPM_STS_VALID = 0x80,
+ TPM_STS_COMMAND_READY = 0x40,
+ TPM_STS_GO = 0x20,
+ TPM_STS_DATA_AVAIL = 0x10,
+ TPM_STS_DATA_EXPECT = 0x08,
+};
+
+enum st33zp24_int_flags {
+ TPM_GLOBAL_INT_ENABLE = 0x80,
+ TPM_INTF_CMD_READY_INT = 0x080,
+ TPM_INTF_FIFO_AVALAIBLE_INT = 0x040,
+ TPM_INTF_WAKE_UP_READY_INT = 0x020,
+ TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
+ TPM_INTF_STS_VALID_INT = 0x002,
+ TPM_INTF_DATA_AVAIL_INT = 0x001,
+};
+
+enum tis_defaults {
+ TIS_SHORT_TIMEOUT = 750,
+ TIS_LONG_TIMEOUT = 2000,
+};
+
+/*
+ * clear the pending interrupt.
+ */
+static u8 clear_interruption(struct st33zp24_dev *tpm_dev)
+{
+ u8 interrupt;
+
+ tpm_dev->ops->recv(tpm_dev->phy_id, TPM_INT_STATUS, &interrupt, 1);
+ tpm_dev->ops->send(tpm_dev->phy_id, TPM_INT_STATUS, &interrupt, 1);
+ return interrupt;
+}
+
+/*
+ * cancel the current command execution or set STS to COMMAND READY.
+ */
+static void st33zp24_cancel(struct tpm_chip *chip)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ u8 data;
+
+ data = TPM_STS_COMMAND_READY;
+ tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1);
+}
+
+/*
+ * return the TPM_STS register
+ */
+static u8 st33zp24_status(struct tpm_chip *chip)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ u8 data;
+
+ tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS, &data, 1);
+ return data;
+}
+
+/*
+ * check whether the locality is active
+ */
+static bool check_locality(struct tpm_chip *chip)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ u8 data;
+ u8 status;
+
+ status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_ACCESS, &data, 1);
+ if (status && (data &
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
+ return true;
+
+ return false;
+}
+
+static int request_locality(struct tpm_chip *chip)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ unsigned long stop;
+ long ret;
+ u8 data;
+
+ if (check_locality(chip))
+ return tpm_dev->locality;
+
+ data = TPM_ACCESS_REQUEST_USE;
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1);
+ if (ret < 0)
+ return ret;
+
+ stop = jiffies + chip->timeout_a;
+
+ /* Request locality is usually effective after the request */
+ do {
+ if (check_locality(chip))
+ return tpm_dev->locality;
+ msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+
+ /* could not get locality */
+ return -EACCES;
+}
+
+static void release_locality(struct tpm_chip *chip)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ u8 data;
+
+ data = TPM_ACCESS_ACTIVE_LOCALITY;
+
+ tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1);
+}
+
+/*
+ * get_burstcount return the burstcount value
+ */
+static int get_burstcount(struct tpm_chip *chip)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ unsigned long stop;
+ int burstcnt, status;
+ u8 temp;
+
+ stop = jiffies + chip->timeout_d;
+ do {
+ status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS + 1,
+ &temp, 1);
+ if (status < 0)
+ return -EBUSY;
+
+ burstcnt = temp;
+ status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS + 2,
+ &temp, 1);
+ if (status < 0)
+ return -EBUSY;
+
+ burstcnt |= temp << 8;
+ if (burstcnt)
+ return burstcnt;
+ msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ return -EBUSY;
+}
+
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
+{
+ u8 status = chip->ops->status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->ops->req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
+/*
+ * wait for a TPM_STS value
+ */
+static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
+ wait_queue_head_t *queue, bool check_cancel)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ unsigned long stop;
+ int ret = 0;
+ bool canceled = false;
+ bool condition;
+ u32 cur_intrs;
+ u8 status;
+
+ /* check current status */
+ status = st33zp24_status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+ cur_intrs = tpm_dev->intrs;
+ clear_interruption(tpm_dev);
+ enable_irq(tpm_dev->irq);
+
+ do {
+ if (ret == -ERESTARTSYS && freezing(current))
+ clear_thread_flag(TIF_SIGPENDING);
+
+ timeout = stop - jiffies;
+ if ((long) timeout <= 0)
+ return -1;
+
+ ret = wait_event_interruptible_timeout(*queue,
+ cur_intrs != tpm_dev->intrs,
+ timeout);
+ clear_interruption(tpm_dev);
+ condition = wait_for_tpm_stat_cond(chip, mask,
+ check_cancel, &canceled);
+ if (ret >= 0 && condition) {
+ if (canceled)
+ return -ECANCELED;
+ return 0;
+ }
+ } while (ret == -ERESTARTSYS && freezing(current));
+
+ disable_irq_nosync(tpm_dev->irq);
+
+ } else {
+ do {
+ msleep(TPM_TIMEOUT);
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+
+ return -ETIME;
+}
+
+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ int size = 0, burstcnt, len, ret;
+
+ while (size < count &&
+ wait_for_stat(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ chip->timeout_c,
+ &tpm_dev->read_queue, true) == 0) {
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0)
+ return burstcnt;
+ len = min_t(int, burstcnt, count - size);
+ ret = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_DATA_FIFO,
+ buf + size, len);
+ if (ret < 0)
+ return ret;
+
+ size += len;
+ }
+ return size;
+}
+
+static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
+{
+ struct tpm_chip *chip = dev_id;
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+
+ tpm_dev->intrs++;
+ wake_up_interruptible(&tpm_dev->read_queue);
+ disable_irq_nosync(tpm_dev->irq);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * send TPM commands through the underlying phy layer (I2C or SPI bus).
+ */
+static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
+ size_t len)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ u32 status, i, size, ordinal;
+ int burstcnt = 0;
+ int ret;
+ u8 data;
+
+ if (len < TPM_HEADER_SIZE)
+ return -EBUSY;
+
+ ret = request_locality(chip);
+ if (ret < 0)
+ return ret;
+
+ status = st33zp24_status(chip);
+ if ((status & TPM_STS_COMMAND_READY) == 0) {
+ st33zp24_cancel(chip);
+ if (wait_for_stat
+ (chip, TPM_STS_COMMAND_READY, chip->timeout_b,
+ &tpm_dev->read_queue, false) < 0) {
+ ret = -ETIME;
+ goto out_err;
+ }
+ }
+
+ for (i = 0; i < len - 1;) {
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0)
+ return burstcnt;
+ size = min_t(int, len - i - 1, burstcnt);
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_DATA_FIFO,
+ buf + i, size);
+ if (ret < 0)
+ goto out_err;
+
+ i += size;
+ }
+
+ status = st33zp24_status(chip);
+ if ((status & TPM_STS_DATA_EXPECT) == 0) {
+ ret = -EIO;
+ goto out_err;
+ }
+
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_DATA_FIFO,
+ buf + len - 1, 1);
+ if (ret < 0)
+ goto out_err;
+
+ status = st33zp24_status(chip);
+ if ((status & TPM_STS_DATA_EXPECT) != 0) {
+ ret = -EIO;
+ goto out_err;
+ }
+
+ data = TPM_STS_GO;
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1);
+ if (ret < 0)
+ goto out_err;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+ ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+
+ ret = wait_for_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ tpm_calc_ordinal_duration(chip, ordinal),
+ &tpm_dev->read_queue, false);
+ if (ret < 0)
+ goto out_err;
+ }
+
+ return 0;
+out_err:
+ st33zp24_cancel(chip);
+ release_locality(chip);
+ return ret;
+}
+
+static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
+ size_t count)
+{
+ int size = 0;
+ u32 expected;
+
+ if (!chip)
+ return -EBUSY;
+
+ if (count < TPM_HEADER_SIZE) {
+ size = -EIO;
+ goto out;
+ }
+
+ size = recv_data(chip, buf, TPM_HEADER_SIZE);
+ if (size < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev, "Unable to read header\n");
+ goto out;
+ }
+
+ expected = be32_to_cpu(*(__be32 *)(buf + 2));
+ if (expected > count || expected < TPM_HEADER_SIZE) {
+ size = -EIO;
+ goto out;
+ }
+
+ size += recv_data(chip, &buf[TPM_HEADER_SIZE],
+ expected - TPM_HEADER_SIZE);
+ if (size < expected) {
+ dev_err(&chip->dev, "Unable to read remainder of result\n");
+ size = -ETIME;
+ }
+
+out:
+ st33zp24_cancel(chip);
+ release_locality(chip);
+ return size;
+}
+
+static bool st33zp24_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == TPM_STS_COMMAND_READY);
+}
+
+static const struct tpm_class_ops st33zp24_tpm = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .send = st33zp24_send,
+ .recv = st33zp24_recv,
+ .cancel = st33zp24_cancel,
+ .status = st33zp24_status,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = st33zp24_req_canceled,
+};
+
+static const struct acpi_gpio_params lpcpd_gpios = { 1, 0, false };
+
+static const struct acpi_gpio_mapping acpi_st33zp24_gpios[] = {
+ { "lpcpd-gpios", &lpcpd_gpios, 1 },
+ { },
+};
+
+/*
+ * initialize the TPM device
+ */
+int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
+ struct device *dev, int irq)
+{
+ int ret;
+ u8 intmask = 0;
+ struct tpm_chip *chip;
+ struct st33zp24_dev *tpm_dev;
+
+ chip = tpmm_chip_alloc(dev, &st33zp24_tpm);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ tpm_dev = devm_kzalloc(dev, sizeof(struct st33zp24_dev),
+ GFP_KERNEL);
+ if (!tpm_dev)
+ return -ENOMEM;
+
+ tpm_dev->phy_id = phy_id;
+ tpm_dev->ops = ops;
+ dev_set_drvdata(&chip->dev, tpm_dev);
+
+ chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+ chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
+ tpm_dev->locality = LOCALITY0;
+
+ if (ACPI_COMPANION(dev)) {
+ ret = devm_acpi_dev_add_driver_gpios(dev, acpi_st33zp24_gpios);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Get the LPCPD GPIO. It is not an issue if the lpcpd pin is not
+ * specified, as power management can also be handled via TPM
+ * specific commands.
+ */
+ tpm_dev->io_lpcpd = devm_gpiod_get_optional(dev, "lpcpd",
+ GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(tpm_dev->io_lpcpd);
+ if (ret) {
+ dev_err(dev, "failed to request lpcpd gpio: %d\n", ret);
+ return ret;
+ }
+
+ if (irq) {
+ /* INTERRUPT Setup */
+ init_waitqueue_head(&tpm_dev->read_queue);
+ tpm_dev->intrs = 0;
+
+ if (request_locality(chip) != LOCALITY0) {
+ ret = -ENODEV;
+ goto _tpm_clean_answer;
+ }
+
+ clear_interruption(tpm_dev);
+ ret = devm_request_irq(dev, irq, tpm_ioserirq_handler,
+ IRQF_TRIGGER_HIGH, "TPM SERIRQ management",
+ chip);
+ if (ret < 0) {
+ dev_err(&chip->dev, "TPM SERIRQ signals %d not available\n",
+ irq);
+ goto _tpm_clean_answer;
+ }
+
+ intmask |= TPM_INTF_CMD_READY_INT
+ | TPM_INTF_STS_VALID_INT
+ | TPM_INTF_DATA_AVAIL_INT;
+
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_INT_ENABLE,
+ &intmask, 1);
+ if (ret < 0)
+ goto _tpm_clean_answer;
+
+ intmask = TPM_GLOBAL_INT_ENABLE;
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, (TPM_INT_ENABLE + 3),
+ &intmask, 1);
+ if (ret < 0)
+ goto _tpm_clean_answer;
+
+ tpm_dev->irq = irq;
+ chip->flags |= TPM_CHIP_FLAG_IRQ;
+
+ disable_irq_nosync(tpm_dev->irq);
+ }
+
+ return tpm_chip_register(chip);
+_tpm_clean_answer:
+ dev_info(&chip->dev, "TPM initialization fail\n");
+ return ret;
+}
+EXPORT_SYMBOL(st33zp24_probe);
+
+void st33zp24_remove(struct tpm_chip *chip)
+{
+ tpm_chip_unregister(chip);
+}
+EXPORT_SYMBOL(st33zp24_remove);
+
+#ifdef CONFIG_PM_SLEEP
+int st33zp24_pm_suspend(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+
+ int ret = 0;
+
+ if (tpm_dev->io_lpcpd)
+ gpiod_set_value_cansleep(tpm_dev->io_lpcpd, 0);
+ else
+ ret = tpm_pm_suspend(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(st33zp24_pm_suspend);
+
+int st33zp24_pm_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ int ret = 0;
+
+ if (tpm_dev->io_lpcpd) {
+ gpiod_set_value_cansleep(tpm_dev->io_lpcpd, 1);
+ ret = wait_for_stat(chip,
+ TPM_STS_VALID, chip->timeout_b,
+ &tpm_dev->read_queue, false);
+ } else {
+ ret = tpm_pm_resume(dev);
+ if (!ret)
+ tpm1_do_selftest(chip);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(st33zp24_pm_resume);
+#endif
+
+MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
+MODULE_DESCRIPTION("ST33ZP24 TPM 1.2 driver");
+MODULE_VERSION("1.3.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h
new file mode 100644
index 0000000000..5acc85f711
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/st33zp24.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * STMicroelectronics TPM Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009 - 2016 STMicroelectronics
+ */
+
+#ifndef __LOCAL_ST33ZP24_H__
+#define __LOCAL_ST33ZP24_H__
+
+#define TPM_ST33_I2C "st33zp24-i2c"
+#define TPM_ST33_SPI "st33zp24-spi"
+
+#define TPM_WRITE_DIRECTION 0x80
+#define ST33ZP24_BUFSIZE 2048
+
+struct st33zp24_dev {
+ struct tpm_chip *chip;
+ void *phy_id;
+ const struct st33zp24_phy_ops *ops;
+ int locality;
+ int irq;
+ u32 intrs;
+ struct gpio_desc *io_lpcpd;
+ wait_queue_head_t read_queue;
+};
+
+
+struct st33zp24_phy_ops {
+ int (*send)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size);
+ int (*recv)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size);
+};
+
+#ifdef CONFIG_PM_SLEEP
+int st33zp24_pm_suspend(struct device *dev);
+int st33zp24_pm_resume(struct device *dev);
+#endif
+
+int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
+ struct device *dev, int irq);
+void st33zp24_remove(struct tpm_chip *chip);
+#endif /* __LOCAL_ST33ZP24_H__ */
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
new file mode 100644
index 0000000000..42b1062e33
--- /dev/null
+++ b/drivers/char/tpm/tpm-chip.c
@@ -0,0 +1,679 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * Authors:
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * TPM chip management routines.
+ */
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/freezer.h>
+#include <linux/major.h>
+#include <linux/tpm_eventlog.h>
+#include <linux/hw_random.h>
+#include "tpm.h"
+
+DEFINE_IDR(dev_nums_idr);
+static DEFINE_MUTEX(idr_lock);
+
+const struct class tpm_class = {
+ .name = "tpm",
+ .shutdown_pre = tpm_class_shutdown,
+};
+const struct class tpmrm_class = {
+ .name = "tpmrm",
+};
+dev_t tpm_devt;
+
+static int tpm_request_locality(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (!chip->ops->request_locality)
+ return 0;
+
+ rc = chip->ops->request_locality(chip, 0);
+ if (rc < 0)
+ return rc;
+
+ chip->locality = rc;
+ return 0;
+}
+
+static void tpm_relinquish_locality(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (!chip->ops->relinquish_locality)
+ return;
+
+ rc = chip->ops->relinquish_locality(chip, chip->locality);
+ if (rc)
+		dev_err(&chip->dev, "%s: error %d\n", __func__, rc);
+
+ chip->locality = -1;
+}
+
+static int tpm_cmd_ready(struct tpm_chip *chip)
+{
+ if (!chip->ops->cmd_ready)
+ return 0;
+
+ return chip->ops->cmd_ready(chip);
+}
+
+static int tpm_go_idle(struct tpm_chip *chip)
+{
+ if (!chip->ops->go_idle)
+ return 0;
+
+ return chip->ops->go_idle(chip);
+}
+
+static void tpm_clk_enable(struct tpm_chip *chip)
+{
+ if (chip->ops->clk_enable)
+ chip->ops->clk_enable(chip, true);
+}
+
+static void tpm_clk_disable(struct tpm_chip *chip)
+{
+ if (chip->ops->clk_enable)
+ chip->ops->clk_enable(chip, false);
+}
+
+/**
+ * tpm_chip_start() - power on the TPM
+ * @chip: a TPM chip to use
+ *
+ * Return:
+ * * 0 - OK
+ * * -errno - A system error
+ */
+int tpm_chip_start(struct tpm_chip *chip)
+{
+ int ret;
+
+ tpm_clk_enable(chip);
+
+ if (chip->locality == -1) {
+ ret = tpm_request_locality(chip);
+ if (ret) {
+ tpm_clk_disable(chip);
+ return ret;
+ }
+ }
+
+ ret = tpm_cmd_ready(chip);
+ if (ret) {
+ tpm_relinquish_locality(chip);
+ tpm_clk_disable(chip);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_chip_start);
+
+/**
+ * tpm_chip_stop() - power off the TPM
+ * @chip: a TPM chip to use
+ *
+ * Puts the TPM to idle, relinquishes the locality and disables the clock.
+ * There is no return value.
+ */
+void tpm_chip_stop(struct tpm_chip *chip)
+{
+ tpm_go_idle(chip);
+ tpm_relinquish_locality(chip);
+ tpm_clk_disable(chip);
+}
+EXPORT_SYMBOL_GPL(tpm_chip_stop);
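+
+/*
+ * Illustrative pairing (a sketch, not a caller in this file): code that does
+ * not go through tpm_try_get_ops() brackets TPM access explicitly:
+ *
+ *	rc = tpm_chip_start(chip);
+ *	if (rc)
+ *		return rc;
+ *	... transmit commands ...
+ *	tpm_chip_stop(chip);
+ */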
+
+/**
+ * tpm_try_get_ops() - Get a ref to the tpm_chip
+ * @chip: Chip to ref
+ *
+ * The caller must already have some kind of locking to ensure that chip is
+ * valid. This function will lock the chip so that the ops member can be
+ * accessed safely. The locking prevents tpm_chip_unregister from
+ * completing, so it should not be held for long periods.
+ *
+ * Returns -ERRNO if the chip's ops could not be reserved.
+ */
+int tpm_try_get_ops(struct tpm_chip *chip)
+{
+ int rc = -EIO;
+
+ get_device(&chip->dev);
+
+ down_read(&chip->ops_sem);
+ if (!chip->ops)
+ goto out_ops;
+
+ mutex_lock(&chip->tpm_mutex);
+ rc = tpm_chip_start(chip);
+ if (rc)
+ goto out_lock;
+
+ return 0;
+out_lock:
+ mutex_unlock(&chip->tpm_mutex);
+out_ops:
+ up_read(&chip->ops_sem);
+ put_device(&chip->dev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_try_get_ops);
+
+/**
+ * tpm_put_ops() - Release a ref to the tpm_chip
+ * @chip: Chip to put
+ *
+ * This is the opposite pair to tpm_try_get_ops(). After this returns chip may
+ * be kfree'd.
+ */
+void tpm_put_ops(struct tpm_chip *chip)
+{
+ tpm_chip_stop(chip);
+ mutex_unlock(&chip->tpm_mutex);
+ up_read(&chip->ops_sem);
+ put_device(&chip->dev);
+}
+EXPORT_SYMBOL_GPL(tpm_put_ops);
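+
+/*
+ * Typical usage (illustrative sketch): reserve the ops, use the chip, then
+ * release it; tpm_try_get_ops() also powers the chip on via tpm_chip_start():
+ *
+ *	rc = tpm_try_get_ops(chip);
+ *	if (rc)
+ *		return rc;
+ *	... use chip->ops or tpm_transmit_cmd() here ...
+ *	tpm_put_ops(chip);
+ */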
+
+/**
+ * tpm_default_chip() - find a TPM chip and get a reference to it
+ */
+struct tpm_chip *tpm_default_chip(void)
+{
+ struct tpm_chip *chip, *res = NULL;
+ int chip_num = 0;
+ int chip_prev;
+
+ mutex_lock(&idr_lock);
+
+ do {
+ chip_prev = chip_num;
+ chip = idr_get_next(&dev_nums_idr, &chip_num);
+ if (chip) {
+ get_device(&chip->dev);
+ res = chip;
+ break;
+ }
+ } while (chip_prev != chip_num);
+
+ mutex_unlock(&idr_lock);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(tpm_default_chip);
+
+/**
+ * tpm_find_get_ops() - find and reserve a TPM chip
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ *
+ * Finds a TPM chip and reserves its class device and operations. The chip must
+ * be released with tpm_put_ops() after use.
+ * This function is for internal use only. It supports existing TPM callers
+ * by accepting NULL, but those callers should be converted to pass in a chip
+ * directly.
+ *
+ * Return:
+ * A reserved &struct tpm_chip instance.
+ * %NULL if a chip is not found.
+ * %NULL if the chip is not available.
+ */
+struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (chip) {
+ if (!tpm_try_get_ops(chip))
+ return chip;
+ return NULL;
+ }
+
+ chip = tpm_default_chip();
+ if (!chip)
+ return NULL;
+ rc = tpm_try_get_ops(chip);
+ /* release additional reference we got from tpm_default_chip() */
+ put_device(&chip->dev);
+ if (rc)
+ return NULL;
+ return chip;
+}
+
+/**
+ * tpm_dev_release() - free chip memory and the device number
+ * @dev: the character device for the TPM chip
+ *
+ * This is used as the release function for the character device.
+ */
+static void tpm_dev_release(struct device *dev)
+{
+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+
+ mutex_lock(&idr_lock);
+ idr_remove(&dev_nums_idr, chip->dev_num);
+ mutex_unlock(&idr_lock);
+
+ kfree(chip->work_space.context_buf);
+ kfree(chip->work_space.session_buf);
+ kfree(chip->allocated_banks);
+ kfree(chip);
+}
+
+/**
+ * tpm_class_shutdown() - prepare the TPM device for loss of power.
+ * @dev: device to which the chip is associated.
+ *
+ * Issues a TPM2_Shutdown command prior to loss of power, as required by the
+ * TPM 2.0 spec. Then, calls bus- and device- specific shutdown code.
+ *
+ * Return: always 0 (i.e. success)
+ */
+int tpm_class_shutdown(struct device *dev)
+{
+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+
+ down_write(&chip->ops_sem);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ if (!tpm_chip_start(chip)) {
+ tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ tpm_chip_stop(chip);
+ }
+ }
+ chip->ops = NULL;
+ up_write(&chip->ops_sem);
+
+ return 0;
+}
+
+/**
+ * tpm_chip_alloc() - allocate a new struct tpm_chip instance
+ * @pdev: device to which the chip is associated
+ * At this point pdev must be initialized, but does not have to
+ * be registered.
+ * @ops: struct tpm_class_ops instance
+ *
+ * Allocates a new struct tpm_chip instance and assigns a free
+ * device number for it. Must be paired with put_device(&chip->dev).
+ */
+struct tpm_chip *tpm_chip_alloc(struct device *pdev,
+ const struct tpm_class_ops *ops)
+{
+ struct tpm_chip *chip;
+ int rc;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (chip == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&chip->tpm_mutex);
+ init_rwsem(&chip->ops_sem);
+
+ chip->ops = ops;
+
+ mutex_lock(&idr_lock);
+ rc = idr_alloc(&dev_nums_idr, NULL, 0, TPM_NUM_DEVICES, GFP_KERNEL);
+ mutex_unlock(&idr_lock);
+ if (rc < 0) {
+ dev_err(pdev, "No available tpm device numbers\n");
+ kfree(chip);
+ return ERR_PTR(rc);
+ }
+ chip->dev_num = rc;
+
+ device_initialize(&chip->dev);
+
+ chip->dev.class = &tpm_class;
+ chip->dev.release = tpm_dev_release;
+ chip->dev.parent = pdev;
+ chip->dev.groups = chip->groups;
+
+ if (chip->dev_num == 0)
+ chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR);
+ else
+ chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num);
+
+ rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num);
+ if (rc)
+ goto out;
+
+ if (!pdev)
+ chip->flags |= TPM_CHIP_FLAG_VIRTUAL;
+
+ cdev_init(&chip->cdev, &tpm_fops);
+ chip->cdev.owner = THIS_MODULE;
+
+ rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE);
+ if (rc) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ chip->locality = -1;
+ return chip;
+
+out:
+ put_device(&chip->dev);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(tpm_chip_alloc);
+
+static void tpm_put_device(void *dev)
+{
+ put_device(dev);
+}
+
+/**
+ * tpmm_chip_alloc() - allocate a new struct tpm_chip instance
+ * @pdev: parent device to which the chip is associated
+ * @ops: struct tpm_class_ops instance
+ *
+ * Same as tpm_chip_alloc() except devm is used to do the put_device().
+ */
+struct tpm_chip *tpmm_chip_alloc(struct device *pdev,
+ const struct tpm_class_ops *ops)
+{
+ struct tpm_chip *chip;
+ int rc;
+
+ chip = tpm_chip_alloc(pdev, ops);
+ if (IS_ERR(chip))
+ return chip;
+
+ rc = devm_add_action_or_reset(pdev,
+ tpm_put_device,
+ &chip->dev);
+ if (rc)
+ return ERR_PTR(rc);
+
+ dev_set_drvdata(pdev, chip);
+
+ return chip;
+}
+EXPORT_SYMBOL_GPL(tpmm_chip_alloc);
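+
+/*
+ * Illustrative probe flow (a sketch; my_tpm_ops is a hypothetical
+ * struct tpm_class_ops instance, not defined here):
+ *
+ *	chip = tpmm_chip_alloc(&pdev->dev, &my_tpm_ops);
+ *	if (IS_ERR(chip))
+ *		return PTR_ERR(chip);
+ *	... set chip->flags, timeouts, I/O resources, etc. ...
+ *	return tpm_chip_register(chip);
+ */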
+
+static int tpm_add_char_device(struct tpm_chip *chip)
+{
+ int rc;
+
+ rc = cdev_device_add(&chip->cdev, &chip->dev);
+ if (rc) {
+ dev_err(&chip->dev,
+ "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
+ dev_name(&chip->dev), MAJOR(chip->dev.devt),
+ MINOR(chip->dev.devt), rc);
+ return rc;
+ }
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip)) {
+ rc = tpm_devs_add(chip);
+ if (rc)
+ goto err_del_cdev;
+ }
+
+ /* Make the chip available. */
+ mutex_lock(&idr_lock);
+ idr_replace(&dev_nums_idr, chip, chip->dev_num);
+ mutex_unlock(&idr_lock);
+
+ return 0;
+
+err_del_cdev:
+ cdev_device_del(&chip->cdev, &chip->dev);
+ return rc;
+}
+
+static void tpm_del_char_device(struct tpm_chip *chip)
+{
+ cdev_device_del(&chip->cdev, &chip->dev);
+
+ /* Make the chip unavailable. */
+ mutex_lock(&idr_lock);
+ idr_replace(&dev_nums_idr, NULL, chip->dev_num);
+ mutex_unlock(&idr_lock);
+
+ /* Make the driver uncallable. */
+ down_write(&chip->ops_sem);
+
+ /*
+	 * Check if chip->ops is still valid: if the controller driver
+	 * unregisters the chip from its own shutdown handler, we are called
+	 * twice and chip->ops is already set to NULL.
+ */
+ if (chip->ops) {
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ if (!tpm_chip_start(chip)) {
+ tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ tpm_chip_stop(chip);
+ }
+ }
+ chip->ops = NULL;
+ }
+ up_write(&chip->ops_sem);
+}
+
+static void tpm_del_legacy_sysfs(struct tpm_chip *chip)
+{
+ struct attribute **i;
+
+ if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL) ||
+ tpm_is_firmware_upgrade(chip))
+ return;
+
+ sysfs_remove_link(&chip->dev.parent->kobj, "ppi");
+
+ for (i = chip->groups[0]->attrs; *i != NULL; ++i)
+ sysfs_remove_link(&chip->dev.parent->kobj, (*i)->name);
+}
+
+/* For compatibility with legacy sysfs paths we provide symlinks from the
+ * parent dev directory to selected names within the tpm chip directory. Old
+ * kernel versions created these files directly under the parent.
+ */
+static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
+{
+ struct attribute **i;
+ int rc;
+
+ if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL) ||
+ tpm_is_firmware_upgrade(chip))
+ return 0;
+
+ rc = compat_only_sysfs_link_entry_to_kobj(
+ &chip->dev.parent->kobj, &chip->dev.kobj, "ppi", NULL);
+ if (rc && rc != -ENOENT)
+ return rc;
+
+ /* All the names from tpm-sysfs */
+ for (i = chip->groups[0]->attrs; *i != NULL; ++i) {
+ rc = compat_only_sysfs_link_entry_to_kobj(
+ &chip->dev.parent->kobj, &chip->dev.kobj, (*i)->name, NULL);
+ if (rc) {
+ tpm_del_legacy_sysfs(chip);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
+
+	/* Give back zero bytes, as the TPM chip has not yet fully resumed. */
+ if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
+ return 0;
+
+ return tpm_get_random(chip, data, max);
+}
+
+static bool tpm_is_hwrng_enabled(struct tpm_chip *chip)
+{
+ if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ return false;
+ if (tpm_is_firmware_upgrade(chip))
+ return false;
+ if (chip->flags & TPM_CHIP_FLAG_HWRNG_DISABLED)
+ return false;
+ return true;
+}
+
+static int tpm_add_hwrng(struct tpm_chip *chip)
+{
+ if (!tpm_is_hwrng_enabled(chip))
+ return 0;
+
+ snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
+ "tpm-rng-%d", chip->dev_num);
+ chip->hwrng.name = chip->hwrng_name;
+ chip->hwrng.read = tpm_hwrng_read;
+ return hwrng_register(&chip->hwrng);
+}
+
+static int tpm_get_pcr_allocation(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (tpm_is_firmware_upgrade(chip))
+ return 0;
+
+ rc = (chip->flags & TPM_CHIP_FLAG_TPM2) ?
+ tpm2_get_pcr_allocation(chip) :
+ tpm1_get_pcr_allocation(chip);
+
+ if (rc > 0)
+ return -ENODEV;
+
+ return rc;
+}
+
+/*
+ * tpm_chip_bootstrap() - Bootstrap TPM chip after power on
+ * @chip: TPM chip to use.
+ *
+ * Initialize TPM chip after power on. This is a one-shot function: subsequent
+ * calls will have no effect.
+ */
+int tpm_chip_bootstrap(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (chip->flags & TPM_CHIP_FLAG_BOOTSTRAPPED)
+ return 0;
+
+ rc = tpm_chip_start(chip);
+ if (rc)
+ return rc;
+
+ rc = tpm_auto_startup(chip);
+ if (rc)
+ goto stop;
+
+ rc = tpm_get_pcr_allocation(chip);
+stop:
+ tpm_chip_stop(chip);
+
+ /*
+	 * Unconditionally set, as driver initialization should cease when the
+	 * bootstrapping process fails.
+ */
+ chip->flags |= TPM_CHIP_FLAG_BOOTSTRAPPED;
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_chip_bootstrap);
+
+/*
+ * tpm_chip_register() - create a character device for the TPM chip
+ * @chip: TPM chip to use.
+ *
+ * Creates a character device for the TPM chip and adds sysfs attributes for
+ * the device. As the last step this function adds the chip to the list of TPM
+ * chips available for in-kernel use.
+ *
+ * This function should be only called after the chip initialization is
+ * complete.
+ */
+int tpm_chip_register(struct tpm_chip *chip)
+{
+ int rc;
+
+ rc = tpm_chip_bootstrap(chip);
+ if (rc)
+ return rc;
+
+ tpm_sysfs_add_device(chip);
+
+ tpm_bios_log_setup(chip);
+
+ tpm_add_ppi(chip);
+
+ rc = tpm_add_hwrng(chip);
+ if (rc)
+ goto out_ppi;
+
+ rc = tpm_add_char_device(chip);
+ if (rc)
+ goto out_hwrng;
+
+ rc = tpm_add_legacy_sysfs(chip);
+ if (rc) {
+ tpm_chip_unregister(chip);
+ return rc;
+ }
+
+ return 0;
+
+out_hwrng:
+ if (tpm_is_hwrng_enabled(chip))
+ hwrng_unregister(&chip->hwrng);
+out_ppi:
+ tpm_bios_log_teardown(chip);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_chip_register);
+
+/*
+ * tpm_chip_unregister() - release the TPM driver
+ * @chip: TPM chip to use.
+ *
+ * Takes the chip first away from the list of available TPM chips and then
+ * cleans up all the resources reserved by tpm_chip_register().
+ *
+ * Once this function returns, the driver callbacks in 'ops' will not be
+ * running and will no longer start.
+ *
+ * NOTE: This function should be only called before deinitializing chip
+ * resources.
+ */
+void tpm_chip_unregister(struct tpm_chip *chip)
+{
+ tpm_del_legacy_sysfs(chip);
+ if (tpm_is_hwrng_enabled(chip))
+ hwrng_unregister(&chip->hwrng);
+ tpm_bios_log_teardown(chip);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip))
+ tpm_devs_remove(chip);
+ tpm_del_char_device(chip);
+}
+EXPORT_SYMBOL_GPL(tpm_chip_unregister);
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
new file mode 100644
index 0000000000..30b4c288c1
--- /dev/null
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Copyright (C) 2013 Obsidian Research Corp
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ *
+ * Device file system interface to the TPM
+ */
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include "tpm.h"
+#include "tpm-dev.h"
+
+static struct workqueue_struct *tpm_dev_wq;
+
+static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
+ u8 *buf, size_t bufsiz)
+{
+ struct tpm_header *header = (void *)buf;
+ ssize_t ret, len;
+
+ ret = tpm2_prepare_space(chip, space, buf, bufsiz);
+ /* If the command is not implemented by the TPM, synthesize a
+ * response with a TPM2_RC_COMMAND_CODE return for user-space.
+ */
+ if (ret == -EOPNOTSUPP) {
+ header->length = cpu_to_be32(sizeof(*header));
+ header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+ header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
+ TSS2_RESMGR_TPM_RC_LAYER);
+ ret = sizeof(*header);
+ }
+ if (ret)
+ goto out_rc;
+
+ len = tpm_transmit(chip, buf, bufsiz);
+ if (len < 0)
+ ret = len;
+
+ if (!ret)
+ ret = tpm2_commit_space(chip, space, buf, &len);
+
+out_rc:
+ return ret ? ret : len;
+}
+
+static void tpm_dev_async_work(struct work_struct *work)
+{
+ struct file_priv *priv =
+ container_of(work, struct file_priv, async_work);
+ ssize_t ret;
+
+ mutex_lock(&priv->buffer_mutex);
+ priv->command_enqueued = false;
+ ret = tpm_try_get_ops(priv->chip);
+ if (ret) {
+ priv->response_length = ret;
+ goto out;
+ }
+
+ ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
+ sizeof(priv->data_buffer));
+ tpm_put_ops(priv->chip);
+
+ /*
+ * If ret is > 0 then tpm_dev_transmit returned the size of the
+ * response. If ret is < 0 then tpm_dev_transmit failed and
+ * returned an error code.
+ */
+ if (ret != 0) {
+ priv->response_length = ret;
+ mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+ }
+out:
+ mutex_unlock(&priv->buffer_mutex);
+ wake_up_interruptible(&priv->async_wait);
+}
+
+static void user_reader_timeout(struct timer_list *t)
+{
+ struct file_priv *priv = from_timer(priv, t, user_read_timer);
+
+ pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
+ task_tgid_nr(current));
+
+ schedule_work(&priv->timeout_work);
+}
+
+static void tpm_timeout_work(struct work_struct *work)
+{
+ struct file_priv *priv = container_of(work, struct file_priv,
+ timeout_work);
+
+ mutex_lock(&priv->buffer_mutex);
+ priv->response_read = true;
+ priv->response_length = 0;
+ memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
+ mutex_unlock(&priv->buffer_mutex);
+ wake_up_interruptible(&priv->async_wait);
+}
+
+void tpm_common_open(struct file *file, struct tpm_chip *chip,
+ struct file_priv *priv, struct tpm_space *space)
+{
+ priv->chip = chip;
+ priv->space = space;
+ priv->response_read = true;
+
+ mutex_init(&priv->buffer_mutex);
+ timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
+ INIT_WORK(&priv->timeout_work, tpm_timeout_work);
+ INIT_WORK(&priv->async_work, tpm_dev_async_work);
+ init_waitqueue_head(&priv->async_wait);
+ file->private_data = priv;
+}
+
+ssize_t tpm_common_read(struct file *file, char __user *buf,
+ size_t size, loff_t *off)
+{
+ struct file_priv *priv = file->private_data;
+ ssize_t ret_size = 0;
+ int rc;
+
+ mutex_lock(&priv->buffer_mutex);
+
+ if (priv->response_length) {
+ priv->response_read = true;
+
+ ret_size = min_t(ssize_t, size, priv->response_length);
+ if (ret_size <= 0) {
+ priv->response_length = 0;
+ goto out;
+ }
+
+ rc = copy_to_user(buf, priv->data_buffer + *off, ret_size);
+ if (rc) {
+ memset(priv->data_buffer, 0, TPM_BUFSIZE);
+ priv->response_length = 0;
+ ret_size = -EFAULT;
+ } else {
+ memset(priv->data_buffer + *off, 0, ret_size);
+ priv->response_length -= ret_size;
+ *off += ret_size;
+ }
+ }
+
+out:
+ if (!priv->response_length) {
+ *off = 0;
+ del_timer_sync(&priv->user_read_timer);
+ flush_work(&priv->timeout_work);
+ }
+ mutex_unlock(&priv->buffer_mutex);
+ return ret_size;
+}
+
+ssize_t tpm_common_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *off)
+{
+ struct file_priv *priv = file->private_data;
+ int ret = 0;
+
+ if (size > TPM_BUFSIZE)
+ return -E2BIG;
+
+ mutex_lock(&priv->buffer_mutex);
+
+ /* Cannot perform a write until the read has cleared either via
+ * tpm_read or a user_read_timer timeout. This also prevents split
+ * buffered writes from blocking here.
+ */
+ if ((!priv->response_read && priv->response_length) ||
+ priv->command_enqueued) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (copy_from_user(priv->data_buffer, buf, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (size < 6 ||
+ size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->response_length = 0;
+ priv->response_read = false;
+ *off = 0;
+
+ /*
+	 * If in nonblocking mode, schedule an async job to send the command
+	 * and return the size immediately. In case of error, the error code
+	 * will be returned by the subsequent read call.
+ */
+ if (file->f_flags & O_NONBLOCK) {
+ priv->command_enqueued = true;
+ queue_work(tpm_dev_wq, &priv->async_work);
+ mutex_unlock(&priv->buffer_mutex);
+ return size;
+ }
+
+ /* atomic tpm command send and result receive. We only hold the ops
+ * lock during this period so that the tpm can be unregistered even if
+ * the char dev is held open.
+ */
+ if (tpm_try_get_ops(priv->chip)) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
+ sizeof(priv->data_buffer));
+ tpm_put_ops(priv->chip);
+
+ if (ret > 0) {
+ priv->response_length = ret;
+ mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+ ret = size;
+ }
+out:
+ mutex_unlock(&priv->buffer_mutex);
+ return ret;
+}
+
+__poll_t tpm_common_poll(struct file *file, poll_table *wait)
+{
+ struct file_priv *priv = file->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(file, &priv->async_wait, wait);
+ mutex_lock(&priv->buffer_mutex);
+
+ /*
+	 * response_length indicates whether there is still a response
+	 * (or part of it) to be consumed. Partial reads decrease it by
+	 * the number of bytes read, and a write resets it to zero.
+ */
+ if (priv->response_length)
+ mask = EPOLLIN | EPOLLRDNORM;
+ else
+ mask = EPOLLOUT | EPOLLWRNORM;
+
+ mutex_unlock(&priv->buffer_mutex);
+ return mask;
+}
+
+/*
+ * Called on file close
+ */
+void tpm_common_release(struct file *file, struct file_priv *priv)
+{
+ flush_work(&priv->async_work);
+ del_timer_sync(&priv->user_read_timer);
+ flush_work(&priv->timeout_work);
+ file->private_data = NULL;
+ priv->response_length = 0;
+}
+
+int __init tpm_dev_common_init(void)
+{
+ tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);
+
+ return !tpm_dev_wq ? -ENOMEM : 0;
+}
+
+void __exit tpm_dev_common_exit(void)
+{
+ if (tpm_dev_wq) {
+ destroy_workqueue(tpm_dev_wq);
+ tpm_dev_wq = NULL;
+ }
+}
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
new file mode 100644
index 0000000000..e2c0baa69f
--- /dev/null
+++ b/drivers/char/tpm/tpm-dev.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Copyright (C) 2013 Obsidian Research Corp
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ *
+ * Device file system interface to the TPM
+ */
+#include <linux/slab.h>
+#include "tpm-dev.h"
+
+static int tpm_open(struct inode *inode, struct file *file)
+{
+ struct tpm_chip *chip;
+ struct file_priv *priv;
+
+ chip = container_of(inode->i_cdev, struct tpm_chip, cdev);
+
+	/* It's assured that the chip will be opened just once: the
+	 * is_open bit is tested and set atomically below.
+	 */
+ if (test_and_set_bit(0, &chip->is_open)) {
+ dev_dbg(&chip->dev, "Another process owns this TPM\n");
+ return -EBUSY;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv == NULL)
+ goto out;
+
+ tpm_common_open(file, chip, priv, NULL);
+
+ return 0;
+
+ out:
+ clear_bit(0, &chip->is_open);
+ return -ENOMEM;
+}
+
+/*
+ * Called on file close
+ */
+static int tpm_release(struct inode *inode, struct file *file)
+{
+ struct file_priv *priv = file->private_data;
+
+ tpm_common_release(file, priv);
+ clear_bit(0, &priv->chip->is_open);
+ kfree(priv);
+
+ return 0;
+}
+
+const struct file_operations tpm_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = tpm_open,
+ .read = tpm_common_read,
+ .write = tpm_common_write,
+ .poll = tpm_common_poll,
+ .release = tpm_release,
+};
diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h
new file mode 100644
index 0000000000..f3742bcc73
--- /dev/null
+++ b/drivers/char/tpm/tpm-dev.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TPM_DEV_H
+#define _TPM_DEV_H
+
+#include <linux/poll.h>
+#include "tpm.h"
+
+struct file_priv {
+ struct tpm_chip *chip;
+ struct tpm_space *space;
+
+ struct mutex buffer_mutex;
+ struct timer_list user_read_timer; /* user needs to claim result */
+ struct work_struct timeout_work;
+ struct work_struct async_work;
+ wait_queue_head_t async_wait;
+ ssize_t response_length;
+ bool response_read;
+ bool command_enqueued;
+
+ u8 data_buffer[TPM_BUFSIZE];
+};
+
+void tpm_common_open(struct file *file, struct tpm_chip *chip,
+ struct file_priv *priv, struct tpm_space *space);
+ssize_t tpm_common_read(struct file *file, char __user *buf,
+ size_t size, loff_t *off);
+ssize_t tpm_common_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *off);
+__poll_t tpm_common_poll(struct file *file, poll_table *wait);
+
+void tpm_common_release(struct file *file, struct file_priv *priv);
+#endif
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
new file mode 100644
index 0000000000..66b16d26ee
--- /dev/null
+++ b/drivers/char/tpm/tpm-interface.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * Note, the TPM chip is not interrupt driven (only polling)
+ * and can have very long timeouts (minutes!). Hence the unusual
+ * calls to msleep.
+ */
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/suspend.h>
+#include <linux/freezer.h>
+#include <linux/tpm_eventlog.h>
+
+#include "tpm.h"
+
+/*
+ * Bug workaround - some TPMs don't flush the most
+ * recently changed PCR on suspend, so force the flush
+ * with an extend to the selected _unused_ non-volatile PCR.
+ */
+static u32 tpm_suspend_pcr;
+module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644);
+MODULE_PARM_DESC(suspend_pcr,
+ "PCR to use for dummy writes to facilitate flush on suspend.");
+
+/**
+ * tpm_calc_ordinal_duration() - calculate the maximum command duration
+ * @chip: TPM chip to use.
+ * @ordinal: TPM command ordinal.
+ *
+ * The function returns the maximum amount of time the chip could take
+ * to return the result for a particular ordinal in jiffies.
+ *
+ * Return: A maximal duration time for an ordinal in jiffies.
+ */
+unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
+{
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return tpm2_calc_ordinal_duration(chip, ordinal);
+ else
+ return tpm1_calc_ordinal_duration(chip, ordinal);
+}
+EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
+
+static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
+{
+ struct tpm_header *header = buf;
+ int rc;
+ ssize_t len = 0;
+ u32 count, ordinal;
+ unsigned long stop;
+
+ if (bufsiz < TPM_HEADER_SIZE)
+ return -EINVAL;
+
+ if (bufsiz > TPM_BUFSIZE)
+ bufsiz = TPM_BUFSIZE;
+
+ count = be32_to_cpu(header->length);
+ ordinal = be32_to_cpu(header->ordinal);
+ if (count == 0)
+ return -ENODATA;
+ if (count > bufsiz) {
+ dev_err(&chip->dev,
+ "invalid count value %x %zx\n", count, bufsiz);
+ return -E2BIG;
+ }
+
+ rc = chip->ops->send(chip, buf, count);
+ if (rc < 0) {
+ if (rc != -EPIPE)
+ dev_err(&chip->dev,
+ "%s: send(): error %d\n", __func__, rc);
+ return rc;
+ }
+
+	/* A sanity check: send() should just return zero on success, i.e.
+	 * not the command length.
+ */
+ if (rc > 0) {
+ dev_warn(&chip->dev,
+ "%s: send(): invalid value %d\n", __func__, rc);
+ rc = 0;
+ }
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ)
+ goto out_recv;
+
+ stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
+ do {
+ u8 status = chip->ops->status(chip);
+ if ((status & chip->ops->req_complete_mask) ==
+ chip->ops->req_complete_val)
+ goto out_recv;
+
+ if (chip->ops->req_canceled(chip, status)) {
+ dev_err(&chip->dev, "Operation Canceled\n");
+ return -ECANCELED;
+ }
+
+ tpm_msleep(TPM_TIMEOUT_POLL);
+ rmb();
+ } while (time_before(jiffies, stop));
+
+ chip->ops->cancel(chip);
+ dev_err(&chip->dev, "Operation Timed out\n");
+ return -ETIME;
+
+out_recv:
+ len = chip->ops->recv(chip, buf, bufsiz);
+ if (len < 0) {
+ rc = len;
+ dev_err(&chip->dev, "tpm_transmit: tpm_recv: error %d\n", rc);
+ } else if (len < TPM_HEADER_SIZE || len != be32_to_cpu(header->length))
+ rc = -EFAULT;
+
+ return rc ? rc : len;
+}
+
+/**
+ * tpm_transmit - Internal kernel interface to transmit TPM commands.
+ * @chip: a TPM chip to use
+ * @buf: a TPM command buffer
+ * @bufsiz: length of the TPM command buffer
+ *
+ * A wrapper around tpm_try_transmit() that handles TPM2_RC_RETRY returns from
+ * the TPM and retransmits the command after a delay up to a maximum wait of
+ * TPM2_DURATION_LONG.
+ *
+ * Note that TPM 1.x never returns TPM2_RC_RETRY so the retry logic is TPM 2.0
+ * only.
+ *
+ * Return:
+ * * The response length - OK
+ * * -errno - A system error
+ */
+ssize_t tpm_transmit(struct tpm_chip *chip, u8 *buf, size_t bufsiz)
+{
+ struct tpm_header *header = (struct tpm_header *)buf;
+ /* space for header and handles */
+ u8 save[TPM_HEADER_SIZE + 3*sizeof(u32)];
+ unsigned int delay_msec = TPM2_DURATION_SHORT;
+ u32 rc = 0;
+ ssize_t ret;
+ const size_t save_size = min(sizeof(save), bufsiz);
+ /* the command code is where the return code will be */
+ u32 cc = be32_to_cpu(header->return_code);
+
+ /*
+ * Subtlety here: if we have a space, the handles will be
+ * transformed, so when we restore the header we also have to
+ * restore the handles.
+ */
+ memcpy(save, buf, save_size);
+
+ for (;;) {
+ ret = tpm_try_transmit(chip, buf, bufsiz);
+ if (ret < 0)
+ break;
+ rc = be32_to_cpu(header->return_code);
+ if (rc != TPM2_RC_RETRY && rc != TPM2_RC_TESTING)
+ break;
+ /*
+		 * Return immediately if a self test command reports that
+		 * testing is still in progress, to shorten boot time.
+ */
+ if (rc == TPM2_RC_TESTING && cc == TPM2_CC_SELF_TEST)
+ break;
+
+ if (delay_msec > TPM2_DURATION_LONG) {
+ if (rc == TPM2_RC_RETRY)
+ dev_err(&chip->dev, "in retry loop\n");
+ else
+ dev_err(&chip->dev,
+ "self test is still running\n");
+ break;
+ }
+ tpm_msleep(delay_msec);
+ delay_msec *= 2;
+ memcpy(buf, save, save_size);
+ }
+ return ret;
+}
+
+/**
+ * tpm_transmit_cmd - send a tpm command to the device
+ * @chip: a TPM chip to use
+ * @buf: a TPM command buffer
+ * @min_rsp_body_length: minimum expected length of response body
+ * @desc: command description used in the error message
+ *
+ * Return:
+ * * 0 - OK
+ * * -errno - A system error
+ * * TPM_RC - A TPM error
+ */
+ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf,
+ size_t min_rsp_body_length, const char *desc)
+{
+ const struct tpm_header *header = (struct tpm_header *)buf->data;
+ int err;
+ ssize_t len;
+
+ len = tpm_transmit(chip, buf->data, PAGE_SIZE);
+ if (len < 0)
+ return len;
+
+ err = be32_to_cpu(header->return_code);
+ if (err != 0 && err != TPM_ERR_DISABLED && err != TPM_ERR_DEACTIVATED
+ && err != TPM2_RC_TESTING && desc)
+ dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err,
+ desc);
+ if (err)
+ return err;
+
+ if (len < min_rsp_body_length + TPM_HEADER_SIZE)
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_transmit_cmd);
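+
+/*
+ * Illustrative caller (a sketch; the command and the expected minimum
+ * response body size are examples only):
+ *
+ *	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
+ *	if (rc)
+ *		return rc;
+ *	tpm_buf_append_u16(&buf, num_bytes);
+ *	rc = tpm_transmit_cmd(chip, &buf, sizeof(__be16),
+ *			      "attempting get random");
+ *	tpm_buf_destroy(&buf);
+ */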
+
+int tpm_get_timeouts(struct tpm_chip *chip)
+{
+ if (chip->flags & TPM_CHIP_FLAG_HAVE_TIMEOUTS)
+ return 0;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return tpm2_get_timeouts(chip);
+ else
+ return tpm1_get_timeouts(chip);
+}
+EXPORT_SYMBOL_GPL(tpm_get_timeouts);
+
+/**
+ * tpm_is_tpm2 - do we have a TPM2 chip?
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ *
+ * Return:
+ * 1 if we have a TPM2 chip.
+ * 0 if we don't have a TPM2 chip.
+ * A negative number for system errors (errno).
+ */
+int tpm_is_tpm2(struct tpm_chip *chip)
+{
+ int rc;
+
+ chip = tpm_find_get_ops(chip);
+ if (!chip)
+ return -ENODEV;
+
+ rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;
+
+ tpm_put_ops(chip);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_is_tpm2);
+
+/**
+ * tpm_pcr_read - read a PCR value from SHA1 bank
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @pcr_idx: the PCR to be retrieved
+ * @digest: the PCR bank and buffer the current PCR value is written to
+ *
+ * Return: same as with tpm_transmit_cmd()
+ */
+int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digest)
+{
+ int rc;
+
+ chip = tpm_find_get_ops(chip);
+ if (!chip)
+ return -ENODEV;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ rc = tpm2_pcr_read(chip, pcr_idx, digest, NULL);
+ else
+ rc = tpm1_pcr_read(chip, pcr_idx, digest->digest);
+
+ tpm_put_ops(chip);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_pcr_read);
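+
+/*
+ * Illustrative sketch: reading PCR 0. For TPM 2.0 the bank is selected by
+ * digest.alg_id; for TPM 1.2 the SHA-1 value is returned:
+ *
+ *	struct tpm_digest d = { .alg_id = TPM_ALG_SHA1 };
+ *
+ *	rc = tpm_pcr_read(chip, 0, &d);
+ */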
+
+/**
+ * tpm_pcr_extend - extend a PCR value in SHA1 bank.
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @pcr_idx: the PCR to be extended
+ * @digests: array of tpm_digest structures used to extend PCRs
+ *
+ * Note: callers must pass a digest for every allocated PCR bank, in the same
+ * order of the banks in chip->allocated_banks.
+ *
+ * Return: same as with tpm_transmit_cmd()
+ */
+int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digests)
+{
+ int rc;
+ int i;
+
+ chip = tpm_find_get_ops(chip);
+ if (!chip)
+ return -ENODEV;
+
+ for (i = 0; i < chip->nr_allocated_banks; i++) {
+ if (digests[i].alg_id != chip->allocated_banks[i].alg_id) {
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ rc = tpm2_pcr_extend(chip, pcr_idx, digests);
+ goto out;
+ }
+
+ rc = tpm1_pcr_extend(chip, pcr_idx, digests[0].digest,
+			     "attempting to extend a PCR value");
+
+out:
+ tpm_put_ops(chip);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_pcr_extend);
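+
+/*
+ * Illustrative sketch (not a caller in this file): extending PCR 10 in every
+ * allocated bank, with digests[] following the chip->allocated_banks order:
+ *
+ *	digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
+ *			  GFP_KERNEL);
+ *	for (i = 0; i < chip->nr_allocated_banks; i++)
+ *		digests[i].alg_id = chip->allocated_banks[i].alg_id;
+ *	... fill digests[i].digest for each bank ...
+ *	rc = tpm_pcr_extend(chip, 10, digests);
+ */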
+
+/**
+ * tpm_send - send a TPM command
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @cmd: a TPM command buffer
+ * @buflen: the length of the TPM command buffer
+ *
+ * Return: same as with tpm_transmit_cmd()
+ */
+int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ chip = tpm_find_get_ops(chip);
+ if (!chip)
+ return -ENODEV;
+
+ buf.data = cmd;
+	rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to send a command");
+
+ tpm_put_ops(chip);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_send);
+
+int tpm_auto_startup(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (!(chip->ops->flags & TPM_OPS_AUTO_STARTUP))
+ return 0;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ rc = tpm2_auto_startup(chip);
+ else
+ rc = tpm1_auto_startup(chip);
+
+ return rc;
+}
+
+/*
+ * We are about to suspend. Save the TPM state
+ * so that it can be restored.
+ */
+int tpm_pm_suspend(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ int rc = 0;
+
+ if (!chip)
+ return -ENODEV;
+
+ if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
+ goto suspended;
+
+ if ((chip->flags & TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED) &&
+ !pm_suspend_via_firmware())
+ goto suspended;
+
+ rc = tpm_try_get_ops(chip);
+ if (!rc) {
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ tpm2_shutdown(chip, TPM2_SU_STATE);
+ else
+ rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+
+ tpm_put_ops(chip);
+ }
+
+suspended:
+ chip->flags |= TPM_CHIP_FLAG_SUSPENDED;
+
+ if (rc)
+ dev_err(dev, "Ignoring error %d while suspending\n", rc);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_pm_suspend);
+
+/*
+ * Resume from a power save. The BIOS already restored
+ * the TPM state.
+ */
+int tpm_pm_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ if (chip == NULL)
+ return -ENODEV;
+
+ chip->flags &= ~TPM_CHIP_FLAG_SUSPENDED;
+
+ /*
+ * Guarantee that SUSPENDED is written last, so that hwrng does not
+ * activate before the chip has been fully resumed.
+ */
+ wmb();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_pm_resume);
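+
+/*
+ * Illustrative wiring (a sketch): TPM bus drivers typically expose these
+ * handlers through a dev_pm_ops instance, e.g.:
+ *
+ *	static SIMPLE_DEV_PM_OPS(my_tpm_pm, tpm_pm_suspend, tpm_pm_resume);
+ */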
+
+/**
+ * tpm_get_random() - get random bytes from the TPM's RNG
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @out: destination buffer for the random bytes
+ * @max: the max number of bytes to write to @out
+ *
+ * Return: number of random bytes read or a negative error value.
+ */
+int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
+{
+ int rc;
+
+ if (!out || max > TPM_MAX_RNG_DATA)
+ return -EINVAL;
+
+ chip = tpm_find_get_ops(chip);
+ if (!chip)
+ return -ENODEV;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ rc = tpm2_get_random(chip, out, max);
+ else
+ rc = tpm1_get_random(chip, out, max);
+
+ tpm_put_ops(chip);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_get_random);
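+
+/*
+ * Illustrative in-kernel consumer (a sketch): passing NULL selects the
+ * default chip; the return value is the number of bytes actually obtained:
+ *
+ *	u8 seed[32];
+ *	int n = tpm_get_random(NULL, seed, sizeof(seed));
+ *
+ *	if (n < 0)
+ *		... no TPM available or the command failed ...
+ */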
+
+static int __init tpm_init(void)
+{
+ int rc;
+
+ rc = class_register(&tpm_class);
+ if (rc) {
+ pr_err("couldn't create tpm class\n");
+ return rc;
+ }
+
+ rc = class_register(&tpmrm_class);
+ if (rc) {
+ pr_err("couldn't create tpmrm class\n");
+ goto out_destroy_tpm_class;
+ }
+
+ rc = alloc_chrdev_region(&tpm_devt, 0, 2*TPM_NUM_DEVICES, "tpm");
+ if (rc < 0) {
+ pr_err("tpm: failed to allocate char dev region\n");
+ goto out_destroy_tpmrm_class;
+ }
+
+ rc = tpm_dev_common_init();
+ if (rc) {
+		pr_err("tpm: failed to initialize the common dev code\n");
+ goto out_unreg_chrdev;
+ }
+
+ return 0;
+
+out_unreg_chrdev:
+ unregister_chrdev_region(tpm_devt, 2 * TPM_NUM_DEVICES);
+out_destroy_tpmrm_class:
+ class_unregister(&tpmrm_class);
+out_destroy_tpm_class:
+ class_unregister(&tpm_class);
+
+ return rc;
+}
+
+static void __exit tpm_exit(void)
+{
+ idr_destroy(&dev_nums_idr);
+ class_unregister(&tpm_class);
+ class_unregister(&tpmrm_class);
+ unregister_chrdev_region(tpm_devt, 2*TPM_NUM_DEVICES);
+ tpm_dev_common_exit();
+}
+
+subsys_initcall(tpm_init);
+module_exit(tpm_exit);
+
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
new file mode 100644
index 0000000000..54c71473aa
--- /dev/null
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -0,0 +1,529 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Copyright (C) 2013 Obsidian Research Corp
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ *
+ * sysfs filesystem inspection interface to the TPM
+ */
+#include <linux/device.h>
+#include "tpm.h"
+
+struct tpm_readpubek_out {
+ u8 algorithm[4];
+ u8 encscheme[2];
+ u8 sigscheme[2];
+ __be32 paramsize;
+ u8 parameters[12];
+ __be32 keysize;
+ u8 modulus[256];
+ u8 checksum[20];
+} __packed;
+
+#define READ_PUBEK_RESULT_MIN_BODY_SIZE (28 + 256)
+#define TPM_ORD_READPUBEK 124
+
+static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_buf tpm_buf;
+ struct tpm_readpubek_out *out;
+ int i;
+ char *str = buf;
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ char anti_replay[20];
+
+ memset(&anti_replay, 0, sizeof(anti_replay));
+
+ if (tpm_try_get_ops(chip))
+ return 0;
+
+ if (tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK))
+ goto out_ops;
+
+ tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay));
+
+ if (tpm_transmit_cmd(chip, &tpm_buf, READ_PUBEK_RESULT_MIN_BODY_SIZE,
+ "attempting to read the PUBEK"))
+ goto out_buf;
+
+ out = (struct tpm_readpubek_out *)&tpm_buf.data[10];
+ str +=
+ sprintf(str,
+ "Algorithm: %4ph\n"
+ "Encscheme: %2ph\n"
+ "Sigscheme: %2ph\n"
+ "Parameters: %12ph\n"
+ "Modulus length: %d\n"
+ "Modulus:\n",
+ out->algorithm,
+ out->encscheme,
+ out->sigscheme,
+ out->parameters,
+ be32_to_cpu(out->keysize));
+
+ for (i = 0; i < 256; i += 16)
+ str += sprintf(str, "%16ph\n", &out->modulus[i]);
+
+out_buf:
+ tpm_buf_destroy(&tpm_buf);
+out_ops:
+ tpm_put_ops(chip);
+ return str - buf;
+}
+static DEVICE_ATTR_RO(pubek);
+
+static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ cap_t cap;
+ u8 digest[TPM_DIGEST_SIZE];
+ u32 i, j, num_pcrs;
+ char *str = buf;
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ if (tpm_try_get_ops(chip))
+ return 0;
+
+ if (tpm1_getcap(chip, TPM_CAP_PROP_PCR, &cap,
+ "attempting to determine the number of PCRS",
+ sizeof(cap.num_pcrs))) {
+ tpm_put_ops(chip);
+ return 0;
+ }
+
+ num_pcrs = be32_to_cpu(cap.num_pcrs);
+ for (i = 0; i < num_pcrs; i++) {
+ if (tpm1_pcr_read(chip, i, digest)) {
+ str = buf;
+ break;
+ }
+ str += sprintf(str, "PCR-%02d: ", i);
+ for (j = 0; j < TPM_DIGEST_SIZE; j++)
+ str += sprintf(str, "%02X ", digest[j]);
+ str += sprintf(str, "\n");
+ }
+ tpm_put_ops(chip);
+ return str - buf;
+}
+static DEVICE_ATTR_RO(pcrs);
+
+static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t rc = 0;
+ cap_t cap;
+
+ if (tpm_try_get_ops(chip))
+ return 0;
+
+ if (tpm1_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
+ "attempting to determine the permanent enabled state",
+ sizeof(cap.perm_flags)))
+ goto out_ops;
+
+ rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
+out_ops:
+ tpm_put_ops(chip);
+ return rc;
+}
+static DEVICE_ATTR_RO(enabled);
+
+static ssize_t active_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t rc = 0;
+ cap_t cap;
+
+ if (tpm_try_get_ops(chip))
+ return 0;
+
+ if (tpm1_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
+ "attempting to determine the permanent active state",
+ sizeof(cap.perm_flags)))
+ goto out_ops;
+
+ rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
+out_ops:
+ tpm_put_ops(chip);
+ return rc;
+}
+static DEVICE_ATTR_RO(active);
+
+static ssize_t owned_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t rc = 0;
+ cap_t cap;
+
+ if (tpm_try_get_ops(chip))
+ return 0;
+
+ if (tpm1_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap,
+ "attempting to determine the owner state",
+ sizeof(cap.owned)))
+ goto out_ops;
+
+ rc = sprintf(buf, "%d\n", cap.owned);
+out_ops:
+ tpm_put_ops(chip);
+ return rc;
+}
+static DEVICE_ATTR_RO(owned);
+
+static ssize_t temp_deactivated_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t rc = 0;
+ cap_t cap;
+
+ if (tpm_try_get_ops(chip))
+ return 0;
+
+ if (tpm1_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap,
+ "attempting to determine the temporary state",
+ sizeof(cap.stclear_flags)))
+ goto out_ops;
+
+ rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
+out_ops:
+ tpm_put_ops(chip);
+ return rc;
+}
+static DEVICE_ATTR_RO(temp_deactivated);
+
+static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ struct tpm1_version *version;
+ ssize_t rc = 0;
+ char *str = buf;
+ cap_t cap;
+
+ if (tpm_try_get_ops(chip))
+ return 0;
+
+ if (tpm1_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
+ "attempting to determine the manufacturer",
+ sizeof(cap.manufacturer_id)))
+ goto out_ops;
+
+ str += sprintf(str, "Manufacturer: 0x%x\n",
+ be32_to_cpu(cap.manufacturer_id));
+
+ /* TPM 1.2 */
+ if (!tpm1_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
+ "attempting to determine the 1.2 version",
+ sizeof(cap.version2))) {
+ version = &cap.version2.version;
+ goto out_print;
+ }
+
+ /* TPM 1.1 */
+ if (tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
+ "attempting to determine the 1.1 version",
+ sizeof(cap.version1))) {
+ goto out_ops;
+ }
+
+ version = &cap.version1;
+
+out_print:
+ str += sprintf(str,
+ "TCG version: %d.%d\nFirmware version: %d.%d\n",
+ version->major, version->minor,
+ version->rev_major, version->rev_minor);
+
+ rc = str - buf;
+
+out_ops:
+ tpm_put_ops(chip);
+ return rc;
+}
+static DEVICE_ATTR_RO(caps);
+
+static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ if (tpm_try_get_ops(chip))
+ return 0;
+
+ chip->ops->cancel(chip);
+ tpm_put_ops(chip);
+ return count;
+}
+static DEVICE_ATTR_WO(cancel);
+
+static ssize_t durations_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ if (chip->duration[TPM_LONG] == 0)
+ return 0;
+
+ return sprintf(buf, "%d %d %d [%s]\n",
+ jiffies_to_usecs(chip->duration[TPM_SHORT]),
+ jiffies_to_usecs(chip->duration[TPM_MEDIUM]),
+ jiffies_to_usecs(chip->duration[TPM_LONG]),
+ chip->duration_adjusted
+ ? "adjusted" : "original");
+}
+static DEVICE_ATTR_RO(durations);
+
+static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ return sprintf(buf, "%d %d %d %d [%s]\n",
+ jiffies_to_usecs(chip->timeout_a),
+ jiffies_to_usecs(chip->timeout_b),
+ jiffies_to_usecs(chip->timeout_c),
+ jiffies_to_usecs(chip->timeout_d),
+ chip->timeout_adjusted
+ ? "adjusted" : "original");
+}
+static DEVICE_ATTR_RO(timeouts);
+
+static ssize_t tpm_version_major_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ return sprintf(buf, "%s\n", chip->flags & TPM_CHIP_FLAG_TPM2
+ ? "2" : "1");
+}
+static DEVICE_ATTR_RO(tpm_version_major);
+
+static struct attribute *tpm1_dev_attrs[] = {
+ &dev_attr_pubek.attr,
+ &dev_attr_pcrs.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_active.attr,
+ &dev_attr_owned.attr,
+ &dev_attr_temp_deactivated.attr,
+ &dev_attr_caps.attr,
+ &dev_attr_cancel.attr,
+ &dev_attr_durations.attr,
+ &dev_attr_timeouts.attr,
+ &dev_attr_tpm_version_major.attr,
+ NULL,
+};
+
+static struct attribute *tpm2_dev_attrs[] = {
+ &dev_attr_tpm_version_major.attr,
+ NULL
+};
+
+static const struct attribute_group tpm1_dev_group = {
+ .attrs = tpm1_dev_attrs,
+};
+
+static const struct attribute_group tpm2_dev_group = {
+ .attrs = tpm2_dev_attrs,
+};
+
+struct tpm_pcr_attr {
+ int alg_id;
+ int pcr;
+ struct device_attribute attr;
+};
+
+#define to_tpm_pcr_attr(a) container_of(a, struct tpm_pcr_attr, attr)
+
+static ssize_t pcr_value_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_pcr_attr *ha = to_tpm_pcr_attr(attr);
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ struct tpm_digest digest;
+ int i;
+ int digest_size = 0;
+ int rc;
+ char *str = buf;
+
+ for (i = 0; i < chip->nr_allocated_banks; i++)
+ if (ha->alg_id == chip->allocated_banks[i].alg_id)
+ digest_size = chip->allocated_banks[i].digest_size;
+ /* should never happen */
+ if (!digest_size)
+ return -EINVAL;
+
+ digest.alg_id = ha->alg_id;
+ rc = tpm_pcr_read(chip, ha->pcr, &digest);
+ if (rc)
+ return rc;
+ for (i = 0; i < digest_size; i++)
+ str += sprintf(str, "%02X", digest.digest[i]);
+ str += sprintf(str, "\n");
+
+ return str - buf;
+}
+
+/*
+ * The following set of defines represents all the magic to build
+ * the per hash attribute groups for displaying each bank of PCRs.
+ * The only slight problem with this approach is that every PCR is
+ * hard coded to be present, so you don't know if a PCR is missing
+ * until a cat of the file returns -EINVAL.
+ *
+ * Also note you must ignore checkpatch warnings in this macro
+ * code. This is deep macro magic that checkpatch.pl doesn't
+ * understand.
+ */
+
+/* Note, this must match TPM2_PLATFORM_PCR which is fixed at 24. */
+#define _TPM_HELPER(_alg, _hash, F) \
+ F(_alg, _hash, 0) \
+ F(_alg, _hash, 1) \
+ F(_alg, _hash, 2) \
+ F(_alg, _hash, 3) \
+ F(_alg, _hash, 4) \
+ F(_alg, _hash, 5) \
+ F(_alg, _hash, 6) \
+ F(_alg, _hash, 7) \
+ F(_alg, _hash, 8) \
+ F(_alg, _hash, 9) \
+ F(_alg, _hash, 10) \
+ F(_alg, _hash, 11) \
+ F(_alg, _hash, 12) \
+ F(_alg, _hash, 13) \
+ F(_alg, _hash, 14) \
+ F(_alg, _hash, 15) \
+ F(_alg, _hash, 16) \
+ F(_alg, _hash, 17) \
+ F(_alg, _hash, 18) \
+ F(_alg, _hash, 19) \
+ F(_alg, _hash, 20) \
+ F(_alg, _hash, 21) \
+ F(_alg, _hash, 22) \
+ F(_alg, _hash, 23)
+
+/* ignore checkpatch warning about trailing ; in macro. */
+#define PCR_ATTR(_alg, _hash, _pcr) \
+ static struct tpm_pcr_attr dev_attr_pcr_##_hash##_##_pcr = { \
+ .alg_id = _alg, \
+ .pcr = _pcr, \
+ .attr = { \
+ .attr = { \
+ .name = __stringify(_pcr), \
+ .mode = 0444 \
+ }, \
+ .show = pcr_value_show \
+ } \
+ };
+
+#define PCR_ATTRS(_alg, _hash) \
+ _TPM_HELPER(_alg, _hash, PCR_ATTR)
+
+/* ignore checkpatch warning about trailing , in macro. */
+#define PCR_ATTR_VAL(_alg, _hash, _pcr) \
+ &dev_attr_pcr_##_hash##_##_pcr.attr.attr,
+
+#define PCR_ATTR_GROUP_ARRAY(_alg, _hash) \
+ static struct attribute *pcr_group_attrs_##_hash[] = { \
+ _TPM_HELPER(_alg, _hash, PCR_ATTR_VAL) \
+ NULL \
+ }
+
+#define PCR_ATTR_GROUP(_alg, _hash) \
+ static struct attribute_group pcr_group_##_hash = { \
+ .name = "pcr-" __stringify(_hash), \
+ .attrs = pcr_group_attrs_##_hash \
+ }
+
+#define PCR_ATTR_BUILD(_alg, _hash) \
+ PCR_ATTRS(_alg, _hash) \
+ PCR_ATTR_GROUP_ARRAY(_alg, _hash); \
+ PCR_ATTR_GROUP(_alg, _hash)
+/*
+ * End of macro structure to build an attribute group containing 24
+ * PCR value files for each supported hash algorithm
+ */
+
+/*
+ * The next set of macros implements the cleverness for each hash to
+ * build a static attribute group called pcr_group_<hash> which can be
+ * added to chip->groups[].
+ *
+ * The first argument is the TPM algorithm id and the second is the
+ * hash used as both the suffix and the group name. Note: the group
+ * name is a directory in the top level tpm class with the name
+ * pcr-<hash>, so it must not clash with any other names already
+ * in the sysfs directory.
+ */
+PCR_ATTR_BUILD(TPM_ALG_SHA1, sha1);
+PCR_ATTR_BUILD(TPM_ALG_SHA256, sha256);
+PCR_ATTR_BUILD(TPM_ALG_SHA384, sha384);
+PCR_ATTR_BUILD(TPM_ALG_SHA512, sha512);
+PCR_ATTR_BUILD(TPM_ALG_SM3_256, sm3);
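+
+/*
+ * Example of the result (illustrative): PCR_ATTR_BUILD(TPM_ALG_SHA256, sha256)
+ * above produces pcr_group_sha256, which appears in sysfs as
+ *
+ *	/sys/class/tpm/tpm0/pcr-sha256/0
+ *	...
+ *	/sys/class/tpm/tpm0/pcr-sha256/23
+ *
+ * with each file printing the current PCR value via pcr_value_show().
+ */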
+
+
+void tpm_sysfs_add_device(struct tpm_chip *chip)
+{
+ int i;
+
+ WARN_ON(chip->groups_cnt != 0);
+
+ if (tpm_is_firmware_upgrade(chip))
+ return;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ chip->groups[chip->groups_cnt++] = &tpm2_dev_group;
+ else
+ chip->groups[chip->groups_cnt++] = &tpm1_dev_group;
+
+ /* add one group for each bank hash */
+ for (i = 0; i < chip->nr_allocated_banks; i++) {
+ switch (chip->allocated_banks[i].alg_id) {
+ case TPM_ALG_SHA1:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sha1;
+ break;
+ case TPM_ALG_SHA256:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sha256;
+ break;
+ case TPM_ALG_SHA384:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sha384;
+ break;
+ case TPM_ALG_SHA512:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sha512;
+ break;
+ case TPM_ALG_SM3_256:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sm3;
+ break;
+ default:
+ /*
+			 * If this triggers, send a patch to add both a
+ * PCR_ATTR_BUILD() macro above for the
+ * missing algorithm as well as an additional
+ * case in this switch statement.
+ */
+ dev_err(&chip->dev,
+ "TPM with unsupported bank algorithm 0x%04x",
+ chip->allocated_banks[i].alg_id);
+ break;
+ }
+ }
+
+ /*
+ * This will only trigger if someone has added an additional
+ * hash to the tpm_algorithms enum without incrementing
+ * TPM_MAX_HASHES.
+ */
+ WARN_ON(chip->groups_cnt > TPM_MAX_HASHES + 1);
+}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
new file mode 100644
index 0000000000..61445f1dc4
--- /dev/null
+++ b/drivers/char/tpm/tpm.h
@@ -0,0 +1,320 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ */
+
+#ifndef __TPM_H__
+#define __TPM_H__
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/tpm.h>
+#include <linux/tpm_eventlog.h>
+
+#ifdef CONFIG_X86
+#include <asm/intel-family.h>
+#endif
+
+#define TPM_MINOR 224 /* officially assigned */
+#define TPM_BUFSIZE 4096
+#define TPM_NUM_DEVICES 65536
+#define TPM_RETRY 50
+
+enum tpm_timeout {
+ TPM_TIMEOUT = 5, /* msecs */
+ TPM_TIMEOUT_RETRY = 100, /* msecs */
+ TPM_TIMEOUT_RANGE_US = 300, /* usecs */
+ TPM_TIMEOUT_POLL = 1, /* msecs */
+ TPM_TIMEOUT_USECS_MIN = 100, /* usecs */
+ TPM_TIMEOUT_USECS_MAX = 500 /* usecs */
+};
+
+/* TPM addresses */
+enum tpm_addr {
+ TPM_SUPERIO_ADDR = 0x2E,
+ TPM_ADDR = 0x4E,
+};
+
+#define TPM_WARN_RETRY 0x800
+#define TPM_WARN_DOING_SELFTEST 0x802
+#define TPM_ERR_DEACTIVATED 0x6
+#define TPM_ERR_DISABLED 0x7
+#define TPM_ERR_FAILEDSELFTEST 0x1C
+#define TPM_ERR_INVALID_POSTINIT 38
+
+#define TPM_TAG_RQU_COMMAND 193
+
+/* TPM2 specific constants. */
+#define TPM2_SPACE_BUFFER_SIZE 16384 /* 16 kB */
+
+struct stclear_flags_t {
+ __be16 tag;
+ u8 deactivated;
+ u8 disableForceClear;
+ u8 physicalPresence;
+ u8 physicalPresenceLock;
+ u8 bGlobalLock;
+} __packed;
+
+struct tpm1_version {
+ u8 major;
+ u8 minor;
+ u8 rev_major;
+ u8 rev_minor;
+} __packed;
+
+struct tpm1_version2 {
+ __be16 tag;
+ struct tpm1_version version;
+} __packed;
+
+struct timeout_t {
+ __be32 a;
+ __be32 b;
+ __be32 c;
+ __be32 d;
+} __packed;
+
+struct duration_t {
+ __be32 tpm_short;
+ __be32 tpm_medium;
+ __be32 tpm_long;
+} __packed;
+
+struct permanent_flags_t {
+ __be16 tag;
+ u8 disable;
+ u8 ownership;
+ u8 deactivated;
+ u8 readPubek;
+ u8 disableOwnerClear;
+ u8 allowMaintenance;
+ u8 physicalPresenceLifetimeLock;
+ u8 physicalPresenceHWEnable;
+ u8 physicalPresenceCMDEnable;
+ u8 CEKPUsed;
+ u8 TPMpost;
+ u8 TPMpostLock;
+ u8 FIPS;
+ u8 operator;
+ u8 enableRevokeEK;
+ u8 nvLocked;
+ u8 readSRKPub;
+ u8 tpmEstablished;
+ u8 maintenanceDone;
+ u8 disableFullDALogicInfo;
+} __packed;
+
+typedef union {
+ struct permanent_flags_t perm_flags;
+ struct stclear_flags_t stclear_flags;
+ __u8 owned;
+ __be32 num_pcrs;
+ struct tpm1_version version1;
+ struct tpm1_version2 version2;
+ __be32 manufacturer_id;
+ struct timeout_t timeout;
+ struct duration_t duration;
+} cap_t;
+
+enum tpm_capabilities {
+ TPM_CAP_FLAG = 4,
+ TPM_CAP_PROP = 5,
+ TPM_CAP_VERSION_1_1 = 0x06,
+ TPM_CAP_VERSION_1_2 = 0x1A,
+};
+
+enum tpm_sub_capabilities {
+ TPM_CAP_PROP_PCR = 0x101,
+ TPM_CAP_PROP_MANUFACTURER = 0x103,
+ TPM_CAP_FLAG_PERM = 0x108,
+ TPM_CAP_FLAG_VOL = 0x109,
+ TPM_CAP_PROP_OWNER = 0x111,
+ TPM_CAP_PROP_TIS_TIMEOUT = 0x115,
+ TPM_CAP_PROP_TIS_DURATION = 0x120,
+};
+
+enum tpm2_pt_props {
+ TPM2_PT_NONE = 0x00000000,
+ TPM2_PT_GROUP = 0x00000100,
+ TPM2_PT_FIXED = TPM2_PT_GROUP * 1,
+ TPM2_PT_FAMILY_INDICATOR = TPM2_PT_FIXED + 0,
+ TPM2_PT_LEVEL = TPM2_PT_FIXED + 1,
+ TPM2_PT_REVISION = TPM2_PT_FIXED + 2,
+ TPM2_PT_DAY_OF_YEAR = TPM2_PT_FIXED + 3,
+ TPM2_PT_YEAR = TPM2_PT_FIXED + 4,
+ TPM2_PT_MANUFACTURER = TPM2_PT_FIXED + 5,
+ TPM2_PT_VENDOR_STRING_1 = TPM2_PT_FIXED + 6,
+ TPM2_PT_VENDOR_STRING_2 = TPM2_PT_FIXED + 7,
+ TPM2_PT_VENDOR_STRING_3 = TPM2_PT_FIXED + 8,
+ TPM2_PT_VENDOR_STRING_4 = TPM2_PT_FIXED + 9,
+ TPM2_PT_VENDOR_TPM_TYPE = TPM2_PT_FIXED + 10,
+ TPM2_PT_FIRMWARE_VERSION_1 = TPM2_PT_FIXED + 11,
+ TPM2_PT_FIRMWARE_VERSION_2 = TPM2_PT_FIXED + 12,
+ TPM2_PT_INPUT_BUFFER = TPM2_PT_FIXED + 13,
+ TPM2_PT_HR_TRANSIENT_MIN = TPM2_PT_FIXED + 14,
+ TPM2_PT_HR_PERSISTENT_MIN = TPM2_PT_FIXED + 15,
+ TPM2_PT_HR_LOADED_MIN = TPM2_PT_FIXED + 16,
+ TPM2_PT_ACTIVE_SESSIONS_MAX = TPM2_PT_FIXED + 17,
+ TPM2_PT_PCR_COUNT = TPM2_PT_FIXED + 18,
+ TPM2_PT_PCR_SELECT_MIN = TPM2_PT_FIXED + 19,
+ TPM2_PT_CONTEXT_GAP_MAX = TPM2_PT_FIXED + 20,
+ TPM2_PT_NV_COUNTERS_MAX = TPM2_PT_FIXED + 22,
+ TPM2_PT_NV_INDEX_MAX = TPM2_PT_FIXED + 23,
+ TPM2_PT_MEMORY = TPM2_PT_FIXED + 24,
+ TPM2_PT_CLOCK_UPDATE = TPM2_PT_FIXED + 25,
+ TPM2_PT_CONTEXT_HASH = TPM2_PT_FIXED + 26,
+ TPM2_PT_CONTEXT_SYM = TPM2_PT_FIXED + 27,
+ TPM2_PT_CONTEXT_SYM_SIZE = TPM2_PT_FIXED + 28,
+ TPM2_PT_ORDERLY_COUNT = TPM2_PT_FIXED + 29,
+ TPM2_PT_MAX_COMMAND_SIZE = TPM2_PT_FIXED + 30,
+ TPM2_PT_MAX_RESPONSE_SIZE = TPM2_PT_FIXED + 31,
+ TPM2_PT_MAX_DIGEST = TPM2_PT_FIXED + 32,
+ TPM2_PT_MAX_OBJECT_CONTEXT = TPM2_PT_FIXED + 33,
+ TPM2_PT_MAX_SESSION_CONTEXT = TPM2_PT_FIXED + 34,
+ TPM2_PT_PS_FAMILY_INDICATOR = TPM2_PT_FIXED + 35,
+ TPM2_PT_PS_LEVEL = TPM2_PT_FIXED + 36,
+ TPM2_PT_PS_REVISION = TPM2_PT_FIXED + 37,
+ TPM2_PT_PS_DAY_OF_YEAR = TPM2_PT_FIXED + 38,
+ TPM2_PT_PS_YEAR = TPM2_PT_FIXED + 39,
+ TPM2_PT_SPLIT_MAX = TPM2_PT_FIXED + 40,
+ TPM2_PT_TOTAL_COMMANDS = TPM2_PT_FIXED + 41,
+ TPM2_PT_LIBRARY_COMMANDS = TPM2_PT_FIXED + 42,
+ TPM2_PT_VENDOR_COMMANDS = TPM2_PT_FIXED + 43,
+ TPM2_PT_NV_BUFFER_MAX = TPM2_PT_FIXED + 44,
+ TPM2_PT_MODES = TPM2_PT_FIXED + 45,
+ TPM2_PT_MAX_CAP_BUFFER = TPM2_PT_FIXED + 46,
+ TPM2_PT_VAR = TPM2_PT_GROUP * 2,
+ TPM2_PT_PERMANENT = TPM2_PT_VAR + 0,
+ TPM2_PT_STARTUP_CLEAR = TPM2_PT_VAR + 1,
+ TPM2_PT_HR_NV_INDEX = TPM2_PT_VAR + 2,
+ TPM2_PT_HR_LOADED = TPM2_PT_VAR + 3,
+ TPM2_PT_HR_LOADED_AVAIL = TPM2_PT_VAR + 4,
+ TPM2_PT_HR_ACTIVE = TPM2_PT_VAR + 5,
+ TPM2_PT_HR_ACTIVE_AVAIL = TPM2_PT_VAR + 6,
+ TPM2_PT_HR_TRANSIENT_AVAIL = TPM2_PT_VAR + 7,
+ TPM2_PT_HR_PERSISTENT = TPM2_PT_VAR + 8,
+ TPM2_PT_HR_PERSISTENT_AVAIL = TPM2_PT_VAR + 9,
+ TPM2_PT_NV_COUNTERS = TPM2_PT_VAR + 10,
+ TPM2_PT_NV_COUNTERS_AVAIL = TPM2_PT_VAR + 11,
+ TPM2_PT_ALGORITHM_SET = TPM2_PT_VAR + 12,
+ TPM2_PT_LOADED_CURVES = TPM2_PT_VAR + 13,
+ TPM2_PT_LOCKOUT_COUNTER = TPM2_PT_VAR + 14,
+ TPM2_PT_MAX_AUTH_FAIL = TPM2_PT_VAR + 15,
+ TPM2_PT_LOCKOUT_INTERVAL = TPM2_PT_VAR + 16,
+ TPM2_PT_LOCKOUT_RECOVERY = TPM2_PT_VAR + 17,
+ TPM2_PT_NV_WRITE_RECOVERY = TPM2_PT_VAR + 18,
+ TPM2_PT_AUDIT_COUNTER_0 = TPM2_PT_VAR + 19,
+ TPM2_PT_AUDIT_COUNTER_1 = TPM2_PT_VAR + 20,
+};
+
+/* 128 bytes is an arbitrary cap. This could be as large as TPM_BUFSIZE - 18
+ * bytes, but 128 is still a relatively large number of random bytes and
+ * anything much bigger causes users of struct tpm_cmd_t to start getting
+ * compiler warnings about stack frame size. */
+#define TPM_MAX_RNG_DATA 128
+
+extern const struct class tpm_class;
+extern const struct class tpmrm_class;
+extern dev_t tpm_devt;
+extern const struct file_operations tpm_fops;
+extern const struct file_operations tpmrm_fops;
+extern struct idr dev_nums_idr;
+
+ssize_t tpm_transmit(struct tpm_chip *chip, u8 *buf, size_t bufsiz);
+int tpm_get_timeouts(struct tpm_chip *);
+int tpm_auto_startup(struct tpm_chip *chip);
+
+int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr);
+int tpm1_auto_startup(struct tpm_chip *chip);
+int tpm1_do_selftest(struct tpm_chip *chip);
+int tpm1_get_timeouts(struct tpm_chip *chip);
+unsigned long tpm1_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+int tpm1_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash,
+ const char *log_msg);
+int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf);
+ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
+ const char *desc, size_t min_cap_length);
+int tpm1_get_random(struct tpm_chip *chip, u8 *out, size_t max);
+int tpm1_get_pcr_allocation(struct tpm_chip *chip);
+unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+int tpm_pm_suspend(struct device *dev);
+int tpm_pm_resume(struct device *dev);
+int tpm_class_shutdown(struct device *dev);
+
+static inline void tpm_msleep(unsigned int delay_msec)
+{
+ usleep_range((delay_msec * 1000) - TPM_TIMEOUT_RANGE_US,
+ delay_msec * 1000);
+};
+
+int tpm_chip_bootstrap(struct tpm_chip *chip);
+int tpm_chip_start(struct tpm_chip *chip);
+void tpm_chip_stop(struct tpm_chip *chip);
+struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);
+
+struct tpm_chip *tpm_chip_alloc(struct device *dev,
+ const struct tpm_class_ops *ops);
+struct tpm_chip *tpmm_chip_alloc(struct device *pdev,
+ const struct tpm_class_ops *ops);
+int tpm_chip_register(struct tpm_chip *chip);
+void tpm_chip_unregister(struct tpm_chip *chip);
+
+void tpm_sysfs_add_device(struct tpm_chip *chip);
+
+
+#ifdef CONFIG_ACPI
+extern void tpm_add_ppi(struct tpm_chip *chip);
+#else
+static inline void tpm_add_ppi(struct tpm_chip *chip)
+{
+}
+#endif
+
+int tpm2_get_timeouts(struct tpm_chip *chip);
+int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digest, u16 *digest_size_ptr);
+int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digests);
+int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max);
+ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
+ u32 *value, const char *desc);
+
+ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip);
+int tpm2_auto_startup(struct tpm_chip *chip);
+void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
+unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+int tpm2_probe(struct tpm_chip *chip);
+int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip);
+int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
+int tpm2_init_space(struct tpm_space *space, unsigned int buf_size);
+void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space);
+void tpm2_flush_space(struct tpm_chip *chip);
+int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
+ size_t cmdsiz);
+int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf,
+ size_t *bufsiz);
+int tpm_devs_add(struct tpm_chip *chip);
+void tpm_devs_remove(struct tpm_chip *chip);
+
+void tpm_bios_log_setup(struct tpm_chip *chip);
+void tpm_bios_log_teardown(struct tpm_chip *chip);
+int tpm_dev_common_init(void);
+void tpm_dev_common_exit(void);
+#endif
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
new file mode 100644
index 0000000000..cf64c73851
--- /dev/null
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -0,0 +1,813 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ */
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/freezer.h>
+#include <linux/tpm_eventlog.h>
+
+#include "tpm.h"
+
+#define TPM_MAX_ORDINAL 243
+
+/*
+ * Array with one entry per ordinal defining the maximum amount
+ * of time the chip could take to return the result. The ordinal
+ * designation of short, medium or long is defined in a table in
+ * TCG Specification TPM Main Part 2 TPM Structures Section 17. The
+ * values of the SHORT, MEDIUM, and LONG durations are retrieved
+ * from the chip during initialization with a call to tpm_get_timeouts.
+ */
+static const u8 tpm1_ordinal_duration[TPM_MAX_ORDINAL] = {
+ TPM_UNDEFINED, /* 0 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 5 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 10 */
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_LONG,
+ TPM_LONG,
+ TPM_MEDIUM, /* 15 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_LONG,
+ TPM_SHORT, /* 20 */
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_SHORT, /* 25 */
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_MEDIUM, /* 30 */
+ TPM_LONG,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT, /* 35 */
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_MEDIUM, /* 40 */
+ TPM_LONG,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT, /* 45 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_LONG,
+ TPM_MEDIUM, /* 50 */
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 55 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_MEDIUM, /* 60 */
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_MEDIUM, /* 65 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 70 */
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 75 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_LONG, /* 80 */
+ TPM_UNDEFINED,
+ TPM_MEDIUM,
+ TPM_LONG,
+ TPM_SHORT,
+ TPM_UNDEFINED, /* 85 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 90 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED, /* 95 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_MEDIUM, /* 100 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 105 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 110 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT, /* 115 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_LONG, /* 120 */
+ TPM_LONG,
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_SHORT,
+ TPM_SHORT, /* 125 */
+ TPM_SHORT,
+ TPM_LONG,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT, /* 130 */
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_UNDEFINED, /* 135 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 140 */
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 145 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 150 */
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED, /* 155 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 160 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 165 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_LONG, /* 170 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 175 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_MEDIUM, /* 180 */
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_MEDIUM, /* 185 */
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 190 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 195 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 200 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT,
+ TPM_SHORT, /* 205 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_MEDIUM, /* 210 */
+ TPM_UNDEFINED,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_UNDEFINED, /* 215 */
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT,
+ TPM_SHORT, /* 220 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED, /* 225 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 230 */
+ TPM_LONG,
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 235 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 240 */
+ TPM_UNDEFINED,
+ TPM_MEDIUM,
+};
+
+/**
+ * tpm1_calc_ordinal_duration() - calculate the maximum command duration
+ * @chip: TPM chip to use.
+ * @ordinal: TPM command ordinal.
+ *
+ * The function returns the maximum amount of time the chip could take
+ * to return the result for a particular ordinal in jiffies.
+ *
+ * Return: A maximal duration time for an ordinal in jiffies.
+ */
+unsigned long tpm1_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
+{
+ int duration_idx = TPM_UNDEFINED;
+ int duration = 0;
+
+ /*
+ * We only have a duration table for protected commands, where the upper
+ * 16 bits are 0. For the few other ordinals the fallback will be used.
+ */
+ if (ordinal < TPM_MAX_ORDINAL)
+ duration_idx = tpm1_ordinal_duration[ordinal];
+
+ if (duration_idx != TPM_UNDEFINED)
+ duration = chip->duration[duration_idx];
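+	/* Fall back to a conservative two-minute maximum when no duration is known. */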
+ if (duration <= 0)
+ return 2 * 60 * HZ;
+ else
+ return duration;
+}
+
+#define TPM_ORD_STARTUP 153
+#define TPM_ST_CLEAR 1
+
+/**
+ * tpm1_startup() - turn on the TPM
+ * @chip: TPM chip to use
+ *
+ * Normally the firmware should start the TPM. This function is provided as a
+ * workaround if this does not happen. A legitimate case for this is, for
+ * example, when a TPM emulator is used.
+ *
+ * Return: same as tpm_transmit_cmd()
+ */
+static int tpm1_startup(struct tpm_chip *chip)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ dev_info(&chip->dev, "starting up the TPM manually\n");
+
+ rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_STARTUP);
+ if (rc < 0)
+ return rc;
+
+ tpm_buf_append_u16(&buf, TPM_ST_CLEAR);
+
+ rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to start the TPM");
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+int tpm1_get_timeouts(struct tpm_chip *chip)
+{
+ cap_t cap;
+ unsigned long timeout_old[4], timeout_chip[4], timeout_eff[4];
+ unsigned long durations[3];
+ ssize_t rc;
+
+ rc = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, NULL,
+ sizeof(cap.timeout));
+ if (rc == TPM_ERR_INVALID_POSTINIT) {
+ if (tpm1_startup(chip))
+ return rc;
+
+ rc = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap,
+ "attempting to determine the timeouts",
+ sizeof(cap.timeout));
+ }
+
+ if (rc) {
+ dev_err(&chip->dev, "A TPM error (%zd) occurred attempting to determine the timeouts\n",
+ rc);
+ return rc;
+ }
+
+ timeout_old[0] = jiffies_to_usecs(chip->timeout_a);
+ timeout_old[1] = jiffies_to_usecs(chip->timeout_b);
+ timeout_old[2] = jiffies_to_usecs(chip->timeout_c);
+ timeout_old[3] = jiffies_to_usecs(chip->timeout_d);
+ timeout_chip[0] = be32_to_cpu(cap.timeout.a);
+ timeout_chip[1] = be32_to_cpu(cap.timeout.b);
+ timeout_chip[2] = be32_to_cpu(cap.timeout.c);
+ timeout_chip[3] = be32_to_cpu(cap.timeout.d);
+ memcpy(timeout_eff, timeout_chip, sizeof(timeout_eff));
+
+ /*
+ * Provide ability for vendor overrides of timeout values in case
+ * of misreporting.
+ */
+ if (chip->ops->update_timeouts)
+ chip->ops->update_timeouts(chip, timeout_eff);
+
+ if (!chip->timeout_adjusted) {
+ /* Restore default if chip reported 0 */
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(timeout_eff); i++) {
+ if (timeout_eff[i])
+ continue;
+
+ timeout_eff[i] = timeout_old[i];
+ chip->timeout_adjusted = true;
+ }
+
+ if (timeout_eff[0] != 0 && timeout_eff[0] < 1000) {
+			/* timeouts in msec rather than usec */
+ for (i = 0; i != ARRAY_SIZE(timeout_eff); i++)
+ timeout_eff[i] *= 1000;
+ chip->timeout_adjusted = true;
+ }
+ }
+
+ /* Report adjusted timeouts */
+ if (chip->timeout_adjusted) {
+ dev_info(&chip->dev, HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
+ timeout_chip[0], timeout_eff[0],
+ timeout_chip[1], timeout_eff[1],
+ timeout_chip[2], timeout_eff[2],
+ timeout_chip[3], timeout_eff[3]);
+ }
+
+ chip->timeout_a = usecs_to_jiffies(timeout_eff[0]);
+ chip->timeout_b = usecs_to_jiffies(timeout_eff[1]);
+ chip->timeout_c = usecs_to_jiffies(timeout_eff[2]);
+ chip->timeout_d = usecs_to_jiffies(timeout_eff[3]);
+
+ rc = tpm1_getcap(chip, TPM_CAP_PROP_TIS_DURATION, &cap,
+ "attempting to determine the durations",
+ sizeof(cap.duration));
+ if (rc)
+ return rc;
+
+ chip->duration[TPM_SHORT] =
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_short));
+ chip->duration[TPM_MEDIUM] =
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_medium));
+ chip->duration[TPM_LONG] =
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_long));
+ chip->duration[TPM_LONG_LONG] = 0; /* not used under 1.2 */
+
+ /*
+ * Provide the ability for vendor overrides of duration values in case
+ * of misreporting.
+ */
+ if (chip->ops->update_durations)
+ chip->ops->update_durations(chip, durations);
+
+ if (chip->duration_adjusted) {
+ dev_info(&chip->dev, HW_ERR "Adjusting reported durations.");
+ chip->duration[TPM_SHORT] = durations[0];
+ chip->duration[TPM_MEDIUM] = durations[1];
+ chip->duration[TPM_LONG] = durations[2];
+ }
+
+ /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
+ * value wrong and apparently reports msecs rather than usecs. So we
+ * fix up the resulting too-small TPM_SHORT value to make things work.
+	 * We also scale the TPM_MEDIUM and TPM_LONG values by 1000.
+ */
+ if (chip->duration[TPM_SHORT] < (HZ / 100)) {
+ chip->duration[TPM_SHORT] = HZ;
+ chip->duration[TPM_MEDIUM] *= 1000;
+ chip->duration[TPM_LONG] *= 1000;
+ chip->duration_adjusted = true;
+ dev_info(&chip->dev, "Adjusting TPM timeout parameters.");
+ }
+
+ chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
+ return 0;
+}
+
+#define TPM_ORD_PCR_EXTEND 20
+int tpm1_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash,
+ const char *log_msg)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_PCR_EXTEND);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, pcr_idx);
+ tpm_buf_append(&buf, hash, TPM_DIGEST_SIZE);
+
+ rc = tpm_transmit_cmd(chip, &buf, TPM_DIGEST_SIZE, log_msg);
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+#define TPM_ORD_GET_CAP 101
+ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
+ const char *desc, size_t min_cap_length)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_GET_CAP);
+ if (rc)
+ return rc;
+
+ if (subcap_id == TPM_CAP_VERSION_1_1 ||
+ subcap_id == TPM_CAP_VERSION_1_2) {
+ tpm_buf_append_u32(&buf, subcap_id);
+ tpm_buf_append_u32(&buf, 0);
+ } else {
+ if (subcap_id == TPM_CAP_FLAG_PERM ||
+ subcap_id == TPM_CAP_FLAG_VOL)
+ tpm_buf_append_u32(&buf, TPM_CAP_FLAG);
+ else
+ tpm_buf_append_u32(&buf, TPM_CAP_PROP);
+
+ tpm_buf_append_u32(&buf, 4);
+ tpm_buf_append_u32(&buf, subcap_id);
+ }
+ rc = tpm_transmit_cmd(chip, &buf, min_cap_length, desc);
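+	/* On success, the capability payload follows a 4-byte size field. */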
+ if (!rc)
+ *cap = *(cap_t *)&buf.data[TPM_HEADER_SIZE + 4];
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm1_getcap);
+
+#define TPM_ORD_GET_RANDOM 70
+struct tpm1_get_random_out {
+ __be32 rng_data_len;
+ u8 rng_data[TPM_MAX_RNG_DATA];
+} __packed;
+
+/**
+ * tpm1_get_random() - get random bytes from the TPM's RNG
+ * @chip: a &struct tpm_chip instance
+ * @dest: destination buffer for the random bytes
+ * @max: the maximum number of bytes to write to @dest
+ *
+ * Return:
+ * * number of bytes read
+ * * -errno (positive TPM return codes are masked to -EIO)
+ */
+int tpm1_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
+{
+ struct tpm1_get_random_out *out;
+ u32 num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA);
+ struct tpm_buf buf;
+ u32 total = 0;
+ int retries = 5;
+ u32 recd;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_GET_RANDOM);
+ if (rc)
+ return rc;
+
+ do {
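+		/* Ask for the remaining bytes; the TPM may return fewer per call. */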
+ tpm_buf_append_u32(&buf, num_bytes);
+
+ rc = tpm_transmit_cmd(chip, &buf, sizeof(out->rng_data_len),
+ "attempting get random");
+ if (rc) {
+ if (rc > 0)
+ rc = -EIO;
+ goto out;
+ }
+
+ out = (struct tpm1_get_random_out *)&buf.data[TPM_HEADER_SIZE];
+
+ recd = be32_to_cpu(out->rng_data_len);
+ if (recd > num_bytes) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE +
+ sizeof(out->rng_data_len) + recd) {
+ rc = -EFAULT;
+ goto out;
+ }
+ memcpy(dest, out->rng_data, recd);
+
+ dest += recd;
+ total += recd;
+ num_bytes -= recd;
+
+ tpm_buf_reset(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_GET_RANDOM);
+ } while (retries-- && total < max);
+
+ rc = total ? (int)total : -EIO;
+out:
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+#define TPM_ORD_PCRREAD 21
+int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_PCRREAD);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, pcr_idx);
+
+ rc = tpm_transmit_cmd(chip, &buf, TPM_DIGEST_SIZE,
+ "attempting to read a pcr value");
+ if (rc)
+ goto out;
+
+ if (tpm_buf_length(&buf) < TPM_DIGEST_SIZE) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ memcpy(res_buf, &buf.data[TPM_HEADER_SIZE], TPM_DIGEST_SIZE);
+
+out:
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+#define TPM_ORD_CONTINUE_SELFTEST 83
+/**
+ * tpm1_continue_selftest() - run TPM's selftest
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing
+ * a TPM error code.
+ */
+static int tpm1_continue_selftest(struct tpm_chip *chip)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_CONTINUE_SELFTEST);
+ if (rc)
+ return rc;
+
+ rc = tpm_transmit_cmd(chip, &buf, 0, "continue selftest");
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+/**
+ * tpm1_do_selftest - have the TPM continue its selftest and wait until it
+ * can receive further commands
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing
+ * a TPM error code.
+ */
+int tpm1_do_selftest(struct tpm_chip *chip)
+{
+ int rc;
+ unsigned int loops;
+ unsigned int delay_msec = 100;
+ unsigned long duration;
+ u8 dummy[TPM_DIGEST_SIZE];
+
+ duration = tpm1_calc_ordinal_duration(chip, TPM_ORD_CONTINUE_SELFTEST);
+
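+	/* Poll for at most the ordinal's maximum duration, in delay_msec steps. */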
+ loops = jiffies_to_msecs(duration) / delay_msec;
+
+ rc = tpm1_continue_selftest(chip);
+ if (rc == TPM_ERR_INVALID_POSTINIT) {
+ chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED;
+ dev_info(&chip->dev, "TPM not ready (%d)\n", rc);
+ }
+ /* This may fail if there was no TPM driver during a suspend/resume
+ * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST)
+ */
+ if (rc)
+ return rc;
+
+ do {
+ /* Attempt to read a PCR value */
+ rc = tpm1_pcr_read(chip, 0, dummy);
+
+ /* Some buggy TPMs will not respond to tpm_tis_ready() for
+ * around 300ms while the self test is ongoing, keep trying
+ * until the self test duration expires.
+ */
+ if (rc == -ETIME) {
+ dev_info(&chip->dev, HW_ERR "TPM command timed out during continue self test");
+ tpm_msleep(delay_msec);
+ continue;
+ }
+
+ if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
+ dev_info(&chip->dev, "TPM is disabled/deactivated (0x%X)\n",
+ rc);
+ /* TPM is disabled and/or deactivated; driver can
+ * proceed and TPM does handle commands for
+ * suspend/resume correctly
+ */
+ return 0;
+ }
+ if (rc != TPM_WARN_DOING_SELFTEST)
+ return rc;
+ tpm_msleep(delay_msec);
+ } while (--loops > 0);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm1_do_selftest);
+
+/**
+ * tpm1_auto_startup - Perform the standard automatic TPM initialization
+ * sequence
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error.
+ */
+int tpm1_auto_startup(struct tpm_chip *chip)
+{
+ int rc;
+
+ rc = tpm1_get_timeouts(chip);
+ if (rc)
+ goto out;
+ rc = tpm1_do_selftest(chip);
+ if (rc == TPM_ERR_FAILEDSELFTEST) {
+ dev_warn(&chip->dev, "TPM self test failed, switching to the firmware upgrade mode\n");
+ /* A TPM in this state possibly allows or needs a firmware upgrade */
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ return 0;
+ } else if (rc) {
+ dev_err(&chip->dev, "TPM self test failed\n");
+ goto out;
+ }
+
+ return rc;
+out:
+ if (rc > 0)
+ rc = -ENODEV;
+ return rc;
+}
+
+#define TPM_ORD_SAVESTATE 152
+
+/**
+ * tpm1_pm_suspend() - pm suspend handler
+ * @chip: TPM chip to use.
+ * @tpm_suspend_pcr: flush pcr for buggy TPM chips.
+ *
+ * The function saves the TPM state to be restored on resume.
+ *
+ * Return:
+ * * 0 on success,
+ * * < 0 on error.
+ */
+int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr)
+{
+ u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };
+ struct tpm_buf buf;
+ unsigned int try;
+ int rc;
+
+ /* for buggy tpm, flush pcrs with extend to selected dummy */
+ if (tpm_suspend_pcr)
+ rc = tpm1_pcr_extend(chip, tpm_suspend_pcr, dummy_hash,
+ "extending dummy pcr before suspend");
+
+ rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_SAVESTATE);
+ if (rc)
+ return rc;
+ /* now do the actual savestate */
+ for (try = 0; try < TPM_RETRY; try++) {
+ rc = tpm_transmit_cmd(chip, &buf, 0, NULL);
+ /*
+ * If the TPM indicates that it is too busy to respond to
+ * this command then retry before giving up. It can take
+ * several seconds for this TPM to be ready.
+ *
+ * This can happen if the TPM has already been sent the
+ * SaveState command before the driver has loaded. TCG 1.2
+ * specification states that any communication after SaveState
+ * may cause the TPM to invalidate previously saved state.
+ */
+ if (rc != TPM_WARN_RETRY)
+ break;
+ tpm_msleep(TPM_TIMEOUT_RETRY);
+
+ tpm_buf_reset(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_SAVESTATE);
+ }
+
+ if (rc)
+ dev_err(&chip->dev, "Error (%d) sending savestate before suspend\n",
+ rc);
+ else if (try > 0)
+ dev_warn(&chip->dev, "TPM savestate took %dms\n",
+ try * TPM_TIMEOUT_RETRY);
+
+ tpm_buf_destroy(&buf);
+
+ return rc;
+}
+
+/**
+ * tpm1_get_pcr_allocation() - initialize the allocated bank
+ * @chip: TPM chip to use.
+ *
+ * The function initializes the single allocated SHA1 bank used for extending PCRs.
+ *
+ * Return:
+ * * 0 on success,
+ * * < 0 on error.
+ */
+int tpm1_get_pcr_allocation(struct tpm_chip *chip)
+{
+ chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
+ GFP_KERNEL);
+ if (!chip->allocated_banks)
+ return -ENOMEM;
+
+ chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
+ chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
+ chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
+ chip->nr_allocated_banks = 1;
+
+ return 0;
+}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
new file mode 100644
index 0000000000..93545be190
--- /dev/null
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -0,0 +1,789 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014, 2015 Intel Corporation
+ *
+ * Authors:
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * This file contains TPM2 protocol implementations of the commands
+ * used by the kernel internally.
+ */
+
+#include "tpm.h"
+#include <crypto/hash_info.h>
+
+static struct tpm2_hash tpm2_hash_map[] = {
+ {HASH_ALGO_SHA1, TPM_ALG_SHA1},
+ {HASH_ALGO_SHA256, TPM_ALG_SHA256},
+ {HASH_ALGO_SHA384, TPM_ALG_SHA384},
+ {HASH_ALGO_SHA512, TPM_ALG_SHA512},
+ {HASH_ALGO_SM3_256, TPM_ALG_SM3_256},
+};
+
+int tpm2_get_timeouts(struct tpm_chip *chip)
+{
+ /* Fixed timeouts for TPM2 */
+ chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
+ chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
+ chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
+ chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
+
+ /* PTP spec timeouts */
+ chip->duration[TPM_SHORT] = msecs_to_jiffies(TPM2_DURATION_SHORT);
+ chip->duration[TPM_MEDIUM] = msecs_to_jiffies(TPM2_DURATION_MEDIUM);
+ chip->duration[TPM_LONG] = msecs_to_jiffies(TPM2_DURATION_LONG);
+
+ /* Key creation commands long timeouts */
+ chip->duration[TPM_LONG_LONG] =
+ msecs_to_jiffies(TPM2_DURATION_LONG_LONG);
+
+ chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
+
+ return 0;
+}
+
+/**
+ * tpm2_ordinal_duration_index() - returns an index to the chip duration table
+ * @ordinal: TPM command ordinal.
+ *
+ * The function returns an index to the chip duration table
+ * (enum tpm_duration), that describes the maximum amount of
+ * time the chip could take to return the result for a particular ordinal.
+ *
+ * The values of the MEDIUM and LONG durations are taken
+ * from the PC Client Profile (PTP) specification (750 and 2000 msec).
+ *
+ * LONG_LONG is for commands that generate keys, which empirically take
+ * a longer time on some systems.
+ *
+ * Return:
+ * * TPM_MEDIUM
+ * * TPM_LONG
+ * * TPM_LONG_LONG
+ * * TPM_UNDEFINED
+ */
+static u8 tpm2_ordinal_duration_index(u32 ordinal)
+{
+ switch (ordinal) {
+ /* Startup */
+ case TPM2_CC_STARTUP: /* 144 */
+ return TPM_MEDIUM;
+
+ case TPM2_CC_SELF_TEST: /* 143 */
+ return TPM_LONG;
+
+ case TPM2_CC_GET_RANDOM: /* 17B */
+ return TPM_LONG;
+
+ case TPM2_CC_SEQUENCE_UPDATE: /* 15C */
+ return TPM_MEDIUM;
+ case TPM2_CC_SEQUENCE_COMPLETE: /* 13E */
+ return TPM_MEDIUM;
+ case TPM2_CC_EVENT_SEQUENCE_COMPLETE: /* 185 */
+ return TPM_MEDIUM;
+ case TPM2_CC_HASH_SEQUENCE_START: /* 186 */
+ return TPM_MEDIUM;
+
+ case TPM2_CC_VERIFY_SIGNATURE: /* 177 */
+ return TPM_LONG_LONG;
+
+ case TPM2_CC_PCR_EXTEND: /* 182 */
+ return TPM_MEDIUM;
+
+ case TPM2_CC_HIERARCHY_CONTROL: /* 121 */
+ return TPM_LONG;
+ case TPM2_CC_HIERARCHY_CHANGE_AUTH: /* 129 */
+ return TPM_LONG;
+
+ case TPM2_CC_GET_CAPABILITY: /* 17A */
+ return TPM_MEDIUM;
+
+ case TPM2_CC_NV_READ: /* 14E */
+ return TPM_LONG;
+
+ case TPM2_CC_CREATE_PRIMARY: /* 131 */
+ return TPM_LONG_LONG;
+ case TPM2_CC_CREATE: /* 153 */
+ return TPM_LONG_LONG;
+ case TPM2_CC_CREATE_LOADED: /* 191 */
+ return TPM_LONG_LONG;
+
+ default:
+ return TPM_UNDEFINED;
+ }
+}
+
+/**
+ * tpm2_calc_ordinal_duration() - calculate the maximum command duration
+ * @chip: TPM chip to use.
+ * @ordinal: TPM command ordinal.
+ *
+ * The function returns the maximum amount of time the chip could take
+ * to return the result for a particular ordinal in jiffies.
+ *
+ * Return: A maximal duration time for an ordinal in jiffies.
+ */
+unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
+{
+ unsigned int index;
+
+ index = tpm2_ordinal_duration_index(ordinal);
+
+ if (index != TPM_UNDEFINED)
+ return chip->duration[index];
+ else
+ return msecs_to_jiffies(TPM2_DURATION_DEFAULT);
+}
+
+
+struct tpm2_pcr_read_out {
+ __be32 update_cnt;
+ __be32 pcr_selects_cnt;
+ __be16 hash_alg;
+ u8 pcr_select_size;
+ u8 pcr_select[TPM2_PCR_SELECT_MIN];
+ __be32 digests_cnt;
+ __be16 digest_size;
+ u8 digest[];
+} __packed;
+
+/**
+ * tpm2_pcr_read() - read a PCR value
+ * @chip: TPM chip to use.
+ * @pcr_idx: index of the PCR to read.
+ * @digest: PCR bank and buffer current PCR value is written to.
+ * @digest_size_ptr: pointer to variable that stores the digest size.
+ *
+ * Return: Same as with tpm_transmit_cmd.
+ */
+int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digest, u16 *digest_size_ptr)
+{
+ int i;
+ int rc;
+ struct tpm_buf buf;
+ struct tpm2_pcr_read_out *out;
+ u8 pcr_select[TPM2_PCR_SELECT_MIN] = {0};
+ u16 digest_size;
+ u16 expected_digest_size = 0;
+
+ if (pcr_idx >= TPM2_PLATFORM_PCR)
+ return -EINVAL;
+
+ if (!digest_size_ptr) {
+ for (i = 0; i < chip->nr_allocated_banks &&
+ chip->allocated_banks[i].alg_id != digest->alg_id; i++)
+ ;
+
+ if (i == chip->nr_allocated_banks)
+ return -EINVAL;
+
+ expected_digest_size = chip->allocated_banks[i].digest_size;
+ }
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_PCR_READ);
+ if (rc)
+ return rc;
+
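+	/* Select the requested PCR by setting its bit in the select bitmap. */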
+ pcr_select[pcr_idx >> 3] = 1 << (pcr_idx & 0x7);
+
+ tpm_buf_append_u32(&buf, 1);
+ tpm_buf_append_u16(&buf, digest->alg_id);
+ tpm_buf_append_u8(&buf, TPM2_PCR_SELECT_MIN);
+ tpm_buf_append(&buf, (const unsigned char *)pcr_select,
+ sizeof(pcr_select));
+
+ rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to read a pcr value");
+ if (rc)
+ goto out;
+
+ out = (struct tpm2_pcr_read_out *)&buf.data[TPM_HEADER_SIZE];
+ digest_size = be16_to_cpu(out->digest_size);
+ if (digest_size > sizeof(digest->digest) ||
+ (!digest_size_ptr && digest_size != expected_digest_size)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (digest_size_ptr)
+ *digest_size_ptr = digest_size;
+
+ memcpy(digest->digest, out->digest, digest_size);
+out:
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+struct tpm2_null_auth_area {
+ __be32 handle;
+ __be16 nonce_size;
+ u8 attributes;
+ __be16 auth_size;
+} __packed;
+
+/**
+ * tpm2_pcr_extend() - extend a PCR value
+ *
+ * @chip: TPM chip to use.
+ * @pcr_idx: index of the PCR.
+ * @digests: list of pcr banks and corresponding digest values to extend.
+ *
+ * Return: Same as with tpm_transmit_cmd.
+ */
+int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digests)
+{
+ struct tpm_buf buf;
+ struct tpm2_null_auth_area auth_area;
+ int rc;
+ int i;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, pcr_idx);
+
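+	/* Authorize with an empty (NULL password) session for the PCR handle. */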
+ auth_area.handle = cpu_to_be32(TPM2_RS_PW);
+ auth_area.nonce_size = 0;
+ auth_area.attributes = 0;
+ auth_area.auth_size = 0;
+
+ tpm_buf_append_u32(&buf, sizeof(struct tpm2_null_auth_area));
+ tpm_buf_append(&buf, (const unsigned char *)&auth_area,
+ sizeof(auth_area));
+ tpm_buf_append_u32(&buf, chip->nr_allocated_banks);
+
+ for (i = 0; i < chip->nr_allocated_banks; i++) {
+ tpm_buf_append_u16(&buf, digests[i].alg_id);
+ tpm_buf_append(&buf, (const unsigned char *)&digests[i].digest,
+ chip->allocated_banks[i].digest_size);
+ }
+
+	rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to extend a PCR value");
+
+ tpm_buf_destroy(&buf);
+
+ return rc;
+}
+
+struct tpm2_get_random_out {
+ __be16 size;
+ u8 buffer[TPM_MAX_RNG_DATA];
+} __packed;
+
+/**
+ * tpm2_get_random() - get random bytes from the TPM RNG
+ *
+ * @chip: a &tpm_chip instance
+ * @dest: destination buffer
+ * @max: the max number of random bytes to pull
+ *
+ * Return:
+ *   the number of random bytes read on success,
+ * -errno otherwise (positive TPM return codes are masked to -EIO)
+ */
+int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
+{
+ struct tpm2_get_random_out *out;
+ struct tpm_buf buf;
+ u32 recd;
+ u32 num_bytes = max;
+ int err;
+ int total = 0;
+ int retries = 5;
+ u8 *dest_ptr = dest;
+
+ if (!num_bytes || max > TPM_MAX_RNG_DATA)
+ return -EINVAL;
+
+ err = tpm_buf_init(&buf, 0, 0);
+ if (err)
+ return err;
+
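+	/* The real command header is written by tpm_buf_reset() on every retry. */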
+ do {
+ tpm_buf_reset(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
+ tpm_buf_append_u16(&buf, num_bytes);
+ err = tpm_transmit_cmd(chip, &buf,
+ offsetof(struct tpm2_get_random_out,
+ buffer),
+ "attempting get random");
+ if (err) {
+ if (err > 0)
+ err = -EIO;
+ goto out;
+ }
+
+ out = (struct tpm2_get_random_out *)
+ &buf.data[TPM_HEADER_SIZE];
+ recd = min_t(u32, be16_to_cpu(out->size), num_bytes);
+ if (tpm_buf_length(&buf) <
+ TPM_HEADER_SIZE +
+ offsetof(struct tpm2_get_random_out, buffer) +
+ recd) {
+ err = -EFAULT;
+ goto out;
+ }
+ memcpy(dest_ptr, out->buffer, recd);
+
+ dest_ptr += recd;
+ total += recd;
+ num_bytes -= recd;
+ } while (retries-- && total < max);
+
+ tpm_buf_destroy(&buf);
+ return total ? total : -EIO;
+out:
+ tpm_buf_destroy(&buf);
+ return err;
+}
+
+/**
+ * tpm2_flush_context() - execute a TPM2_FlushContext command
+ * @chip: TPM chip to use
+ * @handle: context handle
+ */
+void tpm2_flush_context(struct tpm_chip *chip, u32 handle)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_FLUSH_CONTEXT);
+ if (rc) {
+ dev_warn(&chip->dev, "0x%08x was not flushed, out of memory\n",
+ handle);
+ return;
+ }
+
+ tpm_buf_append_u32(&buf, handle);
+
+ tpm_transmit_cmd(chip, &buf, 0, "flushing context");
+ tpm_buf_destroy(&buf);
+}
+EXPORT_SYMBOL_GPL(tpm2_flush_context);
+
+struct tpm2_get_cap_out {
+ u8 more_data;
+ __be32 subcap_id;
+ __be32 property_cnt;
+ __be32 property_id;
+ __be32 value;
+} __packed;
+
+/**
+ * tpm2_get_tpm_pt() - get value of a TPM_CAP_TPM_PROPERTIES type property
+ * @chip: a &tpm_chip instance
+ * @property_id: property ID.
+ * @value: output variable.
+ * @desc: passed to tpm_transmit_cmd()
+ *
+ * Return:
+ * 0 on success,
+ * -errno or a TPM return code otherwise
+ */
+ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
+ const char *desc)
+{
+ struct tpm2_get_cap_out *out;
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+ if (rc)
+ return rc;
+ tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES);
+ tpm_buf_append_u32(&buf, property_id);
+ tpm_buf_append_u32(&buf, 1);
+ rc = tpm_transmit_cmd(chip, &buf, 0, NULL);
+ if (!rc) {
+ out = (struct tpm2_get_cap_out *)
+ &buf.data[TPM_HEADER_SIZE];
+ /*
+		 * To prevent boot failures on some systems, Infineon TPM 2.0
+		 * chips return SUCCESS on TPM2_Startup while in field upgrade
+		 * mode. The TPM2_GetCapability command also returns a
+		 * zero-length list in that mode.
+ */
+ if (be32_to_cpu(out->property_cnt) > 0)
+ *value = be32_to_cpu(out->value);
+ else
+ rc = -ENODATA;
+ }
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm2_get_tpm_pt);
+
+/**
+ * tpm2_shutdown() - send a TPM shutdown command
+ *
+ * Sends a TPM shutdown command. The shutdown command is used in call
+ * sites where the system is going down. If it fails, there is not much
+ * that can be done except print an error message.
+ *
+ * @chip: a &tpm_chip instance
+ * @shutdown_type: TPM_SU_CLEAR or TPM_SU_STATE.
+ */
+void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_SHUTDOWN);
+ if (rc)
+ return;
+ tpm_buf_append_u16(&buf, shutdown_type);
+ tpm_transmit_cmd(chip, &buf, 0, "stopping the TPM");
+ tpm_buf_destroy(&buf);
+}
+
+/**
+ * tpm2_do_selftest() - ensure that all self tests have passed
+ *
+ * @chip: TPM chip to use
+ *
+ * Return: Same as with tpm_transmit_cmd.
+ *
+ * The TPM can either run all self tests synchronously and then return
+ * RC_SUCCESS once all tests have passed, or it can run the tests
+ * asynchronously and return RC_TESTING immediately while the self tests still
+ * execute in the background. This function handles both cases and waits until
+ * all tests have completed.
+ */
+static int tpm2_do_selftest(struct tpm_chip *chip)
+{
+ struct tpm_buf buf;
+ int full;
+ int rc;
+
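+	/* The first pass requests a partial self test, the second a full one. */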
+ for (full = 0; full < 2; full++) {
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_SELF_TEST);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u8(&buf, full);
+ rc = tpm_transmit_cmd(chip, &buf, 0,
+ "attempting the self test");
+ tpm_buf_destroy(&buf);
+
+ if (rc == TPM2_RC_TESTING)
+ rc = TPM2_RC_SUCCESS;
+ if (rc == TPM2_RC_INITIALIZE || rc == TPM2_RC_SUCCESS)
+ return rc;
+ }
+
+ return rc;
+}
+
+/**
+ * tpm2_probe() - probe for the TPM 2.0 protocol
+ * @chip: a &tpm_chip instance
+ *
+ * Send an idempotent TPM 2.0 command and see whether there is a TPM2 chip on
+ * the other end based on the response tag. The flag TPM_CHIP_FLAG_TPM2 is set by
+ * this function if this is the case.
+ *
+ * Return:
+ * 0 on success,
+ * -errno otherwise
+ */
+int tpm2_probe(struct tpm_chip *chip)
+{
+ struct tpm_header *out;
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+ if (rc)
+ return rc;
+ tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES);
+ tpm_buf_append_u32(&buf, TPM_PT_TOTAL_COMMANDS);
+ tpm_buf_append_u32(&buf, 1);
+ rc = tpm_transmit_cmd(chip, &buf, 0, NULL);
+ /* We ignore TPM return codes on purpose. */
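+	/* A TPM 1.2 answers with a 1.2 tag; only a TPM2 echoes TPM2_ST_NO_SESSIONS. */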
+ if (rc >= 0) {
+ out = (struct tpm_header *)buf.data;
+ if (be16_to_cpu(out->tag) == TPM2_ST_NO_SESSIONS)
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+ }
+ tpm_buf_destroy(&buf);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm2_probe);
+
+static int tpm2_init_bank_info(struct tpm_chip *chip, u32 bank_index)
+{
+ struct tpm_bank_info *bank = chip->allocated_banks + bank_index;
+ struct tpm_digest digest = { .alg_id = bank->alg_id };
+ int i;
+
+ /*
+	 * Look the bank up in the static map first to obtain the crypto
+	 * subsystem identifiers and avoid an unnecessary PCR read.
+ */
+ for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++) {
+ enum hash_algo crypto_algo = tpm2_hash_map[i].crypto_id;
+
+ if (bank->alg_id != tpm2_hash_map[i].tpm_id)
+ continue;
+
+ bank->digest_size = hash_digest_size[crypto_algo];
+ bank->crypto_id = crypto_algo;
+ return 0;
+ }
+
+ bank->crypto_id = HASH_ALGO__LAST;
+
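+	/* Bank unknown to crypto: read PCR 0 only to learn the bank's digest size. */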
+ return tpm2_pcr_read(chip, 0, &digest, &bank->digest_size);
+}
+
+struct tpm2_pcr_selection {
+ __be16 hash_alg;
+ u8 size_of_select;
+ u8 pcr_select[3];
+} __packed;
+
+ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
+{
+ struct tpm2_pcr_selection pcr_selection;
+ struct tpm_buf buf;
+ void *marker;
+ void *end;
+ void *pcr_select_offset;
+ u32 sizeof_pcr_selection;
+ u32 nr_possible_banks;
+ u32 nr_alloc_banks = 0;
+ u16 hash_alg;
+ u32 rsp_len;
+ int rc;
+ int i = 0;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, TPM2_CAP_PCRS);
+ tpm_buf_append_u32(&buf, 0);
+ tpm_buf_append_u32(&buf, 1);
+
+ rc = tpm_transmit_cmd(chip, &buf, 9, "get tpm pcr allocation");
+ if (rc)
+ goto out;
+
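+	/* Body: 1-byte moreData, 4-byte capability, 4-byte count, then selections. */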
+ nr_possible_banks = be32_to_cpup(
+ (__be32 *)&buf.data[TPM_HEADER_SIZE + 5]);
+
+ chip->allocated_banks = kcalloc(nr_possible_banks,
+ sizeof(*chip->allocated_banks),
+ GFP_KERNEL);
+ if (!chip->allocated_banks) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ marker = &buf.data[TPM_HEADER_SIZE + 9];
+
+ rsp_len = be32_to_cpup((__be32 *)&buf.data[2]);
+ end = &buf.data[rsp_len];
+
+ for (i = 0; i < nr_possible_banks; i++) {
+ pcr_select_offset = marker +
+ offsetof(struct tpm2_pcr_selection, size_of_select);
+ if (pcr_select_offset >= end) {
+ rc = -EFAULT;
+ break;
+ }
+
+ memcpy(&pcr_selection, marker, sizeof(pcr_selection));
+ hash_alg = be16_to_cpu(pcr_selection.hash_alg);
+
+ pcr_select_offset = memchr_inv(pcr_selection.pcr_select, 0,
+ pcr_selection.size_of_select);
+ if (pcr_select_offset) {
+ chip->allocated_banks[nr_alloc_banks].alg_id = hash_alg;
+
+ rc = tpm2_init_bank_info(chip, nr_alloc_banks);
+ if (rc < 0)
+ break;
+
+ nr_alloc_banks++;
+ }
+
+ sizeof_pcr_selection = sizeof(pcr_selection.hash_alg) +
+ sizeof(pcr_selection.size_of_select) +
+ pcr_selection.size_of_select;
+ marker = marker + sizeof_pcr_selection;
+ }
+
+ chip->nr_allocated_banks = nr_alloc_banks;
+out:
+ tpm_buf_destroy(&buf);
+
+ return rc;
+}
+
+int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
+{
+ struct tpm_buf buf;
+ u32 nr_commands;
+ __be32 *attrs;
+ u32 cc;
+ int i;
+ int rc;
+
+ rc = tpm2_get_tpm_pt(chip, TPM_PT_TOTAL_COMMANDS, &nr_commands, NULL);
+ if (rc)
+ goto out;
+
+ if (nr_commands > 0xFFFFF) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ chip->cc_attrs_tbl = devm_kcalloc(&chip->dev, 4, nr_commands,
+ GFP_KERNEL);
+ if (!chip->cc_attrs_tbl) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+ if (rc)
+ goto out;
+
+ tpm_buf_append_u32(&buf, TPM2_CAP_COMMANDS);
+ tpm_buf_append_u32(&buf, TPM2_CC_FIRST);
+ tpm_buf_append_u32(&buf, nr_commands);
+
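+	/* Expect 9 bytes of capability framing plus one 4-byte TPMA_CC per command. */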
+ rc = tpm_transmit_cmd(chip, &buf, 9 + 4 * nr_commands, NULL);
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ goto out;
+ }
+
+ if (nr_commands !=
+ be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) {
+ rc = -EFAULT;
+ tpm_buf_destroy(&buf);
+ goto out;
+ }
+
+ chip->nr_commands = nr_commands;
+
+ attrs = (__be32 *)&buf.data[TPM_HEADER_SIZE + 9];
+ for (i = 0; i < nr_commands; i++, attrs++) {
+ chip->cc_attrs_tbl[i] = be32_to_cpup(attrs);
+ cc = chip->cc_attrs_tbl[i] & 0xFFFF;
+
+ if (cc == TPM2_CC_CONTEXT_SAVE || cc == TPM2_CC_FLUSH_CONTEXT) {
+ chip->cc_attrs_tbl[i] &=
+ ~(GENMASK(2, 0) << TPM2_CC_ATTR_CHANDLES);
+ chip->cc_attrs_tbl[i] |= 1 << TPM2_CC_ATTR_CHANDLES;
+ }
+ }
+
+ tpm_buf_destroy(&buf);
+
+out:
+ if (rc > 0)
+ rc = -ENODEV;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm2_get_cc_attrs_tbl);
+
+/**
+ * tpm2_startup - turn on the TPM
+ * @chip: TPM chip to use
+ *
+ * Normally the firmware should start the TPM. This function is provided as a
+ * workaround if this does not happen. A legitimate case for this is, for
+ * example, when a TPM emulator is used.
+ *
+ * Return: same as tpm_transmit_cmd()
+ */
+static int tpm2_startup(struct tpm_chip *chip)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ dev_info(&chip->dev, "starting up the TPM manually\n");
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_STARTUP);
+ if (rc < 0)
+ return rc;
+
+ tpm_buf_append_u16(&buf, TPM2_SU_CLEAR);
+ rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to start the TPM");
+ tpm_buf_destroy(&buf);
+
+ return rc;
+}
+
+/**
+ * tpm2_auto_startup - Perform the standard automatic TPM initialization
+ * sequence
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error.
+ */
+int tpm2_auto_startup(struct tpm_chip *chip)
+{
+ int rc;
+
+ rc = tpm2_get_timeouts(chip);
+ if (rc)
+ goto out;
+
+ rc = tpm2_do_selftest(chip);
+ if (rc && rc != TPM2_RC_INITIALIZE)
+ goto out;
+
+ if (rc == TPM2_RC_INITIALIZE) {
+ rc = tpm2_startup(chip);
+ if (rc)
+ goto out;
+
+ rc = tpm2_do_selftest(chip);
+ if (rc)
+ goto out;
+ }
+
+ rc = tpm2_get_cc_attrs_tbl(chip);
+ if (rc == TPM2_RC_FAILURE || (rc < 0 && rc != -ENOMEM)) {
+ dev_info(&chip->dev,
+ "TPM in field failure mode, requires firmware upgrade\n");
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ rc = 0;
+ }
+
+out:
+ /*
+ * Infineon TPM in field upgrade mode will return no data for the number
+ * of supported commands.
+ */
+ if (rc == TPM2_RC_UPGRADE || rc == -ENODATA) {
+ dev_info(&chip->dev, "TPM in field upgrade mode, requires firmware upgrade\n");
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ rc = 0;
+ }
+
+ if (rc > 0)
+ rc = -ENODEV;
+ return rc;
+}
+
+int tpm2_find_cc(struct tpm_chip *chip, u32 cc)
+{
+ u32 cc_mask;
+ int i;
+
+ cc_mask = 1 << TPM2_CC_ATTR_VENDOR | GENMASK(15, 0);
+ for (i = 0; i < chip->nr_commands; i++)
+ if (cc == (chip->cc_attrs_tbl[i] & cc_mask))
+ return i;
+
+ return -1;
+}
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
new file mode 100644
index 0000000000..363afdd4d1
--- /dev/null
+++ b/drivers/char/tpm/tpm2-space.c
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Authors:
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * This file contains TPM2 protocol implementations of the commands
+ * used by the kernel internally.
+ */
+
+#include <linux/gfp.h>
+#include <asm/unaligned.h>
+#include "tpm.h"
+
+enum tpm2_handle_types {
+ TPM2_HT_HMAC_SESSION = 0x02000000,
+ TPM2_HT_POLICY_SESSION = 0x03000000,
+ TPM2_HT_TRANSIENT = 0x80000000,
+};
+
+struct tpm2_context {
+ __be64 sequence;
+ __be32 saved_handle;
+ __be32 hierarchy;
+ __be16 blob_size;
+} __packed;
+
+static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
+ if (space->session_tbl[i])
+ tpm2_flush_context(chip, space->session_tbl[i]);
+ }
+}
+
+int tpm2_init_space(struct tpm_space *space, unsigned int buf_size)
+{
+ space->context_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!space->context_buf)
+ return -ENOMEM;
+
+ space->session_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (space->session_buf == NULL) {
+ kfree(space->context_buf);
+		/* Prevent the caller from getting a dangling pointer. */
+ space->context_buf = NULL;
+ return -ENOMEM;
+ }
+
+ space->buf_size = buf_size;
+ return 0;
+}
+
+void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space)
+{
+
+ if (tpm_try_get_ops(chip) == 0) {
+ tpm2_flush_sessions(chip, space);
+ tpm_put_ops(chip);
+ }
+
+ kfree(space->context_buf);
+ kfree(space->session_buf);
+}
+
+static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
+ unsigned int *offset, u32 *handle)
+{
+ struct tpm_buf tbuf;
+ struct tpm2_context *ctx;
+ unsigned int body_size;
+ int rc;
+
+ rc = tpm_buf_init(&tbuf, TPM2_ST_NO_SESSIONS, TPM2_CC_CONTEXT_LOAD);
+ if (rc)
+ return rc;
+
+ ctx = (struct tpm2_context *)&buf[*offset];
+ body_size = sizeof(*ctx) + be16_to_cpu(ctx->blob_size);
+ tpm_buf_append(&tbuf, &buf[*offset], body_size);
+
+ rc = tpm_transmit_cmd(chip, &tbuf, 4, NULL);
+ if (rc < 0) {
+ dev_warn(&chip->dev, "%s: failed with a system error %d\n",
+ __func__, rc);
+ tpm_buf_destroy(&tbuf);
+ return -EFAULT;
+ } else if (tpm2_rc_value(rc) == TPM2_RC_HANDLE ||
+ rc == TPM2_RC_REFERENCE_H0) {
+ /*
+ * TPM_RC_HANDLE means that the session context can't
+ * be loaded because of an internal counter mismatch
+ * that makes the TPM think there might have been a
+ * replay. This might happen if the context was saved
+ * and loaded outside the space.
+ *
+ * TPM_RC_REFERENCE_H0 means the session has been
+ * flushed outside the space
+ */
+ *handle = 0;
+ tpm_buf_destroy(&tbuf);
+ return -ENOENT;
+ } else if (rc > 0) {
+ dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n",
+ __func__, rc);
+ tpm_buf_destroy(&tbuf);
+ return -EFAULT;
+ }
+
+ *handle = be32_to_cpup((__be32 *)&tbuf.data[TPM_HEADER_SIZE]);
+ *offset += body_size;
+
+ tpm_buf_destroy(&tbuf);
+ return 0;
+}
+
+static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
+ unsigned int buf_size, unsigned int *offset)
+{
+ struct tpm_buf tbuf;
+ unsigned int body_size;
+ int rc;
+
+ rc = tpm_buf_init(&tbuf, TPM2_ST_NO_SESSIONS, TPM2_CC_CONTEXT_SAVE);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&tbuf, handle);
+
+ rc = tpm_transmit_cmd(chip, &tbuf, 0, NULL);
+ if (rc < 0) {
+ dev_warn(&chip->dev, "%s: failed with a system error %d\n",
+ __func__, rc);
+ tpm_buf_destroy(&tbuf);
+ return -EFAULT;
+ } else if (tpm2_rc_value(rc) == TPM2_RC_REFERENCE_H0) {
+ tpm_buf_destroy(&tbuf);
+ return -ENOENT;
+ } else if (rc) {
+ dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n",
+ __func__, rc);
+ tpm_buf_destroy(&tbuf);
+ return -EFAULT;
+ }
+
+ body_size = tpm_buf_length(&tbuf) - TPM_HEADER_SIZE;
+ if ((*offset + body_size) > buf_size) {
+ dev_warn(&chip->dev, "%s: out of backing storage\n", __func__);
+ tpm_buf_destroy(&tbuf);
+ return -ENOMEM;
+ }
+
+ memcpy(&buf[*offset], &tbuf.data[TPM_HEADER_SIZE], body_size);
+ *offset += body_size;
+ tpm_buf_destroy(&tbuf);
+ return 0;
+}
+
+void tpm2_flush_space(struct tpm_chip *chip)
+{
+ struct tpm_space *space = &chip->work_space;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
+ if (space->context_tbl[i] && ~space->context_tbl[i])
+ tpm2_flush_context(chip, space->context_tbl[i]);
+
+ tpm2_flush_sessions(chip, space);
+}
+
+static int tpm2_load_space(struct tpm_chip *chip)
+{
+ struct tpm_space *space = &chip->work_space;
+ unsigned int offset;
+ int i;
+ int rc;
+
+ for (i = 0, offset = 0; i < ARRAY_SIZE(space->context_tbl); i++) {
+ if (!space->context_tbl[i])
+ continue;
+
+ /* sanity check, should never happen */
+ if (~space->context_tbl[i]) {
+ dev_err(&chip->dev, "context table is inconsistent");
+ return -EFAULT;
+ }
+
+ rc = tpm2_load_context(chip, space->context_buf, &offset,
+ &space->context_tbl[i]);
+ if (rc)
+ return rc;
+ }
+
+ for (i = 0, offset = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
+ u32 handle;
+
+ if (!space->session_tbl[i])
+ continue;
+
+ rc = tpm2_load_context(chip, space->session_buf,
+ &offset, &handle);
+ if (rc == -ENOENT) {
+ /* load failed, just forget session */
+ space->session_tbl[i] = 0;
+ } else if (rc) {
+ tpm2_flush_space(chip);
+ return rc;
+ }
+ if (handle != space->session_tbl[i]) {
+ dev_warn(&chip->dev, "session restored to wrong handle\n");
+ tpm2_flush_space(chip);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static bool tpm2_map_to_phandle(struct tpm_space *space, void *handle)
+{
+ u32 vhandle = be32_to_cpup((__be32 *)handle);
+ u32 phandle;
+ int i;
+
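+	/* Virtual handles are TPM2_HT_TRANSIENT | (0xFFFFFF - index). */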
+ i = 0xFFFFFF - (vhandle & 0xFFFFFF);
+ if (i >= ARRAY_SIZE(space->context_tbl) || !space->context_tbl[i])
+ return false;
+
+ phandle = space->context_tbl[i];
+ *((__be32 *)handle) = cpu_to_be32(phandle);
+ return true;
+}
+
+static int tpm2_map_command(struct tpm_chip *chip, u32 cc, u8 *cmd)
+{
+ struct tpm_space *space = &chip->work_space;
+ unsigned int nr_handles;
+ u32 attrs;
+ __be32 *handle;
+ int i;
+
+ i = tpm2_find_cc(chip, cc);
+ if (i < 0)
+ return -EINVAL;
+
+ attrs = chip->cc_attrs_tbl[i];
+ nr_handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0);
+
+ handle = (__be32 *)&cmd[TPM_HEADER_SIZE];
+ for (i = 0; i < nr_handles; i++, handle++) {
+ if ((be32_to_cpu(*handle) & 0xFF000000) == TPM2_HT_TRANSIENT) {
+ if (!tpm2_map_to_phandle(space, handle))
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int tpm_find_and_validate_cc(struct tpm_chip *chip,
+ struct tpm_space *space,
+ const void *cmd, size_t len)
+{
+ const struct tpm_header *header = (const void *)cmd;
+ int i;
+ u32 cc;
+ u32 attrs;
+ unsigned int nr_handles;
+
+ if (len < TPM_HEADER_SIZE || !chip->nr_commands)
+ return -EINVAL;
+
+ cc = be32_to_cpu(header->ordinal);
+
+ i = tpm2_find_cc(chip, cc);
+ if (i < 0) {
+ dev_dbg(&chip->dev, "0x%04X is an invalid command\n",
+ cc);
+ return -EOPNOTSUPP;
+ }
+
+ attrs = chip->cc_attrs_tbl[i];
+ nr_handles =
+ 4 * ((attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0));
+ if (len < TPM_HEADER_SIZE + 4 * nr_handles)
+ goto err_len;
+
+ return cc;
+err_len:
+ dev_dbg(&chip->dev, "%s: insufficient command length %zu", __func__,
+ len);
+ return -EINVAL;
+}
+
+int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
+ size_t cmdsiz)
+{
+ int rc;
+ int cc;
+
+ if (!space)
+ return 0;
+
+ cc = tpm_find_and_validate_cc(chip, space, cmd, cmdsiz);
+ if (cc < 0)
+ return cc;
+
+ memcpy(&chip->work_space.context_tbl, &space->context_tbl,
+ sizeof(space->context_tbl));
+ memcpy(&chip->work_space.session_tbl, &space->session_tbl,
+ sizeof(space->session_tbl));
+ memcpy(chip->work_space.context_buf, space->context_buf,
+ space->buf_size);
+ memcpy(chip->work_space.session_buf, space->session_buf,
+ space->buf_size);
+
+ rc = tpm2_load_space(chip);
+ if (rc) {
+ tpm2_flush_space(chip);
+ return rc;
+ }
+
+ rc = tpm2_map_command(chip, cc, cmd);
+ if (rc) {
+ tpm2_flush_space(chip);
+ return rc;
+ }
+
+ chip->last_cc = cc;
+ return 0;
+}
+
+static bool tpm2_add_session(struct tpm_chip *chip, u32 handle)
+{
+ struct tpm_space *space = &chip->work_space;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++)
+ if (space->session_tbl[i] == 0)
+ break;
+
+ if (i == ARRAY_SIZE(space->session_tbl))
+ return false;
+
+ space->session_tbl[i] = handle;
+ return true;
+}
+
+static u32 tpm2_map_to_vhandle(struct tpm_space *space, u32 phandle, bool alloc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++) {
+ if (alloc) {
+ if (!space->context_tbl[i]) {
+ space->context_tbl[i] = phandle;
+ break;
+ }
+ } else if (space->context_tbl[i] == phandle)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(space->context_tbl))
+ return 0;
+
+ return TPM2_HT_TRANSIENT | (0xFFFFFF - i);
+}
+
+static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp,
+ size_t len)
+{
+ struct tpm_space *space = &chip->work_space;
+ struct tpm_header *header = (struct tpm_header *)rsp;
+ u32 phandle;
+ u32 phandle_type;
+ u32 vhandle;
+ u32 attrs;
+ int i;
+
+ if (be32_to_cpu(header->return_code) != TPM2_RC_SUCCESS)
+ return 0;
+
+ i = tpm2_find_cc(chip, cc);
+ /* sanity check, should never happen */
+ if (i < 0)
+ return -EFAULT;
+
+ attrs = chip->cc_attrs_tbl[i];
+ if (!((attrs >> TPM2_CC_ATTR_RHANDLE) & 1))
+ return 0;
+
+ phandle = be32_to_cpup((__be32 *)&rsp[TPM_HEADER_SIZE]);
+ phandle_type = phandle & 0xFF000000;
+
+ switch (phandle_type) {
+ case TPM2_HT_TRANSIENT:
+ vhandle = tpm2_map_to_vhandle(space, phandle, true);
+ if (!vhandle)
+ goto out_no_slots;
+
+ *(__be32 *)&rsp[TPM_HEADER_SIZE] = cpu_to_be32(vhandle);
+ break;
+ case TPM2_HT_HMAC_SESSION:
+ case TPM2_HT_POLICY_SESSION:
+ if (!tpm2_add_session(chip, phandle))
+ goto out_no_slots;
+ break;
+ default:
+ dev_err(&chip->dev, "%s: unknown handle 0x%08X\n",
+ __func__, phandle);
+ break;
+ }
+
+ return 0;
+out_no_slots:
+ tpm2_flush_context(chip, phandle);
+ dev_warn(&chip->dev, "%s: out of slots for 0x%08X\n", __func__,
+ phandle);
+ return -ENOMEM;
+}
+
+struct tpm2_cap_handles {
+ u8 more_data;
+ __be32 capability;
+ __be32 count;
+ __be32 handles[];
+} __packed;
+
+static int tpm2_map_response_body(struct tpm_chip *chip, u32 cc, u8 *rsp,
+ size_t len)
+{
+ struct tpm_space *space = &chip->work_space;
+ struct tpm_header *header = (struct tpm_header *)rsp;
+ struct tpm2_cap_handles *data;
+ u32 phandle;
+ u32 phandle_type;
+ u32 vhandle;
+ int i;
+ int j;
+
+ if (cc != TPM2_CC_GET_CAPABILITY ||
+ be32_to_cpu(header->return_code) != TPM2_RC_SUCCESS) {
+ return 0;
+ }
+
+ if (len < TPM_HEADER_SIZE + 9)
+ return -EFAULT;
+
+ data = (void *)&rsp[TPM_HEADER_SIZE];
+ if (be32_to_cpu(data->capability) != TPM2_CAP_HANDLES)
+ return 0;
+
+ if (be32_to_cpu(data->count) > (UINT_MAX - TPM_HEADER_SIZE - 9) / 4)
+ return -EFAULT;
+
+ if (len != TPM_HEADER_SIZE + 9 + 4 * be32_to_cpu(data->count))
+ return -EFAULT;
+
+ for (i = 0, j = 0; i < be32_to_cpu(data->count); i++) {
+ phandle = be32_to_cpup((__be32 *)&data->handles[i]);
+ phandle_type = phandle & 0xFF000000;
+
+ switch (phandle_type) {
+ case TPM2_HT_TRANSIENT:
+ vhandle = tpm2_map_to_vhandle(space, phandle, false);
+ if (!vhandle)
+ break;
+
+ data->handles[j] = cpu_to_be32(vhandle);
+ j++;
+ break;
+
+ default:
+ data->handles[j] = cpu_to_be32(phandle);
+ j++;
+ break;
+ }
+
+ }
+
+ header->length = cpu_to_be32(TPM_HEADER_SIZE + 9 + 4 * j);
+ data->count = cpu_to_be32(j);
+ return 0;
+}
+
+static int tpm2_save_space(struct tpm_chip *chip)
+{
+ struct tpm_space *space = &chip->work_space;
+ unsigned int offset;
+ int i;
+ int rc;
+
+ for (i = 0, offset = 0; i < ARRAY_SIZE(space->context_tbl); i++) {
+ if (!(space->context_tbl[i] && ~space->context_tbl[i]))
+ continue;
+
+ rc = tpm2_save_context(chip, space->context_tbl[i],
+ space->context_buf, space->buf_size,
+ &offset);
+ if (rc == -ENOENT) {
+ space->context_tbl[i] = 0;
+ continue;
+ } else if (rc)
+ return rc;
+
+ tpm2_flush_context(chip, space->context_tbl[i]);
+ space->context_tbl[i] = ~0;
+ }
+
+ for (i = 0, offset = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
+ if (!space->session_tbl[i])
+ continue;
+
+ rc = tpm2_save_context(chip, space->session_tbl[i],
+ space->session_buf, space->buf_size,
+ &offset);
+ if (rc == -ENOENT) {
+ /* handle error saving session, just forget it */
+ space->session_tbl[i] = 0;
+ } else if (rc < 0) {
+ tpm2_flush_space(chip);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
+ void *buf, size_t *bufsiz)
+{
+ struct tpm_header *header = buf;
+ int rc;
+
+ if (!space)
+ return 0;
+
+ rc = tpm2_map_response_header(chip, chip->last_cc, buf, *bufsiz);
+ if (rc) {
+ tpm2_flush_space(chip);
+ goto out;
+ }
+
+ rc = tpm2_map_response_body(chip, chip->last_cc, buf, *bufsiz);
+ if (rc) {
+ tpm2_flush_space(chip);
+ goto out;
+ }
+
+ rc = tpm2_save_space(chip);
+ if (rc) {
+ tpm2_flush_space(chip);
+ goto out;
+ }
+
+ *bufsiz = be32_to_cpu(header->length);
+
+ memcpy(&space->context_tbl, &chip->work_space.context_tbl,
+ sizeof(space->context_tbl));
+ memcpy(&space->session_tbl, &chip->work_space.session_tbl,
+ sizeof(space->session_tbl));
+ memcpy(space->context_buf, chip->work_space.context_buf,
+ space->buf_size);
+ memcpy(space->session_buf, chip->work_space.session_buf,
+ space->buf_size);
+
+ return 0;
+out:
+ dev_err(&chip->dev, "%s: error %d\n", __func__, rc);
+ return rc;
+}
+
+/*
+ * Put the reference to the main device.
+ */
+static void tpm_devs_release(struct device *dev)
+{
+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs);
+
+ /* release the master device reference */
+ put_device(&chip->dev);
+}
+
+/*
+ * Remove the device file for exposed TPM spaces and release the device
+ * reference. This may also release the reference to the master device.
+ */
+void tpm_devs_remove(struct tpm_chip *chip)
+{
+ cdev_device_del(&chip->cdevs, &chip->devs);
+ put_device(&chip->devs);
+}
+
+/*
+ * Add a device file to expose TPM spaces. Also take a reference to the
+ * main device.
+ */
+int tpm_devs_add(struct tpm_chip *chip)
+{
+ int rc;
+
+ device_initialize(&chip->devs);
+ chip->devs.parent = chip->dev.parent;
+ chip->devs.class = &tpmrm_class;
+
+ /*
+	 * Get an extra reference on the main device to hold on behalf of devs.
+	 * This holds the chip structure while cdevs is in use. The
+	 * corresponding put is in tpm_devs_release().
+ */
+ get_device(&chip->dev);
+ chip->devs.release = tpm_devs_release;
+ chip->devs.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES);
+ cdev_init(&chip->cdevs, &tpmrm_fops);
+ chip->cdevs.owner = THIS_MODULE;
+
+ rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num);
+ if (rc)
+ goto err_put_devs;
+
+ rc = cdev_device_add(&chip->cdevs, &chip->devs);
+ if (rc) {
+ dev_err(&chip->devs,
+ "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
+ dev_name(&chip->devs), MAJOR(chip->devs.devt),
+ MINOR(chip->devs.devt), rc);
+ goto err_put_devs;
+ }
+
+ return 0;
+
+err_put_devs:
+ put_device(&chip->devs);
+
+ return rc;
+}
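
For reference, the space code above virtualizes transient object handles: each TPM space keeps a fixed-size context table, and the handle handed back to the caller encodes the table slot rather than the TPM's physical handle (TPM2_HT_TRANSIENT | (0xFFFFFF - slot)). The stand-alone sketch below shows only that slot/handle round trip; the constant and helper names are invented for the illustration and are not part of the driver.

#include <assert.h>
#include <stdint.h>

#define SKETCH_HT_TRANSIENT 0x80000000u	/* handle-type prefix for transient objects (assumed) */

/* Slot index -> virtual handle, mirroring tpm2_map_to_vhandle() above. */
static uint32_t slot_to_vhandle(unsigned int slot)
{
	return SKETCH_HT_TRANSIENT | (0xFFFFFFu - slot);
}

/* Virtual handle -> slot index, the inverse used when mapping commands. */
static unsigned int vhandle_to_slot(uint32_t vhandle)
{
	return 0xFFFFFFu - (vhandle & 0xFFFFFFu);
}

int main(void)
{
	/* The mapping is a bijection over the table's slot range. */
	for (unsigned int i = 0; i < 16; i++)
		assert(vhandle_to_slot(slot_to_vhandle(i)) == i);
	return 0;
}
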
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
new file mode 100644
index 0000000000..54a6750a67
--- /dev/null
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ */
+
+#include "tpm.h"
+#include "tpm_atmel.h"
+
+/* write status bits */
+enum tpm_atmel_write_status {
+ ATML_STATUS_ABORT = 0x01,
+ ATML_STATUS_LASTBYTE = 0x04
+};
+/* read status bits */
+enum tpm_atmel_read_status {
+ ATML_STATUS_BUSY = 0x01,
+ ATML_STATUS_DATA_AVAIL = 0x02,
+ ATML_STATUS_REWRITE = 0x04,
+ ATML_STATUS_READY = 0x08
+};
+
+static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
+ u8 status, *hdr = buf;
+ u32 size;
+ int i;
+ __be32 *native_size;
+
+ /* start reading header */
+ if (count < 6)
+ return -EIO;
+
+ for (i = 0; i < 6; i++) {
+ status = ioread8(priv->iobase + 1);
+ if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
+ dev_err(&chip->dev, "error reading header\n");
+ return -EIO;
+ }
+ *buf++ = ioread8(priv->iobase);
+ }
+
+ /* size of the data received */
+ native_size = (__force __be32 *) (hdr + 2);
+ size = be32_to_cpu(*native_size);
+
+ if (count < size) {
+ dev_err(&chip->dev,
+			"Recv size(%d) greater than available space\n", size);
+ for (; i < size; i++) { /* clear the waiting data anyway */
+ status = ioread8(priv->iobase + 1);
+ if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
+ dev_err(&chip->dev, "error reading data\n");
+ return -EIO;
+ }
+ }
+ return -EIO;
+ }
+
+ /* read all the data available */
+ for (; i < size; i++) {
+ status = ioread8(priv->iobase + 1);
+ if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
+ dev_err(&chip->dev, "error reading data\n");
+ return -EIO;
+ }
+ *buf++ = ioread8(priv->iobase);
+ }
+
+ /* make sure data available is gone */
+ status = ioread8(priv->iobase + 1);
+
+ if (status & ATML_STATUS_DATA_AVAIL) {
+ dev_err(&chip->dev, "data available is stuck\n");
+ return -EIO;
+ }
+
+ return size;
+}
+
+static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
+ int i;
+
+ dev_dbg(&chip->dev, "tpm_atml_send:\n");
+ for (i = 0; i < count; i++) {
+ dev_dbg(&chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]);
+ iowrite8(buf[i], priv->iobase);
+ }
+
+ return 0;
+}
+
+static void tpm_atml_cancel(struct tpm_chip *chip)
+{
+ struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
+
+ iowrite8(ATML_STATUS_ABORT, priv->iobase + 1);
+}
+
+static u8 tpm_atml_status(struct tpm_chip *chip)
+{
+ struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return ioread8(priv->iobase + 1);
+}
+
+static bool tpm_atml_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == ATML_STATUS_READY);
+}
+
+static const struct tpm_class_ops tpm_atmel = {
+ .recv = tpm_atml_recv,
+ .send = tpm_atml_send,
+ .cancel = tpm_atml_cancel,
+ .status = tpm_atml_status,
+ .req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
+ .req_complete_val = ATML_STATUS_DATA_AVAIL,
+ .req_canceled = tpm_atml_req_canceled,
+};
+
+static struct platform_device *pdev;
+
+static void atml_plat_remove(void)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
+ struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
+
+ tpm_chip_unregister(chip);
+ if (priv->have_region)
+ atmel_release_region(priv->base, priv->region_size);
+ atmel_put_base_addr(priv->iobase);
+ platform_device_unregister(pdev);
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_atml_pm, tpm_pm_suspend, tpm_pm_resume);
+
+static struct platform_driver atml_drv = {
+ .driver = {
+ .name = "tpm_atmel",
+ .pm = &tpm_atml_pm,
+ },
+};
+
+static int __init init_atmel(void)
+{
+ int rc = 0;
+ void __iomem *iobase = NULL;
+ int have_region, region_size;
+ unsigned long base;
+ struct tpm_chip *chip;
+ struct tpm_atmel_priv *priv;
+
+ rc = platform_driver_register(&atml_drv);
+ if (rc)
+ return rc;
+
+ if ((iobase = atmel_get_base_addr(&base, &region_size)) == NULL) {
+ rc = -ENODEV;
+ goto err_unreg_drv;
+ }
+
+ have_region =
+ (atmel_request_region
+ (base, region_size, "tpm_atmel0") == NULL) ? 0 : 1;
+
+ pdev = platform_device_register_simple("tpm_atmel", -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ rc = PTR_ERR(pdev);
+ goto err_rel_reg;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ rc = -ENOMEM;
+ goto err_unreg_dev;
+ }
+
+ priv->iobase = iobase;
+ priv->base = base;
+ priv->have_region = have_region;
+ priv->region_size = region_size;
+
+ chip = tpmm_chip_alloc(&pdev->dev, &tpm_atmel);
+ if (IS_ERR(chip)) {
+ rc = PTR_ERR(chip);
+ goto err_unreg_dev;
+ }
+
+ dev_set_drvdata(&chip->dev, priv);
+
+ rc = tpm_chip_register(chip);
+ if (rc)
+ goto err_unreg_dev;
+
+ return 0;
+
+err_unreg_dev:
+ platform_device_unregister(pdev);
+err_rel_reg:
+ atmel_put_base_addr(iobase);
+ if (have_region)
+		atmel_release_region(base, region_size);
+err_unreg_drv:
+ platform_driver_unregister(&atml_drv);
+ return rc;
+}
+
+static void __exit cleanup_atmel(void)
+{
+ platform_driver_unregister(&atml_drv);
+ atml_plat_remove();
+}
+
+module_init(init_atmel);
+module_exit(cleanup_atmel);
+
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
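
As a reference for the receive path above: tpm_atml_recv() reads the first six bytes of the response and takes the total length from the big-endian 32-bit field at offset 2 of the TPM header (tag[2] + length[4] + code[4]). A minimal stand-alone sketch of that header parse; the example header bytes are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Extract the total response length from a TPM header (bytes 2..5, big-endian). */
static uint32_t tpm_hdr_length(const uint8_t *hdr)
{
	return ((uint32_t)hdr[2] << 24) | ((uint32_t)hdr[3] << 16) |
	       ((uint32_t)hdr[4] << 8)  |  (uint32_t)hdr[5];
}

int main(void)
{
	/* Example TPM 1.2 response header: tag 0x00c4, length 0x0000000e */
	const uint8_t hdr[6] = { 0x00, 0xc4, 0x00, 0x00, 0x00, 0x0e };

	printf("response length = %u bytes\n", tpm_hdr_length(hdr));
	return 0;
}
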
diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h
new file mode 100644
index 0000000000..7ac3f69dcf
--- /dev/null
+++ b/drivers/char/tpm/tpm_atmel.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Authors:
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * These differences are required on POWER because the device must be
+ * discovered through the device tree and iomap must be used to get
+ * around the need for holes in the io_page_mask. This does not happen
+ * automatically because the tpm is not a normal pci device and lives
+ * under the root node.
+ */
+
+struct tpm_atmel_priv {
+ int region_size;
+ int have_region;
+ unsigned long base;
+ void __iomem *iobase;
+};
+
+#ifdef CONFIG_PPC64
+
+#include <linux/of.h>
+
+#define atmel_getb(priv, offset) readb(priv->iobase + offset)
+#define atmel_putb(val, priv, offset) writeb(val, priv->iobase + offset)
+#define atmel_request_region request_mem_region
+#define atmel_release_region release_mem_region
+
+static inline void atmel_put_base_addr(void __iomem *iobase)
+{
+ iounmap(iobase);
+}
+
+static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
+{
+ struct device_node *dn;
+ unsigned long address, size;
+ const unsigned int *reg;
+ int reglen;
+ int naddrc;
+ int nsizec;
+
+ dn = of_find_node_by_name(NULL, "tpm");
+
+ if (!dn)
+ return NULL;
+
+ if (!of_device_is_compatible(dn, "AT97SC3201")) {
+ of_node_put(dn);
+ return NULL;
+ }
+
+ reg = of_get_property(dn, "reg", &reglen);
+ naddrc = of_n_addr_cells(dn);
+ nsizec = of_n_size_cells(dn);
+
+ of_node_put(dn);
+
+ if (naddrc == 2)
+ address = ((unsigned long) reg[0] << 32) | reg[1];
+ else
+ address = reg[0];
+
+ if (nsizec == 2)
+ size =
+ ((unsigned long) reg[naddrc] << 32) | reg[naddrc + 1];
+ else
+ size = reg[naddrc];
+
+ *base = address;
+ *region_size = size;
+ return ioremap(*base, *region_size);
+}
+#else
+#define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + offset)
+#define atmel_putb(val, chip, offset) \
+ outb(val, atmel_get_priv(chip)->base + offset)
+#define atmel_request_region request_region
+#define atmel_release_region release_region
+/* Atmel definitions */
+enum tpm_atmel_addr {
+ TPM_ATMEL_BASE_ADDR_LO = 0x08,
+ TPM_ATMEL_BASE_ADDR_HI = 0x09
+};
+
+static inline int tpm_read_index(int base, int index)
+{
+ outb(index, base);
+ return inb(base+1) & 0xFF;
+}
+
+/* Verify this is a 1.1 Atmel TPM */
+static int atmel_verify_tpm11(void)
+{
+
+ /* verify that it is an Atmel part */
+ if (tpm_read_index(TPM_ADDR, 4) != 'A' ||
+ tpm_read_index(TPM_ADDR, 5) != 'T' ||
+ tpm_read_index(TPM_ADDR, 6) != 'M' ||
+ tpm_read_index(TPM_ADDR, 7) != 'L')
+ return 1;
+
+ /* query chip for its version number */
+ if (tpm_read_index(TPM_ADDR, 0x00) != 1 ||
+ tpm_read_index(TPM_ADDR, 0x01) != 1)
+ return 1;
+
+ /* This is an atmel supported part */
+ return 0;
+}
+
+static inline void atmel_put_base_addr(void __iomem *iobase)
+{
+}
+
+/* Determine where to talk to device */
+static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
+{
+ int lo, hi;
+
+ if (atmel_verify_tpm11() != 0)
+ return NULL;
+
+ lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO);
+ hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI);
+
+ *base = (hi << 8) | lo;
+ *region_size = 2;
+
+ return ioport_map(*base, *region_size);
+}
+#endif
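
For the non-PPC64 probe path above: the base I/O port is recovered by reading two 8-bit index registers and combining them as (hi << 8) | lo. A tiny stand-alone sketch of that composition; the register contents below are made up for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t lo = 0x00;	/* TPM_ATMEL_BASE_ADDR_LO contents (example value) */
	uint8_t hi = 0x4e;	/* TPM_ATMEL_BASE_ADDR_HI contents (example value) */

	/* Same composition as atmel_get_base_addr(); region size is fixed at 2. */
	unsigned long base = ((unsigned long)hi << 8) | lo;

	printf("TPM base I/O address: 0x%lx (region size 2)\n", base);
	return 0;
}
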
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
new file mode 100644
index 0000000000..ea085b14ab
--- /dev/null
+++ b/drivers/char/tpm/tpm_crb.c
@@ -0,0 +1,858 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * Authors:
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG CRB 2.0 TPM specification.
+ */
+
+#include <linux/acpi.h>
+#include <linux/highmem.h>
+#include <linux/rculist.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#ifdef CONFIG_ARM64
+#include <linux/arm-smccc.h>
+#endif
+#include "tpm.h"
+
+#define ACPI_SIG_TPM2 "TPM2"
+#define TPM_CRB_MAX_RESOURCES 3
+
+static const guid_t crb_acpi_start_guid =
+ GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
+ 0xB7, 0xCD, 0xF0, 0x20, 0x3C, 0x03, 0x68, 0xD4);
+
+enum crb_defaults {
+ CRB_ACPI_START_REVISION_ID = 1,
+ CRB_ACPI_START_INDEX = 1,
+};
+
+enum crb_loc_ctrl {
+ CRB_LOC_CTRL_REQUEST_ACCESS = BIT(0),
+ CRB_LOC_CTRL_RELINQUISH = BIT(1),
+};
+
+enum crb_loc_state {
+ CRB_LOC_STATE_LOC_ASSIGNED = BIT(1),
+ CRB_LOC_STATE_TPM_REG_VALID_STS = BIT(7),
+};
+
+enum crb_ctrl_req {
+ CRB_CTRL_REQ_CMD_READY = BIT(0),
+ CRB_CTRL_REQ_GO_IDLE = BIT(1),
+};
+
+enum crb_ctrl_sts {
+ CRB_CTRL_STS_ERROR = BIT(0),
+ CRB_CTRL_STS_TPM_IDLE = BIT(1),
+};
+
+enum crb_start {
+ CRB_START_INVOKE = BIT(0),
+};
+
+enum crb_cancel {
+ CRB_CANCEL_INVOKE = BIT(0),
+};
+
+struct crb_regs_head {
+ u32 loc_state;
+ u32 reserved1;
+ u32 loc_ctrl;
+ u32 loc_sts;
+ u8 reserved2[32];
+ u64 intf_id;
+ u64 ctrl_ext;
+} __packed;
+
+struct crb_regs_tail {
+ u32 ctrl_req;
+ u32 ctrl_sts;
+ u32 ctrl_cancel;
+ u32 ctrl_start;
+ u32 ctrl_int_enable;
+ u32 ctrl_int_sts;
+ u32 ctrl_cmd_size;
+ u32 ctrl_cmd_pa_low;
+ u32 ctrl_cmd_pa_high;
+ u32 ctrl_rsp_size;
+ u64 ctrl_rsp_pa;
+} __packed;
+
+enum crb_status {
+ CRB_DRV_STS_COMPLETE = BIT(0),
+};
+
+struct crb_priv {
+ u32 sm;
+ const char *hid;
+ struct crb_regs_head __iomem *regs_h;
+ struct crb_regs_tail __iomem *regs_t;
+ u8 __iomem *cmd;
+ u8 __iomem *rsp;
+ u32 cmd_size;
+ u32 smc_func_id;
+ u32 __iomem *pluton_start_addr;
+ u32 __iomem *pluton_reply_addr;
+};
+
+struct tpm2_crb_smc {
+ u32 interrupt;
+ u8 interrupt_flags;
+ u8 op_flags;
+ u16 reserved2;
+ u32 smc_func_id;
+};
+
+struct tpm2_crb_pluton {
+ u64 start_addr;
+ u64 reply_addr;
+};
+
+static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
+ unsigned long timeout)
+{
+ ktime_t start;
+ ktime_t stop;
+
+ start = ktime_get();
+ stop = ktime_add(start, ms_to_ktime(timeout));
+
+ do {
+ if ((ioread32(reg) & mask) == value)
+ return true;
+
+ usleep_range(50, 100);
+ } while (ktime_before(ktime_get(), stop));
+
+ return ((ioread32(reg) & mask) == value);
+}
+
+static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete)
+{
+ if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
+ return 0;
+
+ if (!crb_wait_for_reg_32(priv->pluton_reply_addr, ~0, 1, TPM2_TIMEOUT_C))
+ return -ETIME;
+
+ iowrite32(1, priv->pluton_start_addr);
+ if (wait_for_complete == false)
+ return 0;
+
+ if (!crb_wait_for_reg_32(priv->pluton_start_addr,
+ 0xffffffff, 0, 200))
+ return -ETIME;
+
+ return 0;
+}
+
+/**
+ * __crb_go_idle - request tpm crb device to go the idle state
+ *
+ * @dev: crb device
+ * @priv: crb private data
+ *
+ * Write CRB_CTRL_REQ_GO_IDLE to TPM_CRB_CTRL_REQ
+ * The device should respond within TIMEOUT_C by clearing the bit.
+ * However, we do not wait here, as a subsequent CMD_READY request
+ * will be handled correctly even if idle was not completed.
+ *
+ * The function does nothing for devices with ACPI-start method
+ * or SMC-start method.
+ *
+ * Return: 0 always
+ */
+static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
+{
+ int rc;
+
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+ return 0;
+
+ iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
+
+ rc = crb_try_pluton_doorbell(priv, true);
+ if (rc)
+ return rc;
+
+ if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
+ CRB_CTRL_REQ_GO_IDLE/* mask */,
+ 0, /* value */
+ TPM2_TIMEOUT_C)) {
+ dev_warn(dev, "goIdle timed out\n");
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int crb_go_idle(struct tpm_chip *chip)
+{
+ struct device *dev = &chip->dev;
+ struct crb_priv *priv = dev_get_drvdata(dev);
+
+ return __crb_go_idle(dev, priv);
+}
+
+/**
+ * __crb_cmd_ready - request tpm crb device to enter ready state
+ *
+ * @dev: crb device
+ * @priv: crb private data
+ *
+ * Write CRB_CTRL_REQ_CMD_READY to TPM_CRB_CTRL_REQ
+ * and poll till the device acknowledge it by clearing the bit.
+ * The device should respond within TIMEOUT_C.
+ *
+ * The function does nothing for devices with ACPI-start method
+ * or SMC-start method.
+ *
+ * Return: 0 on success, -ETIME on timeout.
+ */
+static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
+{
+ int rc;
+
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+ return 0;
+
+ iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
+
+ rc = crb_try_pluton_doorbell(priv, true);
+ if (rc)
+ return rc;
+
+ if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
+ CRB_CTRL_REQ_CMD_READY /* mask */,
+ 0, /* value */
+ TPM2_TIMEOUT_C)) {
+ dev_warn(dev, "cmdReady timed out\n");
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int crb_cmd_ready(struct tpm_chip *chip)
+{
+ struct device *dev = &chip->dev;
+ struct crb_priv *priv = dev_get_drvdata(dev);
+
+ return __crb_cmd_ready(dev, priv);
+}
+
+static int __crb_request_locality(struct device *dev,
+ struct crb_priv *priv, int loc)
+{
+ u32 value = CRB_LOC_STATE_LOC_ASSIGNED |
+ CRB_LOC_STATE_TPM_REG_VALID_STS;
+
+ if (!priv->regs_h)
+ return 0;
+
+ iowrite32(CRB_LOC_CTRL_REQUEST_ACCESS, &priv->regs_h->loc_ctrl);
+ if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, value, value,
+ TPM2_TIMEOUT_C)) {
+ dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int crb_request_locality(struct tpm_chip *chip, int loc)
+{
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return __crb_request_locality(&chip->dev, priv, loc);
+}
+
+static int __crb_relinquish_locality(struct device *dev,
+ struct crb_priv *priv, int loc)
+{
+ u32 mask = CRB_LOC_STATE_LOC_ASSIGNED |
+ CRB_LOC_STATE_TPM_REG_VALID_STS;
+ u32 value = CRB_LOC_STATE_TPM_REG_VALID_STS;
+
+ if (!priv->regs_h)
+ return 0;
+
+ iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl);
+ if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value,
+ TPM2_TIMEOUT_C)) {
+ dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n");
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int crb_relinquish_locality(struct tpm_chip *chip, int loc)
+{
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return __crb_relinquish_locality(&chip->dev, priv, loc);
+}
+
+static u8 crb_status(struct tpm_chip *chip)
+{
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+ u8 sts = 0;
+
+ if ((ioread32(&priv->regs_t->ctrl_start) & CRB_START_INVOKE) !=
+ CRB_START_INVOKE)
+ sts |= CRB_DRV_STS_COMPLETE;
+
+ return sts;
+}
+
+static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+ unsigned int expected;
+
+ /* A sanity check that the upper layer wants to get at least the header
+ * as that is the minimum size for any TPM response.
+ */
+ if (count < TPM_HEADER_SIZE)
+ return -EIO;
+
+ /* If this bit is set, according to the spec, the TPM is in
+	 * an unrecoverable condition.
+ */
+ if (ioread32(&priv->regs_t->ctrl_sts) & CRB_CTRL_STS_ERROR)
+ return -EIO;
+
+ /* Read the first 8 bytes in order to get the length of the response.
+ * We read exactly a quad word in order to make sure that the remaining
+ * reads will be aligned.
+ */
+ memcpy_fromio(buf, priv->rsp, 8);
+
+ expected = be32_to_cpup((__be32 *)&buf[2]);
+ if (expected > count || expected < TPM_HEADER_SIZE)
+ return -EIO;
+
+ memcpy_fromio(&buf[8], &priv->rsp[8], expected - 8);
+
+ return expected;
+}
+
+static int crb_do_acpi_start(struct tpm_chip *chip)
+{
+ union acpi_object *obj;
+ int rc;
+
+ obj = acpi_evaluate_dsm(chip->acpi_dev_handle,
+ &crb_acpi_start_guid,
+ CRB_ACPI_START_REVISION_ID,
+ CRB_ACPI_START_INDEX,
+ NULL);
+ if (!obj)
+ return -ENXIO;
+ rc = obj->integer.value == 0 ? 0 : -ENXIO;
+ ACPI_FREE(obj);
+ return rc;
+}
+
+#ifdef CONFIG_ARM64
+/*
+ * This is a TPM Command Response Buffer start method that invokes a
+ * Secure Monitor Call to request the firmware to execute or cancel
+ * a TPM 2.0 command.
+ */
+static int tpm_crb_smc_start(struct device *dev, unsigned long func_id)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(func_id, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != 0) {
+ dev_err(dev,
+ FW_BUG "tpm_crb_smc_start() returns res.a0 = 0x%lx\n",
+ res.a0);
+ return -EIO;
+ }
+
+ return 0;
+}
+#else
+static int tpm_crb_smc_start(struct device *dev, unsigned long func_id)
+{
+ dev_err(dev, FW_BUG "tpm_crb: incorrect start method\n");
+ return -EINVAL;
+}
+#endif
+
+static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+ int rc = 0;
+
+ /* Zero the cancel register so that the next command will not get
+ * canceled.
+ */
+ iowrite32(0, &priv->regs_t->ctrl_cancel);
+
+ if (len > priv->cmd_size) {
+ dev_err(&chip->dev, "invalid command count value %zd %d\n",
+ len, priv->cmd_size);
+ return -E2BIG;
+ }
+
+ /* Seems to be necessary for every command */
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
+ __crb_cmd_ready(&chip->dev, priv);
+
+ memcpy_toio(priv->cmd, buf, len);
+
+ /* Make sure that cmd is populated before issuing start. */
+ wmb();
+
+ /* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
+	 * report only ACPI start but in practice seem to also require
+	 * CRB start, hence the CRB start method is invoked if hid == MSFT0101.
+ */
+ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
+ (priv->sm == ACPI_TPM2_MEMORY_MAPPED) ||
+ (!strcmp(priv->hid, "MSFT0101")))
+ iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
+
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD))
+ rc = crb_do_acpi_start(chip);
+
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
+ iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
+ rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id);
+ }
+
+ if (rc)
+ return rc;
+
+ return crb_try_pluton_doorbell(priv, false);
+}
+
+static void crb_cancel(struct tpm_chip *chip)
+{
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+ iowrite32(CRB_CANCEL_INVOKE, &priv->regs_t->ctrl_cancel);
+
+ if (((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) &&
+ crb_do_acpi_start(chip))
+ dev_err(&chip->dev, "ACPI Start failed\n");
+}
+
+static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+ u32 cancel = ioread32(&priv->regs_t->ctrl_cancel);
+
+ return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
+}
+
+static const struct tpm_class_ops tpm_crb = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = crb_status,
+ .recv = crb_recv,
+ .send = crb_send,
+ .cancel = crb_cancel,
+ .req_canceled = crb_req_canceled,
+ .go_idle = crb_go_idle,
+ .cmd_ready = crb_cmd_ready,
+ .request_locality = crb_request_locality,
+ .relinquish_locality = crb_relinquish_locality,
+ .req_complete_mask = CRB_DRV_STS_COMPLETE,
+ .req_complete_val = CRB_DRV_STS_COMPLETE,
+};
+
+static int crb_check_resource(struct acpi_resource *ares, void *data)
+{
+ struct resource *iores_array = data;
+ struct resource_win win;
+ struct resource *res = &(win.res);
+ int i;
+
+ if (acpi_dev_resource_memory(ares, res) ||
+ acpi_dev_resource_address_space(ares, &win)) {
+ for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) {
+ if (resource_type(iores_array + i) != IORESOURCE_MEM) {
+ iores_array[i] = *res;
+ iores_array[i].name = NULL;
+ break;
+ }
+ }
+ }
+
+ return 1;
+}
+
+static void __iomem *crb_map_res(struct device *dev, struct resource *iores,
+ void __iomem **iobase_ptr, u64 start, u32 size)
+{
+ struct resource new_res = {
+ .start = start,
+ .end = start + size - 1,
+ .flags = IORESOURCE_MEM,
+ };
+
+ /* Detect a 64 bit address on a 32 bit system */
+ if (start != new_res.start)
+ return IOMEM_ERR_PTR(-EINVAL);
+
+ if (!iores)
+ return devm_ioremap_resource(dev, &new_res);
+
+ if (!*iobase_ptr) {
+ *iobase_ptr = devm_ioremap_resource(dev, iores);
+ if (IS_ERR(*iobase_ptr))
+ return *iobase_ptr;
+ }
+
+ return *iobase_ptr + (new_res.start - iores->start);
+}
+
+/*
+ * Work around broken BIOSes that return inconsistent values from the ACPI
+ * region vs the registers. Trust the ACPI region. Such broken systems
+ * probably cannot send large TPM commands since the buffer will be truncated.
+ */
+static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res,
+ u64 start, u64 size)
+{
+ if (io_res->start > start || io_res->end < start)
+ return size;
+
+ if (start + size - 1 <= io_res->end)
+ return size;
+
+ dev_err(dev,
+ FW_BUG "ACPI region does not cover the entire command/response buffer. %pr vs %llx %llx\n",
+ io_res, start, size);
+
+ return io_res->end - start + 1;
+}
+
+static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ struct acpi_table_tpm2 *buf)
+{
+ struct list_head acpi_resource_list;
+ struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} };
+ void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL};
+ struct device *dev = &device->dev;
+ struct resource *iores;
+ void __iomem **iobase_ptr;
+ int i;
+ u32 pa_high, pa_low;
+ u64 cmd_pa;
+ u32 cmd_size;
+ __le64 __rsp_pa;
+ u64 rsp_pa;
+ u32 rsp_size;
+ int ret;
+
+ /*
+ * Pluton sometimes does not define ACPI memory regions.
+	 * Mapping is then done in crb_map_pluton().
+ */
+ if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
+ INIT_LIST_HEAD(&acpi_resource_list);
+ ret = acpi_dev_get_resources(device, &acpi_resource_list,
+ crb_check_resource, iores_array);
+ if (ret < 0)
+ return ret;
+ acpi_dev_free_resource_list(&acpi_resource_list);
+
+ if (resource_type(iores_array) != IORESOURCE_MEM) {
+ dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
+ return -EINVAL;
+ } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
+ IORESOURCE_MEM) {
+ dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
+ memset(iores_array + TPM_CRB_MAX_RESOURCES,
+ 0, sizeof(*iores_array));
+ iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
+ }
+ }
+
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
+ if (buf->control_address >= iores_array[i].start &&
+ buf->control_address + sizeof(struct crb_regs_tail) - 1 <=
+ iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address,
+ sizeof(struct crb_regs_tail));
+
+ if (IS_ERR(priv->regs_t))
+ return PTR_ERR(priv->regs_t);
+
+ /* The ACPI IO region starts at the head area and continues to include
+ * the control area, as one nice sane region except for some older
+ * stuff that puts the control area outside the ACPI IO region.
+ */
+ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
+ (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
+ if (iores &&
+ buf->control_address == iores->start +
+ sizeof(*priv->regs_h))
+ priv->regs_h = *iobase_ptr;
+ else
+ dev_warn(dev, FW_BUG "Bad ACPI memory layout");
+ }
+
+ ret = __crb_request_locality(dev, priv, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * PTT HW bug w/a: wake up the device to access
+ * possibly not retained registers.
+ */
+ ret = __crb_cmd_ready(dev, priv);
+ if (ret)
+ goto out_relinquish_locality;
+
+ pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
+ pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
+ cmd_pa = ((u64)pa_high << 32) | pa_low;
+ cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size);
+
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; iores_array[i].end; ++i) {
+ if (cmd_pa >= iores_array[i].start &&
+ cmd_pa <= iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ if (iores)
+ cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size);
+
+ dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
+ pa_high, pa_low, cmd_size);
+
+ priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size);
+ if (IS_ERR(priv->cmd)) {
+ ret = PTR_ERR(priv->cmd);
+ goto out;
+ }
+
+ memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
+ rsp_pa = le64_to_cpu(__rsp_pa);
+ rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size);
+
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
+ if (rsp_pa >= iores_array[i].start &&
+ rsp_pa <= iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ if (iores)
+ rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size);
+
+ if (cmd_pa != rsp_pa) {
+ priv->rsp = crb_map_res(dev, iores, iobase_ptr,
+ rsp_pa, rsp_size);
+ ret = PTR_ERR_OR_ZERO(priv->rsp);
+ goto out;
+ }
+
+ /* According to the PTP specification, overlapping command and response
+ * buffer sizes must be identical.
+ */
+ if (cmd_size != rsp_size) {
+ dev_err(dev, FW_BUG "overlapping command and response buffer sizes are not identical");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->rsp = priv->cmd;
+
+out:
+ if (!ret)
+ priv->cmd_size = cmd_size;
+
+ __crb_go_idle(dev, priv);
+
+out_relinquish_locality:
+
+ __crb_relinquish_locality(dev, priv, 0);
+
+ return ret;
+}
+
+static int crb_map_pluton(struct device *dev, struct crb_priv *priv,
+ struct acpi_table_tpm2 *buf, struct tpm2_crb_pluton *crb_pluton)
+{
+ priv->pluton_start_addr = crb_map_res(dev, NULL, NULL,
+ crb_pluton->start_addr, 4);
+ if (IS_ERR(priv->pluton_start_addr))
+ return PTR_ERR(priv->pluton_start_addr);
+
+ priv->pluton_reply_addr = crb_map_res(dev, NULL, NULL,
+ crb_pluton->reply_addr, 4);
+ if (IS_ERR(priv->pluton_reply_addr))
+ return PTR_ERR(priv->pluton_reply_addr);
+
+ return 0;
+}
+
+static int crb_acpi_add(struct acpi_device *device)
+{
+ struct acpi_table_tpm2 *buf;
+ struct crb_priv *priv;
+ struct tpm_chip *chip;
+ struct device *dev = &device->dev;
+ struct tpm2_crb_smc *crb_smc;
+ struct tpm2_crb_pluton *crb_pluton;
+ acpi_status status;
+ u32 sm;
+ int rc;
+
+ status = acpi_get_table(ACPI_SIG_TPM2, 1,
+ (struct acpi_table_header **) &buf);
+ if (ACPI_FAILURE(status) || buf->header.length < sizeof(*buf)) {
+ dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
+ return -EINVAL;
+ }
+
+ /* Should the FIFO driver handle this? */
+ sm = buf->start_method;
+ if (sm == ACPI_TPM2_MEMORY_MAPPED) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL);
+ if (!priv) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
+ if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
+ dev_err(dev,
+ FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
+ buf->header.length,
+ ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
+ rc = -EINVAL;
+ goto out;
+ }
+ crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
+ priv->smc_func_id = crb_smc->smc_func_id;
+ }
+
+ if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
+ if (buf->header.length < (sizeof(*buf) + sizeof(*crb_pluton))) {
+ dev_err(dev,
+ FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
+ buf->header.length,
+ ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON);
+ rc = -EINVAL;
+ goto out;
+ }
+ crb_pluton = ACPI_ADD_PTR(struct tpm2_crb_pluton, buf, sizeof(*buf));
+ rc = crb_map_pluton(dev, priv, buf, crb_pluton);
+ if (rc)
+ goto out;
+ }
+
+ priv->sm = sm;
+ priv->hid = acpi_device_hid(device);
+
+ rc = crb_map_io(device, priv, buf);
+ if (rc)
+ goto out;
+
+ chip = tpmm_chip_alloc(dev, &tpm_crb);
+ if (IS_ERR(chip)) {
+ rc = PTR_ERR(chip);
+ goto out;
+ }
+
+ dev_set_drvdata(&chip->dev, priv);
+ chip->acpi_dev_handle = device->handle;
+ chip->flags = TPM_CHIP_FLAG_TPM2;
+
+ rc = tpm_chip_bootstrap(chip);
+ if (rc)
+ goto out;
+
+#ifdef CONFIG_X86
+ /* A quirk for https://www.amd.com/en/support/kb/faq/pa-410 */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
+ dev_info(dev, "Disabling hwrng\n");
+ chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
+ }
+#endif /* CONFIG_X86 */
+
+ rc = tpm_chip_register(chip);
+
+out:
+ acpi_put_table((struct acpi_table_header *)buf);
+ return rc;
+}
+
+static void crb_acpi_remove(struct acpi_device *device)
+{
+ struct device *dev = &device->dev;
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ tpm_chip_unregister(chip);
+}
+
+static const struct dev_pm_ops crb_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume)
+};
+
+static const struct acpi_device_id crb_device_ids[] = {
+ {"MSFT0101", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, crb_device_ids);
+
+static struct acpi_driver crb_acpi_driver = {
+ .name = "tpm_crb",
+ .ids = crb_device_ids,
+ .ops = {
+ .add = crb_acpi_add,
+ .remove = crb_acpi_remove,
+ },
+ .drv = {
+ .pm = &crb_pm,
+ },
+};
+
+module_acpi_driver(crb_acpi_driver);
+MODULE_AUTHOR("Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>");
+MODULE_DESCRIPTION("TPM2 Driver");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
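
Two patterns recur throughout the CRB driver above: composing the 64-bit command buffer address from the CTRL_CMD_PA_HIGH/LOW register pair, and polling a register until a masked value matches within a timeout (crb_wait_for_reg_32()). The stand-alone sketch below models both against plain memory rather than MMIO; the names, values and retry bound are invented for the illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* High/low halves of the command buffer physical address, as in crb_map_io(). */
static uint64_t compose_cmd_pa(uint32_t pa_high, uint32_t pa_low)
{
	return ((uint64_t)pa_high << 32) | pa_low;
}

/* Poll until (reg & mask) == value or max_tries attempts are exhausted;
 * the driver bounds the loop by TPM2_TIMEOUT_C and sleeps 50-100 us between
 * reads instead of counting iterations. */
static bool wait_for_reg(const volatile uint32_t *reg, uint32_t mask,
			 uint32_t value, unsigned int max_tries)
{
	while (max_tries--) {
		if ((*reg & mask) == value)
			return true;
	}
	return (*reg & mask) == value;
}

int main(void)
{
	volatile uint32_t ctrl_req = 0;	/* pretend GO_IDLE was already acknowledged */
	uint64_t cmd_pa = compose_cmd_pa(0x00000001u, 0xfed40080u);

	printf("cmd_pa = 0x%llx, idle acknowledged = %d\n",
	       (unsigned long long)cmd_pa,
	       wait_for_reg(&ctrl_req, 0x1u, 0x0u, 3));
	return 0;
}
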
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
new file mode 100644
index 0000000000..76adb10807
--- /dev/null
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Microsoft Corporation
+ *
+ * Implements a firmware TPM as described here:
+ * https://www.microsoft.com/en-us/research/publication/ftpm-software-implementation-tpm-chip/
+ *
+ * A reference implementation is available here:
+ * https://github.com/microsoft/ms-tpm-20-ref/tree/master/Samples/ARM32-FirmwareTPM/optee_ta/fTPM
+ */
+
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/tee_drv.h>
+#include <linux/tpm.h>
+#include <linux/uuid.h>
+
+#include "tpm.h"
+#include "tpm_ftpm_tee.h"
+
+/*
+ * TA_FTPM_UUID: BC50D971-D4C9-42C4-82CB-343FB7F37896
+ *
+ * Randomly generated, and must correspond to the GUID on the TA side.
+ * Defined here in the reference implementation:
+ * https://github.com/microsoft/ms-tpm-20-ref/blob/master/Samples/ARM32-FirmwareTPM/optee_ta/fTPM/include/fTPM.h#L42
+ */
+static const uuid_t ftpm_ta_uuid =
+ UUID_INIT(0xBC50D971, 0xD4C9, 0x42C4,
+ 0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96);
+
+/**
+ * ftpm_tee_tpm_op_recv() - retrieve fTPM response.
+ * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h.
+ * @buf: the buffer to store data.
+ * @count: the number of bytes to read.
+ *
+ * Return:
+ * In case of success the number of bytes received.
+ * On failure, -errno.
+ */
+static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
+ size_t len;
+
+ len = pvt_data->resp_len;
+ if (count < len) {
+ dev_err(&chip->dev,
+ "%s: Invalid size in recv: count=%zd, resp_len=%zd\n",
+ __func__, count, len);
+ return -EIO;
+ }
+
+ memcpy(buf, pvt_data->resp_buf, len);
+ pvt_data->resp_len = 0;
+
+ return len;
+}
+
+/**
+ * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory.
+ * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h
+ * @buf: the buffer to send.
+ * @len: the number of bytes to send.
+ *
+ * Return:
+ * In case of success, returns 0.
+ * On failure, -errno
+ */
+static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
+ size_t resp_len;
+ int rc;
+ u8 *temp_buf;
+ struct tpm_header *resp_header;
+ struct tee_ioctl_invoke_arg transceive_args;
+ struct tee_param command_params[4];
+ struct tee_shm *shm = pvt_data->shm;
+
+ if (len > MAX_COMMAND_SIZE) {
+ dev_err(&chip->dev,
+ "%s: len=%zd exceeds MAX_COMMAND_SIZE supported by fTPM TA\n",
+ __func__, len);
+ return -EIO;
+ }
+
+ memset(&transceive_args, 0, sizeof(transceive_args));
+ memset(command_params, 0, sizeof(command_params));
+ pvt_data->resp_len = 0;
+
+ /* Invoke FTPM_OPTEE_TA_SUBMIT_COMMAND function of fTPM TA */
+ transceive_args = (struct tee_ioctl_invoke_arg) {
+ .func = FTPM_OPTEE_TA_SUBMIT_COMMAND,
+ .session = pvt_data->session,
+ .num_params = 4,
+ };
+
+ /* Fill FTPM_OPTEE_TA_SUBMIT_COMMAND parameters */
+ command_params[0] = (struct tee_param) {
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT,
+ .u.memref = {
+ .shm = shm,
+ .size = len,
+ .shm_offs = 0,
+ },
+ };
+
+ temp_buf = tee_shm_get_va(shm, 0);
+ if (IS_ERR(temp_buf)) {
+ dev_err(&chip->dev, "%s: tee_shm_get_va failed for transmit\n",
+ __func__);
+ return PTR_ERR(temp_buf);
+ }
+ memset(temp_buf, 0, (MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE));
+ memcpy(temp_buf, buf, len);
+
+ command_params[1] = (struct tee_param) {
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT,
+ .u.memref = {
+ .shm = shm,
+ .size = MAX_RESPONSE_SIZE,
+ .shm_offs = MAX_COMMAND_SIZE,
+ },
+ };
+
+ rc = tee_client_invoke_func(pvt_data->ctx, &transceive_args,
+ command_params);
+ if ((rc < 0) || (transceive_args.ret != 0)) {
+ dev_err(&chip->dev, "%s: SUBMIT_COMMAND invoke error: 0x%x\n",
+ __func__, transceive_args.ret);
+ return (rc < 0) ? rc : transceive_args.ret;
+ }
+
+ temp_buf = tee_shm_get_va(shm, command_params[1].u.memref.shm_offs);
+ if (IS_ERR(temp_buf)) {
+ dev_err(&chip->dev, "%s: tee_shm_get_va failed for receive\n",
+ __func__);
+ return PTR_ERR(temp_buf);
+ }
+
+ resp_header = (struct tpm_header *)temp_buf;
+ resp_len = be32_to_cpu(resp_header->length);
+
+ /* sanity check resp_len */
+ if (resp_len < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev, "%s: tpm response header too small\n",
+ __func__);
+ return -EIO;
+ }
+ if (resp_len > MAX_RESPONSE_SIZE) {
+ dev_err(&chip->dev,
+ "%s: resp_len=%zd exceeds MAX_RESPONSE_SIZE\n",
+ __func__, resp_len);
+ return -EIO;
+ }
+
+ /* sanity checks look good, cache the response */
+ memcpy(pvt_data->resp_buf, temp_buf, resp_len);
+ pvt_data->resp_len = resp_len;
+
+ return 0;
+}
+
+static void ftpm_tee_tpm_op_cancel(struct tpm_chip *chip)
+{
+ /* not supported */
+}
+
+static u8 ftpm_tee_tpm_op_status(struct tpm_chip *chip)
+{
+ return 0;
+}
+
+static bool ftpm_tee_tpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return false;
+}
+
+static const struct tpm_class_ops ftpm_tee_tpm_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .recv = ftpm_tee_tpm_op_recv,
+ .send = ftpm_tee_tpm_op_send,
+ .cancel = ftpm_tee_tpm_op_cancel,
+ .status = ftpm_tee_tpm_op_status,
+ .req_complete_mask = 0,
+ .req_complete_val = 0,
+ .req_canceled = ftpm_tee_tpm_req_canceled,
+};
+
+/*
+ * Check whether this driver supports the fTPM TA in the TEE instance
+ * represented by the params (ver/data) to this function.
+ */
+static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ /*
+	 * Currently this driver only supports GP compliant OPTEE based fTPM TAs
+ */
+ if ((ver->impl_id == TEE_IMPL_ID_OPTEE) &&
+ (ver->gen_caps & TEE_GEN_CAP_GP))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * ftpm_tee_probe() - initialize the fTPM
+ * @dev: the device description.
+ *
+ * Return:
+ * On success, 0. On failure, -errno.
+ */
+static int ftpm_tee_probe(struct device *dev)
+{
+ int rc;
+ struct tpm_chip *chip;
+ struct ftpm_tee_private *pvt_data = NULL;
+ struct tee_ioctl_open_session_arg sess_arg;
+
+ pvt_data = devm_kzalloc(dev, sizeof(struct ftpm_tee_private),
+ GFP_KERNEL);
+ if (!pvt_data)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, pvt_data);
+
+ /* Open context with TEE driver */
+ pvt_data->ctx = tee_client_open_context(NULL, ftpm_tee_match, NULL,
+ NULL);
+ if (IS_ERR(pvt_data->ctx)) {
+ if (PTR_ERR(pvt_data->ctx) == -ENOENT)
+ return -EPROBE_DEFER;
+ dev_err(dev, "%s: tee_client_open_context failed\n", __func__);
+ return PTR_ERR(pvt_data->ctx);
+ }
+
+ /* Open a session with fTPM TA */
+ memset(&sess_arg, 0, sizeof(sess_arg));
+ export_uuid(sess_arg.uuid, &ftpm_ta_uuid);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ rc = tee_client_open_session(pvt_data->ctx, &sess_arg, NULL);
+ if ((rc < 0) || (sess_arg.ret != 0)) {
+ dev_err(dev, "%s: tee_client_open_session failed, err=%x\n",
+ __func__, sess_arg.ret);
+ rc = -EINVAL;
+ goto out_tee_session;
+ }
+ pvt_data->session = sess_arg.session;
+
+ /* Allocate dynamic shared memory with fTPM TA */
+ pvt_data->shm = tee_shm_alloc_kernel_buf(pvt_data->ctx,
+ MAX_COMMAND_SIZE +
+ MAX_RESPONSE_SIZE);
+ if (IS_ERR(pvt_data->shm)) {
+ dev_err(dev, "%s: tee_shm_alloc_kernel_buf failed\n", __func__);
+ rc = -ENOMEM;
+ goto out_shm_alloc;
+ }
+
+ /* Allocate new struct tpm_chip instance */
+ chip = tpm_chip_alloc(dev, &ftpm_tee_tpm_ops);
+ if (IS_ERR(chip)) {
+ dev_err(dev, "%s: tpm_chip_alloc failed\n", __func__);
+ rc = PTR_ERR(chip);
+ goto out_chip_alloc;
+ }
+
+ pvt_data->chip = chip;
+ pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+ /* Create a character device for the fTPM */
+ rc = tpm_chip_register(pvt_data->chip);
+ if (rc) {
+ dev_err(dev, "%s: tpm_chip_register failed with rc=%d\n",
+ __func__, rc);
+ goto out_chip;
+ }
+
+ return 0;
+
+out_chip:
+ put_device(&pvt_data->chip->dev);
+out_chip_alloc:
+ tee_shm_free(pvt_data->shm);
+out_shm_alloc:
+ tee_client_close_session(pvt_data->ctx, pvt_data->session);
+out_tee_session:
+ tee_client_close_context(pvt_data->ctx);
+
+ return rc;
+}
+
+static int ftpm_plat_tee_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ return ftpm_tee_probe(dev);
+}
+
+/**
+ * ftpm_tee_remove() - remove the TPM device
+ * @dev: the device description.
+ *
+ * Return:
+ * 0 always.
+ */
+static int ftpm_tee_remove(struct device *dev)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(dev);
+
+ /* Release the chip */
+ tpm_chip_unregister(pvt_data->chip);
+
+ /* frees chip */
+ put_device(&pvt_data->chip->dev);
+
+ /* Free the shared memory pool */
+ tee_shm_free(pvt_data->shm);
+
+ /* close the existing session with fTPM TA*/
+ tee_client_close_session(pvt_data->ctx, pvt_data->session);
+
+ /* close the context with TEE driver */
+ tee_client_close_context(pvt_data->ctx);
+
+ /* memory allocated with devm_kzalloc() is freed automatically */
+
+ return 0;
+}
+
+static void ftpm_plat_tee_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ ftpm_tee_remove(dev);
+}
+
+/**
+ * ftpm_plat_tee_shutdown() - shutdown the TPM device
+ * @pdev: the platform_device description.
+ */
+static void ftpm_plat_tee_shutdown(struct platform_device *pdev)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev);
+
+ tee_shm_free(pvt_data->shm);
+ tee_client_close_session(pvt_data->ctx, pvt_data->session);
+ tee_client_close_context(pvt_data->ctx);
+}
+
+static const struct of_device_id of_ftpm_tee_ids[] = {
+ { .compatible = "microsoft,ftpm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_ftpm_tee_ids);
+
+static struct platform_driver ftpm_tee_plat_driver = {
+ .driver = {
+ .name = "ftpm-tee",
+ .of_match_table = of_match_ptr(of_ftpm_tee_ids),
+ },
+ .shutdown = ftpm_plat_tee_shutdown,
+ .probe = ftpm_plat_tee_probe,
+ .remove_new = ftpm_plat_tee_remove,
+};
+
+/* UUID of the fTPM TA */
+static const struct tee_client_device_id optee_ftpm_id_table[] = {
+ {UUID_INIT(0xbc50d971, 0xd4c9, 0x42c4,
+ 0x82, 0xcb, 0x34, 0x3f, 0xb7, 0xf3, 0x78, 0x96)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(tee, optee_ftpm_id_table);
+
+static struct tee_client_driver ftpm_tee_driver = {
+ .id_table = optee_ftpm_id_table,
+ .driver = {
+ .name = "optee-ftpm",
+ .bus = &tee_bus_type,
+ .probe = ftpm_tee_probe,
+ .remove = ftpm_tee_remove,
+ },
+};
+
+static int __init ftpm_mod_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&ftpm_tee_plat_driver);
+ if (rc)
+ return rc;
+
+ rc = driver_register(&ftpm_tee_driver.driver);
+ if (rc) {
+ platform_driver_unregister(&ftpm_tee_plat_driver);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void __exit ftpm_mod_exit(void)
+{
+ platform_driver_unregister(&ftpm_tee_plat_driver);
+ driver_unregister(&ftpm_tee_driver.driver);
+}
+
+module_init(ftpm_mod_init);
+module_exit(ftpm_mod_exit);
+
+MODULE_AUTHOR("Thirupathaiah Annapureddy <thiruan@microsoft.com>");
+MODULE_DESCRIPTION("TPM Driver for fTPM TA in TEE");
+MODULE_LICENSE("GPL v2");
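
For reference, the send path above splits the single TEE shared-memory allocation into a command region at offset 0 and a response region at offset MAX_COMMAND_SIZE, which is why one buffer of MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE bytes serves both directions. A minimal sketch of that layout follows; the sizes mirror tpm_ftpm_tee.h, and the example command bytes (a TPM2 GetRandom request) are included only for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_MAX_COMMAND_SIZE		4096
#define SKETCH_MAX_RESPONSE_SIZE	4096

int main(void)
{
	static uint8_t shm[SKETCH_MAX_COMMAND_SIZE + SKETCH_MAX_RESPONSE_SIZE];

	/* TPM2_CC_GetRandom, 8 bytes requested: tag 0x8001, length 12, cc 0x17b */
	const uint8_t cmd[] = { 0x80, 0x01, 0x00, 0x00, 0x00, 0x0c,
				0x00, 0x00, 0x01, 0x7b, 0x00, 0x08 };

	/* Command region: offset 0, length MAX_COMMAND_SIZE. */
	memset(shm, 0, sizeof(shm));
	memcpy(shm, cmd, sizeof(cmd));

	/* Response region: offset MAX_COMMAND_SIZE, length MAX_RESPONSE_SIZE. */
	uint8_t *rsp = shm + SKETCH_MAX_COMMAND_SIZE;
	(void)rsp;

	printf("command at offset 0 (%zu bytes), response region at offset %d\n",
	       sizeof(cmd), SKETCH_MAX_COMMAND_SIZE);
	return 0;
}
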
diff --git a/drivers/char/tpm/tpm_ftpm_tee.h b/drivers/char/tpm/tpm_ftpm_tee.h
new file mode 100644
index 0000000000..f98daa7bf6
--- /dev/null
+++ b/drivers/char/tpm/tpm_ftpm_tee.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Microsoft Corporation
+ */
+
+#ifndef __TPM_FTPM_TEE_H__
+#define __TPM_FTPM_TEE_H__
+
+#include <linux/tee_drv.h>
+#include <linux/tpm.h>
+#include <linux/uuid.h>
+
+/* The TA function (TAF) IDs implemented by this TA */
+#define FTPM_OPTEE_TA_SUBMIT_COMMAND (0)
+#define FTPM_OPTEE_TA_EMULATE_PPI (1)
+
+/* max. buffer size supported by fTPM */
+#define MAX_COMMAND_SIZE 4096
+#define MAX_RESPONSE_SIZE 4096
+
+/**
+ * struct ftpm_tee_private - fTPM's private data
+ * @chip: struct tpm_chip instance registered with tpm framework.
+ * @session: fTPM TA session identifier.
+ * @resp_len: cached response buffer length.
+ * @resp_buf: cached response buffer.
+ * @ctx: TEE context handler.
+ * @shm: Memory pool shared with fTPM TA in TEE.
+ */
+struct ftpm_tee_private {
+ struct tpm_chip *chip;
+ u32 session;
+ size_t resp_len;
+ u8 resp_buf[MAX_RESPONSE_SIZE];
+ struct tee_context *ctx;
+ struct tee_shm *shm;
+};
+
+#endif /* __TPM_FTPM_TEE_H__ */
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
new file mode 100644
index 0000000000..301a95b373
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ATMEL I2C TPM AT97SC3204T
+ *
+ * Copyright (C) 2012 V Lab Technologies
+ * Teddy Reed <teddy@prosauce.org>
+ * Copyright (C) 2013, Obsidian Research Corp.
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ * Device driver for ATMEL I2C TPMs.
+ *
+ * Teddy Reed determined the basic I2C command flow: unlike other I2C TPM
+ * devices, the raw TCG-formatted TPM command data is written via I2C and
+ * the raw TCG-formatted TPM response data is returned via I2C.
+ *
+ * TCG status/locality/etc. functions seen in the LPC implementation do not
+ * seem to be present.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include "tpm.h"
+
+#define I2C_DRIVER_NAME "tpm_i2c_atmel"
+
+#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */
+#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */
+
+#define ATMEL_STS_OK 1
+
+struct priv_data {
+ size_t len;
+ /* This is the amount we read on the first try. 25 was chosen to fit a
+ * fair number of read responses in the buffer so a 2nd retry can be
+ * avoided in small message cases. */
+ u8 buffer[sizeof(struct tpm_header) + 25];
+};
+
+static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ s32 status;
+
+ priv->len = 0;
+
+ if (len <= 2)
+ return -EIO;
+
+ status = i2c_master_send(client, buf, len);
+
+ dev_dbg(&chip->dev,
+ "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
+ (int)min_t(size_t, 64, len), buf, len, status);
+
+ if (status < 0)
+ return status;
+
+ /* The upper layer does not support incomplete sends. */
+ if (status != len)
+ return -E2BIG;
+
+ return 0;
+}
+
+static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ struct tpm_header *hdr = (struct tpm_header *)priv->buffer;
+ u32 expected_len;
+ int rc;
+
+ if (priv->len == 0)
+ return -EIO;
+
+	/* Get the message size from the message header; if we didn't get the
+ * whole message in read_status then we need to re-read the
+ * message. */
+ expected_len = be32_to_cpu(hdr->length);
+ if (expected_len > count)
+ return -ENOMEM;
+
+ if (priv->len >= expected_len) {
+ dev_dbg(&chip->dev,
+ "%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
+ (int)min_t(size_t, 64, expected_len), buf, count,
+ expected_len);
+ memcpy(buf, priv->buffer, expected_len);
+ return expected_len;
+ }
+
+ rc = i2c_master_recv(client, buf, expected_len);
+ dev_dbg(&chip->dev,
+ "%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
+ (int)min_t(size_t, 64, expected_len), buf, count,
+ expected_len);
+ return rc;
+}
+
+static void i2c_atmel_cancel(struct tpm_chip *chip)
+{
+ dev_err(&chip->dev, "TPM operation cancellation was requested, but is not supported");
+}
+
+static u8 i2c_atmel_read_status(struct tpm_chip *chip)
+{
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ int rc;
+
+ /* The TPM fails the I2C read until it is ready, so we do the entire
+ * transfer here and buffer it locally. This way the common code can
+ * properly handle the timeouts. */
+ priv->len = 0;
+ memset(priv->buffer, 0, sizeof(priv->buffer));
+
+
+ /* Once the TPM has completed the command the command remains readable
+ * until another command is issued. */
+ rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer));
+ dev_dbg(&chip->dev,
+ "%s: sts=%d", __func__, rc);
+ if (rc <= 0)
+ return 0;
+
+ priv->len = rc;
+
+ return ATMEL_STS_OK;
+}
+
+static bool i2c_atmel_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return false;
+}
+
+static const struct tpm_class_ops i2c_atmel = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = i2c_atmel_read_status,
+ .recv = i2c_atmel_recv,
+ .send = i2c_atmel_send,
+ .cancel = i2c_atmel_cancel,
+ .req_complete_mask = ATMEL_STS_OK,
+ .req_complete_val = ATMEL_STS_OK,
+ .req_canceled = i2c_atmel_req_canceled,
+};
+
+static int i2c_atmel_probe(struct i2c_client *client)
+{
+ struct tpm_chip *chip;
+ struct device *dev = &client->dev;
+ struct priv_data *priv;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ chip = tpmm_chip_alloc(dev, &i2c_atmel);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Default timeouts */
+ chip->timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
+ chip->timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+
+ dev_set_drvdata(&chip->dev, priv);
+
+ /* There is no known way to probe for this device, and all version
+ * information seems to be read via TPM commands. Thus we rely on the
+ * TPM startup process in the common code to detect the device. */
+
+ return tpm_chip_register(chip);
+}
+
+static void i2c_atmel_remove(struct i2c_client *client)
+{
+ struct device *dev = &(client->dev);
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ tpm_chip_unregister(chip);
+}
+
+static const struct i2c_device_id i2c_atmel_id[] = {
+ {I2C_DRIVER_NAME, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, i2c_atmel_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_atmel_of_match[] = {
+ {.compatible = "atmel,at97sc3204t"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_atmel_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(i2c_atmel_pm_ops, tpm_pm_suspend, tpm_pm_resume);
+
+static struct i2c_driver i2c_atmel_driver = {
+ .id_table = i2c_atmel_id,
+ .probe = i2c_atmel_probe,
+ .remove = i2c_atmel_remove,
+ .driver = {
+ .name = I2C_DRIVER_NAME,
+ .pm = &i2c_atmel_pm_ops,
+ .of_match_table = of_match_ptr(i2c_atmel_of_match),
+ },
+};
+
+module_i2c_driver(i2c_atmel_driver);
+
+MODULE_AUTHOR("Jason Gunthorpe <jgunthorpe@obsidianresearch.com>");
+MODULE_DESCRIPTION("Atmel TPM I2C Driver");
+MODULE_LICENSE("GPL");
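
For reference on the buffering scheme above: i2c_atmel_read_status() reads whatever the TPM currently exposes into a local cache, and i2c_atmel_recv() serves the response from that cache when the cached bytes already cover the length announced in the header, re-reading from the device otherwise. The sketch below models only that decision; the structure and sizes are invented for the illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct cached_rsp {
	size_t len;		/* bytes captured by the status poll */
	uint8_t buf[64];	/* first chunk of the response */
};

/* Returns true when the cached chunk already holds the full response of
 * expected_len bytes, i.e. no second I2C read is needed. */
static bool cache_is_complete(const struct cached_rsp *c, size_t expected_len)
{
	return c->len >= expected_len;
}

int main(void)
{
	struct cached_rsp c = { .len = 35 };

	return cache_is_complete(&c, 10) ? 0 : 1;	/* complete: exit 0 */
}
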
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
new file mode 100644
index 0000000000..81d8a78dc6
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -0,0 +1,732 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012,2013 Infineon Technologies
+ *
+ * Authors:
+ * Peter Huewe <peter.huewe@infineon.com>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.2, revision 1.0 and the
+ * Infineon I2C Protocol Stack Specification v0.20.
+ *
+ * It is based on the original tpm_tis device driver from Leendert van
+ * Doorn and Kylene Hall.
+ */
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include "tpm.h"
+
+#define TPM_I2C_INFINEON_BUFSIZE 1260
+
+/* max. number of iterations after I2C NAK */
+#define MAX_COUNT 3
+
+#define SLEEP_DURATION_LOW 55
+#define SLEEP_DURATION_HI 65
+
+/* max. number of iterations after I2C NAK for 'long' commands.
+ * We need this especially for sending TPM_READY, since the cleanup after the
+ * transition to the ready state may take some time, but it is unpredictable
+ * how long it will take.
+ */
+#define MAX_COUNT_LONG 50
+
+#define SLEEP_DURATION_LONG_LOW 200
+#define SLEEP_DURATION_LONG_HI 220
+
+/* After sending TPM_READY to 'reset' the TPM we have to sleep even longer */
+#define SLEEP_DURATION_RESET_LOW 2400
+#define SLEEP_DURATION_RESET_HI 2600
+
+/* we want to use usleep_range instead of msleep for the 5ms TPM_TIMEOUT */
+#define TPM_TIMEOUT_US_LOW (TPM_TIMEOUT * 1000)
+#define TPM_TIMEOUT_US_HI (TPM_TIMEOUT_US_LOW + 2000)
+
+/* expected value for DIDVID register */
+#define TPM_TIS_I2C_DID_VID_9635 0xd1150b00L
+#define TPM_TIS_I2C_DID_VID_9645 0x001a15d1L
+
+enum i2c_chip_type {
+ SLB9635,
+ SLB9645,
+ UNKNOWN,
+};
+
+struct tpm_inf_dev {
+ struct i2c_client *client;
+ int locality;
+ /* In addition to the data itself, the buffer must fit the 7-bit I2C
+ * address and the direction bit.
+ */
+ u8 buf[TPM_I2C_INFINEON_BUFSIZE + 1];
+ struct tpm_chip *chip;
+ enum i2c_chip_type chip_type;
+ unsigned int adapterlimit;
+};
+
+static struct tpm_inf_dev tpm_dev;
+
+/*
+ * iic_tpm_read() - read from TPM register
+ * @addr: register address to read from
+ * @buffer: provided by caller
+ * @len: number of bytes to read
+ *
+ * Read len bytes from TPM register and put them into
+ * buffer (little-endian format, i.e. first byte is put into buffer[0]).
+ *
+ * NOTE: TPM is big-endian for multi-byte values. Multi-byte
+ * values have to be swapped.
+ *
+ * NOTE: Unfortunately we cannot use the combined read/write functions
+ * provided by the i2c core, as the TPM currently does not support the
+ * repeated start condition and has additional special requirements.
+ * The i2c_smbus* functions do not work for this chip.
+ *
+ * Return -EIO on error, 0 on success.
+ */
+static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
+{
+
+ struct i2c_msg msg1 = {
+ .addr = tpm_dev.client->addr,
+ .len = 1,
+ .buf = &addr
+ };
+ struct i2c_msg msg2 = {
+ .addr = tpm_dev.client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buffer
+ };
+ struct i2c_msg msgs[] = {msg1, msg2};
+
+ int rc = 0;
+ int count;
+ unsigned int msglen = len;
+
+ /* Lock the adapter for the duration of the whole sequence. */
+ if (!tpm_dev.client->adapter->algo->master_xfer)
+ return -EOPNOTSUPP;
+ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
+
+ if (tpm_dev.chip_type == SLB9645) {
+ /* Use a combined read for newer chips.
+ * Unfortunately the SMBus functions are not suitable due to
+ * the 32 byte limit of SMBus.
+ * Retries should usually not be needed, but are kept just to
+ * be on the safe side.
+ */
+ for (count = 0; count < MAX_COUNT; count++) {
+ rc = __i2c_transfer(tpm_dev.client->adapter, msgs, 2);
+ if (rc > 0)
+ break; /* break here to skip sleep */
+ usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
+ }
+ } else {
+ /* Expect to send one command message and one data message, but
+ * support looping over each or both if necessary.
+ */
+ while (len > 0) {
+ /* slb9635 protocol should work in all cases */
+ for (count = 0; count < MAX_COUNT; count++) {
+ rc = __i2c_transfer(tpm_dev.client->adapter,
+ &msg1, 1);
+ if (rc > 0)
+ break; /* break here to skip sleep */
+
+ usleep_range(SLEEP_DURATION_LOW,
+ SLEEP_DURATION_HI);
+ }
+
+ if (rc <= 0)
+ goto out;
+
+ /* After the TPM has successfully received the register
+ * address it needs some time; thus we sleep here again
+ * before retrieving the data.
+ */
+ for (count = 0; count < MAX_COUNT; count++) {
+ if (tpm_dev.adapterlimit) {
+ msglen = min_t(unsigned int,
+ tpm_dev.adapterlimit,
+ len);
+ msg2.len = msglen;
+ }
+ usleep_range(SLEEP_DURATION_LOW,
+ SLEEP_DURATION_HI);
+ rc = __i2c_transfer(tpm_dev.client->adapter,
+ &msg2, 1);
+ if (rc > 0) {
+ /* Since len is unsigned, make doubly
+ * sure we do not underflow it.
+ */
+ if (msglen > len)
+ len = 0;
+ else
+ len -= msglen;
+ msg2.buf += msglen;
+ break;
+ }
+ /* If the I2C adapter rejected the request (e.g.
+ * when the quirk read_max_len < len), fall back
+ * to a sane minimum value and try again.
+ */
+ if (rc == -EOPNOTSUPP)
+ tpm_dev.adapterlimit =
+ I2C_SMBUS_BLOCK_MAX;
+ }
+
+ if (rc <= 0)
+ goto out;
+ }
+ }
+
+out:
+ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
+ /* take care of 'guard time' */
+ usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
+
+ /* __i2c_transfer returns the number of successfully transferred
+ * messages.
+ * So rc should be greater than 0 here; otherwise we have an error.
+ */
+ if (rc <= 0)
+ return -EIO;
+
+ return 0;
+}
+
+static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
+ unsigned int sleep_low,
+ unsigned int sleep_hi, u8 max_count)
+{
+ int rc = -EIO;
+ int count;
+
+ struct i2c_msg msg1 = {
+ .addr = tpm_dev.client->addr,
+ .len = len + 1,
+ .buf = tpm_dev.buf
+ };
+
+ if (len > TPM_I2C_INFINEON_BUFSIZE)
+ return -EINVAL;
+
+ if (!tpm_dev.client->adapter->algo->master_xfer)
+ return -EOPNOTSUPP;
+ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
+
+ /* prepend the 'register address' to the buffer */
+ tpm_dev.buf[0] = addr;
+ memcpy(&(tpm_dev.buf[1]), buffer, len);
+
+ /*
+ * NOTE: We have to use these special mechanisms here and unfortunately
+ * cannot rely on the standard behavior of i2c_transfer.
+ * Even for newer chips the smbus functions are not
+ * suitable due to the 32 byte limit of the smbus.
+ */
+ for (count = 0; count < max_count; count++) {
+ rc = __i2c_transfer(tpm_dev.client->adapter, &msg1, 1);
+ if (rc > 0)
+ break;
+ usleep_range(sleep_low, sleep_hi);
+ }
+
+ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
+ /* take care of 'guard time' */
+ usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
+
+ /* __i2c_transfer returns the number of successfully transferred
+ * messages.
+ * So rc should be greater than 0 here; otherwise we have an error.
+ */
+ if (rc <= 0)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * iic_tpm_write() - write to TPM register
+ * @addr: register address to write to
+ * @buffer: containing data to be written
+ * @len: number of bytes to write
+ *
+ * Write len bytes from provided buffer to TPM register (little
+ * endian format, i.e. buffer[0] is written as first byte).
+ *
+ * NOTE: TPM is big-endian for multi-byte values. Multi-byte
+ * values have to be swapped.
+ *
+ * NOTE: use this function instead of the iic_tpm_write_generic function.
+ *
+ * Return -EIO on error, 0 on success
+ */
+static int iic_tpm_write(u8 addr, u8 *buffer, size_t len)
+{
+ return iic_tpm_write_generic(addr, buffer, len, SLEEP_DURATION_LOW,
+ SLEEP_DURATION_HI, MAX_COUNT);
+}
+
+/*
+ * This function is needed especially for the cleanup situation after
+ * sending TPM_READY.
+ */
+static int iic_tpm_write_long(u8 addr, u8 *buffer, size_t len)
+{
+ return iic_tpm_write_generic(addr, buffer, len, SLEEP_DURATION_LONG_LOW,
+ SLEEP_DURATION_LONG_HI, MAX_COUNT_LONG);
+}
+
+enum tis_access {
+ TPM_ACCESS_VALID = 0x80,
+ TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
+ TPM_ACCESS_REQUEST_PENDING = 0x04,
+ TPM_ACCESS_REQUEST_USE = 0x02,
+};
+
+enum tis_status {
+ TPM_STS_VALID = 0x80,
+ TPM_STS_COMMAND_READY = 0x40,
+ TPM_STS_GO = 0x20,
+ TPM_STS_DATA_AVAIL = 0x10,
+ TPM_STS_DATA_EXPECT = 0x08,
+};
+
+enum tis_defaults {
+ TIS_SHORT_TIMEOUT = 750, /* ms */
+ TIS_LONG_TIMEOUT = 2000, /* 2 sec */
+};
+
+#define TPM_ACCESS(l) (0x0000 | ((l) << 4))
+#define TPM_STS(l) (0x0001 | ((l) << 4))
+#define TPM_DATA_FIFO(l) (0x0005 | ((l) << 4))
+#define TPM_DID_VID(l) (0x0006 | ((l) << 4))
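The locality is encoded in the upper nibble of each register address; a few worked expansions of the macros above (computed directly from the definitions, not part of the patch) make the register map easier to see:

/* Worked expansions of the locality/register encoding above:
 *   TPM_ACCESS(0)    = 0x00        TPM_ACCESS(1)    = 0x10
 *   TPM_STS(0)       = 0x01        TPM_STS(1)       = 0x11
 *   TPM_DATA_FIFO(0) = 0x05        TPM_DATA_FIFO(2) = 0x25
 *   TPM_DID_VID(0)   = 0x06
 */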
+
+static bool check_locality(struct tpm_chip *chip, int loc)
+{
+ u8 buf;
+ int rc;
+
+ rc = iic_tpm_read(TPM_ACCESS(loc), &buf, 1);
+ if (rc < 0)
+ return false;
+
+ if ((buf & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
+ tpm_dev.locality = loc;
+ return true;
+ }
+
+ return false;
+}
+
+/* implementation similar to tpm_tis */
+static void release_locality(struct tpm_chip *chip, int loc, int force)
+{
+ u8 buf;
+ if (iic_tpm_read(TPM_ACCESS(loc), &buf, 1) < 0)
+ return;
+
+ if (force || (buf & (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
+ (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) {
+ buf = TPM_ACCESS_ACTIVE_LOCALITY;
+ iic_tpm_write(TPM_ACCESS(loc), &buf, 1);
+ }
+}
+
+static int request_locality(struct tpm_chip *chip, int loc)
+{
+ unsigned long stop;
+ u8 buf = TPM_ACCESS_REQUEST_USE;
+
+ if (check_locality(chip, loc))
+ return loc;
+
+ iic_tpm_write(TPM_ACCESS(loc), &buf, 1);
+
+ /* wait for burstcount */
+ stop = jiffies + chip->timeout_a;
+ do {
+ if (check_locality(chip, loc))
+ return loc;
+ usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI);
+ } while (time_before(jiffies, stop));
+
+ return -ETIME;
+}
+
+static u8 tpm_tis_i2c_status(struct tpm_chip *chip)
+{
+ /* NOTE: since I2C read may fail, return 0 in this case --> time-out */
+ u8 buf = 0xFF;
+ u8 i = 0;
+
+ do {
+ if (iic_tpm_read(TPM_STS(tpm_dev.locality), &buf, 1) < 0)
+ return 0;
+
+ i++;
+ /* if locality is set, STS should not be 0xFF */
+ } while ((buf == 0xFF) && i < 10);
+
+ return buf;
+}
+
+static void tpm_tis_i2c_ready(struct tpm_chip *chip)
+{
+ /* this causes the current command to be aborted */
+ u8 buf = TPM_STS_COMMAND_READY;
+ iic_tpm_write_long(TPM_STS(tpm_dev.locality), &buf, 1);
+}
+
+static ssize_t get_burstcount(struct tpm_chip *chip)
+{
+ unsigned long stop;
+ ssize_t burstcnt;
+ u8 buf[3];
+
+ /* wait for burstcount */
+ /* which timeout value, spec has 2 answers (c & d) */
+ stop = jiffies + chip->timeout_d;
+ do {
+ /* Note: STS is little endian */
+ if (iic_tpm_read(TPM_STS(tpm_dev.locality)+1, buf, 3) < 0)
+ burstcnt = 0;
+ else
+ burstcnt = (buf[2] << 16) + (buf[1] << 8) + buf[0];
+
+ if (burstcnt)
+ return burstcnt;
+
+ usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI);
+ } while (time_before(jiffies, stop));
+ return -EBUSY;
+}
+
+static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
+ int *status)
+{
+ unsigned long stop;
+
+ /* check current status */
+ *status = tpm_tis_i2c_status(chip);
+ if ((*status != 0xFF) && (*status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+ do {
+ /* since we just checked the status, give the TPM some time */
+ usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI);
+ *status = tpm_tis_i2c_status(chip);
+ if ((*status & mask) == mask)
+ return 0;
+
+ } while (time_before(jiffies, stop));
+
+ return -ETIME;
+}
+
+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ size_t size = 0;
+ ssize_t burstcnt;
+ u8 retries = 0;
+ int rc;
+
+ while (size < count) {
+ burstcnt = get_burstcount(chip);
+
+ /* burstcnt < 0 = TPM is busy */
+ if (burstcnt < 0)
+ return burstcnt;
+
+ /* limit received data to max. left */
+ if (burstcnt > (count - size))
+ burstcnt = count - size;
+
+ rc = iic_tpm_read(TPM_DATA_FIFO(tpm_dev.locality),
+ &(buf[size]), burstcnt);
+ if (rc == 0)
+ size += burstcnt;
+ else if (rc < 0)
+ retries++;
+
+ /* avoid endless loop in case of broken HW */
+ if (retries > MAX_COUNT_LONG)
+ return -EIO;
+ }
+ return size;
+}
+
+static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ int size = 0;
+ int status;
+ u32 expected;
+
+ if (count < TPM_HEADER_SIZE) {
+ size = -EIO;
+ goto out;
+ }
+
+ /* read first 10 bytes, including tag, paramsize, and result */
+ size = recv_data(chip, buf, TPM_HEADER_SIZE);
+ if (size < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev, "Unable to read header\n");
+ goto out;
+ }
+
+ expected = be32_to_cpu(*(__be32 *)(buf + 2));
+ if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
+ size = -EIO;
+ goto out;
+ }
+
+ size += recv_data(chip, &buf[TPM_HEADER_SIZE],
+ expected - TPM_HEADER_SIZE);
+ if (size < expected) {
+ dev_err(&chip->dev, "Unable to read remainder of result\n");
+ size = -ETIME;
+ goto out;
+ }
+
+ wait_for_stat(chip, TPM_STS_VALID, chip->timeout_c, &status);
+ if (status & TPM_STS_DATA_AVAIL) { /* retry? */
+ dev_err(&chip->dev, "Error left over data\n");
+ size = -EIO;
+ goto out;
+ }
+
+out:
+ tpm_tis_i2c_ready(chip);
+ /* The TPM needs some time to clean up here,
+ * so we sleep rather than keeping the bus busy
+ */
+ usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI);
+ release_locality(chip, tpm_dev.locality, 0);
+ return size;
+}
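For reference, a minimal sketch (illustrative only, not part of the patch) of the 10-byte response header that tpm_tis_i2c_recv() parses: TPM_HEADER_SIZE is 2 + 4 + 4 bytes, and the big-endian length field at offset 2 is what becomes 'expected' above.

/* Sketch only: the fixed TPM response header layout assumed above. */
struct tpm_i2c_resp_header_example {
	__be16 tag;		/* buf[0..1] */
	__be32 length;		/* buf[2..5], total size including header */
	__be32 return_code;	/* buf[6..9] */
} __packed;
/* expected = be32_to_cpu(hdr->length), as done in tpm_tis_i2c_recv(). */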
+
+static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ int rc, status;
+ ssize_t burstcnt;
+ size_t count = 0;
+ u8 retries = 0;
+ u8 sts = TPM_STS_GO;
+
+ if (len > TPM_I2C_INFINEON_BUFSIZE)
+ return -E2BIG;
+
+ if (request_locality(chip, 0) < 0)
+ return -EBUSY;
+
+ status = tpm_tis_i2c_status(chip);
+ if ((status & TPM_STS_COMMAND_READY) == 0) {
+ tpm_tis_i2c_ready(chip);
+ if (wait_for_stat
+ (chip, TPM_STS_COMMAND_READY,
+ chip->timeout_b, &status) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
+ }
+
+ while (count < len - 1) {
+ burstcnt = get_burstcount(chip);
+
+ /* burstcnt < 0 = TPM is busy */
+ if (burstcnt < 0)
+ return burstcnt;
+
+ if (burstcnt > (len - 1 - count))
+ burstcnt = len - 1 - count;
+
+ rc = iic_tpm_write(TPM_DATA_FIFO(tpm_dev.locality),
+ &(buf[count]), burstcnt);
+ if (rc == 0)
+ count += burstcnt;
+ else if (rc < 0)
+ retries++;
+
+ /* avoid endless loop in case of broken HW */
+ if (retries > MAX_COUNT_LONG) {
+ rc = -EIO;
+ goto out_err;
+ }
+
+ wait_for_stat(chip, TPM_STS_VALID,
+ chip->timeout_c, &status);
+
+ if ((status & TPM_STS_DATA_EXPECT) == 0) {
+ rc = -EIO;
+ goto out_err;
+ }
+ }
+
+ /* write last byte */
+ iic_tpm_write(TPM_DATA_FIFO(tpm_dev.locality), &(buf[count]), 1);
+ wait_for_stat(chip, TPM_STS_VALID, chip->timeout_c, &status);
+ if ((status & TPM_STS_DATA_EXPECT) != 0) {
+ rc = -EIO;
+ goto out_err;
+ }
+
+ /* go and do it */
+ iic_tpm_write(TPM_STS(tpm_dev.locality), &sts, 1);
+
+ return 0;
+out_err:
+ tpm_tis_i2c_ready(chip);
+ /* The TPM needs some time to clean up here,
+ * so we sleep rather than keeping the bus busy
+ */
+ usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI);
+ release_locality(chip, tpm_dev.locality, 0);
+ return rc;
+}
+
+static bool tpm_tis_i2c_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == TPM_STS_COMMAND_READY);
+}
+
+static const struct tpm_class_ops tpm_tis_i2c = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = tpm_tis_i2c_status,
+ .recv = tpm_tis_i2c_recv,
+ .send = tpm_tis_i2c_send,
+ .cancel = tpm_tis_i2c_ready,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = tpm_tis_i2c_req_canceled,
+};
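Roughly, the TPM core consumes these hooks while polling for command completion; the following is a hedged sketch of that logic, not code from this driver or from the core:

/* Hedged sketch of how the core interprets the fields above:
 *
 *	u8 status = chip->ops->status(chip);
 *
 *	if ((status & chip->ops->req_complete_mask) ==
 *	    chip->ops->req_complete_val)
 *		;	// response ready: core calls chip->ops->recv()
 *	else if (chip->ops->req_canceled(chip, status))
 *		;	// command aborted: core stops waiting
 */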
+
+static int tpm_tis_i2c_init(struct device *dev)
+{
+ u32 vendor;
+ int rc = 0;
+ struct tpm_chip *chip;
+
+ chip = tpmm_chip_alloc(dev, &tpm_tis_i2c);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ /* Default timeouts */
+ chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+ chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
+ if (request_locality(chip, 0) != 0) {
+ dev_err(dev, "could not request locality\n");
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ /* read four bytes from DID_VID register */
+ if (iic_tpm_read(TPM_DID_VID(0), (u8 *)&vendor, 4) < 0) {
+ dev_err(dev, "could not read vendor id\n");
+ rc = -EIO;
+ goto out_release;
+ }
+
+ if (vendor == TPM_TIS_I2C_DID_VID_9645) {
+ tpm_dev.chip_type = SLB9645;
+ } else if (vendor == TPM_TIS_I2C_DID_VID_9635) {
+ tpm_dev.chip_type = SLB9635;
+ } else {
+ dev_err(dev, "vendor id did not match! ID was %08x\n", vendor);
+ rc = -ENODEV;
+ goto out_release;
+ }
+
+ dev_info(dev, "1.2 TPM (device-id 0x%X)\n", vendor >> 16);
+
+ tpm_dev.chip = chip;
+
+ return tpm_chip_register(chip);
+out_release:
+ release_locality(chip, tpm_dev.locality, 1);
+ tpm_dev.client = NULL;
+out_err:
+ return rc;
+}
+
+static const struct i2c_device_id tpm_tis_i2c_table[] = {
+ {"tpm_i2c_infineon"},
+ {"slb9635tt"},
+ {"slb9645tt"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_table);
+
+#ifdef CONFIG_OF
+static const struct of_device_id tpm_tis_i2c_of_match[] = {
+ {.compatible = "infineon,tpm_i2c_infineon"},
+ {.compatible = "infineon,slb9635tt"},
+ {.compatible = "infineon,slb9645tt"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, tpm_tis_i2c_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_i2c_ops, tpm_pm_suspend, tpm_pm_resume);
+
+static int tpm_tis_i2c_probe(struct i2c_client *client)
+{
+ int rc;
+ struct device *dev = &(client->dev);
+
+ if (tpm_dev.client != NULL) {
+ dev_err(dev, "This driver only supports one client at a time\n");
+ return -EBUSY; /* We only support one client */
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "no algorithms associated to the i2c bus\n");
+ return -ENODEV;
+ }
+
+ tpm_dev.client = client;
+ rc = tpm_tis_i2c_init(&client->dev);
+ if (rc != 0) {
+ tpm_dev.client = NULL;
+ rc = -ENODEV;
+ }
+ return rc;
+}
+
+static void tpm_tis_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = tpm_dev.chip;
+
+ tpm_chip_unregister(chip);
+ release_locality(chip, tpm_dev.locality, 1);
+ tpm_dev.client = NULL;
+}
+
+static struct i2c_driver tpm_tis_i2c_driver = {
+ .id_table = tpm_tis_i2c_table,
+ .probe = tpm_tis_i2c_probe,
+ .remove = tpm_tis_i2c_remove,
+ .driver = {
+ .name = "tpm_i2c_infineon",
+ .pm = &tpm_tis_i2c_ops,
+ .of_match_table = of_match_ptr(tpm_tis_i2c_of_match),
+ },
+};
+
+module_i2c_driver(tpm_tis_i2c_driver);
+MODULE_AUTHOR("Peter Huewe <peter.huewe@infineon.com>");
+MODULE_DESCRIPTION("TPM TIS I2C Infineon Driver");
+MODULE_VERSION("2.2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
new file mode 100644
index 0000000000..d7be03c410
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+ /******************************************************************************
+ * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501/NPCT6XX,
+ * based on the TCG TPM Interface Spec version 1.2.
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * Copyright (C) 2011, Nuvoton Technology Corporation.
+ * Dan Morav <dan.morav@nuvoton.com>
+ * Copyright (C) 2013, Obsidian Research Corp.
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ *
+ * Nuvoton contact information: APC.Support@nuvoton.com
+ *****************************************************************************/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include "tpm.h"
+
+/* I2C interface offsets */
+#define TPM_STS 0x00
+#define TPM_BURST_COUNT 0x01
+#define TPM_DATA_FIFO_W 0x20
+#define TPM_DATA_FIFO_R 0x40
+#define TPM_VID_DID_RID 0x60
+#define TPM_I2C_RETRIES 5
+/*
+ * I2C bus device maximum buffer size w/o counting I2C address or command
+ * i.e. max size required for I2C write is 34 = addr, command, 32 bytes data
+ */
+#define TPM_I2C_MAX_BUF_SIZE 32
+#define TPM_I2C_RETRY_COUNT 32
+#define TPM_I2C_BUS_DELAY 1000 /* usec */
+#define TPM_I2C_RETRY_DELAY_SHORT (2 * 1000) /* usec */
+#define TPM_I2C_RETRY_DELAY_LONG (10 * 1000) /* usec */
+#define TPM_I2C_DELAY_RANGE 300 /* usec */
+
+#define OF_IS_TPM2 ((void *)1)
+#define I2C_IS_TPM2 1
+
+struct priv_data {
+ int irq;
+ unsigned int intrs;
+ wait_queue_head_t read_queue;
+};
+
+static s32 i2c_nuvoton_read_buf(struct i2c_client *client, u8 offset, u8 size,
+ u8 *data)
+{
+ s32 status;
+
+ status = i2c_smbus_read_i2c_block_data(client, offset, size, data);
+ dev_dbg(&client->dev,
+ "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__,
+ offset, size, (int)size, data, status);
+ return status;
+}
+
+static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size,
+ u8 *data)
+{
+ s32 status;
+
+ status = i2c_smbus_write_i2c_block_data(client, offset, size, data);
+ dev_dbg(&client->dev,
+ "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__,
+ offset, size, (int)size, data, status);
+ return status;
+}
+
+#define TPM_STS_VALID 0x80
+#define TPM_STS_COMMAND_READY 0x40
+#define TPM_STS_GO 0x20
+#define TPM_STS_DATA_AVAIL 0x10
+#define TPM_STS_EXPECT 0x08
+#define TPM_STS_RESPONSE_RETRY 0x02
+#define TPM_STS_ERR_VAL 0x07 /* bit2...bit0 reads always 0 */
+
+#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */
+#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */
+
+/* read TPM_STS register */
+static u8 i2c_nuvoton_read_status(struct tpm_chip *chip)
+{
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ s32 status;
+ u8 data;
+
+ status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data);
+ if (status <= 0) {
+ dev_err(&chip->dev, "%s() error return %d\n", __func__,
+ status);
+ data = TPM_STS_ERR_VAL;
+ }
+
+ return data;
+}
+
+/* write byte to TPM_STS register */
+static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data)
+{
+ s32 status;
+ int i;
+
+ /* this causes the current command to be aborted */
+ for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) {
+ status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data);
+ if (status < 0)
+ usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
+ + TPM_I2C_DELAY_RANGE);
+ }
+ return status;
+}
+
+/* write commandReady to TPM_STS register */
+static void i2c_nuvoton_ready(struct tpm_chip *chip)
+{
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ s32 status;
+
+ /* this causes the current command to be aborted */
+ status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY);
+ if (status < 0)
+ dev_err(&chip->dev,
+ "%s() fail to write TPM_STS.commandReady\n", __func__);
+}
+
+/* read burstCount field from TPM_STS register
+ * return -1 if the read fails */
+static int i2c_nuvoton_get_burstcount(struct i2c_client *client,
+ struct tpm_chip *chip)
+{
+ unsigned long stop = jiffies + chip->timeout_d;
+ s32 status;
+ int burst_count = -1;
+ u8 data;
+
+ /* wait for burstcount to be non-zero */
+ do {
+ /* in I2C burstCount is 1 byte */
+ status = i2c_nuvoton_read_buf(client, TPM_BURST_COUNT, 1,
+ &data);
+ if (status > 0 && data > 0) {
+ burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data);
+ break;
+ }
+ usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
+ + TPM_I2C_DELAY_RANGE);
+ } while (time_before(jiffies, stop));
+
+ return burst_count;
+}
+
+/*
+ * WPCT301/NPCT501/NPCT6XX SINT# supports only dataAvail;
+ * any call to this function which is not waiting for dataAvail will
+ * set queue to NULL to avoid waiting for the interrupt.
+ */
+static bool i2c_nuvoton_check_status(struct tpm_chip *chip, u8 mask, u8 value)
+{
+ u8 status = i2c_nuvoton_read_status(chip);
+ return (status != TPM_STS_ERR_VAL) && ((status & mask) == value);
+}
+
+static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value,
+ u32 timeout, wait_queue_head_t *queue)
+{
+ if ((chip->flags & TPM_CHIP_FLAG_IRQ) && queue) {
+ s32 rc;
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned int cur_intrs = priv->intrs;
+
+ enable_irq(priv->irq);
+ rc = wait_event_interruptible_timeout(*queue,
+ cur_intrs != priv->intrs,
+ timeout);
+ if (rc > 0)
+ return 0;
+ /* At this point we know that the SINT pin is asserted, so we
+ * do not need to do i2c_nuvoton_check_status */
+ } else {
+ unsigned long ten_msec, stop;
+ bool status_valid;
+
+ /* check current status */
+ status_valid = i2c_nuvoton_check_status(chip, mask, value);
+ if (status_valid)
+ return 0;
+
+ /* use polling to wait for the event */
+ ten_msec = jiffies + usecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG);
+ stop = jiffies + timeout;
+ do {
+ if (time_before(jiffies, ten_msec))
+ usleep_range(TPM_I2C_RETRY_DELAY_SHORT,
+ TPM_I2C_RETRY_DELAY_SHORT
+ + TPM_I2C_DELAY_RANGE);
+ else
+ usleep_range(TPM_I2C_RETRY_DELAY_LONG,
+ TPM_I2C_RETRY_DELAY_LONG
+ + TPM_I2C_DELAY_RANGE);
+ status_valid = i2c_nuvoton_check_status(chip, mask,
+ value);
+ if (status_valid)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ dev_err(&chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
+ value);
+ return -ETIMEDOUT;
+}
+
+/* wait for dataAvail field to be set in the TPM_STS register */
+static int i2c_nuvoton_wait_for_data_avail(struct tpm_chip *chip, u32 timeout,
+ wait_queue_head_t *queue)
+{
+ return i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ timeout, queue);
+}
+
+/* Read @count bytes into @buf from TPM_RD_FIFO register */
+static int i2c_nuvoton_recv_data(struct i2c_client *client,
+ struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ s32 rc;
+ int burst_count, bytes2read, size = 0;
+
+ while (size < count &&
+ i2c_nuvoton_wait_for_data_avail(chip,
+ chip->timeout_c,
+ &priv->read_queue) == 0) {
+ burst_count = i2c_nuvoton_get_burstcount(client, chip);
+ if (burst_count < 0) {
+ dev_err(&chip->dev,
+ "%s() fail to read burstCount=%d\n", __func__,
+ burst_count);
+ return -EIO;
+ }
+ bytes2read = min_t(size_t, burst_count, count - size);
+ rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R,
+ bytes2read, &buf[size]);
+ if (rc < 0) {
+ dev_err(&chip->dev,
+ "%s() fail on i2c_nuvoton_read_buf()=%d\n",
+ __func__, rc);
+ return -EIO;
+ }
+ dev_dbg(&chip->dev, "%s(%d):", __func__, bytes2read);
+ size += bytes2read;
+ }
+
+ return size;
+}
+
+/* Read TPM command results */
+static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct device *dev = chip->dev.parent;
+ struct i2c_client *client = to_i2c_client(dev);
+ s32 rc;
+ int status;
+ int burst_count;
+ int retries;
+ int size = 0;
+ u32 expected;
+
+ if (count < TPM_HEADER_SIZE) {
+ i2c_nuvoton_ready(chip); /* return to idle */
+ dev_err(dev, "%s() count < header size\n", __func__);
+ return -EIO;
+ }
+ for (retries = 0; retries < TPM_I2C_RETRIES; retries++) {
+ if (retries > 0) {
+ /* if this is not the first trial, set responseRetry */
+ i2c_nuvoton_write_status(client,
+ TPM_STS_RESPONSE_RETRY);
+ }
+ /*
+ * read first available (> 10 bytes), including:
+ * tag, paramsize, and result
+ */
+ status = i2c_nuvoton_wait_for_data_avail(
+ chip, chip->timeout_c, &priv->read_queue);
+ if (status != 0) {
+ dev_err(dev, "%s() timeout on dataAvail\n", __func__);
+ size = -ETIMEDOUT;
+ continue;
+ }
+ burst_count = i2c_nuvoton_get_burstcount(client, chip);
+ if (burst_count < 0) {
+ dev_err(dev, "%s() fail to get burstCount\n", __func__);
+ size = -EIO;
+ continue;
+ }
+ size = i2c_nuvoton_recv_data(client, chip, buf,
+ burst_count);
+ if (size < TPM_HEADER_SIZE) {
+ dev_err(dev, "%s() fail to read header\n", __func__);
+ size = -EIO;
+ continue;
+ }
+ /*
+ * convert number of expected bytes field from big endian 32 bit
+ * to machine native
+ */
+ expected = be32_to_cpu(*(__be32 *) (buf + 2));
+ if (expected > count || expected < size) {
+ dev_err(dev, "%s() expected > count\n", __func__);
+ size = -EIO;
+ continue;
+ }
+ rc = i2c_nuvoton_recv_data(client, chip, &buf[size],
+ expected - size);
+ size += rc;
+ if (rc < 0 || size < expected) {
+ dev_err(dev, "%s() fail to read remainder of result\n",
+ __func__);
+ size = -EIO;
+ continue;
+ }
+ if (i2c_nuvoton_wait_for_stat(
+ chip, TPM_STS_VALID | TPM_STS_DATA_AVAIL,
+ TPM_STS_VALID, chip->timeout_c,
+ NULL)) {
+ dev_err(dev, "%s() error left over data\n", __func__);
+ size = -ETIMEDOUT;
+ continue;
+ }
+ break;
+ }
+ i2c_nuvoton_ready(chip);
+ dev_dbg(&chip->dev, "%s() -> %d\n", __func__, size);
+ return size;
+}
+
+/*
+ * Send TPM command.
+ *
+ * If interrupts are used (signaled by an irq set in the vendor structure)
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct device *dev = chip->dev.parent;
+ struct i2c_client *client = to_i2c_client(dev);
+ u32 ordinal;
+ unsigned long duration;
+ size_t count = 0;
+ int burst_count, bytes2write, retries, rc = -EIO;
+
+ for (retries = 0; retries < TPM_RETRY; retries++) {
+ i2c_nuvoton_ready(chip);
+ if (i2c_nuvoton_wait_for_stat(chip, TPM_STS_COMMAND_READY,
+ TPM_STS_COMMAND_READY,
+ chip->timeout_b, NULL)) {
+ dev_err(dev, "%s() timeout on commandReady\n",
+ __func__);
+ rc = -EIO;
+ continue;
+ }
+ rc = 0;
+ while (count < len - 1) {
+ burst_count = i2c_nuvoton_get_burstcount(client,
+ chip);
+ if (burst_count < 0) {
+ dev_err(dev, "%s() fail get burstCount\n",
+ __func__);
+ rc = -EIO;
+ break;
+ }
+ bytes2write = min_t(size_t, burst_count,
+ len - 1 - count);
+ rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W,
+ bytes2write, &buf[count]);
+ if (rc < 0) {
+ dev_err(dev, "%s() fail i2cWriteBuf\n",
+ __func__);
+ break;
+ }
+ dev_dbg(dev, "%s(%d):", __func__, bytes2write);
+ count += bytes2write;
+ rc = i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_VALID |
+ TPM_STS_EXPECT,
+ TPM_STS_VALID |
+ TPM_STS_EXPECT,
+ chip->timeout_c,
+ NULL);
+ if (rc < 0) {
+ dev_err(dev, "%s() timeout on Expect\n",
+ __func__);
+ rc = -ETIMEDOUT;
+ break;
+ }
+ }
+ if (rc < 0)
+ continue;
+
+ /* write last byte */
+ rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, 1,
+ &buf[count]);
+ if (rc < 0) {
+ dev_err(dev, "%s() fail to write last byte\n",
+ __func__);
+ rc = -EIO;
+ continue;
+ }
+ dev_dbg(dev, "%s(last): %02x", __func__, buf[count]);
+ rc = i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_VALID | TPM_STS_EXPECT,
+ TPM_STS_VALID,
+ chip->timeout_c, NULL);
+ if (rc) {
+ dev_err(dev, "%s() timeout on Expect to clear\n",
+ __func__);
+ rc = -ETIMEDOUT;
+ continue;
+ }
+ break;
+ }
+ if (rc < 0) {
+ /* retries == TPM_RETRY */
+ i2c_nuvoton_ready(chip);
+ return rc;
+ }
+ /* execute the TPM command */
+ rc = i2c_nuvoton_write_status(client, TPM_STS_GO);
+ if (rc < 0) {
+ dev_err(dev, "%s() fail to write Go\n", __func__);
+ i2c_nuvoton_ready(chip);
+ return rc;
+ }
+ ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+ duration = tpm_calc_ordinal_duration(chip, ordinal);
+
+ rc = i2c_nuvoton_wait_for_data_avail(chip, duration, &priv->read_queue);
+ if (rc) {
+ dev_err(dev, "%s() timeout command duration %ld\n",
+ __func__, duration);
+ i2c_nuvoton_ready(chip);
+ return rc;
+ }
+
+ dev_dbg(dev, "%s() -> %zd\n", __func__, len);
+ return 0;
+}
+
+static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == TPM_STS_COMMAND_READY);
+}
+
+static const struct tpm_class_ops tpm_i2c = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = i2c_nuvoton_read_status,
+ .recv = i2c_nuvoton_recv,
+ .send = i2c_nuvoton_send,
+ .cancel = i2c_nuvoton_ready,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = i2c_nuvoton_req_canceled,
+};
+
+/* The only purpose for the handler is to signal to any waiting threads that
+ * the interrupt is currently being asserted. The driver does not do any
+ * processing triggered by interrupts, and the chip provides no way to mask at
+ * the source (plus that would be slow over I2C). Run the IRQ as a one-shot;
+ * this means it cannot be shared. */
+static irqreturn_t i2c_nuvoton_int_handler(int dummy, void *dev_id)
+{
+ struct tpm_chip *chip = dev_id;
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ priv->intrs++;
+ wake_up(&priv->read_queue);
+ disable_irq_nosync(priv->irq);
+ return IRQ_HANDLED;
+}
+
+static int get_vid(struct i2c_client *client, u32 *res)
+{
+ static const u8 vid_did_rid_value[] = { 0x50, 0x10, 0xfe };
+ u32 temp;
+ s32 rc;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+ rc = i2c_nuvoton_read_buf(client, TPM_VID_DID_RID, 4, (u8 *)&temp);
+ if (rc < 0)
+ return rc;
+
+ /* check WPCT301 values - ignore RID */
+ if (memcmp(&temp, vid_did_rid_value, sizeof(vid_did_rid_value))) {
+ /*
+ * f/w rev 2.81 has an issue where the VID_DID_RID is not
+ * reporting the right value, so give it another chance at
+ * offset 0x20 (FIFO_W).
+ */
+ rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_W, 4,
+ (u8 *) (&temp));
+ if (rc < 0)
+ return rc;
+
+ /* check WPCT301 values - ignore RID */
+ if (memcmp(&temp, vid_did_rid_value,
+ sizeof(vid_did_rid_value)))
+ return -ENODEV;
+ }
+
+ *res = temp;
+ return 0;
+}
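To make the memcmp above concrete, a sketch (assuming a little-endian host, matching the casts used in the probe below) of how the 4 bytes read from TPM_VID_DID_RID land in the u32:

/* Illustration only:
 *   bytes 0..1 = VID (0x1050 expected, per vid_did_rid_value)
 *   byte  2    = DID (0xfe expected)
 *   byte  3    = RID (not checked by the memcmp above)
 * so (u16)vid, (u8)(vid >> 16) and (u8)(vid >> 24) recover VID, DID, RID.
 */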
+
+static int i2c_nuvoton_probe(struct i2c_client *client)
+{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
+ int rc;
+ struct tpm_chip *chip;
+ struct device *dev = &client->dev;
+ struct priv_data *priv;
+ u32 vid = 0;
+
+ rc = get_vid(client, &vid);
+ if (rc)
+ return rc;
+
+ dev_info(dev, "VID: %04X DID: %02X RID: %02X\n", (u16) vid,
+ (u8) (vid >> 16), (u8) (vid >> 24));
+
+ chip = tpmm_chip_alloc(dev, &tpm_i2c);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (dev->of_node) {
+ const struct of_device_id *of_id;
+
+ of_id = of_match_device(dev->driver->of_match_table, dev);
+ if (of_id && of_id->data == OF_IS_TPM2)
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+ } else
+ if (id->driver_data == I2C_IS_TPM2)
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+ init_waitqueue_head(&priv->read_queue);
+
+ /* Default timeouts */
+ chip->timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
+ chip->timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+
+ dev_set_drvdata(&chip->dev, priv);
+
+ /*
+ /*
+ * I2C intfcaps (interrupt capabilities) in the chip are hard coded to:
+ * TPM_INTF_INT_LEVEL_LOW | TPM_INTF_DATA_AVAIL_INT
+ * The IRQ should be set in the i2c_board_info (which is done
+ * automatically in of_i2c_register_devices for device tree users).
+ */
+ priv->irq = client->irq;
+ if (client->irq) {
+ dev_dbg(dev, "%s() priv->irq\n", __func__);
+ rc = devm_request_irq(dev, client->irq,
+ i2c_nuvoton_int_handler,
+ IRQF_TRIGGER_LOW,
+ dev_name(&chip->dev),
+ chip);
+ if (rc) {
+ dev_err(dev, "%s() Unable to request irq: %d for use\n",
+ __func__, priv->irq);
+ priv->irq = 0;
+ } else {
+ chip->flags |= TPM_CHIP_FLAG_IRQ;
+ /* Clear any pending interrupt */
+ i2c_nuvoton_ready(chip);
+ /* - wait for TPM_STS==0xA0 (stsValid, commandReady) */
+ rc = i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_COMMAND_READY,
+ TPM_STS_COMMAND_READY,
+ chip->timeout_b,
+ NULL);
+ if (rc == 0) {
+ /*
+ * TIS is in ready state
+ * write dummy byte to enter reception state
+ * TPM_DATA_FIFO_W <- rc (0)
+ */
+ rc = i2c_nuvoton_write_buf(client,
+ TPM_DATA_FIFO_W,
+ 1, (u8 *) (&rc));
+ if (rc < 0)
+ return rc;
+ /* TPM_STS <- 0x40 (commandReady) */
+ i2c_nuvoton_ready(chip);
+ } else {
+ /*
+ * timeout_b reached - command was
+ * aborted. TIS should now be in idle state -
+ * only TPM_STS_VALID should be set
+ */
+ if (i2c_nuvoton_read_status(chip) !=
+ TPM_STS_VALID)
+ return -EIO;
+ }
+ }
+ }
+
+ return tpm_chip_register(chip);
+}
+
+static void i2c_nuvoton_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+
+ tpm_chip_unregister(chip);
+}
+
+static const struct i2c_device_id i2c_nuvoton_id[] = {
+ {"tpm_i2c_nuvoton"},
+ {"tpm2_i2c_nuvoton", .driver_data = I2C_IS_TPM2},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_nuvoton_of_match[] = {
+ {.compatible = "nuvoton,npct501"},
+ {.compatible = "winbond,wpct301"},
+ {.compatible = "nuvoton,npct601", .data = OF_IS_TPM2},
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_nuvoton_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(i2c_nuvoton_pm_ops, tpm_pm_suspend, tpm_pm_resume);
+
+static struct i2c_driver i2c_nuvoton_driver = {
+ .id_table = i2c_nuvoton_id,
+ .probe = i2c_nuvoton_probe,
+ .remove = i2c_nuvoton_remove,
+ .driver = {
+ .name = "tpm_i2c_nuvoton",
+ .pm = &i2c_nuvoton_pm_ops,
+ .of_match_table = of_match_ptr(i2c_nuvoton_of_match),
+ },
+};
+
+module_i2c_driver(i2c_nuvoton_driver);
+
+MODULE_AUTHOR("Dan Morav (dan.morav@nuvoton.com)");
+MODULE_DESCRIPTION("Nuvoton TPM I2C Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
new file mode 100644
index 0000000000..d3989b257f
--- /dev/null
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012-2020 IBM Corporation
+ *
+ * Author: Ashley Lai <ashleydlai@gmail.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <asm/vio.h>
+#include <asm/irq.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <asm/prom.h>
+
+#include "tpm.h"
+#include "tpm_ibmvtpm.h"
+
+static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";
+
+static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
+ { "IBM,vtpm", "IBM,vtpm"},
+ { "IBM,vtpm", "IBM,vtpm20"},
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
+
+/**
+ * ibmvtpm_send_crq_word() - Send a CRQ request
+ * @vdev: vio device struct
+ * @w1: pre-constructed first word of tpm crq (second word is reserved)
+ *
+ * Return:
+ * 0 - Success
+ * Non-zero - Failure
+ */
+static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1)
+{
+ return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0);
+}
+
+/**
+ * ibmvtpm_send_crq() - Send a CRQ request
+ *
+ * @vdev: vio device struct
+ * @valid: Valid field
+ * @msg: Type field
+ * @len: Length field
+ * @data: Data field
+ *
+ * The ibmvtpm crq is defined as follows:
+ *
+ * Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7
+ * -----------------------------------------------------------------------
+ * Word0 | Valid | Type | Length | Data
+ * -----------------------------------------------------------------------
+ * Word1 | Reserved
+ * -----------------------------------------------------------------------
+ *
+ * Which matches the following structure (on bigendian host):
+ *
+ * struct ibmvtpm_crq {
+ * u8 valid;
+ * u8 msg;
+ * __be16 len;
+ * __be32 data;
+ * __be64 reserved;
+ * } __attribute__((packed, aligned(8)));
+ *
+ * However, the value is passed in a register so just compute the numeric value
+ * to load into the register avoiding byteswap altogether. Endian only affects
+ * memory loads and stores - registers are internally represented the same.
+ *
+ * Return:
+ * 0 (H_SUCCESS) - Success
+ * Non-zero - Failure
+ */
+static int ibmvtpm_send_crq(struct vio_dev *vdev,
+ u8 valid, u8 msg, u16 len, u32 data)
+{
+ u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) |
+ (u64)data;
+ return ibmvtpm_send_crq_word(vdev, w1);
+}
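As a worked example of the packing above (the length and DMA handle are hypothetical), a TPM-command CRQ word would be built like this:

/* Worked example, hypothetical values: packing a TPM-command CRQ word. */
u64 w1 = ((u64)IBMVTPM_VALID_CMD << 56) |	/* 0x80 -> valid   */
	 ((u64)VTPM_TPM_COMMAND   << 48) |	/* 0x02 -> msg     */
	 ((u64)10                 << 32) |	/* len = 10 bytes  */
	 (u64)0x12345678;			/* rtce DMA handle */
/* w1 == 0x8002000a12345678 */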
+
+/**
+ * tpm_ibmvtpm_recv - Receive data after send
+ *
+ * @chip: tpm chip struct
+ * @buf: buffer to read
+ * @count: size of buffer
+ *
+ * Return:
+ * Number of bytes read
+ */
+static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+ u16 len;
+
+ if (!ibmvtpm->rtce_buf) {
+ dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
+ return 0;
+ }
+
+ len = ibmvtpm->res_len;
+
+ if (count < len) {
+ dev_err(ibmvtpm->dev,
+ "Invalid size in recv: count=%zd, crq_size=%d\n",
+ count, len);
+ return -EIO;
+ }
+
+ spin_lock(&ibmvtpm->rtce_lock);
+ memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
+ memset(ibmvtpm->rtce_buf, 0, len);
+ ibmvtpm->res_len = 0;
+ spin_unlock(&ibmvtpm->rtce_lock);
+ return len;
+}
+
+/**
+ * ibmvtpm_crq_send_init - Send a CRQ initialize message
+ * @ibmvtpm: vtpm device struct
+ *
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
+ */
+static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
+{
+ int rc;
+
+ rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
+ if (rc != H_SUCCESS)
+ dev_err(ibmvtpm->dev,
+ "%s failed rc=%d\n", __func__, rc);
+
+ return rc;
+}
+
+/**
+ * tpm_ibmvtpm_resume - Resume from suspend
+ *
+ * @dev: device struct
+ *
+ * Return: Always 0.
+ */
+static int tpm_ibmvtpm_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+ int rc = 0;
+
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_ENABLE_CRQ,
+ ibmvtpm->vdev->unit_address);
+ } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ if (rc) {
+ dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = vio_enable_interrupts(ibmvtpm->vdev);
+ if (rc) {
+ dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = ibmvtpm_crq_send_init(ibmvtpm);
+ if (rc)
+ dev_err(dev, "Error send_init rc=%d\n", rc);
+
+ return rc;
+}
+
+/**
+ * tpm_ibmvtpm_send() - Send a TPM command
+ * @chip: tpm chip struct
+ * @buf: buffer contains data to send
+ * @count: size of buffer
+ *
+ * Return:
+ * 0 on success,
+ * -errno on error
+ */
+static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+ bool retry = true;
+ int rc, sig;
+
+ if (!ibmvtpm->rtce_buf) {
+ dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
+ return 0;
+ }
+
+ if (count > ibmvtpm->rtce_size) {
+ dev_err(ibmvtpm->dev,
+ "Invalid size in send: count=%zd, rtce_size=%d\n",
+ count, ibmvtpm->rtce_size);
+ return -EIO;
+ }
+
+ if (ibmvtpm->tpm_processing_cmd) {
+ dev_info(ibmvtpm->dev,
+ "Need to wait for TPM to finish\n");
+ /* wait for previous command to finish */
+ sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
+ if (sig)
+ return -EINTR;
+ }
+
+ spin_lock(&ibmvtpm->rtce_lock);
+ ibmvtpm->res_len = 0;
+ memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
+
+ /*
+ * set the processing flag before the Hcall, since we may get the
+ * result (interrupt) before even being able to check rc.
+ */
+ ibmvtpm->tpm_processing_cmd = 1;
+
+again:
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+ IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
+ count, ibmvtpm->rtce_dma_handle);
+ if (rc != H_SUCCESS) {
+ /*
+ * H_CLOSED can be returned after LPM resume. Call
+ * tpm_ibmvtpm_resume() to re-enable the CRQ then retry
+ * ibmvtpm_send_crq() once before failing.
+ */
+ if (rc == H_CLOSED && retry) {
+ tpm_ibmvtpm_resume(ibmvtpm->dev);
+ retry = false;
+ goto again;
+ }
+ dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
+ ibmvtpm->tpm_processing_cmd = 0;
+ }
+
+ spin_unlock(&ibmvtpm->rtce_lock);
+ return 0;
+}
+
+static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
+{
+ return;
+}
+
+static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
+{
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+
+ return ibmvtpm->tpm_processing_cmd;
+}
+
+/**
+ * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
+ *
+ * @ibmvtpm: vtpm device struct
+ *
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
+ */
+static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
+{
+ int rc;
+
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+ IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0);
+ if (rc != H_SUCCESS)
+ dev_err(ibmvtpm->dev,
+ "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
+
+ return rc;
+}
+
+/**
+ * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
+ * - Note that this is vtpm version and not tpm version
+ *
+ * @ibmvtpm: vtpm device struct
+ *
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
+ */
+static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
+{
+ int rc;
+
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+ IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0);
+ if (rc != H_SUCCESS)
+ dev_err(ibmvtpm->dev,
+ "ibmvtpm_crq_get_version failed rc=%d\n", rc);
+
+ return rc;
+}
+
+/**
+ * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
+ * @ibmvtpm: vtpm device struct
+ *
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
+ */
+static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
+{
+ int rc;
+
+ rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD);
+ if (rc != H_SUCCESS)
+ dev_err(ibmvtpm->dev,
+ "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);
+
+ return rc;
+}
+
+/**
+ * tpm_ibmvtpm_remove - ibm vtpm remove entry point
+ * @vdev: vio device struct
+ *
+ * Return: Always 0.
+ */
+static void tpm_ibmvtpm_remove(struct vio_dev *vdev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+ int rc = 0;
+
+ tpm_chip_unregister(chip);
+
+ free_irq(vdev->irq, ibmvtpm);
+
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
+ CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);
+
+ if (ibmvtpm->rtce_buf) {
+ dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
+ ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
+ kfree(ibmvtpm->rtce_buf);
+ }
+
+ kfree(ibmvtpm);
+ /* For tpm_ibmvtpm_get_desired_dma */
+ dev_set_drvdata(&vdev->dev, NULL);
+}
+
+/**
+ * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
+ * @vdev: vio device struct
+ *
+ * Return:
+ * Number of bytes the driver needs to DMA map.
+ */
+static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
+ struct ibmvtpm_dev *ibmvtpm;
+
+ /*
+ * ibmvtpm initializes at probe time, so the data we are
+ * asking for may not be set yet. Estimate that 4K is required
+ * for the TCE-mapped buffer in addition to the CRQ.
+ */
+ if (chip)
+ ibmvtpm = dev_get_drvdata(&chip->dev);
+ else
+ return CRQ_RES_BUF_SIZE + PAGE_SIZE;
+
+ return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
+}
+
+/**
+ * tpm_ibmvtpm_suspend - Suspend
+ * @dev: device struct
+ *
+ * Return: Always 0.
+ */
+static int tpm_ibmvtpm_suspend(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+ int rc = 0;
+
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+ IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0);
+ if (rc != H_SUCCESS)
+ dev_err(ibmvtpm->dev,
+ "tpm_ibmvtpm_suspend failed rc=%d\n", rc);
+
+ return rc;
+}
+
+/**
+ * ibmvtpm_reset_crq - Reset CRQ
+ *
+ * @ibmvtpm: ibm vtpm struct
+ *
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
+ */
+static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
+{
+ int rc = 0;
+
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_FREE_CRQ,
+ ibmvtpm->vdev->unit_address);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
+ ibmvtpm->crq_queue.index = 0;
+
+ return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
+ ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
+}
+
+static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == 0);
+}
+
+static const struct tpm_class_ops tpm_ibmvtpm = {
+ .recv = tpm_ibmvtpm_recv,
+ .send = tpm_ibmvtpm_send,
+ .cancel = tpm_ibmvtpm_cancel,
+ .status = tpm_ibmvtpm_status,
+ .req_complete_mask = 1,
+ .req_complete_val = 0,
+ .req_canceled = tpm_ibmvtpm_req_canceled,
+};
+
+static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
+ .suspend = tpm_ibmvtpm_suspend,
+ .resume = tpm_ibmvtpm_resume,
+};
+
+/**
+ * ibmvtpm_crq_get_next - Get next responded crq
+ *
+ * @ibmvtpm: vtpm device struct
+ *
+ * Return: vtpm crq pointer or NULL.
+ */
+static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
+{
+ struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
+ struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];
+
+ if (crq->valid & VTPM_MSG_RES) {
+ if (++crq_q->index == crq_q->num_entry)
+ crq_q->index = 0;
+ smp_rmb();
+ } else
+ crq = NULL;
+ return crq;
+}
+
+/**
+ * ibmvtpm_crq_process - Process responded crq
+ *
+ * @crq: crq to be processed
+ * @ibmvtpm: vtpm device struct
+ *
+ */
+static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
+ struct ibmvtpm_dev *ibmvtpm)
+{
+ int rc = 0;
+
+ switch (crq->valid) {
+ case VALID_INIT_CRQ:
+ switch (crq->msg) {
+ case INIT_CRQ_RES:
+ dev_info(ibmvtpm->dev, "CRQ initialized\n");
+ rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
+ if (rc)
+ dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
+ return;
+ case INIT_CRQ_COMP_RES:
+ dev_info(ibmvtpm->dev,
+ "CRQ initialization completed\n");
+ return;
+ default:
+ dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
+ return;
+ }
+ case IBMVTPM_VALID_CMD:
+ switch (crq->msg) {
+ case VTPM_GET_RTCE_BUFFER_SIZE_RES:
+ if (be16_to_cpu(crq->len) <= 0) {
+ dev_err(ibmvtpm->dev, "Invalid rtce size\n");
+ return;
+ }
+ ibmvtpm->rtce_size = be16_to_cpu(crq->len);
+ ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
+ GFP_ATOMIC);
+ if (!ibmvtpm->rtce_buf) {
+ dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
+ return;
+ }
+
+ ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
+ ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(ibmvtpm->dev,
+ ibmvtpm->rtce_dma_handle)) {
+ kfree(ibmvtpm->rtce_buf);
+ ibmvtpm->rtce_buf = NULL;
+ dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
+ }
+
+ return;
+ case VTPM_GET_VERSION_RES:
+ ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
+ return;
+ case VTPM_TPM_COMMAND_RES:
+ /* len of the data in rtce buffer */
+ ibmvtpm->res_len = be16_to_cpu(crq->len);
+ ibmvtpm->tpm_processing_cmd = 0;
+ wake_up_interruptible(&ibmvtpm->wq);
+ return;
+ default:
+ return;
+ }
+ }
+ return;
+}
+
+/**
+ * ibmvtpm_interrupt - Interrupt handler
+ *
+ * @irq: irq number to handle
+ * @vtpm_instance: vtpm that received interrupt
+ *
+ * Returns:
+ * IRQ_HANDLED
+ **/
+static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
+{
+ struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
+ struct ibmvtpm_crq *crq;
+
+ /* while loop is needed for initial setup (get version and
+ * get rtce_size). There should be only one tpm request at any
+ * given time.
+ */
+ while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
+ ibmvtpm_crq_process(crq, ibmvtpm);
+ wake_up_interruptible(&ibmvtpm->crq_queue.wq);
+ crq->valid = 0;
+ smp_wmb();
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
+ *
+ * @vio_dev: vio device struct
+ * @id: vio device id struct
+ *
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
+ */
+static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ const struct vio_device_id *id)
+{
+ struct ibmvtpm_dev *ibmvtpm;
+ struct device *dev = &vio_dev->dev;
+ struct ibmvtpm_crq_queue *crq_q;
+ struct tpm_chip *chip;
+ int rc = -ENOMEM, rc1;
+
+ chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
+ if (!ibmvtpm) {
+ dev_err(dev, "kzalloc for ibmvtpm failed\n");
+ goto cleanup;
+ }
+
+ ibmvtpm->dev = dev;
+ ibmvtpm->vdev = vio_dev;
+
+ crq_q = &ibmvtpm->crq_queue;
+ crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
+ if (!crq_q->crq_addr) {
+ dev_err(dev, "Unable to allocate memory for crq_addr\n");
+ goto cleanup;
+ }
+
+ crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
+ init_waitqueue_head(&crq_q->wq);
+ ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
+ CRQ_RES_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
+ dev_err(dev, "dma mapping failed\n");
+ goto cleanup;
+ }
+
+ rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
+ ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
+ if (rc == H_RESOURCE)
+ rc = ibmvtpm_reset_crq(ibmvtpm);
+
+ if (rc) {
+ dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
+ goto reg_crq_cleanup;
+ }
+
+ rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
+ tpm_ibmvtpm_driver_name, ibmvtpm);
+ if (rc) {
+ dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
+ goto init_irq_cleanup;
+ }
+
+ rc = vio_enable_interrupts(vio_dev);
+ if (rc) {
+ dev_err(dev, "Error %d enabling interrupts\n", rc);
+ goto init_irq_cleanup;
+ }
+
+ init_waitqueue_head(&ibmvtpm->wq);
+
+ crq_q->index = 0;
+
+ dev_set_drvdata(&chip->dev, ibmvtpm);
+
+ spin_lock_init(&ibmvtpm->rtce_lock);
+
+ rc = ibmvtpm_crq_send_init(ibmvtpm);
+ if (rc)
+ goto init_irq_cleanup;
+
+ rc = ibmvtpm_crq_get_version(ibmvtpm);
+ if (rc)
+ goto init_irq_cleanup;
+
+ rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
+ if (rc)
+ goto init_irq_cleanup;
+
+ if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
+ ibmvtpm->rtce_buf != NULL,
+ HZ)) {
+ rc = -ENODEV;
+ dev_err(dev, "CRQ response timed out\n");
+ goto init_irq_cleanup;
+ }
+
+
+ if (!strcmp(id->compat, "IBM,vtpm20"))
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+ rc = tpm_get_timeouts(chip);
+ if (rc)
+ goto init_irq_cleanup;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ rc = tpm2_get_cc_attrs_tbl(chip);
+ if (rc)
+ goto init_irq_cleanup;
+ }
+
+ return tpm_chip_register(chip);
+init_irq_cleanup:
+ do {
+ rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
+ } while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
+reg_crq_cleanup:
+ dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+cleanup:
+ if (ibmvtpm) {
+ if (crq_q->crq_addr)
+ free_page((unsigned long)crq_q->crq_addr);
+ kfree(ibmvtpm);
+ }
+
+ return rc;
+}
+
+static struct vio_driver ibmvtpm_driver = {
+ .id_table = tpm_ibmvtpm_device_table,
+ .probe = tpm_ibmvtpm_probe,
+ .remove = tpm_ibmvtpm_remove,
+ .get_desired_dma = tpm_ibmvtpm_get_desired_dma,
+ .name = tpm_ibmvtpm_driver_name,
+ .pm = &tpm_ibmvtpm_pm_ops,
+};
+
+/**
+ * ibmvtpm_module_init - Initialize ibm vtpm module.
+ *
+ *
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
+ */
+static int __init ibmvtpm_module_init(void)
+{
+ return vio_register_driver(&ibmvtpm_driver);
+}
+
+/**
+ * ibmvtpm_module_exit - Tear down ibm vtpm module.
+ */
+static void __exit ibmvtpm_module_exit(void)
+{
+ vio_unregister_driver(&ibmvtpm_driver);
+}
+
+module_init(ibmvtpm_module_init);
+module_exit(ibmvtpm_module_exit);
+
+MODULE_AUTHOR("adlai@us.ibm.com");
+MODULE_DESCRIPTION("IBM vTPM Driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
new file mode 100644
index 0000000000..51198b1374
--- /dev/null
+++ b/drivers/char/tpm/tpm_ibmvtpm.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 IBM Corporation
+ *
+ * Author: Ashley Lai <ashleydlai@gmail.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ */
+
+#ifndef __TPM_IBMVTPM_H__
+#define __TPM_IBMVTPM_H__
+
+/* vTPM Message Format 1 */
+struct ibmvtpm_crq {
+ u8 valid;
+ u8 msg;
+ __be16 len;
+ __be32 data;
+ __be64 reserved;
+} __attribute__((packed, aligned(8)));
+
+struct ibmvtpm_crq_queue {
+ struct ibmvtpm_crq *crq_addr;
+ u32 index;
+ u32 num_entry;
+ wait_queue_head_t wq;
+};
+
+struct ibmvtpm_dev {
+ struct device *dev;
+ struct vio_dev *vdev;
+ struct ibmvtpm_crq_queue crq_queue;
+ dma_addr_t crq_dma_handle;
+ u32 rtce_size;
+ void __iomem *rtce_buf;
+ dma_addr_t rtce_dma_handle;
+ spinlock_t rtce_lock;
+ wait_queue_head_t wq;
+ u16 res_len;
+ u32 vtpm_version;
+ u8 tpm_processing_cmd;
+};
+
+#define CRQ_RES_BUF_SIZE PAGE_SIZE
+
+/* Initialize CRQ */
+#define INIT_CRQ_CMD 0xC001000000000000LL /* Init cmd */
+#define INIT_CRQ_COMP_CMD 0xC002000000000000LL /* Init complete cmd */
+#define INIT_CRQ_RES 0x01 /* Init respond */
+#define INIT_CRQ_COMP_RES 0x02 /* Init complete respond */
+#define VALID_INIT_CRQ 0xC0 /* Valid command for init crq */
+
+/* vTPM CRQ response is the message type | 0x80 */
+#define VTPM_MSG_RES 0x80
+#define IBMVTPM_VALID_CMD 0x80
+
+/* vTPM CRQ message types */
+#define VTPM_GET_VERSION 0x01
+#define VTPM_GET_VERSION_RES (0x01 | VTPM_MSG_RES)
+
+#define VTPM_TPM_COMMAND 0x02
+#define VTPM_TPM_COMMAND_RES (0x02 | VTPM_MSG_RES)
+
+#define VTPM_GET_RTCE_BUFFER_SIZE 0x03
+#define VTPM_GET_RTCE_BUFFER_SIZE_RES (0x03 | VTPM_MSG_RES)
+
+#define VTPM_PREPARE_TO_SUSPEND 0x04
+#define VTPM_PREPARE_TO_SUSPEND_RES (0x04 | VTPM_MSG_RES)
+
+#endif
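To make the response encoding above concrete: a reply carries the same message type with the VTPM_MSG_RES bit set, and the same bit marks a valid response CRQ entry.

/* Examples of the response encoding defined above:
 *   VTPM_GET_VERSION (0x01)  ->  VTPM_GET_VERSION_RES (0x81)
 *   VTPM_TPM_COMMAND (0x02)  ->  VTPM_TPM_COMMAND_RES (0x82)
 * A response CRQ also carries VTPM_MSG_RES in its valid field, which is
 * what ibmvtpm_crq_get_next() tests with (crq->valid & VTPM_MSG_RES).
 */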
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
new file mode 100644
index 0000000000..9c924a1440
--- /dev/null
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Description:
+ * Device Driver for the Infineon Technologies
+ * SLD 9630 TT 1.1 and SLB 9635 TT 1.2 Trusted Platform Module
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * Copyright (C) 2005, Marcel Selhorst <tpmdd@selhorst.net>
+ * Sirrix AG - security technologies <tpmdd@sirrix.com> and
+ * Applied Data Security Group, Ruhr-University Bochum, Germany
+ * Project-Homepage: http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/
+ */
+
+#include <linux/init.h>
+#include <linux/pnp.h>
+#include "tpm.h"
+
+/* Infineon specific definitions */
+/* maximum number of WTX-packages */
+#define TPM_MAX_WTX_PACKAGES 50
+/* msleep-Time for WTX-packages */
+#define TPM_WTX_MSLEEP_TIME 20
+/* msleep-Time --> Interval to check status register */
+#define TPM_MSLEEP_TIME 3
+/* gives number of max. msleep()-calls before throwing timeout */
+#define TPM_MAX_TRIES 5000
+#define TPM_INFINEON_DEV_VEN_VALUE 0x15D1
+
+#define TPM_INF_IO_PORT 0x0
+#define TPM_INF_IO_MEM 0x1
+
+#define TPM_INF_ADDR 0x0
+#define TPM_INF_DATA 0x1
+
+struct tpm_inf_dev {
+ int iotype;
+
+ void __iomem *mem_base; /* MMIO ioremap'd addr */
+ unsigned long map_base; /* phys MMIO base */
+ unsigned long map_size; /* MMIO region size */
+ unsigned int index_off; /* index register offset */
+
+ unsigned int data_regs; /* Data registers */
+ unsigned int data_size;
+
+ unsigned int config_port; /* IO Port config index reg */
+ unsigned int config_size;
+};
+
+static struct tpm_inf_dev tpm_dev;
+
+static inline void tpm_data_out(unsigned char data, unsigned char offset)
+{
+ if (tpm_dev.iotype == TPM_INF_IO_PORT)
+ outb(data, tpm_dev.data_regs + offset);
+ else
+ writeb(data, tpm_dev.mem_base + tpm_dev.data_regs + offset);
+}
+
+static inline unsigned char tpm_data_in(unsigned char offset)
+{
+ if (tpm_dev.iotype == TPM_INF_IO_PORT)
+ return inb(tpm_dev.data_regs + offset);
+ else
+ return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset);
+}
+
+static inline void tpm_config_out(unsigned char data, unsigned char offset)
+{
+ if (tpm_dev.iotype == TPM_INF_IO_PORT)
+ outb(data, tpm_dev.config_port + offset);
+ else
+ writeb(data, tpm_dev.mem_base + tpm_dev.index_off + offset);
+}
+
+static inline unsigned char tpm_config_in(unsigned char offset)
+{
+ if (tpm_dev.iotype == TPM_INF_IO_PORT)
+ return inb(tpm_dev.config_port + offset);
+ else
+ return readb(tpm_dev.mem_base + tpm_dev.index_off + offset);
+}
+
+/* TPM header definitions */
+enum infineon_tpm_header {
+ TPM_VL_VER = 0x01,
+ TPM_VL_CHANNEL_CONTROL = 0x07,
+ TPM_VL_CHANNEL_PERSONALISATION = 0x0A,
+ TPM_VL_CHANNEL_TPM = 0x0B,
+ TPM_VL_CONTROL = 0x00,
+ TPM_INF_NAK = 0x15,
+ TPM_CTRL_WTX = 0x10,
+ TPM_CTRL_WTX_ABORT = 0x18,
+ TPM_CTRL_WTX_ABORT_ACK = 0x18,
+ TPM_CTRL_ERROR = 0x20,
+ TPM_CTRL_CHAININGACK = 0x40,
+ TPM_CTRL_CHAINING = 0x80,
+ TPM_CTRL_DATA = 0x04,
+ TPM_CTRL_DATA_CHA = 0x84,
+ TPM_CTRL_DATA_CHA_ACK = 0xC4
+};
+
+enum infineon_tpm_register {
+ WRFIFO = 0x00,
+ RDFIFO = 0x01,
+ STAT = 0x02,
+ CMD = 0x03
+};
+
+enum infineon_tpm_command_bits {
+ CMD_DIS = 0x00,
+ CMD_LP = 0x01,
+ CMD_RES = 0x02,
+ CMD_IRQC = 0x06
+};
+
+enum infineon_tpm_status_bits {
+ STAT_XFE = 0x00,
+ STAT_LPA = 0x01,
+ STAT_FOK = 0x02,
+ STAT_TOK = 0x03,
+ STAT_IRQA = 0x06,
+ STAT_RDA = 0x07
+};
+
+/* some outgoing values */
+enum infineon_tpm_values {
+ CHIP_ID1 = 0x20,
+ CHIP_ID2 = 0x21,
+ TPM_DAR = 0x30,
+ RESET_LP_IRQC_DISABLE = 0x41,
+ ENABLE_REGISTER_PAIR = 0x55,
+ IOLIMH = 0x60,
+ IOLIML = 0x61,
+ DISABLE_REGISTER_PAIR = 0xAA,
+ IDVENL = 0xF1,
+ IDVENH = 0xF2,
+ IDPDL = 0xF3,
+ IDPDH = 0xF4
+};
+
+static int number_of_wtx;
+
+static int empty_fifo(struct tpm_chip *chip, int clear_wrfifo)
+{
+ int status;
+ int check = 0;
+ int i;
+
+ if (clear_wrfifo) {
+ for (i = 0; i < 4096; i++) {
+ status = tpm_data_in(WRFIFO);
+ if (status == 0xff) {
+ if (check == 5)
+ break;
+ else
+ check++;
+ }
+ }
+ }
+ /* Note: whatever is currently in the TPM's FIFO is thrown away, since
+    there is no use for it. Normally this does not matter, because the
+    TPM will either answer immediately or the command will be aborted
+    anyway, so the data here is usually stale and useless.
+    It still has to be drained, because any old data left in the read
+    FIFO would corrupt the next communication with the TPM.
+  */
+ i = 0;
+ do {
+ status = tpm_data_in(RDFIFO);
+ status = tpm_data_in(STAT);
+ i++;
+ if (i == TPM_MAX_TRIES)
+ return -EIO;
+ } while ((status & (1 << STAT_RDA)) != 0);
+ return 0;
+}
+
+static int wait(struct tpm_chip *chip, int wait_for_bit)
+{
+ int status;
+ int i;
+ for (i = 0; i < TPM_MAX_TRIES; i++) {
+ status = tpm_data_in(STAT);
+ /* check the status-register if wait_for_bit is set */
+ if (status & 1 << wait_for_bit)
+ break;
+ tpm_msleep(TPM_MSLEEP_TIME);
+ }
+ if (i == TPM_MAX_TRIES) { /* timeout occurs */
+ if (wait_for_bit == STAT_XFE)
+ dev_err(&chip->dev, "Timeout in wait(STAT_XFE)\n");
+ if (wait_for_bit == STAT_RDA)
+ dev_err(&chip->dev, "Timeout in wait(STAT_RDA)\n");
+ return -EIO;
+ }
+ return 0;
+};
+
+static void wait_and_send(struct tpm_chip *chip, u8 sendbyte)
+{
+ wait(chip, STAT_XFE);
+ tpm_data_out(sendbyte, WRFIFO);
+}
+
+ /* Note: WTX stands for Waiting-Time-Extension. Whenever the TPM needs
+    more calculation time, it sends a WTX package, which has to be
+    acknowledged or aborted. This typically happens when hammering the
+    TPM with key creation. The maximum number of WTX packages is set in
+    the definitions above; once that number is reached, the extension is
+    denied and the TPM command has to be resent.
+  */
+
+static void tpm_wtx(struct tpm_chip *chip)
+{
+ number_of_wtx++;
+ dev_info(&chip->dev, "Granting WTX (%02d / %02d)\n",
+ number_of_wtx, TPM_MAX_WTX_PACKAGES);
+ wait_and_send(chip, TPM_VL_VER);
+ wait_and_send(chip, TPM_CTRL_WTX);
+ wait_and_send(chip, 0x00);
+ wait_and_send(chip, 0x00);
+ tpm_msleep(TPM_WTX_MSLEEP_TIME);
+}
+
+static void tpm_wtx_abort(struct tpm_chip *chip)
+{
+ dev_info(&chip->dev, "Aborting WTX\n");
+ wait_and_send(chip, TPM_VL_VER);
+ wait_and_send(chip, TPM_CTRL_WTX_ABORT);
+ wait_and_send(chip, 0x00);
+ wait_and_send(chip, 0x00);
+ number_of_wtx = 0;
+ tpm_msleep(TPM_WTX_MSLEEP_TIME);
+}
+
+static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count)
+{
+ int i;
+ int ret;
+ u32 size = 0;
+ number_of_wtx = 0;
+
+recv_begin:
+ /* start receiving header */
+ for (i = 0; i < 4; i++) {
+ ret = wait(chip, STAT_RDA);
+ if (ret)
+ return -EIO;
+ buf[i] = tpm_data_in(RDFIFO);
+ }
+
+ if (buf[0] != TPM_VL_VER) {
+ dev_err(&chip->dev,
+ "Wrong transport protocol implementation!\n");
+ return -EIO;
+ }
+
+ if (buf[1] == TPM_CTRL_DATA) {
+ /* size of the data received */
+ size = ((buf[2] << 8) | buf[3]);
+
+ for (i = 0; i < size; i++) {
+ wait(chip, STAT_RDA);
+ buf[i] = tpm_data_in(RDFIFO);
+ }
+
+ if ((size == 0x6D00) && (buf[1] == 0x80)) {
+ dev_err(&chip->dev, "Error handling on vendor layer!\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < size; i++)
+ buf[i] = buf[i + 6];
+
+ size = size - 6;
+ return size;
+ }
+
+ if (buf[1] == TPM_CTRL_WTX) {
+ dev_info(&chip->dev, "WTX-package received\n");
+ if (number_of_wtx < TPM_MAX_WTX_PACKAGES) {
+ tpm_wtx(chip);
+ goto recv_begin;
+ } else {
+ tpm_wtx_abort(chip);
+ goto recv_begin;
+ }
+ }
+
+ if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) {
+ dev_info(&chip->dev, "WTX-abort acknowledged\n");
+ return size;
+ }
+
+ if (buf[1] == TPM_CTRL_ERROR) {
+ dev_err(&chip->dev, "ERROR-package received:\n");
+ if (buf[4] == TPM_INF_NAK)
+ dev_err(&chip->dev,
+ "-> Negative acknowledgement"
+ " - retransmit command!\n");
+ return -EIO;
+ }
+ return -EIO;
+}
+
+static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
+{
+ int i;
+ int ret;
+ u8 count_high, count_low, count_4, count_3, count_2, count_1;
+
+ /* Disabling Reset, LP and IRQC */
+ tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+
+ ret = empty_fifo(chip, 1);
+ if (ret) {
+ dev_err(&chip->dev, "Timeout while clearing FIFO\n");
+ return -EIO;
+ }
+
+ ret = wait(chip, STAT_XFE);
+ if (ret)
+ return -EIO;
+
+ count_4 = (count & 0xff000000) >> 24;
+ count_3 = (count & 0x00ff0000) >> 16;
+ count_2 = (count & 0x0000ff00) >> 8;
+ count_1 = (count & 0x000000ff);
+ count_high = ((count + 6) & 0xffffff00) >> 8;
+ count_low = ((count + 6) & 0x000000ff);
+
+ /* Sending Header */
+ wait_and_send(chip, TPM_VL_VER);
+ wait_and_send(chip, TPM_CTRL_DATA);
+ wait_and_send(chip, count_high);
+ wait_and_send(chip, count_low);
+
+ /* Sending Data Header */
+ wait_and_send(chip, TPM_VL_VER);
+ wait_and_send(chip, TPM_VL_CHANNEL_TPM);
+ wait_and_send(chip, count_4);
+ wait_and_send(chip, count_3);
+ wait_and_send(chip, count_2);
+ wait_and_send(chip, count_1);
+
+ /* Sending Data */
+ for (i = 0; i < count; i++) {
+ wait_and_send(chip, buf[i]);
+ }
+ return 0;
+}
+
+static void tpm_inf_cancel(struct tpm_chip *chip)
+{
+ /*
+ Since we communicate with the TPM in legacy mode, there is no
+ dedicated cancel function; aborting a pending WTX serves as the
+ workaround for interrupting the TPM.
+ */
+}
+
+static u8 tpm_inf_status(struct tpm_chip *chip)
+{
+ return tpm_data_in(STAT);
+}
+
+static const struct tpm_class_ops tpm_inf = {
+ .recv = tpm_inf_recv,
+ .send = tpm_inf_send,
+ .cancel = tpm_inf_cancel,
+ .status = tpm_inf_status,
+ .req_complete_mask = 0,
+ .req_complete_val = 0,
+};
+
+static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
+ /* Infineon TPMs */
+ {"IFX0101", 0},
+ {"IFX0102", 0},
+ {"", 0}
+};
+
+MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
+
+static int tpm_inf_pnp_probe(struct pnp_dev *dev,
+ const struct pnp_device_id *dev_id)
+{
+ int rc = 0;
+ u8 iol, ioh;
+ int vendorid[2];
+ int version[2];
+ int productid[2];
+ const char *chipname;
+ struct tpm_chip *chip;
+
+ /* read IO-ports through PnP */
+ if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
+ !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
+
+ tpm_dev.iotype = TPM_INF_IO_PORT;
+
+ tpm_dev.config_port = pnp_port_start(dev, 0);
+ tpm_dev.config_size = pnp_port_len(dev, 0);
+ tpm_dev.data_regs = pnp_port_start(dev, 1);
+ tpm_dev.data_size = pnp_port_len(dev, 1);
+ if ((tpm_dev.data_size < 4) || (tpm_dev.config_size < 2)) {
+ rc = -EINVAL;
+ goto err_last;
+ }
+ dev_info(&dev->dev, "Found %s with ID %s\n",
+ dev->name, dev_id->id);
+ if (!((tpm_dev.data_regs >> 8) & 0xff)) {
+ rc = -EINVAL;
+ goto err_last;
+ }
+ /* publish my base address and request region */
+ if (request_region(tpm_dev.data_regs, tpm_dev.data_size,
+ "tpm_infineon0") == NULL) {
+ rc = -EINVAL;
+ goto err_last;
+ }
+ if (request_region(tpm_dev.config_port, tpm_dev.config_size,
+ "tpm_infineon0") == NULL) {
+ release_region(tpm_dev.data_regs, tpm_dev.data_size);
+ rc = -EINVAL;
+ goto err_last;
+ }
+ } else if (pnp_mem_valid(dev, 0) &&
+ !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
+
+ tpm_dev.iotype = TPM_INF_IO_MEM;
+
+ tpm_dev.map_base = pnp_mem_start(dev, 0);
+ tpm_dev.map_size = pnp_mem_len(dev, 0);
+
+ dev_info(&dev->dev, "Found %s with ID %s\n",
+ dev->name, dev_id->id);
+
+ /* publish my base address and request region */
+ if (request_mem_region(tpm_dev.map_base, tpm_dev.map_size,
+ "tpm_infineon0") == NULL) {
+ rc = -EINVAL;
+ goto err_last;
+ }
+
+ tpm_dev.mem_base = ioremap(tpm_dev.map_base, tpm_dev.map_size);
+ if (tpm_dev.mem_base == NULL) {
+ release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
+ rc = -EINVAL;
+ goto err_last;
+ }
+
+ /*
+ * The only known MMIO-based Infineon TPM system provides a single
+ * large mem region with the device config registers at the default
+ * TPM_ADDR. The data registers seem like they could be placed
+ * anywhere within the MMIO region, but let's just put them at zero
+ * offset.
+ */
+ tpm_dev.index_off = TPM_ADDR;
+ tpm_dev.data_regs = 0x0;
+ } else {
+ rc = -EINVAL;
+ goto err_last;
+ }
+
+ /* query the chip for its vendor ID, version number and so on */
+ tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
+ tpm_config_out(IDVENL, TPM_INF_ADDR);
+ vendorid[1] = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(IDVENH, TPM_INF_ADDR);
+ vendorid[0] = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(IDPDL, TPM_INF_ADDR);
+ productid[1] = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(IDPDH, TPM_INF_ADDR);
+ productid[0] = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(CHIP_ID1, TPM_INF_ADDR);
+ version[1] = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(CHIP_ID2, TPM_INF_ADDR);
+ version[0] = tpm_config_in(TPM_INF_DATA);
+
+ switch ((productid[0] << 8) | productid[1]) {
+ case 6:
+ chipname = " (SLD 9630 TT 1.1)";
+ break;
+ case 11:
+ chipname = " (SLB 9635 TT 1.2)";
+ break;
+ default:
+ chipname = " (unknown chip)";
+ break;
+ }
+
+ if ((vendorid[0] << 8 | vendorid[1]) == (TPM_INFINEON_DEV_VEN_VALUE)) {
+
+ /* configure TPM with IO-ports */
+ tpm_config_out(IOLIMH, TPM_INF_ADDR);
+ tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
+ tpm_config_out(IOLIML, TPM_INF_ADDR);
+ tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
+
+ /* check that the IO-ports were set correctly */
+ tpm_config_out(IOLIMH, TPM_INF_ADDR);
+ ioh = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(IOLIML, TPM_INF_ADDR);
+ iol = tpm_config_in(TPM_INF_DATA);
+
+ if ((ioh << 8 | iol) != tpm_dev.data_regs) {
+ dev_err(&dev->dev,
+ "Could not set IO-data registers to 0x%x\n",
+ tpm_dev.data_regs);
+ rc = -EIO;
+ goto err_release_region;
+ }
+
+ /* activate register */
+ tpm_config_out(TPM_DAR, TPM_INF_ADDR);
+ tpm_config_out(0x01, TPM_INF_DATA);
+ tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
+
+ /* disable RESET, LP and IRQC */
+ tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+
+ /* Finally, we're done, print some info */
+ dev_info(&dev->dev, "TPM found: "
+ "config base 0x%lx, "
+ "data base 0x%lx, "
+ "chip version 0x%02x%02x, "
+ "vendor id 0x%x%x (Infineon), "
+ "product id 0x%02x%02x"
+ "%s\n",
+ tpm_dev.iotype == TPM_INF_IO_PORT ?
+ tpm_dev.config_port :
+ tpm_dev.map_base + tpm_dev.index_off,
+ tpm_dev.iotype == TPM_INF_IO_PORT ?
+ tpm_dev.data_regs :
+ tpm_dev.map_base + tpm_dev.data_regs,
+ version[0], version[1],
+ vendorid[0], vendorid[1],
+ productid[0], productid[1], chipname);
+
+ chip = tpmm_chip_alloc(&dev->dev, &tpm_inf);
+ if (IS_ERR(chip)) {
+ rc = PTR_ERR(chip);
+ goto err_release_region;
+ }
+
+ rc = tpm_chip_register(chip);
+ if (rc)
+ goto err_release_region;
+
+ return 0;
+ } else {
+ rc = -ENODEV;
+ goto err_release_region;
+ }
+
+err_release_region:
+ if (tpm_dev.iotype == TPM_INF_IO_PORT) {
+ release_region(tpm_dev.data_regs, tpm_dev.data_size);
+ release_region(tpm_dev.config_port, tpm_dev.config_size);
+ } else {
+ iounmap(tpm_dev.mem_base);
+ release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
+ }
+
+err_last:
+ return rc;
+}
+
+static void tpm_inf_pnp_remove(struct pnp_dev *dev)
+{
+ struct tpm_chip *chip = pnp_get_drvdata(dev);
+
+ tpm_chip_unregister(chip);
+
+ if (tpm_dev.iotype == TPM_INF_IO_PORT) {
+ release_region(tpm_dev.data_regs, tpm_dev.data_size);
+ release_region(tpm_dev.config_port,
+ tpm_dev.config_size);
+ } else {
+ iounmap(tpm_dev.mem_base);
+ release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
+ }
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tpm_inf_resume(struct device *dev)
+{
+ /* Re-configure TPM after suspending */
+ tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
+ tpm_config_out(IOLIMH, TPM_INF_ADDR);
+ tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
+ tpm_config_out(IOLIML, TPM_INF_ADDR);
+ tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
+ /* activate register */
+ tpm_config_out(TPM_DAR, TPM_INF_ADDR);
+ tpm_config_out(0x01, TPM_INF_DATA);
+ tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
+ /* disable RESET, LP and IRQC */
+ tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+ return tpm_pm_resume(dev);
+}
+#endif
+static SIMPLE_DEV_PM_OPS(tpm_inf_pm, tpm_pm_suspend, tpm_inf_resume);
+
+static struct pnp_driver tpm_inf_pnp_driver = {
+ .name = "tpm_inf_pnp",
+ .id_table = tpm_inf_pnp_tbl,
+ .probe = tpm_inf_pnp_probe,
+ .remove = tpm_inf_pnp_remove,
+ .driver = {
+ .pm = &tpm_inf_pm,
+ }
+};
+
+module_pnp_driver(tpm_inf_pnp_driver);
+
+MODULE_AUTHOR("Marcel Selhorst <tpmdd@sirrix.com>");
+MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
+MODULE_VERSION("1.9.2");
+MODULE_LICENSE("GPL");
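
The framing that tpm_inf_send() emits byte-by-byte over the write FIFO is easier to follow when laid out as a flat buffer: a 4-byte transport header (TPM_VL_VER, TPM_CTRL_DATA, 16-bit length of count + 6) followed by a 6-byte data header (TPM_VL_VER, TPM_VL_CHANNEL_TPM, 32-bit big-endian command length) and then the TPM command itself. The sketch below is illustrative only; the driver never builds such a buffer, and the caller is assumed to provide one large enough.

/* Illustrative only -- the driver streams these bytes via wait_and_send(). */
static size_t example_build_inf_frame(u8 *out, const u8 *cmd, size_t count)
{
	size_t total = count + 6;		/* data header + payload */

	/* transport header */
	out[0] = TPM_VL_VER;
	out[1] = TPM_CTRL_DATA;
	out[2] = (total >> 8) & 0xff;
	out[3] = total & 0xff;

	/* data header: channel and 32-bit big-endian command length */
	out[4] = TPM_VL_VER;
	out[5] = TPM_VL_CHANNEL_TPM;
	out[6] = (count >> 24) & 0xff;
	out[7] = (count >> 16) & 0xff;
	out[8] = (count >> 8) & 0xff;
	out[9] = count & 0xff;

	memcpy(&out[10], cmd, count);		/* TPM command payload */
	return 10 + count;			/* bytes placed in 'out' */
}
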
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
new file mode 100644
index 0000000000..038701d483
--- /dev/null
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ */
+
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "tpm.h"
+
+/* National definitions */
+enum tpm_nsc_addr{
+ TPM_NSC_IRQ = 0x07,
+ TPM_NSC_BASE0_HI = 0x60,
+ TPM_NSC_BASE0_LO = 0x61,
+ TPM_NSC_BASE1_HI = 0x62,
+ TPM_NSC_BASE1_LO = 0x63
+};
+
+enum tpm_nsc_index {
+ NSC_LDN_INDEX = 0x07,
+ NSC_SID_INDEX = 0x20,
+ NSC_LDC_INDEX = 0x30,
+ NSC_DIO_INDEX = 0x60,
+ NSC_CIO_INDEX = 0x62,
+ NSC_IRQ_INDEX = 0x70,
+ NSC_ITS_INDEX = 0x71
+};
+
+enum tpm_nsc_status_loc {
+ NSC_STATUS = 0x01,
+ NSC_COMMAND = 0x01,
+ NSC_DATA = 0x00
+};
+
+/* status bits */
+enum tpm_nsc_status {
+ NSC_STATUS_OBF = 0x01, /* output buffer full */
+ NSC_STATUS_IBF = 0x02, /* input buffer full */
+ NSC_STATUS_F0 = 0x04, /* F0 */
+ NSC_STATUS_A2 = 0x08, /* A2 */
+ NSC_STATUS_RDY = 0x10, /* ready to receive command */
+ NSC_STATUS_IBR = 0x20 /* ready to receive data */
+};
+
+/* command bits */
+enum tpm_nsc_cmd_mode {
+ NSC_COMMAND_NORMAL = 0x01, /* normal mode */
+ NSC_COMMAND_EOC = 0x03,
+ NSC_COMMAND_CANCEL = 0x22
+};
+
+struct tpm_nsc_priv {
+ unsigned long base;
+};
+
+/*
+ * Wait for a certain status to appear
+ */
+static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data)
+{
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+ unsigned long stop;
+
+ /* status immediately available check */
+ *data = inb(priv->base + NSC_STATUS);
+ if ((*data & mask) == val)
+ return 0;
+
+ /* wait for status */
+ stop = jiffies + 10 * HZ;
+ do {
+ msleep(TPM_TIMEOUT);
+ *data = inb(priv->base + 1);
+ if ((*data & mask) == val)
+ return 0;
+ }
+ while (time_before(jiffies, stop));
+
+ return -EBUSY;
+}
+
+static int nsc_wait_for_ready(struct tpm_chip *chip)
+{
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+ int status;
+ unsigned long stop;
+
+ /* status immediately available check */
+ status = inb(priv->base + NSC_STATUS);
+ if (status & NSC_STATUS_OBF)
+ status = inb(priv->base + NSC_DATA);
+ if (status & NSC_STATUS_RDY)
+ return 0;
+
+ /* wait for status */
+ stop = jiffies + 100;
+ do {
+ msleep(TPM_TIMEOUT);
+ status = inb(priv->base + NSC_STATUS);
+ if (status & NSC_STATUS_OBF)
+ status = inb(priv->base + NSC_DATA);
+ if (status & NSC_STATUS_RDY)
+ return 0;
+ }
+ while (time_before(jiffies, stop));
+
+ dev_info(&chip->dev, "wait for ready failed\n");
+ return -EBUSY;
+}
+
+
+static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
+{
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+ u8 *buffer = buf;
+ u8 data, *p;
+ u32 size;
+ __be32 *native_size;
+
+ if (count < 6)
+ return -EIO;
+
+ if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) {
+ dev_err(&chip->dev, "F0 timeout\n");
+ return -EIO;
+ }
+
+ data = inb(priv->base + NSC_DATA);
+ if (data != NSC_COMMAND_NORMAL) {
+ dev_err(&chip->dev, "not in normal mode (0x%x)\n",
+ data);
+ return -EIO;
+ }
+
+ /* read the whole packet */
+ for (p = buffer; p < &buffer[count]; p++) {
+ if (wait_for_stat
+ (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) {
+ dev_err(&chip->dev,
+ "OBF timeout (while reading data)\n");
+ return -EIO;
+ }
+ if (data & NSC_STATUS_F0)
+ break;
+ *p = inb(priv->base + NSC_DATA);
+ }
+
+ if ((data & NSC_STATUS_F0) == 0 &&
+ (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) {
+ dev_err(&chip->dev, "F0 not set\n");
+ return -EIO;
+ }
+
+ data = inb(priv->base + NSC_DATA);
+ if (data != NSC_COMMAND_EOC) {
+ dev_err(&chip->dev,
+ "expected end of command(0x%x)\n", data);
+ return -EIO;
+ }
+
+ native_size = (__force __be32 *) (buf + 2);
+ size = be32_to_cpu(*native_size);
+
+ if (count < size)
+ return -EIO;
+
+ return size;
+}
+
+static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
+{
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+ u8 data;
+ int i;
+
+ /*
+ * If we hit the chip with back-to-back commands it locks up and
+ * never sets IBF. Hitting it with this "hammer" seems to fix it.
+ * It is not clear why this is needed; we followed the flow chart
+ * in the manual to the letter.
+ */
+ outb(NSC_COMMAND_CANCEL, priv->base + NSC_COMMAND);
+
+ if (nsc_wait_for_ready(chip) != 0)
+ return -EIO;
+
+ if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
+ dev_err(&chip->dev, "IBF timeout\n");
+ return -EIO;
+ }
+
+ outb(NSC_COMMAND_NORMAL, priv->base + NSC_COMMAND);
+ if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
+ dev_err(&chip->dev, "IBR timeout\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
+ dev_err(&chip->dev,
+ "IBF timeout (while writing data)\n");
+ return -EIO;
+ }
+ outb(buf[i], priv->base + NSC_DATA);
+ }
+
+ if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
+ dev_err(&chip->dev, "IBF timeout\n");
+ return -EIO;
+ }
+ outb(NSC_COMMAND_EOC, priv->base + NSC_COMMAND);
+
+ return 0;
+}
+
+static void tpm_nsc_cancel(struct tpm_chip *chip)
+{
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+
+ outb(NSC_COMMAND_CANCEL, priv->base + NSC_COMMAND);
+}
+
+static u8 tpm_nsc_status(struct tpm_chip *chip)
+{
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return inb(priv->base + NSC_STATUS);
+}
+
+static bool tpm_nsc_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == NSC_STATUS_RDY);
+}
+
+static const struct tpm_class_ops tpm_nsc = {
+ .recv = tpm_nsc_recv,
+ .send = tpm_nsc_send,
+ .cancel = tpm_nsc_cancel,
+ .status = tpm_nsc_status,
+ .req_complete_mask = NSC_STATUS_OBF,
+ .req_complete_val = NSC_STATUS_OBF,
+ .req_canceled = tpm_nsc_req_canceled,
+};
+
+static struct platform_device *pdev = NULL;
+
+static void tpm_nsc_remove(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+
+ tpm_chip_unregister(chip);
+ release_region(priv->base, 2);
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_nsc_pm, tpm_pm_suspend, tpm_pm_resume);
+
+static struct platform_driver nsc_drv = {
+ .driver = {
+ .name = "tpm_nsc",
+ .pm = &tpm_nsc_pm,
+ },
+};
+
+static inline int tpm_read_index(int base, int index)
+{
+ outb(index, base);
+ return inb(base+1) & 0xFF;
+}
+
+static inline void tpm_write_index(int base, int index, int value)
+{
+ outb(index, base);
+ outb(value & 0xFF, base+1);
+}
+
+static int __init init_nsc(void)
+{
+ int rc = 0;
+ int lo, hi, err;
+ int nscAddrBase = TPM_ADDR;
+ struct tpm_chip *chip;
+ unsigned long base;
+ struct tpm_nsc_priv *priv;
+
+ /* verify that it is a National part (SID) */
+ if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) {
+ nscAddrBase = (tpm_read_index(TPM_SUPERIO_ADDR, 0x2C)<<8)|
+ (tpm_read_index(TPM_SUPERIO_ADDR, 0x2B)&0xFE);
+ if (tpm_read_index(nscAddrBase, NSC_SID_INDEX) != 0xF6)
+ return -ENODEV;
+ }
+
+ err = platform_driver_register(&nsc_drv);
+ if (err)
+ return err;
+
+ hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI);
+ lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO);
+ base = (hi<<8) | lo;
+
+ /* enable the DPM module */
+ tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01);
+
+ pdev = platform_device_alloc("tpm_nscl0", -1);
+ if (!pdev) {
+ rc = -ENOMEM;
+ goto err_unreg_drv;
+ }
+
+ pdev->num_resources = 0;
+ pdev->dev.driver = &nsc_drv.driver;
+ pdev->dev.release = tpm_nsc_remove;
+
+ if ((rc = platform_device_add(pdev)) < 0)
+ goto err_put_dev;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ rc = -ENOMEM;
+ goto err_del_dev;
+ }
+
+ priv->base = base;
+
+ if (request_region(base, 2, "tpm_nsc0") == NULL ) {
+ rc = -EBUSY;
+ goto err_del_dev;
+ }
+
+ chip = tpmm_chip_alloc(&pdev->dev, &tpm_nsc);
+ if (IS_ERR(chip)) {
+ rc = -ENODEV;
+ goto err_rel_reg;
+ }
+
+ dev_set_drvdata(&chip->dev, priv);
+
+ rc = tpm_chip_register(chip);
+ if (rc)
+ goto err_rel_reg;
+
+ dev_dbg(&pdev->dev, "NSC TPM detected\n");
+ dev_dbg(&pdev->dev,
+ "NSC LDN 0x%x, SID 0x%x, SRID 0x%x\n",
+ tpm_read_index(nscAddrBase,0x07), tpm_read_index(nscAddrBase,0x20),
+ tpm_read_index(nscAddrBase,0x27));
+ dev_dbg(&pdev->dev,
+ "NSC SIOCF1 0x%x SIOCF5 0x%x SIOCF6 0x%x SIOCF8 0x%x\n",
+ tpm_read_index(nscAddrBase,0x21), tpm_read_index(nscAddrBase,0x25),
+ tpm_read_index(nscAddrBase,0x26), tpm_read_index(nscAddrBase,0x28));
+ dev_dbg(&pdev->dev, "NSC IO Base0 0x%x\n",
+ (tpm_read_index(nscAddrBase,0x60) << 8) | tpm_read_index(nscAddrBase,0x61));
+ dev_dbg(&pdev->dev, "NSC IO Base1 0x%x\n",
+ (tpm_read_index(nscAddrBase,0x62) << 8) | tpm_read_index(nscAddrBase,0x63));
+ dev_dbg(&pdev->dev, "NSC Interrupt number and wakeup 0x%x\n",
+ tpm_read_index(nscAddrBase,0x70));
+ dev_dbg(&pdev->dev, "NSC IRQ type select 0x%x\n",
+ tpm_read_index(nscAddrBase,0x71));
+ dev_dbg(&pdev->dev,
+ "NSC DMA channel select0 0x%x, select1 0x%x\n",
+ tpm_read_index(nscAddrBase,0x74), tpm_read_index(nscAddrBase,0x75));
+ dev_dbg(&pdev->dev,
+ "NSC Config "
+ "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ tpm_read_index(nscAddrBase,0xF0), tpm_read_index(nscAddrBase,0xF1),
+ tpm_read_index(nscAddrBase,0xF2), tpm_read_index(nscAddrBase,0xF3),
+ tpm_read_index(nscAddrBase,0xF4), tpm_read_index(nscAddrBase,0xF5),
+ tpm_read_index(nscAddrBase,0xF6), tpm_read_index(nscAddrBase,0xF7),
+ tpm_read_index(nscAddrBase,0xF8), tpm_read_index(nscAddrBase,0xF9));
+
+ dev_info(&pdev->dev,
+ "NSC TPM revision %d\n",
+ tpm_read_index(nscAddrBase, 0x27) & 0x1F);
+
+ return 0;
+
+err_rel_reg:
+ release_region(base, 2);
+err_del_dev:
+ platform_device_del(pdev);
+err_put_dev:
+ platform_device_put(pdev);
+err_unreg_drv:
+ platform_driver_unregister(&nsc_drv);
+ return rc;
+}
+
+static void __exit cleanup_nsc(void)
+{
+ if (pdev) {
+ tpm_nsc_remove(&pdev->dev);
+ platform_device_unregister(pdev);
+ }
+
+ platform_driver_unregister(&nsc_drv);
+}
+
+module_init(init_nsc);
+module_exit(cleanup_nsc);
+
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
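
tpm_nsc_recv() relies on the standard TPM response header: a 2-byte tag followed by a big-endian 32-bit length at byte offset 2, which it compares against the caller's buffer size. A minimal standalone sketch of that check (not part of the driver, names are made up):

/* Illustrative only; mirrors the length check done at the end of tpm_nsc_recv(). */
static int example_response_len(const u8 *buf, size_t count)
{
	u32 len;

	if (count < 6)				/* need at least tag + length */
		return -EIO;

	len = be32_to_cpup((const __be32 *)(buf + 2));
	if (len > count)			/* header claims more than was read */
		return -EIO;

	return len;
}
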
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
new file mode 100644
index 0000000000..bc7b1b4501
--- /dev/null
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012-2014 Intel Corporation
+ *
+ * Authors:
+ * Xiaoyan Zhang <xiaoyan.zhang@intel.com>
+ * Jiang Liu <jiang.liu@linux.intel.com>
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * This file contains implementation of the sysfs interface for PPI.
+ */
+
+
+#include <linux/acpi.h>
+#include "tpm.h"
+
+#define TPM_PPI_REVISION_ID_1 1
+#define TPM_PPI_REVISION_ID_2 2
+#define TPM_PPI_FN_VERSION 1
+#define TPM_PPI_FN_SUBREQ 2
+#define TPM_PPI_FN_GETREQ 3
+#define TPM_PPI_FN_GETACT 4
+#define TPM_PPI_FN_GETRSP 5
+#define TPM_PPI_FN_SUBREQ2 7
+#define TPM_PPI_FN_GETOPR 8
+#define PPI_TPM_REQ_MAX 101 /* PPI 1.3 for TPM 2 */
+#define PPI_VS_REQ_START 128
+#define PPI_VS_REQ_END 255
+
+static const guid_t tpm_ppi_guid =
+ GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4,
+ 0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53);
+
+static bool tpm_ppi_req_has_parameter(u64 req)
+{
+ return req == 23;
+}
+
+static inline union acpi_object *
+tpm_eval_dsm(acpi_handle ppi_handle, int func, acpi_object_type type,
+ union acpi_object *argv4, u64 rev)
+{
+ BUG_ON(!ppi_handle);
+ return acpi_evaluate_dsm_typed(ppi_handle, &tpm_ppi_guid,
+ rev, func, argv4, type);
+}
+
+static ssize_t tpm_show_ppi_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", chip->ppi_version);
+}
+
+static ssize_t tpm_show_ppi_request(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t size = -EINVAL;
+ union acpi_object *obj;
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ u64 rev = TPM_PPI_REVISION_ID_2;
+ u64 req;
+
+ if (strcmp(chip->ppi_version, "1.2") < 0)
+ rev = TPM_PPI_REVISION_ID_1;
+
+ obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETREQ,
+ ACPI_TYPE_PACKAGE, NULL, rev);
+ if (!obj)
+ return -ENXIO;
+
+ /*
+ * output.pointer should be a package containing two integers.
+ * The first is the function return code: 0 means success, 1 means
+ * error. The second is the pending TPM operation requested by the
+ * OS: 0 means none, >0 is the operation value.
+ */
+ if (obj->package.count == 3 &&
+ obj->package.elements[0].type == ACPI_TYPE_INTEGER &&
+ obj->package.elements[1].type == ACPI_TYPE_INTEGER &&
+ obj->package.elements[2].type == ACPI_TYPE_INTEGER) {
+ if (obj->package.elements[0].integer.value)
+ size = -EFAULT;
+ else {
+ req = obj->package.elements[1].integer.value;
+ if (tpm_ppi_req_has_parameter(req))
+ size = scnprintf(buf, PAGE_SIZE,
+ "%llu %llu\n", req,
+ obj->package.elements[2].integer.value);
+ else
+ size = scnprintf(buf, PAGE_SIZE,
+ "%llu\n", req);
+ }
+ } else if (obj->package.count == 2 &&
+ obj->package.elements[0].type == ACPI_TYPE_INTEGER &&
+ obj->package.elements[1].type == ACPI_TYPE_INTEGER) {
+ if (obj->package.elements[0].integer.value)
+ size = -EFAULT;
+ else
+ size = scnprintf(buf, PAGE_SIZE, "%llu\n",
+ obj->package.elements[1].integer.value);
+ }
+
+ ACPI_FREE(obj);
+
+ return size;
+}
+
+static ssize_t tpm_store_ppi_request(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 req;
+ u64 ret;
+ int func = TPM_PPI_FN_SUBREQ;
+ union acpi_object *obj, tmp[2];
+ union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(2, tmp);
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ u64 rev = TPM_PPI_REVISION_ID_1;
+
+ /*
+ * Since PPI version 1.1, the function for submitting a TPM operation
+ * request to the pre-OS environment uses function index SUBREQ2
+ * instead of SUBREQ.
+ */
+ if (acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid,
+ TPM_PPI_REVISION_ID_1, 1 << TPM_PPI_FN_SUBREQ2))
+ func = TPM_PPI_FN_SUBREQ2;
+
+ /*
+ * The PPI spec defines params[3].type as ACPI_TYPE_PACKAGE, but some
+ * BIOSes accept only buffer/string/integer types while others accept
+ * buffer/string/package. For PPI versions 1.0 and 1.1, use buffer type
+ * for compatibility; from version 1.2 on, use package type as the spec
+ * requires.
+ */
+ if (strcmp(chip->ppi_version, "1.3") == 0) {
+ if (sscanf(buf, "%llu %llu", &tmp[0].integer.value,
+ &tmp[1].integer.value) != 2)
+ goto ppi12;
+ rev = TPM_PPI_REVISION_ID_2;
+ tmp[0].type = ACPI_TYPE_INTEGER;
+ tmp[1].type = ACPI_TYPE_INTEGER;
+ } else if (strcmp(chip->ppi_version, "1.2") < 0) {
+ if (sscanf(buf, "%d", &req) != 1)
+ return -EINVAL;
+ argv4.type = ACPI_TYPE_BUFFER;
+ argv4.buffer.length = sizeof(req);
+ argv4.buffer.pointer = (u8 *)&req;
+ } else {
+ppi12:
+ argv4.package.count = 1;
+ tmp[0].type = ACPI_TYPE_INTEGER;
+ if (sscanf(buf, "%llu", &tmp[0].integer.value) != 1)
+ return -EINVAL;
+ }
+
+ obj = tpm_eval_dsm(chip->acpi_dev_handle, func, ACPI_TYPE_INTEGER,
+ &argv4, rev);
+ if (!obj) {
+ return -ENXIO;
+ } else {
+ ret = obj->integer.value;
+ ACPI_FREE(obj);
+ }
+
+ if (ret == 0)
+ return (acpi_status)count;
+
+ return (ret == 1) ? -EPERM : -EFAULT;
+}
+
+static ssize_t tpm_show_ppi_transition_action(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 ret;
+ acpi_status status;
+ union acpi_object *obj = NULL;
+ union acpi_object tmp = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = 0,
+ .buffer.pointer = NULL
+ };
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ static char *info[] = {
+ "None",
+ "Shutdown",
+ "Reboot",
+ "OS Vendor-specific",
+ "Error",
+ };
+
+ /*
+ * The PPI spec defines params[3].type as an empty package, but some
+ * platforms (e.g. Capella with PPI 1.0) need an integer/string/buffer
+ * type, so for compatibility define params[3].type as buffer if the
+ * PPI version is < 1.2.
+ */
+ if (strcmp(chip->ppi_version, "1.2") < 0)
+ obj = &tmp;
+ obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETACT,
+ ACPI_TYPE_INTEGER, obj, TPM_PPI_REVISION_ID_1);
+ if (!obj) {
+ return -ENXIO;
+ } else {
+ ret = obj->integer.value;
+ ACPI_FREE(obj);
+ }
+
+ if (ret < ARRAY_SIZE(info) - 1)
+ status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, info[ret]);
+ else
+ status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret,
+ info[ARRAY_SIZE(info)-1]);
+ return status;
+}
+
+static ssize_t tpm_show_ppi_response(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ acpi_status status = -EINVAL;
+ union acpi_object *obj, *ret_obj;
+ u64 req, res;
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETRSP,
+ ACPI_TYPE_PACKAGE, NULL, TPM_PPI_REVISION_ID_1);
+ if (!obj)
+ return -ENXIO;
+
+ /*
+ * output.pointer should be a package containing three integers:
+ * the function return code, the most recent TPM operation request,
+ * and the response to that request. The response is only meaningful
+ * if the first integer is 0 and the second is non-zero.
+ */
+ ret_obj = obj->package.elements;
+ if (obj->package.count < 3 ||
+ ret_obj[0].type != ACPI_TYPE_INTEGER ||
+ ret_obj[1].type != ACPI_TYPE_INTEGER ||
+ ret_obj[2].type != ACPI_TYPE_INTEGER)
+ goto cleanup;
+
+ if (ret_obj[0].integer.value) {
+ status = -EFAULT;
+ goto cleanup;
+ }
+
+ req = ret_obj[1].integer.value;
+ res = ret_obj[2].integer.value;
+ if (req) {
+ if (res == 0)
+ status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
+ "0: Success");
+ else if (res == 0xFFFFFFF0)
+ status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
+ "0xFFFFFFF0: User Abort");
+ else if (res == 0xFFFFFFF1)
+ status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
+ "0xFFFFFFF1: BIOS Failure");
+ else if (res >= 1 && res <= 0x00000FFF)
+ status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
+ req, res, "Corresponding TPM error");
+ else
+ status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
+ req, res, "Error");
+ } else {
+ status = scnprintf(buf, PAGE_SIZE, "%llu: %s\n",
+ req, "No Recent Request");
+ }
+
+cleanup:
+ ACPI_FREE(obj);
+ return status;
+}
+
+static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
+ u32 end)
+{
+ int i;
+ u32 ret;
+ char *str = buf;
+ union acpi_object *obj, tmp;
+ union acpi_object argv = ACPI_INIT_DSM_ARGV4(1, &tmp);
+
+ static char *info[] = {
+ "Not implemented",
+ "BIOS only",
+ "Blocked for OS by BIOS",
+ "User required",
+ "User not required",
+ };
+
+ if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID_1,
+ 1 << TPM_PPI_FN_GETOPR))
+ return -EPERM;
+
+ tmp.integer.type = ACPI_TYPE_INTEGER;
+ for (i = start; i <= end; i++) {
+ tmp.integer.value = i;
+ obj = tpm_eval_dsm(dev_handle, TPM_PPI_FN_GETOPR,
+ ACPI_TYPE_INTEGER, &argv,
+ TPM_PPI_REVISION_ID_1);
+ if (!obj) {
+ return -ENOMEM;
+ } else {
+ ret = obj->integer.value;
+ ACPI_FREE(obj);
+ }
+
+ if (ret > 0 && ret < ARRAY_SIZE(info))
+ str += scnprintf(str, PAGE_SIZE, "%d %d: %s\n",
+ i, ret, info[ret]);
+ }
+
+ return str - buf;
+}
+
+static ssize_t tpm_show_ppi_tcg_operations(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ return show_ppi_operations(chip->acpi_dev_handle, buf, 0,
+ PPI_TPM_REQ_MAX);
+}
+
+static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ return show_ppi_operations(chip->acpi_dev_handle, buf, PPI_VS_REQ_START,
+ PPI_VS_REQ_END);
+}
+
+static DEVICE_ATTR(version, S_IRUGO, tpm_show_ppi_version, NULL);
+static DEVICE_ATTR(request, S_IRUGO | S_IWUSR | S_IWGRP,
+ tpm_show_ppi_request, tpm_store_ppi_request);
+static DEVICE_ATTR(transition_action, S_IRUGO,
+ tpm_show_ppi_transition_action, NULL);
+static DEVICE_ATTR(response, S_IRUGO, tpm_show_ppi_response, NULL);
+static DEVICE_ATTR(tcg_operations, S_IRUGO, tpm_show_ppi_tcg_operations, NULL);
+static DEVICE_ATTR(vs_operations, S_IRUGO, tpm_show_ppi_vs_operations, NULL);
+
+static struct attribute *ppi_attrs[] = {
+ &dev_attr_version.attr,
+ &dev_attr_request.attr,
+ &dev_attr_transition_action.attr,
+ &dev_attr_response.attr,
+ &dev_attr_tcg_operations.attr,
+ &dev_attr_vs_operations.attr, NULL,
+};
+static const struct attribute_group ppi_attr_grp = {
+ .name = "ppi",
+ .attrs = ppi_attrs
+};
+
+void tpm_add_ppi(struct tpm_chip *chip)
+{
+ union acpi_object *obj;
+
+ if (!chip->acpi_dev_handle)
+ return;
+
+ if (!acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid,
+ TPM_PPI_REVISION_ID_1, 1 << TPM_PPI_FN_VERSION))
+ return;
+
+ /* Cache PPI version string. */
+ obj = acpi_evaluate_dsm_typed(chip->acpi_dev_handle, &tpm_ppi_guid,
+ TPM_PPI_REVISION_ID_1,
+ TPM_PPI_FN_VERSION,
+ NULL, ACPI_TYPE_STRING);
+ if (obj) {
+ strscpy(chip->ppi_version, obj->string.pointer,
+ sizeof(chip->ppi_version));
+ ACPI_FREE(obj);
+ }
+
+ chip->groups[chip->groups_cnt++] = &ppi_attr_grp;
+}
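
The attribute group registered above ends up under the TPM chip's sysfs directory, typically /sys/class/tpm/tpm0/ppi/; the exact path is an assumption based on the group name, not something stated in this file. A small user-space sketch that queues a PPI operation by writing its number to the request attribute:

/* User-space sketch; the sysfs path is an assumption, adjust for your system. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/tpm/tpm0/ppi/request", "w");

	if (!f) {
		perror("open ppi/request");
		return 1;
	}
	/* Operation number as defined by the TCG PPI specification. */
	fprintf(f, "5\n");
	fclose(f);
	return 0;
}

The pending request, and after the next boot the firmware's verdict, can then be read back from the request and response attributes shown above.
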
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
new file mode 100644
index 0000000000..2c52b7905b
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2005, 2006 IBM Corporation
+ * Copyright (C) 2014, 2015 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.2, revision 1.0.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pnp.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/acpi.h>
+#include <linux/freezer.h>
+#include <linux/of.h>
+#include <linux/kernel.h>
+#include "tpm.h"
+#include "tpm_tis_core.h"
+
+struct tpm_info {
+ struct resource res;
+ /* irq > 0 means: use irq $irq;
+ * irq = 0 means: autoprobe for an irq;
+ * irq = -1 means: no irq support
+ */
+ int irq;
+};
+
+struct tpm_tis_tcg_phy {
+ struct tpm_tis_data priv;
+ void __iomem *iobase;
+};
+
+static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_tcg_phy, priv);
+}
+
+#ifdef CONFIG_PREEMPT_RT
+/*
+ * Flush previous write operations with a dummy read operation to the
+ * TPM MMIO base address.
+ */
+static inline void tpm_tis_flush(void __iomem *iobase)
+{
+ ioread8(iobase + TPM_ACCESS(0));
+}
+#else
+#define tpm_tis_flush(iobase) do { } while (0)
+#endif
+
+/*
+ * Write a single byte to the TPM MMIO address, and flush the write queue.
+ * The flush ensures that the data is sent immediately over the bus and not
+ * aggregated with further requests and transferred later in a batch. Large
+ * write requests can otherwise lead to unwanted latency spikes by blocking
+ * the CPU until the complete batch has been transferred.
+ */
+static inline void tpm_tis_iowrite8(u8 b, void __iomem *iobase, u32 addr)
+{
+ iowrite8(b, iobase + addr);
+ tpm_tis_flush(iobase);
+}
+
+/*
+ * Write a 32-bit word to the TPM MMIO address, and flush the write queue.
+ * The flush ensures that the data is sent immediately over the bus and not
+ * aggregated with further requests and transferred later in a batch. Large
+ * write requests can otherwise lead to unwanted latency spikes by blocking
+ * the CPU until the complete batch has been transferred.
+ */
+static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr)
+{
+ iowrite32(b, iobase + addr);
+ tpm_tis_flush(iobase);
+}
+
+static bool interrupts;
+module_param(interrupts, bool, 0444);
+MODULE_PARM_DESC(interrupts, "Enable interrupts");
+
+static bool itpm;
+module_param(itpm, bool, 0444);
+MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
+
+static bool force;
+#ifdef CONFIG_X86
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
+#endif
+
+#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
+static int has_hid(struct acpi_device *dev, const char *hid)
+{
+ struct acpi_hardware_id *id;
+
+ list_for_each_entry(id, &dev->pnp.ids, list)
+ if (!strcmp(hid, id->id))
+ return 1;
+
+ return 0;
+}
+
+static inline int is_itpm(struct acpi_device *dev)
+{
+ if (!dev)
+ return 0;
+ return has_hid(dev, "INTC0102");
+}
+#else
+static inline int is_itpm(struct acpi_device *dev)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_ACPI)
+#define DEVICE_IS_TPM2 1
+
+static const struct acpi_device_id tpm_acpi_tbl[] = {
+ {"MSFT0101", DEVICE_IS_TPM2},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, tpm_acpi_tbl);
+
+static int check_acpi_tpm2(struct device *dev)
+{
+ const struct acpi_device_id *aid = acpi_match_device(tpm_acpi_tbl, dev);
+ struct acpi_table_tpm2 *tbl;
+ acpi_status st;
+ int ret = 0;
+
+ if (!aid || aid->driver_data != DEVICE_IS_TPM2)
+ return 0;
+
+ /* If the ACPI TPM2 signature is matched then a global ACPI_SIG_TPM2
+ * table is mandatory
+ */
+ st = acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
+ if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) {
+ dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
+ return -EINVAL;
+ }
+
+ /* The tpm2_crb driver handles this device */
+ if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED)
+ ret = -ENODEV;
+
+ acpi_put_table((struct acpi_table_header *)tbl);
+ return ret;
+}
+#else
+static int check_acpi_tpm2(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *result, enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+ __le16 result_le16;
+ __le32 result_le32;
+
+ switch (io_mode) {
+ case TPM_TIS_PHYS_8:
+ while (len--)
+ *result++ = ioread8(phy->iobase + addr);
+ break;
+ case TPM_TIS_PHYS_16:
+ result_le16 = cpu_to_le16(ioread16(phy->iobase + addr));
+ memcpy(result, &result_le16, sizeof(u16));
+ break;
+ case TPM_TIS_PHYS_32:
+ result_le32 = cpu_to_le32(ioread32(phy->iobase + addr));
+ memcpy(result, &result_le32, sizeof(u32));
+ break;
+ }
+
+ return 0;
+}
+
+static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ const u8 *value, enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
+ switch (io_mode) {
+ case TPM_TIS_PHYS_8:
+ while (len--)
+ tpm_tis_iowrite8(*value++, phy->iobase, addr);
+ break;
+ case TPM_TIS_PHYS_16:
+ return -EINVAL;
+ case TPM_TIS_PHYS_32:
+ tpm_tis_iowrite32(le32_to_cpu(*((__le32 *)value)), phy->iobase, addr);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct tpm_tis_phy_ops tpm_tcg = {
+ .read_bytes = tpm_tcg_read_bytes,
+ .write_bytes = tpm_tcg_write_bytes,
+};
+
+static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info)
+{
+ struct tpm_tis_tcg_phy *phy;
+ int irq = -1;
+ int rc;
+
+ rc = check_acpi_tpm2(dev);
+ if (rc)
+ return rc;
+
+ phy = devm_kzalloc(dev, sizeof(struct tpm_tis_tcg_phy), GFP_KERNEL);
+ if (phy == NULL)
+ return -ENOMEM;
+
+ phy->iobase = devm_ioremap_resource(dev, &tpm_info->res);
+ if (IS_ERR(phy->iobase))
+ return PTR_ERR(phy->iobase);
+
+ if (interrupts)
+ irq = tpm_info->irq;
+
+ if (itpm || is_itpm(ACPI_COMPANION(dev)))
+ set_bit(TPM_TIS_ITPM_WORKAROUND, &phy->priv.flags);
+
+ return tpm_tis_core_init(dev, &phy->priv, irq, &tpm_tcg,
+ ACPI_HANDLE(dev));
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
+static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
+ const struct pnp_device_id *pnp_id)
+{
+ struct tpm_info tpm_info = {};
+ struct resource *res;
+
+ res = pnp_get_resource(pnp_dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ tpm_info.res = *res;
+
+ if (pnp_irq_valid(pnp_dev, 0))
+ tpm_info.irq = pnp_irq(pnp_dev, 0);
+ else
+ tpm_info.irq = -1;
+
+ return tpm_tis_init(&pnp_dev->dev, &tpm_info);
+}
+
+/*
+ * There is a known bug caused by 93e1b7d42e1e ("[PATCH] tpm: add HID module
+ * parameter"). That commit added the IFX0102 device ID, which is also
+ * claimed by tpm_infineon, but neglected to add a quirk for deciding which
+ * driver ought to be used.
+ */
+
+static struct pnp_device_id tpm_pnp_tbl[] = {
+ {"PNP0C31", 0}, /* TPM */
+ {"ATM1200", 0}, /* Atmel */
+ {"IFX0102", 0}, /* Infineon */
+ {"BCM0101", 0}, /* Broadcom */
+ {"BCM0102", 0}, /* Broadcom */
+ {"NSC1200", 0}, /* National */
+ {"ICO0102", 0}, /* Intel */
+ /* Add new here */
+ {"", 0}, /* User Specified */
+ {"", 0} /* Terminator */
+};
+MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
+
+static void tpm_tis_pnp_remove(struct pnp_dev *dev)
+{
+ struct tpm_chip *chip = pnp_get_drvdata(dev);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+}
+
+static struct pnp_driver tis_pnp_driver = {
+ .name = "tpm_tis",
+ .id_table = tpm_pnp_tbl,
+ .probe = tpm_tis_pnp_init,
+ .remove = tpm_tis_pnp_remove,
+ .driver = {
+ .pm = &tpm_tis_pm,
+ },
+};
+
+#define TIS_HID_USR_IDX (ARRAY_SIZE(tpm_pnp_tbl) - 2)
+module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
+ sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
+MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
+
+static struct platform_device *force_pdev;
+
+static int tpm_tis_plat_probe(struct platform_device *pdev)
+{
+ struct tpm_info tpm_info = {};
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+ tpm_info.res = *res;
+
+ tpm_info.irq = platform_get_irq_optional(pdev, 0);
+ if (tpm_info.irq <= 0) {
+ if (pdev != force_pdev)
+ tpm_info.irq = -1;
+ else
+ /* When forcing auto probe the IRQ */
+ tpm_info.irq = 0;
+ }
+
+ return tpm_tis_init(&pdev->dev, &tpm_info);
+}
+
+static void tpm_tis_plat_remove(struct platform_device *pdev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id tis_of_platform_match[] = {
+ {.compatible = "tcg,tpm-tis-mmio"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, tis_of_platform_match);
+#endif
+
+static struct platform_driver tis_drv = {
+ .probe = tpm_tis_plat_probe,
+ .remove_new = tpm_tis_plat_remove,
+ .driver = {
+ .name = "tpm_tis",
+ .pm = &tpm_tis_pm,
+ .of_match_table = of_match_ptr(tis_of_platform_match),
+ .acpi_match_table = ACPI_PTR(tpm_acpi_tbl),
+ },
+};
+
+static int tpm_tis_force_device(void)
+{
+ struct platform_device *pdev;
+ static const struct resource x86_resources[] = {
+ DEFINE_RES_MEM(0xFED40000, TIS_MEM_LEN)
+ };
+
+ if (!force)
+ return 0;
+
+ /* The driver core will match the name tpm_tis of the device to
+ * the tpm_tis platform driver and complete the setup via
+ * tpm_tis_plat_probe
+ */
+ pdev = platform_device_register_simple("tpm_tis", -1, x86_resources,
+ ARRAY_SIZE(x86_resources));
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+ force_pdev = pdev;
+
+ return 0;
+}
+
+static int __init init_tis(void)
+{
+ int rc;
+
+ rc = tpm_tis_force_device();
+ if (rc)
+ goto err_force;
+
+ rc = platform_driver_register(&tis_drv);
+ if (rc)
+ goto err_platform;
+
+
+ if (IS_ENABLED(CONFIG_PNP)) {
+ rc = pnp_register_driver(&tis_pnp_driver);
+ if (rc)
+ goto err_pnp;
+ }
+
+ return 0;
+
+err_pnp:
+ platform_driver_unregister(&tis_drv);
+err_platform:
+ if (force_pdev)
+ platform_device_unregister(force_pdev);
+err_force:
+ return rc;
+}
+
+static void __exit cleanup_tis(void)
+{
+ pnp_unregister_driver(&tis_pnp_driver);
+ platform_driver_unregister(&tis_drv);
+
+ if (force_pdev)
+ platform_device_unregister(force_pdev);
+}
+
+module_init(init_tis);
+module_exit(cleanup_tis);
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
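
The fixed 0xFED40000 window registered by the force path above covers the locality register banks defined by the TCG TIS specification: each locality occupies its own 4 KiB bank, so TIS_MEM_LEN spans localities 0 through 4. A tiny sketch of that layout; the values come from the TIS spec, not from this file, and the names are made up.

/* Per the TCG TIS spec: 4 KiB register bank per locality inside the window. */
#define EXAMPLE_TIS_BASE	0xFED40000UL
#define EXAMPLE_LOCALITY_STRIDE	0x1000UL

static unsigned long example_locality_base(int l)
{
	/* locality 0 -> 0xFED40000, locality 4 -> 0xFED44000 */
	return EXAMPLE_TIS_BASE + l * EXAMPLE_LOCALITY_STRIDE;
}
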
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
new file mode 100644
index 0000000000..1b350412d8
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -0,0 +1,1367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2005, 2006 IBM Corporation
+ * Copyright (C) 2014, 2015 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.2, revision 1.0.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pnp.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/acpi.h>
+#include <linux/freezer.h>
+#include <linux/dmi.h>
+#include "tpm.h"
+#include "tpm_tis_core.h"
+
+#define TPM_TIS_MAX_UNHANDLED_IRQS 1000
+
+static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value);
+
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
+{
+ u8 status = chip->ops->status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->ops->req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
+static u8 tpm_tis_filter_sts_mask(u8 int_mask, u8 sts_mask)
+{
+ if (!(int_mask & TPM_INTF_STS_VALID_INT))
+ sts_mask &= ~TPM_STS_VALID;
+
+ if (!(int_mask & TPM_INTF_DATA_AVAIL_INT))
+ sts_mask &= ~TPM_STS_DATA_AVAIL;
+
+ if (!(int_mask & TPM_INTF_CMD_READY_INT))
+ sts_mask &= ~TPM_STS_COMMAND_READY;
+
+ return sts_mask;
+}
+
+static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
+ unsigned long timeout, wait_queue_head_t *queue,
+ bool check_cancel)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned long stop;
+ long rc;
+ u8 status;
+ bool canceled = false;
+ u8 sts_mask;
+ int ret = 0;
+
+ /* check current status */
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ sts_mask = mask & (TPM_STS_VALID | TPM_STS_DATA_AVAIL |
+ TPM_STS_COMMAND_READY);
+ /* check what status changes can be handled by irqs */
+ sts_mask = tpm_tis_filter_sts_mask(priv->int_mask, sts_mask);
+
+ stop = jiffies + timeout;
+ /* process status changes with irq support */
+ if (sts_mask) {
+ ret = -ETIME;
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -ETIME;
+ rc = wait_event_interruptible_timeout(*queue,
+ wait_for_tpm_stat_cond(chip, sts_mask, check_cancel,
+ &canceled),
+ timeout);
+ if (rc > 0) {
+ if (canceled)
+ return -ECANCELED;
+ ret = 0;
+ }
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ mask &= ~sts_mask;
+ if (!mask) /* all done */
+ return 0;
+ /* process status changes without irq support */
+ do {
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ usleep_range(priv->timeout_min,
+ priv->timeout_max);
+ } while (time_before(jiffies, stop));
+ return -ETIME;
+}
+
+/* Before we attempt to access the TPM we must see that the valid bit is set.
+ * The specification says that this bit is 0 at reset and remains 0 until the
+ * 'TPM has gone through its self test and initialization and has established
+ * correct values in the other bits.'
+ */
+static int wait_startup(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned long stop = jiffies + chip->timeout_a;
+
+ do {
+ int rc;
+ u8 access;
+
+ rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
+ if (rc < 0)
+ return rc;
+
+ if (access & TPM_ACCESS_VALID)
+ return 0;
+ tpm_msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ return -1;
+}
+
+static bool check_locality(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc;
+ u8 access;
+
+ rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
+ if (rc < 0)
+ return false;
+
+ if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID
+ | TPM_ACCESS_REQUEST_USE)) ==
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
+ priv->locality = l;
+ return true;
+ }
+
+ return false;
+}
+
+static int __tpm_tis_relinquish_locality(struct tpm_tis_data *priv, int l)
+{
+ tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
+
+ return 0;
+}
+
+static int tpm_tis_relinquish_locality(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+
+ mutex_lock(&priv->locality_count_mutex);
+ priv->locality_count--;
+ if (priv->locality_count == 0)
+ __tpm_tis_relinquish_locality(priv, l);
+ mutex_unlock(&priv->locality_count_mutex);
+
+ return 0;
+}
+
+static int __tpm_tis_request_locality(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned long stop, timeout;
+ long rc;
+
+ if (check_locality(chip, l))
+ return l;
+
+ rc = tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_REQUEST_USE);
+ if (rc < 0)
+ return rc;
+
+ stop = jiffies + chip->timeout_a;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -1;
+ rc = wait_event_interruptible_timeout(priv->int_queue,
+ (check_locality
+ (chip, l)),
+ timeout);
+ if (rc > 0)
+ return l;
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ } else {
+ /* wait for burstcount */
+ do {
+ if (check_locality(chip, l))
+ return l;
+ tpm_msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ }
+ return -1;
+}
+
+static int tpm_tis_request_locality(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int ret = 0;
+
+ mutex_lock(&priv->locality_count_mutex);
+ if (priv->locality_count == 0)
+ ret = __tpm_tis_request_locality(chip, l);
+ if (!ret)
+ priv->locality_count++;
+ mutex_unlock(&priv->locality_count_mutex);
+ return ret;
+}
+
+static u8 tpm_tis_status(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc;
+ u8 status;
+
+ rc = tpm_tis_read8(priv, TPM_STS(priv->locality), &status);
+ if (rc < 0)
+ return 0;
+
+ if (unlikely((status & TPM_STS_READ_ZERO) != 0)) {
+ if (!test_and_set_bit(TPM_TIS_INVALID_STATUS, &priv->flags)) {
+ /*
+ * If this trips, the chances are the read is
+ * returning 0xff because the locality hasn't been
+ * acquired. Usually because tpm_try_get_ops() hasn't
+ * been called before doing a TPM operation.
+ */
+ dev_err(&chip->dev, "invalid TPM_STS.x 0x%02x, dumping stack for forensics\n",
+ status);
+
+ /*
+ * Dump stack for forensics, as invalid TPM_STS.x could be
+ * potentially triggered by impaired tpm_try_get_ops() or
+ * tpm_find_get_ops().
+ */
+ dump_stack();
+ }
+
+ return 0;
+ }
+
+ return status;
+}
+
+static void tpm_tis_ready(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+
+ /* this causes the current command to be aborted */
+ tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_COMMAND_READY);
+}
+
+static int get_burstcount(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned long stop;
+ int burstcnt, rc;
+ u32 value;
+
+ /* wait for burstcount */
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ stop = jiffies + chip->timeout_a;
+ else
+ stop = jiffies + chip->timeout_d;
+ do {
+ rc = tpm_tis_read32(priv, TPM_STS(priv->locality), &value);
+ if (rc < 0)
+ return rc;
+
+ burstcnt = (value >> 8) & 0xFFFF;
+ if (burstcnt)
+ return burstcnt;
+ usleep_range(TPM_TIMEOUT_USECS_MIN, TPM_TIMEOUT_USECS_MAX);
+ } while (time_before(jiffies, stop));
+ return -EBUSY;
+}
+
+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int size = 0, burstcnt, rc;
+
+ while (size < count) {
+ rc = wait_for_tpm_stat(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ chip->timeout_c,
+ &priv->read_queue, true);
+ if (rc < 0)
+ return rc;
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0) {
+ dev_err(&chip->dev, "Unable to read burstcount\n");
+ return burstcnt;
+ }
+ burstcnt = min_t(int, burstcnt, count - size);
+
+ rc = tpm_tis_read_bytes(priv, TPM_DATA_FIFO(priv->locality),
+ burstcnt, buf + size);
+ if (rc < 0)
+ return rc;
+
+ size += burstcnt;
+ }
+ return size;
+}
+
+static int tpm_tis_try_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int size = 0;
+ int status;
+ u32 expected;
+ int rc;
+
+ size = recv_data(chip, buf, TPM_HEADER_SIZE);
+ /* read first 10 bytes, including tag, paramsize, and result */
+ if (size < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev, "Unable to read header\n");
+ goto out;
+ }
+
+ expected = be32_to_cpu(*(__be32 *) (buf + 2));
+ if (expected > count || expected < TPM_HEADER_SIZE) {
+ size = -EIO;
+ goto out;
+ }
+
+ rc = recv_data(chip, &buf[TPM_HEADER_SIZE],
+ expected - TPM_HEADER_SIZE);
+ if (rc < 0) {
+ size = rc;
+ goto out;
+ }
+ size += rc;
+ if (size < expected) {
+ dev_err(&chip->dev, "Unable to read remainder of result\n");
+ size = -ETIME;
+ goto out;
+ }
+
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ size = -ETIME;
+ goto out;
+ }
+ status = tpm_tis_status(chip);
+ if (status & TPM_STS_DATA_AVAIL) {
+ dev_err(&chip->dev, "Error left over data\n");
+ size = -EIO;
+ goto out;
+ }
+
+ rc = tpm_tis_verify_crc(priv, (size_t)size, buf);
+ if (rc < 0) {
+ dev_err(&chip->dev, "CRC mismatch for response.\n");
+ size = rc;
+ goto out;
+ }
+
+out:
+ return size;
+}
+
+static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned int try;
+ int rc = 0;
+
+ if (count < TPM_HEADER_SIZE)
+ return -EIO;
+
+ for (try = 0; try < TPM_RETRY; try++) {
+ rc = tpm_tis_try_recv(chip, buf, count);
+
+ if (rc == -EIO)
+ /* Data transfer errors, indicated by EIO, can be
+ * recovered by rereading the response.
+ */
+ tpm_tis_write8(priv, TPM_STS(priv->locality),
+ TPM_STS_RESPONSE_RETRY);
+ else
+ break;
+ }
+
+ tpm_tis_ready(chip);
+
+ return rc;
+}
+
+/*
+ * If interrupts are used (signaled by an irq set in the vendor structure)
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc, status, burstcnt;
+ size_t count = 0;
+ bool itpm = test_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags);
+
+ status = tpm_tis_status(chip);
+ if ((status & TPM_STS_COMMAND_READY) == 0) {
+ tpm_tis_ready(chip);
+ if (wait_for_tpm_stat
+ (chip, TPM_STS_COMMAND_READY, chip->timeout_b,
+ &priv->int_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
+ }
+
+ while (count < len - 1) {
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0) {
+ dev_err(&chip->dev, "Unable to read burstcount\n");
+ rc = burstcnt;
+ goto out_err;
+ }
+ burstcnt = min_t(int, burstcnt, len - count - 1);
+ rc = tpm_tis_write_bytes(priv, TPM_DATA_FIFO(priv->locality),
+ burstcnt, buf + count);
+ if (rc < 0)
+ goto out_err;
+
+ count += burstcnt;
+
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
+ status = tpm_tis_status(chip);
+ if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
+ rc = -EIO;
+ goto out_err;
+ }
+ }
+
+ /* write last byte */
+ rc = tpm_tis_write8(priv, TPM_DATA_FIFO(priv->locality), buf[count]);
+ if (rc < 0)
+ goto out_err;
+
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
+ status = tpm_tis_status(chip);
+ if (!itpm && (status & TPM_STS_DATA_EXPECT) != 0) {
+ rc = -EIO;
+ goto out_err;
+ }
+
+ rc = tpm_tis_verify_crc(priv, len, buf);
+ if (rc < 0) {
+ dev_err(&chip->dev, "CRC mismatch for command.\n");
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ tpm_tis_ready(chip);
+ return rc;
+}
+
+static void __tpm_tis_disable_interrupts(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u32 int_mask = 0;
+
+ tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &int_mask);
+ int_mask &= ~TPM_GLOBAL_INT_ENABLE;
+ tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), int_mask);
+
+ chip->flags &= ~TPM_CHIP_FLAG_IRQ;
+}
+
+static void tpm_tis_disable_interrupts(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+
+ if (priv->irq == 0)
+ return;
+
+ __tpm_tis_disable_interrupts(chip);
+
+ devm_free_irq(chip->dev.parent, priv->irq, chip);
+ priv->irq = 0;
+}
+
+/*
+ * If interrupts are used (signaled by an irq set in the vendor structure)
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc;
+ u32 ordinal;
+ unsigned long dur;
+ unsigned int try;
+
+ for (try = 0; try < TPM_RETRY; try++) {
+ rc = tpm_tis_send_data(chip, buf, len);
+ if (rc >= 0)
+ /* Data transfer done successfully */
+ break;
+ else if (rc != -EIO)
+ /* Data transfer failed, not recoverable */
+ return rc;
+ }
+
+ /* go and do it */
+ rc = tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_GO);
+ if (rc < 0)
+ goto out_err;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
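+ /* the TPM command ordinal is a big-endian u32 at offset 6 of the header */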
+ ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+
+ dur = tpm_calc_ordinal_duration(chip, ordinal);
+ if (wait_for_tpm_stat
+ (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur,
+ &priv->read_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
+ }
+ return 0;
+out_err:
+ tpm_tis_ready(chip);
+ return rc;
+}
+
+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ int rc, irq;
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+
+ if (!(chip->flags & TPM_CHIP_FLAG_IRQ) ||
+ test_bit(TPM_TIS_IRQ_TESTED, &priv->flags))
+ return tpm_tis_send_main(chip, buf, len);
+
+ /* Verify receipt of the expected IRQ */
+ irq = priv->irq;
+ priv->irq = 0;
+ chip->flags &= ~TPM_CHIP_FLAG_IRQ;
+ rc = tpm_tis_send_main(chip, buf, len);
+ priv->irq = irq;
+ chip->flags |= TPM_CHIP_FLAG_IRQ;
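+ /* give a late interrupt up to 1 ms to arrive before falling back to polling */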
+ if (!test_bit(TPM_TIS_IRQ_TESTED, &priv->flags))
+ tpm_msleep(1);
+ if (!test_bit(TPM_TIS_IRQ_TESTED, &priv->flags))
+ tpm_tis_disable_interrupts(chip);
+ set_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
+ return rc;
+}
+
+struct tis_vendor_durations_override {
+ u32 did_vid;
+ struct tpm1_version version;
+ unsigned long durations[3];
+};
+
+static const struct tis_vendor_durations_override vendor_dur_overrides[] = {
+ /* STMicroelectronics 0x104a */
+ { 0x0000104a,
+ { 1, 2, 8, 28 },
+ { (2 * 60 * HZ), (2 * 60 * HZ), (2 * 60 * HZ) } },
+};
+
+static void tpm_tis_update_durations(struct tpm_chip *chip,
+ unsigned long *duration_cap)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ struct tpm1_version *version;
+ u32 did_vid;
+ int i, rc;
+ cap_t cap;
+
+ chip->duration_adjusted = false;
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
+ rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid);
+ if (rc < 0) {
+ dev_warn(&chip->dev, "%s: failed to read did_vid. %d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* Try to get a TPM version 1.2 or 1.1 TPM_CAP_VERSION_INFO */
+ rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
+ "attempting to determine the 1.2 version",
+ sizeof(cap.version2));
+ if (!rc) {
+ version = &cap.version2.version;
+ } else {
+ rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
+ "attempting to determine the 1.1 version",
+ sizeof(cap.version1));
+
+ if (rc)
+ goto out;
+
+ version = &cap.version1;
+ }
+
+ for (i = 0; i != ARRAY_SIZE(vendor_dur_overrides); i++) {
+ if (vendor_dur_overrides[i].did_vid != did_vid)
+ continue;
+
+ if ((version->major ==
+ vendor_dur_overrides[i].version.major) &&
+ (version->minor ==
+ vendor_dur_overrides[i].version.minor) &&
+ (version->rev_major ==
+ vendor_dur_overrides[i].version.rev_major) &&
+ (version->rev_minor ==
+ vendor_dur_overrides[i].version.rev_minor)) {
+
+ memcpy(duration_cap,
+ vendor_dur_overrides[i].durations,
+ sizeof(vendor_dur_overrides[i].durations));
+
+ chip->duration_adjusted = true;
+ goto out;
+ }
+ }
+
+out:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+}
+
+struct tis_vendor_timeout_override {
+ u32 did_vid;
+ unsigned long timeout_us[4];
+};
+
+static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
+ /* Atmel 3204 */
+ { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
+ (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
+};
+
+static void tpm_tis_update_timeouts(struct tpm_chip *chip,
+ unsigned long *timeout_cap)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int i, rc;
+ u32 did_vid;
+
+ chip->timeout_adjusted = false;
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
+ rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid);
+ if (rc < 0) {
+ dev_warn(&chip->dev, "%s: failed to read did_vid: %d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
+ if (vendor_timeout_overrides[i].did_vid != did_vid)
+ continue;
+ memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
+ sizeof(vendor_timeout_overrides[i].timeout_us));
+ chip->timeout_adjusted = true;
+ }
+
+out:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ return;
+}
+
+/*
+ * Early probing for an iTPM with the STS_DATA_EXPECT flaw.
+ * Try sending a command without the itpm flag set and, if that
+ * fails, repeat with the itpm flag set.
+ */
+static int probe_itpm(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc = 0;
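+ /* TPM 1.2 GetTicks (ordinal 0xf1); only the transfer is probed, the command is never started */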
+ static const u8 cmd_getticks[] = {
+ 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
+ 0x00, 0x00, 0x00, 0xf1
+ };
+ size_t len = sizeof(cmd_getticks);
+ u16 vendor;
+
+ if (test_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags))
+ return 0;
+
+ rc = tpm_tis_read16(priv, TPM_DID_VID(0), &vendor);
+ if (rc < 0)
+ return rc;
+
+ /* probe only iTPMs */
+ if (vendor != TPM_VID_INTEL)
+ return 0;
+
+ if (tpm_tis_request_locality(chip, 0) != 0)
+ return -EBUSY;
+
+ rc = tpm_tis_send_data(chip, cmd_getticks, len);
+ if (rc == 0)
+ goto out;
+
+ tpm_tis_ready(chip);
+
+ set_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags);
+
+ rc = tpm_tis_send_data(chip, cmd_getticks, len);
+ if (rc == 0)
+ dev_info(&chip->dev, "Detected an iTPM.\n");
+ else {
+ clear_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags);
+ rc = -EFAULT;
+ }
+
+out:
+ tpm_tis_ready(chip);
+ tpm_tis_relinquish_locality(chip, priv->locality);
+
+ return rc;
+}
+
+static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+
+ if (!test_bit(TPM_TIS_DEFAULT_CANCELLATION, &priv->flags)) {
+ switch (priv->manufacturer_id) {
+ case TPM_VID_WINBOND:
+ return ((status == TPM_STS_VALID) ||
+ (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
+ case TPM_VID_STM:
+ return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
+ default:
+ break;
+ }
+ }
+
+ return status == TPM_STS_COMMAND_READY;
+}
+
+static irqreturn_t tpm_tis_revert_interrupts(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ const char *product;
+ const char *vendor;
+
+ dev_warn(&chip->dev, FW_BUG
+ "TPM interrupt storm detected, polling instead\n");
+
+ vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+ product = dmi_get_system_info(DMI_PRODUCT_VERSION);
+
+ if (vendor && product) {
+ dev_info(&chip->dev,
+ "Consider adding the following entry to tpm_tis_dmi_table:\n");
+ dev_info(&chip->dev, "\tDMI_SYS_VENDOR: %s\n", vendor);
+ dev_info(&chip->dev, "\tDMI_PRODUCT_VERSION: %s\n", product);
+ }
+
+ if (tpm_tis_request_locality(chip, 0) != 0)
+ return IRQ_NONE;
+
+ __tpm_tis_disable_interrupts(chip);
+ tpm_tis_relinquish_locality(chip, 0);
+
+ schedule_work(&priv->free_irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tpm_tis_update_unhandled_irqs(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ irqreturn_t irqret = IRQ_HANDLED;
+
+ if (!(chip->flags & TPM_CHIP_FLAG_IRQ))
+ return IRQ_HANDLED;
+
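+ /* reset the counter if the last unhandled interrupt was more than 100 ms ago */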
+ if (time_after(jiffies, priv->last_unhandled_irq + HZ/10))
+ priv->unhandled_irqs = 1;
+ else
+ priv->unhandled_irqs++;
+
+ priv->last_unhandled_irq = jiffies;
+
+ if (priv->unhandled_irqs > TPM_TIS_MAX_UNHANDLED_IRQS)
+ irqret = tpm_tis_revert_interrupts(chip);
+
+ return irqret;
+}
+
+static irqreturn_t tis_int_handler(int dummy, void *dev_id)
+{
+ struct tpm_chip *chip = dev_id;
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u32 interrupt;
+ int rc;
+
+ rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt);
+ if (rc < 0)
+ goto err;
+
+ if (interrupt == 0)
+ goto err;
+
+ set_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
+ if (interrupt & TPM_INTF_DATA_AVAIL_INT)
+ wake_up_interruptible(&priv->read_queue);
+
+ if (interrupt &
+ (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
+ TPM_INTF_CMD_READY_INT))
+ wake_up_interruptible(&priv->int_queue);
+
+ /* Clear interrupts handled with TPM_EOI */
+ tpm_tis_request_locality(chip, 0);
+ rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), interrupt);
+ tpm_tis_relinquish_locality(chip, 0);
+ if (rc < 0)
+ goto err;
+
+ tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt);
+ return IRQ_HANDLED;
+
+err:
+ return tpm_tis_update_unhandled_irqs(chip);
+}
+
+static void tpm_tis_gen_interrupt(struct tpm_chip *chip)
+{
+ const char *desc = "attempting to generate an interrupt";
+ u32 cap2;
+ cap_t cap;
+ int ret;
+
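+ /* assume IRQ mode; the capability query below should trigger a data-available interrupt if IRQs work */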
+ chip->flags |= TPM_CHIP_FLAG_IRQ;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
+ else
+ ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
+
+ if (ret)
+ chip->flags &= ~TPM_CHIP_FLAG_IRQ;
+}
+
+static void tpm_tis_free_irq_func(struct work_struct *work)
+{
+ struct tpm_tis_data *priv = container_of(work, typeof(*priv), free_irq_work);
+ struct tpm_chip *chip = priv->chip;
+
+ devm_free_irq(chip->dev.parent, priv->irq, chip);
+ priv->irq = 0;
+}
+
+/* Register the IRQ and issue a command that will cause an interrupt. If an
+ * irq is seen then leave the chip setup for IRQ operation, otherwise reverse
+ * everything and leave in polling mode. Returns 0 on success.
+ */
+static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
+ int flags, int irq)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u8 original_int_vec;
+ int rc;
+ u32 int_status;
+
+ INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func);
+
+ rc = devm_request_threaded_irq(chip->dev.parent, irq, NULL,
+ tis_int_handler, IRQF_ONESHOT | flags,
+ dev_name(&chip->dev), chip);
+ if (rc) {
+ dev_info(&chip->dev, "Unable to request irq: %d for probe\n",
+ irq);
+ return -1;
+ }
+ priv->irq = irq;
+
+ rc = tpm_tis_request_locality(chip, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality),
+ &original_int_vec);
+ if (rc < 0) {
+ tpm_tis_relinquish_locality(chip, priv->locality);
+ return rc;
+ }
+
+ rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq);
+ if (rc < 0)
+ goto restore_irqs;
+
+ rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status);
+ if (rc < 0)
+ goto restore_irqs;
+
+ /* Clear all existing */
+ rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status);
+ if (rc < 0)
+ goto restore_irqs;
+ /* Turn on */
+ rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality),
+ intmask | TPM_GLOBAL_INT_ENABLE);
+ if (rc < 0)
+ goto restore_irqs;
+
+ clear_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
+
+ /* Generate an interrupt by having the core call through to
+ * tpm_tis_send
+ */
+ tpm_tis_gen_interrupt(chip);
+
+restore_irqs:
+ /* tpm_tis_send will either confirm the interrupt is working or it
+ * will call disable_irq which undoes all of the above.
+ */
+ if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
+ tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality),
+ original_int_vec);
+ rc = -1;
+ }
+
+ tpm_tis_relinquish_locality(chip, priv->locality);
+
+ return rc;
+}
+
+/* Try to find the IRQ the TPM is using. This is for legacy x86 systems that
+ * do not have ACPI/etc. We typically expect the interrupt to be declared if
+ * present.
+ */
+static void tpm_tis_probe_irq(struct tpm_chip *chip, u32 intmask)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u8 original_int_vec;
+ int i, rc;
+
+ rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality),
+ &original_int_vec);
+ if (rc < 0)
+ return;
+
+ if (!original_int_vec) {
+ if (IS_ENABLED(CONFIG_X86))
+ for (i = 3; i <= 15; i++)
+ if (!tpm_tis_probe_irq_single(chip, intmask, 0,
+ i))
+ return;
+ } else if (!tpm_tis_probe_irq_single(chip, intmask, 0,
+ original_int_vec))
+ return;
+}
+
+void tpm_tis_remove(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u32 reg = TPM_INT_ENABLE(priv->locality);
+ u32 interrupt;
+ int rc;
+
+ tpm_tis_clkrun_enable(chip, true);
+
+ rc = tpm_tis_read32(priv, reg, &interrupt);
+ if (rc < 0)
+ interrupt = 0;
+
+ tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
+ flush_work(&priv->free_irq_work);
+
+ tpm_tis_clkrun_enable(chip, false);
+
+ if (priv->ilb_base_addr)
+ iounmap(priv->ilb_base_addr);
+}
+EXPORT_SYMBOL_GPL(tpm_tis_remove);
+
+/**
+ * tpm_tis_clkrun_enable() - Keep clkrun protocol disabled for entire duration
+ * of a single TPM command
+ * @chip: TPM chip to use
+ * @value: 1 - Disable CLKRUN protocol, so that clocks are free running
+ * 0 - Enable CLKRUN protocol
+ * Call this function directly in tpm_tis_remove() in error or driver removal
+ * path, since the chip->ops is set to NULL in tpm_chip_unregister().
+ */
+static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value)
+{
+ struct tpm_tis_data *data = dev_get_drvdata(&chip->dev);
+ u32 clkrun_val;
+
+ if (!IS_ENABLED(CONFIG_X86) || !is_bsw() ||
+ !data->ilb_base_addr)
+ return;
+
+ if (value) {
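+ /* reference-count nested requests; only touch the LPC control register on the first one */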
+ data->clkrun_enabled++;
+ if (data->clkrun_enabled > 1)
+ return;
+ clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /* Disable LPC CLKRUN# */
+ clkrun_val &= ~LPC_CLKRUN_EN;
+ iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /*
+ * Write any value to port 0x80, which is on the LPC bus, to make
+ * sure the LPC clock is running before sending any TPM command.
+ */
+ outb(0xCC, 0x80);
+ } else {
+ data->clkrun_enabled--;
+ if (data->clkrun_enabled)
+ return;
+
+ clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /* Enable LPC CLKRUN# */
+ clkrun_val |= LPC_CLKRUN_EN;
+ iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /*
+ * Write any value to port 0x80, which is on the LPC bus, to make
+ * sure the LPC clock is running before sending any TPM command.
+ */
+ outb(0xCC, 0x80);
+ }
+}
+
+static const struct tpm_class_ops tpm_tis = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = tpm_tis_status,
+ .recv = tpm_tis_recv,
+ .send = tpm_tis_send,
+ .cancel = tpm_tis_ready,
+ .update_timeouts = tpm_tis_update_timeouts,
+ .update_durations = tpm_tis_update_durations,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = tpm_tis_req_canceled,
+ .request_locality = tpm_tis_request_locality,
+ .relinquish_locality = tpm_tis_relinquish_locality,
+ .clk_enable = tpm_tis_clkrun_enable,
+};
+
+int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ const struct tpm_tis_phy_ops *phy_ops,
+ acpi_handle acpi_dev_handle)
+{
+ u32 vendor;
+ u32 intfcaps;
+ u32 intmask;
+ u32 clkrun_val;
+ u8 rid;
+ int rc, probe;
+ struct tpm_chip *chip;
+
+ chip = tpmm_chip_alloc(dev, &tpm_tis);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+#ifdef CONFIG_ACPI
+ chip->acpi_dev_handle = acpi_dev_handle;
+#endif
+
+ chip->hwrng.quality = priv->rng_quality;
+
+ /* Maximum timeouts */
+ chip->timeout_a = msecs_to_jiffies(TIS_TIMEOUT_A_MAX);
+ chip->timeout_b = msecs_to_jiffies(TIS_TIMEOUT_B_MAX);
+ chip->timeout_c = msecs_to_jiffies(TIS_TIMEOUT_C_MAX);
+ chip->timeout_d = msecs_to_jiffies(TIS_TIMEOUT_D_MAX);
+ priv->chip = chip;
+ priv->timeout_min = TPM_TIMEOUT_USECS_MIN;
+ priv->timeout_max = TPM_TIMEOUT_USECS_MAX;
+ priv->phy_ops = phy_ops;
+ priv->locality_count = 0;
+ mutex_init(&priv->locality_count_mutex);
+
+ dev_set_drvdata(&chip->dev, priv);
+
+ rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
+ if (rc < 0)
+ return rc;
+
+ priv->manufacturer_id = vendor;
+
+ if (priv->manufacturer_id == TPM_VID_ATML &&
+ !(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+ priv->timeout_min = TIS_TIMEOUT_MIN_ATML;
+ priv->timeout_max = TIS_TIMEOUT_MAX_ATML;
+ }
+
+ if (is_bsw()) {
+ priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
+ ILB_REMAP_SIZE);
+ if (!priv->ilb_base_addr)
+ return -ENOMEM;
+
+ clkrun_val = ioread32(priv->ilb_base_addr + LPC_CNTRL_OFFSET);
+ /* Check if CLKRUN# is already not enabled in the LPC bus */
+ if (!(clkrun_val & LPC_CLKRUN_EN)) {
+ iounmap(priv->ilb_base_addr);
+ priv->ilb_base_addr = NULL;
+ }
+ }
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
+ if (wait_startup(chip, 0) != 0) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ /* Take control of the TPM's interrupt hardware and shut it off */
+ rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
+ if (rc < 0)
+ goto out_err;
+
+ /* Figure out the capabilities */
+ rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps);
+ if (rc < 0)
+ goto out_err;
+
+ dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
+ intfcaps);
+ if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
+ dev_dbg(dev, "\tBurst Count Static\n");
+ if (intfcaps & TPM_INTF_CMD_READY_INT) {
+ intmask |= TPM_INTF_CMD_READY_INT;
+ dev_dbg(dev, "\tCommand Ready Int Support\n");
+ }
+ if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
+ dev_dbg(dev, "\tInterrupt Edge Falling\n");
+ if (intfcaps & TPM_INTF_INT_EDGE_RISING)
+ dev_dbg(dev, "\tInterrupt Edge Rising\n");
+ if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
+ dev_dbg(dev, "\tInterrupt Level Low\n");
+ if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
+ dev_dbg(dev, "\tInterrupt Level High\n");
+ if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT) {
+ intmask |= TPM_INTF_LOCALITY_CHANGE_INT;
+ dev_dbg(dev, "\tLocality Change Int Support\n");
+ }
+ if (intfcaps & TPM_INTF_STS_VALID_INT) {
+ intmask |= TPM_INTF_STS_VALID_INT;
+ dev_dbg(dev, "\tSts Valid Int Support\n");
+ }
+ if (intfcaps & TPM_INTF_DATA_AVAIL_INT) {
+ intmask |= TPM_INTF_DATA_AVAIL_INT;
+ dev_dbg(dev, "\tData Avail Int Support\n");
+ }
+
+ intmask &= ~TPM_GLOBAL_INT_ENABLE;
+
+ rc = tpm_tis_request_locality(chip, 0);
+ if (rc < 0) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+ tpm_tis_relinquish_locality(chip, 0);
+
+ rc = tpm_chip_start(chip);
+ if (rc)
+ goto out_err;
+ rc = tpm2_probe(chip);
+ tpm_chip_stop(chip);
+ if (rc)
+ goto out_err;
+
+ rc = tpm_tis_read8(priv, TPM_RID(0), &rid);
+ if (rc < 0)
+ goto out_err;
+
+ dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n",
+ (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",
+ vendor >> 16, rid);
+
+ probe = probe_itpm(chip);
+ if (probe < 0) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ /* INTERRUPT Setup */
+ init_waitqueue_head(&priv->read_queue);
+ init_waitqueue_head(&priv->int_queue);
+
+ rc = tpm_chip_bootstrap(chip);
+ if (rc)
+ goto out_err;
+
+ if (irq != -1) {
+ /*
+ * Before doing IRQ testing, issue a command to the TPM in polling mode
+ * to make sure it works. May as well use that command to set the
+ * proper timeouts for the driver.
+ */
+
+ rc = tpm_tis_request_locality(chip, 0);
+ if (rc < 0)
+ goto out_err;
+
+ rc = tpm_get_timeouts(chip);
+
+ tpm_tis_relinquish_locality(chip, 0);
+
+ if (rc) {
+ dev_err(dev, "Could not get TPM timeouts and durations\n");
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ if (irq)
+ tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
+ irq);
+ else
+ tpm_tis_probe_irq(chip, intmask);
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+ priv->int_mask = intmask;
+ } else {
+ dev_err(&chip->dev, FW_BUG
+ "TPM interrupt not working, polling instead\n");
+
+ rc = tpm_tis_request_locality(chip, 0);
+ if (rc < 0)
+ goto out_err;
+ tpm_tis_disable_interrupts(chip);
+ tpm_tis_relinquish_locality(chip, 0);
+ }
+ }
+
+ rc = tpm_chip_register(chip);
+ if (rc)
+ goto out_err;
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ return 0;
+out_err:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ tpm_tis_remove(chip);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_tis_core_init);
+
+#ifdef CONFIG_PM_SLEEP
+static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u32 intmask;
+ int rc;
+
+ /*
+ * Re-enable interrupts that the device may have lost or that BIOS/firmware may
+ * have disabled.
+ */
+ rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq);
+ if (rc < 0) {
+ dev_err(&chip->dev, "Setting IRQ failed.\n");
+ return;
+ }
+
+ intmask = priv->int_mask | TPM_GLOBAL_INT_ENABLE;
+ rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+ if (rc < 0)
+ dev_err(&chip->dev, "Enabling interrupts failed.\n");
+}
+
+int tpm_tis_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ int ret;
+
+ ret = tpm_chip_start(chip);
+ if (ret)
+ return ret;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ)
+ tpm_tis_reenable_interrupts(chip);
+
+ /*
+ * TPM 1.2 requires self-test on resume. This function actually returns
+ * an error code, but for an unknown reason it isn't handled.
+ */
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+ tpm1_do_selftest(chip);
+
+ tpm_chip_stop(chip);
+
+ ret = tpm_pm_resume(dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_tis_resume);
+#endif
+
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
new file mode 100644
index 0000000000..13e99cf65e
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2005, 2006 IBM Corporation
+ * Copyright (C) 2014, 2015 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.2, revision 1.0.
+ */
+
+#ifndef __TPM_TIS_CORE_H__
+#define __TPM_TIS_CORE_H__
+
+#include "tpm.h"
+
+enum tis_access {
+ TPM_ACCESS_VALID = 0x80,
+ TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
+ TPM_ACCESS_REQUEST_PENDING = 0x04,
+ TPM_ACCESS_REQUEST_USE = 0x02,
+};
+
+enum tis_status {
+ TPM_STS_VALID = 0x80,
+ TPM_STS_COMMAND_READY = 0x40,
+ TPM_STS_GO = 0x20,
+ TPM_STS_DATA_AVAIL = 0x10,
+ TPM_STS_DATA_EXPECT = 0x08,
+ TPM_STS_RESPONSE_RETRY = 0x02,
+ TPM_STS_READ_ZERO = 0x23, /* bits that must be zero on read */
+};
+
+enum tis_int_flags {
+ TPM_GLOBAL_INT_ENABLE = 0x80000000,
+ TPM_INTF_BURST_COUNT_STATIC = 0x100,
+ TPM_INTF_CMD_READY_INT = 0x080,
+ TPM_INTF_INT_EDGE_FALLING = 0x040,
+ TPM_INTF_INT_EDGE_RISING = 0x020,
+ TPM_INTF_INT_LEVEL_LOW = 0x010,
+ TPM_INTF_INT_LEVEL_HIGH = 0x008,
+ TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
+ TPM_INTF_STS_VALID_INT = 0x002,
+ TPM_INTF_DATA_AVAIL_INT = 0x001,
+};
+
+enum tis_defaults {
+ TIS_MEM_LEN = 0x5000,
+ TIS_SHORT_TIMEOUT = 750, /* ms */
+ TIS_LONG_TIMEOUT = 2000, /* 2 sec */
+ TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */
+ TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */
+};
+
+/* Some timeout values are needed before it is known whether the chip is
+ * a TPM 1.2 or a TPM 2.0.
+ */
+#define TIS_TIMEOUT_A_MAX max_t(int, TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_A)
+#define TIS_TIMEOUT_B_MAX max_t(int, TIS_LONG_TIMEOUT, TPM2_TIMEOUT_B)
+#define TIS_TIMEOUT_C_MAX max_t(int, TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_C)
+#define TIS_TIMEOUT_D_MAX max_t(int, TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_D)
+
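+ /* each locality has its own 4 KiB page of registers, hence the (l) << 12 offset */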
+#define TPM_ACCESS(l) (0x0000 | ((l) << 12))
+#define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12))
+#define TPM_INT_VECTOR(l) (0x000C | ((l) << 12))
+#define TPM_INT_STATUS(l) (0x0010 | ((l) << 12))
+#define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12))
+#define TPM_STS(l) (0x0018 | ((l) << 12))
+#define TPM_STS3(l) (0x001b | ((l) << 12))
+#define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12))
+
+#define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
+#define TPM_RID(l) (0x0F04 | ((l) << 12))
+
+#define LPC_CNTRL_OFFSET 0x84
+#define LPC_CLKRUN_EN (1 << 2)
+#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000
+#define ILB_REMAP_SIZE 0x100
+
+enum tpm_tis_flags {
+ TPM_TIS_ITPM_WORKAROUND = 0,
+ TPM_TIS_INVALID_STATUS = 1,
+ TPM_TIS_DEFAULT_CANCELLATION = 2,
+ TPM_TIS_IRQ_TESTED = 3,
+};
+
+struct tpm_tis_data {
+ struct tpm_chip *chip;
+ u16 manufacturer_id;
+ struct mutex locality_count_mutex;
+ unsigned int locality_count;
+ int locality;
+ int irq;
+ struct work_struct free_irq_work;
+ unsigned long last_unhandled_irq;
+ unsigned int unhandled_irqs;
+ unsigned int int_mask;
+ unsigned long flags;
+ void __iomem *ilb_base_addr;
+ u16 clkrun_enabled;
+ wait_queue_head_t int_queue;
+ wait_queue_head_t read_queue;
+ const struct tpm_tis_phy_ops *phy_ops;
+ unsigned short rng_quality;
+ unsigned int timeout_min; /* usecs */
+ unsigned int timeout_max; /* usecs */
+};
+
+/*
+ * IO modes to indicate how many bytes should be read/written at once in the
+ * tpm_tis_phy_ops read_bytes/write_bytes calls. Use TPM_TIS_PHYS_8 to
+ * receive/transmit byte-wise, TPM_TIS_PHYS_16 for two bytes etc.
+ */
+enum tpm_tis_io_mode {
+ TPM_TIS_PHYS_8,
+ TPM_TIS_PHYS_16,
+ TPM_TIS_PHYS_32,
+};
+
+struct tpm_tis_phy_ops {
+ /* data is passed in little endian */
+ int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *result, enum tpm_tis_io_mode mode);
+ int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
+ const u8 *value, enum tpm_tis_io_mode mode);
+ int (*verify_crc)(struct tpm_tis_data *data, size_t len,
+ const u8 *value);
+};
+
+static inline int tpm_tis_read_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *result)
+{
+ return data->phy_ops->read_bytes(data, addr, len, result,
+ TPM_TIS_PHYS_8);
+}
+
+static inline int tpm_tis_read8(struct tpm_tis_data *data, u32 addr, u8 *result)
+{
+ return data->phy_ops->read_bytes(data, addr, 1, result, TPM_TIS_PHYS_8);
+}
+
+static inline int tpm_tis_read16(struct tpm_tis_data *data, u32 addr,
+ u16 *result)
+{
+ __le16 result_le;
+ int rc;
+
+ rc = data->phy_ops->read_bytes(data, addr, sizeof(u16),
+ (u8 *)&result_le, TPM_TIS_PHYS_16);
+ if (!rc)
+ *result = le16_to_cpu(result_le);
+
+ return rc;
+}
+
+static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr,
+ u32 *result)
+{
+ __le32 result_le;
+ int rc;
+
+ rc = data->phy_ops->read_bytes(data, addr, sizeof(u32),
+ (u8 *)&result_le, TPM_TIS_PHYS_32);
+ if (!rc)
+ *result = le32_to_cpu(result_le);
+
+ return rc;
+}
+
+static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, const u8 *value)
+{
+ return data->phy_ops->write_bytes(data, addr, len, value,
+ TPM_TIS_PHYS_8);
+}
+
+static inline int tpm_tis_write8(struct tpm_tis_data *data, u32 addr, u8 value)
+{
+ return data->phy_ops->write_bytes(data, addr, 1, &value,
+ TPM_TIS_PHYS_8);
+}
+
+static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr,
+ u32 value)
+{
+ __le32 value_le;
+ int rc;
+
+ value_le = cpu_to_le32(value);
+ rc = data->phy_ops->write_bytes(data, addr, sizeof(u32),
+ (u8 *)&value_le, TPM_TIS_PHYS_32);
+ return rc;
+}
+
+static inline int tpm_tis_verify_crc(struct tpm_tis_data *data, size_t len,
+ const u8 *value)
+{
+ if (!data->phy_ops->verify_crc)
+ return 0;
+ return data->phy_ops->verify_crc(data, len, value);
+}
+
+static inline bool is_bsw(void)
+{
+#ifdef CONFIG_X86
+ return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0);
+#else
+ return false;
+#endif
+}
+
+void tpm_tis_remove(struct tpm_chip *chip);
+int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ const struct tpm_tis_phy_ops *phy_ops,
+ acpi_handle acpi_dev_handle);
+
+#ifdef CONFIG_PM_SLEEP
+int tpm_tis_resume(struct device *dev);
+#endif
+
+#endif
diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
new file mode 100644
index 0000000000..a897402cc3
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_i2c.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2021 Nuvoton Technology corporation
+ * Copyright (C) 2019-2022 Infineon Technologies AG
+ *
+ * This device driver implements the TPM interface as defined in the TCG PC
+ * Client Platform TPM Profile (PTP) Specification for TPM 2.0 v1.04
+ * Revision 14.
+ *
+ * It is based on the tpm_tis_spi device driver.
+ */
+
+#include <linux/i2c.h>
+#include <linux/crc-ccitt.h>
+#include "tpm_tis_core.h"
+
+/* TPM registers */
+#define TPM_I2C_LOC_SEL 0x00
+#define TPM_I2C_ACCESS 0x04
+#define TPM_I2C_INTERFACE_CAPABILITY 0x30
+#define TPM_I2C_DEVICE_ADDRESS 0x38
+#define TPM_I2C_DATA_CSUM_ENABLE 0x40
+#define TPM_DATA_CSUM 0x44
+#define TPM_I2C_DID_VID 0x48
+#define TPM_I2C_RID 0x4C
+
+/* TIS-compatible register address to avoid clash with TPM_ACCESS (0x00) */
+#define TPM_LOC_SEL 0x0FFF
+
+/* Mask to extract the I2C register from TIS register addresses */
+#define TPM_TIS_REGISTER_MASK 0x0FFF
+
+/* Default Guard Time of 250µs until interface capability register is read */
+#define GUARD_TIME_DEFAULT_MIN 250
+#define GUARD_TIME_DEFAULT_MAX 300
+
+/* Guard Time of 250µs after I2C slave NACK */
+#define GUARD_TIME_ERR_MIN 250
+#define GUARD_TIME_ERR_MAX 300
+
+/* Guard Time bit masks; SR is repeated start, RW is read then write, etc. */
+#define TPM_GUARD_TIME_SR_MASK 0x40000000
+#define TPM_GUARD_TIME_RR_MASK 0x00100000
+#define TPM_GUARD_TIME_RW_MASK 0x00080000
+#define TPM_GUARD_TIME_WR_MASK 0x00040000
+#define TPM_GUARD_TIME_WW_MASK 0x00020000
+#define TPM_GUARD_TIME_MIN_MASK 0x0001FE00
+#define TPM_GUARD_TIME_MIN_SHIFT 9
+
+/* Masks with bits that must be read zero */
+#define TPM_ACCESS_READ_ZERO 0x48
+#define TPM_INT_ENABLE_ZERO 0x7FFFFF60
+#define TPM_STS_READ_ZERO 0x23
+#define TPM_INTF_CAPABILITY_ZERO 0x0FFFF000
+#define TPM_I2C_INTERFACE_CAPABILITY_ZERO 0x80000000
+
+struct tpm_tis_i2c_phy {
+ struct tpm_tis_data priv;
+ struct i2c_client *i2c_client;
+ bool guard_time_read;
+ bool guard_time_write;
+ u16 guard_time_min;
+ u16 guard_time_max;
+ u8 *io_buf;
+};
+
+static inline struct tpm_tis_i2c_phy *
+to_tpm_tis_i2c_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_i2c_phy, priv);
+}
+
+/*
+ * tpm_tis_core uses the register addresses as defined in Table 19 "Allocation
+ * of Register Space for FIFO TPM Access" of the TCG PC Client PTP
+ * Specification. In order for this code to work together with tpm_tis_core,
+ * those addresses need to mapped to the registers defined for I2C TPMs in
+ * Table 51 "I2C-TPM Register Overview".
+ *
+ * For most addresses this can be done by simply stripping off the locality
+ * information from the address. A few addresses need to be mapped explicitly,
+ * since the corresponding I2C registers have been moved around. TPM_LOC_SEL is
+ * only defined for I2C TPMs and is also mapped explicitly here to distinguish
+ * it from TPM_ACCESS(0).
+ *
+ * Locality information is ignored, since this driver assumes exclusive access
+ * to the TPM and always uses locality 0.
+ */
+static u8 tpm_tis_i2c_address_to_register(u32 addr)
+{
+ addr &= TPM_TIS_REGISTER_MASK;
+
+ switch (addr) {
+ case TPM_ACCESS(0):
+ return TPM_I2C_ACCESS;
+ case TPM_LOC_SEL:
+ return TPM_I2C_LOC_SEL;
+ case TPM_DID_VID(0):
+ return TPM_I2C_DID_VID;
+ case TPM_RID(0):
+ return TPM_I2C_RID;
+ default:
+ return addr;
+ }
+}
+
+static int tpm_tis_i2c_retry_transfer_until_ack(struct tpm_tis_data *data,
+ struct i2c_msg *msg)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ bool guard_time;
+ int i = 0;
+ int ret;
+
+ if (msg->flags & I2C_M_RD)
+ guard_time = phy->guard_time_read;
+ else
+ guard_time = phy->guard_time_write;
+
+ do {
+ ret = i2c_transfer(phy->i2c_client->adapter, msg, 1);
+ if (ret < 0)
+ usleep_range(GUARD_TIME_ERR_MIN, GUARD_TIME_ERR_MAX);
+ else if (guard_time)
+ usleep_range(phy->guard_time_min, phy->guard_time_max);
+ /* retry on TPM NACK */
+ } while (ret < 0 && i++ < TPM_RETRY);
+
+ return ret;
+}
+
+/* Check that bits which must be read zero are not set */
+static int tpm_tis_i2c_sanity_check_read(u8 reg, u16 len, u8 *buf)
+{
+ u32 zero_mask;
+ u32 value;
+
+ switch (len) {
+ case sizeof(u8):
+ value = buf[0];
+ break;
+ case sizeof(u16):
+ value = le16_to_cpup((__le16 *)buf);
+ break;
+ case sizeof(u32):
+ value = le32_to_cpup((__le32 *)buf);
+ break;
+ default:
+ /* unknown length, skip check */
+ return 0;
+ }
+
+ switch (reg) {
+ case TPM_I2C_ACCESS:
+ zero_mask = TPM_ACCESS_READ_ZERO;
+ break;
+ case TPM_INT_ENABLE(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_INT_ENABLE_ZERO;
+ break;
+ case TPM_STS(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_STS_READ_ZERO;
+ break;
+ case TPM_INTF_CAPS(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_INTF_CAPABILITY_ZERO;
+ break;
+ case TPM_I2C_INTERFACE_CAPABILITY:
+ zero_mask = TPM_I2C_INTERFACE_CAPABILITY_ZERO;
+ break;
+ default:
+ /* unknown register, skip check */
+ return 0;
+ }
+
+ if (unlikely((value & zero_mask) != 0x00)) {
+ pr_debug("TPM I2C read of register 0x%02x failed sanity check: 0x%x\n", reg, value);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int tpm_tis_i2c_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *result, enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ struct i2c_msg msg = { .addr = phy->i2c_client->addr };
+ u8 reg = tpm_tis_i2c_address_to_register(addr);
+ int i;
+ int ret;
+
+ for (i = 0; i < TPM_RETRY; i++) {
+ u16 read = 0;
+
+ while (read < len) {
+ /* write register */
+ msg.len = sizeof(reg);
+ msg.buf = &reg;
+ msg.flags = 0;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+
+ /* read data */
+ msg.buf = result + read;
+ msg.len = len - read;
+ msg.flags = I2C_M_RD;
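+ /* each I2C read transfer is capped at I2C_SMBUS_BLOCK_MAX (32) bytes */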
+ if (msg.len > I2C_SMBUS_BLOCK_MAX)
+ msg.len = I2C_SMBUS_BLOCK_MAX;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+ read += msg.len;
+ }
+
+ ret = tpm_tis_i2c_sanity_check_read(reg, len, result);
+ if (ret == 0)
+ return 0;
+
+ usleep_range(GUARD_TIME_ERR_MIN, GUARD_TIME_ERR_MAX);
+ }
+
+ return ret;
+}
+
+static int tpm_tis_i2c_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ const u8 *value,
+ enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ struct i2c_msg msg = { .addr = phy->i2c_client->addr };
+ u8 reg = tpm_tis_i2c_address_to_register(addr);
+ int ret;
+ u16 wrote = 0;
+
+ if (len > TPM_BUFSIZE - 1)
+ return -EIO;
+
+ phy->io_buf[0] = reg;
+ msg.buf = phy->io_buf;
+ while (wrote < len) {
+ /* write register and data in one go */
+ msg.len = sizeof(reg) + len - wrote;
+ if (msg.len > I2C_SMBUS_BLOCK_MAX)
+ msg.len = I2C_SMBUS_BLOCK_MAX;
+
+ memcpy(phy->io_buf + sizeof(reg), value + wrote,
+ msg.len - sizeof(reg));
+
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+ wrote += msg.len - sizeof(reg);
+ }
+
+ return 0;
+}
+
+static int tpm_tis_i2c_verify_crc(struct tpm_tis_data *data, size_t len,
+ const u8 *value)
+{
+ u16 crc_tpm, crc_host;
+ int rc;
+
+ rc = tpm_tis_read16(data, TPM_DATA_CSUM, &crc_tpm);
+ if (rc < 0)
+ return rc;
+
+ /* reflect crc result, regardless of host endianness */
+ crc_host = swab16(crc_ccitt(0, value, len));
+ if (crc_tpm != crc_host)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Guard Time:
+ * After each I2C operation, the TPM might require the master to wait.
+ * The time period is vendor-specific and must be read from the
+ * TPM_I2C_INTERFACE_CAPABILITY register.
+ *
+ * Before the Guard Time is read (or after the TPM has sent an I2C NACK),
+ * a Guard Time of 250µs applies.
+ *
+ * Various flags in the same register indicate if a guard time is needed:
+ * - SR: <I2C read with repeated start> <guard time> <I2C read>
+ * - RR: <I2C read> <guard time> <I2C read>
+ * - RW: <I2C read> <guard time> <I2C write>
+ * - WR: <I2C write> <guard time> <I2C read>
+ * - WW: <I2C write> <guard time> <I2C write>
+ *
+ * See TCG PC Client PTP Specification v1.04, 8.1.10 GUARD_TIME
+ */
+static int tpm_tis_i2c_init_guard_time(struct tpm_tis_i2c_phy *phy)
+{
+ u32 i2c_caps;
+ int ret;
+
+ phy->guard_time_read = true;
+ phy->guard_time_write = true;
+ phy->guard_time_min = GUARD_TIME_DEFAULT_MIN;
+ phy->guard_time_max = GUARD_TIME_DEFAULT_MAX;
+
+ ret = tpm_tis_i2c_read_bytes(&phy->priv, TPM_I2C_INTERFACE_CAPABILITY,
+ sizeof(i2c_caps), (u8 *)&i2c_caps,
+ TPM_TIS_PHYS_32);
+ if (ret)
+ return ret;
+
+ phy->guard_time_read = (i2c_caps & TPM_GUARD_TIME_RR_MASK) ||
+ (i2c_caps & TPM_GUARD_TIME_RW_MASK);
+ phy->guard_time_write = (i2c_caps & TPM_GUARD_TIME_WR_MASK) ||
+ (i2c_caps & TPM_GUARD_TIME_WW_MASK);
+ phy->guard_time_min = (i2c_caps & TPM_GUARD_TIME_MIN_MASK) >>
+ TPM_GUARD_TIME_MIN_SHIFT;
+ /* guard_time_max = guard_time_min * 1.2 */
+ phy->guard_time_max = phy->guard_time_min + phy->guard_time_min / 5;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
+static const struct tpm_tis_phy_ops tpm_i2c_phy_ops = {
+ .read_bytes = tpm_tis_i2c_read_bytes,
+ .write_bytes = tpm_tis_i2c_write_bytes,
+ .verify_crc = tpm_tis_i2c_verify_crc,
+};
+
+static int tpm_tis_i2c_probe(struct i2c_client *dev)
+{
+ struct tpm_tis_i2c_phy *phy;
+ const u8 crc_enable = 1;
+ const u8 locality = 0;
+ int ret;
+
+ phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_i2c_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->io_buf = devm_kzalloc(&dev->dev, TPM_BUFSIZE, GFP_KERNEL);
+ if (!phy->io_buf)
+ return -ENOMEM;
+
+ set_bit(TPM_TIS_DEFAULT_CANCELLATION, &phy->priv.flags);
+ phy->i2c_client = dev;
+
+ /* must precede all communication with the tpm */
+ ret = tpm_tis_i2c_init_guard_time(phy);
+ if (ret)
+ return ret;
+
+ ret = tpm_tis_i2c_write_bytes(&phy->priv, TPM_LOC_SEL, sizeof(locality),
+ &locality, TPM_TIS_PHYS_8);
+ if (ret)
+ return ret;
+
+ ret = tpm_tis_i2c_write_bytes(&phy->priv, TPM_I2C_DATA_CSUM_ENABLE,
+ sizeof(crc_enable), &crc_enable,
+ TPM_TIS_PHYS_8);
+ if (ret)
+ return ret;
+
+ return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_i2c_phy_ops,
+ NULL);
+}
+
+static void tpm_tis_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+}
+
+static const struct i2c_device_id tpm_tis_i2c_id[] = {
+ { "tpm_tis_i2c", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_tis_i2c_match[] = {
+ { .compatible = "infineon,slb9673", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_tis_i2c_match);
+#endif
+
+static struct i2c_driver tpm_tis_i2c_driver = {
+ .driver = {
+ .name = "tpm_tis_i2c",
+ .pm = &tpm_tis_pm,
+ .of_match_table = of_match_ptr(of_tis_i2c_match),
+ },
+ .probe = tpm_tis_i2c_probe,
+ .remove = tpm_tis_i2c_remove,
+ .id_table = tpm_tis_i2c_id,
+};
+module_i2c_driver(tpm_tis_i2c_driver);
+
+MODULE_DESCRIPTION("TPM Driver for native I2C access");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
new file mode 100644
index 0000000000..e70abd69e1
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -0,0 +1,795 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Based on Infineon TPM driver by Peter Huewe.
+ *
+ * cr50 is a firmware for H1 secure modules that requires special
+ * handling for the I2C interface.
+ *
+ * - Use an interrupt for transaction status instead of hardcoded delays.
+ * - Must use a write+wait+read protocol for reads.
+ * - All 4 bytes of status register must be read/written at once.
+ * - Burst count max is 63 bytes, and burst count behaves slightly differently
+ * than other I2C TPMs.
+ * - When reading from FIFO the full burstcnt must be read instead of just
+ * reading header and determining the remainder.
+ */
+
+#include <linux/acpi.h>
+#include <linux/completion.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include "tpm_tis_core.h"
+
+#define TPM_CR50_MAX_BUFSIZE 64
+#define TPM_CR50_TIMEOUT_SHORT_MS 2 /* Short timeout during transactions */
+#define TPM_CR50_TIMEOUT_NOIRQ_MS 20 /* Timeout for TPM ready without IRQ */
+#define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID reg value */
+#define TPM_TI50_I2C_DID_VID 0x504a6666L /* Device and vendor ID reg value */
+#define TPM_CR50_I2C_MAX_RETRIES 3 /* Max retries due to I2C errors */
+#define TPM_CR50_I2C_RETRY_DELAY_LO 55 /* Min usecs between retries on I2C */
+#define TPM_CR50_I2C_RETRY_DELAY_HI 65 /* Max usecs between retries on I2C */
+
+#define TPM_I2C_ACCESS(l) (0x0000 | ((l) << 4))
+#define TPM_I2C_STS(l) (0x0001 | ((l) << 4))
+#define TPM_I2C_DATA_FIFO(l) (0x0005 | ((l) << 4))
+#define TPM_I2C_DID_VID(l) (0x0006 | ((l) << 4))
+
+/**
+ * struct tpm_i2c_cr50_priv_data - Driver private data.
+ * @irq: Irq number used for this chip.
+ * If irq <= 0, then a fixed timeout is used instead of waiting for irq.
+ * @tpm_ready: Struct used by irq handler to signal R/W readiness.
+ * @buf: Buffer used for i2c writes, with i2c address prepended to content.
+ *
+ * Private driver struct used by kernel threads and interrupt context.
+ */
+struct tpm_i2c_cr50_priv_data {
+ int irq;
+ struct completion tpm_ready;
+ u8 buf[TPM_CR50_MAX_BUFSIZE];
+};
+
+/**
+ * tpm_cr50_i2c_int_handler() - cr50 interrupt handler.
+ * @dummy: Unused parameter.
+ * @tpm_info: TPM chip information.
+ *
+ * The cr50 interrupt handler signals waiting threads that the
+ * interrupt has been asserted. It does not do any interrupt triggered
+ * processing but is instead used to avoid fixed delays.
+ *
+ * Return:
+ * IRQ_HANDLED signifies irq was handled by this device.
+ */
+static irqreturn_t tpm_cr50_i2c_int_handler(int dummy, void *tpm_info)
+{
+ struct tpm_chip *chip = tpm_info;
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ complete(&priv->tpm_ready);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * tpm_cr50_i2c_wait_tpm_ready() - Wait for tpm to signal ready.
+ * @chip: A TPM chip.
+ *
+ * Wait for completion interrupt if available, otherwise use a fixed
+ * delay for the TPM to be ready.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_wait_tpm_ready(struct tpm_chip *chip)
+{
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ /* Use a safe fixed delay if interrupt is not supported */
+ if (priv->irq <= 0) {
+ msleep(TPM_CR50_TIMEOUT_NOIRQ_MS);
+ return 0;
+ }
+
+ /* Wait for interrupt to indicate TPM is ready to respond */
+ if (!wait_for_completion_timeout(&priv->tpm_ready, chip->timeout_a)) {
+ dev_warn(&chip->dev, "Timeout waiting for TPM ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * tpm_cr50_i2c_enable_tpm_irq() - Enable TPM irq.
+ * @chip: A TPM chip.
+ */
+static void tpm_cr50_i2c_enable_tpm_irq(struct tpm_chip *chip)
+{
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ if (priv->irq > 0) {
+ reinit_completion(&priv->tpm_ready);
+ enable_irq(priv->irq);
+ }
+}
+
+/**
+ * tpm_cr50_i2c_disable_tpm_irq() - Disable TPM irq.
+ * @chip: A TPM chip.
+ */
+static void tpm_cr50_i2c_disable_tpm_irq(struct tpm_chip *chip)
+{
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ if (priv->irq > 0)
+ disable_irq(priv->irq);
+}
+
+/**
+ * tpm_cr50_i2c_transfer_message() - Transfer a message over i2c.
+ * @dev: Device information.
+ * @adapter: I2C adapter.
+ * @msg: Message to transfer.
+ *
+ * Call unlocked i2c transfer routine with the provided parameters and
+ * retry in case of bus errors.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_transfer_message(struct device *dev,
+ struct i2c_adapter *adapter,
+ struct i2c_msg *msg)
+{
+ unsigned int try;
+ int rc;
+
+ for (try = 0; try < TPM_CR50_I2C_MAX_RETRIES; try++) {
+ rc = __i2c_transfer(adapter, msg, 1);
+ if (rc == 1)
+ return 0; /* Successfully transferred the message */
+ if (try)
+ dev_warn(dev, "i2c transfer failed (attempt %d/%d): %d\n",
+ try + 1, TPM_CR50_I2C_MAX_RETRIES, rc);
+ usleep_range(TPM_CR50_I2C_RETRY_DELAY_LO, TPM_CR50_I2C_RETRY_DELAY_HI);
+ }
+
+ /* No i2c message transferred */
+ return -EIO;
+}
+
+/**
+ * tpm_cr50_i2c_read() - Read from TPM register.
+ * @chip: A TPM chip.
+ * @addr: Register address to read from.
+ * @buffer: Read destination, provided by caller.
+ * @len: Number of bytes to read.
+ *
+ * Sends the register address byte to the TPM, waits until the TPM is
+ * ready via interrupt signal or timeout expiration, and then reads
+ * 'len' bytes of the TPM response into the provided 'buffer'.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_read(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t len)
+{
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ struct i2c_msg msg_reg_addr = {
+ .addr = client->addr,
+ .len = 1,
+ .buf = &addr
+ };
+ struct i2c_msg msg_response = {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buffer
+ };
+ int rc;
+
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ /* Prepare for completion interrupt */
+ tpm_cr50_i2c_enable_tpm_irq(chip);
+
+ /* Send the register address byte to the TPM */
+ rc = tpm_cr50_i2c_transfer_message(&chip->dev, client->adapter, &msg_reg_addr);
+ if (rc < 0)
+ goto out;
+
+ /* Wait for TPM to be ready with response data */
+ rc = tpm_cr50_i2c_wait_tpm_ready(chip);
+ if (rc < 0)
+ goto out;
+
+ /* Read response data from the TPM */
+ rc = tpm_cr50_i2c_transfer_message(&chip->dev, client->adapter, &msg_response);
+
+out:
+ tpm_cr50_i2c_disable_tpm_irq(chip);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+/**
+ * tpm_cr50_i2c_write()- Write to TPM register.
+ * @chip: A TPM chip.
+ * @addr: Register address to write to.
+ * @buffer: Data to write.
+ * @len: Number of bytes to write.
+ *
+ * The provided address is prepended to the data in 'buffer'; the
+ * combined address+data is sent to the TPM, and then the function
+ * waits for the TPM to indicate it is done writing.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer,
+ size_t len)
+{
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .len = len + 1,
+ .buf = priv->buf
+ };
+ int rc;
+
+ if (len > TPM_CR50_MAX_BUFSIZE - 1)
+ return -EINVAL;
+
+ /* Prepend the 'register address' to the buffer */
+ priv->buf[0] = addr;
+ memcpy(priv->buf + 1, buffer, len);
+
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ /* Prepare for completion interrupt */
+ tpm_cr50_i2c_enable_tpm_irq(chip);
+
+ /* Send write request buffer with address */
+ rc = tpm_cr50_i2c_transfer_message(&chip->dev, client->adapter, &msg);
+ if (rc < 0)
+ goto out;
+
+ /* Wait for TPM to be ready, ignore timeout */
+ tpm_cr50_i2c_wait_tpm_ready(chip);
+
+out:
+ tpm_cr50_i2c_disable_tpm_irq(chip);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+/**
+ * tpm_cr50_check_locality() - Verify TPM locality 0 is active.
+ * @chip: A TPM chip.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_check_locality(struct tpm_chip *chip)
+{
+ u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY;
+ u8 buf;
+ int rc;
+
+ rc = tpm_cr50_i2c_read(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf));
+ if (rc < 0)
+ return rc;
+
+ if ((buf & mask) == mask)
+ return 0;
+
+ return -EIO;
+}
+
+/**
+ * tpm_cr50_release_locality() - Release TPM locality.
+ * @chip: A TPM chip.
+ * @force: Flag to force release if set.
+ */
+static void tpm_cr50_release_locality(struct tpm_chip *chip, bool force)
+{
+ u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_REQUEST_PENDING;
+ u8 addr = TPM_I2C_ACCESS(0);
+ u8 buf;
+
+ if (tpm_cr50_i2c_read(chip, addr, &buf, sizeof(buf)) < 0)
+ return;
+
+ if (force || (buf & mask) == mask) {
+ buf = TPM_ACCESS_ACTIVE_LOCALITY;
+ tpm_cr50_i2c_write(chip, addr, &buf, sizeof(buf));
+ }
+}
+
+/**
+ * tpm_cr50_request_locality() - Request TPM locality 0.
+ * @chip: A TPM chip.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_request_locality(struct tpm_chip *chip)
+{
+ u8 buf = TPM_ACCESS_REQUEST_USE;
+ unsigned long stop;
+ int rc;
+
+ if (!tpm_cr50_check_locality(chip))
+ return 0;
+
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf));
+ if (rc < 0)
+ return rc;
+
+ stop = jiffies + chip->timeout_a;
+ do {
+ if (!tpm_cr50_check_locality(chip))
+ return 0;
+
+ msleep(TPM_CR50_TIMEOUT_SHORT_MS);
+ } while (time_before(jiffies, stop));
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * tpm_cr50_i2c_tis_status() - Read cr50 tis status.
+ * @chip: A TPM chip.
+ *
+ * cr50 requires all 4 bytes of status register to be read.
+ *
+ * Return:
+ * TPM status byte.
+ */
+static u8 tpm_cr50_i2c_tis_status(struct tpm_chip *chip)
+{
+ u8 buf[4];
+
+ if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0)
+ return 0;
+
+ return buf[0];
+}
+
+/**
+ * tpm_cr50_i2c_tis_set_ready() - Set status register to ready.
+ * @chip: A TPM chip.
+ *
+ * cr50 requires all 4 bytes of status register to be written.
+ */
+static void tpm_cr50_i2c_tis_set_ready(struct tpm_chip *chip)
+{
+ u8 buf[4] = { TPM_STS_COMMAND_READY };
+
+ tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), buf, sizeof(buf));
+ msleep(TPM_CR50_TIMEOUT_SHORT_MS);
+}
+
+/**
+ * tpm_cr50_i2c_get_burst_and_status() - Get burst count and status.
+ * @chip: A TPM chip.
+ * @mask: Status mask.
+ * @burst: Return value for burst.
+ * @status: Return value for status.
+ *
+ * cr50 uses bytes 3:2 of status register for burst count and
+ * all 4 bytes must be read.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_get_burst_and_status(struct tpm_chip *chip, u8 mask,
+ size_t *burst, u32 *status)
+{
+ unsigned long stop;
+ u8 buf[4];
+
+ *status = 0;
+
+ /* wait for burstcount */
+ stop = jiffies + chip->timeout_b;
+
+ do {
+ if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0) {
+ msleep(TPM_CR50_TIMEOUT_SHORT_MS);
+ continue;
+ }
+
+ *status = *buf;
+ *burst = le16_to_cpup((__le16 *)(buf + 1));
+
+ if ((*status & mask) == mask &&
+ *burst > 0 && *burst <= TPM_CR50_MAX_BUFSIZE - 1)
+ return 0;
+
+ msleep(TPM_CR50_TIMEOUT_SHORT_MS);
+ } while (time_before(jiffies, stop));
+
+ dev_err(&chip->dev, "Timeout reading burst and status\n");
+ return -ETIMEDOUT;
+}
+
+/**
+ * tpm_cr50_i2c_tis_recv() - TPM reception callback.
+ * @chip: A TPM chip.
+ * @buf: Reception buffer.
+ * @buf_len: Buffer length to read.
+ *
+ * Return:
+ * - >= 0: Number of read bytes.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
+{
+
+ u8 mask = TPM_STS_VALID | TPM_STS_DATA_AVAIL;
+ size_t burstcnt, cur, len, expected;
+ u8 addr = TPM_I2C_DATA_FIFO(0);
+ u32 status;
+ int rc;
+
+ if (buf_len < TPM_HEADER_SIZE)
+ return -EINVAL;
+
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, mask, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+
+ if (burstcnt > buf_len || burstcnt < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev,
+ "Unexpected burstcnt: %zu (max=%zu, min=%d)\n",
+ burstcnt, buf_len, TPM_HEADER_SIZE);
+ rc = -EIO;
+ goto out_err;
+ }
+
+ /* Read first chunk of burstcnt bytes */
+ rc = tpm_cr50_i2c_read(chip, addr, buf, burstcnt);
+ if (rc < 0) {
+ dev_err(&chip->dev, "Read of first chunk failed\n");
+ goto out_err;
+ }
+
+ /* Determine expected data in the return buffer */
+ expected = be32_to_cpup((__be32 *)(buf + 2));
+ if (expected > buf_len) {
+ dev_err(&chip->dev, "Buffer too small to receive i2c data\n");
+ rc = -E2BIG;
+ goto out_err;
+ }
+
+ /* Now read the rest of the data */
+ cur = burstcnt;
+ while (cur < expected) {
+ /* Read updated burst count and check status */
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, mask, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+
+ len = min_t(size_t, burstcnt, expected - cur);
+ rc = tpm_cr50_i2c_read(chip, addr, buf + cur, len);
+ if (rc < 0) {
+ dev_err(&chip->dev, "Read failed\n");
+ goto out_err;
+ }
+
+ cur += len;
+ }
+
+ /* Ensure TPM is done reading data */
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, TPM_STS_VALID, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+ if (status & TPM_STS_DATA_AVAIL) {
+ dev_err(&chip->dev, "Data still available\n");
+ rc = -EIO;
+ goto out_err;
+ }
+
+ tpm_cr50_release_locality(chip, false);
+ return cur;
+
+out_err:
+ /* Abort current transaction if still pending */
+ if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)
+ tpm_cr50_i2c_tis_set_ready(chip);
+
+ tpm_cr50_release_locality(chip, false);
+ return rc;
+}
+
+/**
+ * tpm_cr50_i2c_tis_send() - TPM transmission callback.
+ * @chip: A TPM chip.
+ * @buf: Buffer to send.
+ * @len: Buffer length.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ size_t burstcnt, limit, sent = 0;
+ u8 tpm_go[4] = { TPM_STS_GO };
+ unsigned long stop;
+ u32 status;
+ int rc;
+
+ rc = tpm_cr50_request_locality(chip);
+ if (rc < 0)
+ return rc;
+
+ /* Wait until TPM is ready for a command */
+ stop = jiffies + chip->timeout_b;
+ while (!(tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)) {
+ if (time_after(jiffies, stop)) {
+ rc = -ETIMEDOUT;
+ goto out_err;
+ }
+
+ tpm_cr50_i2c_tis_set_ready(chip);
+ }
+
+ while (len > 0) {
+ u8 mask = TPM_STS_VALID;
+
+ /* Wait for data if this is not the first chunk */
+ if (sent > 0)
+ mask |= TPM_STS_DATA_EXPECT;
+
+ /* Read burst count and check status */
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, mask, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+
+ /*
+ * Use burstcnt - 1 to account for the address byte
+ * that is inserted by tpm_cr50_i2c_write()
+ */
+ limit = min_t(size_t, burstcnt - 1, len);
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_DATA_FIFO(0), &buf[sent], limit);
+ if (rc < 0) {
+ dev_err(&chip->dev, "Write failed\n");
+ goto out_err;
+ }
+
+ sent += limit;
+ len -= limit;
+ }
+
+ /* Ensure TPM is not expecting more data */
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, TPM_STS_VALID, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+ if (status & TPM_STS_DATA_EXPECT) {
+ dev_err(&chip->dev, "Data still expected\n");
+ rc = -EIO;
+ goto out_err;
+ }
+
+ /* Start the TPM command */
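+	/* Byte 0 is TPM_STS_GO; the remaining bytes of the 4-byte status write are zero */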
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), tpm_go,
+ sizeof(tpm_go));
+ if (rc < 0) {
+ dev_err(&chip->dev, "Start command failed\n");
+ goto out_err;
+ }
+ return 0;
+
+out_err:
+ /* Abort current transaction if still pending */
+ if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)
+ tpm_cr50_i2c_tis_set_ready(chip);
+
+ tpm_cr50_release_locality(chip, false);
+ return rc;
+}
+
+/**
+ * tpm_cr50_i2c_req_canceled() - Callback to check whether a request was canceled.
+ * @chip: A TPM chip.
+ * @status: Chip status to check for the canceled state.
+ *
+ * Return:
+ * True if the command was canceled (chip reports command-ready), false otherwise.
+ */
+static bool tpm_cr50_i2c_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return status == TPM_STS_COMMAND_READY;
+}
+
+static bool tpm_cr50_i2c_is_firmware_power_managed(struct device *dev)
+{
+ u8 val;
+ int ret;
+
+	/* This flag should default to true when the device property is not present */
+ ret = device_property_read_u8(dev, "firmware-power-managed", &val);
+ if (ret)
+ return true;
+
+ return val;
+}
+
+static const struct tpm_class_ops cr50_i2c = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = &tpm_cr50_i2c_tis_status,
+ .recv = &tpm_cr50_i2c_tis_recv,
+ .send = &tpm_cr50_i2c_tis_send,
+ .cancel = &tpm_cr50_i2c_tis_set_ready,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = &tpm_cr50_i2c_req_canceled,
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cr50_i2c_acpi_id[] = {
+ { "GOOG0005", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, cr50_i2c_acpi_id);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_cr50_i2c_match[] = {
+ { .compatible = "google,cr50", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_cr50_i2c_match);
+#endif
+
+/**
+ * tpm_cr50_i2c_probe() - Driver probe function.
+ * @client: I2C client information.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_probe(struct i2c_client *client)
+{
+ struct tpm_i2c_cr50_priv_data *priv;
+ struct device *dev = &client->dev;
+ struct tpm_chip *chip;
+ u32 vendor;
+ u8 buf[4];
+ int rc;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ chip = tpmm_chip_alloc(dev, &cr50_i2c);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* cr50 is a TPM 2.0 chip */
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+ if (tpm_cr50_i2c_is_firmware_power_managed(dev))
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;
+
+ /* Default timeouts */
+ chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+ chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
+ dev_set_drvdata(&chip->dev, priv);
+ init_completion(&priv->tpm_ready);
+
+ if (client->irq > 0) {
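+		/* IRQF_NO_AUTOEN keeps the interrupt disabled until the driver explicitly enables it */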
+ rc = devm_request_irq(dev, client->irq, tpm_cr50_i2c_int_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
+ IRQF_NO_AUTOEN,
+ dev->driver->name, chip);
+ if (rc < 0) {
+ dev_err(dev, "Failed to probe IRQ %d\n", client->irq);
+ return rc;
+ }
+
+ priv->irq = client->irq;
+ } else {
+ dev_warn(dev, "No IRQ, will use %ums delay for TPM ready\n",
+ TPM_CR50_TIMEOUT_NOIRQ_MS);
+ }
+
+ rc = tpm_cr50_request_locality(chip);
+ if (rc < 0) {
+ dev_err(dev, "Could not request locality\n");
+ return rc;
+ }
+
+ /* Read four bytes from DID_VID register */
+ rc = tpm_cr50_i2c_read(chip, TPM_I2C_DID_VID(0), buf, sizeof(buf));
+ if (rc < 0) {
+ dev_err(dev, "Could not read vendor id\n");
+ tpm_cr50_release_locality(chip, true);
+ return rc;
+ }
+
+ vendor = le32_to_cpup((__le32 *)buf);
+ if (vendor != TPM_CR50_I2C_DID_VID && vendor != TPM_TI50_I2C_DID_VID) {
+ dev_err(dev, "Vendor ID did not match! ID was %08x\n", vendor);
+ tpm_cr50_release_locality(chip, true);
+ return -ENODEV;
+ }
+
+ dev_info(dev, "%s TPM 2.0 (i2c 0x%02x irq %d id 0x%x)\n",
+ vendor == TPM_TI50_I2C_DID_VID ? "ti50" : "cr50",
+ client->addr, client->irq, vendor >> 16);
+ return tpm_chip_register(chip);
+}
+
+/**
+ * tpm_cr50_i2c_remove() - Driver remove function.
+ * @client: I2C client information.
+ */
+static void tpm_cr50_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+ struct device *dev = &client->dev;
+
+ if (!chip) {
+ dev_crit(dev, "Could not get client data at remove, memory corruption ahead\n");
+ return;
+ }
+
+ tpm_chip_unregister(chip);
+ tpm_cr50_release_locality(chip, true);
+}
+
+static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume);
+
+static struct i2c_driver cr50_i2c_driver = {
+ .probe = tpm_cr50_i2c_probe,
+ .remove = tpm_cr50_i2c_remove,
+ .driver = {
+ .name = "cr50_i2c",
+ .pm = &cr50_i2c_pm,
+ .acpi_match_table = ACPI_PTR(cr50_i2c_acpi_id),
+ .of_match_table = of_match_ptr(of_cr50_i2c_match),
+ },
+};
+
+module_i2c_driver(cr50_i2c_driver);
+
+MODULE_DESCRIPTION("cr50 TPM I2C Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis_spi.h b/drivers/char/tpm/tpm_tis_spi.h
new file mode 100644
index 0000000000..d0f66f6f19
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_spi.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Infineon Technologies AG
+ * Copyright (C) 2016 STMicroelectronics SAS
+ */
+
+#ifndef TPM_TIS_SPI_H
+#define TPM_TIS_SPI_H
+
+#include "tpm_tis_core.h"
+
+struct tpm_tis_spi_phy {
+ struct tpm_tis_data priv;
+ struct spi_device *spi_device;
+ int (*flow_control)(struct tpm_tis_spi_phy *phy,
+ struct spi_transfer *xfer);
+ struct completion ready;
+ unsigned long wake_after;
+
+ u8 *iobuf;
+};
+
+static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_spi_phy, priv);
+}
+
+extern int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
+ int irq, const struct tpm_tis_phy_ops *phy_ops);
+
+extern int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out);
+
+#ifdef CONFIG_TCG_TIS_SPI_CR50
+extern int cr50_spi_probe(struct spi_device *spi);
+#else
+static inline int cr50_spi_probe(struct spi_device *spi)
+{
+ return -ENODEV;
+}
+#endif
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_TCG_TIS_SPI_CR50)
+extern int tpm_tis_spi_resume(struct device *dev);
+#else
+#define tpm_tis_spi_resume NULL
+#endif
+
+#endif
diff --git a/drivers/char/tpm/tpm_tis_spi_cr50.c b/drivers/char/tpm/tpm_tis_spi_cr50.c
new file mode 100644
index 0000000000..f4937280e9
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_spi_cr50.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This device driver implements a TCG PTP FIFO interface over SPI for chips
+ * with Cr50 firmware.
+ * It is based on tpm_tis_spi driver by Peter Huewe and Christophe Ricard.
+ */
+
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
+
+#include "tpm_tis_core.h"
+#include "tpm_tis_spi.h"
+
+/*
+ * Cr50 timing constants:
+ * - may go to sleep no earlier than CR50_SLEEP_DELAY_MSEC after the last access.
+ * - needs up to CR50_WAKE_START_DELAY_USEC to wake after sleep.
+ * - requires waiting for the "ready" IRQ, if supported; or waiting for at least
+ *   CR50_NOIRQ_ACCESS_DELAY between transactions, if the IRQ is not supported.
+ * - waits for up to CR50_FLOW_CONTROL for the flow control 'ready' indication.
+ */
+#define CR50_SLEEP_DELAY_MSEC 1000
+#define CR50_WAKE_START_DELAY_USEC 1000
+#define CR50_NOIRQ_ACCESS_DELAY msecs_to_jiffies(2)
+#define CR50_READY_IRQ_TIMEOUT msecs_to_jiffies(TPM2_TIMEOUT_A)
+#define CR50_FLOW_CONTROL msecs_to_jiffies(TPM2_TIMEOUT_A)
+#define MAX_IRQ_CONFIRMATION_ATTEMPTS 3
+
+#define TPM_CR50_FW_VER(l) (0x0f90 | ((l) << 12))
+#define TPM_CR50_MAX_FW_VER_LEN 64
+
+/* Default quality for hwrng. */
+#define TPM_CR50_DEFAULT_RNG_QUALITY 700
+
+struct cr50_spi_phy {
+ struct tpm_tis_spi_phy spi_phy;
+
+ struct mutex time_track_mutex;
+ unsigned long last_access;
+
+ unsigned long access_delay;
+
+ unsigned int irq_confirmation_attempt;
+ bool irq_needs_confirmation;
+ bool irq_confirmed;
+};
+
+static inline struct cr50_spi_phy *to_cr50_spi_phy(struct tpm_tis_spi_phy *phy)
+{
+ return container_of(phy, struct cr50_spi_phy, spi_phy);
+}
+
+/*
+ * The cr50 interrupt handler just signals waiting threads that the
+ * interrupt was asserted. It does not do any processing triggered
+ * by interrupts but is instead used to avoid fixed delays.
+ */
+static irqreturn_t cr50_spi_irq_handler(int dummy, void *dev_id)
+{
+ struct cr50_spi_phy *cr50_phy = dev_id;
+
+ cr50_phy->irq_confirmed = true;
+ complete(&cr50_phy->spi_phy.ready);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Cr50 needs to have at least some delay between consecutive
+ * transactions. Make sure we wait.
+ */
+static void cr50_ensure_access_delay(struct cr50_spi_phy *phy)
+{
+ unsigned long allowed_access = phy->last_access + phy->access_delay;
+ unsigned long time_now = jiffies;
+ struct device *dev = &phy->spi_phy.spi_device->dev;
+
+ /*
+ * Note: There is a small chance, if Cr50 is not accessed in a few days,
+ * that time_in_range will not provide the correct result after the wrap
+ * around for jiffies. In this case, we'll have an unneeded short delay,
+ * which is fine.
+ */
+ if (time_in_range_open(time_now, phy->last_access, allowed_access)) {
+ unsigned long remaining, timeout = allowed_access - time_now;
+
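+		/* Sleep out the remaining access-delay window; the ready IRQ completes the wait early */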
+ remaining = wait_for_completion_timeout(&phy->spi_phy.ready,
+ timeout);
+ if (!remaining && phy->irq_confirmed)
+ dev_warn(dev, "Timeout waiting for TPM ready IRQ\n");
+ }
+
+ if (phy->irq_needs_confirmation) {
+ unsigned int attempt = ++phy->irq_confirmation_attempt;
+
+ if (phy->irq_confirmed) {
+ phy->irq_needs_confirmation = false;
+ phy->access_delay = CR50_READY_IRQ_TIMEOUT;
+ dev_info(dev, "TPM ready IRQ confirmed on attempt %u\n",
+ attempt);
+ } else if (attempt > MAX_IRQ_CONFIRMATION_ATTEMPTS) {
+ phy->irq_needs_confirmation = false;
+ dev_warn(dev, "IRQ not confirmed - will use delays\n");
+ }
+ }
+}
+
+/*
+ * Cr50 might go to sleep if there is no SPI activity for some time and
+ * miss the first few bits/bytes on the bus. In such a case, wake it up
+ * by asserting CS and give it time to start up.
+ */
+static bool cr50_needs_waking(struct cr50_spi_phy *phy)
+{
+ /*
+ * Note: There is a small chance, if Cr50 is not accessed in a few days,
+ * that time_in_range will not provide the correct result after the wrap
+ * around for jiffies. In this case, we'll probably timeout or read
+ * incorrect value from TPM_STS and just retry the operation.
+ */
+ return !time_in_range_open(jiffies, phy->last_access,
+ phy->spi_phy.wake_after);
+}
+
+static void cr50_wake_if_needed(struct cr50_spi_phy *cr50_phy)
+{
+ struct tpm_tis_spi_phy *phy = &cr50_phy->spi_phy;
+
+ if (cr50_needs_waking(cr50_phy)) {
+ /* Assert CS, wait 1 msec, deassert CS */
+ struct spi_transfer spi_cs_wake = {
+ .delay = {
+ .value = 1000,
+ .unit = SPI_DELAY_UNIT_USECS
+ }
+ };
+
+ spi_sync_transfer(phy->spi_device, &spi_cs_wake, 1);
+ /* Wait for it to fully wake */
+ usleep_range(CR50_WAKE_START_DELAY_USEC,
+ CR50_WAKE_START_DELAY_USEC * 2);
+ }
+
+ /* Reset the time when we need to wake Cr50 again */
+ phy->wake_after = jiffies + msecs_to_jiffies(CR50_SLEEP_DELAY_MSEC);
+}
+
+/*
+ * Flow control: clock the bus and wait for cr50 to set LSB before
+ * sending/receiving data. TCG PTP spec allows it to happen during
+ * the last byte of the header, but cr50 never does that in practice,
+ * and earlier versions had a bug where it was set too early, so don't
+ * check for it during header transfer.
+ */
+static int cr50_spi_flow_control(struct tpm_tis_spi_phy *phy,
+ struct spi_transfer *spi_xfer)
+{
+ struct device *dev = &phy->spi_device->dev;
+ unsigned long timeout = jiffies + CR50_FLOW_CONTROL;
+ struct spi_message m;
+ int ret;
+
+ spi_xfer->len = 1;
+
+ do {
+ spi_message_init(&m);
+ spi_message_add_tail(spi_xfer, &m);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ return ret;
+
+ if (time_after(jiffies, timeout)) {
+ dev_warn(dev, "Timeout during flow control\n");
+ return -EBUSY;
+ }
+ } while (!(phy->iobuf[0] & 0x01));
+
+ return 0;
+}
+
+static bool tpm_cr50_spi_is_firmware_power_managed(struct device *dev)
+{
+ u8 val;
+ int ret;
+
+	/* This flag should default to true when the device property is not present */
+ ret = device_property_read_u8(dev, "firmware-power-managed", &val);
+ if (ret)
+ return true;
+
+ return val;
+}
+
+static int tpm_tis_spi_cr50_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ struct cr50_spi_phy *cr50_phy = to_cr50_spi_phy(phy);
+ int ret;
+
+ mutex_lock(&cr50_phy->time_track_mutex);
+ /*
+ * Do this outside of spi_bus_lock in case cr50 is not the
+ * only device on that spi bus.
+ */
+ cr50_ensure_access_delay(cr50_phy);
+ cr50_wake_if_needed(cr50_phy);
+
+ ret = tpm_tis_spi_transfer(data, addr, len, in, out);
+
+ cr50_phy->last_access = jiffies;
+ mutex_unlock(&cr50_phy->time_track_mutex);
+
+ return ret;
+}
+
+static int tpm_tis_spi_cr50_read_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *result, enum tpm_tis_io_mode io_mode)
+{
+ return tpm_tis_spi_cr50_transfer(data, addr, len, result, NULL);
+}
+
+static int tpm_tis_spi_cr50_write_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, const u8 *value, enum tpm_tis_io_mode io_mode)
+{
+ return tpm_tis_spi_cr50_transfer(data, addr, len, NULL, value);
+}
+
+static const struct tpm_tis_phy_ops tpm_spi_cr50_phy_ops = {
+ .read_bytes = tpm_tis_spi_cr50_read_bytes,
+ .write_bytes = tpm_tis_spi_cr50_write_bytes,
+};
+
+static void cr50_print_fw_version(struct tpm_tis_data *data)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ int i, len = 0;
+ char fw_ver[TPM_CR50_MAX_FW_VER_LEN + 1];
+ char fw_ver_block[4];
+
+ /*
+ * Write anything to TPM_CR50_FW_VER to start from the beginning
+ * of the version string
+ */
+ tpm_tis_write8(data, TPM_CR50_FW_VER(data->locality), 0);
+
+ /* Read the string, 4 bytes at a time, until we get '\0' */
+ do {
+ tpm_tis_read_bytes(data, TPM_CR50_FW_VER(data->locality), 4,
+ fw_ver_block);
+ for (i = 0; i < 4 && fw_ver_block[i]; ++len, ++i)
+ fw_ver[len] = fw_ver_block[i];
+ } while (i == 4 && len < TPM_CR50_MAX_FW_VER_LEN);
+ fw_ver[len] = '\0';
+
+ dev_info(&phy->spi_device->dev, "Cr50 firmware version: %s\n", fw_ver);
+}
+
+int cr50_spi_probe(struct spi_device *spi)
+{
+ struct tpm_tis_spi_phy *phy;
+ struct cr50_spi_phy *cr50_phy;
+ int ret;
+ struct tpm_chip *chip;
+
+ cr50_phy = devm_kzalloc(&spi->dev, sizeof(*cr50_phy), GFP_KERNEL);
+ if (!cr50_phy)
+ return -ENOMEM;
+
+ phy = &cr50_phy->spi_phy;
+ phy->flow_control = cr50_spi_flow_control;
+ phy->wake_after = jiffies;
+ phy->priv.rng_quality = TPM_CR50_DEFAULT_RNG_QUALITY;
+ init_completion(&phy->ready);
+
+ cr50_phy->access_delay = CR50_NOIRQ_ACCESS_DELAY;
+ cr50_phy->last_access = jiffies;
+ mutex_init(&cr50_phy->time_track_mutex);
+
+ if (spi->irq > 0) {
+ ret = devm_request_irq(&spi->dev, spi->irq,
+ cr50_spi_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "cr50_spi", cr50_phy);
+ if (ret < 0) {
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ dev_warn(&spi->dev, "Requesting IRQ %d failed: %d\n",
+ spi->irq, ret);
+ /*
+ * This is not fatal, the driver will fall back to
+ * delays automatically, since ready will never
+ * be completed without a registered irq handler.
+ * So, just fall through.
+ */
+ } else {
+ /*
+ * IRQ requested, let's verify that it is actually
+ * triggered, before relying on it.
+ */
+ cr50_phy->irq_needs_confirmation = true;
+ }
+ } else {
+ dev_warn(&spi->dev,
+ "No IRQ - will use delays between transactions.\n");
+ }
+
+ ret = tpm_tis_spi_init(spi, phy, -1, &tpm_spi_cr50_phy_ops);
+ if (ret)
+ return ret;
+
+ cr50_print_fw_version(&phy->priv);
+
+ chip = dev_get_drvdata(&spi->dev);
+ if (tpm_cr50_spi_is_firmware_power_managed(&spi->dev))
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+int tpm_tis_spi_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_tis_data *data = dev_get_drvdata(&chip->dev);
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ /*
+ * Jiffies not increased during suspend, so we need to reset
+ * the time to wake Cr50 after resume.
+ */
+ phy->wake_after = jiffies;
+
+ return tpm_tis_resume(dev);
+}
+#endif
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
new file mode 100644
index 0000000000..c5c3197ee2
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Infineon Technologies AG
+ * Copyright (C) 2016 STMicroelectronics SAS
+ *
+ * Authors:
+ * Peter Huewe <peter.huewe@infineon.com>
+ * Christophe Ricard <christophe-h.ricard@st.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.3, revision 27 via _raw/native
+ * SPI access_.
+ *
+ * It is based on the original tpm_tis device driver from Leendert van
+ * Doorn, Kylene Hall and Jarkko Sakkinen.
+ */
+
+#include <linux/acpi.h>
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+#include <linux/tpm.h>
+
+#include "tpm.h"
+#include "tpm_tis_core.h"
+#include "tpm_tis_spi.h"
+
+#define MAX_SPI_FRAMESIZE 64
+
+/*
+ * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
+ * keep trying to read from the device until MISO goes high indicating the
+ * wait state has ended.
+ *
+ * [1] https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
+ */
+static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
+ struct spi_transfer *spi_xfer)
+{
+ struct spi_message m;
+ int ret, i;
+
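+	/* iobuf[3] is the last header byte clocked back from the TPM; a cleared LSB means a wait state was inserted */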
+ if ((phy->iobuf[3] & 0x01) == 0) {
+ // handle SPI wait states
+ for (i = 0; i < TPM_RETRY; i++) {
+ spi_xfer->len = 1;
+ spi_message_init(&m);
+ spi_message_add_tail(spi_xfer, &m);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ return ret;
+ if (phy->iobuf[0] & 0x01)
+ break;
+ }
+
+ if (i == TPM_RETRY)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Half-duplex controllers with support for TPM wait-state detection, such as
+ * the Tegra QSPI, need CMD, ADDR & DATA sent in a single message to manage
+ * HW flow control. Each phase is sent in a separate transfer so that the
+ * controller can identify each phase.
+ */
+static int tpm_tis_spi_transfer_half(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *in, const u8 *out)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ struct spi_transfer spi_xfer[3];
+ struct spi_message m;
+ u8 transfer_len;
+	int ret = 0;
+
+ while (len) {
+ transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
+
+ spi_message_init(&m);
+ phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
+ phy->iobuf[1] = 0xd4;
+ phy->iobuf[2] = addr >> 8;
+ phy->iobuf[3] = addr;
+
+ memset(&spi_xfer, 0, sizeof(spi_xfer));
+
+ spi_xfer[0].tx_buf = phy->iobuf;
+ spi_xfer[0].len = 1;
+ spi_message_add_tail(&spi_xfer[0], &m);
+
+ spi_xfer[1].tx_buf = phy->iobuf + 1;
+ spi_xfer[1].len = 3;
+ spi_message_add_tail(&spi_xfer[1], &m);
+
+ if (out) {
+ spi_xfer[2].tx_buf = &phy->iobuf[4];
+ spi_xfer[2].rx_buf = NULL;
+ memcpy(&phy->iobuf[4], out, transfer_len);
+ out += transfer_len;
+ }
+
+ if (in) {
+ spi_xfer[2].tx_buf = NULL;
+ spi_xfer[2].rx_buf = &phy->iobuf[4];
+ }
+
+ spi_xfer[2].len = transfer_len;
+ spi_message_add_tail(&spi_xfer[2], &m);
+
+ reinit_completion(&phy->ready);
+
+ ret = spi_sync(phy->spi_device, &m);
+ if (ret < 0)
+ return ret;
+
+ if (in) {
+ memcpy(in, &phy->iobuf[4], transfer_len);
+ in += transfer_len;
+ }
+
+ len -= transfer_len;
+ }
+
+ return ret;
+}
+
+static int tpm_tis_spi_transfer_full(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *in, const u8 *out)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ int ret = 0;
+ struct spi_message m;
+ struct spi_transfer spi_xfer;
+ u8 transfer_len;
+
+ spi_bus_lock(phy->spi_device->master);
+
+ while (len) {
+ transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
+
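+		/*
+		 * 4-byte TCG SPI header: bit 7 = read flag, low bits = transfer size - 1,
+		 * followed by the 24-bit register address (0xd4, addr high, addr low).
+		 */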
+ phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
+ phy->iobuf[1] = 0xd4;
+ phy->iobuf[2] = addr >> 8;
+ phy->iobuf[3] = addr;
+
+ memset(&spi_xfer, 0, sizeof(spi_xfer));
+ spi_xfer.tx_buf = phy->iobuf;
+ spi_xfer.rx_buf = phy->iobuf;
+ spi_xfer.len = 4;
+ spi_xfer.cs_change = 1;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&spi_xfer, &m);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ goto exit;
+
+ /* Flow control transfers are receive only */
+ spi_xfer.tx_buf = NULL;
+ ret = phy->flow_control(phy, &spi_xfer);
+ if (ret < 0)
+ goto exit;
+
+ spi_xfer.cs_change = 0;
+ spi_xfer.len = transfer_len;
+ spi_xfer.delay.value = 5;
+ spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS;
+
+ if (out) {
+ spi_xfer.tx_buf = phy->iobuf;
+ spi_xfer.rx_buf = NULL;
+ memcpy(phy->iobuf, out, transfer_len);
+ out += transfer_len;
+ }
+
+ spi_message_init(&m);
+ spi_message_add_tail(&spi_xfer, &m);
+ reinit_completion(&phy->ready);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ goto exit;
+
+ if (in) {
+ memcpy(in, phy->iobuf, transfer_len);
+ in += transfer_len;
+ }
+
+ len -= transfer_len;
+ }
+
+exit:
+ if (ret < 0) {
+ /* Deactivate chip select */
+ memset(&spi_xfer, 0, sizeof(spi_xfer));
+ spi_message_init(&m);
+ spi_message_add_tail(&spi_xfer, &m);
+ spi_sync_locked(phy->spi_device, &m);
+ }
+
+ spi_bus_unlock(phy->spi_device->master);
+ return ret;
+}
+
+int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ struct spi_controller *ctlr = phy->spi_device->controller;
+
+	/*
+	 * TPM flow control over SPI requires full duplex support.
+	 * For a half duplex controller, send the entire message so the
+	 * controller can handle the wait-state polling itself; the TPM HW
+	 * flow control mode is enabled for such controllers at probe time.
+	 */
+ if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
+ return tpm_tis_spi_transfer_half(data, addr, len, in, out);
+ else
+ return tpm_tis_spi_transfer_full(data, addr, len, in, out);
+}
+
+static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *result, enum tpm_tis_io_mode io_mode)
+{
+ return tpm_tis_spi_transfer(data, addr, len, result, NULL);
+}
+
+static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, const u8 *value, enum tpm_tis_io_mode io_mode)
+{
+ return tpm_tis_spi_transfer(data, addr, len, NULL, value);
+}
+
+int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
+ int irq, const struct tpm_tis_phy_ops *phy_ops)
+{
+ phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+ if (!phy->iobuf)
+ return -ENOMEM;
+
+ phy->spi_device = spi;
+
+ return tpm_tis_core_init(&spi->dev, &phy->priv, irq, phy_ops, NULL);
+}
+
+static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
+ .read_bytes = tpm_tis_spi_read_bytes,
+ .write_bytes = tpm_tis_spi_write_bytes,
+};
+
+static int tpm_tis_spi_probe(struct spi_device *dev)
+{
+ struct tpm_tis_spi_phy *phy;
+ int irq;
+
+ phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->flow_control = tpm_tis_spi_flow_control;
+
+ if (dev->controller->flags & SPI_CONTROLLER_HALF_DUPLEX)
+ dev->mode |= SPI_TPM_HW_FLOW;
+
+ /* If the SPI device has an IRQ then use that */
+ if (dev->irq > 0)
+ irq = dev->irq;
+ else
+ irq = -1;
+
+ init_completion(&phy->ready);
+ return tpm_tis_spi_init(dev, phy, irq, &tpm_spi_phy_ops);
+}
+
+typedef int (*tpm_tis_spi_probe_func)(struct spi_device *);
+
+static int tpm_tis_spi_driver_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *spi_dev_id = spi_get_device_id(spi);
+ tpm_tis_spi_probe_func probe_func;
+
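+	/* Prefer OF match data; fall back to the spi_device_id table, then to the generic TIS probe */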
+ probe_func = of_device_get_match_data(&spi->dev);
+ if (!probe_func) {
+ if (spi_dev_id) {
+ probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
+ if (!probe_func)
+ return -ENODEV;
+ } else
+ probe_func = tpm_tis_spi_probe;
+ }
+
+ return probe_func(spi);
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume);
+
+static void tpm_tis_spi_remove(struct spi_device *dev)
+{
+ struct tpm_chip *chip = spi_get_drvdata(dev);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+}
+
+static const struct spi_device_id tpm_tis_spi_id[] = {
+ { "st33htpm-spi", (unsigned long)tpm_tis_spi_probe },
+ { "slb9670", (unsigned long)tpm_tis_spi_probe },
+ { "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
+ { "tpm_tis-spi", (unsigned long)tpm_tis_spi_probe },
+ { "cr50", (unsigned long)cr50_spi_probe },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);
+
+static const struct of_device_id of_tis_spi_match[] __maybe_unused = {
+ { .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe },
+ { .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe },
+ { .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe },
+ { .compatible = "google,cr50", .data = cr50_spi_probe },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_tis_spi_match);
+
+static const struct acpi_device_id acpi_tis_spi_match[] __maybe_unused = {
+ {"SMO0768", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match);
+
+static struct spi_driver tpm_tis_spi_driver = {
+ .driver = {
+ .name = "tpm_tis_spi",
+ .pm = &tpm_tis_pm,
+ .of_match_table = of_match_ptr(of_tis_spi_match),
+ .acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = tpm_tis_spi_driver_probe,
+ .remove = tpm_tis_spi_remove,
+ .id_table = tpm_tis_spi_id,
+};
+module_spi_driver(tpm_tis_spi_driver);
+
+MODULE_DESCRIPTION("TPM Driver for native SPI access");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis_synquacer.c b/drivers/char/tpm/tpm_tis_synquacer.c
new file mode 100644
index 0000000000..0621ebec53
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_synquacer.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Linaro Ltd.
+ *
+ * This device driver implements MMIO TPM on SynQuacer Platform.
+ */
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/kernel.h>
+#include "tpm.h"
+#include "tpm_tis_core.h"
+
+/*
+ * irq > 0 means: use irq $irq;
+ * irq = 0 means: autoprobe for an irq;
+ * irq = -1 means: no irq support
+ */
+struct tpm_tis_synquacer_info {
+ struct resource res;
+ int irq;
+};
+
+struct tpm_tis_synquacer_phy {
+ struct tpm_tis_data priv;
+ void __iomem *iobase;
+};
+
+static inline struct tpm_tis_synquacer_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_synquacer_phy, priv);
+}
+
+static int tpm_tis_synquacer_read_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *result,
+ enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data);
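+
+	/* The SynQuacer SPI controller limitation applies to reads as well: 16/32-bit accesses are done byte-wise in descending order */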
+ switch (io_mode) {
+ case TPM_TIS_PHYS_8:
+ while (len--)
+ *result++ = ioread8(phy->iobase + addr);
+ break;
+ case TPM_TIS_PHYS_16:
+ result[1] = ioread8(phy->iobase + addr + 1);
+ result[0] = ioread8(phy->iobase + addr);
+ break;
+ case TPM_TIS_PHYS_32:
+ result[3] = ioread8(phy->iobase + addr + 3);
+ result[2] = ioread8(phy->iobase + addr + 2);
+ result[1] = ioread8(phy->iobase + addr + 1);
+ result[0] = ioread8(phy->iobase + addr);
+ break;
+ }
+
+ return 0;
+}
+
+static int tpm_tis_synquacer_write_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, const u8 *value,
+ enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data);
+ switch (io_mode) {
+ case TPM_TIS_PHYS_8:
+ while (len--)
+ iowrite8(*value++, phy->iobase + addr);
+ break;
+ case TPM_TIS_PHYS_16:
+ return -EINVAL;
+ case TPM_TIS_PHYS_32:
+ /*
+		 * Due to a limitation of the SPI controller on SynQuacer,
+		 * 16/32-bit accesses must be done byte-wise and in descending order.
+ */
+ iowrite8(value[3], phy->iobase + addr + 3);
+ iowrite8(value[2], phy->iobase + addr + 2);
+ iowrite8(value[1], phy->iobase + addr + 1);
+ iowrite8(value[0], phy->iobase + addr);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct tpm_tis_phy_ops tpm_tcg_bw = {
+ .read_bytes = tpm_tis_synquacer_read_bytes,
+ .write_bytes = tpm_tis_synquacer_write_bytes,
+};
+
+static int tpm_tis_synquacer_init(struct device *dev,
+ struct tpm_tis_synquacer_info *tpm_info)
+{
+ struct tpm_tis_synquacer_phy *phy;
+
+ phy = devm_kzalloc(dev, sizeof(struct tpm_tis_synquacer_phy), GFP_KERNEL);
+ if (phy == NULL)
+ return -ENOMEM;
+
+ phy->iobase = devm_ioremap_resource(dev, &tpm_info->res);
+ if (IS_ERR(phy->iobase))
+ return PTR_ERR(phy->iobase);
+
+ return tpm_tis_core_init(dev, &phy->priv, tpm_info->irq, &tpm_tcg_bw,
+ ACPI_HANDLE(dev));
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_synquacer_pm, tpm_pm_suspend, tpm_tis_resume);
+
+static int tpm_tis_synquacer_probe(struct platform_device *pdev)
+{
+ struct tpm_tis_synquacer_info tpm_info = {};
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+ tpm_info.res = *res;
+
+ tpm_info.irq = -1;
+
+ return tpm_tis_synquacer_init(&pdev->dev, &tpm_info);
+}
+
+static void tpm_tis_synquacer_remove(struct platform_device *pdev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id tis_synquacer_of_platform_match[] = {
+ {.compatible = "socionext,synquacer-tpm-mmio"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, tis_synquacer_of_platform_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id tpm_synquacer_acpi_tbl[] = {
+ { "SCX0009" },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, tpm_synquacer_acpi_tbl);
+#endif
+
+static struct platform_driver tis_synquacer_drv = {
+ .probe = tpm_tis_synquacer_probe,
+ .remove_new = tpm_tis_synquacer_remove,
+ .driver = {
+ .name = "tpm_tis_synquacer",
+ .pm = &tpm_tis_synquacer_pm,
+ .of_match_table = of_match_ptr(tis_synquacer_of_platform_match),
+ .acpi_match_table = ACPI_PTR(tpm_synquacer_acpi_tbl),
+ },
+};
+
+module_platform_driver(tis_synquacer_drv);
+
+MODULE_DESCRIPTION("TPM MMIO Driver for Socionext SynQuacer platform");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
new file mode 100644
index 0000000000..30e953988c
--- /dev/null
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -0,0 +1,717 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015, 2016 IBM Corporation
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Stefan Berger <stefanb@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for vTPM (vTPM proxy driver)
+ */
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/miscdevice.h>
+#include <linux/vtpm_proxy.h>
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+#include <linux/poll.h>
+#include <linux/compat.h>
+
+#include "tpm.h"
+
+#define VTPM_PROXY_REQ_COMPLETE_FLAG BIT(0)
+
+struct proxy_dev {
+ struct tpm_chip *chip;
+
+ u32 flags; /* public API flags */
+
+ wait_queue_head_t wq;
+
+ struct mutex buf_lock; /* protect buffer and flags */
+
+ long state; /* internal state */
+#define STATE_OPENED_FLAG BIT(0)
+#define STATE_WAIT_RESPONSE_FLAG BIT(1) /* waiting for emulator response */
+#define STATE_REGISTERED_FLAG BIT(2)
+#define STATE_DRIVER_COMMAND BIT(3) /* sending a driver specific command */
+
+ size_t req_len; /* length of queued TPM request */
+ size_t resp_len; /* length of queued TPM response */
+ u8 buffer[TPM_BUFSIZE]; /* request/response buffer */
+
+ struct work_struct work; /* task that retrieves TPM timeouts */
+};
+
+/* all supported flags */
+#define VTPM_PROXY_FLAGS_ALL (VTPM_PROXY_FLAG_TPM2)
+
+static struct workqueue_struct *workqueue;
+
+static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev);
+
+/*
+ * Functions related to 'server side'
+ */
+
+/**
+ * vtpm_proxy_fops_read - Read TPM commands on 'server side'
+ *
+ * @filp: file pointer
+ * @buf: read buffer
+ * @count: number of bytes to read
+ * @off: offset
+ *
+ * Return:
+ * Number of bytes read or negative error code
+ */
+static ssize_t vtpm_proxy_fops_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *off)
+{
+ struct proxy_dev *proxy_dev = filp->private_data;
+ size_t len;
+ int sig, rc;
+
+ sig = wait_event_interruptible(proxy_dev->wq,
+ proxy_dev->req_len != 0 ||
+ !(proxy_dev->state & STATE_OPENED_FLAG));
+ if (sig)
+ return -EINTR;
+
+ mutex_lock(&proxy_dev->buf_lock);
+
+ if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EPIPE;
+ }
+
+ len = proxy_dev->req_len;
+
+ if (count < len || len > sizeof(proxy_dev->buffer)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ pr_debug("Invalid size in recv: count=%zd, req_len=%zd\n",
+ count, len);
+ return -EIO;
+ }
+
+ rc = copy_to_user(buf, proxy_dev->buffer, len);
+ memset(proxy_dev->buffer, 0, len);
+ proxy_dev->req_len = 0;
+
+ if (!rc)
+ proxy_dev->state |= STATE_WAIT_RESPONSE_FLAG;
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ if (rc)
+ return -EFAULT;
+
+ return len;
+}
+
+/**
+ * vtpm_proxy_fops_write - Write TPM responses on 'server side'
+ *
+ * @filp: file pointer
+ * @buf: write buffer
+ * @count: number of bytes to write
+ * @off: offset
+ *
+ * Return:
+ *      Number of bytes written or negative error value
+ */
+static ssize_t vtpm_proxy_fops_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ struct proxy_dev *proxy_dev = filp->private_data;
+
+ mutex_lock(&proxy_dev->buf_lock);
+
+ if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EPIPE;
+ }
+
+ if (count > sizeof(proxy_dev->buffer) ||
+ !(proxy_dev->state & STATE_WAIT_RESPONSE_FLAG)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EIO;
+ }
+
+ proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG;
+
+ proxy_dev->req_len = 0;
+
+ if (copy_from_user(proxy_dev->buffer, buf, count)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EFAULT;
+ }
+
+ proxy_dev->resp_len = count;
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ wake_up_interruptible(&proxy_dev->wq);
+
+ return count;
+}
+
+/*
+ * vtpm_proxy_fops_poll - Poll status on 'server side'
+ *
+ * @filp: file pointer
+ * @wait: poll table
+ *
+ * Return: Poll flags
+ */
+static __poll_t vtpm_proxy_fops_poll(struct file *filp, poll_table *wait)
+{
+ struct proxy_dev *proxy_dev = filp->private_data;
+ __poll_t ret;
+
+ poll_wait(filp, &proxy_dev->wq, wait);
+
+ ret = EPOLLOUT;
+
+ mutex_lock(&proxy_dev->buf_lock);
+
+ if (proxy_dev->req_len)
+ ret |= EPOLLIN | EPOLLRDNORM;
+
+ if (!(proxy_dev->state & STATE_OPENED_FLAG))
+ ret |= EPOLLHUP;
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ return ret;
+}
+
+/*
+ * vtpm_proxy_fops_open - Open vTPM device on 'server side'
+ *
+ * @filp: file pointer
+ *
+ * Called when setting up the anonymous file descriptor
+ */
+static void vtpm_proxy_fops_open(struct file *filp)
+{
+ struct proxy_dev *proxy_dev = filp->private_data;
+
+ proxy_dev->state |= STATE_OPENED_FLAG;
+}
+
+/**
+ * vtpm_proxy_fops_undo_open - counterpart to vtpm_proxy_fops_open
+ *
+ * @proxy_dev: tpm proxy device
+ *
+ * Call to undo vtpm_proxy_fops_open.
+ */
+static void vtpm_proxy_fops_undo_open(struct proxy_dev *proxy_dev)
+{
+ mutex_lock(&proxy_dev->buf_lock);
+
+ proxy_dev->state &= ~STATE_OPENED_FLAG;
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ /* no more TPM responses -- wake up anyone waiting for them */
+ wake_up_interruptible(&proxy_dev->wq);
+}
+
+/*
+ * vtpm_proxy_fops_release - Close 'server side'
+ *
+ * @inode: inode
+ * @filp: file pointer
+ * Return:
+ * Always returns 0.
+ */
+static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp)
+{
+ struct proxy_dev *proxy_dev = filp->private_data;
+
+ filp->private_data = NULL;
+
+ vtpm_proxy_delete_device(proxy_dev);
+
+ return 0;
+}
+
+static const struct file_operations vtpm_proxy_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = vtpm_proxy_fops_read,
+ .write = vtpm_proxy_fops_write,
+ .poll = vtpm_proxy_fops_poll,
+ .release = vtpm_proxy_fops_release,
+};
+
+/*
+ * Functions invoked by the core TPM driver to send TPM commands to
+ * 'server side' and receive responses from there.
+ */
+
+/*
+ * Called when core TPM driver reads TPM responses from 'server side'
+ *
+ * @chip: tpm chip to use
+ * @buf: receive buffer
+ * @count: bytes to read
+ * Return:
+ * Number of TPM response bytes read, negative error value otherwise
+ */
+static int vtpm_proxy_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+ size_t len;
+
+ /* process gone ? */
+ mutex_lock(&proxy_dev->buf_lock);
+
+ if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EPIPE;
+ }
+
+ len = proxy_dev->resp_len;
+ if (count < len) {
+ dev_err(&chip->dev,
+ "Invalid size in recv: count=%zd, resp_len=%zd\n",
+ count, len);
+ len = -EIO;
+ goto out;
+ }
+
+ memcpy(buf, proxy_dev->buffer, len);
+ proxy_dev->resp_len = 0;
+
+out:
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ return len;
+}
+
+static int vtpm_proxy_is_driver_command(struct tpm_chip *chip,
+ u8 *buf, size_t count)
+{
+ struct tpm_header *hdr = (struct tpm_header *)buf;
+
+ if (count < sizeof(struct tpm_header))
+ return 0;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ switch (be32_to_cpu(hdr->ordinal)) {
+ case TPM2_CC_SET_LOCALITY:
+ return 1;
+ }
+ } else {
+ switch (be32_to_cpu(hdr->ordinal)) {
+ case TPM_ORD_SET_LOCALITY:
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Called when core TPM driver forwards TPM requests to 'server side'.
+ *
+ * @chip: tpm chip to use
+ * @buf: send buffer
+ * @count: bytes to send
+ *
+ * Return:
+ * 0 in case of success, negative error value otherwise.
+ */
+static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+
+ if (count > sizeof(proxy_dev->buffer)) {
+ dev_err(&chip->dev,
+ "Invalid size in send: count=%zd, buffer size=%zd\n",
+ count, sizeof(proxy_dev->buffer));
+ return -EIO;
+ }
+
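+	/* Reject driver-internal ordinals (e.g. SetLocality) arriving via a regular user request */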
+ if (!(proxy_dev->state & STATE_DRIVER_COMMAND) &&
+ vtpm_proxy_is_driver_command(chip, buf, count))
+ return -EFAULT;
+
+ mutex_lock(&proxy_dev->buf_lock);
+
+ if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EPIPE;
+ }
+
+ proxy_dev->resp_len = 0;
+
+ proxy_dev->req_len = count;
+ memcpy(proxy_dev->buffer, buf, count);
+
+ proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG;
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ wake_up_interruptible(&proxy_dev->wq);
+
+ return 0;
+}
+
+static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip)
+{
+ /* not supported */
+}
+
+static u8 vtpm_proxy_tpm_op_status(struct tpm_chip *chip)
+{
+ struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+
+ if (proxy_dev->resp_len)
+ return VTPM_PROXY_REQ_COMPLETE_FLAG;
+
+ return 0;
+}
+
+static bool vtpm_proxy_tpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+ bool ret;
+
+ mutex_lock(&proxy_dev->buf_lock);
+
+ ret = !(proxy_dev->state & STATE_OPENED_FLAG);
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ return ret;
+}
+
+static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality)
+{
+ struct tpm_buf buf;
+ int rc;
+ const struct tpm_header *header;
+ struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS,
+ TPM2_CC_SET_LOCALITY);
+ else
+ rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND,
+ TPM_ORD_SET_LOCALITY);
+ if (rc)
+ return rc;
+ tpm_buf_append_u8(&buf, locality);
+
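+	/* Mark this as a driver command so the send path accepts the SetLocality ordinal */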
+ proxy_dev->state |= STATE_DRIVER_COMMAND;
+
+ rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to set locality");
+
+ proxy_dev->state &= ~STATE_DRIVER_COMMAND;
+
+ if (rc < 0) {
+ locality = rc;
+ goto out;
+ }
+
+ header = (const struct tpm_header *)buf.data;
+ rc = be32_to_cpu(header->return_code);
+ if (rc)
+ locality = -1;
+
+out:
+ tpm_buf_destroy(&buf);
+
+ return locality;
+}
+
+static const struct tpm_class_ops vtpm_proxy_tpm_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .recv = vtpm_proxy_tpm_op_recv,
+ .send = vtpm_proxy_tpm_op_send,
+ .cancel = vtpm_proxy_tpm_op_cancel,
+ .status = vtpm_proxy_tpm_op_status,
+ .req_complete_mask = VTPM_PROXY_REQ_COMPLETE_FLAG,
+ .req_complete_val = VTPM_PROXY_REQ_COMPLETE_FLAG,
+ .req_canceled = vtpm_proxy_tpm_req_canceled,
+ .request_locality = vtpm_proxy_request_locality,
+};
+
+/*
+ * Code related to the startup of the TPM 2 and startup of TPM 1.2 +
+ * retrieval of timeouts and durations.
+ */
+
+static void vtpm_proxy_work(struct work_struct *work)
+{
+ struct proxy_dev *proxy_dev = container_of(work, struct proxy_dev,
+ work);
+ int rc;
+
+ rc = tpm_chip_register(proxy_dev->chip);
+ if (rc)
+ vtpm_proxy_fops_undo_open(proxy_dev);
+ else
+ proxy_dev->state |= STATE_REGISTERED_FLAG;
+}
+
+/*
+ * vtpm_proxy_work_stop: make sure the work has finished
+ *
+ * This function is useful when user space closed the fd
+ * while the driver still determines timeouts.
+ */
+static void vtpm_proxy_work_stop(struct proxy_dev *proxy_dev)
+{
+ vtpm_proxy_fops_undo_open(proxy_dev);
+ flush_work(&proxy_dev->work);
+}
+
+/*
+ * vtpm_proxy_work_start: Schedule the work for TPM 1.2 & 2 initialization
+ */
+static inline void vtpm_proxy_work_start(struct proxy_dev *proxy_dev)
+{
+ queue_work(workqueue, &proxy_dev->work);
+}
+
+/*
+ * Code related to creation and deletion of device pairs
+ */
+static struct proxy_dev *vtpm_proxy_create_proxy_dev(void)
+{
+ struct proxy_dev *proxy_dev;
+ struct tpm_chip *chip;
+ int err;
+
+ proxy_dev = kzalloc(sizeof(*proxy_dev), GFP_KERNEL);
+ if (proxy_dev == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ init_waitqueue_head(&proxy_dev->wq);
+ mutex_init(&proxy_dev->buf_lock);
+ INIT_WORK(&proxy_dev->work, vtpm_proxy_work);
+
+ chip = tpm_chip_alloc(NULL, &vtpm_proxy_tpm_ops);
+ if (IS_ERR(chip)) {
+ err = PTR_ERR(chip);
+ goto err_proxy_dev_free;
+ }
+ dev_set_drvdata(&chip->dev, proxy_dev);
+
+ proxy_dev->chip = chip;
+
+ return proxy_dev;
+
+err_proxy_dev_free:
+ kfree(proxy_dev);
+
+ return ERR_PTR(err);
+}
+
+/*
+ * Undo what has been done in vtpm_proxy_create_proxy_dev
+ */
+static inline void vtpm_proxy_delete_proxy_dev(struct proxy_dev *proxy_dev)
+{
+ put_device(&proxy_dev->chip->dev); /* frees chip */
+ kfree(proxy_dev);
+}
+
+/*
+ * Create a /dev/tpm%d and 'server side' file descriptor pair
+ *
+ * Return:
+ * Returns file pointer on success, an error value otherwise
+ */
+static struct file *vtpm_proxy_create_device(
+ struct vtpm_proxy_new_dev *vtpm_new_dev)
+{
+ struct proxy_dev *proxy_dev;
+ int rc, fd;
+ struct file *file;
+
+ if (vtpm_new_dev->flags & ~VTPM_PROXY_FLAGS_ALL)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ proxy_dev = vtpm_proxy_create_proxy_dev();
+ if (IS_ERR(proxy_dev))
+ return ERR_CAST(proxy_dev);
+
+ proxy_dev->flags = vtpm_new_dev->flags;
+
+ /* setup an anonymous file for the server-side */
+ fd = get_unused_fd_flags(O_RDWR);
+ if (fd < 0) {
+ rc = fd;
+ goto err_delete_proxy_dev;
+ }
+
+ file = anon_inode_getfile("[vtpms]", &vtpm_proxy_fops, proxy_dev,
+ O_RDWR);
+ if (IS_ERR(file)) {
+ rc = PTR_ERR(file);
+ goto err_put_unused_fd;
+ }
+
+ /* from now on we can unwind with put_unused_fd() + fput() */
+ /* simulate an open() on the server side */
+ vtpm_proxy_fops_open(file);
+
+ if (proxy_dev->flags & VTPM_PROXY_FLAG_TPM2)
+ proxy_dev->chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+ vtpm_proxy_work_start(proxy_dev);
+
+ vtpm_new_dev->fd = fd;
+ vtpm_new_dev->major = MAJOR(proxy_dev->chip->dev.devt);
+ vtpm_new_dev->minor = MINOR(proxy_dev->chip->dev.devt);
+ vtpm_new_dev->tpm_num = proxy_dev->chip->dev_num;
+
+ return file;
+
+err_put_unused_fd:
+ put_unused_fd(fd);
+
+err_delete_proxy_dev:
+ vtpm_proxy_delete_proxy_dev(proxy_dev);
+
+ return ERR_PTR(rc);
+}
+
+/*
+ * Counterpart to vtpm_proxy_create_device.
+ */
+static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
+{
+ vtpm_proxy_work_stop(proxy_dev);
+
+ /*
+ * A client may hold the 'ops' lock, so let it know that the server
+ * side shuts down before we try to grab the 'ops' lock when
+ * unregistering the chip.
+ */
+ vtpm_proxy_fops_undo_open(proxy_dev);
+
+ if (proxy_dev->state & STATE_REGISTERED_FLAG)
+ tpm_chip_unregister(proxy_dev->chip);
+
+ vtpm_proxy_delete_proxy_dev(proxy_dev);
+}
+
+/*
+ * Code related to the control device /dev/vtpmx
+ */
+
+/**
+ * vtpmx_ioc_new_dev - handler for the %VTPM_PROXY_IOC_NEW_DEV ioctl
+ * @file: /dev/vtpmx
+ * @ioctl: the ioctl number
+ * @arg: pointer to the struct vtpmx_proxy_new_dev
+ *
+ * Creates an anonymous file that is used by the process acting as a TPM to
+ * communicate with the client processes. The function also adds a new TPM
+ * device through which data is proxied to the process acting as the TPM. The
+ * caller is provided with a file descriptor to communicate with the clients
+ * and with the major and minor numbers of the TPM device.
+ */
+static long vtpmx_ioc_new_dev(struct file *file, unsigned int ioctl,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct vtpm_proxy_new_dev __user *vtpm_new_dev_p;
+ struct vtpm_proxy_new_dev vtpm_new_dev;
+ struct file *vtpm_file;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ vtpm_new_dev_p = argp;
+
+ if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p,
+ sizeof(vtpm_new_dev)))
+ return -EFAULT;
+
+ vtpm_file = vtpm_proxy_create_device(&vtpm_new_dev);
+ if (IS_ERR(vtpm_file))
+ return PTR_ERR(vtpm_file);
+
+ if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev,
+ sizeof(vtpm_new_dev))) {
+ put_unused_fd(vtpm_new_dev.fd);
+ fput(vtpm_file);
+ return -EFAULT;
+ }
+
+ fd_install(vtpm_new_dev.fd, vtpm_file);
+ return 0;
+}
+
+/*
+ * vtpmx_fops_ioctl: ioctl on /dev/vtpmx
+ *
+ * Return:
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ switch (ioctl) {
+ case VTPM_PROXY_IOC_NEW_DEV:
+ return vtpmx_ioc_new_dev(f, ioctl, arg);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static const struct file_operations vtpmx_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = vtpmx_fops_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice vtpmx_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "vtpmx",
+ .fops = &vtpmx_fops,
+};
+
+static int __init vtpm_module_init(void)
+{
+ int rc;
+
+ workqueue = create_workqueue("tpm-vtpm");
+ if (!workqueue) {
+ pr_err("couldn't create workqueue\n");
+ return -ENOMEM;
+ }
+
+ rc = misc_register(&vtpmx_miscdev);
+ if (rc) {
+ pr_err("couldn't create vtpmx device\n");
+ destroy_workqueue(workqueue);
+ }
+
+ return rc;
+}
+
+static void __exit vtpm_module_exit(void)
+{
+ destroy_workqueue(workqueue);
+ misc_deregister(&vtpmx_miscdev);
+}
+
+module_init(vtpm_module_init);
+module_exit(vtpm_module_exit);
+
+MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)");
+MODULE_DESCRIPTION("vTPM Driver");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c
new file mode 100644
index 0000000000..eef0fb06ea
--- /dev/null
+++ b/drivers/char/tpm/tpmrm-dev.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 James.Bottomley@HansenPartnership.com
+ */
+#include <linux/slab.h>
+#include "tpm-dev.h"
+
+struct tpmrm_priv {
+ struct file_priv priv;
+ struct tpm_space space;
+};
+
+static int tpmrm_open(struct inode *inode, struct file *file)
+{
+ struct tpm_chip *chip;
+ struct tpmrm_priv *priv;
+ int rc;
+
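+	/* /dev/tpmrm%d is backed by the chip's second character device, 'cdevs' */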
+ chip = container_of(inode->i_cdev, struct tpm_chip, cdevs);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+
+ rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE);
+ if (rc) {
+ kfree(priv);
+ return -ENOMEM;
+ }
+
+ tpm_common_open(file, chip, &priv->priv, &priv->space);
+
+ return 0;
+}
+
+static int tpmrm_release(struct inode *inode, struct file *file)
+{
+ struct file_priv *fpriv = file->private_data;
+ struct tpmrm_priv *priv = container_of(fpriv, struct tpmrm_priv, priv);
+
+ tpm_common_release(file, fpriv);
+ tpm2_del_space(fpriv->chip, &priv->space);
+ kfree(priv);
+
+ return 0;
+}
+
+const struct file_operations tpmrm_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = tpmrm_open,
+ .read = tpm_common_read,
+ .write = tpm_common_write,
+ .poll = tpm_common_poll,
+ .release = tpmrm_release,
+};
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
new file mode 100644
index 0000000000..80cca3b83b
--- /dev/null
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of the Xen vTPM device frontend
+ *
+ * Author: Daniel De Graaf <dgdegra@tycho.nsa.gov>
+ */
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/freezer.h>
+#include <xen/xen.h>
+#include <xen/events.h>
+#include <xen/interface/io/tpmif.h>
+#include <xen/grant_table.h>
+#include <xen/xenbus.h>
+#include <xen/page.h>
+#include "tpm.h"
+#include <xen/platform_pci.h>
+
+struct tpm_private {
+ struct tpm_chip *chip;
+ struct xenbus_device *dev;
+
+ struct vtpm_shared_page *shr;
+
+ unsigned int evtchn;
+ int ring_ref;
+ domid_t backend_id;
+ int irq;
+ wait_queue_head_t read_queue;
+};
+
+enum status_bits {
+ VTPM_STATUS_RUNNING = 0x1,
+ VTPM_STATUS_IDLE = 0x2,
+ VTPM_STATUS_RESULT = 0x4,
+ VTPM_STATUS_CANCELED = 0x8,
+};
+
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
+{
+ u8 status = chip->ops->status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->ops->req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
+static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
+ unsigned long timeout, wait_queue_head_t *queue,
+ bool check_cancel)
+{
+ unsigned long stop;
+ long rc;
+ u8 status;
+ bool canceled = false;
+
+ /* check current status */
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+
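+	/* With IRQ support, sleep on the read queue; otherwise poll the status every TPM_TIMEOUT */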
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -ETIME;
+ rc = wait_event_interruptible_timeout(*queue,
+ wait_for_tpm_stat_cond(chip, mask, check_cancel,
+ &canceled),
+ timeout);
+ if (rc > 0) {
+ if (canceled)
+ return -ECANCELED;
+ return 0;
+ }
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ } else {
+ do {
+ tpm_msleep(TPM_TIMEOUT);
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ return -ETIME;
+}
+
+static u8 vtpm_status(struct tpm_chip *chip)
+{
+ struct tpm_private *priv = dev_get_drvdata(&chip->dev);
+ switch (priv->shr->state) {
+ case VTPM_STATE_IDLE:
+ return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
+ case VTPM_STATE_FINISH:
+ return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT;
+ case VTPM_STATE_SUBMIT:
+ case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */
+ return VTPM_STATUS_RUNNING;
+ default:
+ return 0;
+ }
+}
+
+static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return status & VTPM_STATUS_CANCELED;
+}
+
+static void vtpm_cancel(struct tpm_chip *chip)
+{
+ struct tpm_private *priv = dev_get_drvdata(&chip->dev);
+ priv->shr->state = VTPM_STATE_CANCEL;
+ wmb();
+ notify_remote_via_evtchn(priv->evtchn);
+}
+
+static size_t shr_data_offset(struct vtpm_shared_page *shr)
+{
+ return struct_size(shr, extra_pages, shr->nr_extra_pages);
+}
+
+static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_private *priv = dev_get_drvdata(&chip->dev);
+ struct vtpm_shared_page *shr = priv->shr;
+ size_t offset = shr_data_offset(shr);
+
+ u32 ordinal;
+ unsigned long duration;
+
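+	/* The request payload lives in the shared page directly after the header and the extra-page grant references */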
+ if (offset > PAGE_SIZE)
+ return -EINVAL;
+
+ if (offset + count > PAGE_SIZE)
+ return -EINVAL;
+
+ /* Wait for completion of any existing command or cancellation */
+ if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->timeout_c,
+ &priv->read_queue, true) < 0) {
+ vtpm_cancel(chip);
+ return -ETIME;
+ }
+
+ memcpy(offset + (u8 *)shr, buf, count);
+ shr->length = count;
+ barrier();
+ shr->state = VTPM_STATE_SUBMIT;
+ wmb();
+ notify_remote_via_evtchn(priv->evtchn);
+
+ ordinal = be32_to_cpu(((struct tpm_header *)buf)->ordinal);
+ duration = tpm_calc_ordinal_duration(chip, ordinal);
+
+ if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
+ &priv->read_queue, true) < 0) {
+ /* got a signal or timeout, try to cancel */
+ vtpm_cancel(chip);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_private *priv = dev_get_drvdata(&chip->dev);
+ struct vtpm_shared_page *shr = priv->shr;
+ size_t offset = shr_data_offset(shr);
+ size_t length = shr->length;
+
+ if (shr->state == VTPM_STATE_IDLE)
+ return -ECANCELED;
+
+ /* In theory the wait at the end of _send makes this one unnecessary */
+ if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->timeout_c,
+ &priv->read_queue, true) < 0) {
+ vtpm_cancel(chip);
+ return -ETIME;
+ }
+
+ if (offset > PAGE_SIZE)
+ return -EIO;
+
+ if (offset + length > PAGE_SIZE)
+ length = PAGE_SIZE - offset;
+
+ if (length > count)
+ length = count;
+
+ memcpy(buf, offset + (u8 *)shr, length);
+
+ return length;
+}
+
+static const struct tpm_class_ops tpm_vtpm = {
+ .status = vtpm_status,
+ .recv = vtpm_recv,
+ .send = vtpm_send,
+ .cancel = vtpm_cancel,
+ .req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
+ .req_complete_val = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
+ .req_canceled = vtpm_req_canceled,
+};
+
+static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
+{
+ struct tpm_private *priv = dev_id;
+
+ switch (priv->shr->state) {
+ case VTPM_STATE_IDLE:
+ case VTPM_STATE_FINISH:
+ wake_up_interruptible(&priv->read_queue);
+ break;
+ case VTPM_STATE_SUBMIT:
+ case VTPM_STATE_CANCEL:
+ default:
+ break;
+ }
+ return IRQ_HANDLED;
+}
+
+static int setup_chip(struct device *dev, struct tpm_private *priv)
+{
+ struct tpm_chip *chip;
+
+ chip = tpmm_chip_alloc(dev, &tpm_vtpm);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ init_waitqueue_head(&priv->read_queue);
+
+ priv->chip = chip;
+ dev_set_drvdata(&chip->dev, priv);
+
+ return 0;
+}
+
+/* caller must clean up in case of errors */
+static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
+{
+ struct xenbus_transaction xbt;
+ const char *message = NULL;
+ int rv;
+
+ rv = xenbus_setup_ring(dev, GFP_KERNEL, (void **)&priv->shr, 1,
+ &priv->ring_ref);
+ if (rv < 0)
+ return rv;
+
+ rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
+ if (rv)
+ return rv;
+
+ rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
+ "tpmif", priv);
+ if (rv <= 0) {
+ xenbus_dev_fatal(dev, rv, "allocating TPM irq");
+ return rv;
+ }
+ priv->irq = rv;
+
+ again:
+ rv = xenbus_transaction_start(&xbt);
+ if (rv) {
+ xenbus_dev_fatal(dev, rv, "starting transaction");
+ return rv;
+ }
+
+ rv = xenbus_printf(xbt, dev->nodename,
+ "ring-ref", "%u", priv->ring_ref);
+ if (rv) {
+ message = "writing ring-ref";
+ goto abort_transaction;
+ }
+
+ rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+ priv->evtchn);
+ if (rv) {
+ message = "writing event-channel";
+ goto abort_transaction;
+ }
+
+ rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
+ if (rv) {
+ message = "writing feature-protocol-v2";
+ goto abort_transaction;
+ }
+
+ rv = xenbus_transaction_end(xbt, 0);
+ if (rv == -EAGAIN)
+ goto again;
+ if (rv) {
+ xenbus_dev_fatal(dev, rv, "completing transaction");
+ return rv;
+ }
+
+ xenbus_switch_state(dev, XenbusStateInitialised);
+
+ return 0;
+
+ abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ if (message)
+ xenbus_dev_error(dev, rv, "%s", message);
+
+ return rv;
+}
+
+static void ring_free(struct tpm_private *priv)
+{
+ if (!priv)
+ return;
+
+ xenbus_teardown_ring((void **)&priv->shr, 1, &priv->ring_ref);
+
+ if (priv->irq)
+ unbind_from_irqhandler(priv->irq, priv);
+
+ kfree(priv);
+}
+
+static int tpmfront_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ struct tpm_private *priv;
+ int rv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure");
+ return -ENOMEM;
+ }
+
+ rv = setup_chip(&dev->dev, priv);
+ if (rv) {
+ kfree(priv);
+ return rv;
+ }
+
+ rv = setup_ring(dev, priv);
+ if (rv) {
+ ring_free(priv);
+ return rv;
+ }
+
+ tpm_get_timeouts(priv->chip);
+
+ return tpm_chip_register(priv->chip);
+}
+
+static void tpmfront_remove(struct xenbus_device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
+ struct tpm_private *priv = dev_get_drvdata(&chip->dev);
+ tpm_chip_unregister(chip);
+ ring_free(priv);
+ dev_set_drvdata(&chip->dev, NULL);
+}
+
+static int tpmfront_resume(struct xenbus_device *dev)
+{
+ /* A suspend/resume/migrate will interrupt a vTPM anyway */
+ tpmfront_remove(dev);
+ return tpmfront_probe(dev, NULL);
+}
+
+static void backend_changed(struct xenbus_device *dev,
+ enum xenbus_state backend_state)
+{
+ switch (backend_state) {
+ case XenbusStateInitialised:
+ case XenbusStateConnected:
+ if (dev->state == XenbusStateConnected)
+ break;
+
+ if (!xenbus_read_unsigned(dev->otherend, "feature-protocol-v2",
+ 0)) {
+ xenbus_dev_fatal(dev, -EINVAL,
+ "vTPM protocol 2 required");
+ return;
+ }
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateClosing:
+ case XenbusStateClosed:
+ device_unregister(&dev->dev);
+ xenbus_frontend_closed(dev);
+ break;
+ default:
+ break;
+ }
+}
+
+static const struct xenbus_device_id tpmfront_ids[] = {
+ { "vtpm" },
+ { "" }
+};
+MODULE_ALIAS("xen:vtpm");
+
+static struct xenbus_driver tpmfront_driver = {
+ .ids = tpmfront_ids,
+ .probe = tpmfront_probe,
+ .remove = tpmfront_remove,
+ .resume = tpmfront_resume,
+ .otherend_changed = backend_changed,
+};
+
+static int __init xen_tpmfront_init(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ if (!xen_has_pv_devices())
+ return -ENODEV;
+
+ return xenbus_register_frontend(&tpmfront_driver);
+}
+module_init(xen_tpmfront_init);
+
+static void __exit xen_tpmfront_exit(void)
+{
+ xenbus_unregister_driver(&tpmfront_driver);
+}
+module_exit(xen_tpmfront_exit);
+
+MODULE_AUTHOR("Daniel De Graaf <dgdegra@tycho.nsa.gov>");
+MODULE_DESCRIPTION("Xen vTPM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
new file mode 100644
index 0000000000..5af804c17a
--- /dev/null
+++ b/drivers/char/ttyprintk.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/char/ttyprintk.c
+ *
+ * Copyright (C) 2010 Samo Pogacnik
+ */
+
+/*
+ * This pseudo device allows a user to emit printk messages. It is possible
+ * to store "console" messages inline with kernel messages for better analysis
+ * of the boot process, for example.
+ */
+
+#include <linux/console.h>
+#include <linux/device.h>
+#include <linux/serial.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+struct ttyprintk_port {
+ struct tty_port port;
+ spinlock_t spinlock;
+};
+
+static struct ttyprintk_port tpk_port;
+
+/*
+ * Our simple preformatting supports transparent output of (time-stamped)
+ * printk messages (also suitable for a logging service):
+ * - any CR is replaced by an NL
+ * - a ttyprintk source tag is added in front of each line
+ * - an overly long message is fragmented, with '\'NL between fragments
+ * - TPK_STR_SIZE isn't really the write_room limiting factor, because
+ * it is emptied on the fly during preformatting.
+ */
+#define TPK_STR_SIZE 508 /* should be bigger than the max expected line length */
+#define TPK_MAX_ROOM 4096 /* we could assume 4K for instance */
+#define TPK_PREFIX KERN_SOH __stringify(CONFIG_TTY_PRINTK_LEVEL)
+
+static int tpk_curr;
+
+static char tpk_buffer[TPK_STR_SIZE + 4];
+
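+/* Emit the buffered line (if any) via printk and reset the buffer. */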
+static void tpk_flush(void)
+{
+ if (tpk_curr > 0) {
+ tpk_buffer[tpk_curr] = '\0';
+ printk(TPK_PREFIX "[U] %s\n", tpk_buffer);
+ tpk_curr = 0;
+ }
+}
+
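+/*
+ * Preformat @buf: CR/NL flush the current line; an overlong line is
+ * split with a trailing '\' before continuing.
+ */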
+static int tpk_printk(const u8 *buf, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (tpk_curr >= TPK_STR_SIZE) {
+ /* end of tmp buffer reached: cut the message in two */
+ tpk_buffer[tpk_curr++] = '\\';
+ tpk_flush();
+ }
+
+ switch (buf[i]) {
+ case '\r':
+ tpk_flush();
+ if ((i + 1) < count && buf[i + 1] == '\n')
+ i++;
+ break;
+ case '\n':
+ tpk_flush();
+ break;
+ default:
+ tpk_buffer[tpk_curr++] = buf[i];
+ break;
+ }
+ }
+
+ return count;
+}
+
+/*
+ * TTY operations open function.
+ */
+static int tpk_open(struct tty_struct *tty, struct file *filp)
+{
+ tty->driver_data = &tpk_port;
+
+ return tty_port_open(&tpk_port.port, tty, filp);
+}
+
+/*
+ * TTY operations close function.
+ */
+static void tpk_close(struct tty_struct *tty, struct file *filp)
+{
+ struct ttyprintk_port *tpkp = tty->driver_data;
+
+ tty_port_close(&tpkp->port, tty, filp);
+}
+
+/*
+ * TTY operations write function.
+ */
+static ssize_t tpk_write(struct tty_struct *tty, const u8 *buf, size_t count)
+{
+ struct ttyprintk_port *tpkp = tty->driver_data;
+ unsigned long flags;
+ int ret;
+
+ /* exclusive use of tpk_printk within this tty */
+ spin_lock_irqsave(&tpkp->spinlock, flags);
+ ret = tpk_printk(buf, count);
+ spin_unlock_irqrestore(&tpkp->spinlock, flags);
+
+ return ret;
+}
+
+/*
+ * TTY operations write_room function.
+ */
+static unsigned int tpk_write_room(struct tty_struct *tty)
+{
+ return TPK_MAX_ROOM;
+}
+
+/*
+ * TTY operations hangup function.
+ */
+static void tpk_hangup(struct tty_struct *tty)
+{
+ struct ttyprintk_port *tpkp = tty->driver_data;
+
+ tty_port_hangup(&tpkp->port);
+}
+
+/*
+ * TTY port operations shutdown function.
+ */
+static void tpk_port_shutdown(struct tty_port *tport)
+{
+ struct ttyprintk_port *tpkp =
+ container_of(tport, struct ttyprintk_port, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tpkp->spinlock, flags);
+ tpk_flush();
+ spin_unlock_irqrestore(&tpkp->spinlock, flags);
+}
+
+static const struct tty_operations ttyprintk_ops = {
+ .open = tpk_open,
+ .close = tpk_close,
+ .write = tpk_write,
+ .write_room = tpk_write_room,
+ .hangup = tpk_hangup,
+};
+
+static const struct tty_port_operations tpk_port_ops = {
+ .shutdown = tpk_port_shutdown,
+};
+
+static struct tty_driver *ttyprintk_driver;
+
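+/* console->device() hook: lets the core map the ttyprintk console to its tty driver. */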
+static struct tty_driver *ttyprintk_console_device(struct console *c,
+ int *index)
+{
+ *index = 0;
+ return ttyprintk_driver;
+}
+
+static struct console ttyprintk_console = {
+ .name = "ttyprintk",
+ .device = ttyprintk_console_device,
+};
+
+static int __init ttyprintk_init(void)
+{
+ int ret;
+
+ spin_lock_init(&tpk_port.spinlock);
+
+ ttyprintk_driver = tty_alloc_driver(1,
+ TTY_DRIVER_RESET_TERMIOS |
+ TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_UNNUMBERED_NODE);
+ if (IS_ERR(ttyprintk_driver))
+ return PTR_ERR(ttyprintk_driver);
+
+ tty_port_init(&tpk_port.port);
+ tpk_port.port.ops = &tpk_port_ops;
+
+ ttyprintk_driver->driver_name = "ttyprintk";
+ ttyprintk_driver->name = "ttyprintk";
+ ttyprintk_driver->major = TTYAUX_MAJOR;
+ ttyprintk_driver->minor_start = 3;
+ ttyprintk_driver->type = TTY_DRIVER_TYPE_CONSOLE;
+ ttyprintk_driver->init_termios = tty_std_termios;
+ ttyprintk_driver->init_termios.c_oflag = OPOST | OCRNL | ONOCR | ONLRET;
+ tty_set_operations(ttyprintk_driver, &ttyprintk_ops);
+ tty_port_link_device(&tpk_port.port, ttyprintk_driver, 0);
+
+ ret = tty_register_driver(ttyprintk_driver);
+ if (ret < 0) {
+ printk(KERN_ERR "Couldn't register ttyprintk driver\n");
+ goto error;
+ }
+
+ register_console(&ttyprintk_console);
+
+ return 0;
+
+error:
+ tty_driver_kref_put(ttyprintk_driver);
+ tty_port_destroy(&tpk_port.port);
+ return ret;
+}
+
+static void __exit ttyprintk_exit(void)
+{
+ unregister_console(&ttyprintk_console);
+ tty_unregister_driver(ttyprintk_driver);
+ tty_driver_kref_put(ttyprintk_driver);
+ tty_port_destroy(&tpk_port.port);
+}
+
+device_initcall(ttyprintk_init);
+module_exit(ttyprintk_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/uv_mmtimer.c b/drivers/char/uv_mmtimer.c
new file mode 100644
index 0000000000..956ebe2080
--- /dev/null
+++ b/drivers/char/uv_mmtimer.c
@@ -0,0 +1,220 @@
+/*
+ * Timer device implementation for SGI UV platform.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2009 Silicon Graphics, Inc. All rights reserved.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/mmtimer.h>
+#include <linux/miscdevice.h>
+#include <linux/posix-timers.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+
+#include <asm/genapic.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/bios.h>
+#include <asm/uv/uv.h>
+
+MODULE_AUTHOR("Dimitri Sivanich <sivanich@sgi.com>");
+MODULE_DESCRIPTION("SGI UV Memory Mapped RTC Timer");
+MODULE_LICENSE("GPL");
+
+/* name of the device, usually in /dev */
+#define UV_MMTIMER_NAME "mmtimer"
+#define UV_MMTIMER_DESC "SGI UV Memory Mapped RTC Timer"
+#define UV_MMTIMER_VERSION "1.0"
+
+static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma);
+
+/*
+ * Period in femtoseconds (10^-15 s)
+ */
+static unsigned long uv_mmtimer_femtoperiod;
+
+static const struct file_operations uv_mmtimer_fops = {
+ .owner = THIS_MODULE,
+ .mmap = uv_mmtimer_mmap,
+ .unlocked_ioctl = uv_mmtimer_ioctl,
+ .llseek = noop_llseek,
+};
+
+/**
+ * uv_mmtimer_ioctl - ioctl interface for /dev/uv_mmtimer
+ * @file: file structure for the device
+ * @cmd: command to execute
+ * @arg: optional argument to command
+ *
+ * Executes the command specified by @cmd. Returns 0 for success, < 0 for
+ * failure.
+ *
+ * Valid commands:
+ *
+ * %MMTIMER_GETOFFSET - Should return the offset (relative to the start
+ * of the page where the registers are mapped) for the counter in question.
+ *
+ * %MMTIMER_GETRES - Returns the resolution of the clock in femto (10^-15)
+ * seconds
+ *
+ * %MMTIMER_GETFREQ - Copies the frequency of the clock in Hz to the address
+ * specified by @arg
+ *
+ * %MMTIMER_GETBITS - Returns the number of bits in the clock's counter
+ *
+ * %MMTIMER_MMAPAVAIL - Returns 1 if registers can be mmap'd into userspace
+ *
+ * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it
+ * in the address specified by @arg.
+ */
+static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case MMTIMER_GETOFFSET: /* offset of the counter */
+ /*
+ * Starting with HUB rev 2.0, the UV RTC register is
+ * replicated across all cachelines of its own page.
+ * This allows faster simultaneous reads from a given socket.
+ *
+ * The offset returned is in 64 bit units.
+ */
+ if (uv_get_min_hub_revision_id() == 1)
+ ret = 0;
+ else
+ ret = ((uv_blade_processor_id() * L1_CACHE_BYTES) %
+ PAGE_SIZE) / 8;
+ break;
+
+ case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */
+ if (copy_to_user((unsigned long __user *)arg,
+ &uv_mmtimer_femtoperiod, sizeof(unsigned long)))
+ ret = -EFAULT;
+ break;
+
+ case MMTIMER_GETFREQ: /* frequency in Hz */
+ if (copy_to_user((unsigned long __user *)arg,
+ &sn_rtc_cycles_per_second,
+ sizeof(unsigned long)))
+ ret = -EFAULT;
+ break;
+
+ case MMTIMER_GETBITS: /* number of bits in the clock */
+ ret = hweight64(UVH_RTC_REAL_TIME_CLOCK_MASK);
+ break;
+
+ case MMTIMER_MMAPAVAIL:
+ ret = 1;
+ break;
+
+ case MMTIMER_GETCOUNTER:
+ if (copy_to_user((unsigned long __user *)arg,
+ (unsigned long *)uv_local_mmr_address(UVH_RTC),
+ sizeof(unsigned long)))
+ ret = -EFAULT;
+ break;
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+ return ret;
+}
+
+/**
+ * uv_mmtimer_mmap - maps the clock's registers into userspace
+ * @file: file structure for the device
+ * @vma: VMA to map the registers into
+ *
+ * Calls remap_pfn_range() to map the clock's registers into
+ * the calling process' address space.
+ */
+static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long uv_mmtimer_addr;
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ if (PAGE_SIZE > (1 << 16))
+ return -ENOSYS;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ uv_mmtimer_addr = UV_LOCAL_MMR_BASE | UVH_RTC;
+ uv_mmtimer_addr &= ~(PAGE_SIZE - 1);
+ uv_mmtimer_addr &= 0xfffffffffffffffUL;
+
+ if (remap_pfn_range(vma, vma->vm_start, uv_mmtimer_addr >> PAGE_SHIFT,
+ PAGE_SIZE, vma->vm_page_prot)) {
+ printk(KERN_ERR "remap_pfn_range failed in uv_mmtimer_mmap\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static struct miscdevice uv_mmtimer_miscdev = {
+ MISC_DYNAMIC_MINOR,
+ UV_MMTIMER_NAME,
+ &uv_mmtimer_fops
+};
+
+
+/**
+ * uv_mmtimer_init - device initialization routine
+ *
+ * Does initial setup for the uv_mmtimer device.
+ */
+static int __init uv_mmtimer_init(void)
+{
+ if (!is_uv_system()) {
+ printk(KERN_ERR "%s: Hardware unsupported\n", UV_MMTIMER_NAME);
+ return -1;
+ }
+
+ /*
+ * Sanity check the cycles/sec variable
+ */
+ if (sn_rtc_cycles_per_second < 100000) {
+ printk(KERN_ERR "%s: unable to determine clock frequency\n",
+ UV_MMTIMER_NAME);
+ return -1;
+ }
+
+ uv_mmtimer_femtoperiod = ((unsigned long)1E15 +
+ sn_rtc_cycles_per_second / 2) /
+ sn_rtc_cycles_per_second;
+
+ if (misc_register(&uv_mmtimer_miscdev)) {
+ printk(KERN_ERR "%s: failed to register device\n",
+ UV_MMTIMER_NAME);
+ return -1;
+ }
+
+ printk(KERN_INFO "%s: v%s, %ld MHz\n", UV_MMTIMER_DESC,
+ UV_MMTIMER_VERSION,
+ sn_rtc_cycles_per_second/(unsigned long)1E6);
+
+ return 0;
+}
+
+module_init(uv_mmtimer_init);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
new file mode 100644
index 0000000000..680d1ef2a2
--- /dev/null
+++ b/drivers/char/virtio_console.c
@@ -0,0 +1,2291 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
+ * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
+ * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
+ */
+#include <linux/cdev.h>
+#include <linux/debugfs.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/splice.h>
+#include <linux/pagemap.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/virtio.h>
+#include <linux/virtio_console.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include "../tty/hvc/hvc_console.h"
+
+#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
+#define VIRTCONS_MAX_PORTS 0x8000
+
+/*
+ * This is a global struct for storing common data for all the devices
+ * this driver handles.
+ *
+ * Mainly, it has a linked list for all the consoles in one place so
+ * that callbacks from hvc for get_chars(), put_chars() work properly
+ * across multiple devices and multiple ports per device.
+ */
+struct ports_driver_data {
+ /* Used for exporting per-port information to debugfs */
+ struct dentry *debugfs_dir;
+
+ /* List of all the devices we're handling */
+ struct list_head portdevs;
+
+ /* All the console devices handled by this driver */
+ struct list_head consoles;
+};
+
+static struct ports_driver_data pdrvdata;
+
+static const struct class port_class = {
+ .name = "virtio-ports",
+};
+
+static DEFINE_SPINLOCK(pdrvdata_lock);
+static DECLARE_COMPLETION(early_console_added);
+
+/* This struct holds information that's relevant only for console ports */
+struct console {
+ /* We'll place all consoles in a list in the pdrvdata struct */
+ struct list_head list;
+
+ /* The hvc device associated with this console port */
+ struct hvc_struct *hvc;
+
+ /* The size of the console */
+ struct winsize ws;
+
+ /*
+ * This number identifies the number that we used to register
+ * with hvc in hvc_instantiate() and hvc_alloc(); this is the
+ * number passed on by the hvc callbacks to us to
+ * differentiate between the other console ports handled by
+ * this driver
+ */
+ u32 vtermno;
+};
+
+static DEFINE_IDA(vtermno_ida);
+
+struct port_buffer {
+ char *buf;
+
+ /* size of the buffer in *buf above */
+ size_t size;
+
+ /* used length of the buffer */
+ size_t len;
+ /* offset in the buf from which to consume data */
+ size_t offset;
+
+ /* DMA address of buffer */
+ dma_addr_t dma;
+
+ /* Device we got DMA memory from */
+ struct device *dev;
+
+ /* List of pending dma buffers to free */
+ struct list_head list;
+
+ /* If sgpages == 0 then buf is used */
+ unsigned int sgpages;
+
+ /* sg is used if sgpages > 0. sg must be the last field in this struct */
+ struct scatterlist sg[];
+};
+
+/*
+ * This is a per-device struct that stores data common to all the
+ * ports for that device (vdev->priv).
+ */
+struct ports_device {
+ /* Next portdev in the list, head is in the pdrvdata struct */
+ struct list_head list;
+
+ /*
+ * Workqueue handlers where we process deferred work after
+ * notification
+ */
+ struct work_struct control_work;
+ struct work_struct config_work;
+
+ struct list_head ports;
+
+ /* To protect the list of ports */
+ spinlock_t ports_lock;
+
+ /* To protect the vq operations for the control channel */
+ spinlock_t c_ivq_lock;
+ spinlock_t c_ovq_lock;
+
+ /* max. number of ports this device can hold */
+ u32 max_nr_ports;
+
+ /* The virtio device we're associated with */
+ struct virtio_device *vdev;
+
+ /*
+ * A couple of virtqueues for the control channel: one for
+ * guest->host transfers, one for host->guest transfers
+ */
+ struct virtqueue *c_ivq, *c_ovq;
+
+ /*
+ * A control packet buffer for guest->host requests, protected
+ * by c_ovq_lock.
+ */
+ struct virtio_console_control cpkt;
+
+ /* Array of per-port IO virtqueues */
+ struct virtqueue **in_vqs, **out_vqs;
+
+ /* Major number for this device. Ports will be created as minors. */
+ int chr_major;
+};
+
+struct port_stats {
+ unsigned long bytes_sent, bytes_received, bytes_discarded;
+};
+
+/* This struct holds the per-port data */
+struct port {
+ /* Next port in the list, head is in the ports_device */
+ struct list_head list;
+
+ /* Pointer to the parent virtio_console device */
+ struct ports_device *portdev;
+
+ /* The current buffer from which data has to be fed to readers */
+ struct port_buffer *inbuf;
+
+ /*
+ * To protect the operations on the in_vq associated with this
+ * port. Has to be a spinlock because it can be called from
+ * interrupt context (get_char()).
+ */
+ spinlock_t inbuf_lock;
+
+ /* Protect the operations on the out_vq. */
+ spinlock_t outvq_lock;
+
+ /* The IO vqs for this port */
+ struct virtqueue *in_vq, *out_vq;
+
+ /* File in the debugfs directory that exposes this port's information */
+ struct dentry *debugfs_file;
+
+ /*
+ * Keep count of the bytes sent, received and discarded for
+ * this port for accounting and debugging purposes. These
+ * counts are not reset across port open / close events.
+ */
+ struct port_stats stats;
+
+ /*
+ * The entries in this struct will be valid if this port is
+ * hooked up to an hvc console
+ */
+ struct console cons;
+
+ /* Each port associates with a separate char device */
+ struct cdev *cdev;
+ struct device *dev;
+
+ /* Reference-counting to handle port hot-unplugs and file operations */
+ struct kref kref;
+
+ /* A waitqueue for poll() or blocking read operations */
+ wait_queue_head_t waitqueue;
+
+ /* The 'name' of the port that we expose via sysfs properties */
+ char *name;
+
+ /* We can notify apps of host connect / disconnect events via SIGIO */
+ struct fasync_struct *async_queue;
+
+ /* The 'id' to identify the port with the Host */
+ u32 id;
+
+ bool outvq_full;
+
+ /* Is the host device open */
+ bool host_connected;
+
+ /* We should allow only one process to open a port */
+ bool guest_connected;
+};
+
+/* This is the very early arch-specified put chars function. */
+static int (*early_put_chars)(u32, const char *, int);
+
+static struct port *find_port_by_vtermno(u32 vtermno)
+{
+ struct port *port;
+ struct console *cons;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdrvdata_lock, flags);
+ list_for_each_entry(cons, &pdrvdata.consoles, list) {
+ if (cons->vtermno == vtermno) {
+ port = container_of(cons, struct port, cons);
+ goto out;
+ }
+ }
+ port = NULL;
+out:
+ spin_unlock_irqrestore(&pdrvdata_lock, flags);
+ return port;
+}
+
+static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
+ dev_t dev)
+{
+ struct port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&portdev->ports_lock, flags);
+ list_for_each_entry(port, &portdev->ports, list) {
+ if (port->cdev->dev == dev) {
+ kref_get(&port->kref);
+ goto out;
+ }
+ }
+ port = NULL;
+out:
+ spin_unlock_irqrestore(&portdev->ports_lock, flags);
+
+ return port;
+}
+
+static struct port *find_port_by_devt(dev_t dev)
+{
+ struct ports_device *portdev;
+ struct port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdrvdata_lock, flags);
+ list_for_each_entry(portdev, &pdrvdata.portdevs, list) {
+ port = find_port_by_devt_in_portdev(portdev, dev);
+ if (port)
+ goto out;
+ }
+ port = NULL;
+out:
+ spin_unlock_irqrestore(&pdrvdata_lock, flags);
+ return port;
+}
+
+static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
+{
+ struct port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&portdev->ports_lock, flags);
+ list_for_each_entry(port, &portdev->ports, list)
+ if (port->id == id)
+ goto out;
+ port = NULL;
+out:
+ spin_unlock_irqrestore(&portdev->ports_lock, flags);
+
+ return port;
+}
+
+static struct port *find_port_by_vq(struct ports_device *portdev,
+ struct virtqueue *vq)
+{
+ struct port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&portdev->ports_lock, flags);
+ list_for_each_entry(port, &portdev->ports, list)
+ if (port->in_vq == vq || port->out_vq == vq)
+ goto out;
+ port = NULL;
+out:
+ spin_unlock_irqrestore(&portdev->ports_lock, flags);
+ return port;
+}
+
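+/* A port is a console port once an hvc device has been attached to it. */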
+static bool is_console_port(struct port *port)
+{
+ if (port->cons.hvc)
+ return true;
+ return false;
+}
+
+static bool is_rproc_serial(const struct virtio_device *vdev)
+{
+ return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL;
+}
+
+static inline bool use_multiport(struct ports_device *portdev)
+{
+ /*
+ * This condition can be true when put_chars is called from
+ * early_init
+ */
+ if (!portdev->vdev)
+ return false;
+ return __virtio_test_bit(portdev->vdev, VIRTIO_CONSOLE_F_MULTIPORT);
+}
+
+static DEFINE_SPINLOCK(dma_bufs_lock);
+static LIST_HEAD(pending_free_dma_bufs);
+
+static void free_buf(struct port_buffer *buf, bool can_sleep)
+{
+ unsigned int i;
+
+ for (i = 0; i < buf->sgpages; i++) {
+ struct page *page = sg_page(&buf->sg[i]);
+ if (!page)
+ break;
+ put_page(page);
+ }
+
+ if (!buf->dev) {
+ kfree(buf->buf);
+ } else if (is_rproc_enabled) {
+ unsigned long flags;
+
+ /* dma_free_coherent requires interrupts to be enabled. */
+ if (!can_sleep) {
+ /* queue up dma-buffers to be freed later */
+ spin_lock_irqsave(&dma_bufs_lock, flags);
+ list_add_tail(&buf->list, &pending_free_dma_bufs);
+ spin_unlock_irqrestore(&dma_bufs_lock, flags);
+ return;
+ }
+ dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma);
+
+ /* Release device refcnt and allow it to be freed */
+ put_device(buf->dev);
+ }
+
+ kfree(buf);
+}
+
+static void reclaim_dma_bufs(void)
+{
+ unsigned long flags;
+ struct port_buffer *buf, *tmp;
+ LIST_HEAD(tmp_list);
+
+ if (list_empty(&pending_free_dma_bufs))
+ return;
+
+ /* Create a copy of the pending_free_dma_bufs while holding the lock */
+ spin_lock_irqsave(&dma_bufs_lock, flags);
+ list_cut_position(&tmp_list, &pending_free_dma_bufs,
+ pending_free_dma_bufs.prev);
+ spin_unlock_irqrestore(&dma_bufs_lock, flags);
+
+ /* Release the dma buffers, without irqs enabled */
+ list_for_each_entry_safe(buf, tmp, &tmp_list, list) {
+ list_del(&buf->list);
+ free_buf(buf, true);
+ }
+}
+
+static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
+ int pages)
+{
+ struct port_buffer *buf;
+
+ reclaim_dma_bufs();
+
+ /*
+ * Allocate buffer and the sg list. The sg list array is allocated
+ * directly after the port_buffer struct.
+ */
+ buf = kmalloc(struct_size(buf, sg, pages), GFP_KERNEL);
+ if (!buf)
+ goto fail;
+
+ buf->sgpages = pages;
+ if (pages > 0) {
+ buf->dev = NULL;
+ buf->buf = NULL;
+ return buf;
+ }
+
+ if (is_rproc_serial(vdev)) {
+ /*
+ * Allocate DMA memory from ancestor. When a virtio
+ * device is created by remoteproc, the DMA memory is
+ * associated with the parent device:
+ * virtioY => remoteprocX#vdevYbuffer.
+ */
+ buf->dev = vdev->dev.parent;
+ if (!buf->dev)
+ goto free_buf;
+
+ /* Increase device refcnt to avoid freeing it */
+ get_device(buf->dev);
+ buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma,
+ GFP_KERNEL);
+ } else {
+ buf->dev = NULL;
+ buf->buf = kmalloc(buf_size, GFP_KERNEL);
+ }
+
+ if (!buf->buf)
+ goto free_buf;
+ buf->len = 0;
+ buf->offset = 0;
+ buf->size = buf_size;
+ return buf;
+
+free_buf:
+ kfree(buf);
+fail:
+ return NULL;
+}
+
+/* Callers should take appropriate locks */
+static struct port_buffer *get_inbuf(struct port *port)
+{
+ struct port_buffer *buf;
+ unsigned int len;
+
+ if (port->inbuf)
+ return port->inbuf;
+
+ buf = virtqueue_get_buf(port->in_vq, &len);
+ if (buf) {
+ buf->len = min_t(size_t, len, buf->size);
+ buf->offset = 0;
+ port->stats.bytes_received += len;
+ }
+ return buf;
+}
+
+/*
+ * Create a scatter-gather list representing our input buffer and put
+ * it in the queue.
+ *
+ * Callers should take appropriate locks.
+ */
+static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
+{
+ struct scatterlist sg[1];
+ int ret;
+
+ sg_init_one(sg, buf->buf, buf->size);
+
+ ret = virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC);
+ virtqueue_kick(vq);
+ if (!ret)
+ ret = vq->num_free;
+ return ret;
+}
+
+/* Discard any unread data this port has. Callers must hold appropriate locks. */
+static void discard_port_data(struct port *port)
+{
+ struct port_buffer *buf;
+ unsigned int err;
+
+ if (!port->portdev) {
+ /* Device has been unplugged. vqs are already gone. */
+ return;
+ }
+ buf = get_inbuf(port);
+
+ err = 0;
+ while (buf) {
+ port->stats.bytes_discarded += buf->len - buf->offset;
+ if (add_inbuf(port->in_vq, buf) < 0) {
+ err++;
+ free_buf(buf, false);
+ }
+ port->inbuf = NULL;
+ buf = get_inbuf(port);
+ }
+ if (err)
+ dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
+ err);
+}
+
+static bool port_has_data(struct port *port)
+{
+ unsigned long flags;
+ bool ret;
+
+ ret = false;
+ spin_lock_irqsave(&port->inbuf_lock, flags);
+ port->inbuf = get_inbuf(port);
+ if (port->inbuf)
+ ret = true;
+
+ spin_unlock_irqrestore(&port->inbuf_lock, flags);
+ return ret;
+}
+
+static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
+ unsigned int event, unsigned int value)
+{
+ struct scatterlist sg[1];
+ struct virtqueue *vq;
+ unsigned int len;
+
+ if (!use_multiport(portdev))
+ return 0;
+
+ vq = portdev->c_ovq;
+
+ spin_lock(&portdev->c_ovq_lock);
+
+ portdev->cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
+ portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event);
+ portdev->cpkt.value = cpu_to_virtio16(portdev->vdev, value);
+
+ sg_init_one(sg, &portdev->cpkt, sizeof(struct virtio_console_control));
+
+ if (virtqueue_add_outbuf(vq, sg, 1, &portdev->cpkt, GFP_ATOMIC) == 0) {
+ virtqueue_kick(vq);
+ while (!virtqueue_get_buf(vq, &len)
+ && !virtqueue_is_broken(vq))
+ cpu_relax();
+ }
+
+ spin_unlock(&portdev->c_ovq_lock);
+ return 0;
+}
+
+static ssize_t send_control_msg(struct port *port, unsigned int event,
+ unsigned int value)
+{
+ /* Did the port get unplugged before userspace closed it? */
+ if (port->portdev)
+ return __send_control_msg(port->portdev, port->id, event, value);
+ return 0;
+}
+
+
+/* Callers must take the port->outvq_lock */
+static void reclaim_consumed_buffers(struct port *port)
+{
+ struct port_buffer *buf;
+ unsigned int len;
+
+ if (!port->portdev) {
+ /* Device has been unplugged. vqs are already gone. */
+ return;
+ }
+ while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
+ free_buf(buf, false);
+ port->outvq_full = false;
+ }
+}
+
+static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
+ int nents, size_t in_count,
+ void *data, bool nonblock)
+{
+ struct virtqueue *out_vq;
+ int err;
+ unsigned long flags;
+ unsigned int len;
+
+ out_vq = port->out_vq;
+
+ spin_lock_irqsave(&port->outvq_lock, flags);
+
+ reclaim_consumed_buffers(port);
+
+ err = virtqueue_add_outbuf(out_vq, sg, nents, data, GFP_ATOMIC);
+
+ /* Tell Host to go! */
+ virtqueue_kick(out_vq);
+
+ if (err) {
+ in_count = 0;
+ goto done;
+ }
+
+ if (out_vq->num_free == 0)
+ port->outvq_full = true;
+
+ if (nonblock)
+ goto done;
+
+ /*
+ * Wait till the host acknowledges it pushed out the data we
+ * sent. This is done for data from the hvc_console; the tty
+ * operations are performed with spinlocks held so we can't
+ * sleep here. An alternative would be to copy the data to a
+ * buffer and relax the spinning requirement. The downside is
+ * we need to kmalloc a GFP_ATOMIC buffer each time the
+ * console driver writes something out.
+ */
+ while (!virtqueue_get_buf(out_vq, &len)
+ && !virtqueue_is_broken(out_vq))
+ cpu_relax();
+done:
+ spin_unlock_irqrestore(&port->outvq_lock, flags);
+
+ port->stats.bytes_sent += in_count;
+ /*
+ * We're expected to return the amount of data we wrote -- all
+ * of it
+ */
+ return in_count;
+}
+
+/*
+ * Give out the data that's requested from the buffer that we have
+ * queued up.
+ */
+static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
+ size_t out_count, bool to_user)
+{
+ struct port_buffer *buf;
+ unsigned long flags;
+
+ if (!out_count || !port_has_data(port))
+ return 0;
+
+ buf = port->inbuf;
+ out_count = min(out_count, buf->len - buf->offset);
+
+ if (to_user) {
+ ssize_t ret;
+
+ ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
+ if (ret)
+ return -EFAULT;
+ } else {
+ memcpy((__force char *)out_buf, buf->buf + buf->offset,
+ out_count);
+ }
+
+ buf->offset += out_count;
+
+ if (buf->offset == buf->len) {
+ /*
+ * We're done using all the data in this buffer.
+ * Re-queue so that the Host can send us more data.
+ */
+ spin_lock_irqsave(&port->inbuf_lock, flags);
+ port->inbuf = NULL;
+
+ if (add_inbuf(port->in_vq, buf) < 0)
+ dev_warn(port->dev, "failed add_buf\n");
+
+ spin_unlock_irqrestore(&port->inbuf_lock, flags);
+ }
+ /* Return the number of bytes actually copied */
+ return out_count;
+}
+
+/* The condition that must be true for polling to end */
+static bool will_read_block(struct port *port)
+{
+ if (!port->guest_connected) {
+ /* Port got hot-unplugged. Let's exit. */
+ return false;
+ }
+ return !port_has_data(port) && port->host_connected;
+}
+
+static bool will_write_block(struct port *port)
+{
+ bool ret;
+
+ if (!port->guest_connected) {
+ /* Port got hot-unplugged. Let's exit. */
+ return false;
+ }
+ if (!port->host_connected)
+ return true;
+
+ spin_lock_irq(&port->outvq_lock);
+ /*
+ * Check if the Host has consumed any buffers since we last
+ * sent data (this is only applicable for nonblocking ports).
+ */
+ reclaim_consumed_buffers(port);
+ ret = port->outvq_full;
+ spin_unlock_irq(&port->outvq_lock);
+
+ return ret;
+}
+
+static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct port *port;
+ ssize_t ret;
+
+ port = filp->private_data;
+
+ /* Port is hot-unplugged. */
+ if (!port->guest_connected)
+ return -ENODEV;
+
+ if (!port_has_data(port)) {
+ /*
+ * If nothing's connected on the host just return 0 in
+ * case of list_empty; this tells the userspace app
+ * that there's no connection
+ */
+ if (!port->host_connected)
+ return 0;
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_freezable(port->waitqueue,
+ !will_read_block(port));
+ if (ret < 0)
+ return ret;
+ }
+ /* Port got hot-unplugged while we were waiting above. */
+ if (!port->guest_connected)
+ return -ENODEV;
+ /*
+ * We could've received a disconnection message while we were
+ * waiting for more data.
+ *
+ * This check is not folded into the if() statement above because we
+ * might have received some data and the host could also have
+ * disconnected after we were woken up from our wait. So we
+ * really want to hand over whatever data we have and only then
+ * check for host_connected.
+ */
+ if (!port_has_data(port) && !port->host_connected)
+ return 0;
+
+ return fill_readbuf(port, ubuf, count, true);
+}
+
+static int wait_port_writable(struct port *port, bool nonblock)
+{
+ int ret;
+
+ if (will_write_block(port)) {
+ if (nonblock)
+ return -EAGAIN;
+
+ ret = wait_event_freezable(port->waitqueue,
+ !will_write_block(port));
+ if (ret < 0)
+ return ret;
+ }
+ /* Port got hot-unplugged. */
+ if (!port->guest_connected)
+ return -ENODEV;
+
+ return 0;
+}
+
+static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct port *port;
+ struct port_buffer *buf;
+ ssize_t ret;
+ bool nonblock;
+ struct scatterlist sg[1];
+
+ /* Userspace could be out to fool us */
+ if (!count)
+ return 0;
+
+ port = filp->private_data;
+
+ nonblock = filp->f_flags & O_NONBLOCK;
+
+ ret = wait_port_writable(port, nonblock);
+ if (ret < 0)
+ return ret;
+
+ count = min((size_t)(32 * 1024), count);
+
+ buf = alloc_buf(port->portdev->vdev, count, 0);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = copy_from_user(buf->buf, ubuf, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto free_buf;
+ }
+
+ /*
+ * We now ask send_buf() to not spin for generic ports -- we
+ * can re-use the same code path that non-blocking file
+ * descriptors take for blocking file descriptors since the
+ * wait is already done and we're certain the write will go
+ * through to the host.
+ */
+ nonblock = true;
+ sg_init_one(sg, buf->buf, count);
+ ret = __send_to_port(port, sg, 1, count, buf, nonblock);
+
+ if (nonblock && ret > 0)
+ goto out;
+
+free_buf:
+ free_buf(buf, true);
+out:
+ return ret;
+}
+
+struct sg_list {
+ unsigned int n;
+ unsigned int size;
+ size_t len;
+ struct scatterlist *sg;
+};
+
+static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
+ struct splice_desc *sd)
+{
+ struct sg_list *sgl = sd->u.data;
+ unsigned int offset, len;
+
+ if (sgl->n == sgl->size)
+ return 0;
+
+ /* Try lock this page */
+ if (pipe_buf_try_steal(pipe, buf)) {
+ /* Get reference and unlock page for moving */
+ get_page(buf->page);
+ unlock_page(buf->page);
+
+ len = min(buf->len, sd->len);
+ sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset);
+ } else {
+ /* Fall back to copying a page */
+ struct page *page = alloc_page(GFP_KERNEL);
+ char *src;
+
+ if (!page)
+ return -ENOMEM;
+
+ offset = sd->pos & ~PAGE_MASK;
+
+ len = sd->len;
+ if (len + offset > PAGE_SIZE)
+ len = PAGE_SIZE - offset;
+
+ src = kmap_atomic(buf->page);
+ memcpy(page_address(page) + offset, src + buf->offset, len);
+ kunmap_atomic(src);
+
+ sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
+ }
+ sgl->n++;
+ sgl->len += len;
+
+ return len;
+}
+
+/* Faster zero-copy write by splicing */
+static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
+ struct file *filp, loff_t *ppos,
+ size_t len, unsigned int flags)
+{
+ struct port *port = filp->private_data;
+ struct sg_list sgl;
+ ssize_t ret;
+ struct port_buffer *buf;
+ struct splice_desc sd = {
+ .total_len = len,
+ .flags = flags,
+ .pos = *ppos,
+ .u.data = &sgl,
+ };
+ unsigned int occupancy;
+
+ /*
+ * Rproc_serial does not yet support splice. To support splice
+ * pipe_to_sg() must allocate dma-buffers and copy content from
+ * regular pages to dma pages. And alloc_buf and free_buf must
+ * support allocating and freeing such a list of dma-buffers.
+ */
+ if (is_rproc_serial(port->out_vq->vdev))
+ return -EINVAL;
+
+ pipe_lock(pipe);
+ ret = 0;
+ if (pipe_empty(pipe->head, pipe->tail))
+ goto error_out;
+
+ ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
+ if (ret < 0)
+ goto error_out;
+
+ occupancy = pipe_occupancy(pipe->head, pipe->tail);
+ buf = alloc_buf(port->portdev->vdev, 0, occupancy);
+
+ if (!buf) {
+ ret = -ENOMEM;
+ goto error_out;
+ }
+
+ sgl.n = 0;
+ sgl.len = 0;
+ sgl.size = occupancy;
+ sgl.sg = buf->sg;
+ sg_init_table(sgl.sg, sgl.size);
+ ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
+ pipe_unlock(pipe);
+ if (likely(ret > 0))
+ ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
+
+ if (unlikely(ret <= 0))
+ free_buf(buf, true);
+ return ret;
+
+error_out:
+ pipe_unlock(pipe);
+ return ret;
+}
+
+static __poll_t port_fops_poll(struct file *filp, poll_table *wait)
+{
+ struct port *port;
+ __poll_t ret;
+
+ port = filp->private_data;
+ poll_wait(filp, &port->waitqueue, wait);
+
+ if (!port->guest_connected) {
+ /* Port got unplugged */
+ return EPOLLHUP;
+ }
+ ret = 0;
+ if (!will_read_block(port))
+ ret |= EPOLLIN | EPOLLRDNORM;
+ if (!will_write_block(port))
+ ret |= EPOLLOUT;
+ if (!port->host_connected)
+ ret |= EPOLLHUP;
+
+ return ret;
+}
+
+static void remove_port(struct kref *kref);
+
+static int port_fops_release(struct inode *inode, struct file *filp)
+{
+ struct port *port;
+
+ port = filp->private_data;
+
+ /* Notify host of port being closed */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
+
+ spin_lock_irq(&port->inbuf_lock);
+ port->guest_connected = false;
+
+ discard_port_data(port);
+
+ spin_unlock_irq(&port->inbuf_lock);
+
+ spin_lock_irq(&port->outvq_lock);
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
+
+ reclaim_dma_bufs();
+ /*
+ * Locks aren't necessary here as a port can't be opened after
+ * unplug, and if a port isn't unplugged, a kref would already
+ * exist for the port. Plus, taking ports_lock here would
+ * create a dependency on other locks taken by functions
+ * inside remove_port if we're the last holder of the port,
+ * creating many problems.
+ */
+ kref_put(&port->kref, remove_port);
+
+ return 0;
+}
+
+static int port_fops_open(struct inode *inode, struct file *filp)
+{
+ struct cdev *cdev = inode->i_cdev;
+ struct port *port;
+ int ret;
+
+ /* We get the port with a kref here */
+ port = find_port_by_devt(cdev->dev);
+ if (!port) {
+ /* Port was unplugged before we could proceed */
+ return -ENXIO;
+ }
+ filp->private_data = port;
+
+ /*
+ * Don't allow opening of console port devices -- that's done
+ * via /dev/hvc
+ */
+ if (is_console_port(port)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /* Allow only one process to open a particular port at a time */
+ spin_lock_irq(&port->inbuf_lock);
+ if (port->guest_connected) {
+ spin_unlock_irq(&port->inbuf_lock);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ port->guest_connected = true;
+ spin_unlock_irq(&port->inbuf_lock);
+
+ spin_lock_irq(&port->outvq_lock);
+ /*
+ * There might be a chance that we missed reclaiming a few
+ * buffers in the window of the port getting previously closed
+ * and opening now.
+ */
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
+
+ nonseekable_open(inode, filp);
+
+ /* Notify host of port being opened */
+ send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
+
+ return 0;
+out:
+ kref_put(&port->kref, remove_port);
+ return ret;
+}
+
+static int port_fops_fasync(int fd, struct file *filp, int mode)
+{
+ struct port *port;
+
+ port = filp->private_data;
+ return fasync_helper(fd, filp, mode, &port->async_queue);
+}
+
+/*
+ * The file operations that we support: programs in the guest can open
+ * a console device, read from it, write to it, poll for data and
+ * close it. The devices are at
+ * /dev/vport<device number>p<port number>
+ */
+static const struct file_operations port_fops = {
+ .owner = THIS_MODULE,
+ .open = port_fops_open,
+ .read = port_fops_read,
+ .write = port_fops_write,
+ .splice_write = port_fops_splice_write,
+ .poll = port_fops_poll,
+ .release = port_fops_release,
+ .fasync = port_fops_fasync,
+ .llseek = no_llseek,
+};
+
+/*
+ * The put_chars() callback is pretty straightforward.
+ *
+ * We turn the characters into a scatter-gather list, add it to the
+ * output queue and then kick the Host. Then we sit here waiting for
+ * it to finish: inefficient in theory, but in practice
+ * implementations will do it immediately.
+ */
+static int put_chars(u32 vtermno, const char *buf, int count)
+{
+ struct port *port;
+ struct scatterlist sg[1];
+ void *data;
+ int ret;
+
+ if (unlikely(early_put_chars))
+ return early_put_chars(vtermno, buf, count);
+
+ port = find_port_by_vtermno(vtermno);
+ if (!port)
+ return -EPIPE;
+
+ data = kmemdup(buf, count, GFP_ATOMIC);
+ if (!data)
+ return -ENOMEM;
+
+ sg_init_one(sg, data, count);
+ ret = __send_to_port(port, sg, 1, count, data, false);
+ kfree(data);
+ return ret;
+}
+
+/*
+ * get_chars() is the callback from the hvc_console infrastructure
+ * when an interrupt is received.
+ *
+ * We call out to fill_readbuf that gets us the required data from the
+ * buffers that are queued up.
+ */
+static int get_chars(u32 vtermno, char *buf, int count)
+{
+ struct port *port;
+
+ /* If we've not set up the port yet, we have no input to give. */
+ if (unlikely(early_put_chars))
+ return 0;
+
+ port = find_port_by_vtermno(vtermno);
+ if (!port)
+ return -EPIPE;
+
+ /* If we don't have an input queue yet, we can't get input. */
+ BUG_ON(!port->in_vq);
+
+ return fill_readbuf(port, (__force char __user *)buf, count, false);
+}
+
+static void resize_console(struct port *port)
+{
+ struct virtio_device *vdev;
+
+ /* The port could have been hot-unplugged */
+ if (!port || !is_console_port(port))
+ return;
+
+ vdev = port->portdev->vdev;
+
+ /* Don't test F_SIZE at all if we're rproc: not a valid feature! */
+ if (!is_rproc_serial(vdev) &&
+ virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
+ hvc_resize(port->cons.hvc, port->cons.ws);
+}
+
+/* We set the configuration at this point, since we now have a tty */
+static int notifier_add_vio(struct hvc_struct *hp, int data)
+{
+ struct port *port;
+
+ port = find_port_by_vtermno(hp->vtermno);
+ if (!port)
+ return -EINVAL;
+
+ hp->irq_requested = 1;
+ resize_console(port);
+
+ return 0;
+}
+
+static void notifier_del_vio(struct hvc_struct *hp, int data)
+{
+ hp->irq_requested = 0;
+}
+
+/* The operations for console ports. */
+static const struct hv_ops hv_ops = {
+ .get_chars = get_chars,
+ .put_chars = put_chars,
+ .notifier_add = notifier_add_vio,
+ .notifier_del = notifier_del_vio,
+ .notifier_hangup = notifier_del_vio,
+};
+
+/*
+ * Console drivers are initialized very early so boot messages can go
+ * out, so we do things slightly differently from the generic virtio
+ * initialization of the net and block drivers.
+ *
+ * At this stage, the console is output-only. It's too early to set
+ * up a virtqueue, so we let the drivers do some boutique early-output
+ * thing.
+ */
+int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
+{
+ early_put_chars = put_chars;
+ return hvc_instantiate(0, 0, &hv_ops);
+}
+
+static int init_port_console(struct port *port)
+{
+ int ret;
+
+ /*
+ * The Host's telling us this port is a console port. Hook it
+ * up with an hvc console.
+ *
+ * To set up and manage our virtual console, we call
+ * hvc_alloc().
+ *
+ * The first argument of hvc_alloc() is the virtual console
+ * number. The second argument is the parameter for the
+ * notification mechanism (like irq number). We currently
+ * leave this as zero; virtqueues have implicit notifications.
+ *
+ * The third argument is a "struct hv_ops" containing the
+ * put_chars(), get_chars(), notifier_add() and notifier_del()
+ * pointers. The final argument is the output buffer size: we
+ * can do any size, so we put PAGE_SIZE here.
+ */
+ ret = ida_alloc_min(&vtermno_ida, 1, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ port->cons.vtermno = ret;
+ port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
+ if (IS_ERR(port->cons.hvc)) {
+ ret = PTR_ERR(port->cons.hvc);
+ dev_err(port->dev,
+ "error %d allocating hvc for port\n", ret);
+ port->cons.hvc = NULL;
+ ida_free(&vtermno_ida, port->cons.vtermno);
+ return ret;
+ }
+ spin_lock_irq(&pdrvdata_lock);
+ list_add_tail(&port->cons.list, &pdrvdata.consoles);
+ spin_unlock_irq(&pdrvdata_lock);
+ port->guest_connected = true;
+
+ /*
+ * Start using the new console output if this is the first
+ * console to come up.
+ */
+ if (early_put_chars)
+ early_put_chars = NULL;
+
+ /* Notify host of port being opened */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
+
+ return 0;
+}
+
+static ssize_t show_port_name(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct port *port;
+
+ port = dev_get_drvdata(dev);
+
+ return sprintf(buffer, "%s\n", port->name);
+}
+
+static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL);
+
+static struct attribute *port_sysfs_entries[] = {
+ &dev_attr_name.attr,
+ NULL
+};
+
+static const struct attribute_group port_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = port_sysfs_entries,
+};
+
+static int port_debugfs_show(struct seq_file *s, void *data)
+{
+ struct port *port = s->private;
+
+ seq_printf(s, "name: %s\n", port->name ? port->name : "");
+ seq_printf(s, "guest_connected: %d\n", port->guest_connected);
+ seq_printf(s, "host_connected: %d\n", port->host_connected);
+ seq_printf(s, "outvq_full: %d\n", port->outvq_full);
+ seq_printf(s, "bytes_sent: %lu\n", port->stats.bytes_sent);
+ seq_printf(s, "bytes_received: %lu\n", port->stats.bytes_received);
+ seq_printf(s, "bytes_discarded: %lu\n", port->stats.bytes_discarded);
+ seq_printf(s, "is_console: %s\n",
+ is_console_port(port) ? "yes" : "no");
+ seq_printf(s, "console_vtermno: %u\n", port->cons.vtermno);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(port_debugfs);
+
+static void set_console_size(struct port *port, u16 rows, u16 cols)
+{
+ if (!port || !is_console_port(port))
+ return;
+
+ port->cons.ws.ws_row = rows;
+ port->cons.ws.ws_col = cols;
+}
+
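+/* Fill @vq with page-sized input buffers until it takes no more; returns the number added. */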
+static int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+{
+ struct port_buffer *buf;
+ int nr_added_bufs;
+ int ret;
+
+ nr_added_bufs = 0;
+ do {
+ buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
+ if (!buf)
+ return -ENOMEM;
+
+ spin_lock_irq(lock);
+ ret = add_inbuf(vq, buf);
+ if (ret < 0) {
+ spin_unlock_irq(lock);
+ free_buf(buf, true);
+ return ret;
+ }
+ nr_added_bufs++;
+ spin_unlock_irq(lock);
+ } while (ret > 0);
+
+ return nr_added_bufs;
+}
+
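+/* Deliver SIGIO to a process that asked for async notification on this port. */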
+static void send_sigio_to_port(struct port *port)
+{
+ if (port->async_queue && port->guest_connected)
+ kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
+}
+
+static int add_port(struct ports_device *portdev, u32 id)
+{
+ char debugfs_name[16];
+ struct port *port;
+ dev_t devt;
+ int err;
+
+ port = kmalloc(sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ err = -ENOMEM;
+ goto fail;
+ }
+ kref_init(&port->kref);
+
+ port->portdev = portdev;
+ port->id = id;
+
+ port->name = NULL;
+ port->inbuf = NULL;
+ port->cons.hvc = NULL;
+ port->async_queue = NULL;
+
+ port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
+ port->cons.vtermno = 0;
+
+ port->host_connected = port->guest_connected = false;
+ port->stats = (struct port_stats) { 0 };
+
+ port->outvq_full = false;
+
+ port->in_vq = portdev->in_vqs[port->id];
+ port->out_vq = portdev->out_vqs[port->id];
+
+ port->cdev = cdev_alloc();
+ if (!port->cdev) {
+ dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
+ err = -ENOMEM;
+ goto free_port;
+ }
+ port->cdev->ops = &port_fops;
+
+ devt = MKDEV(portdev->chr_major, id);
+ err = cdev_add(port->cdev, devt, 1);
+ if (err < 0) {
+ dev_err(&port->portdev->vdev->dev,
+ "Error %d adding cdev for port %u\n", err, id);
+ goto free_cdev;
+ }
+ port->dev = device_create(&port_class, &port->portdev->vdev->dev,
+ devt, port, "vport%up%u",
+ port->portdev->vdev->index, id);
+ if (IS_ERR(port->dev)) {
+ err = PTR_ERR(port->dev);
+ dev_err(&port->portdev->vdev->dev,
+ "Error %d creating device for port %u\n",
+ err, id);
+ goto free_cdev;
+ }
+
+ spin_lock_init(&port->inbuf_lock);
+ spin_lock_init(&port->outvq_lock);
+ init_waitqueue_head(&port->waitqueue);
+
+ /* We can safely ignore ENOSPC because it means
+ * the queue already has buffers. Buffers are removed
+ * only by virtcons_remove(), not by unplug_port()
+ */
+ err = fill_queue(port->in_vq, &port->inbuf_lock);
+ if (err < 0 && err != -ENOSPC) {
+ dev_err(port->dev, "Error allocating inbufs\n");
+ goto free_device;
+ }
+
+ if (is_rproc_serial(port->portdev->vdev))
+ /*
+ * For rproc_serial assume remote processor is connected.
+ * rproc_serial does not want the console port, only
+ * the generic port implementation.
+ */
+ port->host_connected = true;
+ else if (!use_multiport(port->portdev)) {
+ /*
+ * If we're not using multiport support,
+ * this has to be a console port.
+ */
+ err = init_port_console(port);
+ if (err)
+ goto free_inbufs;
+ }
+
+ spin_lock_irq(&portdev->ports_lock);
+ list_add_tail(&port->list, &port->portdev->ports);
+ spin_unlock_irq(&portdev->ports_lock);
+
+ /*
+ * Tell the Host we're set so that it can send us various
+ * configuration parameters for this port (eg, port name,
+ * caching, whether this is a console port, etc.)
+ */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+
+ /*
+ * Finally, create the debugfs file that we can use to
+ * inspect a port's state at any time
+ */
+ snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u",
+ port->portdev->vdev->index, id);
+ port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
+ pdrvdata.debugfs_dir,
+ port, &port_debugfs_fops);
+ return 0;
+
+free_inbufs:
+free_device:
+ device_destroy(&port_class, port->dev->devt);
+free_cdev:
+ cdev_del(port->cdev);
+free_port:
+ kfree(port);
+fail:
+ /* The host might want to notify management sw about port add failure */
+ __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
+ return err;
+}
+
+/* No users remain, remove all port-specific data. */
+static void remove_port(struct kref *kref)
+{
+ struct port *port;
+
+ port = container_of(kref, struct port, kref);
+
+ kfree(port);
+}
+
+static void remove_port_data(struct port *port)
+{
+ spin_lock_irq(&port->inbuf_lock);
+ /* Remove unused data this port might have received. */
+ discard_port_data(port);
+ spin_unlock_irq(&port->inbuf_lock);
+
+ spin_lock_irq(&port->outvq_lock);
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
+}
+
+/*
+ * Port got unplugged. Remove port from portdev's list and drop the
+ * kref reference. If no userspace has this port opened, it will
+ * result in immediate removal of the port.
+ */
+static void unplug_port(struct port *port)
+{
+ spin_lock_irq(&port->portdev->ports_lock);
+ list_del(&port->list);
+ spin_unlock_irq(&port->portdev->ports_lock);
+
+ spin_lock_irq(&port->inbuf_lock);
+ if (port->guest_connected) {
+ /* Let the app know the port is going down. */
+ send_sigio_to_port(port);
+
+ /* Do this after sigio is actually sent */
+ port->guest_connected = false;
+ port->host_connected = false;
+
+ wake_up_interruptible(&port->waitqueue);
+ }
+ spin_unlock_irq(&port->inbuf_lock);
+
+ if (is_console_port(port)) {
+ spin_lock_irq(&pdrvdata_lock);
+ list_del(&port->cons.list);
+ spin_unlock_irq(&pdrvdata_lock);
+ hvc_remove(port->cons.hvc);
+ ida_free(&vtermno_ida, port->cons.vtermno);
+ }
+
+ remove_port_data(port);
+
+ /*
+ * We should just assume the device itself has gone off --
+ * else a close on an open port later will try to send out a
+ * control message.
+ */
+ port->portdev = NULL;
+
+ sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
+ device_destroy(&port_class, port->dev->devt);
+ cdev_del(port->cdev);
+
+ debugfs_remove(port->debugfs_file);
+ kfree(port->name);
+
+ /*
+ * Locks around here are not necessary - a port can't be
+ * opened after we removed the port struct from ports_list
+ * above.
+ */
+ kref_put(&port->kref, remove_port);
+}
+
+/* Any private messages that the Host and Guest want to share */
+static void handle_control_message(struct virtio_device *vdev,
+ struct ports_device *portdev,
+ struct port_buffer *buf)
+{
+ struct virtio_console_control *cpkt;
+ struct port *port;
+ size_t name_size;
+ int err;
+
+ cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
+
+ port = find_port_by_id(portdev, virtio32_to_cpu(vdev, cpkt->id));
+ if (!port &&
+ cpkt->event != cpu_to_virtio16(vdev, VIRTIO_CONSOLE_PORT_ADD)) {
+ /* No such port and this isn't a port-add request. Drop it. */
+ dev_dbg(&portdev->vdev->dev,
+ "Invalid index %u in control packet\n", cpkt->id);
+ return;
+ }
+
+ switch (virtio16_to_cpu(vdev, cpkt->event)) {
+ case VIRTIO_CONSOLE_PORT_ADD:
+ if (port) {
+ dev_dbg(&portdev->vdev->dev,
+ "Port %u already added\n", port->id);
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+ break;
+ }
+ if (virtio32_to_cpu(vdev, cpkt->id) >=
+ portdev->max_nr_ports) {
+ dev_warn(&portdev->vdev->dev,
+ "Request for adding port with "
+ "out-of-bound id %u, max. supported id: %u\n",
+ cpkt->id, portdev->max_nr_ports - 1);
+ break;
+ }
+ add_port(portdev, virtio32_to_cpu(vdev, cpkt->id));
+ break;
+ case VIRTIO_CONSOLE_PORT_REMOVE:
+ unplug_port(port);
+ break;
+ case VIRTIO_CONSOLE_CONSOLE_PORT:
+ if (!cpkt->value)
+ break;
+ if (is_console_port(port))
+ break;
+
+ init_port_console(port);
+ complete(&early_console_added);
+ /*
+ * Could remove the port here in case init fails - but
+ * have to notify the host first.
+ */
+ break;
+ case VIRTIO_CONSOLE_RESIZE: {
+ struct {
+ __u16 rows;
+ __u16 cols;
+ } size;
+
+ if (!is_console_port(port))
+ break;
+
+ memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
+ sizeof(size));
+ set_console_size(port, size.rows, size.cols);
+
+ port->cons.hvc->irq_requested = 1;
+ resize_console(port);
+ break;
+ }
+ case VIRTIO_CONSOLE_PORT_OPEN:
+ port->host_connected = virtio16_to_cpu(vdev, cpkt->value);
+ wake_up_interruptible(&port->waitqueue);
+ /*
+ * If the host port got closed and the host had any
+ * unconsumed buffers, we'll be able to reclaim them
+ * now.
+ */
+ spin_lock_irq(&port->outvq_lock);
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
+
+ /*
+ * If the guest is connected, it'll be interested in
+ * knowing the host connection state changed.
+ */
+ spin_lock_irq(&port->inbuf_lock);
+ send_sigio_to_port(port);
+ spin_unlock_irq(&port->inbuf_lock);
+ break;
+ case VIRTIO_CONSOLE_PORT_NAME:
+ /*
+ * If we woke up after hibernation, we can get this
+ * again. Skip it in that case.
+ */
+ if (port->name)
+ break;
+
+ /*
+ * Skip the size of the header and the cpkt to get the size
+ * of the name that was sent
+ */
+ name_size = buf->len - buf->offset - sizeof(*cpkt) + 1;
+
+ port->name = kmalloc(name_size, GFP_KERNEL);
+ if (!port->name) {
+ dev_err(port->dev,
+ "Not enough space to store port name\n");
+ break;
+ }
+ strscpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
+ name_size);
+
+ /*
+ * Since we only have one sysfs attribute, 'name',
+ * create it only if we have a name for the port.
+ */
+ err = sysfs_create_group(&port->dev->kobj,
+ &port_attribute_group);
+ if (err) {
+ dev_err(port->dev,
+ "Error %d creating sysfs device attributes\n",
+ err);
+ } else {
+ /*
+ * Generate a udev event so that appropriate
+ * symlinks can be created based on udev
+ * rules.
+ */
+ kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
+ }
+ break;
+ }
+}
+
+static void control_work_handler(struct work_struct *work)
+{
+ struct ports_device *portdev;
+ struct virtqueue *vq;
+ struct port_buffer *buf;
+ unsigned int len;
+
+ portdev = container_of(work, struct ports_device, control_work);
+ vq = portdev->c_ivq;
+
+ spin_lock(&portdev->c_ivq_lock);
+ while ((buf = virtqueue_get_buf(vq, &len))) {
+ spin_unlock(&portdev->c_ivq_lock);
+
+ buf->len = min_t(size_t, len, buf->size);
+ buf->offset = 0;
+
+ handle_control_message(vq->vdev, portdev, buf);
+
+ spin_lock(&portdev->c_ivq_lock);
+ if (add_inbuf(portdev->c_ivq, buf) < 0) {
+ dev_warn(&portdev->vdev->dev,
+ "Error adding buffer to queue\n");
+ free_buf(buf, false);
+ }
+ }
+ spin_unlock(&portdev->c_ivq_lock);
+}
+
+static void flush_bufs(struct virtqueue *vq, bool can_sleep)
+{
+ struct port_buffer *buf;
+ unsigned int len;
+
+ while ((buf = virtqueue_get_buf(vq, &len)))
+ free_buf(buf, can_sleep);
+}
+
+static void out_intr(struct virtqueue *vq)
+{
+ struct port *port;
+
+ port = find_port_by_vq(vq->vdev->priv, vq);
+ if (!port) {
+ flush_bufs(vq, false);
+ return;
+ }
+
+ wake_up_interruptible(&port->waitqueue);
+}
+
+static void in_intr(struct virtqueue *vq)
+{
+ struct port *port;
+ unsigned long flags;
+
+ port = find_port_by_vq(vq->vdev->priv, vq);
+ if (!port) {
+ flush_bufs(vq, false);
+ return;
+ }
+
+ spin_lock_irqsave(&port->inbuf_lock, flags);
+ port->inbuf = get_inbuf(port);
+
+	/*
+	 * Normally the port should not accept data when the port is
+	 * closed. For generic serial ports, the host won't (shouldn't)
+	 * send data till the guest is connected. But this condition
+	 * can be reached when a console port is not yet connected (no
+	 * tty is spawned) and the other side sends out data over the
+	 * vring, or when a remote device starts sending data before
+	 * the ports are opened.
+	 *
+	 * A generic serial port will discard data if not connected,
+	 * while console ports and rproc-serial ports accept data at
+	 * any time. rproc-serial is initialized with guest_connected
+	 * set to false because port_fops_open expects this. Console
+	 * ports are hooked up to an HVC console and are initialized
+	 * with guest_connected set to true.
+	 */
+
+ if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev))
+ discard_port_data(port);
+
+ /* Send a SIGIO indicating new data in case the process asked for it */
+ send_sigio_to_port(port);
+
+ spin_unlock_irqrestore(&port->inbuf_lock, flags);
+
+ wake_up_interruptible(&port->waitqueue);
+
+ if (is_console_port(port) && hvc_poll(port->cons.hvc))
+ hvc_kick();
+}
+
+static void control_intr(struct virtqueue *vq)
+{
+ struct ports_device *portdev;
+
+ portdev = vq->vdev->priv;
+ schedule_work(&portdev->control_work);
+}
+
+static void config_intr(struct virtio_device *vdev)
+{
+ struct ports_device *portdev;
+
+ portdev = vdev->priv;
+
+ if (!use_multiport(portdev))
+ schedule_work(&portdev->config_work);
+}
+
+static void config_work_handler(struct work_struct *work)
+{
+ struct ports_device *portdev;
+
+ portdev = container_of(work, struct ports_device, config_work);
+ if (!use_multiport(portdev)) {
+ struct virtio_device *vdev;
+ struct port *port;
+ u16 rows, cols;
+
+ vdev = portdev->vdev;
+ virtio_cread(vdev, struct virtio_console_config, cols, &cols);
+ virtio_cread(vdev, struct virtio_console_config, rows, &rows);
+
+ port = find_port_by_id(portdev, 0);
+ set_console_size(port, rows, cols);
+
+ /*
+ * We'll use this way of resizing only for legacy
+ * support. For newer userspace
+		 * (VIRTIO_CONSOLE_F_MULTIPORT+), use control messages
+ * to indicate console size changes so that it can be
+ * done per-port.
+ */
+ resize_console(port);
+ }
+}
+
+static int init_vqs(struct ports_device *portdev)
+{
+ vq_callback_t **io_callbacks;
+ char **io_names;
+ struct virtqueue **vqs;
+ u32 i, j, nr_ports, nr_queues;
+ int err;
+
+ nr_ports = portdev->max_nr_ports;
+ nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
+
+ vqs = kmalloc_array(nr_queues, sizeof(struct virtqueue *), GFP_KERNEL);
+ io_callbacks = kmalloc_array(nr_queues, sizeof(vq_callback_t *),
+ GFP_KERNEL);
+ io_names = kmalloc_array(nr_queues, sizeof(char *), GFP_KERNEL);
+ portdev->in_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *),
+ GFP_KERNEL);
+ portdev->out_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *),
+ GFP_KERNEL);
+ if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
+ !portdev->out_vqs) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ /*
+ * For backward compat (newer host but older guest), the host
+ * spawns a console port first and also inits the vqs for port
+ * 0 before others.
+ */
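+	/*
+	 * The resulting layout is: vqs[0]/vqs[1] are port 0's in/out
+	 * queues; with multiport, vqs[2]/vqs[3] are the control in/out
+	 * queues and every further port i gets vqs[2 + 2*i] (in) and
+	 * vqs[3 + 2*i] (out).
+	 */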
+ j = 0;
+ io_callbacks[j] = in_intr;
+ io_callbacks[j + 1] = out_intr;
+ io_names[j] = "input";
+ io_names[j + 1] = "output";
+ j += 2;
+
+ if (use_multiport(portdev)) {
+ io_callbacks[j] = control_intr;
+ io_callbacks[j + 1] = NULL;
+ io_names[j] = "control-i";
+ io_names[j + 1] = "control-o";
+
+ for (i = 1; i < nr_ports; i++) {
+ j += 2;
+ io_callbacks[j] = in_intr;
+ io_callbacks[j + 1] = out_intr;
+ io_names[j] = "input";
+ io_names[j + 1] = "output";
+ }
+ }
+ /* Find the queues. */
+ err = virtio_find_vqs(portdev->vdev, nr_queues, vqs,
+ io_callbacks,
+ (const char **)io_names, NULL);
+ if (err)
+ goto free;
+
+ j = 0;
+ portdev->in_vqs[0] = vqs[0];
+ portdev->out_vqs[0] = vqs[1];
+ j += 2;
+ if (use_multiport(portdev)) {
+ portdev->c_ivq = vqs[j];
+ portdev->c_ovq = vqs[j + 1];
+
+ for (i = 1; i < nr_ports; i++) {
+ j += 2;
+ portdev->in_vqs[i] = vqs[j];
+ portdev->out_vqs[i] = vqs[j + 1];
+ }
+ }
+ kfree(io_names);
+ kfree(io_callbacks);
+ kfree(vqs);
+
+ return 0;
+
+free:
+ kfree(portdev->out_vqs);
+ kfree(portdev->in_vqs);
+ kfree(io_names);
+ kfree(io_callbacks);
+ kfree(vqs);
+
+ return err;
+}
+
+static const struct file_operations portdev_fops = {
+ .owner = THIS_MODULE,
+};
+
+static void remove_vqs(struct ports_device *portdev)
+{
+ struct virtqueue *vq;
+
+ virtio_device_for_each_vq(portdev->vdev, vq) {
+ struct port_buffer *buf;
+
+ flush_bufs(vq, true);
+ while ((buf = virtqueue_detach_unused_buf(vq)))
+ free_buf(buf, true);
+ cond_resched();
+ }
+ portdev->vdev->config->del_vqs(portdev->vdev);
+ kfree(portdev->in_vqs);
+ kfree(portdev->out_vqs);
+}
+
+static void virtcons_remove(struct virtio_device *vdev)
+{
+ struct ports_device *portdev;
+ struct port *port, *port2;
+
+ portdev = vdev->priv;
+
+ spin_lock_irq(&pdrvdata_lock);
+ list_del(&portdev->list);
+ spin_unlock_irq(&pdrvdata_lock);
+
+ /* Device is going away, exit any polling for buffers */
+ virtio_break_device(vdev);
+ if (use_multiport(portdev))
+ flush_work(&portdev->control_work);
+ else
+ flush_work(&portdev->config_work);
+
+ /* Disable interrupts for vqs */
+ virtio_reset_device(vdev);
+ /* Finish up work that's lined up */
+ if (use_multiport(portdev))
+ cancel_work_sync(&portdev->control_work);
+ else
+ cancel_work_sync(&portdev->config_work);
+
+ list_for_each_entry_safe(port, port2, &portdev->ports, list)
+ unplug_port(port);
+
+ unregister_chrdev(portdev->chr_major, "virtio-portsdev");
+
+ /*
+ * When yanking out a device, we immediately lose the
+ * (device-side) queues. So there's no point in keeping the
+ * guest side around till we drop our final reference. This
+ * also means that any ports which are in an open state will
+ * have to just stop using the port, as the vqs are going
+ * away.
+ */
+ remove_vqs(portdev);
+ kfree(portdev);
+}
+
+/*
+ * Once we're further in boot, we get probed like any other virtio
+ * device.
+ *
+ * If the host also supports multiple console ports, we check the
+ * config space to see how many ports the host has spawned. We
+ * initialize each port found.
+ */
+static int virtcons_probe(struct virtio_device *vdev)
+{
+ struct ports_device *portdev;
+ int err;
+ bool multiport;
+ bool early = early_put_chars != NULL;
+
+ /* We only need a config space if features are offered */
+ if (!vdev->config->get &&
+ (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)
+ || virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT))) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+ __func__);
+ return -EINVAL;
+ }
+
+	/* Ensure early_put_chars is read now */
+ barrier();
+
+ portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
+ if (!portdev) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ /* Attach this portdev to this virtio_device, and vice-versa. */
+ portdev->vdev = vdev;
+ vdev->priv = portdev;
+
+ portdev->chr_major = register_chrdev(0, "virtio-portsdev",
+ &portdev_fops);
+ if (portdev->chr_major < 0) {
+ dev_err(&vdev->dev,
+ "Error %d registering chrdev for device %u\n",
+ portdev->chr_major, vdev->index);
+ err = portdev->chr_major;
+ goto free;
+ }
+
+ multiport = false;
+ portdev->max_nr_ports = 1;
+
+ /* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
+ if (!is_rproc_serial(vdev) &&
+ virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
+ struct virtio_console_config, max_nr_ports,
+ &portdev->max_nr_ports) == 0) {
+ if (portdev->max_nr_ports == 0 ||
+ portdev->max_nr_ports > VIRTCONS_MAX_PORTS) {
+ dev_err(&vdev->dev,
+				"Invalid max_nr_ports %d",
+ portdev->max_nr_ports);
+ err = -EINVAL;
+ goto free;
+ }
+ multiport = true;
+ }
+
+ err = init_vqs(portdev);
+ if (err < 0) {
+ dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
+ goto free_chrdev;
+ }
+
+ spin_lock_init(&portdev->ports_lock);
+ INIT_LIST_HEAD(&portdev->ports);
+ INIT_LIST_HEAD(&portdev->list);
+
+ virtio_device_ready(portdev->vdev);
+
+ INIT_WORK(&portdev->config_work, &config_work_handler);
+ INIT_WORK(&portdev->control_work, &control_work_handler);
+
+ if (multiport) {
+ spin_lock_init(&portdev->c_ivq_lock);
+ spin_lock_init(&portdev->c_ovq_lock);
+
+ err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
+ if (err < 0) {
+ dev_err(&vdev->dev,
+ "Error allocating buffers for control queue\n");
+ /*
+ * The host might want to notify mgmt sw about device
+ * add failure.
+ */
+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+ VIRTIO_CONSOLE_DEVICE_READY, 0);
+ /* Device was functional: we need full cleanup. */
+ virtcons_remove(vdev);
+ return err;
+ }
+ } else {
+ /*
+ * For backward compatibility: Create a console port
+		 * if we're running on an older host.
+ */
+ add_port(portdev, 0);
+ }
+
+ spin_lock_irq(&pdrvdata_lock);
+ list_add_tail(&portdev->list, &pdrvdata.portdevs);
+ spin_unlock_irq(&pdrvdata_lock);
+
+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+ VIRTIO_CONSOLE_DEVICE_READY, 1);
+
+ /*
+ * If there was an early virtio console, assume that there are no
+ * other consoles. We need to wait until the hvc_alloc matches the
+ * hvc_instantiate, otherwise tty_open will complain, resulting in
+ * a "Warning: unable to open an initial console" boot failure.
+ * Without multiport this is done in add_port above. With multiport
+ * this might take some host<->guest communication - thus we have to
+ * wait.
+ */
+ if (multiport && early)
+ wait_for_completion(&early_console_added);
+
+ return 0;
+
+free_chrdev:
+ unregister_chrdev(portdev->chr_major, "virtio-portsdev");
+free:
+ kfree(portdev);
+fail:
+ return err;
+}
+
+static const struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+static const unsigned int features[] = {
+ VIRTIO_CONSOLE_F_SIZE,
+ VIRTIO_CONSOLE_F_MULTIPORT,
+};
+
+static const struct virtio_device_id rproc_serial_id_table[] = {
+#if IS_ENABLED(CONFIG_REMOTEPROC)
+ { VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID },
+#endif
+ { 0 },
+};
+MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table);
+
+static const unsigned int rproc_serial_features[] = {
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int virtcons_freeze(struct virtio_device *vdev)
+{
+ struct ports_device *portdev;
+ struct port *port;
+
+ portdev = vdev->priv;
+
+ virtio_reset_device(vdev);
+
+ if (use_multiport(portdev))
+ virtqueue_disable_cb(portdev->c_ivq);
+ cancel_work_sync(&portdev->control_work);
+ cancel_work_sync(&portdev->config_work);
+ /*
+ * Once more: if control_work_handler() was running, it would
+ * enable the cb as the last step.
+ */
+ if (use_multiport(portdev))
+ virtqueue_disable_cb(portdev->c_ivq);
+
+ list_for_each_entry(port, &portdev->ports, list) {
+ virtqueue_disable_cb(port->in_vq);
+ virtqueue_disable_cb(port->out_vq);
+ /*
+ * We'll ask the host later if the new invocation has
+ * the port opened or closed.
+ */
+ port->host_connected = false;
+ remove_port_data(port);
+ }
+ remove_vqs(portdev);
+
+ return 0;
+}
+
+static int virtcons_restore(struct virtio_device *vdev)
+{
+ struct ports_device *portdev;
+ struct port *port;
+ int ret;
+
+ portdev = vdev->priv;
+
+ ret = init_vqs(portdev);
+ if (ret)
+ return ret;
+
+ virtio_device_ready(portdev->vdev);
+
+ if (use_multiport(portdev))
+ fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
+
+ list_for_each_entry(port, &portdev->ports, list) {
+ port->in_vq = portdev->in_vqs[port->id];
+ port->out_vq = portdev->out_vqs[port->id];
+
+ fill_queue(port->in_vq, &port->inbuf_lock);
+
+ /* Get port open/close status on the host */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+
+ /*
+ * If a port was open at the time of suspending, we
+ * have to let the host know that it's still open.
+ */
+ if (port->guest_connected)
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
+ }
+ return 0;
+}
+#endif
+
+static struct virtio_driver virtio_console = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtcons_probe,
+ .remove = virtcons_remove,
+ .config_changed = config_intr,
+#ifdef CONFIG_PM_SLEEP
+ .freeze = virtcons_freeze,
+ .restore = virtcons_restore,
+#endif
+};
+
+static struct virtio_driver virtio_rproc_serial = {
+ .feature_table = rproc_serial_features,
+ .feature_table_size = ARRAY_SIZE(rproc_serial_features),
+ .driver.name = "virtio_rproc_serial",
+ .driver.owner = THIS_MODULE,
+ .id_table = rproc_serial_id_table,
+ .probe = virtcons_probe,
+ .remove = virtcons_remove,
+};
+
+static int __init virtio_console_init(void)
+{
+ int err;
+
+ err = class_register(&port_class);
+ if (err)
+ return err;
+
+ pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
+ INIT_LIST_HEAD(&pdrvdata.consoles);
+ INIT_LIST_HEAD(&pdrvdata.portdevs);
+
+ err = register_virtio_driver(&virtio_console);
+ if (err < 0) {
+ pr_err("Error %d registering virtio driver\n", err);
+ goto free;
+ }
+ err = register_virtio_driver(&virtio_rproc_serial);
+ if (err < 0) {
+ pr_err("Error %d registering virtio rproc serial driver\n",
+ err);
+ goto unregister;
+ }
+ return 0;
+unregister:
+ unregister_virtio_driver(&virtio_console);
+free:
+ debugfs_remove_recursive(pdrvdata.debugfs_dir);
+ class_unregister(&port_class);
+ return err;
+}
+
+static void __exit virtio_console_fini(void)
+{
+ reclaim_dma_bufs();
+
+ unregister_virtio_driver(&virtio_console);
+ unregister_virtio_driver(&virtio_rproc_serial);
+
+ class_unregister(&port_class);
+ debugfs_remove_recursive(pdrvdata.debugfs_dir);
+}
+module_init(virtio_console_init);
+module_exit(virtio_console_fini);
+
+MODULE_DESCRIPTION("Virtio console driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/xilinx_hwicap/Makefile b/drivers/char/xilinx_hwicap/Makefile
new file mode 100644
index 0000000000..cc4513889a
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Xilinx OPB hwicap driver
+#
+
+obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap_m.o
+
+xilinx_hwicap_m-y := xilinx_hwicap.o fifo_icap.o buffer_icap.o
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c
new file mode 100644
index 0000000000..35981cae1a
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/buffer_icap.c
@@ -0,0 +1,361 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * (c) Copyright 2003-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#include "buffer_icap.h"
+
+/* Indicates how many bytes will fit in a buffer. (1 BRAM) */
+#define XHI_MAX_BUFFER_BYTES 2048
+#define XHI_MAX_BUFFER_INTS (XHI_MAX_BUFFER_BYTES >> 2)
+
+/* File access and error constants */
+#define XHI_DEVICE_READ_ERROR -1
+#define XHI_DEVICE_WRITE_ERROR -2
+#define XHI_BUFFER_OVERFLOW_ERROR -3
+
+#define XHI_DEVICE_READ 0x1
+#define XHI_DEVICE_WRITE 0x0
+
+/* Constants for checking transfer status */
+#define XHI_CYCLE_DONE 0
+#define XHI_CYCLE_EXECUTING 1
+
+/* buffer_icap register offsets */
+
+/* Size of transfer, read & write */
+#define XHI_SIZE_REG_OFFSET 0x800L
+/* offset into bram, read & write */
+#define XHI_BRAM_OFFSET_REG_OFFSET 0x804L
+/* Read not Configure, direction of transfer. Write only */
+#define XHI_RNC_REG_OFFSET 0x808L
+/* Indicates transfer complete. Read only */
+#define XHI_STATUS_REG_OFFSET 0x80CL
+
+/* Constants for setting the RNC register */
+#define XHI_CONFIGURE 0x0UL
+#define XHI_READBACK 0x1UL
+
+/* Constants for the Done register */
+#define XHI_NOT_FINISHED 0x0UL
+#define XHI_FINISHED 0x1UL
+
+#define XHI_BUFFER_START 0
+
+/**
+ * buffer_icap_get_status - Get the contents of the status register.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * The status register contains the ICAP status and the done bit.
+ *
+ * D8 - cfgerr
+ * D7 - dalign
+ * D6 - rip
+ * D5 - in_abort_l
+ * D4 - Always 1
+ * D3 - Always 1
+ * D2 - Always 1
+ * D1 - Always 1
+ * D0 - Done bit
+ **/
+u32 buffer_icap_get_status(struct hwicap_drvdata *drvdata)
+{
+ return in_be32(drvdata->base_address + XHI_STATUS_REG_OFFSET);
+}
+
+/**
+ * buffer_icap_get_bram - Reads data from the storage buffer bram.
+ * @base_address: contains the base address of the component.
+ * @offset: The word offset from which the data should be read.
+ *
+ * A bram is used as a configuration memory cache. One frame of data can
+ * be stored in this "storage buffer".
+ **/
+static inline u32 buffer_icap_get_bram(void __iomem *base_address,
+ u32 offset)
+{
+ return in_be32(base_address + (offset << 2));
+}
+
+/**
+ * buffer_icap_busy - Return true if the icap device is busy
+ * @base_address: is the base address of the device
+ *
+ * This queries the low-order bit of the status register, which
+ * indicates whether the current configuration or readback operation
+ * has completed.
+ **/
+static inline bool buffer_icap_busy(void __iomem *base_address)
+{
+ u32 status = in_be32(base_address + XHI_STATUS_REG_OFFSET);
+ return (status & 1) == XHI_NOT_FINISHED;
+}
+
+/**
+ * buffer_icap_set_size - Set the size register.
+ * @base_address: is the base address of the device
+ * @data: The size in bytes.
+ *
+ * The size register holds the number of 8 bit bytes to transfer between
+ * bram and the icap (or icap to bram).
+ **/
+static inline void buffer_icap_set_size(void __iomem *base_address,
+ u32 data)
+{
+ out_be32(base_address + XHI_SIZE_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_offset - Set the bram offset register.
+ * @base_address: contains the base address of the device.
+ * @data: is the value to be written to the data register.
+ *
+ * The bram offset register holds the starting bram address to transfer
+ * data from during configuration or write data to during readback.
+ **/
+static inline void buffer_icap_set_offset(void __iomem *base_address,
+ u32 data)
+{
+ out_be32(base_address + XHI_BRAM_OFFSET_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_rnc - Set the RNC (Readback not Configure) register.
+ * @base_address: contains the base address of the device.
+ * @data: is the value to be written to the data register.
+ *
+ * The RNC register determines the direction of the data transfer. It
+ * controls whether a configuration or readback take place. Writing to
+ * this register initiates the transfer. A value of 1 initiates a
+ * readback while writing a value of 0 initiates a configuration.
+ **/
+static inline void buffer_icap_set_rnc(void __iomem *base_address,
+ u32 data)
+{
+ out_be32(base_address + XHI_RNC_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_bram - Write data to the storage buffer bram.
+ * @base_address: contains the base address of the component.
+ * @offset: The word offset at which the data should be written.
+ * @data: The value to be written to the bram offset.
+ *
+ * A bram is used as a configuration memory cache. One frame of data can
+ * be stored in this "storage buffer".
+ **/
+static inline void buffer_icap_set_bram(void __iomem *base_address,
+ u32 offset, u32 data)
+{
+ out_be32(base_address + (offset << 2), data);
+}
+
+/**
+ * buffer_icap_device_read - Transfer bytes from ICAP to the storage buffer.
+ * @drvdata: a pointer to the drvdata.
+ * @offset: The storage buffer start address.
+ * @count: The number of words (32 bit) to read from the
+ * device (ICAP).
+ **/
+static int buffer_icap_device_read(struct hwicap_drvdata *drvdata,
+ u32 offset, u32 count)
+{
+
+ s32 retries = 0;
+ void __iomem *base_address = drvdata->base_address;
+
+ if (buffer_icap_busy(base_address))
+ return -EBUSY;
+
+ if ((offset + count) > XHI_MAX_BUFFER_INTS)
+ return -EINVAL;
+
+	/* The size register is in bytes, so multiply the word count by 4. */
+ buffer_icap_set_size(base_address, (count << 2));
+ buffer_icap_set_offset(base_address, offset);
+ buffer_icap_set_rnc(base_address, XHI_READBACK);
+
+ while (buffer_icap_busy(base_address)) {
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ return -EBUSY;
+ }
+ return 0;
+
+}
+
+/**
+ * buffer_icap_device_write - Transfer words from the storage buffer to the ICAP.
+ * @drvdata: a pointer to the drvdata.
+ * @offset: The storage buffer start address.
+ * @count: The number of words (32 bit) to write to the
+ *		device (ICAP).
+ **/
+static int buffer_icap_device_write(struct hwicap_drvdata *drvdata,
+ u32 offset, u32 count)
+{
+
+ s32 retries = 0;
+ void __iomem *base_address = drvdata->base_address;
+
+ if (buffer_icap_busy(base_address))
+ return -EBUSY;
+
+ if ((offset + count) > XHI_MAX_BUFFER_INTS)
+ return -EINVAL;
+
+	/* The size register is in bytes, so multiply the word count by 4. */
+ buffer_icap_set_size(base_address, count << 2);
+ buffer_icap_set_offset(base_address, offset);
+ buffer_icap_set_rnc(base_address, XHI_CONFIGURE);
+
+ while (buffer_icap_busy(base_address)) {
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ return -EBUSY;
+ }
+ return 0;
+
+}
+
+/**
+ * buffer_icap_reset - Reset the logic of the icap device.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * Writing to the status register resets the ICAP logic in an internal
+ * version of the core. For the version of the core published in EDK,
+ * this is a noop.
+ **/
+void buffer_icap_reset(struct hwicap_drvdata *drvdata)
+{
+ out_be32(drvdata->base_address + XHI_STATUS_REG_OFFSET, 0xFEFE);
+}
+
+/**
+ * buffer_icap_set_configuration - Load a partial bitstream from system memory.
+ * @drvdata: a pointer to the drvdata.
+ * @data: Kernel address of the partial bitstream.
+ * @size: the size of the partial bitstream in 32 bit words.
+ **/
+int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size)
+{
+ int status;
+ s32 buffer_count = 0;
+ bool dirty = false;
+ u32 i;
+ void __iomem *base_address = drvdata->base_address;
+
+ /* Loop through all the data */
+ for (i = 0, buffer_count = 0; i < size; i++) {
+
+ /* Copy data to bram */
+ buffer_icap_set_bram(base_address, buffer_count, data[i]);
+ dirty = true;
+
+ if (buffer_count < XHI_MAX_BUFFER_INTS - 1) {
+ buffer_count++;
+ continue;
+ }
+
+ /* Write data to ICAP */
+ status = buffer_icap_device_write(
+ drvdata,
+ XHI_BUFFER_START,
+ XHI_MAX_BUFFER_INTS);
+ if (status != 0) {
+ /* abort. */
+ buffer_icap_reset(drvdata);
+ return status;
+ }
+
+ buffer_count = 0;
+ dirty = false;
+ }
+
+ /* Write unwritten data to ICAP */
+ if (dirty) {
+ /* Write data to ICAP */
+ status = buffer_icap_device_write(drvdata, XHI_BUFFER_START,
+ buffer_count);
+ if (status != 0) {
+ /* abort. */
+ buffer_icap_reset(drvdata);
+ }
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * buffer_icap_get_configuration - Read configuration data from the device.
+ * @drvdata: a pointer to the drvdata.
+ * @data: Address of the data representing the partial bitstream
+ * @size: the size of the partial bitstream in 32 bit words.
+ **/
+int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size)
+{
+ int status;
+ s32 buffer_count = 0;
+ u32 i;
+ void __iomem *base_address = drvdata->base_address;
+
+ /* Loop through all the data */
+ for (i = 0, buffer_count = XHI_MAX_BUFFER_INTS; i < size; i++) {
+ if (buffer_count == XHI_MAX_BUFFER_INTS) {
+ u32 words_remaining = size - i;
+ u32 words_to_read =
+ words_remaining <
+ XHI_MAX_BUFFER_INTS ? words_remaining :
+ XHI_MAX_BUFFER_INTS;
+
+ /* Read data from ICAP */
+ status = buffer_icap_device_read(
+ drvdata,
+ XHI_BUFFER_START,
+ words_to_read);
+ if (status != 0) {
+ /* abort. */
+ buffer_icap_reset(drvdata);
+ return status;
+ }
+
+ buffer_count = 0;
+ }
+
+ /* Copy data from bram */
+ data[i] = buffer_icap_get_bram(base_address, buffer_count);
+ buffer_count++;
+ }
+
+ return 0;
+}
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.h b/drivers/char/xilinx_hwicap/buffer_icap.h
new file mode 100644
index 0000000000..d4f419ee87
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/buffer_icap.h
@@ -0,0 +1,54 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * (c) Copyright 2003-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#ifndef XILINX_BUFFER_ICAP_H_ /* prevent circular inclusions */
+#define XILINX_BUFFER_ICAP_H_ /* by using protection macros */
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include "xilinx_hwicap.h"
+
+/* Loads a partial bitstream from system memory. */
+int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 Size);
+
+/* Reads back configuration data from the device into system memory. */
+int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 Size);
+
+u32 buffer_icap_get_status(struct hwicap_drvdata *drvdata);
+void buffer_icap_reset(struct hwicap_drvdata *drvdata);
+
+#endif
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.c b/drivers/char/xilinx_hwicap/fifo_icap.c
new file mode 100644
index 0000000000..619f3a30ec
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/fifo_icap.c
@@ -0,0 +1,393 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * (c) Copyright 2007-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#include "fifo_icap.h"
+
+/* Register offsets for the XHwIcap device. */
+#define XHI_GIER_OFFSET 0x1C /* Device Global Interrupt Enable Reg */
+#define XHI_IPISR_OFFSET 0x20 /* Interrupt Status Register */
+#define XHI_IPIER_OFFSET 0x28 /* Interrupt Enable Register */
+#define XHI_WF_OFFSET 0x100 /* Write FIFO */
+#define XHI_RF_OFFSET 0x104 /* Read FIFO */
+#define XHI_SZ_OFFSET 0x108 /* Size Register */
+#define XHI_CR_OFFSET 0x10C /* Control Register */
+#define XHI_SR_OFFSET 0x110 /* Status Register */
+#define XHI_WFV_OFFSET 0x114 /* Write FIFO Vacancy Register */
+#define XHI_RFO_OFFSET 0x118 /* Read FIFO Occupancy Register */
+
+/* Device Global Interrupt Enable Register (GIER) bit definitions */
+
+#define XHI_GIER_GIE_MASK 0x80000000 /* Global Interrupt enable Mask */
+
+/**
+ * HwIcap Device Interrupt Status/Enable Registers
+ *
+ * Interrupt Status Register (IPISR) : This register holds the
+ * interrupt status flags for the device. These bits toggle on
+ * write.
+ *
+ * Interrupt Enable Register (IPIER) : This register is used to enable
+ * interrupt sources for the device.
+ * Writing a '1' to a bit enables the corresponding interrupt.
+ * Writing a '0' to a bit disables the corresponding interrupt.
+ *
+ * IPISR/IPIER registers have the same bit definitions and are only defined
+ * once.
+ */
+#define XHI_IPIXR_RFULL_MASK 0x00000008 /* Read FIFO Full */
+#define XHI_IPIXR_WEMPTY_MASK 0x00000004 /* Write FIFO Empty */
+#define XHI_IPIXR_RDP_MASK 0x00000002 /* Read FIFO half full */
+#define XHI_IPIXR_WRP_MASK 0x00000001 /* Write FIFO half full */
+#define XHI_IPIXR_ALL_MASK 0x0000000F /* Mask of all interrupts */
+
+/* Control Register (CR) */
+#define XHI_CR_SW_RESET_MASK 0x00000008 /* SW Reset Mask */
+#define XHI_CR_FIFO_CLR_MASK 0x00000004 /* FIFO Clear Mask */
+#define XHI_CR_READ_MASK 0x00000002 /* Read from ICAP to FIFO */
+#define XHI_CR_WRITE_MASK 0x00000001 /* Write from FIFO to ICAP */
+
+
+#define XHI_WFO_MAX_VACANCY 1024 /* Max Write FIFO Vacancy, in words */
+#define XHI_RFO_MAX_OCCUPANCY 256 /* Max Read FIFO Occupancy, in words */
+/* The maximum amount we can request from fifo_icap_get_configuration
+   at once, in words. */
+#define XHI_MAX_READ_TRANSACTION_WORDS 0xFFF
+
+
+/**
+ * fifo_icap_fifo_write - Write data to the write FIFO.
+ * @drvdata: a pointer to the drvdata.
+ * @data: the 32-bit value to be written to the FIFO.
+ *
+ * This function will silently fail if the fifo is full.
+ **/
+static inline void fifo_icap_fifo_write(struct hwicap_drvdata *drvdata,
+ u32 data)
+{
+ dev_dbg(drvdata->dev, "fifo_write: %x\n", data);
+ out_be32(drvdata->base_address + XHI_WF_OFFSET, data);
+}
+
+/**
+ * fifo_icap_fifo_read - Read data from the Read FIFO.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * This function will silently fail if the fifo is empty.
+ **/
+static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata)
+{
+ u32 data = in_be32(drvdata->base_address + XHI_RF_OFFSET);
+ dev_dbg(drvdata->dev, "fifo_read: %x\n", data);
+ return data;
+}
+
+/**
+ * fifo_icap_set_read_size - Set the size register.
+ * @drvdata: a pointer to the drvdata.
+ * @data: the size of the following read transaction, in words.
+ **/
+static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata,
+ u32 data)
+{
+ out_be32(drvdata->base_address + XHI_SZ_OFFSET, data);
+}
+
+/**
+ * fifo_icap_start_config - Initiate a configuration (write) to the device.
+ * @drvdata: a pointer to the drvdata.
+ **/
+static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata)
+{
+ out_be32(drvdata->base_address + XHI_CR_OFFSET, XHI_CR_WRITE_MASK);
+ dev_dbg(drvdata->dev, "configuration started\n");
+}
+
+/**
+ * fifo_icap_start_readback - Initiate a readback from the device.
+ * @drvdata: a pointer to the drvdata.
+ **/
+static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata)
+{
+ out_be32(drvdata->base_address + XHI_CR_OFFSET, XHI_CR_READ_MASK);
+ dev_dbg(drvdata->dev, "readback started\n");
+}
+
+/**
+ * fifo_icap_get_status - Get the contents of the status register.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * The status register contains the ICAP status and the done bit.
+ *
+ * D8 - cfgerr
+ * D7 - dalign
+ * D6 - rip
+ * D5 - in_abort_l
+ * D4 - Always 1
+ * D3 - Always 1
+ * D2 - Always 1
+ * D1 - Always 1
+ * D0 - Done bit
+ **/
+u32 fifo_icap_get_status(struct hwicap_drvdata *drvdata)
+{
+ u32 status = in_be32(drvdata->base_address + XHI_SR_OFFSET);
+ dev_dbg(drvdata->dev, "Getting status = %x\n", status);
+ return status;
+}
+
+/**
+ * fifo_icap_busy - Return true if the ICAP is still processing a transaction.
+ * @drvdata: a pointer to the drvdata.
+ **/
+static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata)
+{
+ u32 status = in_be32(drvdata->base_address + XHI_SR_OFFSET);
+ return (status & XHI_SR_DONE_MASK) ? 0 : 1;
+}
+
+/**
+ * fifo_icap_write_fifo_vacancy - Query the write fifo available space.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * Return the number of words that can be safely pushed into the write fifo.
+ **/
+static inline u32 fifo_icap_write_fifo_vacancy(
+ struct hwicap_drvdata *drvdata)
+{
+ return in_be32(drvdata->base_address + XHI_WFV_OFFSET);
+}
+
+/**
+ * fifo_icap_read_fifo_occupancy - Query the read fifo available data.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * Return the number of words that can be safely read from the read fifo.
+ **/
+static inline u32 fifo_icap_read_fifo_occupancy(
+ struct hwicap_drvdata *drvdata)
+{
+ return in_be32(drvdata->base_address + XHI_RFO_OFFSET);
+}
+
+/**
+ * fifo_icap_set_configuration - Send configuration data to the ICAP.
+ * @drvdata: a pointer to the drvdata.
+ * @frame_buffer: a pointer to the data to be written to the
+ * ICAP device.
+ * @num_words: the number of words (32 bit) to write to the ICAP
+ * device.
+ *
+ * This function writes the given user data to the Write FIFO in
+ * polled mode and starts the transfer of the data to
+ * the ICAP device.
+ **/
+int fifo_icap_set_configuration(struct hwicap_drvdata *drvdata,
+ u32 *frame_buffer, u32 num_words)
+{
+
+ u32 write_fifo_vacancy = 0;
+ u32 retries = 0;
+ u32 remaining_words;
+
+ dev_dbg(drvdata->dev, "fifo_set_configuration\n");
+
+ /*
+ * Check if the ICAP device is Busy with the last Read/Write
+ */
+ if (fifo_icap_busy(drvdata))
+ return -EBUSY;
+
+ /*
+ * Set up the buffer pointer and the words to be transferred.
+ */
+ remaining_words = num_words;
+
+ while (remaining_words > 0) {
+ /*
+		 * Wait until there is some space in the write FIFO.
+ */
+ while (write_fifo_vacancy == 0) {
+ write_fifo_vacancy =
+ fifo_icap_write_fifo_vacancy(drvdata);
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ return -EIO;
+ }
+
+ /*
+ * Write data into the Write FIFO.
+ */
+ while ((write_fifo_vacancy != 0) &&
+ (remaining_words > 0)) {
+ fifo_icap_fifo_write(drvdata, *frame_buffer);
+
+ remaining_words--;
+ write_fifo_vacancy--;
+ frame_buffer++;
+ }
+ /* Start pushing whatever is in the FIFO into the ICAP. */
+ fifo_icap_start_config(drvdata);
+ }
+
+ /* Wait until the write has finished. */
+ while (fifo_icap_busy(drvdata)) {
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ break;
+ }
+
+ dev_dbg(drvdata->dev, "done fifo_set_configuration\n");
+
+ /*
+	 * If the requested number of words has not been written to
+	 * the device, indicate failure.
+ */
+ if (remaining_words != 0)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * fifo_icap_get_configuration - Read configuration data from the device.
+ * @drvdata: a pointer to the drvdata.
+ * @frame_buffer: Address of the buffer that receives the partial bitstream
+ * @num_words: the size of the partial bitstream in 32 bit words.
+ *
+ * This function reads the specified number of words from the ICAP device in
+ * the polled mode.
+ */
+int fifo_icap_get_configuration(struct hwicap_drvdata *drvdata,
+ u32 *frame_buffer, u32 num_words)
+{
+
+ u32 read_fifo_occupancy = 0;
+ u32 retries = 0;
+ u32 *data = frame_buffer;
+ u32 remaining_words;
+ u32 words_to_read;
+
+ dev_dbg(drvdata->dev, "fifo_get_configuration\n");
+
+ /*
+ * Check if the ICAP device is Busy with the last Write/Read
+ */
+ if (fifo_icap_busy(drvdata))
+ return -EBUSY;
+
+ remaining_words = num_words;
+
+ while (remaining_words > 0) {
+ words_to_read = remaining_words;
+ /* The hardware has a limit on the number of words
+ that can be read at one time. */
+ if (words_to_read > XHI_MAX_READ_TRANSACTION_WORDS)
+ words_to_read = XHI_MAX_READ_TRANSACTION_WORDS;
+
+ remaining_words -= words_to_read;
+
+ fifo_icap_set_read_size(drvdata, words_to_read);
+ fifo_icap_start_readback(drvdata);
+
+ while (words_to_read > 0) {
+ /* Wait until we have some data in the fifo. */
+ while (read_fifo_occupancy == 0) {
+ read_fifo_occupancy =
+ fifo_icap_read_fifo_occupancy(drvdata);
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ return -EIO;
+ }
+
+ if (read_fifo_occupancy > words_to_read)
+ read_fifo_occupancy = words_to_read;
+
+ words_to_read -= read_fifo_occupancy;
+
+ /* Read the data from the Read FIFO. */
+ while (read_fifo_occupancy != 0) {
+ *data++ = fifo_icap_fifo_read(drvdata);
+ read_fifo_occupancy--;
+ }
+ }
+ }
+
+ dev_dbg(drvdata->dev, "done fifo_get_configuration\n");
+
+ return 0;
+}
+
+/**
+ * fifo_icap_reset - Reset the logic of the icap device.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * This function forces the software reset of the complete HWICAP device.
+ * All the registers will return to the default value and the FIFO is also
+ * flushed as a part of this software reset.
+ */
+void fifo_icap_reset(struct hwicap_drvdata *drvdata)
+{
+ u32 reg_data;
+ /*
+ * Reset the device by setting/clearing the RESET bit in the
+ * Control Register.
+ */
+ reg_data = in_be32(drvdata->base_address + XHI_CR_OFFSET);
+
+ out_be32(drvdata->base_address + XHI_CR_OFFSET,
+ reg_data | XHI_CR_SW_RESET_MASK);
+
+ out_be32(drvdata->base_address + XHI_CR_OFFSET,
+ reg_data & (~XHI_CR_SW_RESET_MASK));
+
+}
+
+/**
+ * fifo_icap_flush_fifo - This function flushes the FIFOs in the device.
+ * @drvdata: a pointer to the drvdata.
+ */
+void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata)
+{
+ u32 reg_data;
+ /*
+ * Flush the FIFO by setting/clearing the FIFO Clear bit in the
+ * Control Register.
+ */
+ reg_data = in_be32(drvdata->base_address + XHI_CR_OFFSET);
+
+ out_be32(drvdata->base_address + XHI_CR_OFFSET,
+ reg_data | XHI_CR_FIFO_CLR_MASK);
+
+ out_be32(drvdata->base_address + XHI_CR_OFFSET,
+ reg_data & (~XHI_CR_FIFO_CLR_MASK));
+}
+
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.h b/drivers/char/xilinx_hwicap/fifo_icap.h
new file mode 100644
index 0000000000..4c9dd9a3b6
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/fifo_icap.h
@@ -0,0 +1,59 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * (c) Copyright 2007-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#ifndef XILINX_FIFO_ICAP_H_ /* prevent circular inclusions */
+#define XILINX_FIFO_ICAP_H_ /* by using protection macros */
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include "xilinx_hwicap.h"
+
+/* Reads integers from the device into the storage buffer. */
+int fifo_icap_get_configuration(
+ struct hwicap_drvdata *drvdata,
+ u32 *FrameBuffer,
+ u32 NumWords);
+
+/* Writes integers to the device from the storage buffer. */
+int fifo_icap_set_configuration(
+ struct hwicap_drvdata *drvdata,
+ u32 *FrameBuffer,
+ u32 NumWords);
+
+u32 fifo_icap_get_status(struct hwicap_drvdata *drvdata);
+void fifo_icap_reset(struct hwicap_drvdata *drvdata);
+void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata);
+
+#endif
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
new file mode 100644
index 0000000000..f60bb61514
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -0,0 +1,893 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * (c) Copyright 2002 Xilinx Inc., Systems Engineering Group
+ * (c) Copyright 2004 Xilinx Inc., Systems Engineering Group
+ * (c) Copyright 2007-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+/*
+ * This is the code behind /dev/icap* -- it allows a user-space
+ * application to use the Xilinx ICAP subsystem.
+ *
+ * The following operations are possible:
+ *
+ * open open the port and initialize for access.
+ * release release port
+ * write Write a bitstream to the configuration processor.
+ * read Read a data stream from the configuration processor.
+ *
+ * After being opened, the port is initialized and accessed to avoid a
+ * corrupted first read which may occur with some hardware. The port
+ * is left in a desynched state, requiring that a synch sequence be
+ * transmitted before any valid configuration data. A user will have
+ * exclusive access to the device while it remains open, and the state
+ * of the ICAP cannot be guaranteed after the device is closed. Note
+ * that a complete reset of the core and the state of the ICAP cannot
+ * be performed on many versions of the cores, hence users of this
+ * device should avoid making inconsistent accesses to the device. In
+ * particular, accessing the read interface, without first generating
+ * a write containing a readback packet can leave the ICAP in an
+ * inaccessible state.
+ *
+ * Note that in order to use the read interface, it is first necessary
+ * to write a request packet to the write interface. i.e., it is not
+ * possible to simply readback the bitstream (or any configuration
+ * bits) from a device without specifically requesting them first.
+ * The code to craft such packets is intended to be part of the
+ * user-space application code that uses this device. The simplest
+ * way to use this interface is simply:
+ *
+ * cp foo.bit /dev/icap0
+ *
+ * Note that unless foo.bit is an appropriately constructed partial
+ * bitstream, this has a high likelihood of overwriting the design
+ * currently programmed in the FPGA.
+ */
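+
+/*
+ * Purely as an illustration (not part of the driver): a user-space
+ * readback could look roughly like the sketch below, where
+ * readback_request[] is a packet the application has crafted as
+ * described above and result[] receives the returned words:
+ *
+ *	int fd = open("/dev/icap0", O_RDWR);
+ *	write(fd, readback_request, sizeof(readback_request));
+ *	read(fd, result, sizeof(result));
+ *	close(fd);
+ */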
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/sysctl.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_OF
+/* For open firmware. */
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#endif
+
+#include "xilinx_hwicap.h"
+#include "buffer_icap.h"
+#include "fifo_icap.h"
+
+#define DRIVER_NAME "icap"
+
+#define HWICAP_REGS (0x10000)
+
+#define XHWICAP_MAJOR 259
+#define XHWICAP_MINOR 0
+#define HWICAP_DEVICES 1
+
+static DEFINE_MUTEX(hwicap_mutex);
+
+/* An entry is set to true when the corresponding device is registered. */
+static bool probed_devices[HWICAP_DEVICES];
+static struct mutex icap_sem;
+
+static const struct class icap_class = {
+ .name = "xilinx_config",
+};
+
+#define UNIMPLEMENTED 0xFFFF
+
+static const struct config_registers v2_config_registers = {
+ .CRC = 0,
+ .FAR = 1,
+ .FDRI = 2,
+ .FDRO = 3,
+ .CMD = 4,
+ .CTL = 5,
+ .MASK = 6,
+ .STAT = 7,
+ .LOUT = 8,
+ .COR = 9,
+ .MFWR = 10,
+ .FLR = 11,
+ .KEY = 12,
+ .CBC = 13,
+ .IDCODE = 14,
+ .AXSS = UNIMPLEMENTED,
+ .C0R_1 = UNIMPLEMENTED,
+ .CSOB = UNIMPLEMENTED,
+ .WBSTAR = UNIMPLEMENTED,
+ .TIMER = UNIMPLEMENTED,
+ .BOOTSTS = UNIMPLEMENTED,
+ .CTL_1 = UNIMPLEMENTED,
+};
+
+static const struct config_registers v4_config_registers = {
+ .CRC = 0,
+ .FAR = 1,
+ .FDRI = 2,
+ .FDRO = 3,
+ .CMD = 4,
+ .CTL = 5,
+ .MASK = 6,
+ .STAT = 7,
+ .LOUT = 8,
+ .COR = 9,
+ .MFWR = 10,
+ .FLR = UNIMPLEMENTED,
+ .KEY = UNIMPLEMENTED,
+ .CBC = 11,
+ .IDCODE = 12,
+ .AXSS = 13,
+ .C0R_1 = UNIMPLEMENTED,
+ .CSOB = UNIMPLEMENTED,
+ .WBSTAR = UNIMPLEMENTED,
+ .TIMER = UNIMPLEMENTED,
+ .BOOTSTS = UNIMPLEMENTED,
+ .CTL_1 = UNIMPLEMENTED,
+};
+
+static const struct config_registers v5_config_registers = {
+ .CRC = 0,
+ .FAR = 1,
+ .FDRI = 2,
+ .FDRO = 3,
+ .CMD = 4,
+ .CTL = 5,
+ .MASK = 6,
+ .STAT = 7,
+ .LOUT = 8,
+ .COR = 9,
+ .MFWR = 10,
+ .FLR = UNIMPLEMENTED,
+ .KEY = UNIMPLEMENTED,
+ .CBC = 11,
+ .IDCODE = 12,
+ .AXSS = 13,
+ .C0R_1 = 14,
+ .CSOB = 15,
+ .WBSTAR = 16,
+ .TIMER = 17,
+ .BOOTSTS = 18,
+ .CTL_1 = 19,
+};
+
+static const struct config_registers v6_config_registers = {
+ .CRC = 0,
+ .FAR = 1,
+ .FDRI = 2,
+ .FDRO = 3,
+ .CMD = 4,
+ .CTL = 5,
+ .MASK = 6,
+ .STAT = 7,
+ .LOUT = 8,
+ .COR = 9,
+ .MFWR = 10,
+ .FLR = UNIMPLEMENTED,
+ .KEY = UNIMPLEMENTED,
+ .CBC = 11,
+ .IDCODE = 12,
+ .AXSS = 13,
+ .C0R_1 = 14,
+ .CSOB = 15,
+ .WBSTAR = 16,
+ .TIMER = 17,
+ .BOOTSTS = 22,
+ .CTL_1 = 24,
+};
+
+/**
+ * hwicap_command_desync - Send a DESYNC command to the ICAP port.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * Returns: '0' on success and failure value on error
+ *
+ * This command desynchronizes the ICAP. After this command, a
+ * bitstream containing a NULL packet followed by a SYNCH packet is
+ * required before the ICAP will recognize commands.
+ */
+static int hwicap_command_desync(struct hwicap_drvdata *drvdata)
+{
+ u32 buffer[4];
+ u32 index = 0;
+
+ /*
+ * Create the data to be written to the ICAP.
+ */
+ buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1;
+ buffer[index++] = XHI_CMD_DESYNCH;
+ buffer[index++] = XHI_NOOP_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+
+ /*
+ * Write the data to the FIFO and initiate the transfer of data present
+ * in the FIFO to the ICAP device.
+ */
+ return drvdata->config->set_configuration(drvdata,
+ &buffer[0], index);
+}
+
+/**
+ * hwicap_get_configuration_register - Query a configuration register.
+ * @drvdata: a pointer to the drvdata.
+ * @reg: a constant which represents the configuration
+ * register value to be returned.
+ * Examples: XHI_IDCODE, XHI_FLR.
+ * @reg_data: returns the value of the register.
+ *
+ * Returns: '0' on success and failure value on error
+ *
+ * Sends a query packet to the ICAP and then receives the response.
+ * The icap is left in Synched state.
+ */
+static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata,
+ u32 reg, u32 *reg_data)
+{
+ int status;
+ u32 buffer[6];
+ u32 index = 0;
+
+ /*
+ * Create the data to be written to the ICAP.
+ */
+ buffer[index++] = XHI_DUMMY_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+ buffer[index++] = XHI_SYNC_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+
+ /*
+ * Write the data to the FIFO and initiate the transfer of data present
+ * in the FIFO to the ICAP device.
+ */
+ status = drvdata->config->set_configuration(drvdata,
+ &buffer[0], index);
+ if (status)
+ return status;
+
+ /* If the syncword was not found, then we need to start over. */
+ status = drvdata->config->get_status(drvdata);
+ if ((status & XHI_SR_DALIGN_MASK) != XHI_SR_DALIGN_MASK)
+ return -EIO;
+
+ index = 0;
+ buffer[index++] = hwicap_type_1_read(reg) | 1;
+ buffer[index++] = XHI_NOOP_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+
+ /*
+ * Write the data to the FIFO and initiate the transfer of data present
+ * in the FIFO to the ICAP device.
+ */
+ status = drvdata->config->set_configuration(drvdata,
+ &buffer[0], index);
+ if (status)
+ return status;
+
+ /*
+ * Read the configuration register
+ */
+ status = drvdata->config->get_configuration(drvdata, reg_data, 1);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
+{
+ int status;
+ u32 idcode;
+
+ dev_dbg(drvdata->dev, "initializing\n");
+
+ /* Abort any current transaction, to make sure we have the
+ * ICAP in a good state.
+ */
+ dev_dbg(drvdata->dev, "Reset...\n");
+ drvdata->config->reset(drvdata);
+
+ dev_dbg(drvdata->dev, "Desync...\n");
+ status = hwicap_command_desync(drvdata);
+ if (status)
+ return status;
+
+ /* Attempt to read the IDCODE from ICAP. This
+ * may not be returned correctly, due to the design of the
+ * hardware.
+ */
+ dev_dbg(drvdata->dev, "Reading IDCODE...\n");
+ status = hwicap_get_configuration_register(
+ drvdata, drvdata->config_regs->IDCODE, &idcode);
+ dev_dbg(drvdata->dev, "IDCODE = %x\n", idcode);
+ if (status)
+ return status;
+
+ dev_dbg(drvdata->dev, "Desync...\n");
+ status = hwicap_command_desync(drvdata);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static ssize_t
+hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ struct hwicap_drvdata *drvdata = file->private_data;
+ ssize_t bytes_to_read = 0;
+ u32 *kbuf;
+ u32 words;
+ u32 bytes_remaining;
+ int status;
+
+ status = mutex_lock_interruptible(&drvdata->sem);
+ if (status)
+ return status;
+
+ if (drvdata->read_buffer_in_use) {
+ /* If there are leftover bytes in the buffer, just */
+ /* return them and don't try to read more from the */
+ /* ICAP device. */
+ bytes_to_read =
+ (count < drvdata->read_buffer_in_use) ? count :
+ drvdata->read_buffer_in_use;
+
+ /* Return the data currently in the read buffer. */
+ if (copy_to_user(buf, drvdata->read_buffer, bytes_to_read)) {
+ status = -EFAULT;
+ goto error;
+ }
+ drvdata->read_buffer_in_use -= bytes_to_read;
+ memmove(drvdata->read_buffer,
+ drvdata->read_buffer + bytes_to_read,
+ 4 - bytes_to_read);
+ } else {
+ /* Get new data from the ICAP, and return what was requested. */
+ kbuf = (u32 *) get_zeroed_page(GFP_KERNEL);
+ if (!kbuf) {
+ status = -ENOMEM;
+ goto error;
+ }
+
+ /* The ICAP device is only able to read complete */
+ /* words. If a number of bytes that do not correspond */
+ /* to complete words is requested, then we read enough */
+ /* words to get the required number of bytes, and then */
+ /* save the remaining bytes for the next read. */
+
+ /* Determine the number of words to read, rounding up */
+ /* if necessary. */
+ words = ((count + 3) >> 2);
+ bytes_to_read = words << 2;
+
+ if (bytes_to_read > PAGE_SIZE)
+ bytes_to_read = PAGE_SIZE;
+
+ /* Ensure we only read a complete number of words. */
+ bytes_remaining = bytes_to_read & 3;
+ bytes_to_read &= ~3;
+ words = bytes_to_read >> 2;
+
+ status = drvdata->config->get_configuration(drvdata,
+ kbuf, words);
+
+ /* If we didn't read correctly, then bail out. */
+ if (status) {
+ free_page((unsigned long)kbuf);
+ goto error;
+ }
+
+ /* If we fail to return the data to the user, then bail out. */
+ if (copy_to_user(buf, kbuf, bytes_to_read)) {
+ free_page((unsigned long)kbuf);
+ status = -EFAULT;
+ goto error;
+ }
+ memcpy(drvdata->read_buffer,
+ kbuf,
+ bytes_remaining);
+ drvdata->read_buffer_in_use = bytes_remaining;
+ free_page((unsigned long)kbuf);
+ }
+ status = bytes_to_read;
+ error:
+ mutex_unlock(&drvdata->sem);
+ return status;
+}
+
+static ssize_t
+hwicap_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hwicap_drvdata *drvdata = file->private_data;
+ ssize_t written = 0;
+ ssize_t left = count;
+ u32 *kbuf;
+ ssize_t len;
+ ssize_t status;
+
+ status = mutex_lock_interruptible(&drvdata->sem);
+ if (status)
+ return status;
+
+ left += drvdata->write_buffer_in_use;
+
+ /* Only write multiples of 4 bytes. */
+ if (left < 4) {
+ status = 0;
+ goto error;
+ }
+
+ kbuf = (u32 *) __get_free_page(GFP_KERNEL);
+ if (!kbuf) {
+ status = -ENOMEM;
+ goto error;
+ }
+
+ while (left > 3) {
+ /* only write multiples of 4 bytes, so there might */
+ /* be as many as 3 bytes left (at the end). */
+ len = left;
+
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ len &= ~3;
+
+ if (drvdata->write_buffer_in_use) {
+ memcpy(kbuf, drvdata->write_buffer,
+ drvdata->write_buffer_in_use);
+ if (copy_from_user(
+ (((char *)kbuf) + drvdata->write_buffer_in_use),
+ buf + written,
+ len - (drvdata->write_buffer_in_use))) {
+ free_page((unsigned long)kbuf);
+ status = -EFAULT;
+ goto error;
+ }
+ } else {
+ if (copy_from_user(kbuf, buf + written, len)) {
+ free_page((unsigned long)kbuf);
+ status = -EFAULT;
+ goto error;
+ }
+ }
+
+ status = drvdata->config->set_configuration(drvdata,
+ kbuf, len >> 2);
+
+ if (status) {
+ free_page((unsigned long)kbuf);
+ status = -EFAULT;
+ goto error;
+ }
+ if (drvdata->write_buffer_in_use) {
+ len -= drvdata->write_buffer_in_use;
+ left -= drvdata->write_buffer_in_use;
+ drvdata->write_buffer_in_use = 0;
+ }
+ written += len;
+ left -= len;
+ }
+ if ((left > 0) && (left < 4)) {
+ if (!copy_from_user(drvdata->write_buffer,
+ buf + written, left)) {
+ drvdata->write_buffer_in_use = left;
+ written += left;
+ left = 0;
+ }
+ }
+
+ free_page((unsigned long)kbuf);
+ status = written;
+ error:
+ mutex_unlock(&drvdata->sem);
+ return status;
+}
+
+static int hwicap_open(struct inode *inode, struct file *file)
+{
+ struct hwicap_drvdata *drvdata;
+ int status;
+
+ mutex_lock(&hwicap_mutex);
+ drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev);
+
+ status = mutex_lock_interruptible(&drvdata->sem);
+ if (status)
+ goto out;
+
+ if (drvdata->is_open) {
+ status = -EBUSY;
+ goto error;
+ }
+
+ status = hwicap_initialize_hwicap(drvdata);
+ if (status) {
+ dev_err(drvdata->dev, "Failed to open file");
+ goto error;
+ }
+
+ file->private_data = drvdata;
+ drvdata->write_buffer_in_use = 0;
+ drvdata->read_buffer_in_use = 0;
+ drvdata->is_open = 1;
+
+ error:
+ mutex_unlock(&drvdata->sem);
+ out:
+ mutex_unlock(&hwicap_mutex);
+ return status;
+}
+
+static int hwicap_release(struct inode *inode, struct file *file)
+{
+ struct hwicap_drvdata *drvdata = file->private_data;
+ int i;
+ int status = 0;
+
+ mutex_lock(&drvdata->sem);
+
+ if (drvdata->write_buffer_in_use) {
+ /* Flush write buffer. */
+ for (i = drvdata->write_buffer_in_use; i < 4; i++)
+ drvdata->write_buffer[i] = 0;
+
+ status = drvdata->config->set_configuration(drvdata,
+ (u32 *) drvdata->write_buffer, 1);
+ if (status)
+ goto error;
+ }
+
+ status = hwicap_command_desync(drvdata);
+ if (status)
+ goto error;
+
+ error:
+ drvdata->is_open = 0;
+ mutex_unlock(&drvdata->sem);
+ return status;
+}
+
+static const struct file_operations hwicap_fops = {
+ .owner = THIS_MODULE,
+ .write = hwicap_write,
+ .read = hwicap_read,
+ .open = hwicap_open,
+ .release = hwicap_release,
+ .llseek = noop_llseek,
+};
+
+static int hwicap_setup(struct device *dev, int id,
+ const struct resource *regs_res,
+ const struct hwicap_driver_config *config,
+ const struct config_registers *config_regs)
+{
+ dev_t devt;
+ struct hwicap_drvdata *drvdata = NULL;
+ int retval = 0;
+
+ dev_info(dev, "Xilinx icap port driver\n");
+
+ mutex_lock(&icap_sem);
+
+ if (id < 0) {
+ for (id = 0; id < HWICAP_DEVICES; id++)
+ if (!probed_devices[id])
+ break;
+ }
+ if (id < 0 || id >= HWICAP_DEVICES) {
+ mutex_unlock(&icap_sem);
+ dev_err(dev, "%s%i too large\n", DRIVER_NAME, id);
+ return -EINVAL;
+ }
+ if (probed_devices[id]) {
+ mutex_unlock(&icap_sem);
+ dev_err(dev, "cannot assign to %s%i; it is already in use\n",
+ DRIVER_NAME, id);
+ return -EBUSY;
+ }
+
+ probed_devices[id] = 1;
+ mutex_unlock(&icap_sem);
+
+ devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR + id);
+
+ drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ retval = -ENOMEM;
+ goto failed0;
+ }
+ dev_set_drvdata(dev, (void *)drvdata);
+
+ if (!regs_res) {
+ dev_err(dev, "Couldn't get registers resource\n");
+ retval = -EFAULT;
+ goto failed1;
+ }
+
+ drvdata->mem_start = regs_res->start;
+ drvdata->mem_end = regs_res->end;
+ drvdata->mem_size = resource_size(regs_res);
+
+ if (!request_mem_region(drvdata->mem_start,
+ drvdata->mem_size, DRIVER_NAME)) {
+ dev_err(dev, "Couldn't lock memory region at %Lx\n",
+ (unsigned long long) regs_res->start);
+ retval = -EBUSY;
+ goto failed1;
+ }
+
+ drvdata->devt = devt;
+ drvdata->dev = dev;
+ drvdata->base_address = ioremap(drvdata->mem_start, drvdata->mem_size);
+ if (!drvdata->base_address) {
+ dev_err(dev, "ioremap() failed\n");
+ retval = -ENOMEM;
+ goto failed2;
+ }
+
+ drvdata->config = config;
+ drvdata->config_regs = config_regs;
+
+ mutex_init(&drvdata->sem);
+ drvdata->is_open = 0;
+
+ dev_info(dev, "ioremap %llx to %p with size %llx\n",
+ (unsigned long long) drvdata->mem_start,
+ drvdata->base_address,
+ (unsigned long long) drvdata->mem_size);
+
+ cdev_init(&drvdata->cdev, &hwicap_fops);
+ drvdata->cdev.owner = THIS_MODULE;
+ retval = cdev_add(&drvdata->cdev, devt, 1);
+ if (retval) {
+ dev_err(dev, "cdev_add() failed\n");
+ goto failed3;
+ }
+
+ device_create(&icap_class, dev, devt, NULL, "%s%d", DRIVER_NAME, id);
+ return 0; /* success */
+
+ failed3:
+ iounmap(drvdata->base_address);
+
+ failed2:
+ release_mem_region(regs_res->start, drvdata->mem_size);
+
+ failed1:
+ kfree(drvdata);
+
+ failed0:
+ mutex_lock(&icap_sem);
+ probed_devices[id] = 0;
+ mutex_unlock(&icap_sem);
+
+ return retval;
+}
+
+static struct hwicap_driver_config buffer_icap_config = {
+ .get_configuration = buffer_icap_get_configuration,
+ .set_configuration = buffer_icap_set_configuration,
+ .get_status = buffer_icap_get_status,
+ .reset = buffer_icap_reset,
+};
+
+static struct hwicap_driver_config fifo_icap_config = {
+ .get_configuration = fifo_icap_get_configuration,
+ .set_configuration = fifo_icap_set_configuration,
+ .get_status = fifo_icap_get_status,
+ .reset = fifo_icap_reset,
+};
+
+#ifdef CONFIG_OF
+static int hwicap_of_probe(struct platform_device *op,
+ const struct hwicap_driver_config *config)
+{
+ struct resource res;
+ const unsigned int *id;
+ const char *family;
+ int rc;
+ const struct config_registers *regs;
+
+ rc = of_address_to_resource(op->dev.of_node, 0, &res);
+ if (rc) {
+ dev_err(&op->dev, "invalid address\n");
+ return rc;
+ }
+
+ id = of_get_property(op->dev.of_node, "port-number", NULL);
+
+	/* If the family is not specified, assume Virtex-4, as that is
+	 * the most likely case.
+	 */
+ regs = &v4_config_registers;
+ family = of_get_property(op->dev.of_node, "xlnx,family", NULL);
+
+ if (family) {
+ if (!strcmp(family, "virtex2p"))
+ regs = &v2_config_registers;
+ else if (!strcmp(family, "virtex4"))
+ regs = &v4_config_registers;
+ else if (!strcmp(family, "virtex5"))
+ regs = &v5_config_registers;
+ else if (!strcmp(family, "virtex6"))
+ regs = &v6_config_registers;
+ }
+ return hwicap_setup(&op->dev, id ? *id : -1, &res, config,
+ regs);
+}
+#else
+static inline int hwicap_of_probe(struct platform_device *op,
+ const struct hwicap_driver_config *config)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
+static const struct of_device_id hwicap_of_match[];
+static int hwicap_drv_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct resource *res;
+ const struct config_registers *regs;
+ const char *family;
+
+ match = of_match_device(hwicap_of_match, &pdev->dev);
+ if (match)
+ return hwicap_of_probe(pdev, match->data);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+	/* If the family is not specified, assume Virtex-4, as that is
+	 * the most likely case.
+	 */
+ regs = &v4_config_registers;
+ family = pdev->dev.platform_data;
+
+ if (family) {
+ if (!strcmp(family, "virtex2p"))
+ regs = &v2_config_registers;
+ else if (!strcmp(family, "virtex4"))
+ regs = &v4_config_registers;
+ else if (!strcmp(family, "virtex5"))
+ regs = &v5_config_registers;
+ else if (!strcmp(family, "virtex6"))
+ regs = &v6_config_registers;
+ }
+
+ return hwicap_setup(&pdev->dev, pdev->id, res,
+ &buffer_icap_config, regs);
+}
+
+static void hwicap_drv_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hwicap_drvdata *drvdata;
+
+ drvdata = dev_get_drvdata(dev);
+
+ device_destroy(&icap_class, drvdata->devt);
+ cdev_del(&drvdata->cdev);
+ iounmap(drvdata->base_address);
+ release_mem_region(drvdata->mem_start, drvdata->mem_size);
+ kfree(drvdata);
+
+ mutex_lock(&icap_sem);
+ probed_devices[MINOR(dev->devt)-XHWICAP_MINOR] = 0;
+ mutex_unlock(&icap_sem);
+}
+
+#ifdef CONFIG_OF
+/* Match table for device tree binding */
+static const struct of_device_id hwicap_of_match[] = {
+ { .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config},
+ { .compatible = "xlnx,xps-hwicap-1.00.a", .data = &fifo_icap_config},
+ {},
+};
+MODULE_DEVICE_TABLE(of, hwicap_of_match);
+#else
+#define hwicap_of_match NULL
+#endif
+
+static struct platform_driver hwicap_platform_driver = {
+ .probe = hwicap_drv_probe,
+ .remove_new = hwicap_drv_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = hwicap_of_match,
+ },
+};
+
+static int __init hwicap_module_init(void)
+{
+ dev_t devt;
+ int retval;
+
+ retval = class_register(&icap_class);
+ if (retval)
+ return retval;
+ mutex_init(&icap_sem);
+
+ devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR);
+ retval = register_chrdev_region(devt,
+ HWICAP_DEVICES,
+ DRIVER_NAME);
+ if (retval < 0)
+ return retval;
+
+ retval = platform_driver_register(&hwicap_platform_driver);
+ if (retval)
+ goto failed;
+
+ return retval;
+
+ failed:
+ unregister_chrdev_region(devt, HWICAP_DEVICES);
+
+ return retval;
+}
+
+static void __exit hwicap_module_cleanup(void)
+{
+ dev_t devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR);
+
+ class_unregister(&icap_class);
+
+ platform_driver_unregister(&hwicap_platform_driver);
+
+ unregister_chrdev_region(devt, HWICAP_DEVICES);
+}
+
+module_init(hwicap_module_init);
+module_exit(hwicap_module_cleanup);
+
+MODULE_AUTHOR("Xilinx, Inc; Xilinx Research Labs Group");
+MODULE_DESCRIPTION("Xilinx ICAP Port Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
new file mode 100644
index 0000000000..6b963d1c8b
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
@@ -0,0 +1,224 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * (c) Copyright 2003-2007 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#ifndef XILINX_HWICAP_H_ /* prevent circular inclusions */
+#define XILINX_HWICAP_H_ /* by using protection macros */
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+
+#include <linux/io.h>
+
+struct hwicap_drvdata {
+ u32 write_buffer_in_use; /* Always in [0,3] */
+ u8 write_buffer[4];
+ u32 read_buffer_in_use; /* Always in [0,3] */
+ u8 read_buffer[4];
+ resource_size_t mem_start;/* phys. address of the control registers */
+ resource_size_t mem_end; /* phys. address of the control registers */
+ resource_size_t mem_size;
+ void __iomem *base_address;/* virt. address of the control registers */
+
+ struct device *dev;
+ struct cdev cdev; /* Char device structure */
+ dev_t devt;
+
+ const struct hwicap_driver_config *config;
+ const struct config_registers *config_regs;
+ void *private_data;
+ bool is_open;
+ struct mutex sem;
+};
+
+struct hwicap_driver_config {
+ /* Read configuration data given by size into the data buffer.
+ * Return 0 if successful.
+ */
+ int (*get_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size);
+ /* Write configuration data given by size from the data buffer.
+ * Return 0 if successful.
+ */
+ int (*set_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size);
+ /* Get the status register, bit pattern given by:
+ * D8 - 0 = configuration error
+ * D7 - 1 = alignment found
+ * D6 - 1 = readback in progress
+ * D5 - 0 = abort in progress
+ * D4 - Always 1
+ * D3 - Always 1
+ * D2 - Always 1
+ * D1 - Always 1
+ * D0 - 1 = operation completed
+ */
+ u32 (*get_status)(struct hwicap_drvdata *drvdata);
+ /* Reset the hw */
+ void (*reset)(struct hwicap_drvdata *drvdata);
+};
+
+/* Number of times to poll the done register. This has to be large
+ * enough to allow an entire configuration to complete. If an entire
+ * page (4kb) is configured at once, that could take up to 4k cycles
+ * with a byte-wide icap interface. In most cases, this driver is
+ * used with a much smaller fifo, but this should be sufficient in the
+ * worst case.
+ */
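+/* i.e. a 4096-byte page at one byte per cycle is about 4k polls, so 5000 leaves some margin. */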
+#define XHI_MAX_RETRIES 5000
+
+/************ Constant Definitions *************/
+
+#define XHI_PAD_FRAMES 0x1
+
+/* Mask for calculating configuration packet headers */
+#define XHI_WORD_COUNT_MASK_TYPE_1 0x7FFUL
+#define XHI_WORD_COUNT_MASK_TYPE_2 0x1FFFFFUL
+#define XHI_TYPE_MASK 0x7
+#define XHI_REGISTER_MASK 0xF
+#define XHI_OP_MASK 0x3
+
+#define XHI_TYPE_SHIFT 29
+#define XHI_REGISTER_SHIFT 13
+#define XHI_OP_SHIFT 27
+
+#define XHI_TYPE_1 1
+#define XHI_TYPE_2 2
+#define XHI_OP_WRITE 2
+#define XHI_OP_READ 1
+
+/* Address Block Types */
+#define XHI_FAR_CLB_BLOCK 0
+#define XHI_FAR_BRAM_BLOCK 1
+#define XHI_FAR_BRAM_INT_BLOCK 2
+
+struct config_registers {
+ u32 CRC;
+ u32 FAR;
+ u32 FDRI;
+ u32 FDRO;
+ u32 CMD;
+ u32 CTL;
+ u32 MASK;
+ u32 STAT;
+ u32 LOUT;
+ u32 COR;
+ u32 MFWR;
+ u32 FLR;
+ u32 KEY;
+ u32 CBC;
+ u32 IDCODE;
+ u32 AXSS;
+ u32 C0R_1;
+ u32 CSOB;
+ u32 WBSTAR;
+ u32 TIMER;
+ u32 BOOTSTS;
+ u32 CTL_1;
+};
+
+/* Configuration Commands */
+#define XHI_CMD_NULL 0
+#define XHI_CMD_WCFG 1
+#define XHI_CMD_MFW 2
+#define XHI_CMD_DGHIGH 3
+#define XHI_CMD_RCFG 4
+#define XHI_CMD_START 5
+#define XHI_CMD_RCAP 6
+#define XHI_CMD_RCRC 7
+#define XHI_CMD_AGHIGH 8
+#define XHI_CMD_SWITCH 9
+#define XHI_CMD_GRESTORE 10
+#define XHI_CMD_SHUTDOWN 11
+#define XHI_CMD_GCAPTURE 12
+#define XHI_CMD_DESYNCH 13
+#define XHI_CMD_IPROG 15 /* Only in Virtex5 */
+#define XHI_CMD_CRCC 16 /* Only in Virtex5 */
+#define XHI_CMD_LTIMER 17 /* Only in Virtex5 */
+
+/* Packet constants */
+#define XHI_SYNC_PACKET 0xAA995566UL
+#define XHI_DUMMY_PACKET 0xFFFFFFFFUL
+#define XHI_NOOP_PACKET (XHI_TYPE_1 << XHI_TYPE_SHIFT)
+#define XHI_TYPE_2_READ ((XHI_TYPE_2 << XHI_TYPE_SHIFT) | \
+ (XHI_OP_READ << XHI_OP_SHIFT))
+
+#define XHI_TYPE_2_WRITE ((XHI_TYPE_2 << XHI_TYPE_SHIFT) | \
+ (XHI_OP_WRITE << XHI_OP_SHIFT))
+
+#define XHI_TYPE2_CNT_MASK 0x07FFFFFF
+
+#define XHI_TYPE_1_PACKET_MAX_WORDS 2047UL
+#define XHI_TYPE_1_HEADER_BYTES 4
+#define XHI_TYPE_2_HEADER_BYTES 8
+
+/* Constant to use for CRC check when CRC has been disabled */
+#define XHI_DISABLED_AUTO_CRC 0x0000DEFCUL
+
+/* Meanings of the bits returned by get_status */
+#define XHI_SR_CFGERR_N_MASK 0x00000100 /* Config Error Mask */
+#define XHI_SR_DALIGN_MASK 0x00000080 /* Data Alignment Mask */
+#define XHI_SR_RIP_MASK 0x00000040 /* Read back Mask */
+#define XHI_SR_IN_ABORT_N_MASK 0x00000020 /* Select Map Abort Mask */
+#define XHI_SR_DONE_MASK 0x00000001 /* Done bit Mask */
+
+/**
+ * hwicap_type_1_read - Generates a Type 1 read packet header.
+ * @reg: is the address of the register to be read back.
+ *
+ * Return:
+ * Generates a Type 1 read packet header, which is used to indirectly
+ * read registers in the configuration logic. This packet must then
+ * be sent through the icap device, and a return packet received with
+ * the information.
+ */
+static inline u32 hwicap_type_1_read(u32 reg)
+{
+ return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
+ (reg << XHI_REGISTER_SHIFT) |
+ (XHI_OP_READ << XHI_OP_SHIFT);
+}
+
+/**
+ * hwicap_type_1_write - Generates a Type 1 write packet header
+ * @reg: is the address of the register to be written to.
+ *
+ * Return: Type 1 write packet header
+ */
+static inline u32 hwicap_type_1_write(u32 reg)
+{
+ return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
+ (reg << XHI_REGISTER_SHIFT) |
+ (XHI_OP_WRITE << XHI_OP_SHIFT);
+}
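+
+/*
+ * Illustrative note (derived from the shift and mask constants above, not
+ * from any external specification): both helpers place the packet type in
+ * bits [31:29], the opcode in bits [28:27] and the register address from
+ * bit 13 upwards, leaving the low-order word count bits clear. A caller
+ * that needs a word count would OR it in afterwards, e.g. a hypothetical
+ * one-word read header:
+ *
+ *   hdr = hwicap_type_1_read(reg) | (1 & XHI_WORD_COUNT_MASK_TYPE_1);
+ */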
+
+#endif
diff --git a/drivers/char/xillybus/Kconfig b/drivers/char/xillybus/Kconfig
new file mode 100644
index 0000000000..f51d533390
--- /dev/null
+++ b/drivers/char/xillybus/Kconfig
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Xillybus devices
+#
+
+config XILLYBUS_CLASS
+ tristate
+
+config XILLYBUS
+ tristate "Xillybus generic FPGA interface"
+ depends on PCI || OF
+ select CRC32
+ select XILLYBUS_CLASS
+ help
+ Xillybus is a generic interface for peripherals designed on
+ programmable logic (FPGA). The driver probes the hardware for
+ its capabilities, and creates device files accordingly.
+
+ If unsure, say N.
+
+if XILLYBUS
+
+config XILLYBUS_PCIE
+ tristate "Xillybus over PCIe"
+ depends on PCI_MSI
+ help
+ Set to M if you want Xillybus to use PCI Express for communicating
+ with the FPGA. The module will be called xillybus_pcie.
+
+config XILLYBUS_OF
+ tristate "Xillybus over Device Tree"
+ depends on OF && HAS_DMA && HAS_IOMEM
+ help
+ Set to M if you want Xillybus to find its resources from the
+ Open Firmware Flattened Device Tree. If the target is an embedded
+ system, say M. The module will be called xillybus_of.
+
+endif # if XILLYBUS
+
+# XILLYUSB doesn't depend on XILLYBUS
+
+config XILLYUSB
+ tristate "XillyUSB: Xillybus generic FPGA interface for USB"
+ depends on USB
+ select CRC32
+ select XILLYBUS_CLASS
+ help
+ XillyUSB is the Xillybus variant which uses USB for communicating
+ with the FPGA.
+
+ Set to M if you want Xillybus to use USB for communicating with
+ the FPGA. The module will be called xillyusb.
diff --git a/drivers/char/xillybus/Makefile b/drivers/char/xillybus/Makefile
new file mode 100644
index 0000000000..16f31d0320
--- /dev/null
+++ b/drivers/char/xillybus/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for Xillybus driver
+#
+
+obj-$(CONFIG_XILLYBUS_CLASS) += xillybus_class.o
+obj-$(CONFIG_XILLYBUS) += xillybus_core.o
+obj-$(CONFIG_XILLYBUS_PCIE) += xillybus_pcie.o
+obj-$(CONFIG_XILLYBUS_OF) += xillybus_of.o
+obj-$(CONFIG_XILLYUSB) += xillyusb.o
diff --git a/drivers/char/xillybus/xillybus.h b/drivers/char/xillybus/xillybus.h
new file mode 100644
index 0000000000..51de7cbc57
--- /dev/null
+++ b/drivers/char/xillybus/xillybus.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/drivers/misc/xillybus.h
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Header file for the Xillybus FPGA/host framework.
+ */
+
+#ifndef __XILLYBUS_H
+#define __XILLYBUS_H
+
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/cdev.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+struct xilly_endpoint_hardware;
+
+struct xilly_buffer {
+ void *addr;
+ dma_addr_t dma_addr;
+ int end_offset; /* Counting elements, not bytes */
+};
+
+struct xilly_idt_handle {
+ unsigned char *chandesc;
+ unsigned char *names;
+ int names_len;
+ int entries;
+};
+
+/*
+ * Read-write confusion: wr_* and rd_* notation sticks to FPGA view, so
+ * wr_* buffers are those consumed by read(), since the FPGA writes to them
+ * and vice versa.
+ */
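+
+/*
+ * Concretely: data produced by the FPGA lands in wr_buffers and is copied
+ * to user space by the driver's read() method, while data written by the
+ * host is staged in rd_buffers and submitted to the FPGA on flush.
+ */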
+
+struct xilly_channel {
+ struct xilly_endpoint *endpoint;
+ int chan_num;
+ int log2_element_size;
+ int seekable;
+
+ struct xilly_buffer **wr_buffers; /* FPGA writes, driver reads! */
+ int num_wr_buffers;
+ unsigned int wr_buf_size; /* In bytes */
+ int wr_fpga_buf_idx;
+ int wr_host_buf_idx;
+ int wr_host_buf_pos;
+ int wr_empty;
+ int wr_ready; /* Significant only when wr_empty == 1 */
+ int wr_sleepy;
+ int wr_eof;
+ int wr_hangup;
+ spinlock_t wr_spinlock;
+ struct mutex wr_mutex;
+ wait_queue_head_t wr_wait;
+ wait_queue_head_t wr_ready_wait;
+ int wr_ref_count;
+ int wr_synchronous;
+ int wr_allow_partial;
+ int wr_exclusive_open;
+ int wr_supports_nonempty;
+
+ struct xilly_buffer **rd_buffers; /* FPGA reads, driver writes! */
+ int num_rd_buffers;
+ unsigned int rd_buf_size; /* In bytes */
+ int rd_fpga_buf_idx;
+ int rd_host_buf_pos;
+ int rd_host_buf_idx;
+ int rd_full;
+ spinlock_t rd_spinlock;
+ struct mutex rd_mutex;
+ wait_queue_head_t rd_wait;
+ int rd_ref_count;
+ int rd_allow_partial;
+ int rd_synchronous;
+ int rd_exclusive_open;
+ struct delayed_work rd_workitem;
+ unsigned char rd_leftovers[4];
+};
+
+struct xilly_endpoint {
+ struct device *dev;
+ struct module *owner;
+
+ int dma_using_dac; /* =1 if 64-bit DMA is used, =0 otherwise. */
+ __iomem void *registers;
+ int fatal_error;
+
+ struct mutex register_mutex;
+ wait_queue_head_t ep_wait;
+
+ int num_channels; /* EXCLUDING message buffer */
+ struct xilly_channel **channels;
+ int msg_counter;
+ int failed_messages;
+ int idtlen;
+
+ u32 *msgbuf_addr;
+ dma_addr_t msgbuf_dma_addr;
+ unsigned int msg_buf_size;
+};
+
+struct xilly_mapping {
+ struct device *device;
+ dma_addr_t dma_addr;
+ size_t size;
+ int direction;
+};
+
+irqreturn_t xillybus_isr(int irq, void *data);
+
+struct xilly_endpoint *xillybus_init_endpoint(struct device *dev);
+
+int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint);
+
+void xillybus_endpoint_remove(struct xilly_endpoint *endpoint);
+
+#endif /* __XILLYBUS_H */
diff --git a/drivers/char/xillybus/xillybus_class.c b/drivers/char/xillybus/xillybus_class.c
new file mode 100644
index 0000000000..c92a628e38
--- /dev/null
+++ b/drivers/char/xillybus/xillybus_class.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the Xillybus class
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include "xillybus_class.h"
+
+MODULE_DESCRIPTION("Driver for Xillybus class");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_ALIAS("xillybus_class");
+MODULE_LICENSE("GPL v2");
+
+static DEFINE_MUTEX(unit_mutex);
+static LIST_HEAD(unit_list);
+static const struct class xillybus_class = {
+ .name = "xillybus",
+};
+
+#define UNITNAMELEN 16
+
+struct xilly_unit {
+ struct list_head list_entry;
+ void *private_data;
+
+ struct cdev *cdev;
+ char name[UNITNAMELEN];
+ int major;
+ int lowest_minor;
+ int num_nodes;
+};
+
+int xillybus_init_chrdev(struct device *dev,
+ const struct file_operations *fops,
+ struct module *owner,
+ void *private_data,
+ unsigned char *idt, unsigned int len,
+ int num_nodes,
+ const char *prefix, bool enumerate)
+{
+ int rc;
+ dev_t mdev;
+ int i;
+ char devname[48];
+
+ struct device *device;
+ size_t namelen;
+ struct xilly_unit *unit, *u;
+
+ unit = kzalloc(sizeof(*unit), GFP_KERNEL);
+
+ if (!unit)
+ return -ENOMEM;
+
+ mutex_lock(&unit_mutex);
+
+ if (!enumerate)
+ snprintf(unit->name, UNITNAMELEN, "%s", prefix);
+
+ for (i = 0; enumerate; i++) {
+ snprintf(unit->name, UNITNAMELEN, "%s_%02d",
+ prefix, i);
+
+ enumerate = false;
+ list_for_each_entry(u, &unit_list, list_entry)
+ if (!strcmp(unit->name, u->name)) {
+ enumerate = true;
+ break;
+ }
+ }
+
+ rc = alloc_chrdev_region(&mdev, 0, num_nodes, unit->name);
+
+ if (rc) {
+ dev_warn(dev, "Failed to obtain major/minors");
+ goto fail_obtain;
+ }
+
+ unit->major = MAJOR(mdev);
+ unit->lowest_minor = MINOR(mdev);
+ unit->num_nodes = num_nodes;
+ unit->private_data = private_data;
+
+ unit->cdev = cdev_alloc();
+ if (!unit->cdev) {
+ rc = -ENOMEM;
+ goto unregister_chrdev;
+ }
+ unit->cdev->ops = fops;
+ unit->cdev->owner = owner;
+
+ rc = cdev_add(unit->cdev, MKDEV(unit->major, unit->lowest_minor),
+ unit->num_nodes);
+ if (rc) {
+ dev_err(dev, "Failed to add cdev.\n");
+ /* kobject_put() is normally done by cdev_del() */
+ kobject_put(&unit->cdev->kobj);
+ goto unregister_chrdev;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ namelen = strnlen(idt, len);
+
+ if (namelen == len) {
+ dev_err(dev, "IDT's list of names is too short. This is exceptionally weird, because its CRC is OK\n");
+ rc = -ENODEV;
+ goto unroll_device_create;
+ }
+
+ snprintf(devname, sizeof(devname), "%s_%s",
+ unit->name, idt);
+
+ len -= namelen + 1;
+ idt += namelen + 1;
+
+ device = device_create(&xillybus_class,
+ NULL,
+ MKDEV(unit->major,
+ i + unit->lowest_minor),
+ NULL,
+ "%s", devname);
+
+ if (IS_ERR(device)) {
+ dev_err(dev, "Failed to create %s device. Aborting.\n",
+ devname);
+ rc = -ENODEV;
+ goto unroll_device_create;
+ }
+ }
+
+ if (len) {
+ dev_err(dev, "IDT's list of names is too long. This is exceptionally weird, because its CRC is OK\n");
+ rc = -ENODEV;
+ goto unroll_device_create;
+ }
+
+ list_add_tail(&unit->list_entry, &unit_list);
+
+ dev_info(dev, "Created %d device files.\n", num_nodes);
+
+ mutex_unlock(&unit_mutex);
+
+ return 0;
+
+unroll_device_create:
+ for (i--; i >= 0; i--)
+ device_destroy(&xillybus_class, MKDEV(unit->major,
+ i + unit->lowest_minor));
+
+ cdev_del(unit->cdev);
+
+unregister_chrdev:
+ unregister_chrdev_region(MKDEV(unit->major, unit->lowest_minor),
+ unit->num_nodes);
+
+fail_obtain:
+ mutex_unlock(&unit_mutex);
+
+ kfree(unit);
+
+ return rc;
+}
+EXPORT_SYMBOL(xillybus_init_chrdev);
+
+void xillybus_cleanup_chrdev(void *private_data,
+ struct device *dev)
+{
+ int minor;
+ struct xilly_unit *unit = NULL, *iter;
+
+ mutex_lock(&unit_mutex);
+
+ list_for_each_entry(iter, &unit_list, list_entry)
+ if (iter->private_data == private_data) {
+ unit = iter;
+ break;
+ }
+
+ if (!unit) {
+ dev_err(dev, "Weird bug: Failed to find unit\n");
+ mutex_unlock(&unit_mutex);
+ return;
+ }
+
+ for (minor = unit->lowest_minor;
+ minor < (unit->lowest_minor + unit->num_nodes);
+ minor++)
+ device_destroy(&xillybus_class, MKDEV(unit->major, minor));
+
+ cdev_del(unit->cdev);
+
+ unregister_chrdev_region(MKDEV(unit->major, unit->lowest_minor),
+ unit->num_nodes);
+
+ dev_info(dev, "Removed %d device files.\n",
+ unit->num_nodes);
+
+ list_del(&unit->list_entry);
+ kfree(unit);
+
+ mutex_unlock(&unit_mutex);
+}
+EXPORT_SYMBOL(xillybus_cleanup_chrdev);
+
+int xillybus_find_inode(struct inode *inode,
+ void **private_data, int *index)
+{
+ int minor = iminor(inode);
+ int major = imajor(inode);
+ struct xilly_unit *unit = NULL, *iter;
+
+ mutex_lock(&unit_mutex);
+
+ list_for_each_entry(iter, &unit_list, list_entry)
+ if (iter->major == major &&
+ minor >= iter->lowest_minor &&
+ minor < (iter->lowest_minor + iter->num_nodes)) {
+ unit = iter;
+ break;
+ }
+
+ if (!unit) {
+ mutex_unlock(&unit_mutex);
+ return -ENODEV;
+ }
+
+ *private_data = unit->private_data;
+ *index = minor - unit->lowest_minor;
+
+ mutex_unlock(&unit_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(xillybus_find_inode);
+
+static int __init xillybus_class_init(void)
+{
+ return class_register(&xillybus_class);
+}
+
+static void __exit xillybus_class_exit(void)
+{
+ class_unregister(&xillybus_class);
+}
+
+module_init(xillybus_class_init);
+module_exit(xillybus_class_exit);
diff --git a/drivers/char/xillybus/xillybus_class.h b/drivers/char/xillybus/xillybus_class.h
new file mode 100644
index 0000000000..5dbfdfc95c
--- /dev/null
+++ b/drivers/char/xillybus/xillybus_class.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2021 Xillybus Ltd, http://www.xillybus.com
+ *
+ * Header file for the Xillybus class
+ */
+
+#ifndef __XILLYBUS_CLASS_H
+#define __XILLYBUS_CLASS_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+
+int xillybus_init_chrdev(struct device *dev,
+ const struct file_operations *fops,
+ struct module *owner,
+ void *private_data,
+ unsigned char *idt, unsigned int len,
+ int num_nodes,
+ const char *prefix, bool enumerate);
+
+void xillybus_cleanup_chrdev(void *private_data,
+ struct device *dev);
+
+int xillybus_find_inode(struct inode *inode,
+ void **private_data, int *index);
+
+#endif /* __XILLYBUS_CLASS_H */
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
new file mode 100644
index 0000000000..11b7c47492
--- /dev/null
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -0,0 +1,1991 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/misc/xillybus_core.c
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the Xillybus FPGA/host framework.
+ *
+ * This driver interfaces with a special IP core in an FPGA, setting up
+ * a pipe between a hardware FIFO in the programmable logic and a device
+ * file in the host. The number of such pipes and their attributes are
+ * set up on the logic. This driver detects these automatically and
+ * creates the device files accordingly.
+ */
+
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/crc32.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include "xillybus.h"
+#include "xillybus_class.h"
+
+MODULE_DESCRIPTION("Xillybus core functions");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_ALIAS("xillybus_core");
+MODULE_LICENSE("GPL v2");
+
+/* General timeout is 100 ms, rx timeout is 10 ms */
+#define XILLY_RX_TIMEOUT (10*HZ/1000)
+#define XILLY_TIMEOUT (100*HZ/1000)
+
+#define fpga_msg_ctrl_reg 0x0008
+#define fpga_dma_control_reg 0x0020
+#define fpga_dma_bufno_reg 0x0024
+#define fpga_dma_bufaddr_lowaddr_reg 0x0028
+#define fpga_dma_bufaddr_highaddr_reg 0x002c
+#define fpga_buf_ctrl_reg 0x0030
+#define fpga_buf_offset_reg 0x0034
+#define fpga_endian_reg 0x0040
+
+#define XILLYMSG_OPCODE_RELEASEBUF 1
+#define XILLYMSG_OPCODE_QUIESCEACK 2
+#define XILLYMSG_OPCODE_FIFOEOF 3
+#define XILLYMSG_OPCODE_FATAL_ERROR 4
+#define XILLYMSG_OPCODE_NONEMPTY 5
+
+static const char xillyname[] = "xillybus";
+
+static struct workqueue_struct *xillybus_wq;
+
+/*
+ * Locking scheme: Mutexes protect invocations of character device methods.
+ * If both locks are taken, wr_mutex is taken first, rd_mutex second.
+ *
+ * wr_spinlock protects wr_*_buf_idx, wr_empty, wr_sleepy, wr_ready and the
+ * buffers' end_offset fields against changes made by IRQ handler (and in
+ * theory, other file request handlers, but the mutex handles that). Nothing
+ * else.
+ * They are held for short direct memory manipulations. Needless to say,
+ * no mutex locking is allowed when a spinlock is held.
+ *
+ * rd_spinlock does the same with rd_*_buf_idx, rd_empty and end_offset.
+ *
+ * register_mutex is endpoint-specific, and is held when non-atomic
+ * register operations are performed. wr_mutex and rd_mutex may be
+ * held when register_mutex is taken, but none of the spinlocks. Note that
+ * register_mutex doesn't protect against sporadic buf_ctrl_reg writes
+ * which are unrelated to buf_offset_reg, since they are harmless.
+ *
+ * Blocking on the wait queues is allowed with mutexes held, but not with
+ * spinlocks.
+ *
+ * Only interruptible blocking is allowed on mutexes and wait queues.
+ *
+ * All in all, the locking order goes (with skips allowed, of course):
+ * wr_mutex -> rd_mutex -> register_mutex -> wr_spinlock -> rd_spinlock
+ */
+
+static void malformed_message(struct xilly_endpoint *endpoint, u32 *buf)
+{
+ int opcode;
+ int msg_channel, msg_bufno, msg_data, msg_dir;
+
+ opcode = (buf[0] >> 24) & 0xff;
+ msg_dir = buf[0] & 1;
+ msg_channel = (buf[0] >> 1) & 0x7ff;
+ msg_bufno = (buf[0] >> 12) & 0x3ff;
+ msg_data = buf[1] & 0xfffffff;
+
+ dev_warn(endpoint->dev,
+ "Malformed message (skipping): opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n",
+ opcode, msg_channel, msg_dir, msg_bufno, msg_data);
+}
+
+/*
+ * xillybus_isr assumes the interrupt is allocated exclusively to it,
+ * which is the natural case with MSI and several other hardware-oriented
+ * interrupts. Sharing is not allowed.
+ */
+
+irqreturn_t xillybus_isr(int irq, void *data)
+{
+ struct xilly_endpoint *ep = data;
+ u32 *buf;
+ unsigned int buf_size;
+ int i;
+ int opcode;
+ unsigned int msg_channel, msg_bufno, msg_data, msg_dir;
+ struct xilly_channel *channel;
+
+ buf = ep->msgbuf_addr;
+ buf_size = ep->msg_buf_size/sizeof(u32);
+
+ dma_sync_single_for_cpu(ep->dev, ep->msgbuf_dma_addr,
+ ep->msg_buf_size, DMA_FROM_DEVICE);
+
+ for (i = 0; i < buf_size; i += 2) {
+ if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) {
+ malformed_message(ep, &buf[i]);
+ dev_warn(ep->dev,
+ "Sending a NACK on counter %x (instead of %x) on entry %d\n",
+ ((buf[i+1] >> 28) & 0xf),
+ ep->msg_counter,
+ i/2);
+
+ if (++ep->failed_messages > 10) {
+ dev_err(ep->dev,
+ "Lost sync with interrupt messages. Stopping.\n");
+ } else {
+ dma_sync_single_for_device(ep->dev,
+ ep->msgbuf_dma_addr,
+ ep->msg_buf_size,
+ DMA_FROM_DEVICE);
+
+ iowrite32(0x01, /* Message NACK */
+ ep->registers + fpga_msg_ctrl_reg);
+ }
+ return IRQ_HANDLED;
+ } else if (buf[i] & (1 << 22)) /* Last message */
+ break;
+ }
+
+ if (i >= buf_size) {
+ dev_err(ep->dev, "Bad interrupt message. Stopping.\n");
+ return IRQ_HANDLED;
+ }
+
+ buf_size = i + 2;
+
+ for (i = 0; i < buf_size; i += 2) { /* Scan through messages */
+ opcode = (buf[i] >> 24) & 0xff;
+
+ msg_dir = buf[i] & 1;
+ msg_channel = (buf[i] >> 1) & 0x7ff;
+ msg_bufno = (buf[i] >> 12) & 0x3ff;
+ msg_data = buf[i+1] & 0xfffffff;
+
+ switch (opcode) {
+ case XILLYMSG_OPCODE_RELEASEBUF:
+ if ((msg_channel > ep->num_channels) ||
+ (msg_channel == 0)) {
+ malformed_message(ep, &buf[i]);
+ break;
+ }
+
+ channel = ep->channels[msg_channel];
+
+ if (msg_dir) { /* Write channel */
+ if (msg_bufno >= channel->num_wr_buffers) {
+ malformed_message(ep, &buf[i]);
+ break;
+ }
+ spin_lock(&channel->wr_spinlock);
+ channel->wr_buffers[msg_bufno]->end_offset =
+ msg_data;
+ channel->wr_fpga_buf_idx = msg_bufno;
+ channel->wr_empty = 0;
+ channel->wr_sleepy = 0;
+ spin_unlock(&channel->wr_spinlock);
+
+ wake_up_interruptible(&channel->wr_wait);
+
+ } else {
+ /* Read channel */
+
+ if (msg_bufno >= channel->num_rd_buffers) {
+ malformed_message(ep, &buf[i]);
+ break;
+ }
+
+ spin_lock(&channel->rd_spinlock);
+ channel->rd_fpga_buf_idx = msg_bufno;
+ channel->rd_full = 0;
+ spin_unlock(&channel->rd_spinlock);
+
+ wake_up_interruptible(&channel->rd_wait);
+ if (!channel->rd_synchronous)
+ queue_delayed_work(
+ xillybus_wq,
+ &channel->rd_workitem,
+ XILLY_RX_TIMEOUT);
+ }
+
+ break;
+ case XILLYMSG_OPCODE_NONEMPTY:
+ if ((msg_channel > ep->num_channels) ||
+ (msg_channel == 0) || (!msg_dir) ||
+ !ep->channels[msg_channel]->wr_supports_nonempty) {
+ malformed_message(ep, &buf[i]);
+ break;
+ }
+
+ channel = ep->channels[msg_channel];
+
+ if (msg_bufno >= channel->num_wr_buffers) {
+ malformed_message(ep, &buf[i]);
+ break;
+ }
+ spin_lock(&channel->wr_spinlock);
+ if (msg_bufno == channel->wr_host_buf_idx)
+ channel->wr_ready = 1;
+ spin_unlock(&channel->wr_spinlock);
+
+ wake_up_interruptible(&channel->wr_ready_wait);
+
+ break;
+ case XILLYMSG_OPCODE_QUIESCEACK:
+ ep->idtlen = msg_data;
+ wake_up_interruptible(&ep->ep_wait);
+
+ break;
+ case XILLYMSG_OPCODE_FIFOEOF:
+ if ((msg_channel > ep->num_channels) ||
+ (msg_channel == 0) || (!msg_dir) ||
+ !ep->channels[msg_channel]->num_wr_buffers) {
+ malformed_message(ep, &buf[i]);
+ break;
+ }
+ channel = ep->channels[msg_channel];
+ spin_lock(&channel->wr_spinlock);
+ channel->wr_eof = msg_bufno;
+ channel->wr_sleepy = 0;
+
+ channel->wr_hangup = channel->wr_empty &&
+ (channel->wr_host_buf_idx == msg_bufno);
+
+ spin_unlock(&channel->wr_spinlock);
+
+ wake_up_interruptible(&channel->wr_wait);
+
+ break;
+ case XILLYMSG_OPCODE_FATAL_ERROR:
+ ep->fatal_error = 1;
+ wake_up_interruptible(&ep->ep_wait); /* For select() */
+ dev_err(ep->dev,
+ "FPGA reported a fatal error. This means that the low-level communication with the device has failed. This hardware problem is most likely unrelated to Xillybus (neither kernel module nor FPGA core), but reports are still welcome. All I/O is aborted.\n");
+ break;
+ default:
+ malformed_message(ep, &buf[i]);
+ break;
+ }
+ }
+
+ dma_sync_single_for_device(ep->dev, ep->msgbuf_dma_addr,
+ ep->msg_buf_size, DMA_FROM_DEVICE);
+
+ ep->msg_counter = (ep->msg_counter + 1) & 0xf;
+ ep->failed_messages = 0;
+ iowrite32(0x03, ep->registers + fpga_msg_ctrl_reg); /* Message ACK */
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(xillybus_isr);
+
+/*
+ * A few trivial memory management functions.
+ * NOTE: These functions are used only on probe and remove, and therefore
+ * no locks are applied!
+ */
+
+static void xillybus_autoflush(struct work_struct *work);
+
+struct xilly_alloc_state {
+ void *salami;
+ int left_of_salami;
+ int nbuffer;
+ enum dma_data_direction direction;
+ u32 regdirection;
+};
+
+static void xilly_unmap(void *ptr)
+{
+ struct xilly_mapping *data = ptr;
+
+ dma_unmap_single(data->device, data->dma_addr,
+ data->size, data->direction);
+
+ kfree(ptr);
+}
+
+static int xilly_map_single(struct xilly_endpoint *ep,
+ void *ptr,
+ size_t size,
+ int direction,
+ dma_addr_t *ret_dma_handle
+ )
+{
+ dma_addr_t addr;
+ struct xilly_mapping *this;
+
+ this = kzalloc(sizeof(*this), GFP_KERNEL);
+ if (!this)
+ return -ENOMEM;
+
+ addr = dma_map_single(ep->dev, ptr, size, direction);
+
+ if (dma_mapping_error(ep->dev, addr)) {
+ kfree(this);
+ return -ENODEV;
+ }
+
+ this->device = ep->dev;
+ this->dma_addr = addr;
+ this->size = size;
+ this->direction = direction;
+
+ *ret_dma_handle = addr;
+
+ return devm_add_action_or_reset(ep->dev, xilly_unmap, this);
+}
+
+static int xilly_get_dma_buffers(struct xilly_endpoint *ep,
+ struct xilly_alloc_state *s,
+ struct xilly_buffer **buffers,
+ int bufnum, int bytebufsize)
+{
+ int i, rc;
+ dma_addr_t dma_addr;
+ struct device *dev = ep->dev;
+ struct xilly_buffer *this_buffer = NULL; /* Init to silence warning */
+
+ if (buffers) { /* Not the message buffer */
+ this_buffer = devm_kcalloc(dev, bufnum,
+ sizeof(struct xilly_buffer),
+ GFP_KERNEL);
+ if (!this_buffer)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < bufnum; i++) {
+ /*
+ * Buffers are expected in descending size order, so there
+ * is either enough space for this buffer or none at all.
+ */
+
+ if ((s->left_of_salami < bytebufsize) &&
+ (s->left_of_salami > 0)) {
+ dev_err(ep->dev,
+ "Corrupt buffer allocation in IDT. Aborting.\n");
+ return -ENODEV;
+ }
+
+ if (s->left_of_salami == 0) {
+ int allocorder, allocsize;
+
+ allocsize = PAGE_SIZE;
+ allocorder = 0;
+ while (bytebufsize > allocsize) {
+ allocsize *= 2;
+ allocorder++;
+ }
+
+ s->salami = (void *) devm_get_free_pages(
+ dev,
+ GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO,
+ allocorder);
+ if (!s->salami)
+ return -ENOMEM;
+
+ s->left_of_salami = allocsize;
+ }
+
+ rc = xilly_map_single(ep, s->salami,
+ bytebufsize, s->direction,
+ &dma_addr);
+ if (rc)
+ return rc;
+
+ iowrite32((u32) (dma_addr & 0xffffffff),
+ ep->registers + fpga_dma_bufaddr_lowaddr_reg);
+ iowrite32(((u32) ((((u64) dma_addr) >> 32) & 0xffffffff)),
+ ep->registers + fpga_dma_bufaddr_highaddr_reg);
+
+ if (buffers) { /* Not the message buffer */
+ this_buffer->addr = s->salami;
+ this_buffer->dma_addr = dma_addr;
+ buffers[i] = this_buffer++;
+
+ iowrite32(s->regdirection | s->nbuffer++,
+ ep->registers + fpga_dma_bufno_reg);
+ } else {
+ ep->msgbuf_addr = s->salami;
+ ep->msgbuf_dma_addr = dma_addr;
+ ep->msg_buf_size = bytebufsize;
+
+ iowrite32(s->regdirection,
+ ep->registers + fpga_dma_bufno_reg);
+ }
+
+ s->left_of_salami -= bytebufsize;
+ s->salami += bytebufsize;
+ }
+ return 0;
+}
+
+static int xilly_setupchannels(struct xilly_endpoint *ep,
+ unsigned char *chandesc,
+ int entries)
+{
+ struct device *dev = ep->dev;
+ int i, entry, rc;
+ struct xilly_channel *channel;
+ int channelnum, bufnum, bufsize, format, is_writebuf;
+ int bytebufsize;
+ int synchronous, allowpartial, exclusive_open, seekable;
+ int supports_nonempty;
+ int msg_buf_done = 0;
+
+ struct xilly_alloc_state rd_alloc = {
+ .salami = NULL,
+ .left_of_salami = 0,
+ .nbuffer = 1,
+ .direction = DMA_TO_DEVICE,
+ .regdirection = 0,
+ };
+
+ struct xilly_alloc_state wr_alloc = {
+ .salami = NULL,
+ .left_of_salami = 0,
+ .nbuffer = 1,
+ .direction = DMA_FROM_DEVICE,
+ .regdirection = 0x80000000,
+ };
+
+ channel = devm_kcalloc(dev, ep->num_channels,
+ sizeof(struct xilly_channel), GFP_KERNEL);
+ if (!channel)
+ return -ENOMEM;
+
+ ep->channels = devm_kcalloc(dev, ep->num_channels + 1,
+ sizeof(struct xilly_channel *),
+ GFP_KERNEL);
+ if (!ep->channels)
+ return -ENOMEM;
+
+ ep->channels[0] = NULL; /* Channel 0 is message buf. */
+
+ /* Initialize all channels with defaults */
+
+ for (i = 1; i <= ep->num_channels; i++) {
+ channel->wr_buffers = NULL;
+ channel->rd_buffers = NULL;
+ channel->num_wr_buffers = 0;
+ channel->num_rd_buffers = 0;
+ channel->wr_fpga_buf_idx = -1;
+ channel->wr_host_buf_idx = 0;
+ channel->wr_host_buf_pos = 0;
+ channel->wr_empty = 1;
+ channel->wr_ready = 0;
+ channel->wr_sleepy = 1;
+ channel->rd_fpga_buf_idx = 0;
+ channel->rd_host_buf_idx = 0;
+ channel->rd_host_buf_pos = 0;
+ channel->rd_full = 0;
+ channel->wr_ref_count = 0;
+ channel->rd_ref_count = 0;
+
+ spin_lock_init(&channel->wr_spinlock);
+ spin_lock_init(&channel->rd_spinlock);
+ mutex_init(&channel->wr_mutex);
+ mutex_init(&channel->rd_mutex);
+ init_waitqueue_head(&channel->rd_wait);
+ init_waitqueue_head(&channel->wr_wait);
+ init_waitqueue_head(&channel->wr_ready_wait);
+
+ INIT_DELAYED_WORK(&channel->rd_workitem, xillybus_autoflush);
+
+ channel->endpoint = ep;
+ channel->chan_num = i;
+
+ channel->log2_element_size = 0;
+
+ ep->channels[i] = channel++;
+ }
+
+ for (entry = 0; entry < entries; entry++, chandesc += 4) {
+ struct xilly_buffer **buffers = NULL;
+
+ is_writebuf = chandesc[0] & 0x01;
+ channelnum = (chandesc[0] >> 1) | ((chandesc[1] & 0x0f) << 7);
+ format = (chandesc[1] >> 4) & 0x03;
+ allowpartial = (chandesc[1] >> 6) & 0x01;
+ synchronous = (chandesc[1] >> 7) & 0x01;
+ bufsize = 1 << (chandesc[2] & 0x1f);
+ bufnum = 1 << (chandesc[3] & 0x0f);
+ exclusive_open = (chandesc[2] >> 7) & 0x01;
+ seekable = (chandesc[2] >> 6) & 0x01;
+ supports_nonempty = (chandesc[2] >> 5) & 0x01;
+
+ if ((channelnum > ep->num_channels) ||
+ ((channelnum == 0) && !is_writebuf)) {
+ dev_err(ep->dev,
+ "IDT requests channel out of range. Aborting.\n");
+ return -ENODEV;
+ }
+
+ channel = ep->channels[channelnum]; /* NULL for msg channel */
+
+ if (!is_writebuf || channelnum > 0) {
+ channel->log2_element_size = ((format > 2) ?
+ 2 : format);
+
+ bytebufsize = bufsize *
+ (1 << channel->log2_element_size);
+
+ buffers = devm_kcalloc(dev, bufnum,
+ sizeof(struct xilly_buffer *),
+ GFP_KERNEL);
+ if (!buffers)
+ return -ENOMEM;
+ } else {
+ bytebufsize = bufsize << 2;
+ }
+
+ if (!is_writebuf) {
+ channel->num_rd_buffers = bufnum;
+ channel->rd_buf_size = bytebufsize;
+ channel->rd_allow_partial = allowpartial;
+ channel->rd_synchronous = synchronous;
+ channel->rd_exclusive_open = exclusive_open;
+ channel->seekable = seekable;
+
+ channel->rd_buffers = buffers;
+ rc = xilly_get_dma_buffers(ep, &rd_alloc, buffers,
+ bufnum, bytebufsize);
+ } else if (channelnum > 0) {
+ channel->num_wr_buffers = bufnum;
+ channel->wr_buf_size = bytebufsize;
+
+ channel->seekable = seekable;
+ channel->wr_supports_nonempty = supports_nonempty;
+
+ channel->wr_allow_partial = allowpartial;
+ channel->wr_synchronous = synchronous;
+ channel->wr_exclusive_open = exclusive_open;
+
+ channel->wr_buffers = buffers;
+ rc = xilly_get_dma_buffers(ep, &wr_alloc, buffers,
+ bufnum, bytebufsize);
+ } else {
+ rc = xilly_get_dma_buffers(ep, &wr_alloc, NULL,
+ bufnum, bytebufsize);
+ msg_buf_done++;
+ }
+
+ if (rc)
+ return -ENOMEM;
+ }
+
+ if (!msg_buf_done) {
+ dev_err(ep->dev,
+ "Corrupt IDT: No message buffer. Aborting.\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int xilly_scan_idt(struct xilly_endpoint *endpoint,
+ struct xilly_idt_handle *idt_handle)
+{
+ int count = 0;
+ unsigned char *idt = endpoint->channels[1]->wr_buffers[0]->addr;
+ unsigned char *end_of_idt = idt + endpoint->idtlen - 4;
+ unsigned char *scan;
+ int len;
+
+ scan = idt + 1;
+ idt_handle->names = scan;
+
+ while ((scan <= end_of_idt) && *scan) {
+ while ((scan <= end_of_idt) && *scan++)
+ /* Do nothing, just scan thru string */;
+ count++;
+ }
+
+ idt_handle->names_len = scan - idt_handle->names;
+
+ scan++;
+
+ if (scan > end_of_idt) {
+ dev_err(endpoint->dev,
+ "IDT device name list overflow. Aborting.\n");
+ return -ENODEV;
+ }
+ idt_handle->chandesc = scan;
+
+ len = endpoint->idtlen - (3 + ((int) (scan - idt)));
+
+ if (len & 0x03) {
+ dev_err(endpoint->dev,
+ "Corrupt IDT device name list. Aborting.\n");
+ return -ENODEV;
+ }
+
+ idt_handle->entries = len >> 2;
+ endpoint->num_channels = count;
+
+ return 0;
+}
+
+static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
+{
+ struct xilly_channel *channel;
+ unsigned char *version;
+ long t;
+
+ channel = endpoint->channels[1]; /* This should be generated ad-hoc */
+
+ channel->wr_sleepy = 1;
+
+ iowrite32(1 |
+ (3 << 24), /* Opcode 3 for channel 0 = Send IDT */
+ endpoint->registers + fpga_buf_ctrl_reg);
+
+ t = wait_event_interruptible_timeout(channel->wr_wait,
+ (!channel->wr_sleepy),
+ XILLY_TIMEOUT);
+
+ if (t <= 0) {
+ dev_err(endpoint->dev, "Failed to obtain IDT. Aborting.\n");
+
+ if (endpoint->fatal_error)
+ return -EIO;
+
+ return -ENODEV;
+ }
+
+ dma_sync_single_for_cpu(channel->endpoint->dev,
+ channel->wr_buffers[0]->dma_addr,
+ channel->wr_buf_size,
+ DMA_FROM_DEVICE);
+
+ if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) {
+ dev_err(endpoint->dev,
+ "IDT length mismatch (%d != %d). Aborting.\n",
+ channel->wr_buffers[0]->end_offset, endpoint->idtlen);
+ return -ENODEV;
+ }
+
+ if (crc32_le(~0, channel->wr_buffers[0]->addr,
+ endpoint->idtlen+1) != 0) {
+ dev_err(endpoint->dev, "IDT failed CRC check. Aborting.\n");
+ return -ENODEV;
+ }
+
+ version = channel->wr_buffers[0]->addr;
+
+ /* Check version number. Reject anything above 0x82. */
+ if (*version > 0x82) {
+ dev_err(endpoint->dev,
+ "No support for IDT version 0x%02x. Maybe the xillybus driver needs an upgrade. Aborting.\n",
+ *version);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static ssize_t xillybus_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *f_pos)
+{
+ ssize_t rc;
+ unsigned long flags;
+ int bytes_done = 0;
+ int no_time_left = 0;
+ long deadline, left_to_sleep;
+ struct xilly_channel *channel = filp->private_data;
+
+ int empty, reached_eof, exhausted, ready;
+ /* Initializations are there only to silence warnings */
+
+ int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
+ int waiting_bufidx;
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ deadline = jiffies + 1 + XILLY_RX_TIMEOUT;
+
+ rc = mutex_lock_interruptible(&channel->wr_mutex);
+ if (rc)
+ return rc;
+
+ while (1) { /* Note that we may drop mutex within this loop */
+ int bytes_to_do = count - bytes_done;
+
+ spin_lock_irqsave(&channel->wr_spinlock, flags);
+
+ empty = channel->wr_empty;
+ ready = !empty || channel->wr_ready;
+
+ if (!empty) {
+ bufidx = channel->wr_host_buf_idx;
+ bufpos = channel->wr_host_buf_pos;
+ howmany = ((channel->wr_buffers[bufidx]->end_offset
+ + 1) << channel->log2_element_size)
+ - bufpos;
+
+ /* Update wr_host_* to its post-operation state */
+ if (howmany > bytes_to_do) {
+ bufferdone = 0;
+
+ howmany = bytes_to_do;
+ channel->wr_host_buf_pos += howmany;
+ } else {
+ bufferdone = 1;
+
+ channel->wr_host_buf_pos = 0;
+
+ if (bufidx == channel->wr_fpga_buf_idx) {
+ channel->wr_empty = 1;
+ channel->wr_sleepy = 1;
+ channel->wr_ready = 0;
+ }
+
+ if (bufidx >= (channel->num_wr_buffers - 1))
+ channel->wr_host_buf_idx = 0;
+ else
+ channel->wr_host_buf_idx++;
+ }
+ }
+
+ /*
+ * Marking our situation after the possible changes above,
+ * for use after releasing the spinlock.
+ *
+ * empty = empty before change
+	 * exhausted = empty after possible change
+ */
+
+ reached_eof = channel->wr_empty &&
+ (channel->wr_host_buf_idx == channel->wr_eof);
+ channel->wr_hangup = reached_eof;
+ exhausted = channel->wr_empty;
+ waiting_bufidx = channel->wr_host_buf_idx;
+
+ spin_unlock_irqrestore(&channel->wr_spinlock, flags);
+
+ if (!empty) { /* Go on, now without the spinlock */
+
+ if (bufpos == 0) /* Position zero means it's virgin */
+ dma_sync_single_for_cpu(channel->endpoint->dev,
+ channel->wr_buffers[bufidx]->dma_addr,
+ channel->wr_buf_size,
+ DMA_FROM_DEVICE);
+
+ if (copy_to_user(
+ userbuf,
+ channel->wr_buffers[bufidx]->addr
+ + bufpos, howmany))
+ rc = -EFAULT;
+
+ userbuf += howmany;
+ bytes_done += howmany;
+
+ if (bufferdone) {
+ dma_sync_single_for_device(channel->endpoint->dev,
+ channel->wr_buffers[bufidx]->dma_addr,
+ channel->wr_buf_size,
+ DMA_FROM_DEVICE);
+
+ /*
+ * Tell FPGA the buffer is done with. It's an
+ * atomic operation to the FPGA, so what
+ * happens with other channels doesn't matter,
+ * and the certain channel is protected with
+ * the channel-specific mutex.
+ */
+
+ iowrite32(1 | (channel->chan_num << 1) |
+ (bufidx << 12),
+ channel->endpoint->registers +
+ fpga_buf_ctrl_reg);
+ }
+
+ if (rc) {
+ mutex_unlock(&channel->wr_mutex);
+ return rc;
+ }
+ }
+
+ /* This includes a zero-count return = EOF */
+ if ((bytes_done >= count) || reached_eof)
+ break;
+
+ if (!exhausted)
+ continue; /* More in RAM buffer(s)? Just go on. */
+
+ if ((bytes_done > 0) &&
+ (no_time_left ||
+ (channel->wr_synchronous && channel->wr_allow_partial)))
+ break;
+
+ /*
+ * Nonblocking read: The "ready" flag tells us that the FPGA
+ * has data to send. In non-blocking mode, if it isn't on,
+		 * just return. But if there is data, we jump directly to the point
+ * where we ask for the FPGA to send all it has, and wait
+ * until that data arrives. So in a sense, we *do* block in
+ * nonblocking mode, but only for a very short time.
+ */
+
+ if (!no_time_left && (filp->f_flags & O_NONBLOCK)) {
+ if (bytes_done > 0)
+ break;
+
+ if (ready)
+ goto desperate;
+
+ rc = -EAGAIN;
+ break;
+ }
+
+ if (!no_time_left || (bytes_done > 0)) {
+ /*
+ * Note that in case of an element-misaligned read
+ * request, offsetlimit will include the last element,
+ * which will be partially read from.
+ */
+ int offsetlimit = ((count - bytes_done) - 1) >>
+ channel->log2_element_size;
+ int buf_elements = channel->wr_buf_size >>
+ channel->log2_element_size;
+
+ /*
+ * In synchronous mode, always send an offset limit.
+ * Just don't send a value too big.
+ */
+
+ if (channel->wr_synchronous) {
+ /* Don't request more than one buffer */
+ if (channel->wr_allow_partial &&
+ (offsetlimit >= buf_elements))
+ offsetlimit = buf_elements - 1;
+
+ /* Don't request more than all buffers */
+ if (!channel->wr_allow_partial &&
+ (offsetlimit >=
+ (buf_elements * channel->num_wr_buffers)))
+ offsetlimit = buf_elements *
+ channel->num_wr_buffers - 1;
+ }
+
+ /*
+ * In asynchronous mode, force early flush of a buffer
+ * only if that will allow returning a full count. The
+ * "offsetlimit < ( ... )" rather than "<=" excludes
+ * requesting a full buffer, which would obviously
+ * cause a buffer transmission anyhow
+ */
+
+ if (channel->wr_synchronous ||
+ (offsetlimit < (buf_elements - 1))) {
+ mutex_lock(&channel->endpoint->register_mutex);
+
+ iowrite32(offsetlimit,
+ channel->endpoint->registers +
+ fpga_buf_offset_reg);
+
+ iowrite32(1 | (channel->chan_num << 1) |
+ (2 << 24) | /* 2 = offset limit */
+ (waiting_bufidx << 12),
+ channel->endpoint->registers +
+ fpga_buf_ctrl_reg);
+
+ mutex_unlock(&channel->endpoint->
+ register_mutex);
+ }
+ }
+
+ /*
+ * If partial completion is disallowed, there is no point in
+ * timeout sleeping. Neither if no_time_left is set and
+ * there's no data.
+ */
+
+ if (!channel->wr_allow_partial ||
+ (no_time_left && (bytes_done == 0))) {
+ /*
+ * This do-loop will run more than once if another
+ * thread reasserted wr_sleepy before we got the mutex
+ * back, so we try again.
+ */
+
+ do {
+ mutex_unlock(&channel->wr_mutex);
+
+ if (wait_event_interruptible(
+ channel->wr_wait,
+ (!channel->wr_sleepy)))
+ goto interrupted;
+
+ if (mutex_lock_interruptible(
+ &channel->wr_mutex))
+ goto interrupted;
+ } while (channel->wr_sleepy);
+
+ continue;
+
+interrupted: /* Mutex is not held if got here */
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+ if (bytes_done)
+ return bytes_done;
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN; /* Don't admit snoozing */
+ return -EINTR;
+ }
+
+ left_to_sleep = deadline - ((long) jiffies);
+
+ /*
+ * If our time is out, skip the waiting. We may miss wr_sleepy
+ * being deasserted but hey, almost missing the train is like
+ * missing it.
+ */
+
+ if (left_to_sleep > 0) {
+ left_to_sleep =
+ wait_event_interruptible_timeout(
+ channel->wr_wait,
+ (!channel->wr_sleepy),
+ left_to_sleep);
+
+ if (left_to_sleep > 0) /* wr_sleepy deasserted */
+ continue;
+
+ if (left_to_sleep < 0) { /* Interrupt */
+ mutex_unlock(&channel->wr_mutex);
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+ if (bytes_done)
+ return bytes_done;
+ return -EINTR;
+ }
+ }
+
+desperate:
+ no_time_left = 1; /* We're out of sleeping time. Desperate! */
+
+ if (bytes_done == 0) {
+ /*
+ * Reaching here means that we allow partial return,
+ * that we've run out of time, and that we have
+ * nothing to return.
+ * So tell the FPGA to send anything it has or gets.
+ */
+
+ iowrite32(1 | (channel->chan_num << 1) |
+ (3 << 24) | /* Opcode 3, flush it all! */
+ (waiting_bufidx << 12),
+ channel->endpoint->registers +
+ fpga_buf_ctrl_reg);
+ }
+
+ /*
+ * Reaching here means that we *do* have data in the buffer,
+ * but the "partial" flag disallows returning less than
+ * required. And we don't have as much. So loop again,
+ * which is likely to end up blocking indefinitely until
+ * enough data has arrived.
+ */
+ }
+
+ mutex_unlock(&channel->wr_mutex);
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ if (rc)
+ return rc;
+
+ return bytes_done;
+}
+
+/*
+ * The timeout argument takes values as follows:
+ * >0 : Flush with timeout
+ * ==0 : Flush, and wait indefinitely for the flush to complete
+ * <0 : Autoflush: Flush only if there's a single buffer occupied
+ */
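+/*
+ * As used in this file: xillybus_flush() passes HZ (a one-second timeout)
+ * and the autoflush work item passes -1.
+ */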
+
+static int xillybus_myflush(struct xilly_channel *channel, long timeout)
+{
+ int rc;
+ unsigned long flags;
+
+ int end_offset_plus1;
+ int bufidx, bufidx_minus1;
+ int i;
+ int empty;
+ int new_rd_host_buf_pos;
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+ rc = mutex_lock_interruptible(&channel->rd_mutex);
+ if (rc)
+ return rc;
+
+ /*
+	 * Don't flush a closed channel. This can happen when the queued
+	 * autoflush work item fires after the file has been closed. This is not
+ * an error, just something to dismiss.
+ */
+
+ if (!channel->rd_ref_count)
+ goto done;
+
+ bufidx = channel->rd_host_buf_idx;
+
+ bufidx_minus1 = (bufidx == 0) ?
+ channel->num_rd_buffers - 1 :
+ bufidx - 1;
+
+ end_offset_plus1 = channel->rd_host_buf_pos >>
+ channel->log2_element_size;
+
+ new_rd_host_buf_pos = channel->rd_host_buf_pos -
+ (end_offset_plus1 << channel->log2_element_size);
+
+ /* Submit the current buffer if it's nonempty */
+ if (end_offset_plus1) {
+ unsigned char *tail = channel->rd_buffers[bufidx]->addr +
+ (end_offset_plus1 << channel->log2_element_size);
+
+ /* Copy unflushed data, so we can put it in next buffer */
+ for (i = 0; i < new_rd_host_buf_pos; i++)
+ channel->rd_leftovers[i] = *tail++;
+
+ spin_lock_irqsave(&channel->rd_spinlock, flags);
+
+ /* Autoflush only if a single buffer is occupied */
+
+ if ((timeout < 0) &&
+ (channel->rd_full ||
+ (bufidx_minus1 != channel->rd_fpga_buf_idx))) {
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+ /*
+ * A new work item may be queued by the ISR exactly
+ * now, since the execution of a work item allows the
+ * queuing of a new one while it's running.
+ */
+ goto done;
+ }
+
+ /* The 4th element is never needed for data, so it's a flag */
+ channel->rd_leftovers[3] = (new_rd_host_buf_pos != 0);
+
+ /* Set up rd_full to reflect a certain moment's state */
+
+ if (bufidx == channel->rd_fpga_buf_idx)
+ channel->rd_full = 1;
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+
+ if (bufidx >= (channel->num_rd_buffers - 1))
+ channel->rd_host_buf_idx = 0;
+ else
+ channel->rd_host_buf_idx++;
+
+ dma_sync_single_for_device(channel->endpoint->dev,
+ channel->rd_buffers[bufidx]->dma_addr,
+ channel->rd_buf_size,
+ DMA_TO_DEVICE);
+
+ mutex_lock(&channel->endpoint->register_mutex);
+
+ iowrite32(end_offset_plus1 - 1,
+ channel->endpoint->registers + fpga_buf_offset_reg);
+
+ iowrite32((channel->chan_num << 1) | /* Channel ID */
+ (2 << 24) | /* Opcode 2, submit buffer */
+ (bufidx << 12),
+ channel->endpoint->registers + fpga_buf_ctrl_reg);
+
+ mutex_unlock(&channel->endpoint->register_mutex);
+ } else if (bufidx == 0) {
+ bufidx = channel->num_rd_buffers - 1;
+ } else {
+ bufidx--;
+ }
+
+ channel->rd_host_buf_pos = new_rd_host_buf_pos;
+
+ if (timeout < 0)
+ goto done; /* Autoflush */
+
+ /*
+ * bufidx is now the last buffer written to (or equal to
+ * rd_fpga_buf_idx if buffer was never written to), and
+ * channel->rd_host_buf_idx the one after it.
+ *
+ * If bufidx == channel->rd_fpga_buf_idx we're either empty or full.
+ */
+
+ while (1) { /* Loop waiting for draining of buffers */
+ spin_lock_irqsave(&channel->rd_spinlock, flags);
+
+ if (bufidx != channel->rd_fpga_buf_idx)
+ channel->rd_full = 1; /*
+ * Not really full,
+ * but needs waiting.
+ */
+
+ empty = !channel->rd_full;
+
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+
+ if (empty)
+ break;
+
+ /*
+ * Indefinite sleep with mutex taken. With data waiting for
+		 * flushing, user should not be surprised if open() for write
+ * sleeps.
+ */
+ if (timeout == 0)
+ wait_event_interruptible(channel->rd_wait,
+ (!channel->rd_full));
+
+ else if (wait_event_interruptible_timeout(
+ channel->rd_wait,
+ (!channel->rd_full),
+ timeout) == 0) {
+ dev_warn(channel->endpoint->dev,
+ "Timed out while flushing. Output data may be lost.\n");
+
+ rc = -ETIMEDOUT;
+ break;
+ }
+
+ if (channel->rd_full) {
+ rc = -EINTR;
+ break;
+ }
+ }
+
+done:
+ mutex_unlock(&channel->rd_mutex);
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ return rc;
+}
+
+static int xillybus_flush(struct file *filp, fl_owner_t id)
+{
+ if (!(filp->f_mode & FMODE_WRITE))
+ return 0;
+
+ return xillybus_myflush(filp->private_data, HZ); /* 1 second timeout */
+}
+
+static void xillybus_autoflush(struct work_struct *work)
+{
+ struct delayed_work *workitem = container_of(
+ work, struct delayed_work, work);
+ struct xilly_channel *channel = container_of(
+ workitem, struct xilly_channel, rd_workitem);
+ int rc;
+
+ rc = xillybus_myflush(channel, -1);
+ if (rc == -EINTR)
+ dev_warn(channel->endpoint->dev,
+ "Autoflush failed because work queue thread got a signal.\n");
+ else if (rc)
+ dev_err(channel->endpoint->dev,
+ "Autoflush failed under weird circumstances.\n");
+}
+
+static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
+ size_t count, loff_t *f_pos)
+{
+ ssize_t rc;
+ unsigned long flags;
+ int bytes_done = 0;
+ struct xilly_channel *channel = filp->private_data;
+
+ int full, exhausted;
+ /* Initializations are there only to silence warnings */
+
+ int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
+ int end_offset_plus1 = 0;
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ rc = mutex_lock_interruptible(&channel->rd_mutex);
+ if (rc)
+ return rc;
+
+ while (1) {
+ int bytes_to_do = count - bytes_done;
+
+ spin_lock_irqsave(&channel->rd_spinlock, flags);
+
+ full = channel->rd_full;
+
+ if (!full) {
+ bufidx = channel->rd_host_buf_idx;
+ bufpos = channel->rd_host_buf_pos;
+ howmany = channel->rd_buf_size - bufpos;
+
+ /*
+ * Update rd_host_* to its state after this operation.
+ * count=0 means committing the buffer immediately,
+			 * which is like flushing, but doesn't necessarily block.
+ */
+
+ if ((howmany > bytes_to_do) &&
+ (count ||
+ ((bufpos >> channel->log2_element_size) == 0))) {
+ bufferdone = 0;
+
+ howmany = bytes_to_do;
+ channel->rd_host_buf_pos += howmany;
+ } else {
+ bufferdone = 1;
+
+ if (count) {
+ end_offset_plus1 =
+ channel->rd_buf_size >>
+ channel->log2_element_size;
+ channel->rd_host_buf_pos = 0;
+ } else {
+ unsigned char *tail;
+ int i;
+
+ howmany = 0;
+
+ end_offset_plus1 = bufpos >>
+ channel->log2_element_size;
+
+ channel->rd_host_buf_pos -=
+ end_offset_plus1 <<
+ channel->log2_element_size;
+
+ tail = channel->
+ rd_buffers[bufidx]->addr +
+ (end_offset_plus1 <<
+ channel->log2_element_size);
+
+ for (i = 0;
+ i < channel->rd_host_buf_pos;
+ i++)
+ channel->rd_leftovers[i] =
+ *tail++;
+ }
+
+ if (bufidx == channel->rd_fpga_buf_idx)
+ channel->rd_full = 1;
+
+ if (bufidx >= (channel->num_rd_buffers - 1))
+ channel->rd_host_buf_idx = 0;
+ else
+ channel->rd_host_buf_idx++;
+ }
+ }
+
+ /*
+ * Marking our situation after the possible changes above,
+ * for use after releasing the spinlock.
+ *
+ * full = full before change
+		 * exhausted = full after possible change
+ */
+
+ exhausted = channel->rd_full;
+
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+
+ if (!full) { /* Go on, now without the spinlock */
+ unsigned char *head =
+ channel->rd_buffers[bufidx]->addr;
+ int i;
+
+ if ((bufpos == 0) || /* Zero means it's virgin */
+ (channel->rd_leftovers[3] != 0)) {
+ dma_sync_single_for_cpu(channel->endpoint->dev,
+ channel->rd_buffers[bufidx]->dma_addr,
+ channel->rd_buf_size,
+ DMA_TO_DEVICE);
+
+ /* Virgin, but leftovers are due */
+ for (i = 0; i < bufpos; i++)
+ *head++ = channel->rd_leftovers[i];
+
+ channel->rd_leftovers[3] = 0; /* Clear flag */
+ }
+
+ if (copy_from_user(
+ channel->rd_buffers[bufidx]->addr + bufpos,
+ userbuf, howmany))
+ rc = -EFAULT;
+
+ userbuf += howmany;
+ bytes_done += howmany;
+
+ if (bufferdone) {
+ dma_sync_single_for_device(channel->endpoint->dev,
+ channel->rd_buffers[bufidx]->dma_addr,
+ channel->rd_buf_size,
+ DMA_TO_DEVICE);
+
+ mutex_lock(&channel->endpoint->register_mutex);
+
+ iowrite32(end_offset_plus1 - 1,
+ channel->endpoint->registers +
+ fpga_buf_offset_reg);
+
+ iowrite32((channel->chan_num << 1) |
+ (2 << 24) | /* 2 = submit buffer */
+ (bufidx << 12),
+ channel->endpoint->registers +
+ fpga_buf_ctrl_reg);
+
+ mutex_unlock(&channel->endpoint->
+ register_mutex);
+
+ channel->rd_leftovers[3] =
+ (channel->rd_host_buf_pos != 0);
+ }
+
+ if (rc) {
+ mutex_unlock(&channel->rd_mutex);
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ if (!channel->rd_synchronous)
+ queue_delayed_work(
+ xillybus_wq,
+ &channel->rd_workitem,
+ XILLY_RX_TIMEOUT);
+
+ return rc;
+ }
+ }
+
+ if (bytes_done >= count)
+ break;
+
+ if (!exhausted)
+ continue; /* If there's more space, just go on */
+
+ if ((bytes_done > 0) && channel->rd_allow_partial)
+ break;
+
+ /*
+ * Indefinite sleep with mutex taken. With data waiting for
+ * flushing, user should not be surprised if open() for write
+ * sleeps.
+ */
+
+ if (filp->f_flags & O_NONBLOCK) {
+ rc = -EAGAIN;
+ break;
+ }
+
+ if (wait_event_interruptible(channel->rd_wait,
+ (!channel->rd_full))) {
+ mutex_unlock(&channel->rd_mutex);
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ if (bytes_done)
+ return bytes_done;
+ return -EINTR;
+ }
+ }
+
+ mutex_unlock(&channel->rd_mutex);
+
+ if (!channel->rd_synchronous)
+ queue_delayed_work(xillybus_wq,
+ &channel->rd_workitem,
+ XILLY_RX_TIMEOUT);
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ if (rc)
+ return rc;
+
+ if ((channel->rd_synchronous) && (bytes_done > 0)) {
+ rc = xillybus_myflush(filp->private_data, 0); /* No timeout */
+
+ if (rc && (rc != -EINTR))
+ return rc;
+ }
+
+ return bytes_done;
+}
+
+static int xillybus_open(struct inode *inode, struct file *filp)
+{
+ int rc;
+ unsigned long flags;
+ struct xilly_endpoint *endpoint;
+ struct xilly_channel *channel;
+ int index;
+
+ rc = xillybus_find_inode(inode, (void **)&endpoint, &index);
+ if (rc)
+ return rc;
+
+ if (endpoint->fatal_error)
+ return -EIO;
+
+ channel = endpoint->channels[1 + index];
+ filp->private_data = channel;
+
+ /*
+ * It gets complicated because:
+ * 1. We don't want to take a mutex we don't have to
+ * 2. We don't want to open one direction if the other will fail.
+ */
+
+ if ((filp->f_mode & FMODE_READ) && (!channel->num_wr_buffers))
+ return -ENODEV;
+
+ if ((filp->f_mode & FMODE_WRITE) && (!channel->num_rd_buffers))
+ return -ENODEV;
+
+ if ((filp->f_mode & FMODE_READ) && (filp->f_flags & O_NONBLOCK) &&
+ (channel->wr_synchronous || !channel->wr_allow_partial ||
+ !channel->wr_supports_nonempty)) {
+ dev_err(endpoint->dev,
+ "open() failed: O_NONBLOCK not allowed for read on this device\n");
+ return -ENODEV;
+ }
+
+ if ((filp->f_mode & FMODE_WRITE) && (filp->f_flags & O_NONBLOCK) &&
+ (channel->rd_synchronous || !channel->rd_allow_partial)) {
+ dev_err(endpoint->dev,
+ "open() failed: O_NONBLOCK not allowed for write on this device\n");
+ return -ENODEV;
+ }
+
+ /*
+	 * Note: open() may block on getting mutexes despite O_NONBLOCK.
+	 * This shouldn't occur normally, since opening the same device
+	 * file multiple times is almost always prohibited anyhow
+	 * (*_exclusive_open is normally set in real-life systems).
+ */
+
+ if (filp->f_mode & FMODE_READ) {
+ rc = mutex_lock_interruptible(&channel->wr_mutex);
+ if (rc)
+ return rc;
+ }
+
+ if (filp->f_mode & FMODE_WRITE) {
+ rc = mutex_lock_interruptible(&channel->rd_mutex);
+ if (rc)
+ goto unlock_wr;
+ }
+
+ if ((filp->f_mode & FMODE_READ) &&
+ (channel->wr_ref_count != 0) &&
+ (channel->wr_exclusive_open)) {
+ rc = -EBUSY;
+ goto unlock;
+ }
+
+ if ((filp->f_mode & FMODE_WRITE) &&
+ (channel->rd_ref_count != 0) &&
+ (channel->rd_exclusive_open)) {
+ rc = -EBUSY;
+ goto unlock;
+ }
+
+ if (filp->f_mode & FMODE_READ) {
+ if (channel->wr_ref_count == 0) { /* First open of file */
+ /* Move the host to first buffer */
+ spin_lock_irqsave(&channel->wr_spinlock, flags);
+ channel->wr_host_buf_idx = 0;
+ channel->wr_host_buf_pos = 0;
+ channel->wr_fpga_buf_idx = -1;
+ channel->wr_empty = 1;
+ channel->wr_ready = 0;
+ channel->wr_sleepy = 1;
+ channel->wr_eof = -1;
+ channel->wr_hangup = 0;
+
+ spin_unlock_irqrestore(&channel->wr_spinlock, flags);
+
+ iowrite32(1 | (channel->chan_num << 1) |
+ (4 << 24) | /* Opcode 4, open channel */
+ ((channel->wr_synchronous & 1) << 23),
+ channel->endpoint->registers +
+ fpga_buf_ctrl_reg);
+ }
+
+ channel->wr_ref_count++;
+ }
+
+ if (filp->f_mode & FMODE_WRITE) {
+ if (channel->rd_ref_count == 0) { /* First open of file */
+ /* Move the host to first buffer */
+ spin_lock_irqsave(&channel->rd_spinlock, flags);
+ channel->rd_host_buf_idx = 0;
+ channel->rd_host_buf_pos = 0;
+ channel->rd_leftovers[3] = 0; /* No leftovers. */
+ channel->rd_fpga_buf_idx = channel->num_rd_buffers - 1;
+ channel->rd_full = 0;
+
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+
+ iowrite32((channel->chan_num << 1) |
+ (4 << 24), /* Opcode 4, open channel */
+ channel->endpoint->registers +
+ fpga_buf_ctrl_reg);
+ }
+
+ channel->rd_ref_count++;
+ }
+
+unlock:
+ if (filp->f_mode & FMODE_WRITE)
+ mutex_unlock(&channel->rd_mutex);
+unlock_wr:
+ if (filp->f_mode & FMODE_READ)
+ mutex_unlock(&channel->wr_mutex);
+
+ if (!rc && (!channel->seekable))
+ return nonseekable_open(inode, filp);
+
+ return rc;
+}
+
+static int xillybus_release(struct inode *inode, struct file *filp)
+{
+ unsigned long flags;
+ struct xilly_channel *channel = filp->private_data;
+
+ int buf_idx;
+ int eof;
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ if (filp->f_mode & FMODE_WRITE) {
+ mutex_lock(&channel->rd_mutex);
+
+ channel->rd_ref_count--;
+
+ if (channel->rd_ref_count == 0) {
+ /*
+ * We rely on the kernel calling flush()
+ * before we get here.
+ */
+
+ iowrite32((channel->chan_num << 1) | /* Channel ID */
+ (5 << 24), /* Opcode 5, close channel */
+ channel->endpoint->registers +
+ fpga_buf_ctrl_reg);
+ }
+ mutex_unlock(&channel->rd_mutex);
+ }
+
+ if (filp->f_mode & FMODE_READ) {
+ mutex_lock(&channel->wr_mutex);
+
+ channel->wr_ref_count--;
+
+ if (channel->wr_ref_count == 0) {
+ iowrite32(1 | (channel->chan_num << 1) |
+ (5 << 24), /* Opcode 5, close channel */
+ channel->endpoint->registers +
+ fpga_buf_ctrl_reg);
+
+ /*
+			 * This is crazily cautious: We make sure not only
+			 * that we got an EOF (be it because we closed the
+			 * channel or because of a user's EOF), but also that
+			 * it points one buffer beyond the last one that
+			 * arrived, so no leftover buffers are pending before
+			 * wrapping up (which can only happen in asynchronous
+			 * channels, BTW).
+ */
+
+ while (1) {
+ spin_lock_irqsave(&channel->wr_spinlock,
+ flags);
+ buf_idx = channel->wr_fpga_buf_idx;
+ eof = channel->wr_eof;
+ channel->wr_sleepy = 1;
+ spin_unlock_irqrestore(&channel->wr_spinlock,
+ flags);
+
+ /*
+				 * Check if eof points at the buffer after
+				 * the last one the FPGA submitted. Note that
+				 * a negative eof means no EOF has arrived.
+ */
+
+ buf_idx++;
+ if (buf_idx == channel->num_wr_buffers)
+ buf_idx = 0;
+
+ if (buf_idx == eof)
+ break;
+
+ /*
+				 * Steal an extra 100 ms if awakened by an interrupt.
+ * This is a simple workaround for an
+ * interrupt pending when entering, which would
+ * otherwise result in declaring the hardware
+ * non-responsive.
+ */
+
+ if (wait_event_interruptible(
+ channel->wr_wait,
+ (!channel->wr_sleepy)))
+ msleep(100);
+
+ if (channel->wr_sleepy) {
+ mutex_unlock(&channel->wr_mutex);
+ dev_warn(channel->endpoint->dev,
+ "Hardware failed to respond to close command, therefore left in messy state.\n");
+ return -EINTR;
+ }
+ }
+ }
+
+ mutex_unlock(&channel->wr_mutex);
+ }
+
+ return 0;
+}
+
+static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence)
+{
+ struct xilly_channel *channel = filp->private_data;
+ loff_t pos = filp->f_pos;
+ int rc = 0;
+
+ /*
+ * Take both mutexes not allowing interrupts, since it seems like
+ * common applications don't expect an -EINTR here. Besides, multiple
+ * access to a single file descriptor on seekable devices is a mess
+ * anyhow.
+ */
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ mutex_lock(&channel->wr_mutex);
+ mutex_lock(&channel->rd_mutex);
+
+ switch (whence) {
+ case SEEK_SET:
+ pos = offset;
+ break;
+ case SEEK_CUR:
+ pos += offset;
+ break;
+ case SEEK_END:
+ pos = offset; /* Going to the end => to the beginning */
+ break;
+ default:
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* In any case, we must finish on an element boundary */
+ if (pos & ((1 << channel->log2_element_size) - 1)) {
+ rc = -EINVAL;
+ goto end;
+ }
+
+ mutex_lock(&channel->endpoint->register_mutex);
+
+ iowrite32(pos >> channel->log2_element_size,
+ channel->endpoint->registers + fpga_buf_offset_reg);
+
+ iowrite32((channel->chan_num << 1) |
+ (6 << 24), /* Opcode 6, set address */
+ channel->endpoint->registers + fpga_buf_ctrl_reg);
+
+ mutex_unlock(&channel->endpoint->register_mutex);
+
+end:
+ mutex_unlock(&channel->rd_mutex);
+ mutex_unlock(&channel->wr_mutex);
+
+ if (rc) /* Return error after releasing mutexes */
+ return rc;
+
+ filp->f_pos = pos;
+
+ /*
+ * Since seekable devices are allowed only when the channel is
+ * synchronous, we assume that there is no data pending in either
+ * direction (which holds true as long as no concurrent access on the
+ * file descriptor takes place).
+ * The only thing we may need to throw away is leftovers from partial
+ * write() flush.
+ */
+
+ channel->rd_leftovers[3] = 0;
+
+ return pos;
+}
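For illustration only, the element-boundary test used above can be exercised standalone; the 4-byte element size below is just an example value, not something the driver fixes.

#include <stdio.h>

/* Nonzero if pos does not fall on an element boundary */
static int misaligned(long long pos, int log2_element_size)
{
	return (pos & ((1LL << log2_element_size) - 1)) != 0;
}

int main(void)
{
	/* With 4-byte elements (log2 size 2): 6 is rejected, 8 is accepted */
	printf("%d %d\n", misaligned(6, 2), misaligned(8, 2));
	return 0;
}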
+
+static __poll_t xillybus_poll(struct file *filp, poll_table *wait)
+{
+ struct xilly_channel *channel = filp->private_data;
+ __poll_t mask = 0;
+ unsigned long flags;
+
+ poll_wait(filp, &channel->endpoint->ep_wait, wait);
+
+ /*
+	 * poll() won't play ball on read() channels that are synchronous
+	 * or don't support the nonempty message. Allowing that would
+	 * create situations where data has been delivered at the FPGA,
+	 * with the user expecting select() to wake up, which it may
+	 * not.
+ */
+
+ if (!channel->wr_synchronous && channel->wr_supports_nonempty) {
+ poll_wait(filp, &channel->wr_wait, wait);
+ poll_wait(filp, &channel->wr_ready_wait, wait);
+
+ spin_lock_irqsave(&channel->wr_spinlock, flags);
+ if (!channel->wr_empty || channel->wr_ready)
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ if (channel->wr_hangup)
+ /*
+			 * Not EPOLLHUP, because its behavior is murky,
+			 * and EPOLLIN does what we want: Wake up the
+			 * reading file descriptor so it sees the EOF.
+ */
+ mask |= EPOLLIN | EPOLLRDNORM;
+ spin_unlock_irqrestore(&channel->wr_spinlock, flags);
+ }
+
+ /*
+	 * If a partial write is disallowed on a write() channel,
+	 * it's pointless to ever signal OK to write, because it could
+	 * block despite some space being available.
+ */
+
+ if (channel->rd_allow_partial) {
+ poll_wait(filp, &channel->rd_wait, wait);
+
+ spin_lock_irqsave(&channel->rd_spinlock, flags);
+ if (!channel->rd_full)
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+ }
+
+ if (channel->endpoint->fatal_error)
+ mask |= EPOLLERR;
+
+ return mask;
+}
+
+static const struct file_operations xillybus_fops = {
+ .owner = THIS_MODULE,
+ .read = xillybus_read,
+ .write = xillybus_write,
+ .open = xillybus_open,
+ .flush = xillybus_flush,
+ .release = xillybus_release,
+ .llseek = xillybus_llseek,
+ .poll = xillybus_poll,
+};
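To show how these file operations are reached from user space, here is a minimal hypothetical sketch. The device file name is an assumption: the actual names come from the IDT supplied by the FPGA.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd = open("/dev/xillybus_read_32", O_RDONLY); /* Hypothetical name */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	n = read(fd, buf, sizeof(buf)); /* Served by xillybus_read() above */
	if (n > 0)
		printf("Got %zd bytes from the FPGA\n", n);

	close(fd); /* Ends up in xillybus_release() once all references are gone */
	return 0;
}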
+
+struct xilly_endpoint *xillybus_init_endpoint(struct device *dev)
+{
+ struct xilly_endpoint *endpoint;
+
+ endpoint = devm_kzalloc(dev, sizeof(*endpoint), GFP_KERNEL);
+ if (!endpoint)
+ return NULL;
+
+ endpoint->dev = dev;
+ endpoint->msg_counter = 0x0b;
+ endpoint->failed_messages = 0;
+ endpoint->fatal_error = 0;
+
+ init_waitqueue_head(&endpoint->ep_wait);
+ mutex_init(&endpoint->register_mutex);
+
+ return endpoint;
+}
+EXPORT_SYMBOL(xillybus_init_endpoint);
+
+static int xilly_quiesce(struct xilly_endpoint *endpoint)
+{
+ long t;
+
+ endpoint->idtlen = -1;
+
+ iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
+ endpoint->registers + fpga_dma_control_reg);
+
+ t = wait_event_interruptible_timeout(endpoint->ep_wait,
+ (endpoint->idtlen >= 0),
+ XILLY_TIMEOUT);
+ if (t <= 0) {
+ dev_err(endpoint->dev,
+ "Failed to quiesce the device on exit.\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
+{
+ int rc;
+ long t;
+
+ void *bootstrap_resources;
+ int idtbuffersize = (1 << PAGE_SHIFT);
+ struct device *dev = endpoint->dev;
+
+ /*
+ * The bogus IDT is used during bootstrap for allocating the initial
+ * message buffer, and then the message buffer and space for the IDT
+ * itself. The initial message buffer is of a single page's size, but
+ * it's soon replaced with a more modest one (and memory is freed).
+ */
+
+ unsigned char bogus_idt[8] = { 1, 224, (PAGE_SHIFT)-2, 0,
+ 3, 192, PAGE_SHIFT, 0 };
+ struct xilly_idt_handle idt_handle;
+
+ /*
+	 * Writing the value 0x00000001 to the Endianness register signals which
+ * endianness this processor is using, so the FPGA can swap words as
+ * necessary.
+ */
+
+ iowrite32(1, endpoint->registers + fpga_endian_reg);
+
+ /* Bootstrap phase I: Allocate temporary message buffer */
+
+ bootstrap_resources = devres_open_group(dev, NULL, GFP_KERNEL);
+ if (!bootstrap_resources)
+ return -ENOMEM;
+
+ endpoint->num_channels = 0;
+
+ rc = xilly_setupchannels(endpoint, bogus_idt, 1);
+ if (rc)
+ return rc;
+
+ /* Clear the message subsystem (and counter in particular) */
+ iowrite32(0x04, endpoint->registers + fpga_msg_ctrl_reg);
+
+ endpoint->idtlen = -1;
+
+ /*
+ * Set DMA 32/64 bit mode, quiesce the device (?!) and get IDT
+ * buffer size.
+ */
+ iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
+ endpoint->registers + fpga_dma_control_reg);
+
+ t = wait_event_interruptible_timeout(endpoint->ep_wait,
+ (endpoint->idtlen >= 0),
+ XILLY_TIMEOUT);
+ if (t <= 0) {
+ dev_err(endpoint->dev, "No response from FPGA. Aborting.\n");
+ return -ENODEV;
+ }
+
+ /* Enable DMA */
+ iowrite32((u32) (0x0002 | (endpoint->dma_using_dac & 0x0001)),
+ endpoint->registers + fpga_dma_control_reg);
+
+ /* Bootstrap phase II: Allocate buffer for IDT and obtain it */
+ while (endpoint->idtlen >= idtbuffersize) {
+ idtbuffersize *= 2;
+ bogus_idt[6]++;
+ }
+
+ endpoint->num_channels = 1;
+
+ rc = xilly_setupchannels(endpoint, bogus_idt, 2);
+ if (rc)
+ goto failed_idt;
+
+ rc = xilly_obtain_idt(endpoint);
+ if (rc)
+ goto failed_idt;
+
+ rc = xilly_scan_idt(endpoint, &idt_handle);
+ if (rc)
+ goto failed_idt;
+
+ devres_close_group(dev, bootstrap_resources);
+
+ /* Bootstrap phase III: Allocate buffers according to IDT */
+
+ rc = xilly_setupchannels(endpoint,
+ idt_handle.chandesc,
+ idt_handle.entries);
+ if (rc)
+ goto failed_idt;
+
+ rc = xillybus_init_chrdev(dev, &xillybus_fops,
+ endpoint->owner, endpoint,
+ idt_handle.names,
+ idt_handle.names_len,
+ endpoint->num_channels,
+ xillyname, false);
+
+ if (rc)
+ goto failed_idt;
+
+ devres_release_group(dev, bootstrap_resources);
+
+ return 0;
+
+failed_idt:
+ xilly_quiesce(endpoint);
+ flush_workqueue(xillybus_wq);
+
+ return rc;
+}
+EXPORT_SYMBOL(xillybus_endpoint_discovery);
+
+void xillybus_endpoint_remove(struct xilly_endpoint *endpoint)
+{
+ xillybus_cleanup_chrdev(endpoint, endpoint->dev);
+
+ xilly_quiesce(endpoint);
+
+ /*
+ * Flushing is done upon endpoint release to prevent access to memory
+ * just about to be released. This makes the quiesce complete.
+ */
+ flush_workqueue(xillybus_wq);
+}
+EXPORT_SYMBOL(xillybus_endpoint_remove);
+
+static int __init xillybus_init(void)
+{
+ xillybus_wq = alloc_workqueue(xillyname, 0, 0);
+ if (!xillybus_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __exit xillybus_exit(void)
+{
+ /* flush_workqueue() was called for each endpoint released */
+ destroy_workqueue(xillybus_wq);
+}
+
+module_init(xillybus_init);
+module_exit(xillybus_exit);
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
new file mode 100644
index 0000000000..e5372e45d2
--- /dev/null
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/misc/xillybus_of.c
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the Xillybus FPGA/host framework using Open Firmware.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include "xillybus.h"
+
+MODULE_DESCRIPTION("Xillybus driver for Open Firmware");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_ALIAS("xillybus_of");
+MODULE_LICENSE("GPL v2");
+
+static const char xillyname[] = "xillybus_of";
+
+/* Match table for of_platform binding */
+static const struct of_device_id xillybus_of_match[] = {
+ { .compatible = "xillybus,xillybus-1.00.a", },
+ { .compatible = "xlnx,xillybus-1.00.a", }, /* Deprecated */
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, xillybus_of_match);
+
+static int xilly_drv_probe(struct platform_device *op)
+{
+ struct device *dev = &op->dev;
+ struct xilly_endpoint *endpoint;
+ int rc;
+ int irq;
+
+ endpoint = xillybus_init_endpoint(dev);
+
+ if (!endpoint)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, endpoint);
+
+ endpoint->owner = THIS_MODULE;
+
+ endpoint->registers = devm_platform_ioremap_resource(op, 0);
+ if (IS_ERR(endpoint->registers))
+ return PTR_ERR(endpoint->registers);
+
+ irq = platform_get_irq(op, 0);
+
+ rc = devm_request_irq(dev, irq, xillybus_isr, 0, xillyname, endpoint);
+
+ if (rc) {
+ dev_err(endpoint->dev,
+ "Failed to register IRQ handler. Aborting.\n");
+ return -ENODEV;
+ }
+
+ return xillybus_endpoint_discovery(endpoint);
+}
+
+static int xilly_drv_remove(struct platform_device *op)
+{
+ struct device *dev = &op->dev;
+ struct xilly_endpoint *endpoint = dev_get_drvdata(dev);
+
+ xillybus_endpoint_remove(endpoint);
+
+ return 0;
+}
+
+static struct platform_driver xillybus_platform_driver = {
+ .probe = xilly_drv_probe,
+ .remove = xilly_drv_remove,
+ .driver = {
+ .name = xillyname,
+ .of_match_table = xillybus_of_match,
+ },
+};
+
+module_platform_driver(xillybus_platform_driver);
diff --git a/drivers/char/xillybus/xillybus_pcie.c b/drivers/char/xillybus/xillybus_pcie.c
new file mode 100644
index 0000000000..9858711e3e
--- /dev/null
+++ b/drivers/char/xillybus/xillybus_pcie.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/misc/xillybus_pcie.c
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the Xillybus FPGA/host framework using PCI Express.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "xillybus.h"
+
+MODULE_DESCRIPTION("Xillybus driver for PCIe");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_ALIAS("xillybus_pcie");
+MODULE_LICENSE("GPL v2");
+
+#define PCI_DEVICE_ID_XILLYBUS 0xebeb
+
+#define PCI_VENDOR_ID_ACTEL 0x11aa
+#define PCI_VENDOR_ID_LATTICE 0x1204
+
+static const char xillyname[] = "xillybus_pcie";
+
+static const struct pci_device_id xillyids[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_XILLYBUS)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ALTERA, PCI_DEVICE_ID_XILLYBUS)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ACTEL, PCI_DEVICE_ID_XILLYBUS)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LATTICE, PCI_DEVICE_ID_XILLYBUS)},
+ { /* End: all zeroes */ }
+};
+
+static int xilly_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct xilly_endpoint *endpoint;
+ int rc;
+
+ endpoint = xillybus_init_endpoint(&pdev->dev);
+
+ if (!endpoint)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, endpoint);
+
+ endpoint->owner = THIS_MODULE;
+
+ rc = pcim_enable_device(pdev);
+ if (rc) {
+ dev_err(endpoint->dev,
+ "pcim_enable_device() failed. Aborting.\n");
+ return rc;
+ }
+
+ /* L0s has caused packet drops. No power saving, thank you. */
+
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(endpoint->dev,
+ "Incorrect BAR configuration. Aborting.\n");
+ return -ENODEV;
+ }
+
+ rc = pcim_iomap_regions(pdev, 0x01, xillyname);
+ if (rc) {
+ dev_err(endpoint->dev,
+ "pcim_iomap_regions() failed. Aborting.\n");
+ return rc;
+ }
+
+ endpoint->registers = pcim_iomap_table(pdev)[0];
+
+ pci_set_master(pdev);
+
+ /* Set up a single MSI interrupt */
+ if (pci_enable_msi(pdev)) {
+ dev_err(endpoint->dev,
+ "Failed to enable MSI interrupts. Aborting.\n");
+ return -ENODEV;
+ }
+ rc = devm_request_irq(&pdev->dev, pdev->irq, xillybus_isr, 0,
+ xillyname, endpoint);
+ if (rc) {
+ dev_err(endpoint->dev,
+ "Failed to register MSI handler. Aborting.\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Some (old and buggy?) hardware drops 64-bit addressed PCIe packets,
+ * even when the PCIe driver claims that a 64-bit mask is OK. On the
+ * other hand, on some architectures, 64-bit addressing is mandatory.
+ * So go for the 64-bit mask only when failing is the other option.
+ */
+
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ endpoint->dma_using_dac = 0;
+ } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ endpoint->dma_using_dac = 1;
+ } else {
+ dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n");
+ return -ENODEV;
+ }
+
+ return xillybus_endpoint_discovery(endpoint);
+}
+
+static void xilly_remove(struct pci_dev *pdev)
+{
+ struct xilly_endpoint *endpoint = pci_get_drvdata(pdev);
+
+ xillybus_endpoint_remove(endpoint);
+}
+
+MODULE_DEVICE_TABLE(pci, xillyids);
+
+static struct pci_driver xillybus_driver = {
+ .name = xillyname,
+ .id_table = xillyids,
+ .probe = xilly_probe,
+ .remove = xilly_remove,
+};
+
+module_pci_driver(xillybus_driver);
diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c
new file mode 100644
index 0000000000..5a5afa14ca
--- /dev/null
+++ b/drivers/char/xillybus/xillyusb.c
@@ -0,0 +1,2277 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the XillyUSB FPGA/host framework.
+ *
+ * This driver interfaces with a special IP core in an FPGA, setting up
+ * a pipe between a hardware FIFO in the programmable logic and a device
+ * file in the host. The number of such pipes and their attributes are
+ * set up on the logic. This driver detects these automatically and
+ * creates the device files accordingly.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/crc32.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+
+#include "xillybus_class.h"
+
+MODULE_DESCRIPTION("Driver for XillyUSB FPGA IP Core");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_ALIAS("xillyusb");
+MODULE_LICENSE("GPL v2");
+
+#define XILLY_RX_TIMEOUT (10 * HZ / 1000)
+#define XILLY_RESPONSE_TIMEOUT (500 * HZ / 1000)
+
+#define BUF_SIZE_ORDER 4
+#define BUFNUM 8
+#define LOG2_IDT_FIFO_SIZE 16
+#define LOG2_INITIAL_FIFO_BUF_SIZE 16
+
+#define MSG_EP_NUM 1
+#define IN_EP_NUM 1
+
+static const char xillyname[] = "xillyusb";
+
+static unsigned int fifo_buf_order;
+
+#define USB_VENDOR_ID_XILINX 0x03fd
+#define USB_VENDOR_ID_ALTERA 0x09fb
+
+#define USB_PRODUCT_ID_XILLYUSB 0xebbe
+
+static const struct usb_device_id xillyusb_table[] = {
+ { USB_DEVICE(USB_VENDOR_ID_XILINX, USB_PRODUCT_ID_XILLYUSB) },
+ { USB_DEVICE(USB_VENDOR_ID_ALTERA, USB_PRODUCT_ID_XILLYUSB) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, xillyusb_table);
+
+struct xillyusb_dev;
+
+struct xillyfifo {
+ unsigned int bufsize; /* In bytes, always a power of 2 */
+ unsigned int bufnum;
+ unsigned int size; /* Lazy: Equals bufsize * bufnum */
+ unsigned int buf_order;
+
+ int fill; /* Number of bytes in the FIFO */
+ spinlock_t lock;
+ wait_queue_head_t waitq;
+
+ unsigned int readpos;
+ unsigned int readbuf;
+ unsigned int writepos;
+ unsigned int writebuf;
+ char **mem;
+};
+
+struct xillyusb_channel;
+
+struct xillyusb_endpoint {
+ struct xillyusb_dev *xdev;
+
+ struct mutex ep_mutex; /* serialize operations on endpoint */
+
+ struct list_head buffers;
+ struct list_head filled_buffers;
+ spinlock_t buffers_lock; /* protect these two lists */
+
+ unsigned int order;
+ unsigned int buffer_size;
+
+ unsigned int fill_mask;
+
+ int outstanding_urbs;
+
+ struct usb_anchor anchor;
+
+ struct xillyfifo fifo;
+
+ struct work_struct workitem;
+
+ bool shutting_down;
+ bool drained;
+ bool wake_on_drain;
+
+ u8 ep_num;
+};
+
+struct xillyusb_channel {
+ struct xillyusb_dev *xdev;
+
+ struct xillyfifo *in_fifo;
+ struct xillyusb_endpoint *out_ep;
+ struct mutex lock; /* protect @out_ep, @in_fifo, bit fields below */
+
+ struct mutex in_mutex; /* serialize fops on FPGA to host stream */
+ struct mutex out_mutex; /* serialize fops on host to FPGA stream */
+ wait_queue_head_t flushq;
+
+ int chan_idx;
+
+ u32 in_consumed_bytes;
+ u32 in_current_checkpoint;
+ u32 out_bytes;
+
+ unsigned int in_log2_element_size;
+ unsigned int out_log2_element_size;
+ unsigned int in_log2_fifo_size;
+ unsigned int out_log2_fifo_size;
+
+ unsigned int read_data_ok; /* EOF not arrived (yet) */
+ unsigned int poll_used;
+ unsigned int flushing;
+ unsigned int flushed;
+ unsigned int canceled;
+
+ /* Bit fields protected by @lock except for initialization */
+ unsigned readable:1;
+ unsigned writable:1;
+ unsigned open_for_read:1;
+ unsigned open_for_write:1;
+ unsigned in_synchronous:1;
+ unsigned out_synchronous:1;
+ unsigned in_seekable:1;
+ unsigned out_seekable:1;
+};
+
+struct xillybuffer {
+ struct list_head entry;
+ struct xillyusb_endpoint *ep;
+ void *buf;
+ unsigned int len;
+};
+
+struct xillyusb_dev {
+ struct xillyusb_channel *channels;
+
+ struct usb_device *udev;
+ struct device *dev; /* For dev_err() and such */
+ struct kref kref;
+ struct workqueue_struct *workq;
+
+ int error;
+ spinlock_t error_lock; /* protect @error */
+ struct work_struct wakeup_workitem;
+
+ int num_channels;
+
+ struct xillyusb_endpoint *msg_ep;
+ struct xillyusb_endpoint *in_ep;
+
+ struct mutex msg_mutex; /* serialize opcode transmission */
+ int in_bytes_left;
+ int leftover_chan_num;
+ unsigned int in_counter;
+ struct mutex process_in_mutex; /* synchronize wakeup_all() */
+};
+
+/*
+ * kref_mutex is used in xillyusb_open() to prevent the xillyusb_dev
+ * struct from being freed during the gap between being found by
+ * xillybus_find_inode() and having its reference count incremented.
+ */
+
+static DEFINE_MUTEX(kref_mutex);
+
+/* FPGA to host opcodes */
+enum {
+ OPCODE_DATA = 0,
+ OPCODE_QUIESCE_ACK = 1,
+ OPCODE_EOF = 2,
+ OPCODE_REACHED_CHECKPOINT = 3,
+ OPCODE_CANCELED_CHECKPOINT = 4,
+};
+
+/* Host to FPGA opcodes */
+enum {
+ OPCODE_QUIESCE = 0,
+ OPCODE_REQ_IDT = 1,
+ OPCODE_SET_CHECKPOINT = 2,
+ OPCODE_CLOSE = 3,
+ OPCODE_SET_PUSH = 4,
+ OPCODE_UPDATE_PUSH = 5,
+ OPCODE_CANCEL_CHECKPOINT = 6,
+ OPCODE_SET_ADDR = 7,
+};
+
+/*
+ * fifo_write() and fifo_read() are NOT reentrant (i.e. multiple concurrent
+ * calls to the same function on the same FIFO are not allowed); however,
+ * it's OK for two threads to call each of the two functions once on the
+ * same FIFO, at the same time.
+ */
+
+static int fifo_write(struct xillyfifo *fifo,
+ const void *data, unsigned int len,
+ int (*copier)(void *, const void *, int))
+{
+ unsigned int done = 0;
+ unsigned int todo = len;
+ unsigned int nmax;
+ unsigned int writepos = fifo->writepos;
+ unsigned int writebuf = fifo->writebuf;
+ unsigned long flags;
+ int rc;
+
+ nmax = fifo->size - READ_ONCE(fifo->fill);
+
+ while (1) {
+ unsigned int nrail = fifo->bufsize - writepos;
+ unsigned int n = min(todo, nmax);
+
+ if (n == 0) {
+ spin_lock_irqsave(&fifo->lock, flags);
+ fifo->fill += done;
+ spin_unlock_irqrestore(&fifo->lock, flags);
+
+ fifo->writepos = writepos;
+ fifo->writebuf = writebuf;
+
+ return done;
+ }
+
+ if (n > nrail)
+ n = nrail;
+
+ rc = (*copier)(fifo->mem[writebuf] + writepos, data + done, n);
+
+ if (rc)
+ return rc;
+
+ done += n;
+ todo -= n;
+
+ writepos += n;
+ nmax -= n;
+
+ if (writepos == fifo->bufsize) {
+ writepos = 0;
+ writebuf++;
+
+ if (writebuf == fifo->bufnum)
+ writebuf = 0;
+ }
+ }
+}
+
+static int fifo_read(struct xillyfifo *fifo,
+ void *data, unsigned int len,
+ int (*copier)(void *, const void *, int))
+{
+ unsigned int done = 0;
+ unsigned int todo = len;
+ unsigned int fill;
+ unsigned int readpos = fifo->readpos;
+ unsigned int readbuf = fifo->readbuf;
+ unsigned long flags;
+ int rc;
+
+ /*
+	 * The spinlock here is necessary, because otherwise fifo->fill
+	 * could have been increased by fifo_write() after writing data
+	 * to the buffer, while that data might not yet be visible to
+	 * this thread even though the updated fifo->fill is.
+	 * That could lead to reading invalid data.
+ */
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ fill = fifo->fill;
+ spin_unlock_irqrestore(&fifo->lock, flags);
+
+ while (1) {
+ unsigned int nrail = fifo->bufsize - readpos;
+ unsigned int n = min(todo, fill);
+
+ if (n == 0) {
+ spin_lock_irqsave(&fifo->lock, flags);
+ fifo->fill -= done;
+ spin_unlock_irqrestore(&fifo->lock, flags);
+
+ fifo->readpos = readpos;
+ fifo->readbuf = readbuf;
+
+ return done;
+ }
+
+ if (n > nrail)
+ n = nrail;
+
+ rc = (*copier)(data + done, fifo->mem[readbuf] + readpos, n);
+
+ if (rc)
+ return rc;
+
+ done += n;
+ todo -= n;
+
+ readpos += n;
+ fill -= n;
+
+ if (readpos == fifo->bufsize) {
+ readpos = 0;
+ readbuf++;
+
+ if (readbuf == fifo->bufnum)
+ readbuf = 0;
+ }
+ }
+}
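As a standalone model (not taken verbatim from the driver), this is the position/buffer wrap-around step that both fifo_write() and fifo_read() perform; the sizes in main() are arbitrary example values.

#include <stdio.h>

/*
 * The position advances within the current buffer (never past its end,
 * since the per-iteration count is clamped to the buffer's rail), and
 * rolling over to the next buffer wraps at bufnum.
 */
static void advance(unsigned int *pos, unsigned int *buf, unsigned int n,
		    unsigned int bufsize, unsigned int bufnum)
{
	*pos += n;
	if (*pos == bufsize) {
		*pos = 0;
		if (++(*buf) == bufnum)
			*buf = 0;
	}
}

int main(void)
{
	unsigned int pos = 4090, buf = 1;

	advance(&pos, &buf, 6, 4096, 2); /* Consume the rest of buffer 1 */
	printf("pos=%u buf=%u\n", pos, buf); /* Wraps back to buffer 0 */
	return 0;
}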
+
+/*
+ * These three wrapper functions are used as the @copier argument to
+ * fifo_write() and fifo_read(), so that they can work directly with
+ * user memory as well.
+ */
+
+static int xilly_copy_from_user(void *dst, const void *src, int n)
+{
+ if (copy_from_user(dst, (const void __user *)src, n))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int xilly_copy_to_user(void *dst, const void *src, int n)
+{
+ if (copy_to_user((void __user *)dst, src, n))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int xilly_memcpy(void *dst, const void *src, int n)
+{
+ memcpy(dst, src, n);
+
+ return 0;
+}
+
+static int fifo_init(struct xillyfifo *fifo,
+ unsigned int log2_size)
+{
+ unsigned int log2_bufnum;
+ unsigned int buf_order;
+ int i;
+
+ unsigned int log2_fifo_buf_size;
+
+retry:
+ log2_fifo_buf_size = fifo_buf_order + PAGE_SHIFT;
+
+ if (log2_size > log2_fifo_buf_size) {
+ log2_bufnum = log2_size - log2_fifo_buf_size;
+ buf_order = fifo_buf_order;
+ fifo->bufsize = 1 << log2_fifo_buf_size;
+ } else {
+ log2_bufnum = 0;
+ buf_order = (log2_size > PAGE_SHIFT) ?
+ log2_size - PAGE_SHIFT : 0;
+ fifo->bufsize = 1 << log2_size;
+ }
+
+ fifo->bufnum = 1 << log2_bufnum;
+ fifo->size = fifo->bufnum * fifo->bufsize;
+ fifo->buf_order = buf_order;
+
+ fifo->mem = kmalloc_array(fifo->bufnum, sizeof(void *), GFP_KERNEL);
+
+ if (!fifo->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < fifo->bufnum; i++) {
+ fifo->mem[i] = (void *)
+ __get_free_pages(GFP_KERNEL, buf_order);
+
+ if (!fifo->mem[i])
+ goto memfail;
+ }
+
+ fifo->fill = 0;
+ fifo->readpos = 0;
+ fifo->readbuf = 0;
+ fifo->writepos = 0;
+ fifo->writebuf = 0;
+ spin_lock_init(&fifo->lock);
+ init_waitqueue_head(&fifo->waitq);
+ return 0;
+
+memfail:
+ for (i--; i >= 0; i--)
+ free_pages((unsigned long)fifo->mem[i], buf_order);
+
+ kfree(fifo->mem);
+ fifo->mem = NULL;
+
+ if (fifo_buf_order) {
+ fifo_buf_order--;
+ goto retry;
+ } else {
+ return -ENOMEM;
+ }
+}
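A standalone model of fifo_init()'s sizing arithmetic may help here; it assumes 4 KiB pages, and the values in main() are arbitrary examples rather than anything the driver mandates.

#include <stdio.h>

#define MODEL_PAGE_SHIFT 12 /* Assuming 4 KiB pages for this model */

/*
 * A FIFO of 2^log2_size bytes is split into power-of-two buffers, each
 * capped at 2^(buf_order + PAGE_SHIFT) bytes; smaller FIFOs get a
 * single, smaller buffer.
 */
static void fifo_sizes(unsigned int log2_size, unsigned int buf_order,
		       unsigned int *bufnum, unsigned int *bufsize)
{
	unsigned int log2_fifo_buf_size = buf_order + MODEL_PAGE_SHIFT;

	if (log2_size > log2_fifo_buf_size) {
		*bufnum = 1 << (log2_size - log2_fifo_buf_size);
		*bufsize = 1 << log2_fifo_buf_size;
	} else {
		*bufnum = 1;
		*bufsize = 1 << log2_size;
	}
}

int main(void)
{
	unsigned int bufnum, bufsize;

	fifo_sizes(16, 2, &bufnum, &bufsize); /* 64 KiB FIFO, 16 KiB max buffers */
	printf("%u buffers of %u bytes\n", bufnum, bufsize);
	return 0;
}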
+
+static void fifo_mem_release(struct xillyfifo *fifo)
+{
+ int i;
+
+ if (!fifo->mem)
+ return;
+
+ for (i = 0; i < fifo->bufnum; i++)
+ free_pages((unsigned long)fifo->mem[i], fifo->buf_order);
+
+ kfree(fifo->mem);
+}
+
+/*
+ * When endpoint_quiesce() returns, the endpoint has no URBs submitted,
+ * won't accept any new URB submissions, and its related work item doesn't
+ * and won't run anymore.
+ */
+
+static void endpoint_quiesce(struct xillyusb_endpoint *ep)
+{
+ mutex_lock(&ep->ep_mutex);
+ ep->shutting_down = true;
+ mutex_unlock(&ep->ep_mutex);
+
+ usb_kill_anchored_urbs(&ep->anchor);
+ cancel_work_sync(&ep->workitem);
+}
+
+/*
+ * Note that endpoint_dealloc() also frees fifo memory (if allocated), even
+ * though endpoint_alloc doesn't allocate that memory.
+ */
+
+static void endpoint_dealloc(struct xillyusb_endpoint *ep)
+{
+ struct list_head *this, *next;
+
+ fifo_mem_release(&ep->fifo);
+
+ /* Join @filled_buffers with @buffers to free these entries too */
+ list_splice(&ep->filled_buffers, &ep->buffers);
+
+ list_for_each_safe(this, next, &ep->buffers) {
+ struct xillybuffer *xb =
+ list_entry(this, struct xillybuffer, entry);
+
+ free_pages((unsigned long)xb->buf, ep->order);
+ kfree(xb);
+ }
+
+ kfree(ep);
+}
+
+static struct xillyusb_endpoint
+*endpoint_alloc(struct xillyusb_dev *xdev,
+ u8 ep_num,
+ void (*work)(struct work_struct *),
+ unsigned int order,
+ int bufnum)
+{
+ int i;
+
+ struct xillyusb_endpoint *ep;
+
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+
+ if (!ep)
+ return NULL;
+
+ INIT_LIST_HEAD(&ep->buffers);
+ INIT_LIST_HEAD(&ep->filled_buffers);
+
+ spin_lock_init(&ep->buffers_lock);
+ mutex_init(&ep->ep_mutex);
+
+ init_usb_anchor(&ep->anchor);
+ INIT_WORK(&ep->workitem, work);
+
+ ep->order = order;
+ ep->buffer_size = 1 << (PAGE_SHIFT + order);
+ ep->outstanding_urbs = 0;
+ ep->drained = true;
+ ep->wake_on_drain = false;
+ ep->xdev = xdev;
+ ep->ep_num = ep_num;
+ ep->shutting_down = false;
+
+ for (i = 0; i < bufnum; i++) {
+ struct xillybuffer *xb;
+ unsigned long addr;
+
+ xb = kzalloc(sizeof(*xb), GFP_KERNEL);
+
+ if (!xb) {
+ endpoint_dealloc(ep);
+ return NULL;
+ }
+
+ addr = __get_free_pages(GFP_KERNEL, order);
+
+ if (!addr) {
+ kfree(xb);
+ endpoint_dealloc(ep);
+ return NULL;
+ }
+
+ xb->buf = (void *)addr;
+ xb->ep = ep;
+ list_add_tail(&xb->entry, &ep->buffers);
+ }
+ return ep;
+}
+
+static void cleanup_dev(struct kref *kref)
+{
+ struct xillyusb_dev *xdev =
+ container_of(kref, struct xillyusb_dev, kref);
+
+ if (xdev->in_ep)
+ endpoint_dealloc(xdev->in_ep);
+
+ if (xdev->msg_ep)
+ endpoint_dealloc(xdev->msg_ep);
+
+ if (xdev->workq)
+ destroy_workqueue(xdev->workq);
+
+ usb_put_dev(xdev->udev);
+ kfree(xdev->channels); /* Argument may be NULL, and that's fine */
+ kfree(xdev);
+}
+
+/*
+ * @process_in_mutex is taken to ensure that bulk_in_work() won't call
+ * process_bulk_in() after wakeup_all()'s execution: The latter zeroes all
+ * @read_data_ok entries, which will make process_bulk_in() report false
+ * errors if executed. The mechanism relies on xdev->error being assigned
+ * a non-zero value by report_io_error() prior to queueing wakeup_all(),
+ * which prevents bulk_in_work() from calling process_bulk_in().
+ *
+ * The fact that wakeup_all() and bulk_in_work() are queued on the same
+ * workqueue makes their concurrent execution very unlikely; however, the
+ * kernel's API doesn't seem to ensure this strictly.
+ */
+
+static void wakeup_all(struct work_struct *work)
+{
+ int i;
+ struct xillyusb_dev *xdev = container_of(work, struct xillyusb_dev,
+ wakeup_workitem);
+
+ mutex_lock(&xdev->process_in_mutex);
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ struct xillyusb_channel *chan = &xdev->channels[i];
+
+ mutex_lock(&chan->lock);
+
+ if (chan->in_fifo) {
+ /*
+ * Fake an EOF: Even if such arrives, it won't be
+ * processed.
+ */
+ chan->read_data_ok = 0;
+ wake_up_interruptible(&chan->in_fifo->waitq);
+ }
+
+ if (chan->out_ep)
+ wake_up_interruptible(&chan->out_ep->fifo.waitq);
+
+ mutex_unlock(&chan->lock);
+
+ wake_up_interruptible(&chan->flushq);
+ }
+
+ mutex_unlock(&xdev->process_in_mutex);
+
+ wake_up_interruptible(&xdev->msg_ep->fifo.waitq);
+
+ kref_put(&xdev->kref, cleanup_dev);
+}
+
+static void report_io_error(struct xillyusb_dev *xdev,
+ int errcode)
+{
+ unsigned long flags;
+ bool do_once = false;
+
+ spin_lock_irqsave(&xdev->error_lock, flags);
+ if (!xdev->error) {
+ xdev->error = errcode;
+ do_once = true;
+ }
+ spin_unlock_irqrestore(&xdev->error_lock, flags);
+
+ if (do_once) {
+ kref_get(&xdev->kref); /* xdev is used by work item */
+ queue_work(xdev->workq, &xdev->wakeup_workitem);
+ }
+}
+
+/*
+ * safely_assign_in_fifo() changes the value of chan->in_fifo and ensures
+ * the previous pointer is never used after its return.
+ */
+
+static void safely_assign_in_fifo(struct xillyusb_channel *chan,
+ struct xillyfifo *fifo)
+{
+ mutex_lock(&chan->lock);
+ chan->in_fifo = fifo;
+ mutex_unlock(&chan->lock);
+
+ flush_work(&chan->xdev->in_ep->workitem);
+}
+
+static void bulk_in_completer(struct urb *urb)
+{
+ struct xillybuffer *xb = urb->context;
+ struct xillyusb_endpoint *ep = xb->ep;
+ unsigned long flags;
+
+ if (urb->status) {
+ if (!(urb->status == -ENOENT ||
+ urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN))
+ report_io_error(ep->xdev, -EIO);
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->buffers);
+ ep->outstanding_urbs--;
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ return;
+ }
+
+ xb->len = urb->actual_length;
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->filled_buffers);
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ if (!ep->shutting_down)
+ queue_work(ep->xdev->workq, &ep->workitem);
+}
+
+static void bulk_out_completer(struct urb *urb)
+{
+ struct xillybuffer *xb = urb->context;
+ struct xillyusb_endpoint *ep = xb->ep;
+ unsigned long flags;
+
+ if (urb->status &&
+ (!(urb->status == -ENOENT ||
+ urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN)))
+ report_io_error(ep->xdev, -EIO);
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->buffers);
+ ep->outstanding_urbs--;
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ if (!ep->shutting_down)
+ queue_work(ep->xdev->workq, &ep->workitem);
+}
+
+static void try_queue_bulk_in(struct xillyusb_endpoint *ep)
+{
+ struct xillyusb_dev *xdev = ep->xdev;
+ struct xillybuffer *xb;
+ struct urb *urb;
+
+ int rc;
+ unsigned long flags;
+ unsigned int bufsize = ep->buffer_size;
+
+ mutex_lock(&ep->ep_mutex);
+
+ if (ep->shutting_down || xdev->error)
+ goto done;
+
+ while (1) {
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+
+ if (list_empty(&ep->buffers)) {
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+ goto done;
+ }
+
+ xb = list_first_entry(&ep->buffers, struct xillybuffer, entry);
+ list_del(&xb->entry);
+ ep->outstanding_urbs++;
+
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ report_io_error(xdev, -ENOMEM);
+ goto relist;
+ }
+
+ usb_fill_bulk_urb(urb, xdev->udev,
+ usb_rcvbulkpipe(xdev->udev, ep->ep_num),
+ xb->buf, bufsize, bulk_in_completer, xb);
+
+ usb_anchor_urb(urb, &ep->anchor);
+
+ rc = usb_submit_urb(urb, GFP_KERNEL);
+
+ if (rc) {
+ report_io_error(xdev, (rc == -ENOMEM) ? -ENOMEM :
+ -EIO);
+ goto unanchor;
+ }
+
+ usb_free_urb(urb); /* This just decrements reference count */
+ }
+
+unanchor:
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+
+relist:
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->buffers);
+ ep->outstanding_urbs--;
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+done:
+ mutex_unlock(&ep->ep_mutex);
+}
+
+static void try_queue_bulk_out(struct xillyusb_endpoint *ep)
+{
+ struct xillyfifo *fifo = &ep->fifo;
+ struct xillyusb_dev *xdev = ep->xdev;
+ struct xillybuffer *xb;
+ struct urb *urb;
+
+ int rc;
+ unsigned int fill;
+ unsigned long flags;
+ bool do_wake = false;
+
+ mutex_lock(&ep->ep_mutex);
+
+ if (ep->shutting_down || xdev->error)
+ goto done;
+
+ fill = READ_ONCE(fifo->fill) & ep->fill_mask;
+
+ while (1) {
+ int count;
+ unsigned int max_read;
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+
+ /*
+ * Race conditions might have the FIFO filled while the
+ * endpoint is marked as drained here. That doesn't matter,
+ * because the sole purpose of @drained is to ensure that
+ * certain data has been sent on the USB channel before
+ * shutting it down. Hence knowing that the FIFO appears
+ * to be empty with no outstanding URBs at some moment
+ * is good enough.
+ */
+
+ if (!fill) {
+ ep->drained = !ep->outstanding_urbs;
+ if (ep->drained && ep->wake_on_drain)
+ do_wake = true;
+
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+ goto done;
+ }
+
+ ep->drained = false;
+
+ if ((fill < ep->buffer_size && ep->outstanding_urbs) ||
+ list_empty(&ep->buffers)) {
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+ goto done;
+ }
+
+ xb = list_first_entry(&ep->buffers, struct xillybuffer, entry);
+ list_del(&xb->entry);
+ ep->outstanding_urbs++;
+
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ max_read = min(fill, ep->buffer_size);
+
+ count = fifo_read(&ep->fifo, xb->buf, max_read, xilly_memcpy);
+
+ /*
+ * xilly_memcpy always returns 0 => fifo_read can't fail =>
+ * count > 0
+ */
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ report_io_error(xdev, -ENOMEM);
+ goto relist;
+ }
+
+ usb_fill_bulk_urb(urb, xdev->udev,
+ usb_sndbulkpipe(xdev->udev, ep->ep_num),
+ xb->buf, count, bulk_out_completer, xb);
+
+ usb_anchor_urb(urb, &ep->anchor);
+
+ rc = usb_submit_urb(urb, GFP_KERNEL);
+
+ if (rc) {
+ report_io_error(xdev, (rc == -ENOMEM) ? -ENOMEM :
+ -EIO);
+ goto unanchor;
+ }
+
+ usb_free_urb(urb); /* This just decrements reference count */
+
+ fill -= count;
+ do_wake = true;
+ }
+
+unanchor:
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+
+relist:
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->buffers);
+ ep->outstanding_urbs--;
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+done:
+ mutex_unlock(&ep->ep_mutex);
+
+ if (do_wake)
+ wake_up_interruptible(&fifo->waitq);
+}
+
+static void bulk_out_work(struct work_struct *work)
+{
+ struct xillyusb_endpoint *ep = container_of(work,
+ struct xillyusb_endpoint,
+ workitem);
+ try_queue_bulk_out(ep);
+}
+
+static int process_in_opcode(struct xillyusb_dev *xdev,
+ int opcode,
+ int chan_num)
+{
+ struct xillyusb_channel *chan;
+ struct device *dev = xdev->dev;
+ int chan_idx = chan_num >> 1;
+
+ if (chan_idx >= xdev->num_channels) {
+ dev_err(dev, "Received illegal channel ID %d from FPGA\n",
+ chan_num);
+ return -EIO;
+ }
+
+ chan = &xdev->channels[chan_idx];
+
+ switch (opcode) {
+ case OPCODE_EOF:
+ if (!chan->read_data_ok) {
+ dev_err(dev, "Received unexpected EOF for channel %d\n",
+ chan_num);
+ return -EIO;
+ }
+
+ /*
+ * A write memory barrier ensures that the FIFO's fill level
+ * is visible before read_data_ok turns zero, so the data in
+ * the FIFO isn't missed by the consumer.
+ */
+ smp_wmb();
+ WRITE_ONCE(chan->read_data_ok, 0);
+ wake_up_interruptible(&chan->in_fifo->waitq);
+ break;
+
+ case OPCODE_REACHED_CHECKPOINT:
+ chan->flushing = 0;
+ wake_up_interruptible(&chan->flushq);
+ break;
+
+ case OPCODE_CANCELED_CHECKPOINT:
+ chan->canceled = 1;
+ wake_up_interruptible(&chan->flushq);
+ break;
+
+ default:
+ dev_err(dev, "Received illegal opcode %d from FPGA\n",
+ opcode);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int process_bulk_in(struct xillybuffer *xb)
+{
+ struct xillyusb_endpoint *ep = xb->ep;
+ struct xillyusb_dev *xdev = ep->xdev;
+ struct device *dev = xdev->dev;
+ int dws = xb->len >> 2;
+ __le32 *p = xb->buf;
+ u32 ctrlword;
+ struct xillyusb_channel *chan;
+ struct xillyfifo *fifo;
+ int chan_num = 0, opcode;
+ int chan_idx;
+ int bytes, count, dwconsume;
+ int in_bytes_left = 0;
+ int rc;
+
+ if ((dws << 2) != xb->len) {
+ dev_err(dev, "Received BULK IN transfer with %d bytes, not a multiple of 4\n",
+ xb->len);
+ return -EIO;
+ }
+
+ if (xdev->in_bytes_left) {
+ bytes = min(xdev->in_bytes_left, dws << 2);
+ in_bytes_left = xdev->in_bytes_left - bytes;
+ chan_num = xdev->leftover_chan_num;
+ goto resume_leftovers;
+ }
+
+ while (dws) {
+ ctrlword = le32_to_cpu(*p++);
+ dws--;
+
+ chan_num = ctrlword & 0xfff;
+ count = (ctrlword >> 12) & 0x3ff;
+ opcode = (ctrlword >> 24) & 0xf;
+
+ if (opcode != OPCODE_DATA) {
+ unsigned int in_counter = xdev->in_counter++ & 0x3ff;
+
+ if (count != in_counter) {
+ dev_err(dev, "Expected opcode counter %d, got %d\n",
+ in_counter, count);
+ return -EIO;
+ }
+
+ rc = process_in_opcode(xdev, opcode, chan_num);
+
+ if (rc)
+ return rc;
+
+ continue;
+ }
+
+ bytes = min(count + 1, dws << 2);
+ in_bytes_left = count + 1 - bytes;
+
+resume_leftovers:
+ chan_idx = chan_num >> 1;
+
+ if (!(chan_num & 1) || chan_idx >= xdev->num_channels ||
+ !xdev->channels[chan_idx].read_data_ok) {
+ dev_err(dev, "Received illegal channel ID %d from FPGA\n",
+ chan_num);
+ return -EIO;
+ }
+ chan = &xdev->channels[chan_idx];
+
+ fifo = chan->in_fifo;
+
+ if (unlikely(!fifo))
+ return -EIO; /* We got really unexpected data */
+
+ if (bytes != fifo_write(fifo, p, bytes, xilly_memcpy)) {
+ dev_err(dev, "Misbehaving FPGA overflowed an upstream FIFO!\n");
+ return -EIO;
+ }
+
+ wake_up_interruptible(&fifo->waitq);
+
+ dwconsume = (bytes + 3) >> 2;
+ dws -= dwconsume;
+ p += dwconsume;
+ }
+
+ xdev->in_bytes_left = in_bytes_left;
+ xdev->leftover_chan_num = chan_num;
+ return 0;
+}
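For reference, a standalone decoder for the 32-bit control word parsed by process_bulk_in() above; the field layout is taken directly from the shifts and masks in that function, while the struct and example word are hypothetical.

#include <stdint.h>
#include <stdio.h>

struct ctrl_fields {		/* Hypothetical struct, mirrors the locals above */
	unsigned int chan_num;	/* Bits 0-11 */
	unsigned int count;	/* Bits 12-21: length minus 1, or opcode counter */
	unsigned int opcode;	/* Bits 24-27 */
};

static struct ctrl_fields decode_ctrlword(uint32_t ctrlword)
{
	struct ctrl_fields f = {
		.chan_num = ctrlword & 0xfff,
		.count = (ctrlword >> 12) & 0x3ff,
		.opcode = (ctrlword >> 24) & 0xf,
	};

	return f;
}

int main(void)
{
	struct ctrl_fields f = decode_ctrlword(0x00040003); /* Example word */

	printf("chan=%u count=%u opcode=%u\n", f.chan_num, f.count, f.opcode);
	return 0;
}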
+
+static void bulk_in_work(struct work_struct *work)
+{
+ struct xillyusb_endpoint *ep =
+ container_of(work, struct xillyusb_endpoint, workitem);
+ struct xillyusb_dev *xdev = ep->xdev;
+ unsigned long flags;
+ struct xillybuffer *xb;
+ bool consumed = false;
+ int rc = 0;
+
+ mutex_lock(&xdev->process_in_mutex);
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+
+ while (1) {
+ if (rc || list_empty(&ep->filled_buffers)) {
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+ mutex_unlock(&xdev->process_in_mutex);
+
+ if (rc)
+ report_io_error(xdev, rc);
+ else if (consumed)
+ try_queue_bulk_in(ep);
+
+ return;
+ }
+
+ xb = list_first_entry(&ep->filled_buffers, struct xillybuffer,
+ entry);
+ list_del(&xb->entry);
+
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ consumed = true;
+
+ if (!xdev->error)
+ rc = process_bulk_in(xb);
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->buffers);
+ ep->outstanding_urbs--;
+ }
+}
+
+static int xillyusb_send_opcode(struct xillyusb_dev *xdev,
+ int chan_num, char opcode, u32 data)
+{
+ struct xillyusb_endpoint *ep = xdev->msg_ep;
+ struct xillyfifo *fifo = &ep->fifo;
+ __le32 msg[2];
+
+ int rc = 0;
+
+ msg[0] = cpu_to_le32((chan_num & 0xfff) |
+ ((opcode & 0xf) << 24));
+ msg[1] = cpu_to_le32(data);
+
+ mutex_lock(&xdev->msg_mutex);
+
+ /*
+ * The wait queue is woken with the interruptible variant, so the
+	 * wait function matches; however, returning because of an interrupt
+ * will mess things up considerably, in particular when the caller is
+ * the release method. And the xdev->error part prevents being stuck
+ * forever in the event of a bizarre hardware bug: Pull the USB plug.
+ */
+
+ while (wait_event_interruptible(fifo->waitq,
+ fifo->fill <= (fifo->size - 8) ||
+ xdev->error))
+ ; /* Empty loop */
+
+ if (xdev->error) {
+ rc = xdev->error;
+ goto unlock_done;
+ }
+
+ fifo_write(fifo, (void *)msg, 8, xilly_memcpy);
+
+ try_queue_bulk_out(ep);
+
+unlock_done:
+ mutex_unlock(&xdev->msg_mutex);
+
+ return rc;
+}
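A standalone sketch of the 8-byte message that xillyusb_send_opcode() pushes into the message FIFO; the helper name and the values in main() are hypothetical, and the little-endian conversion done by cpu_to_le32() is left out.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the msg[] assembly above */
static void compose_opcode_msg(uint32_t msg[2], int chan_num, int opcode,
			       uint32_t data)
{
	msg[0] = (chan_num & 0xfff) | ((opcode & 0xf) << 24);
	msg[1] = data;
}

int main(void)
{
	uint32_t msg[2];

	/* OPCODE_SET_CHECKPOINT (2) on channel 3, checkpoint value 256 */
	compose_opcode_msg(msg, 3, 2, 256);
	printf("0x%08x 0x%08x\n", msg[0], msg[1]);
	return 0;
}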
+
+/*
+ * Note that flush_downstream() merely waits for the data to arrive at
+ * the application logic at the FPGA -- unlike PCIe Xillybus' counterpart,
+ * it does nothing to make it happen (nor is it necessary).
+ *
+ * This function is not reentrant for the same @chan, but this is covered
+ * by the fact that for any given @chan, it's called only by the open,
+ * write, llseek and flush fops methods, which can't run in parallel (and the
+ * write + flush and llseek method handlers are protected with out_mutex).
+ *
+ * chan->flushed is there to avoid multiple flushes at the same position,
+ * in particular as a result of programs that close the file descriptor
+ * e.g. after a dup2() for redirection.
+ */
+
+static int flush_downstream(struct xillyusb_channel *chan,
+ long timeout,
+ bool interruptible)
+{
+ struct xillyusb_dev *xdev = chan->xdev;
+ int chan_num = chan->chan_idx << 1;
+ long deadline, left_to_sleep;
+ int rc;
+
+ if (chan->flushed)
+ return 0;
+
+ deadline = jiffies + 1 + timeout;
+
+ if (chan->flushing) {
+ long cancel_deadline = jiffies + 1 + XILLY_RESPONSE_TIMEOUT;
+
+ chan->canceled = 0;
+ rc = xillyusb_send_opcode(xdev, chan_num,
+ OPCODE_CANCEL_CHECKPOINT, 0);
+
+ if (rc)
+ return rc; /* Only real error, never -EINTR */
+
+ /* Ignoring interrupts. Cancellation must be handled */
+ while (!chan->canceled) {
+ left_to_sleep = cancel_deadline - ((long)jiffies);
+
+ if (left_to_sleep <= 0) {
+ report_io_error(xdev, -EIO);
+ return -EIO;
+ }
+
+ rc = wait_event_interruptible_timeout(chan->flushq,
+ chan->canceled ||
+ xdev->error,
+ left_to_sleep);
+
+ if (xdev->error)
+ return xdev->error;
+ }
+ }
+
+ chan->flushing = 1;
+
+ /*
+ * The checkpoint is given in terms of data elements, not bytes. As
+ * a result, if less than an element's worth of data is stored in the
+ * FIFO, it's not flushed, including the flush before closing, which
+ * means that such data is lost. This is consistent with PCIe Xillybus.
+ */
+
+ rc = xillyusb_send_opcode(xdev, chan_num,
+ OPCODE_SET_CHECKPOINT,
+ chan->out_bytes >>
+ chan->out_log2_element_size);
+
+ if (rc)
+ return rc; /* Only real error, never -EINTR */
+
+ if (!timeout) {
+ while (chan->flushing) {
+ rc = wait_event_interruptible(chan->flushq,
+ !chan->flushing ||
+ xdev->error);
+ if (xdev->error)
+ return xdev->error;
+
+ if (interruptible && rc)
+ return -EINTR;
+ }
+
+ goto done;
+ }
+
+ while (chan->flushing) {
+ left_to_sleep = deadline - ((long)jiffies);
+
+ if (left_to_sleep <= 0)
+ return -ETIMEDOUT;
+
+ rc = wait_event_interruptible_timeout(chan->flushq,
+ !chan->flushing ||
+ xdev->error,
+ left_to_sleep);
+
+ if (xdev->error)
+ return xdev->error;
+
+ if (interruptible && rc < 0)
+ return -EINTR;
+ }
+
+done:
+ chan->flushed = 1;
+ return 0;
+}
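The element-counted checkpoint arithmetic described above, in a standalone form; the element size here is an example value.

#include <stdio.h>

int main(void)
{
	unsigned int out_bytes = 10;
	unsigned int out_log2_element_size = 2; /* 4-byte elements, example value */
	unsigned int checkpoint = out_bytes >> out_log2_element_size;

	/* Two full elements (8 bytes) get flushed; the 2-byte tail does not */
	printf("checkpoint=%u\n", checkpoint);
	return 0;
}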
+
+/* request_read_anything(): Ask the FPGA for any little amount of data */
+static int request_read_anything(struct xillyusb_channel *chan,
+ char opcode)
+{
+ struct xillyusb_dev *xdev = chan->xdev;
+ unsigned int sh = chan->in_log2_element_size;
+ int chan_num = (chan->chan_idx << 1) | 1;
+ u32 mercy = chan->in_consumed_bytes + (2 << sh) - 1;
+
+ return xillyusb_send_opcode(xdev, chan_num, opcode, mercy >> sh);
+}
+
+static int xillyusb_open(struct inode *inode, struct file *filp)
+{
+ struct xillyusb_dev *xdev;
+ struct xillyusb_channel *chan;
+ struct xillyfifo *in_fifo = NULL;
+ struct xillyusb_endpoint *out_ep = NULL;
+ int rc;
+ int index;
+
+ mutex_lock(&kref_mutex);
+
+ rc = xillybus_find_inode(inode, (void **)&xdev, &index);
+ if (rc) {
+ mutex_unlock(&kref_mutex);
+ return rc;
+ }
+
+ kref_get(&xdev->kref);
+ mutex_unlock(&kref_mutex);
+
+ chan = &xdev->channels[index];
+ filp->private_data = chan;
+
+ mutex_lock(&chan->lock);
+
+ rc = -ENODEV;
+
+ if (xdev->error)
+ goto unmutex_fail;
+
+ if (((filp->f_mode & FMODE_READ) && !chan->readable) ||
+ ((filp->f_mode & FMODE_WRITE) && !chan->writable))
+ goto unmutex_fail;
+
+ if ((filp->f_flags & O_NONBLOCK) && (filp->f_mode & FMODE_READ) &&
+ chan->in_synchronous) {
+ dev_err(xdev->dev,
+ "open() failed: O_NONBLOCK not allowed for read on this device\n");
+ goto unmutex_fail;
+ }
+
+ if ((filp->f_flags & O_NONBLOCK) && (filp->f_mode & FMODE_WRITE) &&
+ chan->out_synchronous) {
+ dev_err(xdev->dev,
+ "open() failed: O_NONBLOCK not allowed for write on this device\n");
+ goto unmutex_fail;
+ }
+
+ rc = -EBUSY;
+
+ if (((filp->f_mode & FMODE_READ) && chan->open_for_read) ||
+ ((filp->f_mode & FMODE_WRITE) && chan->open_for_write))
+ goto unmutex_fail;
+
+ if (filp->f_mode & FMODE_READ)
+ chan->open_for_read = 1;
+
+ if (filp->f_mode & FMODE_WRITE)
+ chan->open_for_write = 1;
+
+ mutex_unlock(&chan->lock);
+
+ if (filp->f_mode & FMODE_WRITE) {
+ out_ep = endpoint_alloc(xdev,
+ (chan->chan_idx + 2) | USB_DIR_OUT,
+ bulk_out_work, BUF_SIZE_ORDER, BUFNUM);
+
+ if (!out_ep) {
+ rc = -ENOMEM;
+ goto unopen;
+ }
+
+ rc = fifo_init(&out_ep->fifo, chan->out_log2_fifo_size);
+
+ if (rc)
+ goto late_unopen;
+
+ out_ep->fill_mask = -(1 << chan->out_log2_element_size);
+ chan->out_bytes = 0;
+ chan->flushed = 0;
+
+ /*
+ * Sending a flush request to a previously closed stream
+ * effectively opens it, and also waits until the command is
+ * confirmed by the FPGA. The latter is necessary because the
+ * data is sent through a separate BULK OUT endpoint, and the
+ * xHCI controller is free to reorder transmissions.
+ *
+ * This can't go wrong unless there's a serious hardware error
+ * (or the computer is stuck for 500 ms?)
+ */
+ rc = flush_downstream(chan, XILLY_RESPONSE_TIMEOUT, false);
+
+ if (rc == -ETIMEDOUT) {
+ rc = -EIO;
+ report_io_error(xdev, rc);
+ }
+
+ if (rc)
+ goto late_unopen;
+ }
+
+ if (filp->f_mode & FMODE_READ) {
+ in_fifo = kzalloc(sizeof(*in_fifo), GFP_KERNEL);
+
+ if (!in_fifo) {
+ rc = -ENOMEM;
+ goto late_unopen;
+ }
+
+ rc = fifo_init(in_fifo, chan->in_log2_fifo_size);
+
+ if (rc) {
+ kfree(in_fifo);
+ goto late_unopen;
+ }
+ }
+
+ mutex_lock(&chan->lock);
+ if (in_fifo) {
+ chan->in_fifo = in_fifo;
+ chan->read_data_ok = 1;
+ }
+ if (out_ep)
+ chan->out_ep = out_ep;
+ mutex_unlock(&chan->lock);
+
+ if (in_fifo) {
+ u32 in_checkpoint = 0;
+
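+		/*
+		 * For an asynchronous stream, the initial checkpoint lets the
+		 * FPGA fill the entire (currently empty) IN FIFO. For a
+		 * synchronous stream it stays at zero, so nothing is sent
+		 * until explicitly requested.
+		 */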
+ if (!chan->in_synchronous)
+ in_checkpoint = in_fifo->size >>
+ chan->in_log2_element_size;
+
+ chan->in_consumed_bytes = 0;
+ chan->poll_used = 0;
+ chan->in_current_checkpoint = in_checkpoint;
+ rc = xillyusb_send_opcode(xdev, (chan->chan_idx << 1) | 1,
+ OPCODE_SET_CHECKPOINT,
+ in_checkpoint);
+
+ if (rc) /* Failure guarantees that opcode wasn't sent */
+ goto unfifo;
+
+ /*
+ * In non-blocking mode, request the FPGA to send any data it
+ * has right away. Otherwise, the first read() will always
+ * return -EAGAIN, which is OK strictly speaking, but ugly.
+ * Checking and unrolling if this fails isn't worth the
+ * effort -- the error is propagated to the first read()
+ * anyhow.
+ */
+ if (filp->f_flags & O_NONBLOCK)
+ request_read_anything(chan, OPCODE_SET_PUSH);
+ }
+
+ return 0;
+
+unfifo:
+ chan->read_data_ok = 0;
+ safely_assign_in_fifo(chan, NULL);
+ fifo_mem_release(in_fifo);
+ kfree(in_fifo);
+
+ if (out_ep) {
+ mutex_lock(&chan->lock);
+ chan->out_ep = NULL;
+ mutex_unlock(&chan->lock);
+ }
+
+late_unopen:
+ if (out_ep)
+ endpoint_dealloc(out_ep);
+
+unopen:
+ mutex_lock(&chan->lock);
+
+ if (filp->f_mode & FMODE_READ)
+ chan->open_for_read = 0;
+
+ if (filp->f_mode & FMODE_WRITE)
+ chan->open_for_write = 0;
+
+ mutex_unlock(&chan->lock);
+
+ kref_put(&xdev->kref, cleanup_dev);
+
+ return rc;
+
+unmutex_fail:
+ kref_put(&xdev->kref, cleanup_dev);
+ mutex_unlock(&chan->lock);
+ return rc;
+}
+
+static ssize_t xillyusb_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *f_pos)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ struct xillyusb_dev *xdev = chan->xdev;
+ struct xillyfifo *fifo = chan->in_fifo;
+ int chan_num = (chan->chan_idx << 1) | 1;
+
+ long deadline, left_to_sleep;
+ int bytes_done = 0;
+ bool sent_set_push = false;
+ int rc;
+
+ deadline = jiffies + 1 + XILLY_RX_TIMEOUT;
+
+ rc = mutex_lock_interruptible(&chan->in_mutex);
+
+ if (rc)
+ return rc;
+
+ while (1) {
+ u32 fifo_checkpoint_bytes, complete_checkpoint_bytes;
+ u32 complete_checkpoint, fifo_checkpoint;
+ u32 checkpoint;
+ s32 diff, leap;
+ unsigned int sh = chan->in_log2_element_size;
+ bool checkpoint_for_complete;
+
+ rc = fifo_read(fifo, (__force void *)userbuf + bytes_done,
+ count - bytes_done, xilly_copy_to_user);
+
+ if (rc < 0)
+ break;
+
+ bytes_done += rc;
+ chan->in_consumed_bytes += rc;
+
+ left_to_sleep = deadline - ((long)jiffies);
+
+ /*
+ * Some 32-bit arithmetic that may wrap. Note that
+		 * complete_checkpoint is rounded up to the next element
+		 * boundary, because the read() can't be completed otherwise.
+ * fifo_checkpoint_bytes is rounded down, because it protects
+ * in_fifo from overflowing.
+ */
+
+ fifo_checkpoint_bytes = chan->in_consumed_bytes + fifo->size;
+ complete_checkpoint_bytes =
+ chan->in_consumed_bytes + count - bytes_done;
+
+ fifo_checkpoint = fifo_checkpoint_bytes >> sh;
+ complete_checkpoint =
+ (complete_checkpoint_bytes + (1 << sh) - 1) >> sh;
+
+ diff = (fifo_checkpoint - complete_checkpoint) << sh;
+
+ if (chan->in_synchronous && diff >= 0) {
+ checkpoint = complete_checkpoint;
+ checkpoint_for_complete = true;
+ } else {
+ checkpoint = fifo_checkpoint;
+ checkpoint_for_complete = false;
+ }
+
+ leap = (checkpoint - chan->in_current_checkpoint) << sh;
+
+ /*
+ * To prevent flooding of OPCODE_SET_CHECKPOINT commands as
+ * data is consumed, it's issued only if it moves the
+ * checkpoint by at least an 8th of the FIFO's size, or if
+ * it's necessary to complete the number of bytes requested by
+ * the read() call.
+ *
+		 * chan->read_data_ok is checked to spare an unnecessary
+		 * submission after receiving EOF; however, it's harmless if
+		 * one slips through anyway.
+ */
+
+ if (chan->read_data_ok &&
+ (leap > (fifo->size >> 3) ||
+ (checkpoint_for_complete && leap > 0))) {
+ chan->in_current_checkpoint = checkpoint;
+ rc = xillyusb_send_opcode(xdev, chan_num,
+ OPCODE_SET_CHECKPOINT,
+ checkpoint);
+
+ if (rc)
+ break;
+ }
+
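+		/*
+		 * Done if the full count has been read, or if the deadline
+		 * has passed and at least some data has been obtained.
+		 */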
+ if (bytes_done == count ||
+ (left_to_sleep <= 0 && bytes_done))
+ break;
+
+ /*
+ * Reaching here means that the FIFO was empty when
+ * fifo_read() returned, but not necessarily right now. Error
+ * and EOF are checked and reported only now, so that no data
+		 * that made its way into the FIFO is lost.
+ */
+
+ if (!READ_ONCE(chan->read_data_ok)) { /* FPGA has sent EOF */
+ /* Has data slipped into the FIFO since fifo_read()? */
+ smp_rmb();
+ if (READ_ONCE(fifo->fill))
+ continue;
+
+ rc = 0;
+ break;
+ }
+
+ if (xdev->error) {
+ rc = xdev->error;
+ break;
+ }
+
+ if (filp->f_flags & O_NONBLOCK) {
+ rc = -EAGAIN;
+ break;
+ }
+
+ if (!sent_set_push) {
+ rc = xillyusb_send_opcode(xdev, chan_num,
+ OPCODE_SET_PUSH,
+ complete_checkpoint);
+
+ if (rc)
+ break;
+
+ sent_set_push = true;
+ }
+
+ if (left_to_sleep > 0) {
+ /*
+ * Note that when xdev->error is set (e.g. when the
+			 * device is unplugged), read_data_ok is cleared and
+			 * fifo->waitq is woken up, so xdev->error needs no
+			 * special attention here.
+ */
+
+ rc = wait_event_interruptible_timeout
+ (fifo->waitq,
+ fifo->fill || !chan->read_data_ok,
+ left_to_sleep);
+ } else { /* bytes_done == 0 */
+ /* Tell FPGA to send anything it has */
+ rc = request_read_anything(chan, OPCODE_UPDATE_PUSH);
+
+ if (rc)
+ break;
+
+ rc = wait_event_interruptible
+ (fifo->waitq,
+ fifo->fill || !chan->read_data_ok);
+ }
+
+ if (rc < 0) {
+ rc = -EINTR;
+ break;
+ }
+ }
+
+ if (((filp->f_flags & O_NONBLOCK) || chan->poll_used) &&
+ !READ_ONCE(fifo->fill))
+ request_read_anything(chan, OPCODE_SET_PUSH);
+
+ mutex_unlock(&chan->in_mutex);
+
+ if (bytes_done)
+ return bytes_done;
+
+ return rc;
+}
+
+static int xillyusb_flush(struct file *filp, fl_owner_t id)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ int rc;
+
+ if (!(filp->f_mode & FMODE_WRITE))
+ return 0;
+
+ rc = mutex_lock_interruptible(&chan->out_mutex);
+
+ if (rc)
+ return rc;
+
+ /*
+ * One second's timeout on flushing. Interrupts are ignored, because if
+ * the user pressed CTRL-C, that interrupt will still be in flight by
+ * the time we reach here, and the opportunity to flush is lost.
+ */
+ rc = flush_downstream(chan, HZ, false);
+
+ mutex_unlock(&chan->out_mutex);
+
+ if (rc == -ETIMEDOUT) {
+ /* The things you do to use dev_warn() and not pr_warn() */
+ struct xillyusb_dev *xdev = chan->xdev;
+
+ mutex_lock(&chan->lock);
+ if (!xdev->error)
+ dev_warn(xdev->dev,
+ "Timed out while flushing. Output data may be lost.\n");
+ mutex_unlock(&chan->lock);
+ }
+
+ return rc;
+}
+
+static ssize_t xillyusb_write(struct file *filp, const char __user *userbuf,
+ size_t count, loff_t *f_pos)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ struct xillyusb_dev *xdev = chan->xdev;
+ struct xillyfifo *fifo = &chan->out_ep->fifo;
+ int rc;
+
+ rc = mutex_lock_interruptible(&chan->out_mutex);
+
+ if (rc)
+ return rc;
+
+ while (1) {
+ if (xdev->error) {
+ rc = xdev->error;
+ break;
+ }
+
+ if (count == 0)
+ break;
+
+ rc = fifo_write(fifo, (__force void *)userbuf, count,
+ xilly_copy_from_user);
+
+ if (rc != 0)
+ break;
+
+ if (filp->f_flags & O_NONBLOCK) {
+ rc = -EAGAIN;
+ break;
+ }
+
+ if (wait_event_interruptible
+ (fifo->waitq,
+ fifo->fill != fifo->size || xdev->error)) {
+ rc = -EINTR;
+ break;
+ }
+ }
+
+ if (rc < 0)
+ goto done;
+
+ chan->out_bytes += rc;
+
+ if (rc) {
+ try_queue_bulk_out(chan->out_ep);
+ chan->flushed = 0;
+ }
+
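+	/*
+	 * On a synchronous stream, wait (without a deadline, but allowing
+	 * interrupts) for the FPGA to confirm that the data has reached the
+	 * application logic before returning.
+	 */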
+ if (chan->out_synchronous) {
+ int flush_rc = flush_downstream(chan, 0, true);
+
+ if (flush_rc && !rc)
+ rc = flush_rc;
+ }
+
+done:
+ mutex_unlock(&chan->out_mutex);
+
+ return rc;
+}
+
+static int xillyusb_release(struct inode *inode, struct file *filp)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ struct xillyusb_dev *xdev = chan->xdev;
+ int rc_read = 0, rc_write = 0;
+
+ if (filp->f_mode & FMODE_READ) {
+ struct xillyfifo *in_fifo = chan->in_fifo;
+
+ rc_read = xillyusb_send_opcode(xdev, (chan->chan_idx << 1) | 1,
+ OPCODE_CLOSE, 0);
+ /*
+ * If rc_read is nonzero, xdev->error indicates a global
+ * device error. The error is reported later, so that
+ * resources are freed.
+ *
+		 * Looping on wait_event_interruptible() effectively defeats
+		 * the point of being interruptible, and this should have been
+		 * wait_event(). However, the queue is woken with
+		 * wake_up_interruptible() for the sake of other uses. If
+		 * there's a global device error, chan->read_data_ok is
+		 * deasserted and the wait queue is woken up, so this is covered.
+ */
+
+ while (wait_event_interruptible(in_fifo->waitq,
+ !chan->read_data_ok))
+ ; /* Empty loop */
+
+ safely_assign_in_fifo(chan, NULL);
+ fifo_mem_release(in_fifo);
+ kfree(in_fifo);
+
+ mutex_lock(&chan->lock);
+ chan->open_for_read = 0;
+ mutex_unlock(&chan->lock);
+ }
+
+ if (filp->f_mode & FMODE_WRITE) {
+ struct xillyusb_endpoint *ep = chan->out_ep;
+ /*
+ * chan->flushing isn't zeroed. If the pre-release flush timed
+ * out, a cancel request will be sent before the next
+ * OPCODE_SET_CHECKPOINT (i.e. when the file is opened again).
+		 * This is done even though the FPGA forgets about the
+		 * checkpoint request as the file closes. Still, in an
+		 * exceptional race condition, the FPGA could send an
+		 * OPCODE_REACHED_CHECKPOINT just before closing that would
+		 * reach the host after the file has re-opened.
+ */
+
+ mutex_lock(&chan->lock);
+ chan->out_ep = NULL;
+ mutex_unlock(&chan->lock);
+
+ endpoint_quiesce(ep);
+ endpoint_dealloc(ep);
+
+ /* See comments on rc_read above */
+ rc_write = xillyusb_send_opcode(xdev, chan->chan_idx << 1,
+ OPCODE_CLOSE, 0);
+
+ mutex_lock(&chan->lock);
+ chan->open_for_write = 0;
+ mutex_unlock(&chan->lock);
+ }
+
+ kref_put(&xdev->kref, cleanup_dev);
+
+ return rc_read ? rc_read : rc_write;
+}
+
+/*
+ * Xillybus' API allows device nodes to be seekable, giving the user
+ * application access to a RAM array on the FPGA (or logic emulating it).
+ */
+
+static loff_t xillyusb_llseek(struct file *filp, loff_t offset, int whence)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ struct xillyusb_dev *xdev = chan->xdev;
+ loff_t pos = filp->f_pos;
+ int rc = 0;
+ unsigned int log2_element_size = chan->readable ?
+ chan->in_log2_element_size : chan->out_log2_element_size;
+
+ /*
+	 * Take both mutexes without allowing interrupts, since common
+	 * applications don't seem to expect an -EINTR here. Besides, multiple
+	 * accesses to a single file descriptor on seekable devices are a mess
+	 * anyhow.
+ */
+
+ mutex_lock(&chan->out_mutex);
+ mutex_lock(&chan->in_mutex);
+
+ switch (whence) {
+ case SEEK_SET:
+ pos = offset;
+ break;
+ case SEEK_CUR:
+ pos += offset;
+ break;
+ case SEEK_END:
+ pos = offset; /* Going to the end => to the beginning */
+ break;
+ default:
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* In any case, we must finish on an element boundary */
+ if (pos & ((1 << log2_element_size) - 1)) {
+ rc = -EINVAL;
+ goto end;
+ }
+
+ rc = xillyusb_send_opcode(xdev, chan->chan_idx << 1,
+ OPCODE_SET_ADDR,
+ pos >> log2_element_size);
+
+ if (rc)
+ goto end;
+
+ if (chan->writable) {
+ chan->flushed = 0;
+ rc = flush_downstream(chan, HZ, false);
+ }
+
+end:
+ mutex_unlock(&chan->out_mutex);
+ mutex_unlock(&chan->in_mutex);
+
+ if (rc) /* Return error after releasing mutexes */
+ return rc;
+
+ filp->f_pos = pos;
+
+ return pos;
+}
+
+static __poll_t xillyusb_poll(struct file *filp, poll_table *wait)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ __poll_t mask = 0;
+
+ if (chan->in_fifo)
+ poll_wait(filp, &chan->in_fifo->waitq, wait);
+
+ if (chan->out_ep)
+ poll_wait(filp, &chan->out_ep->fifo.waitq, wait);
+
+ /*
+ * If this is the first time poll() is called, and the file is
+ * readable, set the relevant flag. Also tell the FPGA to send all it
+ * has, to kickstart the mechanism that ensures there's always some
+ * data in in_fifo unless the stream is dry end-to-end. Note that the
+	 * first poll() may not return EPOLLIN, even if there's data on the
+ * FPGA. Rather, the data will arrive soon, and trigger the relevant
+ * wait queue.
+ */
+
+ if (!chan->poll_used && chan->in_fifo) {
+ chan->poll_used = 1;
+ request_read_anything(chan, OPCODE_SET_PUSH);
+ }
+
+ /*
+	 * poll() deliberately never reports EPOLLIN on read() channels which
+	 * are synchronous. Allowing it would create situations where data has
+	 * been delivered at the FPGA, with users expecting select() to wake
+	 * up, which it may not. So it's made to never work.
+ */
+
+ if (chan->in_fifo && !chan->in_synchronous &&
+ (READ_ONCE(chan->in_fifo->fill) || !chan->read_data_ok))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ if (chan->out_ep &&
+ (READ_ONCE(chan->out_ep->fifo.fill) != chan->out_ep->fifo.size))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+
+ if (chan->xdev->error)
+ mask |= EPOLLERR;
+
+ return mask;
+}
+
+static const struct file_operations xillyusb_fops = {
+ .owner = THIS_MODULE,
+ .read = xillyusb_read,
+ .write = xillyusb_write,
+ .open = xillyusb_open,
+ .flush = xillyusb_flush,
+ .release = xillyusb_release,
+ .llseek = xillyusb_llseek,
+ .poll = xillyusb_poll,
+};
+
+static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev)
+{
+ xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT,
+ bulk_out_work, 1, 2);
+ if (!xdev->msg_ep)
+ return -ENOMEM;
+
+ if (fifo_init(&xdev->msg_ep->fifo, 13)) /* 8 kiB */
+ goto dealloc;
+
+ xdev->msg_ep->fill_mask = -8; /* 8 bytes granularity */
+
+ xdev->in_ep = endpoint_alloc(xdev, IN_EP_NUM | USB_DIR_IN,
+ bulk_in_work, BUF_SIZE_ORDER, BUFNUM);
+ if (!xdev->in_ep)
+ goto dealloc;
+
+ try_queue_bulk_in(xdev->in_ep);
+
+ return 0;
+
+dealloc:
+ endpoint_dealloc(xdev->msg_ep); /* Also frees FIFO mem if allocated */
+ xdev->msg_ep = NULL;
+ return -ENOMEM;
+}
+
+static int setup_channels(struct xillyusb_dev *xdev,
+ __le16 *chandesc,
+ int num_channels)
+{
+ struct xillyusb_channel *chan;
+ int i;
+
+ chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ xdev->channels = chan;
+
+ for (i = 0; i < num_channels; i++, chan++) {
+ unsigned int in_desc = le16_to_cpu(*chandesc++);
+ unsigned int out_desc = le16_to_cpu(*chandesc++);
+
+ chan->xdev = xdev;
+ mutex_init(&chan->in_mutex);
+ mutex_init(&chan->out_mutex);
+ mutex_init(&chan->lock);
+ init_waitqueue_head(&chan->flushq);
+
+ chan->chan_idx = i;
+
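+		/*
+		 * Each 16-bit descriptor, as decoded below: bit 7 marks the
+		 * entry as valid, bit 6 selects synchronous mode, bit 5 makes
+		 * the stream seekable, bits 3:0 are log2 of the element size,
+		 * and bits 12:8 are log2 of the FIFO size minus 16.
+		 */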
+ if (in_desc & 0x80) { /* Entry is valid */
+ chan->readable = 1;
+ chan->in_synchronous = !!(in_desc & 0x40);
+ chan->in_seekable = !!(in_desc & 0x20);
+ chan->in_log2_element_size = in_desc & 0x0f;
+ chan->in_log2_fifo_size = ((in_desc >> 8) & 0x1f) + 16;
+ }
+
+ /*
+ * A downstream channel should never exist above index 13,
+ * as it would request a nonexistent BULK endpoint > 15.
+ * In the peculiar case that it does, it's ignored silently.
+ */
+
+ if ((out_desc & 0x80) && i < 14) { /* Entry is valid */
+ chan->writable = 1;
+ chan->out_synchronous = !!(out_desc & 0x40);
+ chan->out_seekable = !!(out_desc & 0x20);
+ chan->out_log2_element_size = out_desc & 0x0f;
+ chan->out_log2_fifo_size =
+ ((out_desc >> 8) & 0x1f) + 16;
+ }
+ }
+
+ return 0;
+}
+
+static int xillyusb_discovery(struct usb_interface *interface)
+{
+ int rc;
+ struct xillyusb_dev *xdev = usb_get_intfdata(interface);
+ __le16 bogus_chandesc[2];
+ struct xillyfifo idt_fifo;
+ struct xillyusb_channel *chan;
+ unsigned int idt_len, names_offset;
+ unsigned char *idt;
+ int num_channels;
+
+ rc = xillyusb_send_opcode(xdev, ~0, OPCODE_QUIESCE, 0);
+
+ if (rc) {
+ dev_err(&interface->dev, "Failed to send quiesce request. Aborting.\n");
+ return rc;
+ }
+
+ /* Phase I: Set up one fake upstream channel and obtain IDT */
+
+ /* Set up a fake IDT with one async IN stream */
+ bogus_chandesc[0] = cpu_to_le16(0x80);
+ bogus_chandesc[1] = cpu_to_le16(0);
+
+ rc = setup_channels(xdev, bogus_chandesc, 1);
+
+ if (rc)
+ return rc;
+
+ rc = fifo_init(&idt_fifo, LOG2_IDT_FIFO_SIZE);
+
+ if (rc)
+ return rc;
+
+ chan = xdev->channels;
+
+ chan->in_fifo = &idt_fifo;
+ chan->read_data_ok = 1;
+
+ xdev->num_channels = 1;
+
+ rc = xillyusb_send_opcode(xdev, ~0, OPCODE_REQ_IDT, 0);
+
+ if (rc) {
+ dev_err(&interface->dev, "Failed to send IDT request. Aborting.\n");
+ goto unfifo;
+ }
+
+ rc = wait_event_interruptible_timeout(idt_fifo.waitq,
+ !chan->read_data_ok,
+ XILLY_RESPONSE_TIMEOUT);
+
+ if (xdev->error) {
+ rc = xdev->error;
+ goto unfifo;
+ }
+
+ if (rc < 0) {
+ rc = -EINTR; /* Interrupt on probe method? Interesting. */
+ goto unfifo;
+ }
+
+ if (chan->read_data_ok) {
+ rc = -ETIMEDOUT;
+ dev_err(&interface->dev, "No response from FPGA. Aborting.\n");
+ goto unfifo;
+ }
+
+ idt_len = READ_ONCE(idt_fifo.fill);
+ idt = kmalloc(idt_len, GFP_KERNEL);
+
+ if (!idt) {
+ rc = -ENOMEM;
+ goto unfifo;
+ }
+
+ fifo_read(&idt_fifo, idt, idt_len, xilly_memcpy);
+
+ if (crc32_le(~0, idt, idt_len) != 0) {
+ dev_err(&interface->dev, "IDT failed CRC check. Aborting.\n");
+ rc = -ENODEV;
+ goto unidt;
+ }
+
+ if (*idt > 0x90) {
+ dev_err(&interface->dev, "No support for IDT version 0x%02x. Maybe the xillyusb driver needs an upgrade. Aborting.\n",
+ (int)*idt);
+ rc = -ENODEV;
+ goto unidt;
+ }
+
+ /* Phase II: Set up the streams as defined in IDT */
+
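+	/*
+	 * IDT layout, as parsed here: a version byte, a little-endian 16-bit
+	 * channel count, four bytes of channel descriptors per channel, the
+	 * device file names, and a trailing 32-bit CRC (verified above).
+	 */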
+ num_channels = le16_to_cpu(*((__le16 *)(idt + 1)));
+ names_offset = 3 + num_channels * 4;
+ idt_len -= 4; /* Exclude CRC */
+
+ if (idt_len < names_offset) {
+ dev_err(&interface->dev, "IDT too short. This is exceptionally weird, because its CRC is OK\n");
+ rc = -ENODEV;
+ goto unidt;
+ }
+
+ rc = setup_channels(xdev, (void *)idt + 3, num_channels);
+
+ if (rc)
+ goto unidt;
+
+ /*
+ * Except for wildly misbehaving hardware, or if it was disconnected
+ * just after responding with the IDT, there is no reason for any
+	 * work item to be running now. To be sure that the update of
+	 * xdev->channels is visible to anything that might run in parallel,
+	 * flush the workqueue, which rarely does anything.
+ */
+ flush_workqueue(xdev->workq);
+
+ xdev->num_channels = num_channels;
+
+ fifo_mem_release(&idt_fifo);
+ kfree(chan);
+
+ rc = xillybus_init_chrdev(&interface->dev, &xillyusb_fops,
+ THIS_MODULE, xdev,
+ idt + names_offset,
+ idt_len - names_offset,
+ num_channels,
+ xillyname, true);
+
+ kfree(idt);
+
+ return rc;
+
+unidt:
+ kfree(idt);
+
+unfifo:
+ safely_assign_in_fifo(chan, NULL);
+ fifo_mem_release(&idt_fifo);
+
+ return rc;
+}
+
+static int xillyusb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct xillyusb_dev *xdev;
+ int rc;
+
+ xdev = kzalloc(sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ kref_init(&xdev->kref);
+ mutex_init(&xdev->process_in_mutex);
+ mutex_init(&xdev->msg_mutex);
+
+ xdev->udev = usb_get_dev(interface_to_usbdev(interface));
+ xdev->dev = &interface->dev;
+ xdev->error = 0;
+ spin_lock_init(&xdev->error_lock);
+ xdev->in_counter = 0;
+ xdev->in_bytes_left = 0;
+ xdev->workq = alloc_workqueue(xillyname, WQ_HIGHPRI, 0);
+
+ if (!xdev->workq) {
+ dev_err(&interface->dev, "Failed to allocate work queue\n");
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ INIT_WORK(&xdev->wakeup_workitem, wakeup_all);
+
+ usb_set_intfdata(interface, xdev);
+
+ rc = xillyusb_setup_base_eps(xdev);
+ if (rc)
+ goto fail;
+
+ rc = xillyusb_discovery(interface);
+ if (rc)
+ goto latefail;
+
+ return 0;
+
+latefail:
+ endpoint_quiesce(xdev->in_ep);
+ endpoint_quiesce(xdev->msg_ep);
+
+fail:
+ usb_set_intfdata(interface, NULL);
+ kref_put(&xdev->kref, cleanup_dev);
+ return rc;
+}
+
+static void xillyusb_disconnect(struct usb_interface *interface)
+{
+ struct xillyusb_dev *xdev = usb_get_intfdata(interface);
+ struct xillyusb_endpoint *msg_ep = xdev->msg_ep;
+ struct xillyfifo *fifo = &msg_ep->fifo;
+ int rc;
+ int i;
+
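+	/* A zero @timeout means waiting indefinitely for the flush */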
+ xillybus_cleanup_chrdev(xdev, &interface->dev);
+
+ /*
+ * Try to send OPCODE_QUIESCE, which will fail silently if the device
+ * was disconnected, but makes sense on module unload.
+ */
+
+ msg_ep->wake_on_drain = true;
+ xillyusb_send_opcode(xdev, ~0, OPCODE_QUIESCE, 0);
+
+ /*
+ * If the device has been disconnected, sending the opcode causes
+	 * a global device error to be set in xdev->error, if such an error
+	 * hasn't occurred earlier. Hence timing out means that the USB link
+	 * is fine, but somehow the message wasn't sent. This should never
+	 * happen.
+ */
+
+ rc = wait_event_interruptible_timeout(fifo->waitq,
+ msg_ep->drained || xdev->error,
+ XILLY_RESPONSE_TIMEOUT);
+
+ if (!rc)
+ dev_err(&interface->dev,
+ "Weird timeout condition on sending quiesce request.\n");
+
+ report_io_error(xdev, -ENODEV); /* Discourage further activity */
+
+ /*
+ * This device driver is declared with soft_unbind set, or else
+ * sending OPCODE_QUIESCE above would always fail. The price is
+ * that the USB framework didn't kill outstanding URBs, so it has
+ * to be done explicitly before returning from this call.
+ */
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ struct xillyusb_channel *chan = &xdev->channels[i];
+
+ /*
+ * Lock taken to prevent chan->out_ep from changing. It also
+ * ensures xillyusb_open() and xillyusb_flush() don't access
+ * xdev->dev after being nullified below.
+ */
+ mutex_lock(&chan->lock);
+ if (chan->out_ep)
+ endpoint_quiesce(chan->out_ep);
+ mutex_unlock(&chan->lock);
+ }
+
+ endpoint_quiesce(xdev->in_ep);
+ endpoint_quiesce(xdev->msg_ep);
+
+ usb_set_intfdata(interface, NULL);
+
+ xdev->dev = NULL;
+
+ mutex_lock(&kref_mutex);
+ kref_put(&xdev->kref, cleanup_dev);
+ mutex_unlock(&kref_mutex);
+}
+
+static struct usb_driver xillyusb_driver = {
+ .name = xillyname,
+ .id_table = xillyusb_table,
+ .probe = xillyusb_probe,
+ .disconnect = xillyusb_disconnect,
+ .soft_unbind = 1,
+};
+
+static int __init xillyusb_init(void)
+{
+ int rc = 0;
+
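+	/*
+	 * Translate the initial FIFO buffer size into a page allocation
+	 * order: zero if one buffer fits in a page, the difference of the
+	 * log2 sizes otherwise.
+	 */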
+ if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT)
+ fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT;
+ else
+ fifo_buf_order = 0;
+
+ rc = usb_register(&xillyusb_driver);
+
+ return rc;
+}
+
+static void __exit xillyusb_exit(void)
+{
+ usb_deregister(&xillyusb_driver);
+}
+
+module_init(xillyusb_init);
+module_exit(xillyusb_exit);