author     Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/char/ipmi
parent     Initial commit. (diff)
download   linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
           linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/char/ipmi')
-rw-r--r--  drivers/char/ipmi/Kconfig | 180
-rw-r--r--  drivers/char/ipmi/Makefile | 32
-rw-r--r--  drivers/char/ipmi/bt-bmc.c | 493
-rw-r--r--  drivers/char/ipmi/ipmb_dev_int.c | 377
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c | 696
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c | 910
-rw-r--r--  drivers/char/ipmi/ipmi_dmi.c | 223
-rw-r--r--  drivers/char/ipmi/ipmi_dmi.h | 10
-rw-r--r--  drivers/char/ipmi/ipmi_ipmb.c | 583
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c | 536
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 5573
-rw-r--r--  drivers/char/ipmi/ipmi_plat_data.c | 124
-rw-r--r--  drivers/char/ipmi/ipmi_plat_data.h | 25
-rw-r--r--  drivers/char/ipmi/ipmi_powernv.c | 316
-rw-r--r--  drivers/char/ipmi/ipmi_poweroff.c | 738
-rw-r--r--  drivers/char/ipmi/ipmi_si.h | 107
-rw-r--r--  drivers/char/ipmi/ipmi_si_hardcode.c | 153
-rw-r--r--  drivers/char/ipmi/ipmi_si_hotmod.c | 237
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 2307
-rw-r--r--  drivers/char/ipmi/ipmi_si_mem_io.c | 146
-rw-r--r--  drivers/char/ipmi/ipmi_si_parisc.c | 61
-rw-r--r--  drivers/char/ipmi/ipmi_si_pci.c | 160
-rw-r--r--  drivers/char/ipmi/ipmi_si_platform.c | 471
-rw-r--r--  drivers/char/ipmi/ipmi_si_port_io.c | 114
-rw-r--r--  drivers/char/ipmi/ipmi_si_sm.h | 104
-rw-r--r--  drivers/char/ipmi/ipmi_smic_sm.c | 585
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 2168
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 1344
-rw-r--r--  drivers/char/ipmi/kcs_bmc.c | 189
-rw-r--r--  drivers/char/ipmi/kcs_bmc.h | 46
-rw-r--r--  drivers/char/ipmi/kcs_bmc_aspeed.c | 685
-rw-r--r--  drivers/char/ipmi/kcs_bmc_cdev_ipmi.c | 568
-rw-r--r--  drivers/char/ipmi/kcs_bmc_client.h | 45
-rw-r--r--  drivers/char/ipmi/kcs_bmc_device.h | 22
-rw-r--r--  drivers/char/ipmi/kcs_bmc_npcm7xx.c | 253
-rw-r--r--  drivers/char/ipmi/kcs_bmc_serio.c | 159
36 files changed, 20740 insertions, 0 deletions
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
new file mode 100644
index 000000000..df45e0af9
--- /dev/null
+++ b/drivers/char/ipmi/Kconfig
@@ -0,0 +1,180 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# IPMI device configuration
+#
+
+menuconfig IPMI_HANDLER
+ tristate 'IPMI top-level message handler'
+ depends on HAS_IOMEM
+ select IPMI_DMI_DECODE if DMI
+ help
+ This enables the central IPMI message handler, required for IPMI
+ to work.
+
+ IPMI is a standard for managing sensors (temperature,
+ voltage, etc.) in a system.
+
+ See <file:Documentation/driver-api/ipmi.rst> for more details on the driver.
+
+ If unsure, say N.
+
+config IPMI_DMI_DECODE
+ select IPMI_PLAT_DATA
+ bool
+
+config IPMI_PLAT_DATA
+ bool
+
+if IPMI_HANDLER
+
+config IPMI_PANIC_EVENT
+ bool 'Generate a panic event to all BMCs on a panic'
+ help
+ When a panic occurs, this will cause the IPMI message handler to,
+ by default, generate an IPMI event describing the panic to each
+ interface registered with the message handler. This is always
+ available; the ipmi_msghandler module parameter panic_op can be
+ set to "event" to choose this behavior at runtime. This config
+ option simply sets the default value to "event".
+
+config IPMI_PANIC_STRING
+ bool 'Generate OEM events containing the panic string'
+ depends on IPMI_PANIC_EVENT
+ help
+ When a panic occurs, this will cause the IPMI message handler to,
+ by default, generate IPMI OEM type f0 events holding the IPMB
+ address of the panic generator (byte 4 of the event), a sequence
+ number for the string (byte 5 of the event), and part of the
+ string (the rest of the event). Bytes 1, 2, and 3 are the normal
+ usage for an OEM event. You can fetch these events and use the
+ sequence numbers to piece the string together. The ipmi_msghandler
+ module parameter panic_op can be set to "string" to choose this
+ behavior at runtime; this config option simply sets the default
+ value to "string".
+
+config IPMI_DEVICE_INTERFACE
+ tristate 'Device interface for IPMI'
+ help
+ This provides an IOCTL interface to the IPMI message handler so
+ userland processes may use IPMI. It supports poll() and select().
+
+config IPMI_SI
+ tristate 'IPMI System Interface handler'
+ select IPMI_PLAT_DATA
+ help
+ Provides a driver for System Interfaces (KCS, SMIC, and BT).
+ If you are using IPMI, you should probably say "y" here.
+
+config IPMI_SSIF
+ tristate 'IPMI SMBus handler (SSIF)'
+ depends on I2C
+ help
+ Provides a driver for an SMBus interface to a BMC, meaning the
+ BMC must be accessed over an I2C/SMBus bus rather than one of
+ the standard system interfaces. This module requires I2C support.
+
+config IPMI_IPMB
+ tristate 'IPMI IPMB interface'
+ depends on I2C && I2C_SLAVE
+ help
+ Provides a driver for a system sitting directly on the IPMB bus.
+ It supports normal system interface messages to a BMC on the IPMB
+ bus, and it also supports direct messaging on the bus using
+ IPMB direct messages. This module requires I2C support.
+
+config IPMI_POWERNV
+ depends on PPC_POWERNV
+ tristate 'POWERNV (OPAL firmware) IPMI interface'
+ help
+ Provides a driver for OPAL firmware-based IPMI interfaces.
+
+config IPMI_WATCHDOG
+ tristate 'IPMI Watchdog Timer'
+ help
+ This enables the IPMI watchdog timer.
+
+config IPMI_POWEROFF
+ tristate 'IPMI Poweroff'
+ help
+ This enables a function to power off the system with IPMI if
+ the IPMI management controller is capable of this.
+
+endif # IPMI_HANDLER
+
+config IPMI_KCS_BMC
+ tristate
+
+config ASPEED_KCS_IPMI_BMC
+ depends on ARCH_ASPEED || COMPILE_TEST
+ select IPMI_KCS_BMC
+ select REGMAP_MMIO
+ tristate "Aspeed KCS IPMI BMC driver"
+ help
+ Provides a driver for the KCS (Keyboard Controller Style) IPMI
+ interface found on Aspeed SOCs (AST2400 and AST2500).
+
+ The driver implements the BMC side of the KCS controller and
+ provides access to the KCS I/O space for the BMC.
+
+config NPCM7XX_KCS_IPMI_BMC
+ depends on ARCH_NPCM || COMPILE_TEST
+ select IPMI_KCS_BMC
+ select REGMAP_MMIO
+ tristate "NPCM KCS IPMI BMC driver"
+ help
+ Provides a driver for the KCS (Keyboard Controller Style) IPMI
+ interface found on Nuvoton NPCM SOCs.
+
+ The driver implements the BMC side of the KCS controller and
+ provides access to the KCS I/O space for the BMC.
+
+ This support is also available as a module. If so, the module
+ will be called kcs_bmc_npcm7xx.
+
+config IPMI_KCS_BMC_CDEV_IPMI
+ depends on IPMI_KCS_BMC
+ tristate "IPMI character device interface for BMC KCS devices"
+ help
+ Provides a BMC-side character device implementing IPMI
+ semantics for KCS IPMI devices.
+
+ Say YES if you wish to expose KCS devices on the BMC for IPMI
+ purposes.
+
+ This support is also available as a module. The module will be
+ called kcs_bmc_cdev_ipmi.
+
+config IPMI_KCS_BMC_SERIO
+ depends on IPMI_KCS_BMC && SERIO
+ tristate "SerIO adaptor for BMC KCS devices"
+ help
+ Adapts the BMC KCS device for the SerIO subsystem. This allows users
+ to take advantage of userspace interfaces provided by SerIO where
+ appropriate.
+
+ Say YES if you wish to expose KCS devices on the BMC via SerIO
+ interfaces.
+
+ This support is also available as a module. The module will be
+ called kcs_bmc_serio.
+
+config ASPEED_BT_IPMI_BMC
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on MFD_SYSCON
+ select REGMAP_MMIO
+ tristate "BT IPMI bmc driver"
+ help
+ Provides a driver for the BT (Block Transfer) IPMI interface
+ found on Aspeed SOCs (AST2400 and AST2500). The driver
+ implements the BMC side of the BT interface.
+
+config IPMB_DEVICE_INTERFACE
+ tristate 'IPMB Interface handler'
+ depends on I2C
+ depends on I2C_SLAVE
+ help
+ Provides a driver for a device (Satellite MC) to
+ receive requests and send responses back to the BMC via
+ the IPMB interface. This module requires I2C support.
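Not part of the patch: as a rough reference, a typical host-side (non-BMC) selection of these options in a kernel .config might look like the fragment below. Which interface drivers you actually enable depends on your hardware; all symbols are defined in the Kconfig above.

CONFIG_IPMI_HANDLER=m
CONFIG_IPMI_PANIC_EVENT=y
CONFIG_IPMI_DEVICE_INTERFACE=m
CONFIG_IPMI_SI=m
CONFIG_IPMI_SSIF=m
CONFIG_IPMI_WATCHDOG=m
CONFIG_IPMI_POWEROFF=m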
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
new file mode 100644
index 000000000..7ce790efa
--- /dev/null
+++ b/drivers/char/ipmi/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the ipmi drivers.
+#
+
+ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o \
+ ipmi_si_hotmod.o ipmi_si_hardcode.o ipmi_si_platform.o \
+ ipmi_si_port_io.o ipmi_si_mem_io.o
+ifdef CONFIG_PCI
+ipmi_si-y += ipmi_si_pci.o
+endif
+ifdef CONFIG_PARISC
+ipmi_si-y += ipmi_si_parisc.o
+endif
+
+obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
+obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
+obj-$(CONFIG_IPMI_SI) += ipmi_si.o
+obj-$(CONFIG_IPMI_DMI_DECODE) += ipmi_dmi.o
+obj-$(CONFIG_IPMI_PLAT_DATA) += ipmi_plat_data.o
+obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
+obj-$(CONFIG_IPMI_IPMB) += ipmi_ipmb.o
+obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
+obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
+obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
+obj-$(CONFIG_IPMI_KCS_BMC) += kcs_bmc.o
+obj-$(CONFIG_IPMI_KCS_BMC_SERIO) += kcs_bmc_serio.o
+obj-$(CONFIG_IPMI_KCS_BMC_CDEV_IPMI) += kcs_bmc_cdev_ipmi.o
+obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o
+obj-$(CONFIG_ASPEED_KCS_IPMI_BMC) += kcs_bmc_aspeed.o
+obj-$(CONFIG_NPCM7XX_KCS_IPMI_BMC) += kcs_bmc_npcm7xx.o
+obj-$(CONFIG_IPMB_DEVICE_INTERFACE) += ipmb_dev_int.o
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
new file mode 100644
index 000000000..7450904e3
--- /dev/null
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2015-2016, IBM Corporation.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bt-bmc.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+
+/*
+ * This is a BMC device used to communicate with the host
+ */
+#define DEVICE_NAME "ipmi-bt-host"
+
+#define BT_IO_BASE 0xe4
+#define BT_IRQ 10
+
+#define BT_CR0 0x0
+#define BT_CR0_IO_BASE 16
+#define BT_CR0_IRQ 12
+#define BT_CR0_EN_CLR_SLV_RDP 0x8
+#define BT_CR0_EN_CLR_SLV_WRP 0x4
+#define BT_CR0_ENABLE_IBT 0x1
+#define BT_CR1 0x4
+#define BT_CR1_IRQ_H2B 0x01
+#define BT_CR1_IRQ_HBUSY 0x40
+#define BT_CR2 0x8
+#define BT_CR2_IRQ_H2B 0x01
+#define BT_CR2_IRQ_HBUSY 0x40
+#define BT_CR3 0xc
+#define BT_CTRL 0x10
+#define BT_CTRL_B_BUSY 0x80
+#define BT_CTRL_H_BUSY 0x40
+#define BT_CTRL_OEM0 0x20
+#define BT_CTRL_SMS_ATN 0x10
+#define BT_CTRL_B2H_ATN 0x08
+#define BT_CTRL_H2B_ATN 0x04
+#define BT_CTRL_CLR_RD_PTR 0x02
+#define BT_CTRL_CLR_WR_PTR 0x01
+#define BT_BMC2HOST 0x14
+#define BT_INTMASK 0x18
+#define BT_INTMASK_B2H_IRQEN 0x01
+#define BT_INTMASK_B2H_IRQ 0x02
+#define BT_INTMASK_BMC_HWRST 0x80
+
+#define BT_BMC_BUFFER_SIZE 256
+
+struct bt_bmc {
+ struct device dev;
+ struct miscdevice miscdev;
+ void __iomem *base;
+ int irq;
+ wait_queue_head_t queue;
+ struct timer_list poll_timer;
+ struct mutex mutex;
+};
+
+static atomic_t open_count = ATOMIC_INIT(0);
+
+static u8 bt_inb(struct bt_bmc *bt_bmc, int reg)
+{
+ return readb(bt_bmc->base + reg);
+}
+
+static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg)
+{
+ writeb(data, bt_bmc->base + reg);
+}
+
+static void clr_rd_ptr(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_CLR_RD_PTR, BT_CTRL);
+}
+
+static void clr_wr_ptr(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_CLR_WR_PTR, BT_CTRL);
+}
+
+static void clr_h2b_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_H2B_ATN, BT_CTRL);
+}
+
+static void set_b_busy(struct bt_bmc *bt_bmc)
+{
+ if (!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY))
+ bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void clr_b_busy(struct bt_bmc *bt_bmc)
+{
+ if (bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY)
+ bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void set_b2h_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_B2H_ATN, BT_CTRL);
+}
+
+static u8 bt_read(struct bt_bmc *bt_bmc)
+{
+ return bt_inb(bt_bmc, BT_BMC2HOST);
+}
+
+static ssize_t bt_readn(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ buf[i] = bt_read(bt_bmc);
+ return n;
+}
+
+static void bt_write(struct bt_bmc *bt_bmc, u8 c)
+{
+ bt_outb(bt_bmc, c, BT_BMC2HOST);
+}
+
+static ssize_t bt_writen(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ bt_write(bt_bmc, buf[i]);
+ return n;
+}
+
+static void set_sms_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_SMS_ATN, BT_CTRL);
+}
+
+static struct bt_bmc *file_bt_bmc(struct file *file)
+{
+ return container_of(file->private_data, struct bt_bmc, miscdev);
+}
+
+static int bt_bmc_open(struct inode *inode, struct file *file)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ if (atomic_inc_return(&open_count) == 1) {
+ clr_b_busy(bt_bmc);
+ return 0;
+ }
+
+ atomic_dec(&open_count);
+ return -EBUSY;
+}
+
+/*
+ * The BT (Block Transfer) interface means that entire messages are
+ * buffered by the host before a notification is sent to the BMC that
+ * there is data to be read. The first byte is the length and the
+ * message data follows. The read operation just tries to capture
+ * the whole message before returning it to userspace.
+ *
+ * BT Message format :
+ *
+ * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5:N
+ * Length NetFn/LUN Seq Cmd Data
+ *
+ */
+static ssize_t bt_bmc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ u8 len;
+ int len_byte = 1;
+ u8 kbuffer[BT_BMC_BUFFER_SIZE];
+ ssize_t ret = 0;
+ ssize_t nread;
+
+ WARN_ON(*ppos);
+
+ if (wait_event_interruptible(bt_bmc->queue,
+ bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))
+ return -ERESTARTSYS;
+
+ mutex_lock(&bt_bmc->mutex);
+
+ if (unlikely(!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ set_b_busy(bt_bmc);
+ clr_h2b_atn(bt_bmc);
+ clr_rd_ptr(bt_bmc);
+
+ /*
+ * The BT frames start with the message length, which does not
+ * include the length byte.
+ */
+ kbuffer[0] = bt_read(bt_bmc);
+ len = kbuffer[0];
+
+ /* We pass the length back to userspace as well */
+ if (len + 1 > count)
+ len = count - 1;
+
+ while (len) {
+ nread = min_t(ssize_t, len, sizeof(kbuffer) - len_byte);
+
+ bt_readn(bt_bmc, kbuffer + len_byte, nread);
+
+ if (copy_to_user(buf, kbuffer, nread + len_byte)) {
+ ret = -EFAULT;
+ break;
+ }
+ len -= nread;
+ buf += nread + len_byte;
+ ret += nread + len_byte;
+ len_byte = 0;
+ }
+
+ clr_b_busy(bt_bmc);
+
+out_unlock:
+ mutex_unlock(&bt_bmc->mutex);
+ return ret;
+}
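To illustrate the read path above from the BMC side, here is a minimal userspace sketch; it assumes the misc device appears as /dev/ipmi-bt-host (the DEVICE_NAME used by this driver) and parses only the fixed header of the BT message format described in the comment above.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint8_t buf[256];
	ssize_t n;
	int fd = open("/dev/ipmi-bt-host", O_RDWR);

	if (fd < 0)
		return 1;
	/* Blocks until the host raises H2B_ATN with a request. */
	n = read(fd, buf, sizeof(buf));
	if (n >= 4)
		printf("len=%u netfn/lun=0x%02x seq=0x%02x cmd=0x%02x\n",
		       buf[0], buf[1], buf[2], buf[3]);
	close(fd);
	return 0;
}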
+
+/*
+ * BT Message response format :
+ *
+ * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5 Byte 6:N
+ * Length NetFn/LUN Seq Cmd Code Data
+ */
+static ssize_t bt_bmc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ u8 kbuffer[BT_BMC_BUFFER_SIZE];
+ ssize_t ret = 0;
+ ssize_t nwritten;
+
+ /*
+ * require at least the minimum response size: length,
+ * NetFn/LUN, Seq, Cmd, and a completion code
+ */
+ if (count < 5)
+ return -EINVAL;
+
+ WARN_ON(*ppos);
+
+ /*
+ * There's no interrupt for clearing bmc busy so we have to
+ * poll
+ */
+ if (wait_event_interruptible(bt_bmc->queue,
+ !(bt_inb(bt_bmc, BT_CTRL) &
+ (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))))
+ return -ERESTARTSYS;
+
+ mutex_lock(&bt_bmc->mutex);
+
+ if (unlikely(bt_inb(bt_bmc, BT_CTRL) &
+ (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ clr_wr_ptr(bt_bmc);
+
+ while (count) {
+ nwritten = min_t(ssize_t, count, sizeof(kbuffer));
+ if (copy_from_user(&kbuffer, buf, nwritten)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ bt_writen(bt_bmc, kbuffer, nwritten);
+
+ count -= nwritten;
+ buf += nwritten;
+ ret += nwritten;
+ }
+
+ set_b2h_atn(bt_bmc);
+
+out_unlock:
+ mutex_unlock(&bt_bmc->mutex);
+ return ret;
+}
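The matching response, per the format comment above, echoes the request's Seq and Cmd and sets the odd (response) NetFn. A sketch continuing the hypothetical reader from earlier (buf holds the request; note the driver rejects writes shorter than 5 bytes):

uint8_t rsp[5];

rsp[0] = 4;             /* length of the four bytes that follow */
rsp[1] = buf[1] | 0x04; /* NetFn/LUN with the response bit set */
rsp[2] = buf[2];        /* Seq, echoed from the request */
rsp[3] = buf[3];        /* Cmd, echoed from the request */
rsp[4] = 0x00;          /* completion code: success */
if (write(fd, rsp, sizeof(rsp)) != sizeof(rsp))
	perror("bt response");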
+
+static long bt_bmc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long param)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ switch (cmd) {
+ case BT_BMC_IOCTL_SMS_ATN:
+ set_sms_atn(bt_bmc);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int bt_bmc_release(struct inode *inode, struct file *file)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ atomic_dec(&open_count);
+ set_b_busy(bt_bmc);
+ return 0;
+}
+
+static __poll_t bt_bmc_poll(struct file *file, poll_table *wait)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ __poll_t mask = 0;
+ u8 ctrl;
+
+ poll_wait(file, &bt_bmc->queue, wait);
+
+ ctrl = bt_inb(bt_bmc, BT_CTRL);
+
+ if (ctrl & BT_CTRL_H2B_ATN)
+ mask |= EPOLLIN;
+
+ if (!(ctrl & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN)))
+ mask |= EPOLLOUT;
+
+ return mask;
+}
+
+static const struct file_operations bt_bmc_fops = {
+ .owner = THIS_MODULE,
+ .open = bt_bmc_open,
+ .read = bt_bmc_read,
+ .write = bt_bmc_write,
+ .release = bt_bmc_release,
+ .poll = bt_bmc_poll,
+ .unlocked_ioctl = bt_bmc_ioctl,
+};
+
+static void poll_timer(struct timer_list *t)
+{
+ struct bt_bmc *bt_bmc = from_timer(bt_bmc, t, poll_timer);
+
+ bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
+ wake_up(&bt_bmc->queue);
+ add_timer(&bt_bmc->poll_timer);
+}
+
+static irqreturn_t bt_bmc_irq(int irq, void *arg)
+{
+ struct bt_bmc *bt_bmc = arg;
+ u32 reg;
+
+ reg = readl(bt_bmc->base + BT_CR2);
+
+ reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY;
+ if (!reg)
+ return IRQ_NONE;
+
+ /* ack pending IRQs */
+ writel(reg, bt_bmc->base + BT_CR2);
+
+ wake_up(&bt_bmc->queue);
+ return IRQ_HANDLED;
+}
+
+static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int rc;
+ u32 reg;
+
+ bt_bmc->irq = platform_get_irq_optional(pdev, 0);
+ if (bt_bmc->irq < 0)
+ return bt_bmc->irq;
+
+ rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED,
+ DEVICE_NAME, bt_bmc);
+ if (rc < 0) {
+ dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq);
+ bt_bmc->irq = rc;
+ return rc;
+ }
+
+ /*
+ * Configure IRQs on the bmc clearing the H2B and HBUSY bits;
+ * H2B will be asserted when the bmc has data for us; HBUSY
+ * will be cleared (along with B2H) when we can write the next
+ * message to the BT buffer
+ */
+ reg = readl(bt_bmc->base + BT_CR1);
+ reg |= BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY;
+ writel(reg, bt_bmc->base + BT_CR1);
+
+ return 0;
+}
+
+static int bt_bmc_probe(struct platform_device *pdev)
+{
+ struct bt_bmc *bt_bmc;
+ struct device *dev;
+ int rc;
+
+ dev = &pdev->dev;
+ dev_info(dev, "Found bt bmc device\n");
+
+ bt_bmc = devm_kzalloc(dev, sizeof(*bt_bmc), GFP_KERNEL);
+ if (!bt_bmc)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, bt_bmc);
+
+ bt_bmc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bt_bmc->base))
+ return PTR_ERR(bt_bmc->base);
+
+ mutex_init(&bt_bmc->mutex);
+ init_waitqueue_head(&bt_bmc->queue);
+
+ bt_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
+ bt_bmc->miscdev.name = DEVICE_NAME;
+ bt_bmc->miscdev.fops = &bt_bmc_fops;
+ bt_bmc->miscdev.parent = dev;
+ rc = misc_register(&bt_bmc->miscdev);
+ if (rc) {
+ dev_err(dev, "Unable to register misc device\n");
+ return rc;
+ }
+
+ bt_bmc_config_irq(bt_bmc, pdev);
+
+ if (bt_bmc->irq >= 0) {
+ dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
+ } else {
+ dev_info(dev, "No IRQ; using timer\n");
+ timer_setup(&bt_bmc->poll_timer, poll_timer, 0);
+ bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10);
+ add_timer(&bt_bmc->poll_timer);
+ }
+
+ writel((BT_IO_BASE << BT_CR0_IO_BASE) |
+ (BT_IRQ << BT_CR0_IRQ) |
+ BT_CR0_EN_CLR_SLV_RDP |
+ BT_CR0_EN_CLR_SLV_WRP |
+ BT_CR0_ENABLE_IBT,
+ bt_bmc->base + BT_CR0);
+
+ clr_b_busy(bt_bmc);
+
+ return 0;
+}
+
+static int bt_bmc_remove(struct platform_device *pdev)
+{
+ struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&bt_bmc->miscdev);
+ if (bt_bmc->irq < 0)
+ del_timer_sync(&bt_bmc->poll_timer);
+ return 0;
+}
+
+static const struct of_device_id bt_bmc_match[] = {
+ { .compatible = "aspeed,ast2400-ibt-bmc" },
+ { .compatible = "aspeed,ast2500-ibt-bmc" },
+ { .compatible = "aspeed,ast2600-ibt-bmc" },
+ { },
+};
+
+static struct platform_driver bt_bmc_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = bt_bmc_match,
+ },
+ .probe = bt_bmc_probe,
+ .remove = bt_bmc_remove,
+};
+
+module_platform_driver(bt_bmc_driver);
+
+MODULE_DEVICE_TABLE(of, bt_bmc_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>");
+MODULE_DESCRIPTION("Linux device interface to the IPMI BT interface");
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
new file mode 100644
index 000000000..a0e9e80d9
--- /dev/null
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * IPMB driver to receive a request and send a response
+ *
+ * Copyright (C) 2019 Mellanox Technologies, Ltd.
+ *
+ * This was inspired by Brendan Higgins' ipmi-bmc-bt-i2c driver.
+ */
+
+#include <linux/acpi.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#define MAX_MSG_LEN 240
+#define IPMB_REQUEST_LEN_MIN 7
+#define NETFN_RSP_BIT_MASK 0x4
+#define REQUEST_QUEUE_MAX_LEN 256
+
+#define IPMB_MSG_LEN_IDX 0
+#define RQ_SA_8BIT_IDX 1
+#define NETFN_LUN_IDX 2
+
+#define GET_7BIT_ADDR(addr_8bit) (addr_8bit >> 1)
+#define GET_8BIT_ADDR(addr_7bit) ((addr_7bit << 1) & 0xff)
+
+#define IPMB_MSG_PAYLOAD_LEN_MAX (MAX_MSG_LEN - IPMB_REQUEST_LEN_MIN - 1)
+
+#define SMBUS_MSG_HEADER_LENGTH 2
+#define SMBUS_MSG_IDX_OFFSET (SMBUS_MSG_HEADER_LENGTH + 1)
+
+struct ipmb_msg {
+ u8 len;
+ u8 rs_sa;
+ u8 netfn_rs_lun;
+ u8 checksum1;
+ u8 rq_sa;
+ u8 rq_seq_rq_lun;
+ u8 cmd;
+ u8 payload[IPMB_MSG_PAYLOAD_LEN_MAX];
+ /* checksum2 is included in payload */
+} __packed;
+
+struct ipmb_request_elem {
+ struct list_head list;
+ struct ipmb_msg request;
+};
+
+struct ipmb_dev {
+ struct i2c_client *client;
+ struct miscdevice miscdev;
+ struct ipmb_msg request;
+ struct list_head request_queue;
+ atomic_t request_queue_len;
+ size_t msg_idx;
+ spinlock_t lock;
+ wait_queue_head_t wait_queue;
+ struct mutex file_mutex;
+ bool is_i2c_protocol;
+};
+
+static inline struct ipmb_dev *to_ipmb_dev(struct file *file)
+{
+ return container_of(file->private_data, struct ipmb_dev, miscdev);
+}
+
+static ssize_t ipmb_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
+ struct ipmb_request_elem *queue_elem;
+ struct ipmb_msg msg;
+ ssize_t ret = 0;
+
+ memset(&msg, 0, sizeof(msg));
+
+ spin_lock_irq(&ipmb_dev->lock);
+
+ while (list_empty(&ipmb_dev->request_queue)) {
+ spin_unlock_irq(&ipmb_dev->lock);
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(ipmb_dev->wait_queue,
+ !list_empty(&ipmb_dev->request_queue));
+ if (ret)
+ return ret;
+
+ spin_lock_irq(&ipmb_dev->lock);
+ }
+
+ queue_elem = list_first_entry(&ipmb_dev->request_queue,
+ struct ipmb_request_elem, list);
+ memcpy(&msg, &queue_elem->request, sizeof(msg));
+ list_del(&queue_elem->list);
+ kfree(queue_elem);
+ atomic_dec(&ipmb_dev->request_queue_len);
+
+ spin_unlock_irq(&ipmb_dev->lock);
+
+ count = min_t(size_t, count, msg.len + 1);
+ if (copy_to_user(buf, &msg, count))
+ ret = -EFAULT;
+
+ return ret < 0 ? ret : count;
+}
+
+static int ipmb_i2c_write(struct i2c_client *client, u8 *msg, u8 addr)
+{
+ struct i2c_msg i2c_msg;
+
+ /*
+ * subtract 1 byte (rq_sa) from the length of the msg passed to
+ * raw i2c_transfer
+ */
+ i2c_msg.len = msg[IPMB_MSG_LEN_IDX] - 1;
+
+ /* Assign message to buffer except first 2 bytes (length and address) */
+ i2c_msg.buf = msg + 2;
+
+ i2c_msg.addr = addr;
+ i2c_msg.flags = client->flags & I2C_CLIENT_PEC;
+
+ return i2c_transfer(client->adapter, &i2c_msg, 1);
+}
+
+static ssize_t ipmb_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
+ u8 rq_sa, netf_rq_lun, msg_len;
+ struct i2c_client *temp_client;
+ u8 msg[MAX_MSG_LEN];
+ ssize_t ret;
+
+ if (count > sizeof(msg))
+ return -EINVAL;
+
+ if (copy_from_user(&msg, buf, count))
+ return -EFAULT;
+
+ if (count < msg[0])
+ return -EINVAL;
+
+ rq_sa = GET_7BIT_ADDR(msg[RQ_SA_8BIT_IDX]);
+ netf_rq_lun = msg[NETFN_LUN_IDX];
+
+ /* Check i2c block transfer vs smbus */
+ if (ipmb_dev->is_i2c_protocol) {
+ ret = ipmb_i2c_write(ipmb_dev->client, msg, rq_sa);
+ return (ret == 1) ? count : ret;
+ }
+
+ /*
+ * subtract rq_sa and netf_rq_lun from the length of the msg. Fill the
+ * temporary client. Note that its use is an exception for IPMI.
+ */
+ msg_len = msg[IPMB_MSG_LEN_IDX] - SMBUS_MSG_HEADER_LENGTH;
+ temp_client = kmemdup(ipmb_dev->client, sizeof(*temp_client), GFP_KERNEL);
+ if (!temp_client)
+ return -ENOMEM;
+
+ temp_client->addr = rq_sa;
+
+ ret = i2c_smbus_write_block_data(temp_client, netf_rq_lun, msg_len,
+ msg + SMBUS_MSG_IDX_OFFSET);
+ kfree(temp_client);
+
+ return ret < 0 ? ret : count;
+}
+
+static __poll_t ipmb_poll(struct file *file, poll_table *wait)
+{
+ struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
+ __poll_t mask = EPOLLOUT;
+
+ mutex_lock(&ipmb_dev->file_mutex);
+ poll_wait(file, &ipmb_dev->wait_queue, wait);
+
+ if (atomic_read(&ipmb_dev->request_queue_len))
+ mask |= EPOLLIN;
+ mutex_unlock(&ipmb_dev->file_mutex);
+
+ return mask;
+}
+
+static const struct file_operations ipmb_fops = {
+ .owner = THIS_MODULE,
+ .read = ipmb_read,
+ .write = ipmb_write,
+ .poll = ipmb_poll,
+};
+
+/* Called with ipmb_dev->lock held. */
+static void ipmb_handle_request(struct ipmb_dev *ipmb_dev)
+{
+ struct ipmb_request_elem *queue_elem;
+
+ if (atomic_read(&ipmb_dev->request_queue_len) >=
+ REQUEST_QUEUE_MAX_LEN)
+ return;
+
+ queue_elem = kmalloc(sizeof(*queue_elem), GFP_ATOMIC);
+ if (!queue_elem)
+ return;
+
+ memcpy(&queue_elem->request, &ipmb_dev->request,
+ sizeof(struct ipmb_msg));
+ list_add(&queue_elem->list, &ipmb_dev->request_queue);
+ atomic_inc(&ipmb_dev->request_queue_len);
+ wake_up_all(&ipmb_dev->wait_queue);
+}
+
+static u8 ipmb_verify_checksum1(struct ipmb_dev *ipmb_dev, u8 rs_sa)
+{
+ /* The 8 LSBs of the sum are 0 when the checksum is valid */
+ return (rs_sa + ipmb_dev->request.netfn_rs_lun +
+ ipmb_dev->request.checksum1);
+}
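For illustration, the generator matching this check is the standard IPMB two's-complement checksum. A hypothetical helper (not part of this driver) that a sender would use so the verification above sums to 0 modulo 256:

static u8 ipmb_gen_checksum1(u8 rs_sa, u8 netfn_rs_lun)
{
	/* Two's complement of the first two header bytes. */
	return -(rs_sa + netfn_rs_lun);
}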
+
+/*
+ * Verify that the message has a proper IPMB header with the minimum
+ * length and a correct checksum byte.
+ */
+static bool is_ipmb_msg(struct ipmb_dev *ipmb_dev, u8 rs_sa)
+{
+ if ((ipmb_dev->msg_idx >= IPMB_REQUEST_LEN_MIN) &&
+ (!ipmb_verify_checksum1(ipmb_dev, rs_sa)))
+ return true;
+
+ return false;
+}
+
+/*
+ * The IPMB protocol only supports I2C Writes so there is no need
+ * to support I2C_SLAVE_READ* events.
+ * This i2c callback function only monitors IPMB request messages
+ * and adds them in a queue, so that they can be handled by
+ * receive_ipmb_request.
+ */
+static int ipmb_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ struct ipmb_dev *ipmb_dev = i2c_get_clientdata(client);
+ u8 *buf = (u8 *)&ipmb_dev->request;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipmb_dev->lock, flags);
+ switch (event) {
+ case I2C_SLAVE_WRITE_REQUESTED:
+ memset(&ipmb_dev->request, 0, sizeof(ipmb_dev->request));
+ ipmb_dev->msg_idx = 0;
+
+ /*
+ * At index 0, ipmb_msg stores the length of msg,
+ * skip it for now.
+ * The len will be populated once the whole
+ * buf is populated.
+ *
+ * The I2C bus driver's responsibility is to pass the
+ * data bytes to the backend driver; it does not
+ * forward the i2c slave address.
+ * Since the first byte in the IPMB message is the
+ * address of the responder, it is the responsibility
+ * of the IPMB driver to format the message properly.
+ * So this driver prepends the address of the responder
+ * to the received i2c data before the request message
+ * is handled in userland.
+ */
+ buf[++ipmb_dev->msg_idx] = GET_8BIT_ADDR(client->addr);
+ break;
+
+ case I2C_SLAVE_WRITE_RECEIVED:
+ if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg) - 1)
+ break;
+
+ buf[++ipmb_dev->msg_idx] = *val;
+ break;
+
+ case I2C_SLAVE_STOP:
+ ipmb_dev->request.len = ipmb_dev->msg_idx;
+ if (is_ipmb_msg(ipmb_dev, GET_8BIT_ADDR(client->addr)))
+ ipmb_handle_request(ipmb_dev);
+ break;
+
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&ipmb_dev->lock, flags);
+
+ return 0;
+}
+
+static int ipmb_probe(struct i2c_client *client)
+{
+ struct ipmb_dev *ipmb_dev;
+ int ret;
+
+ ipmb_dev = devm_kzalloc(&client->dev, sizeof(*ipmb_dev),
+ GFP_KERNEL);
+ if (!ipmb_dev)
+ return -ENOMEM;
+
+ spin_lock_init(&ipmb_dev->lock);
+ init_waitqueue_head(&ipmb_dev->wait_queue);
+ atomic_set(&ipmb_dev->request_queue_len, 0);
+ INIT_LIST_HEAD(&ipmb_dev->request_queue);
+
+ mutex_init(&ipmb_dev->file_mutex);
+
+ ipmb_dev->miscdev.minor = MISC_DYNAMIC_MINOR;
+
+ ipmb_dev->miscdev.name = devm_kasprintf(&client->dev, GFP_KERNEL,
+ "%s%d", "ipmb-",
+ client->adapter->nr);
+ ipmb_dev->miscdev.fops = &ipmb_fops;
+ ipmb_dev->miscdev.parent = &client->dev;
+ ret = misc_register(&ipmb_dev->miscdev);
+ if (ret)
+ return ret;
+
+ ipmb_dev->is_i2c_protocol
+ = device_property_read_bool(&client->dev, "i2c-protocol");
+
+ ipmb_dev->client = client;
+ i2c_set_clientdata(client, ipmb_dev);
+ ret = i2c_slave_register(client, ipmb_slave_cb);
+ if (ret) {
+ misc_deregister(&ipmb_dev->miscdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ipmb_remove(struct i2c_client *client)
+{
+ struct ipmb_dev *ipmb_dev = i2c_get_clientdata(client);
+
+ i2c_slave_unregister(client);
+ misc_deregister(&ipmb_dev->miscdev);
+}
+
+static const struct i2c_device_id ipmb_id[] = {
+ { "ipmb-dev", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ipmb_id);
+
+static const struct acpi_device_id acpi_ipmb_id[] = {
+ { "IPMB0001", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, acpi_ipmb_id);
+
+static struct i2c_driver ipmb_driver = {
+ .driver = {
+ .name = "ipmb-dev",
+ .acpi_match_table = ACPI_PTR(acpi_ipmb_id),
+ },
+ .probe_new = ipmb_probe,
+ .remove = ipmb_remove,
+ .id_table = ipmb_id,
+};
+module_i2c_driver(ipmb_driver);
+
+MODULE_AUTHOR("Mellanox Technologies");
+MODULE_DESCRIPTION("IPMB driver");
+MODULE_LICENSE("GPL v2");
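End to end, the driver above exposes /dev/ipmb-<N>, where N is the I2C adapter number. A hedged userspace sketch that reads one queued request, using the struct ipmb_msg layout to pick out header fields (adapter 0 is an assumption):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint8_t req[256];
	ssize_t n;
	int fd = open("/dev/ipmb-0", O_RDWR);

	if (fd < 0)
		return 1;
	/* Blocks (without O_NONBLOCK) until a request is queued. */
	n = read(fd, req, sizeof(req));
	if (n > 6)
		printf("len=%u rs_sa=0x%02x netfn=0x%02x cmd=0x%02x\n",
		       req[0], req[1], req[2] >> 2, req[6]);
	close(fd);
	return 0;
}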
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
new file mode 100644
index 000000000..f41f78972
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_bt_sm.c
+ *
+ * The state machine for an Open IPMI BT sub-driver under ipmi_si.c, part
+ * of the driver architecture at http://sourceforge.net/projects/openipmi
+ *
+ * Author: Rocky Craig <first.last@hp.com>
+ */
+
+#define DEBUG /* So dev_dbg() is always available. */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi_msgdefs.h> /* for completion codes */
+#include "ipmi_si_sm.h"
+
+#define BT_DEBUG_OFF 0 /* Used in production */
+#define BT_DEBUG_ENABLE 1 /* Generic messages */
+#define BT_DEBUG_MSG 2 /* Prints all request/response buffers */
+#define BT_DEBUG_STATES 4 /* Verbose look at state changes */
+/*
+ * BT_DEBUG_OFF must be zero to correspond to the default uninitialized
+ * value
+ */
+
+static int bt_debug; /* 0 == BT_DEBUG_OFF */
+
+module_param(bt_debug, int, 0644);
+MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
+
+/*
+ * Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
+ * and 64 byte buffers. However, one HP implementation wants 255 bytes of
+ * buffer (with a documented message of 160 bytes) so go for the max.
+ * Since the Open IPMI architecture is single-message oriented at this
+ * stage, the queue depth of BT is of no concern.
+ */
+
+#define BT_NORMAL_TIMEOUT 5 /* seconds */
+#define BT_NORMAL_RETRY_LIMIT 2
+#define BT_RESET_DELAY 6 /* seconds after warm reset */
+
+/*
+ * States are written in chronological order and usually cover
+ * multiple rows of the state table discussion in the IPMI spec.
+ */
+
+enum bt_states {
+ BT_STATE_IDLE = 0, /* Order is critical in this list */
+ BT_STATE_XACTION_START,
+ BT_STATE_WRITE_BYTES,
+ BT_STATE_WRITE_CONSUME,
+ BT_STATE_READ_WAIT,
+ BT_STATE_CLEAR_B2H,
+ BT_STATE_READ_BYTES,
+ BT_STATE_RESET1, /* These must come last */
+ BT_STATE_RESET2,
+ BT_STATE_RESET3,
+ BT_STATE_RESTART,
+ BT_STATE_PRINTME,
+ BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */
+};
+
+/*
+ * Macros seen at the end of state "case" blocks. They help with legibility
+ * and debugging.
+ */
+
+#define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; }
+
+#define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; }
+
+struct si_sm_data {
+ enum bt_states state;
+ unsigned char seq; /* BT sequence number */
+ struct si_sm_io *io;
+ unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
+ int write_count;
+ unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
+ int read_count;
+ int truncated;
+ long timeout; /* microseconds countdown */
+ int error_retries; /* end of "common" fields */
+ int nonzero_status; /* hung BMCs stay all 0 */
+ enum bt_states complete; /* to divert the state machine */
+ long BT_CAP_req2rsp;
+ int BT_CAP_retries; /* Recommended retries */
+};
+
+#define BT_CLR_WR_PTR 0x01 /* See IPMI 1.5 table 11.6.4 */
+#define BT_CLR_RD_PTR 0x02
+#define BT_H2B_ATN 0x04
+#define BT_B2H_ATN 0x08
+#define BT_SMS_ATN 0x10
+#define BT_OEM0 0x20
+#define BT_H_BUSY 0x40
+#define BT_B_BUSY 0x80
+
+/*
+ * Some bits are toggled on each write: write once to set it, once
+ * more to clear it; writing a zero does nothing. To absolutely
+ * clear it, check its state and write if set. This avoids the "get
+ * current then use as mask" scheme to modify one bit. Note that the
+ * variable "bt" is hardcoded into these macros.
+ */
+
+#define BT_STATUS bt->io->inputb(bt->io, 0)
+#define BT_CONTROL(x) bt->io->outputb(bt->io, 0, x)
+
+#define BMC2HOST bt->io->inputb(bt->io, 1)
+#define HOST2BMC(x) bt->io->outputb(bt->io, 1, x)
+
+#define BT_INTMASK_R bt->io->inputb(bt->io, 2)
+#define BT_INTMASK_W(x) bt->io->outputb(bt->io, 2, x)
+
+/*
+ * Convenience routines for debugging. These are not multi-open safe!
+ * Note the macros have hardcoded variables in them.
+ */
+
+static char *state2txt(unsigned char state)
+{
+ switch (state) {
+ case BT_STATE_IDLE: return("IDLE");
+ case BT_STATE_XACTION_START: return("XACTION");
+ case BT_STATE_WRITE_BYTES: return("WR_BYTES");
+ case BT_STATE_WRITE_CONSUME: return("WR_CONSUME");
+ case BT_STATE_READ_WAIT: return("RD_WAIT");
+ case BT_STATE_CLEAR_B2H: return("CLEAR_B2H");
+ case BT_STATE_READ_BYTES: return("RD_BYTES");
+ case BT_STATE_RESET1: return("RESET1");
+ case BT_STATE_RESET2: return("RESET2");
+ case BT_STATE_RESET3: return("RESET3");
+ case BT_STATE_RESTART: return("RESTART");
+ case BT_STATE_LONG_BUSY: return("LONG_BUSY");
+ }
+ return("BAD STATE");
+}
+#define STATE2TXT state2txt(bt->state)
+
+static char *status2txt(unsigned char status)
+{
+ /*
+ * This cannot be called by two threads at the same time and
+ * the buffer is always consumed immediately, so the static is
+ * safe to use.
+ */
+ static char buf[40];
+
+ strcpy(buf, "[ ");
+ if (status & BT_B_BUSY)
+ strcat(buf, "B_BUSY ");
+ if (status & BT_H_BUSY)
+ strcat(buf, "H_BUSY ");
+ if (status & BT_OEM0)
+ strcat(buf, "OEM0 ");
+ if (status & BT_SMS_ATN)
+ strcat(buf, "SMS ");
+ if (status & BT_B2H_ATN)
+ strcat(buf, "B2H ");
+ if (status & BT_H2B_ATN)
+ strcat(buf, "H2B ");
+ strcat(buf, "]");
+ return buf;
+}
+#define STATUS2TXT status2txt(status)
+
+/* called externally at insmod time, and internally on cleanup */
+
+static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
+{
+ memset(bt, 0, sizeof(struct si_sm_data));
+ if (bt->io != io) {
+ /* external: one-time only things */
+ bt->io = io;
+ bt->seq = 0;
+ }
+ bt->state = BT_STATE_IDLE; /* start here */
+ bt->complete = BT_STATE_IDLE; /* end here */
+ bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC;
+ bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
+ return 3; /* We claim 3 bytes of space; ought to check SPMI table */
+}
+
+/* Jam a completion code (probably an error) into a response */
+
+static void force_result(struct si_sm_data *bt, unsigned char completion_code)
+{
+ bt->read_data[0] = 4; /* # following bytes */
+ bt->read_data[1] = bt->write_data[1] | 4; /* Odd NetFn/LUN */
+ bt->read_data[2] = bt->write_data[2]; /* seq (ignored) */
+ bt->read_data[3] = bt->write_data[3]; /* Command */
+ bt->read_data[4] = completion_code;
+ bt->read_count = 5;
+}
+
+/* The upper state machine starts here */
+
+static int bt_start_transaction(struct si_sm_data *bt,
+ unsigned char *data,
+ unsigned int size)
+{
+ unsigned int i;
+
+ if (size < 2)
+ return IPMI_REQ_LEN_INVALID_ERR;
+ if (size > IPMI_MAX_MSG_LENGTH)
+ return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+ if (bt->state == BT_STATE_LONG_BUSY)
+ return IPMI_NODE_BUSY_ERR;
+
+ if (bt->state != BT_STATE_IDLE) {
+ dev_warn(bt->io->dev, "BT in invalid state %d\n", bt->state);
+ return IPMI_NOT_IN_MY_STATE_ERR;
+ }
+
+ if (bt_debug & BT_DEBUG_MSG) {
+ dev_dbg(bt->io->dev, "+++++++++++++++++ New command\n");
+ dev_dbg(bt->io->dev, "NetFn/LUN CMD [%d data]:", size - 2);
+ for (i = 0; i < size; i ++)
+ pr_cont(" %02x", data[i]);
+ pr_cont("\n");
+ }
+ bt->write_data[0] = size + 1; /* all data plus seq byte */
+ bt->write_data[1] = *data; /* NetFn/LUN */
+ bt->write_data[2] = bt->seq++;
+ memcpy(bt->write_data + 3, data + 1, size - 1);
+ bt->write_count = size + 2;
+ bt->error_retries = 0;
+ bt->nonzero_status = 0;
+ bt->truncated = 0;
+ bt->state = BT_STATE_XACTION_START;
+ bt->timeout = bt->BT_CAP_req2rsp;
+ force_result(bt, IPMI_ERR_UNSPECIFIED);
+ return 0;
+}
+
+/*
+ * After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE
+ * it calls this. Strip out the length and seq bytes.
+ */
+
+static int bt_get_result(struct si_sm_data *bt,
+ unsigned char *data,
+ unsigned int length)
+{
+ int i, msg_len;
+
+ msg_len = bt->read_count - 2; /* account for length & seq */
+ if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) {
+ force_result(bt, IPMI_ERR_UNSPECIFIED);
+ msg_len = 3;
+ }
+ data[0] = bt->read_data[1];
+ data[1] = bt->read_data[3];
+ if (length < msg_len || bt->truncated) {
+ data[2] = IPMI_ERR_MSG_TRUNCATED;
+ msg_len = 3;
+ } else
+ memcpy(data + 2, bt->read_data + 4, msg_len - 2);
+
+ if (bt_debug & BT_DEBUG_MSG) {
+ dev_dbg(bt->io->dev, "result %d bytes:", msg_len);
+ for (i = 0; i < msg_len; i++)
+ pr_cont(" %02x", data[i]);
+ pr_cont("\n");
+ }
+ return msg_len;
+}
+
+/* This bit's functionality is optional */
+#define BT_BMC_HWRST 0x80
+
+static void reset_flags(struct si_sm_data *bt)
+{
+ if (bt_debug)
+ dev_dbg(bt->io->dev, "flag reset %s\n", status2txt(BT_STATUS));
+ if (BT_STATUS & BT_H_BUSY)
+ BT_CONTROL(BT_H_BUSY); /* force clear */
+ BT_CONTROL(BT_CLR_WR_PTR); /* always reset */
+ BT_CONTROL(BT_SMS_ATN); /* always clear */
+ BT_INTMASK_W(BT_BMC_HWRST);
+}
+
+/*
+ * Get rid of an unwanted/stale response. This should only be needed for
+ * BMCs that support multiple outstanding requests.
+ */
+
+static void drain_BMC2HOST(struct si_sm_data *bt)
+{
+ int i, size;
+
+ if (!(BT_STATUS & BT_B2H_ATN)) /* Not signalling a response */
+ return;
+
+ BT_CONTROL(BT_H_BUSY); /* now set */
+ BT_CONTROL(BT_B2H_ATN); /* always clear */
+ BT_STATUS; /* pause */
+ BT_CONTROL(BT_B2H_ATN); /* some BMCs are stubborn */
+ BT_CONTROL(BT_CLR_RD_PTR); /* always reset */
+ if (bt_debug)
+ dev_dbg(bt->io->dev, "stale response %s; ",
+ status2txt(BT_STATUS));
+ size = BMC2HOST;
+ for (i = 0; i < size ; i++)
+ BMC2HOST;
+ BT_CONTROL(BT_H_BUSY); /* now clear */
+ if (bt_debug)
+ pr_cont("drained %d bytes\n", size + 1);
+}
+
+static inline void write_all_bytes(struct si_sm_data *bt)
+{
+ int i;
+
+ if (bt_debug & BT_DEBUG_MSG) {
+ dev_dbg(bt->io->dev, "write %d bytes seq=0x%02X",
+ bt->write_count, bt->seq);
+ for (i = 0; i < bt->write_count; i++)
+ pr_cont(" %02x", bt->write_data[i]);
+ pr_cont("\n");
+ }
+ for (i = 0; i < bt->write_count; i++)
+ HOST2BMC(bt->write_data[i]);
+}
+
+static inline int read_all_bytes(struct si_sm_data *bt)
+{
+ unsigned int i;
+
+ /*
+ * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
+ * Keep layout of first four bytes aligned with write_data[]
+ */
+
+ bt->read_data[0] = BMC2HOST;
+ bt->read_count = bt->read_data[0];
+
+ if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) {
+ if (bt_debug & BT_DEBUG_MSG)
+ dev_dbg(bt->io->dev,
+ "bad raw rsp len=%d\n", bt->read_count);
+ bt->truncated = 1;
+ return 1; /* let next XACTION START clean it up */
+ }
+ for (i = 1; i <= bt->read_count; i++)
+ bt->read_data[i] = BMC2HOST;
+ bt->read_count++; /* Account internally for length byte */
+
+ if (bt_debug & BT_DEBUG_MSG) {
+ int max = bt->read_count;
+
+ dev_dbg(bt->io->dev,
+ "got %d bytes seq=0x%02X", max, bt->read_data[2]);
+ if (max > 16)
+ max = 16;
+ for (i = 0; i < max; i++)
+ pr_cont(" %02x", bt->read_data[i]);
+ pr_cont("%s\n", bt->read_count == max ? "" : " ...");
+ }
+
+ /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */
+ if ((bt->read_data[3] == bt->write_data[3]) &&
+ (bt->read_data[2] == bt->write_data[2]) &&
+ ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8)))
+ return 1;
+
+ if (bt_debug & BT_DEBUG_MSG)
+ dev_dbg(bt->io->dev,
+ "IPMI BT: bad packet: want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n",
+ bt->write_data[1] | 0x04, bt->write_data[2],
+ bt->write_data[3],
+ bt->read_data[1], bt->read_data[2], bt->read_data[3]);
+ return 0;
+}
+
+/* Restart if retries are left, or return an error completion code */
+
+static enum si_sm_result error_recovery(struct si_sm_data *bt,
+ unsigned char status,
+ unsigned char cCode)
+{
+ char *reason;
+
+ bt->timeout = bt->BT_CAP_req2rsp;
+
+ switch (cCode) {
+ case IPMI_TIMEOUT_ERR:
+ reason = "timeout";
+ break;
+ default:
+ reason = "internal error";
+ break;
+ }
+
+ dev_warn(bt->io->dev, "IPMI BT: %s in %s %s ", /* open-ended line */
+ reason, STATE2TXT, STATUS2TXT);
+
+ /*
+ * Per the IPMI spec, retries are based on the sequence number
+ * known only to this module, so manage a restart here.
+ */
+ (bt->error_retries)++;
+ if (bt->error_retries < bt->BT_CAP_retries) {
+ pr_cont("%d retries left\n",
+ bt->BT_CAP_retries - bt->error_retries);
+ bt->state = BT_STATE_RESTART;
+ return SI_SM_CALL_WITHOUT_DELAY;
+ }
+
+ dev_warn(bt->io->dev, "failed %d retries, sending error response\n",
+ bt->BT_CAP_retries);
+ if (!bt->nonzero_status)
+ dev_err(bt->io->dev, "stuck, try power cycle\n");
+
+ /* this is most likely during insmod */
+ else if (bt->seq <= (unsigned char)(bt->BT_CAP_retries & 0xFF)) {
+ dev_warn(bt->io->dev, "BT reset (takes 5 secs)\n");
+ bt->state = BT_STATE_RESET1;
+ return SI_SM_CALL_WITHOUT_DELAY;
+ }
+
+ /*
+ * Concoct a useful error message, set up the next state, and
+ * be done with this sequence.
+ */
+
+ bt->state = BT_STATE_IDLE;
+ switch (cCode) {
+ case IPMI_TIMEOUT_ERR:
+ if (status & BT_B_BUSY) {
+ cCode = IPMI_NODE_BUSY_ERR;
+ bt->state = BT_STATE_LONG_BUSY;
+ }
+ break;
+ default:
+ break;
+ }
+ force_result(bt, cCode);
+ return SI_SM_TRANSACTION_COMPLETE;
+}
+
+/* Check status and (usually) take action and change this state machine. */
+
+static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
+{
+ unsigned char status;
+ static enum bt_states last_printed = BT_STATE_PRINTME;
+ int i;
+
+ status = BT_STATUS;
+ bt->nonzero_status |= status;
+ if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) {
+ dev_dbg(bt->io->dev, "BT: %s %s TO=%ld - %ld\n",
+ STATE2TXT,
+ STATUS2TXT,
+ bt->timeout,
+ time);
+ last_printed = bt->state;
+ }
+
+ /*
+ * Commands that time out may still (eventually) provide a response.
+ * This stale response will get in the way of a new response so remove
+ * it if possible (hopefully during IDLE). Even if it comes up later
+ * it will be rejected by its (now-forgotten) seq number.
+ */
+
+ if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
+ drain_BMC2HOST(bt);
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ }
+
+ if ((bt->state != BT_STATE_IDLE) &&
+ (bt->state < BT_STATE_PRINTME)) {
+ /* check timeout */
+ bt->timeout -= time;
+ if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
+ return error_recovery(bt,
+ status,
+ IPMI_TIMEOUT_ERR);
+ }
+
+ switch (bt->state) {
+
+ /*
+ * Idle state first checks for asynchronous messages from another
+ * channel, then does some opportunistic housekeeping.
+ */
+
+ case BT_STATE_IDLE:
+ if (status & BT_SMS_ATN) {
+ BT_CONTROL(BT_SMS_ATN); /* clear it */
+ return SI_SM_ATTN;
+ }
+
+ if (status & BT_H_BUSY) /* clear a leftover H_BUSY */
+ BT_CONTROL(BT_H_BUSY);
+
+ BT_SI_SM_RETURN(SI_SM_IDLE);
+
+ case BT_STATE_XACTION_START:
+ if (status & (BT_B_BUSY | BT_H2B_ATN))
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ if (BT_STATUS & BT_H_BUSY)
+ BT_CONTROL(BT_H_BUSY); /* force clear */
+ BT_STATE_CHANGE(BT_STATE_WRITE_BYTES,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ case BT_STATE_WRITE_BYTES:
+ if (status & BT_H_BUSY)
+ BT_CONTROL(BT_H_BUSY); /* clear */
+ BT_CONTROL(BT_CLR_WR_PTR);
+ write_all_bytes(bt);
+ BT_CONTROL(BT_H2B_ATN); /* can clear too fast to catch */
+ BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ case BT_STATE_WRITE_CONSUME:
+ if (status & (BT_B_BUSY | BT_H2B_ATN))
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ /* Spinning hard can suppress B2H_ATN and force a timeout */
+
+ case BT_STATE_READ_WAIT:
+ if (!(status & BT_B2H_ATN))
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ BT_CONTROL(BT_H_BUSY); /* set */
+
+ /*
+ * Uncached, ordered writes should just proceed serially but
+ * some BMCs don't clear B2H_ATN with one hit. Fast-path a
+ * workaround without too much penalty to the general case.
+ */
+
+ BT_CONTROL(BT_B2H_ATN); /* clear it to ACK the BMC */
+ BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ case BT_STATE_CLEAR_B2H:
+ if (status & BT_B2H_ATN) {
+ /* keep hitting it */
+ BT_CONTROL(BT_B2H_ATN);
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ }
+ BT_STATE_CHANGE(BT_STATE_READ_BYTES,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ case BT_STATE_READ_BYTES:
+ if (!(status & BT_H_BUSY))
+ /* check in case of retry */
+ BT_CONTROL(BT_H_BUSY);
+ BT_CONTROL(BT_CLR_RD_PTR); /* start of BMC2HOST buffer */
+ i = read_all_bytes(bt); /* true == packet seq match */
+ BT_CONTROL(BT_H_BUSY); /* NOW clear */
+ if (!i) /* Not my message */
+ BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+ SI_SM_CALL_WITHOUT_DELAY);
+ bt->state = bt->complete;
+ return bt->state == BT_STATE_IDLE ? /* where to next? */
+ SI_SM_TRANSACTION_COMPLETE : /* normal */
+ SI_SM_CALL_WITHOUT_DELAY; /* Startup magic */
+
+ case BT_STATE_LONG_BUSY: /* For example: after FW update */
+ if (!(status & BT_B_BUSY)) {
+ reset_flags(bt); /* next state is now IDLE */
+ bt_init_data(bt, bt->io);
+ }
+ return SI_SM_CALL_WITH_DELAY; /* No repeat printing */
+
+ case BT_STATE_RESET1:
+ reset_flags(bt);
+ drain_BMC2HOST(bt);
+ BT_STATE_CHANGE(BT_STATE_RESET2,
+ SI_SM_CALL_WITH_DELAY);
+
+ case BT_STATE_RESET2: /* Send a soft reset */
+ BT_CONTROL(BT_CLR_WR_PTR);
+ HOST2BMC(3); /* number of bytes following */
+ HOST2BMC(0x18); /* NetFn/LUN == Application, LUN 0 */
+ HOST2BMC(42); /* Sequence number */
+ HOST2BMC(3); /* Cmd == Soft reset */
+ BT_CONTROL(BT_H2B_ATN);
+ bt->timeout = BT_RESET_DELAY * USEC_PER_SEC;
+ BT_STATE_CHANGE(BT_STATE_RESET3,
+ SI_SM_CALL_WITH_DELAY);
+
+ case BT_STATE_RESET3: /* Hold off everything for a bit */
+ if (bt->timeout > 0)
+ return SI_SM_CALL_WITH_DELAY;
+ drain_BMC2HOST(bt);
+ BT_STATE_CHANGE(BT_STATE_RESTART,
+ SI_SM_CALL_WITH_DELAY);
+
+ case BT_STATE_RESTART: /* don't reset retries or seq! */
+ bt->read_count = 0;
+ bt->nonzero_status = 0;
+ bt->timeout = bt->BT_CAP_req2rsp;
+ BT_STATE_CHANGE(BT_STATE_XACTION_START,
+ SI_SM_CALL_WITH_DELAY);
+
+ default: /* should never occur */
+ return error_recovery(bt,
+ status,
+ IPMI_ERR_UNSPECIFIED);
+ }
+ return SI_SM_CALL_WITH_DELAY;
+}
+
+static int bt_detect(struct si_sm_data *bt)
+{
+ unsigned char GetBT_CAP[] = { 0x18, 0x36 };
+ unsigned char BT_CAP[8];
+ enum si_sm_result smi_result;
+ int rv;
+
+ /*
+ * It's impossible for the BT status and interrupt registers to be
+ * all 1's, (assuming a properly functioning, self-initialized BMC)
+ * but that's what you get from reading a bogus address, so we
+ * test that first. The calling routine uses negative logic.
+ */
+
+ if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
+ return 1;
+ reset_flags(bt);
+
+ /*
+ * Try getting the BT capabilities here.
+ */
+ rv = bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
+ if (rv) {
+ dev_warn(bt->io->dev,
+ "Can't start capabilities transaction: %d\n", rv);
+ goto out_no_bt_cap;
+ }
+
+ smi_result = SI_SM_CALL_WITHOUT_DELAY;
+ for (;;) {
+ if (smi_result == SI_SM_CALL_WITH_DELAY ||
+ smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
+ schedule_timeout_uninterruptible(1);
+ smi_result = bt_event(bt, jiffies_to_usecs(1));
+ } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+ smi_result = bt_event(bt, 0);
+ } else
+ break;
+ }
+
+ rv = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
+ bt_init_data(bt, bt->io);
+ if (rv < 8) {
+ dev_warn(bt->io->dev, "bt cap response too short: %d\n", rv);
+ goto out_no_bt_cap;
+ }
+
+ if (BT_CAP[2]) {
+ dev_warn(bt->io->dev, "Error fetching bt cap: %x\n", BT_CAP[2]);
+out_no_bt_cap:
+ dev_warn(bt->io->dev, "using default values\n");
+ } else {
+ bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
+ bt->BT_CAP_retries = BT_CAP[7];
+ }
+
+ dev_info(bt->io->dev, "req2rsp=%ld secs retries=%d\n",
+ bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
+
+ return 0;
+}
+
+static void bt_cleanup(struct si_sm_data *bt)
+{
+}
+
+static int bt_size(void)
+{
+ return sizeof(struct si_sm_data);
+}
+
+const struct si_sm_handlers bt_smi_handlers = {
+ .init_data = bt_init_data,
+ .start_transaction = bt_start_transaction,
+ .get_result = bt_get_result,
+ .event = bt_event,
+ .detect = bt_detect,
+ .cleanup = bt_cleanup,
+ .size = bt_size,
+};
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
new file mode 100644
index 000000000..d160fa4c7
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -0,0 +1,910 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_devintf.c
+ *
+ * Linux device interface for the IPMI message handler.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/ipmi.h>
+#include <linux/mutex.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/compat.h>
+
+struct ipmi_file_private
+{
+ struct ipmi_user *user;
+ spinlock_t recv_msg_lock;
+ struct list_head recv_msgs;
+ struct fasync_struct *fasync_queue;
+ wait_queue_head_t wait;
+ struct mutex recv_mutex;
+ int default_retries;
+ unsigned int default_retry_time_ms;
+};
+
+static void file_receive_handler(struct ipmi_recv_msg *msg,
+ void *handler_data)
+{
+ struct ipmi_file_private *priv = handler_data;
+ int was_empty;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->recv_msg_lock, flags);
+ was_empty = list_empty(&priv->recv_msgs);
+ list_add_tail(&msg->link, &priv->recv_msgs);
+ spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+
+ if (was_empty) {
+ wake_up_interruptible(&priv->wait);
+ kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
+ }
+}
+
+static __poll_t ipmi_poll(struct file *file, poll_table *wait)
+{
+ struct ipmi_file_private *priv = file->private_data;
+ __poll_t mask = 0;
+ unsigned long flags;
+
+ poll_wait(file, &priv->wait, wait);
+
+ spin_lock_irqsave(&priv->recv_msg_lock, flags);
+
+ if (!list_empty(&priv->recv_msgs))
+ mask |= (EPOLLIN | EPOLLRDNORM);
+
+ spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+
+ return mask;
+}
+
+static int ipmi_fasync(int fd, struct file *file, int on)
+{
+ struct ipmi_file_private *priv = file->private_data;
+
+ return fasync_helper(fd, file, on, &priv->fasync_queue);
+}
+
+static const struct ipmi_user_hndl ipmi_hndlrs =
+{
+ .ipmi_recv_hndl = file_receive_handler,
+};
+
+static int ipmi_open(struct inode *inode, struct file *file)
+{
+ int if_num = iminor(inode);
+ int rv;
+ struct ipmi_file_private *priv;
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ rv = ipmi_create_user(if_num,
+ &ipmi_hndlrs,
+ priv,
+ &priv->user);
+ if (rv) {
+ kfree(priv);
+ goto out;
+ }
+
+ file->private_data = priv;
+
+ spin_lock_init(&priv->recv_msg_lock);
+ INIT_LIST_HEAD(&priv->recv_msgs);
+ init_waitqueue_head(&priv->wait);
+ priv->fasync_queue = NULL;
+ mutex_init(&priv->recv_mutex);
+
+ /* Use the low-level defaults. */
+ priv->default_retries = -1;
+ priv->default_retry_time_ms = 0;
+
+out:
+ return rv;
+}
+
+static int ipmi_release(struct inode *inode, struct file *file)
+{
+ struct ipmi_file_private *priv = file->private_data;
+ int rv;
+ struct ipmi_recv_msg *msg, *next;
+
+ rv = ipmi_destroy_user(priv->user);
+ if (rv)
+ return rv;
+
+ list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
+ ipmi_free_recv_msg(msg);
+
+ kfree(priv);
+
+ return 0;
+}
+
+static int handle_send_req(struct ipmi_user *user,
+ struct ipmi_req *req,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ int rv;
+ struct ipmi_addr addr;
+ struct kernel_ipmi_msg msg;
+
+ if (req->addr_len > sizeof(struct ipmi_addr))
+ return -EINVAL;
+
+ if (copy_from_user(&addr, req->addr, req->addr_len))
+ return -EFAULT;
+
+ msg.netfn = req->msg.netfn;
+ msg.cmd = req->msg.cmd;
+ msg.data_len = req->msg.data_len;
+ msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!msg.data)
+ return -ENOMEM;
+
+ /* From here on we cannot return directly; error exits must
+ jump to "out" so msg.data gets freed. */
+
+ rv = ipmi_validate_addr(&addr, req->addr_len);
+ if (rv)
+ goto out;
+
+ if (req->msg.data != NULL) {
+ if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
+ rv = -EMSGSIZE;
+ goto out;
+ }
+
+ if (copy_from_user(msg.data,
+ req->msg.data,
+ req->msg.data_len)) {
+ rv = -EFAULT;
+ goto out;
+ }
+ } else {
+ msg.data_len = 0;
+ }
+
+ rv = ipmi_request_settime(user,
+ &addr,
+ req->msgid,
+ &msg,
+ NULL,
+ 0,
+ retries,
+ retry_time_ms);
+ out:
+ kfree(msg.data);
+ return rv;
+}
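From userspace, this path is driven through IPMICTL_SEND_COMMAND with a struct ipmi_req from <linux/ipmi.h>. A minimal sketch sending Get Device ID (NetFn 0x06, Cmd 0x01) to the local BMC, assuming the device node (e.g. /dev/ipmi0) is already open as fd:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/ipmi.h>

int send_get_device_id(int fd)
{
	struct ipmi_system_interface_addr addr = {
		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
		.channel = IPMI_BMC_CHANNEL,
	};
	struct ipmi_req req;

	memset(&req, 0, sizeof(req));
	req.addr = (unsigned char *)&addr;
	req.addr_len = sizeof(addr);
	req.msgid = 1;		/* arbitrary cookie, echoed in the reply */
	req.msg.netfn = 0x06;	/* Application request */
	req.msg.cmd = 0x01;	/* Get Device ID */
	req.msg.data = NULL;
	req.msg.data_len = 0;
	return ioctl(fd, IPMICTL_SEND_COMMAND, &req);
}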
+
+static int handle_recv(struct ipmi_file_private *priv,
+ bool trunc, struct ipmi_recv *rsp,
+ int (*copyout)(struct ipmi_recv *, void __user *),
+ void __user *to)
+{
+ int addr_len;
+ struct list_head *entry;
+ struct ipmi_recv_msg *msg;
+ unsigned long flags;
+ int rv = 0, rv2 = 0;
+
+ /* We claim a mutex because we don't want two
+ users getting something from the queue at a time.
+ Since we have to release the spinlock before we can
+ copy the data to the user, it's possible another
+ user will grab something from the queue, too. Then
+ the messages might get out of order if something
+ fails and the message gets put back onto the
+ queue. This mutex prevents that problem. */
+ mutex_lock(&priv->recv_mutex);
+
+ /* Grab the message off the list. */
+ spin_lock_irqsave(&priv->recv_msg_lock, flags);
+ if (list_empty(&(priv->recv_msgs))) {
+ spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+ rv = -EAGAIN;
+ goto recv_err;
+ }
+ entry = priv->recv_msgs.next;
+ msg = list_entry(entry, struct ipmi_recv_msg, link);
+ list_del(entry);
+ spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+
+ addr_len = ipmi_addr_length(msg->addr.addr_type);
+ if (rsp->addr_len < addr_len) {
+ rv = -EINVAL;
+ goto recv_putback_on_err;
+ }
+
+ if (copy_to_user(rsp->addr, &msg->addr, addr_len)) {
+ rv = -EFAULT;
+ goto recv_putback_on_err;
+ }
+ rsp->addr_len = addr_len;
+
+ rsp->recv_type = msg->recv_type;
+ rsp->msgid = msg->msgid;
+ rsp->msg.netfn = msg->msg.netfn;
+ rsp->msg.cmd = msg->msg.cmd;
+
+ if (msg->msg.data_len > 0) {
+ if (rsp->msg.data_len < msg->msg.data_len) {
+ if (trunc) {
+ rv2 = -EMSGSIZE;
+ msg->msg.data_len = rsp->msg.data_len;
+ } else {
+ rv = -EMSGSIZE;
+ goto recv_putback_on_err;
+ }
+ }
+
+ if (copy_to_user(rsp->msg.data,
+ msg->msg.data,
+ msg->msg.data_len)) {
+ rv = -EFAULT;
+ goto recv_putback_on_err;
+ }
+ rsp->msg.data_len = msg->msg.data_len;
+ } else {
+ rsp->msg.data_len = 0;
+ }
+
+ rv = copyout(rsp, to);
+ if (rv)
+ goto recv_putback_on_err;
+
+ mutex_unlock(&priv->recv_mutex);
+ ipmi_free_recv_msg(msg);
+ return rv2;
+
+recv_putback_on_err:
+ /*
+ * If we got an error, put the message back onto the head of
+ * the queue.
+ */
+ spin_lock_irqsave(&priv->recv_msg_lock, flags);
+ list_add(entry, &priv->recv_msgs);
+ spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+recv_err:
+ mutex_unlock(&priv->recv_mutex);
+ return rv;
+}
+
+static int copyout_recv(struct ipmi_recv *rsp, void __user *to)
+{
+ return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ? -EFAULT : 0;
+}
+
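+/*
+ * A minimal userspace sketch (not part of this driver) of how the
+ * ioctls handled below are typically driven; the device path and the
+ * Get Device ID request are illustrative only:
+ *
+ *	struct ipmi_system_interface_addr si = {
+ *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
+ *		.channel = IPMI_BMC_CHANNEL,
+ *	};
+ *	struct ipmi_req req = {
+ *		.addr = (unsigned char *) &si,
+ *		.addr_len = sizeof(si),
+ *		.msgid = 1,
+ *		.msg = { .netfn = IPMI_NETFN_APP_REQUEST,
+ *			 .cmd = IPMI_GET_DEVICE_ID_CMD },
+ *	};
+ *	int fd = open("/dev/ipmi0", O_RDWR);
+ *
+ *	ioctl(fd, IPMICTL_SEND_COMMAND, &req);
+ *	then poll() for POLLIN and fetch the reply with
+ *	ioctl(fd, IPMICTL_RECEIVE_MSG, &recv).
+ */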
+static long ipmi_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long data)
+{
+ int rv = -EINVAL;
+ struct ipmi_file_private *priv = file->private_data;
+ void __user *arg = (void __user *)data;
+
+ switch (cmd) {
+ case IPMICTL_SEND_COMMAND:
+ {
+ struct ipmi_req req;
+ int retries;
+ unsigned int retry_time_ms;
+
+ if (copy_from_user(&req, arg, sizeof(req))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ mutex_lock(&priv->recv_mutex);
+ retries = priv->default_retries;
+ retry_time_ms = priv->default_retry_time_ms;
+ mutex_unlock(&priv->recv_mutex);
+
+ rv = handle_send_req(priv->user, &req, retries, retry_time_ms);
+ break;
+ }
+
+ case IPMICTL_SEND_COMMAND_SETTIME:
+ {
+ struct ipmi_req_settime req;
+
+ if (copy_from_user(&req, arg, sizeof(req))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = handle_send_req(priv->user,
+ &req.req,
+ req.retries,
+ req.retry_time_ms);
+ break;
+ }
+
+ case IPMICTL_RECEIVE_MSG:
+ case IPMICTL_RECEIVE_MSG_TRUNC:
+ {
+ struct ipmi_recv rsp;
+
+ if (copy_from_user(&rsp, arg, sizeof(rsp)))
+ rv = -EFAULT;
+ else
+ rv = handle_recv(priv, cmd == IPMICTL_RECEIVE_MSG_TRUNC,
+ &rsp, copyout_recv, arg);
+ break;
+ }
+
+ case IPMICTL_REGISTER_FOR_CMD:
+ {
+ struct ipmi_cmdspec val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
+ IPMI_CHAN_ALL);
+ break;
+ }
+
+ case IPMICTL_UNREGISTER_FOR_CMD:
+ {
+ struct ipmi_cmdspec val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
+ IPMI_CHAN_ALL);
+ break;
+ }
+
+ case IPMICTL_REGISTER_FOR_CMD_CHANS:
+ {
+ struct ipmi_cmdspec_chans val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
+ val.chans);
+ break;
+ }
+
+ case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
+ {
+ struct ipmi_cmdspec_chans val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
+ val.chans);
+ break;
+ }
+
+ case IPMICTL_SET_GETS_EVENTS_CMD:
+ {
+ int val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_set_gets_events(priv->user, val);
+ break;
+ }
+
+ /* The next four are legacy, not per-channel. */
+ case IPMICTL_SET_MY_ADDRESS_CMD:
+ {
+ unsigned int val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_set_my_address(priv->user, 0, val);
+ break;
+ }
+
+ case IPMICTL_GET_MY_ADDRESS_CMD:
+ {
+ unsigned int val;
+ unsigned char rval;
+
+ rv = ipmi_get_my_address(priv->user, 0, &rval);
+ if (rv)
+ break;
+
+ val = rval;
+
+ if (copy_to_user(arg, &val, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ case IPMICTL_SET_MY_LUN_CMD:
+ {
+ unsigned int val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_set_my_LUN(priv->user, 0, val);
+ break;
+ }
+
+ case IPMICTL_GET_MY_LUN_CMD:
+ {
+ unsigned int val;
+ unsigned char rval;
+
+ rv = ipmi_get_my_LUN(priv->user, 0, &rval);
+ if (rv)
+ break;
+
+ val = rval;
+
+ if (copy_to_user(arg, &val, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
+ {
+ struct ipmi_channel_lun_address_set val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_set_my_address(priv->user, val.channel, val.value);
+ break;
+ }
+
+ case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
+ {
+ struct ipmi_channel_lun_address_set val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
+ if (rv)
+ break;
+
+ if (copy_to_user(arg, &val, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
+ {
+ struct ipmi_channel_lun_address_set val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
+ break;
+ }
+
+ case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
+ {
+ struct ipmi_channel_lun_address_set val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
+ if (rv)
+ break;
+
+ if (copy_to_user(arg, &val, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ case IPMICTL_SET_TIMING_PARMS_CMD:
+ {
+ struct ipmi_timing_parms parms;
+
+ if (copy_from_user(&parms, arg, sizeof(parms))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ mutex_lock(&priv->recv_mutex);
+ priv->default_retries = parms.retries;
+ priv->default_retry_time_ms = parms.retry_time_ms;
+ mutex_unlock(&priv->recv_mutex);
+ rv = 0;
+ break;
+ }
+
+ case IPMICTL_GET_TIMING_PARMS_CMD:
+ {
+ struct ipmi_timing_parms parms;
+
+ mutex_lock(&priv->recv_mutex);
+ parms.retries = priv->default_retries;
+ parms.retry_time_ms = priv->default_retry_time_ms;
+ mutex_unlock(&priv->recv_mutex);
+
+ if (copy_to_user(arg, &parms, sizeof(parms))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = 0;
+ break;
+ }
+
+ case IPMICTL_GET_MAINTENANCE_MODE_CMD:
+ {
+ int mode;
+
+ mode = ipmi_get_maintenance_mode(priv->user);
+ if (copy_to_user(arg, &mode, sizeof(mode))) {
+ rv = -EFAULT;
+ break;
+ }
+ rv = 0;
+ break;
+ }
+
+ case IPMICTL_SET_MAINTENANCE_MODE_CMD:
+ {
+ int mode;
+
+ if (copy_from_user(&mode, arg, sizeof(mode))) {
+ rv = -EFAULT;
+ break;
+ }
+ rv = ipmi_set_maintenance_mode(priv->user, mode);
+ break;
+ }
+
+ default:
+ rv = -ENOTTY;
+ break;
+ }
+
+ return rv;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * The following code supports 32-bit compatible ioctls on 64-bit
+ * kernels, allowing 32-bit apps to run on a 64-bit kernel.
+ */
+#define COMPAT_IPMICTL_SEND_COMMAND \
+ _IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
+#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME \
+ _IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
+#define COMPAT_IPMICTL_RECEIVE_MSG \
+ _IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
+#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC \
+ _IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)
+
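+/*
+ * The structures below mirror their counterparts in linux/ipmi.h,
+ * but with pointers and longs in their 32-bit sizes (compat_uptr_t,
+ * compat_long_t). Since those fields are 8 bytes in the native
+ * 64-bit layout and 4 bytes here, the two layouts differ and each
+ * request must be translated field by field by the helpers further
+ * down.
+ */
+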
+struct compat_ipmi_msg {
+ u8 netfn;
+ u8 cmd;
+ u16 data_len;
+ compat_uptr_t data;
+};
+
+struct compat_ipmi_req {
+ compat_uptr_t addr;
+ compat_uint_t addr_len;
+ compat_long_t msgid;
+ struct compat_ipmi_msg msg;
+};
+
+struct compat_ipmi_recv {
+ compat_int_t recv_type;
+ compat_uptr_t addr;
+ compat_uint_t addr_len;
+ compat_long_t msgid;
+ struct compat_ipmi_msg msg;
+};
+
+struct compat_ipmi_req_settime {
+ struct compat_ipmi_req req;
+ compat_int_t retries;
+ compat_uint_t retry_time_ms;
+};
+
+/*
+ * Define some helper functions for copying IPMI data
+ */
+static void get_compat_ipmi_msg(struct ipmi_msg *p64,
+ struct compat_ipmi_msg *p32)
+{
+ p64->netfn = p32->netfn;
+ p64->cmd = p32->cmd;
+ p64->data_len = p32->data_len;
+ p64->data = compat_ptr(p32->data);
+}
+
+static void get_compat_ipmi_req(struct ipmi_req *p64,
+ struct compat_ipmi_req *p32)
+{
+ p64->addr = compat_ptr(p32->addr);
+ p64->addr_len = p32->addr_len;
+ p64->msgid = p32->msgid;
+ get_compat_ipmi_msg(&p64->msg, &p32->msg);
+}
+
+static void get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
+ struct compat_ipmi_req_settime *p32)
+{
+ get_compat_ipmi_req(&p64->req, &p32->req);
+ p64->retries = p32->retries;
+ p64->retry_time_ms = p32->retry_time_ms;
+}
+
+static void get_compat_ipmi_recv(struct ipmi_recv *p64,
+ struct compat_ipmi_recv *p32)
+{
+ memset(p64, 0, sizeof(struct ipmi_recv));
+ p64->recv_type = p32->recv_type;
+ p64->addr = compat_ptr(p32->addr);
+ p64->addr_len = p32->addr_len;
+ p64->msgid = p32->msgid;
+ get_compat_ipmi_msg(&p64->msg, &p32->msg);
+}
+
+static int copyout_recv32(struct ipmi_recv *p64, void __user *to)
+{
+ struct compat_ipmi_recv v32;
+
+ memset(&v32, 0, sizeof(struct compat_ipmi_recv));
+ v32.recv_type = p64->recv_type;
+ v32.addr = ptr_to_compat(p64->addr);
+ v32.addr_len = p64->addr_len;
+ v32.msgid = p64->msgid;
+ v32.msg.netfn = p64->msg.netfn;
+ v32.msg.cmd = p64->msg.cmd;
+ v32.msg.data_len = p64->msg.data_len;
+ v32.msg.data = ptr_to_compat(p64->msg.data);
+ return copy_to_user(to, &v32, sizeof(v32)) ? -EFAULT : 0;
+}
+
+/*
+ * Handle compatibility ioctls
+ */
+static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ struct ipmi_file_private *priv = filep->private_data;
+
+ switch (cmd) {
+ case COMPAT_IPMICTL_SEND_COMMAND:
+ {
+ struct ipmi_req rp;
+ struct compat_ipmi_req r32;
+ int retries;
+ unsigned int retry_time_ms;
+
+ if (copy_from_user(&r32, compat_ptr(arg), sizeof(r32)))
+ return -EFAULT;
+
+ get_compat_ipmi_req(&rp, &r32);
+
+ mutex_lock(&priv->recv_mutex);
+ retries = priv->default_retries;
+ retry_time_ms = priv->default_retry_time_ms;
+ mutex_unlock(&priv->recv_mutex);
+
+ return handle_send_req(priv->user, &rp,
+ retries, retry_time_ms);
+ }
+ case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
+ {
+ struct ipmi_req_settime sp;
+ struct compat_ipmi_req_settime sp32;
+
+ if (copy_from_user(&sp32, compat_ptr(arg), sizeof(sp32)))
+ return -EFAULT;
+
+ get_compat_ipmi_req_settime(&sp, &sp32);
+
+ return handle_send_req(priv->user, &sp.req,
+ sp.retries, sp.retry_time_ms);
+ }
+ case COMPAT_IPMICTL_RECEIVE_MSG:
+ case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
+ {
+ struct ipmi_recv recv64;
+ struct compat_ipmi_recv recv32;
+
+ if (copy_from_user(&recv32, compat_ptr(arg), sizeof(recv32)))
+ return -EFAULT;
+
+ get_compat_ipmi_recv(&recv64, &recv32);
+
+ return handle_recv(priv,
+ cmd == COMPAT_IPMICTL_RECEIVE_MSG_TRUNC,
+ &recv64, copyout_recv32, compat_ptr(arg));
+ }
+ default:
+ return ipmi_ioctl(filep, cmd, arg);
+ }
+}
+#endif
+
+static const struct file_operations ipmi_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ipmi_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_ipmi_ioctl,
+#endif
+ .open = ipmi_open,
+ .release = ipmi_release,
+ .fasync = ipmi_fasync,
+ .poll = ipmi_poll,
+ .llseek = noop_llseek,
+};
+
+#define DEVICE_NAME "ipmidev"
+
+static int ipmi_major;
+module_param(ipmi_major, int, 0);
+MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
+ " default, or if you set it to zero, it will choose the next"
+ " available device. Setting it to -1 will disable the"
+ " interface. Other values will set the major device number"
+ " to that value.");
+
+/* Keep track of the devices that are registered. */
+struct ipmi_reg_list {
+ dev_t dev;
+ struct list_head link;
+};
+static LIST_HEAD(reg_list);
+static DEFINE_MUTEX(reg_list_mutex);
+
+static struct class *ipmi_class;
+
+static void ipmi_new_smi(int if_num, struct device *device)
+{
+ dev_t dev = MKDEV(ipmi_major, if_num);
+ struct ipmi_reg_list *entry;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ pr_err("ipmi_devintf: Unable to create the ipmi class device link\n");
+ return;
+ }
+ entry->dev = dev;
+
+ mutex_lock(&reg_list_mutex);
+ device_create(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
+ list_add(&entry->link, &reg_list);
+ mutex_unlock(&reg_list_mutex);
+}
+
+static void ipmi_smi_gone(int if_num)
+{
+ dev_t dev = MKDEV(ipmi_major, if_num);
+ struct ipmi_reg_list *entry;
+
+ mutex_lock(&reg_list_mutex);
+ list_for_each_entry(entry, &reg_list, link) {
+ if (entry->dev == dev) {
+ list_del(&entry->link);
+ kfree(entry);
+ break;
+ }
+ }
+ device_destroy(ipmi_class, dev);
+ mutex_unlock(&reg_list_mutex);
+}
+
+static struct ipmi_smi_watcher smi_watcher = {
+ .owner = THIS_MODULE,
+ .new_smi = ipmi_new_smi,
+ .smi_gone = ipmi_smi_gone,
+};
+
+static int __init init_ipmi_devintf(void)
+{
+ int rv;
+
+ if (ipmi_major < 0)
+ return -EINVAL;
+
+ pr_info("ipmi device interface\n");
+
+ ipmi_class = class_create(THIS_MODULE, "ipmi");
+ if (IS_ERR(ipmi_class)) {
+ pr_err("ipmi: can't register device class\n");
+ return PTR_ERR(ipmi_class);
+ }
+
+ rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
+ if (rv < 0) {
+ class_destroy(ipmi_class);
+ pr_err("ipmi: can't get major %d\n", ipmi_major);
+ return rv;
+ }
+
+ if (ipmi_major == 0)
+ ipmi_major = rv;
+
+ rv = ipmi_smi_watcher_register(&smi_watcher);
+ if (rv) {
+ unregister_chrdev(ipmi_major, DEVICE_NAME);
+ class_destroy(ipmi_class);
+ pr_warn("ipmi: can't register smi watcher\n");
+ return rv;
+ }
+
+ return 0;
+}
+module_init(init_ipmi_devintf);
+
+static void __exit cleanup_ipmi(void)
+{
+ struct ipmi_reg_list *entry, *entry2;
+
+ mutex_lock(&reg_list_mutex);
+ list_for_each_entry_safe(entry, entry2, &reg_list, link) {
+ list_del(&entry->link);
+ device_destroy(ipmi_class, entry->dev);
+ kfree(entry);
+ }
+ mutex_unlock(&reg_list_mutex);
+ class_destroy(ipmi_class);
+ ipmi_smi_watcher_unregister(&smi_watcher);
+ unregister_chrdev(ipmi_major, DEVICE_NAME);
+}
+module_exit(cleanup_ipmi);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c
new file mode 100644
index 000000000..bbf7029e2
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_dmi.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * A hack to create a platform device from a DMI entry. This will
+ * allow autoloading of the IPMI driver based on SMBIOS entries.
+ */
+
+#define pr_fmt(fmt) "%s" fmt, "ipmi:dmi: "
+#define dev_fmt pr_fmt
+
+#include <linux/ipmi.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include "ipmi_dmi.h"
+#include "ipmi_plat_data.h"
+
+#define IPMI_DMI_TYPE_KCS 0x01
+#define IPMI_DMI_TYPE_SMIC 0x02
+#define IPMI_DMI_TYPE_BT 0x03
+#define IPMI_DMI_TYPE_SSIF 0x04
+
+struct ipmi_dmi_info {
+ enum si_type si_type;
+ unsigned int space; /* addr space for si, intf# for ssif */
+ unsigned long addr;
+ u8 slave_addr;
+ struct ipmi_dmi_info *next;
+};
+
+static struct ipmi_dmi_info *ipmi_dmi_infos;
+
+static int ipmi_dmi_nr __initdata;
+
+static void __init dmi_add_platform_ipmi(unsigned long base_addr,
+ unsigned int space,
+ u8 slave_addr,
+ int irq,
+ int offset,
+ int type)
+{
+ const char *name;
+ struct ipmi_dmi_info *info;
+ struct ipmi_plat_data p;
+
+ memset(&p, 0, sizeof(p));
+
+ name = "dmi-ipmi-si";
+ p.iftype = IPMI_PLAT_IF_SI;
+ switch (type) {
+ case IPMI_DMI_TYPE_SSIF:
+ name = "dmi-ipmi-ssif";
+ p.iftype = IPMI_PLAT_IF_SSIF;
+ p.type = SI_TYPE_INVALID;
+ break;
+ case IPMI_DMI_TYPE_BT:
+ p.type = SI_BT;
+ break;
+ case IPMI_DMI_TYPE_KCS:
+ p.type = SI_KCS;
+ break;
+ case IPMI_DMI_TYPE_SMIC:
+ p.type = SI_SMIC;
+ break;
+ default:
+ pr_err("Invalid IPMI type: %d\n", type);
+ return;
+ }
+
+ p.addr = base_addr;
+ p.space = space;
+ p.regspacing = offset;
+ p.irq = irq;
+ p.slave_addr = slave_addr;
+ p.addr_source = SI_SMBIOS;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ pr_warn("Could not allocate dmi info\n");
+ } else {
+ info->si_type = p.type;
+ info->space = space;
+ info->addr = base_addr;
+ info->slave_addr = slave_addr;
+ info->next = ipmi_dmi_infos;
+ ipmi_dmi_infos = info;
+ }
+
+ if (ipmi_platform_add(name, ipmi_dmi_nr, &p))
+ ipmi_dmi_nr++;
+}
+
+/*
+ * Look up the slave address for a given interface. This is here
+ * because ACPI doesn't have a slave address while SMBIOS does, but we
+ * prefer using ACPI so the ACPI code can use the IPMI namespace.
+ * This function allows an ACPI-specified IPMI device to look up the
+ * slave address from the DMI table.
+ */
+int ipmi_dmi_get_slave_addr(enum si_type si_type, unsigned int space,
+ unsigned long base_addr)
+{
+ struct ipmi_dmi_info *info = ipmi_dmi_infos;
+
+ while (info) {
+ if (info->si_type == si_type &&
+ info->space == space &&
+ info->addr == base_addr)
+ return info->slave_addr;
+ info = info->next;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_dmi_get_slave_addr);
+
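+/*
+ * Per the SMBIOS spec, the offsets below index a type 38 (IPMI
+ * Device Information) record: the interface type at offset 4, the
+ * BMC's I2C slave address at offset 6, the 8-byte base address at
+ * offset 8, and (for the longer "version 2" records) the base
+ * address modifier byte at 0x10 and the interrupt number at 0x11.
+ */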
+#define DMI_IPMI_MIN_LENGTH 0x10
+#define DMI_IPMI_VER2_LENGTH 0x12
+#define DMI_IPMI_TYPE 4
+#define DMI_IPMI_SLAVEADDR 6
+#define DMI_IPMI_ADDR 8
+#define DMI_IPMI_ACCESS 0x10
+#define DMI_IPMI_IRQ 0x11
+#define DMI_IPMI_IO_MASK 0xfffe
+
+static void __init dmi_decode_ipmi(const struct dmi_header *dm)
+{
+ const u8 *data = (const u8 *) dm;
+ int space = IPMI_IO_ADDR_SPACE;
+ unsigned long base_addr;
+ u8 len = dm->length;
+ u8 slave_addr;
+ int irq = 0, offset = 0;
+ int type;
+
+ if (len < DMI_IPMI_MIN_LENGTH)
+ return;
+
+ type = data[DMI_IPMI_TYPE];
+ slave_addr = data[DMI_IPMI_SLAVEADDR];
+
+ memcpy(&base_addr, data + DMI_IPMI_ADDR, sizeof(unsigned long));
+ if (!base_addr) {
+ pr_err("Base address is zero, assuming no IPMI interface\n");
+ return;
+ }
+ if (len >= DMI_IPMI_VER2_LENGTH) {
+ if (type == IPMI_DMI_TYPE_SSIF) {
+ space = 0; /* Match I2C interface 0. */
+ base_addr = data[DMI_IPMI_ADDR] >> 1;
+ if (base_addr == 0) {
+ /*
+ * Some broken systems put the I2C address in
+ * the slave address field. We try to
+ * accommodate them here.
+ */
+ base_addr = data[DMI_IPMI_SLAVEADDR] >> 1;
+ slave_addr = 0;
+ }
+ } else {
+ if (base_addr & 1) {
+ /* I/O */
+ base_addr &= DMI_IPMI_IO_MASK;
+ } else {
+ /* Memory */
+ space = IPMI_MEM_ADDR_SPACE;
+ }
+
+ /*
+ * If bit 4 of the access byte (offset 0x10) is set,
+ * the least-significant bit of the address is 1.
+ */
+ base_addr |= (data[DMI_IPMI_ACCESS] >> 4) & 1;
+
+ irq = data[DMI_IPMI_IRQ];
+
+ /*
+ * The top two bits of byte 0x10 hold the
+ * register spacing.
+ */
+ switch ((data[DMI_IPMI_ACCESS] >> 6) & 3) {
+ case 0: /* Byte boundaries */
+ offset = 1;
+ break;
+ case 1: /* 32-bit boundaries */
+ offset = 4;
+ break;
+ case 2: /* 16-byte boundaries */
+ offset = 16;
+ break;
+ default:
+ pr_err("Invalid offset: 0\n");
+ return;
+ }
+ }
+ } else {
+ /* Old DMI spec. */
+ /*
+ * Note that technically, the lower bit of the base
+ * address should be 1 if the address is I/O and 0 if
+ * the address is in memory. So many systems get that
+ * wrong (and all that I have seen are I/O) so we just
+ * ignore that bit and assume I/O. Systems that use
+ * memory should use the newer spec, anyway.
+ */
+ base_addr = base_addr & DMI_IPMI_IO_MASK;
+ offset = 1;
+ }
+
+ dmi_add_platform_ipmi(base_addr, space, slave_addr, irq,
+ offset, type);
+}
+
+static int __init scan_for_dmi_ipmi(void)
+{
+ const struct dmi_device *dev = NULL;
+
+ while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev)))
+ dmi_decode_ipmi((const struct dmi_header *) dev->device_data);
+
+ return 0;
+}
+subsys_initcall(scan_for_dmi_ipmi);
diff --git a/drivers/char/ipmi/ipmi_dmi.h b/drivers/char/ipmi/ipmi_dmi.h
new file mode 100644
index 000000000..e16a9dbdc
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_dmi.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * DMI defines for use by IPMI
+ */
+#include "ipmi_si.h"
+
+#ifdef CONFIG_IPMI_DMI_DECODE
+int ipmi_dmi_get_slave_addr(enum si_type si_type, unsigned int space,
+ unsigned long base_addr);
+#endif
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
new file mode 100644
index 000000000..3f1c9f157
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -0,0 +1,583 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Driver to talk to a remote management controller on IPMB.
+ */
+
+#include <linux/acpi.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/ipmi_msgdefs.h>
+#include <linux/ipmi_smi.h>
+
+#define DEVICE_NAME "ipmi-ipmb"
+
+static int bmcaddr = 0x20;
+module_param(bmcaddr, int, 0644);
+MODULE_PARM_DESC(bmcaddr, "Address to use for BMC.");
+
+static unsigned int retry_time_ms = 250;
+module_param(retry_time_ms, uint, 0644);
+MODULE_PARM_DESC(retry_time_ms, "Timeout time between retries, in milliseconds.");
+
+static unsigned int max_retries = 1;
+module_param(max_retries, uint, 0644);
+MODULE_PARM_DESC(max_retries, "Max resends of a command before timing out.");
+
+/* Add room for the two slave addresses, two checksums, and rqSeq. */
+#define IPMB_MAX_MSG_LEN (IPMI_MAX_MSG_LENGTH + 5)
+
+struct ipmi_ipmb_dev {
+ struct ipmi_smi *intf;
+ struct i2c_client *client;
+ struct i2c_client *slave;
+
+ struct ipmi_smi_handlers handlers;
+
+ bool ready;
+
+ u8 curr_seq;
+
+ u8 bmcaddr;
+ u32 retry_time_ms;
+ u32 max_retries;
+
+ struct ipmi_smi_msg *next_msg;
+ struct ipmi_smi_msg *working_msg;
+
+ /* Transmit thread. */
+ struct task_struct *thread;
+ struct semaphore wake_thread;
+ struct semaphore got_rsp;
+ spinlock_t lock;
+ bool stopping;
+
+ u8 xmitmsg[IPMB_MAX_MSG_LEN];
+ unsigned int xmitlen;
+
+ u8 rcvmsg[IPMB_MAX_MSG_LEN];
+ unsigned int rcvlen;
+ bool overrun;
+};
+
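+/*
+ * On-the-wire IPMB framing, as indexed below (rcvmsg[0] is our own
+ * slave address, prepended by the slave callback to ease the
+ * checksum calculation):
+ *	[0] rsSA  [1] netFn/rsLUN  [2] header checksum
+ *	[3] rqSA  [4] rqSeq/rqLUN  [5] cmd  [6...] data  [last] checksum
+ * ipmb_checksum() is 2's complement, so a block summed together with
+ * its trailing checksum comes out to zero; hence the 7-byte minimum
+ * (8 for responses, which carry an added completion code) and the
+ * two "!= 0" checks below.
+ */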
+static bool valid_ipmb(struct ipmi_ipmb_dev *iidev)
+{
+ u8 *msg = iidev->rcvmsg;
+ u8 netfn;
+
+ if (iidev->overrun)
+ return false;
+
+ /* Minimum message size. */
+ if (iidev->rcvlen < 7)
+ return false;
+
+ /* Is it a response? */
+ netfn = msg[1] >> 2;
+ if (netfn & 1) {
+ /* Response messages have an added completion code. */
+ if (iidev->rcvlen < 8)
+ return false;
+ }
+
+ if (ipmb_checksum(msg, 3) != 0)
+ return false;
+ if (ipmb_checksum(msg + 3, iidev->rcvlen - 3) != 0)
+ return false;
+
+ return true;
+}
+
+static void ipmi_ipmb_check_msg_done(struct ipmi_ipmb_dev *iidev)
+{
+ struct ipmi_smi_msg *imsg = NULL;
+ u8 *msg = iidev->rcvmsg;
+ bool is_cmd;
+ unsigned long flags;
+
+ if (iidev->rcvlen == 0)
+ return;
+ if (!valid_ipmb(iidev))
+ goto done;
+
+ is_cmd = ((msg[1] >> 2) & 1) == 0;
+
+ if (is_cmd) {
+ /* Ignore commands until we are up. */
+ if (!iidev->ready)
+ goto done;
+
+ /* It's a command, allocate a message for it. */
+ imsg = ipmi_alloc_smi_msg();
+ if (!imsg)
+ goto done;
+ imsg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT;
+ imsg->data_size = 0;
+ } else {
+ spin_lock_irqsave(&iidev->lock, flags);
+ if (iidev->working_msg) {
+ u8 seq = msg[4] >> 2;
+ bool xmit_rsp = (iidev->working_msg->data[0] >> 2) & 1;
+
+ /*
+ * Responses should carry the sequence we sent
+ * them with. If the working message is itself a
+ * response we transmitted, or the sequence doesn't
+ * match, ignore the incoming message.
+ */
+ if (!xmit_rsp && seq == iidev->curr_seq) {
+ iidev->curr_seq = (iidev->curr_seq + 1) & 0x3f;
+
+ imsg = iidev->working_msg;
+ iidev->working_msg = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&iidev->lock, flags);
+ }
+
+ if (!imsg)
+ goto done;
+
+ if (imsg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ imsg->rsp[0] = msg[1]; /* NetFn/LUN */
+ /*
+ * Keep the source address, rqSeq. Drop the trailing
+ * checksum.
+ */
+ memcpy(imsg->rsp + 1, msg + 3, iidev->rcvlen - 4);
+ imsg->rsp_size = iidev->rcvlen - 3;
+ } else {
+ imsg->rsp[0] = msg[1]; /* NetFn/LUN */
+ /*
+ * Skip the source address, rqSeq. Drop the trailing
+ * checksum.
+ */
+ memcpy(imsg->rsp + 1, msg + 5, iidev->rcvlen - 6);
+ imsg->rsp_size = iidev->rcvlen - 5;
+ }
+ ipmi_smi_msg_received(iidev->intf, imsg);
+ if (!is_cmd)
+ up(&iidev->got_rsp);
+
+done:
+ iidev->overrun = false;
+ iidev->rcvlen = 0;
+}
+
+/*
+ * The IPMB protocol only supports i2c writes so there is no need to
+ * support I2C_SLAVE_READ* events, except to know if the other end has
+ * issued a read without going to stop mode.
+ */
+static int ipmi_ipmb_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
+
+ switch (event) {
+ case I2C_SLAVE_WRITE_REQUESTED:
+ ipmi_ipmb_check_msg_done(iidev);
+ /*
+ * First byte is the slave address, to ease the checksum
+ * calculation.
+ */
+ iidev->rcvmsg[0] = client->addr << 1;
+ iidev->rcvlen = 1;
+ break;
+
+ case I2C_SLAVE_WRITE_RECEIVED:
+ if (iidev->rcvlen >= sizeof(iidev->rcvmsg))
+ iidev->overrun = true;
+ else
+ iidev->rcvmsg[iidev->rcvlen++] = *val;
+ break;
+
+ case I2C_SLAVE_READ_REQUESTED:
+ case I2C_SLAVE_STOP:
+ ipmi_ipmb_check_msg_done(iidev);
+ break;
+
+ case I2C_SLAVE_READ_PROCESSED:
+ break;
+ }
+
+ return 0;
+}
+
+static void ipmi_ipmb_send_response(struct ipmi_ipmb_dev *iidev,
+ struct ipmi_smi_msg *msg, u8 cc)
+{
+ if ((msg->data[0] >> 2) & 1) {
+ /*
+ * It's a response being sent, we need to return a
+ * response to the response. Fake a send msg command
+ * response with channel 0. This will always be ipmb
+ * direct.
+ */
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST | 1) << 2;
+ msg->data[3] = IPMI_SEND_MSG_CMD;
+ msg->data[4] = cc;
+ msg->data_size = 5;
+ }
+ msg->rsp[0] = msg->data[0] | (1 << 2);
+ if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = msg->data[2];
+ msg->rsp[3] = msg->data[3];
+ msg->rsp[4] = cc;
+ msg->rsp_size = 5;
+ } else {
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = cc;
+ msg->rsp_size = 3;
+ }
+ ipmi_smi_msg_received(iidev->intf, msg);
+}
+
+static void ipmi_ipmb_format_for_xmit(struct ipmi_ipmb_dev *iidev,
+ struct ipmi_smi_msg *msg)
+{
+ if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ iidev->xmitmsg[0] = msg->data[1];
+ iidev->xmitmsg[1] = msg->data[0];
+ memcpy(iidev->xmitmsg + 4, msg->data + 2, msg->data_size - 2);
+ iidev->xmitlen = msg->data_size + 2;
+ } else {
+ iidev->xmitmsg[0] = iidev->bmcaddr;
+ iidev->xmitmsg[1] = msg->data[0];
+ iidev->xmitmsg[4] = 0;
+ memcpy(iidev->xmitmsg + 5, msg->data + 1, msg->data_size - 1);
+ iidev->xmitlen = msg->data_size + 4;
+ }
+ iidev->xmitmsg[3] = iidev->slave->addr << 1;
+ if (((msg->data[0] >> 2) & 1) == 0)
+ /* If it's a command, put in our own sequence number. */
+ iidev->xmitmsg[4] = ((iidev->xmitmsg[4] & 0x03) |
+ (iidev->curr_seq << 2));
+
+ /* Now add on the final checksums. */
+ iidev->xmitmsg[2] = ipmb_checksum(iidev->xmitmsg, 2);
+ iidev->xmitmsg[iidev->xmitlen] =
+ ipmb_checksum(iidev->xmitmsg + 3, iidev->xmitlen - 3);
+ iidev->xmitlen++;
+}
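+
+/*
+ * A worked example of the framing above, assuming the default BMC
+ * address 0x20, our own slave at 7-bit address 0x08, and rqSeq 0: a
+ * Get Device ID request (netfn 0x06, cmd 0x01, no data) becomes
+ *
+ *	20 18 c8 10 00 01 ef
+ *
+ * where 0xc8 and 0xef are the 2's-complement header and message
+ * checksums. i2c_transfer() then sends bytes 18 c8 10 00 01 ef to
+ * 7-bit address 0x10 (0x20 >> 1), since the leading destination byte
+ * is carried in the I2C address itself.
+ */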
+
+static int ipmi_ipmb_thread(void *data)
+{
+ struct ipmi_ipmb_dev *iidev = data;
+
+ while (!kthread_should_stop()) {
+ long ret;
+ struct i2c_msg i2c_msg;
+ struct ipmi_smi_msg *msg = NULL;
+ unsigned long flags;
+ unsigned int retries = 0;
+
+ /* Wait for a message to send */
+ ret = down_interruptible(&iidev->wake_thread);
+ if (iidev->stopping)
+ break;
+ if (ret)
+ continue;
+
+ spin_lock_irqsave(&iidev->lock, flags);
+ if (iidev->next_msg) {
+ msg = iidev->next_msg;
+ iidev->next_msg = NULL;
+ }
+ spin_unlock_irqrestore(&iidev->lock, flags);
+ if (!msg)
+ continue;
+
+ ipmi_ipmb_format_for_xmit(iidev, msg);
+
+retry:
+ i2c_msg.len = iidev->xmitlen - 1;
+ if (i2c_msg.len > 32) {
+ ipmi_ipmb_send_response(iidev, msg,
+ IPMI_REQ_LEN_EXCEEDED_ERR);
+ continue;
+ }
+
+ i2c_msg.addr = iidev->xmitmsg[0] >> 1;
+ i2c_msg.flags = 0;
+ i2c_msg.buf = iidev->xmitmsg + 1;
+
+ /* Rely on i2c_transfer for a barrier. */
+ iidev->working_msg = msg;
+
+ ret = i2c_transfer(iidev->client->adapter, &i2c_msg, 1);
+
+ if ((msg->data[0] >> 2) & 1) {
+ /*
+ * It's a response, nothing will be returned
+ * by the other end.
+ */
+
+ iidev->working_msg = NULL;
+ ipmi_ipmb_send_response(iidev, msg,
+ ret < 0 ? IPMI_BUS_ERR : 0);
+ continue;
+ }
+ if (ret < 0) {
+ iidev->working_msg = NULL;
+ ipmi_ipmb_send_response(iidev, msg, IPMI_BUS_ERR);
+ continue;
+ }
+
+ /* A command was sent, wait for its response. */
+ ret = down_timeout(&iidev->got_rsp,
+ msecs_to_jiffies(iidev->retry_time_ms));
+
+ /*
+ * Grab the message if we can. If the handler hasn't
+ * already handled it, the message will still be there.
+ */
+ spin_lock_irqsave(&iidev->lock, flags);
+ msg = iidev->working_msg;
+ iidev->working_msg = NULL;
+ spin_unlock_irqrestore(&iidev->lock, flags);
+
+ if (!msg && ret) {
+ /*
+ * If working_msg is not set and we timed out,
+ * that means the message was grabbed by
+ * check_msg_done before we could grab it
+ * here. Wait again for check_msg_done to up
+ * the semaphore.
+ */
+ down(&iidev->got_rsp);
+ } else if (msg && ++retries <= iidev->max_retries) {
+ spin_lock_irqsave(&iidev->lock, flags);
+ iidev->working_msg = msg;
+ spin_unlock_irqrestore(&iidev->lock, flags);
+ goto retry;
+ }
+
+ if (msg)
+ ipmi_ipmb_send_response(iidev, msg, IPMI_TIMEOUT_ERR);
+ }
+
+ if (iidev->next_msg)
+ /* Return an unspecified error. */
+ ipmi_ipmb_send_response(iidev, iidev->next_msg, 0xff);
+
+ return 0;
+}
+
+static int ipmi_ipmb_start_processing(void *send_info,
+ struct ipmi_smi *new_intf)
+{
+ struct ipmi_ipmb_dev *iidev = send_info;
+
+ iidev->intf = new_intf;
+ iidev->ready = true;
+ return 0;
+}
+
+static void ipmi_ipmb_stop_thread(struct ipmi_ipmb_dev *iidev)
+{
+ if (iidev->thread) {
+ struct task_struct *t = iidev->thread;
+
+ iidev->thread = NULL;
+ iidev->stopping = true;
+ up(&iidev->wake_thread);
+ up(&iidev->got_rsp);
+ kthread_stop(t);
+ }
+}
+
+static void ipmi_ipmb_shutdown(void *send_info)
+{
+ struct ipmi_ipmb_dev *iidev = send_info;
+
+ ipmi_ipmb_stop_thread(iidev);
+}
+
+static void ipmi_ipmb_sender(void *send_info,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_ipmb_dev *iidev = send_info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iidev->lock, flags);
+ BUG_ON(iidev->next_msg);
+
+ iidev->next_msg = msg;
+ spin_unlock_irqrestore(&iidev->lock, flags);
+
+ up(&iidev->wake_thread);
+}
+
+static void ipmi_ipmb_request_events(void *send_info)
+{
+ /* We don't fetch events here. */
+}
+
+static void ipmi_ipmb_cleanup(struct ipmi_ipmb_dev *iidev)
+{
+ if (iidev->slave) {
+ i2c_slave_unregister(iidev->slave);
+ if (iidev->slave != iidev->client)
+ i2c_unregister_device(iidev->slave);
+ }
+ iidev->slave = NULL;
+ iidev->client = NULL;
+ ipmi_ipmb_stop_thread(iidev);
+}
+
+static void ipmi_ipmb_remove(struct i2c_client *client)
+{
+ struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
+
+ ipmi_ipmb_cleanup(iidev);
+ ipmi_unregister_smi(iidev->intf);
+}
+
+static int ipmi_ipmb_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct ipmi_ipmb_dev *iidev;
+ struct device_node *slave_np;
+ struct i2c_adapter *slave_adap = NULL;
+ struct i2c_client *slave = NULL;
+ int rv;
+
+ iidev = devm_kzalloc(&client->dev, sizeof(*iidev), GFP_KERNEL);
+ if (!iidev)
+ return -ENOMEM;
+
+ if (of_property_read_u8(dev->of_node, "bmcaddr", &iidev->bmcaddr) != 0)
+ iidev->bmcaddr = bmcaddr;
+ if (iidev->bmcaddr == 0 || iidev->bmcaddr & 1) {
+ /* Can't have the write bit set. */
+ dev_notice(&client->dev,
+ "Invalid bmc address value %2.2x\n", iidev->bmcaddr);
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(dev->of_node, "retry-time",
+ &iidev->retry_time_ms) != 0)
+ iidev->retry_time_ms = retry_time_ms;
+
+ if (of_property_read_u32(dev->of_node, "max-retries",
+ &iidev->max_retries) != 0)
+ iidev->max_retries = max_retries;
+
+ slave_np = of_parse_phandle(dev->of_node, "slave-dev", 0);
+ if (slave_np) {
+ slave_adap = of_get_i2c_adapter_by_node(slave_np);
+ of_node_put(slave_np);
+ if (!slave_adap) {
+ dev_notice(&client->dev,
+ "Could not find slave adapter\n");
+ return -EINVAL;
+ }
+ }
+
+ iidev->client = client;
+
+ if (slave_adap) {
+ struct i2c_board_info binfo;
+
+ memset(&binfo, 0, sizeof(binfo));
+ strscpy(binfo.type, "ipmb-slave", I2C_NAME_SIZE);
+ binfo.addr = client->addr;
+ binfo.flags = I2C_CLIENT_SLAVE;
+ slave = i2c_new_client_device(slave_adap, &binfo);
+ i2c_put_adapter(slave_adap);
+ if (IS_ERR(slave)) {
+ rv = PTR_ERR(slave);
+ dev_notice(&client->dev,
+ "Could not allocate slave device: %d\n", rv);
+ return rv;
+ }
+ i2c_set_clientdata(slave, iidev);
+ } else {
+ slave = client;
+ }
+ i2c_set_clientdata(client, iidev);
+ slave->flags |= I2C_CLIENT_SLAVE;
+
+ rv = i2c_slave_register(slave, ipmi_ipmb_slave_cb);
+ if (rv)
+ goto out_err;
+ iidev->slave = slave;
+ slave = NULL;
+
+ iidev->handlers.flags = IPMI_SMI_CAN_HANDLE_IPMB_DIRECT;
+ iidev->handlers.start_processing = ipmi_ipmb_start_processing;
+ iidev->handlers.shutdown = ipmi_ipmb_shutdown;
+ iidev->handlers.sender = ipmi_ipmb_sender;
+ iidev->handlers.request_events = ipmi_ipmb_request_events;
+
+ spin_lock_init(&iidev->lock);
+ sema_init(&iidev->wake_thread, 0);
+ sema_init(&iidev->got_rsp, 0);
+
+ iidev->thread = kthread_run(ipmi_ipmb_thread, iidev,
+ "kipmb%4.4x", client->addr);
+ if (IS_ERR(iidev->thread)) {
+ rv = PTR_ERR(iidev->thread);
+ dev_notice(&client->dev,
+ "Could not start kernel thread: error %d\n", rv);
+ goto out_err;
+ }
+
+ rv = ipmi_register_smi(&iidev->handlers,
+ iidev,
+ &client->dev,
+ iidev->bmcaddr);
+ if (rv)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ if (slave && slave != client)
+ i2c_unregister_device(slave);
+ ipmi_ipmb_cleanup(iidev);
+ return rv;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_ipmi_ipmb_match[] = {
+ { .type = "ipmi", .compatible = DEVICE_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_ipmi_ipmb_match);
+#else
+#define of_ipmi_ipmb_match NULL
+#endif
+
+static const struct i2c_device_id ipmi_ipmb_id[] = {
+ { DEVICE_NAME, 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ipmi_ipmb_id);
+
+static struct i2c_driver ipmi_ipmb_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = of_ipmi_ipmb_match,
+ },
+ .probe_new = ipmi_ipmb_probe,
+ .remove = ipmi_ipmb_remove,
+ .id_table = ipmi_ipmb_id,
+};
+module_i2c_driver(ipmi_ipmb_driver);
+
+MODULE_AUTHOR("Corey Minyard");
+MODULE_DESCRIPTION("IPMI IPMB driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
new file mode 100644
index 000000000..efda90dcf
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_kcs_sm.c
+ *
+ * State machine for handling IPMI KCS interfaces.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ */
+
+/*
+ * This state machine is taken from the state machine in the IPMI spec,
+ * pretty much verbatim. If you have questions about the states, see
+ * that document.
+ */
+
+#define DEBUG /* So dev_dbg() is always available. */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/ipmi_msgdefs.h> /* for completion codes */
+#include "ipmi_si_sm.h"
+
+/*
+ * kcs_debug is a bit-field
+ * KCS_DEBUG_ENABLE - turned on for now
+ * KCS_DEBUG_MSG - commands and their responses
+ * KCS_DEBUG_STATES - state machine
+ */
+#define KCS_DEBUG_STATES 4
+#define KCS_DEBUG_MSG 2
+#define KCS_DEBUG_ENABLE 1
+
+static int kcs_debug;
+module_param(kcs_debug, int, 0644);
+MODULE_PARM_DESC(kcs_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
+
+/* The states the KCS driver may be in. */
+enum kcs_states {
+ /* The KCS interface is currently doing nothing. */
+ KCS_IDLE,
+
+ /*
+ * We are starting an operation. The data is in the output
+ * buffer, but nothing has been done to the interface yet. This
+ * was added to the state machine in the spec to wait for the
+ * initial IBF.
+ */
+ KCS_START_OP,
+
+ /* We have written a write cmd to the interface. */
+ KCS_WAIT_WRITE_START,
+
+ /* We are writing bytes to the interface. */
+ KCS_WAIT_WRITE,
+
+ /*
+ * We have written the write end cmd to the interface, and
+ * still need to write the last byte.
+ */
+ KCS_WAIT_WRITE_END,
+
+ /* We are waiting to read data from the interface. */
+ KCS_WAIT_READ,
+
+ /*
+ * State to transition to the error handler, this was added to
+ * the state machine in the spec to be sure IBF was there.
+ */
+ KCS_ERROR0,
+
+ /*
+ * First stage error handler, wait for the interface to
+ * respond.
+ */
+ KCS_ERROR1,
+
+ /*
+ * The abort cmd has been written, wait for the interface to
+ * respond.
+ */
+ KCS_ERROR2,
+
+ /*
+ * We wrote some data to the interface, wait for it to switch
+ * to read mode.
+ */
+ KCS_ERROR3,
+
+ /* The hardware failed to follow the state machine. */
+ KCS_HOSED
+};
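+
+/*
+ * For orientation: a normal transaction walks KCS_IDLE ->
+ * KCS_START_OP -> KCS_WAIT_WRITE_START -> KCS_WAIT_WRITE (repeated
+ * per byte) -> KCS_WAIT_WRITE_END -> KCS_WAIT_READ (repeated per
+ * byte) -> KCS_IDLE, while the KCS_ERROR* states run the abort
+ * sequence from the spec before retrying or giving up.
+ */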
+
+#define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH
+#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
+
+/* Timeouts in microseconds. */
+#define IBF_RETRY_TIMEOUT (5*USEC_PER_SEC)
+#define OBF_RETRY_TIMEOUT (5*USEC_PER_SEC)
+#define MAX_ERROR_RETRIES 10
+#define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
+
+struct si_sm_data {
+ enum kcs_states state;
+ struct si_sm_io *io;
+ unsigned char write_data[MAX_KCS_WRITE_SIZE];
+ int write_pos;
+ int write_count;
+ int orig_write_count;
+ unsigned char read_data[MAX_KCS_READ_SIZE];
+ int read_pos;
+ int truncated;
+
+ unsigned int error_retries;
+ long ibf_timeout;
+ long obf_timeout;
+ unsigned long error0_timeout;
+};
+
+static unsigned int init_kcs_data(struct si_sm_data *kcs,
+ struct si_sm_io *io)
+{
+ kcs->state = KCS_IDLE;
+ kcs->io = io;
+ kcs->write_pos = 0;
+ kcs->write_count = 0;
+ kcs->orig_write_count = 0;
+ kcs->read_pos = 0;
+ kcs->error_retries = 0;
+ kcs->truncated = 0;
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+
+ /* Reserve 2 I/O bytes. */
+ return 2;
+}
+
+static inline unsigned char read_status(struct si_sm_data *kcs)
+{
+ return kcs->io->inputb(kcs->io, 1);
+}
+
+static inline unsigned char read_data(struct si_sm_data *kcs)
+{
+ return kcs->io->inputb(kcs->io, 0);
+}
+
+static inline void write_cmd(struct si_sm_data *kcs, unsigned char data)
+{
+ kcs->io->outputb(kcs->io, 1, data);
+}
+
+static inline void write_data(struct si_sm_data *kcs, unsigned char data)
+{
+ kcs->io->outputb(kcs->io, 0, data);
+}
+
+/* Control codes. */
+#define KCS_GET_STATUS_ABORT 0x60
+#define KCS_WRITE_START 0x61
+#define KCS_WRITE_END 0x62
+#define KCS_READ_BYTE 0x68
+
+/* Status bits. */
+#define GET_STATUS_STATE(status) (((status) >> 6) & 0x03)
+#define KCS_IDLE_STATE 0
+#define KCS_READ_STATE 1
+#define KCS_WRITE_STATE 2
+#define KCS_ERROR_STATE 3
+#define GET_STATUS_ATN(status) ((status) & 0x04)
+#define GET_STATUS_IBF(status) ((status) & 0x02)
+#define GET_STATUS_OBF(status) ((status) & 0x01)
+
+
+static inline void write_next_byte(struct si_sm_data *kcs)
+{
+ write_data(kcs, kcs->write_data[kcs->write_pos]);
+ (kcs->write_pos)++;
+ (kcs->write_count)--;
+}
+
+static inline void start_error_recovery(struct si_sm_data *kcs, char *reason)
+{
+ (kcs->error_retries)++;
+ if (kcs->error_retries > MAX_ERROR_RETRIES) {
+ if (kcs_debug & KCS_DEBUG_ENABLE)
+ dev_dbg(kcs->io->dev, "ipmi_kcs_sm: kcs hosed: %s\n",
+ reason);
+ kcs->state = KCS_HOSED;
+ } else {
+ kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES;
+ kcs->state = KCS_ERROR0;
+ }
+}
+
+static inline void read_next_byte(struct si_sm_data *kcs)
+{
+ if (kcs->read_pos >= MAX_KCS_READ_SIZE) {
+ /* Throw the data away and mark it truncated. */
+ read_data(kcs);
+ kcs->truncated = 1;
+ } else {
+ kcs->read_data[kcs->read_pos] = read_data(kcs);
+ (kcs->read_pos)++;
+ }
+ write_data(kcs, KCS_READ_BYTE);
+}
+
+static inline int check_ibf(struct si_sm_data *kcs, unsigned char status,
+ long time)
+{
+ if (GET_STATUS_IBF(status)) {
+ kcs->ibf_timeout -= time;
+ if (kcs->ibf_timeout < 0) {
+ start_error_recovery(kcs, "IBF not ready in time");
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ return 1;
+ }
+ return 0;
+ }
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ return 1;
+}
+
+static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
+ long time)
+{
+ if (!GET_STATUS_OBF(status)) {
+ kcs->obf_timeout -= time;
+ if (kcs->obf_timeout < 0) {
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+ start_error_recovery(kcs, "OBF not ready in time");
+ return 1;
+ }
+ return 0;
+ }
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+ return 1;
+}
+
+static void clear_obf(struct si_sm_data *kcs, unsigned char status)
+{
+ if (GET_STATUS_OBF(status))
+ read_data(kcs);
+}
+
+static void restart_kcs_transaction(struct si_sm_data *kcs)
+{
+ kcs->write_count = kcs->orig_write_count;
+ kcs->write_pos = 0;
+ kcs->read_pos = 0;
+ kcs->state = KCS_WAIT_WRITE_START;
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+ write_cmd(kcs, KCS_WRITE_START);
+}
+
+static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
+ unsigned int size)
+{
+ unsigned int i;
+
+ if (size < 2)
+ return IPMI_REQ_LEN_INVALID_ERR;
+ if (size > MAX_KCS_WRITE_SIZE)
+ return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+ if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
+ dev_warn(kcs->io->dev, "KCS in invalid state %d\n", kcs->state);
+ return IPMI_NOT_IN_MY_STATE_ERR;
+ }
+
+ if (kcs_debug & KCS_DEBUG_MSG) {
+ dev_dbg(kcs->io->dev, "%s -", __func__);
+ for (i = 0; i < size; i++)
+ pr_cont(" %02x", data[i]);
+ pr_cont("\n");
+ }
+ kcs->error_retries = 0;
+ memcpy(kcs->write_data, data, size);
+ kcs->write_count = size;
+ kcs->orig_write_count = size;
+ kcs->write_pos = 0;
+ kcs->read_pos = 0;
+ kcs->state = KCS_START_OP;
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+ return 0;
+}
+
+static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data,
+ unsigned int length)
+{
+ if (length < kcs->read_pos) {
+ kcs->read_pos = length;
+ kcs->truncated = 1;
+ }
+
+ memcpy(data, kcs->read_data, kcs->read_pos);
+
+ if ((length >= 3) && (kcs->read_pos < 3)) {
+ /*
+ * Guarantee that we return at least 3 bytes, with an
+ * error in the third byte if it is too short.
+ */
+ data[2] = IPMI_ERR_UNSPECIFIED;
+ kcs->read_pos = 3;
+ }
+ if (kcs->truncated) {
+ /*
+ * Report a truncated error. We might overwrite
+ * another error, but that's too bad, the user needs
+ * to know it was truncated.
+ */
+ data[2] = IPMI_ERR_MSG_TRUNCATED;
+ kcs->truncated = 0;
+ }
+
+ return kcs->read_pos;
+}
+
+/*
+ * This implements the state machine defined in the IPMI manual; see
+ * that for details on how this works. Divide that flowchart into
+ * sections delimited by "Wait for IBF" and this will become clear.
+ */
+static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
+{
+ unsigned char status;
+ unsigned char state;
+
+ status = read_status(kcs);
+
+ if (kcs_debug & KCS_DEBUG_STATES)
+ dev_dbg(kcs->io->dev,
+ "KCS: State = %d, %x\n", kcs->state, status);
+
+ /* All states wait for ibf, so just do it here. */
+ if (!check_ibf(kcs, status, time))
+ return SI_SM_CALL_WITH_DELAY;
+
+ /* Just about everything looks at the KCS state, so grab that, too. */
+ state = GET_STATUS_STATE(status);
+
+ switch (kcs->state) {
+ case KCS_IDLE:
+ /* If there's an interrupt source, turn it off. */
+ clear_obf(kcs, status);
+
+ if (GET_STATUS_ATN(status))
+ return SI_SM_ATTN;
+ else
+ return SI_SM_IDLE;
+
+ case KCS_START_OP:
+ if (state != KCS_IDLE_STATE) {
+ start_error_recovery(kcs,
+ "State machine not idle at start");
+ break;
+ }
+
+ clear_obf(kcs, status);
+ write_cmd(kcs, KCS_WRITE_START);
+ kcs->state = KCS_WAIT_WRITE_START;
+ break;
+
+ case KCS_WAIT_WRITE_START:
+ if (state != KCS_WRITE_STATE) {
+ start_error_recovery(
+ kcs,
+ "Not in write state at write start");
+ break;
+ }
+ read_data(kcs);
+ if (kcs->write_count == 1) {
+ write_cmd(kcs, KCS_WRITE_END);
+ kcs->state = KCS_WAIT_WRITE_END;
+ } else {
+ write_next_byte(kcs);
+ kcs->state = KCS_WAIT_WRITE;
+ }
+ break;
+
+ case KCS_WAIT_WRITE:
+ if (state != KCS_WRITE_STATE) {
+ start_error_recovery(kcs,
+ "Not in write state for write");
+ break;
+ }
+ clear_obf(kcs, status);
+ if (kcs->write_count == 1) {
+ write_cmd(kcs, KCS_WRITE_END);
+ kcs->state = KCS_WAIT_WRITE_END;
+ } else {
+ write_next_byte(kcs);
+ }
+ break;
+
+ case KCS_WAIT_WRITE_END:
+ if (state != KCS_WRITE_STATE) {
+ start_error_recovery(kcs,
+ "Not in write state for write end");
+ break;
+ }
+ clear_obf(kcs, status);
+ write_next_byte(kcs);
+ kcs->state = KCS_WAIT_READ;
+ break;
+
+ case KCS_WAIT_READ:
+ if ((state != KCS_READ_STATE) && (state != KCS_IDLE_STATE)) {
+ start_error_recovery(
+ kcs,
+ "Not in read or idle in read state");
+ break;
+ }
+
+ if (state == KCS_READ_STATE) {
+ if (!check_obf(kcs, status, time))
+ return SI_SM_CALL_WITH_DELAY;
+ read_next_byte(kcs);
+ } else {
+ /*
+ * We don't implement this exactly like the state
+ * machine in the spec. Some broken hardware
+ * does not write the final dummy byte to the
+ * read register. Thus obf will never go high
+ * here. We just go straight to idle, and we
+ * handle clearing out obf in idle state if it
+ * happens to come in.
+ */
+ clear_obf(kcs, status);
+ kcs->orig_write_count = 0;
+ kcs->state = KCS_IDLE;
+ return SI_SM_TRANSACTION_COMPLETE;
+ }
+ break;
+
+ case KCS_ERROR0:
+ clear_obf(kcs, status);
+ status = read_status(kcs);
+ if (GET_STATUS_OBF(status))
+ /* controller isn't responding */
+ if (time_before(jiffies, kcs->error0_timeout))
+ return SI_SM_CALL_WITH_TICK_DELAY;
+ write_cmd(kcs, KCS_GET_STATUS_ABORT);
+ kcs->state = KCS_ERROR1;
+ break;
+
+ case KCS_ERROR1:
+ clear_obf(kcs, status);
+ write_data(kcs, 0);
+ kcs->state = KCS_ERROR2;
+ break;
+
+ case KCS_ERROR2:
+ if (state != KCS_READ_STATE) {
+ start_error_recovery(kcs,
+ "Not in read state for error2");
+ break;
+ }
+ if (!check_obf(kcs, status, time))
+ return SI_SM_CALL_WITH_DELAY;
+
+ clear_obf(kcs, status);
+ write_data(kcs, KCS_READ_BYTE);
+ kcs->state = KCS_ERROR3;
+ break;
+
+ case KCS_ERROR3:
+ if (state != KCS_IDLE_STATE) {
+ start_error_recovery(kcs,
+ "Not in idle state for error3");
+ break;
+ }
+
+ if (!check_obf(kcs, status, time))
+ return SI_SM_CALL_WITH_DELAY;
+
+ clear_obf(kcs, status);
+ if (kcs->orig_write_count) {
+ restart_kcs_transaction(kcs);
+ } else {
+ kcs->state = KCS_IDLE;
+ return SI_SM_TRANSACTION_COMPLETE;
+ }
+ break;
+
+ case KCS_HOSED:
+ break;
+ }
+
+ if (kcs->state == KCS_HOSED) {
+ init_kcs_data(kcs, kcs->io);
+ return SI_SM_HOSED;
+ }
+
+ return SI_SM_CALL_WITHOUT_DELAY;
+}
+
+static int kcs_size(void)
+{
+ return sizeof(struct si_sm_data);
+}
+
+static int kcs_detect(struct si_sm_data *kcs)
+{
+ /*
+ * It's impossible for the KCS status register to be all 1's,
+ * (assuming a properly functioning, self-initialized BMC)
+ * but that's what you get from reading a bogus address, so we
+ * test that first.
+ */
+ if (read_status(kcs) == 0xff)
+ return 1;
+
+ return 0;
+}
+
+static void kcs_cleanup(struct si_sm_data *kcs)
+{
+}
+
+const struct si_sm_handlers kcs_smi_handlers = {
+ .init_data = init_kcs_data,
+ .start_transaction = start_kcs_transaction,
+ .get_result = get_kcs_result,
+ .event = kcs_event,
+ .detect = kcs_detect,
+ .cleanup = kcs_cleanup,
+ .size = kcs_size,
+};
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
new file mode 100644
index 000000000..5d403fb5b
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -0,0 +1,5573 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_msghandler.c
+ *
+ * Incoming and outgoing message routing for an IPMI interface.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ */
+
+#define pr_fmt(fmt) "IPMI message handler: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/panic_notifier.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/workqueue.h>
+#include <linux/uuid.h>
+#include <linux/nospec.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+
+#define IPMI_DRIVER_VERSION "39.2"
+
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
+static int ipmi_init_msghandler(void);
+static void smi_recv_tasklet(struct tasklet_struct *t);
+static void handle_new_recv_msgs(struct ipmi_smi *intf);
+static void need_waiter(struct ipmi_smi *intf);
+static int handle_one_recv_msg(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg);
+
+static bool initialized;
+static bool drvregistered;
+
+/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
+enum ipmi_panic_event_op {
+ IPMI_SEND_PANIC_EVENT_NONE,
+ IPMI_SEND_PANIC_EVENT,
+ IPMI_SEND_PANIC_EVENT_STRING,
+ IPMI_SEND_PANIC_EVENT_MAX
+};
+
+/* Indices in this array should be mapped to enum ipmi_panic_event_op */
+static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL };
+
+#ifdef CONFIG_IPMI_PANIC_STRING
+#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
+#elif defined(CONFIG_IPMI_PANIC_EVENT)
+#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
+#else
+#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
+#endif
+
+static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
+
+static int panic_op_write_handler(const char *val,
+ const struct kernel_param *kp)
+{
+ char valcp[16];
+ int e;
+
+ strscpy(valcp, val, sizeof(valcp));
+ e = match_string(ipmi_panic_event_str, -1, strstrip(valcp));
+ if (e < 0)
+ return e;
+
+ ipmi_send_panic_event = e;
+ return 0;
+}
+
+static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
+{
+ const char *event_str;
+
+ if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX)
+ event_str = "???";
+ else
+ event_str = ipmi_panic_event_str[ipmi_send_panic_event];
+
+ return sprintf(buffer, "%s\n", event_str);
+}
+
+static const struct kernel_param_ops panic_op_ops = {
+ .set = panic_op_write_handler,
+ .get = panic_op_read_handler
+};
+module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
+MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
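+
+/*
+ * For example, an operator can pick the behavior at load time with
+ * "modprobe ipmi_msghandler panic_op=string" (when built as a
+ * module), or at runtime by writing one of the strings above to
+ * /sys/module/ipmi_msghandler/parameters/panic_op.
+ */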
+
+
+#define MAX_EVENTS_IN_QUEUE 25
+
+/* Remain in auto-maintenance mode for this amount of time (in ms). */
+static unsigned long maintenance_mode_timeout_ms = 30000;
+module_param(maintenance_mode_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(maintenance_mode_timeout_ms,
+ "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");
+
+/*
+ * Don't let a message sit in a queue forever; always time it with at least
+ * the max message timer. This is in milliseconds.
+ */
+#define MAX_MSG_TIMEOUT 60000
+
+/*
+ * Timeout times below are in milliseconds, and are done off a 1
+ * second timer. So setting the value to 1000 would mean anything
+ * between 0 and 1000ms. So really the only reasonable minimum
+ * setting is 2000ms, which is between 1 and 2 seconds.
+ */
+
+/* The default timeout for message retries. */
+static unsigned long default_retry_ms = 2000;
+module_param(default_retry_ms, ulong, 0644);
+MODULE_PARM_DESC(default_retry_ms,
+ "The time (milliseconds) between retry sends");
+
+/* The default timeout for maintenance mode message retries. */
+static unsigned long default_maintenance_retry_ms = 3000;
+module_param(default_maintenance_retry_ms, ulong, 0644);
+MODULE_PARM_DESC(default_maintenance_retry_ms,
+ "The time (milliseconds) between retry sends in maintenance mode");
+
+/* The default maximum number of retries */
+static unsigned int default_max_retries = 4;
+module_param(default_max_retries, uint, 0644);
+MODULE_PARM_DESC(default_max_retries,
+ "The time (milliseconds) between retry sends in maintenance mode");
+
+/* The default maximum number of users that may register. */
+static unsigned int max_users = 30;
+module_param(max_users, uint, 0644);
+MODULE_PARM_DESC(max_users,
+ "The most users that may use the IPMI stack at one time.");
+
+/* The default maximum number of messages a user may have outstanding. */
+static unsigned int max_msgs_per_user = 100;
+module_param(max_msgs_per_user, uint, 0644);
+MODULE_PARM_DESC(max_msgs_per_user,
+ "The most message a user may have outstanding.");
+
+/* Call every ~1000 ms. */
+#define IPMI_TIMEOUT_TIME 1000
+
+/* How many jiffies does it take to get to the timeout time. */
+#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
+
+/*
+ * Request events from the queue every second (this is the number of
+ * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
+ * future, IPMI will add a way to know immediately if an event is in
+ * the queue and this silliness can go away.
+ */
+#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
+
+/* How long should we cache dynamic device IDs? */
+#define IPMI_DYN_DEV_ID_EXPIRY (10 * HZ)
+
+/*
+ * The main "user" data structure.
+ */
+struct ipmi_user {
+ struct list_head link;
+
+ /*
+ * A pointer to myself; set to NULL when the user is
+ * destroyed so srcu_dereference can be used to detect that.
+ */
+ struct ipmi_user *self;
+ struct srcu_struct release_barrier;
+
+ struct kref refcount;
+
+ /* The upper layer that handles receive messages. */
+ const struct ipmi_user_hndl *handler;
+ void *handler_data;
+
+ /* The interface this user is bound to. */
+ struct ipmi_smi *intf;
+
+ /* Does this interface receive IPMI events? */
+ bool gets_events;
+
+ atomic_t nr_msgs;
+
+ /* Free must run in process context for RCU cleanup. */
+ struct work_struct remove_work;
+};
+
+static struct workqueue_struct *remove_work_wq;
+
+static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
+ __acquires(user->release_barrier)
+{
+ struct ipmi_user *ruser;
+
+ *index = srcu_read_lock(&user->release_barrier);
+ ruser = srcu_dereference(user->self, &user->release_barrier);
+ if (!ruser)
+ srcu_read_unlock(&user->release_barrier, *index);
+ return ruser;
+}
+
+static void release_ipmi_user(struct ipmi_user *user, int index)
+{
+ srcu_read_unlock(&user->release_barrier, index);
+}
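+
+/*
+ * A sketch of the intended idiom for the pair above (the error value
+ * is illustrative):
+ *
+ *	user = acquire_ipmi_user(user, &index);
+ *	if (!user)
+ *		return -ENODEV;		(user is being destroyed)
+ *	...use user...
+ *	release_ipmi_user(user, index);
+ */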
+
+struct cmd_rcvr {
+ struct list_head link;
+
+ struct ipmi_user *user;
+ unsigned char netfn;
+ unsigned char cmd;
+ unsigned int chans;
+
+ /*
+ * This is used to form a linked list during mass deletion.
+ * Since this is in an RCU list, we cannot use the link above
+ * or change any data until the RCU period completes. So we
+ * use this next variable during mass deletion so we can have
+ * a list and don't have to wait and restart the search on
+ * every individual deletion of a command.
+ */
+ struct cmd_rcvr *next;
+};
+
+struct seq_table {
+ unsigned int inuse : 1;
+ unsigned int broadcast : 1;
+
+ unsigned long timeout;
+ unsigned long orig_timeout;
+ unsigned int retries_left;
+
+ /*
+ * To verify on an incoming send message response that this is
+ * the message that the response is for, we keep a sequence id
+ * and increment it every time we send a message.
+ */
+ long seqid;
+
+ /*
+ * This is held so we can properly respond to the message on a
+ * timeout, and it is used to hold the temporary data for
+ * retransmission, too.
+ */
+ struct ipmi_recv_msg *recv_msg;
+};
+
+/*
+ * Store the information in a msgid (long) to allow us to find a
+ * sequence table entry from the msgid.
+ */
+#define STORE_SEQ_IN_MSGID(seq, seqid) \
+ ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
+
+#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
+ do { \
+ seq = (((msgid) >> 26) & 0x3f); \
+ seqid = ((msgid) & 0x3ffffff); \
+ } while (0)
+
+#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
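+
+/*
+ * For example, seq 0x2a with seqid 0x123 packs to
+ * (0x2a << 26) | 0x123 = 0xa8000123, and GET_SEQ_FROM_MSGID
+ * recovers seq = 0x2a and seqid = 0x123 from that msgid.
+ */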
+
+#define IPMI_MAX_CHANNELS 16
+struct ipmi_channel {
+ unsigned char medium;
+ unsigned char protocol;
+};
+
+struct ipmi_channel_set {
+ struct ipmi_channel c[IPMI_MAX_CHANNELS];
+};
+
+struct ipmi_my_addrinfo {
+ /*
+ * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
+ * but may be changed by the user.
+ */
+ unsigned char address;
+
+ /*
+ * My LUN. This should generally stay the SMS LUN, but just in
+ * case...
+ */
+ unsigned char lun;
+};
+
+/*
+ * Note that the product id, manufacturer id, guid, and device id are
+ * immutable in this structure, so dyn_mutex is not required for
+ * accessing those. If those change on a BMC, a new BMC is allocated.
+ */
+struct bmc_device {
+ struct platform_device pdev;
+ struct list_head intfs; /* Interfaces on this BMC. */
+ struct ipmi_device_id id;
+ struct ipmi_device_id fetch_id;
+ int dyn_id_set;
+ unsigned long dyn_id_expiry;
+ struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */
+ guid_t guid;
+ guid_t fetch_guid;
+ int dyn_guid_set;
+ struct kref usecount;
+ struct work_struct remove_work;
+ unsigned char cc; /* completion code */
+};
+#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
+
+static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
+ struct ipmi_device_id *id,
+ bool *guid_set, guid_t *guid);
+
+/*
+ * Various statistics for IPMI, these index stats[] in the ipmi_smi
+ * structure.
+ */
+enum ipmi_stat_indexes {
+ /* Commands we got from the user that were invalid. */
+ IPMI_STAT_sent_invalid_commands = 0,
+
+ /* Commands we sent to the MC. */
+ IPMI_STAT_sent_local_commands,
+
+ /* Responses from the MC that were delivered to a user. */
+ IPMI_STAT_handled_local_responses,
+
+ /* Responses from the MC that were not delivered to a user. */
+ IPMI_STAT_unhandled_local_responses,
+
+ /* Commands we sent out to the IPMB bus. */
+ IPMI_STAT_sent_ipmb_commands,
+
+ /* Commands sent on the IPMB that had errors on the SEND CMD */
+ IPMI_STAT_sent_ipmb_command_errs,
+
+ /* Each retransmit increments this count. */
+ IPMI_STAT_retransmitted_ipmb_commands,
+
+ /*
+ * When a message times out (runs out of retransmits) this is
+ * incremented.
+ */
+ IPMI_STAT_timed_out_ipmb_commands,
+
+ /*
+ * This is like above, but for broadcasts. Broadcasts are
+ * *not* included in the above count (they are expected to
+ * time out).
+ */
+ IPMI_STAT_timed_out_ipmb_broadcasts,
+
+ /* Responses I have sent to the IPMB bus. */
+ IPMI_STAT_sent_ipmb_responses,
+
+ /* The response was delivered to the user. */
+ IPMI_STAT_handled_ipmb_responses,
+
+ /* The response had invalid data in it. */
+ IPMI_STAT_invalid_ipmb_responses,
+
+ /* The response didn't have anyone waiting for it. */
+ IPMI_STAT_unhandled_ipmb_responses,
+
+	/* Commands we sent out over the LAN. */
+ IPMI_STAT_sent_lan_commands,
+
+	/* Commands sent over the LAN that had errors on the SEND CMD */
+ IPMI_STAT_sent_lan_command_errs,
+
+ /* Each retransmit increments this count. */
+ IPMI_STAT_retransmitted_lan_commands,
+
+ /*
+ * When a message times out (runs out of retransmits) this is
+ * incremented.
+ */
+ IPMI_STAT_timed_out_lan_commands,
+
+	/* Responses I have sent over the LAN. */
+ IPMI_STAT_sent_lan_responses,
+
+ /* The response was delivered to the user. */
+ IPMI_STAT_handled_lan_responses,
+
+ /* The response had invalid data in it. */
+ IPMI_STAT_invalid_lan_responses,
+
+ /* The response didn't have anyone waiting for it. */
+ IPMI_STAT_unhandled_lan_responses,
+
+ /* The command was delivered to the user. */
+ IPMI_STAT_handled_commands,
+
+ /* The command had invalid data in it. */
+ IPMI_STAT_invalid_commands,
+
+ /* The command didn't have anyone waiting for it. */
+ IPMI_STAT_unhandled_commands,
+
+ /* Invalid data in an event. */
+ IPMI_STAT_invalid_events,
+
+ /* Events that were received with the proper format. */
+ IPMI_STAT_events,
+
+ /* Retransmissions on IPMB that failed. */
+ IPMI_STAT_dropped_rexmit_ipmb_commands,
+
+ /* Retransmissions on LAN that failed. */
+ IPMI_STAT_dropped_rexmit_lan_commands,
+
+ /* This *must* remain last, add new values above this. */
+ IPMI_NUM_STATS
+};
+
+#define IPMI_IPMB_NUM_SEQ 64
+struct ipmi_smi {
+ struct module *owner;
+
+ /* What interface number are we? */
+ int intf_num;
+
+ struct kref refcount;
+
+ /* Set when the interface is being unregistered. */
+ bool in_shutdown;
+
+ /* Used for a list of interfaces. */
+ struct list_head link;
+
+ /*
+ * The list of upper layers that are using me. seq_lock write
+ * protects this. Read protection is with srcu.
+ */
+ struct list_head users;
+ struct srcu_struct users_srcu;
+ atomic_t nr_users;
+ struct device_attribute nr_users_devattr;
+ struct device_attribute nr_msgs_devattr;
+
+ /* Used for wake ups at startup. */
+ wait_queue_head_t waitq;
+
+ /*
+ * Prevents the interface from being unregistered when the
+ * interface is used by being looked up through the BMC
+ * structure.
+ */
+ struct mutex bmc_reg_mutex;
+
+ struct bmc_device tmp_bmc;
+ struct bmc_device *bmc;
+ bool bmc_registered;
+ struct list_head bmc_link;
+ char *my_dev_name;
+ bool in_bmc_register; /* Handle recursive situations. Yuck. */
+ struct work_struct bmc_reg_work;
+
+ const struct ipmi_smi_handlers *handlers;
+ void *send_info;
+
+ /* Driver-model device for the system interface. */
+ struct device *si_dev;
+
+ /*
+ * A table of sequence numbers for this interface. We use the
+ * sequence numbers for IPMB messages that go out of the
+ * interface to match them up with their responses. A routine
+ * is called periodically to time the items in this list.
+ */
+ spinlock_t seq_lock;
+ struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
+ int curr_seq;
+
+ /*
+ * Messages queued for delivery. If delivery fails (out of memory
+	 * for instance), they will stay in here to be processed later in a
+ * periodic timer interrupt. The tasklet is for handling received
+ * messages directly from the handler.
+ */
+ spinlock_t waiting_rcv_msgs_lock;
+ struct list_head waiting_rcv_msgs;
+ atomic_t watchdog_pretimeouts_to_deliver;
+ struct tasklet_struct recv_tasklet;
+
+ spinlock_t xmit_msgs_lock;
+ struct list_head xmit_msgs;
+ struct ipmi_smi_msg *curr_msg;
+ struct list_head hp_xmit_msgs;
+
+ /*
+ * The list of command receivers that are registered for commands
+ * on this interface.
+ */
+ struct mutex cmd_rcvrs_mutex;
+ struct list_head cmd_rcvrs;
+
+ /*
+	 * Events that were queued because no one was there to receive
+ * them.
+ */
+ spinlock_t events_lock; /* For dealing with event stuff. */
+ struct list_head waiting_events;
+ unsigned int waiting_events_count; /* How many events in queue? */
+ char delivering_events;
+ char event_msg_printed;
+
+ /* How many users are waiting for events? */
+ atomic_t event_waiters;
+ unsigned int ticks_to_req_ev;
+
+ spinlock_t watch_lock; /* For dealing with watch stuff below. */
+
+ /* How many users are waiting for commands? */
+ unsigned int command_waiters;
+
+ /* How many users are waiting for watchdogs? */
+ unsigned int watchdog_waiters;
+
+ /* How many users are waiting for message responses? */
+ unsigned int response_waiters;
+
+ /*
+ * Tells what the lower layer has last been asked to watch for,
+ * messages and/or watchdogs. Protected by watch_lock.
+ */
+ unsigned int last_watch_mask;
+
+ /*
+ * The event receiver for my BMC, only really used at panic
+ * shutdown as a place to store this.
+ */
+ unsigned char event_receiver;
+ unsigned char event_receiver_lun;
+ unsigned char local_sel_device;
+ unsigned char local_event_generator;
+
+ /* For handling of maintenance mode. */
+ int maintenance_mode;
+ bool maintenance_mode_enable;
+ int auto_maintenance_timeout;
+ spinlock_t maintenance_mode_lock; /* Used in a timer... */
+
+ /*
+ * If we are doing maintenance on something on IPMB, extend
+ * the timeout time to avoid timeouts writing firmware and
+ * such.
+ */
+ int ipmb_maintenance_mode_timeout;
+
+ /*
+ * A cheap hack, if this is non-null and a message to an
+ * interface comes in with a NULL user, call this routine with
+ * it. Note that the message will still be freed by the
+ * caller. This only works on the system interface.
+ *
+ * Protected by bmc_reg_mutex.
+ */
+ void (*null_user_handler)(struct ipmi_smi *intf,
+ struct ipmi_recv_msg *msg);
+
+ /*
+ * When we are scanning the channels for an SMI, this will
+ * tell which channel we are scanning.
+ */
+ int curr_channel;
+
+ /* Channel information */
+ struct ipmi_channel_set *channel_list;
+ unsigned int curr_working_cset; /* First index into the following. */
+ struct ipmi_channel_set wchannels[2];
+ struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
+ bool channels_ready;
+
+ atomic_t stats[IPMI_NUM_STATS];
+
+	/*
+	 * Duplicate of the run_to_completion flag in the smi_info and
+	 * similar lower-level structures.  Kept here to reduce the
+	 * number of parameters passed around by the "low" level IPMI
+	 * code.
+	 */
+ int run_to_completion;
+};
+#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
+
+static void __get_guid(struct ipmi_smi *intf);
+static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
+static int __ipmi_bmc_register(struct ipmi_smi *intf,
+ struct ipmi_device_id *id,
+ bool guid_set, guid_t *guid, int intf_num);
+static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
+
+/*
+ * The driver model view of the IPMI messaging driver.
+ */
+static struct platform_driver ipmidriver = {
+ .driver = {
+ .name = "ipmi",
+ .bus = &platform_bus_type
+ }
+};
+/*
+ * This mutex keeps us from adding the same BMC twice.
+ */
+static DEFINE_MUTEX(ipmidriver_mutex);
+
+static LIST_HEAD(ipmi_interfaces);
+static DEFINE_MUTEX(ipmi_interfaces_mutex);
+#define ipmi_interfaces_mutex_held() \
+ lockdep_is_held(&ipmi_interfaces_mutex)
+static struct srcu_struct ipmi_interfaces_srcu;
+
+/*
+ * List of watchers that want to know when smi's are added and deleted.
+ */
+static LIST_HEAD(smi_watchers);
+static DEFINE_MUTEX(smi_watchers_mutex);
+
+#define ipmi_inc_stat(intf, stat) \
+ atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
+#define ipmi_get_stat(intf, stat) \
+ ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
+
+static const char * const addr_src_to_str[] = {
+ "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
+ "device-tree", "platform"
+};
+
+const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
+{
+ if (src >= SI_LAST)
+ src = 0; /* Invalid */
+ return addr_src_to_str[src];
+}
+EXPORT_SYMBOL(ipmi_addr_src_to_str);
+
+static int is_lan_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_LAN_ADDR_TYPE;
+}
+
+static int is_ipmb_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
+}
+
+static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
+}
+
+static int is_ipmb_direct_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE;
+}
+
+static void free_recv_msg_list(struct list_head *q)
+{
+ struct ipmi_recv_msg *msg, *msg2;
+
+ list_for_each_entry_safe(msg, msg2, q, link) {
+ list_del(&msg->link);
+ ipmi_free_recv_msg(msg);
+ }
+}
+
+static void free_smi_msg_list(struct list_head *q)
+{
+ struct ipmi_smi_msg *msg, *msg2;
+
+ list_for_each_entry_safe(msg, msg2, q, link) {
+ list_del(&msg->link);
+ ipmi_free_smi_msg(msg);
+ }
+}
+
+static void clean_up_interface_data(struct ipmi_smi *intf)
+{
+ int i;
+ struct cmd_rcvr *rcvr, *rcvr2;
+ struct list_head list;
+
+ tasklet_kill(&intf->recv_tasklet);
+
+ free_smi_msg_list(&intf->waiting_rcv_msgs);
+ free_recv_msg_list(&intf->waiting_events);
+
+ /*
+ * Wholesale remove all the entries from the list in the
+ * interface and wait for RCU to know that none are in use.
+ */
+ mutex_lock(&intf->cmd_rcvrs_mutex);
+ INIT_LIST_HEAD(&list);
+ list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
+
+ list_for_each_entry_safe(rcvr, rcvr2, &list, link)
+ kfree(rcvr);
+
+ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+ if ((intf->seq_table[i].inuse)
+ && (intf->seq_table[i].recv_msg))
+ ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
+ }
+}
+
+static void intf_free(struct kref *ref)
+{
+ struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
+
+ clean_up_interface_data(intf);
+ kfree(intf);
+}
+
+int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
+{
+ struct ipmi_smi *intf;
+ int index, rv;
+
+ /*
+ * Make sure the driver is actually initialized, this handles
+	 * Make sure the driver is actually initialized; this handles
+ */
+ rv = ipmi_init_msghandler();
+ if (rv)
+ return rv;
+
+ mutex_lock(&smi_watchers_mutex);
+
+ list_add(&watcher->link, &smi_watchers);
+
+ index = srcu_read_lock(&ipmi_interfaces_srcu);
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
+ lockdep_is_held(&smi_watchers_mutex)) {
+ int intf_num = READ_ONCE(intf->intf_num);
+
+ if (intf_num == -1)
+ continue;
+ watcher->new_smi(intf_num, intf->si_dev);
+ }
+ srcu_read_unlock(&ipmi_interfaces_srcu, index);
+
+ mutex_unlock(&smi_watchers_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_smi_watcher_register);
+
+int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
+{
+ mutex_lock(&smi_watchers_mutex);
+ list_del(&watcher->link);
+ mutex_unlock(&smi_watchers_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
+
+/*
+ * Takes smi_watchers_mutex itself; callers must not hold it.
+ */
+static void
+call_smi_watchers(int i, struct device *dev)
+{
+ struct ipmi_smi_watcher *w;
+
+ mutex_lock(&smi_watchers_mutex);
+ list_for_each_entry(w, &smi_watchers, link) {
+ if (try_module_get(w->owner)) {
+ w->new_smi(i, dev);
+ module_put(w->owner);
+ }
+ }
+ mutex_unlock(&smi_watchers_mutex);
+}
+
+static int
+ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
+{
+ if (addr1->addr_type != addr2->addr_type)
+ return 0;
+
+ if (addr1->channel != addr2->channel)
+ return 0;
+
+ if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+ struct ipmi_system_interface_addr *smi_addr1
+ = (struct ipmi_system_interface_addr *) addr1;
+ struct ipmi_system_interface_addr *smi_addr2
+ = (struct ipmi_system_interface_addr *) addr2;
+ return (smi_addr1->lun == smi_addr2->lun);
+ }
+
+ if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
+ struct ipmi_ipmb_addr *ipmb_addr1
+ = (struct ipmi_ipmb_addr *) addr1;
+ struct ipmi_ipmb_addr *ipmb_addr2
+ = (struct ipmi_ipmb_addr *) addr2;
+
+ return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
+ && (ipmb_addr1->lun == ipmb_addr2->lun));
+ }
+
+ if (is_ipmb_direct_addr(addr1)) {
+ struct ipmi_ipmb_direct_addr *daddr1
+ = (struct ipmi_ipmb_direct_addr *) addr1;
+ struct ipmi_ipmb_direct_addr *daddr2
+ = (struct ipmi_ipmb_direct_addr *) addr2;
+
+ return daddr1->slave_addr == daddr2->slave_addr &&
+ daddr1->rq_lun == daddr2->rq_lun &&
+ daddr1->rs_lun == daddr2->rs_lun;
+ }
+
+ if (is_lan_addr(addr1)) {
+ struct ipmi_lan_addr *lan_addr1
+ = (struct ipmi_lan_addr *) addr1;
+ struct ipmi_lan_addr *lan_addr2
+ = (struct ipmi_lan_addr *) addr2;
+
+ return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
+ && (lan_addr1->local_SWID == lan_addr2->local_SWID)
+ && (lan_addr1->session_handle
+ == lan_addr2->session_handle)
+ && (lan_addr1->lun == lan_addr2->lun));
+ }
+
+ return 1;
+}
+
+int ipmi_validate_addr(struct ipmi_addr *addr, int len)
+{
+ if (len < sizeof(struct ipmi_system_interface_addr))
+ return -EINVAL;
+
+ if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+ if (addr->channel != IPMI_BMC_CHANNEL)
+ return -EINVAL;
+ return 0;
+ }
+
+ if ((addr->channel == IPMI_BMC_CHANNEL)
+ || (addr->channel >= IPMI_MAX_CHANNELS)
+ || (addr->channel < 0))
+ return -EINVAL;
+
+ if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
+ if (len < sizeof(struct ipmi_ipmb_addr))
+ return -EINVAL;
+ return 0;
+ }
+
+ if (is_ipmb_direct_addr(addr)) {
+ struct ipmi_ipmb_direct_addr *daddr = (void *) addr;
+
+ if (addr->channel != 0)
+ return -EINVAL;
+ if (len < sizeof(struct ipmi_ipmb_direct_addr))
+ return -EINVAL;
+
+ if (daddr->slave_addr & 0x01)
+ return -EINVAL;
+ if (daddr->rq_lun >= 4)
+ return -EINVAL;
+ if (daddr->rs_lun >= 4)
+ return -EINVAL;
+ return 0;
+ }
+
+ if (is_lan_addr(addr)) {
+ if (len < sizeof(struct ipmi_lan_addr))
+ return -EINVAL;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ipmi_validate_addr);
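+
+/*
+ * A minimal sketch (not part of the driver) of how a caller might
+ * build and validate a system-interface address:
+ *
+ *	struct ipmi_system_interface_addr si;
+ *
+ *	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ *	si.channel = IPMI_BMC_CHANNEL;
+ *	si.lun = 0;
+ *	rv = ipmi_validate_addr((struct ipmi_addr *) &si, sizeof(si));
+ *	(rv is 0 here; a bad channel or address type would yield -EINVAL)
+ */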
+
+unsigned int ipmi_addr_length(int addr_type)
+{
+ if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ return sizeof(struct ipmi_system_interface_addr);
+
+ if ((addr_type == IPMI_IPMB_ADDR_TYPE)
+ || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
+ return sizeof(struct ipmi_ipmb_addr);
+
+ if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
+ return sizeof(struct ipmi_ipmb_direct_addr);
+
+ if (addr_type == IPMI_LAN_ADDR_TYPE)
+ return sizeof(struct ipmi_lan_addr);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_addr_length);
+
+static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
+{
+ int rv = 0;
+
+ if (!msg->user) {
+ /* Special handling for NULL users. */
+ if (intf->null_user_handler) {
+ intf->null_user_handler(intf, msg);
+ } else {
+ /* No handler, so give up. */
+ rv = -EINVAL;
+ }
+ ipmi_free_recv_msg(msg);
+	} else if (oops_in_progress) {
+		/*
+		 * If we are running in the panic context, calling the
+		 * receive handler doesn't have much meaning and has a
+		 * deadlock risk, so simply skip it in that case.  The
+		 * user's message count must be dropped before the free,
+		 * since the free may release the last reference to the
+		 * user.
+		 */
+		atomic_dec(&msg->user->nr_msgs);
+		ipmi_free_recv_msg(msg);
+ } else {
+ int index;
+ struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
+
+ if (user) {
+ atomic_dec(&user->nr_msgs);
+ user->handler->ipmi_recv_hndl(msg, user->handler_data);
+ release_ipmi_user(user, index);
+ } else {
+ /* User went away, give up. */
+ ipmi_free_recv_msg(msg);
+ rv = -EINVAL;
+ }
+ }
+
+ return rv;
+}
+
+static void deliver_local_response(struct ipmi_smi *intf,
+ struct ipmi_recv_msg *msg)
+{
+ if (deliver_response(intf, msg))
+ ipmi_inc_stat(intf, unhandled_local_responses);
+ else
+ ipmi_inc_stat(intf, handled_local_responses);
+}
+
+static void deliver_err_response(struct ipmi_smi *intf,
+ struct ipmi_recv_msg *msg, int err)
+{
+ msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ msg->msg_data[0] = err;
+ msg->msg.netfn |= 1; /* Convert to a response. */
+ msg->msg.data_len = 1;
+ msg->msg.data = msg->msg_data;
+ deliver_local_response(intf, msg);
+}
+
+static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
+{
+ unsigned long iflags;
+
+ if (!intf->handlers->set_need_watch)
+ return;
+
+ spin_lock_irqsave(&intf->watch_lock, iflags);
+ if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
+ intf->response_waiters++;
+
+ if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
+ intf->watchdog_waiters++;
+
+ if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
+ intf->command_waiters++;
+
+ if ((intf->last_watch_mask & flags) != flags) {
+ intf->last_watch_mask |= flags;
+ intf->handlers->set_need_watch(intf->send_info,
+ intf->last_watch_mask);
+ }
+ spin_unlock_irqrestore(&intf->watch_lock, iflags);
+}
+
+static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
+{
+ unsigned long iflags;
+
+ if (!intf->handlers->set_need_watch)
+ return;
+
+ spin_lock_irqsave(&intf->watch_lock, iflags);
+ if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
+ intf->response_waiters--;
+
+ if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
+ intf->watchdog_waiters--;
+
+ if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
+ intf->command_waiters--;
+
+ flags = 0;
+ if (intf->response_waiters)
+ flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
+ if (intf->watchdog_waiters)
+ flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
+ if (intf->command_waiters)
+ flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;
+
+ if (intf->last_watch_mask != flags) {
+ intf->last_watch_mask = flags;
+ intf->handlers->set_need_watch(intf->send_info,
+ intf->last_watch_mask);
+ }
+ spin_unlock_irqrestore(&intf->watch_lock, iflags);
+}
+
+/*
+ * Find the next sequence number not being used and add the given
+ * message with the given timeout to the sequence table. This must be
+ * called with the interface's seq_lock held.
+ */
+static int intf_next_seq(struct ipmi_smi *intf,
+ struct ipmi_recv_msg *recv_msg,
+ unsigned long timeout,
+ int retries,
+ int broadcast,
+ unsigned char *seq,
+ long *seqid)
+{
+ int rv = 0;
+ unsigned int i;
+
+ if (timeout == 0)
+ timeout = default_retry_ms;
+ if (retries < 0)
+ retries = default_max_retries;
+
+ for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
+ i = (i+1)%IPMI_IPMB_NUM_SEQ) {
+ if (!intf->seq_table[i].inuse)
+ break;
+ }
+
+ if (!intf->seq_table[i].inuse) {
+ intf->seq_table[i].recv_msg = recv_msg;
+
+ /*
+ * Start with the maximum timeout, when the send response
+ * comes in we will start the real timer.
+ */
+ intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
+ intf->seq_table[i].orig_timeout = timeout;
+ intf->seq_table[i].retries_left = retries;
+ intf->seq_table[i].broadcast = broadcast;
+ intf->seq_table[i].inuse = 1;
+ intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
+ *seq = i;
+ *seqid = intf->seq_table[i].seqid;
+ intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
+ smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
+ need_waiter(intf);
+ } else {
+ rv = -EAGAIN;
+ }
+
+ return rv;
+}
+
+/*
+ * Return the receive message for the given sequence number and
+ * release the sequence number so it can be reused. Some other data
+ * is passed in to be sure the message matches up correctly (to help
+ * guard against messages coming in after their timeout and the
+ * sequence number being reused).
+ */
+static int intf_find_seq(struct ipmi_smi *intf,
+ unsigned char seq,
+ short channel,
+ unsigned char cmd,
+ unsigned char netfn,
+ struct ipmi_addr *addr,
+ struct ipmi_recv_msg **recv_msg)
+{
+ int rv = -ENODEV;
+ unsigned long flags;
+
+ if (seq >= IPMI_IPMB_NUM_SEQ)
+ return -EINVAL;
+
+ spin_lock_irqsave(&intf->seq_lock, flags);
+ if (intf->seq_table[seq].inuse) {
+ struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
+
+ if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
+ && (msg->msg.netfn == netfn)
+ && (ipmi_addr_equal(addr, &msg->addr))) {
+ *recv_msg = msg;
+ intf->seq_table[seq].inuse = 0;
+ smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
+ rv = 0;
+ }
+ }
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+ return rv;
+}
+
+
+/* Start the timer for a specific sequence table entry. */
+static int intf_start_seq_timer(struct ipmi_smi *intf,
+ long msgid)
+{
+ int rv = -ENODEV;
+ unsigned long flags;
+ unsigned char seq;
+ unsigned long seqid;
+
+ GET_SEQ_FROM_MSGID(msgid, seq, seqid);
+
+ spin_lock_irqsave(&intf->seq_lock, flags);
+ /*
+ * We do this verification because the user can be deleted
+ * while a message is outstanding.
+ */
+ if ((intf->seq_table[seq].inuse)
+ && (intf->seq_table[seq].seqid == seqid)) {
+ struct seq_table *ent = &intf->seq_table[seq];
+ ent->timeout = ent->orig_timeout;
+ rv = 0;
+ }
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+ return rv;
+}
+
+/* Got an error for the send message for a specific sequence number. */
+static int intf_err_seq(struct ipmi_smi *intf,
+ long msgid,
+ unsigned int err)
+{
+ int rv = -ENODEV;
+ unsigned long flags;
+ unsigned char seq;
+ unsigned long seqid;
+ struct ipmi_recv_msg *msg = NULL;
+
+ GET_SEQ_FROM_MSGID(msgid, seq, seqid);
+
+ spin_lock_irqsave(&intf->seq_lock, flags);
+ /*
+ * We do this verification because the user can be deleted
+ * while a message is outstanding.
+ */
+ if ((intf->seq_table[seq].inuse)
+ && (intf->seq_table[seq].seqid == seqid)) {
+ struct seq_table *ent = &intf->seq_table[seq];
+
+ ent->inuse = 0;
+ smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
+ msg = ent->recv_msg;
+ rv = 0;
+ }
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+ if (msg)
+ deliver_err_response(intf, msg, err);
+
+ return rv;
+}
+
+static void free_user_work(struct work_struct *work)
+{
+ struct ipmi_user *user = container_of(work, struct ipmi_user,
+ remove_work);
+
+ cleanup_srcu_struct(&user->release_barrier);
+ vfree(user);
+}
+
+int ipmi_create_user(unsigned int if_num,
+ const struct ipmi_user_hndl *handler,
+ void *handler_data,
+ struct ipmi_user **user)
+{
+ unsigned long flags;
+ struct ipmi_user *new_user;
+ int rv, index;
+ struct ipmi_smi *intf;
+
+ /*
+ * There is no module usecount here, because it's not
+ * required. Since this can only be used by and called from
+ * other modules, they will implicitly use this module, and
+ * thus this can't be removed unless the other modules are
+ * removed.
+ */
+
+ if (handler == NULL)
+ return -EINVAL;
+
+ /*
+	 * Make sure the driver is actually initialized; this handles
+ * problems with initialization order.
+ */
+ rv = ipmi_init_msghandler();
+ if (rv)
+ return rv;
+
+ new_user = vzalloc(sizeof(*new_user));
+ if (!new_user)
+ return -ENOMEM;
+
+ index = srcu_read_lock(&ipmi_interfaces_srcu);
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (intf->intf_num == if_num)
+ goto found;
+ }
+ /* Not found, return an error */
+ rv = -EINVAL;
+ goto out_kfree;
+
+ found:
+ if (atomic_add_return(1, &intf->nr_users) > max_users) {
+ rv = -EBUSY;
+ goto out_kfree;
+ }
+
+ INIT_WORK(&new_user->remove_work, free_user_work);
+
+ rv = init_srcu_struct(&new_user->release_barrier);
+ if (rv)
+ goto out_kfree;
+
+ if (!try_module_get(intf->owner)) {
+ rv = -ENODEV;
+ goto out_kfree;
+ }
+
+ /* Note that each existing user holds a refcount to the interface. */
+ kref_get(&intf->refcount);
+
+ atomic_set(&new_user->nr_msgs, 0);
+ kref_init(&new_user->refcount);
+ new_user->handler = handler;
+ new_user->handler_data = handler_data;
+ new_user->intf = intf;
+ new_user->gets_events = false;
+
+ rcu_assign_pointer(new_user->self, new_user);
+ spin_lock_irqsave(&intf->seq_lock, flags);
+ list_add_rcu(&new_user->link, &intf->users);
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+ if (handler->ipmi_watchdog_pretimeout)
+ /* User wants pretimeouts, so make sure to watch for them. */
+ smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
+ srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ *user = new_user;
+ return 0;
+
+out_kfree:
+ atomic_dec(&intf->nr_users);
+ srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ vfree(new_user);
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_create_user);
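+
+/*
+ * A minimal usage sketch for a kernel client (the handler and names
+ * here are hypothetical, not part of this file).  The receive handler
+ * owns the delivered message and must free it:
+ *
+ *	static void my_recv_hndl(struct ipmi_recv_msg *msg, void *data)
+ *	{
+ *		(process the message)
+ *		ipmi_free_recv_msg(msg);
+ *	}
+ *
+ *	static const struct ipmi_user_hndl my_hndl = {
+ *		.ipmi_recv_hndl = my_recv_hndl,
+ *	};
+ *
+ *	struct ipmi_user *user;
+ *	int rv = ipmi_create_user(0, &my_hndl, NULL, &user);
+ *
+ * When finished, the client calls ipmi_destroy_user(user).
+ */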
+
+int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
+{
+ int rv, index;
+ struct ipmi_smi *intf;
+
+ index = srcu_read_lock(&ipmi_interfaces_srcu);
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (intf->intf_num == if_num)
+ goto found;
+ }
+ srcu_read_unlock(&ipmi_interfaces_srcu, index);
+
+ /* Not found, return an error */
+ return -EINVAL;
+
+found:
+ if (!intf->handlers->get_smi_info)
+ rv = -ENOTTY;
+ else
+ rv = intf->handlers->get_smi_info(intf->send_info, data);
+ srcu_read_unlock(&ipmi_interfaces_srcu, index);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_get_smi_info);
+
+static void free_user(struct kref *ref)
+{
+ struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
+
+ /* SRCU cleanup must happen in task context. */
+ queue_work(remove_work_wq, &user->remove_work);
+}
+
+static void _ipmi_destroy_user(struct ipmi_user *user)
+{
+ struct ipmi_smi *intf = user->intf;
+ int i;
+ unsigned long flags;
+ struct cmd_rcvr *rcvr;
+ struct cmd_rcvr *rcvrs = NULL;
+ struct module *owner;
+
+ if (!acquire_ipmi_user(user, &i)) {
+ /*
+ * The user has already been cleaned up, just make sure
+ * nothing is using it and return.
+ */
+ synchronize_srcu(&user->release_barrier);
+ return;
+ }
+
+ rcu_assign_pointer(user->self, NULL);
+ release_ipmi_user(user, i);
+
+ synchronize_srcu(&user->release_barrier);
+
+ if (user->handler->shutdown)
+ user->handler->shutdown(user->handler_data);
+
+ if (user->handler->ipmi_watchdog_pretimeout)
+ smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
+
+ if (user->gets_events)
+ atomic_dec(&intf->event_waiters);
+
+ /* Remove the user from the interface's sequence table. */
+ spin_lock_irqsave(&intf->seq_lock, flags);
+ list_del_rcu(&user->link);
+ atomic_dec(&intf->nr_users);
+
+ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+ if (intf->seq_table[i].inuse
+ && (intf->seq_table[i].recv_msg->user == user)) {
+ intf->seq_table[i].inuse = 0;
+ smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
+ ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
+ }
+ }
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+ /*
+ * Remove the user from the command receiver's table. First
+ * we build a list of everything (not using the standard link,
+ * since other things may be using it till we do
+	 * synchronize_rcu()) then free everything in that list.
+ */
+ mutex_lock(&intf->cmd_rcvrs_mutex);
+ list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
+ lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
+ if (rcvr->user == user) {
+ list_del_rcu(&rcvr->link);
+ rcvr->next = rcvrs;
+ rcvrs = rcvr;
+ }
+ }
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
+ synchronize_rcu();
+ while (rcvrs) {
+ rcvr = rcvrs;
+ rcvrs = rcvr->next;
+ kfree(rcvr);
+ }
+
+ owner = intf->owner;
+ kref_put(&intf->refcount, intf_free);
+ module_put(owner);
+}
+
+int ipmi_destroy_user(struct ipmi_user *user)
+{
+ _ipmi_destroy_user(user);
+
+ kref_put(&user->refcount, free_user);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_destroy_user);
+
+int ipmi_get_version(struct ipmi_user *user,
+ unsigned char *major,
+ unsigned char *minor)
+{
+ struct ipmi_device_id id;
+ int rv, index;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
+ if (!rv) {
+ *major = ipmi_version_major(&id);
+ *minor = ipmi_version_minor(&id);
+ }
+ release_ipmi_user(user, index);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_get_version);
+
+int ipmi_set_my_address(struct ipmi_user *user,
+ unsigned int channel,
+ unsigned char address)
+{
+ int index, rv = 0;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ if (channel >= IPMI_MAX_CHANNELS) {
+ rv = -EINVAL;
+ } else {
+ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
+ user->intf->addrinfo[channel].address = address;
+ }
+ release_ipmi_user(user, index);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_set_my_address);
+
+int ipmi_get_my_address(struct ipmi_user *user,
+ unsigned int channel,
+ unsigned char *address)
+{
+ int index, rv = 0;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ if (channel >= IPMI_MAX_CHANNELS) {
+ rv = -EINVAL;
+ } else {
+ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
+ *address = user->intf->addrinfo[channel].address;
+ }
+ release_ipmi_user(user, index);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_get_my_address);
+
+int ipmi_set_my_LUN(struct ipmi_user *user,
+ unsigned int channel,
+ unsigned char LUN)
+{
+ int index, rv = 0;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ if (channel >= IPMI_MAX_CHANNELS) {
+ rv = -EINVAL;
+ } else {
+ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
+ user->intf->addrinfo[channel].lun = LUN & 0x3;
+ }
+ release_ipmi_user(user, index);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_set_my_LUN);
+
+int ipmi_get_my_LUN(struct ipmi_user *user,
+ unsigned int channel,
+ unsigned char *address)
+{
+ int index, rv = 0;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ if (channel >= IPMI_MAX_CHANNELS) {
+ rv = -EINVAL;
+ } else {
+ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
+ *address = user->intf->addrinfo[channel].lun;
+ }
+ release_ipmi_user(user, index);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_get_my_LUN);
+
+int ipmi_get_maintenance_mode(struct ipmi_user *user)
+{
+ int mode, index;
+ unsigned long flags;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
+ mode = user->intf->maintenance_mode;
+ spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
+ release_ipmi_user(user, index);
+
+ return mode;
+}
+EXPORT_SYMBOL(ipmi_get_maintenance_mode);
+
+static void maintenance_mode_update(struct ipmi_smi *intf)
+{
+ if (intf->handlers->set_maintenance_mode)
+ intf->handlers->set_maintenance_mode(
+ intf->send_info, intf->maintenance_mode_enable);
+}
+
+int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
+{
+ int rv = 0, index;
+ unsigned long flags;
+ struct ipmi_smi *intf = user->intf;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+ if (intf->maintenance_mode != mode) {
+ switch (mode) {
+ case IPMI_MAINTENANCE_MODE_AUTO:
+ intf->maintenance_mode_enable
+ = (intf->auto_maintenance_timeout > 0);
+ break;
+
+ case IPMI_MAINTENANCE_MODE_OFF:
+ intf->maintenance_mode_enable = false;
+ break;
+
+ case IPMI_MAINTENANCE_MODE_ON:
+ intf->maintenance_mode_enable = true;
+ break;
+
+ default:
+ rv = -EINVAL;
+ goto out_unlock;
+ }
+ intf->maintenance_mode = mode;
+
+ maintenance_mode_update(intf);
+ }
+ out_unlock:
+ spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
+ release_ipmi_user(user, index);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_set_maintenance_mode);
+
+int ipmi_set_gets_events(struct ipmi_user *user, bool val)
+{
+ unsigned long flags;
+ struct ipmi_smi *intf = user->intf;
+ struct ipmi_recv_msg *msg, *msg2;
+ struct list_head msgs;
+ int index;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&msgs);
+
+ spin_lock_irqsave(&intf->events_lock, flags);
+ if (user->gets_events == val)
+ goto out;
+
+ user->gets_events = val;
+
+ if (val) {
+ if (atomic_inc_return(&intf->event_waiters) == 1)
+ need_waiter(intf);
+ } else {
+ atomic_dec(&intf->event_waiters);
+ }
+
+ if (intf->delivering_events)
+ /*
+ * Another thread is delivering events for this, so
+ * let it handle any new events.
+ */
+ goto out;
+
+ /* Deliver any queued events. */
+ while (user->gets_events && !list_empty(&intf->waiting_events)) {
+ list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
+ list_move_tail(&msg->link, &msgs);
+ intf->waiting_events_count = 0;
+ if (intf->event_msg_printed) {
+ dev_warn(intf->si_dev, "Event queue no longer full\n");
+ intf->event_msg_printed = 0;
+ }
+
+ intf->delivering_events = 1;
+ spin_unlock_irqrestore(&intf->events_lock, flags);
+
+ list_for_each_entry_safe(msg, msg2, &msgs, link) {
+ msg->user = user;
+ kref_get(&user->refcount);
+ deliver_local_response(intf, msg);
+ }
+
+ spin_lock_irqsave(&intf->events_lock, flags);
+ intf->delivering_events = 0;
+ }
+
+ out:
+ spin_unlock_irqrestore(&intf->events_lock, flags);
+ release_ipmi_user(user, index);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_set_gets_events);
+
+static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
+ unsigned char netfn,
+ unsigned char cmd,
+ unsigned char chan)
+{
+ struct cmd_rcvr *rcvr;
+
+ list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
+ lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
+ if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
+ && (rcvr->chans & (1 << chan)))
+ return rcvr;
+ }
+ return NULL;
+}
+
+static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
+ unsigned char netfn,
+ unsigned char cmd,
+ unsigned int chans)
+{
+ struct cmd_rcvr *rcvr;
+
+ list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
+ lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
+ if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
+ && (rcvr->chans & chans))
+ return 0;
+ }
+ return 1;
+}
+
+int ipmi_register_for_cmd(struct ipmi_user *user,
+ unsigned char netfn,
+ unsigned char cmd,
+ unsigned int chans)
+{
+ struct ipmi_smi *intf = user->intf;
+ struct cmd_rcvr *rcvr;
+ int rv = 0, index;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
+ if (!rcvr) {
+ rv = -ENOMEM;
+ goto out_release;
+ }
+ rcvr->cmd = cmd;
+ rcvr->netfn = netfn;
+ rcvr->chans = chans;
+ rcvr->user = user;
+
+ mutex_lock(&intf->cmd_rcvrs_mutex);
+ /* Make sure the command/netfn is not already registered. */
+ if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
+ rv = -EBUSY;
+ goto out_unlock;
+ }
+
+ smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
+
+ list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
+
+out_unlock:
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
+ if (rv)
+ kfree(rcvr);
+out_release:
+ release_ipmi_user(user, index);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_register_for_cmd);
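+
+/*
+ * Illustrative sketch (values hypothetical): a user asking to receive
+ * a given netfn/cmd pair on channels 0 and 1 passes a bitmask with
+ * one bit per channel:
+ *
+ *	rv = ipmi_register_for_cmd(user, netfn, cmd, (1 << 0) | (1 << 1));
+ *
+ * Registration is exclusive per netfn/cmd/channel; a second user
+ * claiming an already-registered combination gets -EBUSY.
+ */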
+
+int ipmi_unregister_for_cmd(struct ipmi_user *user,
+ unsigned char netfn,
+ unsigned char cmd,
+ unsigned int chans)
+{
+ struct ipmi_smi *intf = user->intf;
+ struct cmd_rcvr *rcvr;
+ struct cmd_rcvr *rcvrs = NULL;
+ int i, rv = -ENOENT, index;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ mutex_lock(&intf->cmd_rcvrs_mutex);
+ for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
+ if (((1 << i) & chans) == 0)
+ continue;
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
+ if (rcvr == NULL)
+ continue;
+ if (rcvr->user == user) {
+ rv = 0;
+ rcvr->chans &= ~chans;
+ if (rcvr->chans == 0) {
+ list_del_rcu(&rcvr->link);
+ rcvr->next = rcvrs;
+ rcvrs = rcvr;
+ }
+ }
+ }
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
+ synchronize_rcu();
+ release_ipmi_user(user, index);
+ while (rcvrs) {
+ smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
+ rcvr = rcvrs;
+ rcvrs = rcvr->next;
+ kfree(rcvr);
+ }
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_unregister_for_cmd);
+
+unsigned char
+ipmb_checksum(unsigned char *data, int size)
+{
+ unsigned char csum = 0;
+
+ for (; size > 0; size--, data++)
+ csum += *data;
+
+ return -csum;
+}
+EXPORT_SYMBOL(ipmb_checksum);
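+
+/*
+ * Worked example: the IPMB checksum is the two's complement of the
+ * byte sum, chosen so that all covered bytes plus the checksum sum to
+ * zero modulo 256.  For data = { 0x20, 0x18 } the sum is 0x38, so the
+ * checksum is 0xc8, and (0x20 + 0x18 + 0xc8) & 0xff == 0.
+ */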
+
+static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_ipmb_addr *ipmb_addr,
+ long msgid,
+ unsigned char ipmb_seq,
+ int broadcast,
+ unsigned char source_address,
+ unsigned char source_lun)
+{
+ int i = broadcast;
+
+ /* Format the IPMB header data. */
+ smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_msg->data[1] = IPMI_SEND_MSG_CMD;
+ smi_msg->data[2] = ipmb_addr->channel;
+ if (broadcast)
+ smi_msg->data[3] = 0;
+ smi_msg->data[i+3] = ipmb_addr->slave_addr;
+ smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
+ smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
+ smi_msg->data[i+6] = source_address;
+ smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
+ smi_msg->data[i+8] = msg->cmd;
+
+ /* Now tack on the data to the message. */
+ if (msg->data_len > 0)
+ memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
+ smi_msg->data_size = msg->data_len + 9;
+
+ /* Now calculate the checksum and tack it on. */
+ smi_msg->data[i+smi_msg->data_size]
+ = ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
+
+ /*
+ * Add on the checksum size and the offset from the
+ * broadcast.
+ */
+ smi_msg->data_size += 1 + i;
+
+ smi_msg->msgid = msgid;
+}
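+
+/*
+ * For reference, the SEND MSG framing built above lays out as follows
+ * in the non-broadcast case (i == 0):
+ *
+ *	data[0]   netfn/LUN for the Send Message command
+ *	data[1]   IPMI_SEND_MSG_CMD
+ *	data[2]   channel
+ *	data[3]   target slave address
+ *	data[4]   target netfn/LUN
+ *	data[5]   checksum over data[3..4]
+ *	data[6]   source slave address
+ *	data[7]   (sequence << 2) | source LUN
+ *	data[8]   command
+ *	data[9..] message data, followed by a trailing checksum
+ *		  over data[6] onward
+ *
+ * A broadcast inserts a zero at data[3], moving the rest of the IPMB
+ * message one byte later.
+ */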
+
+static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_lan_addr *lan_addr,
+ long msgid,
+ unsigned char ipmb_seq,
+ unsigned char source_lun)
+{
+ /* Format the IPMB header data. */
+ smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_msg->data[1] = IPMI_SEND_MSG_CMD;
+ smi_msg->data[2] = lan_addr->channel;
+ smi_msg->data[3] = lan_addr->session_handle;
+ smi_msg->data[4] = lan_addr->remote_SWID;
+ smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
+ smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
+ smi_msg->data[7] = lan_addr->local_SWID;
+ smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
+ smi_msg->data[9] = msg->cmd;
+
+ /* Now tack on the data to the message. */
+ if (msg->data_len > 0)
+ memcpy(&smi_msg->data[10], msg->data, msg->data_len);
+ smi_msg->data_size = msg->data_len + 10;
+
+ /* Now calculate the checksum and tack it on. */
+ smi_msg->data[smi_msg->data_size]
+ = ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);
+
+	/* Add on the checksum size; there is no broadcast offset here. */
+ smi_msg->data_size += 1;
+
+ smi_msg->msgid = msgid;
+}
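+
+/*
+ * The LAN framing above is analogous: data[3] carries the session
+ * handle, data[4] and data[7] carry the remote and local software IDs
+ * in place of slave addresses, there is no broadcast offset, and the
+ * trailing checksum covers data[7] onward.
+ */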
+
+static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *smi_msg,
+ int priority)
+{
+ if (intf->curr_msg) {
+ if (priority > 0)
+ list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
+ else
+ list_add_tail(&smi_msg->link, &intf->xmit_msgs);
+ smi_msg = NULL;
+ } else {
+ intf->curr_msg = smi_msg;
+ }
+
+ return smi_msg;
+}
+
+static void smi_send(struct ipmi_smi *intf,
+ const struct ipmi_smi_handlers *handlers,
+ struct ipmi_smi_msg *smi_msg, int priority)
+{
+ int run_to_completion = intf->run_to_completion;
+ unsigned long flags = 0;
+
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+ smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+
+ if (smi_msg)
+ handlers->sender(intf->send_info, smi_msg);
+}
+
+static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
+{
+ return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
+ && ((msg->cmd == IPMI_COLD_RESET_CMD)
+ || (msg->cmd == IPMI_WARM_RESET_CMD)))
+ || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
+}
+
+static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_smi_msg *smi_msg,
+ struct ipmi_recv_msg *recv_msg,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ struct ipmi_system_interface_addr *smi_addr;
+
+ if (msg->netfn & 1)
+ /* Responses are not allowed to the SMI. */
+ return -EINVAL;
+
+ smi_addr = (struct ipmi_system_interface_addr *) addr;
+ if (smi_addr->lun > 3) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
+
+ if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
+ && ((msg->cmd == IPMI_SEND_MSG_CMD)
+ || (msg->cmd == IPMI_GET_MSG_CMD)
+ || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
+ /*
+ * We don't let the user do these, since we manage
+ * the sequence numbers.
+ */
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ if (is_maintenance_mode_cmd(msg)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+ intf->auto_maintenance_timeout
+ = maintenance_mode_timeout_ms;
+ if (!intf->maintenance_mode
+ && !intf->maintenance_mode_enable) {
+ intf->maintenance_mode_enable = true;
+ maintenance_mode_update(intf);
+ }
+ spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+ flags);
+ }
+
+ if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EMSGSIZE;
+ }
+
+ smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
+ smi_msg->data[1] = msg->cmd;
+ smi_msg->msgid = msgid;
+ smi_msg->user_data = recv_msg;
+ if (msg->data_len > 0)
+ memcpy(&smi_msg->data[2], msg->data, msg->data_len);
+ smi_msg->data_size = msg->data_len + 2;
+ ipmi_inc_stat(intf, sent_local_commands);
+
+ return 0;
+}
+
+static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_smi_msg *smi_msg,
+ struct ipmi_recv_msg *recv_msg,
+ unsigned char source_address,
+ unsigned char source_lun,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ struct ipmi_ipmb_addr *ipmb_addr;
+ unsigned char ipmb_seq;
+ long seqid;
+ int broadcast = 0;
+ struct ipmi_channel *chans;
+ int rv = 0;
+
+ if (addr->channel >= IPMI_MAX_CHANNELS) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ chans = READ_ONCE(intf->channel_list)->c;
+
+ if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
+ /*
+ * Broadcasts add a zero at the beginning of the
+		 * message, but otherwise are the same as an IPMB
+ * address.
+ */
+ addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+ broadcast = 1;
+ retries = 0; /* Don't retry broadcasts. */
+ }
+
+ /*
+ * 9 for the header and 1 for the checksum, plus
+ * possibly one for the broadcast.
+ */
+ if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EMSGSIZE;
+ }
+
+ ipmb_addr = (struct ipmi_ipmb_addr *) addr;
+ if (ipmb_addr->lun > 3) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
+
+ if (recv_msg->msg.netfn & 0x1) {
+ /*
+ * It's a response, so use the user's sequence
+ * from msgid.
+ */
+ ipmi_inc_stat(intf, sent_ipmb_responses);
+ format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
+ msgid, broadcast,
+ source_address, source_lun);
+
+ /*
+ * Save the receive message so we can use it
+ * to deliver the response.
+ */
+ smi_msg->user_data = recv_msg;
+ } else {
+ /* It's a command, so get a sequence for it. */
+ unsigned long flags;
+
+ spin_lock_irqsave(&intf->seq_lock, flags);
+
+ if (is_maintenance_mode_cmd(msg))
+ intf->ipmb_maintenance_mode_timeout =
+ maintenance_mode_timeout_ms;
+
+ if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
+ /* Different default in maintenance mode */
+ retry_time_ms = default_maintenance_retry_ms;
+
+		/*
+		 * Get a sequence number for the message; defaults
+		 * are used for the timeout and retry count if the
+		 * caller did not supply them.
+		 */
+ rv = intf_next_seq(intf,
+ recv_msg,
+ retry_time_ms,
+ retries,
+ broadcast,
+ &ipmb_seq,
+ &seqid);
+ if (rv)
+ /*
+ * We have used up all the sequence numbers,
+ * probably, so abort.
+ */
+ goto out_err;
+
+ ipmi_inc_stat(intf, sent_ipmb_commands);
+
+ /*
+ * Store the sequence number in the message,
+ * so that when the send message response
+ * comes back we can start the timer.
+ */
+ format_ipmb_msg(smi_msg, msg, ipmb_addr,
+ STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
+ ipmb_seq, broadcast,
+ source_address, source_lun);
+
+ /*
+ * Copy the message into the recv message data, so we
+ * can retransmit it later if necessary.
+ */
+ memcpy(recv_msg->msg_data, smi_msg->data,
+ smi_msg->data_size);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = smi_msg->data_size;
+
+ /*
+ * We don't unlock until here, because we need
+ * to copy the completed message into the
+ * recv_msg before we release the lock.
+ * Otherwise, race conditions may bite us. I
+ * know that's pretty paranoid, but I prefer
+ * to be correct.
+ */
+out_err:
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+ }
+
+ return rv;
+}
+
+static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_smi_msg *smi_msg,
+ struct ipmi_recv_msg *recv_msg,
+ unsigned char source_lun)
+{
+ struct ipmi_ipmb_direct_addr *daddr;
+ bool is_cmd = !(recv_msg->msg.netfn & 0x1);
+
+ if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT))
+ return -EAFNOSUPPORT;
+
+ /* Responses must have a completion code. */
+ if (!is_cmd && msg->data_len < 1) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EMSGSIZE;
+ }
+
+ daddr = (struct ipmi_ipmb_direct_addr *) addr;
+ if (daddr->rq_lun > 3 || daddr->rs_lun > 3) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT;
+ smi_msg->msgid = msgid;
+
+ if (is_cmd) {
+ smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun;
+ smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun;
+ } else {
+ smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun;
+ smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun;
+ }
+ smi_msg->data[1] = daddr->slave_addr;
+ smi_msg->data[3] = msg->cmd;
+
+ memcpy(smi_msg->data + 4, msg->data, msg->data_len);
+ smi_msg->data_size = msg->data_len + 4;
+
+ smi_msg->user_data = recv_msg;
+
+ return 0;
+}
+
+static int i_ipmi_req_lan(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_smi_msg *smi_msg,
+ struct ipmi_recv_msg *recv_msg,
+ unsigned char source_lun,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ struct ipmi_lan_addr *lan_addr;
+ unsigned char ipmb_seq;
+ long seqid;
+ struct ipmi_channel *chans;
+ int rv = 0;
+
+ if (addr->channel >= IPMI_MAX_CHANNELS) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ chans = READ_ONCE(intf->channel_list)->c;
+
+ if ((chans[addr->channel].medium
+ != IPMI_CHANNEL_MEDIUM_8023LAN)
+ && (chans[addr->channel].medium
+ != IPMI_CHANNEL_MEDIUM_ASYNC)) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ /* 11 for the header and 1 for the checksum. */
+ if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EMSGSIZE;
+ }
+
+ lan_addr = (struct ipmi_lan_addr *) addr;
+ if (lan_addr->lun > 3) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
+
+ if (recv_msg->msg.netfn & 0x1) {
+ /*
+ * It's a response, so use the user's sequence
+ * from msgid.
+ */
+ ipmi_inc_stat(intf, sent_lan_responses);
+ format_lan_msg(smi_msg, msg, lan_addr, msgid,
+ msgid, source_lun);
+
+ /*
+ * Save the receive message so we can use it
+ * to deliver the response.
+ */
+ smi_msg->user_data = recv_msg;
+ } else {
+ /* It's a command, so get a sequence for it. */
+ unsigned long flags;
+
+ spin_lock_irqsave(&intf->seq_lock, flags);
+
+		/*
+		 * Get a sequence number for the message; defaults
+		 * are used for the timeout and retry count if the
+		 * caller did not supply them.
+		 */
+ rv = intf_next_seq(intf,
+ recv_msg,
+ retry_time_ms,
+ retries,
+ 0,
+ &ipmb_seq,
+ &seqid);
+ if (rv)
+ /*
+ * We have used up all the sequence numbers,
+ * probably, so abort.
+ */
+ goto out_err;
+
+ ipmi_inc_stat(intf, sent_lan_commands);
+
+ /*
+ * Store the sequence number in the message,
+ * so that when the send message response
+ * comes back we can start the timer.
+ */
+ format_lan_msg(smi_msg, msg, lan_addr,
+ STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
+ ipmb_seq, source_lun);
+
+ /*
+ * Copy the message into the recv message data, so we
+ * can retransmit it later if necessary.
+ */
+ memcpy(recv_msg->msg_data, smi_msg->data,
+ smi_msg->data_size);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = smi_msg->data_size;
+
+ /*
+ * We don't unlock until here, because we need
+ * to copy the completed message into the
+ * recv_msg before we release the lock.
+ * Otherwise, race conditions may bite us. I
+ * know that's pretty paranoid, but I prefer
+ * to be correct.
+ */
+out_err:
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+ }
+
+ return rv;
+}
+
+/*
+ * Separate from ipmi_request so that the user does not have to be
+ * supplied in certain circumstances (mainly at panic time). If
+ * messages are supplied, they will be freed, even if an error
+ * occurs.
+ */
+static int i_ipmi_request(struct ipmi_user *user,
+ struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ void *user_msg_data,
+ void *supplied_smi,
+ struct ipmi_recv_msg *supplied_recv,
+ int priority,
+ unsigned char source_address,
+ unsigned char source_lun,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ struct ipmi_smi_msg *smi_msg;
+ struct ipmi_recv_msg *recv_msg;
+ int rv = 0;
+
+ if (user) {
+ if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
+ /* Decrement will happen at the end of the routine. */
+ rv = -EBUSY;
+ goto out;
+ }
+ }
+
+ if (supplied_recv)
+ recv_msg = supplied_recv;
+ else {
+ recv_msg = ipmi_alloc_recv_msg();
+ if (recv_msg == NULL) {
+ rv = -ENOMEM;
+ goto out;
+ }
+ }
+ recv_msg->user_msg_data = user_msg_data;
+
+ if (supplied_smi)
+ smi_msg = supplied_smi;
+ else {
+ smi_msg = ipmi_alloc_smi_msg();
+ if (smi_msg == NULL) {
+ if (!supplied_recv)
+ ipmi_free_recv_msg(recv_msg);
+ rv = -ENOMEM;
+ goto out;
+ }
+ }
+
+ rcu_read_lock();
+ if (intf->in_shutdown) {
+ rv = -ENODEV;
+ goto out_err;
+ }
+
+ recv_msg->user = user;
+ if (user)
+ /* The put happens when the message is freed. */
+ kref_get(&user->refcount);
+ recv_msg->msgid = msgid;
+ /*
+ * Store the message to send in the receive message so timeout
+ * responses can get the proper response data.
+ */
+ recv_msg->msg = *msg;
+
+ if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+ rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
+ recv_msg, retries, retry_time_ms);
+ } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
+ rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
+ source_address, source_lun,
+ retries, retry_time_ms);
+ } else if (is_ipmb_direct_addr(addr)) {
+ rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg,
+ recv_msg, source_lun);
+ } else if (is_lan_addr(addr)) {
+ rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
+ source_lun, retries, retry_time_ms);
+ } else {
+ /* Unknown address type. */
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ rv = -EINVAL;
+ }
+
+ if (rv) {
+out_err:
+ ipmi_free_smi_msg(smi_msg);
+ ipmi_free_recv_msg(recv_msg);
+ } else {
+ dev_dbg(intf->si_dev, "Send: %*ph\n",
+ smi_msg->data_size, smi_msg->data);
+
+ smi_send(intf, intf->handlers, smi_msg, priority);
+ }
+ rcu_read_unlock();
+
+out:
+ if (rv && user)
+ atomic_dec(&user->nr_msgs);
+ return rv;
+}
+
+static int check_addr(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ unsigned char *saddr,
+ unsigned char *lun)
+{
+ if (addr->channel >= IPMI_MAX_CHANNELS)
+ return -EINVAL;
+ addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
+ *lun = intf->addrinfo[addr->channel].lun;
+ *saddr = intf->addrinfo[addr->channel].address;
+ return 0;
+}
+
+int ipmi_request_settime(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ void *user_msg_data,
+ int priority,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ unsigned char saddr = 0, lun = 0;
+ int rv, index;
+
+ if (!user)
+ return -EINVAL;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ rv = check_addr(user->intf, addr, &saddr, &lun);
+ if (!rv)
+ rv = i_ipmi_request(user,
+ user->intf,
+ addr,
+ msgid,
+ msg,
+ user_msg_data,
+ NULL, NULL,
+ priority,
+ saddr,
+ lun,
+ retries,
+ retry_time_ms);
+
+ release_ipmi_user(user, index);
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_request_settime);
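+
+/*
+ * A minimal sketch (hypothetical values) of a user sending a Get
+ * Device ID request to the BMC with the default retry behavior:
+ *
+ *	struct ipmi_system_interface_addr si = {
+ *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
+ *		.channel = IPMI_BMC_CHANNEL,
+ *		.lun = 0,
+ *	};
+ *	struct kernel_ipmi_msg msg = {
+ *		.netfn = IPMI_NETFN_APP_REQUEST,
+ *		.cmd = IPMI_GET_DEVICE_ID_CMD,
+ *		.data = NULL,
+ *		.data_len = 0,
+ *	};
+ *
+ *	rv = ipmi_request_settime(user, (struct ipmi_addr *) &si, 0,
+ *				  &msg, NULL, 0, -1, 0);
+ *
+ * The response is delivered through the user's ipmi_recv_hndl
+ * callback; passing -1 retries and 0 retry_time_ms selects the
+ * defaults.
+ */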
+
+int ipmi_request_supply_msgs(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ void *user_msg_data,
+ void *supplied_smi,
+ struct ipmi_recv_msg *supplied_recv,
+ int priority)
+{
+ unsigned char saddr = 0, lun = 0;
+ int rv, index;
+
+ if (!user)
+ return -EINVAL;
+
+ user = acquire_ipmi_user(user, &index);
+ if (!user)
+ return -ENODEV;
+
+ rv = check_addr(user->intf, addr, &saddr, &lun);
+ if (!rv)
+ rv = i_ipmi_request(user,
+ user->intf,
+ addr,
+ msgid,
+ msg,
+ user_msg_data,
+ supplied_smi,
+ supplied_recv,
+ priority,
+ saddr,
+ lun,
+ -1, 0);
+
+ release_ipmi_user(user, index);
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_request_supply_msgs);
+
+static void bmc_device_id_handler(struct ipmi_smi *intf,
+ struct ipmi_recv_msg *msg)
+{
+ int rv;
+
+ if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
+ || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
+ dev_warn(intf->si_dev,
+ "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
+ msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
+ return;
+ }
+
+	if (msg->msg.data[0]) {
+		dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n",
+			 msg->msg.data[0]);
+		/* Record the completion code so the fetch can be retried. */
+		intf->bmc->cc = msg->msg.data[0];
+		intf->bmc->dyn_id_set = 0;
+		goto out;
+	}
+
+ rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
+ msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
+ if (rv) {
+ dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
+ /* record completion code when error */
+ intf->bmc->cc = msg->msg.data[0];
+ intf->bmc->dyn_id_set = 0;
+ } else {
+ /*
+ * Make sure the id data is available before setting
+ * dyn_id_set.
+ */
+ smp_wmb();
+ intf->bmc->dyn_id_set = 1;
+ }
+out:
+ wake_up(&intf->waitq);
+}
+
+static int
+send_get_device_id_cmd(struct ipmi_smi *intf)
+{
+ struct ipmi_system_interface_addr si;
+ struct kernel_ipmi_msg msg;
+
+ si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si.channel = IPMI_BMC_CHANNEL;
+ si.lun = 0;
+
+ msg.netfn = IPMI_NETFN_APP_REQUEST;
+ msg.cmd = IPMI_GET_DEVICE_ID_CMD;
+ msg.data = NULL;
+ msg.data_len = 0;
+
+ return i_ipmi_request(NULL,
+ intf,
+ (struct ipmi_addr *) &si,
+ 0,
+ &msg,
+ intf,
+ NULL,
+ NULL,
+ 0,
+ intf->addrinfo[0].address,
+ intf->addrinfo[0].lun,
+ -1, 0);
+}
+
+static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
+{
+ int rv;
+ unsigned int retry_count = 0;
+
+ intf->null_user_handler = bmc_device_id_handler;
+
+retry:
+ bmc->cc = 0;
+ bmc->dyn_id_set = 2;
+
+ rv = send_get_device_id_cmd(intf);
+ if (rv)
+ goto out_reset_handler;
+
+ wait_event(intf->waitq, bmc->dyn_id_set != 2);
+
+ if (!bmc->dyn_id_set) {
+ if (bmc->cc != IPMI_CC_NO_ERROR &&
+ ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
+ msleep(500);
+ dev_warn(intf->si_dev,
+ "BMC returned 0x%2.2x, retry get bmc device id\n",
+ bmc->cc);
+ goto retry;
+ }
+
+ rv = -EIO; /* Something went wrong in the fetch. */
+ }
+
+ /* dyn_id_set makes the id data available. */
+ smp_rmb();
+
+out_reset_handler:
+ intf->null_user_handler = NULL;
+
+ return rv;
+}
+
+/*
+ * Fetch the device id for the bmc/interface. You must pass in either
+ * bmc or intf, this code will get the other one. If the data has
+ * been recently fetched, this will just use the cached data. Otherwise
+ * it will run a new fetch.
+ *
+ * Except for the first time this is called (in ipmi_add_smi()),
+ * this will always return good data.
+ */
+static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
+ struct ipmi_device_id *id,
+ bool *guid_set, guid_t *guid, int intf_num)
+{
+ int rv = 0;
+ int prev_dyn_id_set, prev_guid_set;
+ bool intf_set = intf != NULL;
+
+ if (!intf) {
+ mutex_lock(&bmc->dyn_mutex);
+retry_bmc_lock:
+ if (list_empty(&bmc->intfs)) {
+ mutex_unlock(&bmc->dyn_mutex);
+ return -ENOENT;
+ }
+ intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
+ bmc_link);
+ kref_get(&intf->refcount);
+ mutex_unlock(&bmc->dyn_mutex);
+ mutex_lock(&intf->bmc_reg_mutex);
+ mutex_lock(&bmc->dyn_mutex);
+ if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
+ bmc_link)) {
+ mutex_unlock(&intf->bmc_reg_mutex);
+ kref_put(&intf->refcount, intf_free);
+ goto retry_bmc_lock;
+ }
+ } else {
+ mutex_lock(&intf->bmc_reg_mutex);
+ bmc = intf->bmc;
+ mutex_lock(&bmc->dyn_mutex);
+ kref_get(&intf->refcount);
+ }
+
+ /* If we have a valid and current ID, just return that. */
+ if (intf->in_bmc_register ||
+ (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
+ goto out_noprocessing;
+
+ prev_guid_set = bmc->dyn_guid_set;
+ __get_guid(intf);
+
+ prev_dyn_id_set = bmc->dyn_id_set;
+ rv = __get_device_id(intf, bmc);
+ if (rv)
+ goto out;
+
+ /*
+ * The guid, device id, manufacturer id, and product id should
+ * not change on a BMC. If it does we have to do some dancing.
+ */
+ if (!intf->bmc_registered
+ || (!prev_guid_set && bmc->dyn_guid_set)
+ || (!prev_dyn_id_set && bmc->dyn_id_set)
+ || (prev_guid_set && bmc->dyn_guid_set
+ && !guid_equal(&bmc->guid, &bmc->fetch_guid))
+ || bmc->id.device_id != bmc->fetch_id.device_id
+ || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
+ || bmc->id.product_id != bmc->fetch_id.product_id) {
+ struct ipmi_device_id id = bmc->fetch_id;
+ int guid_set = bmc->dyn_guid_set;
+ guid_t guid;
+
+ guid = bmc->fetch_guid;
+ mutex_unlock(&bmc->dyn_mutex);
+
+ __ipmi_bmc_unregister(intf);
+ /* Fill in the temporary BMC for good measure. */
+ intf->bmc->id = id;
+ intf->bmc->dyn_guid_set = guid_set;
+ intf->bmc->guid = guid;
+ if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
+ need_waiter(intf); /* Retry later on an error. */
+ else
+ __scan_channels(intf, &id);
+
+ if (!intf_set) {
+ /*
+ * We weren't given the interface on the
+ * command line, so restart the operation on
+ * the next interface for the BMC.
+ */
+ mutex_unlock(&intf->bmc_reg_mutex);
+ mutex_lock(&bmc->dyn_mutex);
+ goto retry_bmc_lock;
+ }
+
+ /* We have a new BMC, set it up. */
+ bmc = intf->bmc;
+ mutex_lock(&bmc->dyn_mutex);
+ goto out_noprocessing;
+ } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
+ /* Version info changed, scan the channels again. */
+ __scan_channels(intf, &bmc->fetch_id);
+
+ bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
+
+out:
+ if (rv && prev_dyn_id_set) {
+ rv = 0; /* Ignore failures if we have previous data. */
+ bmc->dyn_id_set = prev_dyn_id_set;
+ }
+ if (!rv) {
+ bmc->id = bmc->fetch_id;
+ if (bmc->dyn_guid_set)
+ bmc->guid = bmc->fetch_guid;
+ else if (prev_guid_set)
+ /*
+ * The GUID used to be valid but this fetch failed;
+ * just use the cached value.
+ */
+ bmc->dyn_guid_set = prev_guid_set;
+ }
+out_noprocessing:
+ if (!rv) {
+ if (id)
+ *id = bmc->id;
+
+ if (guid_set)
+ *guid_set = bmc->dyn_guid_set;
+
+ if (guid && bmc->dyn_guid_set)
+ *guid = bmc->guid;
+ }
+
+ mutex_unlock(&bmc->dyn_mutex);
+ mutex_unlock(&intf->bmc_reg_mutex);
+
+ kref_put(&intf->refcount, intf_free);
+ return rv;
+}
+
+static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
+ struct ipmi_device_id *id,
+ bool *guid_set, guid_t *guid)
+{
+ return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
+}
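+
+/*
+ * All of the bmc sysfs attributes below funnel through
+ * bmc_get_device_id(), so a read re-fetches the data from the BMC
+ * once the cached copy has passed its IPMI_DYN_DEV_ID_EXPIRY window.
+ */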
+
+static ssize_t device_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "%u\n", id.device_id);
+}
+static DEVICE_ATTR_RO(device_id);
+
+static ssize_t provides_device_sdrs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7);
+}
+static DEVICE_ATTR_RO(provides_device_sdrs);
+
+static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F);
+}
+static DEVICE_ATTR_RO(revision);
+
+static ssize_t firmware_revision_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1,
+ id.firmware_revision_2);
+}
+static DEVICE_ATTR_RO(firmware_revision);
+
+static ssize_t ipmi_version_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "%u.%u\n",
+ ipmi_version_major(&id),
+ ipmi_version_minor(&id));
+}
+static DEVICE_ATTR_RO(ipmi_version);
+
+static ssize_t add_dev_support_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "0x%02x\n", id.additional_device_support);
+}
+static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
+ NULL);
+
+static ssize_t manufacturer_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id);
+}
+static DEVICE_ATTR_RO(manufacturer_id);
+
+static ssize_t product_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "0x%4.4x\n", id.product_id);
+}
+static DEVICE_ATTR_RO(product_id);
+
+static ssize_t aux_firmware_rev_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ id.aux_firmware_revision[3],
+ id.aux_firmware_revision[2],
+ id.aux_firmware_revision[1],
+ id.aux_firmware_revision[0]);
+}
+static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
+
+static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = to_bmc_device(dev);
+ bool guid_set;
+ guid_t guid;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
+ if (rv)
+ return rv;
+ if (!guid_set)
+ return -ENOENT;
+
+ return sysfs_emit(buf, "%pUl\n", &guid);
+}
+static DEVICE_ATTR_RO(guid);
+
+static struct attribute *bmc_dev_attrs[] = {
+ &dev_attr_device_id.attr,
+ &dev_attr_provides_device_sdrs.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_firmware_revision.attr,
+ &dev_attr_ipmi_version.attr,
+ &dev_attr_additional_device_support.attr,
+ &dev_attr_manufacturer_id.attr,
+ &dev_attr_product_id.attr,
+ &dev_attr_aux_firmware_revision.attr,
+ &dev_attr_guid.attr,
+ NULL
+};
+
+static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct bmc_device *bmc = to_bmc_device(dev);
+ umode_t mode = attr->mode;
+ int rv;
+
+ if (attr == &dev_attr_aux_firmware_revision.attr) {
+ struct ipmi_device_id id;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ return (!rv && id.aux_firmware_revision_set) ? mode : 0;
+ }
+ if (attr == &dev_attr_guid.attr) {
+ bool guid_set;
+
+ rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
+ return (!rv && guid_set) ? mode : 0;
+ }
+ return mode;
+}
+
+static const struct attribute_group bmc_dev_attr_group = {
+ .attrs = bmc_dev_attrs,
+ .is_visible = bmc_dev_attr_is_visible,
+};
+
+static const struct attribute_group *bmc_dev_attr_groups[] = {
+ &bmc_dev_attr_group,
+ NULL
+};
+
+static const struct device_type bmc_device_type = {
+ .groups = bmc_dev_attr_groups,
+};
+
+static int __find_bmc_guid(struct device *dev, const void *data)
+{
+ const guid_t *guid = data;
+ struct bmc_device *bmc;
+ int rv;
+
+ if (dev->type != &bmc_device_type)
+ return 0;
+
+ bmc = to_bmc_device(dev);
+ rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
+ if (rv)
+ rv = kref_get_unless_zero(&bmc->usecount);
+ return rv;
+}
+
+/*
+ * Returns with the bmc's usecount incremented, if it is non-NULL.
+ */
+static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
+ guid_t *guid)
+{
+ struct device *dev;
+ struct bmc_device *bmc = NULL;
+
+ dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
+ if (dev) {
+ bmc = to_bmc_device(dev);
+ put_device(dev);
+ }
+ return bmc;
+}
+
+struct prod_dev_id {
+ unsigned int product_id;
+ unsigned char device_id;
+};
+
+static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
+{
+ const struct prod_dev_id *cid = data;
+ struct bmc_device *bmc;
+ int rv;
+
+ if (dev->type != &bmc_device_type)
+ return 0;
+
+ bmc = to_bmc_device(dev);
+ rv = (bmc->id.product_id == cid->product_id
+ && bmc->id.device_id == cid->device_id);
+ if (rv)
+ rv = kref_get_unless_zero(&bmc->usecount);
+ return rv;
+}
+
+/*
+ * Returns with the bmc's usecount incremented, if it is non-NULL.
+ */
+static struct bmc_device *ipmi_find_bmc_prod_dev_id(
+ struct device_driver *drv,
+ unsigned int product_id, unsigned char device_id)
+{
+ struct prod_dev_id id = {
+ .product_id = product_id,
+ .device_id = device_id,
+ };
+ struct device *dev;
+ struct bmc_device *bmc = NULL;
+
+ dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
+ if (dev) {
+ bmc = to_bmc_device(dev);
+ put_device(dev);
+ }
+ return bmc;
+}
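+
+/*
+ * Both find helpers rely on the driver_find_device() contract: the
+ * match callback returns nonzero to stop the iteration, and here it
+ * also takes the bmc's usecount (kref_get_unless_zero()) so the
+ * device cannot go away between the match and the caller's use.
+ * driver_find_device() itself returns the device with a reference
+ * held, which the callers drop again with put_device().
+ */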
+
+static DEFINE_IDA(ipmi_bmc_ida);
+
+static void
+release_bmc_device(struct device *dev)
+{
+ kfree(to_bmc_device(dev));
+}
+
+static void cleanup_bmc_work(struct work_struct *work)
+{
+ struct bmc_device *bmc = container_of(work, struct bmc_device,
+ remove_work);
+ int id = bmc->pdev.id; /* Unregister overwrites id */
+
+ platform_device_unregister(&bmc->pdev);
+ ida_simple_remove(&ipmi_bmc_ida, id);
+}
+
+static void
+cleanup_bmc_device(struct kref *ref)
+{
+ struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
+
+ /*
+ * Remove the platform device in a work queue to avoid issues
+ * with removing the device attributes while reading a device
+ * attribute.
+ */
+ queue_work(remove_work_wq, &bmc->remove_work);
+}
+
+/*
+ * Must be called with intf->bmc_reg_mutex held.
+ */
+static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
+{
+ struct bmc_device *bmc = intf->bmc;
+
+ if (!intf->bmc_registered)
+ return;
+
+ sysfs_remove_link(&intf->si_dev->kobj, "bmc");
+ sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
+ kfree(intf->my_dev_name);
+ intf->my_dev_name = NULL;
+
+ mutex_lock(&bmc->dyn_mutex);
+ list_del(&intf->bmc_link);
+ mutex_unlock(&bmc->dyn_mutex);
+ intf->bmc = &intf->tmp_bmc;
+ kref_put(&bmc->usecount, cleanup_bmc_device);
+ intf->bmc_registered = false;
+}
+
+static void ipmi_bmc_unregister(struct ipmi_smi *intf)
+{
+ mutex_lock(&intf->bmc_reg_mutex);
+ __ipmi_bmc_unregister(intf);
+ mutex_unlock(&intf->bmc_reg_mutex);
+}
+
+/*
+ * Must be called with intf->bmc_reg_mutex held.
+ */
+static int __ipmi_bmc_register(struct ipmi_smi *intf,
+ struct ipmi_device_id *id,
+ bool guid_set, guid_t *guid, int intf_num)
+{
+ int rv;
+ struct bmc_device *bmc;
+ struct bmc_device *old_bmc;
+
+ /*
+ * platform_device_register() can cause bmc_reg_mutex to
+ * be claimed because of the is_visible functions of
+ * the attributes. Eliminate possible recursion and
+ * release the lock.
+ */
+ intf->in_bmc_register = true;
+ mutex_unlock(&intf->bmc_reg_mutex);
+
+ /*
+ * Try to find whether there is already a bmc_device
+ * struct representing the interfaced BMC.
+ */
+ mutex_lock(&ipmidriver_mutex);
+ if (guid_set)
+ old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
+ else
+ old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
+ id->product_id,
+ id->device_id);
+
+ /*
+ * If there is already a bmc_device, use it; otherwise
+ * allocate and register a new BMC device.
+ */
+ if (old_bmc) {
+ bmc = old_bmc;
+ /*
+ * Note: old_bmc already has usecount incremented by
+ * the BMC find functions.
+ */
+ intf->bmc = old_bmc;
+ mutex_lock(&bmc->dyn_mutex);
+ list_add_tail(&intf->bmc_link, &bmc->intfs);
+ mutex_unlock(&bmc->dyn_mutex);
+
+ dev_info(intf->si_dev,
+ "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+ bmc->id.manufacturer_id,
+ bmc->id.product_id,
+ bmc->id.device_id);
+ } else {
+ bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
+ if (!bmc) {
+ rv = -ENOMEM;
+ goto out;
+ }
+ INIT_LIST_HEAD(&bmc->intfs);
+ mutex_init(&bmc->dyn_mutex);
+ INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
+
+ bmc->id = *id;
+ bmc->dyn_id_set = 1;
+ bmc->dyn_guid_set = guid_set;
+ bmc->guid = *guid;
+ bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
+
+ bmc->pdev.name = "ipmi_bmc";
+
+ rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
+ if (rv < 0) {
+ kfree(bmc);
+ goto out;
+ }
+
+ bmc->pdev.dev.driver = &ipmidriver.driver;
+ bmc->pdev.id = rv;
+ bmc->pdev.dev.release = release_bmc_device;
+ bmc->pdev.dev.type = &bmc_device_type;
+ kref_init(&bmc->usecount);
+
+ intf->bmc = bmc;
+ mutex_lock(&bmc->dyn_mutex);
+ list_add_tail(&intf->bmc_link, &bmc->intfs);
+ mutex_unlock(&bmc->dyn_mutex);
+
+ rv = platform_device_register(&bmc->pdev);
+ if (rv) {
+ dev_err(intf->si_dev,
+ "Unable to register bmc device: %d\n",
+ rv);
+ goto out_list_del;
+ }
+
+ dev_info(intf->si_dev,
+ "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+ bmc->id.manufacturer_id,
+ bmc->id.product_id,
+ bmc->id.device_id);
+ }
+
+ /*
+ * Create symlinks from the system interface device to the bmc
+ * device and back.
+ */
+ rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
+ if (rv) {
+ dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
+ goto out_put_bmc;
+ }
+
+ if (intf_num == -1)
+ intf_num = intf->intf_num;
+ intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
+ if (!intf->my_dev_name) {
+ rv = -ENOMEM;
+ dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
+ rv);
+ goto out_unlink1;
+ }
+
+ rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
+ intf->my_dev_name);
+ if (rv) {
+ dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
+ rv);
+ goto out_free_my_dev_name;
+ }
+
+ intf->bmc_registered = true;
+
+out:
+ mutex_unlock(&ipmidriver_mutex);
+ mutex_lock(&intf->bmc_reg_mutex);
+ intf->in_bmc_register = false;
+ return rv;
+
+out_free_my_dev_name:
+ kfree(intf->my_dev_name);
+ intf->my_dev_name = NULL;
+
+out_unlink1:
+ sysfs_remove_link(&intf->si_dev->kobj, "bmc");
+
+out_put_bmc:
+ mutex_lock(&bmc->dyn_mutex);
+ list_del(&intf->bmc_link);
+ mutex_unlock(&bmc->dyn_mutex);
+ intf->bmc = &intf->tmp_bmc;
+ kref_put(&bmc->usecount, cleanup_bmc_device);
+ goto out;
+
+out_list_del:
+ mutex_lock(&bmc->dyn_mutex);
+ list_del(&intf->bmc_link);
+ mutex_unlock(&bmc->dyn_mutex);
+ intf->bmc = &intf->tmp_bmc;
+ put_device(&bmc->pdev.dev);
+ goto out;
+}
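+
+/*
+ * A note on the locking in __ipmi_bmc_register() above: the
+ * bmc_reg_mutex is dropped for the duration of the registration
+ * because platform_device_register() calls the attribute is_visible
+ * functions, which call bmc_get_device_id(), which takes the same
+ * mutex. The in_bmc_register flag makes __bmc_get_device_id()
+ * return the cached data in that window instead of recursing.
+ */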
+
+static int
+send_guid_cmd(struct ipmi_smi *intf, int chan)
+{
+ struct kernel_ipmi_msg msg;
+ struct ipmi_system_interface_addr si;
+
+ si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si.channel = IPMI_BMC_CHANNEL;
+ si.lun = 0;
+
+ msg.netfn = IPMI_NETFN_APP_REQUEST;
+ msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
+ msg.data = NULL;
+ msg.data_len = 0;
+ return i_ipmi_request(NULL,
+ intf,
+ (struct ipmi_addr *) &si,
+ 0,
+ &msg,
+ intf,
+ NULL,
+ NULL,
+ 0,
+ intf->addrinfo[0].address,
+ intf->addrinfo[0].lun,
+ -1, 0);
+}
+
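+/*
+ * The Get Device GUID response parsed below is just a completion
+ * code followed by the 16 GUID bytes:
+ *
+ *	data[0]    completion code
+ *	data[1-16] GUID
+ */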
+static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
+{
+ struct bmc_device *bmc = intf->bmc;
+
+ if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
+ || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
+ /* Not for me */
+ return;
+
+ if (msg->msg.data[0] != 0) {
+ /* Error from getting the GUID, the BMC doesn't have one. */
+ bmc->dyn_guid_set = 0;
+ goto out;
+ }
+
+ if (msg->msg.data_len < UUID_SIZE + 1) {
+ bmc->dyn_guid_set = 0;
+ dev_warn(intf->si_dev,
+ "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n",
+ msg->msg.data_len, UUID_SIZE + 1);
+ goto out;
+ }
+
+ import_guid(&bmc->fetch_guid, msg->msg.data + 1);
+ /*
+ * Make sure the guid data is available before setting
+ * dyn_guid_set.
+ */
+ smp_wmb();
+ bmc->dyn_guid_set = 1;
+ out:
+ wake_up(&intf->waitq);
+}
+
+static void __get_guid(struct ipmi_smi *intf)
+{
+ int rv;
+ struct bmc_device *bmc = intf->bmc;
+
+ bmc->dyn_guid_set = 2;
+ intf->null_user_handler = guid_handler;
+ rv = send_guid_cmd(intf, 0);
+ if (rv)
+ /* Send failed, no GUID available. */
+ bmc->dyn_guid_set = 0;
+ else
+ wait_event(intf->waitq, bmc->dyn_guid_set != 2);
+
+ /* dyn_guid_set makes the guid data available. */
+ smp_rmb();
+
+ intf->null_user_handler = NULL;
+}
+
+static int
+send_channel_info_cmd(struct ipmi_smi *intf, int chan)
+{
+ struct kernel_ipmi_msg msg;
+ unsigned char data[1];
+ struct ipmi_system_interface_addr si;
+
+ si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si.channel = IPMI_BMC_CHANNEL;
+ si.lun = 0;
+
+ msg.netfn = IPMI_NETFN_APP_REQUEST;
+ msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
+ msg.data = data;
+ msg.data_len = 1;
+ data[0] = chan;
+ return i_ipmi_request(NULL,
+ intf,
+ (struct ipmi_addr *) &si,
+ 0,
+ &msg,
+ intf,
+ NULL,
+ NULL,
+ 0,
+ intf->addrinfo[0].address,
+ intf->addrinfo[0].lun,
+ -1, 0);
+}
+
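+/*
+ * The Get Channel Info response parsed below (only the first four
+ * bytes matter to this code):
+ *
+ *	data[0] completion code
+ *	data[1] channel number
+ *	data[2] channel medium type (low 7 bits)
+ *	data[3] channel protocol type (low 5 bits)
+ */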
+static void
+channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
+{
+ int rv = 0;
+ int ch;
+ unsigned int set = intf->curr_working_cset;
+ struct ipmi_channel *chans;
+
+ if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
+ && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
+ /* It's the one we want */
+ if (msg->msg.data[0] != 0) {
+ /* Got an error from the channel, just go on. */
+ if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
+ /*
+ * If the MC does not support this
+ * command, that is legal. We just
+ * assume it has a single IPMB channel
+ * at zero.
+ */
+ intf->wchannels[set].c[0].medium
+ = IPMI_CHANNEL_MEDIUM_IPMB;
+ intf->wchannels[set].c[0].protocol
+ = IPMI_CHANNEL_PROTOCOL_IPMB;
+
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
+ wake_up(&intf->waitq);
+ goto out;
+ }
+ goto next_channel;
+ }
+ if (msg->msg.data_len < 4) {
+ /* Message not big enough, just go on. */
+ goto next_channel;
+ }
+ ch = intf->curr_channel;
+ chans = intf->wchannels[set].c;
+ chans[ch].medium = msg->msg.data[2] & 0x7f;
+ chans[ch].protocol = msg->msg.data[3] & 0x1f;
+
+ next_channel:
+ intf->curr_channel++;
+ if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
+ wake_up(&intf->waitq);
+ } else {
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
+ rv = send_channel_info_cmd(intf, intf->curr_channel);
+ }
+
+ if (rv) {
+ /* Got an error somehow, just give up. */
+ dev_warn(intf->si_dev,
+ "Error sending channel information for channel %d: %d\n",
+ intf->curr_channel, rv);
+
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
+ wake_up(&intf->waitq);
+ }
+ }
+ out:
+ return;
+}
+
+/*
+ * Must be holding intf->bmc_reg_mutex to call this.
+ */
+static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
+{
+ int rv;
+
+ if (ipmi_version_major(id) > 1
+ || (ipmi_version_major(id) == 1
+ && ipmi_version_minor(id) >= 5)) {
+ unsigned int set;
+
+ /*
+ * Start scanning the channels to see what is
+ * available.
+ */
+ set = !intf->curr_working_cset;
+ intf->curr_working_cset = set;
+ memset(&intf->wchannels[set], 0,
+ sizeof(struct ipmi_channel_set));
+
+ intf->null_user_handler = channel_handler;
+ intf->curr_channel = 0;
+ rv = send_channel_info_cmd(intf, 0);
+ if (rv) {
+ dev_warn(intf->si_dev,
+ "Error sending channel information for channel 0, %d\n",
+ rv);
+ intf->null_user_handler = NULL;
+ return -EIO;
+ }
+
+ /* Wait for the channel info to be read. */
+ wait_event(intf->waitq, intf->channels_ready);
+ intf->null_user_handler = NULL;
+ } else {
+ unsigned int set = intf->curr_working_cset;
+
+ /* Assume a single IPMB channel at zero. */
+ intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
+ intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
+ }
+
+ return 0;
+}
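+
+/*
+ * Note that wchannels is double-buffered: __scan_channels() fills in
+ * the set that is not currently published and only then points
+ * channel_list at it, so readers that do READ_ONCE(intf->channel_list)
+ * always see a fully populated set without taking any lock.
+ */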
+
+static void ipmi_poll(struct ipmi_smi *intf)
+{
+ if (intf->handlers->poll)
+ intf->handlers->poll(intf->send_info);
+ /* In case something came in */
+ handle_new_recv_msgs(intf);
+}
+
+void ipmi_poll_interface(struct ipmi_user *user)
+{
+ ipmi_poll(user->intf);
+}
+EXPORT_SYMBOL(ipmi_poll_interface);
+
+static ssize_t nr_users_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipmi_smi *intf = container_of(attr,
+ struct ipmi_smi, nr_users_devattr);
+
+ return sysfs_emit(buf, "%d\n", atomic_read(&intf->nr_users));
+}
+static DEVICE_ATTR_RO(nr_users);
+
+static ssize_t nr_msgs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipmi_smi *intf = container_of(attr,
+ struct ipmi_smi, nr_msgs_devattr);
+ struct ipmi_user *user;
+ int index;
+ unsigned int count = 0;
+
+ index = srcu_read_lock(&intf->users_srcu);
+ list_for_each_entry_rcu(user, &intf->users, link)
+ count += atomic_read(&user->nr_msgs);
+ srcu_read_unlock(&intf->users_srcu, index);
+
+ return sysfs_emit(buf, "%u\n", count);
+}
+static DEVICE_ATTR_RO(nr_msgs);
+
+static void redo_bmc_reg(struct work_struct *work)
+{
+ struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
+ bmc_reg_work);
+
+ if (!intf->in_shutdown)
+ bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
+
+ kref_put(&intf->refcount, intf_free);
+}
+
+int ipmi_add_smi(struct module *owner,
+ const struct ipmi_smi_handlers *handlers,
+ void *send_info,
+ struct device *si_dev,
+ unsigned char slave_addr)
+{
+ int i, j;
+ int rv;
+ struct ipmi_smi *intf, *tintf;
+ struct list_head *link;
+ struct ipmi_device_id id;
+
+ /*
+ * Make sure the driver is actually initialized; this handles
+ * problems with initialization order.
+ */
+ rv = ipmi_init_msghandler();
+ if (rv)
+ return rv;
+
+ intf = kzalloc(sizeof(*intf), GFP_KERNEL);
+ if (!intf)
+ return -ENOMEM;
+
+ rv = init_srcu_struct(&intf->users_srcu);
+ if (rv) {
+ kfree(intf);
+ return rv;
+ }
+
+ intf->owner = owner;
+ intf->bmc = &intf->tmp_bmc;
+ INIT_LIST_HEAD(&intf->bmc->intfs);
+ mutex_init(&intf->bmc->dyn_mutex);
+ INIT_LIST_HEAD(&intf->bmc_link);
+ mutex_init(&intf->bmc_reg_mutex);
+ intf->intf_num = -1; /* Mark it invalid for now. */
+ kref_init(&intf->refcount);
+ INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
+ intf->si_dev = si_dev;
+ for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
+ intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
+ intf->addrinfo[j].lun = 2;
+ }
+ if (slave_addr != 0)
+ intf->addrinfo[0].address = slave_addr;
+ INIT_LIST_HEAD(&intf->users);
+ atomic_set(&intf->nr_users, 0);
+ intf->handlers = handlers;
+ intf->send_info = send_info;
+ spin_lock_init(&intf->seq_lock);
+ for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
+ intf->seq_table[j].inuse = 0;
+ intf->seq_table[j].seqid = 0;
+ }
+ intf->curr_seq = 0;
+ spin_lock_init(&intf->waiting_rcv_msgs_lock);
+ INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
+ tasklet_setup(&intf->recv_tasklet,
+ smi_recv_tasklet);
+ atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
+ spin_lock_init(&intf->xmit_msgs_lock);
+ INIT_LIST_HEAD(&intf->xmit_msgs);
+ INIT_LIST_HEAD(&intf->hp_xmit_msgs);
+ spin_lock_init(&intf->events_lock);
+ spin_lock_init(&intf->watch_lock);
+ atomic_set(&intf->event_waiters, 0);
+ intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+ INIT_LIST_HEAD(&intf->waiting_events);
+ intf->waiting_events_count = 0;
+ mutex_init(&intf->cmd_rcvrs_mutex);
+ spin_lock_init(&intf->maintenance_mode_lock);
+ INIT_LIST_HEAD(&intf->cmd_rcvrs);
+ init_waitqueue_head(&intf->waitq);
+ for (i = 0; i < IPMI_NUM_STATS; i++)
+ atomic_set(&intf->stats[i], 0);
+
+ mutex_lock(&ipmi_interfaces_mutex);
+ /* Look for a hole in the numbers. */
+ i = 0;
+ link = &ipmi_interfaces;
+ list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
+ ipmi_interfaces_mutex_held()) {
+ if (tintf->intf_num != i) {
+ link = &tintf->link;
+ break;
+ }
+ i++;
+ }
+ /* Add the new interface in numeric order. */
+ if (i == 0)
+ list_add_rcu(&intf->link, &ipmi_interfaces);
+ else
+ list_add_tail_rcu(&intf->link, link);
+
+ rv = handlers->start_processing(send_info, intf);
+ if (rv)
+ goto out_err;
+
+ rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
+ if (rv) {
+ dev_err(si_dev, "Unable to get the device id: %d\n", rv);
+ goto out_err_started;
+ }
+
+ mutex_lock(&intf->bmc_reg_mutex);
+ rv = __scan_channels(intf, &id);
+ mutex_unlock(&intf->bmc_reg_mutex);
+ if (rv)
+ goto out_err_bmc_reg;
+
+ intf->nr_users_devattr = dev_attr_nr_users;
+ sysfs_attr_init(&intf->nr_users_devattr.attr);
+ rv = device_create_file(intf->si_dev, &intf->nr_users_devattr);
+ if (rv)
+ goto out_err_bmc_reg;
+
+ intf->nr_msgs_devattr = dev_attr_nr_msgs;
+ sysfs_attr_init(&intf->nr_msgs_devattr.attr);
+ rv = device_create_file(intf->si_dev, &intf->nr_msgs_devattr);
+ if (rv) {
+ device_remove_file(intf->si_dev, &intf->nr_users_devattr);
+ goto out_err_bmc_reg;
+ }
+
+ /*
+ * Keep memory order straight for RCU readers. Make
+ * sure everything else is committed to memory before
+ * setting intf_num to mark the interface valid.
+ */
+ smp_wmb();
+ intf->intf_num = i;
+ mutex_unlock(&ipmi_interfaces_mutex);
+
+ /* After this point the interface is legal to use. */
+ call_smi_watchers(i, intf->si_dev);
+
+ return 0;
+
+ out_err_bmc_reg:
+ ipmi_bmc_unregister(intf);
+ out_err_started:
+ if (intf->handlers->shutdown)
+ intf->handlers->shutdown(intf->send_info);
+ out_err:
+ list_del_rcu(&intf->link);
+ mutex_unlock(&ipmi_interfaces_mutex);
+ synchronize_srcu(&ipmi_interfaces_srcu);
+ cleanup_srcu_struct(&intf->users_srcu);
+ kref_put(&intf->refcount, intf_free);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_add_smi);
+
+static void deliver_smi_err_response(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg,
+ unsigned char err)
+{
+ int rv;
+
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = err;
+ msg->rsp_size = 3;
+
+ /* This will never requeue, but it may ask us to free the message. */
+ rv = handle_one_recv_msg(intf, msg);
+ if (rv == 0)
+ ipmi_free_smi_msg(msg);
+}
+
+static void cleanup_smi_msgs(struct ipmi_smi *intf)
+{
+ int i;
+ struct seq_table *ent;
+ struct ipmi_smi_msg *msg;
+ struct list_head *entry;
+ struct list_head tmplist;
+
+ /* Clear out our transmit queues and hold the messages. */
+ INIT_LIST_HEAD(&tmplist);
+ list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
+ list_splice_tail(&intf->xmit_msgs, &tmplist);
+
+ /* Current message first, to preserve order */
+ while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
+ /* Wait for the message to clear out. */
+ schedule_timeout(1);
+ }
+
+ /* No need for locks, the interface is down. */
+
+ /*
+ * Return errors for all pending messages in queue and in the
+ * tables waiting for remote responses.
+ */
+ while (!list_empty(&tmplist)) {
+ entry = tmplist.next;
+ list_del(entry);
+ msg = list_entry(entry, struct ipmi_smi_msg, link);
+ deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
+ }
+
+ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+ ent = &intf->seq_table[i];
+ if (!ent->inuse)
+ continue;
+ deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
+ }
+}
+
+void ipmi_unregister_smi(struct ipmi_smi *intf)
+{
+ struct ipmi_smi_watcher *w;
+ int intf_num, index;
+
+ if (!intf)
+ return;
+ intf_num = intf->intf_num;
+ mutex_lock(&ipmi_interfaces_mutex);
+ intf->intf_num = -1;
+ intf->in_shutdown = true;
+ list_del_rcu(&intf->link);
+ mutex_unlock(&ipmi_interfaces_mutex);
+ synchronize_srcu(&ipmi_interfaces_srcu);
+
+ /* At this point no users can be added to the interface. */
+
+ device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
+ device_remove_file(intf->si_dev, &intf->nr_users_devattr);
+
+ /*
+ * Call all the watcher interfaces to tell them that
+ * an interface is going away.
+ */
+ mutex_lock(&smi_watchers_mutex);
+ list_for_each_entry(w, &smi_watchers, link)
+ w->smi_gone(intf_num);
+ mutex_unlock(&smi_watchers_mutex);
+
+ index = srcu_read_lock(&intf->users_srcu);
+ while (!list_empty(&intf->users)) {
+ struct ipmi_user *user =
+ container_of(list_next_rcu(&intf->users),
+ struct ipmi_user, link);
+
+ _ipmi_destroy_user(user);
+ }
+ srcu_read_unlock(&intf->users_srcu, index);
+
+ if (intf->handlers->shutdown)
+ intf->handlers->shutdown(intf->send_info);
+
+ cleanup_smi_msgs(intf);
+
+ ipmi_bmc_unregister(intf);
+
+ cleanup_srcu_struct(&intf->users_srcu);
+ kref_put(&intf->refcount, intf_free);
+}
+EXPORT_SYMBOL(ipmi_unregister_smi);
+
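+/*
+ * Layout of a Get Message response carrying an IPMB response, as
+ * parsed below (offsets match the code; the trailing byte is the
+ * IPMB checksum):
+ *
+ *	rsp[0] netfn << 2 (App response)
+ *	rsp[1] Get Message command
+ *	rsp[2] completion code of the Get Message command itself
+ *	rsp[3] channel (low nibble)
+ *	rsp[4] remote netfn/LUN
+ *	rsp[5] IPMB header checksum
+ *	rsp[6] remote slave address
+ *	rsp[7] sequence number (high 6 bits) / LUN (low 2 bits)
+ *	rsp[8] command
+ *	rsp[9...] response data, starting with its completion code
+ */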
+static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_ipmb_addr ipmb_addr;
+ struct ipmi_recv_msg *recv_msg;
+
+ /*
+ * This is 11, not 10, because the response must contain a
+ * completion code.
+ */
+ if (msg->rsp_size < 11) {
+ /* Message not big enough, just ignore it. */
+ ipmi_inc_stat(intf, invalid_ipmb_responses);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr.slave_addr = msg->rsp[6];
+ ipmb_addr.channel = msg->rsp[3] & 0x0f;
+ ipmb_addr.lun = msg->rsp[7] & 3;
+
+ /*
+ * It's a response from a remote entity. Look up the sequence
+ * number and handle the response.
+ */
+ if (intf_find_seq(intf,
+ msg->rsp[7] >> 2,
+ msg->rsp[3] & 0x0f,
+ msg->rsp[8],
+ (msg->rsp[4] >> 2) & (~1),
+ (struct ipmi_addr *) &ipmb_addr,
+ &recv_msg)) {
+ /*
+ * We were unable to find the sequence number,
+ * so just nuke the message.
+ */
+ ipmi_inc_stat(intf, unhandled_ipmb_responses);
+ return 0;
+ }
+
+ memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
+ /*
+ * The other fields matched, so no need to set them, except
+ * for netfn, which needs to be the response that was
+ * returned, not the request value.
+ */
+ recv_msg->msg.netfn = msg->rsp[4] >> 2;
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 10;
+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_ipmb_responses);
+ else
+ ipmi_inc_stat(intf, handled_ipmb_responses);
+
+ return 0;
+}
+
+static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ unsigned char netfn;
+ unsigned char cmd;
+ unsigned char chan;
+ struct ipmi_user *user = NULL;
+ struct ipmi_ipmb_addr *ipmb_addr;
+ struct ipmi_recv_msg *recv_msg;
+
+ if (msg->rsp_size < 10) {
+ /* Message not big enough, just ignore it. */
+ ipmi_inc_stat(intf, invalid_commands);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ netfn = msg->rsp[4] >> 2;
+ cmd = msg->rsp[8];
+ chan = msg->rsp[3] & 0xf;
+
+ rcu_read_lock();
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+ if (rcvr) {
+ user = rcvr->user;
+ kref_get(&user->refcount);
+ } else
+ user = NULL;
+ rcu_read_unlock();
+
+ if (user == NULL) {
+ /* We didn't find a user, deliver an error response. */
+ ipmi_inc_stat(intf, unhandled_commands);
+
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_SEND_MSG_CMD;
+ msg->data[2] = msg->rsp[3];
+ msg->data[3] = msg->rsp[6];
+ msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
+ msg->data[5] = ipmb_checksum(&msg->data[3], 2);
+ msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
+ /* rqseq/lun */
+ msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
+ msg->data[8] = msg->rsp[8]; /* cmd */
+ msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
+ msg->data[10] = ipmb_checksum(&msg->data[6], 4);
+ msg->data_size = 11;
+
+ dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
+ msg->data_size, msg->data);
+
+ rcu_read_lock();
+ if (!intf->in_shutdown) {
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value
+ * that causes it to not be freed or
+ * queued.
+ */
+ rv = -1;
+ }
+ rcu_read_unlock();
+ } else {
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ kref_put(&user->refcount, free_user);
+ } else {
+ /* Extract the source address from the data. */
+ ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
+ ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr->slave_addr = msg->rsp[6];
+ ipmb_addr->lun = msg->rsp[7] & 3;
+ ipmb_addr->channel = msg->rsp[3] & 0xf;
+
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->user = user;
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[7] >> 2;
+ recv_msg->msg.netfn = msg->rsp[4] >> 2;
+ recv_msg->msg.cmd = msg->rsp[8];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ /*
+ * We chop off 10, not 9 bytes because the checksum
+ * at the end also needs to be removed.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 10;
+ memcpy(recv_msg->msg_data, &msg->rsp[9],
+ msg->rsp_size - 10);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ }
+ }
+
+ return rv;
+}
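+
+/*
+ * A note on the error response constructed above: it is an IPMB
+ * response with completion code IPMI_INVALID_CMD_COMPLETION_CODE,
+ * wrapped in a Send Message command back out the channel it came
+ * from. ipmb_checksum() (defined earlier in this file) produces the
+ * standard IPMI 2's-complement checksum, i.e. the byte that makes
+ * the covered bytes sum to zero modulo 256.
+ */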
+
+static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ struct ipmi_user *user = NULL;
+ struct ipmi_ipmb_direct_addr *daddr;
+ struct ipmi_recv_msg *recv_msg;
+ unsigned char netfn = msg->rsp[0] >> 2;
+ unsigned char cmd = msg->rsp[3];
+
+ rcu_read_lock();
+ /* We always use channel 0 for direct messages. */
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
+ if (rcvr) {
+ user = rcvr->user;
+ kref_get(&user->refcount);
+ } else
+ user = NULL;
+ rcu_read_unlock();
+
+ if (user == NULL) {
+ /* We didn't find a user, deliver an error response. */
+ ipmi_inc_stat(intf, unhandled_commands);
+
+ msg->data[0] = (netfn + 1) << 2;
+ msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */
+ msg->data[1] = msg->rsp[1]; /* Addr */
+ msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */
+ msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */
+ msg->data[3] = cmd;
+ msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
+ msg->data_size = 5;
+
+ rcu_read_lock();
+ if (!intf->in_shutdown) {
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value
+ * that causes it to not be freed or
+ * queued.
+ */
+ rv = -1;
+ }
+ rcu_read_unlock();
+ } else {
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ kref_put(&user->refcount, free_user);
+ } else {
+ /* Extract the source address from the data. */
+ daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
+ daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+ daddr->channel = 0;
+ daddr->slave_addr = msg->rsp[1];
+ daddr->rs_lun = msg->rsp[0] & 3;
+ daddr->rq_lun = msg->rsp[2] & 3;
+
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->user = user;
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = (msg->rsp[2] >> 2);
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[3];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, msg->rsp + 4,
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ }
+ }
+
+ return rv;
+}
+
+static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_recv_msg *recv_msg;
+ struct ipmi_ipmb_direct_addr *daddr;
+
+ recv_msg = msg->user_data;
+ if (recv_msg == NULL) {
+ dev_warn(intf->si_dev,
+ "IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
+ return 0;
+ }
+
+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ recv_msg->msgid = msg->msgid;
+ daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr;
+ daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+ daddr->channel = 0;
+ daddr->slave_addr = msg->rsp[1];
+ daddr->rq_lun = msg->rsp[0] & 3;
+ daddr->rs_lun = msg->rsp[2] & 3;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[3];
+ memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ deliver_local_response(intf, recv_msg);
+
+ return 0;
+}
+
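+/*
+ * Layout of a Get Message response carrying a LAN response, as
+ * parsed below (offsets match the code):
+ *
+ *	rsp[0] netfn << 2 (App response)
+ *	rsp[1] Get Message command
+ *	rsp[2] completion code of the Get Message command itself
+ *	rsp[3] channel (low nibble) / privilege (high nibble)
+ *	rsp[4] session handle
+ *	rsp[5] local SWID
+ *	rsp[6] remote netfn/LUN
+ *	rsp[7] checksum
+ *	rsp[8] remote SWID
+ *	rsp[9] sequence number (high 6 bits) / LUN (low 2 bits)
+ *	rsp[10] command
+ *	rsp[11...] response data, then a trailing checksum
+ */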
+static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_lan_addr lan_addr;
+ struct ipmi_recv_msg *recv_msg;
+
+ /*
+ * This is 13, not 12, because the response must contain a
+ * completion code.
+ */
+ if (msg->rsp_size < 13) {
+ /* Message not big enough, just ignore it. */
+ ipmi_inc_stat(intf, invalid_lan_responses);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
+ lan_addr.session_handle = msg->rsp[4];
+ lan_addr.remote_SWID = msg->rsp[8];
+ lan_addr.local_SWID = msg->rsp[5];
+ lan_addr.channel = msg->rsp[3] & 0x0f;
+ lan_addr.privilege = msg->rsp[3] >> 4;
+ lan_addr.lun = msg->rsp[9] & 3;
+
+ /*
+ * It's a response from a remote entity. Look up the sequence
+ * number and handle the response.
+ */
+ if (intf_find_seq(intf,
+ msg->rsp[9] >> 2,
+ msg->rsp[3] & 0x0f,
+ msg->rsp[10],
+ (msg->rsp[6] >> 2) & (~1),
+ (struct ipmi_addr *) &lan_addr,
+ &recv_msg)) {
+ /*
+ * We were unable to find the sequence number,
+ * so just nuke the message.
+ */
+ ipmi_inc_stat(intf, unhandled_lan_responses);
+ return 0;
+ }
+
+ memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
+ /*
+ * The other fields matched, so no need to set them, except
+ * for netfn, which needs to be the response that was
+ * returned, not the request value.
+ */
+ recv_msg->msg.netfn = msg->rsp[6] >> 2;
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 12;
+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_lan_responses);
+ else
+ ipmi_inc_stat(intf, handled_lan_responses);
+
+ return 0;
+}
+
+static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ unsigned char netfn;
+ unsigned char cmd;
+ unsigned char chan;
+ struct ipmi_user *user = NULL;
+ struct ipmi_lan_addr *lan_addr;
+ struct ipmi_recv_msg *recv_msg;
+
+ if (msg->rsp_size < 12) {
+ /* Message not big enough, just ignore it. */
+ ipmi_inc_stat(intf, invalid_commands);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ netfn = msg->rsp[6] >> 2;
+ cmd = msg->rsp[10];
+ chan = msg->rsp[3] & 0xf;
+
+ rcu_read_lock();
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+ if (rcvr) {
+ user = rcvr->user;
+ kref_get(&user->refcount);
+ } else
+ user = NULL;
+ rcu_read_unlock();
+
+ if (user == NULL) {
+ /* We didn't find a user, just give up. */
+ ipmi_inc_stat(intf, unhandled_commands);
+
+ /*
+ * Don't do anything with these messages, just allow
+ * them to be freed.
+ */
+ rv = 0;
+ } else {
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling later.
+ */
+ rv = 1;
+ kref_put(&user->refcount, free_user);
+ } else {
+ /* Extract the source address from the data. */
+ lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+ lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
+ lan_addr->session_handle = msg->rsp[4];
+ lan_addr->remote_SWID = msg->rsp[8];
+ lan_addr->local_SWID = msg->rsp[5];
+ lan_addr->lun = msg->rsp[9] & 3;
+ lan_addr->channel = msg->rsp[3] & 0xf;
+ lan_addr->privilege = msg->rsp[3] >> 4;
+
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->user = user;
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[9] >> 2;
+ recv_msg->msg.netfn = msg->rsp[6] >> 2;
+ recv_msg->msg.cmd = msg->rsp[10];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ /*
+ * We chop off 12, not 11 bytes because the checksum
+ * at the end also needs to be removed.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 12;
+ memcpy(recv_msg->msg_data, &msg->rsp[11],
+ msg->rsp_size - 12);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ }
+ }
+
+ return rv;
+}
+
+/*
+ * This routine will handle "Get Message" command responses with
+ * channels that use an OEM Medium. The message format belongs to
+ * the OEM. See IPMI 2.0 specification, Chapter 6 and
+ * Chapter 22, sections 22.6 and 22.24 for more details.
+ */
+static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ unsigned char netfn;
+ unsigned char cmd;
+ unsigned char chan;
+ struct ipmi_user *user = NULL;
+ struct ipmi_system_interface_addr *smi_addr;
+ struct ipmi_recv_msg *recv_msg;
+
+ /*
+ * We expect the OEM SW to perform error checking,
+ * so we just do some basic sanity checks.
+ */
+ if (msg->rsp_size < 4) {
+ /* Message not big enough, just ignore it. */
+ ipmi_inc_stat(intf, invalid_commands);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ /*
+ * This is an OEM message, so the OEM needs to know how
+ * to handle it. We do no interpretation.
+ */
+ netfn = msg->rsp[0] >> 2;
+ cmd = msg->rsp[1];
+ chan = msg->rsp[3] & 0xf;
+
+ rcu_read_lock();
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+ if (rcvr) {
+ user = rcvr->user;
+ kref_get(&user->refcount);
+ } else
+ user = NULL;
+ rcu_read_unlock();
+
+ if (user == NULL) {
+ /* We didn't find a user, just give up. */
+ ipmi_inc_stat(intf, unhandled_commands);
+
+ /*
+ * Don't do anything with these messages, just allow
+ * them to be freed.
+ */
+
+ rv = 0;
+ } else {
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ kref_put(&user->refcount, free_user);
+ } else {
+ /*
+ * OEM Messages are expected to be delivered via
+ * the system interface to SMS software. We may
+ * need to revisit this depending on OEM
+ * requirements.
+ */
+ smi_addr = ((struct ipmi_system_interface_addr *)
+ &recv_msg->addr);
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+
+ recv_msg->user = user;
+ recv_msg->user_msg_data = NULL;
+ recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ /*
+ * The message starts at byte 4, which follows the
+ * channel byte in the "Get Message" response.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, &msg->rsp[4],
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ }
+ }
+
+ return rv;
+}
+
+static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_system_interface_addr *smi_addr;
+
+ recv_msg->msgid = 0;
+ smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+ recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 3;
+}
+
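+/*
+ * Deliver an asynchronous event. Every user that has gets_events set
+ * receives its own copy of the message; if nobody is listening, the
+ * event is queued (up to MAX_EVENTS_IN_QUEUE) for a future listener,
+ * and beyond that events are dropped with a one-time warning.
+ */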
+static int handle_read_event_rsp(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_recv_msg *recv_msg, *recv_msg2;
+ struct list_head msgs;
+ struct ipmi_user *user;
+ int rv = 0, deliver_count = 0, index;
+ unsigned long flags;
+
+ if (msg->rsp_size < 19) {
+ /* Message is too small to be an IPMB event. */
+ ipmi_inc_stat(intf, invalid_events);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the event, just ignore it. */
+ return 0;
+ }
+
+ INIT_LIST_HEAD(&msgs);
+
+ spin_lock_irqsave(&intf->events_lock, flags);
+
+ ipmi_inc_stat(intf, events);
+
+ /*
+ * Allocate and fill in one message for every user that is
+ * getting events.
+ */
+ index = srcu_read_lock(&intf->users_srcu);
+ list_for_each_entry_rcu(user, &intf->users, link) {
+ if (!user->gets_events)
+ continue;
+
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ rcu_read_unlock();
+ list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
+ link) {
+ list_del(&recv_msg->link);
+ ipmi_free_recv_msg(recv_msg);
+ }
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ goto out;
+ }
+
+ deliver_count++;
+
+ copy_event_into_recv_msg(recv_msg, msg);
+ recv_msg->user = user;
+ kref_get(&user->refcount);
+ list_add_tail(&recv_msg->link, &msgs);
+ }
+ srcu_read_unlock(&intf->users_srcu, index);
+
+ if (deliver_count) {
+ /* Now deliver all the messages. */
+ list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
+ list_del(&recv_msg->link);
+ deliver_local_response(intf, recv_msg);
+ }
+ } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
+ /*
+ * No one to receive the message, put it in queue if there's
+ * not already too many things in the queue.
+ */
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ goto out;
+ }
+
+ copy_event_into_recv_msg(recv_msg, msg);
+ list_add_tail(&recv_msg->link, &intf->waiting_events);
+ intf->waiting_events_count++;
+ } else if (!intf->event_msg_printed) {
+ /*
+ * There's too many things in the queue, discard this
+ * message.
+ */
+ dev_warn(intf->si_dev,
+ "Event queue full, discarding incoming events\n");
+ intf->event_msg_printed = 1;
+ }
+
+ out:
+ spin_unlock_irqrestore(&intf->events_lock, flags);
+
+ return rv;
+}
+
+static int handle_bmc_rsp(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_recv_msg *recv_msg;
+ struct ipmi_system_interface_addr *smi_addr;
+
+ recv_msg = msg->user_data;
+ if (recv_msg == NULL) {
+ dev_warn(intf->si_dev,
+ "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
+ return 0;
+ }
+
+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ recv_msg->msgid = msg->msgid;
+ smi_addr = ((struct ipmi_system_interface_addr *)
+ &recv_msg->addr);
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 2;
+ deliver_local_response(intf, recv_msg);
+
+ return 0;
+}
+
+/*
+ * Handle a received message. Return 1 if the message should be requeued,
+ * 0 if the message should be freed, or -1 if the message should not
+ * be freed or requeued.
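+ *
+ * Dispatch overview: local responses to a Send Message start (or
+ * error out) the sequence-number timers; Get Message responses are
+ * routed by channel medium (IPMB, LAN, or OEM) and by whether they
+ * carry a command or a response; Read Event Message Buffer responses
+ * become async events; anything else is a response from the local
+ * BMC.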
+ */
+static int handle_one_recv_msg(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ int requeue = 0;
+ int chan;
+ unsigned char cc;
+ bool is_cmd = !((msg->rsp[0] >> 2) & 1);
+
+ dev_dbg(intf->si_dev, "Recv: %*ph\n", msg->rsp_size, msg->rsp);
+
+ if (msg->rsp_size < 2) {
+ /* Message is too small to be correct. */
+ dev_warn(intf->si_dev,
+ "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
+ (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
+
+return_unspecified:
+ /* Generate an error response for the message. */
+ msg->rsp[0] = msg->data[0] | (1 << 2);
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+ msg->rsp_size = 3;
+ } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ /* Commands must have at least 4 bytes, responses 5. */
+ if (is_cmd && (msg->rsp_size < 4)) {
+ ipmi_inc_stat(intf, invalid_commands);
+ goto out;
+ }
+ if (!is_cmd && (msg->rsp_size < 5)) {
+ ipmi_inc_stat(intf, invalid_ipmb_responses);
+ /* Construct a valid error response. */
+ msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
+ msg->rsp[0] |= (1 << 2); /* Make it a response */
+ msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
+ msg->rsp[1] = msg->data[1]; /* Addr */
+ msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
+ msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
+ msg->rsp[3] = msg->data[3]; /* Cmd */
+ msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
+ msg->rsp_size = 5;
+ }
+ } else if ((msg->data_size >= 2)
+ && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
+ && (msg->data[1] == IPMI_SEND_MSG_CMD)
+ && (msg->user_data == NULL)) {
+
+ if (intf->in_shutdown)
+ goto out;
+
+ /*
+ * This is the local response to a command send, start
+ * the timer for these. The user_data will not be
+ * NULL if this is a response send, and we will let
+ * response sends just go through.
+ */
+
+ /*
+ * Check for errors, if we get certain errors (ones
+ * that mean basically we can try again later), we
+ * ignore them and start the timer. Otherwise we
+ * report the error immediately.
+ */
+ if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
+ && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
+ && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
+ && (msg->rsp[2] != IPMI_BUS_ERR)
+ && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
+ int ch = msg->rsp[3] & 0xf;
+ struct ipmi_channel *chans;
+
+ /* Got an error sending the message, handle it. */
+
+ chans = READ_ONCE(intf->channel_list)->c;
+ if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
+ || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
+ ipmi_inc_stat(intf, sent_lan_command_errs);
+ else
+ ipmi_inc_stat(intf, sent_ipmb_command_errs);
+ intf_err_seq(intf, msg->msgid, msg->rsp[2]);
+ } else
+ /* The message was sent, start the timer. */
+ intf_start_seq_timer(intf, msg->msgid);
+ requeue = 0;
+ goto out;
+ } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
+ || (msg->rsp[1] != msg->data[1])) {
+ /*
+ * The NetFN and Command in the response are not even
+ * marginally correct.
+ */
+ dev_warn(intf->si_dev,
+ "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
+ (msg->data[0] >> 2) | 1, msg->data[1],
+ msg->rsp[0] >> 2, msg->rsp[1]);
+
+ goto return_unspecified;
+ }
+
+ if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ if ((msg->data[0] >> 2) & 1) {
+ /* It's a response to a sent response. */
+ chan = 0;
+ cc = msg->rsp[4];
+ goto process_response_response;
+ }
+ if (is_cmd)
+ requeue = handle_ipmb_direct_rcv_cmd(intf, msg);
+ else
+ requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
+ } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+ && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
+ && (msg->user_data != NULL)) {
+ /*
+ * It's a response to a response we sent. For this we
+ * deliver a send message response to the user.
+ */
+ struct ipmi_recv_msg *recv_msg;
+
+ chan = msg->data[2] & 0x0f;
+ if (chan >= IPMI_MAX_CHANNELS)
+ /* Invalid channel number */
+ goto out;
+ cc = msg->rsp[2];
+
+process_response_response:
+ recv_msg = msg->user_data;
+
+ requeue = 0;
+ if (!recv_msg)
+ goto out;
+
+ recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg_data[0] = cc;
+ recv_msg->msg.data_len = 1;
+ deliver_local_response(intf, recv_msg);
+ } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+ && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
+ struct ipmi_channel *chans;
+
+ /* It's from the receive queue. */
+ chan = msg->rsp[3] & 0xf;
+ if (chan >= IPMI_MAX_CHANNELS) {
+ /* Invalid channel number */
+ requeue = 0;
+ goto out;
+ }
+
+ /*
+ * We need to make sure the channels have been initialized.
+ * The channel_handler routine sets "channels_ready" once
+ * all the channels for this interface have been scanned,
+ * so check that before handling the message.
+ */
+ if (!intf->channels_ready) {
+ requeue = 0; /* Throw the message away */
+ goto out;
+ }
+
+ chans = READ_ONCE(intf->channel_list)->c;
+
+ switch (chans[chan].medium) {
+ case IPMI_CHANNEL_MEDIUM_IPMB:
+ if (msg->rsp[4] & 0x04) {
+ /*
+ * It's a response, so find the
+ * requesting message and send it up.
+ */
+ requeue = handle_ipmb_get_msg_rsp(intf, msg);
+ } else {
+ /*
+ * It's a command to the SMS from some other
+ * entity. Handle that.
+ */
+ requeue = handle_ipmb_get_msg_cmd(intf, msg);
+ }
+ break;
+
+ case IPMI_CHANNEL_MEDIUM_8023LAN:
+ case IPMI_CHANNEL_MEDIUM_ASYNC:
+ if (msg->rsp[6] & 0x04) {
+ /*
+ * It's a response, so find the
+ * requesting message and send it up.
+ */
+ requeue = handle_lan_get_msg_rsp(intf, msg);
+ } else {
+ /*
+ * It's a command to the SMS from some other
+ * entity. Handle that.
+ */
+ requeue = handle_lan_get_msg_cmd(intf, msg);
+ }
+ break;
+
+ default:
+ /*
+ * Check for OEM channels. Clients had better
+ * register for these commands.
+ */
+ if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
+ && (chans[chan].medium
+ <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
+ requeue = handle_oem_get_msg_cmd(intf, msg);
+ } else {
+ /*
+ * We don't handle the channel type, so just
+ * free the message.
+ */
+ requeue = 0;
+ }
+ }
+
+ } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+ && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
+ /* It's an asynchronous event. */
+ requeue = handle_read_event_rsp(intf, msg);
+ } else {
+ /* It's a response from the local BMC. */
+ requeue = handle_bmc_rsp(intf, msg);
+ }
+
+ out:
+ return requeue;
+}
+
+/*
+ * If there are messages in the queue or pretimeouts, handle them.
+ */
+static void handle_new_recv_msgs(struct ipmi_smi *intf)
+{
+ struct ipmi_smi_msg *smi_msg;
+ unsigned long flags = 0;
+ int rv;
+ int run_to_completion = intf->run_to_completion;
+
+ /* See if any waiting messages need to be processed. */
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+ while (!list_empty(&intf->waiting_rcv_msgs)) {
+ smi_msg = list_entry(intf->waiting_rcv_msgs.next,
+ struct ipmi_smi_msg, link);
+ list_del(&smi_msg->link);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+ flags);
+ rv = handle_one_recv_msg(intf, smi_msg);
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+ if (rv > 0) {
+ /*
+ * To preserve message order, quit if we
+ * can't handle a message. Add the message
+ * back at the head; this is safe because this
+ * tasklet is the only thing that pulls the
+ * messages.
+ */
+ list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
+ break;
+ } else {
+ if (rv == 0)
+ /* Message handled */
+ ipmi_free_smi_msg(smi_msg);
+ /* If rv < 0, fatal error, del but don't free. */
+ }
+ }
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
+
+ /*
+ * If the pretimeout count is non-zero, decrement it by one and
+ * deliver pretimeouts to all the users.
+ */
+ if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
+ struct ipmi_user *user;
+ int index;
+
+ index = srcu_read_lock(&intf->users_srcu);
+ list_for_each_entry_rcu(user, &intf->users, link) {
+ if (user->handler->ipmi_watchdog_pretimeout)
+ user->handler->ipmi_watchdog_pretimeout(
+ user->handler_data);
+ }
+ srcu_read_unlock(&intf->users_srcu, index);
+ }
+}
+
+static void smi_recv_tasklet(struct tasklet_struct *t)
+{
+ unsigned long flags = 0; /* keep us warning-free. */
+ struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
+ int run_to_completion = intf->run_to_completion;
+ struct ipmi_smi_msg *newmsg = NULL;
+
+ /*
+ * Start the next message if available.
+ *
+ * Do this here, not in the actual receiver, because we may deadlock
+ * because the lower layer is allowed to hold locks while calling
+ * message delivery.
+ */
+
+ rcu_read_lock();
+
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+ if (intf->curr_msg == NULL && !intf->in_shutdown) {
+ struct list_head *entry = NULL;
+
+ /* Pick the high priority queue first. */
+ if (!list_empty(&intf->hp_xmit_msgs))
+ entry = intf->hp_xmit_msgs.next;
+ else if (!list_empty(&intf->xmit_msgs))
+ entry = intf->xmit_msgs.next;
+
+ if (entry) {
+ list_del(entry);
+ newmsg = list_entry(entry, struct ipmi_smi_msg, link);
+ intf->curr_msg = newmsg;
+ }
+ }
+
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+ if (newmsg)
+ intf->handlers->sender(intf->send_info, newmsg);
+
+ rcu_read_unlock();
+
+ handle_new_recv_msgs(intf);
+}
+
+/* Handle a new message from the lower layer. */
+void ipmi_smi_msg_received(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ unsigned long flags = 0; /* keep us warning-free. */
+ int run_to_completion = intf->run_to_completion;
+
+ /*
+ * To preserve message order, we keep a queue and deliver from
+ * a tasklet.
+ */
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+ list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+ flags);
+
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+ /*
+ * We can get an asynchronous event or receive message in addition
+ * to commands we send.
+ */
+ if (msg == intf->curr_msg)
+ intf->curr_msg = NULL;
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+
+ if (run_to_completion)
+ smi_recv_tasklet(&intf->recv_tasklet);
+ else
+ tasklet_schedule(&intf->recv_tasklet);
+}
+EXPORT_SYMBOL(ipmi_smi_msg_received);
+
+void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
+{
+ if (intf->in_shutdown)
+ return;
+
+ atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
+ tasklet_schedule(&intf->recv_tasklet);
+}
+EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
+
+static struct ipmi_smi_msg *
+smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
+ unsigned char seq, long seqid)
+{
+ struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
+ if (!smi_msg)
+ /*
+		 * If we can't allocate the message, then just return; we
+		 * get 4 retries, so this should be OK.
+ */
+ return NULL;
+
+ memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
+ smi_msg->data_size = recv_msg->msg.data_len;
+ smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
+
+ dev_dbg(intf->si_dev, "Resend: %*ph\n",
+ smi_msg->data_size, smi_msg->data);
+
+ return smi_msg;
+}
+
+static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
+ struct list_head *timeouts,
+ unsigned long timeout_period,
+ int slot, unsigned long *flags,
+ bool *need_timer)
+{
+ struct ipmi_recv_msg *msg;
+
+ if (intf->in_shutdown)
+ return;
+
+ if (!ent->inuse)
+ return;
+
+ if (timeout_period < ent->timeout) {
+ ent->timeout -= timeout_period;
+ *need_timer = true;
+ return;
+ }
+
+ if (ent->retries_left == 0) {
+ /* The message has used all its retries. */
+ ent->inuse = 0;
+ smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
+ msg = ent->recv_msg;
+ list_add_tail(&msg->link, timeouts);
+ if (ent->broadcast)
+ ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
+ else if (is_lan_addr(&ent->recv_msg->addr))
+ ipmi_inc_stat(intf, timed_out_lan_commands);
+ else
+ ipmi_inc_stat(intf, timed_out_ipmb_commands);
+ } else {
+ struct ipmi_smi_msg *smi_msg;
+ /* More retries, send again. */
+
+ *need_timer = true;
+
+ /*
+ * Start with the max timer, set to normal timer after
+ * the message is sent.
+ */
+ ent->timeout = MAX_MSG_TIMEOUT;
+ ent->retries_left--;
+ smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
+ ent->seqid);
+ if (!smi_msg) {
+ if (is_lan_addr(&ent->recv_msg->addr))
+ ipmi_inc_stat(intf,
+ dropped_rexmit_lan_commands);
+ else
+ ipmi_inc_stat(intf,
+ dropped_rexmit_ipmb_commands);
+ return;
+ }
+
+ spin_unlock_irqrestore(&intf->seq_lock, *flags);
+
+ /*
+ * Send the new message. We send with a zero
+		 * priority. It timed out; I doubt time is that
+ * critical now, and high priority messages are really
+ * only for messages to the local MC, which don't get
+ * resent.
+ */
+ if (intf->handlers) {
+ if (is_lan_addr(&ent->recv_msg->addr))
+ ipmi_inc_stat(intf,
+ retransmitted_lan_commands);
+ else
+ ipmi_inc_stat(intf,
+ retransmitted_ipmb_commands);
+
+ smi_send(intf, intf->handlers, smi_msg, 0);
+ } else
+ ipmi_free_smi_msg(smi_msg);
+
+ spin_lock_irqsave(&intf->seq_lock, *flags);
+ }
+}
+
+static bool ipmi_timeout_handler(struct ipmi_smi *intf,
+ unsigned long timeout_period)
+{
+ struct list_head timeouts;
+ struct ipmi_recv_msg *msg, *msg2;
+ unsigned long flags;
+ int i;
+ bool need_timer = false;
+
+ if (!intf->bmc_registered) {
+ kref_get(&intf->refcount);
+ if (!schedule_work(&intf->bmc_reg_work)) {
+ kref_put(&intf->refcount, intf_free);
+ need_timer = true;
+ }
+ }
+
+ /*
+ * Go through the seq table and find any messages that
+ * have timed out, putting them in the timeouts
+ * list.
+ */
+ INIT_LIST_HEAD(&timeouts);
+ spin_lock_irqsave(&intf->seq_lock, flags);
+ if (intf->ipmb_maintenance_mode_timeout) {
+ if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
+ intf->ipmb_maintenance_mode_timeout = 0;
+ else
+ intf->ipmb_maintenance_mode_timeout -= timeout_period;
+ }
+ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
+ check_msg_timeout(intf, &intf->seq_table[i],
+ &timeouts, timeout_period, i,
+ &flags, &need_timer);
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+ list_for_each_entry_safe(msg, msg2, &timeouts, link)
+ deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
+
+ /*
+ * Maintenance mode handling. Check the timeout
+ * optimistically before we claim the lock. It may
+ * mean a timeout gets missed occasionally, but that
+ * only means the timeout gets extended by one period
+ * in that case. No big deal, and it avoids the lock
+ * most of the time.
+ */
+ if (intf->auto_maintenance_timeout > 0) {
+ spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+ if (intf->auto_maintenance_timeout > 0) {
+ intf->auto_maintenance_timeout
+ -= timeout_period;
+ if (!intf->maintenance_mode
+ && (intf->auto_maintenance_timeout <= 0)) {
+ intf->maintenance_mode_enable = false;
+ maintenance_mode_update(intf);
+ }
+ }
+ spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+ flags);
+ }
+
+ tasklet_schedule(&intf->recv_tasklet);
+
+ return need_timer;
+}
+
+static void ipmi_request_event(struct ipmi_smi *intf)
+{
+ /* No event requests when in maintenance mode. */
+ if (intf->maintenance_mode_enable)
+ return;
+
+ if (!intf->in_shutdown)
+ intf->handlers->request_events(intf->send_info);
+}
+
+static struct timer_list ipmi_timer;
+
+static atomic_t stop_operation;
+
+static void ipmi_timeout(struct timer_list *unused)
+{
+ struct ipmi_smi *intf;
+ bool need_timer = false;
+ int index;
+
+ if (atomic_read(&stop_operation))
+ return;
+
+ index = srcu_read_lock(&ipmi_interfaces_srcu);
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (atomic_read(&intf->event_waiters)) {
+ intf->ticks_to_req_ev--;
+ if (intf->ticks_to_req_ev == 0) {
+ ipmi_request_event(intf);
+ intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+ }
+ need_timer = true;
+ }
+
+ need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
+ }
+ srcu_read_unlock(&ipmi_interfaces_srcu, index);
+
+ if (need_timer)
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+}
+
+static void need_waiter(struct ipmi_smi *intf)
+{
+ /* Racy, but worst case we start the timer twice. */
+ if (!timer_pending(&ipmi_timer))
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+}
+
+static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
+static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
+
+static void free_smi_msg(struct ipmi_smi_msg *msg)
+{
+ atomic_dec(&smi_msg_inuse_count);
+ /* Try to keep as much stuff out of the panic path as possible. */
+ if (!oops_in_progress)
+ kfree(msg);
+}
+
+struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
+{
+ struct ipmi_smi_msg *rv;
+ rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
+ if (rv) {
+ rv->done = free_smi_msg;
+ rv->user_data = NULL;
+ rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
+ atomic_inc(&smi_msg_inuse_count);
+ }
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_alloc_smi_msg);
+
+static void free_recv_msg(struct ipmi_recv_msg *msg)
+{
+ atomic_dec(&recv_msg_inuse_count);
+ /* Try to keep as much stuff out of the panic path as possible. */
+ if (!oops_in_progress)
+ kfree(msg);
+}
+
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
+{
+ struct ipmi_recv_msg *rv;
+
+ rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
+ if (rv) {
+ rv->user = NULL;
+ rv->done = free_recv_msg;
+ atomic_inc(&recv_msg_inuse_count);
+ }
+ return rv;
+}
+
+void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
+{
+ if (msg->user && !oops_in_progress)
+ kref_put(&msg->user->refcount, free_user);
+ msg->done(msg);
+}
+EXPORT_SYMBOL(ipmi_free_recv_msg);
+
+static atomic_t panic_done_count = ATOMIC_INIT(0);
+
+static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
+{
+ atomic_dec(&panic_done_count);
+}
+
+static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
+{
+ atomic_dec(&panic_done_count);
+}
+
+/*
+ * Inside a panic, send a message and wait for a response.
+ */
+static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
+{
+ struct ipmi_smi_msg smi_msg;
+ struct ipmi_recv_msg recv_msg;
+ int rv;
+
+ smi_msg.done = dummy_smi_done_handler;
+ recv_msg.done = dummy_recv_done_handler;
+ atomic_add(2, &panic_done_count);
+ rv = i_ipmi_request(NULL,
+ intf,
+ addr,
+ 0,
+ msg,
+ intf,
+ &smi_msg,
+ &recv_msg,
+ 0,
+ intf->addrinfo[0].address,
+ intf->addrinfo[0].lun,
+ 0, 1); /* Don't retry, and don't wait. */
+ if (rv)
+ atomic_sub(2, &panic_done_count);
+ else if (intf->handlers->flush_messages)
+ intf->handlers->flush_messages(intf->send_info);
+
+ while (atomic_read(&panic_done_count) != 0)
+ ipmi_poll(intf);
+}
+
+static void event_receiver_fetcher(struct ipmi_smi *intf,
+ struct ipmi_recv_msg *msg)
+{
+ if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
+ && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
+ && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
+ /* A get event receiver command, save it. */
+ intf->event_receiver = msg->msg.data[1];
+ intf->event_receiver_lun = msg->msg.data[2] & 0x3;
+ }
+}
+
+static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
+{
+ if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
+ && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
+ && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
+ /*
+ * A get device id command, save if we are an event
+ * receiver or generator.
+ */
+ intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
+ intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
+ }
+}
+
+static void send_panic_events(struct ipmi_smi *intf, char *str)
+{
+ struct kernel_ipmi_msg msg;
+ unsigned char data[16];
+ struct ipmi_system_interface_addr *si;
+ struct ipmi_addr addr;
+ char *p = str;
+ struct ipmi_ipmb_addr *ipmb;
+ int j;
+
+ if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
+ return;
+
+ si = (struct ipmi_system_interface_addr *) &addr;
+ si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si->channel = IPMI_BMC_CHANNEL;
+ si->lun = 0;
+
+ /* Fill in an event telling that we have failed. */
+ msg.netfn = 0x04; /* Sensor or Event. */
+ msg.cmd = 2; /* Platform event command. */
+ msg.data = data;
+ msg.data_len = 8;
+ data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
+ data[1] = 0x03; /* This is for IPMI 1.0. */
+ data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
+ data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
+ data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
+
+ /*
+ * Put a few breadcrumbs in. Hopefully later we can add more things
+ * to make the panic events more useful.
+ */
+ if (str) {
+ data[3] = str[0];
+ data[6] = str[1];
+ data[7] = str[2];
+ }
+
+ /* Send the event announcing the panic. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
+
+ /*
+	 * On every interface, dump a series of OEM events holding the
+ * string.
+ */
+ if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
+ return;
+
+ /*
+	 * intf_num is used as a marker to tell if the
+ * interface is valid. Thus we need a read barrier to
+ * make sure data fetched before checking intf_num
+ * won't be used.
+ */
+ smp_rmb();
+
+ /*
+ * First job here is to figure out where to send the
+ * OEM events. There's no way in IPMI to send OEM
+ * events using an event send command, so we have to
+ * find the SEL to put them in and stick them in
+ * there.
+ */
+
+ /* Get capabilities from the get device id. */
+ intf->local_sel_device = 0;
+ intf->local_event_generator = 0;
+ intf->event_receiver = 0;
+
+ /* Request the device info from the local MC. */
+ msg.netfn = IPMI_NETFN_APP_REQUEST;
+ msg.cmd = IPMI_GET_DEVICE_ID_CMD;
+ msg.data = NULL;
+ msg.data_len = 0;
+ intf->null_user_handler = device_id_fetcher;
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
+
+ if (intf->local_event_generator) {
+ /* Request the event receiver from the local MC. */
+ msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
+ msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
+ msg.data = NULL;
+ msg.data_len = 0;
+ intf->null_user_handler = event_receiver_fetcher;
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
+ }
+ intf->null_user_handler = NULL;
+
+ /*
+ * Validate the event receiver. The low bit must not
+ * be 1 (it must be a valid IPMB address), it cannot
+ * be zero, and it must not be my address.
+ */
+ if (((intf->event_receiver & 1) == 0)
+ && (intf->event_receiver != 0)
+ && (intf->event_receiver != intf->addrinfo[0].address)) {
+ /*
+ * The event receiver is valid, send an IPMB
+ * message.
+ */
+ ipmb = (struct ipmi_ipmb_addr *) &addr;
+ ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb->channel = 0; /* FIXME - is this right? */
+ ipmb->lun = intf->event_receiver_lun;
+ ipmb->slave_addr = intf->event_receiver;
+ } else if (intf->local_sel_device) {
+ /*
+ * The event receiver was not valid (or was
+ * me), but I am an SEL device, just dump it
+ * in my SEL.
+ */
+ si = (struct ipmi_system_interface_addr *) &addr;
+ si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si->channel = IPMI_BMC_CHANNEL;
+ si->lun = 0;
+ } else
+		return; /* Nowhere to send the event. */
+
+ msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
+ msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
+ msg.data = data;
+ msg.data_len = 16;
+
+ j = 0;
+ while (*p) {
+ int size = strlen(p);
+
+ if (size > 11)
+ size = 11;
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0xf0; /* OEM event without timestamp. */
+ data[3] = intf->addrinfo[0].address;
+ data[4] = j++; /* sequence # */
+ /*
+ * Always give 11 bytes, so strncpy will fill
+ * it with zeroes for me.
+ */
+ strncpy(data+5, p, 11);
+ p += size;
+
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
+ }
+}
+
+static int has_panicked;
+
+static int panic_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
+{
+ struct ipmi_smi *intf;
+ struct ipmi_user *user;
+
+ if (has_panicked)
+ return NOTIFY_DONE;
+ has_panicked = 1;
+
+ /* For every registered interface, set it to run to completion. */
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (!intf->handlers || intf->intf_num == -1)
+ /* Interface is not ready. */
+ continue;
+
+ if (!intf->handlers->poll)
+ continue;
+
+ /*
+ * If we were interrupted while locking xmit_msgs_lock or
+ * waiting_rcv_msgs_lock, the corresponding list may be
+ * corrupted. In this case, drop items on the list for
+ * the safety.
+ */
+ if (!spin_trylock(&intf->xmit_msgs_lock)) {
+ INIT_LIST_HEAD(&intf->xmit_msgs);
+ INIT_LIST_HEAD(&intf->hp_xmit_msgs);
+ } else
+ spin_unlock(&intf->xmit_msgs_lock);
+
+ if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
+ INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
+ else
+ spin_unlock(&intf->waiting_rcv_msgs_lock);
+
+ intf->run_to_completion = 1;
+ if (intf->handlers->set_run_to_completion)
+ intf->handlers->set_run_to_completion(intf->send_info,
+ 1);
+
+ list_for_each_entry_rcu(user, &intf->users, link) {
+ if (user->handler->ipmi_panic_handler)
+ user->handler->ipmi_panic_handler(
+ user->handler_data);
+ }
+
+ send_panic_events(intf, ptr);
+ }
+
+ return NOTIFY_DONE;
+}
+
+/* Must be called with ipmi_interfaces_mutex held. */
+static int ipmi_register_driver(void)
+{
+ int rv;
+
+ if (drvregistered)
+ return 0;
+
+ rv = driver_register(&ipmidriver.driver);
+ if (rv)
+ pr_err("Could not register IPMI driver\n");
+ else
+ drvregistered = true;
+ return rv;
+}
+
+static struct notifier_block panic_block = {
+ .notifier_call = panic_event,
+ .next = NULL,
+ .priority = 200 /* priority: INT_MAX >= x >= 0 */
+};
+
+static int ipmi_init_msghandler(void)
+{
+ int rv;
+
+ mutex_lock(&ipmi_interfaces_mutex);
+ rv = ipmi_register_driver();
+ if (rv)
+ goto out;
+ if (initialized)
+ goto out;
+
+ rv = init_srcu_struct(&ipmi_interfaces_srcu);
+ if (rv)
+ goto out;
+
+ remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
+ if (!remove_work_wq) {
+ pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
+ rv = -ENOMEM;
+ goto out_wq;
+ }
+
+ timer_setup(&ipmi_timer, ipmi_timeout, 0);
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
+ initialized = true;
+
+out_wq:
+ if (rv)
+ cleanup_srcu_struct(&ipmi_interfaces_srcu);
+out:
+ mutex_unlock(&ipmi_interfaces_mutex);
+ return rv;
+}
+
+static int __init ipmi_init_msghandler_mod(void)
+{
+ int rv;
+
+ pr_info("version " IPMI_DRIVER_VERSION "\n");
+
+ mutex_lock(&ipmi_interfaces_mutex);
+ rv = ipmi_register_driver();
+ mutex_unlock(&ipmi_interfaces_mutex);
+
+ return rv;
+}
+
+static void __exit cleanup_ipmi(void)
+{
+ int count;
+
+ if (initialized) {
+ destroy_workqueue(remove_work_wq);
+
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &panic_block);
+
+ /*
+ * This can't be called if any interfaces exist, so no worry
+ * about shutting down the interfaces.
+ */
+
+ /*
+ * Tell the timer to stop, then wait for it to stop. This
+ * avoids problems with race conditions removing the timer
+ * here.
+ */
+ atomic_set(&stop_operation, 1);
+ del_timer_sync(&ipmi_timer);
+
+ initialized = false;
+
+ /* Check for buffer leaks. */
+ count = atomic_read(&smi_msg_inuse_count);
+ if (count != 0)
+ pr_warn("SMI message count %d at exit\n", count);
+ count = atomic_read(&recv_msg_inuse_count);
+ if (count != 0)
+ pr_warn("recv message count %d at exit\n", count);
+
+ cleanup_srcu_struct(&ipmi_interfaces_srcu);
+ }
+ if (drvregistered)
+ driver_unregister(&ipmidriver.driver);
+}
+module_exit(cleanup_ipmi);
+
+module_init(ipmi_init_msghandler_mod);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
+MODULE_VERSION(IPMI_DRIVER_VERSION);
+MODULE_SOFTDEP("post: ipmi_devintf");
diff --git a/drivers/char/ipmi/ipmi_plat_data.c b/drivers/char/ipmi/ipmi_plat_data.c
new file mode 100644
index 000000000..747b51ae0
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_plat_data.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Add an IPMI platform device.
+ */
+
+#include <linux/platform_device.h>
+#include "ipmi_plat_data.h"
+#include "ipmi_si.h"
+
+struct platform_device *ipmi_platform_add(const char *name, unsigned int inst,
+ struct ipmi_plat_data *p)
+{
+ struct platform_device *pdev;
+ unsigned int num_r = 1, size = 0, pidx = 0;
+ struct resource r[4];
+ struct property_entry pr[6];
+ u32 flags;
+ int rv;
+
+ memset(pr, 0, sizeof(pr));
+ memset(r, 0, sizeof(r));
+
+ if (p->iftype == IPMI_PLAT_IF_SI) {
+ if (p->type == SI_BT)
+ size = 3;
+ else if (p->type != SI_TYPE_INVALID)
+ size = 2;
+
+ if (p->regsize == 0)
+ p->regsize = DEFAULT_REGSIZE;
+ if (p->regspacing == 0)
+ p->regspacing = p->regsize;
+
+ pr[pidx++] = PROPERTY_ENTRY_U8("ipmi-type", p->type);
+ } else if (p->iftype == IPMI_PLAT_IF_SSIF) {
+ pr[pidx++] = PROPERTY_ENTRY_U16("i2c-addr", p->addr);
+ }
+
+ if (p->slave_addr)
+ pr[pidx++] = PROPERTY_ENTRY_U8("slave-addr", p->slave_addr);
+ pr[pidx++] = PROPERTY_ENTRY_U8("addr-source", p->addr_source);
+ if (p->regshift)
+ pr[pidx++] = PROPERTY_ENTRY_U8("reg-shift", p->regshift);
+ pr[pidx++] = PROPERTY_ENTRY_U8("reg-size", p->regsize);
+ /* Last entry must be left NULL to terminate it. */
+
+ pdev = platform_device_alloc(name, inst);
+ if (!pdev) {
+ pr_err("Error allocating IPMI platform device %s.%d\n",
+ name, inst);
+ return NULL;
+ }
+
+ if (size == 0)
+ /* An invalid or SSIF interface, no resources. */
+ goto add_properties;
+
+ /*
+ * Register spacing is derived from the resources in
+ * the IPMI platform code.
+ */
+
+ if (p->space == IPMI_IO_ADDR_SPACE)
+ flags = IORESOURCE_IO;
+ else
+ flags = IORESOURCE_MEM;
+
+ r[0].start = p->addr;
+ r[0].end = r[0].start + p->regsize - 1;
+ r[0].name = "IPMI Address 1";
+ r[0].flags = flags;
+
+ if (size > 1) {
+ r[1].start = r[0].start + p->regspacing;
+ r[1].end = r[1].start + p->regsize - 1;
+ r[1].name = "IPMI Address 2";
+ r[1].flags = flags;
+ num_r++;
+ }
+
+ if (size > 2) {
+ r[2].start = r[1].start + p->regspacing;
+ r[2].end = r[2].start + p->regsize - 1;
+ r[2].name = "IPMI Address 3";
+ r[2].flags = flags;
+ num_r++;
+ }
+
+ if (p->irq) {
+ r[num_r].start = p->irq;
+ r[num_r].end = p->irq;
+ r[num_r].name = "IPMI IRQ";
+ r[num_r].flags = IORESOURCE_IRQ;
+ num_r++;
+ }
+
+ rv = platform_device_add_resources(pdev, r, num_r);
+ if (rv) {
+ dev_err(&pdev->dev,
+ "Unable to add hard-code resources: %d\n", rv);
+ goto err;
+ }
+ add_properties:
+ rv = device_create_managed_software_node(&pdev->dev, pr, NULL);
+ if (rv) {
+ dev_err(&pdev->dev,
+ "Unable to add hard-code properties: %d\n", rv);
+ goto err;
+ }
+
+ rv = platform_device_add(pdev);
+ if (rv) {
+ dev_err(&pdev->dev,
+ "Unable to add hard-code device: %d\n", rv);
+ goto err;
+ }
+ return pdev;
+
+err:
+ platform_device_put(pdev);
+ return NULL;
+}
+EXPORT_SYMBOL(ipmi_platform_add);
diff --git a/drivers/char/ipmi/ipmi_plat_data.h b/drivers/char/ipmi/ipmi_plat_data.h
new file mode 100644
index 000000000..9ba744ea9
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_plat_data.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+/*
+ * Generic code to add IPMI platform devices.
+ */
+
+#include <linux/ipmi.h>
+
+enum ipmi_plat_interface_type { IPMI_PLAT_IF_SI, IPMI_PLAT_IF_SSIF };
+
+struct ipmi_plat_data {
+ enum ipmi_plat_interface_type iftype;
+	unsigned int type; /* si_type for si, SI_TYPE_INVALID for others */
+ unsigned int space; /* addr_space for si, intf# for ssif. */
+ unsigned long addr;
+ unsigned int regspacing;
+ unsigned int regsize;
+ unsigned int regshift;
+ unsigned int irq;
+ unsigned int slave_addr;
+ enum ipmi_addr_src addr_source;
+};
+
+struct platform_device *ipmi_platform_add(const char *name, unsigned int inst,
+ struct ipmi_plat_data *p);
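+
+/*
+ * A minimal usage sketch (illustrative only; the values below are
+ * assumptions, not taken from this code):
+ *
+ *	struct ipmi_plat_data p = {
+ *		.iftype = IPMI_PLAT_IF_SI,
+ *		.type = SI_KCS,
+ *		.space = IPMI_IO_ADDR_SPACE,
+ *		.addr = 0xca2,
+ *		.addr_source = SI_HARDCODED,
+ *	};
+ *	struct platform_device *pdev;
+ *
+ *	pdev = ipmi_platform_add("hardcode-ipmi-si", 0, &p);
+ *
+ * Fields left at zero (regsize, regspacing) fall back to defaults
+ * inside ipmi_platform_add().
+ */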
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
new file mode 100644
index 000000000..da22a8cbe
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PowerNV OPAL IPMI driver
+ *
+ * Copyright 2014 IBM Corp.
+ */
+
+#define pr_fmt(fmt) "ipmi-powernv: " fmt
+
+#include <linux/ipmi_smi.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/opal.h>
+
+
+struct ipmi_smi_powernv {
+ u64 interface_id;
+ struct ipmi_smi *intf;
+ unsigned int irq;
+
+	/*
+	 * We assume that there can only be one outstanding request, so
+	 * keep the pending message in cur_msg. We protect this (and
+	 * consequently opal_msg, which is in use when cur_msg is set)
+	 * from concurrent updates through the send & recv calls with
+	 * msg_lock.
+ */
+ spinlock_t msg_lock;
+ struct ipmi_smi_msg *cur_msg;
+ struct opal_ipmi_msg *opal_msg;
+};
+
+static int ipmi_powernv_start_processing(void *send_info, struct ipmi_smi *intf)
+{
+ struct ipmi_smi_powernv *smi = send_info;
+
+ smi->intf = intf;
+ return 0;
+}
+
+static void send_error_reply(struct ipmi_smi_powernv *smi,
+ struct ipmi_smi_msg *msg, u8 completion_code)
+{
+ msg->rsp[0] = msg->data[0] | 0x4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = completion_code;
+ msg->rsp_size = 3;
+ ipmi_smi_msg_received(smi->intf, msg);
+}
+
+static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
+{
+ struct ipmi_smi_powernv *smi = send_info;
+ struct opal_ipmi_msg *opal_msg;
+ unsigned long flags;
+ int comp, rc;
+ size_t size;
+
+ /* ensure data_len will fit in the opal_ipmi_msg buffer... */
+ if (msg->data_size > IPMI_MAX_MSG_LENGTH) {
+ comp = IPMI_REQ_LEN_EXCEEDED_ERR;
+ goto err;
+ }
+
+ /* ... and that we at least have netfn and cmd bytes */
+ if (msg->data_size < 2) {
+ comp = IPMI_REQ_LEN_INVALID_ERR;
+ goto err;
+ }
+
+ spin_lock_irqsave(&smi->msg_lock, flags);
+
+ if (smi->cur_msg) {
+ comp = IPMI_NODE_BUSY_ERR;
+ goto err_unlock;
+ }
+
+ /* format our data for the OPAL API */
+ opal_msg = smi->opal_msg;
+ opal_msg->version = OPAL_IPMI_MSG_FORMAT_VERSION_1;
+ opal_msg->netfn = msg->data[0];
+ opal_msg->cmd = msg->data[1];
+ if (msg->data_size > 2)
+ memcpy(opal_msg->data, msg->data + 2, msg->data_size - 2);
+
+ /* data_size already includes the netfn and cmd bytes */
+ size = sizeof(*opal_msg) + msg->data_size - 2;
+
+ pr_devel("%s: opal_ipmi_send(0x%llx, %p, %ld)\n", __func__,
+ smi->interface_id, opal_msg, size);
+ rc = opal_ipmi_send(smi->interface_id, opal_msg, size);
+ pr_devel("%s: -> %d\n", __func__, rc);
+
+ if (!rc) {
+ smi->cur_msg = msg;
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ return;
+ }
+
+ comp = IPMI_ERR_UNSPECIFIED;
+err_unlock:
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+err:
+ send_error_reply(smi, msg, comp);
+}
+
+static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi)
+{
+ struct opal_ipmi_msg *opal_msg;
+ struct ipmi_smi_msg *msg;
+ unsigned long flags;
+ uint64_t size;
+ int rc;
+
+ pr_devel("%s: opal_ipmi_recv(%llx, msg, sz)\n", __func__,
+ smi->interface_id);
+
+ spin_lock_irqsave(&smi->msg_lock, flags);
+
+ if (!smi->cur_msg) {
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ pr_warn("no current message?\n");
+ return 0;
+ }
+
+ msg = smi->cur_msg;
+ opal_msg = smi->opal_msg;
+
+ size = cpu_to_be64(sizeof(*opal_msg) + IPMI_MAX_MSG_LENGTH);
+
+ rc = opal_ipmi_recv(smi->interface_id,
+ opal_msg,
+ &size);
+ size = be64_to_cpu(size);
+ pr_devel("%s: -> %d (size %lld)\n", __func__,
+ rc, rc == 0 ? size : 0);
+ if (rc) {
+		/* If this came via a poll and the response was not yet ready */
+ if (rc == OPAL_EMPTY) {
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ return 0;
+ }
+
+ smi->cur_msg = NULL;
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ send_error_reply(smi, msg, IPMI_ERR_UNSPECIFIED);
+ return 0;
+ }
+
+ if (size < sizeof(*opal_msg)) {
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ pr_warn("unexpected IPMI message size %lld\n", size);
+ return 0;
+ }
+
+ if (opal_msg->version != OPAL_IPMI_MSG_FORMAT_VERSION_1) {
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ pr_warn("unexpected IPMI message format (version %d)\n",
+ opal_msg->version);
+ return 0;
+ }
+
+ msg->rsp[0] = opal_msg->netfn;
+ msg->rsp[1] = opal_msg->cmd;
+ if (size > sizeof(*opal_msg))
+ memcpy(&msg->rsp[2], opal_msg->data, size - sizeof(*opal_msg));
+ msg->rsp_size = 2 + size - sizeof(*opal_msg);
+
+ smi->cur_msg = NULL;
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ ipmi_smi_msg_received(smi->intf, msg);
+ return 0;
+}
+
+static void ipmi_powernv_request_events(void *send_info)
+{
+}
+
+static void ipmi_powernv_set_run_to_completion(void *send_info,
+ bool run_to_completion)
+{
+}
+
+static void ipmi_powernv_poll(void *send_info)
+{
+ struct ipmi_smi_powernv *smi = send_info;
+
+ ipmi_powernv_recv(smi);
+}
+
+static const struct ipmi_smi_handlers ipmi_powernv_smi_handlers = {
+ .owner = THIS_MODULE,
+ .start_processing = ipmi_powernv_start_processing,
+ .sender = ipmi_powernv_send,
+ .request_events = ipmi_powernv_request_events,
+ .set_run_to_completion = ipmi_powernv_set_run_to_completion,
+ .poll = ipmi_powernv_poll,
+};
+
+static irqreturn_t ipmi_opal_event(int irq, void *data)
+{
+ struct ipmi_smi_powernv *smi = data;
+
+ ipmi_powernv_recv(smi);
+ return IRQ_HANDLED;
+}
+
+static int ipmi_powernv_probe(struct platform_device *pdev)
+{
+ struct ipmi_smi_powernv *ipmi;
+ struct device *dev;
+ u32 prop;
+ int rc;
+
+ if (!pdev || !pdev->dev.of_node)
+ return -ENODEV;
+
+ dev = &pdev->dev;
+
+ ipmi = devm_kzalloc(dev, sizeof(*ipmi), GFP_KERNEL);
+ if (!ipmi)
+ return -ENOMEM;
+
+ spin_lock_init(&ipmi->msg_lock);
+
+ rc = of_property_read_u32(dev->of_node, "ibm,ipmi-interface-id",
+ &prop);
+ if (rc) {
+ dev_warn(dev, "No interface ID property\n");
+ goto err_free;
+ }
+ ipmi->interface_id = prop;
+
+ rc = of_property_read_u32(dev->of_node, "interrupts", &prop);
+ if (rc) {
+ dev_warn(dev, "No interrupts property\n");
+ goto err_free;
+ }
+
+ ipmi->irq = irq_of_parse_and_map(dev->of_node, 0);
+ if (!ipmi->irq) {
+ dev_info(dev, "Unable to map irq from device tree\n");
+ ipmi->irq = opal_event_request(prop);
+ }
+
+ rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
+ "opal-ipmi", ipmi);
+ if (rc) {
+ dev_warn(dev, "Unable to request irq\n");
+ goto err_dispose;
+ }
+
+ ipmi->opal_msg = devm_kmalloc(dev,
+ sizeof(*ipmi->opal_msg) + IPMI_MAX_MSG_LENGTH,
+ GFP_KERNEL);
+ if (!ipmi->opal_msg) {
+ rc = -ENOMEM;
+ goto err_unregister;
+ }
+
+ rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi, dev, 0);
+ if (rc) {
+ dev_warn(dev, "IPMI SMI registration failed (%d)\n", rc);
+ goto err_free_msg;
+ }
+
+ dev_set_drvdata(dev, ipmi);
+ return 0;
+
+err_free_msg:
+ devm_kfree(dev, ipmi->opal_msg);
+err_unregister:
+ free_irq(ipmi->irq, ipmi);
+err_dispose:
+ irq_dispose_mapping(ipmi->irq);
+err_free:
+ devm_kfree(dev, ipmi);
+ return rc;
+}
+
+static int ipmi_powernv_remove(struct platform_device *pdev)
+{
+ struct ipmi_smi_powernv *smi = dev_get_drvdata(&pdev->dev);
+
+ ipmi_unregister_smi(smi->intf);
+ free_irq(smi->irq, smi);
+ irq_dispose_mapping(smi->irq);
+
+ return 0;
+}
+
+static const struct of_device_id ipmi_powernv_match[] = {
+ { .compatible = "ibm,opal-ipmi" },
+ { },
+};
+
+
+static struct platform_driver powernv_ipmi_driver = {
+ .driver = {
+ .name = "ipmi-powernv",
+ .of_match_table = ipmi_powernv_match,
+ },
+ .probe = ipmi_powernv_probe,
+ .remove = ipmi_powernv_remove,
+};
+
+
+module_platform_driver(powernv_ipmi_driver);
+
+MODULE_DEVICE_TABLE(of, ipmi_powernv_match);
+MODULE_DESCRIPTION("powernv IPMI driver");
+MODULE_AUTHOR("Jeremy Kerr <jk@ozlabs.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
new file mode 100644
index 000000000..163ec9749
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -0,0 +1,738 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_poweroff.c
+ *
+ * MontaVista IPMI Poweroff extension to sys_reboot
+ *
+ * Author: MontaVista Software, Inc.
+ * Steven Dake <sdake@mvista.com>
+ * Corey Minyard <cminyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002,2004 MontaVista Software Inc.
+ */
+
+#define pr_fmt(fmt) "IPMI poweroff: " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/pm.h>
+#include <linux/kdev_t.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+
+static void ipmi_po_smi_gone(int if_num);
+static void ipmi_po_new_smi(int if_num, struct device *device);
+
+/*
+ * Definitions for controlling power off (if the system supports it). They
+ * conveniently match the IPMI chassis control values.
+ */
+#define IPMI_CHASSIS_POWER_DOWN 0 /* power down, the default. */
+#define IPMI_CHASSIS_POWER_CYCLE 0x02 /* power cycle */
+
+/* Whether to do a power cycle instead of a power down. */
+static int poweroff_powercycle;
+
+/* Which interface to use, -1 means the first we see. */
+static int ifnum_to_use = -1;
+
+/* Our local state. */
+static int ready;
+static struct ipmi_user *ipmi_user;
+static int ipmi_ifnum;
+static void (*specific_poweroff_func)(struct ipmi_user *user);
+
+/* Holds the old poweroff function so we can restore it on removal. */
+static void (*old_poweroff_func)(void);
+
+static int set_param_ifnum(const char *val, const struct kernel_param *kp)
+{
+ int rv = param_set_int(val, kp);
+ if (rv)
+ return rv;
+ if ((ifnum_to_use < 0) || (ifnum_to_use == ipmi_ifnum))
+ return 0;
+
+ ipmi_po_smi_gone(ipmi_ifnum);
+ ipmi_po_new_smi(ifnum_to_use, NULL);
+ return 0;
+}
+
+module_param_call(ifnum_to_use, set_param_ifnum, param_get_int,
+ &ifnum_to_use, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the "
+		 "poweroff. Setting to -1 defaults to the first registered "
+		 "interface");
+
+/* parameter definition to allow user to flag power cycle */
+module_param(poweroff_powercycle, int, 0644);
+MODULE_PARM_DESC(poweroff_powercycle,
+ " Set to non-zero to enable power cycle instead of power"
+ " down. Power cycle is contingent on hardware support,"
+ " otherwise it defaults back to power down.");
+
+/* Stuff from the get device id command. */
+static unsigned int mfg_id;
+static unsigned int prod_id;
+static unsigned char capabilities;
+static unsigned char ipmi_version;
+
+/*
+ * We use our own messages for this operation, we don't let the system
+ * allocate them, since we may be in a panic situation. The whole
+ * thing is single-threaded, anyway, so multiple messages are not
+ * required.
+ */
+static atomic_t dummy_count = ATOMIC_INIT(0);
+static void dummy_smi_free(struct ipmi_smi_msg *msg)
+{
+ atomic_dec(&dummy_count);
+}
+static void dummy_recv_free(struct ipmi_recv_msg *msg)
+{
+ atomic_dec(&dummy_count);
+}
+static struct ipmi_smi_msg halt_smi_msg = INIT_IPMI_SMI_MSG(dummy_smi_free);
+static struct ipmi_recv_msg halt_recv_msg = INIT_IPMI_RECV_MSG(dummy_recv_free);
+
+
+/*
+ * Code to send a message and wait for the response.
+ */
+
+static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data)
+{
+ struct completion *comp = recv_msg->user_msg_data;
+
+ if (comp)
+ complete(comp);
+}
+
+static const struct ipmi_user_hndl ipmi_poweroff_handler = {
+ .ipmi_recv_hndl = receive_handler
+};
+
+
+static int ipmi_request_wait_for_response(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *send_msg)
+{
+ int rv;
+ struct completion comp;
+
+ init_completion(&comp);
+
+ rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, &comp,
+ &halt_smi_msg, &halt_recv_msg, 0);
+ if (rv)
+ return rv;
+
+ wait_for_completion(&comp);
+
+ return halt_recv_msg.msg.data[0];
+}
+
+/* Wait for message to complete, spinning. */
+static int ipmi_request_in_rc_mode(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *send_msg)
+{
+ int rv;
+
+ atomic_set(&dummy_count, 2);
+ rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
+ &halt_smi_msg, &halt_recv_msg, 0);
+ if (rv) {
+ atomic_set(&dummy_count, 0);
+ return rv;
+ }
+
+ /*
+ * Spin until our message is done.
+ */
+ while (atomic_read(&dummy_count) > 0) {
+ ipmi_poll_interface(user);
+ cpu_relax();
+ }
+
+ return halt_recv_msg.msg.data[0];
+}
+
+/*
+ * ATCA Support
+ */
+
+#define IPMI_NETFN_ATCA 0x2c
+#define IPMI_ATCA_SET_POWER_CMD 0x11
+#define IPMI_ATCA_GET_ADDR_INFO_CMD 0x01
+#define IPMI_PICMG_ID 0
+
+#define IPMI_NETFN_OEM 0x2e
+#define IPMI_ATCA_PPS_GRACEFUL_RESTART 0x11
+#define IPMI_ATCA_PPS_IANA "\x00\x40\x0A"
+#define IPMI_MOTOROLA_MANUFACTURER_ID 0x0000A1
+#define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID 0x0051
+
+static void (*atca_oem_poweroff_hook)(struct ipmi_user *user);
+
+static void pps_poweroff_atca(struct ipmi_user *user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ pr_info("PPS powerdown hook used\n");
+
+ send_msg.netfn = IPMI_NETFN_OEM;
+ send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
+ send_msg.data = IPMI_ATCA_PPS_IANA;
+ send_msg.data_len = 3;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE)
+ pr_err("Unable to send ATCA, IPMI error 0x%x\n", rv);
+
+ return;
+}
+
+static int ipmi_atca_detect(struct ipmi_user *user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ unsigned char data[1];
+
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ /*
+ * Use get address info to check and see if we are ATCA
+ */
+ send_msg.netfn = IPMI_NETFN_ATCA;
+ send_msg.cmd = IPMI_ATCA_GET_ADDR_INFO_CMD;
+ data[0] = IPMI_PICMG_ID;
+ send_msg.data = data;
+ send_msg.data_len = sizeof(data);
+ rv = ipmi_request_wait_for_response(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+
+ pr_info("ATCA Detect mfg 0x%X prod 0x%X\n", mfg_id, prod_id);
+ if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
+ && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
+ pr_info("Installing Pigeon Point Systems Poweroff Hook\n");
+ atca_oem_poweroff_hook = pps_poweroff_atca;
+ }
+ return !rv;
+}
+
+static void ipmi_poweroff_atca(struct ipmi_user *user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ unsigned char data[4];
+
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ pr_info("Powering down via ATCA power command\n");
+
+ /*
+ * Power down
+ */
+ send_msg.netfn = IPMI_NETFN_ATCA;
+ send_msg.cmd = IPMI_ATCA_SET_POWER_CMD;
+ data[0] = IPMI_PICMG_ID;
+ data[1] = 0; /* FRU id */
+ data[2] = 0; /* Power Level */
+ data[3] = 0; /* Don't change saved presets */
+ send_msg.data = data;
+ send_msg.data_len = sizeof(data);
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ /*
+ * At this point, the system may be shutting down, and most
+	 * serial drivers (if used) will have interrupts turned off, so
+	 * it may be better to ignore an IPMI_UNKNOWN_ERR_COMPLETION_CODE
+	 * return code.
+ */
+ if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+ pr_err("Unable to send ATCA powerdown message, IPMI error 0x%x\n",
+ rv);
+ goto out;
+ }
+
+ if (atca_oem_poweroff_hook)
+ atca_oem_poweroff_hook(user);
+ out:
+ return;
+}
+
+/*
+ * CPI1 Support
+ */
+
+#define IPMI_NETFN_OEM_1 0xf8
+#define OEM_GRP_CMD_SET_RESET_STATE 0x84
+#define OEM_GRP_CMD_SET_POWER_STATE 0x82
+#define IPMI_NETFN_OEM_8 0xf8
+#define OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL 0x80
+#define OEM_GRP_CMD_GET_SLOT_GA 0xa3
+#define IPMI_NETFN_SENSOR_EVT 0x10
+#define IPMI_CMD_GET_EVENT_RECEIVER 0x01
+
+#define IPMI_CPI1_PRODUCT_ID 0x000157
+#define IPMI_CPI1_MANUFACTURER_ID 0x0108
+
+static int ipmi_cpi1_detect(struct ipmi_user *user)
+{
+ return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID)
+ && (prod_id == IPMI_CPI1_PRODUCT_ID));
+}
+
+static void ipmi_poweroff_cpi1(struct ipmi_user *user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct ipmi_ipmb_addr ipmb_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ unsigned char data[1];
+ int slot;
+ unsigned char hotswap_ipmb;
+ unsigned char aer_addr;
+ unsigned char aer_lun;
+
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ pr_info("Powering down via CPI1 power command\n");
+
+ /*
+ * Get IPMI ipmb address
+ */
+ send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
+ send_msg.cmd = OEM_GRP_CMD_GET_SLOT_GA;
+ send_msg.data = NULL;
+ send_msg.data_len = 0;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv)
+ goto out;
+ slot = halt_recv_msg.msg.data[1];
+ hotswap_ipmb = (slot > 9) ? (0xb0 + 2 * slot) : (0xae + 2 * slot);
+
+ /*
+ * Get active event receiver
+ */
+ send_msg.netfn = IPMI_NETFN_SENSOR_EVT >> 2;
+ send_msg.cmd = IPMI_CMD_GET_EVENT_RECEIVER;
+ send_msg.data = NULL;
+ send_msg.data_len = 0;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv)
+ goto out;
+ aer_addr = halt_recv_msg.msg.data[1];
+ aer_lun = halt_recv_msg.msg.data[2];
+
+ /*
+ * Setup IPMB address target instead of local target
+ */
+ ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr.channel = 0;
+ ipmb_addr.slave_addr = aer_addr;
+ ipmb_addr.lun = aer_lun;
+
+ /*
+ * Send request hotswap control to remove blade from dpv
+ */
+ send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
+ send_msg.cmd = OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL;
+ send_msg.data = &hotswap_ipmb;
+ send_msg.data_len = 1;
+ ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &ipmb_addr,
+ &send_msg);
+
+ /*
+ * Set reset asserted
+ */
+ send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
+ send_msg.cmd = OEM_GRP_CMD_SET_RESET_STATE;
+ send_msg.data = data;
+ data[0] = 1; /* Reset asserted state */
+ send_msg.data_len = 1;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv)
+ goto out;
+
+ /*
+ * Power down
+ */
+ send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
+ send_msg.cmd = OEM_GRP_CMD_SET_POWER_STATE;
+ send_msg.data = data;
+ data[0] = 1; /* Power down state */
+ send_msg.data_len = 1;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv)
+ goto out;
+
+ out:
+ return;
+}
+
+/*
+ * ipmi_dell_chassis_detect()
+ * Dell systems with IPMI < 1.5 don't set the chassis capability bit
+ * but they can handle a chassis poweroff or powercycle command.
+ */
+
+#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
+static int ipmi_dell_chassis_detect(struct ipmi_user *user)
+{
+ const char ipmi_version_major = ipmi_version & 0xF;
+ const char ipmi_version_minor = (ipmi_version >> 4) & 0xF;
+ const char mfr[3] = DELL_IANA_MFR_ID;
+ if (!memcmp(mfr, &mfg_id, sizeof(mfr)) &&
+ ipmi_version_major <= 1 &&
+ ipmi_version_minor < 5)
+ return 1;
+ return 0;
+}
+
+/*
+ * ipmi_hp_chassis_detect()
+ * HP PA-RISC servers rp3410/rp3440, the C8000 workstation and the rx2600 and
+ * zx6000 machines support IPMI vers 1 and don't set the chassis capability bit
+ * but they can handle a chassis poweroff or powercycle command.
+ */
+
+#define HP_IANA_MFR_ID 0x0b
+#define HP_BMC_PROD_ID 0x8201
+static int ipmi_hp_chassis_detect(struct ipmi_user *user)
+{
+ if (mfg_id == HP_IANA_MFR_ID
+ && prod_id == HP_BMC_PROD_ID
+ && ipmi_version == 1)
+ return 1;
+ return 0;
+}
+
+/*
+ * Standard chassis support
+ */
+
+#define IPMI_NETFN_CHASSIS_REQUEST 0
+#define IPMI_CHASSIS_CONTROL_CMD 0x02
+
+static int ipmi_chassis_detect(struct ipmi_user *user)
+{
+ /* Chassis support, use it. */
+ return (capabilities & 0x80);
+}
+
+static void ipmi_poweroff_chassis(struct ipmi_user *user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ unsigned char data[1];
+
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ powercyclefailed:
+ pr_info("Powering %s via IPMI chassis control command\n",
+ (poweroff_powercycle ? "cycle" : "down"));
+
+ /*
+ * Power down
+ */
+ send_msg.netfn = IPMI_NETFN_CHASSIS_REQUEST;
+ send_msg.cmd = IPMI_CHASSIS_CONTROL_CMD;
+ if (poweroff_powercycle)
+ data[0] = IPMI_CHASSIS_POWER_CYCLE;
+ else
+ data[0] = IPMI_CHASSIS_POWER_DOWN;
+ send_msg.data = data;
+ send_msg.data_len = sizeof(data);
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv) {
+ if (poweroff_powercycle) {
+ /* power cycle failed, default to power down */
+ pr_err("Unable to send chassis power cycle message, IPMI error 0x%x\n",
+ rv);
+ poweroff_powercycle = 0;
+ goto powercyclefailed;
+ }
+
+ pr_err("Unable to send chassis power down message, IPMI error 0x%x\n",
+ rv);
+ }
+}
+
+
+/* Table of possible power off functions. */
+struct poweroff_function {
+ char *platform_type;
+ int (*detect)(struct ipmi_user *user);
+ void (*poweroff_func)(struct ipmi_user *user);
+};
+
+static struct poweroff_function poweroff_functions[] = {
+ { .platform_type = "ATCA",
+ .detect = ipmi_atca_detect,
+ .poweroff_func = ipmi_poweroff_atca },
+ { .platform_type = "CPI1",
+ .detect = ipmi_cpi1_detect,
+ .poweroff_func = ipmi_poweroff_cpi1 },
+ { .platform_type = "chassis",
+ .detect = ipmi_dell_chassis_detect,
+ .poweroff_func = ipmi_poweroff_chassis },
+ { .platform_type = "chassis",
+ .detect = ipmi_hp_chassis_detect,
+ .poweroff_func = ipmi_poweroff_chassis },
+	/*
+	 * Chassis should generally be last; other things should
+	 * override it.
+	 */
+ { .platform_type = "chassis",
+ .detect = ipmi_chassis_detect,
+ .poweroff_func = ipmi_poweroff_chassis },
+};
+#define NUM_PO_FUNCS ARRAY_SIZE(poweroff_functions)
+
+
+/* Called on a powerdown request. */
+static void ipmi_poweroff_function(void)
+{
+ if (!ready)
+ return;
+
+ /* Use run-to-completion mode, since interrupts may be off. */
+ specific_poweroff_func(ipmi_user);
+}
+
+/*
+ * Wait for an IPMI interface to be installed; the first one installed
+ * will be grabbed by this code and used to perform the powerdown.
+ */
+static void ipmi_po_new_smi(int if_num, struct device *device)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ int i;
+
+ if (ready)
+ return;
+
+ if ((ifnum_to_use >= 0) && (ifnum_to_use != if_num))
+ return;
+
+ rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL,
+ &ipmi_user);
+ if (rv) {
+ pr_err("could not create IPMI user, error %d\n", rv);
+ return;
+ }
+
+ ipmi_ifnum = if_num;
+
+ /*
+	 * Do a get device id and store some results, since this is
+ * used by several functions.
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ send_msg.netfn = IPMI_NETFN_APP_REQUEST;
+ send_msg.cmd = IPMI_GET_DEVICE_ID_CMD;
+ send_msg.data = NULL;
+ send_msg.data_len = 0;
+ rv = ipmi_request_wait_for_response(ipmi_user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv) {
+ pr_err("Unable to send IPMI get device id info, IPMI error 0x%x\n",
+ rv);
+ goto out_err;
+ }
+
+ if (halt_recv_msg.msg.data_len < 12) {
+ pr_err("(chassis) IPMI get device id info too short, was %d bytes, needed %d bytes\n",
+ halt_recv_msg.msg.data_len, 12);
+ goto out_err;
+ }
+
+ mfg_id = (halt_recv_msg.msg.data[7]
+ | (halt_recv_msg.msg.data[8] << 8)
+ | (halt_recv_msg.msg.data[9] << 16));
+ prod_id = (halt_recv_msg.msg.data[10]
+ | (halt_recv_msg.msg.data[11] << 8));
+ capabilities = halt_recv_msg.msg.data[6];
+ ipmi_version = halt_recv_msg.msg.data[5];
+
+
+ /* Scan for a poweroff method */
+ for (i = 0; i < NUM_PO_FUNCS; i++) {
+ if (poweroff_functions[i].detect(ipmi_user))
+ goto found;
+ }
+
+ out_err:
+ pr_err("Unable to find a poweroff function that will work, giving up\n");
+ ipmi_destroy_user(ipmi_user);
+ return;
+
+ found:
+ pr_info("Found a %s style poweroff function\n",
+ poweroff_functions[i].platform_type);
+ specific_poweroff_func = poweroff_functions[i].poweroff_func;
+ old_poweroff_func = pm_power_off;
+ pm_power_off = ipmi_poweroff_function;
+ ready = 1;
+}
+
+static void ipmi_po_smi_gone(int if_num)
+{
+ if (!ready)
+ return;
+
+ if (ipmi_ifnum != if_num)
+ return;
+
+ ready = 0;
+ ipmi_destroy_user(ipmi_user);
+ pm_power_off = old_poweroff_func;
+}
+
+static struct ipmi_smi_watcher smi_watcher = {
+ .owner = THIS_MODULE,
+ .new_smi = ipmi_po_new_smi,
+ .smi_gone = ipmi_po_smi_gone
+};
+
+
+#ifdef CONFIG_PROC_FS
+#include <linux/sysctl.h>
+
+static struct ctl_table ipmi_table[] = {
+ { .procname = "poweroff_powercycle",
+ .data = &poweroff_powercycle,
+ .maxlen = sizeof(poweroff_powercycle),
+ .mode = 0644,
+ .proc_handler = proc_dointvec },
+ { }
+};
+
+static struct ctl_table ipmi_dir_table[] = {
+ { .procname = "ipmi",
+ .mode = 0555,
+ .child = ipmi_table },
+ { }
+};
+
+static struct ctl_table ipmi_root_table[] = {
+ { .procname = "dev",
+ .mode = 0555,
+ .child = ipmi_dir_table },
+ { }
+};
+
+static struct ctl_table_header *ipmi_table_header;
+#endif /* CONFIG_PROC_FS */
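+
+/*
+ * The tables above expose /proc/sys/dev/ipmi/poweroff_powercycle, so
+ * the behavior can be changed at run time, e.g. (illustrative usage):
+ *
+ *	echo 1 > /proc/sys/dev/ipmi/poweroff_powercycle
+ */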
+
+/*
+ * Startup and shutdown functions.
+ */
+static int __init ipmi_poweroff_init(void)
+{
+ int rv;
+
+ pr_info("Copyright (C) 2004 MontaVista Software - IPMI Powerdown via sys_reboot\n");
+
+ if (poweroff_powercycle)
+ pr_info("Power cycle is enabled\n");
+
+#ifdef CONFIG_PROC_FS
+ ipmi_table_header = register_sysctl_table(ipmi_root_table);
+ if (!ipmi_table_header) {
+ pr_err("Unable to register powercycle sysctl\n");
+ rv = -ENOMEM;
+ goto out_err;
+ }
+#endif
+
+ rv = ipmi_smi_watcher_register(&smi_watcher);
+
+#ifdef CONFIG_PROC_FS
+ if (rv) {
+ unregister_sysctl_table(ipmi_table_header);
+ pr_err("Unable to register SMI watcher: %d\n", rv);
+ goto out_err;
+ }
+
+ out_err:
+#endif
+ return rv;
+}
+
+#ifdef MODULE
+static void __exit ipmi_poweroff_cleanup(void)
+{
+ int rv;
+
+#ifdef CONFIG_PROC_FS
+ unregister_sysctl_table(ipmi_table_header);
+#endif
+
+ ipmi_smi_watcher_unregister(&smi_watcher);
+
+ if (ready) {
+ rv = ipmi_destroy_user(ipmi_user);
+ if (rv)
+ pr_err("could not cleanup the IPMI user: 0x%x\n", rv);
+ pm_power_off = old_poweroff_func;
+ }
+}
+module_exit(ipmi_poweroff_cleanup);
+#endif
+
+module_init(ipmi_poweroff_init);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("IPMI Poweroff extension to sys_reboot");
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
new file mode 100644
index 000000000..a7ead2a4c
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ipmi_si.h
+ *
+ * Interface from the device-specific interfaces (OF, DMI, ACPI, PCI,
+ * etc) to the base ipmi system interface code.
+ */
+
+#ifndef __IPMI_SI_H__
+#define __IPMI_SI_H__
+
+#include <linux/ipmi.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#define SI_DEVICE_NAME "ipmi_si"
+
+#define DEFAULT_REGSPACING 1
+#define DEFAULT_REGSIZE 1
+
+/* Numbers in this enumerator should be mapped to si_to_str[] */
+enum si_type {
+ SI_TYPE_INVALID, SI_KCS, SI_SMIC, SI_BT, SI_TYPE_MAX
+};
+
+/* Array is defined in the ipmi_si_intf.c */
+extern const char *const si_to_str[];
+
+enum ipmi_addr_space {
+ IPMI_IO_ADDR_SPACE, IPMI_MEM_ADDR_SPACE
+};
+
+/*
+ * The structure for doing I/O in the state machine. The state
+ * machine doesn't have the actual I/O routines, they are done through
+ * this interface.
+ */
+struct si_sm_io {
+ unsigned char (*inputb)(const struct si_sm_io *io, unsigned int offset);
+ void (*outputb)(const struct si_sm_io *io,
+ unsigned int offset,
+ unsigned char b);
+
+ /*
+ * Generic info used by the actual handling routines, the
+ * state machine shouldn't touch these.
+ */
+ void __iomem *addr;
+ unsigned int regspacing;
+ unsigned int regsize;
+ unsigned int regshift;
+ enum ipmi_addr_space addr_space;
+ unsigned long addr_data;
+ enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
+ union ipmi_smi_info_union addr_info;
+
+ int (*io_setup)(struct si_sm_io *info);
+ void (*io_cleanup)(struct si_sm_io *info);
+ unsigned int io_size;
+
+ int irq;
+ int (*irq_setup)(struct si_sm_io *io);
+ void *irq_handler_data;
+ void (*irq_cleanup)(struct si_sm_io *io);
+
+ u8 slave_addr;
+ enum si_type si_type;
+ struct device *dev;
+};
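+
+/*
+ * A minimal port-I/O implementation of the inputb hook might look like
+ * this (an illustrative sketch of how the indirection is used; the
+ * actual implementations live in ipmi_si_port_io.c and
+ * ipmi_si_mem_io.c):
+ *
+ *	static unsigned char port_inb(const struct si_sm_io *io,
+ *				      unsigned int offset)
+ *	{
+ *		return inb(io->addr_data + (offset * io->regspacing));
+ *	}
+ */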
+
+int ipmi_si_add_smi(struct si_sm_io *io);
+irqreturn_t ipmi_si_irq_handler(int irq, void *data);
+void ipmi_irq_start_cleanup(struct si_sm_io *io);
+int ipmi_std_irq_setup(struct si_sm_io *io);
+void ipmi_irq_finish_setup(struct si_sm_io *io);
+void ipmi_si_remove_by_dev(struct device *dev);
+struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
+ unsigned long addr);
+void ipmi_hardcode_init(void);
+void ipmi_si_hardcode_exit(void);
+void ipmi_si_hotmod_exit(void);
+int ipmi_si_hardcode_match(int addr_space, unsigned long addr);
+void ipmi_si_platform_init(void);
+void ipmi_si_platform_shutdown(void);
+void ipmi_remove_platform_device_by_name(char *name);
+
+extern struct platform_driver ipmi_platform_driver;
+
+#ifdef CONFIG_PCI
+void ipmi_si_pci_init(void);
+void ipmi_si_pci_shutdown(void);
+#else
+static inline void ipmi_si_pci_init(void) { }
+static inline void ipmi_si_pci_shutdown(void) { }
+#endif
+#ifdef CONFIG_PARISC
+void ipmi_si_parisc_init(void);
+void ipmi_si_parisc_shutdown(void);
+#else
+static inline void ipmi_si_parisc_init(void) { }
+static inline void ipmi_si_parisc_shutdown(void) { }
+#endif
+
+int ipmi_si_port_setup(struct si_sm_io *io);
+int ipmi_si_mem_setup(struct si_sm_io *io);
+
+#endif /* __IPMI_SI_H__ */
diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c
new file mode 100644
index 000000000..ed5e91b1e
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_hardcode.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#define pr_fmt(fmt) "ipmi_hardcode: " fmt
+
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include "ipmi_si.h"
+#include "ipmi_plat_data.h"
+
+/*
+ * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
+ * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_PARMS.
+ */
+
+#define SI_MAX_PARMS 4
+
+#define MAX_SI_TYPE_STR 30
+static char si_type_str[MAX_SI_TYPE_STR] __initdata;
+static unsigned long addrs[SI_MAX_PARMS];
+static unsigned int num_addrs;
+static unsigned int ports[SI_MAX_PARMS];
+static unsigned int num_ports;
+static int irqs[SI_MAX_PARMS] __initdata;
+static unsigned int num_irqs __initdata;
+static int regspacings[SI_MAX_PARMS] __initdata;
+static unsigned int num_regspacings __initdata;
+static int regsizes[SI_MAX_PARMS] __initdata;
+static unsigned int num_regsizes __initdata;
+static int regshifts[SI_MAX_PARMS] __initdata;
+static unsigned int num_regshifts __initdata;
+static int slave_addrs[SI_MAX_PARMS] __initdata;
+static unsigned int num_slave_addrs __initdata;
+
+module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
+MODULE_PARM_DESC(type,
+ "Defines the type of each interface, each interface separated by commas. The types are 'kcs', 'smic', and 'bt'. For example si_type=kcs,bt will set the first interface to kcs and the second to bt");
+module_param_hw_array(addrs, ulong, iomem, &num_addrs, 0);
+MODULE_PARM_DESC(addrs,
+ "Sets the memory address of each interface, the addresses separated by commas. Only use if an interface is in memory. Otherwise, set it to zero or leave it blank.");
+module_param_hw_array(ports, uint, ioport, &num_ports, 0);
+MODULE_PARM_DESC(ports,
+ "Sets the port address of each interface, the addresses separated by commas. Only use if an interface is a port. Otherwise, set it to zero or leave it blank.");
+module_param_hw_array(irqs, int, irq, &num_irqs, 0);
+MODULE_PARM_DESC(irqs,
+ "Sets the interrupt of each interface, the addresses separated by commas. Only use if an interface has an interrupt. Otherwise, set it to zero or leave it blank.");
+module_param_hw_array(regspacings, int, other, &num_regspacings, 0);
+MODULE_PARM_DESC(regspacings,
+ "The number of bytes between the start address and each successive register used by the interface. For instance, if the start address is 0xca2 and the spacing is 2, then the second address is at 0xca4. Defaults to 1.");
+module_param_hw_array(regsizes, int, other, &num_regsizes, 0);
+MODULE_PARM_DESC(regsizes,
+ "The size of the specific IPMI register in bytes. This should generally be 1, 2, 4, or 8 for an 8-bit, 16-bit, 32-bit, or 64-bit register. Use this if you the 8-bit IPMI register has to be read from a larger register.");
+module_param_hw_array(regshifts, int, other, &num_regshifts, 0);
+MODULE_PARM_DESC(regshifts,
+ "The amount to shift the data read from the. IPMI register, in bits. For instance, if the data is read from a 32-bit word and the IPMI data is in bit 8-15, then the shift would be 8");
+module_param_hw_array(slave_addrs, int, other, &num_slave_addrs, 0);
+MODULE_PARM_DESC(slave_addrs,
+ "Set the default IPMB slave address for the controller. Normally this is 0x20, but can be overridden by this parm. This is an array indexed by interface number.");
+
+static void __init ipmi_hardcode_init_one(const char *si_type_str,
+ unsigned int i,
+ unsigned long addr,
+ enum ipmi_addr_space addr_space)
+{
+ struct ipmi_plat_data p;
+ int t;
+
+ memset(&p, 0, sizeof(p));
+
+ p.iftype = IPMI_PLAT_IF_SI;
+ if (!si_type_str || !*si_type_str) {
+ p.type = SI_KCS;
+ } else {
+ t = match_string(si_to_str, -1, si_type_str);
+ if (t < 0) {
+ pr_warn("Interface type specified for interface %d, was invalid: %s\n",
+ i, si_type_str);
+ return;
+ }
+ p.type = t;
+ }
+
+ p.regsize = regsizes[i];
+ p.slave_addr = slave_addrs[i];
+ p.addr_source = SI_HARDCODED;
+ p.regshift = regshifts[i];
+ p.addr = addr;
+ p.space = addr_space;
+
+ ipmi_platform_add("hardcode-ipmi-si", i, &p);
+}
+
+void __init ipmi_hardcode_init(void)
+{
+ unsigned int i;
+ char *str;
+ char *si_type[SI_MAX_PARMS];
+
+ memset(si_type, 0, sizeof(si_type));
+
+ /* Parse out the si_type string into its components. */
+ str = si_type_str;
+ if (*str != '\0') {
+ for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
+ si_type[i] = str;
+ str = strchr(str, ',');
+ if (str) {
+ *str = '\0';
+ str++;
+ } else {
+ break;
+ }
+ }
+ }
+
+ for (i = 0; i < SI_MAX_PARMS; i++) {
+ if (i < num_ports && ports[i])
+ ipmi_hardcode_init_one(si_type[i], i, ports[i],
+ IPMI_IO_ADDR_SPACE);
+ if (i < num_addrs && addrs[i])
+ ipmi_hardcode_init_one(si_type[i], i, addrs[i],
+ IPMI_MEM_ADDR_SPACE);
+ }
+}
+
+void ipmi_si_hardcode_exit(void)
+{
+ ipmi_remove_platform_device_by_name("hardcode-ipmi-si");
+}
+
+/*
+ * Returns true if the given address exists as a hardcoded address,
+ * false if not.
+ */
+int ipmi_si_hardcode_match(int addr_space, unsigned long addr)
+{
+ unsigned int i;
+
+ if (addr_space == IPMI_IO_ADDR_SPACE) {
+ for (i = 0; i < num_ports; i++) {
+ if (ports[i] == addr)
+ return 1;
+ }
+ } else {
+ for (i = 0; i < num_addrs; i++) {
+ if (addrs[i] == addr)
+ return 1;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/char/ipmi/ipmi_si_hotmod.c b/drivers/char/ipmi/ipmi_si_hotmod.c
new file mode 100644
index 000000000..6b12a83cc
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_hotmod.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_si_hotmod.c
+ *
+ * Handling for dynamically adding/removing IPMI devices through
+ * a module parameter (and thus sysfs).
+ */
+
+#define pr_fmt(fmt) "ipmi_hotmod: " fmt
+
+#include <linux/moduleparam.h>
+#include <linux/ipmi.h>
+#include <linux/atomic.h>
+#include "ipmi_si.h"
+#include "ipmi_plat_data.h"
+
+static int hotmod_handler(const char *val, const struct kernel_param *kp);
+
+module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
+MODULE_PARM_DESC(hotmod,
+ "Add and remove interfaces. See Documentation/driver-api/ipmi.rst in the kernel sources for the gory details.");
+
+/*
+ * Parms come in as <op1>[:op2[:op3...]]. ops are:
+ * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
+ * Options are:
+ * rsp=<regspacing>
+ * rsi=<regsize>
+ * rsh=<regshift>
+ * irq=<irq>
+ * ipmb=<ipmb addr>
+ */
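+/*
+ * A hypothetical example (module path and addresses assumed, for
+ * illustration only):
+ *
+ *   echo "add,kcs,i/o,0xca2,rsp=1,ipmb=0x20" \
+ *       > /sys/module/ipmi_si/parameters/hotmod
+ *
+ * would add a KCS interface at I/O port 0xca2 with register spacing 1
+ * and IPMB address 0x20; "remove,kcs,i/o,0xca2" would take it away.
+ */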
+enum hotmod_op { HM_ADD, HM_REMOVE };
+struct hotmod_vals {
+ const char *name;
+ const int val;
+};
+
+static const struct hotmod_vals hotmod_ops[] = {
+ { "add", HM_ADD },
+ { "remove", HM_REMOVE },
+ { NULL }
+};
+
+static const struct hotmod_vals hotmod_si[] = {
+ { "kcs", SI_KCS },
+ { "smic", SI_SMIC },
+ { "bt", SI_BT },
+ { NULL }
+};
+
+static const struct hotmod_vals hotmod_as[] = {
+ { "mem", IPMI_MEM_ADDR_SPACE },
+ { "i/o", IPMI_IO_ADDR_SPACE },
+ { NULL }
+};
+
+static int parse_str(const struct hotmod_vals *v, unsigned int *val, char *name,
+ const char **curr)
+{
+ char *s;
+ int i;
+
+ s = strchr(*curr, ',');
+ if (!s) {
+ pr_warn("No hotmod %s given\n", name);
+ return -EINVAL;
+ }
+ *s = '\0';
+ s++;
+ for (i = 0; v[i].name; i++) {
+ if (strcmp(*curr, v[i].name) == 0) {
+ *val = v[i].val;
+ *curr = s;
+ return 0;
+ }
+ }
+
+ pr_warn("Invalid hotmod %s '%s'\n", name, *curr);
+ return -EINVAL;
+}
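+
+/*
+ * Worked example of the parser above: with *curr pointing at
+ * "add,kcs,i/o,0xca2", parse_str(hotmod_ops, &ival, "operation", &curr)
+ * stores HM_ADD in ival and advances *curr to "kcs,i/o,0xca2".
+ */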
+
+static int check_hotmod_int_op(const char *curr, const char *option,
+ const char *name, unsigned int *val)
+{
+ char *n;
+
+ if (strcmp(curr, name) == 0) {
+ if (!option) {
+ pr_warn("No option given for '%s'\n", curr);
+ return -EINVAL;
+ }
+ *val = simple_strtoul(option, &n, 0);
+ if ((*n != '\0') || (*option == '\0')) {
+ pr_warn("Bad option given for '%s'\n", curr);
+ return -EINVAL;
+ }
+ return 1;
+ }
+ return 0;
+}
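+
+/*
+ * For example, after the '=' split in parse_hotmod_str() below, an
+ * "rsp=2" token yields curr = "rsp" and o = "2";
+ * check_hotmod_int_op(curr, o, "rsp", &h->regspacing) then stores 2
+ * and returns 1, while a non-matching option name returns 0.
+ */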
+
+static int parse_hotmod_str(const char *curr, enum hotmod_op *op,
+ struct ipmi_plat_data *h)
+{
+ char *s, *o;
+ int rv;
+ unsigned int ival;
+
+ h->iftype = IPMI_PLAT_IF_SI;
+ rv = parse_str(hotmod_ops, &ival, "operation", &curr);
+ if (rv)
+ return rv;
+ *op = ival;
+
+ rv = parse_str(hotmod_si, &ival, "interface type", &curr);
+ if (rv)
+ return rv;
+ h->type = ival;
+
+ rv = parse_str(hotmod_as, &ival, "address space", &curr);
+ if (rv)
+ return rv;
+ h->space = ival;
+
+ s = strchr(curr, ',');
+ if (s) {
+ *s = '\0';
+ s++;
+ }
+ rv = kstrtoul(curr, 0, &h->addr);
+ if (rv) {
+ pr_warn("Invalid hotmod address '%s': %d\n", curr, rv);
+ return rv;
+ }
+
+ while (s) {
+ curr = s;
+ s = strchr(curr, ',');
+ if (s) {
+ *s = '\0';
+ s++;
+ }
+ o = strchr(curr, '=');
+ if (o) {
+ *o = '\0';
+ o++;
+ }
+ rv = check_hotmod_int_op(curr, o, "rsp", &h->regspacing);
+ if (rv < 0)
+ return rv;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "rsi", &h->regsize);
+ if (rv < 0)
+ return rv;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "rsh", &h->regshift);
+ if (rv < 0)
+ return rv;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "irq", &h->irq);
+ if (rv < 0)
+ return rv;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "ipmb", &h->slave_addr);
+ if (rv < 0)
+ return rv;
+ else if (rv)
+ continue;
+
+ pr_warn("Invalid hotmod option '%s'\n", curr);
+ return -EINVAL;
+ }
+
+ h->addr_source = SI_HOTMOD;
+ return 0;
+}
+
+static atomic_t hotmod_nr;
+
+static int hotmod_handler(const char *val, const struct kernel_param *kp)
+{
+ int rv;
+ struct ipmi_plat_data h;
+ char *str, *curr, *next;
+
+ str = kstrdup(val, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+
+ /* Kill any trailing spaces, as we can get a "\n" from echo. */
+ for (curr = strstrip(str); curr; curr = next) {
+ enum hotmod_op op;
+
+ next = strchr(curr, ':');
+ if (next) {
+ *next = '\0';
+ next++;
+ }
+
+ memset(&h, 0, sizeof(h));
+ rv = parse_hotmod_str(curr, &op, &h);
+ if (rv)
+ goto out;
+
+ if (op == HM_ADD) {
+ ipmi_platform_add("hotmod-ipmi-si",
+ atomic_inc_return(&hotmod_nr),
+ &h);
+ } else {
+ struct device *dev;
+
+ dev = ipmi_si_remove_by_data(h.space, h.type, h.addr);
+ if (dev && dev_is_platform(dev)) {
+ struct platform_device *pdev;
+
+ pdev = to_platform_device(dev);
+ if (strcmp(pdev->name, "hotmod-ipmi-si") == 0)
+ platform_device_unregister(pdev);
+ }
+ put_device(dev);
+ }
+ }
+ rv = strlen(val);
+out:
+ kfree(str);
+ return rv;
+}
+
+void ipmi_si_hotmod_exit(void)
+{
+ ipmi_remove_platform_device_by_name("hotmod-ipmi-si");
+}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
new file mode 100644
index 000000000..5cd031f3f
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -0,0 +1,2307 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_si.c
+ *
+ * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
+ * BT).
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
+ */
+
+/*
+ * This file holds the "policy" for the interface to the SMI state
+ * machine. It does the configuration, handles timers and interrupts,
+ * and drives the real SMI state machine.
+ */
+
+#define pr_fmt(fmt) "ipmi_si: " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <asm/irq.h>
+#include <linux/interrupt.h>
+#include <linux/rcupdate.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include "ipmi_si.h"
+#include "ipmi_si_sm.h"
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+/* Measure times between events in the driver. */
+#undef DEBUG_TIMING
+
+/* Call every 10 ms. */
+#define SI_TIMEOUT_TIME_USEC 10000
+#define SI_USEC_PER_JIFFY (1000000/HZ)
+#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
+#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a
+ short timeout */
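+
+/*
+ * Worked example of the arithmetic above: with HZ=250,
+ * SI_USEC_PER_JIFFY is 1000000/250 = 4000, so SI_TIMEOUT_JIFFIES is
+ * 10000/4000 = 2 jiffies (integer division), about 8 ms; with HZ=100
+ * or HZ=1000 it works out to the full 10 ms.
+ */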
+
+enum si_intf_state {
+ SI_NORMAL,
+ SI_GETTING_FLAGS,
+ SI_GETTING_EVENTS,
+ SI_CLEARING_FLAGS,
+ SI_GETTING_MESSAGES,
+ SI_CHECKING_ENABLES,
+ SI_SETTING_ENABLES
+ /* FIXME - add watchdog stuff. */
+};
+
+/* Some BT-specific defines we need here. */
+#define IPMI_BT_INTMASK_REG 2
+#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
+#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
+
+/* 'invalid' to allow a firmware-specified interface to be disabled */
+const char *const si_to_str[] = { "invalid", "kcs", "smic", "bt", NULL };
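+/*
+ * si_to_str is indexed by the si_type values used throughout this
+ * file, so, assuming the usual enum ordering, si_to_str[SI_KCS] is
+ * "kcs" and a successful match_string(si_to_str, -1, "kcs") returns
+ * SI_KCS.
+ */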
+
+static bool initialized;
+
+/*
+ * Indexes into stats[] in smi_info below.
+ */
+enum si_stat_indexes {
+ /*
+ * Number of times the driver requested a timer while an operation
+ * was in progress.
+ */
+ SI_STAT_short_timeouts = 0,
+
+ /*
+ * Number of times the driver requested a timer while nothing was in
+ * progress.
+ */
+ SI_STAT_long_timeouts,
+
+ /* Number of times the interface was idle while being polled. */
+ SI_STAT_idles,
+
+ /* Number of interrupts the driver handled. */
+ SI_STAT_interrupts,
+
+ /* Number of times the driver got an ATTN from the hardware. */
+ SI_STAT_attentions,
+
+ /* Number of times the driver requested flags from the hardware. */
+ SI_STAT_flag_fetches,
+
+ /* Number of times the hardware didn't follow the state machine. */
+ SI_STAT_hosed_count,
+
+ /* Number of completed messages. */
+ SI_STAT_complete_transactions,
+
+ /* Number of IPMI events received from the hardware. */
+ SI_STAT_events,
+
+ /* Number of watchdog pretimeouts. */
+ SI_STAT_watchdog_pretimeouts,
+
+ /* Number of asynchronous messages received. */
+ SI_STAT_incoming_messages,
+
+ /* This *must* remain last, add new values above this. */
+ SI_NUM_STATS
+};
+
+struct smi_info {
+ int si_num;
+ struct ipmi_smi *intf;
+ struct si_sm_data *si_sm;
+ const struct si_sm_handlers *handlers;
+ spinlock_t si_lock;
+ struct ipmi_smi_msg *waiting_msg;
+ struct ipmi_smi_msg *curr_msg;
+ enum si_intf_state si_state;
+
+ /*
+ * Used to handle the various types of I/O that can occur with
+ * IPMI
+ */
+ struct si_sm_io io;
+
+ /*
+ * Per-OEM handler, called from handle_flags(). Returns 1
+ * when handle_flags() needs to be re-run or 0 indicating it
+ * set si_state itself.
+ */
+ int (*oem_data_avail_handler)(struct smi_info *smi_info);
+
+ /*
+ * Flags from the last GET_MSG_FLAGS command, used when an ATTN
+ * is set to hold the flags until we are done handling everything
+ * from the flags.
+ */
+#define RECEIVE_MSG_AVAIL 0x01
+#define EVENT_MSG_BUFFER_FULL 0x02
+#define WDT_PRE_TIMEOUT_INT 0x08
+#define OEM0_DATA_AVAIL 0x20
+#define OEM1_DATA_AVAIL 0x40
+#define OEM2_DATA_AVAIL 0x80
+#define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
+ OEM1_DATA_AVAIL | \
+ OEM2_DATA_AVAIL)
+ unsigned char msg_flags;
+
+ /* Does the BMC have an event buffer? */
+ bool has_event_buffer;
+
+ /*
+ * If set to true, this will request events the next time the
+ * state machine is idle.
+ */
+ atomic_t req_events;
+
+ /*
+ * If true, run the state machine to completion on every send
+ * call. Generally used after a panic to make sure stuff goes
+ * out.
+ */
+ bool run_to_completion;
+
+ /* The timer for this si. */
+ struct timer_list si_timer;
+
+ /* This flag is set if the timer can be set */
+ bool timer_can_start;
+
+ /* This flag is set if the timer is running (timer_pending() isn't enough) */
+ bool timer_running;
+
+ /* The time (in jiffies) the last timeout occurred at. */
+ unsigned long last_timeout_jiffies;
+
+ /* Are we waiting for the events, pretimeouts, received msgs? */
+ atomic_t need_watch;
+
+ /*
+ * The driver will disable interrupts when it gets into a
+ * situation where it cannot handle messages due to lack of
+ * memory. Once that situation clears up, it will re-enable
+ * interrupts.
+ */
+ bool interrupt_disabled;
+
+ /*
+ * Does the BMC support events?
+ */
+ bool supports_event_msg_buff;
+
+ /*
+ * Can we disable the receive irq bit in the global enables?
+ * There are currently two forms of brokenness: some systems
+ * cannot disable the bit (which is technically within the
+ * spec but a bad idea) and some systems have the bit forced
+ * to zero even though interrupts work (which is clearly
+ * outside the spec). The next bool tells which form of
+ * brokenness is present.
+ */
+ bool cannot_disable_irq;
+
+ /*
+ * Some systems are broken and cannot set the irq enable
+ * bit, even if they support interrupts.
+ */
+ bool irq_enable_broken;
+
+ /* Is the driver in maintenance mode? */
+ bool in_maintenance_mode;
+
+ /*
+ * Did we get an attention that we did not handle?
+ */
+ bool got_attn;
+
+ /* From the get device id response... */
+ struct ipmi_device_id device_id;
+
+ /* Have we added the device group to the device? */
+ bool dev_group_added;
+
+ /* Counters and things for the proc filesystem. */
+ atomic_t stats[SI_NUM_STATS];
+
+ struct task_struct *thread;
+
+ struct list_head link;
+};
+
+#define smi_inc_stat(smi, stat) \
+ atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
+#define smi_get_stat(smi, stat) \
+ ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
+
+#define IPMI_MAX_INTFS 4
+static int force_kipmid[IPMI_MAX_INTFS];
+static int num_force_kipmid;
+
+static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
+static int num_max_busy_us;
+
+static bool unload_when_empty = true;
+
+static int try_smi_init(struct smi_info *smi);
+static void cleanup_one_si(struct smi_info *smi_info);
+static void cleanup_ipmi_si(void);
+
+#ifdef DEBUG_TIMING
+void debug_timestamp(struct smi_info *smi_info, char *msg)
+{
+ struct timespec64 t;
+
+ ktime_get_ts64(&t);
+ dev_dbg(smi_info->io.dev, "**%s: %lld.%9.9ld\n",
+ msg, t.tv_sec, t.tv_nsec);
+}
+#else
+#define debug_timestamp(smi_info, x)
+#endif
+
+static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
+static int register_xaction_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&xaction_notifier_list, nb);
+}
+
+static void deliver_recv_msg(struct smi_info *smi_info,
+ struct ipmi_smi_msg *msg)
+{
+ /* Deliver the message to the upper layer. */
+ ipmi_smi_msg_received(smi_info->intf, msg);
+}
+
+static void return_hosed_msg(struct smi_info *smi_info, int cCode)
+{
+ struct ipmi_smi_msg *msg = smi_info->curr_msg;
+
+ if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
+ cCode = IPMI_ERR_UNSPECIFIED;
+ /* else use it as is */
+
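+ /*
+ * The netfn lives in the top six bits of byte 0 and a response
+ * netfn is the request netfn plus one, so ORing in 4 turns the
+ * request byte into its response form (e.g. 0x18, an APP
+ * request, becomes 0x1c, an APP response).
+ */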
+ /* Make it a response */
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = cCode;
+ msg->rsp_size = 3;
+
+ smi_info->curr_msg = NULL;
+ deliver_recv_msg(smi_info, msg);
+}
+
+static enum si_sm_result start_next_msg(struct smi_info *smi_info)
+{
+ int rv;
+
+ if (!smi_info->waiting_msg) {
+ smi_info->curr_msg = NULL;
+ rv = SI_SM_IDLE;
+ } else {
+ int err;
+
+ smi_info->curr_msg = smi_info->waiting_msg;
+ smi_info->waiting_msg = NULL;
+ debug_timestamp(smi_info, "Start2");
+ err = atomic_notifier_call_chain(&xaction_notifier_list,
+ 0, smi_info);
+ if (err & NOTIFY_STOP_MASK) {
+ rv = SI_SM_CALL_WITHOUT_DELAY;
+ goto out;
+ }
+ err = smi_info->handlers->start_transaction(
+ smi_info->si_sm,
+ smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ if (err)
+ return_hosed_msg(smi_info, err);
+
+ rv = SI_SM_CALL_WITHOUT_DELAY;
+ }
+out:
+ return rv;
+}
+
+static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+{
+ if (!smi_info->timer_can_start)
+ return;
+ smi_info->last_timeout_jiffies = jiffies;
+ mod_timer(&smi_info->si_timer, new_val);
+ smi_info->timer_running = true;
+}
+
+/*
+ * Start a new message and (re)start the timer and thread.
+ */
+static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
+ unsigned int size)
+{
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+ if (smi_info->thread)
+ wake_up_process(smi_info->thread);
+
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
+}
+
+static void start_check_enables(struct smi_info *smi_info)
+{
+ unsigned char msg[2];
+
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+
+ start_new_msg(smi_info, msg, 2);
+ smi_info->si_state = SI_CHECKING_ENABLES;
+}
+
+static void start_clear_flags(struct smi_info *smi_info)
+{
+ unsigned char msg[3];
+
+ /* Make sure the watchdog pre-timeout flag is not set at startup. */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+ msg[2] = WDT_PRE_TIMEOUT_INT;
+
+ start_new_msg(smi_info, msg, 3);
+ smi_info->si_state = SI_CLEARING_FLAGS;
+}
+
+static void start_getting_msg_queue(struct smi_info *smi_info)
+{
+ smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
+ smi_info->curr_msg->data_size = 2;
+
+ start_new_msg(smi_info, smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_MESSAGES;
+}
+
+static void start_getting_events(struct smi_info *smi_info)
+{
+ smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+ smi_info->curr_msg->data_size = 2;
+
+ start_new_msg(smi_info, smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_EVENTS;
+}
+
+/*
+ * When we have a situation where we run out of memory and cannot
+ * allocate messages, we just leave them in the BMC and run the system
+ * polled until we can allocate some memory. Once we have some
+ * memory, we will re-enable the interrupt.
+ *
+ * Note that we cannot just use disable_irq(), since the interrupt may
+ * be shared.
+ */
+static inline bool disable_si_irq(struct smi_info *smi_info)
+{
+ if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
+ smi_info->interrupt_disabled = true;
+ start_check_enables(smi_info);
+ return true;
+ }
+ return false;
+}
+
+static inline bool enable_si_irq(struct smi_info *smi_info)
+{
+ if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
+ smi_info->interrupt_disabled = false;
+ start_check_enables(smi_info);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Allocate a message. If unable to allocate, start the interrupt
+ * disable process and return NULL. If able to allocate but
+ * interrupts are disabled, free the message and return NULL after
+ * starting the interrupt enable process.
+ */
+static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
+{
+ struct ipmi_smi_msg *msg;
+
+ msg = ipmi_alloc_smi_msg();
+ if (!msg) {
+ if (!disable_si_irq(smi_info))
+ smi_info->si_state = SI_NORMAL;
+ } else if (enable_si_irq(smi_info)) {
+ ipmi_free_smi_msg(msg);
+ msg = NULL;
+ }
+ return msg;
+}
+
+static void handle_flags(struct smi_info *smi_info)
+{
+retry:
+ if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
+ /* Watchdog pre-timeout */
+ smi_inc_stat(smi_info, watchdog_pretimeouts);
+
+ start_clear_flags(smi_info);
+ smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
+ ipmi_smi_watchdog_pretimeout(smi_info->intf);
+ } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
+ /* Messages available. */
+ smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+ if (!smi_info->curr_msg)
+ return;
+
+ start_getting_msg_queue(smi_info);
+ } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
+ /* Events available. */
+ smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+ if (!smi_info->curr_msg)
+ return;
+
+ start_getting_events(smi_info);
+ } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
+ smi_info->oem_data_avail_handler) {
+ if (smi_info->oem_data_avail_handler(smi_info))
+ goto retry;
+ } else
+ smi_info->si_state = SI_NORMAL;
+}
+
+/*
+ * Global enables we care about.
+ */
+#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
+ IPMI_BMC_EVT_MSG_INTR)
+
+static u8 current_global_enables(struct smi_info *smi_info, u8 base,
+ bool *irq_on)
+{
+ u8 enables = 0;
+
+ if (smi_info->supports_event_msg_buff)
+ enables |= IPMI_BMC_EVT_MSG_BUFF;
+
+ if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
+ smi_info->cannot_disable_irq) &&
+ !smi_info->irq_enable_broken)
+ enables |= IPMI_BMC_RCV_MSG_INTR;
+
+ if (smi_info->supports_event_msg_buff &&
+ smi_info->io.irq && !smi_info->interrupt_disabled &&
+ !smi_info->irq_enable_broken)
+ enables |= IPMI_BMC_EVT_MSG_INTR;
+
+ *irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);
+
+ return enables;
+}
+
+static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
+{
+ u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);
+
+ irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;
+
+ if ((bool)irqstate == irq_on)
+ return;
+
+ if (irq_on)
+ smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
+ IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+ else
+ smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
+}
+
+static void handle_transaction_done(struct smi_info *smi_info)
+{
+ struct ipmi_smi_msg *msg;
+
+ debug_timestamp(smi_info, "Done");
+ switch (smi_info->si_state) {
+ case SI_NORMAL:
+ if (!smi_info->curr_msg)
+ break;
+
+ smi_info->curr_msg->rsp_size
+ = smi_info->handlers->get_result(
+ smi_info->si_sm,
+ smi_info->curr_msg->rsp,
+ IPMI_MAX_MSG_LENGTH);
+
+ /*
+ * Do this here because deliver_recv_msg() releases the
+ * lock, and a new message can be put in during the
+ * time the lock is released.
+ */
+ msg = smi_info->curr_msg;
+ smi_info->curr_msg = NULL;
+ deliver_recv_msg(smi_info, msg);
+ break;
+
+ case SI_GETTING_FLAGS:
+ {
+ unsigned char msg[4];
+ unsigned int len;
+
+ /* We got the flags from the SMI, now handle them. */
+ len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+ if (msg[2] != 0) {
+ /* Error fetching flags, just give up for now. */
+ smi_info->si_state = SI_NORMAL;
+ } else if (len < 4) {
+ /*
+ * Hmm, no flags. That's technically illegal, but
+ * don't use uninitialized data.
+ */
+ smi_info->si_state = SI_NORMAL;
+ } else {
+ smi_info->msg_flags = msg[3];
+ handle_flags(smi_info);
+ }
+ break;
+ }
+
+ case SI_CLEARING_FLAGS:
+ {
+ unsigned char msg[3];
+
+ /* We cleared the flags. */
+ smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
+ if (msg[2] != 0) {
+ /* Error clearing flags */
+ dev_warn_ratelimited(smi_info->io.dev,
+ "Error clearing flags: %2.2x\n", msg[2]);
+ }
+ smi_info->si_state = SI_NORMAL;
+ break;
+ }
+
+ case SI_GETTING_EVENTS:
+ {
+ smi_info->curr_msg->rsp_size
+ = smi_info->handlers->get_result(
+ smi_info->si_sm,
+ smi_info->curr_msg->rsp,
+ IPMI_MAX_MSG_LENGTH);
+
+ /*
+ * Do this here because deliver_recv_msg() releases the
+ * lock, and a new message can be put in during the
+ * time the lock is released.
+ */
+ msg = smi_info->curr_msg;
+ smi_info->curr_msg = NULL;
+ if (msg->rsp[2] != 0) {
+ /* Error getting event, probably done. */
+ msg->done(msg);
+
+ /* Take off the event flag. */
+ smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+ handle_flags(smi_info);
+ } else {
+ smi_inc_stat(smi_info, events);
+
+ /*
+ * Do this before we deliver the message
+ * because delivering the message releases the
+ * lock and something else can mess with the
+ * state.
+ */
+ handle_flags(smi_info);
+
+ deliver_recv_msg(smi_info, msg);
+ }
+ break;
+ }
+
+ case SI_GETTING_MESSAGES:
+ {
+ smi_info->curr_msg->rsp_size
+ = smi_info->handlers->get_result(
+ smi_info->si_sm,
+ smi_info->curr_msg->rsp,
+ IPMI_MAX_MSG_LENGTH);
+
+ /*
+ * Do this here because deliver_recv_msg() releases the
+ * lock, and a new message can be put in during the
+ * time the lock is released.
+ */
+ msg = smi_info->curr_msg;
+ smi_info->curr_msg = NULL;
+ if (msg->rsp[2] != 0) {
+ /* Error getting event, probably done. */
+ msg->done(msg);
+
+ /* Take off the msg flag. */
+ smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+ handle_flags(smi_info);
+ } else {
+ smi_inc_stat(smi_info, incoming_messages);
+
+ /*
+ * Do this before we deliver the message
+ * because delivering the message releases the
+ * lock and something else can mess with the
+ * state.
+ */
+ handle_flags(smi_info);
+
+ deliver_recv_msg(smi_info, msg);
+ }
+ break;
+ }
+
+ case SI_CHECKING_ENABLES:
+ {
+ unsigned char msg[4];
+ u8 enables;
+ bool irq_on;
+
+ /* We got the flags from the SMI, now handle them. */
+ smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+ if (msg[2] != 0) {
+ dev_warn_ratelimited(smi_info->io.dev,
+ "Couldn't get irq info: %x,\n"
+ "Maybe ok, but ipmi might run very slowly.\n",
+ msg[2]);
+ smi_info->si_state = SI_NORMAL;
+ break;
+ }
+ enables = current_global_enables(smi_info, 0, &irq_on);
+ if (smi_info->io.si_type == SI_BT)
+ /* BT has its own interrupt enable bit. */
+ check_bt_irq(smi_info, irq_on);
+ if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
+ /* Enables are not correct, fix them. */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm, msg, 3);
+ smi_info->si_state = SI_SETTING_ENABLES;
+ } else if (smi_info->supports_event_msg_buff) {
+ smi_info->curr_msg = ipmi_alloc_smi_msg();
+ if (!smi_info->curr_msg) {
+ smi_info->si_state = SI_NORMAL;
+ break;
+ }
+ start_getting_events(smi_info);
+ } else {
+ smi_info->si_state = SI_NORMAL;
+ }
+ break;
+ }
+
+ case SI_SETTING_ENABLES:
+ {
+ unsigned char msg[4];
+
+ smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+ if (msg[2] != 0)
+ dev_warn_ratelimited(smi_info->io.dev,
+ "Could not set the global enables: 0x%x.\n",
+ msg[2]);
+
+ if (smi_info->supports_event_msg_buff) {
+ smi_info->curr_msg = ipmi_alloc_smi_msg();
+ if (!smi_info->curr_msg) {
+ smi_info->si_state = SI_NORMAL;
+ break;
+ }
+ start_getting_events(smi_info);
+ } else {
+ smi_info->si_state = SI_NORMAL;
+ }
+ break;
+ }
+ }
+}
+
+/*
+ * Called on timeouts and events. Timeouts should pass the elapsed
+ * time, interrupts should pass in zero. Must be called with
+ * si_lock held and interrupts disabled.
+ */
+static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
+ int time)
+{
+ enum si_sm_result si_sm_result;
+
+restart:
+ /*
+ * There used to be a loop here that waited a little while
+ * (around 25us) before giving up. That turned out to be
+ * pointless, the minimum delays I was seeing were in the 300us
+ * range, which is far too long to wait in an interrupt. So
+ * we just run until the state machine tells us something
+ * happened or it needs a delay.
+ */
+ si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
+ time = 0;
+ while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
+ si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+
+ if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
+ smi_inc_stat(smi_info, complete_transactions);
+
+ handle_transaction_done(smi_info);
+ goto restart;
+ } else if (si_sm_result == SI_SM_HOSED) {
+ smi_inc_stat(smi_info, hosed_count);
+
+ /*
+ * Do the before return_hosed_msg, because that
+ * releases the lock.
+ */
+ smi_info->si_state = SI_NORMAL;
+ if (smi_info->curr_msg != NULL) {
+ /*
+ * If we were handling a user message, format
+ * a response to send to the upper layer to
+ * tell it about the error.
+ */
+ return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
+ }
+ goto restart;
+ }
+
+ /*
+ * We prefer handling attn over new messages. But don't do
+ * this if there is not yet an upper layer to handle anything.
+ */
+ if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
+ unsigned char msg[2];
+
+ if (smi_info->si_state != SI_NORMAL) {
+ /*
+ * We got an ATTN, but we are doing something else.
+ * Handle the ATTN later.
+ */
+ smi_info->got_attn = true;
+ } else {
+ smi_info->got_attn = false;
+ smi_inc_stat(smi_info, attentions);
+
+ /*
+ * Got an attn, send down a get message flags to see
+ * what's causing it. It would be better to handle
+ * this in the upper layer, but due to the way
+ * interrupts work with the SMI, that's not really
+ * possible.
+ */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+ start_new_msg(smi_info, msg, 2);
+ smi_info->si_state = SI_GETTING_FLAGS;
+ goto restart;
+ }
+ }
+
+ /* If we are currently idle, try to start the next message. */
+ if (si_sm_result == SI_SM_IDLE) {
+ smi_inc_stat(smi_info, idles);
+
+ si_sm_result = start_next_msg(smi_info);
+ if (si_sm_result != SI_SM_IDLE)
+ goto restart;
+ }
+
+ if ((si_sm_result == SI_SM_IDLE)
+ && (atomic_read(&smi_info->req_events))) {
+ /*
+ * We are idle and the upper layer requested that I fetch
+ * events, so do so.
+ */
+ atomic_set(&smi_info->req_events, 0);
+
+ /*
+ * Take this opportunity to check the interrupt and
+ * message enable state for the BMC. The BMC can be
+ * asynchronously reset, and may thus get interrupts
+ * disabled and messages disabled.
+ */
+ if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
+ start_check_enables(smi_info);
+ } else {
+ smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+ if (!smi_info->curr_msg)
+ goto out;
+
+ start_getting_events(smi_info);
+ }
+ goto restart;
+ }
+
+ if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
+ /* It's OK if this fails, the timer will just go off. */
+ if (del_timer(&smi_info->si_timer))
+ smi_info->timer_running = false;
+ }
+
+out:
+ return si_sm_result;
+}
+
+static void check_start_timer_thread(struct smi_info *smi_info)
+{
+ if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+ if (smi_info->thread)
+ wake_up_process(smi_info->thread);
+
+ start_next_msg(smi_info);
+ smi_event_handler(smi_info, 0);
+ }
+}
+
+static void flush_messages(void *send_info)
+{
+ struct smi_info *smi_info = send_info;
+ enum si_sm_result result;
+
+ /*
+ * Currently, this function is called only in run-to-completion
+ * mode. This means we are single-threaded, no need for locks.
+ */
+ result = smi_event_handler(smi_info, 0);
+ while (result != SI_SM_IDLE) {
+ udelay(SI_SHORT_TIMEOUT_USEC);
+ result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
+ }
+}
+
+static void sender(void *send_info,
+ struct ipmi_smi_msg *msg)
+{
+ struct smi_info *smi_info = send_info;
+ unsigned long flags;
+
+ debug_timestamp(smi_info, "Enqueue");
+
+ if (smi_info->run_to_completion) {
+ /*
+ * If we are running to completion, start it. Upper
+ * layer will call flush_messages to clear it out.
+ */
+ smi_info->waiting_msg = msg;
+ return;
+ }
+
+ spin_lock_irqsave(&smi_info->si_lock, flags);
+ /*
+ * The following two lines don't need to be under the lock for
+ * the lock's sake, but they do need SMP memory barriers to
+ * avoid getting things out of order. We are already claiming
+ * the lock, anyway, so just do it under the lock to avoid the
+ * ordering problem.
+ */
+ BUG_ON(smi_info->waiting_msg);
+ smi_info->waiting_msg = msg;
+ check_start_timer_thread(smi_info);
+ spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
+static void set_run_to_completion(void *send_info, bool i_run_to_completion)
+{
+ struct smi_info *smi_info = send_info;
+
+ smi_info->run_to_completion = i_run_to_completion;
+ if (i_run_to_completion)
+ flush_messages(smi_info);
+}
+
+/*
+ * Use -1 as a special constant to indicate that we are spinning in
+ * kipmid, looking for something and not delaying between checks.
+ */
+#define IPMI_TIME_NOT_BUSY ns_to_ktime(-1ull)
+static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result,
+ const struct smi_info *smi_info,
+ ktime_t *busy_until)
+{
+ unsigned int max_busy_us = 0;
+
+ if (smi_info->si_num < num_max_busy_us)
+ max_busy_us = kipmid_max_busy_us[smi_info->si_num];
+ if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
+ *busy_until = IPMI_TIME_NOT_BUSY;
+ else if (*busy_until == IPMI_TIME_NOT_BUSY) {
+ *busy_until = ktime_get() + max_busy_us * NSEC_PER_USEC;
+ } else {
+ if (unlikely(ktime_get() > *busy_until)) {
+ *busy_until = IPMI_TIME_NOT_BUSY;
+ return false;
+ }
+ }
+ return true;
+}
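+
+/*
+ * For example, if kipmid_max_busy_us is set to 200 for this interface,
+ * the first SI_SM_CALL_WITH_DELAY result sets *busy_until 200 us into
+ * the future; later calls return true (keep spinning) until
+ * ktime_get() passes that point, after which the caller stops
+ * busy-waiting and sleeps instead.
+ */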
+
+/*
+ * A busy-waiting loop for speeding up IPMI operation.
+ *
+ * Lousy hardware makes this hard. This is only enabled for systems
+ * that are not BT and do not have interrupts. It spins until an
+ * operation is complete or until max_busy tells it to stop (if that
+ * is enabled). See the paragraph on kipmid_max_busy_us in
+ * Documentation/driver-api/ipmi.rst for details.
+ */
+static int ipmi_thread(void *data)
+{
+ struct smi_info *smi_info = data;
+ unsigned long flags;
+ enum si_sm_result smi_result;
+ ktime_t busy_until = IPMI_TIME_NOT_BUSY;
+
+ set_user_nice(current, MAX_NICE);
+ while (!kthread_should_stop()) {
+ int busy_wait;
+
+ spin_lock_irqsave(&(smi_info->si_lock), flags);
+ smi_result = smi_event_handler(smi_info, 0);
+
+ /*
+ * If the driver is doing something, there is a possible
+ * race with the timer. If the timer handler sees idle,
+ * and the thread here sees something else, the timer
+ * handler won't restart the timer even though it is
+ * required. So start it here if necessary.
+ */
+ if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+ busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
+ &busy_until);
+ if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+ ; /* do nothing */
+ } else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
+ /*
+ * In maintenance mode we run as fast as
+ * possible to allow firmware updates to
+ * complete as fast as possible, but normally
+ * don't bang on the scheduler.
+ */
+ if (smi_info->in_maintenance_mode)
+ schedule();
+ else
+ usleep_range(100, 200);
+ } else if (smi_result == SI_SM_IDLE) {
+ if (atomic_read(&smi_info->need_watch)) {
+ schedule_timeout_interruptible(100);
+ } else {
+ /* Wait to be woken up when we are needed. */
+ __set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+ } else {
+ schedule_timeout_interruptible(1);
+ }
+ }
+ return 0;
+}
+
+static void poll(void *send_info)
+{
+ struct smi_info *smi_info = send_info;
+ unsigned long flags = 0;
+ bool run_to_completion = smi_info->run_to_completion;
+
+ /*
+ * Make sure there is some delay in the poll loop so we can
+ * drive time forward and timeout things.
+ */
+ udelay(10);
+ if (!run_to_completion)
+ spin_lock_irqsave(&smi_info->si_lock, flags);
+ smi_event_handler(smi_info, 10);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
+static void request_events(void *send_info)
+{
+ struct smi_info *smi_info = send_info;
+
+ if (!smi_info->has_event_buffer)
+ return;
+
+ atomic_set(&smi_info->req_events, 1);
+}
+
+static void set_need_watch(void *send_info, unsigned int watch_mask)
+{
+ struct smi_info *smi_info = send_info;
+ unsigned long flags;
+ int enable;
+
+ enable = !!watch_mask;
+
+ atomic_set(&smi_info->need_watch, enable);
+ spin_lock_irqsave(&smi_info->si_lock, flags);
+ check_start_timer_thread(smi_info);
+ spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
+static void smi_timeout(struct timer_list *t)
+{
+ struct smi_info *smi_info = from_timer(smi_info, t, si_timer);
+ enum si_sm_result smi_result;
+ unsigned long flags;
+ unsigned long jiffies_now;
+ long time_diff;
+ long timeout;
+
+ spin_lock_irqsave(&(smi_info->si_lock), flags);
+ debug_timestamp(smi_info, "Timer");
+
+ jiffies_now = jiffies;
+ time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
+ * SI_USEC_PER_JIFFY);
+ smi_result = smi_event_handler(smi_info, time_diff);
+
+ if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
+ /* Running with interrupts, only do long timeouts. */
+ timeout = jiffies + SI_TIMEOUT_JIFFIES;
+ smi_inc_stat(smi_info, long_timeouts);
+ goto do_mod_timer;
+ }
+
+ /*
+ * If the state machine asks for a short delay, then shorten
+ * the timer timeout.
+ */
+ if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ smi_inc_stat(smi_info, short_timeouts);
+ timeout = jiffies + 1;
+ } else {
+ smi_inc_stat(smi_info, long_timeouts);
+ timeout = jiffies + SI_TIMEOUT_JIFFIES;
+ }
+
+do_mod_timer:
+ if (smi_result != SI_SM_IDLE)
+ smi_mod_timer(smi_info, timeout);
+ else
+ smi_info->timer_running = false;
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+}
+
+irqreturn_t ipmi_si_irq_handler(int irq, void *data)
+{
+ struct smi_info *smi_info = data;
+ unsigned long flags;
+
+ if (smi_info->io.si_type == SI_BT)
+ /* We need to clear the IRQ flag for the BT interface. */
+ smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
+ IPMI_BT_INTMASK_CLEAR_IRQ_BIT
+ | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+
+ spin_lock_irqsave(&(smi_info->si_lock), flags);
+
+ smi_inc_stat(smi_info, interrupts);
+
+ debug_timestamp(smi_info, "Interrupt");
+
+ smi_event_handler(smi_info, 0);
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+ return IRQ_HANDLED;
+}
+
+static int smi_start_processing(void *send_info,
+ struct ipmi_smi *intf)
+{
+ struct smi_info *new_smi = send_info;
+ int enable = 0;
+
+ new_smi->intf = intf;
+
+ /* Set up the timer that drives the interface. */
+ timer_setup(&new_smi->si_timer, smi_timeout, 0);
+ new_smi->timer_can_start = true;
+ smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
+
+ /* Try to claim any interrupts. */
+ if (new_smi->io.irq_setup) {
+ new_smi->io.irq_handler_data = new_smi;
+ new_smi->io.irq_setup(&new_smi->io);
+ }
+
+ /*
+ * Check if the user forcefully enabled the daemon.
+ */
+ if (new_smi->si_num < num_force_kipmid)
+ enable = force_kipmid[new_smi->si_num];
+ /*
+ * The BT interface is efficient enough to not need a thread,
+ * and there is no need for a thread if we have interrupts.
+ */
+ else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
+ enable = 1;
+
+ if (enable) {
+ new_smi->thread = kthread_run(ipmi_thread, new_smi,
+ "kipmi%d", new_smi->si_num);
+ if (IS_ERR(new_smi->thread)) {
+ dev_notice(new_smi->io.dev,
+ "Could not start kernel thread due to error %ld, only using timers to drive the interface\n",
+ PTR_ERR(new_smi->thread));
+ new_smi->thread = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
+{
+ struct smi_info *smi = send_info;
+
+ data->addr_src = smi->io.addr_source;
+ data->dev = smi->io.dev;
+ data->addr_info = smi->io.addr_info;
+ get_device(smi->io.dev);
+
+ return 0;
+}
+
+static void set_maintenance_mode(void *send_info, bool enable)
+{
+ struct smi_info *smi_info = send_info;
+
+ if (!enable)
+ atomic_set(&smi_info->req_events, 0);
+ smi_info->in_maintenance_mode = enable;
+}
+
+static void shutdown_smi(void *send_info);
+static const struct ipmi_smi_handlers handlers = {
+ .owner = THIS_MODULE,
+ .start_processing = smi_start_processing,
+ .shutdown = shutdown_smi,
+ .get_smi_info = get_smi_info,
+ .sender = sender,
+ .request_events = request_events,
+ .set_need_watch = set_need_watch,
+ .set_maintenance_mode = set_maintenance_mode,
+ .set_run_to_completion = set_run_to_completion,
+ .flush_messages = flush_messages,
+ .poll = poll,
+};
+
+static LIST_HEAD(smi_infos);
+static DEFINE_MUTEX(smi_infos_lock);
+static int smi_num; /* Used to sequence the SMIs */
+
+static const char * const addr_space_to_str[] = { "i/o", "mem" };
+
+module_param_array(force_kipmid, int, &num_force_kipmid, 0);
+MODULE_PARM_DESC(force_kipmid,
+ "Force the kipmi daemon to be enabled (1) or disabled(0). Normally the IPMI driver auto-detects this, but the value may be overridden by this parm.");
+module_param(unload_when_empty, bool, 0);
+MODULE_PARM_DESC(unload_when_empty,
+ "Unload the module if no interfaces are specified or found, default is 1. Setting to 0 is useful for hot add of devices using hotmod.");
+module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
+MODULE_PARM_DESC(kipmid_max_busy_us,
+ "Max time (in microseconds) to busy-wait for IPMI data before sleeping. 0 (default) means to wait forever. Set to 100-500 if kipmid is using up a lot of CPU time.");
+
+void ipmi_irq_finish_setup(struct si_sm_io *io)
+{
+ if (io->si_type == SI_BT)
+ /* Enable the interrupt in the BT interface. */
+ io->outputb(io, IPMI_BT_INTMASK_REG,
+ IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+}
+
+void ipmi_irq_start_cleanup(struct si_sm_io *io)
+{
+ if (io->si_type == SI_BT)
+ /* Disable the interrupt in the BT interface. */
+ io->outputb(io, IPMI_BT_INTMASK_REG, 0);
+}
+
+static void std_irq_cleanup(struct si_sm_io *io)
+{
+ ipmi_irq_start_cleanup(io);
+ free_irq(io->irq, io->irq_handler_data);
+}
+
+int ipmi_std_irq_setup(struct si_sm_io *io)
+{
+ int rv;
+
+ if (!io->irq)
+ return 0;
+
+ rv = request_irq(io->irq,
+ ipmi_si_irq_handler,
+ IRQF_SHARED,
+ SI_DEVICE_NAME,
+ io->irq_handler_data);
+ if (rv) {
+ dev_warn(io->dev, "%s unable to claim interrupt %d, running polled\n",
+ SI_DEVICE_NAME, io->irq);
+ io->irq = 0;
+ } else {
+ io->irq_cleanup = std_irq_cleanup;
+ ipmi_irq_finish_setup(io);
+ dev_info(io->dev, "Using irq %d\n", io->irq);
+ }
+
+ return rv;
+}
+
+static int wait_for_msg_done(struct smi_info *smi_info)
+{
+ enum si_sm_result smi_result;
+
+ smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
+ for (;;) {
+ if (smi_result == SI_SM_CALL_WITH_DELAY ||
+ smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
+ schedule_timeout_uninterruptible(1);
+ smi_result = smi_info->handlers->event(
+ smi_info->si_sm, jiffies_to_usecs(1));
+ } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+ smi_result = smi_info->handlers->event(
+ smi_info->si_sm, 0);
+ } else
+ break;
+ }
+ if (smi_result == SI_SM_HOSED)
+ /*
+ * We couldn't get the state machine to run, so whatever's at
+ * the port is probably not an IPMI SMI interface.
+ */
+ return -ENODEV;
+
+ return 0;
+}
+
+static int try_get_dev_id(struct smi_info *smi_info)
+{
+ unsigned char msg[2];
+ unsigned char *resp;
+ unsigned long resp_len;
+ int rv = 0;
+ unsigned int retry_count = 0;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ /*
+ * Do a Get Device ID command, since it comes back with some
+ * useful info.
+ */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_DEVICE_ID_CMD;
+
+retry:
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv)
+ goto out;
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ /* Check and record info from the get device id, in case we need it. */
+ rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
+ resp + 2, resp_len - 2, &smi_info->device_id);
+ if (rv) {
+ /* record completion code */
+ unsigned char cc = *(resp + 2);
+
+ if (cc != IPMI_CC_NO_ERROR &&
+ ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
+ dev_warn_ratelimited(smi_info->io.dev,
+ "BMC returned 0x%2.2x, retry get bmc device id\n",
+ cc);
+ goto retry;
+ }
+ }
+
+out:
+ kfree(resp);
+ return rv;
+}
+
+static int get_global_enables(struct smi_info *smi_info, u8 *enables)
+{
+ unsigned char msg[3];
+ unsigned char *resp;
+ unsigned long resp_len;
+ int rv;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv) {
+ dev_warn(smi_info->io.dev,
+ "Error getting response from get global enables command: %d\n",
+ rv);
+ goto out;
+ }
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ if (resp_len < 4 ||
+ resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+ resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
+ resp[2] != 0) {
+ dev_warn(smi_info->io.dev,
+ "Invalid return from get global enables command: %ld %x %x %x\n",
+ resp_len, resp[0], resp[1], resp[2]);
+ rv = -EINVAL;
+ goto out;
+ } else {
+ *enables = resp[3];
+ }
+
+out:
+ kfree(resp);
+ return rv;
+}
+
+/*
+ * Returns 1 if it gets an error from the command.
+ */
+static int set_global_enables(struct smi_info *smi_info, u8 enables)
+{
+ unsigned char msg[3];
+ unsigned char *resp;
+ unsigned long resp_len;
+ int rv;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = enables;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv) {
+ dev_warn(smi_info->io.dev,
+ "Error getting response from set global enables command: %d\n",
+ rv);
+ goto out;
+ }
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ if (resp_len < 3 ||
+ resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+ resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
+ dev_warn(smi_info->io.dev,
+ "Invalid return from set global enables command: %ld %x %x\n",
+ resp_len, resp[0], resp[1]);
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (resp[2] != 0)
+ rv = 1;
+
+out:
+ kfree(resp);
+ return rv;
+}
+
+/*
+ * Some BMCs do not support clearing the receive irq bit in the global
+ * enables (even if they don't support interrupts on the BMC). Check
+ * for this and handle it properly.
+ */
+static void check_clr_rcv_irq(struct smi_info *smi_info)
+{
+ u8 enables = 0;
+ int rv;
+
+ rv = get_global_enables(smi_info, &enables);
+ if (!rv) {
+ if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
+ /* Already clear, should work ok. */
+ return;
+
+ enables &= ~IPMI_BMC_RCV_MSG_INTR;
+ rv = set_global_enables(smi_info, enables);
+ }
+
+ if (rv < 0) {
+ dev_err(smi_info->io.dev,
+ "Cannot check clearing the rcv irq: %d\n", rv);
+ return;
+ }
+
+ if (rv) {
+ /*
+ * An error when clearing the receive irq bit means
+ * clearing the bit is not supported.
+ */
+ dev_warn(smi_info->io.dev,
+ "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
+ smi_info->cannot_disable_irq = true;
+ }
+}
+
+/*
+ * Some BMCs do not support setting the interrupt bits in the global
+ * enables even if they support interrupts. Clearly bad, but we can
+ * compensate.
+ */
+static void check_set_rcv_irq(struct smi_info *smi_info)
+{
+ u8 enables = 0;
+ int rv;
+
+ if (!smi_info->io.irq)
+ return;
+
+ rv = get_global_enables(smi_info, &enables);
+ if (!rv) {
+ enables |= IPMI_BMC_RCV_MSG_INTR;
+ rv = set_global_enables(smi_info, enables);
+ }
+
+ if (rv < 0) {
+ dev_err(smi_info->io.dev,
+ "Cannot check setting the rcv irq: %d\n", rv);
+ return;
+ }
+
+ if (rv) {
+ /*
+ * An error when setting the receive irq bit means
+ * setting the bit is not supported.
+ */
+ dev_warn(smi_info->io.dev,
+ "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
+ smi_info->cannot_disable_irq = true;
+ smi_info->irq_enable_broken = true;
+ }
+}
+
+static int try_enable_event_buffer(struct smi_info *smi_info)
+{
+ unsigned char msg[3];
+ unsigned char *resp;
+ unsigned long resp_len;
+ int rv = 0;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv) {
+ pr_warn("Error getting response from get global enables command, the event buffer is not enabled\n");
+ goto out;
+ }
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ if (resp_len < 4 ||
+ resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+ resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
+ resp[2] != 0) {
+ pr_warn("Invalid return from get global enables command, cannot enable the event buffer\n");
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
+ /* buffer is already enabled, nothing to do. */
+ smi_info->supports_event_msg_buff = true;
+ goto out;
+ }
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv) {
+ pr_warn("Error getting response from set global, enables command, the event buffer is not enabled\n");
+ goto out;
+ }
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ if (resp_len < 3 ||
+ resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+ resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
+ pr_warn("Invalid return from get global, enables command, not enable the event buffer\n");
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (resp[2] != 0)
+ /*
+ * An error when setting the event buffer bit means
+ * that the event buffer is not supported.
+ */
+ rv = -ENOENT;
+ else
+ smi_info->supports_event_msg_buff = true;
+
+out:
+ kfree(resp);
+ return rv;
+}
+
+#define IPMI_SI_ATTR(name) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct smi_info *smi_info = dev_get_drvdata(dev); \
+ \
+ return sysfs_emit(buf, "%u\n", smi_get_stat(smi_info, name)); \
+} \
+static DEVICE_ATTR_RO(name)
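+
+/*
+ * E.g. IPMI_SI_ATTR(idles) defines idles_show(), which reports
+ * smi_get_stat(smi_info, idles), plus the matching read-only device
+ * attribute dev_attr_idles listed in the attribute group below.
+ */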
+
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smi_info *smi_info = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_type]);
+}
+static DEVICE_ATTR_RO(type);
+
+static ssize_t interrupts_enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smi_info *smi_info = dev_get_drvdata(dev);
+ int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;
+
+ return sysfs_emit(buf, "%d\n", enabled);
+}
+static DEVICE_ATTR_RO(interrupts_enabled);
+
+IPMI_SI_ATTR(short_timeouts);
+IPMI_SI_ATTR(long_timeouts);
+IPMI_SI_ATTR(idles);
+IPMI_SI_ATTR(interrupts);
+IPMI_SI_ATTR(attentions);
+IPMI_SI_ATTR(flag_fetches);
+IPMI_SI_ATTR(hosed_count);
+IPMI_SI_ATTR(complete_transactions);
+IPMI_SI_ATTR(events);
+IPMI_SI_ATTR(watchdog_pretimeouts);
+IPMI_SI_ATTR(incoming_messages);
+
+static ssize_t params_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smi_info *smi_info = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf,
+ "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
+ si_to_str[smi_info->io.si_type],
+ addr_space_to_str[smi_info->io.addr_space],
+ smi_info->io.addr_data,
+ smi_info->io.regspacing,
+ smi_info->io.regsize,
+ smi_info->io.regshift,
+ smi_info->io.irq,
+ smi_info->io.slave_addr);
+}
+static DEVICE_ATTR_RO(params);
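+
+/*
+ * A hypothetical read of the resulting "params" sysfs file could show
+ * "kcs,i/o,0xca2,rsp=1,rsi=1,rsh=0,irq=0,ipmb=32" (note ipmb prints in
+ * decimal, so 32 is slave address 0x20), mirroring the hotmod option
+ * syntax in ipmi_si_hotmod.c.
+ */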
+
+static struct attribute *ipmi_si_dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_interrupts_enabled.attr,
+ &dev_attr_short_timeouts.attr,
+ &dev_attr_long_timeouts.attr,
+ &dev_attr_idles.attr,
+ &dev_attr_interrupts.attr,
+ &dev_attr_attentions.attr,
+ &dev_attr_flag_fetches.attr,
+ &dev_attr_hosed_count.attr,
+ &dev_attr_complete_transactions.attr,
+ &dev_attr_events.attr,
+ &dev_attr_watchdog_pretimeouts.attr,
+ &dev_attr_incoming_messages.attr,
+ &dev_attr_params.attr,
+ NULL
+};
+
+static const struct attribute_group ipmi_si_dev_attr_group = {
+ .attrs = ipmi_si_dev_attrs,
+};
+
+/*
+ * oem_data_avail_to_receive_msg_avail
+ * @info - smi_info structure with msg_flags set
+ *
+ * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
+ * Returns 1 indicating need to re-run handle_flags().
+ */
+static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
+{
+ smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
+ RECEIVE_MSG_AVAIL);
+ return 1;
+}
+
+/*
+ * setup_dell_poweredge_oem_data_handler
+ * @info - smi_info.device_id must be populated
+ *
+ * Systems that match, but have firmware version < 1.40 may assert
+ * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
+ * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
+ * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
+ * as RECEIVE_MSG_AVAIL instead.
+ *
+ * As Dell has no plans to release IPMI 1.5 firmware that *ever*
+ * assert the OEM[012] bits, and if it did, the driver would have to
+ * change to handle that properly, we don't actually check for the
+ * firmware version.
+ * Device ID = 0x20 BMC on PowerEdge 8G servers
+ * Device Revision = 0x80
+ * Firmware Revision1 = 0x01 BMC version 1.40
+ * Firmware Revision2 = 0x40 BCD encoded
+ * IPMI Version = 0x51 IPMI 1.5
+ * Manufacturer ID = A2 02 00 Dell IANA
+ *
+ * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
+ * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
+ *
+ */
+#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
+#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
+#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
+#define DELL_IANA_MFR_ID 0x0002a2
+static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
+{
+ struct ipmi_device_id *id = &smi_info->device_id;
+ if (id->manufacturer_id == DELL_IANA_MFR_ID) {
+ if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
+ id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
+ id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
+ smi_info->oem_data_avail_handler =
+ oem_data_avail_to_receive_msg_avail;
+ } else if (ipmi_version_major(id) < 1 ||
+ (ipmi_version_major(id) == 1 &&
+ ipmi_version_minor(id) < 5)) {
+ smi_info->oem_data_avail_handler =
+ oem_data_avail_to_receive_msg_avail;
+ }
+ }
+}
+
+#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
+static void return_hosed_msg_badsize(struct smi_info *smi_info)
+{
+ struct ipmi_smi_msg *msg = smi_info->curr_msg;
+
+ /* Make it a response */
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
+ msg->rsp_size = 3;
+ smi_info->curr_msg = NULL;
+ deliver_recv_msg(smi_info, msg);
+}
+
+/*
+ * dell_poweredge_bt_xaction_handler
+ * @info - smi_info.device_id must be populated
+ *
+ * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
+ * not respond to a Get SDR command if the length of the data
+ * requested is exactly 0x3A, which leads to command timeouts and no
+ * data returned. This intercepts such commands, and causes userspace
+ * callers to try again with a different-sized buffer, which succeeds.
+ */
+
+#define STORAGE_NETFN 0x0A
+#define STORAGE_CMD_GET_SDR 0x23
+static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
+ unsigned long unused,
+ void *in)
+{
+ struct smi_info *smi_info = in;
+ unsigned char *data = smi_info->curr_msg->data;
+ unsigned int size = smi_info->curr_msg->data_size;
+ if (size >= 8 &&
+ (data[0]>>2) == STORAGE_NETFN &&
+ data[1] == STORAGE_CMD_GET_SDR &&
+ data[7] == 0x3A) {
+ return_hosed_msg_badsize(smi_info);
+ return NOTIFY_STOP;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block dell_poweredge_bt_xaction_notifier = {
+ .notifier_call = dell_poweredge_bt_xaction_handler,
+};
+
+/*
+ * setup_dell_poweredge_bt_xaction_handler
+ * @info - smi_info.device_id must be filled in already
+ *
+ * Registers the Dell PowerEdge BT transaction notifier
+ * when we know it is needed.
+ */
+static void
+setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
+{
+ struct ipmi_device_id *id = &smi_info->device_id;
+ if (id->manufacturer_id == DELL_IANA_MFR_ID &&
+ smi_info->io.si_type == SI_BT)
+ register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
+}
+
+/*
+ * setup_oem_data_handler
+ * @info - smi_info.device_id must be filled in already
+ *
+ * Fills in smi_info.oem_data_avail_handler
+ * when we know what function to use there.
+ */
+
+static void setup_oem_data_handler(struct smi_info *smi_info)
+{
+ setup_dell_poweredge_oem_data_handler(smi_info);
+}
+
+static void setup_xaction_handlers(struct smi_info *smi_info)
+{
+ setup_dell_poweredge_bt_xaction_handler(smi_info);
+}
+
+static void check_for_broken_irqs(struct smi_info *smi_info)
+{
+ check_clr_rcv_irq(smi_info);
+ check_set_rcv_irq(smi_info);
+}
+
+static inline void stop_timer_and_thread(struct smi_info *smi_info)
+{
+ if (smi_info->thread != NULL) {
+ kthread_stop(smi_info->thread);
+ smi_info->thread = NULL;
+ }
+
+ smi_info->timer_can_start = false;
+ del_timer_sync(&smi_info->si_timer);
+}
+
+static struct smi_info *find_dup_si(struct smi_info *info)
+{
+ struct smi_info *e;
+
+ list_for_each_entry(e, &smi_infos, link) {
+ if (e->io.addr_space != info->io.addr_space)
+ continue;
+ if (e->io.addr_data == info->io.addr_data) {
+ /*
+ * This is a cheap hack: ACPI doesn't have a defined
+ * slave address but SMBIOS does. Pick it up from
+ * any source that has it available.
+ */
+ if (info->io.slave_addr && !e->io.slave_addr)
+ e->io.slave_addr = info->io.slave_addr;
+ return e;
+ }
+ }
+
+ return NULL;
+}
+
+int ipmi_si_add_smi(struct si_sm_io *io)
+{
+ int rv = 0;
+ struct smi_info *new_smi, *dup;
+
+ /*
+ * If the user gave us a hard-coded device at the same
+ * address, they presumably want us to use it and not what is
+ * in the firmware.
+ */
+ if (io->addr_source != SI_HARDCODED && io->addr_source != SI_HOTMOD &&
+ ipmi_si_hardcode_match(io->addr_space, io->addr_data)) {
+ dev_info(io->dev,
+ "Hard-coded device at this address already exists");
+ return -ENODEV;
+ }
+
+ if (!io->io_setup) {
+ if (io->addr_space == IPMI_IO_ADDR_SPACE) {
+ io->io_setup = ipmi_si_port_setup;
+ } else if (io->addr_space == IPMI_MEM_ADDR_SPACE) {
+ io->io_setup = ipmi_si_mem_setup;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
+ if (!new_smi)
+ return -ENOMEM;
+ spin_lock_init(&new_smi->si_lock);
+
+ new_smi->io = *io;
+
+ mutex_lock(&smi_infos_lock);
+ dup = find_dup_si(new_smi);
+ if (dup) {
+ if (new_smi->io.addr_source == SI_ACPI &&
+ dup->io.addr_source == SI_SMBIOS) {
+ /* We prefer ACPI over SMBIOS. */
+ dev_info(dup->io.dev,
+ "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
+ si_to_str[new_smi->io.si_type]);
+ cleanup_one_si(dup);
+ } else {
+ dev_info(new_smi->io.dev,
+ "%s-specified %s state machine: duplicate\n",
+ ipmi_addr_src_to_str(new_smi->io.addr_source),
+ si_to_str[new_smi->io.si_type]);
+ rv = -EBUSY;
+ kfree(new_smi);
+ goto out_err;
+ }
+ }
+
+ pr_info("Adding %s-specified %s state machine\n",
+ ipmi_addr_src_to_str(new_smi->io.addr_source),
+ si_to_str[new_smi->io.si_type]);
+
+ list_add_tail(&new_smi->link, &smi_infos);
+
+ if (initialized)
+ rv = try_smi_init(new_smi);
+out_err:
+ mutex_unlock(&smi_infos_lock);
+ return rv;
+}
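+
+/*
+ * Illustrative caller sketch (an assumed example, not part of this
+ * driver; the real callers are the probe routines in ipmi_si_parisc.c,
+ * ipmi_si_pci.c and ipmi_si_platform.c):
+ *
+ *	struct si_sm_io io;
+ *
+ *	memset(&io, 0, sizeof(io));
+ *	io.si_type = SI_KCS;
+ *	io.addr_source = SI_PLATFORM;
+ *	io.addr_space = IPMI_IO_ADDR_SPACE;
+ *	io.addr_data = 0xca2;		hypothetical KCS port
+ *	io.regsize = 1;
+ *	io.regspacing = 1;
+ *	io.dev = &pdev->dev;
+ *	rv = ipmi_si_add_smi(&io);
+ */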
+
+/*
+ * Try to start up an interface. Must be called with smi_infos_lock
+ * held, primarily to keep smi_num consistent; we only want to do
+ * these one at a time.
+ */
+static int try_smi_init(struct smi_info *new_smi)
+{
+ int rv = 0;
+ int i;
+
+ pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
+ ipmi_addr_src_to_str(new_smi->io.addr_source),
+ si_to_str[new_smi->io.si_type],
+ addr_space_to_str[new_smi->io.addr_space],
+ new_smi->io.addr_data,
+ new_smi->io.slave_addr, new_smi->io.irq);
+
+ switch (new_smi->io.si_type) {
+ case SI_KCS:
+ new_smi->handlers = &kcs_smi_handlers;
+ break;
+
+ case SI_SMIC:
+ new_smi->handlers = &smic_smi_handlers;
+ break;
+
+ case SI_BT:
+ new_smi->handlers = &bt_smi_handlers;
+ break;
+
+ default:
+ /* No support for anything else yet. */
+ rv = -EIO;
+ goto out_err;
+ }
+
+ new_smi->si_num = smi_num;
+
+ /* Do this early so it's available for logs. */
+ if (!new_smi->io.dev) {
+ pr_err("IPMI interface added with no device\n");
+ rv = -EIO;
+ goto out_err;
+ }
+
+ /* Allocate the state machine's data and initialize it. */
+ new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
+ if (!new_smi->si_sm) {
+ rv = -ENOMEM;
+ goto out_err;
+ }
+ new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
+ &new_smi->io);
+
+ /* Now that we know the I/O size, we can set up the I/O. */
+ rv = new_smi->io.io_setup(&new_smi->io);
+ if (rv) {
+ dev_err(new_smi->io.dev, "Could not set up I/O space\n");
+ goto out_err;
+ }
+
+ /* Do low-level detection first. */
+ if (new_smi->handlers->detect(new_smi->si_sm)) {
+ if (new_smi->io.addr_source)
+ dev_err(new_smi->io.dev,
+ "Interface detection failed\n");
+ rv = -ENODEV;
+ goto out_err;
+ }
+
+ /*
+ * Attempt a get device id command. If it fails, we probably
+ * don't have a BMC here.
+ */
+ rv = try_get_dev_id(new_smi);
+ if (rv) {
+ if (new_smi->io.addr_source)
+ dev_err(new_smi->io.dev,
+ "There appears to be no BMC at this location\n");
+ goto out_err;
+ }
+
+ setup_oem_data_handler(new_smi);
+ setup_xaction_handlers(new_smi);
+ check_for_broken_irqs(new_smi);
+
+ new_smi->waiting_msg = NULL;
+ new_smi->curr_msg = NULL;
+ atomic_set(&new_smi->req_events, 0);
+ new_smi->run_to_completion = false;
+ for (i = 0; i < SI_NUM_STATS; i++)
+ atomic_set(&new_smi->stats[i], 0);
+
+ new_smi->interrupt_disabled = true;
+ atomic_set(&new_smi->need_watch, 0);
+
+ rv = try_enable_event_buffer(new_smi);
+ if (rv == 0)
+ new_smi->has_event_buffer = true;
+
+ /*
+ * Start clearing the flags before we enable interrupts or the
+ * timer to avoid racing with the timer.
+ */
+ start_clear_flags(new_smi);
+
+ /*
+ * IRQ is defined to be set when non-zero. req_events will
+ * cause a global flags check that will enable interrupts.
+ */
+ if (new_smi->io.irq) {
+ new_smi->interrupt_disabled = false;
+ atomic_set(&new_smi->req_events, 1);
+ }
+
+ dev_set_drvdata(new_smi->io.dev, new_smi);
+ rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
+ if (rv) {
+ dev_err(new_smi->io.dev,
+ "Unable to add device attributes: error %d\n",
+ rv);
+ goto out_err;
+ }
+ new_smi->dev_group_added = true;
+
+ rv = ipmi_register_smi(&handlers,
+ new_smi,
+ new_smi->io.dev,
+ new_smi->io.slave_addr);
+ if (rv) {
+ dev_err(new_smi->io.dev,
+ "Unable to register device: error %d\n",
+ rv);
+ goto out_err;
+ }
+
+ /* Don't increment till we know we have succeeded. */
+ smi_num++;
+
+ dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
+ si_to_str[new_smi->io.si_type]);
+
+ WARN_ON(new_smi->io.dev->init_name != NULL);
+
+ out_err:
+ if (rv && new_smi->io.io_cleanup) {
+ new_smi->io.io_cleanup(&new_smi->io);
+ new_smi->io.io_cleanup = NULL;
+ }
+
+ if (rv && new_smi->si_sm) {
+ kfree(new_smi->si_sm);
+ new_smi->si_sm = NULL;
+ }
+
+ return rv;
+}
+
+static int __init init_ipmi_si(void)
+{
+ struct smi_info *e;
+ enum ipmi_addr_src type = SI_INVALID;
+
+ if (initialized)
+ return 0;
+
+ ipmi_hardcode_init();
+
+ pr_info("IPMI System Interface driver\n");
+
+ ipmi_si_platform_init();
+
+ ipmi_si_pci_init();
+
+ ipmi_si_parisc_init();
+
+ /*
+ * We prefer devices with interrupts, but in the case of a machine
+ * with multiple BMCs we assume that there will be several instances
+ * of a given type, so if we succeed in registering a type then also
+ * try to register everything else of the same type.
+ */
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry(e, &smi_infos, link) {
+ /*
+ * Try to register a device if it has an IRQ and we either
+ * haven't successfully registered a device yet or this
+ * device has the same type as one we successfully registered.
+ */
+ if (e->io.irq && (!type || e->io.addr_source == type)) {
+ if (!try_smi_init(e)) {
+ type = e->io.addr_source;
+ }
+ }
+ }
+
+ /* type will only have been set if we successfully registered an si */
+ if (type)
+ goto skip_fallback_noirq;
+
+ /* Fall back to devices without interrupts. */
+
+ list_for_each_entry(e, &smi_infos, link) {
+ if (!e->io.irq && (!type || e->io.addr_source == type)) {
+ if (!try_smi_init(e)) {
+ type = e->io.addr_source;
+ }
+ }
+ }
+
+skip_fallback_noirq:
+ initialized = true;
+ mutex_unlock(&smi_infos_lock);
+
+ if (type)
+ return 0;
+
+ mutex_lock(&smi_infos_lock);
+ if (unload_when_empty && list_empty(&smi_infos)) {
+ mutex_unlock(&smi_infos_lock);
+ cleanup_ipmi_si();
+ pr_warn("Unable to find any System Interface(s)\n");
+ return -ENODEV;
+ } else {
+ mutex_unlock(&smi_infos_lock);
+ return 0;
+ }
+}
+module_init(init_ipmi_si);
+
+static void wait_msg_processed(struct smi_info *smi_info)
+{
+ unsigned long jiffies_now;
+ long time_diff;
+
+ while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
+ jiffies_now = jiffies;
+ time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
+ * SI_USEC_PER_JIFFY);
+ smi_event_handler(smi_info, time_diff);
+ schedule_timeout_uninterruptible(1);
+ }
+}
+
+static void shutdown_smi(void *send_info)
+{
+ struct smi_info *smi_info = send_info;
+
+ if (smi_info->dev_group_added) {
+ device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group);
+ smi_info->dev_group_added = false;
+ }
+ if (smi_info->io.dev)
+ dev_set_drvdata(smi_info->io.dev, NULL);
+
+ /*
+ * Make sure that interrupts, the timer and the thread are
+ * stopped and will not run again.
+ */
+ smi_info->interrupt_disabled = true;
+ if (smi_info->io.irq_cleanup) {
+ smi_info->io.irq_cleanup(&smi_info->io);
+ smi_info->io.irq_cleanup = NULL;
+ }
+ stop_timer_and_thread(smi_info);
+
+ /*
+ * Wait until we know that we are out of any interrupt
+ * handlers that might have been running before we freed
+ * the interrupt.
+ */
+ synchronize_rcu();
+
+ /*
+ * Timeouts are stopped, now make sure the interrupts are off
+ * in the BMC. Note that timers and CPU interrupts are off,
+ * so no need for locks.
+ */
+ wait_msg_processed(smi_info);
+
+ if (smi_info->handlers)
+ disable_si_irq(smi_info);
+
+ wait_msg_processed(smi_info);
+
+ if (smi_info->handlers)
+ smi_info->handlers->cleanup(smi_info->si_sm);
+
+ if (smi_info->io.io_cleanup) {
+ smi_info->io.io_cleanup(&smi_info->io);
+ smi_info->io.io_cleanup = NULL;
+ }
+
+ kfree(smi_info->si_sm);
+ smi_info->si_sm = NULL;
+
+ smi_info->intf = NULL;
+}
+
+/*
+ * Must be called with smi_infos_lock held, to serialize the
+ * smi_info->intf check.
+ */
+static void cleanup_one_si(struct smi_info *smi_info)
+{
+ if (!smi_info)
+ return;
+
+ list_del(&smi_info->link);
+ ipmi_unregister_smi(smi_info->intf);
+ kfree(smi_info);
+}
+
+void ipmi_si_remove_by_dev(struct device *dev)
+{
+ struct smi_info *e;
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry(e, &smi_infos, link) {
+ if (e->io.dev == dev) {
+ cleanup_one_si(e);
+ break;
+ }
+ }
+ mutex_unlock(&smi_infos_lock);
+}
+
+struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
+ unsigned long addr)
+{
+ /* remove */
+ struct smi_info *e, *tmp_e;
+ struct device *dev = NULL;
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
+ if (e->io.addr_space != addr_space)
+ continue;
+ if (e->io.si_type != si_type)
+ continue;
+ if (e->io.addr_data == addr) {
+ dev = get_device(e->io.dev);
+ cleanup_one_si(e);
+ }
+ }
+ mutex_unlock(&smi_infos_lock);
+
+ return dev;
+}
+
+static void cleanup_ipmi_si(void)
+{
+ struct smi_info *e, *tmp_e;
+
+ if (!initialized)
+ return;
+
+ ipmi_si_pci_shutdown();
+
+ ipmi_si_parisc_shutdown();
+
+ ipmi_si_platform_shutdown();
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
+ cleanup_one_si(e);
+ mutex_unlock(&smi_infos_lock);
+
+ ipmi_si_hardcode_exit();
+ ipmi_si_hotmod_exit();
+}
+module_exit(cleanup_ipmi_si);
+
+MODULE_ALIAS("platform:dmi-ipmi-si");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");
diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c
new file mode 100644
index 000000000..86b92e93a
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_mem_io.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/io.h>
+#include "ipmi_si.h"
+
+static unsigned char intf_mem_inb(const struct si_sm_io *io,
+ unsigned int offset)
+{
+ return readb((io->addr)+(offset * io->regspacing));
+}
+
+static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writeb(b, (io->addr)+(offset * io->regspacing));
+}
+
+static unsigned char intf_mem_inw(const struct si_sm_io *io,
+ unsigned int offset)
+{
+ return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+
+static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+
+static unsigned char intf_mem_inl(const struct si_sm_io *io,
+ unsigned int offset)
+{
+ return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+
+static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+
+#ifdef readq
+static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset)
+{
+ return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+
+static void mem_outq(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writeq((u64)b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+#endif
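+
+/*
+ * Worked example (illustrative): with regsize = 2, regspacing = 4 and
+ * regshift = 8, reading logical register 1 issues readw(addr + 1 * 4)
+ * and returns (value >> 8) & 0xff, i.e. the byte of interest may live
+ * in the upper half of a wider hardware register.
+ */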
+
+static void mem_region_cleanup(struct si_sm_io *io, int num)
+{
+ unsigned long addr = io->addr_data;
+ int idx;
+
+ for (idx = 0; idx < num; idx++)
+ release_mem_region(addr + idx * io->regspacing,
+ io->regsize);
+}
+
+static void mem_cleanup(struct si_sm_io *io)
+{
+ if (io->addr) {
+ iounmap(io->addr);
+ mem_region_cleanup(io, io->io_size);
+ }
+}
+
+int ipmi_si_mem_setup(struct si_sm_io *io)
+{
+ unsigned long addr = io->addr_data;
+ int mapsize, idx;
+
+ if (!addr)
+ return -ENODEV;
+
+ /*
+ * Figure out the actual readb/readw/readl/etc routine to use based
+ * upon the register size.
+ */
+ switch (io->regsize) {
+ case 1:
+ io->inputb = intf_mem_inb;
+ io->outputb = intf_mem_outb;
+ break;
+ case 2:
+ io->inputb = intf_mem_inw;
+ io->outputb = intf_mem_outw;
+ break;
+ case 4:
+ io->inputb = intf_mem_inl;
+ io->outputb = intf_mem_outl;
+ break;
+#ifdef readq
+ case 8:
+ io->inputb = mem_inq;
+ io->outputb = mem_outq;
+ break;
+#endif
+ default:
+ dev_warn(io->dev, "Invalid register size: %d\n",
+ io->regsize);
+ return -EINVAL;
+ }
+
+ /*
+ * Some BIOSes reserve disjoint memory regions in their ACPI
+ * tables. This causes problems when trying to request the
+ * entire region. Therefore we must request each register
+ * separately.
+ */
+ for (idx = 0; idx < io->io_size; idx++) {
+ if (request_mem_region(addr + idx * io->regspacing,
+ io->regsize, SI_DEVICE_NAME) == NULL) {
+ /* Undo allocations */
+ mem_region_cleanup(io, idx);
+ return -EIO;
+ }
+ }
+
+ /*
+ * Calculate the total amount of memory to claim. This is an
+ * unusual-looking calculation, but it avoids claiming any
+ * more memory than it has to. It will claim everything
+ * from the first address to the end of the last full
+ * register.
+ */
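+ /*
+ * Worked example (illustrative): io_size = 3, regspacing = 4 and
+ * regsize = 1 give mapsize = (3 * 4) - (4 - 1) = 9 bytes.
+ */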
+ mapsize = ((io->io_size * io->regspacing)
+ - (io->regspacing - io->regsize));
+ io->addr = ioremap(addr, mapsize);
+ if (io->addr == NULL) {
+ mem_region_cleanup(io, io->io_size);
+ return -EIO;
+ }
+
+ io->io_cleanup = mem_cleanup;
+
+ return 0;
+}
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c
new file mode 100644
index 000000000..2be2967f6
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_parisc.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <asm/hardware.h> /* for register_parisc_driver() stuff */
+#include <asm/parisc-device.h>
+#include "ipmi_si.h"
+
+static bool parisc_registered;
+
+static int __init ipmi_parisc_probe(struct parisc_device *dev)
+{
+ struct si_sm_io io;
+
+ memset(&io, 0, sizeof(io));
+
+ io.si_type = SI_KCS;
+ io.addr_source = SI_DEVICETREE;
+ io.addr_space = IPMI_MEM_ADDR_SPACE;
+ io.addr_data = dev->hpa.start;
+ io.regsize = 1;
+ io.regspacing = 1;
+ io.regshift = 0;
+ io.irq = 0; /* no interrupt */
+ io.irq_setup = NULL;
+ io.dev = &dev->dev;
+
+ dev_dbg(&dev->dev, "addr 0x%lx\n", io.addr_data);
+
+ return ipmi_si_add_smi(&io);
+}
+
+static void __exit ipmi_parisc_remove(struct parisc_device *dev)
+{
+ ipmi_si_remove_by_dev(&dev->dev);
+}
+
+static const struct parisc_device_id ipmi_parisc_tbl[] __initconst = {
+ { HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(parisc, ipmi_parisc_tbl);
+
+static struct parisc_driver ipmi_parisc_driver __refdata = {
+ .name = "ipmi",
+ .id_table = ipmi_parisc_tbl,
+ .probe = ipmi_parisc_probe,
+ .remove = __exit_p(ipmi_parisc_remove),
+};
+
+void ipmi_si_parisc_init(void)
+{
+ register_parisc_driver(&ipmi_parisc_driver);
+ parisc_registered = true;
+}
+
+void ipmi_si_parisc_shutdown(void)
+{
+ if (parisc_registered)
+ unregister_parisc_driver(&ipmi_parisc_driver);
+}
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
new file mode 100644
index 000000000..74fa20558
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_si_pci.c
+ *
+ * Handling for IPMI devices on the PCI bus.
+ */
+
+#define pr_fmt(fmt) "ipmi_pci: " fmt
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "ipmi_si.h"
+
+static bool pci_registered;
+
+static bool si_trypci = true;
+
+module_param_named(trypci, si_trypci, bool, 0);
+MODULE_PARM_DESC(trypci,
+ "Setting this to zero will disable the default scan of the interfaces identified via pci");
+
+#define PCI_DEVICE_ID_HP_MMC 0x121A
+
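+/*
+ * Guess the register spacing of a PCI KCS interface.  The heuristic (a
+ * descriptive summary of the code below): map a candidate layout, write
+ * an invalid command to the command register, and read the status
+ * register back; a nonzero status means a live register responded at
+ * that spacing, otherwise quadruple the spacing and try again, falling
+ * back to DEFAULT_REGSPACING.
+ */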
+static int ipmi_pci_probe_regspacing(struct si_sm_io *io)
+{
+ if (io->si_type == SI_KCS) {
+ unsigned char status;
+ int regspacing;
+
+ io->regsize = DEFAULT_REGSIZE;
+ io->regshift = 0;
+
+ /* Detect 1-, 4-, or 16-byte spacing. */
+ for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
+ io->regspacing = regspacing;
+ if (io->io_setup(io)) {
+ dev_err(io->dev, "Could not setup I/O space\n");
+ return DEFAULT_REGSPACING;
+ }
+ /* write invalid cmd */
+ io->outputb(io, 1, 0x10);
+ /* read status back */
+ status = io->inputb(io, 1);
+ io->io_cleanup(io);
+ if (status)
+ return regspacing;
+ regspacing *= 4;
+ }
+ }
+ return DEFAULT_REGSPACING;
+}
+
+static struct pci_device_id ipmi_pci_blacklist[] = {
+ /*
+ * This is a "Virtual IPMI device", whatever that is. It appears
+ * as a KCS device by the class, but it is not one.
+ */
+ { PCI_VDEVICE(REALTEK, 0x816c) },
+ { 0, }
+};
+
+static int ipmi_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rv;
+ struct si_sm_io io;
+
+ if (pci_match_id(ipmi_pci_blacklist, pdev))
+ return -ENODEV;
+
+ memset(&io, 0, sizeof(io));
+ io.addr_source = SI_PCI;
+ dev_info(&pdev->dev, "probing via PCI");
+
+ switch (pdev->class) {
+ case PCI_CLASS_SERIAL_IPMI_SMIC:
+ io.si_type = SI_SMIC;
+ break;
+
+ case PCI_CLASS_SERIAL_IPMI_KCS:
+ io.si_type = SI_KCS;
+ break;
+
+ case PCI_CLASS_SERIAL_IPMI_BT:
+ io.si_type = SI_BT;
+ break;
+
+ default:
+ dev_info(&pdev->dev, "Unknown IPMI class: %x\n", pdev->class);
+ return -ENOMEM;
+ }
+
+ rv = pcim_enable_device(pdev);
+ if (rv) {
+ dev_err(&pdev->dev, "couldn't enable PCI device\n");
+ return rv;
+ }
+
+ if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
+ io.addr_space = IPMI_IO_ADDR_SPACE;
+ io.io_setup = ipmi_si_port_setup;
+ } else {
+ io.addr_space = IPMI_MEM_ADDR_SPACE;
+ io.io_setup = ipmi_si_mem_setup;
+ }
+ io.addr_data = pci_resource_start(pdev, 0);
+
+ io.dev = &pdev->dev;
+
+ io.regspacing = ipmi_pci_probe_regspacing(&io);
+ io.regsize = DEFAULT_REGSIZE;
+ io.regshift = 0;
+
+ io.irq = pdev->irq;
+ if (io.irq)
+ io.irq_setup = ipmi_std_irq_setup;
+
+ dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
+ &pdev->resource[0], io.regsize, io.regspacing, io.irq);
+
+ return ipmi_si_add_smi(&io);
+}
+
+static void ipmi_pci_remove(struct pci_dev *pdev)
+{
+ ipmi_si_remove_by_dev(&pdev->dev);
+}
+
+static const struct pci_device_id ipmi_pci_devices[] = {
+ { PCI_VDEVICE(HP, PCI_DEVICE_ID_HP_MMC) },
+ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_IPMI_SMIC, ~0) },
+ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_IPMI_KCS, ~0) },
+ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_IPMI_BT, ~0) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
+
+static struct pci_driver ipmi_pci_driver = {
+ .name = SI_DEVICE_NAME,
+ .id_table = ipmi_pci_devices,
+ .probe = ipmi_pci_probe,
+ .remove = ipmi_pci_remove,
+};
+
+void ipmi_si_pci_init(void)
+{
+ if (si_trypci) {
+ int rv = pci_register_driver(&ipmi_pci_driver);
+ if (rv)
+ pr_err("Unable to register PCI driver: %d\n", rv);
+ else
+ pci_registered = true;
+ }
+}
+
+void ipmi_si_pci_shutdown(void)
+{
+ if (pci_registered)
+ pci_unregister_driver(&ipmi_pci_driver);
+}
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
new file mode 100644
index 000000000..505cc978c
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_si_platform.c
+ *
+ * Handling for platform devices in IPMI (ACPI, OF, and things
+ * coming from the platform).
+ */
+
+#define pr_fmt(fmt) "ipmi_platform: " fmt
+#define dev_fmt pr_fmt
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/acpi.h>
+#include "ipmi_si.h"
+#include "ipmi_dmi.h"
+
+static bool platform_registered;
+static bool si_tryplatform = true;
+#ifdef CONFIG_ACPI
+static bool si_tryacpi = true;
+#endif
+#ifdef CONFIG_OF
+static bool si_tryopenfirmware = true;
+#endif
+#ifdef CONFIG_DMI
+static bool si_trydmi = true;
+#else
+static bool si_trydmi = false;
+#endif
+
+module_param_named(tryplatform, si_tryplatform, bool, 0);
+MODULE_PARM_DESC(tryplatform,
+ "Setting this to zero will disable the default scan of the interfaces identified via platform interfaces besides ACPI, OpenFirmware, and DMI");
+#ifdef CONFIG_ACPI
+module_param_named(tryacpi, si_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi,
+ "Setting this to zero will disable the default scan of the interfaces identified via ACPI");
+#endif
+#ifdef CONFIG_OF
+module_param_named(tryopenfirmware, si_tryopenfirmware, bool, 0);
+MODULE_PARM_DESC(tryopenfirmware,
+ "Setting this to zero will disable the default scan of the interfaces identified via OpenFirmware");
+#endif
+#ifdef CONFIG_DMI
+module_param_named(trydmi, si_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi,
+ "Setting this to zero will disable the default scan of the interfaces identified via DMI");
+#endif
+
+#ifdef CONFIG_ACPI
+/* For GPE-type interrupts. */
+static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
+ u32 gpe_number, void *context)
+{
+ struct si_sm_io *io = context;
+
+ ipmi_si_irq_handler(io->irq, io->irq_handler_data);
+ return ACPI_INTERRUPT_HANDLED;
+}
+
+static void acpi_gpe_irq_cleanup(struct si_sm_io *io)
+{
+ if (!io->irq)
+ return;
+
+ ipmi_irq_start_cleanup(io);
+ acpi_remove_gpe_handler(NULL, io->irq, &ipmi_acpi_gpe);
+}
+
+static int acpi_gpe_irq_setup(struct si_sm_io *io)
+{
+ acpi_status status;
+
+ if (!io->irq)
+ return 0;
+
+ status = acpi_install_gpe_handler(NULL,
+ io->irq,
+ ACPI_GPE_LEVEL_TRIGGERED,
+ &ipmi_acpi_gpe,
+ io);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(io->dev,
+ "Unable to claim ACPI GPE %d, running polled\n",
+ io->irq);
+ io->irq = 0;
+ return -EINVAL;
+ }
+
+ io->irq_cleanup = acpi_gpe_irq_cleanup;
+ ipmi_irq_finish_setup(io);
+ dev_info(io->dev, "Using ACPI GPE %d\n", io->irq);
+ return 0;
+}
+#endif
+
+static void ipmi_set_addr_data_and_space(struct resource *r, struct si_sm_io *io)
+{
+ if (resource_type(r) == IORESOURCE_IO)
+ io->addr_space = IPMI_IO_ADDR_SPACE;
+ else
+ io->addr_space = IPMI_MEM_ADDR_SPACE;
+ io->addr_data = r->start;
+}
+
+static struct resource *
+ipmi_get_info_from_resources(struct platform_device *pdev,
+ struct si_sm_io *io)
+{
+ struct resource *res, *res_second;
+
+ res = platform_get_mem_or_io(pdev, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no I/O or memory address\n");
+ return NULL;
+ }
+ ipmi_set_addr_data_and_space(res, io);
+
+ io->regspacing = DEFAULT_REGSPACING;
+ res_second = platform_get_mem_or_io(pdev, 1);
+ if (res_second && resource_type(res_second) == resource_type(res)) {
+ if (res_second->start > io->addr_data)
+ io->regspacing = res_second->start - io->addr_data;
+ }
+
+ return res;
+}
+
+static int platform_ipmi_probe(struct platform_device *pdev)
+{
+ struct si_sm_io io;
+ u8 type, slave_addr, addr_source, regsize, regshift;
+ int rv;
+
+ rv = device_property_read_u8(&pdev->dev, "addr-source", &addr_source);
+ if (rv)
+ addr_source = SI_PLATFORM;
+ if (addr_source >= SI_LAST)
+ return -EINVAL;
+
+ if (addr_source == SI_SMBIOS) {
+ if (!si_trydmi)
+ return -ENODEV;
+ } else if (addr_source != SI_HARDCODED) {
+ if (!si_tryplatform)
+ return -ENODEV;
+ }
+
+ rv = device_property_read_u8(&pdev->dev, "ipmi-type", &type);
+ if (rv)
+ return -ENODEV;
+
+ memset(&io, 0, sizeof(io));
+ io.addr_source = addr_source;
+ dev_info(&pdev->dev, "probing via %s\n",
+ ipmi_addr_src_to_str(addr_source));
+
+ switch (type) {
+ case SI_KCS:
+ case SI_SMIC:
+ case SI_BT:
+ io.si_type = type;
+ break;
+ case SI_TYPE_INVALID: /* User disabled this in hardcode. */
+ return -ENODEV;
+ default:
+ dev_err(&pdev->dev, "ipmi-type property is invalid\n");
+ return -EINVAL;
+ }
+
+ io.regsize = DEFAULT_REGSIZE;
+ rv = device_property_read_u8(&pdev->dev, "reg-size", &regsize);
+ if (!rv)
+ io.regsize = regsize;
+
+ io.regshift = 0;
+ rv = device_property_read_u8(&pdev->dev, "reg-shift", &regshift);
+ if (!rv)
+ io.regshift = regshift;
+
+ if (!ipmi_get_info_from_resources(pdev, &io))
+ return -EINVAL;
+
+ rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr);
+ if (rv)
+ io.slave_addr = 0x20;
+ else
+ io.slave_addr = slave_addr;
+
+ io.irq = platform_get_irq_optional(pdev, 0);
+ if (io.irq > 0)
+ io.irq_setup = ipmi_std_irq_setup;
+ else
+ io.irq = 0;
+
+ io.dev = &pdev->dev;
+
+ pr_info("ipmi_si: %s: %s %#lx regsize %d spacing %d irq %d\n",
+ ipmi_addr_src_to_str(addr_source),
+ (io.addr_space == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ io.addr_data, io.regsize, io.regspacing, io.irq);
+
+ ipmi_si_add_smi(&io);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_ipmi_match[] = {
+ { .type = "ipmi", .compatible = "ipmi-kcs",
+ .data = (void *)(unsigned long) SI_KCS },
+ { .type = "ipmi", .compatible = "ipmi-smic",
+ .data = (void *)(unsigned long) SI_SMIC },
+ { .type = "ipmi", .compatible = "ipmi-bt",
+ .data = (void *)(unsigned long) SI_BT },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_ipmi_match);
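+
+/*
+ * Illustrative device tree node (an assumed example, not taken from the
+ * source; the property names match the of_get_property() calls below):
+ *
+ *	ipmi@ca2 {
+ *		device_type = "ipmi";
+ *		compatible = "ipmi-kcs";
+ *		reg = <0xca2 2>;
+ *		reg-size = <1>;
+ *		reg-spacing = <1>;
+ *		reg-shift = <0>;
+ *	};
+ */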
+
+static int of_ipmi_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct si_sm_io io;
+ struct resource resource;
+ const __be32 *regsize, *regspacing, *regshift;
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+ int proplen;
+
+ if (!si_tryopenfirmware)
+ return -ENODEV;
+
+ dev_info(&pdev->dev, "probing via device tree\n");
+
+ match = of_match_device(of_ipmi_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ if (!of_device_is_available(np))
+ return -EINVAL;
+
+ ret = of_address_to_resource(np, 0, &resource);
+ if (ret) {
+ dev_warn(&pdev->dev, "invalid address from OF\n");
+ return ret;
+ }
+
+ regsize = of_get_property(np, "reg-size", &proplen);
+ if (regsize && proplen != 4) {
+ dev_warn(&pdev->dev, "invalid regsize from OF\n");
+ return -EINVAL;
+ }
+
+ regspacing = of_get_property(np, "reg-spacing", &proplen);
+ if (regspacing && proplen != 4) {
+ dev_warn(&pdev->dev, "invalid regspacing from OF\n");
+ return -EINVAL;
+ }
+
+ regshift = of_get_property(np, "reg-shift", &proplen);
+ if (regshift && proplen != 4) {
+ dev_warn(&pdev->dev, "invalid regshift from OF\n");
+ return -EINVAL;
+ }
+
+ memset(&io, 0, sizeof(io));
+ io.si_type = (enum si_type) match->data;
+ io.addr_source = SI_DEVICETREE;
+ io.irq_setup = ipmi_std_irq_setup;
+
+ ipmi_set_addr_data_and_space(&resource, &io);
+
+ io.regsize = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE;
+ io.regspacing = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING;
+ io.regshift = regshift ? be32_to_cpup(regshift) : 0;
+
+ io.irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ io.dev = &pdev->dev;
+
+ dev_dbg(&pdev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
+ io.addr_data, io.regsize, io.regspacing, io.irq);
+
+ return ipmi_si_add_smi(&io);
+}
+#else
+#define of_ipmi_match NULL
+static int of_ipmi_probe(struct platform_device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+#ifdef CONFIG_ACPI
+static int find_slave_address(struct si_sm_io *io, int slave_addr)
+{
+#ifdef CONFIG_IPMI_DMI_DECODE
+ if (!slave_addr)
+ slave_addr = ipmi_dmi_get_slave_addr(io->si_type,
+ io->addr_space,
+ io->addr_data);
+#endif
+
+ return slave_addr;
+}
+
+static int acpi_ipmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct si_sm_io io;
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long tmp;
+ struct resource *res;
+
+ if (!si_tryacpi)
+ return -ENODEV;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
+ return -ENODEV;
+
+ memset(&io, 0, sizeof(io));
+ io.addr_source = SI_ACPI;
+ dev_info(dev, "probing via ACPI\n");
+
+ io.addr_info.acpi_info.acpi_handle = handle;
+
+ /* _IFT tells us the interface type: KCS, BT, etc */
+ status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "Could not find ACPI IPMI interface type\n");
+ return -EINVAL;
+ }
+
+ switch (tmp) {
+ case 1:
+ io.si_type = SI_KCS;
+ break;
+ case 2:
+ io.si_type = SI_SMIC;
+ break;
+ case 3:
+ io.si_type = SI_BT;
+ break;
+ case 4: /* SSIF, just ignore */
+ return -ENODEV;
+ default:
+ dev_info(dev, "unknown IPMI type %lld\n", tmp);
+ return -EINVAL;
+ }
+
+ io.dev = dev;
+ io.regsize = DEFAULT_REGSIZE;
+ io.regshift = 0;
+
+ res = ipmi_get_info_from_resources(pdev, &io);
+ if (!res)
+ return -EINVAL;
+
+ /* If _GPE exists, use it; otherwise use standard interrupts */
+ status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
+ if (ACPI_SUCCESS(status)) {
+ io.irq = tmp;
+ io.irq_setup = acpi_gpe_irq_setup;
+ } else {
+ int irq = platform_get_irq_optional(pdev, 0);
+
+ if (irq > 0) {
+ io.irq = irq;
+ io.irq_setup = ipmi_std_irq_setup;
+ }
+ }
+
+ io.slave_addr = find_slave_address(&io, io.slave_addr);
+
+ dev_info(dev, "%pR regsize %d spacing %d irq %d\n",
+ res, io.regsize, io.regspacing, io.irq);
+
+ request_module("acpi_ipmi");
+
+ return ipmi_si_add_smi(&io);
+}
+
+static const struct acpi_device_id acpi_ipmi_match[] = {
+ { "IPI0001", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, acpi_ipmi_match);
+#else
+static int acpi_ipmi_probe(struct platform_device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+static int ipmi_probe(struct platform_device *pdev)
+{
+ if (pdev->dev.of_node && of_ipmi_probe(pdev) == 0)
+ return 0;
+
+ if (acpi_ipmi_probe(pdev) == 0)
+ return 0;
+
+ return platform_ipmi_probe(pdev);
+}
+
+static int ipmi_remove(struct platform_device *pdev)
+{
+ ipmi_si_remove_by_dev(&pdev->dev);
+
+ return 0;
+}
+
+static int pdev_match_name(struct device *dev, const void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ const char *name = data;
+
+ return strcmp(pdev->name, name) == 0;
+}
+
+void ipmi_remove_platform_device_by_name(char *name)
+{
+ struct device *dev;
+
+ while ((dev = bus_find_device(&platform_bus_type, NULL, name,
+ pdev_match_name))) {
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+ put_device(dev);
+ }
+}
+
+static const struct platform_device_id si_plat_ids[] = {
+ { "dmi-ipmi-si", 0 },
+ { "hardcode-ipmi-si", 0 },
+ { "hotmod-ipmi-si", 0 },
+ { }
+};
+
+struct platform_driver ipmi_platform_driver = {
+ .driver = {
+ .name = SI_DEVICE_NAME,
+ .of_match_table = of_ipmi_match,
+ .acpi_match_table = ACPI_PTR(acpi_ipmi_match),
+ },
+ .probe = ipmi_probe,
+ .remove = ipmi_remove,
+ .id_table = si_plat_ids
+};
+
+void ipmi_si_platform_init(void)
+{
+ int rv = platform_driver_register(&ipmi_platform_driver);
+ if (rv)
+ pr_err("Unable to register driver: %d\n", rv);
+ else
+ platform_registered = true;
+}
+
+void ipmi_si_platform_shutdown(void)
+{
+ if (platform_registered)
+ platform_driver_unregister(&ipmi_platform_driver);
+}
diff --git a/drivers/char/ipmi/ipmi_si_port_io.c b/drivers/char/ipmi/ipmi_si_port_io.c
new file mode 100644
index 000000000..7d66f68eb
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_port_io.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/io.h>
+#include "ipmi_si.h"
+
+static unsigned char port_inb(const struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int addr = io->addr_data;
+
+ return inb(addr + (offset * io->regspacing));
+}
+
+static void port_outb(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int addr = io->addr_data;
+
+ outb(b, addr + (offset * io->regspacing));
+}
+
+static unsigned char port_inw(const struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int addr = io->addr_data;
+
+ return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
+}
+
+static void port_outw(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int addr = io->addr_data;
+
+ outw(b << io->regshift, addr + (offset * io->regspacing));
+}
+
+static unsigned char port_inl(const struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int addr = io->addr_data;
+
+ return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
+}
+
+static void port_outl(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int addr = io->addr_data;
+
+ outl(b << io->regshift, addr+(offset * io->regspacing));
+}
+
+static void port_cleanup(struct si_sm_io *io)
+{
+ unsigned int addr = io->addr_data;
+ int idx;
+
+ if (addr) {
+ for (idx = 0; idx < io->io_size; idx++)
+ release_region(addr + idx * io->regspacing,
+ io->regsize);
+ }
+}
+
+int ipmi_si_port_setup(struct si_sm_io *io)
+{
+ unsigned int addr = io->addr_data;
+ int idx;
+
+ if (!addr)
+ return -ENODEV;
+
+ /*
+ * Figure out the actual inb/inw/inl/etc routine to use based
+ * upon the register size.
+ */
+ switch (io->regsize) {
+ case 1:
+ io->inputb = port_inb;
+ io->outputb = port_outb;
+ break;
+ case 2:
+ io->inputb = port_inw;
+ io->outputb = port_outw;
+ break;
+ case 4:
+ io->inputb = port_inl;
+ io->outputb = port_outl;
+ break;
+ default:
+ dev_warn(io->dev, "Invalid register size: %d\n",
+ io->regsize);
+ return -EINVAL;
+ }
+
+ /*
+ * Some BIOSes reserve disjoint I/O regions in their ACPI
+ * tables. This causes problems when trying to register the
+ * entire I/O region. Therefore we must register each I/O
+ * port separately.
+ */
+ for (idx = 0; idx < io->io_size; idx++) {
+ if (request_region(addr + idx * io->regspacing,
+ io->regsize, SI_DEVICE_NAME) == NULL) {
+ /* Undo allocations */
+ while (idx--)
+ release_region(addr + idx * io->regspacing,
+ io->regsize);
+ return -EIO;
+ }
+ }
+
+ io->io_cleanup = port_cleanup;
+
+ return 0;
+}
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h
new file mode 100644
index 000000000..c3cdbcab0
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_sm.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ipmi_si_sm.h
+ *
+ * State machine interface for low-level IPMI system management
+ * interface state machines. This code is the interface between
+ * the ipmi_smi code (that handles the policy of a KCS, SMIC, or
+ * BT interface) and the actual low-level state machine.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ */
+
+#ifndef __IPMI_SI_SM_H__
+#define __IPMI_SI_SM_H__
+
+#include "ipmi_si.h"
+
+/*
+ * This is defined by the state machines themselves, it is an opaque
+ * data type for them to use.
+ */
+struct si_sm_data;
+
+/* Results of SMI events. */
+enum si_sm_result {
+ SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */
+ SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */
+ SI_SM_CALL_WITH_TICK_DELAY,/* Delay >=1 tick before calling again. */
+ SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */
+ SI_SM_IDLE, /* The SM is in idle state. */
+ SI_SM_HOSED, /* The hardware violated the state machine. */
+
+ /*
+ * The hardware is asserting attn and the state machine is
+ * idle.
+ */
+ SI_SM_ATTN
+};
+
+/* Handlers for the SMI state machine. */
+struct si_sm_handlers {
+ /*
+ * Put the version number of the state machine here so the
+ * upper layer can print it.
+ */
+ char *version;
+
+ /*
+ * Initialize the data and return the amount of I/O space to
+ * reserve for the interface.
+ */
+ unsigned int (*init_data)(struct si_sm_data *smi,
+ struct si_sm_io *io);
+
+ /*
+ * Start a new transaction in the state machine. This will
+ * return -2 if the state machine is not idle, -1 if the size
+ * is invalid (too large or too small), or 0 if the transaction
+ * was successfully started.
+ */
+ int (*start_transaction)(struct si_sm_data *smi,
+ unsigned char *data, unsigned int size);
+
+ /*
+ * Return the results after the transaction. This will return
+ * -1 if the buffer is too small, zero if no transaction is
+ * present, or the actual length of the result data.
+ */
+ int (*get_result)(struct si_sm_data *smi,
+ unsigned char *data, unsigned int length);
+
+ /*
+ * Call this periodically (for a polled interface) or upon
+ * receiving an interrupt (for an interrupt-driven interface).
+ * If interrupt driven, you should probably poll this
+ * periodically when not in idle state. This should be called
+ * with the time that passed since the last call, if it is
+ * significant. Time is in microseconds.
+ */
+ enum si_sm_result (*event)(struct si_sm_data *smi, long time);
+
+ /*
+ * Attempt to detect an SMI. Returns 0 on success or nonzero
+ * on failure.
+ */
+ int (*detect)(struct si_sm_data *smi);
+
+ /* The interface is shutting down, so clean it up. */
+ void (*cleanup)(struct si_sm_data *smi);
+
+ /* Return the size of the SMI structure in bytes. */
+ int (*size)(void);
+};
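+
+/*
+ * Illustrative driving loop (a sketch only; the real policy, with
+ * timers, retries and interrupts, lives in ipmi_si_intf.c):
+ *
+ *	if (handlers->start_transaction(smi, msg, len) != 0)
+ *		return -EBUSY;
+ *	do {
+ *		result = handlers->event(smi, usec_since_last_call);
+ *	} while (result != SI_SM_TRANSACTION_COMPLETE &&
+ *		 result != SI_SM_HOSED);
+ *	len = handlers->get_result(smi, buf, sizeof(buf));
+ */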
+
+/* Current state machines that we can use. */
+extern const struct si_sm_handlers kcs_smi_handlers;
+extern const struct si_sm_handlers smic_smi_handlers;
+extern const struct si_sm_handlers bt_smi_handlers;
+
+#endif /* __IPMI_SI_SM_H__ */
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
new file mode 100644
index 000000000..bfea500d6
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_smic_sm.c
+ *
+ * The state-machine driver for the IPMI SMIC interface
+ *
+ * It started as a copy of Corey Minyard's driver for the KCS interface
+ * and the kernel patch "mmcdev-patch-245" by HP
+ *
+ * modified by: Hannes Schulz <schulz@schwaar.com>
+ * ipmi@schwaar.com
+ *
+ *
+ * Corey Minyard's driver for the KCS interface has the following
+ * copyright notice:
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * the kernel patch "mmcdev-patch-245" by HP has the following
+ * copyright notice:
+ * (c) Copyright 2001 Grant Grundler
+ * (c) Copyright 2001 Hewlett-Packard Company
+ */
+
+#define DEBUG /* So dev_dbg() is always available. */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi_msgdefs.h> /* for completion codes */
+#include "ipmi_si_sm.h"
+
+/* smic_debug is a bit-field
+ * SMIC_DEBUG_ENABLE - turned on for now
+ * SMIC_DEBUG_MSG - commands and their responses
+ * SMIC_DEBUG_STATES - state machine
+ */
+#define SMIC_DEBUG_STATES 4
+#define SMIC_DEBUG_MSG 2
+#define SMIC_DEBUG_ENABLE 1
+
+static int smic_debug = 1;
+module_param(smic_debug, int, 0644);
+MODULE_PARM_DESC(smic_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
+
+enum smic_states {
+ SMIC_IDLE,
+ SMIC_START_OP,
+ SMIC_OP_OK,
+ SMIC_WRITE_START,
+ SMIC_WRITE_NEXT,
+ SMIC_WRITE_END,
+ SMIC_WRITE2READ,
+ SMIC_READ_START,
+ SMIC_READ_NEXT,
+ SMIC_READ_END,
+ SMIC_HOSED
+};
+
+#define MAX_SMIC_READ_SIZE 80
+#define MAX_SMIC_WRITE_SIZE 80
+#define SMIC_MAX_ERROR_RETRIES 3
+
+/* Timeouts in microseconds. */
+#define SMIC_RETRY_TIMEOUT (2*USEC_PER_SEC)
+
+/* SMIC Flags Register Bits */
+#define SMIC_RX_DATA_READY 0x80
+#define SMIC_TX_DATA_READY 0x40
+
+/*
+ * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by
+ * a few systems, and then only by Systems Management
+ * Interrupts, not by the OS. Always ignore these bits.
+ */
+#define SMIC_SMI 0x10
+#define SMIC_EVM_DATA_AVAIL 0x08
+#define SMIC_SMS_DATA_AVAIL 0x04
+#define SMIC_FLAG_BSY 0x01
+
+/* SMIC Error Codes */
+#define EC_NO_ERROR 0x00
+#define EC_ABORTED 0x01
+#define EC_ILLEGAL_CONTROL 0x02
+#define EC_NO_RESPONSE 0x03
+#define EC_ILLEGAL_COMMAND 0x04
+#define EC_BUFFER_FULL 0x05
+
+struct si_sm_data {
+ enum smic_states state;
+ struct si_sm_io *io;
+ unsigned char write_data[MAX_SMIC_WRITE_SIZE];
+ int write_pos;
+ int write_count;
+ int orig_write_count;
+ unsigned char read_data[MAX_SMIC_READ_SIZE];
+ int read_pos;
+ int truncated;
+ unsigned int error_retries;
+ long smic_timeout;
+};
+
+static unsigned int init_smic_data(struct si_sm_data *smic,
+ struct si_sm_io *io)
+{
+ smic->state = SMIC_IDLE;
+ smic->io = io;
+ smic->write_pos = 0;
+ smic->write_count = 0;
+ smic->orig_write_count = 0;
+ smic->read_pos = 0;
+ smic->error_retries = 0;
+ smic->truncated = 0;
+ smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+
+ /* We use 3 bytes of I/O. */
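+ /* (Offset 0 is the data register, 1 is control/status and 2 is
+ * flags; see the read_smic_*()/write_smic_*() helpers below.) */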
+ return 3;
+}
+
+static int start_smic_transaction(struct si_sm_data *smic,
+ unsigned char *data, unsigned int size)
+{
+ unsigned int i;
+
+ if (size < 2)
+ return IPMI_REQ_LEN_INVALID_ERR;
+ if (size > MAX_SMIC_WRITE_SIZE)
+ return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+ if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED)) {
+ dev_warn(smic->io->dev,
+ "SMIC in invalid state %d\n", smic->state);
+ return IPMI_NOT_IN_MY_STATE_ERR;
+ }
+
+ if (smic_debug & SMIC_DEBUG_MSG) {
+ dev_dbg(smic->io->dev, "%s -", __func__);
+ for (i = 0; i < size; i++)
+ pr_cont(" %02x", data[i]);
+ pr_cont("\n");
+ }
+ smic->error_retries = 0;
+ memcpy(smic->write_data, data, size);
+ smic->write_count = size;
+ smic->orig_write_count = size;
+ smic->write_pos = 0;
+ smic->read_pos = 0;
+ smic->state = SMIC_START_OP;
+ smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+ return 0;
+}
+
+static int smic_get_result(struct si_sm_data *smic,
+ unsigned char *data, unsigned int length)
+{
+ int i;
+
+ if (smic_debug & SMIC_DEBUG_MSG) {
+ dev_dbg(smic->io->dev, "smic_get result -");
+ for (i = 0; i < smic->read_pos; i++)
+ pr_cont(" %02x", smic->read_data[i]);
+ pr_cont("\n");
+ }
+ if (length < smic->read_pos) {
+ smic->read_pos = length;
+ smic->truncated = 1;
+ }
+ memcpy(data, smic->read_data, smic->read_pos);
+
+ if ((length >= 3) && (smic->read_pos < 3)) {
+ data[2] = IPMI_ERR_UNSPECIFIED;
+ smic->read_pos = 3;
+ }
+ if (smic->truncated) {
+ data[2] = IPMI_ERR_MSG_TRUNCATED;
+ smic->truncated = 0;
+ }
+ return smic->read_pos;
+}
+
+static inline unsigned char read_smic_flags(struct si_sm_data *smic)
+{
+ return smic->io->inputb(smic->io, 2);
+}
+
+static inline unsigned char read_smic_status(struct si_sm_data *smic)
+{
+ return smic->io->inputb(smic->io, 1);
+}
+
+static inline unsigned char read_smic_data(struct si_sm_data *smic)
+{
+ return smic->io->inputb(smic->io, 0);
+}
+
+static inline void write_smic_flags(struct si_sm_data *smic,
+ unsigned char flags)
+{
+ smic->io->outputb(smic->io, 2, flags);
+}
+
+static inline void write_smic_control(struct si_sm_data *smic,
+ unsigned char control)
+{
+ smic->io->outputb(smic->io, 1, control);
+}
+
+static inline void write_si_sm_data(struct si_sm_data *smic,
+ unsigned char data)
+{
+ smic->io->outputb(smic->io, 0, data);
+}
+
+static inline void start_error_recovery(struct si_sm_data *smic, char *reason)
+{
+ (smic->error_retries)++;
+ if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) {
+ if (smic_debug & SMIC_DEBUG_ENABLE)
+ pr_warn("ipmi_smic_drv: smic hosed: %s\n", reason);
+ smic->state = SMIC_HOSED;
+ } else {
+ smic->write_count = smic->orig_write_count;
+ smic->write_pos = 0;
+ smic->read_pos = 0;
+ smic->state = SMIC_START_OP;
+ smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+ }
+}
+
+static inline void write_next_byte(struct si_sm_data *smic)
+{
+ write_si_sm_data(smic, smic->write_data[smic->write_pos]);
+ (smic->write_pos)++;
+ (smic->write_count)--;
+}
+
+static inline void read_next_byte(struct si_sm_data *smic)
+{
+ if (smic->read_pos >= MAX_SMIC_READ_SIZE) {
+ read_smic_data(smic);
+ smic->truncated = 1;
+ } else {
+ smic->read_data[smic->read_pos] = read_smic_data(smic);
+ smic->read_pos++;
+ }
+}
+
+/* SMIC Control/Status Code Components */
+#define SMIC_GET_STATUS 0x00 /* Control form's name */
+#define SMIC_READY 0x00 /* Status form's name */
+#define SMIC_WR_START 0x01 /* Unified Control/Status names... */
+#define SMIC_WR_NEXT 0x02
+#define SMIC_WR_END 0x03
+#define SMIC_RD_START 0x04
+#define SMIC_RD_NEXT 0x05
+#define SMIC_RD_END 0x06
+#define SMIC_CODE_MASK 0x0f
+
+#define SMIC_CONTROL 0x00
+#define SMIC_STATUS 0x80
+#define SMIC_CS_MASK 0x80
+
+#define SMIC_SMS 0x40
+#define SMIC_SMM 0x60
+#define SMIC_STREAM_MASK 0x60
+
+/* SMIC Control Codes */
+#define SMIC_CC_SMS_GET_STATUS (SMIC_CONTROL|SMIC_SMS|SMIC_GET_STATUS)
+#define SMIC_CC_SMS_WR_START (SMIC_CONTROL|SMIC_SMS|SMIC_WR_START)
+#define SMIC_CC_SMS_WR_NEXT (SMIC_CONTROL|SMIC_SMS|SMIC_WR_NEXT)
+#define SMIC_CC_SMS_WR_END (SMIC_CONTROL|SMIC_SMS|SMIC_WR_END)
+#define SMIC_CC_SMS_RD_START (SMIC_CONTROL|SMIC_SMS|SMIC_RD_START)
+#define SMIC_CC_SMS_RD_NEXT (SMIC_CONTROL|SMIC_SMS|SMIC_RD_NEXT)
+#define SMIC_CC_SMS_RD_END (SMIC_CONTROL|SMIC_SMS|SMIC_RD_END)
+
+#define SMIC_CC_SMM_GET_STATUS (SMIC_CONTROL|SMIC_SMM|SMIC_GET_STATUS)
+#define SMIC_CC_SMM_WR_START (SMIC_CONTROL|SMIC_SMM|SMIC_WR_START)
+#define SMIC_CC_SMM_WR_NEXT (SMIC_CONTROL|SMIC_SMM|SMIC_WR_NEXT)
+#define SMIC_CC_SMM_WR_END (SMIC_CONTROL|SMIC_SMM|SMIC_WR_END)
+#define SMIC_CC_SMM_RD_START (SMIC_CONTROL|SMIC_SMM|SMIC_RD_START)
+#define SMIC_CC_SMM_RD_NEXT (SMIC_CONTROL|SMIC_SMM|SMIC_RD_NEXT)
+#define SMIC_CC_SMM_RD_END (SMIC_CONTROL|SMIC_SMM|SMIC_RD_END)
+
+/* SMIC Status Codes */
+#define SMIC_SC_SMS_READY (SMIC_STATUS|SMIC_SMS|SMIC_READY)
+#define SMIC_SC_SMS_WR_START (SMIC_STATUS|SMIC_SMS|SMIC_WR_START)
+#define SMIC_SC_SMS_WR_NEXT (SMIC_STATUS|SMIC_SMS|SMIC_WR_NEXT)
+#define SMIC_SC_SMS_WR_END (SMIC_STATUS|SMIC_SMS|SMIC_WR_END)
+#define SMIC_SC_SMS_RD_START (SMIC_STATUS|SMIC_SMS|SMIC_RD_START)
+#define SMIC_SC_SMS_RD_NEXT (SMIC_STATUS|SMIC_SMS|SMIC_RD_NEXT)
+#define SMIC_SC_SMS_RD_END (SMIC_STATUS|SMIC_SMS|SMIC_RD_END)
+
+#define SMIC_SC_SMM_READY (SMIC_STATUS|SMIC_SMM|SMIC_READY)
+#define SMIC_SC_SMM_WR_START (SMIC_STATUS|SMIC_SMM|SMIC_WR_START)
+#define SMIC_SC_SMM_WR_NEXT (SMIC_STATUS|SMIC_SMM|SMIC_WR_NEXT)
+#define SMIC_SC_SMM_WR_END (SMIC_STATUS|SMIC_SMM|SMIC_WR_END)
+#define SMIC_SC_SMM_RD_START (SMIC_STATUS|SMIC_SMM|SMIC_RD_START)
+#define SMIC_SC_SMM_RD_NEXT (SMIC_STATUS|SMIC_SMM|SMIC_RD_NEXT)
+#define SMIC_SC_SMM_RD_END (SMIC_STATUS|SMIC_SMM|SMIC_RD_END)
+
+/* these are the control/status codes we actually use
+ SMIC_CC_SMS_GET_STATUS 0x40
+ SMIC_CC_SMS_WR_START 0x41
+ SMIC_CC_SMS_WR_NEXT 0x42
+ SMIC_CC_SMS_WR_END 0x43
+ SMIC_CC_SMS_RD_START 0x44
+ SMIC_CC_SMS_RD_NEXT 0x45
+ SMIC_CC_SMS_RD_END 0x46
+
+ SMIC_SC_SMS_READY 0xC0
+ SMIC_SC_SMS_WR_START 0xC1
+ SMIC_SC_SMS_WR_NEXT 0xC2
+ SMIC_SC_SMS_WR_END 0xC3
+ SMIC_SC_SMS_RD_START 0xC4
+ SMIC_SC_SMS_RD_NEXT 0xC5
+ SMIC_SC_SMS_RD_END 0xC6
+ */
+
+static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
+{
+ unsigned char status;
+ unsigned char flags;
+ unsigned char data;
+
+ if (smic->state == SMIC_HOSED) {
+ init_smic_data(smic, smic->io);
+ return SI_SM_HOSED;
+ }
+ if (smic->state != SMIC_IDLE) {
+ if (smic_debug & SMIC_DEBUG_STATES)
+ dev_dbg(smic->io->dev,
+ "%s - smic->smic_timeout = %ld, time = %ld\n",
+ __func__, smic->smic_timeout, time);
+ /*
+ * FIXME: smic_event is sometimes called with time >
+ * SMIC_RETRY_TIMEOUT
+ */
+ if (time < SMIC_RETRY_TIMEOUT) {
+ smic->smic_timeout -= time;
+ if (smic->smic_timeout < 0) {
+ start_error_recovery(smic, "smic timed out.");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ }
+ }
+ flags = read_smic_flags(smic);
+ if (flags & SMIC_FLAG_BSY)
+ return SI_SM_CALL_WITH_DELAY;
+
+ status = read_smic_status(smic);
+ if (smic_debug & SMIC_DEBUG_STATES)
+ dev_dbg(smic->io->dev,
+ "%s - state = %d, flags = 0x%02x, status = 0x%02x\n",
+ __func__, smic->state, flags, status);
+
+ switch (smic->state) {
+ case SMIC_IDLE:
+ /* in IDLE we check for available messages */
+ if (flags & SMIC_SMS_DATA_AVAIL)
+ return SI_SM_ATTN;
+ return SI_SM_IDLE;
+
+ case SMIC_START_OP:
+ /* sanity check whether smic is really idle */
+ write_smic_control(smic, SMIC_CC_SMS_GET_STATUS);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_OP_OK;
+ break;
+
+ case SMIC_OP_OK:
+ if (status != SMIC_SC_SMS_READY) {
+ /* this should not happen */
+ start_error_recovery(smic,
+ "state = SMIC_OP_OK,"
+ " status != SMIC_SC_SMS_READY");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ /* OK so far; smic is idle let us start ... */
+ write_smic_control(smic, SMIC_CC_SMS_WR_START);
+ write_next_byte(smic);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_WRITE_START;
+ break;
+
+ case SMIC_WRITE_START:
+ if (status != SMIC_SC_SMS_WR_START) {
+ start_error_recovery(smic,
+ "state = SMIC_WRITE_START, "
+ "status != SMIC_SC_SMS_WR_START");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ /*
+ * we must not issue WR_(NEXT|END) unless
+ * TX_DATA_READY is set
+ * */
+ if (flags & SMIC_TX_DATA_READY) {
+ if (smic->write_count == 1) {
+ /* last byte */
+ write_smic_control(smic, SMIC_CC_SMS_WR_END);
+ smic->state = SMIC_WRITE_END;
+ } else {
+ write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
+ smic->state = SMIC_WRITE_NEXT;
+ }
+ write_next_byte(smic);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ } else
+ return SI_SM_CALL_WITH_DELAY;
+ break;
+
+ case SMIC_WRITE_NEXT:
+ if (status != SMIC_SC_SMS_WR_NEXT) {
+ start_error_recovery(smic,
+ "state = SMIC_WRITE_NEXT, "
+ "status != SMIC_SC_SMS_WR_NEXT");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ /* this is the same code as in SMIC_WRITE_START */
+ if (flags & SMIC_TX_DATA_READY) {
+ if (smic->write_count == 1) {
+ write_smic_control(smic, SMIC_CC_SMS_WR_END);
+ smic->state = SMIC_WRITE_END;
+ } else {
+ write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
+ smic->state = SMIC_WRITE_NEXT;
+ }
+ write_next_byte(smic);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ } else
+ return SI_SM_CALL_WITH_DELAY;
+ break;
+
+ case SMIC_WRITE_END:
+ if (status != SMIC_SC_SMS_WR_END) {
+ start_error_recovery(smic,
+ "state = SMIC_WRITE_END, "
+ "status != SMIC_SC_SMS_WR_END");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ /* data register holds an error code */
+ data = read_smic_data(smic);
+ if (data != 0) {
+ if (smic_debug & SMIC_DEBUG_ENABLE)
+ dev_dbg(smic->io->dev,
+ "SMIC_WRITE_END: data = %02x\n",
+ data);
+ start_error_recovery(smic,
+ "state = SMIC_WRITE_END, "
+ "data != SUCCESS");
+ return SI_SM_CALL_WITH_DELAY;
+ } else
+ smic->state = SMIC_WRITE2READ;
+ break;
+
+ case SMIC_WRITE2READ:
+ /*
+ * we must wait for RX_DATA_READY to be set before we
+ * can continue
+ */
+ if (flags & SMIC_RX_DATA_READY) {
+ write_smic_control(smic, SMIC_CC_SMS_RD_START);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_READ_START;
+ } else
+ return SI_SM_CALL_WITH_DELAY;
+ break;
+
+ case SMIC_READ_START:
+ if (status != SMIC_SC_SMS_RD_START) {
+ start_error_recovery(smic,
+ "state = SMIC_READ_START, "
+ "status != SMIC_SC_SMS_RD_START");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ if (flags & SMIC_RX_DATA_READY) {
+ read_next_byte(smic);
+ write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_READ_NEXT;
+ } else
+ return SI_SM_CALL_WITH_DELAY;
+ break;
+
+ case SMIC_READ_NEXT:
+ switch (status) {
+ /*
+ * smic tells us that this is the last byte to be read
+ * --> clean up
+ */
+ case SMIC_SC_SMS_RD_END:
+ read_next_byte(smic);
+ write_smic_control(smic, SMIC_CC_SMS_RD_END);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_READ_END;
+ break;
+ case SMIC_SC_SMS_RD_NEXT:
+ if (flags & SMIC_RX_DATA_READY) {
+ read_next_byte(smic);
+ write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_READ_NEXT;
+ } else
+ return SI_SM_CALL_WITH_DELAY;
+ break;
+ default:
+ start_error_recovery(
+ smic,
+ "state = SMIC_READ_NEXT, "
+ "status != SMIC_SC_SMS_RD_(NEXT|END)");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ break;
+
+ case SMIC_READ_END:
+ if (status != SMIC_SC_SMS_READY) {
+ start_error_recovery(smic,
+ "state = SMIC_READ_END, "
+ "status != SMIC_SC_SMS_READY");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ data = read_smic_data(smic);
+ /* data register holds an error code */
+ if (data != 0) {
+ if (smic_debug & SMIC_DEBUG_ENABLE)
+ dev_dbg(smic->io->dev,
+ "SMIC_READ_END: data = %02x\n",
+ data);
+ start_error_recovery(smic,
+ "state = SMIC_READ_END, "
+ "data != SUCCESS");
+ return SI_SM_CALL_WITH_DELAY;
+ } else {
+ smic->state = SMIC_IDLE;
+ return SI_SM_TRANSACTION_COMPLETE;
+ }
+
+ case SMIC_HOSED:
+ init_smic_data(smic, smic->io);
+ return SI_SM_HOSED;
+
+ default:
+ if (smic_debug & SMIC_DEBUG_ENABLE) {
+ dev_dbg(smic->io->dev,
+ "smic->state = %d\n", smic->state);
+ start_error_recovery(smic, "state = UNKNOWN");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ }
+ smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+ return SI_SM_CALL_WITHOUT_DELAY;
+}
+
+static int smic_detect(struct si_sm_data *smic)
+{
+ /*
+ * It's impossible for the SMIC flags register to be all 1's
+ * (assuming a properly functioning, self-initialized BMC),
+ * but that's what you get from reading a bogus address, so we
+ * test that first.
+ */
+ if (read_smic_flags(smic) == 0xff)
+ return 1;
+
+ return 0;
+}
+
+static void smic_cleanup(struct si_sm_data *kcs)
+{
+}
+
+static int smic_size(void)
+{
+ return sizeof(struct si_sm_data);
+}
+
+const struct si_sm_handlers smic_smi_handlers = {
+ .init_data = init_smic_data,
+ .start_transaction = start_smic_transaction,
+ .get_result = smic_get_result,
+ .event = smic_event,
+ .detect = smic_detect,
+ .cleanup = smic_cleanup,
+ .size = smic_size,
+};
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
new file mode 100644
index 000000000..248459f97
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -0,0 +1,2168 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_ssif.c
+ *
+ * The interface to the IPMI driver for SMBus access to a SMBus
+ * compliant device. Called SSIF by the IPMI spec.
+ *
+ * Author: Intel Corporation
+ * Todd Davis <todd.c.davis@intel.com>
+ *
+ * Rewritten by Corey Minyard <minyard@acm.org> to support the
+ * non-blocking I2C interface, add support for multi-part
+ * transactions, add PEC support, and general cleanup.
+ *
+ * Copyright 2003 Intel Corporation
+ * Copyright 2005 MontaVista Software
+ */
+
+/*
+ * This file holds the "policy" for the interface to the SSIF state
+ * machine. It does the configuration, handles timers and interrupts,
+ * and drives the real SSIF state machine.
+ */
+
+#define pr_fmt(fmt) "ipmi_ssif: " fmt
+#define dev_fmt(fmt) "ipmi_ssif: " fmt
+
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/ipmi_smi.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/kthread.h>
+#include <linux/acpi.h>
+#include <linux/ctype.h>
+#include <linux/time64.h>
+#include "ipmi_dmi.h"
+
+#define DEVICE_NAME "ipmi_ssif"
+
+#define IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD 0x57
+
+#define SSIF_IPMI_REQUEST 2
+#define SSIF_IPMI_MULTI_PART_REQUEST_START 6
+#define SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE 7
+#define SSIF_IPMI_MULTI_PART_REQUEST_END 8
+#define SSIF_IPMI_RESPONSE 3
+#define SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE 9
+
+/* ssif_debug is a bit-field
+ * SSIF_DEBUG_MSG - commands and their responses
+ * SSIF_DEBUG_STATE - message states
+ * SSIF_DEBUG_TIMING - Measure times between events in the driver
+ */
+#define SSIF_DEBUG_TIMING 4
+#define SSIF_DEBUG_STATE 2
+#define SSIF_DEBUG_MSG 1
+#define SSIF_NODEBUG 0
+#define SSIF_DEFAULT_DEBUG (SSIF_NODEBUG)
+
+/*
+ * Timer values
+ */
+#define SSIF_MSG_USEC 60000 /* 60ms between message tries (T3). */
+#define SSIF_REQ_RETRY_USEC 60000 /* 60ms between send retries (T6). */
+#define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */
+
+/* How many times we retry sending/receiving the message. */
+#define SSIF_SEND_RETRIES 5
+#define SSIF_RECV_RETRIES 250
+
+#define SSIF_MSG_MSEC (SSIF_MSG_USEC / 1000)
+#define SSIF_REQ_RETRY_MSEC (SSIF_REQ_RETRY_USEC / 1000)
+#define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC)
+#define SSIF_REQ_RETRY_JIFFIES ((SSIF_REQ_RETRY_USEC * 1000) / TICK_NSEC)
+#define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
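+
+/*
+ * Worked example (illustrative; the result depends on HZ): with HZ = 250,
+ * TICK_NSEC is 4000000, so SSIF_MSG_JIFFIES is
+ * (60000 * 1000) / 4000000 = 15 jiffies for the 60 ms T3 timeout.
+ */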
+
+/*
+ * Timeout for the watch, only used for get flag timer.
+ */
+#define SSIF_WATCH_MSG_TIMEOUT msecs_to_jiffies(10)
+#define SSIF_WATCH_WATCHDOG_TIMEOUT msecs_to_jiffies(250)
+
+enum ssif_intf_state {
+ SSIF_IDLE,
+ SSIF_GETTING_FLAGS,
+ SSIF_GETTING_EVENTS,
+ SSIF_CLEARING_FLAGS,
+ SSIF_GETTING_MESSAGES,
+ /* FIXME - add watchdog stuff. */
+};
+
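+/*
+ * The interface counts as idle only when the state machine is in
+ * SSIF_IDLE and no message transaction (curr_msg) is outstanding.
+ */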
+#define IS_SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_IDLE \
+ && (ssif)->curr_msg == NULL)
+
+/*
+ * Indexes into stats[] in ssif_info below.
+ */
+enum ssif_stat_indexes {
+ /* Number of total messages sent. */
+ SSIF_STAT_sent_messages = 0,
+
+ /*
+ * Number of message parts sent. Messages may be broken into
+ * parts if they are long.
+ */
+ SSIF_STAT_sent_messages_parts,
+
+ /*
+	 * Number of times a message send was retried.
+ */
+ SSIF_STAT_send_retries,
+
+ /*
+ * Number of times the send of a message failed.
+ */
+ SSIF_STAT_send_errors,
+
+ /*
+ * Number of message responses received.
+ */
+ SSIF_STAT_received_messages,
+
+ /*
+ * Number of message fragments received.
+ */
+ SSIF_STAT_received_message_parts,
+
+ /*
+ * Number of times the receive of a message was retried.
+ */
+ SSIF_STAT_receive_retries,
+
+ /*
+ * Number of errors receiving messages.
+ */
+ SSIF_STAT_receive_errors,
+
+ /*
+ * Number of times a flag fetch was requested.
+ */
+ SSIF_STAT_flag_fetches,
+
+ /*
+ * Number of times the hardware didn't follow the state machine.
+ */
+ SSIF_STAT_hosed,
+
+ /*
+ * Number of received events.
+ */
+ SSIF_STAT_events,
+
+	/* Number of asynchronous messages received. */
+ SSIF_STAT_incoming_messages,
+
+ /* Number of watchdog pretimeouts. */
+ SSIF_STAT_watchdog_pretimeouts,
+
+	/* Number of alerts received. */
+ SSIF_STAT_alerts,
+
+ /* Always add statistics before this value, it must be last. */
+ SSIF_NUM_STATS
+};
+
+struct ssif_addr_info {
+ struct i2c_board_info binfo;
+ char *adapter_name;
+ int debug;
+ int slave_addr;
+ enum ipmi_addr_src addr_src;
+ union ipmi_smi_info_union addr_info;
+ struct device *dev;
+ struct i2c_client *client;
+
+ struct mutex clients_mutex;
+ struct list_head clients;
+
+ struct list_head link;
+};
+
+struct ssif_info;
+
+typedef void (*ssif_i2c_done)(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len);
+
+struct ssif_info {
+ struct ipmi_smi *intf;
+ spinlock_t lock;
+ struct ipmi_smi_msg *waiting_msg;
+ struct ipmi_smi_msg *curr_msg;
+ enum ssif_intf_state ssif_state;
+ unsigned long ssif_debug;
+
+ struct ipmi_smi_handlers handlers;
+
+ enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
+ union ipmi_smi_info_union addr_info;
+
+ /*
+ * Flags from the last GET_MSG_FLAGS command, used when an ATTN
+ * is set to hold the flags until we are done handling everything
+ * from the flags.
+ */
+#define RECEIVE_MSG_AVAIL 0x01
+#define EVENT_MSG_BUFFER_FULL 0x02
+#define WDT_PRE_TIMEOUT_INT 0x08
+ unsigned char msg_flags;
+
+ u8 global_enables;
+ bool has_event_buffer;
+ bool supports_alert;
+
+ /*
+ * Used to tell what we should do with alerts. If we are
+ * waiting on a response, read the data immediately.
+ */
+ bool got_alert;
+ bool waiting_alert;
+
+ /* Used to inform the timeout that it should do a resend. */
+ bool do_resend;
+
+ /*
+ * If set to true, this will request events the next time the
+ * state machine is idle.
+ */
+ bool req_events;
+
+ /*
+ * If set to true, this will request flags the next time the
+ * state machine is idle.
+ */
+ bool req_flags;
+
+ /* Used for sending/receiving data. +1 for the length. */
+ unsigned char data[IPMI_MAX_MSG_LENGTH + 1];
+ unsigned int data_len;
+
+ /* Temp receive buffer, gets copied into data. */
+ unsigned char recv[I2C_SMBUS_BLOCK_MAX];
+
+ struct i2c_client *client;
+ ssif_i2c_done done_handler;
+
+ /* Thread interface handling */
+ struct task_struct *thread;
+ struct completion wake_thread;
+ bool stopping;
+ int i2c_read_write;
+ int i2c_command;
+ unsigned char *i2c_data;
+ unsigned int i2c_size;
+
+ struct timer_list retry_timer;
+ int retries_left;
+
+ long watch_timeout; /* Timeout for flags check, 0 if off. */
+ struct timer_list watch_timer; /* Flag fetch timer. */
+
+ /* Info from SSIF cmd */
+ unsigned char max_xmit_msg_size;
+ unsigned char max_recv_msg_size;
+ bool cmd8_works; /* See test_multipart_messages() for details. */
+ unsigned int multi_support;
+ int supports_pec;
+
+#define SSIF_NO_MULTI 0
+#define SSIF_MULTI_2_PART 1
+#define SSIF_MULTI_n_PART 2
+ unsigned char *multi_data;
+ unsigned int multi_len;
+ unsigned int multi_pos;
+
+ atomic_t stats[SSIF_NUM_STATS];
+};
+
+#define ssif_inc_stat(ssif, stat) \
+ atomic_inc(&(ssif)->stats[SSIF_STAT_ ## stat])
+#define ssif_get_stat(ssif, stat) \
+ ((unsigned int) atomic_read(&(ssif)->stats[SSIF_STAT_ ## stat]))
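+/*
+ * For example, ssif_inc_stat(ssif_info, sent_messages) expands to
+ * atomic_inc(&ssif_info->stats[SSIF_STAT_sent_messages]).
+ */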
+
+static bool initialized;
+static bool platform_registered;
+
+static void return_hosed_msg(struct ssif_info *ssif_info,
+ struct ipmi_smi_msg *msg);
+static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags);
+static int start_send(struct ssif_info *ssif_info,
+ unsigned char *data,
+ unsigned int len);
+
+static unsigned long *ipmi_ssif_lock_cond(struct ssif_info *ssif_info,
+ unsigned long *flags)
+ __acquires(&ssif_info->lock)
+{
+ spin_lock_irqsave(&ssif_info->lock, *flags);
+ return flags;
+}
+
+static void ipmi_ssif_unlock_cond(struct ssif_info *ssif_info,
+ unsigned long *flags)
+ __releases(&ssif_info->lock)
+{
+ spin_unlock_irqrestore(&ssif_info->lock, *flags);
+}
+
+static void deliver_recv_msg(struct ssif_info *ssif_info,
+ struct ipmi_smi_msg *msg)
+{
+ if (msg->rsp_size < 0) {
+ return_hosed_msg(ssif_info, msg);
+ dev_err(&ssif_info->client->dev,
+ "%s: Malformed message: rsp_size = %d\n",
+ __func__, msg->rsp_size);
+ } else {
+ ipmi_smi_msg_received(ssif_info->intf, msg);
+ }
+}
+
+static void return_hosed_msg(struct ssif_info *ssif_info,
+ struct ipmi_smi_msg *msg)
+{
+ ssif_inc_stat(ssif_info, hosed);
+
+ /* Make it a response */
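+	/*
+	 * The netfn is kept in the upper six bits of byte 0 (netfn << 2);
+	 * ORing in 4 sets the low bit of the netfn, turning the request
+	 * netfn into the matching response netfn.
+	 */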
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = 0xFF; /* Unknown error. */
+ msg->rsp_size = 3;
+
+ deliver_recv_msg(ssif_info, msg);
+}
+
+/*
+ * Must be called with the message lock held. This will release the
+ * message lock. Note that the caller will check IS_SSIF_IDLE and
+ * start a new operation, so there is no need to check for new
+ * messages to start in here.
+ */
+static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ unsigned char msg[3];
+
+ ssif_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
+ ssif_info->ssif_state = SSIF_CLEARING_FLAGS;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ /* Make sure the watchdog pre-timeout flag is not set at startup. */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+ msg[2] = WDT_PRE_TIMEOUT_INT;
+
+ if (start_send(ssif_info, msg, 3) != 0) {
+ /* Error, just go to normal state. */
+ ssif_info->ssif_state = SSIF_IDLE;
+ }
+}
+
+static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ unsigned char mb[2];
+
+ ssif_info->req_flags = false;
+ ssif_info->ssif_state = SSIF_GETTING_FLAGS;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ mb[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ mb[1] = IPMI_GET_MSG_FLAGS_CMD;
+ if (start_send(ssif_info, mb, 2) != 0)
+ ssif_info->ssif_state = SSIF_IDLE;
+}
+
+static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
+ struct ipmi_smi_msg *msg)
+{
+ if (start_send(ssif_info, msg->data, msg->data_size) != 0) {
+ unsigned long oflags;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ ssif_info->curr_msg = NULL;
+ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ ipmi_free_smi_msg(msg);
+ }
+}
+
+static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ struct ipmi_smi_msg *msg;
+
+ ssif_info->req_events = false;
+
+ msg = ipmi_alloc_smi_msg();
+ if (!msg) {
+ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ return;
+ }
+
+ ssif_info->curr_msg = msg;
+ ssif_info->ssif_state = SSIF_GETTING_EVENTS;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+ msg->data_size = 2;
+
+ check_start_send(ssif_info, flags, msg);
+}
+
+static void start_recv_msg_fetch(struct ssif_info *ssif_info,
+ unsigned long *flags)
+{
+ struct ipmi_smi_msg *msg;
+
+ msg = ipmi_alloc_smi_msg();
+ if (!msg) {
+ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ return;
+ }
+
+ ssif_info->curr_msg = msg;
+ ssif_info->ssif_state = SSIF_GETTING_MESSAGES;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_GET_MSG_CMD;
+ msg->data_size = 2;
+
+ check_start_send(ssif_info, flags, msg);
+}
+
+/*
+ * Must be called with the message lock held. This will release the
+ * message lock. Note that the caller will check IS_SSIF_IDLE and
+ * start a new operation, so there is no need to check for new
+ * messages to start in here.
+ */
+static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ if (ssif_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
+ /* Watchdog pre-timeout */
+ ssif_inc_stat(ssif_info, watchdog_pretimeouts);
+ start_clear_flags(ssif_info, flags);
+ ipmi_smi_watchdog_pretimeout(ssif_info->intf);
+ } else if (ssif_info->msg_flags & RECEIVE_MSG_AVAIL)
+ /* Messages available. */
+ start_recv_msg_fetch(ssif_info, flags);
+ else if (ssif_info->msg_flags & EVENT_MSG_BUFFER_FULL)
+ /* Events available. */
+ start_event_fetch(ssif_info, flags);
+ else {
+ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ }
+}
+
+static int ipmi_ssif_thread(void *data)
+{
+ struct ssif_info *ssif_info = data;
+
+ while (!kthread_should_stop()) {
+ int result;
+
+ /* Wait for something to do */
+ result = wait_for_completion_interruptible(
+ &ssif_info->wake_thread);
+ if (ssif_info->stopping)
+ break;
+ if (result == -ERESTARTSYS)
+ continue;
+ init_completion(&ssif_info->wake_thread);
+
+ if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) {
+ result = i2c_smbus_write_block_data(
+ ssif_info->client, ssif_info->i2c_command,
+ ssif_info->i2c_data[0],
+ ssif_info->i2c_data + 1);
+ ssif_info->done_handler(ssif_info, result, NULL, 0);
+ } else {
+ result = i2c_smbus_read_block_data(
+ ssif_info->client, ssif_info->i2c_command,
+ ssif_info->i2c_data);
+ if (result < 0)
+ ssif_info->done_handler(ssif_info, result,
+ NULL, 0);
+ else
+ ssif_info->done_handler(ssif_info, 0,
+ ssif_info->i2c_data,
+ result);
+ }
+ }
+
+ return 0;
+}
+
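+/*
+ * Queue an SMBus transfer for ipmi_ssif_thread(); the thread performs
+ * the transaction and invokes the done handler with the result.
+ */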
+static void ssif_i2c_send(struct ssif_info *ssif_info,
+ ssif_i2c_done handler,
+ int read_write, int command,
+ unsigned char *data, unsigned int size)
+{
+ ssif_info->done_handler = handler;
+
+ ssif_info->i2c_read_write = read_write;
+ ssif_info->i2c_command = command;
+ ssif_info->i2c_data = data;
+ ssif_info->i2c_size = size;
+ complete(&ssif_info->wake_thread);
+}
+
+
+static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len);
+
+static void start_get(struct ssif_info *ssif_info)
+{
+ ssif_info->multi_pos = 0;
+
+ ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+ SSIF_IPMI_RESPONSE,
+ ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+}
+
+static void start_resend(struct ssif_info *ssif_info);
+
+static void retry_timeout(struct timer_list *t)
+{
+ struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
+ unsigned long oflags, *flags;
+ bool waiting, resend;
+
+ if (ssif_info->stopping)
+ return;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ resend = ssif_info->do_resend;
+ ssif_info->do_resend = false;
+ waiting = ssif_info->waiting_alert;
+ ssif_info->waiting_alert = false;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ if (waiting)
+ start_get(ssif_info);
+ if (resend) {
+ start_resend(ssif_info);
+ ssif_inc_stat(ssif_info, send_retries);
+ }
+}
+
+static void watch_timeout(struct timer_list *t)
+{
+ struct ssif_info *ssif_info = from_timer(ssif_info, t, watch_timer);
+ unsigned long oflags, *flags;
+
+ if (ssif_info->stopping)
+ return;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ if (ssif_info->watch_timeout) {
+ mod_timer(&ssif_info->watch_timer,
+ jiffies + ssif_info->watch_timeout);
+ if (IS_SSIF_IDLE(ssif_info)) {
+ start_flag_fetch(ssif_info, flags); /* Releases lock */
+ return;
+ }
+ ssif_info->req_flags = true;
+ }
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+}
+
+static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
+ unsigned int data)
+{
+ struct ssif_info *ssif_info = i2c_get_clientdata(client);
+ unsigned long oflags, *flags;
+ bool do_get = false;
+
+ if (type != I2C_PROTOCOL_SMBUS_ALERT)
+ return;
+
+ ssif_inc_stat(ssif_info, alerts);
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ if (ssif_info->waiting_alert) {
+ ssif_info->waiting_alert = false;
+ del_timer(&ssif_info->retry_timer);
+ do_get = true;
+ } else if (ssif_info->curr_msg) {
+ ssif_info->got_alert = true;
+ }
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ if (do_get)
+ start_get(ssif_info);
+}
+
+static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len)
+{
+ struct ipmi_smi_msg *msg;
+ unsigned long oflags, *flags;
+
+ /*
+ * We are single-threaded here, so no need for a lock until we
+ * start messing with driver states or the queues.
+ */
+
+ if (result < 0) {
+ ssif_info->retries_left--;
+ if (ssif_info->retries_left > 0) {
+ ssif_inc_stat(ssif_info, receive_retries);
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ ssif_info->waiting_alert = true;
+ if (!ssif_info->stopping)
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_MSG_JIFFIES);
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ return;
+ }
+
+ ssif_inc_stat(ssif_info, receive_errors);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ dev_dbg(&ssif_info->client->dev,
+ "%s: Error %d\n", __func__, result);
+ len = 0;
+ goto continue_op;
+ }
+
+ if ((len > 1) && (ssif_info->multi_pos == 0)
+ && (data[0] == 0x00) && (data[1] == 0x01)) {
+ /* Start of multi-part read. Start the next transaction. */
+ int i;
+
+ ssif_inc_stat(ssif_info, received_message_parts);
+
+ /* Remove the multi-part read marker. */
+ len -= 2;
+ data += 2;
+ for (i = 0; i < len; i++)
+ ssif_info->data[i] = data[i];
+ ssif_info->multi_len = len;
+ ssif_info->multi_pos = 1;
+
+ ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+ SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
+ ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+ return;
+ } else if (ssif_info->multi_pos) {
+ /* Middle of multi-part read. Start the next transaction. */
+ int i;
+ unsigned char blocknum;
+
+ if (len == 0) {
+ result = -EIO;
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ dev_dbg(&ssif_info->client->dev,
+ "Middle message with no data\n");
+
+ goto continue_op;
+ }
+
+ blocknum = data[0];
+ len--;
+ data++;
+
+ if (blocknum != 0xff && len != 31) {
+ /* All blocks but the last must have 31 data bytes. */
+ result = -EIO;
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ dev_dbg(&ssif_info->client->dev,
+ "Received middle message <31\n");
+
+ goto continue_op;
+ }
+
+ if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
+ /* Received message too big, abort the operation. */
+ result = -E2BIG;
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ dev_dbg(&ssif_info->client->dev,
+ "Received message too big\n");
+
+ goto continue_op;
+ }
+
+ for (i = 0; i < len; i++)
+ ssif_info->data[i + ssif_info->multi_len] = data[i];
+ ssif_info->multi_len += len;
+ if (blocknum == 0xff) {
+ /* End of read */
+ len = ssif_info->multi_len;
+ data = ssif_info->data;
+ } else if (blocknum + 1 != ssif_info->multi_pos) {
+ /*
+ * Out of sequence block, just abort. Block
+ * numbers start at zero for the second block,
+ * but multi_pos starts at one, so the +1.
+ */
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ dev_dbg(&ssif_info->client->dev,
+ "Received message out of sequence, expected %u, got %u\n",
+ ssif_info->multi_pos - 1, blocknum);
+ result = -EIO;
+ } else {
+ ssif_inc_stat(ssif_info, received_message_parts);
+
+ ssif_info->multi_pos++;
+
+ ssif_i2c_send(ssif_info, msg_done_handler,
+ I2C_SMBUS_READ,
+ SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
+ ssif_info->recv,
+ I2C_SMBUS_BLOCK_DATA);
+ return;
+ }
+ }
+
+ continue_op:
+ if (result < 0) {
+ ssif_inc_stat(ssif_info, receive_errors);
+ } else {
+ ssif_inc_stat(ssif_info, received_messages);
+ ssif_inc_stat(ssif_info, received_message_parts);
+ }
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
+ dev_dbg(&ssif_info->client->dev,
+ "DONE 1: state = %d, result=%d\n",
+ ssif_info->ssif_state, result);
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ msg = ssif_info->curr_msg;
+ if (msg) {
+ if (data) {
+ if (len > IPMI_MAX_MSG_LENGTH)
+ len = IPMI_MAX_MSG_LENGTH;
+ memcpy(msg->rsp, data, len);
+ } else {
+ len = 0;
+ }
+ msg->rsp_size = len;
+ ssif_info->curr_msg = NULL;
+ }
+
+ switch (ssif_info->ssif_state) {
+ case SSIF_IDLE:
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ if (!msg)
+ break;
+
+ if (result < 0)
+ return_hosed_msg(ssif_info, msg);
+ else
+ deliver_recv_msg(ssif_info, msg);
+ break;
+
+ case SSIF_GETTING_FLAGS:
+ /* We got the flags from the SSIF, now handle them. */
+ if ((result < 0) || (len < 4) || (data[2] != 0)) {
+ /*
+ * Error fetching flags, or invalid length,
+ * just give up for now.
+ */
+ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ dev_warn(&ssif_info->client->dev,
+ "Error getting flags: %d %d, %x\n",
+ result, len, (len >= 3) ? data[2] : 0);
+ } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
+ /*
+ * Recv error response, give up.
+ */
+ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ dev_warn(&ssif_info->client->dev,
+ "Invalid response getting flags: %x %x\n",
+ data[0], data[1]);
+ } else {
+ ssif_inc_stat(ssif_info, flag_fetches);
+ ssif_info->msg_flags = data[3];
+ handle_flags(ssif_info, flags);
+ }
+ break;
+
+ case SSIF_CLEARING_FLAGS:
+ /* We cleared the flags. */
+ if ((result < 0) || (len < 3) || (data[2] != 0)) {
+ /* Error clearing flags */
+ dev_warn(&ssif_info->client->dev,
+ "Error clearing flags: %d %d, %x\n",
+ result, len, (len >= 3) ? data[2] : 0);
+ } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
+ dev_warn(&ssif_info->client->dev,
+ "Invalid response clearing flags: %x %x\n",
+ data[0], data[1]);
+ }
+ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ break;
+
+ case SSIF_GETTING_EVENTS:
+ if (!msg) {
+ /* Should never happen, but just in case. */
+ dev_warn(&ssif_info->client->dev,
+ "No message set while getting events\n");
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ break;
+ }
+
+ if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
+ /* Error getting event, probably done. */
+ msg->done(msg);
+
+ /* Take off the event flag. */
+ ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+ handle_flags(ssif_info, flags);
+ } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ || msg->rsp[1] != IPMI_READ_EVENT_MSG_BUFFER_CMD) {
+ dev_warn(&ssif_info->client->dev,
+ "Invalid response getting events: %x %x\n",
+ msg->rsp[0], msg->rsp[1]);
+ msg->done(msg);
+ /* Take off the event flag. */
+ ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+ handle_flags(ssif_info, flags);
+ } else {
+ handle_flags(ssif_info, flags);
+ ssif_inc_stat(ssif_info, events);
+ deliver_recv_msg(ssif_info, msg);
+ }
+ break;
+
+ case SSIF_GETTING_MESSAGES:
+ if (!msg) {
+ /* Should never happen, but just in case. */
+ dev_warn(&ssif_info->client->dev,
+ "No message set while getting messages\n");
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ break;
+ }
+
+ if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
+			/* Error getting message, probably done. */
+ msg->done(msg);
+
+ /* Take off the msg flag. */
+ ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+ handle_flags(ssif_info, flags);
+ } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ || msg->rsp[1] != IPMI_GET_MSG_CMD) {
+ dev_warn(&ssif_info->client->dev,
+				 "Invalid response getting messages: %x %x\n",
+ msg->rsp[0], msg->rsp[1]);
+ msg->done(msg);
+
+ /* Take off the msg flag. */
+ ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+ handle_flags(ssif_info, flags);
+ } else {
+ ssif_inc_stat(ssif_info, incoming_messages);
+ handle_flags(ssif_info, flags);
+ deliver_recv_msg(ssif_info, msg);
+ }
+ break;
+
+ default:
+ /* Should never happen, but just in case. */
+ dev_warn(&ssif_info->client->dev,
+ "Invalid state in message done handling: %d\n",
+ ssif_info->ssif_state);
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ }
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ if (IS_SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
+ if (ssif_info->req_events)
+ start_event_fetch(ssif_info, flags);
+ else if (ssif_info->req_flags)
+ start_flag_fetch(ssif_info, flags);
+ else
+ start_next_msg(ssif_info, flags);
+ } else
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
+ dev_dbg(&ssif_info->client->dev,
+ "DONE 2: state = %d.\n", ssif_info->ssif_state);
+}
+
+static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len)
+{
+ /* We are single-threaded here, so no need for a lock. */
+ if (result < 0) {
+ ssif_info->retries_left--;
+ if (ssif_info->retries_left > 0) {
+ /*
+ * Wait the retry timeout time per the spec,
+ * then redo the send.
+ */
+ ssif_info->do_resend = true;
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_REQ_RETRY_JIFFIES);
+ return;
+ }
+
+ ssif_inc_stat(ssif_info, send_errors);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ dev_dbg(&ssif_info->client->dev,
+ "%s: Out of retries\n", __func__);
+
+ msg_done_handler(ssif_info, -EIO, NULL, 0);
+ return;
+ }
+
+ if (ssif_info->multi_data) {
+ /*
+ * In the middle of a multi-data write. See the comment
+ * in the SSIF_MULTI_n_PART case in the probe function
+ * for details on the intricacies of this.
+ */
+ int left, to_write;
+ unsigned char *data_to_send;
+ unsigned char cmd;
+
+ ssif_inc_stat(ssif_info, sent_messages_parts);
+
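+		/*
+		 * An SMBus block transfer carries at most 32 bytes of
+		 * data, so each part of the message is capped at 32.
+		 */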
+ left = ssif_info->multi_len - ssif_info->multi_pos;
+ to_write = left;
+ if (to_write > 32)
+ to_write = 32;
+ /* Length byte. */
+ ssif_info->multi_data[ssif_info->multi_pos] = to_write;
+ data_to_send = ssif_info->multi_data + ssif_info->multi_pos;
+ ssif_info->multi_pos += to_write;
+ cmd = SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE;
+ if (ssif_info->cmd8_works) {
+ if (left == to_write) {
+ cmd = SSIF_IPMI_MULTI_PART_REQUEST_END;
+ ssif_info->multi_data = NULL;
+ }
+ } else if (to_write < 32) {
+ ssif_info->multi_data = NULL;
+ }
+
+ ssif_i2c_send(ssif_info, msg_written_handler,
+ I2C_SMBUS_WRITE, cmd,
+ data_to_send, I2C_SMBUS_BLOCK_DATA);
+ } else {
+ /* Ready to request the result. */
+ unsigned long oflags, *flags;
+
+ ssif_inc_stat(ssif_info, sent_messages);
+ ssif_inc_stat(ssif_info, sent_messages_parts);
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ if (ssif_info->got_alert) {
+ /* The result is already ready, just start it. */
+ ssif_info->got_alert = false;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ start_get(ssif_info);
+ } else {
+			/* Wait a short time, then read the response. */
+ ssif_info->waiting_alert = true;
+ ssif_info->retries_left = SSIF_RECV_RETRIES;
+ if (!ssif_info->stopping)
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_MSG_PART_JIFFIES);
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ }
+ }
+}
+
+static void start_resend(struct ssif_info *ssif_info)
+{
+ int command;
+
+ ssif_info->got_alert = false;
+
+ if (ssif_info->data_len > 32) {
+ command = SSIF_IPMI_MULTI_PART_REQUEST_START;
+ ssif_info->multi_data = ssif_info->data;
+ ssif_info->multi_len = ssif_info->data_len;
+		/*
+		 * Subtle: this is 32, not 33, because we will
+		 * overwrite the byte at position 32 (which was just
+		 * transmitted) with the length of the next part.
+		 */
+ ssif_info->multi_pos = 32;
+ ssif_info->data[0] = 32;
+ } else {
+ ssif_info->multi_data = NULL;
+ command = SSIF_IPMI_REQUEST;
+ ssif_info->data[0] = ssif_info->data_len;
+ }
+
+ ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
+ command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
+}
+
+static int start_send(struct ssif_info *ssif_info,
+ unsigned char *data,
+ unsigned int len)
+{
+ if (len > IPMI_MAX_MSG_LENGTH)
+ return -E2BIG;
+ if (len > ssif_info->max_xmit_msg_size)
+ return -E2BIG;
+
+ ssif_info->retries_left = SSIF_SEND_RETRIES;
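+	/* data[0] is reserved for the length byte, filled in by start_resend(). */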
+ memcpy(ssif_info->data + 1, data, len);
+ ssif_info->data_len = len;
+ start_resend(ssif_info);
+ return 0;
+}
+
+/* Must be called with the message lock held. */
+static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ struct ipmi_smi_msg *msg;
+ unsigned long oflags;
+
+ restart:
+ if (!IS_SSIF_IDLE(ssif_info)) {
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ return;
+ }
+
+ if (!ssif_info->waiting_msg) {
+ ssif_info->curr_msg = NULL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ } else {
+ int rv;
+
+ ssif_info->curr_msg = ssif_info->waiting_msg;
+ ssif_info->waiting_msg = NULL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ rv = start_send(ssif_info,
+ ssif_info->curr_msg->data,
+ ssif_info->curr_msg->data_size);
+ if (rv) {
+ msg = ssif_info->curr_msg;
+ ssif_info->curr_msg = NULL;
+ return_hosed_msg(ssif_info, msg);
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ goto restart;
+ }
+ }
+}
+
+static void sender(void *send_info,
+ struct ipmi_smi_msg *msg)
+{
+ struct ssif_info *ssif_info = send_info;
+ unsigned long oflags, *flags;
+
+ BUG_ON(ssif_info->waiting_msg);
+ ssif_info->waiting_msg = msg;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ start_next_msg(ssif_info, flags);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_TIMING) {
+ struct timespec64 t;
+
+ ktime_get_real_ts64(&t);
+ dev_dbg(&ssif_info->client->dev,
+ "**Enqueue %02x %02x: %lld.%6.6ld\n",
+ msg->data[0], msg->data[1],
+ (long long)t.tv_sec, (long)t.tv_nsec / NSEC_PER_USEC);
+ }
+}
+
+static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
+{
+ struct ssif_info *ssif_info = send_info;
+
+ data->addr_src = ssif_info->addr_source;
+ data->dev = &ssif_info->client->dev;
+ data->addr_info = ssif_info->addr_info;
+ get_device(data->dev);
+
+ return 0;
+}
+
+/*
+ * Upper layer wants us to request events.
+ */
+static void request_events(void *send_info)
+{
+ struct ssif_info *ssif_info = send_info;
+ unsigned long oflags, *flags;
+
+ if (!ssif_info->has_event_buffer)
+ return;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ ssif_info->req_events = true;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+}
+
+/*
+ * Upper layer is changing the flag saying whether we need to request
+ * flags periodically or not.
+ */
+static void ssif_set_need_watch(void *send_info, unsigned int watch_mask)
+{
+ struct ssif_info *ssif_info = send_info;
+ unsigned long oflags, *flags;
+ long timeout = 0;
+
+ if (watch_mask & IPMI_WATCH_MASK_CHECK_MESSAGES)
+ timeout = SSIF_WATCH_MSG_TIMEOUT;
+ else if (watch_mask)
+ timeout = SSIF_WATCH_WATCHDOG_TIMEOUT;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ if (timeout != ssif_info->watch_timeout) {
+ ssif_info->watch_timeout = timeout;
+ if (ssif_info->watch_timeout)
+ mod_timer(&ssif_info->watch_timer,
+ jiffies + ssif_info->watch_timeout);
+ }
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+}
+
+static int ssif_start_processing(void *send_info,
+ struct ipmi_smi *intf)
+{
+ struct ssif_info *ssif_info = send_info;
+
+ ssif_info->intf = intf;
+
+ return 0;
+}
+
+#define MAX_SSIF_BMCS 4
+
+static unsigned short addr[MAX_SSIF_BMCS];
+static int num_addrs;
+module_param_array(addr, ushort, &num_addrs, 0);
+MODULE_PARM_DESC(addr, "The addresses to scan for IPMI BMCs on the SSIFs.");
+
+static char *adapter_name[MAX_SSIF_BMCS];
+static int num_adapter_names;
+module_param_array(adapter_name, charp, &num_adapter_names, 0);
+MODULE_PARM_DESC(adapter_name, "The string name of the I2C device that has the BMC. By default all devices are scanned.");
+
+static int slave_addrs[MAX_SSIF_BMCS];
+static int num_slave_addrs;
+module_param_array(slave_addrs, int, &num_slave_addrs, 0);
+MODULE_PARM_DESC(slave_addrs,
+ "The default IPMB slave address for the controller.");
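+/*
+ * For example (hypothetical addresses), loading the module with
+ *   modprobe ipmi_ssif addr=0x42 slave_addrs=0x20
+ * registers a hardcoded BMC at I2C address 0x42 with IPMB slave
+ * address 0x20.
+ */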
+
+static bool alerts_broken;
+module_param(alerts_broken, bool, 0);
+MODULE_PARM_DESC(alerts_broken, "Don't enable alerts for the controller.");
+
+/*
+ * Bit 0 enables message debugging, bit 1 enables state debugging, and
+ * bit 2 enables timing debugging. This is an array indexed by
+ * interface number.
+ */
+static int dbg[MAX_SSIF_BMCS];
+static int num_dbg;
+module_param_array(dbg, int, &num_dbg, 0);
+MODULE_PARM_DESC(dbg, "Turn on debugging.");
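+/* For example, dbg=7 enables message, state, and timing debugging. */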
+
+static bool ssif_dbg_probe;
+module_param_named(dbg_probe, ssif_dbg_probe, bool, 0);
+MODULE_PARM_DESC(dbg_probe, "Enable debugging of probing of adapters.");
+
+static bool ssif_tryacpi = true;
+module_param_named(tryacpi, ssif_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the default scan of the interfaces identified via ACPI");
+
+static bool ssif_trydmi = true;
+module_param_named(trydmi, ssif_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the default scan of the interfaces identified via DMI (SMBIOS)");
+
+static DEFINE_MUTEX(ssif_infos_mutex);
+static LIST_HEAD(ssif_infos);
+
+#define IPMI_SSIF_ATTR(name) \
+static ssize_t ipmi_##name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct ssif_info *ssif_info = dev_get_drvdata(dev); \
+ \
+ return sysfs_emit(buf, "%u\n", ssif_get_stat(ssif_info, name));\
+} \
+static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL)
+
+static ssize_t ipmi_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "ssif\n");
+}
+static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL);
+
+IPMI_SSIF_ATTR(sent_messages);
+IPMI_SSIF_ATTR(sent_messages_parts);
+IPMI_SSIF_ATTR(send_retries);
+IPMI_SSIF_ATTR(send_errors);
+IPMI_SSIF_ATTR(received_messages);
+IPMI_SSIF_ATTR(received_message_parts);
+IPMI_SSIF_ATTR(receive_retries);
+IPMI_SSIF_ATTR(receive_errors);
+IPMI_SSIF_ATTR(flag_fetches);
+IPMI_SSIF_ATTR(hosed);
+IPMI_SSIF_ATTR(events);
+IPMI_SSIF_ATTR(watchdog_pretimeouts);
+IPMI_SSIF_ATTR(alerts);
+
+static struct attribute *ipmi_ssif_dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_sent_messages.attr,
+ &dev_attr_sent_messages_parts.attr,
+ &dev_attr_send_retries.attr,
+ &dev_attr_send_errors.attr,
+ &dev_attr_received_messages.attr,
+ &dev_attr_received_message_parts.attr,
+ &dev_attr_receive_retries.attr,
+ &dev_attr_receive_errors.attr,
+ &dev_attr_flag_fetches.attr,
+ &dev_attr_hosed.attr,
+ &dev_attr_events.attr,
+ &dev_attr_watchdog_pretimeouts.attr,
+ &dev_attr_alerts.attr,
+ NULL
+};
+
+static const struct attribute_group ipmi_ssif_dev_attr_group = {
+ .attrs = ipmi_ssif_dev_attrs,
+};
+
+static void shutdown_ssif(void *send_info)
+{
+ struct ssif_info *ssif_info = send_info;
+
+ device_remove_group(&ssif_info->client->dev, &ipmi_ssif_dev_attr_group);
+ dev_set_drvdata(&ssif_info->client->dev, NULL);
+
+ /* make sure the driver is not looking for flags any more. */
+ while (ssif_info->ssif_state != SSIF_IDLE)
+ schedule_timeout(1);
+
+ ssif_info->stopping = true;
+ del_timer_sync(&ssif_info->watch_timer);
+ del_timer_sync(&ssif_info->retry_timer);
+ if (ssif_info->thread) {
+ complete(&ssif_info->wake_thread);
+ kthread_stop(ssif_info->thread);
+ }
+}
+
+static void ssif_remove(struct i2c_client *client)
+{
+ struct ssif_info *ssif_info = i2c_get_clientdata(client);
+ struct ssif_addr_info *addr_info;
+
+ if (!ssif_info)
+ return;
+
+ /*
+	 * After this point, we won't deliver anything asynchronously
+	 * to the message handler. We can unregister ourselves.
+ */
+ ipmi_unregister_smi(ssif_info->intf);
+
+ list_for_each_entry(addr_info, &ssif_infos, link) {
+ if (addr_info->client == client) {
+ addr_info->client = NULL;
+ break;
+ }
+ }
+
+ kfree(ssif_info);
+}
+
+static int read_response(struct i2c_client *client, unsigned char *resp)
+{
+ int ret = -ENODEV, retry_cnt = SSIF_RECV_RETRIES;
+
+ while (retry_cnt > 0) {
+ ret = i2c_smbus_read_block_data(client, SSIF_IPMI_RESPONSE,
+ resp);
+ if (ret > 0)
+ break;
+ msleep(SSIF_MSG_MSEC);
+ retry_cnt--;
+ if (retry_cnt <= 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
+ int *resp_len, unsigned char *resp)
+{
+ int retry_cnt;
+ int ret;
+
+ retry_cnt = SSIF_SEND_RETRIES;
+ retry1:
+ ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg);
+ if (ret) {
+ retry_cnt--;
+ if (retry_cnt > 0) {
+ msleep(SSIF_REQ_RETRY_MSEC);
+ goto retry1;
+ }
+ return -ENODEV;
+ }
+
+ ret = read_response(client, resp);
+ if (ret > 0) {
+ /* Validate that the response is correct. */
+ if (ret < 3 ||
+ (resp[0] != (msg[0] | (1 << 2))) ||
+ (resp[1] != msg[1]))
+ ret = -EINVAL;
+ else if (ret > IPMI_MAX_MSG_LENGTH) {
+ ret = -E2BIG;
+ } else {
+ *resp_len = ret;
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+ unsigned char *resp;
+ unsigned char msg[3];
+ int rv;
+ int len;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ /* Do a Get Device ID command, since it is required. */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_DEVICE_ID_CMD;
+ rv = do_cmd(client, 2, msg, &len, resp);
+ if (rv)
+ rv = -ENODEV;
+ else
+ strscpy(info->type, DEVICE_NAME, I2C_NAME_SIZE);
+ kfree(resp);
+ return rv;
+}
+
+static int strcmp_nospace(char *s1, char *s2)
+{
+ while (*s1 && *s2) {
+ while (isspace(*s1))
+ s1++;
+ while (isspace(*s2))
+ s2++;
+ if (*s1 > *s2)
+ return 1;
+ if (*s1 < *s2)
+ return -1;
+ s1++;
+ s2++;
+ }
+ return 0;
+}
+
+static struct ssif_addr_info *ssif_info_find(unsigned short addr,
+ char *adapter_name,
+ bool match_null_name)
+{
+ struct ssif_addr_info *info, *found = NULL;
+
+restart:
+ list_for_each_entry(info, &ssif_infos, link) {
+ if (info->binfo.addr == addr) {
+ if (info->addr_src == SI_SMBIOS && !info->adapter_name)
+ info->adapter_name = kstrdup(adapter_name,
+ GFP_KERNEL);
+
+ if (info->adapter_name || adapter_name) {
+ if (!info->adapter_name != !adapter_name) {
+ /* One is NULL and one is not */
+ continue;
+ }
+ if (adapter_name &&
+ strcmp_nospace(info->adapter_name,
+ adapter_name))
+ /* Names do not match */
+ continue;
+ }
+ found = info;
+ break;
+ }
+ }
+
+ if (!found && match_null_name) {
+ /* Try to get an exact match first, then try with a NULL name */
+ adapter_name = NULL;
+ match_null_name = false;
+ goto restart;
+ }
+
+ return found;
+}
+
+static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
+{
+#ifdef CONFIG_ACPI
+ acpi_handle acpi_handle;
+
+ acpi_handle = ACPI_HANDLE(dev);
+ if (acpi_handle) {
+ ssif_info->addr_source = SI_ACPI;
+ ssif_info->addr_info.acpi_info.acpi_handle = acpi_handle;
+ request_module("acpi_ipmi");
+ return true;
+ }
+#endif
+ return false;
+}
+
+static int find_slave_address(struct i2c_client *client, int slave_addr)
+{
+#ifdef CONFIG_IPMI_DMI_DECODE
+ if (!slave_addr)
+ slave_addr = ipmi_dmi_get_slave_addr(
+ SI_TYPE_INVALID,
+ i2c_adapter_id(client->adapter),
+ client->addr);
+#endif
+
+ return slave_addr;
+}
+
+static int start_multipart_test(struct i2c_client *client,
+ unsigned char *msg, bool do_middle)
+{
+ int retry_cnt = SSIF_SEND_RETRIES, ret;
+
+retry_write:
+ ret = i2c_smbus_write_block_data(client,
+ SSIF_IPMI_MULTI_PART_REQUEST_START,
+ 32, msg);
+ if (ret) {
+ retry_cnt--;
+ if (retry_cnt > 0) {
+ msleep(SSIF_REQ_RETRY_MSEC);
+ goto retry_write;
+ }
+		dev_err(&client->dev, "Could not write multi-part start, though the BMC said it could handle it. Limiting sends to one part.\n");
+ return ret;
+ }
+
+ if (!do_middle)
+ return 0;
+
+ ret = i2c_smbus_write_block_data(client,
+ SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
+ 32, msg + 32);
+ if (ret) {
+		dev_err(&client->dev, "Could not write multi-part middle, though the BMC said it could handle it. Limiting sends to one part.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void test_multipart_messages(struct i2c_client *client,
+ struct ssif_info *ssif_info,
+ unsigned char *resp)
+{
+ unsigned char msg[65];
+ int ret;
+ bool do_middle;
+
+ if (ssif_info->max_xmit_msg_size <= 32)
+ return;
+
+ do_middle = ssif_info->max_xmit_msg_size > 63;
+
+ memset(msg, 0, sizeof(msg));
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_DEVICE_ID_CMD;
+
+ /*
+ * The specification is all messed up dealing with sending
+ * multi-part messages. Per what the specification says, it
+ * is impossible to send a message that is a multiple of 32
+ * bytes, except for 32 itself. It talks about a "start"
+ * transaction (cmd=6) that must be 32 bytes, "middle"
+ * transaction (cmd=7) that must be 32 bytes, and an "end"
+ * transaction. The "end" transaction is shown as cmd=7 in
+ * the text, but if that's the case there is no way to
+ * differentiate between a middle and end part except the
+ * length being less than 32. But there is a table at the far
+ * end of the section (that I had never noticed until someone
+ * pointed it out to me) that mentions it as cmd=8.
+ *
+ * After some thought, I think the example is wrong and the
+ * end transaction should be cmd=8. But some systems don't
+ * implement cmd=8, they use a zero-length end transaction,
+ * even though that violates the SMBus specification.
+ *
+ * So, to work around this, this code tests if cmd=8 works.
+ * If it does, then we use that. If not, it tests zero-
+ * byte end transactions. If that works, good. If not,
+ * we only allow 63-byte transactions max.
+ */
+
+ ret = start_multipart_test(client, msg, do_middle);
+ if (ret)
+ goto out_no_multi_part;
+
+ ret = i2c_smbus_write_block_data(client,
+ SSIF_IPMI_MULTI_PART_REQUEST_END,
+ 1, msg + 64);
+
+ if (!ret)
+ ret = read_response(client, resp);
+
+ if (ret > 0) {
+ /* End transactions work, we are good. */
+ ssif_info->cmd8_works = true;
+ return;
+ }
+
+ ret = start_multipart_test(client, msg, do_middle);
+ if (ret) {
+ dev_err(&client->dev, "Second multipart test failed.\n");
+ goto out_no_multi_part;
+ }
+
+ ret = i2c_smbus_write_block_data(client,
+ SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
+ 0, msg + 64);
+ if (!ret)
+ ret = read_response(client, resp);
+ if (ret > 0)
+ /* Zero-size end parts work, use those. */
+ return;
+
+ /* Limit to 63 bytes and use a short middle command to mark the end. */
+ if (ssif_info->max_xmit_msg_size > 63)
+ ssif_info->max_xmit_msg_size = 63;
+ return;
+
+out_no_multi_part:
+ ssif_info->max_xmit_msg_size = 32;
+ return;
+}
+
+/*
+ * Global enables we care about.
+ */
+#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
+ IPMI_BMC_EVT_MSG_INTR)
+
+static void ssif_remove_dup(struct i2c_client *client)
+{
+ struct ssif_info *ssif_info = i2c_get_clientdata(client);
+
+ ipmi_unregister_smi(ssif_info->intf);
+ kfree(ssif_info);
+}
+
+static int ssif_add_infos(struct i2c_client *client)
+{
+ struct ssif_addr_info *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ info->addr_src = SI_ACPI;
+ info->client = client;
+ info->adapter_name = kstrdup(client->adapter->name, GFP_KERNEL);
+ if (!info->adapter_name) {
+ kfree(info);
+ return -ENOMEM;
+ }
+
+ info->binfo.addr = client->addr;
+ list_add_tail(&info->link, &ssif_infos);
+ return 0;
+}
+
+/*
+ * Prefer ACPI over SMBIOS, if both are available.
+ * So if we get an ACPI interface and have already registered a SMBIOS
+ * interface at the same address, remove the SMBIOS and add the ACPI one.
+ */
+static int ssif_check_and_remove(struct i2c_client *client,
+ struct ssif_info *ssif_info)
+{
+ struct ssif_addr_info *info;
+
+ list_for_each_entry(info, &ssif_infos, link) {
+ if (!info->client)
+ return 0;
+ if (!strcmp(info->adapter_name, client->adapter->name) &&
+ info->binfo.addr == client->addr) {
+ if (info->addr_src == SI_ACPI)
+ return -EEXIST;
+
+ if (ssif_info->addr_source == SI_ACPI &&
+ info->addr_src == SI_SMBIOS) {
+ dev_info(&client->dev,
+ "Removing %s-specified SSIF interface in favor of ACPI\n",
+ ipmi_addr_src_to_str(info->addr_src));
+ ssif_remove_dup(info->client);
+ return 0;
+ }
+ }
+ }
+ return 0;
+}
+
+static int ssif_probe(struct i2c_client *client)
+{
+ unsigned char msg[3];
+ unsigned char *resp;
+ struct ssif_info *ssif_info;
+ int rv = 0;
+ int len = 0;
+ int i;
+ u8 slave_addr = 0;
+ struct ssif_addr_info *addr_info = NULL;
+
+ mutex_lock(&ssif_infos_mutex);
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp) {
+ mutex_unlock(&ssif_infos_mutex);
+ return -ENOMEM;
+ }
+
+ ssif_info = kzalloc(sizeof(*ssif_info), GFP_KERNEL);
+ if (!ssif_info) {
+ kfree(resp);
+ mutex_unlock(&ssif_infos_mutex);
+ return -ENOMEM;
+ }
+
+ if (!check_acpi(ssif_info, &client->dev)) {
+ addr_info = ssif_info_find(client->addr, client->adapter->name,
+ true);
+ if (!addr_info) {
+ /* Must have come in through sysfs. */
+ ssif_info->addr_source = SI_HOTMOD;
+ } else {
+ ssif_info->addr_source = addr_info->addr_src;
+ ssif_info->ssif_debug = addr_info->debug;
+ ssif_info->addr_info = addr_info->addr_info;
+ addr_info->client = client;
+ slave_addr = addr_info->slave_addr;
+ }
+ }
+
+ ssif_info->client = client;
+ i2c_set_clientdata(client, ssif_info);
+
+ rv = ssif_check_and_remove(client, ssif_info);
+ /* If rv is 0 and addr source is not SI_ACPI, continue probing */
+ if (!rv && ssif_info->addr_source == SI_ACPI) {
+ rv = ssif_add_infos(client);
+ if (rv) {
+			dev_err(&client->dev, "Out of memory, exiting\n");
+ goto out;
+ }
+ } else if (rv) {
+		dev_err(&client->dev, "Not probing, interface already present\n");
+ goto out;
+ }
+
+ slave_addr = find_slave_address(client, slave_addr);
+
+ dev_info(&client->dev,
+ "Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n",
+ ipmi_addr_src_to_str(ssif_info->addr_source),
+ client->addr, client->adapter->name, slave_addr);
+
+ /* Now check for system interface capabilities */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD;
+ msg[2] = 0; /* SSIF */
+ rv = do_cmd(client, 3, msg, &len, resp);
+ if (!rv && (len >= 3) && (resp[2] == 0)) {
+ if (len < 7) {
+ if (ssif_dbg_probe)
+ dev_dbg(&ssif_info->client->dev,
+ "SSIF info too short: %d\n", len);
+ goto no_support;
+ }
+
+ /* Got a good SSIF response, handle it. */
+ ssif_info->max_xmit_msg_size = resp[5];
+ ssif_info->max_recv_msg_size = resp[6];
+ ssif_info->multi_support = (resp[4] >> 6) & 0x3;
+ ssif_info->supports_pec = (resp[4] >> 3) & 0x1;
+
+ /* Sanitize the data */
+ switch (ssif_info->multi_support) {
+ case SSIF_NO_MULTI:
+ if (ssif_info->max_xmit_msg_size > 32)
+ ssif_info->max_xmit_msg_size = 32;
+ if (ssif_info->max_recv_msg_size > 32)
+ ssif_info->max_recv_msg_size = 32;
+ break;
+
+ case SSIF_MULTI_2_PART:
+ if (ssif_info->max_xmit_msg_size > 63)
+ ssif_info->max_xmit_msg_size = 63;
+ if (ssif_info->max_recv_msg_size > 62)
+ ssif_info->max_recv_msg_size = 62;
+ break;
+
+ case SSIF_MULTI_n_PART:
+ /* We take whatever size given, but do some testing. */
+ break;
+
+ default:
+ /* Data is not sane, just give up. */
+ goto no_support;
+ }
+ } else {
+ no_support:
+ /* Assume no multi-part or PEC support */
+ dev_info(&ssif_info->client->dev,
+			 "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command, so using defaults\n",
+ rv, len, resp[2]);
+
+ ssif_info->max_xmit_msg_size = 32;
+ ssif_info->max_recv_msg_size = 32;
+ ssif_info->multi_support = SSIF_NO_MULTI;
+ ssif_info->supports_pec = 0;
+ }
+
+ test_multipart_messages(client, ssif_info, resp);
+
+ /* Make sure the NMI timeout is cleared. */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+ msg[2] = WDT_PRE_TIMEOUT_INT;
+ rv = do_cmd(client, 3, msg, &len, resp);
+ if (rv || (len < 3) || (resp[2] != 0))
+ dev_warn(&ssif_info->client->dev,
+ "Unable to clear message flags: %d %d %2.2x\n",
+ rv, len, resp[2]);
+
+ /* Attempt to enable the event buffer. */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+ rv = do_cmd(client, 2, msg, &len, resp);
+ if (rv || (len < 4) || (resp[2] != 0)) {
+ dev_warn(&ssif_info->client->dev,
+ "Error getting global enables: %d %d %2.2x\n",
+ rv, len, resp[2]);
+ rv = 0; /* Not fatal */
+ goto found;
+ }
+
+ ssif_info->global_enables = resp[3];
+
+ if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
+ ssif_info->has_event_buffer = true;
+ /* buffer is already enabled, nothing to do. */
+ goto found;
+ }
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = ssif_info->global_enables | IPMI_BMC_EVT_MSG_BUFF;
+ rv = do_cmd(client, 3, msg, &len, resp);
+ if (rv || (len < 2)) {
+ dev_warn(&ssif_info->client->dev,
+ "Error setting global enables: %d %d %2.2x\n",
+ rv, len, resp[2]);
+ rv = 0; /* Not fatal */
+ goto found;
+ }
+
+ if (resp[2] == 0) {
+ /* A successful return means the event buffer is supported. */
+ ssif_info->has_event_buffer = true;
+ ssif_info->global_enables |= IPMI_BMC_EVT_MSG_BUFF;
+ }
+
+ /* Some systems don't behave well if you enable alerts. */
+ if (alerts_broken)
+ goto found;
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = ssif_info->global_enables | IPMI_BMC_RCV_MSG_INTR;
+ rv = do_cmd(client, 3, msg, &len, resp);
+ if (rv || (len < 2)) {
+ dev_warn(&ssif_info->client->dev,
+ "Error setting global enables: %d %d %2.2x\n",
+ rv, len, resp[2]);
+ rv = 0; /* Not fatal */
+ goto found;
+ }
+
+ if (resp[2] == 0) {
+ /* A successful return means the alert is supported. */
+ ssif_info->supports_alert = true;
+ ssif_info->global_enables |= IPMI_BMC_RCV_MSG_INTR;
+ }
+
+ found:
+ if (ssif_dbg_probe) {
+ dev_dbg(&ssif_info->client->dev,
+ "%s: i2c_probe found device at i2c address %x\n",
+ __func__, client->addr);
+ }
+
+ spin_lock_init(&ssif_info->lock);
+ ssif_info->ssif_state = SSIF_IDLE;
+ timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
+ timer_setup(&ssif_info->watch_timer, watch_timeout, 0);
+
+ for (i = 0; i < SSIF_NUM_STATS; i++)
+ atomic_set(&ssif_info->stats[i], 0);
+
+ if (ssif_info->supports_pec)
+ ssif_info->client->flags |= I2C_CLIENT_PEC;
+
+ ssif_info->handlers.owner = THIS_MODULE;
+ ssif_info->handlers.start_processing = ssif_start_processing;
+ ssif_info->handlers.shutdown = shutdown_ssif;
+ ssif_info->handlers.get_smi_info = get_smi_info;
+ ssif_info->handlers.sender = sender;
+ ssif_info->handlers.request_events = request_events;
+ ssif_info->handlers.set_need_watch = ssif_set_need_watch;
+
+ {
+ unsigned int thread_num;
+
+ thread_num = ((i2c_adapter_id(ssif_info->client->adapter)
+ << 8) |
+ ssif_info->client->addr);
+ init_completion(&ssif_info->wake_thread);
+ ssif_info->thread = kthread_run(ipmi_ssif_thread, ssif_info,
+ "kssif%4.4x", thread_num);
+ if (IS_ERR(ssif_info->thread)) {
+ rv = PTR_ERR(ssif_info->thread);
+ dev_notice(&ssif_info->client->dev,
+ "Could not start kernel thread: error %d\n",
+ rv);
+ goto out;
+ }
+ }
+
+ dev_set_drvdata(&ssif_info->client->dev, ssif_info);
+ rv = device_add_group(&ssif_info->client->dev,
+ &ipmi_ssif_dev_attr_group);
+ if (rv) {
+ dev_err(&ssif_info->client->dev,
+ "Unable to add device attributes: error %d\n",
+ rv);
+ goto out;
+ }
+
+ rv = ipmi_register_smi(&ssif_info->handlers,
+ ssif_info,
+ &ssif_info->client->dev,
+ slave_addr);
+ if (rv) {
+ dev_err(&ssif_info->client->dev,
+ "Unable to register device: error %d\n", rv);
+ goto out_remove_attr;
+ }
+
+ out:
+ if (rv) {
+ if (addr_info)
+ addr_info->client = NULL;
+
+ dev_err(&ssif_info->client->dev,
+ "Unable to start IPMI SSIF: %d\n", rv);
+ i2c_set_clientdata(client, NULL);
+ kfree(ssif_info);
+ }
+ kfree(resp);
+ mutex_unlock(&ssif_infos_mutex);
+ return rv;
+
+out_remove_attr:
+ device_remove_group(&ssif_info->client->dev, &ipmi_ssif_dev_attr_group);
+ dev_set_drvdata(&ssif_info->client->dev, NULL);
+ goto out;
+}
+
+static int new_ssif_client(int addr, char *adapter_name,
+ int debug, int slave_addr,
+ enum ipmi_addr_src addr_src,
+ struct device *dev)
+{
+ struct ssif_addr_info *addr_info;
+ int rv = 0;
+
+ mutex_lock(&ssif_infos_mutex);
+ if (ssif_info_find(addr, adapter_name, false)) {
+ rv = -EEXIST;
+ goto out_unlock;
+ }
+
+ addr_info = kzalloc(sizeof(*addr_info), GFP_KERNEL);
+ if (!addr_info) {
+ rv = -ENOMEM;
+ goto out_unlock;
+ }
+
+ if (adapter_name) {
+ addr_info->adapter_name = kstrdup(adapter_name, GFP_KERNEL);
+ if (!addr_info->adapter_name) {
+ kfree(addr_info);
+ rv = -ENOMEM;
+ goto out_unlock;
+ }
+ }
+
+ strncpy(addr_info->binfo.type, DEVICE_NAME,
+ sizeof(addr_info->binfo.type));
+ addr_info->binfo.addr = addr;
+ addr_info->binfo.platform_data = addr_info;
+ addr_info->debug = debug;
+ addr_info->slave_addr = slave_addr;
+ addr_info->addr_src = addr_src;
+ addr_info->dev = dev;
+
+ if (dev)
+ dev_set_drvdata(dev, addr_info);
+
+ list_add_tail(&addr_info->link, &ssif_infos);
+
+	/* The I2C address scan list (ssif_address_list) will pick this up. */
+
+out_unlock:
+ mutex_unlock(&ssif_infos_mutex);
+ return rv;
+}
+
+static void free_ssif_clients(void)
+{
+ struct ssif_addr_info *info, *tmp;
+
+ mutex_lock(&ssif_infos_mutex);
+ list_for_each_entry_safe(info, tmp, &ssif_infos, link) {
+ list_del(&info->link);
+ kfree(info->adapter_name);
+ kfree(info);
+ }
+ mutex_unlock(&ssif_infos_mutex);
+}
+
+static unsigned short *ssif_address_list(void)
+{
+ struct ssif_addr_info *info;
+ unsigned int count = 0, i = 0;
+ unsigned short *address_list;
+
+ list_for_each_entry(info, &ssif_infos, link)
+ count++;
+
+ address_list = kcalloc(count + 1, sizeof(*address_list),
+ GFP_KERNEL);
+ if (!address_list)
+ return NULL;
+
+ list_for_each_entry(info, &ssif_infos, link) {
+ unsigned short addr = info->binfo.addr;
+ int j;
+
+ for (j = 0; j < i; j++) {
+ if (address_list[j] == addr)
+ /* Found a dup. */
+ break;
+ }
+ if (j == i) /* Didn't find it in the list. */
+ address_list[i++] = addr;
+ }
+ address_list[i] = I2C_CLIENT_END;
+
+ return address_list;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ssif_acpi_match[] = {
+ { "IPI0001", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ssif_acpi_match);
+#endif
+
+#ifdef CONFIG_DMI
+static int dmi_ipmi_probe(struct platform_device *pdev)
+{
+ u8 slave_addr = 0;
+ u16 i2c_addr;
+ int rv;
+
+ if (!ssif_trydmi)
+ return -ENODEV;
+
+ rv = device_property_read_u16(&pdev->dev, "i2c-addr", &i2c_addr);
+ if (rv) {
+ dev_warn(&pdev->dev, "No i2c-addr property\n");
+ return -ENODEV;
+ }
+
+ rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr);
+ if (rv)
+ slave_addr = 0x20;
+
+ return new_ssif_client(i2c_addr, NULL, 0,
+ slave_addr, SI_SMBIOS, &pdev->dev);
+}
+#else
+static int dmi_ipmi_probe(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+#endif
+
+static const struct i2c_device_id ssif_id[] = {
+ { DEVICE_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ssif_id);
+
+static struct i2c_driver ssif_i2c_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = DEVICE_NAME
+ },
+ .probe_new = ssif_probe,
+ .remove = ssif_remove,
+ .alert = ssif_alert,
+ .id_table = ssif_id,
+ .detect = ssif_detect
+};
+
+static int ssif_platform_probe(struct platform_device *dev)
+{
+ return dmi_ipmi_probe(dev);
+}
+
+static int ssif_platform_remove(struct platform_device *dev)
+{
+ struct ssif_addr_info *addr_info = dev_get_drvdata(&dev->dev);
+
+ if (!addr_info)
+ return 0;
+
+ mutex_lock(&ssif_infos_mutex);
+ list_del(&addr_info->link);
+ kfree(addr_info);
+ mutex_unlock(&ssif_infos_mutex);
+ return 0;
+}
+
+static const struct platform_device_id ssif_plat_ids[] = {
+ { "dmi-ipmi-ssif", 0 },
+ { }
+};
+
+static struct platform_driver ipmi_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ },
+ .probe = ssif_platform_probe,
+ .remove = ssif_platform_remove,
+ .id_table = ssif_plat_ids
+};
+
+static int __init init_ipmi_ssif(void)
+{
+ int i;
+ int rv;
+
+ if (initialized)
+ return 0;
+
+ pr_info("IPMI SSIF Interface driver\n");
+
+ /* build list for i2c from addr list */
+ for (i = 0; i < num_addrs; i++) {
+ rv = new_ssif_client(addr[i], adapter_name[i],
+ dbg[i], slave_addrs[i],
+ SI_HARDCODED, NULL);
+ if (rv)
+ pr_err("Couldn't add hardcoded device at addr 0x%x\n",
+ addr[i]);
+ }
+
+ if (ssif_tryacpi)
+ ssif_i2c_driver.driver.acpi_match_table =
+ ACPI_PTR(ssif_acpi_match);
+
+ if (ssif_trydmi) {
+ rv = platform_driver_register(&ipmi_driver);
+ if (rv)
+ pr_err("Unable to register driver: %d\n", rv);
+ else
+ platform_registered = true;
+ }
+
+ ssif_i2c_driver.address_list = ssif_address_list();
+
+ rv = i2c_add_driver(&ssif_i2c_driver);
+ if (!rv)
+ initialized = true;
+
+ return rv;
+}
+module_init(init_ipmi_ssif);
+
+static void __exit cleanup_ipmi_ssif(void)
+{
+ if (!initialized)
+ return;
+
+ initialized = false;
+
+ i2c_del_driver(&ssif_i2c_driver);
+
+ kfree(ssif_i2c_driver.address_list);
+
+ if (ssif_trydmi && platform_registered)
+ platform_driver_unregister(&ipmi_driver);
+
+ free_ssif_clients();
+}
+module_exit(cleanup_ipmi_ssif);
+
+MODULE_ALIAS("platform:dmi-ipmi-ssif");
+MODULE_AUTHOR("Todd C Davis <todd.c.davis@intel.com>, Corey Minyard <minyard@acm.org>");
+MODULE_DESCRIPTION("IPMI driver for management controllers on a SMBus");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
new file mode 100644
index 000000000..5b4e67792
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -0,0 +1,1344 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_watchdog.c
+ *
+ * A watchdog timer based upon the IPMI interface.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ */
+
+#define pr_fmt(fmt) "IPMI Watchdog: " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include <linux/mutex.h>
+#include <linux/watchdog.h>
+#include <linux/miscdevice.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/kdebug.h>
+#include <linux/rwsem.h>
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <linux/notifier.h>
+#include <linux/nmi.h>
+#include <linux/reboot.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/sched/signal.h>
+
+#ifdef CONFIG_X86
+/*
+ * This is ugly, but I've determined that x86 is the only architecture
+ * that can reasonably support the IPMI NMI watchdog timeout at this
+ * time. If another architecture adds this capability somehow, it
+ * will have to be a somewhat different mechanism and I have no idea
+ * how it will work. So in the unlikely event that another
+ * architecture supports this, we can figure out a good generic
+ * mechanism for it at that time.
+ */
+#include <asm/kdebug.h>
+#include <asm/nmi.h>
+#define HAVE_DIE_NMI
+#endif
+
+/*
+ * The IPMI command/response information for the watchdog timer.
+ */
+
+/* values for byte 1 of the set command, byte 2 of the get response. */
+#define WDOG_DONT_LOG (1 << 7)
+#define WDOG_DONT_STOP_ON_SET (1 << 6)
+#define WDOG_SET_TIMER_USE(byte, use) \
+ byte = ((byte) & 0xf8) | ((use) & 0x7)
+#define WDOG_GET_TIMER_USE(byte) ((byte) & 0x7)
+#define WDOG_TIMER_USE_BIOS_FRB2 1
+#define WDOG_TIMER_USE_BIOS_POST 2
+#define WDOG_TIMER_USE_OS_LOAD 3
+#define WDOG_TIMER_USE_SMS_OS 4
+#define WDOG_TIMER_USE_OEM 5
+
+/* values for byte 2 of the set command, byte 3 of the get response. */
+#define WDOG_SET_PRETIMEOUT_ACT(byte, use) \
+ byte = ((byte) & 0x8f) | (((use) & 0x7) << 4)
+#define WDOG_GET_PRETIMEOUT_ACT(byte) (((byte) >> 4) & 0x7)
+#define WDOG_PRETIMEOUT_NONE 0
+#define WDOG_PRETIMEOUT_SMI 1
+#define WDOG_PRETIMEOUT_NMI 2
+#define WDOG_PRETIMEOUT_MSG_INT 3
+
+/* Operations that can be performed on a pretimeout. */
+#define WDOG_PREOP_NONE 0
+#define WDOG_PREOP_PANIC 1
+/* Cause data to be available to read. Doesn't work in NMI mode. */
+#define WDOG_PREOP_GIVE_DATA 2
+
+/* Actions to perform on a full timeout. */
+#define WDOG_SET_TIMEOUT_ACT(byte, use) \
+ byte = ((byte) & 0xf8) | ((use) & 0x7)
+#define WDOG_GET_TIMEOUT_ACT(byte) ((byte) & 0x7)
+#define WDOG_TIMEOUT_NONE 0
+#define WDOG_TIMEOUT_RESET 1
+	/* Make sure the driver is not looking for flags anymore. */
+#define WDOG_TIMEOUT_POWER_CYCLE 3
+
+/*
+ * Byte 3 of the get command, byte 4 of the get response is the
+ * pre-timeout in seconds.
+ */
+
+/* Bits for setting byte 4 of the set command, byte 5 of the get response. */
+#define WDOG_EXPIRE_CLEAR_BIOS_FRB2 (1 << 1)
+#define WDOG_EXPIRE_CLEAR_BIOS_POST (1 << 2)
+#define WDOG_EXPIRE_CLEAR_OS_LOAD (1 << 3)
+#define WDOG_EXPIRE_CLEAR_SMS_OS (1 << 4)
+#define WDOG_EXPIRE_CLEAR_OEM (1 << 5)
+
+/*
+ * Setting/getting the watchdog timer value. This is for bytes 5 and
+ * 6 (the timeout time) of the set command, and bytes 6 and 7 (the
+ * timeout time) and 8 and 9 (the current countdown value) of the
+ * response. The timeout value is given in seconds (in the command it
+ * is 100ms intervals).
+ */
+#define WDOG_SET_TIMEOUT(byte1, byte2, val) \
+ (byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8)
+#define WDOG_GET_TIMEOUT(byte1, byte2) \
+ (((byte1) | ((byte2) << 8)) / 10)
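+
+/*
+ * Worked example (informative): a 300 second timeout is 3000 ticks of
+ * 100ms, i.e. 0x0bb8, so WDOG_SET_TIMEOUT() stores byte1 = 0xb8 and
+ * byte2 = 0x0b; WDOG_GET_TIMEOUT() undoes the split and the scaling.
+ */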
+
+#define IPMI_WDOG_RESET_TIMER 0x22
+#define IPMI_WDOG_SET_TIMER 0x24
+#define IPMI_WDOG_GET_TIMER 0x25
+
+#define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80
+
+static DEFINE_MUTEX(ipmi_watchdog_mutex);
+static bool nowayout = WATCHDOG_NOWAYOUT;
+
+static struct ipmi_user *watchdog_user;
+static int watchdog_ifnum;
+
+/* Default the timeout to 10 seconds. */
+static int timeout = 10;
+
+/* The pre-timeout is disabled by default. */
+static int pretimeout;
+
+/* Default timeout to set on panic */
+static int panic_wdt_timeout = 255;
+
+/* Default action is to reset the board on a timeout. */
+static unsigned char action_val = WDOG_TIMEOUT_RESET;
+
+static char action[16] = "reset";
+
+static unsigned char preaction_val = WDOG_PRETIMEOUT_NONE;
+
+static char preaction[16] = "pre_none";
+
+static unsigned char preop_val = WDOG_PREOP_NONE;
+
+static char preop[16] = "preop_none";
+static DEFINE_SPINLOCK(ipmi_read_lock);
+static char data_to_read;
+static DECLARE_WAIT_QUEUE_HEAD(read_q);
+static struct fasync_struct *fasync_q;
+static atomic_t pretimeout_since_last_heartbeat;
+static char expect_close;
+
+static int ifnum_to_use = -1;
+
+/* Parameters to ipmi_set_timeout */
+#define IPMI_SET_TIMEOUT_NO_HB 0
+#define IPMI_SET_TIMEOUT_HB_IF_NECESSARY 1
+#define IPMI_SET_TIMEOUT_FORCE_HB 2
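+
+/*
+ * IPMI_SET_TIMEOUT_NO_HB never follows the set with a heartbeat (used
+ * from the heartbeat path itself to avoid deadlocking), FORCE_HB always
+ * sends one, and HB_IF_NECESSARY sends one only when required, e.g. on
+ * IPMI 1.0 BMCs where setting the timer stops the watchdog.
+ */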
+
+static int ipmi_set_timeout(int do_heartbeat);
+static void ipmi_register_watchdog(int ipmi_intf);
+static void ipmi_unregister_watchdog(int ipmi_intf);
+
+/*
+ * If true, the driver will start running as soon as it is configured
+ * and ready.
+ */
+static int start_now;
+
+static int set_param_timeout(const char *val, const struct kernel_param *kp)
+{
+ char *endp;
+ int l;
+ int rv = 0;
+
+ if (!val)
+ return -EINVAL;
+ l = simple_strtoul(val, &endp, 0);
+ if (endp == val)
+ return -EINVAL;
+
+ *((int *)kp->arg) = l;
+ if (watchdog_user)
+ rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+ return rv;
+}
+
+static const struct kernel_param_ops param_ops_timeout = {
+ .set = set_param_timeout,
+ .get = param_get_int,
+};
+#define param_check_timeout param_check_int
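+
+/*
+ * Note: module_param(foo, timeout, ...) below resolves to
+ * param_ops_timeout above via token pasting inside the module_param()
+ * macros, so runtime writes to those parameters go through
+ * set_param_timeout() and are pushed straight to the BMC.
+ */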
+
+typedef int (*action_fn)(const char *intval, char *outval);
+
+static int action_op(const char *inval, char *outval);
+static int preaction_op(const char *inval, char *outval);
+static int preop_op(const char *inval, char *outval);
+static void check_parms(void);
+
+static int set_param_str(const char *val, const struct kernel_param *kp)
+{
+ action_fn fn = (action_fn) kp->arg;
+ int rv = 0;
+ char valcp[16];
+ char *s;
+
+ strncpy(valcp, val, 15);
+ valcp[15] = '\0';
+
+ s = strstrip(valcp);
+
+ rv = fn(s, NULL);
+ if (rv)
+ goto out;
+
+ check_parms();
+ if (watchdog_user)
+ rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+ out:
+ return rv;
+}
+
+static int get_param_str(char *buffer, const struct kernel_param *kp)
+{
+ action_fn fn = (action_fn) kp->arg;
+ int rv, len;
+
+ rv = fn(NULL, buffer);
+ if (rv)
+ return rv;
+
+ len = strlen(buffer);
+ buffer[len++] = '\n';
+ buffer[len] = 0;
+
+ return len;
+}
+
+
+static int set_param_wdog_ifnum(const char *val, const struct kernel_param *kp)
+{
+ int rv = param_set_int(val, kp);
+ if (rv)
+ return rv;
+ if ((ifnum_to_use < 0) || (ifnum_to_use == watchdog_ifnum))
+ return 0;
+
+ ipmi_unregister_watchdog(watchdog_ifnum);
+ ipmi_register_watchdog(ifnum_to_use);
+ return 0;
+}
+
+static const struct kernel_param_ops param_ops_wdog_ifnum = {
+ .set = set_param_wdog_ifnum,
+ .get = param_get_int,
+};
+
+#define param_check_wdog_ifnum param_check_int
+
+static const struct kernel_param_ops param_ops_str = {
+ .set = set_param_str,
+ .get = get_param_str,
+};
+
+module_param(ifnum_to_use, wdog_ifnum, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog "
+ "timer. Setting to -1 defaults to the first registered "
+ "interface");
+
+module_param(timeout, timeout, 0644);
+MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
+
+module_param(pretimeout, timeout, 0644);
+MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
+
+module_param(panic_wdt_timeout, timeout, 0644);
+MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds.");
+
+module_param_cb(action, &param_ops_str, action_op, 0644);
+MODULE_PARM_DESC(action, "Timeout action. One of: "
+ "reset, none, power_cycle, power_off.");
+
+module_param_cb(preaction, &param_ops_str, preaction_op, 0644);
+MODULE_PARM_DESC(preaction, "Pretimeout action. One of: "
+ "pre_none, pre_smi, pre_nmi, pre_int.");
+
+module_param_cb(preop, &param_ops_str, preop_op, 0644);
+MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: "
+ "preop_none, preop_panic, preop_give_data.");
+
+module_param(start_now, int, 0444);
+MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as "
+		 "soon as the driver is loaded.");
+
+module_param(nowayout, bool, 0644);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=CONFIG_WATCHDOG_NOWAYOUT)");
+
+/* Default state of the timer. */
+static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+
+/* Is someone using the watchdog? Only one user is allowed. */
+static unsigned long ipmi_wdog_open;
+
+/*
+ * If set to 1, the heartbeat command will set the state to reset and
+ * start the timer. The timer doesn't normally run when the driver is
+ * first opened until the heartbeat is set the first time; this
+ * variable is used to accomplish this.
+ */
+static int ipmi_start_timer_on_heartbeat;
+
+/* IPMI version of the BMC. */
+static unsigned char ipmi_version_major;
+static unsigned char ipmi_version_minor;
+
+/* If a pretimeout occurs, this is used to allow only one panic to happen. */
+static atomic_t preop_panic_excl = ATOMIC_INIT(-1);
+
+#ifdef HAVE_DIE_NMI
+static int testing_nmi;
+static int nmi_handler_registered;
+#endif
+
+static int __ipmi_heartbeat(void);
+
+/*
+ * We use a mutex to make sure that only one thing can send a set
+ * timeout message at one time. The mutex is claimed when a message is
+ * sent and freed when both the send and receive messages are free
+ * (msg_tofree is set to 2, one per message, and each free callback
+ * decrements it).
+ */
+static atomic_t msg_tofree = ATOMIC_INIT(0);
+static DECLARE_COMPLETION(msg_wait);
+static void msg_free_smi(struct ipmi_smi_msg *msg)
+{
+ if (atomic_dec_and_test(&msg_tofree)) {
+ if (!oops_in_progress)
+ complete(&msg_wait);
+ }
+}
+static void msg_free_recv(struct ipmi_recv_msg *msg)
+{
+ if (atomic_dec_and_test(&msg_tofree)) {
+ if (!oops_in_progress)
+ complete(&msg_wait);
+ }
+}
+static struct ipmi_smi_msg smi_msg = INIT_IPMI_SMI_MSG(msg_free_smi);
+static struct ipmi_recv_msg recv_msg = INIT_IPMI_RECV_MSG(msg_free_recv);
+
+static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
+ struct ipmi_recv_msg *recv_msg,
+ int *send_heartbeat_now)
+{
+ struct kernel_ipmi_msg msg;
+ unsigned char data[6];
+ int rv;
+ struct ipmi_system_interface_addr addr;
+ int hbnow = 0;
+
+ data[0] = 0;
+ WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
+
+ if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+ if ((ipmi_version_major > 1) ||
+ ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
+ /* This is an IPMI 1.5-only feature. */
+ data[0] |= WDOG_DONT_STOP_ON_SET;
+ } else {
+			/*
+			 * In IPMI 1.0, setting the timer stops the watchdog;
+			 * we need to start it back up again.
+			 */
+ hbnow = 1;
+ }
+ }
+
+ data[1] = 0;
+ WDOG_SET_TIMEOUT_ACT(data[1], ipmi_watchdog_state);
+ if ((pretimeout > 0) && (ipmi_watchdog_state != WDOG_TIMEOUT_NONE)) {
+ WDOG_SET_PRETIMEOUT_ACT(data[1], preaction_val);
+ data[2] = pretimeout;
+ } else {
+ WDOG_SET_PRETIMEOUT_ACT(data[1], WDOG_PRETIMEOUT_NONE);
+ data[2] = 0; /* No pretimeout. */
+ }
+ data[3] = 0;
+ WDOG_SET_TIMEOUT(data[4], data[5], timeout);
+
+ addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ addr.channel = IPMI_BMC_CHANNEL;
+ addr.lun = 0;
+
+ msg.netfn = 0x06;
+ msg.cmd = IPMI_WDOG_SET_TIMER;
+ msg.data = data;
+ msg.data_len = sizeof(data);
+ rv = ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ smi_msg,
+ recv_msg,
+ 1);
+ if (rv)
+ pr_warn("set timeout error: %d\n", rv);
+ else if (send_heartbeat_now)
+ *send_heartbeat_now = hbnow;
+
+ return rv;
+}
+
+static int _ipmi_set_timeout(int do_heartbeat)
+{
+ int send_heartbeat_now;
+ int rv;
+
+ if (!watchdog_user)
+ return -ENODEV;
+
+ atomic_set(&msg_tofree, 2);
+
+ rv = __ipmi_set_timeout(&smi_msg,
+ &recv_msg,
+ &send_heartbeat_now);
+ if (rv) {
+ atomic_set(&msg_tofree, 0);
+ return rv;
+ }
+
+ wait_for_completion(&msg_wait);
+
+ if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
+ || ((send_heartbeat_now)
+ && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
+ rv = __ipmi_heartbeat();
+
+ return rv;
+}
+
+static int ipmi_set_timeout(int do_heartbeat)
+{
+ int rv;
+
+ mutex_lock(&ipmi_watchdog_mutex);
+ rv = _ipmi_set_timeout(do_heartbeat);
+ mutex_unlock(&ipmi_watchdog_mutex);
+
+ return rv;
+}
+
+static atomic_t panic_done_count = ATOMIC_INIT(0);
+
+static void panic_smi_free(struct ipmi_smi_msg *msg)
+{
+ atomic_dec(&panic_done_count);
+}
+static void panic_recv_free(struct ipmi_recv_msg *msg)
+{
+ atomic_dec(&panic_done_count);
+}
+
+static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg =
+ INIT_IPMI_SMI_MSG(panic_smi_free);
+static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg =
+ INIT_IPMI_RECV_MSG(panic_recv_free);
+
+static void panic_halt_ipmi_heartbeat(void)
+{
+ struct kernel_ipmi_msg msg;
+ struct ipmi_system_interface_addr addr;
+ int rv;
+
+	/*
+	 * Don't reset the timer if we have the timer turned off; that
+	 * would re-enable the watchdog.
+	 */
+ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
+ return;
+
+ addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ addr.channel = IPMI_BMC_CHANNEL;
+ addr.lun = 0;
+
+ msg.netfn = 0x06;
+ msg.cmd = IPMI_WDOG_RESET_TIMER;
+ msg.data = NULL;
+ msg.data_len = 0;
+ atomic_add(2, &panic_done_count);
+ rv = ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ &panic_halt_heartbeat_smi_msg,
+ &panic_halt_heartbeat_recv_msg,
+ 1);
+ if (rv)
+ atomic_sub(2, &panic_done_count);
+}
+
+static struct ipmi_smi_msg panic_halt_smi_msg =
+ INIT_IPMI_SMI_MSG(panic_smi_free);
+static struct ipmi_recv_msg panic_halt_recv_msg =
+ INIT_IPMI_RECV_MSG(panic_recv_free);
+
+/*
+ * Special call, doesn't claim any locks. This is only to be called
+ * at panic or halt time, in run-to-completion mode, when the caller
+ * is the only CPU and the only thing that will be running is these
+ * IPMI calls.
+ */
+static void panic_halt_ipmi_set_timeout(void)
+{
+ int send_heartbeat_now;
+ int rv;
+
+ /* Wait for the messages to be free. */
+ while (atomic_read(&panic_done_count) != 0)
+ ipmi_poll_interface(watchdog_user);
+ atomic_add(2, &panic_done_count);
+ rv = __ipmi_set_timeout(&panic_halt_smi_msg,
+ &panic_halt_recv_msg,
+ &send_heartbeat_now);
+ if (rv) {
+ atomic_sub(2, &panic_done_count);
+ pr_warn("Unable to extend the watchdog timeout\n");
+ } else {
+ if (send_heartbeat_now)
+ panic_halt_ipmi_heartbeat();
+ }
+ while (atomic_read(&panic_done_count) != 0)
+ ipmi_poll_interface(watchdog_user);
+}
+
+static int __ipmi_heartbeat(void)
+{
+ struct kernel_ipmi_msg msg;
+ int rv;
+ struct ipmi_system_interface_addr addr;
+ int timeout_retries = 0;
+
+restart:
+	/*
+	 * Don't reset the timer if we have the timer turned off; that
+	 * would re-enable the watchdog.
+	 */
+ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
+ return 0;
+
+ atomic_set(&msg_tofree, 2);
+
+ addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ addr.channel = IPMI_BMC_CHANNEL;
+ addr.lun = 0;
+
+ msg.netfn = 0x06;
+ msg.cmd = IPMI_WDOG_RESET_TIMER;
+ msg.data = NULL;
+ msg.data_len = 0;
+ rv = ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ &smi_msg,
+ &recv_msg,
+ 1);
+ if (rv) {
+ atomic_set(&msg_tofree, 0);
+ pr_warn("heartbeat send failure: %d\n", rv);
+ return rv;
+ }
+
+ /* Wait for the heartbeat to be sent. */
+ wait_for_completion(&msg_wait);
+
+ if (recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) {
+ timeout_retries++;
+ if (timeout_retries > 3) {
+ pr_err("Unable to restore the IPMI watchdog's settings, giving up\n");
+ rv = -EIO;
+ goto out;
+ }
+
+		/*
+		 * The timer was not initialized, which means the BMC was
+		 * probably reset and lost the watchdog information. Attempt
+		 * to restore the timer's info. Note that we still hold
+		 * the heartbeat mutex, to keep a heartbeat from happening
+		 * in this process, so we must ask for no heartbeat here to
+		 * avoid deadlocking on that mutex.
+		 */
+ rv = _ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ if (rv) {
+ pr_err("Unable to send the command to set the watchdog's settings, giving up\n");
+ goto out;
+ }
+
+ /* Might need a heartbeat send, go ahead and do it. */
+ goto restart;
+ } else if (recv_msg.msg.data[0] != 0) {
+ /*
+ * Got an error in the heartbeat response. It was already
+ * reported in ipmi_wdog_msg_handler, but we should return
+ * an error here.
+ */
+ rv = -EINVAL;
+ }
+
+out:
+ return rv;
+}
+
+static int _ipmi_heartbeat(void)
+{
+ int rv;
+
+ if (!watchdog_user)
+ return -ENODEV;
+
+ if (ipmi_start_timer_on_heartbeat) {
+ ipmi_start_timer_on_heartbeat = 0;
+ ipmi_watchdog_state = action_val;
+ rv = _ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+ } else if (atomic_cmpxchg(&pretimeout_since_last_heartbeat, 1, 0)) {
+		/*
+		 * A pretimeout occurred; make sure we set the timeout.
+		 * We don't want to set the action, though, we want to
+		 * leave that alone (thus it can't be combined with the
+		 * above operation).
+		 */
+ rv = _ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+ } else {
+ rv = __ipmi_heartbeat();
+ }
+
+ return rv;
+}
+
+static int ipmi_heartbeat(void)
+{
+ int rv;
+
+ mutex_lock(&ipmi_watchdog_mutex);
+ rv = _ipmi_heartbeat();
+ mutex_unlock(&ipmi_watchdog_mutex);
+
+ return rv;
+}
+
+static const struct watchdog_info ident = {
+ .options = 0, /* WDIOF_SETTIMEOUT, */
+ .firmware_version = 1,
+ .identity = "IPMI"
+};
+
+static int ipmi_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int i;
+ int val;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ i = copy_to_user(argp, &ident, sizeof(ident));
+ return i ? -EFAULT : 0;
+
+ case WDIOC_SETTIMEOUT:
+ i = copy_from_user(&val, argp, sizeof(int));
+ if (i)
+ return -EFAULT;
+ timeout = val;
+ return _ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+ case WDIOC_GETTIMEOUT:
+ i = copy_to_user(argp, &timeout, sizeof(timeout));
+ if (i)
+ return -EFAULT;
+ return 0;
+
+ case WDIOC_SETPRETIMEOUT:
+ i = copy_from_user(&val, argp, sizeof(int));
+ if (i)
+ return -EFAULT;
+ pretimeout = val;
+ return _ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+ case WDIOC_GETPRETIMEOUT:
+ i = copy_to_user(argp, &pretimeout, sizeof(pretimeout));
+ if (i)
+ return -EFAULT;
+ return 0;
+
+ case WDIOC_KEEPALIVE:
+ return _ipmi_heartbeat();
+
+ case WDIOC_SETOPTIONS:
+ i = copy_from_user(&val, argp, sizeof(int));
+ if (i)
+ return -EFAULT;
+ if (val & WDIOS_DISABLECARD) {
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ _ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ ipmi_start_timer_on_heartbeat = 0;
+ }
+
+ if (val & WDIOS_ENABLECARD) {
+ ipmi_watchdog_state = action_val;
+ _ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+ }
+ return 0;
+
+ case WDIOC_GETSTATUS:
+ val = 0;
+ i = copy_to_user(argp, &val, sizeof(val));
+ if (i)
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static long ipmi_unlocked_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ mutex_lock(&ipmi_watchdog_mutex);
+ ret = ipmi_ioctl(file, cmd, arg);
+ mutex_unlock(&ipmi_watchdog_mutex);
+
+ return ret;
+}
+
+static ssize_t ipmi_write(struct file *file,
+ const char __user *buf,
+ size_t len,
+ loff_t *ppos)
+{
+ int rv;
+
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ /* In case it was set long ago */
+ expect_close = 0;
+
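+			/*
+			 * Standard watchdog "magic close": a 'V' anywhere in
+			 * the written data arms expect_close so that a later
+			 * release can stop the timer cleanly.
+			 */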
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, buf + i))
+ return -EFAULT;
+ if (c == 'V')
+ expect_close = 42;
+ }
+ }
+ rv = ipmi_heartbeat();
+ if (rv)
+ return rv;
+ }
+ return len;
+}
+
+static ssize_t ipmi_read(struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int rv = 0;
+ wait_queue_entry_t wait;
+
+ if (count <= 0)
+ return 0;
+
+ /*
+ * Reading returns if the pretimeout has gone off, and it only does
+ * it once per pretimeout.
+ */
+ spin_lock_irq(&ipmi_read_lock);
+ if (!data_to_read) {
+ if (file->f_flags & O_NONBLOCK) {
+ rv = -EAGAIN;
+ goto out;
+ }
+
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&read_q, &wait);
+ while (!data_to_read) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irq(&ipmi_read_lock);
+ schedule();
+ spin_lock_irq(&ipmi_read_lock);
+ }
+ remove_wait_queue(&read_q, &wait);
+
+ if (signal_pending(current)) {
+ rv = -ERESTARTSYS;
+ goto out;
+ }
+ }
+ data_to_read = 0;
+
+ out:
+ spin_unlock_irq(&ipmi_read_lock);
+
+ if (rv == 0) {
+ if (copy_to_user(buf, &data_to_read, 1))
+ rv = -EFAULT;
+ else
+ rv = 1;
+ }
+
+ return rv;
+}
+
+static int ipmi_open(struct inode *ino, struct file *filep)
+{
+ switch (iminor(ino)) {
+ case WATCHDOG_MINOR:
+ if (test_and_set_bit(0, &ipmi_wdog_open))
+ return -EBUSY;
+
+ /*
+ * Don't start the timer now, let it start on the
+ * first heartbeat.
+ */
+ ipmi_start_timer_on_heartbeat = 1;
+ return stream_open(ino, filep);
+
+ default:
+		return -ENODEV;
+ }
+}
+
+static __poll_t ipmi_poll(struct file *file, poll_table *wait)
+{
+ __poll_t mask = 0;
+
+ poll_wait(file, &read_q, wait);
+
+ spin_lock_irq(&ipmi_read_lock);
+ if (data_to_read)
+ mask |= (EPOLLIN | EPOLLRDNORM);
+ spin_unlock_irq(&ipmi_read_lock);
+
+ return mask;
+}
+
+static int ipmi_fasync(int fd, struct file *file, int on)
+{
+ int result;
+
+ result = fasync_helper(fd, file, on, &fasync_q);
+
+	return result;
+}
+
+static int ipmi_close(struct inode *ino, struct file *filep)
+{
+ if (iminor(ino) == WATCHDOG_MINOR) {
+ if (expect_close == 42) {
+ mutex_lock(&ipmi_watchdog_mutex);
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ _ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ mutex_unlock(&ipmi_watchdog_mutex);
+ } else {
+ pr_crit("Unexpected close, not stopping watchdog!\n");
+ ipmi_heartbeat();
+ }
+ clear_bit(0, &ipmi_wdog_open);
+ }
+
+ expect_close = 0;
+
+ return 0;
+}
+
+static const struct file_operations ipmi_wdog_fops = {
+ .owner = THIS_MODULE,
+ .read = ipmi_read,
+ .poll = ipmi_poll,
+ .write = ipmi_write,
+ .unlocked_ioctl = ipmi_unlocked_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .open = ipmi_open,
+ .release = ipmi_close,
+ .fasync = ipmi_fasync,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice ipmi_wdog_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &ipmi_wdog_fops
+};
+
+static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
+ void *handler_data)
+{
+ if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
+ msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
+ pr_info("response: The IPMI controller appears to have been reset, will attempt to reinitialize the watchdog timer\n");
+ else if (msg->msg.data[0] != 0)
+ pr_err("response: Error %x on cmd %x\n",
+ msg->msg.data[0],
+ msg->msg.cmd);
+
+ ipmi_free_recv_msg(msg);
+}
+
+static void ipmi_wdog_pretimeout_handler(void *handler_data)
+{
+ if (preaction_val != WDOG_PRETIMEOUT_NONE) {
+ if (preop_val == WDOG_PREOP_PANIC) {
+ if (atomic_inc_and_test(&preop_panic_excl))
+ panic("Watchdog pre-timeout");
+ } else if (preop_val == WDOG_PREOP_GIVE_DATA) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipmi_read_lock, flags);
+ data_to_read = 1;
+ wake_up_interruptible(&read_q);
+ kill_fasync(&fasync_q, SIGIO, POLL_IN);
+ spin_unlock_irqrestore(&ipmi_read_lock, flags);
+ }
+ }
+
+ /*
+ * On some machines, the heartbeat will give an error and not
+ * work unless we re-enable the timer. So do so.
+ */
+ atomic_set(&pretimeout_since_last_heartbeat, 1);
+}
+
+static void ipmi_wdog_panic_handler(void *user_data)
+{
+ static int panic_event_handled;
+
+ /*
+ * On a panic, if we have a panic timeout, make sure to extend
+ * the watchdog timer to a reasonable value to complete the
+ * panic, if the watchdog timer is running. Plus the
+ * pretimeout is meaningless at panic time.
+ */
+ if (watchdog_user && !panic_event_handled &&
+ ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+ /* Make sure we do this only once. */
+ panic_event_handled = 1;
+
+ timeout = panic_wdt_timeout;
+ pretimeout = 0;
+ panic_halt_ipmi_set_timeout();
+ }
+}
+
+static const struct ipmi_user_hndl ipmi_hndlrs = {
+ .ipmi_recv_hndl = ipmi_wdog_msg_handler,
+ .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler,
+ .ipmi_panic_handler = ipmi_wdog_panic_handler
+};
+
+static void ipmi_register_watchdog(int ipmi_intf)
+{
+ int rv = -EBUSY;
+
+ if (watchdog_user)
+ goto out;
+
+ if ((ifnum_to_use >= 0) && (ifnum_to_use != ipmi_intf))
+ goto out;
+
+ watchdog_ifnum = ipmi_intf;
+
+ rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user);
+ if (rv < 0) {
+ pr_crit("Unable to register with ipmi\n");
+ goto out;
+ }
+
+ rv = ipmi_get_version(watchdog_user,
+ &ipmi_version_major,
+ &ipmi_version_minor);
+ if (rv) {
+ pr_warn("Unable to get IPMI version, assuming 1.0\n");
+ ipmi_version_major = 1;
+ ipmi_version_minor = 0;
+ }
+
+ rv = misc_register(&ipmi_wdog_miscdev);
+ if (rv < 0) {
+ ipmi_destroy_user(watchdog_user);
+ watchdog_user = NULL;
+ pr_crit("Unable to register misc device\n");
+ }
+
+#ifdef HAVE_DIE_NMI
+ if (nmi_handler_registered) {
+ int old_pretimeout = pretimeout;
+ int old_timeout = timeout;
+ int old_preop_val = preop_val;
+
+ /*
+ * Set the pretimeout to go off in a second and give
+ * ourselves plenty of time to stop the timer.
+ */
+ ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
+ preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */
+ pretimeout = 99;
+ timeout = 100;
+
+ testing_nmi = 1;
+
+ rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+ if (rv) {
+ pr_warn("Error starting timer to test NMI: 0x%x. The NMI pretimeout will likely not work\n",
+ rv);
+ rv = 0;
+ goto out_restore;
+ }
+
+ msleep(1500);
+
+ if (testing_nmi != 2) {
+ pr_warn("IPMI NMI didn't seem to occur. The NMI pretimeout will likely not work\n");
+ }
+ out_restore:
+ testing_nmi = 0;
+ preop_val = old_preop_val;
+ pretimeout = old_pretimeout;
+ timeout = old_timeout;
+ }
+#endif
+
+ out:
+ if ((start_now) && (rv == 0)) {
+ /* Run from startup, so start the timer now. */
+ start_now = 0; /* Disable this function after first startup. */
+ ipmi_watchdog_state = action_val;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+ pr_info("Starting now!\n");
+ } else {
+ /* Stop the timer now. */
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ }
+}
+
+static void ipmi_unregister_watchdog(int ipmi_intf)
+{
+ int rv;
+ struct ipmi_user *loc_user = watchdog_user;
+
+ if (!loc_user)
+ return;
+
+ if (watchdog_ifnum != ipmi_intf)
+ return;
+
+ /* Make sure no one can call us any more. */
+ misc_deregister(&ipmi_wdog_miscdev);
+
+ watchdog_user = NULL;
+
+	/*
+	 * Wait to make sure the message makes it out. The lower layer has
+	 * pointers to our buffers; we want to make sure they are done
+	 * before we release our memory.
+	 */
+ while (atomic_read(&msg_tofree))
+ msg_free_smi(NULL);
+
+ mutex_lock(&ipmi_watchdog_mutex);
+
+ /* Disconnect from IPMI. */
+ rv = ipmi_destroy_user(loc_user);
+ if (rv)
+ pr_warn("error unlinking from IPMI: %d\n", rv);
+
+ /* If it comes back, restart it properly. */
+ ipmi_start_timer_on_heartbeat = 1;
+
+ mutex_unlock(&ipmi_watchdog_mutex);
+}
+
+#ifdef HAVE_DIE_NMI
+static int
+ipmi_nmi(unsigned int val, struct pt_regs *regs)
+{
+ /*
+ * If we get here, it's an NMI that's not a memory or I/O
+ * error. We can't truly tell if it's from IPMI or not
+ * without sending a message, and sending a message is almost
+ * impossible because of locking.
+ */
+
+ if (testing_nmi) {
+ testing_nmi = 2;
+ return NMI_HANDLED;
+ }
+
+ /* If we are not expecting a timeout, ignore it. */
+ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
+ return NMI_DONE;
+
+ if (preaction_val != WDOG_PRETIMEOUT_NMI)
+ return NMI_DONE;
+
+ /*
+ * If no one else handled the NMI, we assume it was the IPMI
+ * watchdog.
+ */
+ if (preop_val == WDOG_PREOP_PANIC) {
+		/*
+		 * On some machines, the heartbeat will give an error and
+		 * not work unless we re-enable the timer. So do so.
+		 */
+ atomic_set(&pretimeout_since_last_heartbeat, 1);
+ if (atomic_inc_and_test(&preop_panic_excl))
+ nmi_panic(regs, "pre-timeout");
+ }
+
+ return NMI_HANDLED;
+}
+#endif
+
+static int wdog_reboot_handler(struct notifier_block *this,
+ unsigned long code,
+ void *unused)
+{
+ static int reboot_event_handled;
+
+ if ((watchdog_user) && (!reboot_event_handled)) {
+ /* Make sure we only do this once. */
+ reboot_event_handled = 1;
+
+ if (code == SYS_POWER_OFF || code == SYS_HALT) {
+ /* Disable the WDT if we are shutting down. */
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+			/*
+			 * Set a long timer to let the reboot happen or
+			 * reset if it hangs, but only if the watchdog
+			 * timer was already running.
+			 */
+ if (timeout < 120)
+ timeout = 120;
+ pretimeout = 0;
+ ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ }
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block wdog_reboot_notifier = {
+ .notifier_call = wdog_reboot_handler,
+ .next = NULL,
+ .priority = 0
+};
+
+static void ipmi_new_smi(int if_num, struct device *device)
+{
+ ipmi_register_watchdog(if_num);
+}
+
+static void ipmi_smi_gone(int if_num)
+{
+ ipmi_unregister_watchdog(if_num);
+}
+
+static struct ipmi_smi_watcher smi_watcher = {
+ .owner = THIS_MODULE,
+ .new_smi = ipmi_new_smi,
+ .smi_gone = ipmi_smi_gone
+};
+
+static int action_op(const char *inval, char *outval)
+{
+ if (outval)
+ strcpy(outval, action);
+
+ if (!inval)
+ return 0;
+
+ if (strcmp(inval, "reset") == 0)
+ action_val = WDOG_TIMEOUT_RESET;
+ else if (strcmp(inval, "none") == 0)
+ action_val = WDOG_TIMEOUT_NONE;
+ else if (strcmp(inval, "power_cycle") == 0)
+ action_val = WDOG_TIMEOUT_POWER_CYCLE;
+ else if (strcmp(inval, "power_off") == 0)
+ action_val = WDOG_TIMEOUT_POWER_DOWN;
+ else
+ return -EINVAL;
+ strcpy(action, inval);
+ return 0;
+}
+
+static int preaction_op(const char *inval, char *outval)
+{
+ if (outval)
+ strcpy(outval, preaction);
+
+ if (!inval)
+ return 0;
+
+ if (strcmp(inval, "pre_none") == 0)
+ preaction_val = WDOG_PRETIMEOUT_NONE;
+ else if (strcmp(inval, "pre_smi") == 0)
+ preaction_val = WDOG_PRETIMEOUT_SMI;
+#ifdef HAVE_DIE_NMI
+ else if (strcmp(inval, "pre_nmi") == 0)
+ preaction_val = WDOG_PRETIMEOUT_NMI;
+#endif
+ else if (strcmp(inval, "pre_int") == 0)
+ preaction_val = WDOG_PRETIMEOUT_MSG_INT;
+ else
+ return -EINVAL;
+ strcpy(preaction, inval);
+ return 0;
+}
+
+static int preop_op(const char *inval, char *outval)
+{
+ if (outval)
+ strcpy(outval, preop);
+
+ if (!inval)
+ return 0;
+
+ if (strcmp(inval, "preop_none") == 0)
+ preop_val = WDOG_PREOP_NONE;
+ else if (strcmp(inval, "preop_panic") == 0)
+ preop_val = WDOG_PREOP_PANIC;
+ else if (strcmp(inval, "preop_give_data") == 0)
+ preop_val = WDOG_PREOP_GIVE_DATA;
+ else
+ return -EINVAL;
+ strcpy(preop, inval);
+ return 0;
+}
+
+static void check_parms(void)
+{
+#ifdef HAVE_DIE_NMI
+ int do_nmi = 0;
+ int rv;
+
+ if (preaction_val == WDOG_PRETIMEOUT_NMI) {
+ do_nmi = 1;
+ if (preop_val == WDOG_PREOP_GIVE_DATA) {
+ pr_warn("Pretimeout op is to give data but NMI pretimeout is enabled, setting pretimeout op to none\n");
+ preop_op("preop_none", NULL);
+ do_nmi = 0;
+ }
+ }
+ if (do_nmi && !nmi_handler_registered) {
+ rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0,
+ "ipmi");
+ if (rv) {
+ pr_warn("Can't register nmi handler\n");
+ return;
+		}
+		nmi_handler_registered = 1;
+ } else if (!do_nmi && nmi_handler_registered) {
+ unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
+ nmi_handler_registered = 0;
+ }
+#endif
+}
+
+static int __init ipmi_wdog_init(void)
+{
+ int rv;
+
+ if (action_op(action, NULL)) {
+ action_op("reset", NULL);
+ pr_info("Unknown action '%s', defaulting to reset\n", action);
+ }
+
+ if (preaction_op(preaction, NULL)) {
+ preaction_op("pre_none", NULL);
+ pr_info("Unknown preaction '%s', defaulting to none\n",
+ preaction);
+ }
+
+ if (preop_op(preop, NULL)) {
+ preop_op("preop_none", NULL);
+ pr_info("Unknown preop '%s', defaulting to none\n", preop);
+ }
+
+ check_parms();
+
+ register_reboot_notifier(&wdog_reboot_notifier);
+
+ rv = ipmi_smi_watcher_register(&smi_watcher);
+ if (rv) {
+#ifdef HAVE_DIE_NMI
+ if (nmi_handler_registered)
+ unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
+#endif
+ unregister_reboot_notifier(&wdog_reboot_notifier);
+ pr_warn("can't register smi watcher\n");
+ return rv;
+ }
+
+ pr_info("driver initialized\n");
+
+ return 0;
+}
+
+static void __exit ipmi_wdog_exit(void)
+{
+ ipmi_smi_watcher_unregister(&smi_watcher);
+ ipmi_unregister_watchdog(watchdog_ifnum);
+
+#ifdef HAVE_DIE_NMI
+ if (nmi_handler_registered)
+ unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
+#endif
+
+ unregister_reboot_notifier(&wdog_reboot_notifier);
+}
+module_exit(ipmi_wdog_exit);
+module_init(ipmi_wdog_init);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("watchdog timer based upon the IPMI interface.");
diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c
new file mode 100644
index 000000000..03d02a848
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, Intel Corporation.
+ * Copyright (c) 2021, IBM Corp.
+ */
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#include "kcs_bmc.h"
+
+/* Implement both the device and client interfaces here */
+#include "kcs_bmc_device.h"
+#include "kcs_bmc_client.h"
+
+/* Record registered devices and drivers */
+static DEFINE_MUTEX(kcs_bmc_lock);
+static LIST_HEAD(kcs_bmc_devices);
+static LIST_HEAD(kcs_bmc_drivers);
+
+/* Consumer data access */
+
+u8 kcs_bmc_read_data(struct kcs_bmc_device *kcs_bmc)
+{
+ return kcs_bmc->ops->io_inputb(kcs_bmc, kcs_bmc->ioreg.idr);
+}
+EXPORT_SYMBOL(kcs_bmc_read_data);
+
+void kcs_bmc_write_data(struct kcs_bmc_device *kcs_bmc, u8 data)
+{
+ kcs_bmc->ops->io_outputb(kcs_bmc, kcs_bmc->ioreg.odr, data);
+}
+EXPORT_SYMBOL(kcs_bmc_write_data);
+
+u8 kcs_bmc_read_status(struct kcs_bmc_device *kcs_bmc)
+{
+ return kcs_bmc->ops->io_inputb(kcs_bmc, kcs_bmc->ioreg.str);
+}
+EXPORT_SYMBOL(kcs_bmc_read_status);
+
+void kcs_bmc_write_status(struct kcs_bmc_device *kcs_bmc, u8 data)
+{
+ kcs_bmc->ops->io_outputb(kcs_bmc, kcs_bmc->ioreg.str, data);
+}
+EXPORT_SYMBOL(kcs_bmc_write_status);
+
+void kcs_bmc_update_status(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 val)
+{
+ kcs_bmc->ops->io_updateb(kcs_bmc, kcs_bmc->ioreg.str, mask, val);
+}
+EXPORT_SYMBOL(kcs_bmc_update_status);
+
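+/*
+ * Called by the backing device driver from its IRQ handler; dispatches
+ * the event to the currently attached client, if any, under the device
+ * lock.
+ */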
+irqreturn_t kcs_bmc_handle_event(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_client *client;
+ irqreturn_t rc = IRQ_NONE;
+
+ spin_lock(&kcs_bmc->lock);
+ client = kcs_bmc->client;
+ if (client)
+ rc = client->ops->event(client);
+ spin_unlock(&kcs_bmc->lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(kcs_bmc_handle_event);
+
+int kcs_bmc_enable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client)
+{
+ int rc;
+
+ spin_lock_irq(&kcs_bmc->lock);
+ if (kcs_bmc->client) {
+ rc = -EBUSY;
+ } else {
+ u8 mask = KCS_BMC_EVENT_TYPE_IBF;
+
+ kcs_bmc->client = client;
+ kcs_bmc_update_event_mask(kcs_bmc, mask, mask);
+ rc = 0;
+ }
+ spin_unlock_irq(&kcs_bmc->lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(kcs_bmc_enable_device);
+
+void kcs_bmc_disable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client)
+{
+ spin_lock_irq(&kcs_bmc->lock);
+ if (client == kcs_bmc->client) {
+ u8 mask = KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE;
+
+ kcs_bmc_update_event_mask(kcs_bmc, mask, 0);
+ kcs_bmc->client = NULL;
+ }
+ spin_unlock_irq(&kcs_bmc->lock);
+}
+EXPORT_SYMBOL(kcs_bmc_disable_device);
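+
+/*
+ * Minimal client sketch (informative): a consumer embeds a
+ * struct kcs_bmc_client, points its ops at an event callback, and
+ * brackets its lifetime with kcs_bmc_enable_device() and
+ * kcs_bmc_disable_device(). See kcs_bmc_cdev_ipmi.c for a complete
+ * in-tree example.
+ */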
+
+int kcs_bmc_add_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_driver *drv;
+ int error = 0;
+ int rc;
+
+ spin_lock_init(&kcs_bmc->lock);
+ kcs_bmc->client = NULL;
+
+ mutex_lock(&kcs_bmc_lock);
+ list_add(&kcs_bmc->entry, &kcs_bmc_devices);
+ list_for_each_entry(drv, &kcs_bmc_drivers, entry) {
+ rc = drv->ops->add_device(kcs_bmc);
+ if (!rc)
+ continue;
+
+ dev_err(kcs_bmc->dev, "Failed to add chardev for KCS channel %d: %d",
+ kcs_bmc->channel, rc);
+ error = rc;
+ }
+ mutex_unlock(&kcs_bmc_lock);
+
+ return error;
+}
+EXPORT_SYMBOL(kcs_bmc_add_device);
+
+void kcs_bmc_remove_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_driver *drv;
+ int rc;
+
+ mutex_lock(&kcs_bmc_lock);
+ list_del(&kcs_bmc->entry);
+ list_for_each_entry(drv, &kcs_bmc_drivers, entry) {
+ rc = drv->ops->remove_device(kcs_bmc);
+ if (rc)
+ dev_err(kcs_bmc->dev, "Failed to remove chardev for KCS channel %d: %d",
+ kcs_bmc->channel, rc);
+ }
+ mutex_unlock(&kcs_bmc_lock);
+}
+EXPORT_SYMBOL(kcs_bmc_remove_device);
+
+void kcs_bmc_register_driver(struct kcs_bmc_driver *drv)
+{
+ struct kcs_bmc_device *kcs_bmc;
+ int rc;
+
+ mutex_lock(&kcs_bmc_lock);
+ list_add(&drv->entry, &kcs_bmc_drivers);
+ list_for_each_entry(kcs_bmc, &kcs_bmc_devices, entry) {
+ rc = drv->ops->add_device(kcs_bmc);
+ if (rc)
+ dev_err(kcs_bmc->dev, "Failed to add driver for KCS channel %d: %d",
+ kcs_bmc->channel, rc);
+ }
+ mutex_unlock(&kcs_bmc_lock);
+}
+EXPORT_SYMBOL(kcs_bmc_register_driver);
+
+void kcs_bmc_unregister_driver(struct kcs_bmc_driver *drv)
+{
+ struct kcs_bmc_device *kcs_bmc;
+ int rc;
+
+ mutex_lock(&kcs_bmc_lock);
+ list_del(&drv->entry);
+ list_for_each_entry(kcs_bmc, &kcs_bmc_devices, entry) {
+ rc = drv->ops->remove_device(kcs_bmc);
+ if (rc)
+ dev_err(kcs_bmc->dev, "Failed to remove driver for KCS channel %d: %d",
+ kcs_bmc->channel, rc);
+ }
+ mutex_unlock(&kcs_bmc_lock);
+}
+EXPORT_SYMBOL(kcs_bmc_unregister_driver);
+
+void kcs_bmc_update_event_mask(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 events)
+{
+ kcs_bmc->ops->irq_mask_update(kcs_bmc, mask, events);
+}
+EXPORT_SYMBOL(kcs_bmc_update_event_mask);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_DESCRIPTION("KCS BMC to handle the IPMI request from system software");
diff --git a/drivers/char/ipmi/kcs_bmc.h b/drivers/char/ipmi/kcs_bmc.h
new file mode 100644
index 000000000..fa408b802
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2018, Intel Corporation.
+ */
+
+#ifndef __KCS_BMC_H__
+#define __KCS_BMC_H__
+
+#include <linux/list.h>
+
+#define KCS_BMC_EVENT_TYPE_OBE BIT(0)
+#define KCS_BMC_EVENT_TYPE_IBF BIT(1)
+
+#define KCS_BMC_STR_OBF BIT(0)
+#define KCS_BMC_STR_IBF BIT(1)
+#define KCS_BMC_STR_CMD_DAT BIT(3)
+
+/* IPMI 2.0 - 9.5, KCS Interface Registers
+ * @idr: Input Data Register
+ * @odr: Output Data Register
+ * @str: Status Register
+ */
+struct kcs_ioreg {
+ u32 idr;
+ u32 odr;
+ u32 str;
+};
+
+struct kcs_bmc_device_ops;
+struct kcs_bmc_client;
+
+struct kcs_bmc_device {
+ struct list_head entry;
+
+ struct device *dev;
+ u32 channel;
+
+ struct kcs_ioreg ioreg;
+
+ const struct kcs_bmc_device_ops *ops;
+
+ spinlock_t lock;
+ struct kcs_bmc_client *client;
+};
+
+#endif /* __KCS_BMC_H__ */
diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
new file mode 100644
index 000000000..2dea8cd5a
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
@@ -0,0 +1,685 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, Intel Corporation.
+ */
+
+#define pr_fmt(fmt) "aspeed-kcs-bmc: " fmt
+
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/regmap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include "kcs_bmc_device.h"
+
+
+#define DEVICE_NAME "ast-kcs-bmc"
+
+#define KCS_CHANNEL_MAX 4
+
+/*
+ * Field class descriptions
+ *
+ * LPCyE Enable LPC channel y
+ * IBFIEy Input Buffer Full IRQ Enable for LPC channel y
+ * IRQxEy Assert SerIRQ x for LPC channel y (Deprecated, use IDyIRQX, IRQXEy)
+ * IDyIRQX Use the specified 4-bit SerIRQ for LPC channel y
+ * SELyIRQX SerIRQ polarity for LPC channel y (low: 0, high: 1)
+ * IRQXEy Assert the SerIRQ specified in IDyIRQX for LPC channel y
+ */
+
+#define LPC_TYIRQX_LOW 0b00
+#define LPC_TYIRQX_HIGH 0b01
+#define LPC_TYIRQX_RSVD 0b10
+#define LPC_TYIRQX_RISING 0b11
+
+#define LPC_HICR0 0x000
+#define LPC_HICR0_LPC3E BIT(7)
+#define LPC_HICR0_LPC2E BIT(6)
+#define LPC_HICR0_LPC1E BIT(5)
+#define LPC_HICR2 0x008
+#define LPC_HICR2_IBFIE3 BIT(3)
+#define LPC_HICR2_IBFIE2 BIT(2)
+#define LPC_HICR2_IBFIE1 BIT(1)
+#define LPC_HICR4 0x010
+#define LPC_HICR4_LADR12AS BIT(7)
+#define LPC_HICR4_KCSENBL BIT(2)
+#define LPC_SIRQCR0 0x070
+/* IRQ{12,1}E1 are deprecated as of AST2600 A3 but necessary for prior chips */
+#define LPC_SIRQCR0_IRQ12E1 BIT(1)
+#define LPC_SIRQCR0_IRQ1E1 BIT(0)
+#define LPC_HICR5 0x080
+#define LPC_HICR5_ID3IRQX_MASK GENMASK(23, 20)
+#define LPC_HICR5_ID3IRQX_SHIFT 20
+#define LPC_HICR5_ID2IRQX_MASK GENMASK(19, 16)
+#define LPC_HICR5_ID2IRQX_SHIFT 16
+#define LPC_HICR5_SEL3IRQX BIT(15)
+#define LPC_HICR5_IRQXE3 BIT(14)
+#define LPC_HICR5_SEL2IRQX BIT(13)
+#define LPC_HICR5_IRQXE2 BIT(12)
+#define LPC_LADR3H 0x014
+#define LPC_LADR3L 0x018
+#define LPC_LADR12H 0x01C
+#define LPC_LADR12L 0x020
+#define LPC_IDR1 0x024
+#define LPC_IDR2 0x028
+#define LPC_IDR3 0x02C
+#define LPC_ODR1 0x030
+#define LPC_ODR2 0x034
+#define LPC_ODR3 0x038
+#define LPC_STR1 0x03C
+#define LPC_STR2 0x040
+#define LPC_STR3 0x044
+#define LPC_HICRB 0x100
+#define LPC_HICRB_EN16LADR2 BIT(5)
+#define LPC_HICRB_EN16LADR1 BIT(4)
+#define LPC_HICRB_IBFIE4 BIT(1)
+#define LPC_HICRB_LPC4E BIT(0)
+#define LPC_HICRC 0x104
+#define LPC_HICRC_ID4IRQX_MASK GENMASK(7, 4)
+#define LPC_HICRC_ID4IRQX_SHIFT 4
+#define LPC_HICRC_TY4IRQX_MASK GENMASK(3, 2)
+#define LPC_HICRC_TY4IRQX_SHIFT 2
+#define LPC_HICRC_OBF4_AUTO_CLR BIT(1)
+#define LPC_HICRC_IRQXE4 BIT(0)
+#define LPC_LADR4 0x110
+#define LPC_IDR4 0x114
+#define LPC_ODR4 0x118
+#define LPC_STR4 0x11C
+#define LPC_LSADR12 0x120
+#define LPC_LSADR12_LSADR2_MASK GENMASK(31, 16)
+#define LPC_LSADR12_LSADR2_SHIFT 16
+#define LPC_LSADR12_LSADR1_MASK GENMASK(15, 0)
+#define LPC_LSADR12_LSADR1_SHIFT 0
+
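+/*
+ * Poll period used to emulate the missing OBE (output buffer empty)
+ * interrupt; see aspeed_kcs_irq_mask_update() and aspeed_kcs_check_obe().
+ */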
+#define OBE_POLL_PERIOD (HZ / 2)
+
+enum aspeed_kcs_irq_mode {
+ aspeed_kcs_irq_none,
+ aspeed_kcs_irq_serirq,
+};
+
+struct aspeed_kcs_bmc {
+ struct kcs_bmc_device kcs_bmc;
+
+ struct regmap *map;
+
+ struct {
+ enum aspeed_kcs_irq_mode mode;
+ int id;
+ } upstream_irq;
+
+ struct {
+ spinlock_t lock;
+ bool remove;
+ struct timer_list timer;
+ } obe;
+};
+
+static inline struct aspeed_kcs_bmc *to_aspeed_kcs_bmc(struct kcs_bmc_device *kcs_bmc)
+{
+ return container_of(kcs_bmc, struct aspeed_kcs_bmc, kcs_bmc);
+}
+
+static u8 aspeed_kcs_inb(struct kcs_bmc_device *kcs_bmc, u32 reg)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
+ u32 val = 0;
+ int rc;
+
+ rc = regmap_read(priv->map, reg, &val);
+ WARN(rc != 0, "regmap_read() failed: %d\n", rc);
+
+ return rc == 0 ? (u8) val : 0;
+}
+
+static void aspeed_kcs_outb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 data)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
+ int rc;
+
+ rc = regmap_write(priv->map, reg, data);
+ WARN(rc != 0, "regmap_write() failed: %d\n", rc);
+
+ /* Trigger the upstream IRQ on ODR writes, if enabled */
+
+ switch (reg) {
+ case LPC_ODR1:
+ case LPC_ODR2:
+ case LPC_ODR3:
+ case LPC_ODR4:
+ break;
+ default:
+ return;
+ }
+
+ if (priv->upstream_irq.mode != aspeed_kcs_irq_serirq)
+ return;
+
+ switch (kcs_bmc->channel) {
+ case 1:
+ switch (priv->upstream_irq.id) {
+ case 12:
+ regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ12E1,
+ LPC_SIRQCR0_IRQ12E1);
+ break;
+ case 1:
+ regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ1E1,
+ LPC_SIRQCR0_IRQ1E1);
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE2, LPC_HICR5_IRQXE2);
+ break;
+ case 3:
+ regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE3, LPC_HICR5_IRQXE3);
+ break;
+ case 4:
+ regmap_update_bits(priv->map, LPC_HICRC, LPC_HICRC_IRQXE4, LPC_HICRC_IRQXE4);
+ break;
+ default:
+ break;
+ }
+}
+
+static void aspeed_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 val)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
+ int rc;
+
+ rc = regmap_update_bits(priv->map, reg, mask, val);
+ WARN(rc != 0, "regmap_update_bits() failed: %d\n", rc);
+}
+
+/*
+ * We denote D for Data and C for Cmd/Status; the default rules are:
+ *
+ * 1. Only the D address is given:
+ * A. KCS1/KCS2 (D/C: X/X+4)
+ * D/C: CA0h/CA4h
+ * D/C: CA8h/CACh
+ * B. KCS3 (D/C: XX2/XX3h)
+ * D/C: CA2h/CA3h
+ * C. KCS4 (D/C: X/X+1)
+ * D/C: CA4h/CA5h
+ *
+ * 2. Both the D/C addresses are given:
+ * A. KCS1/KCS2/KCS4 (D/C: X/Y)
+ * D/C: CA0h/CA1h
+ * D/C: CA8h/CA9h
+ * D/C: CA4h/CA5h
+ * B. KCS3 (D/C: XX2/XX3h)
+ * D/C: CA2h/CA3h
+ */
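+
+/*
+ * Illustrative (hypothetical) devicetree fragment for case 2.A on the
+ * KCS4 channel, giving both the data and status I/O addresses:
+ *
+ *	kcs4: kcs@114 {
+ *		compatible = "aspeed,ast2600-kcs-bmc";
+ *		reg = <0x114 0x1>, <0x118 0x1>, <0x11c 0x1>;
+ *		aspeed,lpc-io-reg = <0xca4 0xca5>;
+ *		interrupts = <8>;
+ *	};
+ *
+ * The reg triplet selects IDR4/ODR4/STR4, from which the channel number
+ * is inferred below.
+ */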
+static int aspeed_kcs_set_address(struct kcs_bmc_device *kcs_bmc, u32 addrs[2], int nr_addrs)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
+
+ if (WARN_ON(nr_addrs < 1 || nr_addrs > 2))
+ return -EINVAL;
+
+ switch (priv->kcs_bmc.channel) {
+ case 1:
+ regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, 0);
+ regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
+ regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
+ if (nr_addrs == 2) {
+ regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR1_MASK,
+ addrs[1] << LPC_LSADR12_LSADR1_SHIFT);
+
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR1,
+ LPC_HICRB_EN16LADR1);
+ }
+ break;
+
+ case 2:
+ regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, LPC_HICR4_LADR12AS);
+ regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
+ regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
+ if (nr_addrs == 2) {
+ regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR2_MASK,
+ addrs[1] << LPC_LSADR12_LSADR2_SHIFT);
+
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR2,
+ LPC_HICRB_EN16LADR2);
+ }
+ break;
+
+ case 3:
+ if (nr_addrs == 2) {
+ dev_err(priv->kcs_bmc.dev,
+ "Channel 3 only supports inferred status IO address\n");
+ return -EINVAL;
+ }
+
+ regmap_write(priv->map, LPC_LADR3H, addrs[0] >> 8);
+ regmap_write(priv->map, LPC_LADR3L, addrs[0] & 0xFF);
+ break;
+
+ case 4:
+ if (nr_addrs == 1)
+ regmap_write(priv->map, LPC_LADR4, ((addrs[0] + 1) << 16) | addrs[0]);
+ else
+ regmap_write(priv->map, LPC_LADR4, (addrs[1] << 16) | addrs[0]);
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int aspeed_kcs_map_serirq_type(u32 dt_type)
+{
+ switch (dt_type) {
+ case IRQ_TYPE_EDGE_RISING:
+ return LPC_TYIRQX_RISING;
+ case IRQ_TYPE_LEVEL_HIGH:
+ return LPC_TYIRQX_HIGH;
+ case IRQ_TYPE_LEVEL_LOW:
+ return LPC_TYIRQX_LOW;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int aspeed_kcs_config_upstream_irq(struct aspeed_kcs_bmc *priv, u32 id, u32 dt_type)
+{
+ unsigned int mask, val, hw_type;
+ int ret;
+
+ if (id > 15)
+ return -EINVAL;
+
+ ret = aspeed_kcs_map_serirq_type(dt_type);
+ if (ret < 0)
+ return ret;
+ hw_type = ret;
+
+ priv->upstream_irq.mode = aspeed_kcs_irq_serirq;
+ priv->upstream_irq.id = id;
+
+ switch (priv->kcs_bmc.channel) {
+ case 1:
+ /* Needs IRQxE1 rather than (ID1IRQX, SEL1IRQX, IRQXE1) before AST2600 A3 */
+ break;
+ case 2:
+ if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH))
+ return -EINVAL;
+
+ mask = LPC_HICR5_SEL2IRQX | LPC_HICR5_ID2IRQX_MASK;
+ val = (id << LPC_HICR5_ID2IRQX_SHIFT);
+ val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL2IRQX : 0;
+ regmap_update_bits(priv->map, LPC_HICR5, mask, val);
+
+ break;
+ case 3:
+ if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH))
+ return -EINVAL;
+
+ mask = LPC_HICR5_SEL3IRQX | LPC_HICR5_ID3IRQX_MASK;
+ val = (id << LPC_HICR5_ID3IRQX_SHIFT);
+ val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL3IRQX : 0;
+ regmap_update_bits(priv->map, LPC_HICR5, mask, val);
+
+ break;
+ case 4:
+ mask = LPC_HICRC_ID4IRQX_MASK | LPC_HICRC_TY4IRQX_MASK | LPC_HICRC_OBF4_AUTO_CLR;
+ val = (id << LPC_HICRC_ID4IRQX_SHIFT) | (hw_type << LPC_HICRC_TY4IRQX_SHIFT);
+ regmap_update_bits(priv->map, LPC_HICRC, mask, val);
+ break;
+ default:
+ dev_warn(priv->kcs_bmc.dev,
+ "SerIRQ configuration not supported on KCS channel %d\n",
+ priv->kcs_bmc.channel);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void aspeed_kcs_enable_channel(struct kcs_bmc_device *kcs_bmc, bool enable)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
+
+ switch (kcs_bmc->channel) {
+ case 1:
+ regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC1E, enable * LPC_HICR0_LPC1E);
+ return;
+ case 2:
+ regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC2E, enable * LPC_HICR0_LPC2E);
+ return;
+ case 3:
+ regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC3E, enable * LPC_HICR0_LPC3E);
+ regmap_update_bits(priv->map, LPC_HICR4,
+ LPC_HICR4_KCSENBL, enable * LPC_HICR4_KCSENBL);
+ return;
+ case 4:
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_LPC4E, enable * LPC_HICRB_LPC4E);
+ return;
+ default:
+ pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel);
+ return;
+ }
+}
+
+static void aspeed_kcs_check_obe(struct timer_list *timer)
+{
+ struct aspeed_kcs_bmc *priv = container_of(timer, struct aspeed_kcs_bmc, obe.timer);
+ unsigned long flags;
+ u8 str;
+
+ spin_lock_irqsave(&priv->obe.lock, flags);
+ if (priv->obe.remove) {
+ spin_unlock_irqrestore(&priv->obe.lock, flags);
+ return;
+ }
+
+ str = aspeed_kcs_inb(&priv->kcs_bmc, priv->kcs_bmc.ioreg.str);
+ if (str & KCS_BMC_STR_OBF) {
+ mod_timer(timer, jiffies + OBE_POLL_PERIOD);
+ spin_unlock_irqrestore(&priv->obe.lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&priv->obe.lock, flags);
+
+ kcs_bmc_handle_event(&priv->kcs_bmc);
+}
+
+static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state)
+{
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
+ int rc;
+ u8 str;
+
+ /* We don't have an OBE IRQ, emulate it */
+ if (mask & KCS_BMC_EVENT_TYPE_OBE) {
+ if (KCS_BMC_EVENT_TYPE_OBE & state) {
+ /*
+ * Given we don't have an OBE IRQ, delay by polling briefly to see if we can
+ * observe such an event before returning to the caller. This is not
+ * incorrect because OBF may have already become clear before enabling the
+ * IRQ if we had one, under which circumstance no event will be propagated
+ * anyway.
+ *
+ * The onus is on the client to perform a race-free check that it hasn't
+ * missed the event.
+ */
+ rc = read_poll_timeout_atomic(aspeed_kcs_inb, str,
+ !(str & KCS_BMC_STR_OBF), 1, 100, false,
+ &priv->kcs_bmc, priv->kcs_bmc.ioreg.str);
+ /* Time for the slow path? */
+ if (rc == -ETIMEDOUT)
+ mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD);
+ } else {
+ del_timer(&priv->obe.timer);
+ }
+ }
+
+ if (mask & KCS_BMC_EVENT_TYPE_IBF) {
+ const bool enable = !!(state & KCS_BMC_EVENT_TYPE_IBF);
+
+ switch (kcs_bmc->channel) {
+ case 1:
+ regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE1,
+ enable * LPC_HICR2_IBFIE1);
+ return;
+ case 2:
+ regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE2,
+ enable * LPC_HICR2_IBFIE2);
+ return;
+ case 3:
+ regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE3,
+ enable * LPC_HICR2_IBFIE3);
+ return;
+ case 4:
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_IBFIE4,
+ enable * LPC_HICRB_IBFIE4);
+ return;
+ default:
+ pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel);
+ return;
+ }
+ }
+}
+
+static const struct kcs_bmc_device_ops aspeed_kcs_ops = {
+ .irq_mask_update = aspeed_kcs_irq_mask_update,
+ .io_inputb = aspeed_kcs_inb,
+ .io_outputb = aspeed_kcs_outb,
+ .io_updateb = aspeed_kcs_updateb,
+};
+
+static irqreturn_t aspeed_kcs_irq(int irq, void *arg)
+{
+ struct kcs_bmc_device *kcs_bmc = arg;
+
+ return kcs_bmc_handle_event(kcs_bmc);
+}
+
+static int aspeed_kcs_config_downstream_irq(struct kcs_bmc_device *kcs_bmc,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ return devm_request_irq(dev, irq, aspeed_kcs_irq, IRQF_SHARED,
+ dev_name(dev), kcs_bmc);
+}
+
+static const struct kcs_ioreg ast_kcs_bmc_ioregs[KCS_CHANNEL_MAX] = {
+ { .idr = LPC_IDR1, .odr = LPC_ODR1, .str = LPC_STR1 },
+ { .idr = LPC_IDR2, .odr = LPC_ODR2, .str = LPC_STR2 },
+ { .idr = LPC_IDR3, .odr = LPC_ODR3, .str = LPC_STR3 },
+ { .idr = LPC_IDR4, .odr = LPC_ODR4, .str = LPC_STR4 },
+};
+
+static int aspeed_kcs_of_get_channel(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct kcs_ioreg ioreg;
+ const __be32 *reg;
+ int i;
+
+ np = pdev->dev.of_node;
+
+ /* Don't translate addresses, we want offsets for the regmaps */
+ reg = of_get_address(np, 0, NULL, NULL);
+ if (!reg)
+ return -EINVAL;
+ ioreg.idr = be32_to_cpup(reg);
+
+ reg = of_get_address(np, 1, NULL, NULL);
+ if (!reg)
+ return -EINVAL;
+ ioreg.odr = be32_to_cpup(reg);
+
+ reg = of_get_address(np, 2, NULL, NULL);
+ if (!reg)
+ return -EINVAL;
+ ioreg.str = be32_to_cpup(reg);
+
+ for (i = 0; i < ARRAY_SIZE(ast_kcs_bmc_ioregs); i++) {
+ if (!memcmp(&ast_kcs_bmc_ioregs[i], &ioreg, sizeof(ioreg)))
+ return i + 1;
+ }
+ return -EINVAL;
+}
+
+static int
+aspeed_kcs_of_get_io_address(struct platform_device *pdev, u32 addrs[2])
+{
+ int rc;
+
+ rc = of_property_read_variable_u32_array(pdev->dev.of_node,
+ "aspeed,lpc-io-reg",
+ addrs, 1, 2);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "No valid 'aspeed,lpc-io-reg' configured\n");
+ return rc;
+ }
+
+ if (addrs[0] > 0xffff) {
+ dev_err(&pdev->dev, "Invalid data address in 'aspeed,lpc-io-reg'\n");
+ return -EINVAL;
+ }
+
+ if (rc == 2 && addrs[1] > 0xffff) {
+ dev_err(&pdev->dev, "Invalid status address in 'aspeed,lpc-io-reg'\n");
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int aspeed_kcs_probe(struct platform_device *pdev)
+{
+ struct kcs_bmc_device *kcs_bmc;
+ struct aspeed_kcs_bmc *priv;
+ struct device_node *np;
+ bool have_upstream_irq;
+ u32 upstream_irq[2];
+ int rc, channel;
+ int nr_addrs;
+ u32 addrs[2];
+
+ np = pdev->dev.of_node->parent;
+ if (!of_device_is_compatible(np, "aspeed,ast2400-lpc-v2") &&
+ !of_device_is_compatible(np, "aspeed,ast2500-lpc-v2") &&
+ !of_device_is_compatible(np, "aspeed,ast2600-lpc-v2")) {
+ dev_err(&pdev->dev, "unsupported LPC device binding\n");
+ return -ENODEV;
+ }
+
+ channel = aspeed_kcs_of_get_channel(pdev);
+ if (channel < 0)
+ return channel;
+
+ nr_addrs = aspeed_kcs_of_get_io_address(pdev, addrs);
+ if (nr_addrs < 0)
+ return nr_addrs;
+
+ np = pdev->dev.of_node;
+ rc = of_property_read_u32_array(np, "aspeed,lpc-interrupts", upstream_irq, 2);
+ if (rc && rc != -EINVAL)
+ return -EINVAL;
+
+ have_upstream_irq = !rc;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ kcs_bmc = &priv->kcs_bmc;
+ kcs_bmc->dev = &pdev->dev;
+ kcs_bmc->channel = channel;
+ kcs_bmc->ioreg = ast_kcs_bmc_ioregs[channel - 1];
+ kcs_bmc->ops = &aspeed_kcs_ops;
+
+ priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(priv->map)) {
+ dev_err(&pdev->dev, "Couldn't get regmap\n");
+ return -ENODEV;
+ }
+
+ spin_lock_init(&priv->obe.lock);
+ priv->obe.remove = false;
+ timer_setup(&priv->obe.timer, aspeed_kcs_check_obe, 0);
+
+ rc = aspeed_kcs_set_address(kcs_bmc, addrs, nr_addrs);
+ if (rc)
+ return rc;
+
+ /* Host to BMC IRQ */
+ rc = aspeed_kcs_config_downstream_irq(kcs_bmc, pdev);
+ if (rc)
+ return rc;
+
+ /* BMC to Host IRQ */
+ if (have_upstream_irq) {
+ rc = aspeed_kcs_config_upstream_irq(priv, upstream_irq[0], upstream_irq[1]);
+ if (rc < 0)
+ return rc;
+ } else {
+ priv->upstream_irq.mode = aspeed_kcs_irq_none;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
+ aspeed_kcs_enable_channel(kcs_bmc, true);
+
+ rc = kcs_bmc_add_device(&priv->kcs_bmc);
+ if (rc) {
+ dev_warn(&pdev->dev, "Failed to register channel %d: %d\n", kcs_bmc->channel, rc);
+ return rc;
+ }
+
+ dev_info(&pdev->dev, "Initialised channel %d at 0x%x\n",
+ kcs_bmc->channel, addrs[0]);
+
+ return 0;
+}
+
+static int aspeed_kcs_remove(struct platform_device *pdev)
+{
+ struct aspeed_kcs_bmc *priv = platform_get_drvdata(pdev);
+ struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc;
+
+ kcs_bmc_remove_device(kcs_bmc);
+
+ aspeed_kcs_enable_channel(kcs_bmc, false);
+ aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
+
+	/* Make sure it's properly dead */
+ spin_lock_irq(&priv->obe.lock);
+ priv->obe.remove = true;
+ spin_unlock_irq(&priv->obe.lock);
+ del_timer_sync(&priv->obe.timer);
+
+ return 0;
+}
+
+static const struct of_device_id ast_kcs_bmc_match[] = {
+ { .compatible = "aspeed,ast2400-kcs-bmc-v2" },
+ { .compatible = "aspeed,ast2500-kcs-bmc-v2" },
+ { .compatible = "aspeed,ast2600-kcs-bmc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ast_kcs_bmc_match);
+
+static struct platform_driver ast_kcs_bmc_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = ast_kcs_bmc_match,
+ },
+ .probe = aspeed_kcs_probe,
+ .remove = aspeed_kcs_remove,
+};
+module_platform_driver(ast_kcs_bmc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_DESCRIPTION("Aspeed device interface to the KCS BMC device");
diff --git a/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c b/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
new file mode 100644
index 000000000..cf670e891
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, Intel Corporation.
+ */
+
+#define pr_fmt(fmt) "kcs-bmc: " fmt
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/ipmi_bmc.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "kcs_bmc_client.h"
+
+/*
+ * Different phases of the KCS BMC module.
+ * KCS_PHASE_IDLE:
+ *            BMC should be neither expecting nor sending any data.
+ * KCS_PHASE_WRITE_START:
+ *            BMC is receiving a WRITE_START command from system software.
+ * KCS_PHASE_WRITE_DATA:
+ *            BMC is receiving a data byte from system software.
+ * KCS_PHASE_WRITE_END_CMD:
+ *            BMC is waiting for the last data byte from system software.
+ * KCS_PHASE_WRITE_DONE:
+ *            BMC has received the whole request from system software.
+ * KCS_PHASE_WAIT_READ:
+ *            BMC is waiting for the response from the upper IPMI service.
+ * KCS_PHASE_READ:
+ *            BMC is transferring the response to system software.
+ * KCS_PHASE_ABORT_ERROR1:
+ *            BMC is waiting for an error status request from system software.
+ * KCS_PHASE_ABORT_ERROR2:
+ *            BMC is waiting for idle status after an error from system software.
+ * KCS_PHASE_ERROR:
+ *            BMC has detected a protocol violation at the interface level.
+ */
+enum kcs_ipmi_phases {
+ KCS_PHASE_IDLE,
+
+ KCS_PHASE_WRITE_START,
+ KCS_PHASE_WRITE_DATA,
+ KCS_PHASE_WRITE_END_CMD,
+ KCS_PHASE_WRITE_DONE,
+
+ KCS_PHASE_WAIT_READ,
+ KCS_PHASE_READ,
+
+ KCS_PHASE_ABORT_ERROR1,
+ KCS_PHASE_ABORT_ERROR2,
+ KCS_PHASE_ERROR
+};
+
+/* IPMI 2.0 - Table 9-4, KCS Interface Status Codes */
+enum kcs_ipmi_errors {
+ KCS_NO_ERROR = 0x00,
+ KCS_ABORTED_BY_COMMAND = 0x01,
+ KCS_ILLEGAL_CONTROL_CODE = 0x02,
+ KCS_LENGTH_ERROR = 0x06,
+ KCS_UNSPECIFIED_ERROR = 0xFF
+};
+
+struct kcs_bmc_ipmi {
+ struct list_head entry;
+
+ struct kcs_bmc_client client;
+
+ spinlock_t lock;
+
+ enum kcs_ipmi_phases phase;
+ enum kcs_ipmi_errors error;
+
+ wait_queue_head_t queue;
+ bool data_in_avail;
+ int data_in_idx;
+ u8 *data_in;
+
+ int data_out_idx;
+ int data_out_len;
+ u8 *data_out;
+
+ struct mutex mutex;
+ u8 *kbuffer;
+
+ struct miscdevice miscdev;
+};
+
+#define DEVICE_NAME "ipmi-kcs"
+
+#define KCS_MSG_BUFSIZ 1000
+
+#define KCS_ZERO_DATA 0
+
+/* IPMI 2.0 - Table 9-1, KCS Interface Status Register Bits */
+#define KCS_STATUS_STATE(state) ((state) << 6)
+#define KCS_STATUS_STATE_MASK GENMASK(7, 6)
+#define KCS_STATUS_CMD_DAT BIT(3)
+#define KCS_STATUS_SMS_ATN BIT(2)
+#define KCS_STATUS_IBF BIT(1)
+#define KCS_STATUS_OBF BIT(0)
+
+/* IPMI 2.0 - Table 9-2, KCS Interface State Bits */
+enum kcs_states {
+ IDLE_STATE = 0,
+ READ_STATE = 1,
+ WRITE_STATE = 2,
+ ERROR_STATE = 3,
+};
+
+/* IPMI 2.0 - Table 9-3, KCS Interface Control Codes */
+#define KCS_CMD_GET_STATUS_ABORT 0x60
+#define KCS_CMD_WRITE_START 0x61
+#define KCS_CMD_WRITE_END 0x62
+#define KCS_CMD_READ_BYTE 0x68
+
+static inline void set_state(struct kcs_bmc_ipmi *priv, u8 state)
+{
+ kcs_bmc_update_status(priv->client.dev, KCS_STATUS_STATE_MASK, KCS_STATUS_STATE(state));
+}
+
+static void kcs_bmc_ipmi_force_abort(struct kcs_bmc_ipmi *priv)
+{
+ set_state(priv, ERROR_STATE);
+ kcs_bmc_read_data(priv->client.dev);
+ kcs_bmc_write_data(priv->client.dev, KCS_ZERO_DATA);
+
+ priv->phase = KCS_PHASE_ERROR;
+ priv->data_in_avail = false;
+ priv->data_in_idx = 0;
+}
+
+static void kcs_bmc_ipmi_handle_data(struct kcs_bmc_ipmi *priv)
+{
+ struct kcs_bmc_device *dev;
+ u8 data;
+
+ dev = priv->client.dev;
+
+ switch (priv->phase) {
+ case KCS_PHASE_WRITE_START:
+ priv->phase = KCS_PHASE_WRITE_DATA;
+ fallthrough;
+
+ case KCS_PHASE_WRITE_DATA:
+ if (priv->data_in_idx < KCS_MSG_BUFSIZ) {
+ set_state(priv, WRITE_STATE);
+ kcs_bmc_write_data(dev, KCS_ZERO_DATA);
+ priv->data_in[priv->data_in_idx++] = kcs_bmc_read_data(dev);
+ } else {
+ kcs_bmc_ipmi_force_abort(priv);
+ priv->error = KCS_LENGTH_ERROR;
+ }
+ break;
+
+ case KCS_PHASE_WRITE_END_CMD:
+ if (priv->data_in_idx < KCS_MSG_BUFSIZ) {
+ set_state(priv, READ_STATE);
+ priv->data_in[priv->data_in_idx++] = kcs_bmc_read_data(dev);
+ priv->phase = KCS_PHASE_WRITE_DONE;
+ priv->data_in_avail = true;
+ wake_up_interruptible(&priv->queue);
+ } else {
+ kcs_bmc_ipmi_force_abort(priv);
+ priv->error = KCS_LENGTH_ERROR;
+ }
+ break;
+
+ case KCS_PHASE_READ:
+ if (priv->data_out_idx == priv->data_out_len)
+ set_state(priv, IDLE_STATE);
+
+ data = kcs_bmc_read_data(dev);
+ if (data != KCS_CMD_READ_BYTE) {
+ set_state(priv, ERROR_STATE);
+ kcs_bmc_write_data(dev, KCS_ZERO_DATA);
+ break;
+ }
+
+ if (priv->data_out_idx == priv->data_out_len) {
+ kcs_bmc_write_data(dev, KCS_ZERO_DATA);
+ priv->phase = KCS_PHASE_IDLE;
+ break;
+ }
+
+ kcs_bmc_write_data(dev, priv->data_out[priv->data_out_idx++]);
+ break;
+
+ case KCS_PHASE_ABORT_ERROR1:
+ set_state(priv, READ_STATE);
+ kcs_bmc_read_data(dev);
+ kcs_bmc_write_data(dev, priv->error);
+ priv->phase = KCS_PHASE_ABORT_ERROR2;
+ break;
+
+ case KCS_PHASE_ABORT_ERROR2:
+ set_state(priv, IDLE_STATE);
+ kcs_bmc_read_data(dev);
+ kcs_bmc_write_data(dev, KCS_ZERO_DATA);
+ priv->phase = KCS_PHASE_IDLE;
+ break;
+
+ default:
+ kcs_bmc_ipmi_force_abort(priv);
+ break;
+ }
+}
+
+static void kcs_bmc_ipmi_handle_cmd(struct kcs_bmc_ipmi *priv)
+{
+ u8 cmd;
+
+ set_state(priv, WRITE_STATE);
+ kcs_bmc_write_data(priv->client.dev, KCS_ZERO_DATA);
+
+ cmd = kcs_bmc_read_data(priv->client.dev);
+ switch (cmd) {
+ case KCS_CMD_WRITE_START:
+ priv->phase = KCS_PHASE_WRITE_START;
+ priv->error = KCS_NO_ERROR;
+ priv->data_in_avail = false;
+ priv->data_in_idx = 0;
+ break;
+
+ case KCS_CMD_WRITE_END:
+ if (priv->phase != KCS_PHASE_WRITE_DATA) {
+ kcs_bmc_ipmi_force_abort(priv);
+ break;
+ }
+
+ priv->phase = KCS_PHASE_WRITE_END_CMD;
+ break;
+
+ case KCS_CMD_GET_STATUS_ABORT:
+ if (priv->error == KCS_NO_ERROR)
+ priv->error = KCS_ABORTED_BY_COMMAND;
+
+ priv->phase = KCS_PHASE_ABORT_ERROR1;
+ priv->data_in_avail = false;
+ priv->data_in_idx = 0;
+ break;
+
+ default:
+ kcs_bmc_ipmi_force_abort(priv);
+ priv->error = KCS_ILLEGAL_CONTROL_CODE;
+ break;
+ }
+}
+
+static inline struct kcs_bmc_ipmi *client_to_kcs_bmc_ipmi(struct kcs_bmc_client *client)
+{
+ return container_of(client, struct kcs_bmc_ipmi, client);
+}
+
+static irqreturn_t kcs_bmc_ipmi_event(struct kcs_bmc_client *client)
+{
+ struct kcs_bmc_ipmi *priv;
+ u8 status;
+ int ret;
+
+ priv = client_to_kcs_bmc_ipmi(client);
+ if (!priv)
+ return IRQ_NONE;
+
+ spin_lock(&priv->lock);
+
+ status = kcs_bmc_read_status(client->dev);
+ if (status & KCS_STATUS_IBF) {
+ if (status & KCS_STATUS_CMD_DAT)
+ kcs_bmc_ipmi_handle_cmd(priv);
+ else
+ kcs_bmc_ipmi_handle_data(priv);
+
+ ret = IRQ_HANDLED;
+ } else {
+ ret = IRQ_NONE;
+ }
+
+ spin_unlock(&priv->lock);
+
+ return ret;
+}
+
+static const struct kcs_bmc_client_ops kcs_bmc_ipmi_client_ops = {
+ .event = kcs_bmc_ipmi_event,
+};
+
+static inline struct kcs_bmc_ipmi *to_kcs_bmc(struct file *filp)
+{
+ return container_of(filp->private_data, struct kcs_bmc_ipmi, miscdev);
+}
+
+static int kcs_bmc_ipmi_open(struct inode *inode, struct file *filp)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+
+ return kcs_bmc_enable_device(priv->client.dev, &priv->client);
+}
+
+static __poll_t kcs_bmc_ipmi_poll(struct file *filp, poll_table *wait)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+ __poll_t mask = 0;
+
+ poll_wait(filp, &priv->queue, wait);
+
+ spin_lock_irq(&priv->lock);
+ if (priv->data_in_avail)
+ mask |= EPOLLIN;
+ spin_unlock_irq(&priv->lock);
+
+ return mask;
+}
+
+static ssize_t kcs_bmc_ipmi_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+ bool data_avail;
+ size_t data_len;
+ ssize_t ret;
+
+ if (!(filp->f_flags & O_NONBLOCK))
+ wait_event_interruptible(priv->queue,
+ priv->data_in_avail);
+
+ mutex_lock(&priv->mutex);
+
+ spin_lock_irq(&priv->lock);
+ data_avail = priv->data_in_avail;
+ if (data_avail) {
+ data_len = priv->data_in_idx;
+ memcpy(priv->kbuffer, priv->data_in, data_len);
+ }
+ spin_unlock_irq(&priv->lock);
+
+ if (!data_avail) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ if (count < data_len) {
+		pr_err("channel=%u: request of %zu bytes exceeds the read buffer\n",
+		       priv->client.dev->channel, data_len);
+
+ spin_lock_irq(&priv->lock);
+ kcs_bmc_ipmi_force_abort(priv);
+ spin_unlock_irq(&priv->lock);
+
+ ret = -EOVERFLOW;
+ goto out_unlock;
+ }
+
+ if (copy_to_user(buf, priv->kbuffer, data_len)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ ret = data_len;
+
+ spin_lock_irq(&priv->lock);
+ if (priv->phase == KCS_PHASE_WRITE_DONE) {
+ priv->phase = KCS_PHASE_WAIT_READ;
+ priv->data_in_avail = false;
+ priv->data_in_idx = 0;
+ } else {
+ ret = -EAGAIN;
+ }
+ spin_unlock_irq(&priv->lock);
+
+out_unlock:
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static ssize_t kcs_bmc_ipmi_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+ ssize_t ret;
+
+	/* The minimum response size is 3 bytes: netfn + cmd + ccode */
+ if (count < 3 || count > KCS_MSG_BUFSIZ)
+ return -EINVAL;
+
+ mutex_lock(&priv->mutex);
+
+ if (copy_from_user(priv->kbuffer, buf, count)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ spin_lock_irq(&priv->lock);
+ if (priv->phase == KCS_PHASE_WAIT_READ) {
+ priv->phase = KCS_PHASE_READ;
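+		/* data_out[0] primes OBF below, so consumption starts at index 1 */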
+ priv->data_out_idx = 1;
+ priv->data_out_len = count;
+ memcpy(priv->data_out, priv->kbuffer, count);
+ kcs_bmc_write_data(priv->client.dev, priv->data_out[0]);
+ ret = count;
+ } else {
+ ret = -EINVAL;
+ }
+ spin_unlock_irq(&priv->lock);
+
+out_unlock:
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static long kcs_bmc_ipmi_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+ long ret = 0;
+
+ spin_lock_irq(&priv->lock);
+
+ switch (cmd) {
+ case IPMI_BMC_IOCTL_SET_SMS_ATN:
+ kcs_bmc_update_status(priv->client.dev, KCS_STATUS_SMS_ATN, KCS_STATUS_SMS_ATN);
+ break;
+
+ case IPMI_BMC_IOCTL_CLEAR_SMS_ATN:
+ kcs_bmc_update_status(priv->client.dev, KCS_STATUS_SMS_ATN, 0);
+ break;
+
+ case IPMI_BMC_IOCTL_FORCE_ABORT:
+ kcs_bmc_ipmi_force_abort(priv);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+ return ret;
+}
+
+static int kcs_bmc_ipmi_release(struct inode *inode, struct file *filp)
+{
+ struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
+
+ kcs_bmc_ipmi_force_abort(priv);
+ kcs_bmc_disable_device(priv->client.dev, &priv->client);
+
+ return 0;
+}
+
+static const struct file_operations kcs_bmc_ipmi_fops = {
+ .owner = THIS_MODULE,
+ .open = kcs_bmc_ipmi_open,
+ .read = kcs_bmc_ipmi_read,
+ .write = kcs_bmc_ipmi_write,
+ .release = kcs_bmc_ipmi_release,
+ .poll = kcs_bmc_ipmi_poll,
+ .unlocked_ioctl = kcs_bmc_ipmi_ioctl,
+};
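+
+/*
+ * An illustrative (hypothetical) userspace loop for this character device,
+ * assuming channel 1 so that the node is named /dev/ipmi-kcs1:
+ *
+ *	int fd = open("/dev/ipmi-kcs1", O_RDWR);
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *	unsigned char req[1000], rsp[1000];
+ *
+ *	while (poll(&pfd, 1, -1) > 0) {
+ *		ssize_t len = read(fd, req, sizeof(req));
+ *
+ *		if (len > 0) {
+ *			size_t n = handle_request(req, len, rsp);
+ *
+ *			write(fd, rsp, n);
+ *		}
+ *	}
+ *
+ * handle_request() is a placeholder for the IPMI message handler; the
+ * response written back must be at least 3 bytes (netfn + cmd + ccode) and
+ * at most KCS_MSG_BUFSIZ.
+ */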
+
+static DEFINE_SPINLOCK(kcs_bmc_ipmi_instances_lock);
+static LIST_HEAD(kcs_bmc_ipmi_instances);
+
+static int kcs_bmc_ipmi_add_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_ipmi *priv;
+ int rc;
+
+ priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+ mutex_init(&priv->mutex);
+
+ init_waitqueue_head(&priv->queue);
+
+ priv->client.dev = kcs_bmc;
+ priv->client.ops = &kcs_bmc_ipmi_client_ops;
+ priv->data_in = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+ priv->data_out = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+ priv->kbuffer = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+
+ priv->miscdev.minor = MISC_DYNAMIC_MINOR;
+ priv->miscdev.name = devm_kasprintf(kcs_bmc->dev, GFP_KERNEL, "%s%u", DEVICE_NAME,
+ kcs_bmc->channel);
+ if (!priv->data_in || !priv->data_out || !priv->kbuffer || !priv->miscdev.name)
+		return -ENOMEM;
+
+ priv->miscdev.fops = &kcs_bmc_ipmi_fops;
+
+ rc = misc_register(&priv->miscdev);
+ if (rc) {
+ dev_err(kcs_bmc->dev, "Unable to register device: %d\n", rc);
+ return rc;
+ }
+
+ spin_lock_irq(&kcs_bmc_ipmi_instances_lock);
+ list_add(&priv->entry, &kcs_bmc_ipmi_instances);
+ spin_unlock_irq(&kcs_bmc_ipmi_instances_lock);
+
+	dev_info(kcs_bmc->dev, "Initialised IPMI client for channel %d\n", kcs_bmc->channel);
+
+ return 0;
+}
+
+static int kcs_bmc_ipmi_remove_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_ipmi *priv = NULL, *pos;
+
+ spin_lock_irq(&kcs_bmc_ipmi_instances_lock);
+ list_for_each_entry(pos, &kcs_bmc_ipmi_instances, entry) {
+ if (pos->client.dev == kcs_bmc) {
+ priv = pos;
+ list_del(&pos->entry);
+ break;
+ }
+ }
+ spin_unlock_irq(&kcs_bmc_ipmi_instances_lock);
+
+ if (!priv)
+ return -ENODEV;
+
+ misc_deregister(&priv->miscdev);
+ kcs_bmc_disable_device(priv->client.dev, &priv->client);
+ devm_kfree(kcs_bmc->dev, priv->kbuffer);
+ devm_kfree(kcs_bmc->dev, priv->data_out);
+ devm_kfree(kcs_bmc->dev, priv->data_in);
+ devm_kfree(kcs_bmc->dev, priv);
+
+ return 0;
+}
+
+static const struct kcs_bmc_driver_ops kcs_bmc_ipmi_driver_ops = {
+ .add_device = kcs_bmc_ipmi_add_device,
+ .remove_device = kcs_bmc_ipmi_remove_device,
+};
+
+static struct kcs_bmc_driver kcs_bmc_ipmi_driver = {
+ .ops = &kcs_bmc_ipmi_driver_ops,
+};
+
+static int __init kcs_bmc_ipmi_init(void)
+{
+ kcs_bmc_register_driver(&kcs_bmc_ipmi_driver);
+
+ return 0;
+}
+module_init(kcs_bmc_ipmi_init);
+
+static void __exit kcs_bmc_ipmi_exit(void)
+{
+ kcs_bmc_unregister_driver(&kcs_bmc_ipmi_driver);
+}
+module_exit(kcs_bmc_ipmi_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_DESCRIPTION("KCS BMC to handle the IPMI request from system software");
diff --git a/drivers/char/ipmi/kcs_bmc_client.h b/drivers/char/ipmi/kcs_bmc_client.h
new file mode 100644
index 000000000..6fdcde0a7
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_client.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, IBM Corp. */
+
+#ifndef __KCS_BMC_CONSUMER_H__
+#define __KCS_BMC_CONSUMER_H__
+
+#include <linux/irqreturn.h>
+
+#include "kcs_bmc.h"
+
+struct kcs_bmc_driver_ops {
+ int (*add_device)(struct kcs_bmc_device *kcs_bmc);
+ int (*remove_device)(struct kcs_bmc_device *kcs_bmc);
+};
+
+struct kcs_bmc_driver {
+ struct list_head entry;
+
+ const struct kcs_bmc_driver_ops *ops;
+};
+
+struct kcs_bmc_client_ops {
+ irqreturn_t (*event)(struct kcs_bmc_client *client);
+};
+
+struct kcs_bmc_client {
+ const struct kcs_bmc_client_ops *ops;
+
+ struct kcs_bmc_device *dev;
+};
+
+void kcs_bmc_register_driver(struct kcs_bmc_driver *drv);
+void kcs_bmc_unregister_driver(struct kcs_bmc_driver *drv);
+
+int kcs_bmc_enable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client);
+void kcs_bmc_disable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client);
+
+void kcs_bmc_update_event_mask(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 events);
+
+u8 kcs_bmc_read_data(struct kcs_bmc_device *kcs_bmc);
+void kcs_bmc_write_data(struct kcs_bmc_device *kcs_bmc, u8 data);
+u8 kcs_bmc_read_status(struct kcs_bmc_device *kcs_bmc);
+void kcs_bmc_write_status(struct kcs_bmc_device *kcs_bmc, u8 data);
+void kcs_bmc_update_status(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 val);
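+
+/*
+ * A minimal client sketch (illustrative only; the IPMI and serio clients in
+ * this directory are the real examples). A client embeds struct
+ * kcs_bmc_client, implements the event callback and is bound to a device
+ * from its driver's add_device() hook:
+ *
+ *	static irqreturn_t example_event(struct kcs_bmc_client *client)
+ *	{
+ *		u8 status = kcs_bmc_read_status(client->dev);
+ *
+ *		if (!(status & KCS_BMC_STR_IBF))
+ *			return IRQ_NONE;
+ *
+ *		kcs_bmc_read_data(client->dev);
+ *		return IRQ_HANDLED;
+ *	}
+ *
+ *	static const struct kcs_bmc_client_ops example_client_ops = {
+ *		.event = example_event,
+ *	};
+ */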
+#endif
diff --git a/drivers/char/ipmi/kcs_bmc_device.h b/drivers/char/ipmi/kcs_bmc_device.h
new file mode 100644
index 000000000..17c572f25
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_device.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, IBM Corp. */
+
+#ifndef __KCS_BMC_DEVICE_H__
+#define __KCS_BMC_DEVICE_H__
+
+#include <linux/irqreturn.h>
+
+#include "kcs_bmc.h"
+
+struct kcs_bmc_device_ops {
+ void (*irq_mask_update)(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 enable);
+ u8 (*io_inputb)(struct kcs_bmc_device *kcs_bmc, u32 reg);
+ void (*io_outputb)(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 b);
+ void (*io_updateb)(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 b);
+};
+
+irqreturn_t kcs_bmc_handle_event(struct kcs_bmc_device *kcs_bmc);
+int kcs_bmc_add_device(struct kcs_bmc_device *kcs_bmc);
+void kcs_bmc_remove_device(struct kcs_bmc_device *kcs_bmc);
+
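+/*
+ * A minimal provider sketch (illustrative; kcs_bmc_npcm7xx.c is a complete
+ * one): a hardware driver embeds struct kcs_bmc_device, fills in ops and the
+ * register offsets, forwards its interrupt to kcs_bmc_handle_event() and
+ * publishes the device with kcs_bmc_add_device():
+ *
+ *	static irqreturn_t example_irq(int irq, void *arg)
+ *	{
+ *		struct kcs_bmc_device *kcs_bmc = arg;
+ *
+ *		return kcs_bmc_handle_event(kcs_bmc);
+ *	}
+ */
+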
+#endif
diff --git a/drivers/char/ipmi/kcs_bmc_npcm7xx.c b/drivers/char/ipmi/kcs_bmc_npcm7xx.c
new file mode 100644
index 000000000..7961fec56
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_npcm7xx.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, Nuvoton Corporation.
+ * Copyright (c) 2018, Intel Corporation.
+ */
+
+#define pr_fmt(fmt) "nuvoton-kcs-bmc: " fmt
+
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "kcs_bmc_device.h"
+
+#define DEVICE_NAME "npcm-kcs-bmc"
+#define KCS_CHANNEL_MAX 3
+
+#define KCS1ST 0x0C
+#define KCS2ST 0x1E
+#define KCS3ST 0x30
+
+#define KCS1DO 0x0E
+#define KCS2DO 0x20
+#define KCS3DO 0x32
+
+#define KCS1DI 0x10
+#define KCS2DI 0x22
+#define KCS3DI 0x34
+
+#define KCS1CTL 0x18
+#define KCS2CTL 0x2A
+#define KCS3CTL 0x3C
+#define KCS_CTL_IBFIE BIT(0)
+#define KCS_CTL_OBEIE BIT(1)
+
+#define KCS1IE 0x1C
+#define KCS2IE 0x2E
+#define KCS3IE 0x40
+#define KCS_IE_IRQE BIT(0)
+#define KCS_IE_HIRQE BIT(3)
+
+/*
+ * 7.2.4 Core KCS Registers
+ * Registers in this module are 8 bits. An 8-bit register must be accessed
+ * by an 8-bit read or write.
+ *
+ * sts: KCS Channel n Status Register (KCSnST).
+ * dob: KCS Channel n Data Out Buffer Register (KCSnDO).
+ * dib: KCS Channel n Data In Buffer Register (KCSnDI).
+ * ctl: KCS Channel n Control Register (KCSnCTL).
+ * ie : KCS Channel n Interrupt Enable Register (KCSnIE).
+ */
+struct npcm7xx_kcs_reg {
+ u32 sts;
+ u32 dob;
+ u32 dib;
+ u32 ctl;
+ u32 ie;
+};
+
+struct npcm7xx_kcs_bmc {
+ struct kcs_bmc_device kcs_bmc;
+
+ struct regmap *map;
+
+ const struct npcm7xx_kcs_reg *reg;
+};
+
+static const struct npcm7xx_kcs_reg npcm7xx_kcs_reg_tbl[KCS_CHANNEL_MAX] = {
+ { .sts = KCS1ST, .dob = KCS1DO, .dib = KCS1DI, .ctl = KCS1CTL, .ie = KCS1IE },
+ { .sts = KCS2ST, .dob = KCS2DO, .dib = KCS2DI, .ctl = KCS2CTL, .ie = KCS2IE },
+ { .sts = KCS3ST, .dob = KCS3DO, .dib = KCS3DI, .ctl = KCS3CTL, .ie = KCS3IE },
+};
+
+static inline struct npcm7xx_kcs_bmc *to_npcm7xx_kcs_bmc(struct kcs_bmc_device *kcs_bmc)
+{
+ return container_of(kcs_bmc, struct npcm7xx_kcs_bmc, kcs_bmc);
+}
+
+static u8 npcm7xx_kcs_inb(struct kcs_bmc_device *kcs_bmc, u32 reg)
+{
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
+ u32 val = 0;
+ int rc;
+
+ rc = regmap_read(priv->map, reg, &val);
+ WARN(rc != 0, "regmap_read() failed: %d\n", rc);
+
+ return rc == 0 ? (u8)val : 0;
+}
+
+static void npcm7xx_kcs_outb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 data)
+{
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
+ int rc;
+
+ rc = regmap_write(priv->map, reg, data);
+ WARN(rc != 0, "regmap_write() failed: %d\n", rc);
+}
+
+static void npcm7xx_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 data)
+{
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
+ int rc;
+
+ rc = regmap_update_bits(priv->map, reg, mask, data);
+ WARN(rc != 0, "regmap_update_bits() failed: %d\n", rc);
+}
+
+static void npcm7xx_kcs_enable_channel(struct kcs_bmc_device *kcs_bmc, bool enable)
+{
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
+
+ regmap_update_bits(priv->map, priv->reg->ie, KCS_IE_IRQE | KCS_IE_HIRQE,
+ enable ? KCS_IE_IRQE | KCS_IE_HIRQE : 0);
+}
+
+static void npcm7xx_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state)
+{
+ struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc);
+
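+	/*
+	 * !!(state & type) collapses to 0 or 1, so multiplying by the IE bit
+	 * yields either 0 or the bit itself: each interrupt-enable bit tracks
+	 * the requested state exactly.
+	 */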
+ if (mask & KCS_BMC_EVENT_TYPE_OBE)
+ regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_OBEIE,
+ !!(state & KCS_BMC_EVENT_TYPE_OBE) * KCS_CTL_OBEIE);
+
+ if (mask & KCS_BMC_EVENT_TYPE_IBF)
+ regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_IBFIE,
+ !!(state & KCS_BMC_EVENT_TYPE_IBF) * KCS_CTL_IBFIE);
+}
+
+static irqreturn_t npcm7xx_kcs_irq(int irq, void *arg)
+{
+ struct kcs_bmc_device *kcs_bmc = arg;
+
+ return kcs_bmc_handle_event(kcs_bmc);
+}
+
+static int npcm7xx_kcs_config_irq(struct kcs_bmc_device *kcs_bmc,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ return devm_request_irq(dev, irq, npcm7xx_kcs_irq, IRQF_SHARED,
+ dev_name(dev), kcs_bmc);
+}
+
+static const struct kcs_bmc_device_ops npcm7xx_kcs_ops = {
+ .irq_mask_update = npcm7xx_kcs_irq_mask_update,
+ .io_inputb = npcm7xx_kcs_inb,
+ .io_outputb = npcm7xx_kcs_outb,
+ .io_updateb = npcm7xx_kcs_updateb,
+};
+
+static int npcm7xx_kcs_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct npcm7xx_kcs_bmc *priv;
+ struct kcs_bmc_device *kcs_bmc;
+ u32 chan;
+ int rc;
+
+ rc = of_property_read_u32(dev->of_node, "kcs_chan", &chan);
+ if (rc != 0 || chan == 0 || chan > KCS_CHANNEL_MAX) {
+ dev_err(dev, "no valid 'kcs_chan' configured\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->map = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(priv->map)) {
+ dev_err(dev, "Couldn't get regmap\n");
+ return -ENODEV;
+ }
+ priv->reg = &npcm7xx_kcs_reg_tbl[chan - 1];
+
+ kcs_bmc = &priv->kcs_bmc;
+ kcs_bmc->dev = &pdev->dev;
+ kcs_bmc->channel = chan;
+ kcs_bmc->ioreg.idr = priv->reg->dib;
+ kcs_bmc->ioreg.odr = priv->reg->dob;
+ kcs_bmc->ioreg.str = priv->reg->sts;
+ kcs_bmc->ops = &npcm7xx_kcs_ops;
+
+ platform_set_drvdata(pdev, priv);
+
+ rc = npcm7xx_kcs_config_irq(kcs_bmc, pdev);
+ if (rc)
+ return rc;
+
+ npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
+ npcm7xx_kcs_enable_channel(kcs_bmc, true);
+
+ rc = kcs_bmc_add_device(kcs_bmc);
+ if (rc) {
+ dev_warn(&pdev->dev, "Failed to register channel %d: %d\n", kcs_bmc->channel, rc);
+ return rc;
+ }
+
+ pr_info("channel=%u idr=0x%x odr=0x%x str=0x%x\n",
+ chan,
+ kcs_bmc->ioreg.idr, kcs_bmc->ioreg.odr, kcs_bmc->ioreg.str);
+
+ return 0;
+}
+
+static int npcm7xx_kcs_remove(struct platform_device *pdev)
+{
+ struct npcm7xx_kcs_bmc *priv = platform_get_drvdata(pdev);
+ struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc;
+
+ kcs_bmc_remove_device(kcs_bmc);
+
+ npcm7xx_kcs_enable_channel(kcs_bmc, false);
+ npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
+
+ return 0;
+}
+
+static const struct of_device_id npcm_kcs_bmc_match[] = {
+ { .compatible = "nuvoton,npcm750-kcs-bmc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, npcm_kcs_bmc_match);
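+
+/*
+ * An illustrative device tree fragment (the node name and interrupt
+ * specifier are hypothetical; "compatible" and "kcs_chan" are what the
+ * driver reads). The node must sit under the syscon parent whose regmap
+ * provides the KCS registers:
+ *
+ *	kcs1: kcs@0 {
+ *		compatible = "nuvoton,npcm750-kcs-bmc";
+ *		kcs_chan = <1>;
+ *		interrupts = <...>;
+ *	};
+ */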
+
+static struct platform_driver npcm_kcs_bmc_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = npcm_kcs_bmc_match,
+ },
+ .probe = npcm7xx_kcs_probe,
+ .remove = npcm7xx_kcs_remove,
+};
+module_platform_driver(npcm_kcs_bmc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Avi Fishman <avifishman70@gmail.com>");
+MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_DESCRIPTION("NPCM7xx device interface to the KCS BMC device");
diff --git a/drivers/char/ipmi/kcs_bmc_serio.c b/drivers/char/ipmi/kcs_bmc_serio.c
new file mode 100644
index 000000000..1793358be
--- /dev/null
+++ b/drivers/char/ipmi/kcs_bmc_serio.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (c) 2021 IBM Corp. */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sched/signal.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+
+#include "kcs_bmc_client.h"
+
+struct kcs_bmc_serio {
+ struct list_head entry;
+
+ struct kcs_bmc_client client;
+ struct serio *port;
+
+ spinlock_t lock;
+};
+
+static inline struct kcs_bmc_serio *client_to_kcs_bmc_serio(struct kcs_bmc_client *client)
+{
+ return container_of(client, struct kcs_bmc_serio, client);
+}
+
+static irqreturn_t kcs_bmc_serio_event(struct kcs_bmc_client *client)
+{
+ struct kcs_bmc_serio *priv;
+	irqreturn_t handled = IRQ_NONE;
+ u8 status;
+
+ priv = client_to_kcs_bmc_serio(client);
+
+ spin_lock(&priv->lock);
+
+ status = kcs_bmc_read_status(client->dev);
+
+ if (status & KCS_BMC_STR_IBF)
+ handled = serio_interrupt(priv->port, kcs_bmc_read_data(client->dev), 0);
+
+ spin_unlock(&priv->lock);
+
+ return handled;
+}
+
+static const struct kcs_bmc_client_ops kcs_bmc_serio_client_ops = {
+ .event = kcs_bmc_serio_event,
+};
+
+static int kcs_bmc_serio_open(struct serio *port)
+{
+ struct kcs_bmc_serio *priv = port->port_data;
+
+ return kcs_bmc_enable_device(priv->client.dev, &priv->client);
+}
+
+static void kcs_bmc_serio_close(struct serio *port)
+{
+ struct kcs_bmc_serio *priv = port->port_data;
+
+ kcs_bmc_disable_device(priv->client.dev, &priv->client);
+}
+
+static DEFINE_SPINLOCK(kcs_bmc_serio_instances_lock);
+static LIST_HEAD(kcs_bmc_serio_instances);
+
+static int kcs_bmc_serio_add_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_serio *priv;
+ struct serio *port;
+
+ priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Use kzalloc() as the allocation is cleaned up with kfree() via serio_unregister_port() */
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
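+	/*
+	 * KCS is specified as a keyboard-controller-style (i8042-like)
+	 * interface, so the port is presented to the serio core as an 8042
+	 * port.
+	 */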
+ port->id.type = SERIO_8042;
+ port->open = kcs_bmc_serio_open;
+ port->close = kcs_bmc_serio_close;
+ port->port_data = priv;
+ port->dev.parent = kcs_bmc->dev;
+
+ spin_lock_init(&priv->lock);
+ priv->port = port;
+ priv->client.dev = kcs_bmc;
+ priv->client.ops = &kcs_bmc_serio_client_ops;
+
+ spin_lock_irq(&kcs_bmc_serio_instances_lock);
+ list_add(&priv->entry, &kcs_bmc_serio_instances);
+ spin_unlock_irq(&kcs_bmc_serio_instances_lock);
+
+ serio_register_port(port);
+
+	dev_info(kcs_bmc->dev, "Initialised serio client for channel %d\n", kcs_bmc->channel);
+
+ return 0;
+}
+
+static int kcs_bmc_serio_remove_device(struct kcs_bmc_device *kcs_bmc)
+{
+ struct kcs_bmc_serio *priv = NULL, *pos;
+
+ spin_lock_irq(&kcs_bmc_serio_instances_lock);
+ list_for_each_entry(pos, &kcs_bmc_serio_instances, entry) {
+ if (pos->client.dev == kcs_bmc) {
+ priv = pos;
+ list_del(&pos->entry);
+ break;
+ }
+ }
+ spin_unlock_irq(&kcs_bmc_serio_instances_lock);
+
+ if (!priv)
+ return -ENODEV;
+
+ /* kfree()s priv->port via put_device() */
+ serio_unregister_port(priv->port);
+
+ /* Ensure the IBF IRQ is disabled if we were the active client */
+ kcs_bmc_disable_device(kcs_bmc, &priv->client);
+
+ devm_kfree(priv->client.dev->dev, priv);
+
+ return 0;
+}
+
+static const struct kcs_bmc_driver_ops kcs_bmc_serio_driver_ops = {
+ .add_device = kcs_bmc_serio_add_device,
+ .remove_device = kcs_bmc_serio_remove_device,
+};
+
+static struct kcs_bmc_driver kcs_bmc_serio_driver = {
+ .ops = &kcs_bmc_serio_driver_ops,
+};
+
+static int __init kcs_bmc_serio_init(void)
+{
+ kcs_bmc_register_driver(&kcs_bmc_serio_driver);
+
+ return 0;
+}
+module_init(kcs_bmc_serio_init);
+
+static void __exit kcs_bmc_serio_exit(void)
+{
+ kcs_bmc_unregister_driver(&kcs_bmc_serio_driver);
+}
+module_exit(kcs_bmc_serio_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_DESCRIPTION("Adapter driver for serio access to BMC KCS devices");