[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/kernel/linux/v4.14/drivers/char/ipmi/Kconfig b/src/kernel/linux/v4.14/drivers/char/ipmi/Kconfig
new file mode 100644
index 0000000..f6fa056
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/char/ipmi/Kconfig
@@ -0,0 +1,91 @@
+#
+# IPMI device configuration
+#
+
+menuconfig IPMI_HANDLER
+	tristate 'IPMI top-level message handler'
+	depends on HAS_IOMEM
+	select IPMI_DMI_DECODE if DMI
+	help
+	  This enables the central IPMI message handler, required for IPMI
+	  to work.
+
+	  IPMI is a standard for managing sensors (temperature,
+	  voltage, etc.) in a system.
+
+	  See <file:Documentation/IPMI.txt> for more details on the driver.
+
+	  If unsure, say N.
+
+config IPMI_DMI_DECODE
+	bool
+
+if IPMI_HANDLER
+
+config IPMI_PANIC_EVENT
+	bool 'Generate a panic event to all BMCs on a panic'
+	help
+	  When a panic occurs, this will cause the IPMI message handler to
+	  generate an IPMI event describing the panic to each interface
+	  registered with the message handler.
+
+config IPMI_PANIC_STRING
+	bool 'Generate OEM events containing the panic string'
+	depends on IPMI_PANIC_EVENT
+	help
+	  When a panic occurs, this will cause the IPMI message handler to
+	  generate IPMI OEM type f0 events holding the IPMB address of the
+	  panic generator (byte 4 of the event), a sequence number for the
+	  string (byte 5 of the event) and part of the string (the rest of the
+	  event).  Bytes 1, 2, and 3 are the normal usage for an OEM event.
+	  You can fetch these events and use the sequence numbers to piece the
+	  string together.
+
+config IPMI_DEVICE_INTERFACE
+	tristate 'Device interface for IPMI'
+	help
+	  This provides an IOCTL interface to the IPMI message handler so
+	  userland processes may use IPMI.  It supports poll() and select().
+
+config IPMI_SI
+	tristate 'IPMI System Interface handler'
+	help
+	  Provides a driver for System Interfaces (KCS, SMIC, BT), all of
+	  which are built into this module.  If you are using IPMI, you
+	  should probably say "y" here.
+
+config IPMI_SSIF
+	tristate 'IPMI SMBus handler (SSIF)'
+	select I2C
+	help
+	  Provides a driver for an SMBus interface to a BMC, meaning that
+	  the BMC is accessed over an I2C bus instead of a standard system
+	  interface.  This module requires I2C support.
+
+config IPMI_POWERNV
+	depends on PPC_POWERNV
+	tristate 'POWERNV (OPAL firmware) IPMI interface'
+	help
+	  Provides a driver for OPAL firmware-based IPMI interfaces.
+
+config IPMI_WATCHDOG
+	tristate 'IPMI Watchdog Timer'
+	help
+	  This enables the IPMI watchdog timer.
+
+config IPMI_POWEROFF
+	tristate 'IPMI Poweroff'
+	help
+	  This enables a function to power off the system with IPMI if
+	  the IPMI management controller is capable of this.
+
+endif # IPMI_HANDLER
+
+config ASPEED_BT_IPMI_BMC
+	depends on ARCH_ASPEED || COMPILE_TEST
+	depends on REGMAP && REGMAP_MMIO && MFD_SYSCON
+	tristate "BT IPMI bmc driver"
+	help
+	  Provides a driver for the BT (Block Transfer) IPMI interface
+	  found on Aspeed SoCs (AST2400 and AST2500).  The driver
+	  implements the BMC side of the BT interface.
diff --git a/src/kernel/linux/v4.14/drivers/char/ipmi/Makefile b/src/kernel/linux/v4.14/drivers/char/ipmi/Makefile
new file mode 100644
index 0000000..43b7d86
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/char/ipmi/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the ipmi drivers.
+#
+
+ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
+
+obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
+obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
+obj-$(CONFIG_IPMI_SI) += ipmi_si.o
+obj-$(CONFIG_IPMI_DMI_DECODE) += ipmi_dmi.o
+obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
+obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
+obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
+obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
+obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o
diff --git a/src/kernel/linux/v4.14/drivers/char/ipmi/bt-bmc.c b/src/kernel/linux/v4.14/drivers/char/ipmi/bt-bmc.c
new file mode 100644
index 0000000..70d434b
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/char/ipmi/bt-bmc.c
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2015-2016, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bt-bmc.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/regmap.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+
+/*
+ * This is the BMC-side device used to communicate with the host
+ */
+#define DEVICE_NAME	"ipmi-bt-host"
+
+#define BT_IO_BASE	0xe4
+#define BT_IRQ		10
+
+#define BT_CR0		0x0
+#define   BT_CR0_IO_BASE		16
+#define   BT_CR0_IRQ			12
+#define   BT_CR0_EN_CLR_SLV_RDP		0x8
+#define   BT_CR0_EN_CLR_SLV_WRP		0x4
+#define   BT_CR0_ENABLE_IBT		0x1
+#define BT_CR1		0x4
+#define   BT_CR1_IRQ_H2B	0x01
+#define   BT_CR1_IRQ_HBUSY	0x40
+#define BT_CR2		0x8
+#define   BT_CR2_IRQ_H2B	0x01
+#define   BT_CR2_IRQ_HBUSY	0x40
+#define BT_CR3		0xc
+#define BT_CTRL		0x10
+#define   BT_CTRL_B_BUSY		0x80
+#define   BT_CTRL_H_BUSY		0x40
+#define   BT_CTRL_OEM0			0x20
+#define   BT_CTRL_SMS_ATN		0x10
+#define   BT_CTRL_B2H_ATN		0x08
+#define   BT_CTRL_H2B_ATN		0x04
+#define   BT_CTRL_CLR_RD_PTR		0x02
+#define   BT_CTRL_CLR_WR_PTR		0x01
+#define BT_BMC2HOST	0x14
+#define BT_INTMASK	0x18
+#define   BT_INTMASK_B2H_IRQEN		0x01
+#define   BT_INTMASK_B2H_IRQ		0x02
+#define   BT_INTMASK_BMC_HWRST		0x80
+
+#define BT_BMC_BUFFER_SIZE 256
+
+struct bt_bmc {
+	struct device		dev;
+	struct miscdevice	miscdev;
+	struct regmap		*map;
+	int			offset;
+	int			irq;
+	wait_queue_head_t	queue;
+	struct timer_list	poll_timer;
+	struct mutex		mutex;
+};
+
+static atomic_t open_count = ATOMIC_INIT(0);
+
+static const struct regmap_config bt_regmap_cfg = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+};
+
+static u8 bt_inb(struct bt_bmc *bt_bmc, int reg)
+{
+	uint32_t val = 0;
+	int rc;
+
+	rc = regmap_read(bt_bmc->map, bt_bmc->offset + reg, &val);
+	WARN(rc != 0, "regmap_read() failed: %d\n", rc);
+
+	return rc == 0 ? (u8) val : 0;
+}
+
+static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg)
+{
+	int rc;
+
+	rc = regmap_write(bt_bmc->map, bt_bmc->offset + reg, data);
+	WARN(rc != 0, "regmap_write() failed: %d\n", rc);
+}
+
+static void clr_rd_ptr(struct bt_bmc *bt_bmc)
+{
+	bt_outb(bt_bmc, BT_CTRL_CLR_RD_PTR, BT_CTRL);
+}
+
+static void clr_wr_ptr(struct bt_bmc *bt_bmc)
+{
+	bt_outb(bt_bmc, BT_CTRL_CLR_WR_PTR, BT_CTRL);
+}
+
+static void clr_h2b_atn(struct bt_bmc *bt_bmc)
+{
+	bt_outb(bt_bmc, BT_CTRL_H2B_ATN, BT_CTRL);
+}
+
+static void set_b_busy(struct bt_bmc *bt_bmc)
+{
+	if (!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY))
+		bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void clr_b_busy(struct bt_bmc *bt_bmc)
+{
+	if (bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY)
+		bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void set_b2h_atn(struct bt_bmc *bt_bmc)
+{
+	bt_outb(bt_bmc, BT_CTRL_B2H_ATN, BT_CTRL);
+}
+
+static u8 bt_read(struct bt_bmc *bt_bmc)
+{
+	return bt_inb(bt_bmc, BT_BMC2HOST);
+}
+
+static ssize_t bt_readn(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+	int i;
+
+	for (i = 0; i < n; i++)
+		buf[i] = bt_read(bt_bmc);
+	return n;
+}
+
+static void bt_write(struct bt_bmc *bt_bmc, u8 c)
+{
+	bt_outb(bt_bmc, c, BT_BMC2HOST);
+}
+
+static ssize_t bt_writen(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+	int i;
+
+	for (i = 0; i < n; i++)
+		bt_write(bt_bmc, buf[i]);
+	return n;
+}
+
+static void set_sms_atn(struct bt_bmc *bt_bmc)
+{
+	bt_outb(bt_bmc, BT_CTRL_SMS_ATN, BT_CTRL);
+}
+
+static struct bt_bmc *file_bt_bmc(struct file *file)
+{
+	return container_of(file->private_data, struct bt_bmc, miscdev);
+}
+
+static int bt_bmc_open(struct inode *inode, struct file *file)
+{
+	struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+	if (atomic_inc_return(&open_count) == 1) {
+		clr_b_busy(bt_bmc);
+		return 0;
+	}
+
+	atomic_dec(&open_count);
+	return -EBUSY;
+}
+
+/*
+ * The BT (Block Transfer) interface means that entire messages are
+ * buffered by the host before a notification is sent to the BMC that
+ * there is data to be read. The first byte is the length and the
+ * message data follows. The read operation just tries to capture the
+ * whole message before returning it to userspace.
+ *
+ * BT Message format :
+ *
+ *    Byte 1  Byte 2     Byte 3  Byte 4  Byte 5:N
+ *    Length  NetFn/LUN  Seq     Cmd     Data
+ *
+ */
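+/*
+ * Illustrative example (hypothetical bytes, not captured traffic): a
+ * host sending Get Device ID (NetFn 0x06, LUN 0, Cmd 0x01, no data)
+ * with sequence number 0x01 would appear in the buffer as:
+ *
+ *    0x03    0x18       0x01    0x01
+ *    Length  NetFn/LUN  Seq     Cmd
+ *
+ * Length counts the NetFn/LUN, Seq and Cmd bytes but not itself, which
+ * is why the read path below copies len + 1 bytes back to userspace.
+ */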
+static ssize_t bt_bmc_read(struct file *file, char __user *buf,
+			   size_t count, loff_t *ppos)
+{
+	struct bt_bmc *bt_bmc = file_bt_bmc(file);
+	u8 len;
+	int len_byte = 1;
+	u8 kbuffer[BT_BMC_BUFFER_SIZE];
+	ssize_t ret = 0;
+	ssize_t nread;
+
+	if (!access_ok(VERIFY_WRITE, buf, count))
+		return -EFAULT;
+
+	WARN_ON(*ppos);
+
+	/* a zero-length read cannot even return the length byte */
+	if (!count)
+		return 0;
+
+	if (wait_event_interruptible(bt_bmc->queue,
+				     bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))
+		return -ERESTARTSYS;
+
+	mutex_lock(&bt_bmc->mutex);
+
+	if (unlikely(!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))) {
+		ret = -EIO;
+		goto out_unlock;
+	}
+
+	set_b_busy(bt_bmc);
+	clr_h2b_atn(bt_bmc);
+	clr_rd_ptr(bt_bmc);
+
+	/*
+	 * The BT frames start with the message length, which does not
+	 * include the length byte.
+	 */
+	kbuffer[0] = bt_read(bt_bmc);
+	len = kbuffer[0];
+
+	/* We pass the length back to userspace as well */
+	if (len + 1 > count)
+		len = count - 1;
+
+	while (len) {
+		nread = min_t(ssize_t, len, sizeof(kbuffer) - len_byte);
+
+		bt_readn(bt_bmc, kbuffer + len_byte, nread);
+
+		if (copy_to_user(buf, kbuffer, nread + len_byte)) {
+			ret = -EFAULT;
+			break;
+		}
+		len -= nread;
+		buf += nread + len_byte;
+		ret += nread + len_byte;
+		len_byte = 0;
+	}
+
+	clr_b_busy(bt_bmc);
+
+out_unlock:
+	mutex_unlock(&bt_bmc->mutex);
+	return ret;
+}
+
+/*
+ * BT Message response format :
+ *
+ *    Byte 1  Byte 2     Byte 3  Byte 4  Byte 5  Byte 6:N
+ *    Length  NetFn/LUN  Seq     Cmd     Code    Data
+ */
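+/*
+ * Continuing the hypothetical example above: a successful response
+ * carrying a single data byte 0x20 would be written by userspace as:
+ *
+ *    0x05    0x1C       0x01    0x01    0x00    0x20
+ *    Length  NetFn/LUN  Seq     Cmd     Code    Data
+ *
+ * where the NetFn is the request's NetFn + 1 (0x07 << 2 == 0x1C) and
+ * Code 0x00 is the "command completed normally" completion code.
+ */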
+static ssize_t bt_bmc_write(struct file *file, const char __user *buf,
+			    size_t count, loff_t *ppos)
+{
+	struct bt_bmc *bt_bmc = file_bt_bmc(file);
+	u8 kbuffer[BT_BMC_BUFFER_SIZE];
+	ssize_t ret = 0;
+	ssize_t nwritten;
+
+	/*
+	 * Require at least the minimum response size: length, NetFn/LUN,
+	 * seq, cmd and completion code
+	 */
+	if (count < 5)
+		return -EINVAL;
+
+	if (!access_ok(VERIFY_READ, buf, count))
+		return -EFAULT;
+
+	WARN_ON(*ppos);
+
+	/*
+	 * There's no interrupt for clearing bmc busy so we have to
+	 * poll
+	 */
+	if (wait_event_interruptible(bt_bmc->queue,
+				     !(bt_inb(bt_bmc, BT_CTRL) &
+				       (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))))
+		return -ERESTARTSYS;
+
+	mutex_lock(&bt_bmc->mutex);
+
+	if (unlikely(bt_inb(bt_bmc, BT_CTRL) &
+		     (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) {
+		ret = -EIO;
+		goto out_unlock;
+	}
+
+	clr_wr_ptr(bt_bmc);
+
+	while (count) {
+		nwritten = min_t(ssize_t, count, sizeof(kbuffer));
+		if (copy_from_user(&kbuffer, buf, nwritten)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		bt_writen(bt_bmc, kbuffer, nwritten);
+
+		count -= nwritten;
+		buf += nwritten;
+		ret += nwritten;
+	}
+
+	set_b2h_atn(bt_bmc);
+
+out_unlock:
+	mutex_unlock(&bt_bmc->mutex);
+	return ret;
+}
+
+static long bt_bmc_ioctl(struct file *file, unsigned int cmd,
+			 unsigned long param)
+{
+	struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+	switch (cmd) {
+	case BT_BMC_IOCTL_SMS_ATN:
+		set_sms_atn(bt_bmc);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int bt_bmc_release(struct inode *inode, struct file *file)
+{
+	struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+	atomic_dec(&open_count);
+	set_b_busy(bt_bmc);
+	return 0;
+}
+
+static unsigned int bt_bmc_poll(struct file *file, poll_table *wait)
+{
+	struct bt_bmc *bt_bmc = file_bt_bmc(file);
+	unsigned int mask = 0;
+	u8 ctrl;
+
+	poll_wait(file, &bt_bmc->queue, wait);
+
+	ctrl = bt_inb(bt_bmc, BT_CTRL);
+
+	if (ctrl & BT_CTRL_H2B_ATN)
+		mask |= POLLIN;
+
+	if (!(ctrl & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN)))
+		mask |= POLLOUT;
+
+	return mask;
+}
+
+static const struct file_operations bt_bmc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= bt_bmc_open,
+	.read		= bt_bmc_read,
+	.write		= bt_bmc_write,
+	.release	= bt_bmc_release,
+	.poll		= bt_bmc_poll,
+	.unlocked_ioctl	= bt_bmc_ioctl,
+};
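+
+/*
+ * Minimal userspace sketch (illustrative only, not part of this
+ * driver): a BMC-side daemon would typically open the miscdevice node
+ * registered below, wait for a request with poll(), read it, then
+ * write back a response:
+ *
+ *	int fd = open("/dev/ipmi-bt-host", O_RDWR);
+ *	unsigned char req[256], rsp[256];
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *
+ *	poll(&pfd, 1, -1);               // wait for H2B_ATN
+ *	read(fd, req, sizeof(req));      // req[0] holds the length byte
+ *	// ... build rsp: NetFn + 1, same seq, a completion code ...
+ *	write(fd, rsp, rsp[0] + 1);      // raises B2H_ATN for the host
+ *	ioctl(fd, BT_BMC_IOCTL_SMS_ATN); // optionally assert SMS_ATN
+ */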
+
+static void poll_timer(unsigned long data)
+{
+	struct bt_bmc *bt_bmc = (void *)data;
+
+	bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
+	wake_up(&bt_bmc->queue);
+	add_timer(&bt_bmc->poll_timer);
+}
+
+static irqreturn_t bt_bmc_irq(int irq, void *arg)
+{
+	struct bt_bmc *bt_bmc = arg;
+	u32 reg;
+	int rc;
+
+	rc = regmap_read(bt_bmc->map, bt_bmc->offset + BT_CR2, &reg);
+	if (rc)
+		return IRQ_NONE;
+
+	reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY;
+	if (!reg)
+		return IRQ_NONE;
+
+	/* ack pending IRQs */
+	regmap_write(bt_bmc->map, bt_bmc->offset + BT_CR2, reg);
+
+	wake_up(&bt_bmc->queue);
+	return IRQ_HANDLED;
+}
+
+static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
+			     struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int rc;
+
+	bt_bmc->irq = platform_get_irq(pdev, 0);
+	if (bt_bmc->irq < 0) {
+		/* probe falls back to the poll timer when irq stays 0 */
+		bt_bmc->irq = 0;
+		return -ENODEV;
+	}
+
+	rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED,
+			      DEVICE_NAME, bt_bmc);
+	if (rc < 0) {
+		dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq);
+		bt_bmc->irq = 0;
+		return rc;
+	}
+
+	/*
+	 * Configure IRQs on the BMC, clearing the H2B and HBUSY bits;
+	 * H2B will be asserted when the host has data for us; HBUSY
+	 * will be cleared (along with B2H) when we can write the next
+	 * message to the BT buffer
+	 */
+	rc = regmap_update_bits(bt_bmc->map, bt_bmc->offset + BT_CR1,
+				(BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY),
+				(BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY));
+
+	return rc;
+}
+
+static int bt_bmc_probe(struct platform_device *pdev)
+{
+	struct bt_bmc *bt_bmc;
+	struct device *dev;
+	int rc;
+
+	if (!pdev || !pdev->dev.of_node)
+		return -ENODEV;
+
+	dev = &pdev->dev;
+	dev_info(dev, "Found bt bmc device\n");
+
+	bt_bmc = devm_kzalloc(dev, sizeof(*bt_bmc), GFP_KERNEL);
+	if (!bt_bmc)
+		return -ENOMEM;
+
+	dev_set_drvdata(&pdev->dev, bt_bmc);
+
+	bt_bmc->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
+	if (IS_ERR(bt_bmc->map)) {
+		struct resource *res;
+		void __iomem *base;
+
+		/*
+		 * Assume it's not the MFD-based devicetree description, in
+		 * which case generate a regmap ourselves
+		 */
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		base = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(base))
+			return PTR_ERR(base);
+
+		bt_bmc->map = devm_regmap_init_mmio(dev, base, &bt_regmap_cfg);
+		if (IS_ERR(bt_bmc->map))
+			return PTR_ERR(bt_bmc->map);
+		bt_bmc->offset = 0;
+	} else {
+		rc = of_property_read_u32(dev->of_node, "reg", &bt_bmc->offset);
+		if (rc)
+			return rc;
+	}
+
+	mutex_init(&bt_bmc->mutex);
+	init_waitqueue_head(&bt_bmc->queue);
+
+	bt_bmc->miscdev.minor	= MISC_DYNAMIC_MINOR;
+	bt_bmc->miscdev.name	= DEVICE_NAME;
+	bt_bmc->miscdev.fops	= &bt_bmc_fops;
+	bt_bmc->miscdev.parent	= dev;
+	rc = misc_register(&bt_bmc->miscdev);
+	if (rc) {
+		dev_err(dev, "Unable to register misc device\n");
+		return rc;
+	}
+
+	bt_bmc_config_irq(bt_bmc, pdev);
+
+	if (bt_bmc->irq) {
+		dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
+	} else {
+		dev_info(dev, "No IRQ; using timer\n");
+		setup_timer(&bt_bmc->poll_timer, poll_timer,
+			    (unsigned long)bt_bmc);
+		bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10);
+		add_timer(&bt_bmc->poll_timer);
+	}
+
+	regmap_write(bt_bmc->map, bt_bmc->offset + BT_CR0,
+		     (BT_IO_BASE << BT_CR0_IO_BASE) |
+		     (BT_IRQ << BT_CR0_IRQ) |
+		     BT_CR0_EN_CLR_SLV_RDP |
+		     BT_CR0_EN_CLR_SLV_WRP |
+		     BT_CR0_ENABLE_IBT);
+
+	clr_b_busy(bt_bmc);
+
+	return 0;
+}
+
+static int bt_bmc_remove(struct platform_device *pdev)
+{
+	struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
+
+	misc_deregister(&bt_bmc->miscdev);
+	if (!bt_bmc->irq)
+		del_timer_sync(&bt_bmc->poll_timer);
+	return 0;
+}
+
+static const struct of_device_id bt_bmc_match[] = {
+	{ .compatible = "aspeed,ast2400-ibt-bmc" },
+	{ .compatible = "aspeed,ast2500-ibt-bmc" },
+	{ },
+};
+
+static struct platform_driver bt_bmc_driver = {
+	.driver = {
+		.name		= DEVICE_NAME,
+		.of_match_table = bt_bmc_match,
+	},
+	.probe = bt_bmc_probe,
+	.remove = bt_bmc_remove,
+};
+
+module_platform_driver(bt_bmc_driver);
+
+MODULE_DEVICE_TABLE(of, bt_bmc_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>");
+MODULE_DESCRIPTION("Linux device interface to the IPMI BT interface");
diff --git a/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_bt_sm.c b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_bt_sm.c
new file mode 100644
index 0000000..4835b58
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_bt_sm.c
@@ -0,0 +1,706 @@
+/*
+ *  ipmi_bt_sm.c
+ *
+ *  The state machine for an Open IPMI BT sub-driver under ipmi_si.c, part
+ *  of the driver architecture at http://sourceforge.net/projects/openipmi
+ *
+ *  Author:	Rocky Craig <first.last@hp.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.  */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi_msgdefs.h>		/* for completion codes */
+#include "ipmi_si_sm.h"
+
+#define BT_DEBUG_OFF	0	/* Used in production */
+#define BT_DEBUG_ENABLE	1	/* Generic messages */
+#define BT_DEBUG_MSG	2	/* Prints all request/response buffers */
+#define BT_DEBUG_STATES	4	/* Verbose look at state changes */
+/*
+ * BT_DEBUG_OFF must be zero to correspond to the default uninitialized
+ * value
+ */
+
+static int bt_debug; /* 0 == BT_DEBUG_OFF */
+
+module_param(bt_debug, int, 0644);
+MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
+
+/*
+ * Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
+ * and 64 byte buffers.  However, one HP implementation wants 255 bytes of
+ * buffer (with a documented message of 160 bytes) so go for the max.
+ * Since the Open IPMI architecture is single-message oriented at this
+ * stage, the queue depth of BT is of no concern.
+ */
+
+#define BT_NORMAL_TIMEOUT	5	/* seconds */
+#define BT_NORMAL_RETRY_LIMIT	2
+#define BT_RESET_DELAY		6	/* seconds after warm reset */
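+
+/*
+ * For reference (a sketch of the IPMI spec layout, not normative): the
+ * 8-byte result that BT_STATE_CAPABILITIES_END below pulls out of
+ * bt_get_result() after a Get BT Capabilities request (0x18 0x36) is:
+ *
+ *   BT_CAP[0] NetFn/LUN             BT_CAP[4] input buffer size
+ *   BT_CAP[1] Cmd (0x36)            BT_CAP[5] output buffer size
+ *   BT_CAP[2] completion code       BT_CAP[6] request-to-response secs
+ *   BT_CAP[3] outstanding requests  BT_CAP[7] recommended retries
+ */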
+
+/*
+ * States are written in chronological order and usually cover
+ * multiple rows of the state table discussion in the IPMI spec.
+ */
+
+enum bt_states {
+	BT_STATE_IDLE = 0,	/* Order is critical in this list */
+	BT_STATE_XACTION_START,
+	BT_STATE_WRITE_BYTES,
+	BT_STATE_WRITE_CONSUME,
+	BT_STATE_READ_WAIT,
+	BT_STATE_CLEAR_B2H,
+	BT_STATE_READ_BYTES,
+	BT_STATE_RESET1,	/* These must come last */
+	BT_STATE_RESET2,
+	BT_STATE_RESET3,
+	BT_STATE_RESTART,
+	BT_STATE_PRINTME,
+	BT_STATE_CAPABILITIES_BEGIN,
+	BT_STATE_CAPABILITIES_END,
+	BT_STATE_LONG_BUSY	/* BT doesn't get hosed :-) */
+};
+
+/*
+ * Macros seen at the end of state "case" blocks.  They help with legibility
+ * and debugging.
+ */
+
+#define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; }
+
+#define BT_SI_SM_RETURN(Y)   { last_printed = BT_STATE_PRINTME; return Y; }
+
+struct si_sm_data {
+	enum bt_states	state;
+	unsigned char	seq;		/* BT sequence number */
+	struct si_sm_io	*io;
+	unsigned char	write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
+	int		write_count;
+	unsigned char	read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
+	int		read_count;
+	int		truncated;
+	long		timeout;	/* microseconds countdown */
+	int		error_retries;	/* end of "common" fields */
+	int		nonzero_status;	/* hung BMCs stay all 0 */
+	enum bt_states	complete;	/* to divert the state machine */
+	int		BT_CAP_outreqs;
+	long		BT_CAP_req2rsp;
+	int		BT_CAP_retries;	/* Recommended retries */
+};
+
+#define BT_CLR_WR_PTR	0x01	/* See IPMI 1.5 table 11.6.4 */
+#define BT_CLR_RD_PTR	0x02
+#define BT_H2B_ATN	0x04
+#define BT_B2H_ATN	0x08
+#define BT_SMS_ATN	0x10
+#define BT_OEM0		0x20
+#define BT_H_BUSY	0x40
+#define BT_B_BUSY	0x80
+
+/*
+ * Some bits are toggled on each write: write once to set it, once
+ * more to clear it; writing a zero does nothing.  To absolutely
+ * clear it, check its state and write if set.  This avoids the "get
+ * current then use as mask" scheme to modify one bit.  Note that the
+ * variable "bt" is hardcoded into these macros.
+ */
+
+#define BT_STATUS	bt->io->inputb(bt->io, 0)
+#define BT_CONTROL(x)	bt->io->outputb(bt->io, 0, x)
+
+#define BMC2HOST	bt->io->inputb(bt->io, 1)
+#define HOST2BMC(x)	bt->io->outputb(bt->io, 1, x)
+
+#define BT_INTMASK_R	bt->io->inputb(bt->io, 2)
+#define BT_INTMASK_W(x)	bt->io->outputb(bt->io, 2, x)
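+
+/*
+ * A sketch of the toggle-bit idiom described above, used throughout the
+ * state machine below: to guarantee a bit such as H_BUSY ends up
+ * cleared, write it only when it currently reads back as set,
+ *
+ *	if (BT_STATUS & BT_H_BUSY)
+ *		BT_CONTROL(BT_H_BUSY);
+ *
+ * since writing a 1 toggles the bit and writing a 0 does nothing.
+ */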
+
+/*
+ * Convenience routines for debugging.  These are not multi-open safe!
+ * Note the macros have hardcoded variables in them.
+ */
+
+static char *state2txt(unsigned char state)
+{
+	switch (state) {
+	case BT_STATE_IDLE:		return("IDLE");
+	case BT_STATE_XACTION_START:	return("XACTION");
+	case BT_STATE_WRITE_BYTES:	return("WR_BYTES");
+	case BT_STATE_WRITE_CONSUME:	return("WR_CONSUME");
+	case BT_STATE_READ_WAIT:	return("RD_WAIT");
+	case BT_STATE_CLEAR_B2H:	return("CLEAR_B2H");
+	case BT_STATE_READ_BYTES:	return("RD_BYTES");
+	case BT_STATE_RESET1:		return("RESET1");
+	case BT_STATE_RESET2:		return("RESET2");
+	case BT_STATE_RESET3:		return("RESET3");
+	case BT_STATE_RESTART:		return("RESTART");
+	case BT_STATE_LONG_BUSY:	return("LONG_BUSY");
+	case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN");
+	case BT_STATE_CAPABILITIES_END:	return("CAP_END");
+	}
+	return("BAD STATE");
+}
+#define STATE2TXT state2txt(bt->state)
+
+static char *status2txt(unsigned char status)
+{
+	/*
+	 * This cannot be called by two threads at the same time and
+	 * the buffer is always consumed immediately, so the static is
+	 * safe to use.
+	 */
+	static char buf[40];
+
+	strcpy(buf, "[ ");
+	if (status & BT_B_BUSY)
+		strcat(buf, "B_BUSY ");
+	if (status & BT_H_BUSY)
+		strcat(buf, "H_BUSY ");
+	if (status & BT_OEM0)
+		strcat(buf, "OEM0 ");
+	if (status & BT_SMS_ATN)
+		strcat(buf, "SMS ");
+	if (status & BT_B2H_ATN)
+		strcat(buf, "B2H ");
+	if (status & BT_H2B_ATN)
+		strcat(buf, "H2B ");
+	strcat(buf, "]");
+	return buf;
+}
+#define STATUS2TXT status2txt(status)
+
+/* called externally at insmod time, and internally on cleanup */
+
+static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
+{
+	memset(bt, 0, sizeof(struct si_sm_data));
+	if (bt->io != io) {
+		/* external: one-time only things */
+		bt->io = io;
+		bt->seq = 0;
+	}
+	bt->state = BT_STATE_IDLE;	/* start here */
+	bt->complete = BT_STATE_IDLE;	/* end here */
+	bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC;
+	bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
+	/* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
+	return 3; /* We claim 3 bytes of space; ought to check SPMI table */
+}
+
+/* Jam a completion code (probably an error) into a response */
+
+static void force_result(struct si_sm_data *bt, unsigned char completion_code)
+{
+	bt->read_data[0] = 4;				/* # following bytes */
+	bt->read_data[1] = bt->write_data[1] | 4;	/* Odd NetFn/LUN */
+	bt->read_data[2] = bt->write_data[2];		/* seq (ignored) */
+	bt->read_data[3] = bt->write_data[3];		/* Command */
+	bt->read_data[4] = completion_code;
+	bt->read_count = 5;
+}
+
+/* The upper state machine starts here */
+
+static int bt_start_transaction(struct si_sm_data *bt,
+				unsigned char *data,
+				unsigned int size)
+{
+	unsigned int i;
+
+	if (size < 2)
+		return IPMI_REQ_LEN_INVALID_ERR;
+	if (size > IPMI_MAX_MSG_LENGTH)
+		return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+	if (bt->state == BT_STATE_LONG_BUSY)
+		return IPMI_NODE_BUSY_ERR;
+
+	if (bt->state != BT_STATE_IDLE)
+		return IPMI_NOT_IN_MY_STATE_ERR;
+
+	if (bt_debug & BT_DEBUG_MSG) {
+		printk(KERN_WARNING "BT: +++++++++++++++++ New command\n");
+		printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2);
+		for (i = 0; i < size; i++)
+			printk(KERN_CONT " %02x", data[i]);
+		printk(KERN_CONT "\n");
+	}
+	bt->write_data[0] = size + 1;	/* all data plus seq byte */
+	bt->write_data[1] = *data;	/* NetFn/LUN */
+	bt->write_data[2] = bt->seq++;
+	memcpy(bt->write_data + 3, data + 1, size - 1);
+	bt->write_count = size + 2;
+	bt->error_retries = 0;
+	bt->nonzero_status = 0;
+	bt->truncated = 0;
+	bt->state = BT_STATE_XACTION_START;
+	bt->timeout = bt->BT_CAP_req2rsp;
+	force_result(bt, IPMI_ERR_UNSPECIFIED);
+	return 0;
+}
+
+/*
+ * After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE
+ * it calls this.  Strip out the length and seq bytes.
+ */
+
+static int bt_get_result(struct si_sm_data *bt,
+			 unsigned char *data,
+			 unsigned int length)
+{
+	int i, msg_len;
+
+	msg_len = bt->read_count - 2;		/* account for length & seq */
+	if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) {
+		force_result(bt, IPMI_ERR_UNSPECIFIED);
+		msg_len = 3;
+	}
+	data[0] = bt->read_data[1];
+	data[1] = bt->read_data[3];
+	if (length < msg_len || bt->truncated) {
+		data[2] = IPMI_ERR_MSG_TRUNCATED;
+		msg_len = 3;
+	} else
+		memcpy(data + 2, bt->read_data + 4, msg_len - 2);
+
+	if (bt_debug & BT_DEBUG_MSG) {
+		printk(KERN_WARNING "BT: result %d bytes:", msg_len);
+		for (i = 0; i < msg_len; i++)
+			printk(" %02x", data[i]);
+		printk("\n");
+	}
+	return msg_len;
+}
+
+/* This bit's functionality is optional */
+#define BT_BMC_HWRST	0x80
+
+static void reset_flags(struct si_sm_data *bt)
+{
+	if (bt_debug)
+		printk(KERN_WARNING "IPMI BT: flag reset %s\n",
+					status2txt(BT_STATUS));
+	if (BT_STATUS & BT_H_BUSY)
+		BT_CONTROL(BT_H_BUSY);	/* force clear */
+	BT_CONTROL(BT_CLR_WR_PTR);	/* always reset */
+	BT_CONTROL(BT_SMS_ATN);		/* always clear */
+	BT_INTMASK_W(BT_BMC_HWRST);
+}
+
+/*
+ * Get rid of an unwanted/stale response.  This should only be needed for
+ * BMCs that support multiple outstanding requests.
+ */
+
+static void drain_BMC2HOST(struct si_sm_data *bt)
+{
+	int i, size;
+
+	if (!(BT_STATUS & BT_B2H_ATN)) 	/* Not signalling a response */
+		return;
+
+	BT_CONTROL(BT_H_BUSY);		/* now set */
+	BT_CONTROL(BT_B2H_ATN);		/* always clear */
+	BT_STATUS;			/* pause */
+	BT_CONTROL(BT_B2H_ATN);		/* some BMCs are stubborn */
+	BT_CONTROL(BT_CLR_RD_PTR);	/* always reset */
+	if (bt_debug)
+		printk(KERN_WARNING "IPMI BT: stale response %s; ",
+			status2txt(BT_STATUS));
+	size = BMC2HOST;
+	for (i = 0; i < size ; i++)
+		BMC2HOST;
+	BT_CONTROL(BT_H_BUSY);		/* now clear */
+	if (bt_debug)
+		printk("drained %d bytes\n", size + 1);
+}
+
+static inline void write_all_bytes(struct si_sm_data *bt)
+{
+	int i;
+
+	if (bt_debug & BT_DEBUG_MSG) {
+		printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
+			bt->write_count, bt->seq);
+		for (i = 0; i < bt->write_count; i++)
+			printk(" %02x", bt->write_data[i]);
+		printk("\n");
+	}
+	for (i = 0; i < bt->write_count; i++)
+		HOST2BMC(bt->write_data[i]);
+}
+
+static inline int read_all_bytes(struct si_sm_data *bt)
+{
+	unsigned int i;
+
+	/*
+	 * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
+	 * Keep layout of first four bytes aligned with write_data[]
+	 */
+
+	bt->read_data[0] = BMC2HOST;
+	bt->read_count = bt->read_data[0];
+
+	if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) {
+		if (bt_debug & BT_DEBUG_MSG)
+			printk(KERN_WARNING "BT: bad raw rsp len=%d\n",
+				bt->read_count);
+		bt->truncated = 1;
+		return 1;	/* let next XACTION START clean it up */
+	}
+	for (i = 1; i <= bt->read_count; i++)
+		bt->read_data[i] = BMC2HOST;
+	bt->read_count++;	/* Account internally for length byte */
+
+	if (bt_debug & BT_DEBUG_MSG) {
+		int max = bt->read_count;
+
+		printk(KERN_WARNING "BT: got %d bytes seq=0x%02X",
+			max, bt->read_data[2]);
+		if (max > 16)
+			max = 16;
+		for (i = 0; i < max; i++)
+			printk(KERN_CONT " %02x", bt->read_data[i]);
+		printk(KERN_CONT "%s\n", bt->read_count == max ? "" : " ...");
+	}
+
+	/* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */
+	if ((bt->read_data[3] == bt->write_data[3]) &&
+	    (bt->read_data[2] == bt->write_data[2]) &&
+	    ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8)))
+			return 1;
+
+	if (bt_debug & BT_DEBUG_MSG)
+		printk(KERN_WARNING "IPMI BT: bad packet: "
+		"want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n",
+		bt->write_data[1] | 0x04, bt->write_data[2], bt->write_data[3],
+		bt->read_data[1],  bt->read_data[2],  bt->read_data[3]);
+	return 0;
+}
+
+/* Restart if retries are left, or return an error completion code */
+
+static enum si_sm_result error_recovery(struct si_sm_data *bt,
+					unsigned char status,
+					unsigned char cCode)
+{
+	char *reason;
+
+	bt->timeout = bt->BT_CAP_req2rsp;
+
+	switch (cCode) {
+	case IPMI_TIMEOUT_ERR:
+		reason = "timeout";
+		break;
+	default:
+		reason = "internal error";
+		break;
+	}
+
+	printk(KERN_WARNING "IPMI BT: %s in %s %s ", 	/* open-ended line */
+		reason, STATE2TXT, STATUS2TXT);
+
+	/*
+	 * Per the IPMI spec, retries are based on the sequence number
+	 * known only to this module, so manage a restart here.
+	 */
+	(bt->error_retries)++;
+	if (bt->error_retries < bt->BT_CAP_retries) {
+		printk("%d retries left\n",
+			bt->BT_CAP_retries - bt->error_retries);
+		bt->state = BT_STATE_RESTART;
+		return SI_SM_CALL_WITHOUT_DELAY;
+	}
+
+	printk(KERN_WARNING "failed %d retries, sending error response\n",
+	       bt->BT_CAP_retries);
+	if (!bt->nonzero_status) {
+		printk(KERN_ERR "IPMI BT: stuck, try power cycle\n");
+	} else if (bt->seq <= (unsigned char)(bt->BT_CAP_retries & 0xFF)) {
+		/* this is most likely during insmod */
+		printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n");
+		bt->state = BT_STATE_RESET1;
+		return SI_SM_CALL_WITHOUT_DELAY;
+	}
+
+	/*
+	 * Concoct a useful error message, set up the next state, and
+	 * be done with this sequence.
+	 */
+
+	bt->state = BT_STATE_IDLE;
+	switch (cCode) {
+	case IPMI_TIMEOUT_ERR:
+		if (status & BT_B_BUSY) {
+			cCode = IPMI_NODE_BUSY_ERR;
+			bt->state = BT_STATE_LONG_BUSY;
+		}
+		break;
+	default:
+		break;
+	}
+	force_result(bt, cCode);
+	return SI_SM_TRANSACTION_COMPLETE;
+}
+
+/* Check status and (usually) take action and change this state machine. */
+
+static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
+{
+	unsigned char status, BT_CAP[8];
+	static enum bt_states last_printed = BT_STATE_PRINTME;
+	int i;
+
+	status = BT_STATUS;
+	bt->nonzero_status |= status;
+	if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) {
+		printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n",
+			STATE2TXT,
+			STATUS2TXT,
+			bt->timeout,
+			time);
+		last_printed = bt->state;
+	}
+
+	/*
+	 * Commands that time out may still (eventually) provide a response.
+	 * This stale response will get in the way of a new response so remove
+	 * it if possible (hopefully during IDLE).  Even if it comes up later
+	 * it will be rejected by its (now-forgotten) seq number.
+	 */
+
+	if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
+		drain_BMC2HOST(bt);
+		BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+	}
+
+	if ((bt->state != BT_STATE_IDLE) &&
+	    (bt->state <  BT_STATE_PRINTME)) {
+		/* check timeout */
+		bt->timeout -= time;
+		if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
+			return error_recovery(bt,
+					      status,
+					      IPMI_TIMEOUT_ERR);
+	}
+
+	switch (bt->state) {
+
+	/*
+	 * Idle state first checks for asynchronous messages from another
+	 * channel, then does some opportunistic housekeeping.
+	 */
+
+	case BT_STATE_IDLE:
+		if (status & BT_SMS_ATN) {
+			BT_CONTROL(BT_SMS_ATN);	/* clear it */
+			return SI_SM_ATTN;
+		}
+
+		if (status & BT_H_BUSY)		/* clear a leftover H_BUSY */
+			BT_CONTROL(BT_H_BUSY);
+
+		bt->timeout = bt->BT_CAP_req2rsp;
+
+		/* Read BT capabilities if it hasn't been done yet */
+		if (!bt->BT_CAP_outreqs)
+			BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
+					SI_SM_CALL_WITHOUT_DELAY);
+		BT_SI_SM_RETURN(SI_SM_IDLE);
+
+	case BT_STATE_XACTION_START:
+		if (status & (BT_B_BUSY | BT_H2B_ATN))
+			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+		if (BT_STATUS & BT_H_BUSY)
+			BT_CONTROL(BT_H_BUSY);	/* force clear */
+		BT_STATE_CHANGE(BT_STATE_WRITE_BYTES,
+				SI_SM_CALL_WITHOUT_DELAY);
+
+	case BT_STATE_WRITE_BYTES:
+		if (status & BT_H_BUSY)
+			BT_CONTROL(BT_H_BUSY);	/* clear */
+		BT_CONTROL(BT_CLR_WR_PTR);
+		write_all_bytes(bt);
+		BT_CONTROL(BT_H2B_ATN);	/* can clear too fast to catch */
+		BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME,
+				SI_SM_CALL_WITHOUT_DELAY);
+
+	case BT_STATE_WRITE_CONSUME:
+		if (status & (BT_B_BUSY | BT_H2B_ATN))
+			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+		BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+				SI_SM_CALL_WITHOUT_DELAY);
+
+	/* Spinning hard can suppress B2H_ATN and force a timeout */
+
+	case BT_STATE_READ_WAIT:
+		if (!(status & BT_B2H_ATN))
+			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+		BT_CONTROL(BT_H_BUSY);		/* set */
+
+		/*
+		 * Uncached, ordered writes should just proceed serially but
+		 * some BMCs don't clear B2H_ATN with one hit.  Fast-path a
+		 * workaround without too much penalty to the general case.
+		 */
+
+		BT_CONTROL(BT_B2H_ATN);		/* clear it to ACK the BMC */
+		BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
+				SI_SM_CALL_WITHOUT_DELAY);
+
+	case BT_STATE_CLEAR_B2H:
+		if (status & BT_B2H_ATN) {
+			/* keep hitting it */
+			BT_CONTROL(BT_B2H_ATN);
+			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+		}
+		BT_STATE_CHANGE(BT_STATE_READ_BYTES,
+				SI_SM_CALL_WITHOUT_DELAY);
+
+	case BT_STATE_READ_BYTES:
+		if (!(status & BT_H_BUSY))
+			/* check in case of retry */
+			BT_CONTROL(BT_H_BUSY);
+		BT_CONTROL(BT_CLR_RD_PTR);	/* start of BMC2HOST buffer */
+		i = read_all_bytes(bt);		/* true == packet seq match */
+		BT_CONTROL(BT_H_BUSY);		/* NOW clear */
+		if (!i) 			/* Not my message */
+			BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+					SI_SM_CALL_WITHOUT_DELAY);
+		bt->state = bt->complete;
+		return bt->state == BT_STATE_IDLE ?	/* where to next? */
+			SI_SM_TRANSACTION_COMPLETE :	/* normal */
+			SI_SM_CALL_WITHOUT_DELAY;	/* Startup magic */
+
+	case BT_STATE_LONG_BUSY:	/* For example: after FW update */
+		if (!(status & BT_B_BUSY)) {
+			reset_flags(bt);	/* next state is now IDLE */
+			bt_init_data(bt, bt->io);
+		}
+		return SI_SM_CALL_WITH_DELAY;	/* No repeat printing */
+
+	case BT_STATE_RESET1:
+		reset_flags(bt);
+		drain_BMC2HOST(bt);
+		BT_STATE_CHANGE(BT_STATE_RESET2,
+				SI_SM_CALL_WITH_DELAY);
+
+	case BT_STATE_RESET2:		/* Send a soft reset */
+		BT_CONTROL(BT_CLR_WR_PTR);
+		HOST2BMC(3);		/* number of bytes following */
+		HOST2BMC(0x18);		/* NetFn/LUN == Application, LUN 0 */
+		HOST2BMC(42);		/* Sequence number */
+		HOST2BMC(3);		/* Cmd == Soft reset */
+		BT_CONTROL(BT_H2B_ATN);
+		bt->timeout = BT_RESET_DELAY * USEC_PER_SEC;
+		BT_STATE_CHANGE(BT_STATE_RESET3,
+				SI_SM_CALL_WITH_DELAY);
+
+	case BT_STATE_RESET3:		/* Hold off everything for a bit */
+		if (bt->timeout > 0)
+			return SI_SM_CALL_WITH_DELAY;
+		drain_BMC2HOST(bt);
+		BT_STATE_CHANGE(BT_STATE_RESTART,
+				SI_SM_CALL_WITH_DELAY);
+
+	case BT_STATE_RESTART:		/* don't reset retries or seq! */
+		bt->read_count = 0;
+		bt->nonzero_status = 0;
+		bt->timeout = bt->BT_CAP_req2rsp;
+		BT_STATE_CHANGE(BT_STATE_XACTION_START,
+				SI_SM_CALL_WITH_DELAY);
+
+	/*
+	 * Get BT Capabilities, using timing of upper level state machine.
+	 * Set outreqs to prevent infinite loop on timeout.
+	 */
+	case BT_STATE_CAPABILITIES_BEGIN:
+		bt->BT_CAP_outreqs = 1;
+		{
+			unsigned char GetBT_CAP[] = { 0x18, 0x36 };
+			bt->state = BT_STATE_IDLE;
+			bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
+		}
+		bt->complete = BT_STATE_CAPABILITIES_END;
+		BT_STATE_CHANGE(BT_STATE_XACTION_START,
+				SI_SM_CALL_WITH_DELAY);
+
+	case BT_STATE_CAPABILITIES_END:
+		i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
+		bt_init_data(bt, bt->io);
+		if ((i == 8) && !BT_CAP[2]) {
+			bt->BT_CAP_outreqs = BT_CAP[3];
+			bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
+			bt->BT_CAP_retries = BT_CAP[7];
+		} else
+			printk(KERN_WARNING "IPMI BT: using default values\n");
+		if (!bt->BT_CAP_outreqs)
+			bt->BT_CAP_outreqs = 1;
+		printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
+			bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
+		bt->timeout = bt->BT_CAP_req2rsp;
+		return SI_SM_CALL_WITHOUT_DELAY;
+
+	default:	/* should never occur */
+		return error_recovery(bt,
+				      status,
+				      IPMI_ERR_UNSPECIFIED);
+	}
+	return SI_SM_CALL_WITH_DELAY;
+}
+
+static int bt_detect(struct si_sm_data *bt)
+{
+	/*
+	 * It's impossible for the BT status and interrupt registers to be
+	 * all 1's, (assuming a properly functioning, self-initialized BMC)
+	 * but that's what you get from reading a bogus address, so we
+	 * test that first.  The calling routine uses negative logic.
+	 */
+
+	if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
+		return 1;
+	reset_flags(bt);
+	return 0;
+}
+
+static void bt_cleanup(struct si_sm_data *bt)
+{
+}
+
+static int bt_size(void)
+{
+	return sizeof(struct si_sm_data);
+}
+
+const struct si_sm_handlers bt_smi_handlers = {
+	.init_data		= bt_init_data,
+	.start_transaction	= bt_start_transaction,
+	.get_result		= bt_get_result,
+	.event			= bt_event,
+	.detect			= bt_detect,
+	.cleanup		= bt_cleanup,
+	.size			= bt_size,
+};
diff --git a/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_devintf.c b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_devintf.c
new file mode 100644
index 0000000..2ffca42
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_devintf.c
@@ -0,0 +1,960 @@
+/*
+ * ipmi_devintf.c
+ *
+ * Linux device interface for the IPMI message handler.
+ *
+ * Author: MontaVista Software, Inc.
+ *         Corey Minyard <minyard@mvista.com>
+ *         source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/ipmi.h>
+#include <linux/mutex.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/compat.h>
+
+struct ipmi_file_private
+{
+	ipmi_user_t          user;
+	spinlock_t           recv_msg_lock;
+	struct list_head     recv_msgs;
+	struct file          *file;
+	struct fasync_struct *fasync_queue;
+	wait_queue_head_t    wait;
+	struct mutex	     recv_mutex;
+	int                  default_retries;
+	unsigned int         default_retry_time_ms;
+};
+
+static DEFINE_MUTEX(ipmi_mutex);
+
+static void file_receive_handler(struct ipmi_recv_msg *msg,
+				 void                 *handler_data)
+{
+	struct ipmi_file_private *priv = handler_data;
+	int                      was_empty;
+	unsigned long            flags;
+
+	spin_lock_irqsave(&(priv->recv_msg_lock), flags);
+
+	was_empty = list_empty(&(priv->recv_msgs));
+	list_add_tail(&(msg->link), &(priv->recv_msgs));
+
+	if (was_empty) {
+		wake_up_interruptible(&priv->wait);
+		kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
+	}
+
+	spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
+}
+
+static unsigned int ipmi_poll(struct file *file, poll_table *wait)
+{
+	struct ipmi_file_private *priv = file->private_data;
+	unsigned int             mask = 0;
+	unsigned long            flags;
+
+	poll_wait(file, &priv->wait, wait);
+
+	spin_lock_irqsave(&priv->recv_msg_lock, flags);
+
+	if (!list_empty(&(priv->recv_msgs)))
+		mask |= (POLLIN | POLLRDNORM);
+
+	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+
+	return mask;
+}
+
+static int ipmi_fasync(int fd, struct file *file, int on)
+{
+	struct ipmi_file_private *priv = file->private_data;
+	int                      result;
+
+	mutex_lock(&ipmi_mutex); /* could race against open() otherwise */
+	result = fasync_helper(fd, file, on, &priv->fasync_queue);
+	mutex_unlock(&ipmi_mutex);
+
+	return result;
+}
+
+static const struct ipmi_user_hndl ipmi_hndlrs =
+{
+	.ipmi_recv_hndl	= file_receive_handler,
+};
+
+static int ipmi_open(struct inode *inode, struct file *file)
+{
+	int                      if_num = iminor(inode);
+	int                      rv;
+	struct ipmi_file_private *priv;
+
+	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	mutex_lock(&ipmi_mutex);
+	priv->file = file;
+
+	rv = ipmi_create_user(if_num,
+			      &ipmi_hndlrs,
+			      priv,
+			      &(priv->user));
+	if (rv) {
+		kfree(priv);
+		goto out;
+	}
+
+	file->private_data = priv;
+
+	spin_lock_init(&(priv->recv_msg_lock));
+	INIT_LIST_HEAD(&(priv->recv_msgs));
+	init_waitqueue_head(&priv->wait);
+	priv->fasync_queue = NULL;
+	mutex_init(&priv->recv_mutex);
+
+	/* Use the low-level defaults. */
+	priv->default_retries = -1;
+	priv->default_retry_time_ms = 0;
+
+out:
+	mutex_unlock(&ipmi_mutex);
+	return rv;
+}
+
+static int ipmi_release(struct inode *inode, struct file *file)
+{
+	struct ipmi_file_private *priv = file->private_data;
+	int                      rv;
+	struct  ipmi_recv_msg *msg, *next;
+
+	rv = ipmi_destroy_user(priv->user);
+	if (rv)
+		return rv;
+
+	list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
+		ipmi_free_recv_msg(msg);
+
+	kfree(priv);
+
+	return 0;
+}
+
+static int handle_send_req(ipmi_user_t     user,
+			   struct ipmi_req *req,
+			   int             retries,
+			   unsigned int    retry_time_ms)
+{
+	int              rv;
+	struct ipmi_addr addr;
+	struct kernel_ipmi_msg msg;
+
+	if (req->addr_len > sizeof(struct ipmi_addr))
+		return -EINVAL;
+
+	if (copy_from_user(&addr, req->addr, req->addr_len))
+		return -EFAULT;
+
+	msg.netfn = req->msg.netfn;
+	msg.cmd = req->msg.cmd;
+	msg.data_len = req->msg.data_len;
+	msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+	if (!msg.data)
+		return -ENOMEM;
+
+	/*
+	 * From here out we cannot return; we must jump to "out" for
+	 * error exits to free msg.data.
+	 */
+
+	rv = ipmi_validate_addr(&addr, req->addr_len);
+	if (rv)
+		goto out;
+
+	if (req->msg.data != NULL) {
+		if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
+			rv = -EMSGSIZE;
+			goto out;
+		}
+
+		if (copy_from_user(msg.data,
+				   req->msg.data,
+				   req->msg.data_len)) {
+			rv = -EFAULT;
+			goto out;
+		}
+	} else {
+		msg.data_len = 0;
+	}
+
+	rv = ipmi_request_settime(user,
+				  &addr,
+				  req->msgid,
+				  &msg,
+				  NULL,
+				  0,
+				  retries,
+				  retry_time_ms);
+ out:
+	kfree(msg.data);
+	return rv;
+}
+
+static int handle_recv(struct ipmi_file_private *priv,
+			bool trunc, struct ipmi_recv *rsp,
+			int (*copyout)(struct ipmi_recv *, void __user *),
+			void __user *to)
+{
+	int              addr_len;
+	struct list_head *entry;
+	struct ipmi_recv_msg  *msg;
+	unsigned long    flags;
+	int rv = 0;
+
+	/* We claim a mutex because we don't want two
+	   users getting something from the queue at a time.
+	   Since we have to release the spinlock before we can
+	   copy the data to the user, it's possible another
+	   user will grab something from the queue, too.  Then
+	   the messages might get out of order if something
+	   fails and the message gets put back onto the
+	   queue.  This mutex prevents that problem. */
+	mutex_lock(&priv->recv_mutex);
+
+	/* Grab the message off the list. */
+	spin_lock_irqsave(&(priv->recv_msg_lock), flags);
+	if (list_empty(&(priv->recv_msgs))) {
+		spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
+		rv = -EAGAIN;
+		goto recv_err;
+	}
+	entry = priv->recv_msgs.next;
+	msg = list_entry(entry, struct ipmi_recv_msg, link);
+	list_del(entry);
+	spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
+
+	addr_len = ipmi_addr_length(msg->addr.addr_type);
+	if (rsp->addr_len < addr_len) {
+		rv = -EINVAL;
+		goto recv_putback_on_err;
+	}
+
+	if (copy_to_user(rsp->addr, &(msg->addr), addr_len)) {
+		rv = -EFAULT;
+		goto recv_putback_on_err;
+	}
+	rsp->addr_len = addr_len;
+
+	rsp->recv_type = msg->recv_type;
+	rsp->msgid = msg->msgid;
+	rsp->msg.netfn = msg->msg.netfn;
+	rsp->msg.cmd = msg->msg.cmd;
+
+	if (msg->msg.data_len > 0) {
+		if (rsp->msg.data_len < msg->msg.data_len) {
+			rv = -EMSGSIZE;
+			if (trunc)
+				msg->msg.data_len = rsp->msg.data_len;
+			else
+				goto recv_putback_on_err;
+		}
+
+		if (copy_to_user(rsp->msg.data,
+				 msg->msg.data,
+				 msg->msg.data_len)) {
+			rv = -EFAULT;
+			goto recv_putback_on_err;
+		}
+		rsp->msg.data_len = msg->msg.data_len;
+	} else {
+		rsp->msg.data_len = 0;
+	}
+
+	rv = copyout(rsp, to);
+	if (rv)
+		goto recv_putback_on_err;
+
+	mutex_unlock(&priv->recv_mutex);
+	ipmi_free_recv_msg(msg);
+	return 0;
+
+recv_putback_on_err:
+	/* If we got an error, put the message back onto
+	   the head of the queue. */
+	spin_lock_irqsave(&(priv->recv_msg_lock), flags);
+	list_add(entry, &(priv->recv_msgs));
+	spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
+recv_err:
+	mutex_unlock(&priv->recv_mutex);
+	return rv;
+}
+
+static int copyout_recv(struct ipmi_recv *rsp, void __user *to)
+{
+	return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ? -EFAULT : 0;
+}
+
+static int ipmi_ioctl(struct file   *file,
+		      unsigned int  cmd,
+		      unsigned long data)
+{
+	int                      rv = -EINVAL;
+	struct ipmi_file_private *priv = file->private_data;
+	void __user *arg = (void __user *)data;
+
+	switch (cmd) {
+	case IPMICTL_SEND_COMMAND:
+	{
+		struct ipmi_req req;
+
+		if (copy_from_user(&req, arg, sizeof(req))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = handle_send_req(priv->user,
+				     &req,
+				     priv->default_retries,
+				     priv->default_retry_time_ms);
+		break;
+	}
+
+	case IPMICTL_SEND_COMMAND_SETTIME:
+	{
+		struct ipmi_req_settime req;
+
+		if (copy_from_user(&req, arg, sizeof(req))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = handle_send_req(priv->user,
+				     &req.req,
+				     req.retries,
+				     req.retry_time_ms);
+		break;
+	}
+
+	case IPMICTL_RECEIVE_MSG:
+	case IPMICTL_RECEIVE_MSG_TRUNC:
+	{
+		struct ipmi_recv      rsp;
+
+		if (copy_from_user(&rsp, arg, sizeof(rsp)))
+			rv = -EFAULT;
+		else
+			rv = handle_recv(priv, cmd == IPMICTL_RECEIVE_MSG_TRUNC,
+					 &rsp, copyout_recv, arg);
+		break;
+	}
+
+	case IPMICTL_REGISTER_FOR_CMD:
+	{
+		struct ipmi_cmdspec val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
+					   IPMI_CHAN_ALL);
+		break;
+	}
+
+	case IPMICTL_UNREGISTER_FOR_CMD:
+	{
+		struct ipmi_cmdspec   val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
+					     IPMI_CHAN_ALL);
+		break;
+	}
+
+	case IPMICTL_REGISTER_FOR_CMD_CHANS:
+	{
+		struct ipmi_cmdspec_chans val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
+					   val.chans);
+		break;
+	}
+
+	case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
+	{
+		struct ipmi_cmdspec_chans val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
+					     val.chans);
+		break;
+	}
+
+	case IPMICTL_SET_GETS_EVENTS_CMD:
+	{
+		int val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_set_gets_events(priv->user, val);
+		break;
+	}
+
+	/* The next four are legacy, not per-channel. */
+	case IPMICTL_SET_MY_ADDRESS_CMD:
+	{
+		unsigned int val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_set_my_address(priv->user, 0, val);
+		break;
+	}
+
+	case IPMICTL_GET_MY_ADDRESS_CMD:
+	{
+		unsigned int  val;
+		unsigned char rval;
+
+		rv = ipmi_get_my_address(priv->user, 0, &rval);
+		if (rv)
+			break;
+
+		val = rval;
+
+		if (copy_to_user(arg, &val, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+		break;
+	}
+
+	case IPMICTL_SET_MY_LUN_CMD:
+	{
+		unsigned int val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_set_my_LUN(priv->user, 0, val);
+		break;
+	}
+
+	case IPMICTL_GET_MY_LUN_CMD:
+	{
+		unsigned int  val;
+		unsigned char rval;
+
+		rv = ipmi_get_my_LUN(priv->user, 0, &rval);
+		if (rv)
+			break;
+
+		val = rval;
+
+		if (copy_to_user(arg, &val, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+		break;
+	}
+
+	case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
+	{
+		struct ipmi_channel_lun_address_set val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_set_my_address(priv->user, val.channel, val.value);
+		break;
+	}
+
+	case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
+	{
+		struct ipmi_channel_lun_address_set val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
+		if (rv)
+			break;
+
+		if (copy_to_user(arg, &val, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+		break;
+	}
+
+	case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
+	{
+		struct ipmi_channel_lun_address_set val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
+		break;
+	}
+
+	case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
+	{
+		struct ipmi_channel_lun_address_set val;
+
+		if (copy_from_user(&val, arg, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
+		if (rv)
+			break;
+
+		if (copy_to_user(arg, &val, sizeof(val))) {
+			rv = -EFAULT;
+			break;
+		}
+		break;
+	}
+
+	case IPMICTL_SET_TIMING_PARMS_CMD:
+	{
+		struct ipmi_timing_parms parms;
+
+		if (copy_from_user(&parms, arg, sizeof(parms))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		priv->default_retries = parms.retries;
+		priv->default_retry_time_ms = parms.retry_time_ms;
+		rv = 0;
+		break;
+	}
+
+	case IPMICTL_GET_TIMING_PARMS_CMD:
+	{
+		struct ipmi_timing_parms parms;
+
+		parms.retries = priv->default_retries;
+		parms.retry_time_ms = priv->default_retry_time_ms;
+
+		if (copy_to_user(arg, &parms, sizeof(parms))) {
+			rv = -EFAULT;
+			break;
+		}
+
+		rv = 0;
+		break;
+	}
+
+	case IPMICTL_GET_MAINTENANCE_MODE_CMD:
+	{
+		int mode;
+
+		mode = ipmi_get_maintenance_mode(priv->user);
+		if (copy_to_user(arg, &mode, sizeof(mode))) {
+			rv = -EFAULT;
+			break;
+		}
+		rv = 0;
+		break;
+	}
+
+	case IPMICTL_SET_MAINTENANCE_MODE_CMD:
+	{
+		int mode;
+
+		if (copy_from_user(&mode, arg, sizeof(mode))) {
+			rv = -EFAULT;
+			break;
+		}
+		rv = ipmi_set_maintenance_mode(priv->user, mode);
+		break;
+	}
+	}
+
+	return rv;
+}
+
+/*
+ * Note: it doesn't make sense to take the BKL here but
+ *       not in compat_ipmi_ioctl. -arnd
+ */
+static long ipmi_unlocked_ioctl(struct file   *file,
+			        unsigned int  cmd,
+			        unsigned long data)
+{
+	int ret;
+
+	mutex_lock(&ipmi_mutex);
+	ret = ipmi_ioctl(file, cmd, data);
+	mutex_unlock(&ipmi_mutex);
+
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+
+/*
+ * The following code contains code for supporting 32-bit compatible
+ * ioctls on 64-bit kernels.  This allows running 32-bit apps on the
+ * 64-bit kernel
+ */
+#define COMPAT_IPMICTL_SEND_COMMAND	\
+	_IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
+#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME	\
+	_IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
+#define COMPAT_IPMICTL_RECEIVE_MSG	\
+	_IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
+#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC	\
+	_IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)
+
+struct compat_ipmi_msg {
+	u8		netfn;
+	u8		cmd;
+	u16		data_len;
+	compat_uptr_t	data;
+};
+
+struct compat_ipmi_req {
+	compat_uptr_t		addr;
+	compat_uint_t		addr_len;
+	compat_long_t		msgid;
+	struct compat_ipmi_msg	msg;
+};
+
+struct compat_ipmi_recv {
+	compat_int_t		recv_type;
+	compat_uptr_t		addr;
+	compat_uint_t		addr_len;
+	compat_long_t		msgid;
+	struct compat_ipmi_msg	msg;
+};
+
+struct compat_ipmi_req_settime {
+	struct compat_ipmi_req	req;
+	compat_int_t		retries;
+	compat_uint_t		retry_time_ms;
+};
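+
+/*
+ * A 32-bit process hands in pointers as 32-bit compat_uptr_t handles,
+ * so the helpers below widen each structure field by field instead of
+ * copying it wholesale, along the lines of this sketch:
+ *
+ *	p64->addr = compat_ptr(p32->addr);
+ *
+ * where compat_ptr() turns the 32-bit handle back into a usable
+ * user-space pointer.
+ */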
+
+/*
+ * Define some helper functions for copying IPMI data
+ */
+static void get_compat_ipmi_msg(struct ipmi_msg *p64,
+				struct compat_ipmi_msg *p32)
+{
+	p64->netfn = p32->netfn;
+	p64->cmd = p32->cmd;
+	p64->data_len = p32->data_len;
+	p64->data = compat_ptr(p32->data);
+}
+
+static void get_compat_ipmi_req(struct ipmi_req *p64,
+				struct compat_ipmi_req *p32)
+{
+	p64->addr = compat_ptr(p32->addr);
+	p64->addr_len = p32->addr_len;
+	p64->msgid = p32->msgid;
+	get_compat_ipmi_msg(&p64->msg, &p32->msg);
+}
+
+static void get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
+		struct compat_ipmi_req_settime *p32)
+{
+	get_compat_ipmi_req(&p64->req, &p32->req);
+	p64->retries = p32->retries;
+	p64->retry_time_ms = p32->retry_time_ms;
+}
+
+static void get_compat_ipmi_recv(struct ipmi_recv *p64,
+				 struct compat_ipmi_recv *p32)
+{
+	memset(p64, 0, sizeof(struct ipmi_recv));
+	p64->recv_type = p32->recv_type;
+	p64->addr = compat_ptr(p32->addr);
+	p64->addr_len = p32->addr_len;
+	p64->msgid = p32->msgid;
+	get_compat_ipmi_msg(&p64->msg, &p32->msg);
+}
+
+static int copyout_recv32(struct ipmi_recv *p64, void __user *to)
+{
+	struct compat_ipmi_recv v32;
+	memset(&v32, 0, sizeof(struct compat_ipmi_recv));
+	v32.recv_type = p64->recv_type;
+	v32.addr = ptr_to_compat(p64->addr);
+	v32.addr_len = p64->addr_len;
+	v32.msgid = p64->msgid;
+	v32.msg.netfn = p64->msg.netfn;
+	v32.msg.cmd = p64->msg.cmd;
+	v32.msg.data_len = p64->msg.data_len;
+	v32.msg.data = ptr_to_compat(p64->msg.data);
+	return copy_to_user(to, &v32, sizeof(v32)) ? -EFAULT : 0;
+}
+
+/*
+ * Handle compatibility ioctls
+ */
+static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
+			      unsigned long arg)
+{
+	struct ipmi_file_private *priv = filep->private_data;
+
+	switch (cmd) {
+	case COMPAT_IPMICTL_SEND_COMMAND:
+	{
+		struct ipmi_req	rp;
+		struct compat_ipmi_req r32;
+
+		if (copy_from_user(&r32, compat_ptr(arg), sizeof(r32)))
+			return -EFAULT;
+
+		get_compat_ipmi_req(&rp, &r32);
+
+		return handle_send_req(priv->user, &rp,
+				priv->default_retries,
+				priv->default_retry_time_ms);
+	}
+	case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
+	{
+		struct ipmi_req_settime	sp;
+		struct compat_ipmi_req_settime sp32;
+
+		if (copy_from_user(&sp32, compat_ptr(arg), sizeof(sp32)))
+			return -EFAULT;
+
+		get_compat_ipmi_req_settime(&sp, &sp32);
+
+		return handle_send_req(priv->user, &sp.req,
+				sp.retries, sp.retry_time_ms);
+	}
+	case COMPAT_IPMICTL_RECEIVE_MSG:
+	case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
+	{
+		struct ipmi_recv   recv64;
+		struct compat_ipmi_recv recv32;
+
+		if (copy_from_user(&recv32, compat_ptr(arg), sizeof(recv32)))
+			return -EFAULT;
+
+		get_compat_ipmi_recv(&recv64, &recv32);
+
+		return handle_recv(priv,
+				 cmd == COMPAT_IPMICTL_RECEIVE_MSG_TRUNC,
+				 &recv64, copyout_recv32, compat_ptr(arg));
+	}
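+	/*
+	 * Anything else has the same layout in 32-bit and 64-bit user
+	 * space, so it can be passed straight to the native handler.
+	 */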
+	default:
+		return ipmi_ioctl(filep, cmd, arg);
+	}
+}
+
+static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
+				       unsigned long arg)
+{
+	int ret;
+
+	mutex_lock(&ipmi_mutex);
+	ret = compat_ipmi_ioctl(filep, cmd, arg);
+	mutex_unlock(&ipmi_mutex);
+
+	return ret;
+}
+#endif
+
+static const struct file_operations ipmi_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl	= ipmi_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = unlocked_compat_ipmi_ioctl,
+#endif
+	.open		= ipmi_open,
+	.release	= ipmi_release,
+	.fasync		= ipmi_fasync,
+	.poll		= ipmi_poll,
+	.llseek		= noop_llseek,
+};
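+
+/*
+ * Userspace sketch (illustrative only, not part of the driver): a
+ * process typically drives the interface registered above like
+ *
+ *	fd = open("/dev/ipmi0", O_RDWR);
+ *	ioctl(fd, IPMICTL_SET_GETS_EVENTS_CMD, &val);
+ *	poll(&pfd, 1, -1);
+ *	ioctl(fd, IPMICTL_RECEIVE_MSG, &recv);
+ */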
+
+#define DEVICE_NAME     "ipmidev"
+
+static int ipmi_major;
+module_param(ipmi_major, int, 0);
+MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device.  By"
+		 " default, or if you set it to zero, it will choose the next"
+		 " available device.  Setting it to -1 will disable the"
+		 " interface.  Other values will set the major device number"
+		 " to that value.");
+
+/* Keep track of the devices that are registered. */
+struct ipmi_reg_list {
+	dev_t            dev;
+	struct list_head link;
+};
+static LIST_HEAD(reg_list);
+static DEFINE_MUTEX(reg_list_mutex);
+
+static struct class *ipmi_class;
+
+static void ipmi_new_smi(int if_num, struct device *device)
+{
+	dev_t dev = MKDEV(ipmi_major, if_num);
+	struct ipmi_reg_list *entry;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		printk(KERN_ERR "ipmi_devintf: Unable to create the"
+		       " ipmi class device link\n");
+		return;
+	}
+	entry->dev = dev;
+
+	mutex_lock(&reg_list_mutex);
+	device_create(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
+	list_add(&entry->link, &reg_list);
+	mutex_unlock(&reg_list_mutex);
+}
+
+static void ipmi_smi_gone(int if_num)
+{
+	dev_t dev = MKDEV(ipmi_major, if_num);
+	struct ipmi_reg_list *entry;
+
+	mutex_lock(&reg_list_mutex);
+	list_for_each_entry(entry, &reg_list, link) {
+		if (entry->dev == dev) {
+			list_del(&entry->link);
+			kfree(entry);
+			break;
+		}
+	}
+	device_destroy(ipmi_class, dev);
+	mutex_unlock(&reg_list_mutex);
+}
+
+static struct ipmi_smi_watcher smi_watcher =
+{
+	.owner    = THIS_MODULE,
+	.new_smi  = ipmi_new_smi,
+	.smi_gone = ipmi_smi_gone,
+};
+
+static int __init init_ipmi_devintf(void)
+{
+	int rv;
+
+	if (ipmi_major < 0)
+		return -EINVAL;
+
+	printk(KERN_INFO "ipmi device interface\n");
+
+	ipmi_class = class_create(THIS_MODULE, "ipmi");
+	if (IS_ERR(ipmi_class)) {
+		printk(KERN_ERR "ipmi: can't register device class\n");
+		return PTR_ERR(ipmi_class);
+	}
+
+	rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
+	if (rv < 0) {
+		class_destroy(ipmi_class);
+		printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
+		return rv;
+	}
+
+	if (ipmi_major == 0)
+		ipmi_major = rv;
+
+	rv = ipmi_smi_watcher_register(&smi_watcher);
+	if (rv) {
+		unregister_chrdev(ipmi_major, DEVICE_NAME);
+		class_destroy(ipmi_class);
+		printk(KERN_WARNING "ipmi: can't register smi watcher\n");
+		return rv;
+	}
+
+	return 0;
+}
+module_init(init_ipmi_devintf);
+
+static void __exit cleanup_ipmi(void)
+{
+	struct ipmi_reg_list *entry, *entry2;
+	mutex_lock(&reg_list_mutex);
+	list_for_each_entry_safe(entry, entry2, &reg_list, link) {
+		list_del(&entry->link);
+		device_destroy(ipmi_class, entry->dev);
+		kfree(entry);
+	}
+	mutex_unlock(&reg_list_mutex);
+	class_destroy(ipmi_class);
+	ipmi_smi_watcher_unregister(&smi_watcher);
+	unregister_chrdev(ipmi_major, DEVICE_NAME);
+}
+module_exit(cleanup_ipmi);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
diff --git a/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_dmi.c b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_dmi.c
new file mode 100644
index 0000000..a37d979
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_dmi.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A hack to create a platform device from a DMI entry.  This will
+ * allow autoloading of the IPMI driver based on SMBIOS entries.
+ */
+
+#include <linux/ipmi.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include "ipmi_dmi.h"
+
+struct ipmi_dmi_info {
+	int type;
+	u32 flags;
+	unsigned long addr;
+	u8 slave_addr;
+	struct ipmi_dmi_info *next;
+};
+
+static struct ipmi_dmi_info *ipmi_dmi_infos;
+
+static int ipmi_dmi_nr __initdata;
+
+static void __init dmi_add_platform_ipmi(unsigned long base_addr,
+					 u32 flags,
+					 u8 slave_addr,
+					 int irq,
+					 int offset,
+					 int type)
+{
+	struct platform_device *pdev;
+	struct resource r[4];
+	unsigned int num_r = 1, size;
+	struct property_entry p[4] = {
+		PROPERTY_ENTRY_U8("slave-addr", slave_addr),
+		PROPERTY_ENTRY_U8("ipmi-type", type),
+		PROPERTY_ENTRY_U16("i2c-addr", base_addr),
+		{ }
+	};
+	char *name, *override;
+	int rv;
+	struct ipmi_dmi_info *info;
+
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		pr_warn("ipmi:dmi: Could not allocate dmi info\n");
+	} else {
+		info->type = type;
+		info->flags = flags;
+		info->addr = base_addr;
+		info->slave_addr = slave_addr;
+		info->next = ipmi_dmi_infos;
+		ipmi_dmi_infos = info;
+	}
+
+	name = "dmi-ipmi-si";
+	override = "ipmi_si";
+	switch (type) {
+	case IPMI_DMI_TYPE_SSIF:
+		name = "dmi-ipmi-ssif";
+		override = "ipmi_ssif";
+		offset = 1;
+		size = 1;
+		break;
+	case IPMI_DMI_TYPE_BT:
+		size = 3;
+		break;
+	case IPMI_DMI_TYPE_KCS:
+	case IPMI_DMI_TYPE_SMIC:
+		size = 2;
+		break;
+	default:
+		pr_err("ipmi:dmi: Invalid IPMI type: %d", type);
+		return;
+	}
+
+	pdev = platform_device_alloc(name, ipmi_dmi_nr);
+	if (!pdev) {
+		pr_err("ipmi:dmi: Error allocation IPMI platform device");
+		return;
+	}
+	pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
+					  override);
+	if (!pdev->driver_override)
+		goto err;
+
+	if (type == IPMI_DMI_TYPE_SSIF)
+		goto add_properties;
+
+	memset(r, 0, sizeof(r));
+
+	r[0].start = base_addr;
+	r[0].end = r[0].start + offset - 1;
+	r[0].name = "IPMI Address 1";
+	r[0].flags = flags;
+
+	if (size > 1) {
+		r[1].start = r[0].start + offset;
+		r[1].end = r[1].start + offset - 1;
+		r[1].name = "IPMI Address 2";
+		r[1].flags = flags;
+		num_r++;
+	}
+
+	if (size > 2) {
+		r[2].start = r[1].start + offset;
+		r[2].end = r[2].start + offset - 1;
+		r[2].name = "IPMI Address 3";
+		r[2].flags = flags;
+		num_r++;
+	}
+
+	if (irq) {
+		r[num_r].start = irq;
+		r[num_r].end = irq;
+		r[num_r].name = "IPMI IRQ";
+		r[num_r].flags = IORESOURCE_IRQ;
+		num_r++;
+	}
+
+	rv = platform_device_add_resources(pdev, r, num_r);
+	if (rv) {
+		dev_err(&pdev->dev,
+			"ipmi:dmi: Unable to add resources: %d\n", rv);
+		goto err;
+	}
+
+add_properties:
+	rv = platform_device_add_properties(pdev, p);
+	if (rv) {
+		dev_err(&pdev->dev,
+			"ipmi:dmi: Unable to add properties: %d\n", rv);
+		goto err;
+	}
+
+	rv = platform_device_add(pdev);
+	if (rv) {
+		dev_err(&pdev->dev, "ipmi:dmi: Unable to add device: %d\n", rv);
+		goto err;
+	}
+
+	ipmi_dmi_nr++;
+	return;
+
+err:
+	platform_device_put(pdev);
+}
+
+/*
+ * Look up the slave address for a given interface.  This is here
+ * because ACPI doesn't have a slave address while SMBIOS does, but we
+ * prefer using ACPI so the ACPI code can use the IPMI namespace.
+ * This function allows an ACPI-specified IPMI device to look up the
+ * slave address from the DMI table.
+ */
+int ipmi_dmi_get_slave_addr(int type, u32 flags, unsigned long base_addr)
+{
+	struct ipmi_dmi_info *info = ipmi_dmi_infos;
+
+	while (info) {
+		if (info->type == type &&
+		    info->flags == flags &&
+		    info->addr == base_addr)
+			return info->slave_addr;
+		info = info->next;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ipmi_dmi_get_slave_addr);
+
+#define DMI_IPMI_MIN_LENGTH	0x10
+#define DMI_IPMI_VER2_LENGTH	0x12
+#define DMI_IPMI_TYPE		4
+#define DMI_IPMI_SLAVEADDR	6
+#define DMI_IPMI_ADDR		8
+#define DMI_IPMI_ACCESS		0x10
+#define DMI_IPMI_IRQ		0x11
+#define DMI_IPMI_IO_MASK	0xfffe
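+
+/*
+ * The offsets above follow the SMBIOS "IPMI Device Information"
+ * (type 38) record: byte 4 holds the interface type, byte 6 the BMC
+ * I2C slave address, bytes 8-15 the base address, byte 0x10 the base
+ * address modifier/interrupt info, and byte 0x11 the interrupt number.
+ */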
+
+static void __init dmi_decode_ipmi(const struct dmi_header *dm)
+{
+	const u8	*data = (const u8 *) dm;
+	u32             flags = IORESOURCE_IO;
+	unsigned long	base_addr;
+	u8              len = dm->length;
+	u8              slave_addr;
+	int             irq = 0, offset;
+	int             type;
+
+	if (len < DMI_IPMI_MIN_LENGTH)
+		return;
+
+	type = data[DMI_IPMI_TYPE];
+	slave_addr = data[DMI_IPMI_SLAVEADDR];
+
+	memcpy(&base_addr, data + DMI_IPMI_ADDR, sizeof(unsigned long));
+	if (!base_addr) {
+		pr_err("Base address is zero, assuming no IPMI interface\n");
+		return;
+	}
+	if (len >= DMI_IPMI_VER2_LENGTH) {
+		if (type == IPMI_DMI_TYPE_SSIF) {
+			offset = 0;
+			flags = 0;
+			base_addr = data[DMI_IPMI_ADDR] >> 1;
+			if (base_addr == 0) {
+				/*
+				 * Some broken systems put the I2C address in
+				 * the slave address field.  We try to
+				 * accommodate them here.
+				 */
+				base_addr = data[DMI_IPMI_SLAVEADDR] >> 1;
+				slave_addr = 0;
+			}
+		} else {
+			if (base_addr & 1) {
+				/* I/O */
+				base_addr &= DMI_IPMI_IO_MASK;
+			} else {
+				/* Memory */
+				flags = IORESOURCE_MEM;
+			}
+
+			/*
+			 * Bit 4 of byte 0x10 supplies the least
+			 * significant bit of the address.
+			 */
+			base_addr |= (data[DMI_IPMI_ACCESS] >> 4) & 1;
+
+			irq = data[DMI_IPMI_IRQ];
+
+			/*
+			 * The top two bits of byte 0x10 hold the
+			 * register spacing.
+			 */
+			switch ((data[DMI_IPMI_ACCESS] >> 6) & 3) {
+			case 0: /* Byte boundaries */
+				offset = 1;
+				break;
+			case 1: /* 32-bit boundaries */
+				offset = 4;
+				break;
+			case 2: /* 16-byte boundaries */
+				offset = 16;
+				break;
+			default:
+				pr_err("ipmi:dmi: Invalid offset: 0");
+				return;
+			}
+		}
+	} else {
+		/* Old DMI spec. */
+		/*
+		 * Note that technically, the lower bit of the base
+		 * address should be 1 if the address is I/O and 0 if
+		 * the address is in memory.  But many systems get that
+		 * wrong (and all that I have seen are I/O), so we just
+		 * ignore that bit and assume I/O.  Systems that use
+		 * memory should use the newer spec, anyway.
+		 */
+		base_addr = base_addr & DMI_IPMI_IO_MASK;
+		offset = 1;
+	}
+
+	dmi_add_platform_ipmi(base_addr, flags, slave_addr, irq,
+			      offset, type);
+}
+
+static int __init scan_for_dmi_ipmi(void)
+{
+	const struct dmi_device *dev = NULL;
+
+	while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev)))
+		dmi_decode_ipmi((const struct dmi_header *) dev->device_data);
+
+	return 0;
+}
+subsys_initcall(scan_for_dmi_ipmi);
diff --git a/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_dmi.h b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_dmi.h
new file mode 100644
index 0000000..ea990a8
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_dmi.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DMI defines for use by IPMI
+ */
+
+#define IPMI_DMI_TYPE_KCS	0x01
+#define IPMI_DMI_TYPE_SMIC	0x02
+#define IPMI_DMI_TYPE_BT	0x03
+#define IPMI_DMI_TYPE_SSIF	0x04
+
+#ifdef CONFIG_IPMI_DMI_DECODE
+int ipmi_dmi_get_slave_addr(int type, u32 flags, unsigned long base_addr);
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_kcs_sm.c b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_kcs_sm.c
new file mode 100644
index 0000000..1da61af
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -0,0 +1,551 @@
+/*
+ * ipmi_kcs_sm.c
+ *
+ * State machine for handling IPMI KCS interfaces.
+ *
+ * Author: MontaVista Software, Inc.
+ *         Corey Minyard <minyard@mvista.com>
+ *         source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * This state machine is taken from the state machine in the IPMI spec,
+ * pretty much verbatim.  If you have questions about the states, see
+ * that document.
+ */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/ipmi_msgdefs.h>		/* for completion codes */
+#include "ipmi_si_sm.h"
+
+/* kcs_debug is a bit-field
+ *	KCS_DEBUG_ENABLE -	turned on for now
+ *	KCS_DEBUG_MSG    -	commands and their responses
+ *	KCS_DEBUG_STATES -	state machine
+ */
+#define KCS_DEBUG_STATES	4
+#define KCS_DEBUG_MSG		2
+#define	KCS_DEBUG_ENABLE	1
+
+static int kcs_debug;
+module_param(kcs_debug, int, 0644);
+MODULE_PARM_DESC(kcs_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
+
+/* The states the KCS driver may be in. */
+enum kcs_states {
+	/* The KCS interface is currently doing nothing. */
+	KCS_IDLE,
+
+	/*
+	 * We are starting an operation.  The data is in the output
+	 * buffer, but nothing has been done to the interface yet.  This
+	 * was added to the state machine in the spec to wait for the
+	 * initial IBF.
+	 */
+	KCS_START_OP,
+
+	/* We have written a write cmd to the interface. */
+	KCS_WAIT_WRITE_START,
+
+	/* We are writing bytes to the interface. */
+	KCS_WAIT_WRITE,
+
+	/*
+	 * We have written the write end cmd to the interface, and
+	 * still need to write the last byte.
+	 */
+	KCS_WAIT_WRITE_END,
+
+	/* We are waiting to read data from the interface. */
+	KCS_WAIT_READ,
+
+	/*
+	 * State to transition to the error handler, this was added to
+	 * the state machine in the spec to be sure IBF was there.
+	 */
+	KCS_ERROR0,
+
+	/*
+	 * First stage error handler, wait for the interface to
+	 * respond.
+	 */
+	KCS_ERROR1,
+
+	/*
+	 * The abort cmd has been written, wait for the interface to
+	 * respond.
+	 */
+	KCS_ERROR2,
+
+	/*
+	 * We wrote some data to the interface, wait for it to switch
+	 * to read mode.
+	 */
+	KCS_ERROR3,
+
+	/* The hardware failed to follow the state machine. */
+	KCS_HOSED
+};
+
+#define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH
+#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
+
+/* Timeouts in microseconds (the ERROR0 wait is in jiffies). */
+#define IBF_RETRY_TIMEOUT (5*USEC_PER_SEC)
+#define OBF_RETRY_TIMEOUT (5*USEC_PER_SEC)
+#define MAX_ERROR_RETRIES 10
+#define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
+
+struct si_sm_data {
+	enum kcs_states  state;
+	struct si_sm_io *io;
+	unsigned char    write_data[MAX_KCS_WRITE_SIZE];
+	int              write_pos;
+	int              write_count;
+	int              orig_write_count;
+	unsigned char    read_data[MAX_KCS_READ_SIZE];
+	int              read_pos;
+	int	         truncated;
+
+	unsigned int  error_retries;
+	long          ibf_timeout;
+	long          obf_timeout;
+	unsigned long  error0_timeout;
+};
+
+static unsigned int init_kcs_data(struct si_sm_data *kcs,
+				  struct si_sm_io *io)
+{
+	kcs->state = KCS_IDLE;
+	kcs->io = io;
+	kcs->write_pos = 0;
+	kcs->write_count = 0;
+	kcs->orig_write_count = 0;
+	kcs->read_pos = 0;
+	kcs->error_retries = 0;
+	kcs->truncated = 0;
+	kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+	kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+
+	/* Reserve 2 I/O bytes. */
+	return 2;
+}
+
+static inline unsigned char read_status(struct si_sm_data *kcs)
+{
+	return kcs->io->inputb(kcs->io, 1);
+}
+
+static inline unsigned char read_data(struct si_sm_data *kcs)
+{
+	return kcs->io->inputb(kcs->io, 0);
+}
+
+static inline void write_cmd(struct si_sm_data *kcs, unsigned char data)
+{
+	kcs->io->outputb(kcs->io, 1, data);
+}
+
+static inline void write_data(struct si_sm_data *kcs, unsigned char data)
+{
+	kcs->io->outputb(kcs->io, 0, data);
+}
+
+/* Control codes. */
+#define KCS_GET_STATUS_ABORT	0x60
+#define KCS_WRITE_START		0x61
+#define KCS_WRITE_END		0x62
+#define KCS_READ_BYTE		0x68
+
+/* Status bits. */
+#define GET_STATUS_STATE(status) (((status) >> 6) & 0x03)
+#define KCS_IDLE_STATE	0
+#define KCS_READ_STATE	1
+#define KCS_WRITE_STATE	2
+#define KCS_ERROR_STATE	3
+#define GET_STATUS_ATN(status) ((status) & 0x04)
+#define GET_STATUS_IBF(status) ((status) & 0x02)
+#define GET_STATUS_OBF(status) ((status) & 0x01)
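+
+/*
+ * Worked example (illustrative): a status byte of 0xc3 decodes to
+ * state 3 (KCS_ERROR_STATE) with ATN clear and both IBF and OBF set.
+ */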
+
+
+static inline void write_next_byte(struct si_sm_data *kcs)
+{
+	write_data(kcs, kcs->write_data[kcs->write_pos]);
+	(kcs->write_pos)++;
+	(kcs->write_count)--;
+}
+
+static inline void start_error_recovery(struct si_sm_data *kcs, char *reason)
+{
+	(kcs->error_retries)++;
+	if (kcs->error_retries > MAX_ERROR_RETRIES) {
+		if (kcs_debug & KCS_DEBUG_ENABLE)
+			printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n",
+			       reason);
+		kcs->state = KCS_HOSED;
+	} else {
+		kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES;
+		kcs->state = KCS_ERROR0;
+	}
+}
+
+static inline void read_next_byte(struct si_sm_data *kcs)
+{
+	if (kcs->read_pos >= MAX_KCS_READ_SIZE) {
+		/* Throw the data away and mark it truncated. */
+		read_data(kcs);
+		kcs->truncated = 1;
+	} else {
+		kcs->read_data[kcs->read_pos] = read_data(kcs);
+		(kcs->read_pos)++;
+	}
+	write_data(kcs, KCS_READ_BYTE);
+}
+
+static inline int check_ibf(struct si_sm_data *kcs, unsigned char status,
+			    long time)
+{
+	if (GET_STATUS_IBF(status)) {
+		kcs->ibf_timeout -= time;
+		if (kcs->ibf_timeout < 0) {
+			start_error_recovery(kcs, "IBF not ready in time");
+			kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+			return 1;
+		}
+		return 0;
+	}
+	kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+	return 1;
+}
+
+static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
+			    long time)
+{
+	if (!GET_STATUS_OBF(status)) {
+		kcs->obf_timeout -= time;
+		if (kcs->obf_timeout < 0) {
+			kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+			start_error_recovery(kcs, "OBF not ready in time");
+			return 1;
+		}
+		return 0;
+	}
+	kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+	return 1;
+}
+
+static void clear_obf(struct si_sm_data *kcs, unsigned char status)
+{
+	if (GET_STATUS_OBF(status))
+		read_data(kcs);
+}
+
+static void restart_kcs_transaction(struct si_sm_data *kcs)
+{
+	kcs->write_count = kcs->orig_write_count;
+	kcs->write_pos = 0;
+	kcs->read_pos = 0;
+	kcs->state = KCS_WAIT_WRITE_START;
+	kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+	kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+	write_cmd(kcs, KCS_WRITE_START);
+}
+
+static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
+				 unsigned int size)
+{
+	unsigned int i;
+
+	if (size < 2)
+		return IPMI_REQ_LEN_INVALID_ERR;
+	if (size > MAX_KCS_WRITE_SIZE)
+		return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+	if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED))
+		return IPMI_NOT_IN_MY_STATE_ERR;
+
+	if (kcs_debug & KCS_DEBUG_MSG) {
+		printk(KERN_DEBUG "start_kcs_transaction -");
+		for (i = 0; i < size; i++)
+			printk(" %02x", (unsigned char) (data [i]));
+		printk("\n");
+	}
+	kcs->error_retries = 0;
+	memcpy(kcs->write_data, data, size);
+	kcs->write_count = size;
+	kcs->orig_write_count = size;
+	kcs->write_pos = 0;
+	kcs->read_pos = 0;
+	kcs->state = KCS_START_OP;
+	kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+	kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+	return 0;
+}
+
+static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data,
+			  unsigned int length)
+{
+	if (length < kcs->read_pos) {
+		kcs->read_pos = length;
+		kcs->truncated = 1;
+	}
+
+	memcpy(data, kcs->read_data, kcs->read_pos);
+
+	if ((length >= 3) && (kcs->read_pos < 3)) {
+		/*
+		 * Guarantee that we return at least 3 bytes, with an
+		 * error in the third byte if it is too short.
+		 */
+		data[2] = IPMI_ERR_UNSPECIFIED;
+		kcs->read_pos = 3;
+	}
+	if (kcs->truncated) {
+		/*
+		 * Report a truncated error.  We might overwrite
+		 * another error, but that's too bad, the user needs
+		 * to know it was truncated.
+		 */
+		data[2] = IPMI_ERR_MSG_TRUNCATED;
+		kcs->truncated = 0;
+	}
+
+	return kcs->read_pos;
+}
+
+/*
+ * This implements the state machine defined in the IPMI manual, see
+ * that for details on how this works.  Divide that flowchart into
+ * sections delimited by "Wait for IBF" and this will become clear.
+ */
+static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
+{
+	unsigned char status;
+	unsigned char state;
+
+	status = read_status(kcs);
+
+	if (kcs_debug & KCS_DEBUG_STATES)
+		printk(KERN_DEBUG "KCS: State = %d, %x\n", kcs->state, status);
+
+	/* All states wait for ibf, so just do it here. */
+	if (!check_ibf(kcs, status, time))
+		return SI_SM_CALL_WITH_DELAY;
+
+	/* Just about everything looks at the KCS state, so grab that, too. */
+	state = GET_STATUS_STATE(status);
+
+	switch (kcs->state) {
+	case KCS_IDLE:
+		/* If there's an interrupt source, turn it off. */
+		clear_obf(kcs, status);
+
+		if (GET_STATUS_ATN(status))
+			return SI_SM_ATTN;
+		else
+			return SI_SM_IDLE;
+
+	case KCS_START_OP:
+		if (state != KCS_IDLE_STATE) {
+			start_error_recovery(kcs,
+					     "State machine not idle at start");
+			break;
+		}
+
+		clear_obf(kcs, status);
+		write_cmd(kcs, KCS_WRITE_START);
+		kcs->state = KCS_WAIT_WRITE_START;
+		break;
+
+	case KCS_WAIT_WRITE_START:
+		if (state != KCS_WRITE_STATE) {
+			start_error_recovery(
+				kcs,
+				"Not in write state at write start");
+			break;
+		}
+		read_data(kcs);
+		if (kcs->write_count == 1) {
+			write_cmd(kcs, KCS_WRITE_END);
+			kcs->state = KCS_WAIT_WRITE_END;
+		} else {
+			write_next_byte(kcs);
+			kcs->state = KCS_WAIT_WRITE;
+		}
+		break;
+
+	case KCS_WAIT_WRITE:
+		if (state != KCS_WRITE_STATE) {
+			start_error_recovery(kcs,
+					     "Not in write state for write");
+			break;
+		}
+		clear_obf(kcs, status);
+		if (kcs->write_count == 1) {
+			write_cmd(kcs, KCS_WRITE_END);
+			kcs->state = KCS_WAIT_WRITE_END;
+		} else {
+			write_next_byte(kcs);
+		}
+		break;
+
+	case KCS_WAIT_WRITE_END:
+		if (state != KCS_WRITE_STATE) {
+			start_error_recovery(kcs,
+					     "Not in write state"
+					     " for write end");
+			break;
+		}
+		clear_obf(kcs, status);
+		write_next_byte(kcs);
+		kcs->state = KCS_WAIT_READ;
+		break;
+
+	case KCS_WAIT_READ:
+		if ((state != KCS_READ_STATE) && (state != KCS_IDLE_STATE)) {
+			start_error_recovery(
+				kcs,
+				"Not in read or idle in read state");
+			break;
+		}
+
+		if (state == KCS_READ_STATE) {
+			if (!check_obf(kcs, status, time))
+				return SI_SM_CALL_WITH_DELAY;
+			read_next_byte(kcs);
+		} else {
+			/*
+			 * We don't implement this exactly like the state
+			 * machine in the spec.  Some broken hardware
+			 * does not write the final dummy byte to the
+			 * read register.  Thus obf will never go high
+			 * here.  We just go straight to idle, and we
+			 * handle clearing out obf in idle state if it
+			 * happens to come in.
+			 */
+			clear_obf(kcs, status);
+			kcs->orig_write_count = 0;
+			kcs->state = KCS_IDLE;
+			return SI_SM_TRANSACTION_COMPLETE;
+		}
+		break;
+
+	case KCS_ERROR0:
+		clear_obf(kcs, status);
+		status = read_status(kcs);
+		if (GET_STATUS_OBF(status))
+			/* controller isn't responding */
+			if (time_before(jiffies, kcs->error0_timeout))
+				return SI_SM_CALL_WITH_TICK_DELAY;
+		write_cmd(kcs, KCS_GET_STATUS_ABORT);
+		kcs->state = KCS_ERROR1;
+		break;
+
+	case KCS_ERROR1:
+		clear_obf(kcs, status);
+		write_data(kcs, 0);
+		kcs->state = KCS_ERROR2;
+		break;
+
+	case KCS_ERROR2:
+		if (state != KCS_READ_STATE) {
+			start_error_recovery(kcs,
+					     "Not in read state for error2");
+			break;
+		}
+		if (!check_obf(kcs, status, time))
+			return SI_SM_CALL_WITH_DELAY;
+
+		clear_obf(kcs, status);
+		write_data(kcs, KCS_READ_BYTE);
+		kcs->state = KCS_ERROR3;
+		break;
+
+	case KCS_ERROR3:
+		if (state != KCS_IDLE_STATE) {
+			start_error_recovery(kcs,
+					     "Not in idle state for error3");
+			break;
+		}
+
+		if (!check_obf(kcs, status, time))
+			return SI_SM_CALL_WITH_DELAY;
+
+		clear_obf(kcs, status);
+		if (kcs->orig_write_count) {
+			restart_kcs_transaction(kcs);
+		} else {
+			kcs->state = KCS_IDLE;
+			return SI_SM_TRANSACTION_COMPLETE;
+		}
+		break;
+
+	case KCS_HOSED:
+		break;
+	}
+
+	if (kcs->state == KCS_HOSED) {
+		init_kcs_data(kcs, kcs->io);
+		return SI_SM_HOSED;
+	}
+
+	return SI_SM_CALL_WITHOUT_DELAY;
+}
+
+static int kcs_size(void)
+{
+	return sizeof(struct si_sm_data);
+}
+
+static int kcs_detect(struct si_sm_data *kcs)
+{
+	/*
+	 * It's impossible for the KCS status register to be all 1's,
+	 * (assuming a properly functioning, self-initialized BMC)
+	 * but that's what you get from reading a bogus address, so we
+	 * test that first.
+	 */
+	if (read_status(kcs) == 0xff)
+		return 1;
+
+	return 0;
+}
+
+static void kcs_cleanup(struct si_sm_data *kcs)
+{
+}
+
+const struct si_sm_handlers kcs_smi_handlers = {
+	.init_data         = init_kcs_data,
+	.start_transaction = start_kcs_transaction,
+	.get_result        = get_kcs_result,
+	.event             = kcs_event,
+	.detect            = kcs_detect,
+	.cleanup           = kcs_cleanup,
+	.size              = kcs_size,
+};
diff --git a/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_msghandler.c b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_msghandler.c
new file mode 100644
index 0000000..f72a272
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_msghandler.c
@@ -0,0 +1,4658 @@
+/*
+ * ipmi_msghandler.c
+ *
+ * Incoming and outgoing message routing for an IPMI interface.
+ *
+ * Author: MontaVista Software, Inc.
+ *         Corey Minyard <minyard@mvista.com>
+ *         source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+
+#define PFX "IPMI message handler: "
+
+#define IPMI_DRIVER_VERSION "39.2"
+
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
+static int ipmi_init_msghandler(void);
+static void smi_recv_tasklet(unsigned long);
+static void handle_new_recv_msgs(ipmi_smi_t intf);
+static void need_waiter(ipmi_smi_t intf);
+static int handle_one_recv_msg(ipmi_smi_t          intf,
+			       struct ipmi_smi_msg *msg);
+
+static int initialized;
+
+#ifdef CONFIG_PROC_FS
+static struct proc_dir_entry *proc_ipmi_root;
+#endif /* CONFIG_PROC_FS */
+
+/* Remain in auto-maintenance mode for this amount of time (in ms). */
+#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
+
+#define MAX_EVENTS_IN_QUEUE	25
+
+/*
+ * Don't let a message sit in a queue forever, always time it with at least
+ * the max message timer.  This is in milliseconds.
+ */
+#define MAX_MSG_TIMEOUT		60000
+
+/* Call every ~1000 ms. */
+#define IPMI_TIMEOUT_TIME	1000
+
+/* How many jiffies does it take to get to the timeout time. */
+#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
+
+/*
+ * Request events from the queue every second (this is the number of
+ * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
+ * future, IPMI will add a way to know immediately if an event is in
+ * the queue and this silliness can go away.
+ */
+#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))
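+/*
+ * With IPMI_TIMEOUT_TIME at 1000 this evaluates to 1, i.e. an event
+ * request is made on every timer tick.
+ */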
+
+/*
+ * The main "user" data structure.
+ */
+struct ipmi_user {
+	struct list_head link;
+
+	/* Set to false when the user is destroyed. */
+	bool valid;
+
+	struct kref refcount;
+
+	/* The upper layer that handles receive messages. */
+	const struct ipmi_user_hndl *handler;
+	void             *handler_data;
+
+	/* The interface this user is bound to. */
+	ipmi_smi_t intf;
+
+	/* Does this interface receive IPMI events? */
+	bool gets_events;
+};
+
+struct cmd_rcvr {
+	struct list_head link;
+
+	ipmi_user_t   user;
+	unsigned char netfn;
+	unsigned char cmd;
+	unsigned int  chans;
+
+	/*
+	 * This is used to form a linked list during mass deletion.
+	 * Since this is in an RCU list, we cannot use the link above
+	 * or change any data until the RCU period completes.  So we
+	 * use this next variable during mass deletion so we can have
+	 * a list and don't have to wait and restart the search on
+	 * every individual deletion of a command.
+	 */
+	struct cmd_rcvr *next;
+};
+
+struct seq_table {
+	unsigned int         inuse : 1;
+	unsigned int         broadcast : 1;
+
+	unsigned long        timeout;
+	unsigned long        orig_timeout;
+	unsigned int         retries_left;
+
+	/*
+	 * To verify on an incoming send message response that this is
+	 * the message that the response is for, we keep a sequence id
+	 * and increment it every time we send a message.
+	 */
+	long                 seqid;
+
+	/*
+	 * This is held so we can properly respond to the message on a
+	 * timeout, and it is used to hold the temporary data for
+	 * retransmission, too.
+	 */
+	struct ipmi_recv_msg *recv_msg;
+};
+
+/*
+ * Store the information in a msgid (long) to allow us to find a
+ * sequence table entry from the msgid.
+ */
+#define STORE_SEQ_IN_MSGID(seq, seqid) \
+	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
+
+#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
+	do {								\
+		seq = (((msgid) >> 26) & 0x3f);				\
+		seqid = ((msgid) & 0x3ffffff);				\
+	} while (0)
+
+#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
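+
+/*
+ * Worked example (illustrative): STORE_SEQ_IN_MSGID(5, 0x1234) yields
+ * (5 << 26) | 0x1234 = 0x14001234, and GET_SEQ_FROM_MSGID() on that
+ * value recovers seq = 5 and seqid = 0x1234.  NEXT_SEQID() wraps the
+ * 26-bit seqid back to zero after 0x3ffffff.
+ */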
+
+struct ipmi_channel {
+	unsigned char medium;
+	unsigned char protocol;
+
+	/*
+	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
+	 * but may be changed by the user.
+	 */
+	unsigned char address;
+
+	/*
+	 * My LUN.  This should generally stay the SMS LUN, but just in
+	 * case...
+	 */
+	unsigned char lun;
+};
+
+#ifdef CONFIG_PROC_FS
+struct ipmi_proc_entry {
+	char                   *name;
+	struct ipmi_proc_entry *next;
+};
+#endif
+
+struct bmc_device {
+	struct platform_device pdev;
+	struct ipmi_device_id  id;
+	unsigned char          guid[16];
+	int                    guid_set;
+	char                   name[16];
+	struct kref	       usecount;
+};
+#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
+
+/*
+ * Various statistics for IPMI, these index stats[] in the ipmi_smi
+ * structure.
+ */
+enum ipmi_stat_indexes {
+	/* Commands we got from the user that were invalid. */
+	IPMI_STAT_sent_invalid_commands = 0,
+
+	/* Commands we sent to the MC. */
+	IPMI_STAT_sent_local_commands,
+
+	/* Responses from the MC that were delivered to a user. */
+	IPMI_STAT_handled_local_responses,
+
+	/* Responses from the MC that were not delivered to a user. */
+	IPMI_STAT_unhandled_local_responses,
+
+	/* Commands we sent out to the IPMB bus. */
+	IPMI_STAT_sent_ipmb_commands,
+
+	/* Commands sent on the IPMB that had errors on the SEND CMD */
+	IPMI_STAT_sent_ipmb_command_errs,
+
+	/* Each retransmit increments this count. */
+	IPMI_STAT_retransmitted_ipmb_commands,
+
+	/*
+	 * When a message times out (runs out of retransmits) this is
+	 * incremented.
+	 */
+	IPMI_STAT_timed_out_ipmb_commands,
+
+	/*
+	 * This is like above, but for broadcasts.  Broadcasts are
+	 * *not* included in the above count (they are expected to
+	 * time out).
+	 */
+	IPMI_STAT_timed_out_ipmb_broadcasts,
+
+	/* Responses I have sent to the IPMB bus. */
+	IPMI_STAT_sent_ipmb_responses,
+
+	/* The response was delivered to the user. */
+	IPMI_STAT_handled_ipmb_responses,
+
+	/* The response had invalid data in it. */
+	IPMI_STAT_invalid_ipmb_responses,
+
+	/* The response didn't have anyone waiting for it. */
+	IPMI_STAT_unhandled_ipmb_responses,
+
+	/* Commands we sent out on the LAN. */
+	IPMI_STAT_sent_lan_commands,
+
+	/* Commands sent on the LAN that had errors on the SEND CMD */
+	IPMI_STAT_sent_lan_command_errs,
+
+	/* Each retransmit increments this count. */
+	IPMI_STAT_retransmitted_lan_commands,
+
+	/*
+	 * When a message times out (runs out of retransmits) this is
+	 * incremented.
+	 */
+	IPMI_STAT_timed_out_lan_commands,
+
+	/* Responses I have sent on the LAN. */
+	IPMI_STAT_sent_lan_responses,
+
+	/* The response was delivered to the user. */
+	IPMI_STAT_handled_lan_responses,
+
+	/* The response had invalid data in it. */
+	IPMI_STAT_invalid_lan_responses,
+
+	/* The response didn't have anyone waiting for it. */
+	IPMI_STAT_unhandled_lan_responses,
+
+	/* The command was delivered to the user. */
+	IPMI_STAT_handled_commands,
+
+	/* The command had invalid data in it. */
+	IPMI_STAT_invalid_commands,
+
+	/* The command didn't have anyone waiting for it. */
+	IPMI_STAT_unhandled_commands,
+
+	/* Invalid data in an event. */
+	IPMI_STAT_invalid_events,
+
+	/* Events that were received with the proper format. */
+	IPMI_STAT_events,
+
+	/* Retransmissions on IPMB that failed. */
+	IPMI_STAT_dropped_rexmit_ipmb_commands,
+
+	/* Retransmissions on LAN that failed. */
+	IPMI_STAT_dropped_rexmit_lan_commands,
+
+	/* This *must* remain last, add new values above this. */
+	IPMI_NUM_STATS
+};
+
+
+#define IPMI_IPMB_NUM_SEQ	64
+#define IPMI_MAX_CHANNELS       16
+struct ipmi_smi {
+	/* What interface number are we? */
+	int intf_num;
+
+	struct kref refcount;
+
+	/* Set when the interface is being unregistered. */
+	bool in_shutdown;
+
+	/* Used for a list of interfaces. */
+	struct list_head link;
+
+	/*
+	 * The list of upper layers that are using me.  seq_lock
+	 * protects this.
+	 */
+	struct list_head users;
+
+	/* Information to supply to users. */
+	unsigned char ipmi_version_major;
+	unsigned char ipmi_version_minor;
+
+	/* Used for wake ups at startup. */
+	wait_queue_head_t waitq;
+
+	struct bmc_device *bmc;
+	char *my_dev_name;
+
+	/*
+	 * This is the lower-layer's sender routine.  Note that you
+	 * must either be holding the ipmi_interfaces_mutex or be in
+	 * an unpreemptible region to use this.  You must fetch the
+	 * value into a local variable and make sure it is not NULL.
+	 */
+	const struct ipmi_smi_handlers *handlers;
+	void                     *send_info;
+
+#ifdef CONFIG_PROC_FS
+	/* A list of proc entries for this interface. */
+	struct mutex           proc_entry_lock;
+	struct ipmi_proc_entry *proc_entries;
+#endif
+
+	/* Driver-model device for the system interface. */
+	struct device          *si_dev;
+
+	/*
+	 * A table of sequence numbers for this interface.  We use the
+	 * sequence numbers for IPMB messages that go out of the
+	 * interface to match them up with their responses.  A routine
+	 * is called periodically to time the items in this list.
+	 */
+	spinlock_t       seq_lock;
+	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
+	int curr_seq;
+
+	/*
+	 * Messages queued for delivery.  If delivery fails (out of memory
+	 * for instance), they will stay in here to be processed later in a
+	 * periodic timer interrupt.  The tasklet is for handling received
+	 * messages directly from the handler.
+	 */
+	spinlock_t       waiting_rcv_msgs_lock;
+	struct list_head waiting_rcv_msgs;
+	atomic_t	 watchdog_pretimeouts_to_deliver;
+	struct tasklet_struct recv_tasklet;
+
+	spinlock_t             xmit_msgs_lock;
+	struct list_head       xmit_msgs;
+	struct ipmi_smi_msg    *curr_msg;
+	struct list_head       hp_xmit_msgs;
+
+	/*
+	 * The list of command receivers that are registered for commands
+	 * on this interface.
+	 */
+	struct mutex     cmd_rcvrs_mutex;
+	struct list_head cmd_rcvrs;
+
+	/*
+	 * Events that were queued because no one was there to receive
+	 * them.
+	 */
+	spinlock_t       events_lock; /* For dealing with event stuff. */
+	struct list_head waiting_events;
+	unsigned int     waiting_events_count; /* How many events in queue? */
+	char             delivering_events;
+	char             event_msg_printed;
+	atomic_t         event_waiters;
+	unsigned int     ticks_to_req_ev;
+	int              last_needs_timer;
+
+	/*
+	 * The event receiver for my BMC, only really used at panic
+	 * shutdown as a place to store this.
+	 */
+	unsigned char event_receiver;
+	unsigned char event_receiver_lun;
+	unsigned char local_sel_device;
+	unsigned char local_event_generator;
+
+	/* For handling of maintenance mode. */
+	int maintenance_mode;
+	bool maintenance_mode_enable;
+	int auto_maintenance_timeout;
+	spinlock_t maintenance_mode_lock; /* Used in a timer... */
+
+	/*
+	 * A cheap hack, if this is non-null and a message to an
+	 * interface comes in with a NULL user, call this routine with
+	 * it.  Note that the message will still be freed by the
+	 * caller.  This only works on the system interface.
+	 */
+	void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
+
+	/*
+	 * When we are scanning the channels for an SMI, this will
+	 * tell which channel we are scanning.
+	 */
+	int curr_channel;
+
+	/* Channel information */
+	struct ipmi_channel channels[IPMI_MAX_CHANNELS];
+
+	/* Proc FS stuff. */
+	struct proc_dir_entry *proc_dir;
+	char                  proc_dir_name[10];
+
+	atomic_t stats[IPMI_NUM_STATS];
+
+	/*
+	 * Duplicate of the run_to_completion flag in the smb_info,
+	 * smi_info and ipmi_serial_info structures.  Used to decrease
+	 * the number of parameters passed by "low" level IPMI code.
+	 */
+	int run_to_completion;
+};
+#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
+
+/*
+ * The driver model view of the IPMI messaging driver.
+ */
+static struct platform_driver ipmidriver = {
+	.driver = {
+		.name = "ipmi",
+		.bus = &platform_bus_type
+	}
+};
+static DEFINE_MUTEX(ipmidriver_mutex);
+
+static LIST_HEAD(ipmi_interfaces);
+static DEFINE_MUTEX(ipmi_interfaces_mutex);
+
+/*
+ * List of watchers that want to know when smi's are added and deleted.
+ */
+static LIST_HEAD(smi_watchers);
+static DEFINE_MUTEX(smi_watchers_mutex);
+
+#define ipmi_inc_stat(intf, stat) \
+	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
+#define ipmi_get_stat(intf, stat) \
+	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
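+
+/*
+ * Example (illustrative): ipmi_inc_stat(intf, sent_ipmb_commands)
+ * token-pastes the name and expands to
+ * atomic_inc(&intf->stats[IPMI_STAT_sent_ipmb_commands]).
+ */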
+
+static const char * const addr_src_to_str[] = {
+	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
+	"device-tree"
+};
+
+const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
+{
+	if (src >= SI_LAST)
+		src = 0; /* Invalid */
+	return addr_src_to_str[src];
+}
+EXPORT_SYMBOL(ipmi_addr_src_to_str);
+
+static int is_lan_addr(struct ipmi_addr *addr)
+{
+	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
+}
+
+static int is_ipmb_addr(struct ipmi_addr *addr)
+{
+	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
+}
+
+static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
+{
+	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
+}
+
+static void free_recv_msg_list(struct list_head *q)
+{
+	struct ipmi_recv_msg *msg, *msg2;
+
+	list_for_each_entry_safe(msg, msg2, q, link) {
+		list_del(&msg->link);
+		ipmi_free_recv_msg(msg);
+	}
+}
+
+static void free_smi_msg_list(struct list_head *q)
+{
+	struct ipmi_smi_msg *msg, *msg2;
+
+	list_for_each_entry_safe(msg, msg2, q, link) {
+		list_del(&msg->link);
+		ipmi_free_smi_msg(msg);
+	}
+}
+
+static void clean_up_interface_data(ipmi_smi_t intf)
+{
+	int              i;
+	struct cmd_rcvr  *rcvr, *rcvr2;
+	struct list_head list;
+
+	tasklet_kill(&intf->recv_tasklet);
+
+	free_smi_msg_list(&intf->waiting_rcv_msgs);
+	free_recv_msg_list(&intf->waiting_events);
+
+	/*
+	 * Wholesale remove all the entries from the list in the
+	 * interface and wait for RCU to know that none are in use.
+	 */
+	mutex_lock(&intf->cmd_rcvrs_mutex);
+	INIT_LIST_HEAD(&list);
+	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
+	mutex_unlock(&intf->cmd_rcvrs_mutex);
+
+	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
+		kfree(rcvr);
+
+	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+		if ((intf->seq_table[i].inuse)
+					&& (intf->seq_table[i].recv_msg))
+			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
+	}
+}
+
+static void intf_free(struct kref *ref)
+{
+	ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
+
+	clean_up_interface_data(intf);
+	kfree(intf);
+}
+
+struct watcher_entry {
+	int              intf_num;
+	ipmi_smi_t       intf;
+	struct list_head link;
+};
+
+int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
+{
+	ipmi_smi_t intf;
+	LIST_HEAD(to_deliver);
+	struct watcher_entry *e, *e2;
+
+	mutex_lock(&smi_watchers_mutex);
+
+	mutex_lock(&ipmi_interfaces_mutex);
+
+	/* Build a list of things to deliver. */
+	list_for_each_entry(intf, &ipmi_interfaces, link) {
+		if (intf->intf_num == -1)
+			continue;
+		e = kmalloc(sizeof(*e), GFP_KERNEL);
+		if (!e)
+			goto out_err;
+		kref_get(&intf->refcount);
+		e->intf = intf;
+		e->intf_num = intf->intf_num;
+		list_add_tail(&e->link, &to_deliver);
+	}
+
+	/* We will succeed, so add it to the list. */
+	list_add(&watcher->link, &smi_watchers);
+
+	mutex_unlock(&ipmi_interfaces_mutex);
+
+	list_for_each_entry_safe(e, e2, &to_deliver, link) {
+		list_del(&e->link);
+		watcher->new_smi(e->intf_num, e->intf->si_dev);
+		kref_put(&e->intf->refcount, intf_free);
+		kfree(e);
+	}
+
+	mutex_unlock(&smi_watchers_mutex);
+
+	return 0;
+
+ out_err:
+	mutex_unlock(&ipmi_interfaces_mutex);
+	mutex_unlock(&smi_watchers_mutex);
+	list_for_each_entry_safe(e, e2, &to_deliver, link) {
+		list_del(&e->link);
+		kref_put(&e->intf->refcount, intf_free);
+		kfree(e);
+	}
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(ipmi_smi_watcher_register);
+
+int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
+{
+	mutex_lock(&smi_watchers_mutex);
+	list_del(&(watcher->link));
+	mutex_unlock(&smi_watchers_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
+
+/*
+ * Must be called with smi_watchers_mutex held.
+ */
+static void
+call_smi_watchers(int i, struct device *dev)
+{
+	struct ipmi_smi_watcher *w;
+
+	list_for_each_entry(w, &smi_watchers, link) {
+		if (try_module_get(w->owner)) {
+			w->new_smi(i, dev);
+			module_put(w->owner);
+		}
+	}
+}
+
+static int
+ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
+{
+	if (addr1->addr_type != addr2->addr_type)
+		return 0;
+
+	if (addr1->channel != addr2->channel)
+		return 0;
+
+	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+		struct ipmi_system_interface_addr *smi_addr1
+		    = (struct ipmi_system_interface_addr *) addr1;
+		struct ipmi_system_interface_addr *smi_addr2
+		    = (struct ipmi_system_interface_addr *) addr2;
+		return (smi_addr1->lun == smi_addr2->lun);
+	}
+
+	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
+		struct ipmi_ipmb_addr *ipmb_addr1
+		    = (struct ipmi_ipmb_addr *) addr1;
+		struct ipmi_ipmb_addr *ipmb_addr2
+		    = (struct ipmi_ipmb_addr *) addr2;
+
+		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
+			&& (ipmb_addr1->lun == ipmb_addr2->lun));
+	}
+
+	if (is_lan_addr(addr1)) {
+		struct ipmi_lan_addr *lan_addr1
+			= (struct ipmi_lan_addr *) addr1;
+		struct ipmi_lan_addr *lan_addr2
+		    = (struct ipmi_lan_addr *) addr2;
+
+		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
+			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
+			&& (lan_addr1->session_handle
+			    == lan_addr2->session_handle)
+			&& (lan_addr1->lun == lan_addr2->lun));
+	}
+
+	return 1;
+}
+
+int ipmi_validate_addr(struct ipmi_addr *addr, int len)
+{
+	if (len < sizeof(struct ipmi_system_interface_addr))
+		return -EINVAL;
+
+	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+		if (addr->channel != IPMI_BMC_CHANNEL)
+			return -EINVAL;
+		return 0;
+	}
+
+	if ((addr->channel == IPMI_BMC_CHANNEL)
+	    || (addr->channel >= IPMI_MAX_CHANNELS)
+	    || (addr->channel < 0))
+		return -EINVAL;
+
+	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
+		if (len < sizeof(struct ipmi_ipmb_addr))
+			return -EINVAL;
+		return 0;
+	}
+
+	if (is_lan_addr(addr)) {
+		if (len < sizeof(struct ipmi_lan_addr))
+			return -EINVAL;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(ipmi_validate_addr);
+
+unsigned int ipmi_addr_length(int addr_type)
+{
+	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+		return sizeof(struct ipmi_system_interface_addr);
+
+	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
+			|| (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
+		return sizeof(struct ipmi_ipmb_addr);
+
+	if (addr_type == IPMI_LAN_ADDR_TYPE)
+		return sizeof(struct ipmi_lan_addr);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipmi_addr_length);
+
+static void deliver_response(struct ipmi_recv_msg *msg)
+{
+	if (!msg->user) {
+		ipmi_smi_t    intf = msg->user_msg_data;
+
+		/* Special handling for NULL users. */
+		if (intf->null_user_handler) {
+			intf->null_user_handler(intf, msg);
+			ipmi_inc_stat(intf, handled_local_responses);
+		} else {
+			/* No handler, so give up. */
+			ipmi_inc_stat(intf, unhandled_local_responses);
+		}
+		ipmi_free_recv_msg(msg);
+	} else if (!oops_in_progress) {
+		/*
+		 * If we are running in the panic context, calling the
+		 * receive handler doesn't much meaning and has a deadlock
+		 * risk.  At this moment, simply skip it in that case.
+		 */
+
+		ipmi_user_t user = msg->user;
+		user->handler->ipmi_recv_hndl(msg, user->handler_data);
+	}
+}
+
+static void
+deliver_err_response(struct ipmi_recv_msg *msg, int err)
+{
+	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+	msg->msg_data[0] = err;
+	msg->msg.netfn |= 1; /* Convert to a response. */
+	msg->msg.data_len = 1;
+	msg->msg.data = msg->msg_data;
+	deliver_response(msg);
+}
+
+/*
+ * Find the next sequence number not being used and add the given
+ * message with the given timeout to the sequence table.  This must be
+ * called with the interface's seq_lock held.
+ */
+static int intf_next_seq(ipmi_smi_t           intf,
+			 struct ipmi_recv_msg *recv_msg,
+			 unsigned long        timeout,
+			 int                  retries,
+			 int                  broadcast,
+			 unsigned char        *seq,
+			 long                 *seqid)
+{
+	int          rv = 0;
+	unsigned int i;
+
+	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
+					i = (i+1)%IPMI_IPMB_NUM_SEQ) {
+		if (!intf->seq_table[i].inuse)
+			break;
+	}
+
+	if (!intf->seq_table[i].inuse) {
+		intf->seq_table[i].recv_msg = recv_msg;
+
+		/*
+		 * Start with the maximum timeout, when the send response
+		 * comes in we will start the real timer.
+		 */
+		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
+		intf->seq_table[i].orig_timeout = timeout;
+		intf->seq_table[i].retries_left = retries;
+		intf->seq_table[i].broadcast = broadcast;
+		intf->seq_table[i].inuse = 1;
+		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
+		*seq = i;
+		*seqid = intf->seq_table[i].seqid;
+		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
+		need_waiter(intf);
+	} else {
+		rv = -EAGAIN;
+	}
+
+	return rv;
+}
+
+/*
+ * Return the receive message for the given sequence number and
+ * release the sequence number so it can be reused.  Some other data
+ * is passed in to be sure the message matches up correctly (to help
+ * guard against messages coming in after their timeout and the
+ * sequence number being reused).
+ */
+static int intf_find_seq(ipmi_smi_t           intf,
+			 unsigned char        seq,
+			 short                channel,
+			 unsigned char        cmd,
+			 unsigned char        netfn,
+			 struct ipmi_addr     *addr,
+			 struct ipmi_recv_msg **recv_msg)
+{
+	int           rv = -ENODEV;
+	unsigned long flags;
+
+	if (seq >= IPMI_IPMB_NUM_SEQ)
+		return -EINVAL;
+
+	spin_lock_irqsave(&(intf->seq_lock), flags);
+	if (intf->seq_table[seq].inuse) {
+		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
+
+		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
+				&& (msg->msg.netfn == netfn)
+				&& (ipmi_addr_equal(addr, &(msg->addr)))) {
+			*recv_msg = msg;
+			intf->seq_table[seq].inuse = 0;
+			rv = 0;
+		}
+	}
+	spin_unlock_irqrestore(&(intf->seq_lock), flags);
+
+	return rv;
+}
+
+
+/* Start the timer for a specific sequence table entry. */
+static int intf_start_seq_timer(ipmi_smi_t intf,
+				long       msgid)
+{
+	int           rv = -ENODEV;
+	unsigned long flags;
+	unsigned char seq;
+	unsigned long seqid;
+
+
+	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
+
+	spin_lock_irqsave(&(intf->seq_lock), flags);
+	/*
+	 * We do this verification because the user can be deleted
+	 * while a message is outstanding.
+	 */
+	if ((intf->seq_table[seq].inuse)
+				&& (intf->seq_table[seq].seqid == seqid)) {
+		struct seq_table *ent = &(intf->seq_table[seq]);
+		ent->timeout = ent->orig_timeout;
+		rv = 0;
+	}
+	spin_unlock_irqrestore(&(intf->seq_lock), flags);
+
+	return rv;
+}
+
+/* Got an error for the send message for a specific sequence number. */
+static int intf_err_seq(ipmi_smi_t   intf,
+			long         msgid,
+			unsigned int err)
+{
+	int                  rv = -ENODEV;
+	unsigned long        flags;
+	unsigned char        seq;
+	unsigned long        seqid;
+	struct ipmi_recv_msg *msg = NULL;
+
+
+	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
+
+	spin_lock_irqsave(&(intf->seq_lock), flags);
+	/*
+	 * We do this verification because the user can be deleted
+	 * while a message is outstanding.
+	 */
+	if ((intf->seq_table[seq].inuse)
+				&& (intf->seq_table[seq].seqid == seqid)) {
+		struct seq_table *ent = &(intf->seq_table[seq]);
+
+		ent->inuse = 0;
+		msg = ent->recv_msg;
+		rv = 0;
+	}
+	spin_unlock_irqrestore(&(intf->seq_lock), flags);
+
+	if (msg)
+		deliver_err_response(msg, err);
+
+	return rv;
+}
+
+
+int ipmi_create_user(unsigned int          if_num,
+		     const struct ipmi_user_hndl *handler,
+		     void                  *handler_data,
+		     ipmi_user_t           *user)
+{
+	unsigned long flags;
+	ipmi_user_t   new_user;
+	int           rv = 0;
+	ipmi_smi_t    intf;
+
+	/*
+	 * There is no module usecount here, because it's not
+	 * required.  Since this can only be used by and called from
+	 * other modules, they will implicitly use this module, and
+	 * thus this can't be removed unless the other modules are
+	 * removed.
+	 */
+
+	if (handler == NULL)
+		return -EINVAL;
+
+	/*
+	 * Make sure the driver is actually initialized, this handles
+	 * problems with initialization order.
+	 */
+	if (!initialized) {
+		rv = ipmi_init_msghandler();
+		if (rv)
+			return rv;
+
+		/*
+		 * The init code doesn't return an error if it was turned
+		 * off, but it won't initialize.  Check that.
+		 */
+		if (!initialized)
+			return -ENODEV;
+	}
+
+	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
+	if (!new_user)
+		return -ENOMEM;
+
+	mutex_lock(&ipmi_interfaces_mutex);
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		if (intf->intf_num == if_num)
+			goto found;
+	}
+	/* Not found, return an error */
+	rv = -EINVAL;
+	goto out_kfree;
+
+ found:
+	/* Note that each existing user holds a refcount to the interface. */
+	kref_get(&intf->refcount);
+
+	kref_init(&new_user->refcount);
+	new_user->handler = handler;
+	new_user->handler_data = handler_data;
+	new_user->intf = intf;
+	new_user->gets_events = false;
+
+	if (!try_module_get(intf->handlers->owner)) {
+		rv = -ENODEV;
+		goto out_kref;
+	}
+
+	if (intf->handlers->inc_usecount) {
+		rv = intf->handlers->inc_usecount(intf->send_info);
+		if (rv) {
+			module_put(intf->handlers->owner);
+			goto out_kref;
+		}
+	}
+
+	/*
+	 * The lock was held until now so that intf->handlers was
+	 * guaranteed to be good.
+	 */
+	mutex_unlock(&ipmi_interfaces_mutex);
+
+	new_user->valid = true;
+	spin_lock_irqsave(&intf->seq_lock, flags);
+	list_add_rcu(&new_user->link, &intf->users);
+	spin_unlock_irqrestore(&intf->seq_lock, flags);
+	if (handler->ipmi_watchdog_pretimeout) {
+		/* User wants pretimeouts, so make sure to watch for them. */
+		if (atomic_inc_return(&intf->event_waiters) == 1)
+			need_waiter(intf);
+	}
+	*user = new_user;
+	return 0;
+
+out_kref:
+	kref_put(&intf->refcount, intf_free);
+out_kfree:
+	mutex_unlock(&ipmi_interfaces_mutex);
+	kfree(new_user);
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_create_user);
+
+int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
+{
+	int           rv = 0;
+	ipmi_smi_t    intf;
+	const struct ipmi_smi_handlers *handlers;
+
+	mutex_lock(&ipmi_interfaces_mutex);
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		if (intf->intf_num == if_num)
+			goto found;
+	}
+	/* Not found, return an error */
+	rv = -EINVAL;
+	mutex_unlock(&ipmi_interfaces_mutex);
+	return rv;
+
+found:
+	handlers = intf->handlers;
+	rv = -ENOSYS;
+	if (handlers->get_smi_info)
+		rv = handlers->get_smi_info(intf->send_info, data);
+	mutex_unlock(&ipmi_interfaces_mutex);
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_get_smi_info);
+
+static void free_user(struct kref *ref)
+{
+	ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
+	kfree(user);
+}
+
+int ipmi_destroy_user(ipmi_user_t user)
+{
+	ipmi_smi_t       intf = user->intf;
+	int              i;
+	unsigned long    flags;
+	struct cmd_rcvr  *rcvr;
+	struct cmd_rcvr  *rcvrs = NULL;
+
+	user->valid = false;
+
+	if (user->handler->ipmi_watchdog_pretimeout)
+		atomic_dec(&intf->event_waiters);
+
+	if (user->gets_events)
+		atomic_dec(&intf->event_waiters);
+
+	/* Remove the user from the interface's sequence table. */
+	spin_lock_irqsave(&intf->seq_lock, flags);
+	list_del_rcu(&user->link);
+
+	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+		if (intf->seq_table[i].inuse
+		    && (intf->seq_table[i].recv_msg->user == user)) {
+			intf->seq_table[i].inuse = 0;
+			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
+		}
+	}
+	spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+	/*
+	 * Remove the user from the command receiver's table.  First
+	 * we build a list of everything (not using the standard link,
+	 * since other things may be using it till we do
+	 * synchronize_rcu()) then free everything in that list.
+	 */
+	mutex_lock(&intf->cmd_rcvrs_mutex);
+	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+		if (rcvr->user == user) {
+			list_del_rcu(&rcvr->link);
+			rcvr->next = rcvrs;
+			rcvrs = rcvr;
+		}
+	}
+	mutex_unlock(&intf->cmd_rcvrs_mutex);
+	synchronize_rcu();
+	while (rcvrs) {
+		rcvr = rcvrs;
+		rcvrs = rcvr->next;
+		kfree(rcvr);
+	}
+
+	mutex_lock(&ipmi_interfaces_mutex);
+	if (intf->handlers) {
+		module_put(intf->handlers->owner);
+		if (intf->handlers->dec_usecount)
+			intf->handlers->dec_usecount(intf->send_info);
+	}
+	mutex_unlock(&ipmi_interfaces_mutex);
+
+	kref_put(&intf->refcount, intf_free);
+
+	kref_put(&user->refcount, free_user);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipmi_destroy_user);
+
+void ipmi_get_version(ipmi_user_t   user,
+		      unsigned char *major,
+		      unsigned char *minor)
+{
+	*major = user->intf->ipmi_version_major;
+	*minor = user->intf->ipmi_version_minor;
+}
+EXPORT_SYMBOL(ipmi_get_version);
+
+int ipmi_set_my_address(ipmi_user_t   user,
+			unsigned int  channel,
+			unsigned char address)
+{
+	if (channel >= IPMI_MAX_CHANNELS)
+		return -EINVAL;
+	user->intf->channels[channel].address = address;
+	return 0;
+}
+EXPORT_SYMBOL(ipmi_set_my_address);
+
+int ipmi_get_my_address(ipmi_user_t   user,
+			unsigned int  channel,
+			unsigned char *address)
+{
+	if (channel >= IPMI_MAX_CHANNELS)
+		return -EINVAL;
+	*address = user->intf->channels[channel].address;
+	return 0;
+}
+EXPORT_SYMBOL(ipmi_get_my_address);
+
+int ipmi_set_my_LUN(ipmi_user_t   user,
+		    unsigned int  channel,
+		    unsigned char LUN)
+{
+	if (channel >= IPMI_MAX_CHANNELS)
+		return -EINVAL;
+	user->intf->channels[channel].lun = LUN & 0x3;
+	return 0;
+}
+EXPORT_SYMBOL(ipmi_set_my_LUN);
+
+int ipmi_get_my_LUN(ipmi_user_t   user,
+		    unsigned int  channel,
+		    unsigned char *LUN)
+{
+	if (channel >= IPMI_MAX_CHANNELS)
+		return -EINVAL;
+	*LUN = user->intf->channels[channel].lun;
+	return 0;
+}
+EXPORT_SYMBOL(ipmi_get_my_LUN);
+
+int ipmi_get_maintenance_mode(ipmi_user_t user)
+{
+	int           mode;
+	unsigned long flags;
+
+	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
+	mode = user->intf->maintenance_mode;
+	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
+
+	return mode;
+}
+EXPORT_SYMBOL(ipmi_get_maintenance_mode);
+
+static void maintenance_mode_update(ipmi_smi_t intf)
+{
+	if (intf->handlers->set_maintenance_mode)
+		intf->handlers->set_maintenance_mode(
+			intf->send_info, intf->maintenance_mode_enable);
+}
+
+int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
+{
+	int           rv = 0;
+	unsigned long flags;
+	ipmi_smi_t    intf = user->intf;
+
+	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+	if (intf->maintenance_mode != mode) {
+		switch (mode) {
+		case IPMI_MAINTENANCE_MODE_AUTO:
+			intf->maintenance_mode_enable
+				= (intf->auto_maintenance_timeout > 0);
+			break;
+
+		case IPMI_MAINTENANCE_MODE_OFF:
+			intf->maintenance_mode_enable = false;
+			break;
+
+		case IPMI_MAINTENANCE_MODE_ON:
+			intf->maintenance_mode_enable = true;
+			break;
+
+		default:
+			rv = -EINVAL;
+			goto out_unlock;
+		}
+		intf->maintenance_mode = mode;
+
+		maintenance_mode_update(intf);
+	}
+ out_unlock:
+	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_set_maintenance_mode);
+
+int ipmi_set_gets_events(ipmi_user_t user, bool val)
+{
+	unsigned long        flags;
+	ipmi_smi_t           intf = user->intf;
+	struct ipmi_recv_msg *msg, *msg2;
+	struct list_head     msgs;
+
+	INIT_LIST_HEAD(&msgs);
+
+	spin_lock_irqsave(&intf->events_lock, flags);
+	if (user->gets_events == val)
+		goto out;
+
+	user->gets_events = val;
+
+	if (val) {
+		if (atomic_inc_return(&intf->event_waiters) == 1)
+			need_waiter(intf);
+	} else {
+		atomic_dec(&intf->event_waiters);
+	}
+
+	if (intf->delivering_events)
+		/*
+		 * Another thread is delivering events for this, so
+		 * let it handle any new events.
+		 */
+		goto out;
+
+	/* Deliver any queued events. */
+	while (user->gets_events && !list_empty(&intf->waiting_events)) {
+		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
+			list_move_tail(&msg->link, &msgs);
+		intf->waiting_events_count = 0;
+		if (intf->event_msg_printed) {
+			printk(KERN_WARNING PFX "Event queue no longer full\n");
+			intf->event_msg_printed = 0;
+		}
+
+		intf->delivering_events = 1;
+		spin_unlock_irqrestore(&intf->events_lock, flags);
+
+		list_for_each_entry_safe(msg, msg2, &msgs, link) {
+			msg->user = user;
+			kref_get(&user->refcount);
+			deliver_response(msg);
+		}
+
+		spin_lock_irqsave(&intf->events_lock, flags);
+		intf->delivering_events = 0;
+	}
+
+ out:
+	spin_unlock_irqrestore(&intf->events_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipmi_set_gets_events);
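+
+/*
+ * Illustrative note: a user that wants asynchronous events calls
+ * ipmi_set_gets_events(user, true); queued and future events are then
+ * delivered to the user's ipmi_recv_hndl with recv_type set to
+ * IPMI_ASYNC_EVENT_RECV_TYPE.
+ */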
+
+static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t    intf,
+				      unsigned char netfn,
+				      unsigned char cmd,
+				      unsigned char chan)
+{
+	struct cmd_rcvr *rcvr;
+
+	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
+					&& (rcvr->chans & (1 << chan)))
+			return rcvr;
+	}
+	return NULL;
+}
+
+static int is_cmd_rcvr_exclusive(ipmi_smi_t    intf,
+				 unsigned char netfn,
+				 unsigned char cmd,
+				 unsigned int  chans)
+{
+	struct cmd_rcvr *rcvr;
+
+	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
+					&& (rcvr->chans & chans))
+			return 0;
+	}
+	return 1;
+}
+
+int ipmi_register_for_cmd(ipmi_user_t   user,
+			  unsigned char netfn,
+			  unsigned char cmd,
+			  unsigned int  chans)
+{
+	ipmi_smi_t      intf = user->intf;
+	struct cmd_rcvr *rcvr;
+	int             rv = 0;
+
+	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
+	if (!rcvr)
+		return -ENOMEM;
+	rcvr->cmd = cmd;
+	rcvr->netfn = netfn;
+	rcvr->chans = chans;
+	rcvr->user = user;
+
+	mutex_lock(&intf->cmd_rcvrs_mutex);
+	/* Make sure the command/netfn is not already registered. */
+	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
+		rv = -EBUSY;
+		goto out_unlock;
+	}
+
+	if (atomic_inc_return(&intf->event_waiters) == 1)
+		need_waiter(intf);
+
+	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
+
+ out_unlock:
+	mutex_unlock(&intf->cmd_rcvrs_mutex);
+	if (rv)
+		kfree(rcvr);
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_register_for_cmd);
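+
+/*
+ * Illustrative sketch: chans is a bitmask of channel numbers.  To
+ * receive a (hypothetical) OEM command, netfn 0x30 / cmd 0x01, on
+ * every channel:
+ *
+ *	rv = ipmi_register_for_cmd(user, 0x30, 0x01, IPMI_CHAN_ALL);
+ *
+ * A second registration that overlaps the same netfn/cmd/channel
+ * fails with -EBUSY.
+ */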
+
+int ipmi_unregister_for_cmd(ipmi_user_t   user,
+			    unsigned char netfn,
+			    unsigned char cmd,
+			    unsigned int  chans)
+{
+	ipmi_smi_t      intf = user->intf;
+	struct cmd_rcvr *rcvr;
+	struct cmd_rcvr *rcvrs = NULL;
+	int i, rv = -ENOENT;
+
+	mutex_lock(&intf->cmd_rcvrs_mutex);
+	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
+		if (((1 << i) & chans) == 0)
+			continue;
+		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
+		if (rcvr == NULL)
+			continue;
+		if (rcvr->user == user) {
+			rv = 0;
+			rcvr->chans &= ~chans;
+			if (rcvr->chans == 0) {
+				list_del_rcu(&rcvr->link);
+				rcvr->next = rcvrs;
+				rcvrs = rcvr;
+			}
+		}
+	}
+	mutex_unlock(&intf->cmd_rcvrs_mutex);
+	synchronize_rcu();
+	while (rcvrs) {
+		atomic_dec(&intf->event_waiters);
+		rcvr = rcvrs;
+		rcvrs = rcvr->next;
+		kfree(rcvr);
+	}
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_unregister_for_cmd);
+
+static unsigned char
+ipmb_checksum(unsigned char *data, int size)
+{
+	unsigned char csum = 0;
+
+	for (; size > 0; size--, data++)
+		csum += *data;
+
+	return -csum;
+}
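+
+/*
+ * Worked example: for the two header bytes 0x20 and 0x18 the sum is
+ * 0x38, so ipmb_checksum() returns (unsigned char)-0x38 = 0xc8.  A
+ * receiver summing 0x20 + 0x18 + 0xc8 gets 0x100, which is 0 modulo
+ * 256, as the IPMB spec requires.
+ */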
+
+static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
+				   struct kernel_ipmi_msg *msg,
+				   struct ipmi_ipmb_addr *ipmb_addr,
+				   long                  msgid,
+				   unsigned char         ipmb_seq,
+				   int                   broadcast,
+				   unsigned char         source_address,
+				   unsigned char         source_lun)
+{
+	int i = broadcast;
+
+	/* Format the IPMB header data. */
+	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
+	smi_msg->data[2] = ipmb_addr->channel;
+	if (broadcast)
+		smi_msg->data[3] = 0;
+	smi_msg->data[i+3] = ipmb_addr->slave_addr;
+	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
+	smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
+	smi_msg->data[i+6] = source_address;
+	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
+	smi_msg->data[i+8] = msg->cmd;
+
+	/* Now tack on the data to the message. */
+	if (msg->data_len > 0)
+		memcpy(&(smi_msg->data[i+9]), msg->data,
+		       msg->data_len);
+	smi_msg->data_size = msg->data_len + 9;
+
+	/* Now calculate the checksum and tack it on. */
+	smi_msg->data[i+smi_msg->data_size]
+		= ipmb_checksum(&(smi_msg->data[i+6]),
+				smi_msg->data_size-6);
+
+	/*
+	 * Add on the checksum size and the offset from the
+	 * broadcast.
+	 */
+	smi_msg->data_size += 1 + i;
+
+	smi_msg->msgid = msgid;
+}
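+
+/*
+ * For reference, the SMI buffer produced above for a non-broadcast
+ * (i == 0) IPMB request is laid out as:
+ *
+ *	data[0] netfn/LUN of the Send Message command
+ *	data[1] IPMI_SEND_MSG_CMD
+ *	data[2] channel
+ *	data[3] rsSA    data[4] netfn/rsLUN   data[5] checksum 1
+ *	data[6] rqSA    data[7] rqSeq/rqLUN   data[8] cmd
+ *	data[9]...      message data, followed by checksum 2
+ *
+ * A broadcast inserts a zero at data[3] and shifts everything from
+ * the slave address onward up by one byte.
+ */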
+
+static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
+				  struct kernel_ipmi_msg *msg,
+				  struct ipmi_lan_addr  *lan_addr,
+				  long                  msgid,
+				  unsigned char         ipmb_seq,
+				  unsigned char         source_lun)
+{
+	/* Format the IPMB header data. */
+	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
+	smi_msg->data[2] = lan_addr->channel;
+	smi_msg->data[3] = lan_addr->session_handle;
+	smi_msg->data[4] = lan_addr->remote_SWID;
+	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
+	smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
+	smi_msg->data[7] = lan_addr->local_SWID;
+	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
+	smi_msg->data[9] = msg->cmd;
+
+	/* Now tack on the data to the message. */
+	if (msg->data_len > 0)
+		memcpy(&(smi_msg->data[10]), msg->data,
+		       msg->data_len);
+	smi_msg->data_size = msg->data_len + 10;
+
+	/* Now calculate the checksum and tack it on. */
+	smi_msg->data[smi_msg->data_size]
+		= ipmb_checksum(&(smi_msg->data[7]),
+				smi_msg->data_size-7);
+
+	/*
+	 * Add on the checksum size and the offset from the
+	 * broadcast.
+	 */
+	smi_msg->data_size += 1;
+
+	smi_msg->msgid = msgid;
+}
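+
+/*
+ * Similarly, the SMI buffer produced above for a LAN request is:
+ *
+ *	data[0] netfn/LUN of the Send Message command
+ *	data[1] IPMI_SEND_MSG_CMD
+ *	data[2] channel         data[3] session handle
+ *	data[4] remote SWID     data[5] netfn/LUN    data[6] checksum 1
+ *	data[7] local SWID      data[8] seq/LUN      data[9] cmd
+ *	data[10]...             message data, followed by checksum 2
+ */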
+
+static struct ipmi_smi_msg *smi_add_send_msg(ipmi_smi_t intf,
+					     struct ipmi_smi_msg *smi_msg,
+					     int priority)
+{
+	if (intf->curr_msg) {
+		if (priority > 0)
+			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
+		else
+			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
+		smi_msg = NULL;
+	} else {
+		intf->curr_msg = smi_msg;
+	}
+
+	return smi_msg;
+}
+
+static void smi_send(ipmi_smi_t intf, const struct ipmi_smi_handlers *handlers,
+		     struct ipmi_smi_msg *smi_msg, int priority)
+{
+	int run_to_completion = intf->run_to_completion;
+
+	if (run_to_completion) {
+		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+	} else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+	}
+
+	if (smi_msg)
+		handlers->sender(intf->send_info, smi_msg);
+}
+
+/*
+ * Separate from ipmi_request so that the user does not have to be
+ * supplied in certain circumstances (mainly at panic time).  If
+ * messages are supplied, they will be freed, even if an error
+ * occurs.
+ */
+static int i_ipmi_request(ipmi_user_t          user,
+			  ipmi_smi_t           intf,
+			  struct ipmi_addr     *addr,
+			  long                 msgid,
+			  struct kernel_ipmi_msg *msg,
+			  void                 *user_msg_data,
+			  void                 *supplied_smi,
+			  struct ipmi_recv_msg *supplied_recv,
+			  int                  priority,
+			  unsigned char        source_address,
+			  unsigned char        source_lun,
+			  int                  retries,
+			  unsigned int         retry_time_ms)
+{
+	int                      rv = 0;
+	struct ipmi_smi_msg      *smi_msg;
+	struct ipmi_recv_msg     *recv_msg;
+	unsigned long            flags;
+
+	if (supplied_recv)
+		recv_msg = supplied_recv;
+	else {
+		recv_msg = ipmi_alloc_recv_msg();
+		if (recv_msg == NULL)
+			return -ENOMEM;
+	}
+	recv_msg->user_msg_data = user_msg_data;
+
+	if (supplied_smi)
+		smi_msg = (struct ipmi_smi_msg *) supplied_smi;
+	else {
+		smi_msg = ipmi_alloc_smi_msg();
+		if (smi_msg == NULL) {
+			ipmi_free_recv_msg(recv_msg);
+			return -ENOMEM;
+		}
+	}
+
+	rcu_read_lock();
+	if (intf->in_shutdown) {
+		rv = -ENODEV;
+		goto out_err;
+	}
+
+	recv_msg->user = user;
+	if (user)
+		kref_get(&user->refcount);
+	recv_msg->msgid = msgid;
+	/*
+	 * Store the message to send in the receive message so timeout
+	 * responses can get the proper response data.
+	 */
+	recv_msg->msg = *msg;
+
+	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+		struct ipmi_system_interface_addr *smi_addr;
+
+		if (msg->netfn & 1) {
+			/* Responses are not allowed to the SMI. */
+			rv = -EINVAL;
+			goto out_err;
+		}
+
+		smi_addr = (struct ipmi_system_interface_addr *) addr;
+		if (smi_addr->lun > 3) {
+			ipmi_inc_stat(intf, sent_invalid_commands);
+			rv = -EINVAL;
+			goto out_err;
+		}
+
+		memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
+
+		if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
+		    && ((msg->cmd == IPMI_SEND_MSG_CMD)
+			|| (msg->cmd == IPMI_GET_MSG_CMD)
+			|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
+			/*
+			 * We don't let the user do these, since we manage
+			 * the sequence numbers.
+			 */
+			ipmi_inc_stat(intf, sent_invalid_commands);
+			rv = -EINVAL;
+			goto out_err;
+		}
+
+		if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
+		      && ((msg->cmd == IPMI_COLD_RESET_CMD)
+			  || (msg->cmd == IPMI_WARM_RESET_CMD)))
+		     || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) {
+			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+			intf->auto_maintenance_timeout
+				= IPMI_MAINTENANCE_MODE_TIMEOUT;
+			if (!intf->maintenance_mode
+			    && !intf->maintenance_mode_enable) {
+				intf->maintenance_mode_enable = true;
+				maintenance_mode_update(intf);
+			}
+			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+					       flags);
+		}
+
+		if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
+			ipmi_inc_stat(intf, sent_invalid_commands);
+			rv = -EMSGSIZE;
+			goto out_err;
+		}
+
+		smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
+		smi_msg->data[1] = msg->cmd;
+		smi_msg->msgid = msgid;
+		smi_msg->user_data = recv_msg;
+		if (msg->data_len > 0)
+			memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
+		smi_msg->data_size = msg->data_len + 2;
+		ipmi_inc_stat(intf, sent_local_commands);
+	} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
+		struct ipmi_ipmb_addr *ipmb_addr;
+		unsigned char         ipmb_seq;
+		long                  seqid;
+		int                   broadcast = 0;
+
+		if (addr->channel >= IPMI_MAX_CHANNELS) {
+			ipmi_inc_stat(intf, sent_invalid_commands);
+			rv = -EINVAL;
+			goto out_err;
+		}
+
+		if (intf->channels[addr->channel].medium
+					!= IPMI_CHANNEL_MEDIUM_IPMB) {
+			ipmi_inc_stat(intf, sent_invalid_commands);
+			rv = -EINVAL;
+			goto out_err;
+		}
+
+		if (retries < 0) {
+		    if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
+			retries = 0; /* Don't retry broadcasts. */
+		    else
+			retries = 4;
+		}
+		if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
+		    /*
+		     * Broadcasts add a zero at the beginning of the
+		     * message, but are otherwise the same as an IPMB
+		     * address.
+		     */
+		    addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+		    broadcast = 1;
+		}
+
+		/* Default to 1 second retries. */
+		if (retry_time_ms == 0)
+		    retry_time_ms = 1000;
+
+		/*
+		 * 9 for the header and 1 for the checksum, plus
+		 * possibly one for the broadcast.
+		 */
+		if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
+			ipmi_inc_stat(intf, sent_invalid_commands);
+			rv = -EMSGSIZE;
+			goto out_err;
+		}
+
+		ipmb_addr = (struct ipmi_ipmb_addr *) addr;
+		if (ipmb_addr->lun > 3) {
+			ipmi_inc_stat(intf, sent_invalid_commands);
+			rv = -EINVAL;
+			goto out_err;
+		}
+
+		memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
+
+		if (recv_msg->msg.netfn & 0x1) {
+			/*
+			 * It's a response, so use the user's sequence
+			 * from msgid.
+			 */
+			ipmi_inc_stat(intf, sent_ipmb_responses);
+			format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
+					msgid, broadcast,
+					source_address, source_lun);
+
+			/*
+			 * Save the receive message so we can use it
+			 * to deliver the response.
+			 */
+			smi_msg->user_data = recv_msg;
+		} else {
+			/* It's a command, so get a sequence for it. */
+
+			spin_lock_irqsave(&(intf->seq_lock), flags);
+
+			/*
+			 * Create a sequence number with a 1 second
+			 * timeout and 4 retries.
+			 */
+			rv = intf_next_seq(intf,
+					   recv_msg,
+					   retry_time_ms,
+					   retries,
+					   broadcast,
+					   &ipmb_seq,
+					   &seqid);
+			if (rv) {
+				/*
+				 * We have probably used up all the
+				 * sequence numbers, so abort.
+				 */
+				spin_unlock_irqrestore(&(intf->seq_lock),
+						       flags);
+				goto out_err;
+			}
+
+			ipmi_inc_stat(intf, sent_ipmb_commands);
+
+			/*
+			 * Store the sequence number in the message,
+			 * so that when the send message response
+			 * comes back we can start the timer.
+			 */
+			format_ipmb_msg(smi_msg, msg, ipmb_addr,
+					STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
+					ipmb_seq, broadcast,
+					source_address, source_lun);
+
+			/*
+			 * Copy the message into the recv message data, so we
+			 * can retransmit it later if necessary.
+			 */
+			memcpy(recv_msg->msg_data, smi_msg->data,
+			       smi_msg->data_size);
+			recv_msg->msg.data = recv_msg->msg_data;
+			recv_msg->msg.data_len = smi_msg->data_size;
+
+			/*
+			 * We don't unlock until here, because we need
+			 * to copy the completed message into the
+			 * recv_msg before we release the lock.
+			 * Otherwise, race conditions may bite us.  I
+			 * know that's pretty paranoid, but I prefer
+			 * to be correct.
+			 */
+			spin_unlock_irqrestore(&(intf->seq_lock), flags);
+		}
+	} else if (is_lan_addr(addr)) {
+		struct ipmi_lan_addr  *lan_addr;
+		unsigned char         ipmb_seq;
+		long                  seqid;
+
+		if (addr->channel >= IPMI_MAX_CHANNELS) {
+			ipmi_inc_stat(intf, sent_invalid_commands);
+			rv = -EINVAL;
+			goto out_err;
+		}
+
+		if ((intf->channels[addr->channel].medium
+				!= IPMI_CHANNEL_MEDIUM_8023LAN)
+		    && (intf->channels[addr->channel].medium
+				!= IPMI_CHANNEL_MEDIUM_ASYNC)) {
+			ipmi_inc_stat(intf, sent_invalid_commands);
+			rv = -EINVAL;
+			goto out_err;
+		}
+
+		retries = 4;
+
+		/* Default to 1 second retries. */
+		if (retry_time_ms == 0)
+		    retry_time_ms = 1000;
+
+		/* 11 for the header and 1 for the checksum. */
+		if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
+			ipmi_inc_stat(intf, sent_invalid_commands);
+			rv = -EMSGSIZE;
+			goto out_err;
+		}
+
+		lan_addr = (struct ipmi_lan_addr *) addr;
+		if (lan_addr->lun > 3) {
+			ipmi_inc_stat(intf, sent_invalid_commands);
+			rv = -EINVAL;
+			goto out_err;
+		}
+
+		memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
+
+		if (recv_msg->msg.netfn & 0x1) {
+			/*
+			 * It's a response, so use the user's sequence
+			 * from msgid.
+			 */
+			ipmi_inc_stat(intf, sent_lan_responses);
+			format_lan_msg(smi_msg, msg, lan_addr, msgid,
+				       msgid, source_lun);
+
+			/*
+			 * Save the receive message so we can use it
+			 * to deliver the response.
+			 */
+			smi_msg->user_data = recv_msg;
+		} else {
+			/* It's a command, so get a sequence for it. */
+
+			spin_lock_irqsave(&(intf->seq_lock), flags);
+
+			/*
+			 * Create a sequence number with a 1 second
+			 * timeout and 4 retries.
+			 */
+			rv = intf_next_seq(intf,
+					   recv_msg,
+					   retry_time_ms,
+					   retries,
+					   0,
+					   &ipmb_seq,
+					   &seqid);
+			if (rv) {
+				/*
+				 * We have probably used up all the
+				 * sequence numbers, so abort.
+				 */
+				spin_unlock_irqrestore(&(intf->seq_lock),
+						       flags);
+				goto out_err;
+			}
+
+			ipmi_inc_stat(intf, sent_lan_commands);
+
+			/*
+			 * Store the sequence number in the message,
+			 * so that when the send message response
+			 * comes back we can start the timer.
+			 */
+			format_lan_msg(smi_msg, msg, lan_addr,
+				       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
+				       ipmb_seq, source_lun);
+
+			/*
+			 * Copy the message into the recv message data, so we
+			 * can retransmit it later if necessary.
+			 */
+			memcpy(recv_msg->msg_data, smi_msg->data,
+			       smi_msg->data_size);
+			recv_msg->msg.data = recv_msg->msg_data;
+			recv_msg->msg.data_len = smi_msg->data_size;
+
+			/*
+			 * We don't unlock until here, because we need
+			 * to copy the completed message into the
+			 * recv_msg before we release the lock.
+			 * Otherwise, race conditions may bite us.  I
+			 * know that's pretty paranoid, but I prefer
+			 * to be correct.
+			 */
+			spin_unlock_irqrestore(&(intf->seq_lock), flags);
+		}
+	} else {
+		/* Unknown address type. */
+		ipmi_inc_stat(intf, sent_invalid_commands);
+		rv = -EINVAL;
+		goto out_err;
+	}
+
+#ifdef DEBUG_MSGING
+	{
+		int m;
+		for (m = 0; m < smi_msg->data_size; m++)
+			printk(" %2.2x", smi_msg->data[m]);
+		printk("\n");
+	}
+#endif
+
+	smi_send(intf, intf->handlers, smi_msg, priority);
+	rcu_read_unlock();
+
+	return 0;
+
+ out_err:
+	rcu_read_unlock();
+	ipmi_free_smi_msg(smi_msg);
+	ipmi_free_recv_msg(recv_msg);
+	return rv;
+}
+
+static int check_addr(ipmi_smi_t       intf,
+		      struct ipmi_addr *addr,
+		      unsigned char    *saddr,
+		      unsigned char    *lun)
+{
+	if (addr->channel >= IPMI_MAX_CHANNELS)
+		return -EINVAL;
+	*lun = intf->channels[addr->channel].lun;
+	*saddr = intf->channels[addr->channel].address;
+	return 0;
+}
+
+int ipmi_request_settime(ipmi_user_t      user,
+			 struct ipmi_addr *addr,
+			 long             msgid,
+			 struct kernel_ipmi_msg  *msg,
+			 void             *user_msg_data,
+			 int              priority,
+			 int              retries,
+			 unsigned int     retry_time_ms)
+{
+	unsigned char saddr = 0, lun = 0;
+	int           rv;
+
+	if (!user)
+		return -EINVAL;
+	rv = check_addr(user->intf, addr, &saddr, &lun);
+	if (rv)
+		return rv;
+	return i_ipmi_request(user,
+			      user->intf,
+			      addr,
+			      msgid,
+			      msg,
+			      user_msg_data,
+			      NULL, NULL,
+			      priority,
+			      saddr,
+			      lun,
+			      retries,
+			      retry_time_ms);
+}
+EXPORT_SYMBOL(ipmi_request_settime);
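+
+/*
+ * Illustrative sketch, not part of the driver: sending Get Device ID
+ * to the local BMC with the default retry policy would look roughly
+ * like this (user and msgid come from the caller):
+ *
+ *	struct ipmi_system_interface_addr si = {
+ *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
+ *		.channel   = IPMI_BMC_CHANNEL,
+ *	};
+ *	struct kernel_ipmi_msg msg = {
+ *		.netfn = IPMI_NETFN_APP_REQUEST,
+ *		.cmd   = IPMI_GET_DEVICE_ID_CMD,
+ *	};
+ *
+ *	rv = ipmi_request_settime(user, (struct ipmi_addr *) &si,
+ *				  msgid, &msg, NULL, 0, -1, 0);
+ *
+ * The response is delivered through the user's receive handler.
+ */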
+
+int ipmi_request_supply_msgs(ipmi_user_t          user,
+			     struct ipmi_addr     *addr,
+			     long                 msgid,
+			     struct kernel_ipmi_msg *msg,
+			     void                 *user_msg_data,
+			     void                 *supplied_smi,
+			     struct ipmi_recv_msg *supplied_recv,
+			     int                  priority)
+{
+	unsigned char saddr = 0, lun = 0;
+	int           rv;
+
+	if (!user)
+		return -EINVAL;
+	rv = check_addr(user->intf, addr, &saddr, &lun);
+	if (rv)
+		return rv;
+	return i_ipmi_request(user,
+			      user->intf,
+			      addr,
+			      msgid,
+			      msg,
+			      user_msg_data,
+			      supplied_smi,
+			      supplied_recv,
+			      priority,
+			      saddr,
+			      lun,
+			      -1, 0);
+}
+EXPORT_SYMBOL(ipmi_request_supply_msgs);
+
+#ifdef CONFIG_PROC_FS
+static int smi_ipmb_proc_show(struct seq_file *m, void *v)
+{
+	ipmi_smi_t intf = m->private;
+	int        i;
+
+	seq_printf(m, "%x", intf->channels[0].address);
+	for (i = 1; i < IPMI_MAX_CHANNELS; i++)
+		seq_printf(m, " %x", intf->channels[i].address);
+	seq_putc(m, '\n');
+
+	return 0;
+}
+
+static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, smi_ipmb_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations smi_ipmb_proc_ops = {
+	.open		= smi_ipmb_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int smi_version_proc_show(struct seq_file *m, void *v)
+{
+	ipmi_smi_t intf = m->private;
+
+	seq_printf(m, "%u.%u\n",
+		   ipmi_version_major(&intf->bmc->id),
+		   ipmi_version_minor(&intf->bmc->id));
+
+	return 0;
+}
+
+static int smi_version_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, smi_version_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations smi_version_proc_ops = {
+	.open		= smi_version_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int smi_stats_proc_show(struct seq_file *m, void *v)
+{
+	ipmi_smi_t intf = m->private;
+
+	seq_printf(m, "sent_invalid_commands:       %u\n",
+		       ipmi_get_stat(intf, sent_invalid_commands));
+	seq_printf(m, "sent_local_commands:         %u\n",
+		       ipmi_get_stat(intf, sent_local_commands));
+	seq_printf(m, "handled_local_responses:     %u\n",
+		       ipmi_get_stat(intf, handled_local_responses));
+	seq_printf(m, "unhandled_local_responses:   %u\n",
+		       ipmi_get_stat(intf, unhandled_local_responses));
+	seq_printf(m, "sent_ipmb_commands:          %u\n",
+		       ipmi_get_stat(intf, sent_ipmb_commands));
+	seq_printf(m, "sent_ipmb_command_errs:      %u\n",
+		       ipmi_get_stat(intf, sent_ipmb_command_errs));
+	seq_printf(m, "retransmitted_ipmb_commands: %u\n",
+		       ipmi_get_stat(intf, retransmitted_ipmb_commands));
+	seq_printf(m, "timed_out_ipmb_commands:     %u\n",
+		       ipmi_get_stat(intf, timed_out_ipmb_commands));
+	seq_printf(m, "timed_out_ipmb_broadcasts:   %u\n",
+		       ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
+	seq_printf(m, "sent_ipmb_responses:         %u\n",
+		       ipmi_get_stat(intf, sent_ipmb_responses));
+	seq_printf(m, "handled_ipmb_responses:      %u\n",
+		       ipmi_get_stat(intf, handled_ipmb_responses));
+	seq_printf(m, "invalid_ipmb_responses:      %u\n",
+		       ipmi_get_stat(intf, invalid_ipmb_responses));
+	seq_printf(m, "unhandled_ipmb_responses:    %u\n",
+		       ipmi_get_stat(intf, unhandled_ipmb_responses));
+	seq_printf(m, "sent_lan_commands:           %u\n",
+		       ipmi_get_stat(intf, sent_lan_commands));
+	seq_printf(m, "sent_lan_command_errs:       %u\n",
+		       ipmi_get_stat(intf, sent_lan_command_errs));
+	seq_printf(m, "retransmitted_lan_commands:  %u\n",
+		       ipmi_get_stat(intf, retransmitted_lan_commands));
+	seq_printf(m, "timed_out_lan_commands:      %u\n",
+		       ipmi_get_stat(intf, timed_out_lan_commands));
+	seq_printf(m, "sent_lan_responses:          %u\n",
+		       ipmi_get_stat(intf, sent_lan_responses));
+	seq_printf(m, "handled_lan_responses:       %u\n",
+		       ipmi_get_stat(intf, handled_lan_responses));
+	seq_printf(m, "invalid_lan_responses:       %u\n",
+		       ipmi_get_stat(intf, invalid_lan_responses));
+	seq_printf(m, "unhandled_lan_responses:     %u\n",
+		       ipmi_get_stat(intf, unhandled_lan_responses));
+	seq_printf(m, "handled_commands:            %u\n",
+		       ipmi_get_stat(intf, handled_commands));
+	seq_printf(m, "invalid_commands:            %u\n",
+		       ipmi_get_stat(intf, invalid_commands));
+	seq_printf(m, "unhandled_commands:          %u\n",
+		       ipmi_get_stat(intf, unhandled_commands));
+	seq_printf(m, "invalid_events:              %u\n",
+		       ipmi_get_stat(intf, invalid_events));
+	seq_printf(m, "events:                      %u\n",
+		       ipmi_get_stat(intf, events));
+	seq_printf(m, "failed rexmit LAN msgs:      %u\n",
+		       ipmi_get_stat(intf, dropped_rexmit_lan_commands));
+	seq_printf(m, "failed rexmit IPMB msgs:     %u\n",
+		       ipmi_get_stat(intf, dropped_rexmit_ipmb_commands));
+	return 0;
+}
+
+static int smi_stats_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, smi_stats_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations smi_stats_proc_ops = {
+	.open		= smi_stats_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+#endif /* CONFIG_PROC_FS */
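+
+/*
+ * With CONFIG_PROC_FS the entries above appear as, e.g.,
+ * /proc/ipmi/0/stats, /proc/ipmi/0/ipmb and /proc/ipmi/0/version;
+ * "ipmb" prints the per-channel slave addresses and "version" the
+ * BMC's IPMI version, such as "2.0".
+ */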
+
+int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
+			    const struct file_operations *proc_ops,
+			    void *data)
+{
+	int                    rv = 0;
+#ifdef CONFIG_PROC_FS
+	struct proc_dir_entry  *file;
+	struct ipmi_proc_entry *entry;
+
+	/* Create a list element. */
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+	entry->name = kstrdup(name, GFP_KERNEL);
+	if (!entry->name) {
+		kfree(entry);
+		return -ENOMEM;
+	}
+
+	file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
+	if (!file) {
+		kfree(entry->name);
+		kfree(entry);
+		rv = -ENOMEM;
+	} else {
+		mutex_lock(&smi->proc_entry_lock);
+		/* Stick it on the list. */
+		entry->next = smi->proc_entries;
+		smi->proc_entries = entry;
+		mutex_unlock(&smi->proc_entry_lock);
+	}
+#endif /* CONFIG_PROC_FS */
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
+
+static int add_proc_entries(ipmi_smi_t smi, int num)
+{
+	int rv = 0;
+
+#ifdef CONFIG_PROC_FS
+	sprintf(smi->proc_dir_name, "%d", num);
+	smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
+	if (!smi->proc_dir)
+		rv = -ENOMEM;
+
+	if (rv == 0)
+		rv = ipmi_smi_add_proc_entry(smi, "stats",
+					     &smi_stats_proc_ops,
+					     smi);
+
+	if (rv == 0)
+		rv = ipmi_smi_add_proc_entry(smi, "ipmb",
+					     &smi_ipmb_proc_ops,
+					     smi);
+
+	if (rv == 0)
+		rv = ipmi_smi_add_proc_entry(smi, "version",
+					     &smi_version_proc_ops,
+					     smi);
+#endif /* CONFIG_PROC_FS */
+
+	return rv;
+}
+
+static void remove_proc_entries(ipmi_smi_t smi)
+{
+#ifdef CONFIG_PROC_FS
+	struct ipmi_proc_entry *entry;
+
+	mutex_lock(&smi->proc_entry_lock);
+	while (smi->proc_entries) {
+		entry = smi->proc_entries;
+		smi->proc_entries = entry->next;
+
+		remove_proc_entry(entry->name, smi->proc_dir);
+		kfree(entry->name);
+		kfree(entry);
+	}
+	mutex_unlock(&smi->proc_entry_lock);
+	remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
+#endif /* CONFIG_PROC_FS */
+}
+
+static int __find_bmc_guid(struct device *dev, void *data)
+{
+	unsigned char *id = data;
+	struct bmc_device *bmc = to_bmc_device(dev);
+	return memcmp(bmc->guid, id, 16) == 0;
+}
+
+static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
+					     unsigned char *guid)
+{
+	struct device *dev;
+
+	dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
+	if (dev)
+		return to_bmc_device(dev);
+	else
+		return NULL;
+}
+
+struct prod_dev_id {
+	unsigned int  product_id;
+	unsigned char device_id;
+};
+
+static int __find_bmc_prod_dev_id(struct device *dev, void *data)
+{
+	struct prod_dev_id *id = data;
+	struct bmc_device *bmc = to_bmc_device(dev);
+
+	return (bmc->id.product_id == id->product_id
+		&& bmc->id.device_id == id->device_id);
+}
+
+static struct bmc_device *ipmi_find_bmc_prod_dev_id(
+	struct device_driver *drv,
+	unsigned int product_id, unsigned char device_id)
+{
+	struct prod_dev_id id = {
+		.product_id = product_id,
+		.device_id = device_id,
+	};
+	struct device *dev;
+
+	dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
+	if (dev)
+		return to_bmc_device(dev);
+	else
+		return NULL;
+}
+
+static ssize_t device_id_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+
+	return snprintf(buf, 10, "%u\n", bmc->id.device_id);
+}
+static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
+
+static ssize_t provides_device_sdrs_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+
+	return snprintf(buf, 10, "%u\n",
+			(bmc->id.device_revision & 0x80) >> 7);
+}
+static DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show,
+		   NULL);
+
+static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+
+	return snprintf(buf, 20, "%u\n",
+			bmc->id.device_revision & 0x0F);
+}
+static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
+
+static ssize_t firmware_revision_show(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+
+	return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
+			bmc->id.firmware_revision_2);
+}
+static DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
+
+static ssize_t ipmi_version_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+
+	return snprintf(buf, 20, "%u.%u\n",
+			ipmi_version_major(&bmc->id),
+			ipmi_version_minor(&bmc->id));
+}
+static DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
+
+static ssize_t add_dev_support_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+
+	return snprintf(buf, 10, "0x%02x\n",
+			bmc->id.additional_device_support);
+}
+static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
+		   NULL);
+
+static ssize_t manufacturer_id_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+
+	return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
+}
+static DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
+
+static ssize_t product_id_show(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+
+	return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
+}
+static DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
+
+static ssize_t aux_firmware_rev_show(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+
+	return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
+			bmc->id.aux_firmware_revision[3],
+			bmc->id.aux_firmware_revision[2],
+			bmc->id.aux_firmware_revision[1],
+			bmc->id.aux_firmware_revision[0]);
+}
+static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
+
+static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct bmc_device *bmc = to_bmc_device(dev);
+
+	return snprintf(buf, 100, "%Lx%Lx\n",
+			(long long) bmc->guid[0],
+			(long long) bmc->guid[8]);
+}
+static DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
+
+static struct attribute *bmc_dev_attrs[] = {
+	&dev_attr_device_id.attr,
+	&dev_attr_provides_device_sdrs.attr,
+	&dev_attr_revision.attr,
+	&dev_attr_firmware_revision.attr,
+	&dev_attr_ipmi_version.attr,
+	&dev_attr_additional_device_support.attr,
+	&dev_attr_manufacturer_id.attr,
+	&dev_attr_product_id.attr,
+	&dev_attr_aux_firmware_revision.attr,
+	&dev_attr_guid.attr,
+	NULL
+};
+
+static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
+				       struct attribute *attr, int idx)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct bmc_device *bmc = to_bmc_device(dev);
+	umode_t mode = attr->mode;
+
+	if (attr == &dev_attr_aux_firmware_revision.attr)
+		return bmc->id.aux_firmware_revision_set ? mode : 0;
+	if (attr == &dev_attr_guid.attr)
+		return bmc->guid_set ? mode : 0;
+	return mode;
+}
+
+static const struct attribute_group bmc_dev_attr_group = {
+	.attrs		= bmc_dev_attrs,
+	.is_visible	= bmc_dev_attr_is_visible,
+};
+
+static const struct attribute_group *bmc_dev_attr_groups[] = {
+	&bmc_dev_attr_group,
+	NULL
+};
+
+static const struct device_type bmc_device_type = {
+	.groups		= bmc_dev_attr_groups,
+};
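+
+/*
+ * These attributes appear under the BMC platform device, e.g.
+ * /sys/devices/platform/ipmi_bmc.<prod_id>.<dev_id>/device_id;
+ * aux_firmware_revision and guid are only visible when the BMC
+ * actually reported them (see bmc_dev_attr_is_visible() above).
+ */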
+
+static void
+release_bmc_device(struct device *dev)
+{
+	kfree(to_bmc_device(dev));
+}
+
+static void
+cleanup_bmc_device(struct kref *ref)
+{
+	struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
+
+	platform_device_unregister(&bmc->pdev);
+}
+
+static void ipmi_bmc_unregister(ipmi_smi_t intf)
+{
+	struct bmc_device *bmc = intf->bmc;
+
+	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
+	if (intf->my_dev_name) {
+		sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
+		kfree(intf->my_dev_name);
+		intf->my_dev_name = NULL;
+	}
+
+	mutex_lock(&ipmidriver_mutex);
+	kref_put(&bmc->usecount, cleanup_bmc_device);
+	intf->bmc = NULL;
+	mutex_unlock(&ipmidriver_mutex);
+}
+
+static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
+{
+	int               rv;
+	struct bmc_device *bmc = intf->bmc;
+	struct bmc_device *old_bmc;
+
+	mutex_lock(&ipmidriver_mutex);
+
+	/*
+	 * Try to find out whether a bmc_device struct already
+	 * represents the interfaced BMC.
+	 */
+	if (bmc->guid_set)
+		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, bmc->guid);
+	else
+		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
+						    bmc->id.product_id,
+						    bmc->id.device_id);
+
+	/*
+	 * If there is already a bmc_device, free the new one;
+	 * otherwise register the new BMC device.
+	 */
+	if (old_bmc) {
+		kfree(bmc);
+		intf->bmc = old_bmc;
+		bmc = old_bmc;
+
+		kref_get(&bmc->usecount);
+		mutex_unlock(&ipmidriver_mutex);
+
+		printk(KERN_INFO
+		       "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
+		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+		       bmc->id.manufacturer_id,
+		       bmc->id.product_id,
+		       bmc->id.device_id);
+	} else {
+		unsigned char orig_dev_id = bmc->id.device_id;
+		int warn_printed = 0;
+
+		snprintf(bmc->name, sizeof(bmc->name),
+			 "ipmi_bmc.%4.4x", bmc->id.product_id);
+		bmc->pdev.name = bmc->name;
+
+		while (ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
+						 bmc->id.product_id,
+						 bmc->id.device_id)) {
+			if (!warn_printed) {
+				printk(KERN_WARNING PFX
+				       "This machine has two different BMCs"
+				       " with the same product id and device"
+				       " id.  This is an error in the"
+				       " firmware; incrementing the device"
+				       " id to work around the problem."
+				       " Prod ID = 0x%x, Dev ID = 0x%x\n",
+				       bmc->id.product_id, bmc->id.device_id);
+				warn_printed = 1;
+			}
+			bmc->id.device_id++; /* Wraps at 255 */
+			if (bmc->id.device_id == orig_dev_id) {
+				printk(KERN_ERR PFX
+				       "Out of device ids!\n");
+				break;
+			}
+		}
+
+		bmc->pdev.dev.driver = &ipmidriver.driver;
+		bmc->pdev.id = bmc->id.device_id;
+		bmc->pdev.dev.release = release_bmc_device;
+		bmc->pdev.dev.type = &bmc_device_type;
+		kref_init(&bmc->usecount);
+
+		rv = platform_device_register(&bmc->pdev);
+		mutex_unlock(&ipmidriver_mutex);
+		if (rv) {
+			put_device(&bmc->pdev.dev);
+			printk(KERN_ERR
+			       "ipmi_msghandler:"
+			       " Unable to register bmc device: %d\n",
+			       rv);
+			/*
+			 * Don't go to out_err; you can only do that if
+			 * the device is already registered.
+			 */
+			return rv;
+		}
+
+		dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
+			 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+			 bmc->id.manufacturer_id,
+			 bmc->id.product_id,
+			 bmc->id.device_id);
+	}
+
+	/*
+	 * Create symlinks from the system interface device to the bmc
+	 * device and back.
+	 */
+	rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
+	if (rv) {
+		printk(KERN_ERR
+		       "ipmi_msghandler: Unable to create bmc symlink: %d\n",
+		       rv);
+		goto out_err;
+	}
+
+	intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", ifnum);
+	if (!intf->my_dev_name) {
+		rv = -ENOMEM;
+		printk(KERN_ERR
+		       "ipmi_msghandler: Unable to allocate link from BMC: %d\n",
+		       rv);
+		goto out_err;
+	}
+
+	rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
+			       intf->my_dev_name);
+	if (rv) {
+		kfree(intf->my_dev_name);
+		intf->my_dev_name = NULL;
+		printk(KERN_ERR
+		       "ipmi_msghandler:"
+		       " Unable to create symlink to bmc: %d\n",
+		       rv);
+		goto out_err;
+	}
+
+	return 0;
+
+out_err:
+	ipmi_bmc_unregister(intf);
+	return rv;
+}
+
+static int
+send_guid_cmd(ipmi_smi_t intf, int chan)
+{
+	struct kernel_ipmi_msg            msg;
+	struct ipmi_system_interface_addr si;
+
+	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	si.channel = IPMI_BMC_CHANNEL;
+	si.lun = 0;
+
+	msg.netfn = IPMI_NETFN_APP_REQUEST;
+	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
+	msg.data = NULL;
+	msg.data_len = 0;
+	return i_ipmi_request(NULL,
+			      intf,
+			      (struct ipmi_addr *) &si,
+			      0,
+			      &msg,
+			      intf,
+			      NULL,
+			      NULL,
+			      0,
+			      intf->channels[0].address,
+			      intf->channels[0].lun,
+			      -1, 0);
+}
+
+static void
+guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
+{
+	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
+	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
+		/* Not for me */
+		return;
+
+	if (msg->msg.data[0] != 0) {
+		/* Error from getting the GUID, the BMC doesn't have one. */
+		intf->bmc->guid_set = 0;
+		goto out;
+	}
+
+	if (msg->msg.data_len < 17) {
+		intf->bmc->guid_set = 0;
+		printk(KERN_WARNING PFX
+		       "guid_handler: The GUID response from the BMC was too"
+		       " short; it was %d bytes but should have been 17."
+		       "  Assuming the GUID is not available.\n",
+		       msg->msg.data_len);
+		goto out;
+	}
+
+	memcpy(intf->bmc->guid, msg->msg.data, 16);
+	intf->bmc->guid_set = 1;
+ out:
+	wake_up(&intf->waitq);
+}
+
+static void
+get_guid(ipmi_smi_t intf)
+{
+	int rv;
+
+	intf->bmc->guid_set = 2;
+	intf->null_user_handler = guid_handler;
+	rv = send_guid_cmd(intf, 0);
+	if (rv)
+		/* Send failed, no GUID available. */
+		intf->bmc->guid_set = 0;
+	else
+		wait_event(intf->waitq, intf->bmc->guid_set != 2);
+
+	intf->null_user_handler = NULL;
+}
+
+static int
+send_channel_info_cmd(ipmi_smi_t intf, int chan)
+{
+	struct kernel_ipmi_msg            msg;
+	unsigned char                     data[1];
+	struct ipmi_system_interface_addr si;
+
+	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	si.channel = IPMI_BMC_CHANNEL;
+	si.lun = 0;
+
+	msg.netfn = IPMI_NETFN_APP_REQUEST;
+	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
+	msg.data = data;
+	msg.data_len = 1;
+	data[0] = chan;
+	return i_ipmi_request(NULL,
+			      intf,
+			      (struct ipmi_addr *) &si,
+			      0,
+			      &msg,
+			      intf,
+			      NULL,
+			      NULL,
+			      0,
+			      intf->channels[0].address,
+			      intf->channels[0].lun,
+			      -1, 0);
+}
+
+static void
+channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
+{
+	int rv = 0;
+	int chan;
+
+	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
+	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
+		/* It's the one we want */
+		if (msg->msg.data[0] != 0) {
+			/* Got an error from the channel, just go on. */
+
+			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
+				/*
+				 * If the MC does not support this
+				 * command, that is legal.  We just
+				 * assume it has a single IPMB channel
+				 * at channel zero.
+				 */
+				intf->channels[0].medium
+					= IPMI_CHANNEL_MEDIUM_IPMB;
+				intf->channels[0].protocol
+					= IPMI_CHANNEL_PROTOCOL_IPMB;
+
+				intf->curr_channel = IPMI_MAX_CHANNELS;
+				wake_up(&intf->waitq);
+				goto out;
+			}
+			goto next_channel;
+		}
+		if (msg->msg.data_len < 4) {
+			/* Message not big enough, just go on. */
+			goto next_channel;
+		}
+		chan = intf->curr_channel;
+		intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
+		intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
+
+ next_channel:
+		intf->curr_channel++;
+		if (intf->curr_channel >= IPMI_MAX_CHANNELS)
+			wake_up(&intf->waitq);
+		else
+			rv = send_channel_info_cmd(intf, intf->curr_channel);
+
+		if (rv) {
+			/* Got an error somehow, just give up. */
+			printk(KERN_WARNING PFX
+			       "Error sending channel information for channel"
+			       " %d: %d\n", intf->curr_channel, rv);
+
+			intf->curr_channel = IPMI_MAX_CHANNELS;
+			wake_up(&intf->waitq);
+		}
+	}
+ out:
+	return;
+}
+
+static void ipmi_poll(ipmi_smi_t intf)
+{
+	if (intf->handlers->poll)
+		intf->handlers->poll(intf->send_info);
+	/* In case something came in */
+	handle_new_recv_msgs(intf);
+}
+
+void ipmi_poll_interface(ipmi_user_t user)
+{
+	ipmi_poll(user->intf);
+}
+EXPORT_SYMBOL(ipmi_poll_interface);
+
+int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
+		      void		       *send_info,
+		      struct ipmi_device_id    *device_id,
+		      struct device            *si_dev,
+		      unsigned char            slave_addr)
+{
+	int              i, j;
+	int              rv;
+	ipmi_smi_t       intf;
+	ipmi_smi_t       tintf;
+	struct list_head *link;
+
+	/*
+	 * Make sure the driver is actually initialized, this handles
+	 * problems with initialization order.
+	 */
+	if (!initialized) {
+		rv = ipmi_init_msghandler();
+		if (rv)
+			return rv;
+		/*
+		 * The init code doesn't return an error if it was
+		 * turned off, but then nothing is initialized.
+		 * Check for that.
+		 */
+		if (!initialized)
+			return -ENODEV;
+	}
+
+	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
+	if (!intf)
+		return -ENOMEM;
+
+	intf->ipmi_version_major = ipmi_version_major(device_id);
+	intf->ipmi_version_minor = ipmi_version_minor(device_id);
+
+	intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
+	if (!intf->bmc) {
+		kfree(intf);
+		return -ENOMEM;
+	}
+	intf->intf_num = -1; /* Mark it invalid for now. */
+	kref_init(&intf->refcount);
+	intf->bmc->id = *device_id;
+	intf->si_dev = si_dev;
+	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
+		intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
+		intf->channels[j].lun = 2;
+	}
+	if (slave_addr != 0)
+		intf->channels[0].address = slave_addr;
+	INIT_LIST_HEAD(&intf->users);
+	intf->handlers = handlers;
+	intf->send_info = send_info;
+	spin_lock_init(&intf->seq_lock);
+	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
+		intf->seq_table[j].inuse = 0;
+		intf->seq_table[j].seqid = 0;
+	}
+	intf->curr_seq = 0;
+#ifdef CONFIG_PROC_FS
+	mutex_init(&intf->proc_entry_lock);
+#endif
+	spin_lock_init(&intf->waiting_rcv_msgs_lock);
+	INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
+	tasklet_init(&intf->recv_tasklet,
+		     smi_recv_tasklet,
+		     (unsigned long) intf);
+	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
+	spin_lock_init(&intf->xmit_msgs_lock);
+	INIT_LIST_HEAD(&intf->xmit_msgs);
+	INIT_LIST_HEAD(&intf->hp_xmit_msgs);
+	spin_lock_init(&intf->events_lock);
+	atomic_set(&intf->event_waiters, 0);
+	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+	INIT_LIST_HEAD(&intf->waiting_events);
+	intf->waiting_events_count = 0;
+	mutex_init(&intf->cmd_rcvrs_mutex);
+	spin_lock_init(&intf->maintenance_mode_lock);
+	INIT_LIST_HEAD(&intf->cmd_rcvrs);
+	init_waitqueue_head(&intf->waitq);
+	for (i = 0; i < IPMI_NUM_STATS; i++)
+		atomic_set(&intf->stats[i], 0);
+
+	intf->proc_dir = NULL;
+
+	mutex_lock(&smi_watchers_mutex);
+	mutex_lock(&ipmi_interfaces_mutex);
+	/* Look for a hole in the numbers. */
+	i = 0;
+	link = &ipmi_interfaces;
+	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
+		if (tintf->intf_num != i) {
+			link = &tintf->link;
+			break;
+		}
+		i++;
+	}
+	/* Add the new interface in numeric order. */
+	if (i == 0)
+		list_add_rcu(&intf->link, &ipmi_interfaces);
+	else
+		list_add_tail_rcu(&intf->link, link);
+
+	rv = handlers->start_processing(send_info, intf);
+	if (rv)
+		goto out;
+
+	get_guid(intf);
+
+	if ((intf->ipmi_version_major > 1)
+			|| ((intf->ipmi_version_major == 1)
+			    && (intf->ipmi_version_minor >= 5))) {
+		/*
+		 * Start scanning the channels to see what is
+		 * available.
+		 */
+		intf->null_user_handler = channel_handler;
+		intf->curr_channel = 0;
+		rv = send_channel_info_cmd(intf, 0);
+		if (rv) {
+			printk(KERN_WARNING PFX
+			       "Error sending channel information for"
+			       " channel 0: %d\n", rv);
+			goto out;
+		}
+
+		/* Wait for the channel info to be read. */
+		wait_event(intf->waitq,
+			   intf->curr_channel >= IPMI_MAX_CHANNELS);
+		intf->null_user_handler = NULL;
+	} else {
+		/* Assume a single IPMB channel at zero. */
+		intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
+		intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
+		intf->curr_channel = IPMI_MAX_CHANNELS;
+	}
+
+	rv = ipmi_bmc_register(intf, i);
+
+	if (rv == 0)
+		rv = add_proc_entries(intf, i);
+
+ out:
+	if (rv) {
+		if (intf->proc_dir)
+			remove_proc_entries(intf);
+		intf->handlers = NULL;
+		list_del_rcu(&intf->link);
+		mutex_unlock(&ipmi_interfaces_mutex);
+		mutex_unlock(&smi_watchers_mutex);
+		synchronize_rcu();
+		kref_put(&intf->refcount, intf_free);
+	} else {
+		/*
+		 * Keep memory order straight for RCU readers.  Make
+		 * sure everything else is committed to memory before
+		 * setting intf_num to mark the interface valid.
+		 */
+		smp_wmb();
+		intf->intf_num = i;
+		mutex_unlock(&ipmi_interfaces_mutex);
+		/* After this point the interface is legal to use. */
+		call_smi_watchers(i, intf->si_dev);
+		mutex_unlock(&smi_watchers_mutex);
+	}
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_register_smi);
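+
+/*
+ * Illustrative sketch: a system interface driver (ipmi_si, for
+ * example) registers itself roughly like this once its hardware is
+ * probed, where info is the driver's private handle (its fields here
+ * are invented for the example):
+ *
+ *	rv = ipmi_register_smi(&handlers, info, &info->device_id,
+ *			       info->dev, info->slave_addr);
+ *
+ * and calls ipmi_unregister_smi(intf) on removal.
+ */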
+
+static void deliver_smi_err_response(ipmi_smi_t intf,
+				     struct ipmi_smi_msg *msg,
+				     unsigned char err)
+{
+	msg->rsp[0] = msg->data[0] | 4; /* turn the netfn into a response */
+	msg->rsp[1] = msg->data[1];
+	msg->rsp[2] = err;
+	msg->rsp_size = 3;
+	/* It's an error, so it will never requeue, no need to check return. */
+	handle_one_recv_msg(intf, msg);
+}
+
+static void cleanup_smi_msgs(ipmi_smi_t intf)
+{
+	int              i;
+	struct seq_table *ent;
+	struct ipmi_smi_msg *msg;
+	struct list_head *entry;
+	struct list_head tmplist;
+
+	/* Clear out our transmit queues and hold the messages. */
+	INIT_LIST_HEAD(&tmplist);
+	list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
+	list_splice_tail(&intf->xmit_msgs, &tmplist);
+
+	/* Current message first, to preserve order */
+	while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
+		/* Wait for the message to clear out. */
+		schedule_timeout(1);
+	}
+
+	/* No need for locks, the interface is down. */
+
+	/*
+	 * Return errors for all pending messages in queue and in the
+	 * tables waiting for remote responses.
+	 */
+	while (!list_empty(&tmplist)) {
+		entry = tmplist.next;
+		list_del(entry);
+		msg = list_entry(entry, struct ipmi_smi_msg, link);
+		deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
+	}
+
+	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+		ent = &(intf->seq_table[i]);
+		if (!ent->inuse)
+			continue;
+		deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
+	}
+}
+
+int ipmi_unregister_smi(ipmi_smi_t intf)
+{
+	struct ipmi_smi_watcher *w;
+	int intf_num = intf->intf_num;
+	ipmi_user_t user;
+
+	mutex_lock(&smi_watchers_mutex);
+	mutex_lock(&ipmi_interfaces_mutex);
+	intf->intf_num = -1;
+	intf->in_shutdown = true;
+	list_del_rcu(&intf->link);
+	mutex_unlock(&ipmi_interfaces_mutex);
+	synchronize_rcu();
+
+	cleanup_smi_msgs(intf);
+
+	/* Clean up the effects of users on the lower-level software. */
+	mutex_lock(&ipmi_interfaces_mutex);
+	rcu_read_lock();
+	list_for_each_entry_rcu(user, &intf->users, link) {
+		module_put(intf->handlers->owner);
+		if (intf->handlers->dec_usecount)
+			intf->handlers->dec_usecount(intf->send_info);
+	}
+	rcu_read_unlock();
+	intf->handlers = NULL;
+	mutex_unlock(&ipmi_interfaces_mutex);
+
+	remove_proc_entries(intf);
+	ipmi_bmc_unregister(intf);
+
+	/*
+	 * Call all the watcher interfaces to tell them that
+	 * an interface is gone.
+	 */
+	list_for_each_entry(w, &smi_watchers, link)
+		w->smi_gone(intf_num);
+	mutex_unlock(&smi_watchers_mutex);
+
+	kref_put(&intf->refcount, intf_free);
+	return 0;
+}
+EXPORT_SYMBOL(ipmi_unregister_smi);
+
+static int handle_ipmb_get_msg_rsp(ipmi_smi_t          intf,
+				   struct ipmi_smi_msg *msg)
+{
+	struct ipmi_ipmb_addr ipmb_addr;
+	struct ipmi_recv_msg  *recv_msg;
+
+	/*
+	 * This is 11, not 10, because the response must contain a
+	 * completion code.
+	 */
+	if (msg->rsp_size < 11) {
+		/* Message not big enough, just ignore it. */
+		ipmi_inc_stat(intf, invalid_ipmb_responses);
+		return 0;
+	}
+
+	if (msg->rsp[2] != 0) {
+		/* An error getting the response, just ignore it. */
+		return 0;
+	}
+
+	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
+	ipmb_addr.slave_addr = msg->rsp[6];
+	ipmb_addr.channel = msg->rsp[3] & 0x0f;
+	ipmb_addr.lun = msg->rsp[7] & 3;
+
+	/*
+	 * It's a response from a remote entity.  Look up the sequence
+	 * number and handle the response.
+	 */
+	if (intf_find_seq(intf,
+			  msg->rsp[7] >> 2,
+			  msg->rsp[3] & 0x0f,
+			  msg->rsp[8],
+			  (msg->rsp[4] >> 2) & (~1),
+			  (struct ipmi_addr *) &(ipmb_addr),
+			  &recv_msg)) {
+		/*
+		 * We were unable to find the sequence number,
+		 * so just nuke the message.
+		 */
+		ipmi_inc_stat(intf, unhandled_ipmb_responses);
+		return 0;
+	}
+
+	memcpy(recv_msg->msg_data,
+	       &(msg->rsp[9]),
+	       msg->rsp_size - 9);
+	/*
+	 * The other fields matched, so no need to set them, except
+	 * for netfn, which needs to be the response that was
+	 * returned, not the request value.
+	 */
+	recv_msg->msg.netfn = msg->rsp[4] >> 2;
+	recv_msg->msg.data = recv_msg->msg_data;
+	recv_msg->msg.data_len = msg->rsp_size - 10;
+	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+	ipmi_inc_stat(intf, handled_ipmb_responses);
+	deliver_response(recv_msg);
+
+	return 0;
+}
+
+static int handle_ipmb_get_msg_cmd(ipmi_smi_t          intf,
+				   struct ipmi_smi_msg *msg)
+{
+	struct cmd_rcvr          *rcvr;
+	int                      rv = 0;
+	unsigned char            netfn;
+	unsigned char            cmd;
+	unsigned char            chan;
+	ipmi_user_t              user = NULL;
+	struct ipmi_ipmb_addr    *ipmb_addr;
+	struct ipmi_recv_msg     *recv_msg;
+
+	if (msg->rsp_size < 10) {
+		/* Message not big enough, just ignore it. */
+		ipmi_inc_stat(intf, invalid_commands);
+		return 0;
+	}
+
+	if (msg->rsp[2] != 0) {
+		/* An error getting the response, just ignore it. */
+		return 0;
+	}
+
+	netfn = msg->rsp[4] >> 2;
+	cmd = msg->rsp[8];
+	chan = msg->rsp[3] & 0xf;
+
+	rcu_read_lock();
+	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+	if (rcvr) {
+		user = rcvr->user;
+		kref_get(&user->refcount);
+	} else {
+		user = NULL;
+	}
+	rcu_read_unlock();
+
+	if (user == NULL) {
+		/* We didn't find a user, deliver an error response. */
+		ipmi_inc_stat(intf, unhandled_commands);
+
+		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+		msg->data[1] = IPMI_SEND_MSG_CMD;
+		msg->data[2] = msg->rsp[3];
+		msg->data[3] = msg->rsp[6];
+		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
+		msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
+		msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
+		/* rqseq/lun */
+		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
+		msg->data[8] = msg->rsp[8]; /* cmd */
+		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
+		msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
+		msg->data_size = 11;
+
+#ifdef DEBUG_MSGING
+	{
+		int m;
+		printk("Invalid command:");
+		for (m = 0; m < msg->data_size; m++)
+			printk(" %2.2x", msg->data[m]);
+		printk("\n");
+	}
+#endif
+		rcu_read_lock();
+		if (!intf->in_shutdown) {
+			smi_send(intf, intf->handlers, msg, 0);
+			/*
+			 * We used the message, so return the value
+			 * that causes it to not be freed or
+			 * queued.
+			 */
+			rv = -1;
+		}
+		rcu_read_unlock();
+	} else {
+		/* Deliver the message to the user. */
+		ipmi_inc_stat(intf, handled_commands);
+
+		recv_msg = ipmi_alloc_recv_msg();
+		if (!recv_msg) {
+			/*
+			 * We couldn't allocate memory for the
+			 * message, so requeue it for handling
+			 * later.
+			 */
+			rv = 1;
+			kref_put(&user->refcount, free_user);
+		} else {
+			/* Extract the source address from the data. */
+			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
+			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+			ipmb_addr->slave_addr = msg->rsp[6];
+			ipmb_addr->lun = msg->rsp[7] & 3;
+			ipmb_addr->channel = msg->rsp[3] & 0xf;
+
+			/*
+			 * Extract the rest of the message information
+			 * from the IPMB header.
+			 */
+			recv_msg->user = user;
+			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+			recv_msg->msgid = msg->rsp[7] >> 2;
+			recv_msg->msg.netfn = msg->rsp[4] >> 2;
+			recv_msg->msg.cmd = msg->rsp[8];
+			recv_msg->msg.data = recv_msg->msg_data;
+
+			/*
+			 * We chop off 10, not 9, bytes because the checksum
+			 * at the end also needs to be removed.
+			 */
+			recv_msg->msg.data_len = msg->rsp_size - 10;
+			memcpy(recv_msg->msg_data,
+			       &(msg->rsp[9]),
+			       msg->rsp_size - 10);
+			deliver_response(recv_msg);
+		}
+	}
+
+	return rv;
+}
+
+static int handle_lan_get_msg_rsp(ipmi_smi_t          intf,
+				  struct ipmi_smi_msg *msg)
+{
+	struct ipmi_lan_addr  lan_addr;
+	struct ipmi_recv_msg  *recv_msg;
+
+	/*
+	 * This is 13, not 12, because the response must contain a
+	 * completion code.
+	 */
+	if (msg->rsp_size < 13) {
+		/* Message not big enough, just ignore it. */
+		ipmi_inc_stat(intf, invalid_lan_responses);
+		return 0;
+	}
+
+	if (msg->rsp[2] != 0) {
+		/* An error getting the response, just ignore it. */
+		return 0;
+	}
+
+	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
+	lan_addr.session_handle = msg->rsp[4];
+	lan_addr.remote_SWID = msg->rsp[8];
+	lan_addr.local_SWID = msg->rsp[5];
+	lan_addr.channel = msg->rsp[3] & 0x0f;
+	lan_addr.privilege = msg->rsp[3] >> 4;
+	lan_addr.lun = msg->rsp[9] & 3;
+
+	/*
+	 * It's a response from a remote entity.  Look up the sequence
+	 * number and handle the response.
+	 */
+	if (intf_find_seq(intf,
+			  msg->rsp[9] >> 2,
+			  msg->rsp[3] & 0x0f,
+			  msg->rsp[10],
+			  (msg->rsp[6] >> 2) & (~1),
+			  (struct ipmi_addr *) &(lan_addr),
+			  &recv_msg)) {
+		/*
+		 * We were unable to find the sequence number,
+		 * so just nuke the message.
+		 */
+		ipmi_inc_stat(intf, unhandled_lan_responses);
+		return 0;
+	}
+
+	memcpy(recv_msg->msg_data,
+	       &(msg->rsp[11]),
+	       msg->rsp_size - 11);
+	/*
+	 * The other fields matched, so no need to set them, except
+	 * for netfn, which needs to be the response that was
+	 * returned, not the request value.
+	 */
+	recv_msg->msg.netfn = msg->rsp[6] >> 2;
+	recv_msg->msg.data = recv_msg->msg_data;
+	recv_msg->msg.data_len = msg->rsp_size - 12;
+	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+	ipmi_inc_stat(intf, handled_lan_responses);
+	deliver_response(recv_msg);
+
+	return 0;
+}
+
+static int handle_lan_get_msg_cmd(ipmi_smi_t          intf,
+				  struct ipmi_smi_msg *msg)
+{
+	struct cmd_rcvr          *rcvr;
+	int                      rv = 0;
+	unsigned char            netfn;
+	unsigned char            cmd;
+	unsigned char            chan;
+	ipmi_user_t              user = NULL;
+	struct ipmi_lan_addr     *lan_addr;
+	struct ipmi_recv_msg     *recv_msg;
+
+	if (msg->rsp_size < 12) {
+		/* Message not big enough, just ignore it. */
+		ipmi_inc_stat(intf, invalid_commands);
+		return 0;
+	}
+
+	if (msg->rsp[2] != 0) {
+		/* An error getting the response, just ignore it. */
+		return 0;
+	}
+
+	netfn = msg->rsp[6] >> 2;
+	cmd = msg->rsp[10];
+	chan = msg->rsp[3] & 0xf;
+
+	rcu_read_lock();
+	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+	if (rcvr) {
+		user = rcvr->user;
+		kref_get(&user->refcount);
+	} else
+		user = NULL;
+	rcu_read_unlock();
+
+	if (user == NULL) {
+		/* We didn't find a user, just give up. */
+		ipmi_inc_stat(intf, unhandled_commands);
+
+		/*
+		 * Don't do anything with these messages, just allow
+		 * them to be freed.
+		 */
+		rv = 0;
+	} else {
+		/* Deliver the message to the user. */
+		ipmi_inc_stat(intf, handled_commands);
+
+		recv_msg = ipmi_alloc_recv_msg();
+		if (!recv_msg) {
+			/*
+			 * We couldn't allocate memory for the
+			 * message, so requeue it for handling later.
+			 */
+			rv = 1;
+			kref_put(&user->refcount, free_user);
+		} else {
+			/* Extract the source address from the data. */
+			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
+			lan_addr->session_handle = msg->rsp[4];
+			lan_addr->remote_SWID = msg->rsp[8];
+			lan_addr->local_SWID = msg->rsp[5];
+			lan_addr->lun = msg->rsp[9] & 3;
+			lan_addr->channel = msg->rsp[3] & 0xf;
+			lan_addr->privilege = msg->rsp[3] >> 4;
+
+			/*
+			 * Extract the rest of the message information
+			 * from the IPMB header.
+			 */
+			recv_msg->user = user;
+			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+			recv_msg->msgid = msg->rsp[9] >> 2;
+			recv_msg->msg.netfn = msg->rsp[6] >> 2;
+			recv_msg->msg.cmd = msg->rsp[10];
+			recv_msg->msg.data = recv_msg->msg_data;
+
+			/*
+			 * We chop off 12, not 11 bytes because the checksum
+			 * at the end also needs to be removed.
+			 */
+			recv_msg->msg.data_len = msg->rsp_size - 12;
+			memcpy(recv_msg->msg_data,
+			       &(msg->rsp[11]),
+			       msg->rsp_size - 12);
+			deliver_response(recv_msg);
+		}
+	}
+
+	return rv;
+}
+
+/*
+ * This routine will handle "Get Message" command responses with
+ * channels that use an OEM Medium. The message format belongs to
+ * the OEM.  See IPMI 2.0 specification, Chapter 6 and
+ * Chapter 22, sections 22.6 and 22.24 for more details.
+ */
+static int handle_oem_get_msg_cmd(ipmi_smi_t          intf,
+				  struct ipmi_smi_msg *msg)
+{
+	struct cmd_rcvr       *rcvr;
+	int                   rv = 0;
+	unsigned char         netfn;
+	unsigned char         cmd;
+	unsigned char         chan;
+	ipmi_user_t           user = NULL;
+	struct ipmi_system_interface_addr *smi_addr;
+	struct ipmi_recv_msg  *recv_msg;
+
+	/*
+	 * We expect the OEM SW to perform error checking,
+	 * so we just do some basic sanity checks.
+	 */
+	if (msg->rsp_size < 4) {
+		/* Message not big enough, just ignore it. */
+		ipmi_inc_stat(intf, invalid_commands);
+		return 0;
+	}
+
+	if (msg->rsp[2] != 0) {
+		/* An error getting the response, just ignore it. */
+		return 0;
+	}
+
+	/*
+	 * This is an OEM Message so the OEM needs to know how to
+	 * handle the message.  We do no interpretation.
+	 */
+	netfn = msg->rsp[0] >> 2;
+	cmd = msg->rsp[1];
+	chan = msg->rsp[3] & 0xf;
+
+	rcu_read_lock();
+	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+	if (rcvr) {
+		user = rcvr->user;
+		kref_get(&user->refcount);
+	} else
+		user = NULL;
+	rcu_read_unlock();
+
+	if (user == NULL) {
+		/* We didn't find a user, just give up. */
+		ipmi_inc_stat(intf, unhandled_commands);
+
+		/*
+		 * Don't do anything with these messages, just allow
+		 * them to be freed.
+		 */
+
+		rv = 0;
+	} else {
+		/* Deliver the message to the user. */
+		ipmi_inc_stat(intf, handled_commands);
+
+		recv_msg = ipmi_alloc_recv_msg();
+		if (!recv_msg) {
+			/*
+			 * We couldn't allocate memory for the
+			 * message, so requeue it for handling
+			 * later.
+			 */
+			rv = 1;
+			kref_put(&user->refcount, free_user);
+		} else {
+			/*
+			 * OEM Messages are expected to be delivered via
+			 * the system interface to SMS software.  We might
+			 * need to revisit this depending on OEM
+			 * requirements.
+			 */
+			smi_addr = ((struct ipmi_system_interface_addr *)
+				    &(recv_msg->addr));
+			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+			smi_addr->channel = IPMI_BMC_CHANNEL;
+			smi_addr->lun = msg->rsp[0] & 3;
+
+			recv_msg->user = user;
+			recv_msg->user_msg_data = NULL;
+			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+			recv_msg->msg.netfn = msg->rsp[0] >> 2;
+			recv_msg->msg.cmd = msg->rsp[1];
+			recv_msg->msg.data = recv_msg->msg_data;
+
+			/*
+			 * The message starts at byte 4, which follows the
+			 * Channel Byte in the "GET MESSAGE" command.
+			 */
+			recv_msg->msg.data_len = msg->rsp_size - 4;
+			memcpy(recv_msg->msg_data,
+			       &(msg->rsp[4]),
+			       msg->rsp_size - 4);
+			deliver_response(recv_msg);
+		}
+	}
+
+	return rv;
+}
+
+static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
+				     struct ipmi_smi_msg  *msg)
+{
+	struct ipmi_system_interface_addr *smi_addr;
+
+	recv_msg->msgid = 0;
+	smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
+	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr->channel = IPMI_BMC_CHANNEL;
+	smi_addr->lun = msg->rsp[0] & 3;
+	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
+	recv_msg->msg.netfn = msg->rsp[0] >> 2;
+	recv_msg->msg.cmd = msg->rsp[1];
+	memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
+	recv_msg->msg.data = recv_msg->msg_data;
+	recv_msg->msg.data_len = msg->rsp_size - 3;
+}
+
+static int handle_read_event_rsp(ipmi_smi_t          intf,
+				 struct ipmi_smi_msg *msg)
+{
+	struct ipmi_recv_msg *recv_msg, *recv_msg2;
+	struct list_head     msgs;
+	ipmi_user_t          user;
+	int                  rv = 0;
+	int                  deliver_count = 0;
+	unsigned long        flags;
+
+	if (msg->rsp_size < 19) {
+		/* Message is too small to be an IPMB event. */
+		ipmi_inc_stat(intf, invalid_events);
+		return 0;
+	}
+
+	if (msg->rsp[2] != 0) {
+		/* An error getting the event, just ignore it. */
+		return 0;
+	}
+
+	INIT_LIST_HEAD(&msgs);
+
+	spin_lock_irqsave(&intf->events_lock, flags);
+
+	ipmi_inc_stat(intf, events);
+
+	/*
+	 * Allocate and fill in one message for every user that is
+	 * getting events.
+	 */
+	rcu_read_lock();
+	list_for_each_entry_rcu(user, &intf->users, link) {
+		if (!user->gets_events)
+			continue;
+
+		recv_msg = ipmi_alloc_recv_msg();
+		if (!recv_msg) {
+			rcu_read_unlock();
+			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
+						 link) {
+				list_del(&recv_msg->link);
+				ipmi_free_recv_msg(recv_msg);
+			}
+			/*
+			 * We couldn't allocate memory for the
+			 * message, so requeue it for handling
+			 * later.
+			 */
+			rv = 1;
+			goto out;
+		}
+
+		deliver_count++;
+
+		copy_event_into_recv_msg(recv_msg, msg);
+		recv_msg->user = user;
+		kref_get(&user->refcount);
+		list_add_tail(&(recv_msg->link), &msgs);
+	}
+	rcu_read_unlock();
+
+	if (deliver_count) {
+		/* Now deliver all the messages. */
+		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
+			list_del(&recv_msg->link);
+			deliver_response(recv_msg);
+		}
+	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
+		/*
+		 * No one to receive the message, so put it in the queue
+		 * if there aren't already too many things in it.
+		 */
+		recv_msg = ipmi_alloc_recv_msg();
+		if (!recv_msg) {
+			/*
+			 * We couldn't allocate memory for the
+			 * message, so requeue it for handling
+			 * later.
+			 */
+			rv = 1;
+			goto out;
+		}
+
+		copy_event_into_recv_msg(recv_msg, msg);
+		list_add_tail(&(recv_msg->link), &(intf->waiting_events));
+		intf->waiting_events_count++;
+	} else if (!intf->event_msg_printed) {
+		/*
+		 * There are too many things in the queue; discard this
+		 * message.
+		 */
+		printk(KERN_WARNING PFX "Event queue full, discarding"
+		       " incoming events\n");
+		intf->event_msg_printed = 1;
+	}
+
+ out:
+	spin_unlock_irqrestore(&(intf->events_lock), flags);
+
+	return rv;
+}
+
+static int handle_bmc_rsp(ipmi_smi_t          intf,
+			  struct ipmi_smi_msg *msg)
+{
+	struct ipmi_recv_msg *recv_msg;
+	struct ipmi_user     *user;
+
+	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
+	if (recv_msg == NULL) {
+		printk(KERN_WARNING
+		       "IPMI message received with no owner. This\n"
+		       "could be because of a malformed message, or\n"
+		       "because of a hardware error.  Contact your\n"
+		       "hardware vendor for assistance.\n");
+		return 0;
+	}
+
+	user = recv_msg->user;
+	/* Make sure the user still exists. */
+	if (user && !user->valid) {
+		/* The user for the message went away, so give up. */
+		ipmi_inc_stat(intf, unhandled_local_responses);
+		ipmi_free_recv_msg(recv_msg);
+	} else {
+		struct ipmi_system_interface_addr *smi_addr;
+
+		ipmi_inc_stat(intf, handled_local_responses);
+		recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+		recv_msg->msgid = msg->msgid;
+		smi_addr = ((struct ipmi_system_interface_addr *)
+			    &(recv_msg->addr));
+		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+		smi_addr->channel = IPMI_BMC_CHANNEL;
+		smi_addr->lun = msg->rsp[0] & 3;
+		recv_msg->msg.netfn = msg->rsp[0] >> 2;
+		recv_msg->msg.cmd = msg->rsp[1];
+		memcpy(recv_msg->msg_data,
+		       &(msg->rsp[2]),
+		       msg->rsp_size - 2);
+		recv_msg->msg.data = recv_msg->msg_data;
+		recv_msg->msg.data_len = msg->rsp_size - 2;
+		deliver_response(recv_msg);
+	}
+
+	return 0;
+}
+
+/*
+ * Handle a received message.  Return 1 if the message should be requeued,
+ * 0 if the message should be freed, or -1 if the message should not
+ * be freed or requeued.
+ */
+static int handle_one_recv_msg(ipmi_smi_t          intf,
+			       struct ipmi_smi_msg *msg)
+{
+	int requeue;
+	int chan;
+
+#ifdef DEBUG_MSGING
+	int m;
+	printk("Recv:");
+	for (m = 0; m < msg->rsp_size; m++)
+		printk(" %2.2x", msg->rsp[m]);
+	printk("\n");
+#endif
+	if (msg->rsp_size < 2) {
+		/* Message is too small to be correct. */
+		printk(KERN_WARNING PFX "BMC returned too small a message"
+		       " for netfn %x cmd %x, got %d bytes\n",
+		       (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
+
+		/* Generate an error response for the message. */
+		msg->rsp[0] = msg->data[0] | (1 << 2);
+		msg->rsp[1] = msg->data[1];
+		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+		msg->rsp_size = 3;
+	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
+		   || (msg->rsp[1] != msg->data[1])) {
+		/*
+		 * The NetFN and Command in the response are not even
+		 * marginally correct.
+		 */
+		printk(KERN_WARNING PFX "BMC returned incorrect response,"
+		       " expected netfn %x cmd %x, got netfn %x cmd %x\n",
+		       (msg->data[0] >> 2) | 1, msg->data[1],
+		       msg->rsp[0] >> 2, msg->rsp[1]);
+
+		/* Generate an error response for the message. */
+		msg->rsp[0] = msg->data[0] | (1 << 2);
+		msg->rsp[1] = msg->data[1];
+		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+		msg->rsp_size = 3;
+	}
+
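+	/*
+	 * Dispatch on what generated this message: a Send Message
+	 * response, a Get Message response, an async event, or a
+	 * response from the local BMC.
+	 */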
+	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
+	    && (msg->user_data != NULL)) {
+		/*
+		 * It's a response to a response we sent.  For this we
+		 * deliver a send message response to the user.
+		 */
+		struct ipmi_recv_msg     *recv_msg = msg->user_data;
+
+		requeue = 0;
+		if (msg->rsp_size < 2)
+			/* Message is too small to be correct. */
+			goto out;
+
+		chan = msg->data[2] & 0x0f;
+		if (chan >= IPMI_MAX_CHANNELS)
+			/* Invalid channel number */
+			goto out;
+
+		if (!recv_msg)
+			goto out;
+
+		/* Make sure the user still exists. */
+		if (!recv_msg->user || !recv_msg->user->valid)
+			goto out;
+
+		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
+		recv_msg->msg.data = recv_msg->msg_data;
+		recv_msg->msg.data_len = 1;
+		recv_msg->msg_data[0] = msg->rsp[2];
+		deliver_response(recv_msg);
+	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
+		/* It's from the receive queue. */
+		chan = msg->rsp[3] & 0xf;
+		if (chan >= IPMI_MAX_CHANNELS) {
+			/* Invalid channel number */
+			requeue = 0;
+			goto out;
+		}
+
+		/*
+		 * We need to make sure the channels have been initialized.
+		 * The channel_handler routine will set the "curr_channel"
+		 * equal to or greater than IPMI_MAX_CHANNELS when all the
+		 * channels for this interface have been initialized.
+		 */
+		if (intf->curr_channel < IPMI_MAX_CHANNELS) {
+			requeue = 0; /* Throw the message away */
+			goto out;
+		}
+
+		switch (intf->channels[chan].medium) {
+		case IPMI_CHANNEL_MEDIUM_IPMB:
+			if (msg->rsp[4] & 0x04) {
+				/*
+				 * It's a response, so find the
+				 * requesting message and send it up.
+				 */
+				requeue = handle_ipmb_get_msg_rsp(intf, msg);
+			} else {
+				/*
+				 * It's a command to the SMS from some other
+				 * entity.  Handle that.
+				 */
+				requeue = handle_ipmb_get_msg_cmd(intf, msg);
+			}
+			break;
+
+		case IPMI_CHANNEL_MEDIUM_8023LAN:
+		case IPMI_CHANNEL_MEDIUM_ASYNC:
+			if (msg->rsp[6] & 0x04) {
+				/*
+				 * It's a response, so find the
+				 * requesting message and send it up.
+				 */
+				requeue = handle_lan_get_msg_rsp(intf, msg);
+			} else {
+				/*
+				 * It's a command to the SMS from some other
+				 * entity.  Handle that.
+				 */
+				requeue = handle_lan_get_msg_cmd(intf, msg);
+			}
+			break;
+
+		default:
+			/*
+			 * Check for OEM channels.  Clients had better
+			 * register for these commands.
+			 */
+			if ((intf->channels[chan].medium
+			     >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
+			    && (intf->channels[chan].medium
+				<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
+				requeue = handle_oem_get_msg_cmd(intf, msg);
+			} else {
+				/*
+				 * We don't handle the channel type, so just
+				 * free the message.
+				 */
+				requeue = 0;
+			}
+		}
+
+	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
+		/* It's an asynchronous event. */
+		requeue = handle_read_event_rsp(intf, msg);
+	} else {
+		/* It's a response from the local BMC. */
+		requeue = handle_bmc_rsp(intf, msg);
+	}
+
+ out:
+	return requeue;
+}
+
+/*
+ * If there are messages in the queue or pretimeouts, handle them.
+ */
+static void handle_new_recv_msgs(ipmi_smi_t intf)
+{
+	struct ipmi_smi_msg  *smi_msg;
+	unsigned long        flags = 0;
+	int                  rv;
+	int                  run_to_completion = intf->run_to_completion;
+
+	/* See if any waiting messages need to be processed. */
+	if (!run_to_completion)
+		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+	while (!list_empty(&intf->waiting_rcv_msgs)) {
+		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
+				     struct ipmi_smi_msg, link);
+		list_del(&smi_msg->link);
+		if (!run_to_completion)
+			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+					       flags);
+		rv = handle_one_recv_msg(intf, smi_msg);
+		if (!run_to_completion)
+			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+		if (rv > 0) {
+			/*
+			 * To preserve message order, quit if we
+			 * can't handle a message.  Add the message
+			 * back at the head; this is safe because this
+			 * tasklet is the only thing that pulls the
+			 * messages.
+			 */
+			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
+			break;
+		} else {
+			if (rv == 0)
+				/* Message handled */
+				ipmi_free_smi_msg(smi_msg);
+			/* If rv < 0, fatal error, del but don't free. */
+		}
+	}
+	if (!run_to_completion)
+		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
+
+	/*
+	 * If the pretimeout count is non-zero, decrement it by one and
+	 * deliver pretimeouts to all the users.
+	 */
+	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
+		ipmi_user_t user;
+
+		rcu_read_lock();
+		list_for_each_entry_rcu(user, &intf->users, link) {
+			if (user->handler->ipmi_watchdog_pretimeout)
+				user->handler->ipmi_watchdog_pretimeout(
+					user->handler_data);
+		}
+		rcu_read_unlock();
+	}
+}
+
+static void smi_recv_tasklet(unsigned long val)
+{
+	unsigned long flags = 0; /* keep us warning-free. */
+	ipmi_smi_t intf = (ipmi_smi_t) val;
+	int run_to_completion = intf->run_to_completion;
+	struct ipmi_smi_msg *newmsg = NULL;
+
+	/*
+	 * Start the next message if available.
+	 *
+	 * Do this here, not in the actual receiver, because the lower
+	 * layer is allowed to hold locks while calling message
+	 * delivery, and that could deadlock.
+	 */
+
+	rcu_read_lock();
+
+	if (!run_to_completion)
+		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+	if (intf->curr_msg == NULL && !intf->in_shutdown) {
+		struct list_head *entry = NULL;
+
+		/* Pick the high priority queue first. */
+		if (!list_empty(&intf->hp_xmit_msgs))
+			entry = intf->hp_xmit_msgs.next;
+		else if (!list_empty(&intf->xmit_msgs))
+			entry = intf->xmit_msgs.next;
+
+		if (entry) {
+			list_del(entry);
+			newmsg = list_entry(entry, struct ipmi_smi_msg, link);
+			intf->curr_msg = newmsg;
+		}
+	}
+	if (!run_to_completion)
+		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+	if (newmsg)
+		intf->handlers->sender(intf->send_info, newmsg);
+
+	rcu_read_unlock();
+
+	handle_new_recv_msgs(intf);
+}
+
+/* Handle a new message from the lower layer. */
+void ipmi_smi_msg_received(ipmi_smi_t          intf,
+			   struct ipmi_smi_msg *msg)
+{
+	unsigned long flags = 0; /* keep us warning-free. */
+	int run_to_completion = intf->run_to_completion;
+
+	if ((msg->data_size >= 2)
+	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
+	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
+	    && (msg->user_data == NULL)) {
+
+		if (intf->in_shutdown)
+			goto free_msg;
+
+		/*
+		 * This is the local response to a command send; start
+		 * the timer for these.  The user_data will not be
+		 * NULL if this is a response send, and we will let
+		 * response sends just go through.
+		 */
+
+		/*
+		 * Check for errors, if we get certain errors (ones
+		 * that mean basically we can try again later), we
+		 * ignore them and start the timer.  Otherwise we
+		 * report the error immediately.
+		 */
+		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
+		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
+		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
+		    && (msg->rsp[2] != IPMI_BUS_ERR)
+		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
+			int chan = msg->rsp[3] & 0xf;
+
+			/* Got an error sending the message, handle it. */
+			if (chan >= IPMI_MAX_CHANNELS)
+				; /* This shouldn't happen */
+			else if ((intf->channels[chan].medium
+				  == IPMI_CHANNEL_MEDIUM_8023LAN)
+				 || (intf->channels[chan].medium
+				     == IPMI_CHANNEL_MEDIUM_ASYNC))
+				ipmi_inc_stat(intf, sent_lan_command_errs);
+			else
+				ipmi_inc_stat(intf, sent_ipmb_command_errs);
+			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
+		} else
+			/* The message was sent, start the timer. */
+			intf_start_seq_timer(intf, msg->msgid);
+
+free_msg:
+		ipmi_free_smi_msg(msg);
+	} else {
+		/*
+		 * To preserve message order, we keep a queue and deliver from
+		 * a tasklet.
+		 */
+		if (!run_to_completion)
+			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+		list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
+		if (!run_to_completion)
+			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+					       flags);
+	}
+
+	if (!run_to_completion)
+		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+	/*
+	 * We can get an asynchronous event or receive message in addition
+	 * to commands we send.
+	 */
+	if (msg == intf->curr_msg)
+		intf->curr_msg = NULL;
+	if (!run_to_completion)
+		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+
+	if (run_to_completion)
+		smi_recv_tasklet((unsigned long) intf);
+	else
+		tasklet_schedule(&intf->recv_tasklet);
+}
+EXPORT_SYMBOL(ipmi_smi_msg_received);
+
+void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
+{
+	if (intf->in_shutdown)
+		return;
+
+	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
+	tasklet_schedule(&intf->recv_tasklet);
+}
+EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
+
+static struct ipmi_smi_msg *
+smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
+		  unsigned char seq, long seqid)
+{
+	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
+	if (!smi_msg)
+		/*
+		 * If we can't allocate the message, then just return; we
+		 * get 4 retries, so this should be OK.
+		 */
+		return NULL;
+
+	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
+	smi_msg->data_size = recv_msg->msg.data_len;
+	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
+
+#ifdef DEBUG_MSGING
+	{
+		int m;
+		printk("Resend: ");
+		for (m = 0; m < smi_msg->data_size; m++)
+			printk(" %2.2x", smi_msg->data[m]);
+		printk("\n");
+	}
+#endif
+	return smi_msg;
+}
+
+static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
+			      struct list_head *timeouts,
+			      unsigned long timeout_period,
+			      int slot, unsigned long *flags,
+			      unsigned int *waiting_msgs)
+{
+	struct ipmi_recv_msg     *msg;
+	const struct ipmi_smi_handlers *handlers;
+
+	if (intf->in_shutdown)
+		return;
+
+	if (!ent->inuse)
+		return;
+
+	if (timeout_period < ent->timeout) {
+		ent->timeout -= timeout_period;
+		(*waiting_msgs)++;
+		return;
+	}
+
+	if (ent->retries_left == 0) {
+		/* The message has used all its retries. */
+		ent->inuse = 0;
+		msg = ent->recv_msg;
+		list_add_tail(&msg->link, timeouts);
+		if (ent->broadcast)
+			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
+		else if (is_lan_addr(&ent->recv_msg->addr))
+			ipmi_inc_stat(intf, timed_out_lan_commands);
+		else
+			ipmi_inc_stat(intf, timed_out_ipmb_commands);
+	} else {
+		struct ipmi_smi_msg *smi_msg;
+		/* More retries, send again. */
+
+		(*waiting_msgs)++;
+
+		/*
+		 * Start with the max timer, set to normal timer after
+		 * the message is sent.
+		 */
+		ent->timeout = MAX_MSG_TIMEOUT;
+		ent->retries_left--;
+		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
+					    ent->seqid);
+		if (!smi_msg) {
+			if (is_lan_addr(&ent->recv_msg->addr))
+				ipmi_inc_stat(intf,
+					      dropped_rexmit_lan_commands);
+			else
+				ipmi_inc_stat(intf,
+					      dropped_rexmit_ipmb_commands);
+			return;
+		}
+
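+		/*
+		 * Drop seq_lock across the send; the lower layer may
+		 * deliver messages (and take locks) from the send path.
+		 */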
+		spin_unlock_irqrestore(&intf->seq_lock, *flags);
+
+		/*
+		 * Send the new message.  We send with a zero
+		 * priority.  It timed out, so I doubt time is that
+		 * critical now, and high priority messages are really
+		 * only for messages to the local MC, which don't get
+		 * resent.
+		 */
+		handlers = intf->handlers;
+		if (handlers) {
+			if (is_lan_addr(&ent->recv_msg->addr))
+				ipmi_inc_stat(intf,
+					      retransmitted_lan_commands);
+			else
+				ipmi_inc_stat(intf,
+					      retransmitted_ipmb_commands);
+
+			smi_send(intf, handlers, smi_msg, 0);
+		} else
+			ipmi_free_smi_msg(smi_msg);
+
+		spin_lock_irqsave(&intf->seq_lock, *flags);
+	}
+}
+
+static unsigned int ipmi_timeout_handler(ipmi_smi_t intf,
+					 unsigned long timeout_period)
+{
+	struct list_head     timeouts;
+	struct ipmi_recv_msg *msg, *msg2;
+	unsigned long        flags;
+	int                  i;
+	unsigned int         waiting_msgs = 0;
+
+	/*
+	 * Go through the seq table and find any messages that
+	 * have timed out, putting them in the timeouts
+	 * list.
+	 */
+	INIT_LIST_HEAD(&timeouts);
+	spin_lock_irqsave(&intf->seq_lock, flags);
+	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
+		check_msg_timeout(intf, &(intf->seq_table[i]),
+				  &timeouts, timeout_period, i,
+				  &flags, &waiting_msgs);
+	spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+	list_for_each_entry_safe(msg, msg2, &timeouts, link)
+		deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
+
+	/*
+	 * Maintenance mode handling.  Check the timeout
+	 * optimistically before we claim the lock.  It may
+	 * mean a timeout gets missed occasionally, but that
+	 * only means the timeout gets extended by one
+	 * period.  No big deal, and it avoids the lock
+	 * most of the time.
+	 */
+	if (intf->auto_maintenance_timeout > 0) {
+		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+		if (intf->auto_maintenance_timeout > 0) {
+			intf->auto_maintenance_timeout
+				-= timeout_period;
+			if (!intf->maintenance_mode
+			    && (intf->auto_maintenance_timeout <= 0)) {
+				intf->maintenance_mode_enable = false;
+				maintenance_mode_update(intf);
+			}
+		}
+		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+				       flags);
+	}
+
+	tasklet_schedule(&intf->recv_tasklet);
+
+	return waiting_msgs;
+}
+
+static void ipmi_request_event(ipmi_smi_t intf)
+{
+	/* No event requests when in maintenance mode. */
+	if (intf->maintenance_mode_enable)
+		return;
+
+	if (!intf->in_shutdown)
+		intf->handlers->request_events(intf->send_info);
+}
+
+static struct timer_list ipmi_timer;
+
+static atomic_t stop_operation;
+
+static void ipmi_timeout(unsigned long data)
+{
+	ipmi_smi_t intf;
+	int nt = 0;
+
+	if (atomic_read(&stop_operation))
+		return;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		int lnt = 0;
+
+		if (atomic_read(&intf->event_waiters)) {
+			intf->ticks_to_req_ev--;
+			if (intf->ticks_to_req_ev == 0) {
+				ipmi_request_event(intf);
+				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+			}
+			lnt++;
+		}
+
+		lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
+
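+		/* Normalize to 0/1 so it can be compared and summed below. */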
+		lnt = !!lnt;
+		if (lnt != intf->last_needs_timer &&
+					intf->handlers->set_need_watch)
+			intf->handlers->set_need_watch(intf->send_info, lnt);
+		intf->last_needs_timer = lnt;
+
+		nt += lnt;
+	}
+	rcu_read_unlock();
+
+	if (nt)
+		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+}
+
+static void need_waiter(ipmi_smi_t intf)
+{
+	/* Racy, but worst case we start the timer twice. */
+	if (!timer_pending(&ipmi_timer))
+		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+}
+
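+/*
+ * Allocation counters for SMI and receive messages; cleanup_ipmi()
+ * checks these at module unload to warn about leaked buffers.
+ */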
+static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
+static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
+
+static void free_smi_msg(struct ipmi_smi_msg *msg)
+{
+	atomic_dec(&smi_msg_inuse_count);
+	kfree(msg);
+}
+
+struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
+{
+	struct ipmi_smi_msg *rv;
+	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
+	if (rv) {
+		rv->done = free_smi_msg;
+		rv->user_data = NULL;
+		atomic_inc(&smi_msg_inuse_count);
+	}
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_alloc_smi_msg);
+
+static void free_recv_msg(struct ipmi_recv_msg *msg)
+{
+	atomic_dec(&recv_msg_inuse_count);
+	kfree(msg);
+}
+
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
+{
+	struct ipmi_recv_msg *rv;
+
+	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
+	if (rv) {
+		rv->user = NULL;
+		rv->done = free_recv_msg;
+		atomic_inc(&recv_msg_inuse_count);
+	}
+	return rv;
+}
+
+void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
+{
+	if (msg->user)
+		kref_put(&msg->user->refcount, free_user);
+	msg->done(msg);
+}
+EXPORT_SYMBOL(ipmi_free_recv_msg);
+
+#ifdef CONFIG_IPMI_PANIC_EVENT
+
+static atomic_t panic_done_count = ATOMIC_INIT(0);
+
+static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
+{
+	atomic_dec(&panic_done_count);
+}
+
+static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
+{
+	atomic_dec(&panic_done_count);
+}
+
+/*
+ * Inside a panic, send a message and wait for a response.
+ */
+static void ipmi_panic_request_and_wait(ipmi_smi_t           intf,
+					struct ipmi_addr     *addr,
+					struct kernel_ipmi_msg *msg)
+{
+	struct ipmi_smi_msg  smi_msg;
+	struct ipmi_recv_msg recv_msg;
+	int rv;
+
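+	/*
+	 * Use on-stack messages with dummy done handlers; we cannot
+	 * allocate memory during a panic.  panic_done_count tracks both
+	 * messages until the poll loop below drains them.
+	 */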
+	smi_msg.done = dummy_smi_done_handler;
+	recv_msg.done = dummy_recv_done_handler;
+	atomic_add(2, &panic_done_count);
+	rv = i_ipmi_request(NULL,
+			    intf,
+			    addr,
+			    0,
+			    msg,
+			    intf,
+			    &smi_msg,
+			    &recv_msg,
+			    0,
+			    intf->channels[0].address,
+			    intf->channels[0].lun,
+			    0, 1); /* Don't retry, and don't wait. */
+	if (rv)
+		atomic_sub(2, &panic_done_count);
+	else if (intf->handlers->flush_messages)
+		intf->handlers->flush_messages(intf->send_info);
+
+	while (atomic_read(&panic_done_count) != 0)
+		ipmi_poll(intf);
+}
+
+#ifdef CONFIG_IPMI_PANIC_STRING
+static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
+{
+	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
+	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
+	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
+		/* A get event receiver command, save it. */
+		intf->event_receiver = msg->msg.data[1];
+		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
+	}
+}
+
+static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
+{
+	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
+	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
+	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
+		/*
+		 * A get device id command, save if we are an event
+		 * receiver or generator.
+		 */
+		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
+		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
+	}
+}
+#endif
+
+static void send_panic_events(char *str)
+{
+	struct kernel_ipmi_msg            msg;
+	ipmi_smi_t                        intf;
+	unsigned char                     data[16];
+	struct ipmi_system_interface_addr *si;
+	struct ipmi_addr                  addr;
+
+	si = (struct ipmi_system_interface_addr *) &addr;
+	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	si->channel = IPMI_BMC_CHANNEL;
+	si->lun = 0;
+
+	/* Fill in an event telling that we have failed. */
+	msg.netfn = 0x04; /* Sensor or Event. */
+	msg.cmd = 2; /* Platform event command. */
+	msg.data = data;
+	msg.data_len = 8;
+	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
+	data[1] = 0x03; /* This is for IPMI 1.0. */
+	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
+	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
+	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
+
+	/*
+	 * Put a few breadcrumbs in.  Hopefully later we can add more things
+	 * to make the panic events more useful.
+	 */
+	if (str) {
+		data[3] = str[0];
+		data[6] = str[1];
+		data[7] = str[2];
+	}
+
+	/* For every registered interface, send the event. */
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		if (!intf->handlers)
+			/* Interface is not ready. */
+			continue;
+
+		/* Send the event announcing the panic. */
+		ipmi_panic_request_and_wait(intf, &addr, &msg);
+	}
+
+#ifdef CONFIG_IPMI_PANIC_STRING
+	/*
+	 * On every interface, dump a series of OEM events holding the
+	 * string.
+	 */
+	if (!str)
+		return;
+
+	/* For every registered interface, send the event. */
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		char                  *p = str;
+		struct ipmi_ipmb_addr *ipmb;
+		int                   j;
+
+		if (intf->intf_num == -1)
+			/* Interface was not ready yet. */
+			continue;
+
+		/*
+		 * intf_num is used as a marker to tell if the
+		 * interface is valid.  Thus we need a read barrier to
+		 * make sure data fetched before checking intf_num
+		 * won't be used.
+		 */
+		smp_rmb();
+
+		/*
+		 * First job here is to figure out where to send the
+		 * OEM events.  There's no way in IPMI to send OEM
+		 * events using an event send command, so we have to
+		 * find the SEL to put them in and stick them in
+		 * there.
+		 */
+
+		/* Get capabilities from the get device id. */
+		intf->local_sel_device = 0;
+		intf->local_event_generator = 0;
+		intf->event_receiver = 0;
+
+		/* Request the device info from the local MC. */
+		msg.netfn = IPMI_NETFN_APP_REQUEST;
+		msg.cmd = IPMI_GET_DEVICE_ID_CMD;
+		msg.data = NULL;
+		msg.data_len = 0;
+		intf->null_user_handler = device_id_fetcher;
+		ipmi_panic_request_and_wait(intf, &addr, &msg);
+
+		if (intf->local_event_generator) {
+			/* Request the event receiver from the local MC. */
+			msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
+			msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
+			msg.data = NULL;
+			msg.data_len = 0;
+			intf->null_user_handler = event_receiver_fetcher;
+			ipmi_panic_request_and_wait(intf, &addr, &msg);
+		}
+		intf->null_user_handler = NULL;
+
+		/*
+		 * Validate the event receiver.  The low bit must not
+		 * be 1 (it must be a valid IPMB address); it cannot
+		 * be zero, and it must not be my address.
+		 */
+		if (((intf->event_receiver & 1) == 0)
+		    && (intf->event_receiver != 0)
+		    && (intf->event_receiver != intf->channels[0].address)) {
+			/*
+			 * The event receiver is valid, send an IPMB
+			 * message.
+			 */
+			ipmb = (struct ipmi_ipmb_addr *) &addr;
+			ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
+			ipmb->channel = 0; /* FIXME - is this right? */
+			ipmb->lun = intf->event_receiver_lun;
+			ipmb->slave_addr = intf->event_receiver;
+		} else if (intf->local_sel_device) {
+			/*
+			 * The event receiver was not valid (or was
+			 * me), but I am an SEL device, just dump it
+			 * in my SEL.
+			 */
+			si = (struct ipmi_system_interface_addr *) &addr;
+			si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+			si->channel = IPMI_BMC_CHANNEL;
+			si->lun = 0;
+		} else
+			continue; /* Nowhere to send the event. */
+
+		msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
+		msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
+		msg.data = data;
+		msg.data_len = 16;
+
+		j = 0;
+		while (*p) {
+			int size = strlen(p);
+
+			if (size > 11)
+				size = 11;
+			data[0] = 0;
+			data[1] = 0;
+			data[2] = 0xf0; /* OEM event without timestamp. */
+			data[3] = intf->channels[0].address;
+			data[4] = j++; /* sequence # */
+			/*
+			 * Always give 11 bytes, so strncpy will fill
+			 * it with zeroes for me.
+			 */
+			strncpy(data+5, p, 11);
+			p += size;
+
+			ipmi_panic_request_and_wait(intf, &addr, &msg);
+		}
+	}
+#endif /* CONFIG_IPMI_PANIC_STRING */
+}
+#endif /* CONFIG_IPMI_PANIC_EVENT */
+
+static int has_panicked;
+
+static int panic_event(struct notifier_block *this,
+		       unsigned long         event,
+		       void                  *ptr)
+{
+	ipmi_smi_t intf;
+
+	if (has_panicked)
+		return NOTIFY_DONE;
+	has_panicked = 1;
+
+	/* For every registered interface, set it to run to completion. */
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		if (!intf->handlers)
+			/* Interface is not ready. */
+			continue;
+
+		/*
+		 * If we were interrupted while locking xmit_msgs_lock or
+		 * waiting_rcv_msgs_lock, the corresponding list may be
+		 * corrupted.  In this case, drop the items on the list
+		 * for safety.
+		 */
+		if (!spin_trylock(&intf->xmit_msgs_lock)) {
+			INIT_LIST_HEAD(&intf->xmit_msgs);
+			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
+		} else
+			spin_unlock(&intf->xmit_msgs_lock);
+
+		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
+			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
+		else
+			spin_unlock(&intf->waiting_rcv_msgs_lock);
+
+		intf->run_to_completion = 1;
+		intf->handlers->set_run_to_completion(intf->send_info, 1);
+	}
+
+#ifdef CONFIG_IPMI_PANIC_EVENT
+	send_panic_events(ptr);
+#endif
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_block = {
+	.notifier_call	= panic_event,
+	.next		= NULL,
+	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
+};
+
+static int ipmi_init_msghandler(void)
+{
+	int rv;
+
+	if (initialized)
+		return 0;
+
+	rv = driver_register(&ipmidriver.driver);
+	if (rv) {
+		printk(KERN_ERR PFX "Could not register IPMI driver\n");
+		return rv;
+	}
+
+	printk(KERN_INFO "ipmi message handler version "
+	       IPMI_DRIVER_VERSION "\n");
+
+#ifdef CONFIG_PROC_FS
+	proc_ipmi_root = proc_mkdir("ipmi", NULL);
+	if (!proc_ipmi_root) {
+		printk(KERN_ERR PFX "Unable to create IPMI proc dir\n");
+		driver_unregister(&ipmidriver.driver);
+		return -ENOMEM;
+	}
+
+#endif /* CONFIG_PROC_FS */
+
+	setup_timer(&ipmi_timer, ipmi_timeout, 0);
+	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
+	initialized = 1;
+
+	return 0;
+}
+
+static int __init ipmi_init_msghandler_mod(void)
+{
+	ipmi_init_msghandler();
+	return 0;
+}
+
+static void __exit cleanup_ipmi(void)
+{
+	int count;
+
+	if (!initialized)
+		return;
+
+	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
+
+	/*
+	 * This can't be called if any interfaces exist, so no worry
+	 * about shutting down the interfaces.
+	 */
+
+	/*
+	 * Tell the timer to stop, then wait for it to stop.  This
+	 * avoids problems with race conditions removing the timer
+	 * here.
+	 */
+	atomic_inc(&stop_operation);
+	del_timer_sync(&ipmi_timer);
+
+#ifdef CONFIG_PROC_FS
+	proc_remove(proc_ipmi_root);
+#endif /* CONFIG_PROC_FS */
+
+	driver_unregister(&ipmidriver.driver);
+
+	initialized = 0;
+
+	/* Check for buffer leaks. */
+	count = atomic_read(&smi_msg_inuse_count);
+	if (count != 0)
+		printk(KERN_WARNING PFX "SMI message count %d at exit\n",
+		       count);
+	count = atomic_read(&recv_msg_inuse_count);
+	if (count != 0)
+		printk(KERN_WARNING PFX "recv message count %d at exit\n",
+		       count);
+}
+module_exit(cleanup_ipmi);
+
+module_init(ipmi_init_msghandler_mod);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
+		   " interface.");
+MODULE_VERSION(IPMI_DRIVER_VERSION);
+MODULE_SOFTDEP("post: ipmi_devintf");
diff --git a/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_powernv.c b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_powernv.c
new file mode 100644
index 0000000..845efa0
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_powernv.c
@@ -0,0 +1,323 @@
+/*
+ * PowerNV OPAL IPMI driver
+ *
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#define pr_fmt(fmt)        "ipmi-powernv: " fmt
+
+#include <linux/ipmi_smi.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/opal.h>
+
+struct ipmi_smi_powernv {
+	u64			interface_id;
+	struct ipmi_device_id	ipmi_id;
+	ipmi_smi_t		intf;
+	unsigned int		irq;
+
+	/*
+	 * We assume that there can only be one outstanding request, so
+	 * keep the pending message in cur_msg.  We protect this (and
+	 * consequently opal_msg, which is in use when cur_msg is set)
+	 * from concurrent updates through send & recv calls with msg_lock.
+	 */
+	spinlock_t		msg_lock;
+	struct ipmi_smi_msg	*cur_msg;
+	struct opal_ipmi_msg	*opal_msg;
+};
+
+static int ipmi_powernv_start_processing(void *send_info, ipmi_smi_t intf)
+{
+	struct ipmi_smi_powernv *smi = send_info;
+
+	smi->intf = intf;
+	return 0;
+}
+
+static void send_error_reply(struct ipmi_smi_powernv *smi,
+		struct ipmi_smi_msg *msg, u8 completion_code)
+{
+	msg->rsp[0] = msg->data[0] | 0x4;
+	msg->rsp[1] = msg->data[1];
+	msg->rsp[2] = completion_code;
+	msg->rsp_size = 3;
+	ipmi_smi_msg_received(smi->intf, msg);
+}
+
+static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
+{
+	struct ipmi_smi_powernv *smi = send_info;
+	struct opal_ipmi_msg *opal_msg;
+	unsigned long flags;
+	int comp, rc;
+	size_t size;
+
+	/* ensure data_len will fit in the opal_ipmi_msg buffer... */
+	if (msg->data_size > IPMI_MAX_MSG_LENGTH) {
+		comp = IPMI_REQ_LEN_EXCEEDED_ERR;
+		goto err;
+	}
+
+	/* ... and that we at least have netfn and cmd bytes */
+	if (msg->data_size < 2) {
+		comp = IPMI_REQ_LEN_INVALID_ERR;
+		goto err;
+	}
+
+	spin_lock_irqsave(&smi->msg_lock, flags);
+
+	if (smi->cur_msg) {
+		comp = IPMI_NODE_BUSY_ERR;
+		goto err_unlock;
+	}
+
+	/* format our data for the OPAL API */
+	opal_msg = smi->opal_msg;
+	opal_msg->version = OPAL_IPMI_MSG_FORMAT_VERSION_1;
+	opal_msg->netfn = msg->data[0];
+	opal_msg->cmd = msg->data[1];
+	if (msg->data_size > 2)
+		memcpy(opal_msg->data, msg->data + 2, msg->data_size - 2);
+
+	/* data_size already includes the netfn and cmd bytes */
+	size = sizeof(*opal_msg) + msg->data_size - 2;
+
+	pr_devel("%s: opal_ipmi_send(0x%llx, %p, %ld)\n", __func__,
+			smi->interface_id, opal_msg, size);
+	rc = opal_ipmi_send(smi->interface_id, opal_msg, size);
+	pr_devel("%s:  -> %d\n", __func__, rc);
+
+	if (!rc) {
+		smi->cur_msg = msg;
+		spin_unlock_irqrestore(&smi->msg_lock, flags);
+		return;
+	}
+
+	comp = IPMI_ERR_UNSPECIFIED;
+err_unlock:
+	spin_unlock_irqrestore(&smi->msg_lock, flags);
+err:
+	send_error_reply(smi, msg, comp);
+}
+
+static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi)
+{
+	struct opal_ipmi_msg *opal_msg;
+	struct ipmi_smi_msg *msg;
+	unsigned long flags;
+	uint64_t size;
+	int rc;
+
+	pr_devel("%s: opal_ipmi_recv(%llx, msg, sz)\n", __func__,
+			smi->interface_id);
+
+	spin_lock_irqsave(&smi->msg_lock, flags);
+
+	if (!smi->cur_msg) {
+		spin_unlock_irqrestore(&smi->msg_lock, flags);
+		pr_warn("no current message?\n");
+		return 0;
+	}
+
+	msg = smi->cur_msg;
+	opal_msg = smi->opal_msg;
+
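+	/*
+	 * The size is an in/out parameter in big-endian: pass in the
+	 * buffer size, get back the actual message size on success.
+	 */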
+	size = cpu_to_be64(sizeof(*opal_msg) + IPMI_MAX_MSG_LENGTH);
+
+	rc = opal_ipmi_recv(smi->interface_id,
+			opal_msg,
+			&size);
+	size = be64_to_cpu(size);
+	pr_devel("%s:   -> %d (size %lld)\n", __func__,
+			rc, rc == 0 ? size : 0);
+	if (rc) {
+		/* If we came via a poll, the response may not be ready yet. */
+		if (rc == OPAL_EMPTY) {
+			spin_unlock_irqrestore(&smi->msg_lock, flags);
+			return 0;
+		}
+
+		smi->cur_msg = NULL;
+		spin_unlock_irqrestore(&smi->msg_lock, flags);
+		send_error_reply(smi, msg, IPMI_ERR_UNSPECIFIED);
+		return 0;
+	}
+
+	if (size < sizeof(*opal_msg)) {
+		spin_unlock_irqrestore(&smi->msg_lock, flags);
+		pr_warn("unexpected IPMI message size %lld\n", size);
+		return 0;
+	}
+
+	if (opal_msg->version != OPAL_IPMI_MSG_FORMAT_VERSION_1) {
+		spin_unlock_irqrestore(&smi->msg_lock, flags);
+		pr_warn("unexpected IPMI message format (version %d)\n",
+				opal_msg->version);
+		return 0;
+	}
+
+	msg->rsp[0] = opal_msg->netfn;
+	msg->rsp[1] = opal_msg->cmd;
+	if (size > sizeof(*opal_msg))
+		memcpy(&msg->rsp[2], opal_msg->data, size - sizeof(*opal_msg));
+	msg->rsp_size = 2 + size - sizeof(*opal_msg);
+
+	smi->cur_msg = NULL;
+	spin_unlock_irqrestore(&smi->msg_lock, flags);
+	ipmi_smi_msg_received(smi->intf, msg);
+	return 0;
+}
+
+static void ipmi_powernv_request_events(void *send_info)
+{
+}
+
+static void ipmi_powernv_set_run_to_completion(void *send_info,
+		bool run_to_completion)
+{
+}
+
+static void ipmi_powernv_poll(void *send_info)
+{
+	struct ipmi_smi_powernv *smi = send_info;
+
+	ipmi_powernv_recv(smi);
+}
+
+static const struct ipmi_smi_handlers ipmi_powernv_smi_handlers = {
+	.owner			= THIS_MODULE,
+	.start_processing	= ipmi_powernv_start_processing,
+	.sender			= ipmi_powernv_send,
+	.request_events		= ipmi_powernv_request_events,
+	.set_run_to_completion	= ipmi_powernv_set_run_to_completion,
+	.poll			= ipmi_powernv_poll,
+};
+
+static irqreturn_t ipmi_opal_event(int irq, void *data)
+{
+	struct ipmi_smi_powernv *smi = data;
+
+	ipmi_powernv_recv(smi);
+	return IRQ_HANDLED;
+}
+
+static int ipmi_powernv_probe(struct platform_device *pdev)
+{
+	struct ipmi_smi_powernv *ipmi;
+	struct device *dev;
+	u32 prop;
+	int rc;
+
+	if (!pdev || !pdev->dev.of_node)
+		return -ENODEV;
+
+	dev = &pdev->dev;
+
+	ipmi = devm_kzalloc(dev, sizeof(*ipmi), GFP_KERNEL);
+	if (!ipmi)
+		return -ENOMEM;
+
+	spin_lock_init(&ipmi->msg_lock);
+
+	rc = of_property_read_u32(dev->of_node, "ibm,ipmi-interface-id",
+			&prop);
+	if (rc) {
+		dev_warn(dev, "No interface ID property\n");
+		goto err_free;
+	}
+	ipmi->interface_id = prop;
+
+	rc = of_property_read_u32(dev->of_node, "interrupts", &prop);
+	if (rc) {
+		dev_warn(dev, "No interrupts property\n");
+		goto err_free;
+	}
+
+	ipmi->irq = irq_of_parse_and_map(dev->of_node, 0);
+	if (!ipmi->irq) {
+		dev_info(dev, "Unable to map irq from device tree\n");
+		ipmi->irq = opal_event_request(prop);
+	}
+
+	rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
+			 "opal-ipmi", ipmi);
+	if (rc) {
+		dev_warn(dev, "Unable to request irq\n");
+		goto err_dispose;
+	}
+
+	ipmi->opal_msg = devm_kmalloc(dev,
+			sizeof(*ipmi->opal_msg) + IPMI_MAX_MSG_LENGTH,
+			GFP_KERNEL);
+	if (!ipmi->opal_msg) {
+		rc = -ENOMEM;
+		goto err_unregister;
+	}
+
+	/* todo: query actual ipmi_device_id */
+	rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi,
+			&ipmi->ipmi_id, dev, 0);
+	if (rc) {
+		dev_warn(dev, "IPMI SMI registration failed (%d)\n", rc);
+		goto err_free_msg;
+	}
+
+	dev_set_drvdata(dev, ipmi);
+	return 0;
+
+err_free_msg:
+	devm_kfree(dev, ipmi->opal_msg);
+err_unregister:
+	free_irq(ipmi->irq, ipmi);
+err_dispose:
+	irq_dispose_mapping(ipmi->irq);
+err_free:
+	devm_kfree(dev, ipmi);
+	return rc;
+}
+
+static int ipmi_powernv_remove(struct platform_device *pdev)
+{
+	struct ipmi_smi_powernv *smi = dev_get_drvdata(&pdev->dev);
+
+	ipmi_unregister_smi(smi->intf);
+	free_irq(smi->irq, smi);
+	irq_dispose_mapping(smi->irq);
+
+	return 0;
+}
+
+static const struct of_device_id ipmi_powernv_match[] = {
+	{ .compatible = "ibm,opal-ipmi" },
+	{ },
+};
+
+static struct platform_driver powernv_ipmi_driver = {
+	.driver = {
+		.name		= "ipmi-powernv",
+		.of_match_table	= ipmi_powernv_match,
+	},
+	.probe	= ipmi_powernv_probe,
+	.remove	= ipmi_powernv_remove,
+};
+
+module_platform_driver(powernv_ipmi_driver);
+
+MODULE_DEVICE_TABLE(of, ipmi_powernv_match);
+MODULE_DESCRIPTION("powernv IPMI driver");
+MODULE_AUTHOR("Jeremy Kerr <jk@ozlabs.org>");
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_poweroff.c b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_poweroff.c
new file mode 100644
index 0000000..676c910
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_poweroff.c
@@ -0,0 +1,749 @@
+/*
+ * ipmi_poweroff.c
+ *
+ * MontaVista IPMI Poweroff extension to sys_reboot
+ *
+ * Author: MontaVista Software, Inc.
+ *         Steven Dake <sdake@mvista.com>
+ *         Corey Minyard <cminyard@mvista.com>
+ *         source@mvista.com
+ *
+ * Copyright 2002,2004 MontaVista Software Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/pm.h>
+#include <linux/kdev_t.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+
+#define PFX "IPMI poweroff: "
+
+static void ipmi_po_smi_gone(int if_num);
+static void ipmi_po_new_smi(int if_num, struct device *device);
+
+/*
+ * Definitions for controlling power off (if the system supports it).  It
+ * conveniently matches the IPMI chassis control values.
+ */
+#define IPMI_CHASSIS_POWER_DOWN		0	/* power down, the default. */
+#define IPMI_CHASSIS_POWER_CYCLE	0x02	/* power cycle */
+
+/* Non-zero to power cycle instead of power down. */
+static int poweroff_powercycle;
+
+/* Which interface to use, -1 means the first we see. */
+static int ifnum_to_use = -1;
+
+/* Our local state. */
+static int ready;
+static ipmi_user_t ipmi_user;
+static int ipmi_ifnum;
+static void (*specific_poweroff_func)(ipmi_user_t user);
+
+/* Holds the old poweroff function so we can restore it on removal. */
+static void (*old_poweroff_func)(void);
+
+static int set_param_ifnum(const char *val, const struct kernel_param *kp)
+{
+	int rv = param_set_int(val, kp);
+	if (rv)
+		return rv;
+	if ((ifnum_to_use < 0) || (ifnum_to_use == ipmi_ifnum))
+		return 0;
+
+	ipmi_po_smi_gone(ipmi_ifnum);
+	ipmi_po_new_smi(ifnum_to_use, NULL);
+	return 0;
+}
+
+module_param_call(ifnum_to_use, set_param_ifnum, param_get_int,
+		  &ifnum_to_use, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the poweroff "
+		 "operation.  Setting to -1 defaults to the first registered "
+		 "interface");
+
+/* parameter definition to allow user to flag power cycle */
+module_param(poweroff_powercycle, int, 0644);
+MODULE_PARM_DESC(poweroff_powercycle,
+		 " Set to non-zero to enable power cycle instead of power"
+		 " down. Power cycle is contingent on hardware support,"
+		 " otherwise it defaults back to power down.");
+
+/* Stuff from the get device id command. */
+static unsigned int mfg_id;
+static unsigned int prod_id;
+static unsigned char capabilities;
+static unsigned char ipmi_version;
+
+/*
+ * We use our own messages for this operation, we don't let the system
+ * allocate them, since we may be in a panic situation.  The whole
+ * thing is single-threaded, anyway, so multiple messages are not
+ * required.
+ */
+static atomic_t dummy_count = ATOMIC_INIT(0);
+static void dummy_smi_free(struct ipmi_smi_msg *msg)
+{
+	atomic_dec(&dummy_count);
+}
+static void dummy_recv_free(struct ipmi_recv_msg *msg)
+{
+	atomic_dec(&dummy_count);
+}
+static struct ipmi_smi_msg halt_smi_msg = {
+	.done = dummy_smi_free
+};
+static struct ipmi_recv_msg halt_recv_msg = {
+	.done = dummy_recv_free
+};
+
+
+/*
+ * Code to send a message and wait for the response.
+ */
+
+static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data)
+{
+	struct completion *comp = recv_msg->user_msg_data;
+
+	if (comp)
+		complete(comp);
+}
+
+static struct ipmi_user_hndl ipmi_poweroff_handler = {
+	.ipmi_recv_hndl = receive_handler
+};
+
+
+static int ipmi_request_wait_for_response(ipmi_user_t            user,
+					  struct ipmi_addr       *addr,
+					  struct kernel_ipmi_msg *send_msg)
+{
+	int               rv;
+	struct completion comp;
+
+	init_completion(&comp);
+
+	rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, &comp,
+				      &halt_smi_msg, &halt_recv_msg, 0);
+	if (rv)
+		return rv;
+
+	wait_for_completion(&comp);
+
+	return halt_recv_msg.msg.data[0];
+}
+
+/* Wait for message to complete, spinning. */
+static int ipmi_request_in_rc_mode(ipmi_user_t            user,
+				   struct ipmi_addr       *addr,
+				   struct kernel_ipmi_msg *send_msg)
+{
+	int rv;
+
+	atomic_set(&dummy_count, 2);
+	rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
+				      &halt_smi_msg, &halt_recv_msg, 0);
+	if (rv) {
+		atomic_set(&dummy_count, 0);
+		return rv;
+	}
+
+	/*
+	 * Spin until our message is done.
+	 */
+	while (atomic_read(&dummy_count) > 0) {
+		ipmi_poll_interface(user);
+		cpu_relax();
+	}
+
+	return halt_recv_msg.msg.data[0];
+}
+
+/*
+ * ATCA Support
+ */
+
+#define IPMI_NETFN_ATCA			0x2c
+#define IPMI_ATCA_SET_POWER_CMD		0x11
+#define IPMI_ATCA_GET_ADDR_INFO_CMD	0x01
+#define IPMI_PICMG_ID			0
+
+#define IPMI_NETFN_OEM				0x2e
+#define IPMI_ATCA_PPS_GRACEFUL_RESTART		0x11
+#define IPMI_ATCA_PPS_IANA			"\x00\x40\x0A"
+#define IPMI_MOTOROLA_MANUFACTURER_ID		0x0000A1
+#define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID	0x0051
+
+static void (*atca_oem_poweroff_hook)(ipmi_user_t user);
+
+static void pps_poweroff_atca(ipmi_user_t user)
+{
+	struct ipmi_system_interface_addr smi_addr;
+	struct kernel_ipmi_msg            send_msg;
+	int                               rv;
+	/*
+	 * Configure IPMI address for local access
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
+
+	printk(KERN_INFO PFX "PPS powerdown hook used\n");
+
+	send_msg.netfn = IPMI_NETFN_OEM;
+	send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
+	send_msg.data = IPMI_ATCA_PPS_IANA;
+	send_msg.data_len = 3;
+	rv = ipmi_request_in_rc_mode(user,
+				     (struct ipmi_addr *) &smi_addr,
+				     &send_msg);
+	if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+		printk(KERN_ERR PFX "Unable to send ATCA PPS powerdown message,"
+		       " IPMI error 0x%x\n", rv);
+	}
+	return;
+}
+
+static int ipmi_atca_detect(ipmi_user_t user)
+{
+	struct ipmi_system_interface_addr smi_addr;
+	struct kernel_ipmi_msg            send_msg;
+	int                               rv;
+	unsigned char                     data[1];
+
+	/*
+	 * Configure IPMI address for local access
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
+
+	/*
+	 * Use get address info to check and see if we are ATCA
+	 */
+	send_msg.netfn = IPMI_NETFN_ATCA;
+	send_msg.cmd = IPMI_ATCA_GET_ADDR_INFO_CMD;
+	data[0] = IPMI_PICMG_ID;
+	send_msg.data = data;
+	send_msg.data_len = sizeof(data);
+	rv = ipmi_request_wait_for_response(user,
+					    (struct ipmi_addr *) &smi_addr,
+					    &send_msg);
+
+	printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n",
+	       mfg_id, prod_id);
+	if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
+	    && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
+		printk(KERN_INFO PFX
+		       "Installing Pigeon Point Systems Poweroff Hook\n");
+		atca_oem_poweroff_hook = pps_poweroff_atca;
+	}
+	return !rv;
+}
+
+static void ipmi_poweroff_atca(ipmi_user_t user)
+{
+	struct ipmi_system_interface_addr smi_addr;
+	struct kernel_ipmi_msg            send_msg;
+	int                               rv;
+	unsigned char                     data[4];
+
+	/*
+	 * Configure IPMI address for local access
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
+
+	printk(KERN_INFO PFX "Powering down via ATCA power command\n");
+
+	/*
+	 * Power down
+	 */
+	send_msg.netfn = IPMI_NETFN_ATCA;
+	send_msg.cmd = IPMI_ATCA_SET_POWER_CMD;
+	data[0] = IPMI_PICMG_ID;
+	data[1] = 0; /* FRU id */
+	data[2] = 0; /* Power Level */
+	data[3] = 0; /* Don't change saved presets */
+	send_msg.data = data;
+	send_msg.data_len = sizeof(data);
+	rv = ipmi_request_in_rc_mode(user,
+				     (struct ipmi_addr *) &smi_addr,
+				     &send_msg);
+	/*
+	 * At this point, the system may be shutting down, and most
+	 * serial drivers (if used) will have interrupts turned off;
+	 * it may be better to ignore the IPMI_UNKNOWN_ERR_COMPLETION_CODE
+	 * return code.
+	 */
+	if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+		printk(KERN_ERR PFX "Unable to send ATCA powerdown message,"
+		       " IPMI error 0x%x\n", rv);
+		goto out;
+	}
+
+	if (atca_oem_poweroff_hook)
+		atca_oem_poweroff_hook(user);
+ out:
+	return;
+}
+
+/*
+ * CPI1 Support
+ */
+
+#define IPMI_NETFN_OEM_1				0xf8
+#define OEM_GRP_CMD_SET_RESET_STATE		0x84
+#define OEM_GRP_CMD_SET_POWER_STATE		0x82
+#define IPMI_NETFN_OEM_8				0xf8
+#define OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL	0x80
+#define OEM_GRP_CMD_GET_SLOT_GA			0xa3
+#define IPMI_NETFN_SENSOR_EVT			0x10
+#define IPMI_CMD_GET_EVENT_RECEIVER		0x01
+
+#define IPMI_CPI1_PRODUCT_ID		0x000157
+#define IPMI_CPI1_MANUFACTURER_ID	0x0108
+
+static int ipmi_cpi1_detect(ipmi_user_t user)
+{
+	return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID)
+		&& (prod_id == IPMI_CPI1_PRODUCT_ID));
+}
+
+static void ipmi_poweroff_cpi1(ipmi_user_t user)
+{
+	struct ipmi_system_interface_addr smi_addr;
+	struct ipmi_ipmb_addr             ipmb_addr;
+	struct kernel_ipmi_msg            send_msg;
+	int                               rv;
+	unsigned char                     data[1];
+	int                               slot;
+	unsigned char                     hotswap_ipmb;
+	unsigned char                     aer_addr;
+	unsigned char                     aer_lun;
+
+	/*
+	 * Configure IPMI address for local access
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
+
+	printk(KERN_INFO PFX "Powering down via CPI1 power command\n");
+
+	/*
+	 * Get IPMI ipmb address
+	 */
+	send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
+	send_msg.cmd = OEM_GRP_CMD_GET_SLOT_GA;
+	send_msg.data = NULL;
+	send_msg.data_len = 0;
+	rv = ipmi_request_in_rc_mode(user,
+				     (struct ipmi_addr *) &smi_addr,
+				     &send_msg);
+	if (rv)
+		goto out;
+	slot = halt_recv_msg.msg.data[1];
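+	/*
+	 * CPI1 maps the geographic slot number to an IPMB hot-swap
+	 * address; e.g. slot 3 yields 0xae + 6 = 0xb4 and slot 12
+	 * yields 0xb0 + 24 = 0xc8.
+	 */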
+	hotswap_ipmb = (slot > 9) ? (0xb0 + 2 * slot) : (0xae + 2 * slot);
+
+	/*
+	 * Get active event receiver
+	 */
+	send_msg.netfn = IPMI_NETFN_SENSOR_EVT >> 2;
+	send_msg.cmd = IPMI_CMD_GET_EVENT_RECEIVER;
+	send_msg.data = NULL;
+	send_msg.data_len = 0;
+	rv = ipmi_request_in_rc_mode(user,
+				     (struct ipmi_addr *) &smi_addr,
+				     &send_msg);
+	if (rv)
+		goto out;
+	aer_addr = halt_recv_msg.msg.data[1];
+	aer_lun = halt_recv_msg.msg.data[2];
+
+	/*
+	 * Setup IPMB address target instead of local target
+	 */
+	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
+	ipmb_addr.channel = 0;
+	ipmb_addr.slave_addr = aer_addr;
+	ipmb_addr.lun = aer_lun;
+
+	/*
+	 * Send request hotswap control to remove blade from dpv
+	 */
+	send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
+	send_msg.cmd = OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL;
+	send_msg.data = &hotswap_ipmb;
+	send_msg.data_len = 1;
+	ipmi_request_in_rc_mode(user,
+				(struct ipmi_addr *) &ipmb_addr,
+				&send_msg);
+
+	/*
+	 * Set reset asserted
+	 */
+	send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
+	send_msg.cmd = OEM_GRP_CMD_SET_RESET_STATE;
+	send_msg.data = data;
+	data[0] = 1; /* Reset asserted state */
+	send_msg.data_len = 1;
+	rv = ipmi_request_in_rc_mode(user,
+				     (struct ipmi_addr *) &smi_addr,
+				     &send_msg);
+	if (rv)
+		goto out;
+
+	/*
+	 * Power down
+	 */
+	send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
+	send_msg.cmd = OEM_GRP_CMD_SET_POWER_STATE;
+	send_msg.data = data;
+	data[0] = 1; /* Power down state */
+	send_msg.data_len = 1;
+	rv = ipmi_request_in_rc_mode(user,
+				     (struct ipmi_addr *) &smi_addr,
+				     &send_msg);
+	if (rv)
+		goto out;
+
+ out:
+	return;
+}
+
+/*
+ * ipmi_dell_chassis_detect()
+ * Dell systems with IPMI < 1.5 don't set the chassis capability bit
+ * but they can handle a chassis poweroff or powercycle command.
+ */
+
+#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
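+/*
+ * 0x0002A2 is 674, Dell's IANA enterprise number, stored little-endian
+ * so the memcmp() below matches the low three bytes of mfg_id on a
+ * little-endian machine.
+ */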
+static int ipmi_dell_chassis_detect(ipmi_user_t user)
+{
+	const char ipmi_version_major = ipmi_version & 0xF;
+	const char ipmi_version_minor = (ipmi_version >> 4) & 0xF;
+	const char mfr[3] = DELL_IANA_MFR_ID;
+	if (!memcmp(mfr, &mfg_id, sizeof(mfr)) &&
+	    ipmi_version_major <= 1 &&
+	    ipmi_version_minor < 5)
+		return 1;
+	return 0;
+}
+
+/*
+ * Standard chassis support
+ */
+
+#define IPMI_NETFN_CHASSIS_REQUEST	0
+#define IPMI_CHASSIS_CONTROL_CMD	0x02
+
+static int ipmi_chassis_detect(ipmi_user_t user)
+{
+	/* Chassis support, use it. */
+	return (capabilities & 0x80);
+}
+
+static void ipmi_poweroff_chassis(ipmi_user_t user)
+{
+	struct ipmi_system_interface_addr smi_addr;
+	struct kernel_ipmi_msg            send_msg;
+	int                               rv;
+	unsigned char                     data[1];
+
+	/*
+	 * Configure IPMI address for local access
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
+
+ powercyclefailed:
+	printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n",
+		(poweroff_powercycle ? "cycle" : "down"));
+
+	/*
+	 * Power down
+	 */
+	send_msg.netfn = IPMI_NETFN_CHASSIS_REQUEST;
+	send_msg.cmd = IPMI_CHASSIS_CONTROL_CMD;
+	if (poweroff_powercycle)
+		data[0] = IPMI_CHASSIS_POWER_CYCLE;
+	else
+		data[0] = IPMI_CHASSIS_POWER_DOWN;
+	send_msg.data = data;
+	send_msg.data_len = sizeof(data);
+	rv = ipmi_request_in_rc_mode(user,
+				     (struct ipmi_addr *) &smi_addr,
+				     &send_msg);
+	if (rv) {
+		if (poweroff_powercycle) {
+			/* power cycle failed, default to power down */
+			printk(KERN_ERR PFX "Unable to send chassis power " \
+			       "cycle message, IPMI error 0x%x\n", rv);
+			poweroff_powercycle = 0;
+			goto powercyclefailed;
+		}
+
+		printk(KERN_ERR PFX "Unable to send chassis power " \
+		       "down message, IPMI error 0x%x\n", rv);
+	}
+}
+
+
+/* Table of possible power off functions. */
+struct poweroff_function {
+	char *platform_type;
+	int  (*detect)(ipmi_user_t user);
+	void (*poweroff_func)(ipmi_user_t user);
+};
+
+static struct poweroff_function poweroff_functions[] = {
+	{ .platform_type	= "ATCA",
+	  .detect		= ipmi_atca_detect,
+	  .poweroff_func	= ipmi_poweroff_atca },
+	{ .platform_type	= "CPI1",
+	  .detect		= ipmi_cpi1_detect,
+	  .poweroff_func	= ipmi_poweroff_cpi1 },
+	{ .platform_type	= "chassis",
+	  .detect		= ipmi_dell_chassis_detect,
+	  .poweroff_func	= ipmi_poweroff_chassis },
+	/* Chassis should generally be last, other things should override
+	   it. */
+	{ .platform_type	= "chassis",
+	  .detect		= ipmi_chassis_detect,
+	  .poweroff_func	= ipmi_poweroff_chassis },
+};
+#define NUM_PO_FUNCS (sizeof(poweroff_functions) \
+		      / sizeof(struct poweroff_function))
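+/* (Equivalent to the kernel's ARRAY_SIZE(poweroff_functions).) */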
+
+
+/* Called on a powerdown request. */
+static void ipmi_poweroff_function(void)
+{
+	if (!ready)
+		return;
+
+	/* Use run-to-completion mode, since interrupts may be off. */
+	specific_poweroff_func(ipmi_user);
+}
+
+/* Wait for an IPMI interface to be installed, the first one installed
+   will be grabbed by this code and used to perform the powerdown. */
+static void ipmi_po_new_smi(int if_num, struct device *device)
+{
+	struct ipmi_system_interface_addr smi_addr;
+	struct kernel_ipmi_msg            send_msg;
+	int                               rv;
+	int                               i;
+
+	if (ready)
+		return;
+
+	if ((ifnum_to_use >= 0) && (ifnum_to_use != if_num))
+		return;
+
+	rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL,
+			      &ipmi_user);
+	if (rv) {
+		printk(KERN_ERR PFX "could not create IPMI user, error %d\n",
+		       rv);
+		return;
+	}
+
+	ipmi_ifnum = if_num;
+
+	/*
+	 * Do a get device id and store some results, since they are
+	 * used by several functions.
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
+
+	send_msg.netfn = IPMI_NETFN_APP_REQUEST;
+	send_msg.cmd = IPMI_GET_DEVICE_ID_CMD;
+	send_msg.data = NULL;
+	send_msg.data_len = 0;
+	rv = ipmi_request_wait_for_response(ipmi_user,
+					    (struct ipmi_addr *) &smi_addr,
+					    &send_msg);
+	if (rv) {
+		printk(KERN_ERR PFX "Unable to send IPMI get device id info,"
+		       " IPMI error 0x%x\n", rv);
+		goto out_err;
+	}
+
+	if (halt_recv_msg.msg.data_len < 12) {
+		printk(KERN_ERR PFX "(chassis) IPMI get device id info too,"
+		       " short, was %d bytes, needed %d bytes\n",
+		       halt_recv_msg.msg.data_len, 12);
+		goto out_err;
+	}
+
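+	/*
+	 * Get device id response layout (data[0] is the completion
+	 * code): byte 5 is the IPMI version, byte 6 the additional
+	 * device support bits (capabilities), bytes 7-9 the
+	 * manufacturer ID and bytes 10-11 the product ID.
+	 */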
+	mfg_id = (halt_recv_msg.msg.data[7]
+		  | (halt_recv_msg.msg.data[8] << 8)
+		  | (halt_recv_msg.msg.data[9] << 16));
+	prod_id = (halt_recv_msg.msg.data[10]
+		   | (halt_recv_msg.msg.data[11] << 8));
+	capabilities = halt_recv_msg.msg.data[6];
+	ipmi_version = halt_recv_msg.msg.data[5];
+
+
+	/* Scan for a poweroff method */
+	for (i = 0; i < NUM_PO_FUNCS; i++) {
+		if (poweroff_functions[i].detect(ipmi_user))
+			goto found;
+	}
+
+ out_err:
+	printk(KERN_ERR PFX "Unable to find a poweroff function that"
+	       " will work, giving up\n");
+	ipmi_destroy_user(ipmi_user);
+	return;
+
+ found:
+	printk(KERN_INFO PFX "Found a %s style poweroff function\n",
+	       poweroff_functions[i].platform_type);
+	specific_poweroff_func = poweroff_functions[i].poweroff_func;
+	old_poweroff_func = pm_power_off;
+	pm_power_off = ipmi_poweroff_function;
+	ready = 1;
+}
+
+static void ipmi_po_smi_gone(int if_num)
+{
+	if (!ready)
+		return;
+
+	if (ipmi_ifnum != if_num)
+		return;
+
+	ready = 0;
+	ipmi_destroy_user(ipmi_user);
+	pm_power_off = old_poweroff_func;
+}
+
+static struct ipmi_smi_watcher smi_watcher = {
+	.owner    = THIS_MODULE,
+	.new_smi  = ipmi_po_new_smi,
+	.smi_gone = ipmi_po_smi_gone
+};
+
+
+#ifdef CONFIG_PROC_FS
+#include <linux/sysctl.h>
+
+static struct ctl_table ipmi_table[] = {
+	{ .procname	= "poweroff_powercycle",
+	  .data		= &poweroff_powercycle,
+	  .maxlen	= sizeof(poweroff_powercycle),
+	  .mode		= 0644,
+	  .proc_handler	= proc_dointvec },
+	{ }
+};
+
+static struct ctl_table ipmi_dir_table[] = {
+	{ .procname	= "ipmi",
+	  .mode		= 0555,
+	  .child	= ipmi_table },
+	{ }
+};
+
+static struct ctl_table ipmi_root_table[] = {
+	{ .procname	= "dev",
+	  .mode		= 0555,
+	  .child	= ipmi_dir_table },
+	{ }
+};
+
+static struct ctl_table_header *ipmi_table_header;
+#endif /* CONFIG_PROC_FS */
+
+/*
+ * Startup and shutdown functions.
+ */
+static int __init ipmi_poweroff_init(void)
+{
+	int rv;
+
+	printk(KERN_INFO "Copyright (C) 2004 MontaVista Software -"
+	       " IPMI Powerdown via sys_reboot.\n");
+
+	if (poweroff_powercycle)
+		printk(KERN_INFO PFX "Power cycle is enabled.\n");
+
+#ifdef CONFIG_PROC_FS
+	ipmi_table_header = register_sysctl_table(ipmi_root_table);
+	if (!ipmi_table_header) {
+		printk(KERN_ERR PFX "Unable to register powercycle sysctl\n");
+		rv = -ENOMEM;
+		goto out_err;
+	}
+#endif
+
+	rv = ipmi_smi_watcher_register(&smi_watcher);
+
+#ifdef CONFIG_PROC_FS
+	if (rv) {
+		unregister_sysctl_table(ipmi_table_header);
+		printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv);
+		goto out_err;
+	}
+
+ out_err:
+#endif
+	return rv;
+}
+
+#ifdef MODULE
+static void __exit ipmi_poweroff_cleanup(void)
+{
+	int rv;
+
+#ifdef CONFIG_PROC_FS
+	unregister_sysctl_table(ipmi_table_header);
+#endif
+
+	ipmi_smi_watcher_unregister(&smi_watcher);
+
+	if (ready) {
+		rv = ipmi_destroy_user(ipmi_user);
+		if (rv)
+			printk(KERN_ERR PFX "could not cleanup the IPMI"
+			       " user: 0x%x\n", rv);
+		pm_power_off = old_poweroff_func;
+	}
+}
+module_exit(ipmi_poweroff_cleanup);
+#endif
+
+module_init(ipmi_poweroff_init);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("IPMI Poweroff extension to sys_reboot");
diff --git a/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_si_intf.c b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_si_intf.c
new file mode 100644
index 0000000..1965cb5
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_si_intf.c
@@ -0,0 +1,3960 @@
+/*
+ * ipmi_si.c
+ *
+ * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
+ * BT).
+ *
+ * Author: MontaVista Software, Inc.
+ *         Corey Minyard <minyard@mvista.com>
+ *         source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * This file holds the "policy" for the interface to the SMI state
+ * machine.  It does the configuration, handles timers and interrupts,
+ * and drives the real SMI state machine.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/notifier.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <asm/irq.h>
+#include <linux/interrupt.h>
+#include <linux/rcupdate.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include <asm/io.h>
+#include "ipmi_si_sm.h"
+#include "ipmi_dmi.h"
+#include <linux/dmi.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/acpi.h>
+
+#ifdef CONFIG_PARISC
+#include <asm/hardware.h>	/* for register_parisc_driver() stuff */
+#include <asm/parisc-device.h>
+#endif
+
+#define PFX "ipmi_si: "
+
+/* Measure times between events in the driver. */
+#undef DEBUG_TIMING
+
+/* Call every 10 ms. */
+#define SI_TIMEOUT_TIME_USEC	10000
+#define SI_USEC_PER_JIFFY	(1000000/HZ)
+#define SI_TIMEOUT_JIFFIES	(SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
+#define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
+				      short timeout */
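+/*
+ * For example, with HZ=250 SI_USEC_PER_JIFFY is 4000 and
+ * SI_TIMEOUT_JIFFIES works out to 10000/4000 = 2 jiffies.
+ */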
+
+enum si_intf_state {
+	SI_NORMAL,
+	SI_GETTING_FLAGS,
+	SI_GETTING_EVENTS,
+	SI_CLEARING_FLAGS,
+	SI_GETTING_MESSAGES,
+	SI_CHECKING_ENABLES,
+	SI_SETTING_ENABLES
+	/* FIXME - add watchdog stuff. */
+};
+
+/* Some BT-specific defines we need here. */
+#define IPMI_BT_INTMASK_REG		2
+#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT	2
+#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT	1
+
+enum si_type {
+	SI_KCS, SI_SMIC, SI_BT
+};
+
+static const char * const si_to_str[] = { "kcs", "smic", "bt" };
+
+#define DEVICE_NAME "ipmi_si"
+
+static struct platform_driver ipmi_driver;
+
+/*
+ * Indexes into stats[] in smi_info below.
+ */
+enum si_stat_indexes {
+	/*
+	 * Number of times the driver requested a timer while an operation
+	 * was in progress.
+	 */
+	SI_STAT_short_timeouts = 0,
+
+	/*
+	 * Number of times the driver requested a timer while nothing was in
+	 * progress.
+	 */
+	SI_STAT_long_timeouts,
+
+	/* Number of times the interface was idle while being polled. */
+	SI_STAT_idles,
+
+	/* Number of interrupts the driver handled. */
+	SI_STAT_interrupts,
+
+	/* Number of times the driver got an ATTN from the hardware. */
+	SI_STAT_attentions,
+
+	/* Number of times the driver requested flags from the hardware. */
+	SI_STAT_flag_fetches,
+
+	/* Number of times the hardware didn't follow the state machine. */
+	SI_STAT_hosed_count,
+
+	/* Number of completed messages. */
+	SI_STAT_complete_transactions,
+
+	/* Number of IPMI events received from the hardware. */
+	SI_STAT_events,
+
+	/* Number of watchdog pretimeouts. */
+	SI_STAT_watchdog_pretimeouts,
+
+	/* Number of asynchronous messages received. */
+	SI_STAT_incoming_messages,
+
+
+	/* This *must* remain last, add new values above this. */
+	SI_NUM_STATS
+};
+
+struct smi_info {
+	int                    intf_num;
+	ipmi_smi_t             intf;
+	struct si_sm_data      *si_sm;
+	const struct si_sm_handlers *handlers;
+	enum si_type           si_type;
+	spinlock_t             si_lock;
+	struct ipmi_smi_msg    *waiting_msg;
+	struct ipmi_smi_msg    *curr_msg;
+	enum si_intf_state     si_state;
+
+	/*
+	 * Used to handle the various types of I/O that can occur with
+	 * IPMI
+	 */
+	struct si_sm_io io;
+	int (*io_setup)(struct smi_info *info);
+	void (*io_cleanup)(struct smi_info *info);
+	int (*irq_setup)(struct smi_info *info);
+	void (*irq_cleanup)(struct smi_info *info);
+	unsigned int io_size;
+	enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
+	void (*addr_source_cleanup)(struct smi_info *info);
+	void *addr_source_data;
+
+	/*
+	 * Per-OEM handler, called from handle_flags().  Returns 1
+	 * when handle_flags() needs to be re-run or 0 indicating it
+	 * set si_state itself.
+	 */
+	int (*oem_data_avail_handler)(struct smi_info *smi_info);
+
+	/*
+	 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
+	 * is set to hold the flags until we are done handling everything
+	 * from the flags.
+	 */
+#define RECEIVE_MSG_AVAIL	0x01
+#define EVENT_MSG_BUFFER_FULL	0x02
+#define WDT_PRE_TIMEOUT_INT	0x08
+#define OEM0_DATA_AVAIL     0x20
+#define OEM1_DATA_AVAIL     0x40
+#define OEM2_DATA_AVAIL     0x80
+#define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
+			     OEM1_DATA_AVAIL | \
+			     OEM2_DATA_AVAIL)
+	unsigned char       msg_flags;
+
+	/* Does the BMC have an event buffer? */
+	bool		    has_event_buffer;
+
+	/*
+	 * If set to true, this will request events the next time the
+	 * state machine is idle.
+	 */
+	atomic_t            req_events;
+
+	/*
+	 * If true, run the state machine to completion on every send
+	 * call.  Generally used after a panic to make sure stuff goes
+	 * out.
+	 */
+	bool                run_to_completion;
+
+	/* The I/O port of an SI interface. */
+	int                 port;
+
+	/*
+	 * The space between start addresses of the two ports.  For
+	 * instance, if the first port is 0xca2 and the spacing is 4, then
+	 * the second port is 0xca6.
+	 */
+	unsigned int        spacing;
+
+	/* Zero if no irq. */
+	int                 irq;
+
+	/* The timer for this si. */
+	struct timer_list   si_timer;
+
+	/* This flag is set if the timer can be started. */
+	bool		    timer_can_start;
+
+	/*
+	 * This flag is set if the timer is running
+	 * (timer_pending() isn't enough).
+	 */
+	bool		    timer_running;
+
+	/* The time (in jiffies) the last timeout occurred at. */
+	unsigned long       last_timeout_jiffies;
+
+	/* Are we waiting for events, pretimeouts, or received msgs? */
+	atomic_t            need_watch;
+
+	/*
+	 * The driver will disable interrupts when it gets into a
+	 * situation where it cannot handle messages due to lack of
+	 * memory.  Once that situation clears up, it will re-enable
+	 * interrupts.
+	 */
+	bool interrupt_disabled;
+
+	/*
+	 * Does the BMC support events?
+	 */
+	bool supports_event_msg_buff;
+
+	/*
+	 * Can we clear the global enables receive irq bit to disable
+	 * interrupts?  There are currently two forms of brokenness, some
+	 * systems cannot disable the bit (which is technically within
+	 * the spec but a bad idea) and some systems have the bit
+	 * forced to zero even though interrupts work (which is
+	 * clearly outside the spec).  The next bool tells which form
+	 * of brokenness is present.
+	 */
+	bool cannot_disable_irq;
+
+	/*
+	 * Some systems are broken and cannot set the irq enable
+	 * bit, even if they support interrupts.
+	 */
+	bool irq_enable_broken;
+
+	/* Is the driver in maintenance mode? */
+	bool in_maintenance_mode;
+
+	/*
+	 * Did we get an attention that we did not handle?
+	 */
+	bool got_attn;
+
+	/* From the get device id response... */
+	struct ipmi_device_id device_id;
+
+	/* Driver model stuff. */
+	struct device *dev;
+	struct platform_device *pdev;
+
+	/*
+	 * True if we allocated the device, false if it came from
+	 * someplace else (like PCI).
+	 */
+	bool dev_registered;
+
+	/* Slave address, could be reported from DMI. */
+	unsigned char slave_addr;
+
+	/* Counters and things for the proc filesystem. */
+	atomic_t stats[SI_NUM_STATS];
+
+	struct task_struct *thread;
+
+	struct list_head link;
+	union ipmi_smi_info_union addr_info;
+};
+
+#define smi_inc_stat(smi, stat) \
+	atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
+#define smi_get_stat(smi, stat) \
+	((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
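+/*
+ * For example, smi_inc_stat(smi_info, attentions) expands to
+ * atomic_inc(&(smi_info)->stats[SI_STAT_attentions]).
+ */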
+
+#define SI_MAX_PARMS 4
+
+static int force_kipmid[SI_MAX_PARMS];
+static int num_force_kipmid;
+#ifdef CONFIG_PCI
+static bool pci_registered;
+#endif
+#ifdef CONFIG_PARISC
+static bool parisc_registered;
+#endif
+
+static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
+static int num_max_busy_us;
+
+static bool unload_when_empty = true;
+
+static int add_smi(struct smi_info *smi);
+static int try_smi_init(struct smi_info *smi);
+static void cleanup_one_si(struct smi_info *to_clean);
+static void cleanup_ipmi_si(void);
+
+#ifdef DEBUG_TIMING
+void debug_timestamp(char *msg)
+{
+	struct timespec64 t;
+
+	getnstimeofday64(&t);
+	pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec);
+}
+#else
+#define debug_timestamp(x)
+#endif
+
+static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
+static int register_xaction_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&xaction_notifier_list, nb);
+}
+
+static void deliver_recv_msg(struct smi_info *smi_info,
+			     struct ipmi_smi_msg *msg)
+{
+	/* Deliver the message to the upper layer. */
+	if (smi_info->intf)
+		ipmi_smi_msg_received(smi_info->intf, msg);
+	else
+		ipmi_free_smi_msg(msg);
+}
+
+static void return_hosed_msg(struct smi_info *smi_info, int cCode)
+{
+	struct ipmi_smi_msg *msg = smi_info->curr_msg;
+
+	if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
+		cCode = IPMI_ERR_UNSPECIFIED;
+	/* else use it as is */
+
+	/* Make it a response */
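+	/*
+	 * data[0] is (netfn << 2) | lun; a response netfn is the
+	 * request netfn plus one, so ORing in 4 flips the even
+	 * request netfn to the odd response netfn.
+	 */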
+	msg->rsp[0] = msg->data[0] | 4;
+	msg->rsp[1] = msg->data[1];
+	msg->rsp[2] = cCode;
+	msg->rsp_size = 3;
+
+	smi_info->curr_msg = NULL;
+	deliver_recv_msg(smi_info, msg);
+}
+
+static enum si_sm_result start_next_msg(struct smi_info *smi_info)
+{
+	int              rv;
+
+	if (!smi_info->waiting_msg) {
+		smi_info->curr_msg = NULL;
+		rv = SI_SM_IDLE;
+	} else {
+		int err;
+
+		smi_info->curr_msg = smi_info->waiting_msg;
+		smi_info->waiting_msg = NULL;
+		debug_timestamp("Start2");
+		err = atomic_notifier_call_chain(&xaction_notifier_list,
+				0, smi_info);
+		if (err & NOTIFY_STOP_MASK) {
+			rv = SI_SM_CALL_WITHOUT_DELAY;
+			goto out;
+		}
+		err = smi_info->handlers->start_transaction(
+			smi_info->si_sm,
+			smi_info->curr_msg->data,
+			smi_info->curr_msg->data_size);
+		if (err)
+			return_hosed_msg(smi_info, err);
+
+		rv = SI_SM_CALL_WITHOUT_DELAY;
+	}
+out:
+	return rv;
+}
+
+static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+{
+	if (!smi_info->timer_can_start)
+		return;
+	smi_info->last_timeout_jiffies = jiffies;
+	mod_timer(&smi_info->si_timer, new_val);
+	smi_info->timer_running = true;
+}
+
+/*
+ * Start a new message and (re)start the timer and thread.
+ */
+static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
+			  unsigned int size)
+{
+	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+	if (smi_info->thread)
+		wake_up_process(smi_info->thread);
+
+	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
+}
+
+static void start_check_enables(struct smi_info *smi_info)
+{
+	unsigned char msg[2];
+
+	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+
+	start_new_msg(smi_info, msg, 2);
+	smi_info->si_state = SI_CHECKING_ENABLES;
+}
+
+static void start_clear_flags(struct smi_info *smi_info)
+{
+	unsigned char msg[3];
+
+	/* Make sure the watchdog pre-timeout flag is not set at startup. */
+	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+	msg[2] = WDT_PRE_TIMEOUT_INT;
+
+	start_new_msg(smi_info, msg, 3);
+	smi_info->si_state = SI_CLEARING_FLAGS;
+}
+
+static void start_getting_msg_queue(struct smi_info *smi_info)
+{
+	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
+	smi_info->curr_msg->data_size = 2;
+
+	start_new_msg(smi_info, smi_info->curr_msg->data,
+		      smi_info->curr_msg->data_size);
+	smi_info->si_state = SI_GETTING_MESSAGES;
+}
+
+static void start_getting_events(struct smi_info *smi_info)
+{
+	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+	smi_info->curr_msg->data_size = 2;
+
+	start_new_msg(smi_info, smi_info->curr_msg->data,
+		      smi_info->curr_msg->data_size);
+	smi_info->si_state = SI_GETTING_EVENTS;
+}
+
+/*
+ * When we have a situation where we run out of memory and cannot
+ * allocate messages, we just leave them in the BMC and run the system
+ * polled until we can allocate some memory.  Once we have some
+ * memory, we will re-enable the interrupt.
+ *
+ * Note that we cannot just use disable_irq(), since the interrupt may
+ * be shared.
+ */
+static inline bool disable_si_irq(struct smi_info *smi_info)
+{
+	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
+		smi_info->interrupt_disabled = true;
+		start_check_enables(smi_info);
+		return true;
+	}
+	return false;
+}
+
+static inline bool enable_si_irq(struct smi_info *smi_info)
+{
+	if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
+		smi_info->interrupt_disabled = false;
+		start_check_enables(smi_info);
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Allocate a message.  If unable to allocate, start the interrupt
+ * disable process and return NULL.  If able to allocate but
+ * interrupts are disabled, free the message and return NULL after
+ * starting the interrupt enable process.
+ */
+static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
+{
+	struct ipmi_smi_msg *msg;
+
+	msg = ipmi_alloc_smi_msg();
+	if (!msg) {
+		if (!disable_si_irq(smi_info))
+			smi_info->si_state = SI_NORMAL;
+	} else if (enable_si_irq(smi_info)) {
+		ipmi_free_smi_msg(msg);
+		msg = NULL;
+	}
+	return msg;
+}
+
+static void handle_flags(struct smi_info *smi_info)
+{
+retry:
+	if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
+		/* Watchdog pre-timeout */
+		smi_inc_stat(smi_info, watchdog_pretimeouts);
+
+		start_clear_flags(smi_info);
+		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
+		if (smi_info->intf)
+			ipmi_smi_watchdog_pretimeout(smi_info->intf);
+	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
+		/* Messages available. */
+		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+		if (!smi_info->curr_msg)
+			return;
+
+		start_getting_msg_queue(smi_info);
+	} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
+		/* Events available. */
+		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+		if (!smi_info->curr_msg)
+			return;
+
+		start_getting_events(smi_info);
+	} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
+		   smi_info->oem_data_avail_handler) {
+		if (smi_info->oem_data_avail_handler(smi_info))
+			goto retry;
+	} else
+		smi_info->si_state = SI_NORMAL;
+}
+
+/*
+ * Global enables we care about.
+ */
+#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
+			     IPMI_BMC_EVT_MSG_INTR)
+
+static u8 current_global_enables(struct smi_info *smi_info, u8 base,
+				 bool *irq_on)
+{
+	u8 enables = 0;
+
+	if (smi_info->supports_event_msg_buff)
+		enables |= IPMI_BMC_EVT_MSG_BUFF;
+
+	if (((smi_info->irq && !smi_info->interrupt_disabled) ||
+	     smi_info->cannot_disable_irq) &&
+	    !smi_info->irq_enable_broken)
+		enables |= IPMI_BMC_RCV_MSG_INTR;
+
+	if (smi_info->supports_event_msg_buff &&
+	    smi_info->irq && !smi_info->interrupt_disabled &&
+	    !smi_info->irq_enable_broken)
+		enables |= IPMI_BMC_EVT_MSG_INTR;
+
+	*irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);
+
+	return enables;
+}
+
+static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
+{
+	u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);
+
+	irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;
+
+	if ((bool)irqstate == irq_on)
+		return;
+
+	if (irq_on)
+		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
+				     IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+	else
+		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
+}
+
+static void handle_transaction_done(struct smi_info *smi_info)
+{
+	struct ipmi_smi_msg *msg;
+
+	debug_timestamp("Done");
+	switch (smi_info->si_state) {
+	case SI_NORMAL:
+		if (!smi_info->curr_msg)
+			break;
+
+		smi_info->curr_msg->rsp_size
+			= smi_info->handlers->get_result(
+				smi_info->si_sm,
+				smi_info->curr_msg->rsp,
+				IPMI_MAX_MSG_LENGTH);
+
+		/*
+		 * Do this here becase deliver_recv_msg() releases the
+		 * lock, and a new message can be put in during the
+		 * time the lock is released.
+		 */
+		msg = smi_info->curr_msg;
+		smi_info->curr_msg = NULL;
+		deliver_recv_msg(smi_info, msg);
+		break;
+
+	case SI_GETTING_FLAGS:
+	{
+		unsigned char msg[4];
+		unsigned int  len;
+
+		/* We got the flags from the SMI, now handle them. */
+		len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+		if (msg[2] != 0) {
+			/* Error fetching flags, just give up for now. */
+			smi_info->si_state = SI_NORMAL;
+		} else if (len < 4) {
+			/*
+			 * Hmm, no flags.  That's technically illegal, but
+			 * don't use uninitialized data.
+			 */
+			smi_info->si_state = SI_NORMAL;
+		} else {
+			smi_info->msg_flags = msg[3];
+			handle_flags(smi_info);
+		}
+		break;
+	}
+
+	case SI_CLEARING_FLAGS:
+	{
+		unsigned char msg[3];
+
+		/* We cleared the flags. */
+		smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
+		if (msg[2] != 0) {
+			/* Error clearing flags */
+			dev_warn(smi_info->dev,
+				 "Error clearing flags: %2.2x\n", msg[2]);
+		}
+		smi_info->si_state = SI_NORMAL;
+		break;
+	}
+
+	case SI_GETTING_EVENTS:
+	{
+		smi_info->curr_msg->rsp_size
+			= smi_info->handlers->get_result(
+				smi_info->si_sm,
+				smi_info->curr_msg->rsp,
+				IPMI_MAX_MSG_LENGTH);
+
+		/*
+		 * Do this here because deliver_recv_msg() releases the
+		 * lock, and a new message can be put in during the
+		 * time the lock is released.
+		 */
+		msg = smi_info->curr_msg;
+		smi_info->curr_msg = NULL;
+		if (msg->rsp[2] != 0) {
+			/* Error getting event, probably done. */
+			msg->done(msg);
+
+			/* Take off the event flag. */
+			smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+			handle_flags(smi_info);
+		} else {
+			smi_inc_stat(smi_info, events);
+
+			/*
+			 * Do this before we deliver the message
+			 * because delivering the message releases the
+			 * lock and something else can mess with the
+			 * state.
+			 */
+			handle_flags(smi_info);
+
+			deliver_recv_msg(smi_info, msg);
+		}
+		break;
+	}
+
+	case SI_GETTING_MESSAGES:
+	{
+		smi_info->curr_msg->rsp_size
+			= smi_info->handlers->get_result(
+				smi_info->si_sm,
+				smi_info->curr_msg->rsp,
+				IPMI_MAX_MSG_LENGTH);
+
+		/*
+		 * Do this here because deliver_recv_msg() releases the
+		 * lock, and a new message can be put in during the
+		 * time the lock is released.
+		 */
+		msg = smi_info->curr_msg;
+		smi_info->curr_msg = NULL;
+		if (msg->rsp[2] != 0) {
+			/* Error getting message, probably done. */
+			msg->done(msg);
+
+			/* Take off the msg flag. */
+			smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+			handle_flags(smi_info);
+		} else {
+			smi_inc_stat(smi_info, incoming_messages);
+
+			/*
+			 * Do this before we deliver the message
+			 * because delivering the message releases the
+			 * lock and something else can mess with the
+			 * state.
+			 */
+			handle_flags(smi_info);
+
+			deliver_recv_msg(smi_info, msg);
+		}
+		break;
+	}
+
+	case SI_CHECKING_ENABLES:
+	{
+		unsigned char msg[4];
+		u8 enables;
+		bool irq_on;
+
+		/* We got the flags from the SMI, now handle them. */
+		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+		if (msg[2] != 0) {
+			dev_warn(smi_info->dev,
+				 "Couldn't get irq info: %x.\n", msg[2]);
+			dev_warn(smi_info->dev,
+				 "Maybe ok, but ipmi might run very slowly.\n");
+			smi_info->si_state = SI_NORMAL;
+			break;
+		}
+		enables = current_global_enables(smi_info, 0, &irq_on);
+		if (smi_info->si_type == SI_BT)
+			/* BT has its own interrupt enable bit. */
+			check_bt_irq(smi_info, irq_on);
+		if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
+			/* Enables are not correct, fix them. */
+			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+			msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
+			smi_info->handlers->start_transaction(
+				smi_info->si_sm, msg, 3);
+			smi_info->si_state = SI_SETTING_ENABLES;
+		} else if (smi_info->supports_event_msg_buff) {
+			smi_info->curr_msg = ipmi_alloc_smi_msg();
+			if (!smi_info->curr_msg) {
+				smi_info->si_state = SI_NORMAL;
+				break;
+			}
+			start_getting_events(smi_info);
+		} else {
+			smi_info->si_state = SI_NORMAL;
+		}
+		break;
+	}
+
+	case SI_SETTING_ENABLES:
+	{
+		unsigned char msg[4];
+
+		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+		if (msg[2] != 0)
+			dev_warn(smi_info->dev,
+				 "Could not set the global enables: 0x%x.\n",
+				 msg[2]);
+
+		if (smi_info->supports_event_msg_buff) {
+			smi_info->curr_msg = ipmi_alloc_smi_msg();
+			if (!smi_info->curr_msg) {
+				smi_info->si_state = SI_NORMAL;
+				break;
+			}
+			start_getting_events(smi_info);
+		} else {
+			smi_info->si_state = SI_NORMAL;
+		}
+		break;
+	}
+	}
+}
+
+/*
+ * Called on timeouts and events.  Timeouts should pass the elapsed
+ * time, interrupts should pass in zero.  Must be called with
+ * si_lock held and interrupts disabled.
+ */
+static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
+					   int time)
+{
+	enum si_sm_result si_sm_result;
+
+restart:
+	/*
+	 * There used to be a loop here that waited a little while
+	 * (around 25us) before giving up.  That turned out to be
+	 * pointless, the minimum delays I was seeing were in the 300us
+	 * range, which is far too long to wait in an interrupt.  So
+	 * we just run until the state machine tells us something
+	 * happened or it needs a delay.
+	 */
+	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
+	time = 0;
+	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
+		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+
+	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
+		smi_inc_stat(smi_info, complete_transactions);
+
+		handle_transaction_done(smi_info);
+		goto restart;
+	} else if (si_sm_result == SI_SM_HOSED) {
+		smi_inc_stat(smi_info, hosed_count);
+
+		/*
+		 * Do this before return_hosed_msg(), because that
+		 * releases the lock.
+		 */
+		smi_info->si_state = SI_NORMAL;
+		if (smi_info->curr_msg != NULL) {
+			/*
+			 * If we were handling a user message, format
+			 * a response to send to the upper layer to
+			 * tell it about the error.
+			 */
+			return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
+		}
+		goto restart;
+	}
+
+	/*
+	 * We prefer handling attn over new messages.  But don't do
+	 * this if there is not yet an upper layer to handle anything.
+	 */
+	if (likely(smi_info->intf) &&
+	    (si_sm_result == SI_SM_ATTN || smi_info->got_attn)) {
+		unsigned char msg[2];
+
+		if (smi_info->si_state != SI_NORMAL) {
+			/*
+			 * We got an ATTN, but we are doing something else.
+			 * Handle the ATTN later.
+			 */
+			smi_info->got_attn = true;
+		} else {
+			smi_info->got_attn = false;
+			smi_inc_stat(smi_info, attentions);
+
+			/*
+			 * Got an attn, send down a get message flags to see
+			 * what's causing it.  It would be better to handle
+			 * this in the upper layer, but due to the way
+			 * interrupts work with the SMI, that's not really
+			 * possible.
+			 */
+			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+			msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+			start_new_msg(smi_info, msg, 2);
+			smi_info->si_state = SI_GETTING_FLAGS;
+			goto restart;
+		}
+	}
+
+	/* If we are currently idle, try to start the next message. */
+	if (si_sm_result == SI_SM_IDLE) {
+		smi_inc_stat(smi_info, idles);
+
+		si_sm_result = start_next_msg(smi_info);
+		if (si_sm_result != SI_SM_IDLE)
+			goto restart;
+	}
+
+	if ((si_sm_result == SI_SM_IDLE)
+	    && (atomic_read(&smi_info->req_events))) {
+		/*
+		 * We are idle and the upper layer requested that I fetch
+		 * events, so do so.
+		 */
+		atomic_set(&smi_info->req_events, 0);
+
+		/*
+		 * Take this opportunity to check the interrupt and
+		 * message enable state for the BMC.  The BMC can be
+		 * asynchronously reset, and may thus get interrupts
+		 * disabled and messages disabled.
+		 */
+		if (smi_info->supports_event_msg_buff || smi_info->irq) {
+			start_check_enables(smi_info);
+		} else {
+			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+			if (!smi_info->curr_msg)
+				goto out;
+
+			start_getting_events(smi_info);
+		}
+		goto restart;
+	}
+
+	if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
+		/* OK if it fails; the timer will just go off. */
+		if (del_timer(&smi_info->si_timer))
+			smi_info->timer_running = false;
+	}
+
+out:
+	return si_sm_result;
+}
+
+static void check_start_timer_thread(struct smi_info *smi_info)
+{
+	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+		if (smi_info->thread)
+			wake_up_process(smi_info->thread);
+
+		start_next_msg(smi_info);
+		smi_event_handler(smi_info, 0);
+	}
+}
+
+static void flush_messages(void *send_info)
+{
+	struct smi_info *smi_info = send_info;
+	enum si_sm_result result;
+
+	/*
+	 * Currently, this function is called only in run-to-completion
+	 * mode.  This means we are single-threaded, no need for locks.
+	 */
+	result = smi_event_handler(smi_info, 0);
+	while (result != SI_SM_IDLE) {
+		udelay(SI_SHORT_TIMEOUT_USEC);
+		result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
+	}
+}
+
+static void sender(void                *send_info,
+		   struct ipmi_smi_msg *msg)
+{
+	struct smi_info   *smi_info = send_info;
+	unsigned long     flags;
+
+	debug_timestamp("Enqueue");
+
+	if (smi_info->run_to_completion) {
+		/*
+		 * If we are running to completion, start it.  Upper
+		 * layer will call flush_messages to clear it out.
+		 */
+		smi_info->waiting_msg = msg;
+		return;
+	}
+
+	spin_lock_irqsave(&smi_info->si_lock, flags);
+	/*
+	 * The following two lines don't need to be under the lock for
+	 * the lock's sake, but they do need SMP memory barriers to
+	 * avoid getting things out of order.  We are already claiming
+	 * the lock, anyway, so just do it under the lock to avoid the
+	 * ordering problem.
+	 */
+	BUG_ON(smi_info->waiting_msg);
+	smi_info->waiting_msg = msg;
+	check_start_timer_thread(smi_info);
+	spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
+static void set_run_to_completion(void *send_info, bool i_run_to_completion)
+{
+	struct smi_info   *smi_info = send_info;
+
+	smi_info->run_to_completion = i_run_to_completion;
+	if (i_run_to_completion)
+		flush_messages(smi_info);
+}
+
+/*
+ * Use -1 in the nsec value of the busy waiting timespec to indicate
+ * that we are spinning in kipmid looking for something and not delaying
+ * between checks
+ */
+static inline void ipmi_si_set_not_busy(struct timespec64 *ts)
+{
+	ts->tv_nsec = -1;
+}
+static inline int ipmi_si_is_busy(struct timespec64 *ts)
+{
+	return ts->tv_nsec != -1;
+}
+
+static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
+					const struct smi_info *smi_info,
+					struct timespec64 *busy_until)
+{
+	unsigned int max_busy_us = 0;
+
+	if (smi_info->intf_num < num_max_busy_us)
+		max_busy_us = kipmid_max_busy_us[smi_info->intf_num];
+	if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
+		ipmi_si_set_not_busy(busy_until);
+	else if (!ipmi_si_is_busy(busy_until)) {
+		getnstimeofday64(busy_until);
+		timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
+	} else {
+		struct timespec64 now;
+
+		getnstimeofday64(&now);
+		if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
+			ipmi_si_set_not_busy(busy_until);
+			return 0;
+		}
+	}
+	return 1;
+}
+
+
+/*
+ * A busy-waiting loop for speeding up IPMI operation.
+ *
+ * Lousy hardware makes this hard.  This is only enabled for systems
+ * that are not BT and do not have interrupts.  It keeps spinning
+ * until an operation is complete or max_busy tells it to stop
+ * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
+ * Documentation/IPMI.txt for details.
+ */
+static int ipmi_thread(void *data)
+{
+	struct smi_info *smi_info = data;
+	unsigned long flags;
+	enum si_sm_result smi_result;
+	struct timespec64 busy_until;
+
+	ipmi_si_set_not_busy(&busy_until);
+	set_user_nice(current, MAX_NICE);
+	while (!kthread_should_stop()) {
+		int busy_wait;
+
+		spin_lock_irqsave(&(smi_info->si_lock), flags);
+		smi_result = smi_event_handler(smi_info, 0);
+
+		/*
+		 * If the driver is doing something, there is a possible
+		 * race with the timer.  If the timer handler sees idle,
+		 * and the thread here sees something else, the timer
+		 * handler won't restart the timer even though it is
+		 * required.  So start it here if necessary.
+		 */
+		if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
+			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
+						  &busy_until);
+		if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+			; /* do nothing */
+		} else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
+			/*
+			 * In maintenance mode we run as fast as
+			 * possible to allow firmware updates to
+			 * complete as fast as possible, but normally
+			 * don't bang on the scheduler.
+			 */
+			if (smi_info->in_maintenance_mode)
+				schedule();
+			else
+				usleep_range(100, 200);
+		} else if (smi_result == SI_SM_IDLE) {
+			if (atomic_read(&smi_info->need_watch)) {
+				schedule_timeout_interruptible(100);
+			} else {
+				/* Wait to be woken up when we are needed. */
+				__set_current_state(TASK_INTERRUPTIBLE);
+				schedule();
+			}
+		} else {
+			schedule_timeout_interruptible(1);
+		}
+	}
+	return 0;
+}
+
+
+static void poll(void *send_info)
+{
+	struct smi_info *smi_info = send_info;
+	unsigned long flags = 0;
+	bool run_to_completion = smi_info->run_to_completion;
+
+	/*
+	 * Make sure there is some delay in the poll loop so we can
+	 * drive time forward and timeout things.
+	 */
+	udelay(10);
+	if (!run_to_completion)
+		spin_lock_irqsave(&smi_info->si_lock, flags);
+	smi_event_handler(smi_info, 10);
+	if (!run_to_completion)
+		spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
+static void request_events(void *send_info)
+{
+	struct smi_info *smi_info = send_info;
+
+	if (!smi_info->has_event_buffer)
+		return;
+
+	atomic_set(&smi_info->req_events, 1);
+}
+
+static void set_need_watch(void *send_info, bool enable)
+{
+	struct smi_info *smi_info = send_info;
+	unsigned long flags;
+
+	atomic_set(&smi_info->need_watch, enable);
+	spin_lock_irqsave(&smi_info->si_lock, flags);
+	check_start_timer_thread(smi_info);
+	spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
+static int initialized;
+
+static void smi_timeout(unsigned long data)
+{
+	struct smi_info   *smi_info = (struct smi_info *) data;
+	enum si_sm_result smi_result;
+	unsigned long     flags;
+	unsigned long     jiffies_now;
+	long              time_diff;
+	long		  timeout;
+
+	spin_lock_irqsave(&(smi_info->si_lock), flags);
+	debug_timestamp("Timer");
+
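+	/*
+	 * Convert the jiffies elapsed since the last timeout into
+	 * microseconds for the state machine.
+	 */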
+	jiffies_now = jiffies;
+	time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
+		     * SI_USEC_PER_JIFFY);
+	smi_result = smi_event_handler(smi_info, time_diff);
+
+	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
+		/* Running with interrupts, only do long timeouts. */
+		timeout = jiffies + SI_TIMEOUT_JIFFIES;
+		smi_inc_stat(smi_info, long_timeouts);
+		goto do_mod_timer;
+	}
+
+	/*
+	 * If the state machine asks for a short delay, then shorten
+	 * the timer timeout.
+	 */
+	if (smi_result == SI_SM_CALL_WITH_DELAY) {
+		smi_inc_stat(smi_info, short_timeouts);
+		timeout = jiffies + 1;
+	} else {
+		smi_inc_stat(smi_info, long_timeouts);
+		timeout = jiffies + SI_TIMEOUT_JIFFIES;
+	}
+
+do_mod_timer:
+	if (smi_result != SI_SM_IDLE)
+		smi_mod_timer(smi_info, timeout);
+	else
+		smi_info->timer_running = false;
+	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+}
+
+static irqreturn_t si_irq_handler(int irq, void *data)
+{
+	struct smi_info *smi_info = data;
+	unsigned long   flags;
+
+	spin_lock_irqsave(&(smi_info->si_lock), flags);
+
+	smi_inc_stat(smi_info, interrupts);
+
+	debug_timestamp("Interrupt");
+
+	smi_event_handler(smi_info, 0);
+	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t si_bt_irq_handler(int irq, void *data)
+{
+	struct smi_info *smi_info = data;
+	/* We need to clear the IRQ flag for the BT interface. */
+	smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
+			     IPMI_BT_INTMASK_CLEAR_IRQ_BIT
+			     | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+	return si_irq_handler(irq, data);
+}
+
+static int smi_start_processing(void       *send_info,
+				ipmi_smi_t intf)
+{
+	struct smi_info *new_smi = send_info;
+	int             enable = 0;
+
+	new_smi->intf = intf;
+
+	/* Set up the timer that drives the interface. */
+	setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+	new_smi->timer_can_start = true;
+	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
+
+	/* Try to claim any interrupts. */
+	if (new_smi->irq_setup)
+		new_smi->irq_setup(new_smi);
+
+	/*
+	 * Check if the user forcefully enabled the daemon.
+	 */
+	if (new_smi->intf_num < num_force_kipmid)
+		enable = force_kipmid[new_smi->intf_num];
+	/*
+	 * The BT interface is efficient enough to not need a thread,
+	 * and there is no need for a thread if we have interrupts.
+	 */
+	else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
+		enable = 1;
+
+	if (enable) {
+		new_smi->thread = kthread_run(ipmi_thread, new_smi,
+					      "kipmi%d", new_smi->intf_num);
+		if (IS_ERR(new_smi->thread)) {
+			dev_notice(new_smi->dev, "Could not start"
+				   " kernel thread due to error %ld, only using"
+				   " timers to drive the interface\n",
+				   PTR_ERR(new_smi->thread));
+			new_smi->thread = NULL;
+		}
+	}
+
+	return 0;
+}
+
+static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
+{
+	struct smi_info *smi = send_info;
+
+	data->addr_src = smi->addr_source;
+	data->dev = smi->dev;
+	data->addr_info = smi->addr_info;
+	get_device(smi->dev);
+
+	return 0;
+}
+
+static void set_maintenance_mode(void *send_info, bool enable)
+{
+	struct smi_info   *smi_info = send_info;
+
+	if (!enable)
+		atomic_set(&smi_info->req_events, 0);
+	smi_info->in_maintenance_mode = enable;
+}
+
+static const struct ipmi_smi_handlers handlers = {
+	.owner                  = THIS_MODULE,
+	.start_processing       = smi_start_processing,
+	.get_smi_info		= get_smi_info,
+	.sender			= sender,
+	.request_events		= request_events,
+	.set_need_watch		= set_need_watch,
+	.set_maintenance_mode   = set_maintenance_mode,
+	.set_run_to_completion  = set_run_to_completion,
+	.flush_messages		= flush_messages,
+	.poll			= poll,
+};
+
+/*
+ * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
+ * a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS.
+ */
+
+static LIST_HEAD(smi_infos);
+static DEFINE_MUTEX(smi_infos_lock);
+static int smi_num; /* Used to sequence the SMIs */
+
+#define DEFAULT_REGSPACING	1
+#define DEFAULT_REGSIZE		1
+
+#ifdef CONFIG_ACPI
+static bool          si_tryacpi = true;
+#endif
+#ifdef CONFIG_DMI
+static bool          si_trydmi = true;
+#endif
+static bool          si_tryplatform = true;
+#ifdef CONFIG_PCI
+static bool          si_trypci = true;
+#endif
+static char          *si_type[SI_MAX_PARMS];
+#define MAX_SI_TYPE_STR 30
+static char          si_type_str[MAX_SI_TYPE_STR];
+static unsigned long addrs[SI_MAX_PARMS];
+static unsigned int num_addrs;
+static unsigned int  ports[SI_MAX_PARMS];
+static unsigned int num_ports;
+static int           irqs[SI_MAX_PARMS];
+static unsigned int num_irqs;
+static int           regspacings[SI_MAX_PARMS];
+static unsigned int num_regspacings;
+static int           regsizes[SI_MAX_PARMS];
+static unsigned int num_regsizes;
+static int           regshifts[SI_MAX_PARMS];
+static unsigned int num_regshifts;
+static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
+static unsigned int num_slave_addrs;
+
+#define IPMI_IO_ADDR_SPACE  0
+#define IPMI_MEM_ADDR_SPACE 1
+static const char * const addr_space_to_str[] = { "i/o", "mem" };
+
+static int hotmod_handler(const char *val, const struct kernel_param *kp);
+
+module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
+MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
+		 " Documentation/IPMI.txt in the kernel sources for the"
+		 " gory details.");
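+/*
+ * For example (per Documentation/IPMI.txt), a KCS interface at the
+ * standard port can be added at runtime with:
+ *   echo "add,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
+ */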
+
+#ifdef CONFIG_ACPI
+module_param_named(tryacpi, si_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+		 " default scan of the interfaces identified via ACPI");
+#endif
+#ifdef CONFIG_DMI
+module_param_named(trydmi, si_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
+		 " default scan of the interfaces identified via DMI");
+#endif
+module_param_named(tryplatform, si_tryplatform, bool, 0);
+MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the"
+		 " default scan of the interfaces identified via platform"
+		 " interfaces like openfirmware");
+#ifdef CONFIG_PCI
+module_param_named(trypci, si_trypci, bool, 0);
+MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
+		 " default scan of the interfaces identified via pci");
+#endif
+module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
+MODULE_PARM_DESC(type, "Defines the type of each interface, each"
+		 " interface separated by commas.  The types are 'kcs',"
+		 " 'smic', and 'bt'.  For example type=kcs,bt will set"
+		 " the first interface to kcs and the second to bt");
+module_param_hw_array(addrs, ulong, iomem, &num_addrs, 0);
+MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
+		 " addresses separated by commas.  Only use if an interface"
+		 " is in memory.  Otherwise, set it to zero or leave"
+		 " it blank.");
+module_param_hw_array(ports, uint, ioport, &num_ports, 0);
+MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
+		 " addresses separated by commas.  Only use if an interface"
+		 " is a port.  Otherwise, set it to zero or leave"
+		 " it blank.");
+module_param_hw_array(irqs, int, irq, &num_irqs, 0);
+MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
+		 " addresses separated by commas.  Only use if an interface"
+		 " has an interrupt.  Otherwise, set it to zero or leave"
+		 " it blank.");
+module_param_hw_array(regspacings, int, other, &num_regspacings, 0);
+MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
+		 " and each successive register used by the interface.  For"
+		 " instance, if the start address is 0xca2 and the spacing"
+		 " is 2, then the second address is at 0xca4.  Defaults"
+		 " to 1.");
+module_param_hw_array(regsizes, int, other, &num_regsizes, 0);
+MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
+		 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
+		 " 16-bit, 32-bit, or 64-bit register.  Use this if"
+		 " the 8-bit IPMI register has to be read from a larger"
+		 " register.");
+module_param_hw_array(regshifts, int, other, &num_regshifts, 0);
+MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
+		 " IPMI register, in bits.  For instance, if the data"
+		 " is read from a 32-bit word and the IPMI data is in"
+		 " bits 8-15, then the shift would be 8.");
+module_param_hw_array(slave_addrs, int, other, &num_slave_addrs, 0);
+MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
+		 " the controller.  Normally this is 0x20, but can be"
+		 " overridden by this parm.  This is an array indexed"
+		 " by interface number.");
+module_param_array(force_kipmid, int, &num_force_kipmid, 0);
+MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
+		 " disabled(0).  Normally the IPMI driver auto-detects"
+		 " this, but the value may be overridden by this parm.");
+module_param(unload_when_empty, bool, 0);
+MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
+		 " specified or found, default is 1.  Setting to 0"
+		 " is useful for hot add of devices using hotmod.");
+module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
+MODULE_PARM_DESC(kipmid_max_busy_us,
+		 "Max time (in microseconds) to busy-wait for IPMI data before"
+		 " sleeping. 0 (default) means to wait forever. Set to 100-500"
+		 " if kipmid is using up a lot of CPU time.");
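+/*
+ * A hand-configured load using the parameters above might look like
+ * this (values are illustrative, not recommendations):
+ *   modprobe ipmi_si type=kcs ports=0xca2 regspacings=1
+ */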
+
+
+static void std_irq_cleanup(struct smi_info *info)
+{
+	if (info->si_type == SI_BT)
+		/* Disable the interrupt in the BT interface. */
+		info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
+	free_irq(info->irq, info);
+}
+
+static int std_irq_setup(struct smi_info *info)
+{
+	int rv;
+
+	if (!info->irq)
+		return 0;
+
+	if (info->si_type == SI_BT) {
+		rv = request_irq(info->irq,
+				 si_bt_irq_handler,
+				 IRQF_SHARED,
+				 DEVICE_NAME,
+				 info);
+		if (!rv)
+			/* Enable the interrupt in the BT interface. */
+			info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
+					 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+	} else
+		rv = request_irq(info->irq,
+				 si_irq_handler,
+				 IRQF_SHARED,
+				 DEVICE_NAME,
+				 info);
+	if (rv) {
+		dev_warn(info->dev, "%s unable to claim interrupt %d,"
+			 " running polled\n",
+			 DEVICE_NAME, info->irq);
+		info->irq = 0;
+	} else {
+		info->irq_cleanup = std_irq_cleanup;
+		dev_info(info->dev, "Using irq %d\n", info->irq);
+	}
+
+	return rv;
+}
+
+static unsigned char port_inb(const struct si_sm_io *io, unsigned int offset)
+{
+	unsigned int addr = io->addr_data;
+
+	return inb(addr + (offset * io->regspacing));
+}
+
+static void port_outb(const struct si_sm_io *io, unsigned int offset,
+		      unsigned char b)
+{
+	unsigned int addr = io->addr_data;
+
+	outb(b, addr + (offset * io->regspacing));
+}
+
+static unsigned char port_inw(const struct si_sm_io *io, unsigned int offset)
+{
+	unsigned int addr = io->addr_data;
+
+	return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
+}
+
+static void port_outw(const struct si_sm_io *io, unsigned int offset,
+		      unsigned char b)
+{
+	unsigned int addr = io->addr_data;
+
+	outw(b << io->regshift, addr + (offset * io->regspacing));
+}
+
+static unsigned char port_inl(const struct si_sm_io *io, unsigned int offset)
+{
+	unsigned int addr = io->addr_data;
+
+	return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
+}
+
+static void port_outl(const struct si_sm_io *io, unsigned int offset,
+		      unsigned char b)
+{
+	unsigned int addr = io->addr_data;
+
+	outl(b << io->regshift, addr+(offset * io->regspacing));
+}
+
+static void port_cleanup(struct smi_info *info)
+{
+	unsigned int addr = info->io.addr_data;
+	int          idx;
+
+	if (addr) {
+		for (idx = 0; idx < info->io_size; idx++)
+			release_region(addr + idx * info->io.regspacing,
+				       info->io.regsize);
+	}
+}
+
+static int port_setup(struct smi_info *info)
+{
+	unsigned int addr = info->io.addr_data;
+	int          idx;
+
+	if (!addr)
+		return -ENODEV;
+
+	info->io_cleanup = port_cleanup;
+
+	/*
+	 * Figure out the actual inb/inw/inl/etc routine to use based
+	 * upon the register size.
+	 */
+	switch (info->io.regsize) {
+	case 1:
+		info->io.inputb = port_inb;
+		info->io.outputb = port_outb;
+		break;
+	case 2:
+		info->io.inputb = port_inw;
+		info->io.outputb = port_outw;
+		break;
+	case 4:
+		info->io.inputb = port_inl;
+		info->io.outputb = port_outl;
+		break;
+	default:
+		dev_warn(info->dev, "Invalid register size: %d\n",
+			 info->io.regsize);
+		return -EINVAL;
+	}
+
+	/*
+	 * Some BIOSes reserve disjoint I/O regions in their ACPI
+	 * tables.  This causes problems when trying to register the
+	 * entire I/O region.  Therefore we must register each I/O
+	 * port separately.
+	 */
+	for (idx = 0; idx < info->io_size; idx++) {
+		if (request_region(addr + idx * info->io.regspacing,
+				   info->io.regsize, DEVICE_NAME) == NULL) {
+			/* Undo allocations */
+			while (idx--)
+				release_region(addr + idx * info->io.regspacing,
+					       info->io.regsize);
+			return -EIO;
+		}
+	}
+	return 0;
+}
+
+static unsigned char intf_mem_inb(const struct si_sm_io *io,
+				  unsigned int offset)
+{
+	return readb((io->addr)+(offset * io->regspacing));
+}
+
+static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset,
+			  unsigned char b)
+{
+	writeb(b, (io->addr)+(offset * io->regspacing));
+}
+
+static unsigned char intf_mem_inw(const struct si_sm_io *io,
+				  unsigned int offset)
+{
+	return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
+		& 0xff;
+}
+
+static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset,
+			  unsigned char b)
+{
+	writew(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+
+static unsigned char intf_mem_inl(const struct si_sm_io *io,
+				  unsigned int offset)
+{
+	return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
+		& 0xff;
+}
+
+static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset,
+			  unsigned char b)
+{
+	writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+
+#ifdef readq
+static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset)
+{
+	return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
+		& 0xff;
+}
+
+static void mem_outq(const struct si_sm_io *io, unsigned int offset,
+		     unsigned char b)
+{
+	writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+#endif
+
+static void mem_region_cleanup(struct smi_info *info, int num)
+{
+	unsigned long addr = info->io.addr_data;
+	int idx;
+
+	for (idx = 0; idx < num; idx++)
+		release_mem_region(addr + idx * info->io.regspacing,
+				   info->io.regsize);
+}
+
+static void mem_cleanup(struct smi_info *info)
+{
+	if (info->io.addr) {
+		iounmap(info->io.addr);
+		mem_region_cleanup(info, info->io_size);
+	}
+}
+
+static int mem_setup(struct smi_info *info)
+{
+	unsigned long addr = info->io.addr_data;
+	int           mapsize, idx;
+
+	if (!addr)
+		return -ENODEV;
+
+	info->io_cleanup = mem_cleanup;
+
+	/*
+	 * Figure out the actual readb/readw/readl/etc routine to use based
+	 * upon the register size.
+	 */
+	switch (info->io.regsize) {
+	case 1:
+		info->io.inputb = intf_mem_inb;
+		info->io.outputb = intf_mem_outb;
+		break;
+	case 2:
+		info->io.inputb = intf_mem_inw;
+		info->io.outputb = intf_mem_outw;
+		break;
+	case 4:
+		info->io.inputb = intf_mem_inl;
+		info->io.outputb = intf_mem_outl;
+		break;
+#ifdef readq
+	case 8:
+		info->io.inputb = mem_inq;
+		info->io.outputb = mem_outq;
+		break;
+#endif
+	default:
+		dev_warn(info->dev, "Invalid register size: %d\n",
+			 info->io.regsize);
+		return -EINVAL;
+	}
+
+	/*
+	 * Some BIOSes reserve disjoint memory regions in their ACPI
+	 * tables.  This causes problems when trying to request the
+	 * entire region.  Therefore we must request each register
+	 * separately.
+	 */
+	for (idx = 0; idx < info->io_size; idx++) {
+		if (request_mem_region(addr + idx * info->io.regspacing,
+				       info->io.regsize, DEVICE_NAME) == NULL) {
+			/* Undo allocations */
+			mem_region_cleanup(info, idx);
+			return -EIO;
+		}
+	}
+
+	/*
+	 * Calculate the total amount of memory to claim.  This is an
+	 * unusual looking calculation, but it avoids claiming any
+	 * more memory than it has to.  It will claim everything
+	 * from the first address to the end of the last full
+	 * register.
+	 */
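+	/*
+	 * For example, with io_size = 2, regspacing = 4 and regsize = 1,
+	 * mapsize = (2 * 4) - (4 - 1) = 5 bytes: offsets 0 through 4,
+	 * from the start of the first register to the end of the second.
+	 */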
+	mapsize = ((info->io_size * info->io.regspacing)
+		   - (info->io.regspacing - info->io.regsize));
+	info->io.addr = ioremap(addr, mapsize);
+	if (info->io.addr == NULL) {
+		mem_region_cleanup(info, info->io_size);
+		return -EIO;
+	}
+	return 0;
+}
+
+/*
+ * Parms come in as <op1>[:op2[:op3...]].  ops are:
+ *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
+ * Options are:
+ *   rsp=<regspacing>
+ *   rsi=<regsize>
+ *   rsh=<regshift>
+ *   irq=<irq>
+ *   ipmb=<ipmb addr>
+ */
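+/*
+ * For example, a KCS interface at I/O port 0xca2 can be added at
+ * runtime with:
+ *   echo "add,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
+ * and removed again with:
+ *   echo "remove,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
+ * See Documentation/IPMI.txt for the full syntax.
+ */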
+enum hotmod_op { HM_ADD, HM_REMOVE };
+struct hotmod_vals {
+	const char *name;
+	const int  val;
+};
+
+static const struct hotmod_vals hotmod_ops[] = {
+	{ "add",	HM_ADD },
+	{ "remove",	HM_REMOVE },
+	{ NULL }
+};
+
+static const struct hotmod_vals hotmod_si[] = {
+	{ "kcs",	SI_KCS },
+	{ "smic",	SI_SMIC },
+	{ "bt",		SI_BT },
+	{ NULL }
+};
+
+static const struct hotmod_vals hotmod_as[] = {
+	{ "mem",	IPMI_MEM_ADDR_SPACE },
+	{ "i/o",	IPMI_IO_ADDR_SPACE },
+	{ NULL }
+};
+
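+/*
+ * Consume the comma-terminated token at *curr, look it up in the
+ * hotmod_vals table 'v', store the matching value in *val, and
+ * advance *curr past the comma.  Returns 0 on success or -EINVAL if
+ * the token is missing or unrecognized.
+ */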
+static int parse_str(const struct hotmod_vals *v, int *val, char *name,
+		     char **curr)
+{
+	char *s;
+	int  i;
+
+	s = strchr(*curr, ',');
+	if (!s) {
+		pr_warn(PFX "No hotmod %s given.\n", name);
+		return -EINVAL;
+	}
+	*s = '\0';
+	s++;
+	for (i = 0; v[i].name; i++) {
+		if (strcmp(*curr, v[i].name) == 0) {
+			*val = v[i].val;
+			*curr = s;
+			return 0;
+		}
+	}
+
+	pr_warn(PFX "Invalid hotmod %s '%s'\n", name, *curr);
+	return -EINVAL;
+}
+
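+/*
+ * Returns 1 and stores the parsed value in *val if 'curr' matches
+ * 'name', 0 if the names do not match, or -EINVAL if the option
+ * value is missing or not a valid number.
+ */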
+static int check_hotmod_int_op(const char *curr, const char *option,
+			       const char *name, int *val)
+{
+	char *n;
+
+	if (strcmp(curr, name) == 0) {
+		if (!option) {
+			pr_warn(PFX "No option given for '%s'\n", curr);
+			return -EINVAL;
+		}
+		*val = simple_strtoul(option, &n, 0);
+		if ((*n != '\0') || (*option == '\0')) {
+			pr_warn(PFX "Bad option given for '%s'\n", curr);
+			return -EINVAL;
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static struct smi_info *smi_info_alloc(void)
+{
+	struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+	if (info)
+		spin_lock_init(&info->si_lock);
+	return info;
+}
+
+static int hotmod_handler(const char *val, const struct kernel_param *kp)
+{
+	char *str = kstrdup(val, GFP_KERNEL);
+	int  rv;
+	char *next, *curr, *s, *n, *o;
+	enum hotmod_op op;
+	enum si_type si_type;
+	int  addr_space;
+	unsigned long addr;
+	int regspacing;
+	int regsize;
+	int regshift;
+	int irq;
+	int ipmb;
+	int ival;
+	int len;
+	struct smi_info *info;
+
+	if (!str)
+		return -ENOMEM;
+
+	/* Kill any trailing spaces, as we can get a "\n" from echo. */
+	len = strlen(str);
+	ival = len - 1;
+	while ((ival >= 0) && isspace(str[ival])) {
+		str[ival] = '\0';
+		ival--;
+	}
+
+	for (curr = str; curr; curr = next) {
+		regspacing = 1;
+		regsize = 1;
+		regshift = 0;
+		irq = 0;
+		ipmb = 0; /* Choose the default if not specified */
+
+		next = strchr(curr, ':');
+		if (next) {
+			*next = '\0';
+			next++;
+		}
+
+		rv = parse_str(hotmod_ops, &ival, "operation", &curr);
+		if (rv)
+			break;
+		op = ival;
+
+		rv = parse_str(hotmod_si, &ival, "interface type", &curr);
+		if (rv)
+			break;
+		si_type = ival;
+
+		rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
+		if (rv)
+			break;
+
+		s = strchr(curr, ',');
+		if (s) {
+			*s = '\0';
+			s++;
+		}
+		addr = simple_strtoul(curr, &n, 0);
+		if ((*n != '\0') || (*curr == '\0')) {
+			pr_warn(PFX "Invalid hotmod address '%s'\n", curr);
+			break;
+		}
+
+		while (s) {
+			curr = s;
+			s = strchr(curr, ',');
+			if (s) {
+				*s = '\0';
+				s++;
+			}
+			o = strchr(curr, '=');
+			if (o) {
+				*o = '\0';
+				o++;
+			}
+			rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
+			if (rv < 0)
+				goto out;
+			else if (rv)
+				continue;
+			rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
+			if (rv < 0)
+				goto out;
+			else if (rv)
+				continue;
+			rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
+			if (rv < 0)
+				goto out;
+			else if (rv)
+				continue;
+			rv = check_hotmod_int_op(curr, o, "irq", &irq);
+			if (rv < 0)
+				goto out;
+			else if (rv)
+				continue;
+			rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
+			if (rv < 0)
+				goto out;
+			else if (rv)
+				continue;
+
+			rv = -EINVAL;
+			pr_warn(PFX "Invalid hotmod option '%s'\n", curr);
+			goto out;
+		}
+
+		if (op == HM_ADD) {
+			info = smi_info_alloc();
+			if (!info) {
+				rv = -ENOMEM;
+				goto out;
+			}
+
+			info->addr_source = SI_HOTMOD;
+			info->si_type = si_type;
+			info->io.addr_data = addr;
+			info->io.addr_type = addr_space;
+			if (addr_space == IPMI_MEM_ADDR_SPACE)
+				info->io_setup = mem_setup;
+			else
+				info->io_setup = port_setup;
+
+			info->io.addr = NULL;
+			info->io.regspacing = regspacing;
+			if (!info->io.regspacing)
+				info->io.regspacing = DEFAULT_REGSPACING;
+			info->io.regsize = regsize;
+			if (!info->io.regsize)
+				info->io.regsize = DEFAULT_REGSIZE;
+			info->io.regshift = regshift;
+			info->irq = irq;
+			if (info->irq)
+				info->irq_setup = std_irq_setup;
+			info->slave_addr = ipmb;
+
+			rv = add_smi(info);
+			if (rv) {
+				kfree(info);
+				goto out;
+			}
+			mutex_lock(&smi_infos_lock);
+			rv = try_smi_init(info);
+			mutex_unlock(&smi_infos_lock);
+			if (rv) {
+				cleanup_one_si(info);
+				goto out;
+			}
+		} else {
+			/* remove */
+			struct smi_info *e, *tmp_e;
+
+			mutex_lock(&smi_infos_lock);
+			list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
+				if (e->io.addr_type != addr_space)
+					continue;
+				if (e->si_type != si_type)
+					continue;
+				if (e->io.addr_data == addr)
+					cleanup_one_si(e);
+			}
+			mutex_unlock(&smi_infos_lock);
+		}
+	}
+	rv = len;
+out:
+	kfree(str);
+	return rv;
+}
+
+static int hardcode_find_bmc(void)
+{
+	int ret = -ENODEV;
+	int             i;
+	struct smi_info *info;
+
+	for (i = 0; i < SI_MAX_PARMS; i++) {
+		if (!ports[i] && !addrs[i])
+			continue;
+
+		info = smi_info_alloc();
+		if (!info)
+			return -ENOMEM;
+
+		info->addr_source = SI_HARDCODED;
+		pr_info(PFX "probing via hardcoded address\n");
+
+		if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
+			info->si_type = SI_KCS;
+		} else if (strcmp(si_type[i], "smic") == 0) {
+			info->si_type = SI_SMIC;
+		} else if (strcmp(si_type[i], "bt") == 0) {
+			info->si_type = SI_BT;
+		} else {
+			pr_warn(PFX "Interface type specified for interface %d, was invalid: %s\n",
+				i, si_type[i]);
+			kfree(info);
+			continue;
+		}
+
+		if (ports[i]) {
+			/* An I/O port */
+			info->io_setup = port_setup;
+			info->io.addr_data = ports[i];
+			info->io.addr_type = IPMI_IO_ADDR_SPACE;
+		} else if (addrs[i]) {
+			/* A memory port */
+			info->io_setup = mem_setup;
+			info->io.addr_data = addrs[i];
+			info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+		} else {
+			pr_warn(PFX "Interface type specified for interface %d, but port and address were not set or set to zero.\n",
+				i);
+			kfree(info);
+			continue;
+		}
+
+		info->io.addr = NULL;
+		info->io.regspacing = regspacings[i];
+		if (!info->io.regspacing)
+			info->io.regspacing = DEFAULT_REGSPACING;
+		info->io.regsize = regsizes[i];
+		if (!info->io.regsize)
+			info->io.regsize = DEFAULT_REGSIZE;
+		info->io.regshift = regshifts[i];
+		info->irq = irqs[i];
+		if (info->irq)
+			info->irq_setup = std_irq_setup;
+		info->slave_addr = slave_addrs[i];
+
+		if (!add_smi(info)) {
+			mutex_lock(&smi_infos_lock);
+			if (try_smi_init(info))
+				cleanup_one_si(info);
+			mutex_unlock(&smi_infos_lock);
+			ret = 0;
+		} else {
+			kfree(info);
+		}
+	}
+	return ret;
+}
+
+#ifdef CONFIG_ACPI
+
+/*
+ * Once we get an ACPI failure, we don't try any more, because we go
+ * through the tables sequentially.  Once we don't find a table, there
+ * are no more.
+ */
+static int acpi_failure;
+
+/* For GPE-type interrupts. */
+static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
+			 u32 gpe_number, void *context)
+{
+	struct smi_info *smi_info = context;
+	unsigned long   flags;
+
+	spin_lock_irqsave(&(smi_info->si_lock), flags);
+
+	smi_inc_stat(smi_info, interrupts);
+
+	debug_timestamp("ACPI_GPE");
+
+	smi_event_handler(smi_info, 0);
+	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+
+	return ACPI_INTERRUPT_HANDLED;
+}
+
+static void acpi_gpe_irq_cleanup(struct smi_info *info)
+{
+	if (!info->irq)
+		return;
+
+	acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
+}
+
+static int acpi_gpe_irq_setup(struct smi_info *info)
+{
+	acpi_status status;
+
+	if (!info->irq)
+		return 0;
+
+	status = acpi_install_gpe_handler(NULL,
+					  info->irq,
+					  ACPI_GPE_LEVEL_TRIGGERED,
+					  &ipmi_acpi_gpe,
+					  info);
+	if (status != AE_OK) {
+		dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
+			 " running polled\n", DEVICE_NAME, info->irq);
+		info->irq = 0;
+		return -EINVAL;
+	} else {
+		info->irq_cleanup = acpi_gpe_irq_cleanup;
+		dev_info(info->dev, "Using ACPI GPE %d\n", info->irq);
+		return 0;
+	}
+}
+
+/*
+ * Defined at
+ * http://h21007.www2.hp.com/portal/download/files/unprot/hpspmi.pdf
+ */
+struct SPMITable {
+	s8	Signature[4];
+	u32	Length;
+	u8	Revision;
+	u8	Checksum;
+	s8	OEMID[6];
+	s8	OEMTableID[8];
+	s8	OEMRevision[4];
+	s8	CreatorID[4];
+	s8	CreatorRevision[4];
+	u8	InterfaceType;
+	u8	IPMIlegacy;
+	s16	SpecificationRevision;
+
+	/*
+	 * Bit 0 - SCI interrupt supported
+	 * Bit 1 - I/O APIC/SAPIC
+	 */
+	u8	InterruptType;
+
+	/*
+	 * If bit 0 of InterruptType is set, then this is the SCI
+	 * interrupt in the GPEx_STS register.
+	 */
+	u8	GPE;
+
+	s16	Reserved;
+
+	/*
+	 * If bit 1 of InterruptType is set, then this is the I/O
+	 * APIC/SAPIC interrupt.
+	 */
+	u32	GlobalSystemInterrupt;
+
+	/* The actual register address. */
+	struct acpi_generic_address addr;
+
+	u8	UID[4];
+
+	s8      spmi_id[1]; /* A '\0' terminated array starts here. */
+};
+
+static int try_init_spmi(struct SPMITable *spmi)
+{
+	struct smi_info  *info;
+	int rv;
+
+	if (spmi->IPMIlegacy != 1) {
+		pr_info(PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
+		return -ENODEV;
+	}
+
+	info = smi_info_alloc();
+	if (!info) {
+		pr_err(PFX "Could not allocate SI data (3)\n");
+		return -ENOMEM;
+	}
+
+	info->addr_source = SI_SPMI;
+	pr_info(PFX "probing via SPMI\n");
+
+	/* Figure out the interface type. */
+	switch (spmi->InterfaceType) {
+	case 1:	/* KCS */
+		info->si_type = SI_KCS;
+		break;
+	case 2:	/* SMIC */
+		info->si_type = SI_SMIC;
+		break;
+	case 3:	/* BT */
+		info->si_type = SI_BT;
+		break;
+	case 4: /* SSIF, just ignore */
+		kfree(info);
+		return -EIO;
+	default:
+		pr_info(PFX "Unknown ACPI/SPMI SI type %d\n",
+			spmi->InterfaceType);
+		kfree(info);
+		return -EIO;
+	}
+
+	if (spmi->InterruptType & 1) {
+		/* We've got a GPE interrupt. */
+		info->irq = spmi->GPE;
+		info->irq_setup = acpi_gpe_irq_setup;
+	} else if (spmi->InterruptType & 2) {
+		/* We've got an APIC/SAPIC interrupt. */
+		info->irq = spmi->GlobalSystemInterrupt;
+		info->irq_setup = std_irq_setup;
+	} else {
+		/* Use the default interrupt setting. */
+		info->irq = 0;
+		info->irq_setup = NULL;
+	}
+
+	if (spmi->addr.bit_width) {
+		/* A (hopefully) properly formed register bit width. */
+		info->io.regspacing = spmi->addr.bit_width / 8;
+	} else {
+		info->io.regspacing = DEFAULT_REGSPACING;
+	}
+	info->io.regsize = info->io.regspacing;
+	info->io.regshift = spmi->addr.bit_offset;
+
+	if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+		info->io_setup = mem_setup;
+		info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+	} else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+		info->io_setup = port_setup;
+		info->io.addr_type = IPMI_IO_ADDR_SPACE;
+	} else {
+		kfree(info);
+		pr_warn(PFX "Unknown ACPI I/O Address type\n");
+		return -EIO;
+	}
+	info->io.addr_data = spmi->addr.address;
+
+	pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n",
+		(info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+		info->io.addr_data, info->io.regsize, info->io.regspacing,
+		info->irq);
+
+	rv = add_smi(info);
+	if (rv)
+		kfree(info);
+
+	return rv;
+}
+
+static void spmi_find_bmc(void)
+{
+	acpi_status      status;
+	struct SPMITable *spmi;
+	int              i;
+
+	if (acpi_disabled)
+		return;
+
+	if (acpi_failure)
+		return;
+
+	for (i = 0; ; i++) {
+		status = acpi_get_table(ACPI_SIG_SPMI, i+1,
+					(struct acpi_table_header **)&spmi);
+		if (status != AE_OK)
+			return;
+
+		try_init_spmi(spmi);
+	}
+}
+#endif
+
+#if defined(CONFIG_DMI) || defined(CONFIG_ACPI)
+struct resource *ipmi_get_info_from_resources(struct platform_device *pdev,
+					      struct smi_info *info)
+{
+	struct resource *res, *res_second;
+
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (res) {
+		info->io_setup = port_setup;
+		info->io.addr_type = IPMI_IO_ADDR_SPACE;
+	} else {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (res) {
+			info->io_setup = mem_setup;
+			info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+		}
+	}
+	if (!res) {
+		dev_err(&pdev->dev, "no I/O or memory address\n");
+		return NULL;
+	}
+	info->io.addr_data = res->start;
+
+	info->io.regspacing = DEFAULT_REGSPACING;
+	res_second = platform_get_resource(pdev,
+			       (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
+					IORESOURCE_IO : IORESOURCE_MEM,
+			       1);
+	if (res_second) {
+		if (res_second->start > info->io.addr_data)
+			info->io.regspacing =
+				res_second->start - info->io.addr_data;
+	}
+	info->io.regsize = DEFAULT_REGSIZE;
+	info->io.regshift = 0;
+
+	return res;
+}
+
+#endif
+
+#ifdef CONFIG_DMI
+static int dmi_ipmi_probe(struct platform_device *pdev)
+{
+	struct smi_info *info;
+	u8 type, slave_addr;
+	int rv;
+
+	if (!si_trydmi)
+		return -ENODEV;
+
+	rv = device_property_read_u8(&pdev->dev, "ipmi-type", &type);
+	if (rv)
+		return -ENODEV;
+
+	info = smi_info_alloc();
+	if (!info) {
+		pr_err(PFX "Could not allocate SI data\n");
+		return -ENOMEM;
+	}
+
+	info->addr_source = SI_SMBIOS;
+	pr_info(PFX "probing via SMBIOS\n");
+
+	switch (type) {
+	case IPMI_DMI_TYPE_KCS:
+		info->si_type = SI_KCS;
+		break;
+	case IPMI_DMI_TYPE_SMIC:
+		info->si_type = SI_SMIC;
+		break;
+	case IPMI_DMI_TYPE_BT:
+		info->si_type = SI_BT;
+		break;
+	default:
+		kfree(info);
+		return -EINVAL;
+	}
+
+	if (!ipmi_get_info_from_resources(pdev, info)) {
+		rv = -EINVAL;
+		goto err_free;
+	}
+
+	rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr);
+	if (rv) {
+		dev_warn(&pdev->dev, "device has no slave-addr property");
+		info->slave_addr = 0x20;
+	} else {
+		info->slave_addr = slave_addr;
+	}
+
+	info->irq = platform_get_irq(pdev, 0);
+	if (info->irq > 0)
+		info->irq_setup = std_irq_setup;
+	else
+		info->irq = 0;
+
+	info->dev = &pdev->dev;
+
+	pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
+		(info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+		info->io.addr_data, info->io.regsize, info->io.regspacing,
+		info->irq);
+
+	if (add_smi(info))
+		kfree(info);
+
+	return 0;
+
+err_free:
+	kfree(info);
+	return rv;
+}
+#else
+static int dmi_ipmi_probe(struct platform_device *pdev)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_DMI */
+
+#ifdef CONFIG_PCI
+
+#define PCI_ERMC_CLASSCODE		0x0C0700
+#define PCI_ERMC_CLASSCODE_MASK		0xffffff00
+#define PCI_ERMC_CLASSCODE_TYPE_MASK	0xff
+#define PCI_ERMC_CLASSCODE_TYPE_SMIC	0x00
+#define PCI_ERMC_CLASSCODE_TYPE_KCS	0x01
+#define PCI_ERMC_CLASSCODE_TYPE_BT	0x02
+
+#define PCI_HP_VENDOR_ID    0x103C
+#define PCI_MMC_DEVICE_ID   0x121A
+#define PCI_MMC_ADDR_CW     0x10
+
+static void ipmi_pci_cleanup(struct smi_info *info)
+{
+	struct pci_dev *pdev = info->addr_source_data;
+
+	pci_disable_device(pdev);
+}
+
+static int ipmi_pci_probe_regspacing(struct smi_info *info)
+{
+	if (info->si_type == SI_KCS) {
+		unsigned char	status;
+		int		regspacing;
+
+		info->io.regsize = DEFAULT_REGSIZE;
+		info->io.regshift = 0;
+		info->io_size = 2;
+		info->handlers = &kcs_smi_handlers;
+
+		/* detect 1, 4, 16byte spacing */
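+		/*
+		 * The probe below writes an invalid command to the KCS
+		 * command register (offset 1) at each candidate spacing
+		 * and reads the status register back; a nonzero status
+		 * means something responded at that spacing.
+		 */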
+		for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
+			info->io.regspacing = regspacing;
+			if (info->io_setup(info)) {
+				dev_err(info->dev,
+					"Could not setup I/O space\n");
+				return DEFAULT_REGSPACING;
+			}
+			/* write invalid cmd */
+			info->io.outputb(&info->io, 1, 0x10);
+			/* read status back */
+			status = info->io.inputb(&info->io, 1);
+			info->io_cleanup(info);
+			if (status)
+				return regspacing;
+			regspacing *= 4;
+		}
+	}
+	return DEFAULT_REGSPACING;
+}
+
+static struct pci_device_id ipmi_pci_blacklist[] = {
+	/*
+	 * This is a "Virtual IPMI device", whatever that is.  It appears
+	 * as a KCS device by the class, but it is not one.
+	 */
+	{ PCI_VDEVICE(REALTEK, 0x816c) },
+	{ 0, }
+};
+
+static int ipmi_pci_probe(struct pci_dev *pdev,
+				    const struct pci_device_id *ent)
+{
+	int rv;
+	int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
+	struct smi_info *info;
+
+	if (pci_match_id(ipmi_pci_blacklist, pdev))
+		return -ENODEV;
+
+	info = smi_info_alloc();
+	if (!info)
+		return -ENOMEM;
+
+	info->addr_source = SI_PCI;
+	dev_info(&pdev->dev, "probing via PCI");
+
+	switch (class_type) {
+	case PCI_ERMC_CLASSCODE_TYPE_SMIC:
+		info->si_type = SI_SMIC;
+		break;
+
+	case PCI_ERMC_CLASSCODE_TYPE_KCS:
+		info->si_type = SI_KCS;
+		break;
+
+	case PCI_ERMC_CLASSCODE_TYPE_BT:
+		info->si_type = SI_BT;
+		break;
+
+	default:
+		kfree(info);
+		dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
+		return -EINVAL;
+	}
+
+	rv = pci_enable_device(pdev);
+	if (rv) {
+		dev_err(&pdev->dev, "couldn't enable PCI device\n");
+		kfree(info);
+		return rv;
+	}
+
+	info->addr_source_cleanup = ipmi_pci_cleanup;
+	info->addr_source_data = pdev;
+
+	if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
+		info->io_setup = port_setup;
+		info->io.addr_type = IPMI_IO_ADDR_SPACE;
+	} else {
+		info->io_setup = mem_setup;
+		info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+	}
+	info->io.addr_data = pci_resource_start(pdev, 0);
+
+	info->io.regspacing = ipmi_pci_probe_regspacing(info);
+	info->io.regsize = DEFAULT_REGSIZE;
+	info->io.regshift = 0;
+
+	info->irq = pdev->irq;
+	if (info->irq)
+		info->irq_setup = std_irq_setup;
+
+	info->dev = &pdev->dev;
+	pci_set_drvdata(pdev, info);
+
+	dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
+		&pdev->resource[0], info->io.regsize, info->io.regspacing,
+		info->irq);
+
+	rv = add_smi(info);
+	if (rv) {
+		kfree(info);
+		pci_disable_device(pdev);
+	}
+
+	return rv;
+}
+
+static void ipmi_pci_remove(struct pci_dev *pdev)
+{
+	struct smi_info *info = pci_get_drvdata(pdev);
+
+	cleanup_one_si(info);
+}
+
+static const struct pci_device_id ipmi_pci_devices[] = {
+	{ PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
+	{ PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
+
+static struct pci_driver ipmi_pci_driver = {
+	.name =         DEVICE_NAME,
+	.id_table =     ipmi_pci_devices,
+	.probe =        ipmi_pci_probe,
+	.remove =       ipmi_pci_remove,
+};
+#endif /* CONFIG_PCI */
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_ipmi_match[] = {
+	{ .type = "ipmi", .compatible = "ipmi-kcs",
+	  .data = (void *)(unsigned long) SI_KCS },
+	{ .type = "ipmi", .compatible = "ipmi-smic",
+	  .data = (void *)(unsigned long) SI_SMIC },
+	{ .type = "ipmi", .compatible = "ipmi-bt",
+	  .data = (void *)(unsigned long) SI_BT },
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_ipmi_match);
+
+static int of_ipmi_probe(struct platform_device *dev)
+{
+	const struct of_device_id *match;
+	struct smi_info *info;
+	struct resource resource;
+	const __be32 *regsize, *regspacing, *regshift;
+	struct device_node *np = dev->dev.of_node;
+	int ret;
+	int proplen;
+
+	dev_info(&dev->dev, "probing via device tree\n");
+
+	match = of_match_device(of_ipmi_match, &dev->dev);
+	if (!match)
+		return -ENODEV;
+
+	if (!of_device_is_available(np))
+		return -EINVAL;
+
+	ret = of_address_to_resource(np, 0, &resource);
+	if (ret) {
+		dev_warn(&dev->dev, PFX "invalid address from OF\n");
+		return ret;
+	}
+
+	regsize = of_get_property(np, "reg-size", &proplen);
+	if (regsize && proplen != 4) {
+		dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
+		return -EINVAL;
+	}
+
+	regspacing = of_get_property(np, "reg-spacing", &proplen);
+	if (regspacing && proplen != 4) {
+		dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
+		return -EINVAL;
+	}
+
+	regshift = of_get_property(np, "reg-shift", &proplen);
+	if (regshift && proplen != 4) {
+		dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
+		return -EINVAL;
+	}
+
+	info = smi_info_alloc();
+
+	if (!info) {
+		dev_err(&dev->dev,
+			"could not allocate memory for OF probe\n");
+		return -ENOMEM;
+	}
+
+	info->si_type		= (enum si_type) match->data;
+	info->addr_source	= SI_DEVICETREE;
+	info->irq_setup		= std_irq_setup;
+
+	if (resource.flags & IORESOURCE_IO) {
+		info->io_setup		= port_setup;
+		info->io.addr_type	= IPMI_IO_ADDR_SPACE;
+	} else {
+		info->io_setup		= mem_setup;
+		info->io.addr_type	= IPMI_MEM_ADDR_SPACE;
+	}
+
+	info->io.addr_data	= resource.start;
+
+	info->io.regsize	= regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE;
+	info->io.regspacing	= regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING;
+	info->io.regshift	= regshift ? be32_to_cpup(regshift) : 0;
+
+	info->irq		= irq_of_parse_and_map(dev->dev.of_node, 0);
+	info->dev		= &dev->dev;
+
+	dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
+		info->io.addr_data, info->io.regsize, info->io.regspacing,
+		info->irq);
+
+	dev_set_drvdata(&dev->dev, info);
+
+	ret = add_smi(info);
+	if (ret) {
+		kfree(info);
+		return ret;
+	}
+	return 0;
+}
+#else
+#define of_ipmi_match NULL
+static int of_ipmi_probe(struct platform_device *dev)
+{
+	return -ENODEV;
+}
+#endif
+
+#ifdef CONFIG_ACPI
+static int find_slave_address(struct smi_info *info, int slave_addr)
+{
+#ifdef CONFIG_IPMI_DMI_DECODE
+	if (!slave_addr) {
+		int type = -1;
+		u32 flags = IORESOURCE_IO;
+
+		switch (info->si_type) {
+		case SI_KCS:
+			type = IPMI_DMI_TYPE_KCS;
+			break;
+		case SI_BT:
+			type = IPMI_DMI_TYPE_BT;
+			break;
+		case SI_SMIC:
+			type = IPMI_DMI_TYPE_SMIC;
+			break;
+		}
+
+		if (info->io.addr_type == IPMI_MEM_ADDR_SPACE)
+			flags = IORESOURCE_MEM;
+
+		slave_addr = ipmi_dmi_get_slave_addr(type, flags,
+						     info->io.addr_data);
+	}
+#endif
+
+	return slave_addr;
+}
+
+static int acpi_ipmi_probe(struct platform_device *dev)
+{
+	struct smi_info *info;
+	acpi_handle handle;
+	acpi_status status;
+	unsigned long long tmp;
+	struct resource *res;
+	int rv = -EINVAL;
+
+	if (!si_tryacpi)
+		return -ENODEV;
+
+	handle = ACPI_HANDLE(&dev->dev);
+	if (!handle)
+		return -ENODEV;
+
+	info = smi_info_alloc();
+	if (!info)
+		return -ENOMEM;
+
+	info->addr_source = SI_ACPI;
+	dev_info(&dev->dev, PFX "probing via ACPI\n");
+
+	info->addr_info.acpi_info.acpi_handle = handle;
+
+	/* _IFT tells us the interface type: KCS, BT, etc */
+	status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
+	if (ACPI_FAILURE(status)) {
+		dev_err(&dev->dev, "Could not find ACPI IPMI interface type\n");
+		goto err_free;
+	}
+
+	switch (tmp) {
+	case 1:
+		info->si_type = SI_KCS;
+		break;
+	case 2:
+		info->si_type = SI_SMIC;
+		break;
+	case 3:
+		info->si_type = SI_BT;
+		break;
+	case 4: /* SSIF, just ignore */
+		rv = -ENODEV;
+		goto err_free;
+	default:
+		dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
+		goto err_free;
+	}
+
+	res = ipmi_get_info_from_resources(dev, info);
+	if (!res) {
+		rv = -EINVAL;
+		goto err_free;
+	}
+
+	/* If _GPE exists, use it; otherwise use standard interrupts */
+	status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
+	if (ACPI_SUCCESS(status)) {
+		info->irq = tmp;
+		info->irq_setup = acpi_gpe_irq_setup;
+	} else {
+		int irq = platform_get_irq(dev, 0);
+
+		if (irq > 0) {
+			info->irq = irq;
+			info->irq_setup = std_irq_setup;
+		}
+	}
+
+	info->slave_addr = find_slave_address(info, info->slave_addr);
+
+	info->dev = &dev->dev;
+	platform_set_drvdata(dev, info);
+
+	dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
+		 res, info->io.regsize, info->io.regspacing,
+		 info->irq);
+
+	rv = add_smi(info);
+	if (rv)
+		kfree(info);
+
+	return rv;
+
+err_free:
+	kfree(info);
+	return rv;
+}
+
+static const struct acpi_device_id acpi_ipmi_match[] = {
+	{ "IPI0001", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, acpi_ipmi_match);
+#else
+static int acpi_ipmi_probe(struct platform_device *dev)
+{
+	return -ENODEV;
+}
+#endif
+
+static int ipmi_probe(struct platform_device *dev)
+{
+	if (of_ipmi_probe(dev) == 0)
+		return 0;
+
+	if (acpi_ipmi_probe(dev) == 0)
+		return 0;
+
+	return dmi_ipmi_probe(dev);
+}
+
+static int ipmi_remove(struct platform_device *dev)
+{
+	struct smi_info *info = dev_get_drvdata(&dev->dev);
+
+	cleanup_one_si(info);
+	return 0;
+}
+
+static struct platform_driver ipmi_driver = {
+	.driver = {
+		.name = DEVICE_NAME,
+		.of_match_table = of_ipmi_match,
+		.acpi_match_table = ACPI_PTR(acpi_ipmi_match),
+	},
+	.probe		= ipmi_probe,
+	.remove		= ipmi_remove,
+};
+
+#ifdef CONFIG_PARISC
+static int __init ipmi_parisc_probe(struct parisc_device *dev)
+{
+	struct smi_info *info;
+	int rv;
+
+	info = smi_info_alloc();
+
+	if (!info) {
+		dev_err(&dev->dev,
+			"could not allocate memory for PARISC probe\n");
+		return -ENOMEM;
+	}
+
+	info->si_type		= SI_KCS;
+	info->addr_source	= SI_DEVICETREE;
+	info->io_setup		= mem_setup;
+	info->io.addr_type	= IPMI_MEM_ADDR_SPACE;
+	info->io.addr_data	= dev->hpa.start;
+	info->io.regsize	= 1;
+	info->io.regspacing	= 1;
+	info->io.regshift	= 0;
+	info->irq		= 0; /* no interrupt */
+	info->irq_setup		= NULL;
+	info->dev		= &dev->dev;
+
+	dev_dbg(&dev->dev, "addr 0x%lx\n", info->io.addr_data);
+
+	dev_set_drvdata(&dev->dev, info);
+
+	rv = add_smi(info);
+	if (rv) {
+		kfree(info);
+		return rv;
+	}
+
+	return 0;
+}
+
+static int __exit ipmi_parisc_remove(struct parisc_device *dev)
+{
+	cleanup_one_si(dev_get_drvdata(&dev->dev));
+	return 0;
+}
+
+static const struct parisc_device_id ipmi_parisc_tbl[] __initconst = {
+	{ HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 },
+	{ 0, }
+};
+
+MODULE_DEVICE_TABLE(parisc, ipmi_parisc_tbl);
+
+static struct parisc_driver ipmi_parisc_driver __refdata = {
+	.name =		"ipmi",
+	.id_table =	ipmi_parisc_tbl,
+	.probe =	ipmi_parisc_probe,
+	.remove =	__exit_p(ipmi_parisc_remove),
+};
+#endif /* CONFIG_PARISC */
+
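+/*
+ * Run the interface's state machine until the current transaction
+ * completes, sleeping a tick at a time when the state machine asks
+ * for a delay.  Returns 0 on completion, or -ENODEV if the state
+ * machine wedges (SI_SM_HOSED), meaning there is probably no IPMI
+ * interface at this address.
+ */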
+static int wait_for_msg_done(struct smi_info *smi_info)
+{
+	enum si_sm_result     smi_result;
+
+	smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
+	for (;;) {
+		if (smi_result == SI_SM_CALL_WITH_DELAY ||
+		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
+			schedule_timeout_uninterruptible(1);
+			smi_result = smi_info->handlers->event(
+				smi_info->si_sm, jiffies_to_usecs(1));
+		} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+			smi_result = smi_info->handlers->event(
+				smi_info->si_sm, 0);
+		} else {
+			break;
+		}
+	}
+	if (smi_result == SI_SM_HOSED)
+		/*
+		 * We couldn't get the state machine to run, so whatever's at
+		 * the port is probably not an IPMI SMI interface.
+		 */
+		return -ENODEV;
+
+	return 0;
+}
+
+static int try_get_dev_id(struct smi_info *smi_info)
+{
+	unsigned char         msg[2];
+	unsigned char         *resp;
+	unsigned long         resp_len;
+	int                   rv = 0;
+
+	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+	if (!resp)
+		return -ENOMEM;
+
+	/*
+	 * Do a Get Device ID command, since it comes back with some
+	 * useful info.
+	 */
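+	/*
+	 * msg[0] carries the network function in its upper six bits
+	 * (IPMI_NETFN_APP_REQUEST is 0x06, so 0x06 << 2 == 0x18);
+	 * msg[1] is the command (Get Device ID is 0x01).
+	 */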
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_GET_DEVICE_ID_CMD;
+	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+	rv = wait_for_msg_done(smi_info);
+	if (rv)
+		goto out;
+
+	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+						  resp, IPMI_MAX_MSG_LENGTH);
+
+	/* Check and record info from the get device id, in case we need it. */
+	rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id);
+
+out:
+	kfree(resp);
+	return rv;
+}
+
+static int get_global_enables(struct smi_info *smi_info, u8 *enables)
+{
+	unsigned char         msg[3];
+	unsigned char         *resp;
+	unsigned long         resp_len;
+	int                   rv;
+
+	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+	if (!resp)
+		return -ENOMEM;
+
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+	rv = wait_for_msg_done(smi_info);
+	if (rv) {
+		dev_warn(smi_info->dev,
+			 "Error getting response from get global enables command: %d\n",
+			 rv);
+		goto out;
+	}
+
+	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+						  resp, IPMI_MAX_MSG_LENGTH);
+
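+	/*
+	 * A response echoes the request NetFn with the low bit set
+	 * ((IPMI_NETFN_APP_REQUEST | 1) << 2 == 0x1c) and the command,
+	 * followed by a completion code in resp[2]; nonzero there
+	 * indicates an error from the BMC.
+	 */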
+	if (resp_len < 4 ||
+			resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+			resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
+			resp[2] != 0) {
+		dev_warn(smi_info->dev,
+			 "Invalid return from get global enables command: %ld %x %x %x\n",
+			 resp_len, resp[0], resp[1], resp[2]);
+		rv = -EINVAL;
+		goto out;
+	} else {
+		*enables = resp[3];
+	}
+
+out:
+	kfree(resp);
+	return rv;
+}
+
+/*
+ * Returns 0 on success, 1 if the BMC returned a nonzero completion
+ * code, or a negative errno on a transport failure.
+ */
+static int set_global_enables(struct smi_info *smi_info, u8 enables)
+{
+	unsigned char         msg[3];
+	unsigned char         *resp;
+	unsigned long         resp_len;
+	int                   rv;
+
+	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+	if (!resp)
+		return -ENOMEM;
+
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+	msg[2] = enables;
+	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+
+	rv = wait_for_msg_done(smi_info);
+	if (rv) {
+		dev_warn(smi_info->dev,
+			 "Error getting response from set global enables command: %d\n",
+			 rv);
+		goto out;
+	}
+
+	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+						  resp, IPMI_MAX_MSG_LENGTH);
+
+	if (resp_len < 3 ||
+			resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+			resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
+		dev_warn(smi_info->dev,
+			 "Invalid return from set global enables command: %ld %x %x\n",
+			 resp_len, resp[0], resp[1]);
+		rv = -EINVAL;
+		goto out;
+	}
+
+	if (resp[2] != 0)
+		rv = 1;
+
+out:
+	kfree(resp);
+	return rv;
+}
+
+/*
+ * Some BMCs do not support clearing the receive irq bit in the global
+ * enables (even if they don't support interrupts on the BMC).  Check
+ * for this and handle it properly.
+ */
+static void check_clr_rcv_irq(struct smi_info *smi_info)
+{
+	u8 enables = 0;
+	int rv;
+
+	rv = get_global_enables(smi_info, &enables);
+	if (!rv) {
+		if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
+			/* Already clear, should work ok. */
+			return;
+
+		enables &= ~IPMI_BMC_RCV_MSG_INTR;
+		rv = set_global_enables(smi_info, enables);
+	}
+
+	if (rv < 0) {
+		dev_err(smi_info->dev,
+			"Cannot check clearing the rcv irq: %d\n", rv);
+		return;
+	}
+
+	if (rv) {
+		/*
+		 * An error when clearing the receive irq bit means
+		 * clearing that bit is not supported.
+		 */
+		dev_warn(smi_info->dev,
+			 "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
+		smi_info->cannot_disable_irq = true;
+	}
+}
+
+/*
+ * Some BMCs do not support setting the interrupt bits in the global
+ * enables even if they support interrupts.  Clearly bad, but we can
+ * compensate.
+ */
+static void check_set_rcv_irq(struct smi_info *smi_info)
+{
+	u8 enables = 0;
+	int rv;
+
+	if (!smi_info->irq)
+		return;
+
+	rv = get_global_enables(smi_info, &enables);
+	if (!rv) {
+		enables |= IPMI_BMC_RCV_MSG_INTR;
+		rv = set_global_enables(smi_info, enables);
+	}
+
+	if (rv < 0) {
+		dev_err(smi_info->dev,
+			"Cannot check setting the rcv irq: %d\n", rv);
+		return;
+	}
+
+	if (rv) {
+		/*
+		 * An error when setting the receive irq bit means
+		 * setting the bit is not supported.
+		 */
+		dev_warn(smi_info->dev,
+			 "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
+		smi_info->cannot_disable_irq = true;
+		smi_info->irq_enable_broken = true;
+	}
+}
+
+static int try_enable_event_buffer(struct smi_info *smi_info)
+{
+	unsigned char         msg[3];
+	unsigned char         *resp;
+	unsigned long         resp_len;
+	int                   rv = 0;
+
+	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+	if (!resp)
+		return -ENOMEM;
+
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+	rv = wait_for_msg_done(smi_info);
+	if (rv) {
+		pr_warn(PFX "Error getting response from get global enables command, the event buffer is not enabled.\n");
+		goto out;
+	}
+
+	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+						  resp, IPMI_MAX_MSG_LENGTH);
+
+	if (resp_len < 4 ||
+			resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+			resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
+			resp[2] != 0) {
+		pr_warn(PFX "Invalid return from get global enables command, cannot enable the event buffer.\n");
+		rv = -EINVAL;
+		goto out;
+	}
+
+	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
+		/* buffer is already enabled, nothing to do. */
+		smi_info->supports_event_msg_buff = true;
+		goto out;
+	}
+
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+	msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
+	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+
+	rv = wait_for_msg_done(smi_info);
+	if (rv) {
+		pr_warn(PFX "Error getting response from set global, enables command, the event buffer is not enabled.\n");
+		goto out;
+	}
+
+	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+						  resp, IPMI_MAX_MSG_LENGTH);
+
+	if (resp_len < 3 ||
+			resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+			resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
+		pr_warn(PFX "Invalid return from get global, enables command, not enable the event buffer.\n");
+		rv = -EINVAL;
+		goto out;
+	}
+
+	if (resp[2] != 0)
+		/*
+		 * An error when setting the event buffer bit means
+		 * that the event buffer is not supported.
+		 */
+		rv = -ENOENT;
+	else
+		smi_info->supports_event_msg_buff = true;
+
+out:
+	kfree(resp);
+	return rv;
+}
+
+static int smi_type_proc_show(struct seq_file *m, void *v)
+{
+	struct smi_info *smi = m->private;
+
+	seq_printf(m, "%s\n", si_to_str[smi->si_type]);
+
+	return 0;
+}
+
+static int smi_type_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, smi_type_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations smi_type_proc_ops = {
+	.open		= smi_type_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int smi_si_stats_proc_show(struct seq_file *m, void *v)
+{
+	struct smi_info *smi = m->private;
+
+	seq_printf(m, "interrupts_enabled:    %d\n",
+		       smi->irq && !smi->interrupt_disabled);
+	seq_printf(m, "short_timeouts:        %u\n",
+		       smi_get_stat(smi, short_timeouts));
+	seq_printf(m, "long_timeouts:         %u\n",
+		       smi_get_stat(smi, long_timeouts));
+	seq_printf(m, "idles:                 %u\n",
+		       smi_get_stat(smi, idles));
+	seq_printf(m, "interrupts:            %u\n",
+		       smi_get_stat(smi, interrupts));
+	seq_printf(m, "attentions:            %u\n",
+		       smi_get_stat(smi, attentions));
+	seq_printf(m, "flag_fetches:          %u\n",
+		       smi_get_stat(smi, flag_fetches));
+	seq_printf(m, "hosed_count:           %u\n",
+		       smi_get_stat(smi, hosed_count));
+	seq_printf(m, "complete_transactions: %u\n",
+		       smi_get_stat(smi, complete_transactions));
+	seq_printf(m, "events:                %u\n",
+		       smi_get_stat(smi, events));
+	seq_printf(m, "watchdog_pretimeouts:  %u\n",
+		       smi_get_stat(smi, watchdog_pretimeouts));
+	seq_printf(m, "incoming_messages:     %u\n",
+		       smi_get_stat(smi, incoming_messages));
+	return 0;
+}
+
+static int smi_si_stats_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, smi_si_stats_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations smi_si_stats_proc_ops = {
+	.open		= smi_si_stats_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int smi_params_proc_show(struct seq_file *m, void *v)
+{
+	struct smi_info *smi = m->private;
+
+	seq_printf(m,
+		   "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
+		   si_to_str[smi->si_type],
+		   addr_space_to_str[smi->io.addr_type],
+		   smi->io.addr_data,
+		   smi->io.regspacing,
+		   smi->io.regsize,
+		   smi->io.regshift,
+		   smi->irq,
+		   smi->slave_addr);
+
+	return 0;
+}
+
+static int smi_params_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, smi_params_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations smi_params_proc_ops = {
+	.open		= smi_params_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/*
+ * oem_data_avail_to_receive_msg_avail
+ * @info - smi_info structure with msg_flags set
+ *
+ * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
+ * Returns 1 indicating need to re-run handle_flags().
+ */
+static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
+{
+	smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
+			       RECEIVE_MSG_AVAIL);
+	return 1;
+}
+
+/*
+ * setup_dell_poweredge_oem_data_handler
+ * @info - smi_info.device_id must be populated
+ *
+ * Systems that match, but have firmware version < 1.40 may assert
+ * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
+ * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
+ * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
+ * as RECEIVE_MSG_AVAIL instead.
+ *
+ * As Dell has no plans to release IPMI 1.5 firmware that *ever*
+ * asserts the OEM[012] bits, and if it did, the driver would have to
+ * change to handle that properly, we don't actually check for the
+ * firmware version.
+ * Device ID = 0x20                BMC on PowerEdge 8G servers
+ * Device Revision = 0x80
+ * Firmware Revision1 = 0x01       BMC version 1.40
+ * Firmware Revision2 = 0x40       BCD encoded
+ * IPMI Version = 0x51             IPMI 1.5
+ * Manufacturer ID = A2 02 00      Dell IANA
+ *
+ * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
+ * OEM0_DATA_AVAIL, which needs to be treated as RECEIVE_MSG_AVAIL.
+ *
+ */
+#define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
+#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
+#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
+#define DELL_IANA_MFR_ID 0x0002a2
+static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
+{
+	struct ipmi_device_id *id = &smi_info->device_id;
+
+	if (id->manufacturer_id == DELL_IANA_MFR_ID) {
+		if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
+		    id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
+		    id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
+			smi_info->oem_data_avail_handler =
+				oem_data_avail_to_receive_msg_avail;
+		} else if (ipmi_version_major(id) < 1 ||
+			   (ipmi_version_major(id) == 1 &&
+			    ipmi_version_minor(id) < 5)) {
+			smi_info->oem_data_avail_handler =
+				oem_data_avail_to_receive_msg_avail;
+		}
+	}
+}
+
+#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
+static void return_hosed_msg_badsize(struct smi_info *smi_info)
+{
+	struct ipmi_smi_msg *msg = smi_info->curr_msg;
+
+	/* Make it a response */
+	msg->rsp[0] = msg->data[0] | 4;
+	msg->rsp[1] = msg->data[1];
+	msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
+	msg->rsp_size = 3;
+	smi_info->curr_msg = NULL;
+	deliver_recv_msg(smi_info, msg);
+}
+
+/*
+ * dell_poweredge_bt_xaction_handler
+ * @info - smi_info.device_id must be populated
+ *
+ * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
+ * not respond to a Get SDR command if the length of the data
+ * requested is exactly 0x3A, which leads to command timeouts and no
+ * data returned.  This intercepts such commands, and causes userspace
+ * callers to try again with a different-sized buffer, which succeeds.
+ */
+
+#define STORAGE_NETFN 0x0A
+#define STORAGE_CMD_GET_SDR 0x23
+static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
+					     unsigned long unused,
+					     void *in)
+{
+	struct smi_info *smi_info = in;
+	unsigned char *data = smi_info->curr_msg->data;
+	unsigned int size   = smi_info->curr_msg->data_size;
+
+	if (size >= 8 &&
+	    (data[0] >> 2) == STORAGE_NETFN &&
+	    data[1] == STORAGE_CMD_GET_SDR &&
+	    data[7] == 0x3A) {
+		return_hosed_msg_badsize(smi_info);
+		return NOTIFY_STOP;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block dell_poweredge_bt_xaction_notifier = {
+	.notifier_call	= dell_poweredge_bt_xaction_handler,
+};
+
+/*
+ * setup_dell_poweredge_bt_xaction_handler
+ * @info - smi_info.device_id must be filled in already
+ *
+ * Registers the BT transaction notifier for Dell PowerEdge
+ * systems that need it.
+ */
+static void
+setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
+{
+	struct ipmi_device_id *id = &smi_info->device_id;
+
+	if (id->manufacturer_id == DELL_IANA_MFR_ID &&
+	    smi_info->si_type == SI_BT)
+		register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
+}
+
+/*
+ * setup_oem_data_handler
+ * @info - smi_info.device_id must be filled in already
+ *
+ * Fills in smi_info.oem_data_avail_handler
+ * when we know what function to use there.
+ */
+
+static void setup_oem_data_handler(struct smi_info *smi_info)
+{
+	setup_dell_poweredge_oem_data_handler(smi_info);
+}
+
+static void setup_xaction_handlers(struct smi_info *smi_info)
+{
+	setup_dell_poweredge_bt_xaction_handler(smi_info);
+}
+
+static void check_for_broken_irqs(struct smi_info *smi_info)
+{
+	check_clr_rcv_irq(smi_info);
+	check_set_rcv_irq(smi_info);
+}
+
+static inline void stop_timer_and_thread(struct smi_info *smi_info)
+{
+	if (smi_info->thread != NULL)
+		kthread_stop(smi_info->thread);
+
+	smi_info->timer_can_start = false;
+	if (smi_info->timer_running)
+		del_timer_sync(&smi_info->si_timer);
+}
+
+static struct smi_info *find_dup_si(struct smi_info *info)
+{
+	struct smi_info *e;
+
+	list_for_each_entry(e, &smi_infos, link) {
+		if (e->io.addr_type != info->io.addr_type)
+			continue;
+		if (e->io.addr_data == info->io.addr_data) {
+			/*
+			 * This is a cheap hack, ACPI doesn't have a defined
+			 * slave address but SMBIOS does.  Pick it up from
+			 * any source that has it available.
+			 */
+			if (info->slave_addr && !e->slave_addr)
+				e->slave_addr = info->slave_addr;
+			return e;
+		}
+	}
+
+	return NULL;
+}
+
+static int add_smi(struct smi_info *new_smi)
+{
+	int rv = 0;
+	struct smi_info *dup;
+
+	mutex_lock(&smi_infos_lock);
+	dup = find_dup_si(new_smi);
+	if (dup) {
+		if (new_smi->addr_source == SI_ACPI &&
+		    dup->addr_source == SI_SMBIOS) {
+			/* We prefer ACPI over SMBIOS. */
+			dev_info(dup->dev,
+				 "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
+				 si_to_str[new_smi->si_type]);
+			cleanup_one_si(dup);
+		} else {
+			dev_info(new_smi->dev,
+				 "%s-specified %s state machine: duplicate\n",
+				 ipmi_addr_src_to_str(new_smi->addr_source),
+				 si_to_str[new_smi->si_type]);
+			rv = -EBUSY;
+			goto out_err;
+		}
+	}
+
+	pr_info(PFX "Adding %s-specified %s state machine\n",
+		ipmi_addr_src_to_str(new_smi->addr_source),
+		si_to_str[new_smi->si_type]);
+
+	/* So we know not to free it unless we have allocated one. */
+	new_smi->intf = NULL;
+	new_smi->si_sm = NULL;
+	new_smi->handlers = NULL;
+
+	list_add_tail(&new_smi->link, &smi_infos);
+
+out_err:
+	mutex_unlock(&smi_infos_lock);
+	return rv;
+}
+
+/*
+ * Try to start up an interface.  Must be called with smi_infos_lock
+ * held, primarily to keep smi_num consistent; we only want to do these
+ * one at a time.
+ */
+static int try_smi_init(struct smi_info *new_smi)
+{
+	int rv = 0;
+	int i;
+	char *init_name = NULL;
+
+	pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
+		ipmi_addr_src_to_str(new_smi->addr_source),
+		si_to_str[new_smi->si_type],
+		addr_space_to_str[new_smi->io.addr_type],
+		new_smi->io.addr_data,
+		new_smi->slave_addr, new_smi->irq);
+
+	switch (new_smi->si_type) {
+	case SI_KCS:
+		new_smi->handlers = &kcs_smi_handlers;
+		break;
+
+	case SI_SMIC:
+		new_smi->handlers = &smic_smi_handlers;
+		break;
+
+	case SI_BT:
+		new_smi->handlers = &bt_smi_handlers;
+		break;
+
+	default:
+		/* No support for anything else yet. */
+		rv = -EIO;
+		goto out_err;
+	}
+
+	new_smi->intf_num = smi_num;
+
+	/* Do this early so it's available for logs. */
+	if (!new_smi->dev) {
+		init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d",
+				      new_smi->intf_num);
+
+		/*
+		 * If we don't already have a device from something
+		 * else (like PCI), then register a new one.
+		 */
+		new_smi->pdev = platform_device_alloc("ipmi_si",
+						      new_smi->intf_num);
+		if (!new_smi->pdev) {
+			pr_err(PFX "Unable to allocate platform device\n");
+			goto out_err;
+		}
+		new_smi->dev = &new_smi->pdev->dev;
+		new_smi->dev->driver = &ipmi_driver.driver;
+		/* Nulled by device_add() */
+		new_smi->dev->init_name = init_name;
+	}
+
+	/* Allocate the state machine's data and initialize it. */
+	new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
+	if (!new_smi->si_sm) {
+		pr_err(PFX "Could not allocate state machine memory\n");
+		rv = -ENOMEM;
+		goto out_err;
+	}
+	new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
+							&new_smi->io);
+
+	/* Now that we know the I/O size, we can set up the I/O. */
+	rv = new_smi->io_setup(new_smi);
+	if (rv) {
+		dev_err(new_smi->dev, "Could not set up I/O space\n");
+		goto out_err;
+	}
+
+	/* Do low-level detection first. */
+	if (new_smi->handlers->detect(new_smi->si_sm)) {
+		if (new_smi->addr_source)
+			dev_err(new_smi->dev, "Interface detection failed\n");
+		rv = -ENODEV;
+		goto out_err;
+	}
+
+	/*
+	 * Attempt a get device id command.  If it fails, we probably
+	 * don't have a BMC here.
+	 */
+	rv = try_get_dev_id(new_smi);
+	if (rv) {
+		if (new_smi->addr_source)
+			dev_err(new_smi->dev, "There appears to be no BMC at this location\n");
+		goto out_err;
+	}
+
+	setup_oem_data_handler(new_smi);
+	setup_xaction_handlers(new_smi);
+	check_for_broken_irqs(new_smi);
+
+	new_smi->waiting_msg = NULL;
+	new_smi->curr_msg = NULL;
+	atomic_set(&new_smi->req_events, 0);
+	new_smi->run_to_completion = false;
+	for (i = 0; i < SI_NUM_STATS; i++)
+		atomic_set(&new_smi->stats[i], 0);
+
+	new_smi->interrupt_disabled = true;
+	atomic_set(&new_smi->need_watch, 0);
+
+	rv = try_enable_event_buffer(new_smi);
+	if (rv == 0)
+		new_smi->has_event_buffer = true;
+
+	/*
+	 * Start clearing the flags before we enable interrupts or the
+	 * timer to avoid racing with the timer.
+	 */
+	start_clear_flags(new_smi);
+
+	/*
+	 * IRQ is defined to be set when non-zero.  req_events will
+	 * cause a global flags check that will enable interrupts.
+	 */
+	if (new_smi->irq) {
+		new_smi->interrupt_disabled = false;
+		atomic_set(&new_smi->req_events, 1);
+	}
+
+	if (new_smi->pdev) {
+		rv = platform_device_add(new_smi->pdev);
+		if (rv) {
+			dev_err(new_smi->dev,
+				"Unable to register system interface device: %d\n",
+				rv);
+			goto out_err;
+		}
+		new_smi->dev_registered = true;
+	}
+
+	rv = ipmi_register_smi(&handlers,
+			       new_smi,
+			       &new_smi->device_id,
+			       new_smi->dev,
+			       new_smi->slave_addr);
+	if (rv) {
+		dev_err(new_smi->dev, "Unable to register device: error %d\n",
+			rv);
+		goto out_err_stop_timer;
+	}
+
+	rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
+				     &smi_type_proc_ops,
+				     new_smi);
+	if (rv) {
+		dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
+		goto out_err_stop_timer;
+	}
+
+	rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
+				     &smi_si_stats_proc_ops,
+				     new_smi);
+	if (rv) {
+		dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
+		goto out_err_stop_timer;
+	}
+
+	rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
+				     &smi_params_proc_ops,
+				     new_smi);
+	if (rv) {
+		dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
+		goto out_err_stop_timer;
+	}
+
+	/* Don't increment till we know we have succeeded. */
+	smi_num++;
+
+	dev_info(new_smi->dev, "IPMI %s interface initialized\n",
+		 si_to_str[new_smi->si_type]);
+
+	WARN_ON(new_smi->dev->init_name != NULL);
+	kfree(init_name);
+
+	return 0;
+
+out_err_stop_timer:
+	stop_timer_and_thread(new_smi);
+
+out_err:
+	new_smi->interrupt_disabled = true;
+
+	if (new_smi->intf) {
+		ipmi_smi_t intf = new_smi->intf;
+		new_smi->intf = NULL;
+		ipmi_unregister_smi(intf);
+	}
+
+	if (new_smi->irq_cleanup) {
+		new_smi->irq_cleanup(new_smi);
+		new_smi->irq_cleanup = NULL;
+	}
+
+	/*
+	 * Wait until we know that we are out of any interrupt
+	 * handlers that might have been running before we free the
+	 * interrupt.
+	 */
+	synchronize_sched();
+
+	if (new_smi->si_sm) {
+		if (new_smi->handlers)
+			new_smi->handlers->cleanup(new_smi->si_sm);
+		kfree(new_smi->si_sm);
+		new_smi->si_sm = NULL;
+	}
+	if (new_smi->addr_source_cleanup) {
+		new_smi->addr_source_cleanup(new_smi);
+		new_smi->addr_source_cleanup = NULL;
+	}
+	if (new_smi->io_cleanup) {
+		new_smi->io_cleanup(new_smi);
+		new_smi->io_cleanup = NULL;
+	}
+
+	if (new_smi->dev_registered) {
+		platform_device_unregister(new_smi->pdev);
+		new_smi->dev_registered = false;
+		new_smi->pdev = NULL;
+	} else if (new_smi->pdev) {
+		platform_device_put(new_smi->pdev);
+		new_smi->pdev = NULL;
+	}
+
+	kfree(init_name);
+
+	return rv;
+}
+
+static int init_ipmi_si(void)
+{
+	int  i;
+	char *str;
+	int  rv;
+	struct smi_info *e;
+	enum ipmi_addr_src type = SI_INVALID;
+
+	if (initialized)
+		return 0;
+	initialized = 1;
+
+	if (si_tryplatform) {
+		rv = platform_driver_register(&ipmi_driver);
+		if (rv) {
+			pr_err(PFX "Unable to register driver: %d\n", rv);
+			return rv;
+		}
+	}
+
+	/* Parse out the si_type string into its components. */
+	str = si_type_str;
+	if (*str != '\0') {
+		for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
+			si_type[i] = str;
+			str = strchr(str, ',');
+			if (str) {
+				*str = '\0';
+				str++;
+			} else {
+				break;
+			}
+		}
+	}
+
+	pr_info("IPMI System Interface driver.\n");
+
+	/* If the user gave us a device, they presumably want us to use it */
+	if (!hardcode_find_bmc())
+		return 0;
+
+#ifdef CONFIG_PCI
+	if (si_trypci) {
+		rv = pci_register_driver(&ipmi_pci_driver);
+		if (rv)
+			pr_err(PFX "Unable to register PCI driver: %d\n", rv);
+		else
+			pci_registered = true;
+	}
+#endif
+
+#ifdef CONFIG_ACPI
+	if (si_tryacpi)
+		spmi_find_bmc();
+#endif
+
+#ifdef CONFIG_PARISC
+	register_parisc_driver(&ipmi_parisc_driver);
+	parisc_registered = true;
+#endif
+
+	/*
+	 * We prefer devices with interrupts, but in the case of a machine
+	 * with multiple BMCs we assume that there will be several instances
+	 * of a given type, so if we succeed in registering a type then also
+	 * try to register everything else of the same type.
+	 */
+
+	mutex_lock(&smi_infos_lock);
+	list_for_each_entry(e, &smi_infos, link) {
+		/*
+		 * Try to register a device if it has an IRQ and we either
+		 * haven't successfully registered a device yet or this
+		 * device has the same type as one we successfully registered.
+		 */
+		if (e->irq && (!type || e->addr_source == type)) {
+			if (!try_smi_init(e)) {
+				type = e->addr_source;
+			}
+		}
+	}
+
+	/* type will only have been set if we successfully registered an si */
+	if (type) {
+		mutex_unlock(&smi_infos_lock);
+		return 0;
+	}
+
+	/* Fall back to the preferred device */
+
+	list_for_each_entry(e, &smi_infos, link) {
+		if (!e->irq && (!type || e->addr_source == type)) {
+			if (!try_smi_init(e)) {
+				type = e->addr_source;
+			}
+		}
+	}
+	mutex_unlock(&smi_infos_lock);
+
+	if (type)
+		return 0;
+
+	mutex_lock(&smi_infos_lock);
+	if (unload_when_empty && list_empty(&smi_infos)) {
+		mutex_unlock(&smi_infos_lock);
+		cleanup_ipmi_si();
+		pr_warn(PFX "Unable to find any System Interface(s)\n");
+		return -ENODEV;
+	} else {
+		mutex_unlock(&smi_infos_lock);
+		return 0;
+	}
+}
+module_init(init_ipmi_si);
+
+static void cleanup_one_si(struct smi_info *to_clean)
+{
+	int           rv = 0;
+
+	if (!to_clean)
+		return;
+
+	if (to_clean->intf) {
+		ipmi_smi_t intf = to_clean->intf;
+
+		to_clean->intf = NULL;
+		rv = ipmi_unregister_smi(intf);
+		if (rv) {
+			pr_err(PFX "Unable to unregister device: errno=%d\n",
+			       rv);
+		}
+	}
+
+	if (to_clean->dev)
+		dev_set_drvdata(to_clean->dev, NULL);
+
+	list_del(&to_clean->link);
+
+	/*
+	 * Make sure that interrupts, the timer and the thread are
+	 * stopped and will not run again.
+	 */
+	if (to_clean->irq_cleanup)
+		to_clean->irq_cleanup(to_clean);
+	stop_timer_and_thread(to_clean);
+
+	/*
+	 * Timeouts are stopped, now make sure the interrupts are off
+	 * in the BMC.  Note that timers and CPU interrupts are off,
+	 * so no need for locks.
+	 */
+	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
+		poll(to_clean);
+		schedule_timeout_uninterruptible(1);
+	}
+	if (to_clean->handlers)
+		disable_si_irq(to_clean);
+	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
+		poll(to_clean);
+		schedule_timeout_uninterruptible(1);
+	}
+
+	if (to_clean->handlers)
+		to_clean->handlers->cleanup(to_clean->si_sm);
+
+	kfree(to_clean->si_sm);
+
+	if (to_clean->addr_source_cleanup)
+		to_clean->addr_source_cleanup(to_clean);
+	if (to_clean->io_cleanup)
+		to_clean->io_cleanup(to_clean);
+
+	if (to_clean->dev_registered)
+		platform_device_unregister(to_clean->pdev);
+
+	kfree(to_clean);
+}
+
+static void cleanup_ipmi_si(void)
+{
+	struct smi_info *e, *tmp_e;
+
+	if (!initialized)
+		return;
+
+#ifdef CONFIG_PCI
+	if (pci_registered)
+		pci_unregister_driver(&ipmi_pci_driver);
+#endif
+#ifdef CONFIG_PARISC
+	if (parisc_registered)
+		unregister_parisc_driver(&ipmi_parisc_driver);
+#endif
+
+	platform_driver_unregister(&ipmi_driver);
+
+	mutex_lock(&smi_infos_lock);
+	list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
+		cleanup_one_si(e);
+	mutex_unlock(&smi_infos_lock);
+}
+module_exit(cleanup_ipmi_si);
+
+MODULE_ALIAS("platform:dmi-ipmi-si");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
+		   " system interfaces.");
diff --git a/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_si_sm.h b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_si_sm.h
new file mode 100644
index 0000000..a705027
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_si_sm.h
@@ -0,0 +1,141 @@
+/*
+ * ipmi_si_sm.h
+ *
+ * State machine interface for low-level IPMI system management
+ * interface state machines.  This code is the interface between
+ * the ipmi_smi code (that handles the policy of a KCS, SMIC, or
+ * BT interface) and the actual low-level state machine.
+ *
+ * Author: MontaVista Software, Inc.
+ *         Corey Minyard <minyard@mvista.com>
+ *         source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * This is defined by the state machines themselves, it is an opaque
+ * data type for them to use.
+ */
+struct si_sm_data;
+
+/*
+ * The structure for doing I/O in the state machine.  The state
+ * machine doesn't have the actual I/O routines, they are done through
+ * this interface.
+ */
+struct si_sm_io {
+	unsigned char (*inputb)(const struct si_sm_io *io, unsigned int offset);
+	void (*outputb)(const struct si_sm_io *io,
+			unsigned int  offset,
+			unsigned char b);
+
+	/*
+	 * Generic info used by the actual handling routines, the
+	 * state machine shouldn't touch these.
+	 */
+	void __iomem *addr;
+	int  regspacing;
+	int  regsize;
+	int  regshift;
+	int addr_type;
+	long addr_data;
+};
+
+/* Results of SMI events. */
+enum si_sm_result {
+	SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */
+	SI_SM_CALL_WITH_DELAY,	/* Delay some before calling again. */
+	SI_SM_CALL_WITH_TICK_DELAY, /* Delay >= 1 tick before calling again. */
+	SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */
+	SI_SM_IDLE,		/* The SM is in idle state. */
+	SI_SM_HOSED,		/* The hardware violated the state machine. */
+
+	/*
+	 * The hardware is asserting attn and the state machine is
+	 * idle.
+	 */
+	SI_SM_ATTN
+};
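+
+/*
+ * Editorial sketch, not part of this header: a hypothetical polling
+ * loop in the upper layer might translate these results into delays
+ * roughly as follows (do_poll_loop() and the 10us figure are
+ * illustrative assumptions, not the real ipmi_si logic):
+ */
+#if 0
+static void do_poll_loop(struct si_sm_data *smi,
+			 const struct si_sm_handlers *h)
+{
+	enum si_sm_result res;
+
+	do {
+		/* Report roughly 10 microseconds per iteration. */
+		res = h->event(smi, 10);
+		if (res == SI_SM_CALL_WITH_DELAY)
+			udelay(10);
+		else if (res == SI_SM_CALL_WITH_TICK_DELAY)
+			schedule_timeout_uninterruptible(1);
+	} while (res != SI_SM_TRANSACTION_COMPLETE &&
+		 res != SI_SM_IDLE && res != SI_SM_HOSED);
+}
+#endif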
+
+/* Handlers for the SMI state machine. */
+struct si_sm_handlers {
+	/*
+	 * Put the version number of the state machine here so the
+	 * upper layer can print it.
+	 */
+	char *version;
+
+	/*
+	 * Initialize the data and return the amount of I/O space to
+	 * reserve for the interface.
+	 */
+	unsigned int (*init_data)(struct si_sm_data *smi,
+				  struct si_sm_io   *io);
+
+	/*
+	 * Start a new transaction in the state machine.  This will
+	 * return -2 if the state machine is not idle, -1 if the size
+	 * is invalid (too large or too small), or 0 if the transaction
+	 * is successfully started.
+	 */
+	int (*start_transaction)(struct si_sm_data *smi,
+				 unsigned char *data, unsigned int size);
+
+	/*
+	 * Return the results after the transaction.  This will return
+	 * -1 if the buffer is too small, zero if no transaction is
+	 * present, or the actual length of the result data.
+	 */
+	int (*get_result)(struct si_sm_data *smi,
+			  unsigned char *data, unsigned int length);
+
+	/*
+	 * Call this periodically (for a polled interface) or upon
+	 * receiving an interrupt (for an interrupt-driven interface).
+	 * If interrupt driven, you should probably poll this
+	 * periodically when not in idle state.  This should be called
+	 * with the time that passed since the last call, if it is
+	 * significant.  Time is in microseconds.
+	 */
+	enum si_sm_result (*event)(struct si_sm_data *smi, long time);
+
+	/*
+	 * Attempt to detect an SMI.  Returns 0 on success or nonzero
+	 * on failure.
+	 */
+	int (*detect)(struct si_sm_data *smi);
+
+	/* The interface is shutting down, so clean it up. */
+	void (*cleanup)(struct si_sm_data *smi);
+
+	/* Return the size of the SMI structure in bytes. */
+	int (*size)(void);
+};
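+
+/*
+ * Editorial sketch, not part of this header: a minimal skeleton of
+ * these handlers, showing the shape a new state machine would take.
+ * The null_* names are invented for illustration; the real
+ * implementations live in ipmi_kcs_sm.c, ipmi_smic_sm.c and
+ * ipmi_bt_sm.c.
+ */
+#if 0
+static unsigned int null_init_data(struct si_sm_data *smi,
+				   struct si_sm_io *io)
+{
+	return 1;	/* Reserve one byte of I/O space. */
+}
+
+static int null_size(void)
+{
+	return sizeof(struct si_sm_data);
+}
+
+static const struct si_sm_handlers null_smi_handlers = {
+	.version	= "null-v0",
+	.init_data	= null_init_data,
+	.size		= null_size,
+	/* .start_transaction, .get_result, .event, .detect and
+	 * .cleanup would be filled in the same way. */
+};
+#endif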
+
+/* Current state machines that we can use. */
+extern const struct si_sm_handlers kcs_smi_handlers;
+extern const struct si_sm_handlers smic_smi_handlers;
+extern const struct si_sm_handlers bt_smi_handlers;
+
diff --git a/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_smic_sm.c b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_smic_sm.c
new file mode 100644
index 0000000..8f7c73f
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_smic_sm.c
@@ -0,0 +1,600 @@
+/*
+ * ipmi_smic_sm.c
+ *
+ * The state-machine driver for an IPMI SMIC driver
+ *
+ * It started as a copy of Corey Minyard's driver for the KCS interface
+ * and the kernel patch "mmcdev-patch-245" by HP
+ *
+ * modified by:	Hannes Schulz <schulz@schwaar.com>
+ *		ipmi@schwaar.com
+ *
+ *
+ * Corey Minyard's driver for the KCS interface has the following
+ * copyright notice:
+ *   Copyright 2002 MontaVista Software Inc.
+ *
+ * the kernel patch "mmcdev-patch-245" by HP has the following
+ * copyright notice:
+ * (c) Copyright 2001 Grant Grundler (c) Copyright
+ * 2001 Hewlett-Packard Company
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.  */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi_msgdefs.h>		/* for completion codes */
+#include "ipmi_si_sm.h"
+
+/* smic_debug is a bit-field
+ *	SMIC_DEBUG_ENABLE -	turned on for now
+ *	SMIC_DEBUG_MSG -	commands and their responses
+ *	SMIC_DEBUG_STATES -	state machine
+ */
+#define SMIC_DEBUG_STATES	4
+#define SMIC_DEBUG_MSG		2
+#define	SMIC_DEBUG_ENABLE	1
+
+static int smic_debug = 1;
+module_param(smic_debug, int, 0644);
+MODULE_PARM_DESC(smic_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
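+
+/*
+ * Editorial note: ipmi_smic_sm.c is normally built into the ipmi_si
+ * module, so (assuming that layout) the bitmask can be set at load
+ * time, e.g. "modprobe ipmi_si smic_debug=3" to combine enable (1)
+ * and message dumps (2), or changed later through
+ * /sys/module/ipmi_si/parameters/smic_debug thanks to the 0644 mode.
+ */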
+
+enum smic_states {
+	SMIC_IDLE,
+	SMIC_START_OP,
+	SMIC_OP_OK,
+	SMIC_WRITE_START,
+	SMIC_WRITE_NEXT,
+	SMIC_WRITE_END,
+	SMIC_WRITE2READ,
+	SMIC_READ_START,
+	SMIC_READ_NEXT,
+	SMIC_READ_END,
+	SMIC_HOSED
+};
+
+#define MAX_SMIC_READ_SIZE 80
+#define MAX_SMIC_WRITE_SIZE 80
+#define SMIC_MAX_ERROR_RETRIES 3
+
+/* Timeouts in microseconds. */
+#define SMIC_RETRY_TIMEOUT (2*USEC_PER_SEC)
+
+/* SMIC Flags Register Bits */
+#define SMIC_RX_DATA_READY	0x80
+#define SMIC_TX_DATA_READY	0x40
+
+/*
+ * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by
+ * a few systems, and then only by System Management
+ * Interrupts, not by the OS.  Always ignore these bits.
+ */
+#define SMIC_SMI		0x10
+#define SMIC_EVM_DATA_AVAIL	0x08
+#define SMIC_SMS_DATA_AVAIL	0x04
+#define SMIC_FLAG_BSY		0x01
+
+/* SMIC Error Codes */
+#define	EC_NO_ERROR		0x00
+#define	EC_ABORTED		0x01
+#define	EC_ILLEGAL_CONTROL	0x02
+#define	EC_NO_RESPONSE		0x03
+#define	EC_ILLEGAL_COMMAND	0x04
+#define	EC_BUFFER_FULL		0x05
+
+struct si_sm_data {
+	enum smic_states state;
+	struct si_sm_io *io;
+	unsigned char	 write_data[MAX_SMIC_WRITE_SIZE];
+	int		 write_pos;
+	int		 write_count;
+	int		 orig_write_count;
+	unsigned char	 read_data[MAX_SMIC_READ_SIZE];
+	int		 read_pos;
+	int		 truncated;
+	unsigned int	 error_retries;
+	long		 smic_timeout;
+};
+
+static unsigned int init_smic_data(struct si_sm_data *smic,
+				   struct si_sm_io *io)
+{
+	smic->state = SMIC_IDLE;
+	smic->io = io;
+	smic->write_pos = 0;
+	smic->write_count = 0;
+	smic->orig_write_count = 0;
+	smic->read_pos = 0;
+	smic->error_retries = 0;
+	smic->truncated = 0;
+	smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+
+	/* We use 3 bytes of I/O. */
+	return 3;
+}
+
+static int start_smic_transaction(struct si_sm_data *smic,
+				  unsigned char *data, unsigned int size)
+{
+	unsigned int i;
+
+	if (size < 2)
+		return IPMI_REQ_LEN_INVALID_ERR;
+	if (size > MAX_SMIC_WRITE_SIZE)
+		return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+	if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED))
+		return IPMI_NOT_IN_MY_STATE_ERR;
+
+	if (smic_debug & SMIC_DEBUG_MSG) {
+		printk(KERN_DEBUG "start_smic_transaction -");
+		for (i = 0; i < size; i++)
+			printk(" %02x", (unsigned char) data[i]);
+		printk("\n");
+	}
+	smic->error_retries = 0;
+	memcpy(smic->write_data, data, size);
+	smic->write_count = size;
+	smic->orig_write_count = size;
+	smic->write_pos = 0;
+	smic->read_pos = 0;
+	smic->state = SMIC_START_OP;
+	smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+	return 0;
+}
+
+static int smic_get_result(struct si_sm_data *smic,
+			   unsigned char *data, unsigned int length)
+{
+	int i;
+
+	if (smic_debug & SMIC_DEBUG_MSG) {
+		printk(KERN_DEBUG "smic_get_result -");
+		for (i = 0; i < smic->read_pos; i++)
+			printk(" %02x", smic->read_data[i]);
+		printk("\n");
+	}
+	if (length < smic->read_pos) {
+		smic->read_pos = length;
+		smic->truncated = 1;
+	}
+	memcpy(data, smic->read_data, smic->read_pos);
+
+	if ((length >= 3) && (smic->read_pos < 3)) {
+		data[2] = IPMI_ERR_UNSPECIFIED;
+		smic->read_pos = 3;
+	}
+	if (smic->truncated) {
+		data[2] = IPMI_ERR_MSG_TRUNCATED;
+		smic->truncated = 0;
+	}
+	return smic->read_pos;
+}
+
+static inline unsigned char read_smic_flags(struct si_sm_data *smic)
+{
+	return smic->io->inputb(smic->io, 2);
+}
+
+static inline unsigned char read_smic_status(struct si_sm_data *smic)
+{
+	return smic->io->inputb(smic->io, 1);
+}
+
+static inline unsigned char read_smic_data(struct si_sm_data *smic)
+{
+	return smic->io->inputb(smic->io, 0);
+}
+
+static inline void write_smic_flags(struct si_sm_data *smic,
+				    unsigned char   flags)
+{
+	smic->io->outputb(smic->io, 2, flags);
+}
+
+static inline void write_smic_control(struct si_sm_data *smic,
+				      unsigned char   control)
+{
+	smic->io->outputb(smic->io, 1, control);
+}
+
+static inline void write_si_sm_data(struct si_sm_data *smic,
+				    unsigned char   data)
+{
+	smic->io->outputb(smic->io, 0, data);
+}
+
+static inline void start_error_recovery(struct si_sm_data *smic, char *reason)
+{
+	(smic->error_retries)++;
+	if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) {
+		if (smic_debug & SMIC_DEBUG_ENABLE)
+			printk(KERN_WARNING
+			       "ipmi_smic_drv: smic hosed: %s\n", reason);
+		smic->state = SMIC_HOSED;
+	} else {
+		smic->write_count = smic->orig_write_count;
+		smic->write_pos = 0;
+		smic->read_pos = 0;
+		smic->state = SMIC_START_OP;
+		smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+	}
+}
+
+static inline void write_next_byte(struct si_sm_data *smic)
+{
+	write_si_sm_data(smic, smic->write_data[smic->write_pos]);
+	(smic->write_pos)++;
+	(smic->write_count)--;
+}
+
+static inline void read_next_byte(struct si_sm_data *smic)
+{
+	if (smic->read_pos >= MAX_SMIC_READ_SIZE) {
+		read_smic_data(smic);
+		smic->truncated = 1;
+	} else {
+		smic->read_data[smic->read_pos] = read_smic_data(smic);
+		smic->read_pos++;
+	}
+}
+
+/*  SMIC Control/Status Code Components */
+#define	SMIC_GET_STATUS		0x00	/* Control form's name */
+#define	SMIC_READY		0x00	/* Status  form's name */
+#define	SMIC_WR_START		0x01	/* Unified Control/Status names... */
+#define	SMIC_WR_NEXT		0x02
+#define	SMIC_WR_END		0x03
+#define	SMIC_RD_START		0x04
+#define	SMIC_RD_NEXT		0x05
+#define	SMIC_RD_END		0x06
+#define	SMIC_CODE_MASK		0x0f
+
+#define	SMIC_CONTROL		0x00
+#define	SMIC_STATUS		0x80
+#define	SMIC_CS_MASK		0x80
+
+#define	SMIC_SMS		0x40
+#define	SMIC_SMM		0x60
+#define	SMIC_STREAM_MASK	0x60
+
+/*  SMIC Control Codes */
+#define	SMIC_CC_SMS_GET_STATUS	(SMIC_CONTROL|SMIC_SMS|SMIC_GET_STATUS)
+#define	SMIC_CC_SMS_WR_START	(SMIC_CONTROL|SMIC_SMS|SMIC_WR_START)
+#define	SMIC_CC_SMS_WR_NEXT	(SMIC_CONTROL|SMIC_SMS|SMIC_WR_NEXT)
+#define	SMIC_CC_SMS_WR_END	(SMIC_CONTROL|SMIC_SMS|SMIC_WR_END)
+#define	SMIC_CC_SMS_RD_START	(SMIC_CONTROL|SMIC_SMS|SMIC_RD_START)
+#define	SMIC_CC_SMS_RD_NEXT	(SMIC_CONTROL|SMIC_SMS|SMIC_RD_NEXT)
+#define	SMIC_CC_SMS_RD_END	(SMIC_CONTROL|SMIC_SMS|SMIC_RD_END)
+
+#define	SMIC_CC_SMM_GET_STATUS	(SMIC_CONTROL|SMIC_SMM|SMIC_GET_STATUS)
+#define	SMIC_CC_SMM_WR_START	(SMIC_CONTROL|SMIC_SMM|SMIC_WR_START)
+#define	SMIC_CC_SMM_WR_NEXT	(SMIC_CONTROL|SMIC_SMM|SMIC_WR_NEXT)
+#define	SMIC_CC_SMM_WR_END	(SMIC_CONTROL|SMIC_SMM|SMIC_WR_END)
+#define	SMIC_CC_SMM_RD_START	(SMIC_CONTROL|SMIC_SMM|SMIC_RD_START)
+#define	SMIC_CC_SMM_RD_NEXT	(SMIC_CONTROL|SMIC_SMM|SMIC_RD_NEXT)
+#define	SMIC_CC_SMM_RD_END	(SMIC_CONTROL|SMIC_SMM|SMIC_RD_END)
+
+/*  SMIC Status Codes */
+#define	SMIC_SC_SMS_READY	(SMIC_STATUS|SMIC_SMS|SMIC_READY)
+#define	SMIC_SC_SMS_WR_START	(SMIC_STATUS|SMIC_SMS|SMIC_WR_START)
+#define	SMIC_SC_SMS_WR_NEXT	(SMIC_STATUS|SMIC_SMS|SMIC_WR_NEXT)
+#define	SMIC_SC_SMS_WR_END	(SMIC_STATUS|SMIC_SMS|SMIC_WR_END)
+#define	SMIC_SC_SMS_RD_START	(SMIC_STATUS|SMIC_SMS|SMIC_RD_START)
+#define	SMIC_SC_SMS_RD_NEXT	(SMIC_STATUS|SMIC_SMS|SMIC_RD_NEXT)
+#define	SMIC_SC_SMS_RD_END	(SMIC_STATUS|SMIC_SMS|SMIC_RD_END)
+
+#define	SMIC_SC_SMM_READY	(SMIC_STATUS|SMIC_SMM|SMIC_READY)
+#define	SMIC_SC_SMM_WR_START	(SMIC_STATUS|SMIC_SMM|SMIC_WR_START)
+#define	SMIC_SC_SMM_WR_NEXT	(SMIC_STATUS|SMIC_SMM|SMIC_WR_NEXT)
+#define	SMIC_SC_SMM_WR_END	(SMIC_STATUS|SMIC_SMM|SMIC_WR_END)
+#define	SMIC_SC_SMM_RD_START	(SMIC_STATUS|SMIC_SMM|SMIC_RD_START)
+#define	SMIC_SC_SMM_RD_NEXT	(SMIC_STATUS|SMIC_SMM|SMIC_RD_NEXT)
+#define	SMIC_SC_SMM_RD_END	(SMIC_STATUS|SMIC_SMM|SMIC_RD_END)
+
+/* these are the control/status codes we actually use
+	SMIC_CC_SMS_GET_STATUS	0x40
+	SMIC_CC_SMS_WR_START	0x41
+	SMIC_CC_SMS_WR_NEXT	0x42
+	SMIC_CC_SMS_WR_END	0x43
+	SMIC_CC_SMS_RD_START	0x44
+	SMIC_CC_SMS_RD_NEXT	0x45
+	SMIC_CC_SMS_RD_END	0x46
+
+	SMIC_SC_SMS_READY	0xC0
+	SMIC_SC_SMS_WR_START	0xC1
+	SMIC_SC_SMS_WR_NEXT	0xC2
+	SMIC_SC_SMS_WR_END	0xC3
+	SMIC_SC_SMS_RD_START	0xC4
+	SMIC_SC_SMS_RD_NEXT	0xC5
+	SMIC_SC_SMS_RD_END	0xC6
+*/
+
+static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
+{
+	unsigned char status;
+	unsigned char flags;
+	unsigned char data;
+
+	if (smic->state == SMIC_HOSED) {
+		init_smic_data(smic, smic->io);
+		return SI_SM_HOSED;
+	}
+	if (smic->state != SMIC_IDLE) {
+		if (smic_debug & SMIC_DEBUG_STATES)
+			printk(KERN_DEBUG
+			       "smic_event - smic->smic_timeout = %ld,"
+			       " time = %ld\n",
+			       smic->smic_timeout, time);
+		/*
+		 * FIXME: smic_event is sometimes called with time >
+		 * SMIC_RETRY_TIMEOUT
+		 */
+		if (time < SMIC_RETRY_TIMEOUT) {
+			smic->smic_timeout -= time;
+			if (smic->smic_timeout < 0) {
+				start_error_recovery(smic, "smic timed out.");
+				return SI_SM_CALL_WITH_DELAY;
+			}
+		}
+	}
+	flags = read_smic_flags(smic);
+	if (flags & SMIC_FLAG_BSY)
+		return SI_SM_CALL_WITH_DELAY;
+
+	status = read_smic_status(smic);
+	if (smic_debug & SMIC_DEBUG_STATES)
+		printk(KERN_DEBUG
+		       "smic_event - state = %d, flags = 0x%02x,"
+		       " status = 0x%02x\n",
+		       smic->state, flags, status);
+
+	switch (smic->state) {
+	case SMIC_IDLE:
+		/* in IDLE we check for available messages */
+		if (flags & SMIC_SMS_DATA_AVAIL)
+			return SI_SM_ATTN;
+		return SI_SM_IDLE;
+
+	case SMIC_START_OP:
+		/* sanity check whether smic is really idle */
+		write_smic_control(smic, SMIC_CC_SMS_GET_STATUS);
+		write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+		smic->state = SMIC_OP_OK;
+		break;
+
+	case SMIC_OP_OK:
+		if (status != SMIC_SC_SMS_READY) {
+			/* this should not happen */
+			start_error_recovery(smic,
+					     "state = SMIC_OP_OK,"
+					     " status != SMIC_SC_SMS_READY");
+			return SI_SM_CALL_WITH_DELAY;
+		}
+		/* OK so far; smic is idle, let us start ... */
+		write_smic_control(smic, SMIC_CC_SMS_WR_START);
+		write_next_byte(smic);
+		write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+		smic->state = SMIC_WRITE_START;
+		break;
+
+	case SMIC_WRITE_START:
+		if (status != SMIC_SC_SMS_WR_START) {
+			start_error_recovery(smic,
+					     "state = SMIC_WRITE_START, "
+					     "status != SMIC_SC_SMS_WR_START");
+			return SI_SM_CALL_WITH_DELAY;
+		}
+		/*
+		 * we must not issue WR_(NEXT|END) unless
+		 * TX_DATA_READY is set
+		 */
+		if (flags & SMIC_TX_DATA_READY) {
+			if (smic->write_count == 1) {
+				/* last byte */
+				write_smic_control(smic, SMIC_CC_SMS_WR_END);
+				smic->state = SMIC_WRITE_END;
+			} else {
+				write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
+				smic->state = SMIC_WRITE_NEXT;
+			}
+			write_next_byte(smic);
+			write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+		} else
+			return SI_SM_CALL_WITH_DELAY;
+		break;
+
+	case SMIC_WRITE_NEXT:
+		if (status != SMIC_SC_SMS_WR_NEXT) {
+			start_error_recovery(smic,
+					     "state = SMIC_WRITE_NEXT, "
+					     "status != SMIC_SC_SMS_WR_NEXT");
+			return SI_SM_CALL_WITH_DELAY;
+		}
+		/* this is the same code as in SMIC_WRITE_START */
+		if (flags & SMIC_TX_DATA_READY) {
+			if (smic->write_count == 1) {
+				write_smic_control(smic, SMIC_CC_SMS_WR_END);
+				smic->state = SMIC_WRITE_END;
+			} else {
+				write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
+				smic->state = SMIC_WRITE_NEXT;
+			}
+			write_next_byte(smic);
+			write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+		} else
+			return SI_SM_CALL_WITH_DELAY;
+		break;
+
+	case SMIC_WRITE_END:
+		if (status != SMIC_SC_SMS_WR_END) {
+			start_error_recovery(smic,
+					     "state = SMIC_WRITE_END, "
+					     "status != SMIC_SC_SMS_WR_END");
+			return SI_SM_CALL_WITH_DELAY;
+		}
+		/* data register holds an error code */
+		data = read_smic_data(smic);
+		if (data != 0) {
+			if (smic_debug & SMIC_DEBUG_ENABLE)
+				printk(KERN_DEBUG
+				       "SMIC_WRITE_END: data = %02x\n", data);
+			start_error_recovery(smic,
+					     "state = SMIC_WRITE_END, "
+					     "data != SUCCESS");
+			return SI_SM_CALL_WITH_DELAY;
+		} else
+			smic->state = SMIC_WRITE2READ;
+		break;
+
+	case SMIC_WRITE2READ:
+		/*
+		 * we must wait for RX_DATA_READY to be set before we
+		 * can continue
+		 */
+		if (flags & SMIC_RX_DATA_READY) {
+			write_smic_control(smic, SMIC_CC_SMS_RD_START);
+			write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+			smic->state = SMIC_READ_START;
+		} else
+			return SI_SM_CALL_WITH_DELAY;
+		break;
+
+	case SMIC_READ_START:
+		if (status != SMIC_SC_SMS_RD_START) {
+			start_error_recovery(smic,
+					     "state = SMIC_READ_START, "
+					     "status != SMIC_SC_SMS_RD_START");
+			return SI_SM_CALL_WITH_DELAY;
+		}
+		if (flags & SMIC_RX_DATA_READY) {
+			read_next_byte(smic);
+			write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
+			write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+			smic->state = SMIC_READ_NEXT;
+		} else
+			return SI_SM_CALL_WITH_DELAY;
+		break;
+
+	case SMIC_READ_NEXT:
+		switch (status) {
+		/*
+		 * smic tells us that this is the last byte to be read
+		 * --> clean up
+		 */
+		case SMIC_SC_SMS_RD_END:
+			read_next_byte(smic);
+			write_smic_control(smic, SMIC_CC_SMS_RD_END);
+			write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+			smic->state = SMIC_READ_END;
+			break;
+		case SMIC_SC_SMS_RD_NEXT:
+			if (flags & SMIC_RX_DATA_READY) {
+				read_next_byte(smic);
+				write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
+				write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+				smic->state = SMIC_READ_NEXT;
+			} else
+				return SI_SM_CALL_WITH_DELAY;
+			break;
+		default:
+			start_error_recovery(
+				smic,
+				"state = SMIC_READ_NEXT, "
+				"status != SMIC_SC_SMS_RD_(NEXT|END)");
+			return SI_SM_CALL_WITH_DELAY;
+		}
+		break;
+
+	case SMIC_READ_END:
+		if (status != SMIC_SC_SMS_READY) {
+			start_error_recovery(smic,
+					     "state = SMIC_READ_END, "
+					     "status != SMIC_SC_SMS_READY");
+			return SI_SM_CALL_WITH_DELAY;
+		}
+		data = read_smic_data(smic);
+		/* data register holds an error code */
+		if (data != 0) {
+			if (smic_debug & SMIC_DEBUG_ENABLE)
+				printk(KERN_DEBUG
+				       "SMIC_READ_END: data = %02x\n", data);
+			start_error_recovery(smic,
+					     "state = SMIC_READ_END, "
+					     "data != SUCCESS");
+			return SI_SM_CALL_WITH_DELAY;
+		} else {
+			smic->state = SMIC_IDLE;
+			return SI_SM_TRANSACTION_COMPLETE;
+		}
+
+	case SMIC_HOSED:
+		init_smic_data(smic, smic->io);
+		return SI_SM_HOSED;
+
+	default:
+		if (smic_debug & SMIC_DEBUG_ENABLE) {
+			printk(KERN_DEBUG "smic->state = %d\n", smic->state);
+			start_error_recovery(smic, "state = UNKNOWN");
+			return SI_SM_CALL_WITH_DELAY;
+		}
+	}
+	smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+	return SI_SM_CALL_WITHOUT_DELAY;
+}
+
+static int smic_detect(struct si_sm_data *smic)
+{
+	/*
+	 * It's impossible for the SMIC flags register to be all 1's,
+	 * (assuming a properly functioning, self-initialized BMC)
+	 * but that's what you get from reading a bogus address, so we
+	 * test that first.
+	 */
+	if (read_smic_flags(smic) == 0xff)
+		return 1;
+
+	return 0;
+}
+
+static void smic_cleanup(struct si_sm_data *smic)
+{
+}
+
+static int smic_size(void)
+{
+	return sizeof(struct si_sm_data);
+}
+
+const struct si_sm_handlers smic_smi_handlers = {
+	.init_data         = init_smic_data,
+	.start_transaction = start_smic_transaction,
+	.get_result        = smic_get_result,
+	.event             = smic_event,
+	.detect            = smic_detect,
+	.cleanup           = smic_cleanup,
+	.size              = smic_size,
+};
diff --git a/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_ssif.c b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_ssif.c
new file mode 100644
index 0000000..cf87bfe
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_ssif.c
@@ -0,0 +1,2116 @@
+/*
+ * ipmi_ssif.c
+ *
+ * The interface to the IPMI driver for SMBus access to an
+ * SMBus-compliant device.  Called SSIF by the IPMI spec.
+ *
+ * Author: Intel Corporation
+ *         Todd Davis <todd.c.davis@intel.com>
+ *
+ * Rewritten by Corey Minyard <minyard@acm.org> to support the
+ * non-blocking I2C interface, add support for multi-part
+ * transactions, add PEC support, and general cleanup.
+ *
+ * Copyright 2003 Intel Corporation
+ * Copyright 2005 MontaVista Software
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ */
+
+/*
+ * This file holds the "policy" for the interface to the SSIF state
+ * machine.  It does the configuration, handles timers and interrupts,
+ * and drives the real SSIF state machine.
+ */
+
+/*
+ * TODO: Figure out how to use SMB alerts.  This will require a new
+ * interface into the I2C driver, I believe.
+ */
+
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/ipmi_smi.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/kthread.h>
+#include <linux/acpi.h>
+#include <linux/ctype.h>
+#include <linux/time64.h>
+#include "ipmi_dmi.h"
+
+#define PFX "ipmi_ssif: "
+#define DEVICE_NAME "ipmi_ssif"
+
+#define IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD	0x57
+
+#define	SSIF_IPMI_REQUEST			2
+#define	SSIF_IPMI_MULTI_PART_REQUEST_START	6
+#define	SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE	7
+#define	SSIF_IPMI_RESPONSE			3
+#define	SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE	9
+
+/* ssif_debug is a bit-field
+ *	SSIF_DEBUG_MSG -	commands and their responses
+ *	SSIF_DEBUG_STATE -	message states
+ *	SSIF_DEBUG_TIMING -	measure times between events in the driver
+ */
+#define SSIF_DEBUG_TIMING	4
+#define SSIF_DEBUG_STATE	2
+#define SSIF_DEBUG_MSG		1
+#define SSIF_NODEBUG		0
+#define SSIF_DEFAULT_DEBUG	(SSIF_NODEBUG)
+
+/*
+ * Timer values
+ */
+#define SSIF_MSG_USEC		20000	/* 20ms between message tries. */
+#define SSIF_MSG_PART_USEC	5000	/* 5ms for a message part */
+
+/* How many times do we retry sending/receiving the message. */
+#define	SSIF_SEND_RETRIES	5
+#define	SSIF_RECV_RETRIES	250
+
+#define SSIF_MSG_MSEC		(SSIF_MSG_USEC / 1000)
+#define SSIF_MSG_JIFFIES	((SSIF_MSG_USEC * 1000) / TICK_NSEC)
+#define SSIF_MSG_PART_JIFFIES	((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
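+
+/*
+ * Editorial worked example: with HZ=250, TICK_NSEC is 4,000,000 ns,
+ * so SSIF_MSG_JIFFIES = (20000 * 1000) / 4000000 = 5 jiffies and
+ * SSIF_MSG_PART_JIFFIES = (5000 * 1000) / 4000000 = 1 jiffy; the
+ * 20ms and 5ms timeouts above thus round to whole ticks.
+ */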
+
+enum ssif_intf_state {
+	SSIF_NORMAL,
+	SSIF_GETTING_FLAGS,
+	SSIF_GETTING_EVENTS,
+	SSIF_CLEARING_FLAGS,
+	SSIF_GETTING_MESSAGES,
+	/* FIXME - add watchdog stuff. */
+};
+
+#define SSIF_IDLE(ssif)	 ((ssif)->ssif_state == SSIF_NORMAL \
+			  && (ssif)->curr_msg == NULL)
+
+/*
+ * Indexes into stats[] in ssif_info below.
+ */
+enum ssif_stat_indexes {
+	/* Number of total messages sent. */
+	SSIF_STAT_sent_messages = 0,
+
+	/*
+	 * Number of message parts sent.  Messages may be broken into
+	 * parts if they are long.
+	 */
+	SSIF_STAT_sent_messages_parts,
+
+	/*
+	 * Number of times a message was retried.
+	 */
+	SSIF_STAT_send_retries,
+
+	/*
+	 * Number of times the send of a message failed.
+	 */
+	SSIF_STAT_send_errors,
+
+	/*
+	 * Number of message responses received.
+	 */
+	SSIF_STAT_received_messages,
+
+	/*
+	 * Number of message fragments received.
+	 */
+	SSIF_STAT_received_message_parts,
+
+	/*
+	 * Number of times the receive of a message was retried.
+	 */
+	SSIF_STAT_receive_retries,
+
+	/*
+	 * Number of errors receiving messages.
+	 */
+	SSIF_STAT_receive_errors,
+
+	/*
+	 * Number of times a flag fetch was requested.
+	 */
+	SSIF_STAT_flag_fetches,
+
+	/*
+	 * Number of times the hardware didn't follow the state machine.
+	 */
+	SSIF_STAT_hosed,
+
+	/*
+	 * Number of received events.
+	 */
+	SSIF_STAT_events,
+
+	/* Number of asynchronous messages received. */
+	SSIF_STAT_incoming_messages,
+
+	/* Number of watchdog pretimeouts. */
+	SSIF_STAT_watchdog_pretimeouts,
+
+	/* Number of alerts received. */
+	SSIF_STAT_alerts,
+
+	/* Always add statistics before this value, it must be last. */
+	SSIF_NUM_STATS
+};
+
+struct ssif_addr_info {
+	struct i2c_board_info binfo;
+	char *adapter_name;
+	int debug;
+	int slave_addr;
+	enum ipmi_addr_src addr_src;
+	union ipmi_smi_info_union addr_info;
+	struct device *dev;
+	struct i2c_client *client;
+
+	struct i2c_client *added_client;
+
+	struct mutex clients_mutex;
+	struct list_head clients;
+
+	struct list_head link;
+};
+
+struct ssif_info;
+
+typedef void (*ssif_i2c_done)(struct ssif_info *ssif_info, int result,
+			     unsigned char *data, unsigned int len);
+
+struct ssif_info {
+	ipmi_smi_t          intf;
+	int                 intf_num;
+	spinlock_t	    lock;
+	struct ipmi_smi_msg *waiting_msg;
+	struct ipmi_smi_msg *curr_msg;
+	enum ssif_intf_state ssif_state;
+	unsigned long       ssif_debug;
+
+	struct ipmi_smi_handlers handlers;
+
+	enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
+	union ipmi_smi_info_union addr_info;
+
+	/*
+	 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
+	 * is set to hold the flags until we are done handling everything
+	 * from the flags.
+	 */
+#define RECEIVE_MSG_AVAIL	0x01
+#define EVENT_MSG_BUFFER_FULL	0x02
+#define WDT_PRE_TIMEOUT_INT	0x08
+	unsigned char       msg_flags;
+
+	u8		    global_enables;
+	bool		    has_event_buffer;
+	bool		    supports_alert;
+
+	/*
+	 * Used to tell what we should do with alerts.  If we are
+	 * waiting on a response, read the data immediately.
+	 */
+	bool		    got_alert;
+	bool		    waiting_alert;
+
+	/*
+	 * If set to true, this will request events the next time the
+	 * state machine is idle.
+	 */
+	bool                req_events;
+
+	/*
+	 * If set to true, this will request flags the next time the
+	 * state machine is idle.
+	 */
+	bool                req_flags;
+
+	/*
+	 * Used to perform timer operations when run-to-completion
+	 * mode is on.  This is a countdown timer.
+	 */
+	int                 rtc_us_timer;
+
+	/* Used for sending/receiving data.  +1 for the length. */
+	unsigned char data[IPMI_MAX_MSG_LENGTH + 1];
+	unsigned int  data_len;
+
+	/* Temp receive buffer, gets copied into data. */
+	unsigned char recv[I2C_SMBUS_BLOCK_MAX];
+
+	struct i2c_client *client;
+	ssif_i2c_done done_handler;
+
+	/* Thread interface handling */
+	struct task_struct *thread;
+	struct completion wake_thread;
+	bool stopping;
+	int i2c_read_write;
+	int i2c_command;
+	unsigned char *i2c_data;
+	unsigned int i2c_size;
+
+	/* From the device id response. */
+	struct ipmi_device_id device_id;
+
+	struct timer_list retry_timer;
+	int retries_left;
+
+	/* Info from SSIF cmd */
+	unsigned char max_xmit_msg_size;
+	unsigned char max_recv_msg_size;
+	unsigned int  multi_support;
+	int           supports_pec;
+
+#define SSIF_NO_MULTI		0
+#define SSIF_MULTI_2_PART	1
+#define SSIF_MULTI_n_PART	2
+	unsigned char *multi_data;
+	unsigned int  multi_len;
+	unsigned int  multi_pos;
+
+	atomic_t stats[SSIF_NUM_STATS];
+};
+
+#define ssif_inc_stat(ssif, stat) \
+	atomic_inc(&(ssif)->stats[SSIF_STAT_ ## stat])
+#define ssif_get_stat(ssif, stat) \
+	((unsigned int) atomic_read(&(ssif)->stats[SSIF_STAT_ ## stat]))
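+
+/*
+ * Editorial note: the token pasting above means that, for example,
+ * ssif_inc_stat(ssif_info, send_errors) expands to
+ * atomic_inc(&ssif_info->stats[SSIF_STAT_send_errors]), so call
+ * sites stay in sync with enum ssif_stat_indexes by construction.
+ */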
+
+static bool initialized;
+
+static atomic_t next_intf = ATOMIC_INIT(0);
+
+static void return_hosed_msg(struct ssif_info *ssif_info,
+			     struct ipmi_smi_msg *msg);
+static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags);
+static int start_send(struct ssif_info *ssif_info,
+		      unsigned char   *data,
+		      unsigned int    len);
+
+static unsigned long *ipmi_ssif_lock_cond(struct ssif_info *ssif_info,
+					  unsigned long *flags)
+{
+	spin_lock_irqsave(&ssif_info->lock, *flags);
+	return flags;
+}
+
+static void ipmi_ssif_unlock_cond(struct ssif_info *ssif_info,
+				  unsigned long *flags)
+{
+	spin_unlock_irqrestore(&ssif_info->lock, *flags);
+}
+
+static void deliver_recv_msg(struct ssif_info *ssif_info,
+			     struct ipmi_smi_msg *msg)
+{
+	ipmi_smi_t    intf = ssif_info->intf;
+
+	if (!intf) {
+		ipmi_free_smi_msg(msg);
+	} else if (msg->rsp_size < 0) {
+		return_hosed_msg(ssif_info, msg);
+		pr_err(PFX
+		       "Malformed message in deliver_recv_msg: rsp_size = %d\n",
+		       msg->rsp_size);
+	} else {
+		ipmi_smi_msg_received(intf, msg);
+	}
+}
+
+static void return_hosed_msg(struct ssif_info *ssif_info,
+			     struct ipmi_smi_msg *msg)
+{
+	ssif_inc_stat(ssif_info, hosed);
+
+	/* Make it a response */
+	msg->rsp[0] = msg->data[0] | 4;
+	msg->rsp[1] = msg->data[1];
+	msg->rsp[2] = 0xFF; /* Unknown error. */
+	msg->rsp_size = 3;
+
+	deliver_recv_msg(ssif_info, msg);
+}
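+
+/*
+ * Editorial worked example: data[0] carries (netfn << 2) | LUN, and
+ * a response netfn is the request netfn plus one, i.e. bit 2 of that
+ * byte.  For an application request (netfn 0x06, LUN 0), data[0] is
+ * 0x18 and msg->rsp[0] becomes 0x18 | 4 = 0x1c, which is the
+ * response netfn 0x07 shifted left by two.
+ */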
+
+/*
+ * Must be called with the message lock held.  This will release the
+ * message lock.  Note that the caller will check SSIF_IDLE and start a
+ * new operation, so there is no need to check for new messages to
+ * start in here.
+ */
+static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
+{
+	unsigned char msg[3];
+
+	ssif_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
+	ssif_info->ssif_state = SSIF_CLEARING_FLAGS;
+	ipmi_ssif_unlock_cond(ssif_info, flags);
+
+	/* Make sure the watchdog pre-timeout flag is not set at startup. */
+	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+	msg[2] = WDT_PRE_TIMEOUT_INT;
+
+	if (start_send(ssif_info, msg, 3) != 0) {
+		/* Error, just go to normal state. */
+		ssif_info->ssif_state = SSIF_NORMAL;
+	}
+}
+
+static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+{
+	unsigned char mb[2];
+
+	ssif_info->req_flags = false;
+	ssif_info->ssif_state = SSIF_GETTING_FLAGS;
+	ipmi_ssif_unlock_cond(ssif_info, flags);
+
+	mb[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	mb[1] = IPMI_GET_MSG_FLAGS_CMD;
+	if (start_send(ssif_info, mb, 2) != 0)
+		ssif_info->ssif_state = SSIF_NORMAL;
+}
+
+static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
+			     struct ipmi_smi_msg *msg)
+{
+	if (start_send(ssif_info, msg->data, msg->data_size) != 0) {
+		unsigned long oflags;
+
+		flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+		ssif_info->curr_msg = NULL;
+		ssif_info->ssif_state = SSIF_NORMAL;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+		ipmi_free_smi_msg(msg);
+	}
+}
+
+static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+{
+	struct ipmi_smi_msg *msg;
+
+	ssif_info->req_events = false;
+
+	msg = ipmi_alloc_smi_msg();
+	if (!msg) {
+		ssif_info->ssif_state = SSIF_NORMAL;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+		return;
+	}
+
+	ssif_info->curr_msg = msg;
+	ssif_info->ssif_state = SSIF_GETTING_EVENTS;
+	ipmi_ssif_unlock_cond(ssif_info, flags);
+
+	msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+	msg->data_size = 2;
+
+	check_start_send(ssif_info, flags, msg);
+}
+
+static void start_recv_msg_fetch(struct ssif_info *ssif_info,
+				 unsigned long *flags)
+{
+	struct ipmi_smi_msg *msg;
+
+	msg = ipmi_alloc_smi_msg();
+	if (!msg) {
+		ssif_info->ssif_state = SSIF_NORMAL;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+		return;
+	}
+
+	ssif_info->curr_msg = msg;
+	ssif_info->ssif_state = SSIF_GETTING_MESSAGES;
+	ipmi_ssif_unlock_cond(ssif_info, flags);
+
+	msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+	msg->data[1] = IPMI_GET_MSG_CMD;
+	msg->data_size = 2;
+
+	check_start_send(ssif_info, flags, msg);
+}
+
+/*
+ * Must be called with the message lock held.  This will release the
+ * message lock.  Note that the caller will check SSIF_IDLE and start a
+ * new operation, so there is no need to check for new messages to
+ * start in here.
+ */
+static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
+{
+	if (ssif_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
+		ipmi_smi_t intf = ssif_info->intf;
+		/* Watchdog pre-timeout */
+		ssif_inc_stat(ssif_info, watchdog_pretimeouts);
+		start_clear_flags(ssif_info, flags);
+		if (intf)
+			ipmi_smi_watchdog_pretimeout(intf);
+	} else if (ssif_info->msg_flags & RECEIVE_MSG_AVAIL)
+		/* Messages available. */
+		start_recv_msg_fetch(ssif_info, flags);
+	else if (ssif_info->msg_flags & EVENT_MSG_BUFFER_FULL)
+		/* Events available. */
+		start_event_fetch(ssif_info, flags);
+	else {
+		ssif_info->ssif_state = SSIF_NORMAL;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+	}
+}
+
+static int ipmi_ssif_thread(void *data)
+{
+	struct ssif_info *ssif_info = data;
+
+	while (!kthread_should_stop()) {
+		int result;
+
+		/* Wait for something to do */
+		result = wait_for_completion_interruptible(
+						&ssif_info->wake_thread);
+		if (ssif_info->stopping)
+			break;
+		if (result == -ERESTARTSYS)
+			continue;
+		init_completion(&ssif_info->wake_thread);
+
+		if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) {
+			result = i2c_smbus_write_block_data(
+				ssif_info->client, ssif_info->i2c_command,
+				ssif_info->i2c_data[0],
+				ssif_info->i2c_data + 1);
+			ssif_info->done_handler(ssif_info, result, NULL, 0);
+		} else {
+			result = i2c_smbus_read_block_data(
+				ssif_info->client, ssif_info->i2c_command,
+				ssif_info->i2c_data);
+			if (result < 0)
+				ssif_info->done_handler(ssif_info, result,
+							NULL, 0);
+			else
+				ssif_info->done_handler(ssif_info, 0,
+							ssif_info->i2c_data,
+							result);
+		}
+	}
+
+	return 0;
+}
+
+static int ssif_i2c_send(struct ssif_info *ssif_info,
+			ssif_i2c_done handler,
+			int read_write, int command,
+			unsigned char *data, unsigned int size)
+{
+	ssif_info->done_handler = handler;
+
+	ssif_info->i2c_read_write = read_write;
+	ssif_info->i2c_command = command;
+	ssif_info->i2c_data = data;
+	ssif_info->i2c_size = size;
+	complete(&ssif_info->wake_thread);
+	return 0;
+}
+
+
+static void msg_done_handler(struct ssif_info *ssif_info, int result,
+			     unsigned char *data, unsigned int len);
+
+static void start_get(struct ssif_info *ssif_info)
+{
+	int rv;
+
+	ssif_info->rtc_us_timer = 0;
+	ssif_info->multi_pos = 0;
+
+	rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+			  SSIF_IPMI_RESPONSE,
+			  ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+	if (rv < 0) {
+		/* request failed, just return the error. */
+		if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+			pr_info("Error from i2c_non_blocking_op(5)\n");
+
+		msg_done_handler(ssif_info, -EIO, NULL, 0);
+	}
+}
+
+static void retry_timeout(unsigned long data)
+{
+	struct ssif_info *ssif_info = (void *) data;
+	unsigned long oflags, *flags;
+	bool waiting;
+
+	if (ssif_info->stopping)
+		return;
+
+	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+	waiting = ssif_info->waiting_alert;
+	ssif_info->waiting_alert = false;
+	ipmi_ssif_unlock_cond(ssif_info, flags);
+
+	if (waiting)
+		start_get(ssif_info);
+}
+
+
+static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
+		       unsigned int data)
+{
+	struct ssif_info *ssif_info = i2c_get_clientdata(client);
+	unsigned long oflags, *flags;
+	bool do_get = false;
+
+	if (type != I2C_PROTOCOL_SMBUS_ALERT)
+		return;
+
+	ssif_inc_stat(ssif_info, alerts);
+
+	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+	if (ssif_info->waiting_alert) {
+		ssif_info->waiting_alert = false;
+		del_timer(&ssif_info->retry_timer);
+		do_get = true;
+	} else if (ssif_info->curr_msg) {
+		ssif_info->got_alert = true;
+	}
+	ipmi_ssif_unlock_cond(ssif_info, flags);
+	if (do_get)
+		start_get(ssif_info);
+}
+
+static int start_resend(struct ssif_info *ssif_info);
+
+static void msg_done_handler(struct ssif_info *ssif_info, int result,
+			     unsigned char *data, unsigned int len)
+{
+	struct ipmi_smi_msg *msg;
+	unsigned long oflags, *flags;
+	int rv;
+
+	/*
+	 * We are single-threaded here, so no need for a lock until we
+	 * start messing with driver states or the queues.
+	 */
+
+	if (result < 0) {
+		ssif_info->retries_left--;
+		if (ssif_info->retries_left > 0) {
+			ssif_inc_stat(ssif_info, receive_retries);
+
+			flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+			ssif_info->waiting_alert = true;
+			ssif_info->rtc_us_timer = SSIF_MSG_USEC;
+			if (!ssif_info->stopping)
+				mod_timer(&ssif_info->retry_timer,
+					  jiffies + SSIF_MSG_JIFFIES);
+			ipmi_ssif_unlock_cond(ssif_info, flags);
+			return;
+		}
+
+		ssif_inc_stat(ssif_info, receive_errors);
+
+		if  (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+			pr_info("Error in msg_done_handler: %d\n", result);
+		len = 0;
+		goto continue_op;
+	}
+
+	if ((len > 1) && (ssif_info->multi_pos == 0)
+				&& (data[0] == 0x00) && (data[1] == 0x01)) {
+		/* Start of multi-part read.  Start the next transaction. */
+		int i;
+
+		ssif_inc_stat(ssif_info, received_message_parts);
+
+		/* Remove the multi-part read marker. */
+		len -= 2;
+		data += 2;
+		for (i = 0; i < len; i++)
+			ssif_info->data[i] = data[i];
+		ssif_info->multi_len = len;
+		ssif_info->multi_pos = 1;
+
+		rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+				  SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
+				  ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+		if (rv < 0) {
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				pr_info("Error from i2c_non_blocking_op(1)\n");
+
+			result = -EIO;
+		} else
+			return;
+	} else if (ssif_info->multi_pos) {
+		/* Middle of multi-part read.  Start the next transaction. */
+		int i;
+		unsigned char blocknum;
+
+		if (len == 0) {
+			result = -EIO;
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				pr_info(PFX "Middle message with no data\n");
+
+			goto continue_op;
+		}
+
+		blocknum = data[0];
+		len--;
+		data++;
+
+		if (blocknum != 0xff && len != 31) {
+			/* All blocks but the last must have 31 data bytes. */
+			result = -EIO;
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				pr_info("Received middle message <31\n");
+
+			goto continue_op;
+		}
+
+		if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
+			/* Received message too big, abort the operation. */
+			result = -E2BIG;
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				pr_info("Received message too big\n");
+
+			goto continue_op;
+		}
+
+		for (i = 0; i < len; i++)
+			ssif_info->data[i + ssif_info->multi_len] = data[i];
+		ssif_info->multi_len += len;
+		if (blocknum == 0xff) {
+			/* End of read */
+			len = ssif_info->multi_len;
+			data = ssif_info->data;
+		} else if (blocknum + 1 != ssif_info->multi_pos) {
+			/*
+			 * Out of sequence block, just abort.  Block
+			 * numbers start at zero for the second block,
+			 * but multi_pos starts at one, so the +1.
+			 */
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				dev_dbg(&ssif_info->client->dev,
+					"Received message out of sequence, expected %u, got %u\n",
+					ssif_info->multi_pos - 1, blocknum);
+			result = -EIO;
+		} else {
+			ssif_inc_stat(ssif_info, received_message_parts);
+
+			ssif_info->multi_pos++;
+
+			rv = ssif_i2c_send(ssif_info, msg_done_handler,
+					   I2C_SMBUS_READ,
+					   SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
+					   ssif_info->recv,
+					   I2C_SMBUS_BLOCK_DATA);
+			if (rv < 0) {
+				if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+					pr_info(PFX
+						"Error from ssif_i2c_send\n");
+
+				result = -EIO;
+			} else
+				return;
+		}
+	}
+
+ continue_op:
+	if (result < 0) {
+		ssif_inc_stat(ssif_info, receive_errors);
+	} else {
+		ssif_inc_stat(ssif_info, received_messages);
+		ssif_inc_stat(ssif_info, received_message_parts);
+	}
+
+	if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
+		pr_info(PFX "DONE 1: state = %d, result=%d.\n",
+			ssif_info->ssif_state, result);
+
+	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+	msg = ssif_info->curr_msg;
+	if (msg) {
+		if (data) {
+			if (len > IPMI_MAX_MSG_LENGTH)
+				len = IPMI_MAX_MSG_LENGTH;
+			memcpy(msg->rsp, data, len);
+		} else {
+			len = 0;
+		}
+		msg->rsp_size = len;
+		ssif_info->curr_msg = NULL;
+	}
+
+	switch (ssif_info->ssif_state) {
+	case SSIF_NORMAL:
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+		if (!msg)
+			break;
+
+		if (result < 0)
+			return_hosed_msg(ssif_info, msg);
+		else
+			deliver_recv_msg(ssif_info, msg);
+		break;
+
+	case SSIF_GETTING_FLAGS:
+		/* We got the flags from the SSIF, now handle them. */
+		if ((result < 0) || (len < 4) || (data[2] != 0)) {
+			/*
+			 * Error fetching flags, or invalid length,
+			 * just give up for now.
+			 */
+			ssif_info->ssif_state = SSIF_NORMAL;
+			ipmi_ssif_unlock_cond(ssif_info, flags);
+			pr_warn(PFX "Error getting flags: %d %d, %x\n",
+			       result, len, (len >= 3) ? data[2] : 0);
+		} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+			   || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
+			/*
+			 * Don't abort here, maybe it was a queued
+			 * response to a previous command.
+			 */
+			ipmi_ssif_unlock_cond(ssif_info, flags);
+			pr_warn(PFX "Invalid response getting flags: %x %x\n",
+				data[0], data[1]);
+		} else {
+			ssif_inc_stat(ssif_info, flag_fetches);
+			ssif_info->msg_flags = data[3];
+			handle_flags(ssif_info, flags);
+		}
+		break;
+
+	case SSIF_CLEARING_FLAGS:
+		/* We cleared the flags. */
+		if ((result < 0) || (len < 3) || (data[2] != 0)) {
+			/* Error clearing flags */
+			pr_warn(PFX "Error clearing flags: %d %d, %x\n",
+			       result, len, (len >= 3) ? data[2] : 0);
+		} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+			   || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
+			pr_warn(PFX "Invalid response clearing flags: %x %x\n",
+				data[0], data[1]);
+		}
+		ssif_info->ssif_state = SSIF_NORMAL;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+		break;
+
+	case SSIF_GETTING_EVENTS:
+		if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
+			/* Error getting event, probably done. */
+			msg->done(msg);
+
+			/* Take off the event flag. */
+			ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+			handle_flags(ssif_info, flags);
+		} else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+			   || msg->rsp[1] != IPMI_READ_EVENT_MSG_BUFFER_CMD) {
+			pr_warn(PFX "Invalid response getting events: %x %x\n",
+				msg->rsp[0], msg->rsp[1]);
+			msg->done(msg);
+			/* Take off the event flag. */
+			ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+			handle_flags(ssif_info, flags);
+		} else {
+			handle_flags(ssif_info, flags);
+			ssif_inc_stat(ssif_info, events);
+			deliver_recv_msg(ssif_info, msg);
+		}
+		break;
+
+	case SSIF_GETTING_MESSAGES:
+		if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
+			/* Error getting message, probably done. */
+			msg->done(msg);
+
+			/* Take off the msg flag. */
+			ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+			handle_flags(ssif_info, flags);
+		} else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+			   || msg->rsp[1] != IPMI_GET_MSG_CMD) {
+			pr_warn(PFX "Invalid response getting messages: %x %x\n",
+				msg->rsp[0], msg->rsp[1]);
+			msg->done(msg);
+
+			/* Take off the msg flag. */
+			ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+			handle_flags(ssif_info, flags);
+		} else {
+			ssif_inc_stat(ssif_info, incoming_messages);
+			handle_flags(ssif_info, flags);
+			deliver_recv_msg(ssif_info, msg);
+		}
+		break;
+	}
+
+	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+	if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
+		if (ssif_info->req_events)
+			start_event_fetch(ssif_info, flags);
+		else if (ssif_info->req_flags)
+			start_flag_fetch(ssif_info, flags);
+		else
+			start_next_msg(ssif_info, flags);
+	} else
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+
+	if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
+		pr_info(PFX "DONE 2: state = %d.\n", ssif_info->ssif_state);
+}
+
+static void msg_written_handler(struct ssif_info *ssif_info, int result,
+				unsigned char *data, unsigned int len)
+{
+	int rv;
+
+	/* We are single-threaded here, so no need for a lock. */
+	if (result < 0) {
+		ssif_info->retries_left--;
+		if (ssif_info->retries_left > 0) {
+			if (!start_resend(ssif_info)) {
+				ssif_inc_stat(ssif_info, send_retries);
+				return;
+			}
+			/* request failed, just return the error. */
+			ssif_inc_stat(ssif_info, send_errors);
+
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				pr_info(PFX
+					"Out of retries in msg_written_handler\n");
+			msg_done_handler(ssif_info, -EIO, NULL, 0);
+			return;
+		}
+
+		ssif_inc_stat(ssif_info, send_errors);
+
+		/*
+		 * Got an error on transmit, let the done routine
+		 * handle it.
+		 */
+		if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+			pr_info("Error in msg_written_handler: %d\n", result);
+
+		msg_done_handler(ssif_info, result, NULL, 0);
+		return;
+	}
+
+	if (ssif_info->multi_data) {
+		/*
+		 * In the middle of a multi-data write.  See the comment
+		 * in the SSIF_MULTI_n_PART case in the probe function
+		 * for details on the intricacies of this.
+		 */
+		int left;
+		unsigned char *data_to_send;
+
+		ssif_inc_stat(ssif_info, sent_messages_parts);
+
+		left = ssif_info->multi_len - ssif_info->multi_pos;
+		if (left > 32)
+			left = 32;
+		/* Length byte. */
+		ssif_info->multi_data[ssif_info->multi_pos] = left;
+		data_to_send = ssif_info->multi_data + ssif_info->multi_pos;
+		ssif_info->multi_pos += left;
+		if (left < 32)
+			/*
+			 * Write is finished.  Note that we must end
+			 * with a write of less than 32 bytes to
+			 * complete the transaction, even if it is
+			 * zero bytes.
+			 */
+			ssif_info->multi_data = NULL;
+
+		rv = ssif_i2c_send(ssif_info, msg_written_handler,
+				  I2C_SMBUS_WRITE,
+				  SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
+				  data_to_send,
+				  I2C_SMBUS_BLOCK_DATA);
+		if (rv < 0) {
+			/* request failed, just return the error. */
+			ssif_inc_stat(ssif_info, send_errors);
+
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				pr_info("Error from i2c_non_blocking_op(3)\n");
+			msg_done_handler(ssif_info, -EIO, NULL, 0);
+		}
+	} else {
+		/* Ready to request the result. */
+		unsigned long oflags, *flags;
+
+		ssif_inc_stat(ssif_info, sent_messages);
+		ssif_inc_stat(ssif_info, sent_messages_parts);
+
+		flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+		if (ssif_info->got_alert) {
+			/* The result is already ready, just start it. */
+			ssif_info->got_alert = false;
+			ipmi_ssif_unlock_cond(ssif_info, flags);
+			start_get(ssif_info);
+		} else {
+			/* Wait a jiffy, then request the next message. */
+			ssif_info->waiting_alert = true;
+			ssif_info->retries_left = SSIF_RECV_RETRIES;
+			ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
+			if (!ssif_info->stopping)
+				mod_timer(&ssif_info->retry_timer,
+					  jiffies + SSIF_MSG_PART_JIFFIES);
+			ipmi_ssif_unlock_cond(ssif_info, flags);
+		}
+	}
+}
+
+static int start_resend(struct ssif_info *ssif_info)
+{
+	int rv;
+	int command;
+
+	ssif_info->got_alert = false;
+
+	if (ssif_info->data_len > 32) {
+		command = SSIF_IPMI_MULTI_PART_REQUEST_START;
+		ssif_info->multi_data = ssif_info->data;
+		ssif_info->multi_len = ssif_info->data_len;
+		/*
+		 * Subtle thing, this is 32, not 33, because we will
+		 * overwrite the thing at position 32 (which was just
+		 * transmitted) with the new length.
+		 */
+		ssif_info->multi_pos = 32;
+		ssif_info->data[0] = 32;
+	} else {
+		ssif_info->multi_data = NULL;
+		command = SSIF_IPMI_REQUEST;
+		ssif_info->data[0] = ssif_info->data_len;
+	}
+
+	rv = ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
+			  command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
+	if (rv && (ssif_info->ssif_debug & SSIF_DEBUG_MSG))
+		pr_info("Error from i2c_non_blocking_op(4)\n");
+	return rv;
+}
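+
+/*
+ * Editorial worked example of the multi-part framing above: for a
+ * 70-byte message, start_resend() sends bytes 0..31 as the START
+ * part (data[0] rewritten to the length 32), then
+ * msg_written_handler() sends bytes 32..63 as a 32-byte MIDDLE part
+ * and bytes 64..69 as a final 6-byte part.  The final part being
+ * shorter than 32 bytes is what terminates the transaction, so a
+ * message whose tail lands exactly on a 32-byte boundary ends with a
+ * zero-length write.
+ */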
+
+static int start_send(struct ssif_info *ssif_info,
+		      unsigned char   *data,
+		      unsigned int    len)
+{
+	if (len > IPMI_MAX_MSG_LENGTH)
+		return -E2BIG;
+	if (len > ssif_info->max_xmit_msg_size)
+		return -E2BIG;
+
+	ssif_info->retries_left = SSIF_SEND_RETRIES;
+	memcpy(ssif_info->data + 1, data, len);
+	ssif_info->data_len = len;
+	return start_resend(ssif_info);
+}
+
+/* Must be called with the message lock held. */
+static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
+{
+	struct ipmi_smi_msg *msg;
+	unsigned long oflags;
+
+ restart:
+	if (!SSIF_IDLE(ssif_info)) {
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+		return;
+	}
+
+	if (!ssif_info->waiting_msg) {
+		ssif_info->curr_msg = NULL;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+	} else {
+		int rv;
+
+		ssif_info->curr_msg = ssif_info->waiting_msg;
+		ssif_info->waiting_msg = NULL;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+		rv = start_send(ssif_info,
+				ssif_info->curr_msg->data,
+				ssif_info->curr_msg->data_size);
+		if (rv) {
+			msg = ssif_info->curr_msg;
+			ssif_info->curr_msg = NULL;
+			return_hosed_msg(ssif_info, msg);
+			flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+			goto restart;
+		}
+	}
+}
+
+static void sender(void                *send_info,
+		   struct ipmi_smi_msg *msg)
+{
+	struct ssif_info *ssif_info = (struct ssif_info *) send_info;
+	unsigned long oflags, *flags;
+
+	BUG_ON(ssif_info->waiting_msg);
+	ssif_info->waiting_msg = msg;
+
+	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+	start_next_msg(ssif_info, flags);
+
+	if (ssif_info->ssif_debug & SSIF_DEBUG_TIMING) {
+		struct timespec64 t;
+
+		ktime_get_real_ts64(&t);
+		pr_info("**Enqueue %02x %02x: %lld.%6.6ld\n",
+		       msg->data[0], msg->data[1],
+		       (long long) t.tv_sec, (long) t.tv_nsec / NSEC_PER_USEC);
+	}
+}
+
+static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
+{
+	struct ssif_info *ssif_info = send_info;
+
+	data->addr_src = ssif_info->addr_source;
+	data->dev = &ssif_info->client->dev;
+	data->addr_info = ssif_info->addr_info;
+	get_device(data->dev);
+
+	return 0;
+}
+
+/*
+ * Instead of having our own timer to periodically check the message
+ * flags, we let the message handler drive us.
+ */
+static void request_events(void *send_info)
+{
+	struct ssif_info *ssif_info = (struct ssif_info *) send_info;
+	unsigned long oflags, *flags;
+
+	if (!ssif_info->has_event_buffer)
+		return;
+
+	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+	/*
+	 * Request flags first, not events, because the lower layer
+	 * doesn't have a way to send an attention.  But make sure
+	 * event checking still happens.
+	 */
+	ssif_info->req_events = true;
+	if (SSIF_IDLE(ssif_info))
+		start_flag_fetch(ssif_info, flags);
+	else {
+		ssif_info->req_flags = true;
+		ipmi_ssif_unlock_cond(ssif_info, flags);
+	}
+}
+
+static int inc_usecount(void *send_info)
+{
+	struct ssif_info *ssif_info = send_info;
+
+	if (!i2c_get_adapter(i2c_adapter_id(ssif_info->client->adapter)))
+		return -ENODEV;
+
+	i2c_use_client(ssif_info->client);
+	return 0;
+}
+
+static void dec_usecount(void *send_info)
+{
+	struct ssif_info *ssif_info = send_info;
+
+	i2c_release_client(ssif_info->client);
+	i2c_put_adapter(ssif_info->client->adapter);
+}
+
+static int ssif_start_processing(void *send_info,
+				 ipmi_smi_t intf)
+{
+	struct ssif_info *ssif_info = send_info;
+
+	ssif_info->intf = intf;
+
+	return 0;
+}
+
+#define MAX_SSIF_BMCS 4
+
+static unsigned short addr[MAX_SSIF_BMCS];
+static int num_addrs;
+module_param_array(addr, ushort, &num_addrs, 0);
+MODULE_PARM_DESC(addr, "The addresses to scan for IPMI BMCs on the SSIFs.");
+
+static char *adapter_name[MAX_SSIF_BMCS];
+static int num_adapter_names;
+module_param_array(adapter_name, charp, &num_adapter_names, 0);
+MODULE_PARM_DESC(adapter_name, "The string name of the I2C device that has the BMC.  By default all devices are scanned.");
+
+static int slave_addrs[MAX_SSIF_BMCS];
+static int num_slave_addrs;
+module_param_array(slave_addrs, int, &num_slave_addrs, 0);
+MODULE_PARM_DESC(slave_addrs,
+		 "The default IPMB slave address for the controller.");
+
+static bool alerts_broken;
+module_param(alerts_broken, bool, 0);
+MODULE_PARM_DESC(alerts_broken, "Don't enable alerts for the controller.");
+
+/*
+ * Bit 0 enables message debugging, bit 1 enables state debugging, and
+ * bit 2 enables timing debugging.  This is an array indexed by
+ * interface number.
+ */
+static int dbg[MAX_SSIF_BMCS];
+static int num_dbg;
+module_param_array(dbg, int, &num_dbg, 0);
+MODULE_PARM_DESC(dbg, "Turn on debugging.");
+
+static bool ssif_dbg_probe;
+module_param_named(dbg_probe, ssif_dbg_probe, bool, 0);
+MODULE_PARM_DESC(dbg_probe, "Enable debugging of probing of adapters.");
+
+static bool ssif_tryacpi = true;
+module_param_named(tryacpi, ssif_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the default scan of the interfaces identified via ACPI");
+
+static bool ssif_trydmi = true;
+module_param_named(trydmi, ssif_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the default scan of the interfaces identified via DMI (SMBIOS)");
+
+static DEFINE_MUTEX(ssif_infos_mutex);
+static LIST_HEAD(ssif_infos);
+
+static int ssif_remove(struct i2c_client *client)
+{
+	struct ssif_info *ssif_info = i2c_get_clientdata(client);
+	struct ssif_addr_info *addr_info;
+	int rv;
+
+	if (!ssif_info)
+		return 0;
+
+	/*
+	 * After this point, we won't deliver anything asynchronously
+	 * to the message handler.  We can unregister ourself.
+	 */
+	rv = ipmi_unregister_smi(ssif_info->intf);
+	if (rv) {
+		pr_err(PFX "Unable to unregister device: errno=%d\n", rv);
+		return rv;
+	}
+	ssif_info->intf = NULL;
+
+	/* make sure the driver is not looking for flags any more. */
+	while (ssif_info->ssif_state != SSIF_NORMAL)
+		schedule_timeout(1);
+
+	ssif_info->stopping = true;
+	del_timer_sync(&ssif_info->retry_timer);
+	if (ssif_info->thread) {
+		complete(&ssif_info->wake_thread);
+		kthread_stop(ssif_info->thread);
+	}
+
+	list_for_each_entry(addr_info, &ssif_infos, link) {
+		if (addr_info->client == client) {
+			addr_info->client = NULL;
+			break;
+		}
+	}
+
+	/*
+	 * No message can be outstanding now, we have removed the
+	 * upper layer and it permitted us to do so.
+	 */
+	kfree(ssif_info);
+	return 0;
+}
+
+static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
+		  int *resp_len, unsigned char *resp)
+{
+	int retry_cnt;
+	int ret;
+
+	retry_cnt = SSIF_SEND_RETRIES;
+ retry1:
+	ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg);
+	if (ret) {
+		retry_cnt--;
+		if (retry_cnt > 0)
+			goto retry1;
+		return -ENODEV;
+	}
+
+	ret = -ENODEV;
+	retry_cnt = SSIF_RECV_RETRIES;
+	while (retry_cnt > 0) {
+		ret = i2c_smbus_read_block_data(client, SSIF_IPMI_RESPONSE,
+						resp);
+		if (ret > 0)
+			break;
+		msleep(SSIF_MSG_MSEC);
+		retry_cnt--;
+		if (retry_cnt <= 0)
+			break;
+	}
+
+	if (ret > 0) {
+		/* Validate that the response is correct. */
+		if (ret < 3 ||
+		    (resp[0] != (msg[0] | (1 << 2))) ||
+		    (resp[1] != msg[1]))
+			ret = -EINVAL;
+		else {
+			*resp_len = ret;
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
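+
+/*
+ * Framing sketch: byte 0 of a request is (netfn << 2) | lun, and the
+ * matching response carries the netfn with its low bit set, which is why
+ * do_cmd() checks resp[0] == (msg[0] | (1 << 2)).  For Get Device ID
+ * (netfn 0x06, cmd 0x01):
+ *
+ *	msg[0] = IPMI_NETFN_APP_REQUEST << 2;	// 0x18
+ *	msg[1] = IPMI_GET_DEVICE_ID_CMD;	// 0x01
+ *	// expect resp[0] == 0x1c, resp[1] == 0x01, resp[2] = completion code
+ */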
+
+static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+	unsigned char *resp;
+	unsigned char msg[3];
+	int           rv;
+	int           len;
+
+	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+	if (!resp)
+		return -ENOMEM;
+
+	/* Do a Get Device ID command, since it is required. */
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_GET_DEVICE_ID_CMD;
+	rv = do_cmd(client, 2, msg, &len, resp);
+	if (rv)
+		rv = -ENODEV;
+	else
+		strlcpy(info->type, DEVICE_NAME, I2C_NAME_SIZE);
+	kfree(resp);
+	return rv;
+}
+
+static int smi_type_proc_show(struct seq_file *m, void *v)
+{
+	seq_puts(m, "ssif\n");
+
+	return 0;
+}
+
+static int smi_type_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, smi_type_proc_show, inode->i_private);
+}
+
+static const struct file_operations smi_type_proc_ops = {
+	.open		= smi_type_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int smi_stats_proc_show(struct seq_file *m, void *v)
+{
+	struct ssif_info *ssif_info = m->private;
+
+	seq_printf(m, "sent_messages:          %u\n",
+		   ssif_get_stat(ssif_info, sent_messages));
+	seq_printf(m, "sent_messages_parts:    %u\n",
+		   ssif_get_stat(ssif_info, sent_messages_parts));
+	seq_printf(m, "send_retries:           %u\n",
+		   ssif_get_stat(ssif_info, send_retries));
+	seq_printf(m, "send_errors:            %u\n",
+		   ssif_get_stat(ssif_info, send_errors));
+	seq_printf(m, "received_messages:      %u\n",
+		   ssif_get_stat(ssif_info, received_messages));
+	seq_printf(m, "received_message_parts: %u\n",
+		   ssif_get_stat(ssif_info, received_message_parts));
+	seq_printf(m, "receive_retries:        %u\n",
+		   ssif_get_stat(ssif_info, receive_retries));
+	seq_printf(m, "receive_errors:         %u\n",
+		   ssif_get_stat(ssif_info, receive_errors));
+	seq_printf(m, "flag_fetches:           %u\n",
+		   ssif_get_stat(ssif_info, flag_fetches));
+	seq_printf(m, "hosed:                  %u\n",
+		   ssif_get_stat(ssif_info, hosed));
+	seq_printf(m, "events:                 %u\n",
+		   ssif_get_stat(ssif_info, events));
+	seq_printf(m, "watchdog_pretimeouts:   %u\n",
+		   ssif_get_stat(ssif_info, watchdog_pretimeouts));
+	seq_printf(m, "alerts:                 %u\n",
+		   ssif_get_stat(ssif_info, alerts));
+	return 0;
+}
+
+static int smi_stats_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, smi_stats_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations smi_stats_proc_ops = {
+	.open		= smi_stats_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
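+/*
+ * Compare two strings while skipping any whitespace in either; returns
+ * -1, 0 or 1 in the style of strcmp().  Note that the walk stops at the
+ * first NUL, so a string that is a prefix of the other also compares
+ * equal; that is sufficient for the adapter-name matching done here.
+ */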
+static int strcmp_nospace(char *s1, char *s2)
+{
+	while (*s1 && *s2) {
+		while (isspace(*s1))
+			s1++;
+		while (isspace(*s2))
+			s2++;
+		if (*s1 > *s2)
+			return 1;
+		if (*s1 < *s2)
+			return -1;
+		s1++;
+		s2++;
+	}
+	return 0;
+}
+
+static struct ssif_addr_info *ssif_info_find(unsigned short addr,
+					     char *adapter_name,
+					     bool match_null_name)
+{
+	struct ssif_addr_info *info, *found = NULL;
+
+restart:
+	list_for_each_entry(info, &ssif_infos, link) {
+		if (info->binfo.addr == addr) {
+			if (info->adapter_name || adapter_name) {
+				if (!info->adapter_name != !adapter_name) {
+					/* One is NULL and one is not */
+					continue;
+				}
+				if (adapter_name &&
+				    strcmp_nospace(info->adapter_name,
+						   adapter_name))
+					/* Names do not match */
+					continue;
+			}
+			found = info;
+			break;
+		}
+	}
+
+	if (!found && match_null_name) {
+		/* Try to get an exact match first, then try with a NULL name */
+		adapter_name = NULL;
+		match_null_name = false;
+		goto restart;
+	}
+
+	return found;
+}
+
+static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
+{
+#ifdef CONFIG_ACPI
+	acpi_handle acpi_handle;
+
+	acpi_handle = ACPI_HANDLE(dev);
+	if (acpi_handle) {
+		ssif_info->addr_source = SI_ACPI;
+		ssif_info->addr_info.acpi_info.acpi_handle = acpi_handle;
+		return true;
+	}
+#endif
+	return false;
+}
+
+static int find_slave_address(struct i2c_client *client, int slave_addr)
+{
+#ifdef CONFIG_IPMI_DMI_DECODE
+	if (!slave_addr)
+		slave_addr = ipmi_dmi_get_slave_addr(
+			IPMI_DMI_TYPE_SSIF,
+			i2c_adapter_id(client->adapter),
+			client->addr);
+#endif
+
+	return slave_addr;
+}
+
+/*
+ * Global enables we care about.
+ */
+#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
+			     IPMI_BMC_EVT_MSG_INTR)
+
+static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+	unsigned char     msg[3];
+	unsigned char     *resp;
+	struct ssif_info   *ssif_info;
+	int               rv = 0;
+	int               len;
+	int               i;
+	u8		  slave_addr = 0;
+	struct ssif_addr_info *addr_info = NULL;
+
+	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+	if (!resp)
+		return -ENOMEM;
+
+	ssif_info = kzalloc(sizeof(*ssif_info), GFP_KERNEL);
+	if (!ssif_info) {
+		kfree(resp);
+		return -ENOMEM;
+	}
+
+	if (!check_acpi(ssif_info, &client->dev)) {
+		addr_info = ssif_info_find(client->addr, client->adapter->name,
+					   true);
+		if (!addr_info) {
+			/*
+			 * Must have come in through sysfs (e.g. a user
+			 * writing the device to the i2c adapter's
+			 * new_device file).
+			 */
+			ssif_info->addr_source = SI_HOTMOD;
+		} else {
+			ssif_info->addr_source = addr_info->addr_src;
+			ssif_info->ssif_debug = addr_info->debug;
+			ssif_info->addr_info = addr_info->addr_info;
+			addr_info->client = client;
+			slave_addr = addr_info->slave_addr;
+		}
+	}
+
+	slave_addr = find_slave_address(client, slave_addr);
+
+	pr_info(PFX "Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n",
+	       ipmi_addr_src_to_str(ssif_info->addr_source),
+	       client->addr, client->adapter->name, slave_addr);
+
+	/*
+	 * Do a Get Device ID command, since it comes back with some
+	 * useful info.
+	 */
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_GET_DEVICE_ID_CMD;
+	rv = do_cmd(client, 2, msg, &len, resp);
+	if (rv)
+		goto out;
+
+	rv = ipmi_demangle_device_id(resp, len, &ssif_info->device_id);
+	if (rv)
+		goto out;
+
+	ssif_info->client = client;
+	i2c_set_clientdata(client, ssif_info);
+
+	/* Now check for system interface capabilities */
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD;
+	msg[2] = 0; /* SSIF */
+	rv = do_cmd(client, 3, msg, &len, resp);
+	if (!rv && (len >= 3) && (resp[2] == 0)) {
+		if (len < 7) {
+			if (ssif_dbg_probe)
+				pr_info(PFX "SSIF info too short: %d\n", len);
+			goto no_support;
+		}
+
+		/* Got a good SSIF response, handle it. */
+		ssif_info->max_xmit_msg_size = resp[5];
+		ssif_info->max_recv_msg_size = resp[6];
+		ssif_info->multi_support = (resp[4] >> 6) & 0x3;
+		ssif_info->supports_pec = (resp[4] >> 3) & 0x1;
+
+		/* Sanitize the data */
+		switch (ssif_info->multi_support) {
+		case SSIF_NO_MULTI:
+			if (ssif_info->max_xmit_msg_size > 32)
+				ssif_info->max_xmit_msg_size = 32;
+			if (ssif_info->max_recv_msg_size > 32)
+				ssif_info->max_recv_msg_size = 32;
+			break;
+
+		case SSIF_MULTI_2_PART:
+			if (ssif_info->max_xmit_msg_size > 63)
+				ssif_info->max_xmit_msg_size = 63;
+			if (ssif_info->max_recv_msg_size > 62)
+				ssif_info->max_recv_msg_size = 62;
+			break;
+
+		case SSIF_MULTI_n_PART:
+			/*
+			 * The specification is rather confusing at
+			 * this point, but I think I understand what
+			 * is meant.  At least I have a workable
+			 * solution.  With multi-part messages, you
+			 * cannot send a message that is a multiple of
+			 * 32-bytes in length, because the start and
+			 * middle messages are 32-bytes and the end
+			 * message must be at least one byte.  You
+			 * can't fudge on an extra byte, that would
+			 * screw up things like fru data writes.  So
+			 * we limit the length to 63 bytes.  That way
+			 * a 32-byte message gets sent as a single
+			 * part.  A larger message will be a 32-byte
+			 * start and the next message is always going
+			 * to be 1-31 bytes in length.  Not ideal, but
+			 * it should work.
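+			 *
+			 * Worked example: with the 63-byte cap, a
+			 * 40-byte message goes out as a 32-byte start
+			 * part plus an 8-byte end part, while a
+			 * 32-byte message is sent as a single part.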
+			 */
+			if (ssif_info->max_xmit_msg_size > 63)
+				ssif_info->max_xmit_msg_size = 63;
+			break;
+
+		default:
+			/* Data is not sane, just give up. */
+			goto no_support;
+		}
+	} else {
+ no_support:
+		/* Assume no multi-part or PEC support */
+		pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n",
+		       rv, len, resp[2]);
+
+		ssif_info->max_xmit_msg_size = 32;
+		ssif_info->max_recv_msg_size = 32;
+		ssif_info->multi_support = SSIF_NO_MULTI;
+		ssif_info->supports_pec = 0;
+	}
+
+	/* Make sure the NMI timeout is cleared. */
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+	msg[2] = WDT_PRE_TIMEOUT_INT;
+	rv = do_cmd(client, 3, msg, &len, resp);
+	if (rv || (len < 3) || (resp[2] != 0))
+		pr_warn(PFX "Unable to clear message flags: %d %d %2.2x\n",
+			rv, len, resp[2]);
+
+	/* Attempt to enable the event buffer. */
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+	rv = do_cmd(client, 2, msg, &len, resp);
+	if (rv || (len < 4) || (resp[2] != 0)) {
+		pr_warn(PFX "Error getting global enables: %d %d %2.2x\n",
+			rv, len, resp[2]);
+		rv = 0; /* Not fatal */
+		goto found;
+	}
+
+	ssif_info->global_enables = resp[3];
+
+	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
+		ssif_info->has_event_buffer = true;
+		/* buffer is already enabled, nothing to do. */
+		goto found;
+	}
+
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+	msg[2] = ssif_info->global_enables | IPMI_BMC_EVT_MSG_BUFF;
+	rv = do_cmd(client, 3, msg, &len, resp);
+	if (rv || (len < 2)) {
+		pr_warn(PFX "Error setting global enables: %d %d %2.2x\n",
+			rv, len, resp[2]);
+		rv = 0; /* Not fatal */
+		goto found;
+	}
+
+	if (resp[2] == 0) {
+		/* A successful return means the event buffer is supported. */
+		ssif_info->has_event_buffer = true;
+		ssif_info->global_enables |= IPMI_BMC_EVT_MSG_BUFF;
+	}
+
+	/* Some systems don't behave well if you enable alerts. */
+	if (alerts_broken)
+		goto found;
+
+	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+	msg[2] = ssif_info->global_enables | IPMI_BMC_RCV_MSG_INTR;
+	rv = do_cmd(client, 3, msg, &len, resp);
+	if (rv || (len < 2)) {
+		pr_warn(PFX "Error setting global enables: %d %d %2.2x\n",
+			rv, len, resp[2]);
+		rv = 0; /* Not fatal */
+		goto found;
+	}
+
+	if (resp[2] == 0) {
+		/* A successful return means the alert is supported. */
+		ssif_info->supports_alert = true;
+		ssif_info->global_enables |= IPMI_BMC_RCV_MSG_INTR;
+	}
+
+ found:
+	ssif_info->intf_num = atomic_inc_return(&next_intf);
+
+	if (ssif_dbg_probe) {
+		pr_info("ssif_probe: i2c_probe found device at i2c address %x\n",
+			client->addr);
+	}
+
+	spin_lock_init(&ssif_info->lock);
+	ssif_info->ssif_state = SSIF_NORMAL;
+	setup_timer(&ssif_info->retry_timer, retry_timeout,
+		    (unsigned long)ssif_info);
+
+	for (i = 0; i < SSIF_NUM_STATS; i++)
+		atomic_set(&ssif_info->stats[i], 0);
+
+	if (ssif_info->supports_pec)
+		ssif_info->client->flags |= I2C_CLIENT_PEC;
+
+	ssif_info->handlers.owner = THIS_MODULE;
+	ssif_info->handlers.start_processing = ssif_start_processing;
+	ssif_info->handlers.get_smi_info = get_smi_info;
+	ssif_info->handlers.sender = sender;
+	ssif_info->handlers.request_events = request_events;
+	ssif_info->handlers.inc_usecount = inc_usecount;
+	ssif_info->handlers.dec_usecount = dec_usecount;
+
+	{
+		unsigned int thread_num;
+
+		thread_num = ((i2c_adapter_id(ssif_info->client->adapter)
+			       << 8) |
+			      ssif_info->client->addr);
+		init_completion(&ssif_info->wake_thread);
+		ssif_info->thread = kthread_run(ipmi_ssif_thread, ssif_info,
+					       "kssif%4.4x", thread_num);
+		if (IS_ERR(ssif_info->thread)) {
+			rv = PTR_ERR(ssif_info->thread);
+			dev_notice(&ssif_info->client->dev,
+				   "Could not start kernel thread: error %d\n",
+				   rv);
+			goto out;
+		}
+	}
+
+	rv = ipmi_register_smi(&ssif_info->handlers,
+			       ssif_info,
+			       &ssif_info->device_id,
+			       &ssif_info->client->dev,
+			       slave_addr);
+	if (rv) {
+		pr_err(PFX "Unable to register device: error %d\n", rv);
+		goto out;
+	}
+
+	rv = ipmi_smi_add_proc_entry(ssif_info->intf, "type",
+				     &smi_type_proc_ops,
+				     ssif_info);
+	if (rv) {
+		pr_err(PFX "Unable to create proc entry: %d\n", rv);
+		goto out_err_unreg;
+	}
+
+	rv = ipmi_smi_add_proc_entry(ssif_info->intf, "ssif_stats",
+				     &smi_stats_proc_ops,
+				     ssif_info);
+	if (rv) {
+		pr_err(PFX "Unable to create proc entry: %d\n", rv);
+		goto out_err_unreg;
+	}
+
+ out:
+	if (rv) {
+		if (addr_info)
+			addr_info->client = NULL;
+
+		dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv);
+		kfree(ssif_info);
+	}
+	kfree(resp);
+	return rv;
+
+ out_err_unreg:
+	ipmi_unregister_smi(ssif_info->intf);
+	goto out;
+}
+
+static int ssif_adapter_handler(struct device *adev, void *opaque)
+{
+	struct ssif_addr_info *addr_info = opaque;
+
+	if (adev->type != &i2c_adapter_type)
+		return 0;
+
+	addr_info->added_client = i2c_new_device(to_i2c_adapter(adev),
+						 &addr_info->binfo);
+
+	if (!addr_info->adapter_name)
+		return 1; /* Only try the first I2C adapter by default. */
+	return 0;
+}
+
+static int new_ssif_client(int addr, char *adapter_name,
+			   int debug, int slave_addr,
+			   enum ipmi_addr_src addr_src,
+			   struct device *dev)
+{
+	struct ssif_addr_info *addr_info;
+	int rv = 0;
+
+	mutex_lock(&ssif_infos_mutex);
+	if (ssif_info_find(addr, adapter_name, false)) {
+		rv = -EEXIST;
+		goto out_unlock;
+	}
+
+	addr_info = kzalloc(sizeof(*addr_info), GFP_KERNEL);
+	if (!addr_info) {
+		rv = -ENOMEM;
+		goto out_unlock;
+	}
+
+	if (adapter_name) {
+		addr_info->adapter_name = kstrdup(adapter_name, GFP_KERNEL);
+		if (!addr_info->adapter_name) {
+			kfree(addr_info);
+			rv = -ENOMEM;
+			goto out_unlock;
+		}
+	}
+
+	strncpy(addr_info->binfo.type, DEVICE_NAME,
+		sizeof(addr_info->binfo.type));
+	addr_info->binfo.addr = addr;
+	addr_info->binfo.platform_data = addr_info;
+	addr_info->debug = debug;
+	addr_info->slave_addr = slave_addr;
+	addr_info->addr_src = addr_src;
+	addr_info->dev = dev;
+
+	if (dev)
+		dev_set_drvdata(dev, addr_info);
+
+	list_add_tail(&addr_info->link, &ssif_infos);
+
+	if (initialized)
+		i2c_for_each_dev(addr_info, ssif_adapter_handler);
+	/* Otherwise address list will get it */
+
+out_unlock:
+	mutex_unlock(&ssif_infos_mutex);
+	return rv;
+}
+
+static void free_ssif_clients(void)
+{
+	struct ssif_addr_info *info, *tmp;
+
+	mutex_lock(&ssif_infos_mutex);
+	list_for_each_entry_safe(info, tmp, &ssif_infos, link) {
+		list_del(&info->link);
+		kfree(info->adapter_name);
+		kfree(info);
+	}
+	mutex_unlock(&ssif_infos_mutex);
+}
+
+static unsigned short *ssif_address_list(void)
+{
+	struct ssif_addr_info *info;
+	unsigned int count = 0, i;
+	unsigned short *address_list;
+
+	list_for_each_entry(info, &ssif_infos, link)
+		count++;
+
+	address_list = kcalloc(count + 1, sizeof(*address_list), GFP_KERNEL);
+	if (!address_list)
+		return NULL;
+
+	i = 0;
+	list_for_each_entry(info, &ssif_infos, link) {
+		unsigned short addr = info->binfo.addr;
+		int j;
+
+		for (j = 0; j < i; j++) {
+			if (address_list[j] == addr)
+				goto skip_addr;
+		}
+		address_list[i++] = addr;
+skip_addr:
+		/* Skip duplicates without leaving a zero hole in the list. */
+		continue;
+	}
+	address_list[i] = I2C_CLIENT_END;
+
+	return address_list;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ssif_acpi_match[] = {
+	{ "IPI0001", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, ssif_acpi_match);
+
+/*
+ * Once we get an ACPI failure, we don't try any more, because we go
+ * through the tables sequentially.  Once we don't find a table, there
+ * are no more.
+ */
+static int acpi_failure;
+
+/*
+ * Defined in the IPMI 2.0 spec.
+ */
+struct SPMITable {
+	s8	Signature[4];
+	u32	Length;
+	u8	Revision;
+	u8	Checksum;
+	s8	OEMID[6];
+	s8	OEMTableID[8];
+	s8	OEMRevision[4];
+	s8	CreatorID[4];
+	s8	CreatorRevision[4];
+	u8	InterfaceType;
+	u8	IPMIlegacy;
+	s16	SpecificationRevision;
+
+	/*
+	 * Bit 0 - SCI interrupt supported
+	 * Bit 1 - I/O APIC/SAPIC
+	 */
+	u8	InterruptType;
+
+	/*
+	 * If bit 0 of InterruptType is set, then this is the SCI
+	 * interrupt in the GPEx_STS register.
+	 */
+	u8	GPE;
+
+	s16	Reserved;
+
+	/*
+	 * If bit 1 of InterruptType is set, then this is the I/O
+	 * APIC/SAPIC interrupt.
+	 */
+	u32	GlobalSystemInterrupt;
+
+	/* The actual register address. */
+	struct acpi_generic_address addr;
+
+	u8	UID[4];
+
+	s8      spmi_id[1]; /* A '\0' terminated array starts here. */
+};
+
+static int try_init_spmi(struct SPMITable *spmi)
+{
+	unsigned short myaddr;
+
+	if (num_addrs >= MAX_SSIF_BMCS)
+		return -1;
+
+	if (spmi->IPMIlegacy != 1) {
+		pr_warn("IPMI: Bad SPMI legacy: %d\n", spmi->IPMIlegacy);
+		return -ENODEV;
+	}
+
+	if (spmi->InterfaceType != 4)
+		return -ENODEV;
+
+	if (spmi->addr.space_id != ACPI_ADR_SPACE_SMBUS) {
+		pr_warn(PFX "Invalid ACPI SSIF I/O Address type: %d\n",
+			spmi->addr.space_id);
+		return -EIO;
+	}
+
+	myaddr = spmi->addr.address & 0x7f;
+
+	return new_ssif_client(myaddr, NULL, 0, 0, SI_SPMI, NULL);
+}
+
+static void spmi_find_bmc(void)
+{
+	acpi_status      status;
+	struct SPMITable *spmi;
+	int              i;
+
+	if (acpi_disabled)
+		return;
+
+	if (acpi_failure)
+		return;
+
+	for (i = 0; ; i++) {
+		status = acpi_get_table(ACPI_SIG_SPMI, i+1,
+					(struct acpi_table_header **)&spmi);
+		if (status != AE_OK)
+			return;
+
+		try_init_spmi(spmi);
+	}
+}
+#else
+static void spmi_find_bmc(void) { }
+#endif
+
+#ifdef CONFIG_DMI
+static int dmi_ipmi_probe(struct platform_device *pdev)
+{
+	u8 type, slave_addr = 0;
+	u16 i2c_addr;
+	int rv;
+
+	if (!ssif_trydmi)
+		return -ENODEV;
+
+	rv = device_property_read_u8(&pdev->dev, "ipmi-type", &type);
+	if (rv)
+		return -ENODEV;
+
+	if (type != IPMI_DMI_TYPE_SSIF)
+		return -ENODEV;
+
+	rv = device_property_read_u16(&pdev->dev, "i2c-addr", &i2c_addr);
+	if (rv) {
+		dev_warn(&pdev->dev, PFX "No i2c-addr property\n");
+		return -ENODEV;
+	}
+
+	rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr);
+	if (rv)
+		dev_warn(&pdev->dev, "device has no slave-addr property");
+
+	return new_ssif_client(i2c_addr, NULL, 0,
+			       slave_addr, SI_SMBIOS, &pdev->dev);
+}
+#else
+static int dmi_ipmi_probe(struct platform_device *pdev)
+{
+	return -ENODEV;
+}
+#endif
+
+static const struct i2c_device_id ssif_id[] = {
+	{ DEVICE_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, ssif_id);
+
+static struct i2c_driver ssif_i2c_driver = {
+	.class		= I2C_CLASS_HWMON,
+	.driver		= {
+		.name			= DEVICE_NAME
+	},
+	.probe		= ssif_probe,
+	.remove		= ssif_remove,
+	.alert		= ssif_alert,
+	.id_table	= ssif_id,
+	.detect		= ssif_detect
+};
+
+static int ssif_platform_probe(struct platform_device *dev)
+{
+	return dmi_ipmi_probe(dev);
+}
+
+static int ssif_platform_remove(struct platform_device *dev)
+{
+	struct ssif_addr_info *addr_info = dev_get_drvdata(&dev->dev);
+
+	if (!addr_info)
+		return 0;
+
+	mutex_lock(&ssif_infos_mutex);
+	if (addr_info->added_client)
+		i2c_unregister_device(addr_info->added_client);
+
+	list_del(&addr_info->link);
+	kfree(addr_info);
+	mutex_unlock(&ssif_infos_mutex);
+	return 0;
+}
+
+static struct platform_driver ipmi_driver = {
+	.driver = {
+		.name = DEVICE_NAME,
+	},
+	.probe		= ssif_platform_probe,
+	.remove		= ssif_platform_remove,
+};
+
+static int init_ipmi_ssif(void)
+{
+	int i;
+	int rv;
+
+	if (initialized)
+		return 0;
+
+	pr_info("IPMI SSIF Interface driver\n");
+
+	/* build list for i2c from addr list */
+	for (i = 0; i < num_addrs; i++) {
+		rv = new_ssif_client(addr[i], adapter_name[i],
+				     dbg[i], slave_addrs[i],
+				     SI_HARDCODED, NULL);
+		if (rv)
+			pr_err(PFX
+			       "Couldn't add hardcoded device at addr 0x%x\n",
+			       addr[i]);
+	}
+
+	if (ssif_tryacpi)
+		ssif_i2c_driver.driver.acpi_match_table	=
+			ACPI_PTR(ssif_acpi_match);
+
+	if (ssif_tryacpi)
+		spmi_find_bmc();
+
+	if (ssif_trydmi) {
+		rv = platform_driver_register(&ipmi_driver);
+		if (rv)
+			pr_err(PFX "Unable to register driver: %d\n", rv);
+	}
+
+	ssif_i2c_driver.address_list = ssif_address_list();
+
+	rv = i2c_add_driver(&ssif_i2c_driver);
+	if (!rv)
+		initialized = true;
+
+	return rv;
+}
+module_init(init_ipmi_ssif);
+
+static void cleanup_ipmi_ssif(void)
+{
+	if (!initialized)
+		return;
+
+	initialized = false;
+
+	i2c_del_driver(&ssif_i2c_driver);
+
+	platform_driver_unregister(&ipmi_driver);
+
+	free_ssif_clients();
+}
+module_exit(cleanup_ipmi_ssif);
+
+MODULE_ALIAS("platform:dmi-ipmi-ssif");
+MODULE_AUTHOR("Todd C Davis <todd.c.davis@intel.com>, Corey Minyard <minyard@acm.org>");
+MODULE_DESCRIPTION("IPMI driver for management controllers on a SMBus");
+MODULE_LICENSE("GPL");
diff --git a/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_watchdog.c b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_watchdog.c
new file mode 100644
index 0000000..3d832d0
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/char/ipmi/ipmi_watchdog.c
@@ -0,0 +1,1393 @@
+/*
+ * ipmi_watchdog.c
+ *
+ * A watchdog timer based upon the IPMI interface.
+ *
+ * Author: MontaVista Software, Inc.
+ *         Corey Minyard <minyard@mvista.com>
+ *         source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include <linux/mutex.h>
+#include <linux/watchdog.h>
+#include <linux/miscdevice.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/kdebug.h>
+#include <linux/rwsem.h>
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <linux/notifier.h>
+#include <linux/nmi.h>
+#include <linux/reboot.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/sched/signal.h>
+
+#ifdef CONFIG_X86
+/*
+ * This is ugly, but I've determined that x86 is the only architecture
+ * that can reasonably support the IPMI NMI watchdog timeout at this
+ * time.  If another architecture adds this capability somehow, it
+ * will have to be a somewhat different mechanism and I have no idea
+ * how it will work.  So in the unlikely event that another
+ * architecture supports this, we can figure out a good generic
+ * mechanism for it at that time.
+ */
+#include <asm/kdebug.h>
+#include <asm/nmi.h>
+#define HAVE_DIE_NMI
+#endif
+
+#define	PFX "IPMI Watchdog: "
+
+/*
+ * The IPMI command/response information for the watchdog timer.
+ */
+
+/* values for byte 1 of the set command, byte 2 of the get response. */
+#define WDOG_DONT_LOG		(1 << 7)
+#define WDOG_DONT_STOP_ON_SET	(1 << 6)
+#define WDOG_SET_TIMER_USE(byte, use) \
+	byte = ((byte) & 0xf8) | ((use) & 0x7)
+#define WDOG_GET_TIMER_USE(byte) ((byte) & 0x7)
+#define WDOG_TIMER_USE_BIOS_FRB2	1
+#define WDOG_TIMER_USE_BIOS_POST	2
+#define WDOG_TIMER_USE_OS_LOAD		3
+#define WDOG_TIMER_USE_SMS_OS		4
+#define WDOG_TIMER_USE_OEM		5
+
+/* values for byte 2 of the set command, byte 3 of the get response. */
+#define WDOG_SET_PRETIMEOUT_ACT(byte, use) \
+	byte = ((byte) & 0x8f) | (((use) & 0x7) << 4)
+#define WDOG_GET_PRETIMEOUT_ACT(byte) (((byte) >> 4) & 0x7)
+#define WDOG_PRETIMEOUT_NONE		0
+#define WDOG_PRETIMEOUT_SMI		1
+#define WDOG_PRETIMEOUT_NMI		2
+#define WDOG_PRETIMEOUT_MSG_INT		3
+
+/* Operations that can be performed on a pretimeout. */
+#define WDOG_PREOP_NONE		0
+#define WDOG_PREOP_PANIC	1
+/* Cause data to be available to read.  Doesn't work in NMI mode. */
+#define WDOG_PREOP_GIVE_DATA	2
+
+/* Actions to perform on a full timeout. */
+#define WDOG_SET_TIMEOUT_ACT(byte, use) \
+	byte = ((byte) & 0xf8) | ((use) & 0x7)
+#define WDOG_GET_TIMEOUT_ACT(byte) ((byte) & 0x7)
+#define WDOG_TIMEOUT_NONE		0
+#define WDOG_TIMEOUT_RESET		1
+#define WDOG_TIMEOUT_POWER_DOWN		2
+#define WDOG_TIMEOUT_POWER_CYCLE	3
+
+/*
+ * Byte 3 of the get command, byte 4 of the get response is the
+ * pre-timeout in seconds.
+ */
+
+/* Bits for setting byte 4 of the set command, byte 5 of the get response. */
+#define WDOG_EXPIRE_CLEAR_BIOS_FRB2	(1 << 1)
+#define WDOG_EXPIRE_CLEAR_BIOS_POST	(1 << 2)
+#define WDOG_EXPIRE_CLEAR_OS_LOAD	(1 << 3)
+#define WDOG_EXPIRE_CLEAR_SMS_OS	(1 << 4)
+#define WDOG_EXPIRE_CLEAR_OEM		(1 << 5)
+
+/*
+ * Setting/getting the watchdog timer value.  This is for bytes 5 and
+ * 6 (the timeout time) of the set command, and bytes 6 and 7 (the
+ * timeout time) and 8 and 9 (the current countdown value) of the
+ * response.  The timeout value is given in seconds (in the command it
+ * is 100ms intervals).
+ */
+#define WDOG_SET_TIMEOUT(byte1, byte2, val) \
+	(byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8)
+#define WDOG_GET_TIMEOUT(byte1, byte2) \
+	(((byte1) | ((byte2) << 8)) / 10)
+
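+/*
+ * Worked example: a 10 second timeout is sent as 100 (10 * 10) 100ms
+ * ticks, so WDOG_SET_TIMEOUT(b1, b2, 10) yields b1 = 0x64, b2 = 0x00,
+ * and WDOG_GET_TIMEOUT(0x64, 0x00) recovers 10.
+ */
+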
+#define IPMI_WDOG_RESET_TIMER		0x22
+#define IPMI_WDOG_SET_TIMER		0x24
+#define IPMI_WDOG_GET_TIMER		0x25
+
+#define IPMI_WDOG_TIMER_NOT_INIT_RESP	0x80
+
+static DEFINE_MUTEX(ipmi_watchdog_mutex);
+static bool nowayout = WATCHDOG_NOWAYOUT;
+
+static ipmi_user_t watchdog_user;
+static int watchdog_ifnum;
+
+/* Default the timeout to 10 seconds. */
+static int timeout = 10;
+
+/* The pre-timeout is disabled by default. */
+static int pretimeout;
+
+/* Default timeout to set on panic */
+static int panic_wdt_timeout = 255;
+
+/* Default action is to reset the board on a timeout. */
+static unsigned char action_val = WDOG_TIMEOUT_RESET;
+
+static char action[16] = "reset";
+
+static unsigned char preaction_val = WDOG_PRETIMEOUT_NONE;
+
+static char preaction[16] = "pre_none";
+
+static unsigned char preop_val = WDOG_PREOP_NONE;
+
+static char preop[16] = "preop_none";
+static DEFINE_SPINLOCK(ipmi_read_lock);
+static char data_to_read;
+static DECLARE_WAIT_QUEUE_HEAD(read_q);
+static struct fasync_struct *fasync_q;
+static char pretimeout_since_last_heartbeat;
+static char expect_close;
+
+static int ifnum_to_use = -1;
+
+/* Parameters to ipmi_set_timeout */
+#define IPMI_SET_TIMEOUT_NO_HB			0
+#define IPMI_SET_TIMEOUT_HB_IF_NECESSARY	1
+#define IPMI_SET_TIMEOUT_FORCE_HB		2
+
+static int ipmi_set_timeout(int do_heartbeat);
+static void ipmi_register_watchdog(int ipmi_intf);
+static void ipmi_unregister_watchdog(int ipmi_intf);
+
+/*
+ * If true, the driver will start running as soon as it is configured
+ * and ready.
+ */
+static int start_now;
+
+static int set_param_timeout(const char *val, const struct kernel_param *kp)
+{
+	char *endp;
+	int  l;
+	int  rv = 0;
+
+	if (!val)
+		return -EINVAL;
+	l = simple_strtoul(val, &endp, 0);
+	if (endp == val)
+		return -EINVAL;
+
+	*((int *)kp->arg) = l;
+	if (watchdog_user)
+		rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+	return rv;
+}
+
+static const struct kernel_param_ops param_ops_timeout = {
+	.set = set_param_timeout,
+	.get = param_get_int,
+};
+#define param_check_timeout param_check_int
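+
+/*
+ * Note: the custom "timeout" parameter type above means that
+ * module_param(v, timeout, ...) routes writes through set_param_timeout(),
+ * so updating e.g. /sys/module/ipmi_watchdog/parameters/timeout also
+ * pushes the new value to the BMC once watchdog_user is registered.
+ */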
+
+typedef int (*action_fn)(const char *intval, char *outval);
+
+static int action_op(const char *inval, char *outval);
+static int preaction_op(const char *inval, char *outval);
+static int preop_op(const char *inval, char *outval);
+static void check_parms(void);
+
+static int set_param_str(const char *val, const struct kernel_param *kp)
+{
+	action_fn  fn = (action_fn) kp->arg;
+	int        rv = 0;
+	char       valcp[16];
+	char       *s;
+
+	strncpy(valcp, val, 16);
+	valcp[15] = '\0';
+
+	s = strstrip(valcp);
+
+	rv = fn(s, NULL);
+	if (rv)
+		goto out;
+
+	check_parms();
+	if (watchdog_user)
+		rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+ out:
+	return rv;
+}
+
+static int get_param_str(char *buffer, const struct kernel_param *kp)
+{
+	action_fn fn = (action_fn) kp->arg;
+	int       rv;
+
+	rv = fn(NULL, buffer);
+	if (rv)
+		return rv;
+	return strlen(buffer);
+}
+
+
+static int set_param_wdog_ifnum(const char *val, const struct kernel_param *kp)
+{
+	int rv = param_set_int(val, kp);
+	if (rv)
+		return rv;
+	if ((ifnum_to_use < 0) || (ifnum_to_use == watchdog_ifnum))
+		return 0;
+
+	ipmi_unregister_watchdog(watchdog_ifnum);
+	ipmi_register_watchdog(ifnum_to_use);
+	return 0;
+}
+
+static const struct kernel_param_ops param_ops_wdog_ifnum = {
+	.set = set_param_wdog_ifnum,
+	.get = param_get_int,
+};
+
+#define param_check_wdog_ifnum param_check_int
+
+static const struct kernel_param_ops param_ops_str = {
+	.set = set_param_str,
+	.get = get_param_str,
+};
+
+module_param(ifnum_to_use, wdog_ifnum, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog "
+		 "timer.  Setting to -1 defaults to the first registered "
+		 "interface");
+
+module_param(timeout, timeout, 0644);
+MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
+
+module_param(pretimeout, timeout, 0644);
+MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
+
+module_param(panic_wdt_timeout, timeout, 0644);
+MODULE_PARM_DESC(timeout, "Timeout value on kernel panic in seconds.");
+
+module_param_cb(action, &param_ops_str, action_op, 0644);
+MODULE_PARM_DESC(action, "Timeout action. One of: "
+		 "reset, none, power_cycle, power_off.");
+
+module_param_cb(preaction, &param_ops_str, preaction_op, 0644);
+MODULE_PARM_DESC(preaction, "Pretimeout action.  One of: "
+		 "pre_none, pre_smi, pre_nmi, pre_int.");
+
+module_param_cb(preop, &param_ops_str, preop_op, 0644);
+MODULE_PARM_DESC(preop, "Pretimeout driver operation.  One of: "
+		 "preop_none, preop_panic, preop_give_data.");
+
+module_param(start_now, int, 0444);
+MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as "
+		 "soon as the driver is loaded.");
+
+module_param(nowayout, bool, 0644);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+		 "(default=CONFIG_WATCHDOG_NOWAYOUT)");
+
+/* Default state of the timer. */
+static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+
+/* If shutting down via IPMI, we ignore the heartbeat. */
+static int ipmi_ignore_heartbeat;
+
+/* Is someone using the watchdog?  Only one user is allowed. */
+static unsigned long ipmi_wdog_open;
+
+/*
+ * If set to 1, the heartbeat command will set the state to reset and
+ * start the timer.  The timer doesn't normally run when the driver is
+ * first opened, not until the heartbeat is set for the first time;
+ * this variable is used to accomplish that.
+ */
+static int ipmi_start_timer_on_heartbeat;
+
+/* IPMI version of the BMC. */
+static unsigned char ipmi_version_major;
+static unsigned char ipmi_version_minor;
+
+/* If a pretimeout occurs, this is used to allow only one panic to happen. */
+static atomic_t preop_panic_excl = ATOMIC_INIT(-1);
+
+#ifdef HAVE_DIE_NMI
+static int testing_nmi;
+static int nmi_handler_registered;
+#endif
+
+static int ipmi_heartbeat(void);
+
+/*
+ * We use a mutex to make sure that only one thing can send a set
+ * timeout at one time, because we only have one copy of the data.
+ * The mutex is claimed when the set_timeout is sent and freed
+ * when both messages are free.
+ */
+static atomic_t set_timeout_tofree = ATOMIC_INIT(0);
+static DEFINE_MUTEX(set_timeout_lock);
+static DECLARE_COMPLETION(set_timeout_wait);
+static void set_timeout_free_smi(struct ipmi_smi_msg *msg)
+{
+	if (atomic_dec_and_test(&set_timeout_tofree))
+		complete(&set_timeout_wait);
+}
+static void set_timeout_free_recv(struct ipmi_recv_msg *msg)
+{
+	if (atomic_dec_and_test(&set_timeout_tofree))
+		complete(&set_timeout_wait);
+}
+static struct ipmi_smi_msg set_timeout_smi_msg = {
+	.done = set_timeout_free_smi
+};
+static struct ipmi_recv_msg set_timeout_recv_msg = {
+	.done = set_timeout_free_recv
+};
+
+static int i_ipmi_set_timeout(struct ipmi_smi_msg  *smi_msg,
+			      struct ipmi_recv_msg *recv_msg,
+			      int                  *send_heartbeat_now)
+{
+	struct kernel_ipmi_msg            msg;
+	unsigned char                     data[6];
+	int                               rv;
+	struct ipmi_system_interface_addr addr;
+	int                               hbnow = 0;
+
+
+	/* These can be cleared as we are setting the timeout. */
+	pretimeout_since_last_heartbeat = 0;
+
+	data[0] = 0;
+	WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
+
+	if ((ipmi_version_major > 1)
+	    || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
+		/* This is an IPMI 1.5-only feature. */
+		data[0] |= WDOG_DONT_STOP_ON_SET;
+	} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+		/*
+		 * In ipmi 1.0, setting the timer stops the watchdog, we
+		 * need to start it back up again.
+		 */
+		hbnow = 1;
+	}
+
+	data[1] = 0;
+	WDOG_SET_TIMEOUT_ACT(data[1], ipmi_watchdog_state);
+	if ((pretimeout > 0) && (ipmi_watchdog_state != WDOG_TIMEOUT_NONE)) {
+		WDOG_SET_PRETIMEOUT_ACT(data[1], preaction_val);
+		data[2] = pretimeout;
+	} else {
+		WDOG_SET_PRETIMEOUT_ACT(data[1], WDOG_PRETIMEOUT_NONE);
+		data[2] = 0; /* No pretimeout. */
+	}
+	data[3] = 0;
+	WDOG_SET_TIMEOUT(data[4], data[5], timeout);
+
+	addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	addr.channel = IPMI_BMC_CHANNEL;
+	addr.lun = 0;
+
+	msg.netfn = 0x06;
+	msg.cmd = IPMI_WDOG_SET_TIMER;
+	msg.data = data;
+	msg.data_len = sizeof(data);
+	rv = ipmi_request_supply_msgs(watchdog_user,
+				      (struct ipmi_addr *) &addr,
+				      0,
+				      &msg,
+				      NULL,
+				      smi_msg,
+				      recv_msg,
+				      1);
+	if (rv) {
+		printk(KERN_WARNING PFX "set timeout error: %d\n",
+		       rv);
+	}
+
+	if (send_heartbeat_now)
+		*send_heartbeat_now = hbnow;
+
+	return rv;
+}
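+
+/*
+ * Sketch of the IPMI_WDOG_SET_TIMER body built above for the defaults
+ * (timeout = 10, action "reset", no pretimeout) on an IPMI 1.5+ BMC:
+ *
+ *	data[0] = 0x44;	// SMS/OS timer use | don't-stop-on-set
+ *	data[1] = 0x01;	// timeout action: hard reset
+ *	data[2] = 0x00;	// no pretimeout
+ *	data[3] = 0x00;	// no expiration flags cleared
+ *	data[4] = 0x64; data[5] = 0x00;	// 10s = 100 x 100ms ticks
+ */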
+
+static int ipmi_set_timeout(int do_heartbeat)
+{
+	int send_heartbeat_now;
+	int rv;
+
+
+	/* We can only send one of these at a time. */
+	mutex_lock(&set_timeout_lock);
+
+	atomic_set(&set_timeout_tofree, 2);
+
+	rv = i_ipmi_set_timeout(&set_timeout_smi_msg,
+				&set_timeout_recv_msg,
+				&send_heartbeat_now);
+	if (rv) {
+		mutex_unlock(&set_timeout_lock);
+		goto out;
+	}
+
+	wait_for_completion(&set_timeout_wait);
+
+	mutex_unlock(&set_timeout_lock);
+
+	if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
+	    || ((send_heartbeat_now)
+		&& (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
+		rv = ipmi_heartbeat();
+
+out:
+	return rv;
+}
+
+static atomic_t panic_done_count = ATOMIC_INIT(0);
+
+static void panic_smi_free(struct ipmi_smi_msg *msg)
+{
+	atomic_dec(&panic_done_count);
+}
+static void panic_recv_free(struct ipmi_recv_msg *msg)
+{
+	atomic_dec(&panic_done_count);
+}
+
+static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = {
+	.done = panic_smi_free
+};
+static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = {
+	.done = panic_recv_free
+};
+
+static void panic_halt_ipmi_heartbeat(void)
+{
+	struct kernel_ipmi_msg             msg;
+	struct ipmi_system_interface_addr addr;
+	int rv;
+
+	/*
+	 * Don't reset the timer if we have the timer turned off; that
+	 * would re-enable the watchdog.
+	 */
+	if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
+		return;
+
+	addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	addr.channel = IPMI_BMC_CHANNEL;
+	addr.lun = 0;
+
+	msg.netfn = 0x06;
+	msg.cmd = IPMI_WDOG_RESET_TIMER;
+	msg.data = NULL;
+	msg.data_len = 0;
+	atomic_add(1, &panic_done_count);
+	rv = ipmi_request_supply_msgs(watchdog_user,
+				      (struct ipmi_addr *) &addr,
+				      0,
+				      &msg,
+				      NULL,
+				      &panic_halt_heartbeat_smi_msg,
+				      &panic_halt_heartbeat_recv_msg,
+				      1);
+	if (rv)
+		atomic_sub(1, &panic_done_count);
+}
+
+static struct ipmi_smi_msg panic_halt_smi_msg = {
+	.done = panic_smi_free
+};
+static struct ipmi_recv_msg panic_halt_recv_msg = {
+	.done = panic_recv_free
+};
+
+/*
+ * Special call, doesn't claim any locks.  This is only to be called
+ * at panic or halt time, in run-to-completion mode, when the caller
+ * is the only CPU and the only thing that will be going is these IPMI
+ * calls.
+ */
+static void panic_halt_ipmi_set_timeout(void)
+{
+	int send_heartbeat_now;
+	int rv;
+
+	/* Wait for the messages to be free. */
+	while (atomic_read(&panic_done_count) != 0)
+		ipmi_poll_interface(watchdog_user);
+	atomic_add(1, &panic_done_count);
+	rv = i_ipmi_set_timeout(&panic_halt_smi_msg,
+				&panic_halt_recv_msg,
+				&send_heartbeat_now);
+	if (rv) {
+		atomic_sub(1, &panic_done_count);
+		printk(KERN_WARNING PFX
+		       "Unable to extend the watchdog timeout.");
+	} else {
+		if (send_heartbeat_now)
+			panic_halt_ipmi_heartbeat();
+	}
+	while (atomic_read(&panic_done_count) != 0)
+		ipmi_poll_interface(watchdog_user);
+}
+
+/*
+ * We use a mutex to make sure that only one thing can send a
+ * heartbeat at one time, because we only have one copy of the data.
+ * The mutex is claimed when the heartbeat is sent and freed
+ * when both messages are free.
+ */
+static atomic_t heartbeat_tofree = ATOMIC_INIT(0);
+static DEFINE_MUTEX(heartbeat_lock);
+static DECLARE_COMPLETION(heartbeat_wait);
+static void heartbeat_free_smi(struct ipmi_smi_msg *msg)
+{
+	if (atomic_dec_and_test(&heartbeat_tofree))
+		complete(&heartbeat_wait);
+}
+static void heartbeat_free_recv(struct ipmi_recv_msg *msg)
+{
+	if (atomic_dec_and_test(&heartbeat_tofree))
+		complete(&heartbeat_wait);
+}
+static struct ipmi_smi_msg heartbeat_smi_msg = {
+	.done = heartbeat_free_smi
+};
+static struct ipmi_recv_msg heartbeat_recv_msg = {
+	.done = heartbeat_free_recv
+};
+
+static int ipmi_heartbeat(void)
+{
+	struct kernel_ipmi_msg            msg;
+	int                               rv;
+	struct ipmi_system_interface_addr addr;
+	int				  timeout_retries = 0;
+
+	if (ipmi_ignore_heartbeat)
+		return 0;
+
+	if (ipmi_start_timer_on_heartbeat) {
+		ipmi_start_timer_on_heartbeat = 0;
+		ipmi_watchdog_state = action_val;
+		return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+	} else if (pretimeout_since_last_heartbeat) {
+		/*
+		 * A pretimeout occurred, make sure we set the timeout.
+		 * We don't want to set the action, though; we want to
+		 * leave that alone (thus it can't be combined with the
+		 * above operation).
+		 */
+		return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+	}
+
+	mutex_lock(&heartbeat_lock);
+
+restart:
+	atomic_set(&heartbeat_tofree, 2);
+
+	/*
+	 * Don't reset the timer if we have the timer turned off; that
+	 * would re-enable the watchdog.
+	 */
+	if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) {
+		mutex_unlock(&heartbeat_lock);
+		return 0;
+	}
+
+	addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	addr.channel = IPMI_BMC_CHANNEL;
+	addr.lun = 0;
+
+	msg.netfn = 0x06;
+	msg.cmd = IPMI_WDOG_RESET_TIMER;
+	msg.data = NULL;
+	msg.data_len = 0;
+	rv = ipmi_request_supply_msgs(watchdog_user,
+				      (struct ipmi_addr *) &addr,
+				      0,
+				      &msg,
+				      NULL,
+				      &heartbeat_smi_msg,
+				      &heartbeat_recv_msg,
+				      1);
+	if (rv) {
+		mutex_unlock(&heartbeat_lock);
+		printk(KERN_WARNING PFX "heartbeat failure: %d\n",
+		       rv);
+		return rv;
+	}
+
+	/* Wait for the heartbeat to be sent. */
+	wait_for_completion(&heartbeat_wait);
+
+	if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)  {
+		timeout_retries++;
+		if (timeout_retries > 3) {
+			printk(KERN_ERR PFX ": Unable to restore the IPMI"
+			       " watchdog's settings, giving up.\n");
+			rv = -EIO;
+			goto out_unlock;
+		}
+
+		/*
+		 * The timer was not initialized, that means the BMC was
+		 * probably reset and lost the watchdog information.  Attempt
+		 * to restore the timer's info.  Note that we still hold
+		 * the heartbeat lock, to keep a heartbeat from happening
+		 * in this process, so must say no heartbeat to avoid a
+		 * deadlock on this mutex.
+		 */
+		rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+		if (rv) {
+			printk(KERN_ERR PFX ": Unable to send the command to"
+			       " set the watchdog's settings, giving up.\n");
+			goto out_unlock;
+		}
+
+		/* We might need a new heartbeat, so do it now */
+		goto restart;
+	} else if (heartbeat_recv_msg.msg.data[0] != 0) {
+		/*
+		 * Got an error in the heartbeat response.  It was already
+		 * reported in ipmi_wdog_msg_handler, but we should return
+		 * an error here.
+		 */
+		rv = -EINVAL;
+	}
+
+out_unlock:
+	mutex_unlock(&heartbeat_lock);
+
+	return rv;
+}
+
+static struct watchdog_info ident = {
+	.options	= 0,	/* WDIOF_SETTIMEOUT, */
+	.firmware_version = 1,
+	.identity	= "IPMI"
+};
+
+static int ipmi_ioctl(struct file *file,
+		      unsigned int cmd, unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	int i;
+	int val;
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		i = copy_to_user(argp, &ident, sizeof(ident));
+		return i ? -EFAULT : 0;
+
+	case WDIOC_SETTIMEOUT:
+		i = copy_from_user(&val, argp, sizeof(int));
+		if (i)
+			return -EFAULT;
+		timeout = val;
+		return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+	case WDIOC_GETTIMEOUT:
+		i = copy_to_user(argp, &timeout, sizeof(timeout));
+		if (i)
+			return -EFAULT;
+		return 0;
+
+	case WDIOC_SETPRETIMEOUT:
+		i = copy_from_user(&val, argp, sizeof(int));
+		if (i)
+			return -EFAULT;
+		pretimeout = val;
+		return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+	case WDIOC_GETPRETIMEOUT:
+		i = copy_to_user(argp, &pretimeout, sizeof(pretimeout));
+		if (i)
+			return -EFAULT;
+		return 0;
+
+	case WDIOC_KEEPALIVE:
+		return ipmi_heartbeat();
+
+	case WDIOC_SETOPTIONS:
+		i = copy_from_user(&val, argp, sizeof(int));
+		if (i)
+			return -EFAULT;
+		if (val & WDIOS_DISABLECARD) {
+			ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+			ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+			ipmi_start_timer_on_heartbeat = 0;
+		}
+
+		if (val & WDIOS_ENABLECARD) {
+			ipmi_watchdog_state = action_val;
+			ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+		}
+		return 0;
+
+	case WDIOC_GETSTATUS:
+		val = 0;
+		i = copy_to_user(argp, &val, sizeof(val));
+		if (i)
+			return -EFAULT;
+		return 0;
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+static long ipmi_unlocked_ioctl(struct file *file,
+				unsigned int cmd,
+				unsigned long arg)
+{
+	int ret;
+
+	mutex_lock(&ipmi_watchdog_mutex);
+	ret = ipmi_ioctl(file, cmd, arg);
+	mutex_unlock(&ipmi_watchdog_mutex);
+
+	return ret;
+}
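+
+/*
+ * Hedged userland sketch (fd obtained from open("/dev/watchdog", O_WRONLY)):
+ * the standard watchdog ioctls land in ipmi_ioctl() above, e.g.
+ *
+ *	#include <linux/watchdog.h>
+ *	int t = 30;
+ *	ioctl(fd, WDIOC_SETTIMEOUT, &t);	// ipmi_set_timeout(..._HB_IF_NECESSARY)
+ *	ioctl(fd, WDIOC_KEEPALIVE, 0);		// ipmi_heartbeat()
+ */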
+
+static ssize_t ipmi_write(struct file *file,
+			  const char  __user *buf,
+			  size_t      len,
+			  loff_t      *ppos)
+{
+	int rv;
+
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			/* In case it was set long ago */
+			expect_close = 0;
+
+			for (i = 0; i != len; i++) {
+				char c;
+
+				if (get_user(c, buf + i))
+					return -EFAULT;
+				if (c == 'V')
+					expect_close = 42;
+			}
+		}
+		rv = ipmi_heartbeat();
+		if (rv)
+			return rv;
+	}
+	return len;
+}
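+
+/*
+ * Hedged userland sketch: with nowayout off, a daemon pets the watchdog by
+ * writing any byte and stops it cleanly by writing 'V' before close(),
+ * which arms expect_close above:
+ *
+ *	write(fd, "\0", 1);	// heartbeat
+ *	write(fd, "V", 1);	// expect a clean close
+ *	close(fd);		// timer is stopped rather than left running
+ */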
+
+static ssize_t ipmi_read(struct file *file,
+			 char        __user *buf,
+			 size_t      count,
+			 loff_t      *ppos)
+{
+	int          rv = 0;
+	wait_queue_entry_t wait;
+
+	if (count <= 0)
+		return 0;
+
+	/*
+	 * Reading returns if the pretimeout has gone off, and it only does
+	 * it once per pretimeout.
+	 */
+	spin_lock(&ipmi_read_lock);
+	if (!data_to_read) {
+		if (file->f_flags & O_NONBLOCK) {
+			rv = -EAGAIN;
+			goto out;
+		}
+
+		init_waitqueue_entry(&wait, current);
+		add_wait_queue(&read_q, &wait);
+		while (!data_to_read) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			spin_unlock(&ipmi_read_lock);
+			schedule();
+			spin_lock(&ipmi_read_lock);
+		}
+		remove_wait_queue(&read_q, &wait);
+
+		if (signal_pending(current)) {
+			rv = -ERESTARTSYS;
+			goto out;
+		}
+	}
+	data_to_read = 0;
+
+ out:
+	spin_unlock(&ipmi_read_lock);
+
+	if (rv == 0) {
+		if (copy_to_user(buf, &data_to_read, 1))
+			rv = -EFAULT;
+		else
+			rv = 1;
+	}
+
+	return rv;
+}
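+
+/*
+ * Hedged userland sketch (names are placeholders): with preaction pre_int
+ * and preop preop_give_data, a blocking one-byte read, or poll() for
+ * POLLIN, returns once per pretimeout event:
+ *
+ *	char c;
+ *	if (read(wd_fd, &c, 1) == 1)
+ *		handle_pretimeout();	// fired; act before the full timeout
+ */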
+
+static int ipmi_open(struct inode *ino, struct file *filep)
+{
+	switch (iminor(ino)) {
+	case WATCHDOG_MINOR:
+		if (test_and_set_bit(0, &ipmi_wdog_open))
+			return -EBUSY;
+
+
+		/*
+		 * Don't start the timer now, let it start on the
+		 * first heartbeat.
+		 */
+		ipmi_start_timer_on_heartbeat = 1;
+		return nonseekable_open(ino, filep);
+
+	default:
+		return -ENODEV;
+	}
+}
+
+static unsigned int ipmi_poll(struct file *file, poll_table *wait)
+{
+	unsigned int mask = 0;
+
+	poll_wait(file, &read_q, wait);
+
+	spin_lock(&ipmi_read_lock);
+	if (data_to_read)
+		mask |= (POLLIN | POLLRDNORM);
+	spin_unlock(&ipmi_read_lock);
+
+	return mask;
+}
+
+static int ipmi_fasync(int fd, struct file *file, int on)
+{
+	int result;
+
+	result = fasync_helper(fd, file, on, &fasync_q);
+
+	return result;
+}
+
+static int ipmi_close(struct inode *ino, struct file *filep)
+{
+	if (iminor(ino) == WATCHDOG_MINOR) {
+		if (expect_close == 42) {
+			ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+			ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+		} else {
+			printk(KERN_CRIT PFX
+			       "Unexpected close, not stopping watchdog!\n");
+			ipmi_heartbeat();
+		}
+		clear_bit(0, &ipmi_wdog_open);
+	}
+
+	expect_close = 0;
+
+	return 0;
+}
+
+static const struct file_operations ipmi_wdog_fops = {
+	.owner   = THIS_MODULE,
+	.read    = ipmi_read,
+	.poll    = ipmi_poll,
+	.write   = ipmi_write,
+	.unlocked_ioctl = ipmi_unlocked_ioctl,
+	.open    = ipmi_open,
+	.release = ipmi_close,
+	.fasync  = ipmi_fasync,
+	.llseek  = no_llseek,
+};
+
+static struct miscdevice ipmi_wdog_miscdev = {
+	.minor		= WATCHDOG_MINOR,
+	.name		= "watchdog",
+	.fops		= &ipmi_wdog_fops
+};
+
+static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
+				  void                 *handler_data)
+{
+	if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
+			msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
+		printk(KERN_INFO PFX "response: The IPMI controller appears"
+		       " to have been reset, will attempt to reinitialize"
+		       " the watchdog timer\n");
+	else if (msg->msg.data[0] != 0)
+		printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
+		       msg->msg.data[0],
+		       msg->msg.cmd);
+
+	ipmi_free_recv_msg(msg);
+}
+
+static void ipmi_wdog_pretimeout_handler(void *handler_data)
+{
+	if (preaction_val != WDOG_PRETIMEOUT_NONE) {
+		if (preop_val == WDOG_PREOP_PANIC) {
+			if (atomic_inc_and_test(&preop_panic_excl))
+				panic("Watchdog pre-timeout");
+		} else if (preop_val == WDOG_PREOP_GIVE_DATA) {
+			spin_lock(&ipmi_read_lock);
+			data_to_read = 1;
+			wake_up_interruptible(&read_q);
+			kill_fasync(&fasync_q, SIGIO, POLL_IN);
+
+			spin_unlock(&ipmi_read_lock);
+		}
+	}
+
+	/*
+	 * On some machines, the heartbeat will give an error and not
+	 * work unless we re-enable the timer.  So do so.
+	 */
+	pretimeout_since_last_heartbeat = 1;
+}
+
+static const struct ipmi_user_hndl ipmi_hndlrs = {
+	.ipmi_recv_hndl           = ipmi_wdog_msg_handler,
+	.ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler
+};
+
+static void ipmi_register_watchdog(int ipmi_intf)
+{
+	int rv = -EBUSY;
+
+	if (watchdog_user)
+		goto out;
+
+	if ((ifnum_to_use >= 0) && (ifnum_to_use != ipmi_intf))
+		goto out;
+
+	watchdog_ifnum = ipmi_intf;
+
+	rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user);
+	if (rv < 0) {
+		printk(KERN_CRIT PFX "Unable to register with ipmi\n");
+		goto out;
+	}
+
+	ipmi_get_version(watchdog_user,
+			 &ipmi_version_major,
+			 &ipmi_version_minor);
+
+	rv = misc_register(&ipmi_wdog_miscdev);
+	if (rv < 0) {
+		ipmi_destroy_user(watchdog_user);
+		watchdog_user = NULL;
+		printk(KERN_CRIT PFX "Unable to register misc device\n");
+	}
+
+#ifdef HAVE_DIE_NMI
+	if (nmi_handler_registered) {
+		int old_pretimeout = pretimeout;
+		int old_timeout = timeout;
+		int old_preop_val = preop_val;
+
+		/*
+		 * Set the pretimeout to go off in a second and give
+		 * ourselves plenty of time to stop the timer.
+		 */
+		ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
+		preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */
+		pretimeout = 99;
+		timeout = 100;
+
+		testing_nmi = 1;
+
+		rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+		if (rv) {
+			printk(KERN_WARNING PFX "Error starting timer to"
+			       " test NMI: 0x%x.  The NMI pretimeout will"
+			       " likely not work\n", rv);
+			rv = 0;
+			goto out_restore;
+		}
+
+		msleep(1500);
+
+		if (testing_nmi != 2) {
+			printk(KERN_WARNING PFX "IPMI NMI didn't seem to"
+			       " occur.  The NMI pretimeout will"
+			       " likely not work\n");
+		}
+ out_restore:
+		testing_nmi = 0;
+		preop_val = old_preop_val;
+		pretimeout = old_pretimeout;
+		timeout = old_timeout;
+	}
+#endif
+
+ out:
+	if ((start_now) && (rv == 0)) {
+		/* Run from startup, so start the timer now. */
+		start_now = 0; /* Disable this function after first startup. */
+		ipmi_watchdog_state = action_val;
+		ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+		printk(KERN_INFO PFX "Starting now!\n");
+	} else {
+		/* Stop the timer now. */
+		ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+		ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+	}
+}
+
+static void ipmi_unregister_watchdog(int ipmi_intf)
+{
+	int rv;
+
+	if (!watchdog_user)
+		goto out;
+
+	if (watchdog_ifnum != ipmi_intf)
+		goto out;
+
+	/* Make sure no one can call us any more. */
+	misc_deregister(&ipmi_wdog_miscdev);
+
+	/*
+	 * Wait to make sure the message makes it out.  The lower layer has
+	 * pointers to our buffers, we want to make sure they are done before
+	 * we release our memory.
+	 */
+	while (atomic_read(&set_timeout_tofree))
+		schedule_timeout_uninterruptible(1);
+
+	/* Disconnect from IPMI. */
+	rv = ipmi_destroy_user(watchdog_user);
+	if (rv) {
+		printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n",
+		       rv);
+	}
+	watchdog_user = NULL;
+
+ out:
+	return;
+}
+
+#ifdef HAVE_DIE_NMI
+static int
+ipmi_nmi(unsigned int val, struct pt_regs *regs)
+{
+	/*
+	 * If we get here, it's an NMI that's not a memory or I/O
+	 * error.  We can't truly tell if it's from IPMI or not
+	 * without sending a message, and sending a message is almost
+	 * impossible because of locking.
+	 */
+
+	if (testing_nmi) {
+		testing_nmi = 2;
+		return NMI_HANDLED;
+	}
+
+	/* If we are not expecting a timeout, ignore it. */
+	if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
+		return NMI_DONE;
+
+	if (preaction_val != WDOG_PRETIMEOUT_NMI)
+		return NMI_DONE;
+
+	/*
+	 * If no one else handled the NMI, we assume it was the IPMI
+	 * watchdog.
+	 */
+	if (preop_val == WDOG_PREOP_PANIC) {
+		/*
+		 * On some machines, the heartbeat will give an error and
+		 * not work unless we re-enable the timer.  So do so.
+		 */
+		pretimeout_since_last_heartbeat = 1;
+		if (atomic_inc_and_test(&preop_panic_excl))
+			nmi_panic(regs, PFX "pre-timeout");
+	}
+
+	return NMI_HANDLED;
+}
+#endif
+
+static int wdog_reboot_handler(struct notifier_block *this,
+			       unsigned long         code,
+			       void                  *unused)
+{
+	static int reboot_event_handled;
+
+	if ((watchdog_user) && (!reboot_event_handled)) {
+		/* Make sure we only do this once. */
+		reboot_event_handled = 1;
+
+		if (code == SYS_POWER_OFF || code == SYS_HALT) {
+			/* Disable the WDT if we are shutting down. */
+			ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+			ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+		} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+			/*
+			 * Set a long timer to let the reboot happen or
+			 * reset if it hangs, but only if the watchdog
+			 * timer was already running.
+			 */
+			if (timeout < 120)
+				timeout = 120;
+			pretimeout = 0;
+			ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
+			ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+		}
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block wdog_reboot_notifier = {
+	.notifier_call	= wdog_reboot_handler,
+	.next		= NULL,
+	.priority	= 0
+};
+
+static int wdog_panic_handler(struct notifier_block *this,
+			      unsigned long         event,
+			      void                  *unused)
+{
+	static int panic_event_handled;
+
+	/*
+	 * On a panic, if we have a panic timeout, make sure to extend
+	 * the watchdog timer to a reasonable value to complete the
+	 * panic, if the watchdog timer is running.  Plus the
+	 * pretimeout is meaningless at panic time.
+	 */
+	if (watchdog_user && !panic_event_handled &&
+	    ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+		/* Make sure we do this only once. */
+		panic_event_handled = 1;
+
+		timeout = panic_wdt_timeout;
+		pretimeout = 0;
+		panic_halt_ipmi_set_timeout();
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block wdog_panic_notifier = {
+	.notifier_call	= wdog_panic_handler,
+	.next		= NULL,
+	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
+};
+
+
+static void ipmi_new_smi(int if_num, struct device *device)
+{
+	ipmi_register_watchdog(if_num);
+}
+
+static void ipmi_smi_gone(int if_num)
+{
+	ipmi_unregister_watchdog(if_num);
+}
+
+static struct ipmi_smi_watcher smi_watcher = {
+	.owner    = THIS_MODULE,
+	.new_smi  = ipmi_new_smi,
+	.smi_gone = ipmi_smi_gone
+};
+
+static int action_op(const char *inval, char *outval)
+{
+	if (outval)
+		strcpy(outval, action);
+
+	if (!inval)
+		return 0;
+
+	if (strcmp(inval, "reset") == 0)
+		action_val = WDOG_TIMEOUT_RESET;
+	else if (strcmp(inval, "none") == 0)
+		action_val = WDOG_TIMEOUT_NONE;
+	else if (strcmp(inval, "power_cycle") == 0)
+		action_val = WDOG_TIMEOUT_POWER_CYCLE;
+	else if (strcmp(inval, "power_off") == 0)
+		action_val = WDOG_TIMEOUT_POWER_DOWN;
+	else
+		return -EINVAL;
+	strcpy(action, inval);
+	return 0;
+}
+
+static int preaction_op(const char *inval, char *outval)
+{
+	if (outval)
+		strcpy(outval, preaction);
+
+	if (!inval)
+		return 0;
+
+	if (strcmp(inval, "pre_none") == 0)
+		preaction_val = WDOG_PRETIMEOUT_NONE;
+	else if (strcmp(inval, "pre_smi") == 0)
+		preaction_val = WDOG_PRETIMEOUT_SMI;
+#ifdef HAVE_DIE_NMI
+	else if (strcmp(inval, "pre_nmi") == 0)
+		preaction_val = WDOG_PRETIMEOUT_NMI;
+#endif
+	else if (strcmp(inval, "pre_int") == 0)
+		preaction_val = WDOG_PRETIMEOUT_MSG_INT;
+	else
+		return -EINVAL;
+	strcpy(preaction, inval);
+	return 0;
+}
+
+static int preop_op(const char *inval, char *outval)
+{
+	if (outval)
+		strcpy(outval, preop);
+
+	if (!inval)
+		return 0;
+
+	if (strcmp(inval, "preop_none") == 0)
+		preop_val = WDOG_PREOP_NONE;
+	else if (strcmp(inval, "preop_panic") == 0)
+		preop_val = WDOG_PREOP_PANIC;
+	else if (strcmp(inval, "preop_give_data") == 0)
+		preop_val = WDOG_PREOP_GIVE_DATA;
+	else
+		return -EINVAL;
+	strcpy(preop, inval);
+	return 0;
+}
+
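+/*
+ * Reconcile the pretimeout settings with the NMI handler: NMI
+ * pretimeout cannot be combined with the "give data" preop, and the
+ * "ipmi" NMI handler is registered or unregistered to match the
+ * resulting configuration.
+ */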
+static void check_parms(void)
+{
+#ifdef HAVE_DIE_NMI
+	int do_nmi = 0;
+	int rv;
+
+	if (preaction_val == WDOG_PRETIMEOUT_NMI) {
+		do_nmi = 1;
+		if (preop_val == WDOG_PREOP_GIVE_DATA) {
+			printk(KERN_WARNING PFX
+			       "Pretimeout op is to give data but NMI pretimeout is enabled, setting pretimeout op to none\n");
+			preop_op("preop_none", NULL);
+			do_nmi = 0;
+		}
+	}
+	if (do_nmi && !nmi_handler_registered) {
+		rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0,
+					  "ipmi");
+		if (rv) {
+			printk(KERN_WARNING PFX
+			       "Can't register nmi handler\n");
+			return;
+		}
+		nmi_handler_registered = 1;
+	} else if (!do_nmi && nmi_handler_registered) {
+		unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
+		nmi_handler_registered = 0;
+	}
+#endif
+}
+
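+/*
+ * Module init: validate the action/preaction/preop parameter strings
+ * (falling back to safe defaults on bad input), register the reboot
+ * and panic notifiers, and register the SMI watcher that hooks the
+ * watchdog up to IPMI interfaces as they appear.
+ *
+ * A typical load, assuming this driver's usual module parameter names
+ * (declared earlier in this file), might look like:
+ *
+ *   modprobe ipmi_watchdog timeout=20 action=reset preaction=pre_nmi
+ *
+ * The parameter values above are only illustrative.
+ */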
+static int __init ipmi_wdog_init(void)
+{
+	int rv;
+
+	if (action_op(action, NULL)) {
+		action_op("reset", NULL);
+		printk(KERN_INFO PFX
+		       "Unknown action '%s', defaulting to reset\n", action);
+	}
+
+	if (preaction_op(preaction, NULL)) {
+		preaction_op("pre_none", NULL);
+		printk(KERN_INFO PFX
+		       "Unknown preaction '%s', defaulting to none\n",
+		       preaction);
+	}
+
+	if (preop_op(preop, NULL)) {
+		preop_op("preop_none", NULL);
+		printk(KERN_INFO PFX
+		       "Unknown preop '%s', defaulting to none\n", preop);
+	}
+
+	check_parms();
+
+	register_reboot_notifier(&wdog_reboot_notifier);
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&wdog_panic_notifier);
+
+	rv = ipmi_smi_watcher_register(&smi_watcher);
+	if (rv) {
+#ifdef HAVE_DIE_NMI
+		if (nmi_handler_registered)
+			unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
+#endif
+		atomic_notifier_chain_unregister(&panic_notifier_list,
+						 &wdog_panic_notifier);
+		unregister_reboot_notifier(&wdog_reboot_notifier);
+		printk(KERN_WARNING PFX "Can't register SMI watcher\n");
+		return rv;
+	}
+
+	printk(KERN_INFO PFX "driver initialized\n");
+
+	return 0;
+}
+
+static void __exit ipmi_wdog_exit(void)
+{
+	ipmi_smi_watcher_unregister(&smi_watcher);
+	ipmi_unregister_watchdog(watchdog_ifnum);
+
+#ifdef HAVE_DIE_NMI
+	if (nmi_handler_registered)
+		unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
+#endif
+
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &wdog_panic_notifier);
+	unregister_reboot_notifier(&wdog_reboot_notifier);
+}
+module_init(ipmi_wdog_init);
+module_exit(ipmi_wdog_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Watchdog timer based upon the IPMI interface");