ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/visorbus/Kconfig b/marvell/linux/drivers/visorbus/Kconfig
new file mode 100644
index 0000000..d702c44
--- /dev/null
+++ b/marvell/linux/drivers/visorbus/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Unisys visorbus configuration
+#
+
+config UNISYS_VISORBUS
+	tristate "Unisys visorbus driver"
+	depends on X86_64 && ACPI
+	---help---
+	The visorbus driver is a virtualized bus for the Unisys s-Par firmware.
+	Virtualized devices allow Linux guests on a system to share disks and
+	network cards that do not have SR-IOV support, and to be accessed using
+	the partition desktop application. The visorbus driver is required to
+	discover devices on an s-Par guest, and must be present for any other
+	s-Par guest driver to function correctly.
diff --git a/marvell/linux/drivers/visorbus/Makefile b/marvell/linux/drivers/visorbus/Makefile
new file mode 100644
index 0000000..e8df59d
--- /dev/null
+++ b/marvell/linux/drivers/visorbus/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Unisys visorbus
+#
+
+obj-$(CONFIG_UNISYS_VISORBUS)	+= visorbus.o
+
+visorbus-y := visorbus_main.o
+visorbus-y += visorchannel.o
+visorbus-y += visorchipset.o
diff --git a/marvell/linux/drivers/visorbus/controlvmchannel.h b/marvell/linux/drivers/visorbus/controlvmchannel.h
new file mode 100644
index 0000000..8c57562
--- /dev/null
+++ b/marvell/linux/drivers/visorbus/controlvmchannel.h
@@ -0,0 +1,650 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
+ * All rights reserved.
+ */
+
+#ifndef __CONTROLVMCHANNEL_H__
+#define __CONTROLVMCHANNEL_H__
+
+#include <linux/uuid.h>
+#include <linux/visorbus.h>
+
+/* {2B3C2D10-7EF5-4ad8-B966-3448B7386B3D} */
+#define VISOR_CONTROLVM_CHANNEL_GUID \
+	GUID_INIT(0x2b3c2d10, 0x7ef5, 0x4ad8, \
+		  0xb9, 0x66, 0x34, 0x48, 0xb7, 0x38, 0x6b, 0x3d)
+
+#define CONTROLVM_MESSAGE_MAX 64
+
+/*
+ * Must increment this whenever you insert or delete fields within this channel
+ * struct.  Also increment whenever you change the meaning of fields within this
+ * channel struct so as to break pre-existing software. Note that you can
+ * usually add fields to the END of the channel struct withOUT needing to
+ * increment this.
+ */
+#define VISOR_CONTROLVM_CHANNEL_VERSIONID 1
+
+/* Defines for various channel queues */
+#define CONTROLVM_QUEUE_REQUEST		0
+#define CONTROLVM_QUEUE_RESPONSE	1
+#define CONTROLVM_QUEUE_EVENT		2
+#define CONTROLVM_QUEUE_ACK		3
+
+/* Max num of messages stored during IOVM creation to be reused after crash */
+#define CONTROLVM_CRASHMSG_MAX 2
+
+/*
+ * struct visor_segment_state
+ * @enabled:   May enter other states.
+ * @active:    Assigned to active partition.
+ * @alive:     Configure message sent to service/server.
+ * @revoked:   Similar to partition state ShuttingDown.
+ * @allocated: Memory (device/port number) has been selected by Command.
+ * @known:     Has been introduced to the service/guest partition.
+ * @ready:     Service/Guest partition has responded to introduction.
+ * @operating: Resource is configured and operating.
+ * @reserved:  Natural alignment.
+ *
+ * Note: Don't use high bit unless we need to switch to ushort which is
+ * non-compliant.
+ */
+struct visor_segment_state  {
+	u16 enabled:1;
+	u16 active:1;
+	u16 alive:1;
+	u16 revoked:1;
+	u16 allocated:1;
+	u16 known:1;
+	u16 ready:1;
+	u16 operating:1;
+	u16 reserved:8;
+} __packed;
+
+static const struct visor_segment_state segment_state_running = {
+	1, 1, 1, 0, 1, 1, 1, 1
+};
+
+static const struct visor_segment_state segment_state_paused = {
+	1, 1, 1, 0, 1, 1, 1, 0
+};
+
+static const struct visor_segment_state segment_state_standby = {
+	1, 1, 0, 0, 1, 1, 1, 0
+};
+
+/*
+ * enum controlvm_id
+ * @CONTROLVM_INVALID:
+ * @CONTROLVM_BUS_CREATE:		CP --> SP, GP.
+ * @CONTROLVM_BUS_DESTROY:		CP --> SP, GP.
+ * @CONTROLVM_BUS_CONFIGURE:		CP --> SP.
+ * @CONTROLVM_BUS_CHANGESTATE:		CP --> SP, GP.
+ * @CONTROLVM_BUS_CHANGESTATE_EVENT:	SP, GP --> CP.
+ * @CONTROLVM_DEVICE_CREATE:		CP --> SP, GP.
+ * @CONTROLVM_DEVICE_DESTROY:		CP --> SP, GP.
+ * @CONTROLVM_DEVICE_CONFIGURE:		CP --> SP.
+ * @CONTROLVM_DEVICE_CHANGESTATE:	CP --> SP, GP.
+ * @CONTROLVM_DEVICE_CHANGESTATE_EVENT:	SP, GP --> CP.
+ * @CONTROLVM_DEVICE_RECONFIGURE:	CP --> Boot.
+ * @CONTROLVM_CHIPSET_INIT:		CP --> SP, GP.
+ * @CONTROLVM_CHIPSET_STOP:		CP --> SP, GP.
+ * @CONTROLVM_CHIPSET_READY:		CP --> SP.
+ * @CONTROLVM_CHIPSET_SELFTEST:		CP --> SP.
+ *
+ * Ids for commands that may appear in either queue of a ControlVm channel.
+ *
+ * Commands that are initiated by the command partition (CP), by an IO or
+ * console service partition (SP), or by a guest partition (GP) are:
+ * - issued on the RequestQueue queue (q #0) in the ControlVm channel
+ * - responded to on the ResponseQueue queue (q #1) in the ControlVm channel
+ *
+ * Events that are initiated by an IO or console service partition (SP) or
+ * by a guest partition (GP) are:
+ * - issued on the EventQueue queue (q #2) in the ControlVm channel
+ * - responded to on the EventAckQueue queue (q #3) in the ControlVm channel
+ */
+enum controlvm_id {
+	CONTROLVM_INVALID = 0,
+	/*
+	 * SWITCH commands required Parameter: SwitchNumber.
+	 * BUS commands required Parameter: BusNumber
+	 */
+	CONTROLVM_BUS_CREATE = 0x101,
+	CONTROLVM_BUS_DESTROY = 0x102,
+	CONTROLVM_BUS_CONFIGURE = 0x104,
+	CONTROLVM_BUS_CHANGESTATE = 0x105,
+	CONTROLVM_BUS_CHANGESTATE_EVENT = 0x106,
+	/* DEVICE commands required Parameter: BusNumber, DeviceNumber */
+	CONTROLVM_DEVICE_CREATE = 0x201,
+	CONTROLVM_DEVICE_DESTROY = 0x202,
+	CONTROLVM_DEVICE_CONFIGURE = 0x203,
+	CONTROLVM_DEVICE_CHANGESTATE = 0x204,
+	CONTROLVM_DEVICE_CHANGESTATE_EVENT = 0x205,
+	CONTROLVM_DEVICE_RECONFIGURE = 0x206,
+	/* CHIPSET commands */
+	CONTROLVM_CHIPSET_INIT = 0x301,
+	CONTROLVM_CHIPSET_STOP = 0x302,
+	CONTROLVM_CHIPSET_READY = 0x304,
+	CONTROLVM_CHIPSET_SELFTEST = 0x305,
+};
+
+/*
+ * struct irq_info
+ * @reserved1:	     Natural alignment purposes
+ * @recv_irq_handle: Specifies interrupt handle. It is used to retrieve the
+ *		     corresponding interrupt pin from Monitor; and the interrupt
+ *		     pin is used to connect to the corresponding interrupt.
+ *		     Used by IOPart-GP only.
+ * @recv_irq_vector: Specifies interrupt vector. It, interrupt pin, and shared
+ *		     are used to connect to the corresponding interrupt.
+ *		     Used by IOPart-GP only.
+ * @recv_irq_shared: Specifies if the recvInterrupt is shared (0 = not shared;
+ *		     1 = shared). It, the interrupt pin, and the vector are
+ *		     used to connect to the corresponding interrupt.
+ *		     Used by IOPart-GP only.
+ * @reserved:	     Natural alignment purposes
+ */
+struct irq_info {
+	u64 reserved1;
+	u64 recv_irq_handle;
+	u32 recv_irq_vector;
+	u8 recv_irq_shared;
+	u8 reserved[3];
+} __packed;
+
+/*
+ * struct efi_visor_indication
+ * @boot_to_fw_ui: Stop in UEFI UI
+ * @clear_nvram:   Clear NVRAM
+ * @clear_cmos:	   Clear CMOS
+ * @boot_to_tool:  Run install tool
+ * @reserved:	   Natural alignment
+ */
+struct efi_visor_indication  {
+	u64 boot_to_fw_ui:1;
+	u64 clear_nvram:1;
+	u64 clear_cmos:1;
+	u64 boot_to_tool:1;
+	/* Remaining bits are available */
+	u64 reserved:60;
+} __packed;
+
+enum visor_chipset_feature {
+	VISOR_CHIPSET_FEATURE_REPLY = 0x00000001,
+	VISOR_CHIPSET_FEATURE_PARA_HOTPLUG = 0x00000002,
+};
+
+/*
+ * struct controlvm_message_header
+ * @id:		       See CONTROLVM_ID.
+ * @message_size:      Includes size of this struct + size of message.
+ * @segment_index:     Index of segment containing Vm message/information.
+ * @completion_status: Error status code or result of message completion.
+ * @struct flags:
+ *	@failed:	     =1 in a response to signify failure.
+ *	@response_expected:  =1 in all messages that expect a response.
+ *	@server:	     =1 in all bus & device-related messages where the
+ *			     message receiver is to act as the bus or device
+ *			     server.
+ *	@test_message:	     =1 for testing use only (Control and Command
+ *			     ignore this).
+ *	@partial_completion: =1 if there are forthcoming responses/acks
+ *                           associated with this message.
+ *      @preserve:	     =1 this is to let us know to preserve channel
+ *			     contents.
+ *	@writer_in_diag:     =1 the DiagWriter is active in the Diagnostic
+ *			     Partition.
+ *	@reserve:	     Natural alignment.
+ * @reserved:	       Natural alignment.
+ * @message_handle:    Identifies the particular message instance.
+ * @payload_vm_offset: Offset of payload area from start of this instance.
+ * @payload_max_bytes: Maximum bytes allocated in payload area of ControlVm
+ *		       segment.
+ * @payload_bytes:     Actual number of bytes of payload area to copy between
+ *		       IO/Command. If non-zero, there is a payload to copy.
+ *
+ * This is the common structure that is at the beginning of every
+ * ControlVm message (both commands and responses) in any ControlVm
+ * queue.  Commands are easily distinguished from responses by
+ * looking at the flags.response field.
+ */
+struct controlvm_message_header  {
+	u32 id;
+	/*
+	 * For requests, indicates the message type. For responses, indicates
+	 * the type of message we are responding to.
+	 */
+	u32 message_size;
+	u32 segment_index;
+	u32 completion_status;
+	struct  {
+		u32 failed:1;
+		u32 response_expected:1;
+		u32 server:1;
+		u32 test_message:1;
+		u32 partial_completion:1;
+		u32 preserve:1;
+		u32 writer_in_diag:1;
+		u32 reserve:25;
+	} __packed flags;
+	u32 reserved;
+	u64 message_handle;
+	u64 payload_vm_offset;
+	u32 payload_max_bytes;
+	u32 payload_bytes;
+} __packed;
+
+/*
+ * struct controlvm_packet_device_create - For CONTROLVM_DEVICE_CREATE
+ * @bus_no:	    Bus # (0..n-1) from the msg receiver's end.
+ * @dev_no:	    Bus-relative (0..n-1) device number.
+ * @channel_addr:   Guest physical address of the channel, which can be
+ *		    dereferenced by the receiver of this ControlVm command.
+ * @channel_bytes:  Specifies size of the channel in bytes.
+ * @data_type_guid: Specifies format of data in channel.
+ * @dev_inst_guid:  Instance guid for the device.
+ * @intr:           Specifies interrupt information (struct irq_info).
+ */
+struct controlvm_packet_device_create  {
+	u32 bus_no;
+	u32 dev_no;
+	u64 channel_addr;
+	u64 channel_bytes;
+	guid_t data_type_guid;
+	guid_t dev_inst_guid;
+	struct irq_info intr;
+} __packed;
+
+/*
+ * struct controlvm_packet_device_configure - For CONTROLVM_DEVICE_CONFIGURE
+ * @bus_no: Bus number (0..n-1) from the msg receiver's perspective.
+ * @dev_no: Bus-relative (0..n-1) device number.
+ */
+struct controlvm_packet_device_configure  {
+	u32 bus_no;
+	u32 dev_no;
+} __packed;
+
+/* Total 128 bytes */
+struct controlvm_message_device_create {
+	struct controlvm_message_header header;
+	struct controlvm_packet_device_create packet;
+} __packed;
+
+/* Total 56 bytes */
+struct controlvm_message_device_configure  {
+	struct controlvm_message_header header;
+	struct controlvm_packet_device_configure packet;
+} __packed;
+
+/*
+ * struct controlvm_message_packet - This is the format for a message in any
+ *                                   ControlVm queue.
+ * @struct create_bus:		For CONTROLVM_BUS_CREATE.
+ *	@bus_no:	     Bus # (0..n-1) from the msg receiver's perspective.
+ *	@dev_count:	     Indicates the max number of devices on this bus.
+ *	@channel_addr:	     Guest physical address of the channel, which can be
+ *			     dereferenced by the receiver of this ControlVM
+ *			     command.
+ *	@channel_bytes:	     Size of the channel.
+ *	@bus_data_type_guid: Indicates format of data in bus channel.
+ *	@bus_inst_guid:	     Instance guid for the bus.
+ *
+ * @struct destroy_bus:		For CONTROLVM_BUS_DESTROY.
+ *	@bus_no: Bus # (0..n-1) from the msg receiver's perspective.
+ *	@reserved: Natural alignment purposes.
+ *
+ * @struct configure_bus:	For CONTROLVM_BUS_CONFIGURE.
+ *	@bus_no:	      Bus # (0..n-1) from the receiver's perspective.
+ *	@reserved1:	      For alignment purposes.
+ *	@guest_handle:	      This is used to convert guest physical address to
+ *			      physical address.
+ *	@recv_bus_irq_handle: Specifies interrupt info. It is used by SP to
+ *			      register to receive interrupts from the CP. This
+ *			      interrupt is used for bus level notifications.
+ *			      The corresponding sendBusInterruptHandle is kept
+ *			      in CP.
+ *
+ * @struct create_device:	For CONTROLVM_DEVICE_CREATE.
+ *
+ * @struct destroy_device:	For CONTROLVM_DEVICE_DESTROY.
+ *	@bus_no: Bus # (0..n-1) from the msg receiver's perspective.
+ *	@dev_no: Bus-relative (0..n-1) device number.
+ *
+ * @struct configure_device:	For CONTROLVM_DEVICE_CONFIGURE.
+ *
+ * @struct reconfigure_device:	For CONTROLVM_DEVICE_RECONFIGURE.
+ *	@bus_no: Bus # (0..n-1) from the msg receiver's perspective.
+ *	@dev_no: Bus-relative (0..n-1) device number.
+ *
+ * @struct bus_change_state:	For CONTROLVM_BUS_CHANGESTATE.
+ *	@bus_no:
+ *	@struct state:
+ *	@reserved: Natural alignment purposes.
+ *
+ * @struct device_change_state:	For CONTROLVM_DEVICE_CHANGESTATE.
+ *	@bus_no:
+ *	@dev_no:
+ *	@struct state:
+ *	@struct flags:
+ *		@phys_device: =1 if message is for a physical device.
+ *		@reserved:    Natural alignment.
+ *		@reserved1:   Natural alignment.
+ *	@reserved:    Natural alignment purposes.
+ *
+ * @struct device_change_state_event:	For CONTROLVM_DEVICE_CHANGESTATE_EVENT.
+ *	@bus_no:
+ *	@dev_no:
+ *	@struct state:
+ *	@reserved:     Natural alignment purposes.
+ *
+ * @struct init_chipset:	For CONTROLVM_CHIPSET_INIT.
+ *	@bus_count:	  Indicates the max number of busses.
+ *	@switch_count:    Indicates the max number of switches.
+ *	@enum features:
+ *	@platform_number:
+ *
+ * @struct chipset_selftest:	For CONTROLVM_CHIPSET_SELFTEST.
+ *      @options: Reserved.
+ *      @test:	  Bit 0 set to run embedded selftest.
+ *
+ * @addr:   A physical address of something, that can be dereferenced by the
+ *	    receiver of this ControlVm command.
+ *
+ * @handle: A handle of something (depends on command id).
+ */
+struct controlvm_message_packet  {
+	union  {
+		struct  {
+			u32 bus_no;
+			u32 dev_count;
+			u64 channel_addr;
+			u64 channel_bytes;
+			guid_t bus_data_type_guid;
+			guid_t bus_inst_guid;
+		} __packed create_bus;
+		struct  {
+			u32 bus_no;
+			u32 reserved;
+		} __packed destroy_bus;
+		struct  {
+			u32 bus_no;
+			u32 reserved1;
+			u64 guest_handle;
+			u64 recv_bus_irq_handle;
+		} __packed configure_bus;
+		struct controlvm_packet_device_create create_device;
+		struct  {
+			u32 bus_no;
+			u32 dev_no;
+		} __packed destroy_device;
+		struct controlvm_packet_device_configure configure_device;
+		struct  {
+			u32 bus_no;
+			u32 dev_no;
+		} __packed reconfigure_device;
+		struct  {
+			u32 bus_no;
+			struct visor_segment_state state;
+			u8 reserved[2];
+		} __packed bus_change_state;
+		struct  {
+			u32 bus_no;
+			u32 dev_no;
+			struct visor_segment_state state;
+			struct  {
+				u32 phys_device:1;
+				u32 reserved:31;
+				u32 reserved1;
+			} __packed flags;
+			u8 reserved[2];
+		} __packed device_change_state;
+		struct  {
+			u32 bus_no;
+			u32 dev_no;
+			struct visor_segment_state state;
+			u8 reserved[6];
+		} __packed device_change_state_event;
+		struct  {
+			u32 bus_count;
+			u32 switch_count;
+			enum visor_chipset_feature features;
+			u32 platform_number;
+		} __packed init_chipset;
+		struct  {
+			u32 options;
+			u32 test;
+		} __packed chipset_selftest;
+		u64 addr;
+		u64 handle;
+	};
+} __packed;
+
+/* All messages in any ControlVm queue have this layout. */
+struct controlvm_message {
+	struct controlvm_message_header hdr;
+	struct controlvm_message_packet cmd;
+} __packed;
+
+/*
+ * struct visor_controlvm_channel
+ * @struct header:
+ * @gp_controlvm:			Guest phys addr of this channel.
+ * @gp_partition_tables:		Guest phys addr of partition tables.
+ * @gp_diag_guest:			Guest phys addr of diagnostic channel.
+ * @gp_boot_romdisk:			Guest phys addr of (read-only) Boot
+ *					ROM disk.
+ * @gp_boot_ramdisk:			Guest phys addr of writable Boot RAM
+ *					disk.
+ * @gp_acpi_table:			Guest phys addr of acpi table.
+ * @gp_control_channel:			Guest phys addr of control channel.
+ * @gp_diag_romdisk:			Guest phys addr of diagnostic ROM disk.
+ * @gp_nvram:				Guest phys addr of NVRAM channel.
+ * @request_payload_offset:		Offset to request payload area.
+ * @event_payload_offset:		Offset to event payload area.
+ * @request_payload_bytes:		Bytes available in request payload area.
+ * @event_payload_bytes:		Bytes available in event payload area.
+ * @control_channel_bytes:
+ * @nvram_channel_bytes:		Bytes in PartitionNvram segment.
+ * @message_bytes:			sizeof(CONTROLVM_MESSAGE).
+ * @message_count:			CONTROLVM_MESSAGE_MAX.
+ * @gp_smbios_table:			Guest phys addr of SMBIOS tables.
+ * @gp_physical_smbios_table:		Guest phys addr of SMBIOS table.
+ * @gp_reserved:			VISOR_MAX_GUESTS_PER_SERVICE.
+ * @virtual_guest_firmware_image_base:	Guest physical address of EFI firmware
+ *					image base.
+ * @virtual_guest_firmware_entry_point:	Guest physical address of EFI firmware
+ *					entry point.
+ * @virtual_guest_firmware_image_size:	Guest EFI firmware image size.
+ * @virtual_guest_firmware_boot_base:	GPA = 1MB where EFI firmware image is
+ *					copied to.
+ * @virtual_guest_image_base:
+ * @virtual_guest_image_size:
+ * @prototype_control_channel_offset:
+ * @virtual_guest_partition_handle:
+ * @restore_action:			Restore Action field to restore the
+ *					guest partition.
+ * @dump_action:			For Windows guests it shows if the
+ *					visordisk is in dump mode.
+ * @nvram_fail_count:
+ * @saved_crash_message_count:		= CONTROLVM_CRASHMSG_MAX.
+ * @saved_crash_message_offset:		Offset to request payload area needed
+ *					for crash dump.
+ * @installation_error:			Type of error encountered during
+ *					installation.
+ * @installation_text_id:		Id of string to display.
+ * @installation_remaining_steps:	Number of remaining installation steps
+ *					(for progress bars).
+ * @tool_action:			VISOR_TOOL_ACTIONS Installation Action
+ *					field.
+ * @reserved: Alignment.
+ * @struct efi_visor_ind:
+ * @sp_reserved:
+ * @reserved2:				Force signals to begin on 128-byte
+ *					cache line.
+ * @struct request_queue:		Guest partition uses this queue to send
+ *					requests to Control.
+ * @struct response_queue:		Control uses this queue to respond to
+ *					service or guest partition request.
+ * @struct event_queue:			Control uses this queue to send events
+ *					to guest partition.
+ * @struct event_ack_queue:		Service or guest partition uses this
+ *					queue to ack Control events.
+ * @struct request_msg:			Request fixed-size message pool -
+ *					does not include payload.
+ * @struct response_msg:		Response fixed-size message pool -
+ *					does not include payload.
+ * @struct event_msg:			Event fixed-size message pool -
+ *					does not include payload.
+ * @struct event_ack_msg:		Ack fixed-size message pool -
+ *					does not include payload.
+ * @struct saved_crash_msg:		Message stored during IOVM creation to
+ *					be reused after crash.
+ */
+struct visor_controlvm_channel {
+	struct channel_header header;
+	u64 gp_controlvm;
+	u64 gp_partition_tables;
+	u64 gp_diag_guest;
+	u64 gp_boot_romdisk;
+	u64 gp_boot_ramdisk;
+	u64 gp_acpi_table;
+	u64 gp_control_channel;
+	u64 gp_diag_romdisk;
+	u64 gp_nvram;
+	u64 request_payload_offset;
+	u64 event_payload_offset;
+	u32 request_payload_bytes;
+	u32 event_payload_bytes;
+	u32 control_channel_bytes;
+	u32 nvram_channel_bytes;
+	u32 message_bytes;
+	u32 message_count;
+	u64 gp_smbios_table;
+	u64 gp_physical_smbios_table;
+	char gp_reserved[2688];
+	u64 virtual_guest_firmware_image_base;
+	u64 virtual_guest_firmware_entry_point;
+	u64 virtual_guest_firmware_image_size;
+	u64 virtual_guest_firmware_boot_base;
+	u64 virtual_guest_image_base;
+	u64 virtual_guest_image_size;
+	u64 prototype_control_channel_offset;
+	u64 virtual_guest_partition_handle;
+	u16 restore_action;
+	u16 dump_action;
+	u16 nvram_fail_count;
+	u16 saved_crash_message_count;
+	u32 saved_crash_message_offset;
+	u32 installation_error;
+	u32 installation_text_id;
+	u16 installation_remaining_steps;
+	u8 tool_action;
+	u8 reserved;
+	struct efi_visor_indication efi_visor_ind;
+	u32 sp_reserved;
+	u8 reserved2[28];
+	struct signal_queue_header request_queue;
+	struct signal_queue_header response_queue;
+	struct signal_queue_header event_queue;
+	struct signal_queue_header event_ack_queue;
+	struct controlvm_message request_msg[CONTROLVM_MESSAGE_MAX];
+	struct controlvm_message response_msg[CONTROLVM_MESSAGE_MAX];
+	struct controlvm_message event_msg[CONTROLVM_MESSAGE_MAX];
+	struct controlvm_message event_ack_msg[CONTROLVM_MESSAGE_MAX];
+	struct controlvm_message saved_crash_msg[CONTROLVM_CRASHMSG_MAX];
+} __packed;
+
+/*
+ * struct visor_controlvm_parameters_header
+ *
+ * The following header will be located at the beginning of PayloadVmOffset for
+ * various ControlVm commands. The receiver of a ControlVm command with a
+ * PayloadVmOffset will dereference this address and then use connection_offset,
+ * initiator_offset, and target_offset to get the location of UTF-8 formatted
+ * strings that can be parsed to obtain command-specific information. The value
+ * of total_length should equal PayloadBytes. The format of the strings at
+ * PayloadVmOffset will take different forms depending on the message.
+ */
+struct visor_controlvm_parameters_header {
+	u32 total_length;
+	u32 header_length;
+	u32 connection_offset;
+	u32 connection_length;
+	u32 initiator_offset;
+	u32 initiator_length;
+	u32 target_offset;
+	u32 target_length;
+	u32 client_offset;
+	u32 client_length;
+	u32 name_offset;
+	u32 name_length;
+	guid_t id;
+	u32 revision;
+	/* Natural alignment */
+	u32 reserved;
+} __packed;
+
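/*
 * Illustrative sketch only (not part of the channel definition above): one way
 * a receiver of a ControlVm command could bounds-check and locate one of the
 * UTF-8 strings described by the offset/length pairs in
 * struct visor_controlvm_parameters_header. The helper name is hypothetical.
 */
static inline const char *
visor_param_string(const struct visor_controlvm_parameters_header *phdr,
		   u32 offset, u32 length)
{
	/* Strings live past the header and must fit inside total_length. */
	if (offset < phdr->header_length ||
	    (u64)offset + length > phdr->total_length)
		return NULL;
	/* Strings are not guaranteed to be '\0'-terminated; honor length. */
	return (const char *)phdr + offset;
}
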
+/* General Errors------------------------------------------------------[0-99] */
+#define CONTROLVM_RESP_SUCCESS			   0
+#define CONTROLVM_RESP_ALREADY_DONE		   1
+#define CONTROLVM_RESP_IOREMAP_FAILED		   2
+#define CONTROLVM_RESP_KMALLOC_FAILED		   3
+#define CONTROLVM_RESP_ID_UNKNOWN		   4
+#define CONTROLVM_RESP_ID_INVALID_FOR_CLIENT	   5
+/* CONTROLVM_INIT_CHIPSET-------------------------------------------[100-199] */
+#define CONTROLVM_RESP_CLIENT_SWITCHCOUNT_NONZERO  100
+#define CONTROLVM_RESP_EXPECTED_CHIPSET_INIT	   101
+/* Maximum Limit----------------------------------------------------[200-299] */
+/* BUS_CREATE */
+#define CONTROLVM_RESP_ERROR_MAX_BUSES		   201
+/* DEVICE_CREATE */
+#define CONTROLVM_RESP_ERROR_MAX_DEVICES	   202
+/* Payload and Parameter Related------------------------------------[400-499] */
+/* SWITCH_ATTACHEXTPORT, DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_PAYLOAD_INVALID		   400
+/* Multiple */
+#define CONTROLVM_RESP_INITIATOR_PARAMETER_INVALID 401
+/* DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_TARGET_PARAMETER_INVALID	   402
+/* DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_CLIENT_PARAMETER_INVALID	   403
+/* Specified[Packet Structure] Value--------------------------------[500-599] */
+/* SWITCH_ATTACHINTPORT */
+/* BUS_CONFIGURE, DEVICE_CREATE, DEVICE_CONFIG, DEVICE_DESTROY */
+#define CONTROLVM_RESP_BUS_INVALID		   500
+/* SWITCH_ATTACHINTPORT*/
+/* DEVICE_CREATE, DEVICE_CONFIGURE, DEVICE_DESTROY */
+#define CONTROLVM_RESP_DEVICE_INVALID		   501
+/* DEVICE_CREATE, DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_CHANNEL_INVALID		   502
+/* Partition Driver Callback Interface------------------------------[600-699] */
+/* BUS_CREATE, BUS_DESTROY, DEVICE_CREATE, DEVICE_DESTROY */
+#define CONTROLVM_RESP_VIRTPCI_DRIVER_FAILURE	   604
+/* Unable to invoke VIRTPCI callback. VIRTPCI Callback returned error. */
+/* BUS_CREATE, BUS_DESTROY, DEVICE_CREATE, DEVICE_DESTROY */
+#define CONTROLVM_RESP_VIRTPCI_DRIVER_CALLBACK_ERROR   605
+/* Generic device callback returned error. */
+/* SWITCH_ATTACHEXTPORT, SWITCH_DETACHEXTPORT, DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_GENERIC_DRIVER_CALLBACK_ERROR   606
+/* Bus Related------------------------------------------------------[700-799] */
+/* BUS_DESTROY */
+#define CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED       700
+/* Channel Related--------------------------------------------------[800-899] */
+/* GET_CHANNELINFO, DEVICE_DESTROY */
+#define CONTROLVM_RESP_CHANNEL_TYPE_UNKNOWN	       800
+/* DEVICE_CREATE */
+#define CONTROLVM_RESP_CHANNEL_SIZE_TOO_SMALL	       801
+/* Chipset Shutdown Related---------------------------------------[1000-1099] */
+#define CONTROLVM_RESP_CHIPSET_SHUTDOWN_FAILED	       1000
+#define CONTROLVM_RESP_CHIPSET_SHUTDOWN_ALREADY_ACTIVE 1001
+/* Chipset Stop Related-------------------------------------------[1100-1199] */
+#define CONTROLVM_RESP_CHIPSET_STOP_FAILED_BUS	       1100
+#define CONTROLVM_RESP_CHIPSET_STOP_FAILED_SWITCH      1101
+/* Device Related-------------------------------------------------[1400-1499] */
+#define CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT	       1400
+
+/* __CONTROLVMCHANNEL_H__ */
+#endif
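To make the request/response convention above concrete: a response is posted on
CONTROLVM_QUEUE_RESPONSE and reuses the request's id and message_handle, with
completion_status and flags.failed reporting the outcome. A minimal, hypothetical
helper (not part of this patch) might look like:

static void example_init_response(struct controlvm_message *resp,
				  const struct controlvm_message_header *req,
				  int result)
{
	memset(resp, 0, sizeof(*resp));
	/* Echo the request id and handle so the sender can correlate. */
	resp->hdr.id = req->id;
	resp->hdr.message_handle = req->message_handle;
	resp->hdr.message_size = sizeof(*resp);
	if (result < 0) {
		resp->hdr.flags.failed = 1;
		resp->hdr.completion_status = (u32)(-result);
	}
}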
diff --git a/marvell/linux/drivers/visorbus/vbuschannel.h b/marvell/linux/drivers/visorbus/vbuschannel.h
new file mode 100644
index 0000000..b1dce26
--- /dev/null
+++ b/marvell/linux/drivers/visorbus/vbuschannel.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
+ * All rights reserved.
+ */
+
+#ifndef __VBUSCHANNEL_H__
+#define __VBUSCHANNEL_H__
+
+/*
+ * The vbus channel is the channel area provided via the BUS_CREATE controlvm
+ * message for each virtual bus.  This channel area is provided to both server
+ * and client ends of the bus.  The channel header area is initialized by
+ * the server, and the remaining information is filled in by the client.
+ * We currently use this for the client to provide various information about
+ * the client devices and client drivers for the server end to see.
+ */
+
+#include <linux/uuid.h>
+#include <linux/visorbus.h>
+
+/* {193b331b-c58f-11da-95a9-00e08161165f} */
+#define VISOR_VBUS_CHANNEL_GUID						\
+	GUID_INIT(0x193b331b, 0xc58f, 0x11da,				\
+		  0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
+
+/*
+ * Must increment this whenever you insert or delete fields within this channel
+ * struct.  Also increment whenever you change the meaning of fields within this
+ * channel struct so as to break pre-existing software.  Note that you can
+ * usually add fields to the END of the channel struct withOUT needing to
+ * increment this.
+ */
+#define VISOR_VBUS_CHANNEL_VERSIONID 1
+
+/*
+ * struct visor_vbus_deviceinfo
+ * @devtype:  Short string identifying the device type.
+ * @drvname:  Driver .sys file name.
+ * @infostrs: Kernel version.
+ * @reserved: Pad size to 256 bytes.
+ *
+ * An array of this struct is present in the channel area for each vbus. It is
+ * filled in by the client side to provide info about the device and driver from
+ * the client's perspective.
+ */
+struct visor_vbus_deviceinfo {
+	u8 devtype[16];
+	u8 drvname[16];
+	u8 infostrs[96];
+	u8 reserved[128];
+} __packed;
+
+/*
+ * struct visor_vbus_headerinfo
+ * @struct_bytes:	      Size of this struct in bytes.
+ * @device_info_struct_bytes: Size of VISOR_VBUS_DEVICEINFO.
+ * @dev_info_count:	      Num of items in DevInfo member. This is the
+ *			      allocated size.
+ * @chp_info_offset:	      Byte offset from beginning of this struct to the
+ *			      ChpInfo struct.
+ * @bus_info_offset:	      Byte offset from beginning of this struct to the
+ *			      BusInfo struct.
+ * @dev_info_offset:	      Byte offset from beginning of this struct to the
+ *			      DevInfo array.
+ * @reserved:		      Natural alignment.
+ */
+struct visor_vbus_headerinfo {
+	u32 struct_bytes;
+	u32 device_info_struct_bytes;
+	u32 dev_info_count;
+	u32 chp_info_offset;
+	u32 bus_info_offset;
+	u32 dev_info_offset;
+	u8 reserved[104];
+} __packed;
+
+/*
+ * struct visor_vbus_channel
+ * @channel_header: Initialized by server.
+ * @hdr_info:	    Initialized by server.
+ * @chp_info:	    Describes client chipset device and driver.
+ * @bus_info:	    Describes client bus device and driver.
+ * @dev_info:	    Describes client device and driver for each device on the
+ *		    bus.
+ */
+struct visor_vbus_channel {
+	struct channel_header channel_header;
+	struct visor_vbus_headerinfo hdr_info;
+	struct visor_vbus_deviceinfo chp_info;
+	struct visor_vbus_deviceinfo bus_info;
+	struct visor_vbus_deviceinfo dev_info[0];
+} __packed;
+
+#endif
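Because dev_info[] is a variable-length tail, the overall vbus channel size
scales with the number of device slots. A rough sketch of that arithmetic,
assuming the layout above (the helper name is made up):

static inline u32 example_vbus_channel_bytes(u32 dev_slots)
{
	/* fixed header + chp_info + bus_info, plus one entry per device */
	return sizeof(struct visor_vbus_channel) +
	       dev_slots * sizeof(struct visor_vbus_deviceinfo);
}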
diff --git a/marvell/linux/drivers/visorbus/visorbus_main.c b/marvell/linux/drivers/visorbus/visorbus_main.c
new file mode 100644
index 0000000..152fd29
--- /dev/null
+++ b/marvell/linux/drivers/visorbus/visorbus_main.c
@@ -0,0 +1,1234 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
+ * All rights reserved.
+ */
+
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/visorbus.h>
+#include <linux/uuid.h>
+
+#include "visorbus_private.h"
+
+static const guid_t visor_vbus_channel_guid = VISOR_VBUS_CHANNEL_GUID;
+
+/* Display string that is guaranteed to be no longer than 99 characters */
+#define LINESIZE 99
+#define POLLJIFFIES_NORMALCHANNEL 10
+
+/* stores whether bus_registration was successful */
+static bool initialized;
+static struct dentry *visorbus_debugfs_dir;
+
+/*
+ * DEVICE type attributes
+ *
+ * The modalias file will contain the guid of the device.
+ */
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct visor_device *vdev;
+	const guid_t *guid;
+
+	vdev = to_visor_device(dev);
+	guid = visorchannel_get_guid(vdev->visorchannel);
+	return sprintf(buf, "visorbus:%pUl\n", guid);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *visorbus_dev_attrs[] = {
+	&dev_attr_modalias.attr,
+	NULL,
+};
+
+ATTRIBUTE_GROUPS(visorbus_dev);
+
+/* filled in with info about parent chipset driver when we register with it */
+static struct visor_vbus_deviceinfo chipset_driverinfo;
+/* filled in with info about this driver, wrt it servicing client busses */
+static struct visor_vbus_deviceinfo clientbus_driverinfo;
+
+/* list of visor_device structs, linked via .list_all */
+static LIST_HEAD(list_all_bus_instances);
+/* list of visor_device structs, linked via .list_all */
+static LIST_HEAD(list_all_device_instances);
+
+/*
+ * Generic function useful for validating any type of channel when it is
+ * received by the client that will be accessing the channel.
+ * Note that <logCtx> is only needed for callers in the EFI environment, and
+ * is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
+ */
+int visor_check_channel(struct channel_header *ch, struct device *dev,
+			const guid_t *expected_guid, char *chname,
+			u64 expected_min_bytes, u32 expected_version,
+			u64 expected_signature)
+{
+	if (!guid_is_null(expected_guid)) {
+		/* caller wants us to verify type GUID */
+		if (!guid_equal(&ch->chtype, expected_guid)) {
+			dev_err(dev, "Channel mismatch on channel=%s(%pUL) field=type expected=%pUL actual=%pUL\n",
+				chname, expected_guid, expected_guid,
+				&ch->chtype);
+			return 0;
+		}
+	}
+	/* verify channel size */
+	if (expected_min_bytes > 0) {
+		if (ch->size < expected_min_bytes) {
+			dev_err(dev, "Channel mismatch on channel=%s(%pUL) field=size expected=0x%-8.8Lx actual=0x%-8.8Lx\n",
+				chname, expected_guid,
+				(unsigned long long)expected_min_bytes,
+				ch->size);
+			return 0;
+		}
+	}
+	/* verify channel version */
+	if (expected_version > 0) {
+		if (ch->version_id != expected_version) {
+			dev_err(dev, "Channel mismatch on channel=%s(%pUL) field=version expected=0x%-8.8lx actual=0x%-8.8x\n",
+				chname, expected_guid,
+				(unsigned long)expected_version,
+				ch->version_id);
+			return 0;
+		}
+	}
+	/* verify channel signature */
+	if (expected_signature > 0) {
+		if (ch->signature != expected_signature) {
+			dev_err(dev, "Channel mismatch on channel=%s(%pUL) field=signature expected=0x%-8.8Lx actual=0x%-8.8Lx\n",
+				chname, expected_guid,	expected_signature,
+				ch->signature);
+			return 0;
+		}
+	}
+	return 1;
+}
+
+static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env)
+{
+	struct visor_device *dev;
+	const guid_t *guid;
+
+	dev = to_visor_device(xdev);
+	guid = visorchannel_get_guid(dev->visorchannel);
+	return add_uevent_var(env, "MODALIAS=visorbus:%pUl", guid);
+}
+
+/*
+ * visorbus_match() - called automatically upon adding a visor_device
+ *                    (device_add), or adding a visor_driver
+ *                    (visorbus_register_visor_driver)
+ * @xdev: struct device for the device being matched
+ * @xdrv: struct device_driver for driver to match device against
+ *
+ * Return: 1 iff the provided driver can control the specified device
+ */
+static int visorbus_match(struct device *xdev, struct device_driver *xdrv)
+{
+	const guid_t *channel_type;
+	int i;
+	struct visor_device *dev;
+	struct visor_driver *drv;
+	struct visorchannel *chan;
+
+	dev = to_visor_device(xdev);
+	channel_type = visorchannel_get_guid(dev->visorchannel);
+	drv = to_visor_driver(xdrv);
+	chan = dev->visorchannel;
+	if (!drv->channel_types)
+		return 0;
+	for (i = 0; !guid_is_null(&drv->channel_types[i].guid); i++)
+		if (guid_equal(&drv->channel_types[i].guid, channel_type) &&
+		    visor_check_channel(visorchannel_get_header(chan),
+					xdev,
+					&drv->channel_types[i].guid,
+					(char *)drv->channel_types[i].name,
+					drv->channel_types[i].min_bytes,
+					drv->channel_types[i].version,
+					VISOR_CHANNEL_SIGNATURE))
+			return i + 1;
+	return 0;
+}
+
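/*
 * For illustration only (not part of this file): the shape of the
 * channel_types table that visorbus_match() walks. A function driver supplies
 * it through struct visor_driver; the GUID and name below are made up, and
 * the empty terminating entry (null GUID) is what ends the loop above.
 */
#define EXAMPLE_CHANNEL_GUID \
	GUID_INIT(0x00000000, 0x0000, 0x0000, \
		  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01)

static struct visor_channeltype_descriptor example_channel_types[] = {
	{ .guid = EXAMPLE_CHANNEL_GUID, .name = "example",
	  .min_bytes = sizeof(struct channel_header), .version = 1 },
	{}
};
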
+/*
+ * This describes the TYPE of bus.
+ * (Don't confuse this with an INSTANCE of the bus.)
+ */
+static struct bus_type visorbus_type = {
+	.name = "visorbus",
+	.match = visorbus_match,
+	.uevent = visorbus_uevent,
+	.dev_groups = visorbus_dev_groups,
+};
+
+struct visor_busdev {
+	u32 bus_no;
+	u32 dev_no;
+};
+
+static int match_visorbus_dev_by_id(struct device *dev, const void *data)
+{
+	struct visor_device *vdev = to_visor_device(dev);
+	const struct visor_busdev *id = data;
+
+	if (vdev->chipset_bus_no == id->bus_no &&
+	    vdev->chipset_dev_no == id->dev_no)
+		return 1;
+	return 0;
+}
+
+struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
+					       struct visor_device *from)
+{
+	struct device *dev;
+	struct device *dev_start = NULL;
+	struct visor_busdev id = {
+		.bus_no = bus_no,
+		.dev_no = dev_no
+	};
+
+	if (from)
+		dev_start = &from->device;
+	dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
+			      match_visorbus_dev_by_id);
+	if (!dev)
+		return NULL;
+	return to_visor_device(dev);
+}
+
+/*
+ * visorbus_release_busdevice() - called when device_unregister() is called for
+ *                                the bus device instance, after all other tasks
+ *                                involved with destroying the dev are complete
+ * @xdev: struct device for the bus being released
+ */
+static void visorbus_release_busdevice(struct device *xdev)
+{
+	struct visor_device *dev = dev_get_drvdata(xdev);
+
+	debugfs_remove(dev->debugfs_bus_info);
+	debugfs_remove_recursive(dev->debugfs_dir);
+	visorchannel_destroy(dev->visorchannel);
+	kfree(dev);
+}
+
+/*
+ * visorbus_release_device() - called when device_unregister() is called for
+ *                             each child device instance
+ * @xdev: struct device for the visor device being released
+ */
+static void visorbus_release_device(struct device *xdev)
+{
+	struct visor_device *dev = to_visor_device(xdev);
+
+	visorchannel_destroy(dev->visorchannel);
+	kfree(dev);
+}
+
+/*
+ * BUS specific channel attributes to appear under
+ * /sys/bus/visorbus<x>/dev<y>/channel
+ */
+
+static ssize_t physaddr_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct visor_device *vdev = to_visor_device(dev);
+
+	return sprintf(buf, "0x%llx\n",
+		       visorchannel_get_physaddr(vdev->visorchannel));
+}
+static DEVICE_ATTR_RO(physaddr);
+
+static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct visor_device *vdev = to_visor_device(dev);
+
+	return sprintf(buf, "0x%lx\n",
+		       visorchannel_get_nbytes(vdev->visorchannel));
+}
+static DEVICE_ATTR_RO(nbytes);
+
+static ssize_t clientpartition_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct visor_device *vdev = to_visor_device(dev);
+
+	return sprintf(buf, "0x%llx\n",
+		       visorchannel_get_clientpartition(vdev->visorchannel));
+}
+static DEVICE_ATTR_RO(clientpartition);
+
+static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct visor_device *vdev = to_visor_device(dev);
+	char typeid[LINESIZE];
+
+	return sprintf(buf, "%s\n",
+		       visorchannel_id(vdev->visorchannel, typeid));
+}
+static DEVICE_ATTR_RO(typeguid);
+
+static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct visor_device *vdev = to_visor_device(dev);
+	char zoneid[LINESIZE];
+
+	return sprintf(buf, "%s\n",
+		       visorchannel_zoneid(vdev->visorchannel, zoneid));
+}
+static DEVICE_ATTR_RO(zoneguid);
+
+static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	int i = 0;
+	struct bus_type *xbus = dev->bus;
+	struct device_driver *xdrv = dev->driver;
+	struct visor_driver *drv = NULL;
+
+	if (!xdrv)
+		return 0;
+	i = xbus->match(dev, xdrv);
+	if (!i)
+		return 0;
+	drv = to_visor_driver(xdrv);
+	return sprintf(buf, "%s\n", drv->channel_types[i - 1].name);
+}
+static DEVICE_ATTR_RO(typename);
+
+static struct attribute *channel_attrs[] = {
+	&dev_attr_physaddr.attr,
+	&dev_attr_nbytes.attr,
+	&dev_attr_clientpartition.attr,
+	&dev_attr_typeguid.attr,
+	&dev_attr_zoneguid.attr,
+	&dev_attr_typename.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(channel);
+
+/*
+ *  BUS instance attributes
+ *
+ *  define & implement display of bus attributes under
+ *  /sys/bus/visorbus/devices/visorbus<n>.
+ */
+static ssize_t partition_handle_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct visor_device *vdev = to_visor_device(dev);
+	u64 handle = visorchannel_get_clientpartition(vdev->visorchannel);
+
+	return sprintf(buf, "0x%llx\n", handle);
+}
+static DEVICE_ATTR_RO(partition_handle);
+
+static ssize_t partition_guid_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct visor_device *vdev = to_visor_device(dev);
+
+	return sprintf(buf, "{%pUb}\n", &vdev->partition_guid);
+}
+static DEVICE_ATTR_RO(partition_guid);
+
+static ssize_t partition_name_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct visor_device *vdev = to_visor_device(dev);
+
+	return sprintf(buf, "%s\n", vdev->name);
+}
+static DEVICE_ATTR_RO(partition_name);
+
+static ssize_t channel_addr_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct visor_device *vdev = to_visor_device(dev);
+	u64 addr = visorchannel_get_physaddr(vdev->visorchannel);
+
+	return sprintf(buf, "0x%llx\n", addr);
+}
+static DEVICE_ATTR_RO(channel_addr);
+
+static ssize_t channel_bytes_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct visor_device *vdev = to_visor_device(dev);
+	u64 nbytes = visorchannel_get_nbytes(vdev->visorchannel);
+
+	return sprintf(buf, "0x%llx\n", nbytes);
+}
+static DEVICE_ATTR_RO(channel_bytes);
+
+static ssize_t channel_id_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct visor_device *vdev = to_visor_device(dev);
+	int len = 0;
+
+	visorchannel_id(vdev->visorchannel, buf);
+	len = strlen(buf);
+	buf[len++] = '\n';
+	return len;
+}
+static DEVICE_ATTR_RO(channel_id);
+
+static struct attribute *visorbus_attrs[] = {
+	&dev_attr_partition_handle.attr,
+	&dev_attr_partition_guid.attr,
+	&dev_attr_partition_name.attr,
+	&dev_attr_channel_addr.attr,
+	&dev_attr_channel_bytes.attr,
+	&dev_attr_channel_id.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(visorbus);
+
+/*
+ *  BUS debugfs entries
+ *
+ *  define & implement display of debugfs attributes under
+ *  /sys/kernel/debug/visorbus/visorbus<n>.
+ */
+
+/*
+ * vbuschannel_print_devinfo() - format a struct visor_vbus_deviceinfo
+ *                               and write it to a seq_file
+ * @devinfo: the struct visor_vbus_deviceinfo to format
+ * @seq: seq_file to write to
+ * @devix: the device index to be included in the output data, or -1 if no
+ *         device index is to be included
+ *
+ * Reads @devInfo, and writes it in human-readable notation to @seq.
+ */
+static void vbuschannel_print_devinfo(struct visor_vbus_deviceinfo *devinfo,
+				      struct seq_file *seq, int devix)
+{
+	/* uninitialized vbus device entry */
+	if (!isprint(devinfo->devtype[0]))
+		return;
+	if (devix >= 0)
+		seq_printf(seq, "[%d]", devix);
+	else
+		/* vbus device entry is for bus or chipset */
+		seq_puts(seq, "   ");
+	/*
+	 * Note: because the s-Par back-end is free to scribble in this area,
+	 * we never assume '\0'-termination.
+	 */
+	seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->devtype),
+		   (int)sizeof(devinfo->devtype), devinfo->devtype);
+	seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->drvname),
+		   (int)sizeof(devinfo->drvname), devinfo->drvname);
+	seq_printf(seq, "%.*s\n", (int)sizeof(devinfo->infostrs),
+		   devinfo->infostrs);
+}
+
+static int bus_info_debugfs_show(struct seq_file *seq, void *v)
+{
+	int i = 0;
+	unsigned long off;
+	struct visor_vbus_deviceinfo dev_info;
+	struct visor_device *vdev = seq->private;
+	struct visorchannel *channel = vdev->visorchannel;
+
+	if (!channel)
+		return 0;
+
+	seq_printf(seq,
+		   "Client device/driver info for %s partition (vbus #%u):\n",
+		   ((vdev->name) ? (char *)(vdev->name) : ""),
+		   vdev->chipset_bus_no);
+	if (visorchannel_read(channel,
+			      offsetof(struct visor_vbus_channel, chp_info),
+			      &dev_info, sizeof(dev_info)) >= 0)
+		vbuschannel_print_devinfo(&dev_info, seq, -1);
+	if (visorchannel_read(channel,
+			      offsetof(struct visor_vbus_channel, bus_info),
+			      &dev_info, sizeof(dev_info)) >= 0)
+		vbuschannel_print_devinfo(&dev_info, seq, -1);
+
+	off = offsetof(struct visor_vbus_channel, dev_info);
+	while (off + sizeof(dev_info) <= visorchannel_get_nbytes(channel)) {
+		if (visorchannel_read(channel, off, &dev_info,
+				      sizeof(dev_info)) >= 0)
+			vbuschannel_print_devinfo(&dev_info, seq, i);
+		off += sizeof(dev_info);
+		i++;
+	}
+	return 0;
+}
+
+static int bus_info_debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, bus_info_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations bus_info_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = bus_info_debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static void dev_periodic_work(struct timer_list *t)
+{
+	struct visor_device *dev = from_timer(dev, t, timer);
+	struct visor_driver *drv = to_visor_driver(dev->device.driver);
+
+	drv->channel_interrupt(dev);
+	mod_timer(&dev->timer, jiffies + POLLJIFFIES_NORMALCHANNEL);
+}
+
+static int dev_start_periodic_work(struct visor_device *dev)
+{
+	if (dev->being_removed || dev->timer_active)
+		return -EINVAL;
+
+	/* now up by at least 2 */
+	get_device(&dev->device);
+	dev->timer.expires = jiffies + POLLJIFFIES_NORMALCHANNEL;
+	add_timer(&dev->timer);
+	dev->timer_active = true;
+	return 0;
+}
+
+static void dev_stop_periodic_work(struct visor_device *dev)
+{
+	if (!dev->timer_active)
+		return;
+
+	del_timer_sync(&dev->timer);
+	dev->timer_active = false;
+	put_device(&dev->device);
+}
+
+/*
+ * visordriver_remove_device() - handle visor device going away
+ * @xdev: struct device for the visor device being removed
+ *
+ * This is called when device_unregister() is called for each child device
+ * instance, to notify the appropriate visorbus function driver that the device
+ * is going away, and to decrease the reference count of the device.
+ *
+ * Return: 0 iff successful
+ */
+static int visordriver_remove_device(struct device *xdev)
+{
+	struct visor_device *dev = to_visor_device(xdev);
+	struct visor_driver *drv = to_visor_driver(xdev->driver);
+
+	mutex_lock(&dev->visordriver_callback_lock);
+	dev->being_removed = true;
+	drv->remove(dev);
+	mutex_unlock(&dev->visordriver_callback_lock);
+	dev_stop_periodic_work(dev);
+	put_device(&dev->device);
+	return 0;
+}
+
+/*
+ * visorbus_unregister_visor_driver() - unregisters the provided driver
+ * @drv: the driver to unregister
+ *
+ * A visor function driver calls this function to unregister the driver,
+ * i.e., within its module_exit function.
+ */
+void visorbus_unregister_visor_driver(struct visor_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(visorbus_unregister_visor_driver);
+
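/*
 * Illustrative only: how a function driver typically pairs registration in its
 * module_init with the unregister call above in its module_exit. All names
 * here are made up; example_channel_types is the kind of table sketched after
 * visorbus_match() above.
 */
static int example_probe(struct visor_device *dev)
{
	return 0;
}

static void example_remove(struct visor_device *dev)
{
}

static struct visor_driver example_driver = {
	.name = "example",
	.owner = THIS_MODULE,
	.channel_types = example_channel_types,
	.probe = example_probe,
	.remove = example_remove,
};

static int __init example_driver_init(void)
{
	return visorbus_register_visor_driver(&example_driver);
}
module_init(example_driver_init);

static void __exit example_driver_exit(void)
{
	visorbus_unregister_visor_driver(&example_driver);
}
module_exit(example_driver_exit);
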
+/*
+ * visorbus_read_channel() - reads from the designated channel into
+ *                           the provided buffer
+ * @dev:    the device whose channel is read from
+ * @offset: the offset into the channel at which reading starts
+ * @dest:   the destination buffer that is written into from the channel
+ * @nbytes: the number of bytes to read from the channel
+ *
+ * If receiving a message, use the visorchannel_signalremove() function instead.
+ *
+ * Return: integer indicating success (zero) or failure (non-zero)
+ */
+int visorbus_read_channel(struct visor_device *dev, unsigned long offset,
+			  void *dest, unsigned long nbytes)
+{
+	return visorchannel_read(dev->visorchannel, offset, dest, nbytes);
+}
+EXPORT_SYMBOL_GPL(visorbus_read_channel);
+
+/*
+ * visorbus_write_channel() - writes the provided buffer into the designated
+ *                            channel
+ * @dev:    the device whose channel is written to
+ * @offset: the offset into the channel at which writing starts
+ * @src:    the source buffer that is written into the channel
+ * @nbytes: the number of bytes to write into the channel
+ *
+ * If sending a message, use the visorchannel_signalinsert() function instead.
+ *
+ * Return: integer indicating success (zero) or failure (non-zero)
+ */
+int visorbus_write_channel(struct visor_device *dev, unsigned long offset,
+			   void *src, unsigned long nbytes)
+{
+	return visorchannel_write(dev->visorchannel, offset, src, nbytes);
+}
+EXPORT_SYMBOL_GPL(visorbus_write_channel);
+
+/*
+ * visorbus_enable_channel_interrupts() - enables interrupts on the
+ *                                        designated device
+ * @dev: the device on which to enable interrupts
+ *
+ * Currently we don't yet have a real interrupt, so for now we just call the
+ * interrupt function periodically via a timer.
+ */
+int visorbus_enable_channel_interrupts(struct visor_device *dev)
+{
+	struct visor_driver *drv = to_visor_driver(dev->device.driver);
+
+	if (!drv->channel_interrupt) {
+		dev_err(&dev->device, "%s no interrupt function!\n", __func__);
+		return -ENOENT;
+	}
+
+	return dev_start_periodic_work(dev);
+}
+EXPORT_SYMBOL_GPL(visorbus_enable_channel_interrupts);
+
+/*
+ * visorbus_disable_channel_interrupts() - disables interrupts on the
+ *                                         designated device
+ * @dev: the device on which to disable interrupts
+ */
+void visorbus_disable_channel_interrupts(struct visor_device *dev)
+{
+	dev_stop_periodic_work(dev);
+}
+EXPORT_SYMBOL_GPL(visorbus_disable_channel_interrupts);
+
+/*
+ * create_visor_device() - create visor device as a result of receiving the
+ *                         controlvm device_create message for a new device
+ * @dev: a freshly-zeroed struct visor_device, containing only filled-in values
+ *       for chipset_bus_no and chipset_dev_no, that will be initialized
+ *
+ * This is how everything starts from the device end.
+ * This function is called when a channel first appears via a ControlVM
+ * message.  In response, this function allocates a visor_device to correspond
+ * to the new channel, and attempts to connect it to the appropriate driver. If
+ * the appropriate driver is found, the visor_driver.probe() function for that
+ * driver will be called, and will be passed the new visor_device that we
+ * just created.
+ *
+ * It's ok if the appropriate driver is not yet loaded, because in that case
+ * the new device struct will just stick around in the bus' list of devices.
+ * When the appropriate driver calls visorbus_register_visor_driver(), the
+ * visor_driver.probe() for the new driver will be called with the new device.
+ *
+ * Return: 0 if successful, otherwise the negative value returned by
+ *         device_add() indicating the reason for failure
+ */
+int create_visor_device(struct visor_device *dev)
+{
+	int err;
+	u32 chipset_bus_no = dev->chipset_bus_no;
+	u32 chipset_dev_no = dev->chipset_dev_no;
+
+	mutex_init(&dev->visordriver_callback_lock);
+	dev->device.bus = &visorbus_type;
+	dev->device.groups = channel_groups;
+	device_initialize(&dev->device);
+	dev->device.release = visorbus_release_device;
+	/* keep a reference just for us (now 2) */
+	get_device(&dev->device);
+	timer_setup(&dev->timer, dev_periodic_work, 0);
+	/*
+	 * bus_id must be a unique name with respect to this bus TYPE (NOT bus
+	 * instance).  That's why we need to include the bus number within the
+	 * name.
+	 */
+	err = dev_set_name(&dev->device, "vbus%u:dev%u",
+			   chipset_bus_no, chipset_dev_no);
+	if (err)
+		goto err_put;
+	/*
+	 * device_add does this:
+	 *    bus_add_device(dev)
+	 *    ->device_attach(dev)
+	 *      ->for each driver drv registered on the bus that dev is on
+	 *          if (dev.drv)  **  device already has a driver **
+	 *            ** not sure we could ever get here... **
+	 *          else
+	 *            if (bus.match(dev,drv)) [visorbus_match]
+	 *              dev.drv = drv
+	 *              if (!drv.probe(dev))  [visordriver_probe_device]
+	 *                dev.drv = NULL
+	 *
+	 * Note that device_add does NOT fail if no driver claims the
+	 * device.  The device will be linked onto bus_type.klist_devices
+	 * regardless (use bus_for_each_dev).
+	 */
+	err = device_add(&dev->device);
+	if (err < 0)
+		goto err_put;
+	list_add_tail(&dev->list_all, &list_all_device_instances);
+	dev->state.created = 1;
+	visorbus_response(dev, err, CONTROLVM_DEVICE_CREATE);
+	/* success: reference kept via unmatched get_device() */
+	return 0;
+
+err_put:
+	put_device(&dev->device);
+	dev_err(&dev->device, "Creating visor device failed. %d\n", err);
+	return err;
+}
+
+void remove_visor_device(struct visor_device *dev)
+{
+	list_del(&dev->list_all);
+	put_device(&dev->device);
+	if (dev->pending_msg_hdr)
+		visorbus_response(dev, 0, CONTROLVM_DEVICE_DESTROY);
+	device_unregister(&dev->device);
+}
+
+static int get_vbus_header_info(struct visorchannel *chan,
+				struct device *dev,
+				struct visor_vbus_headerinfo *hdr_info)
+{
+	int err;
+
+	if (!visor_check_channel(visorchannel_get_header(chan),
+				 dev,
+				 &visor_vbus_channel_guid,
+				 "vbus",
+				 sizeof(struct visor_vbus_channel),
+				 VISOR_VBUS_CHANNEL_VERSIONID,
+				 VISOR_CHANNEL_SIGNATURE))
+		return -EINVAL;
+
+	err = visorchannel_read(chan, sizeof(struct channel_header), hdr_info,
+				sizeof(*hdr_info));
+	if (err < 0)
+		return err;
+	if (hdr_info->struct_bytes < sizeof(struct visor_vbus_headerinfo))
+		return -EINVAL;
+	if (hdr_info->device_info_struct_bytes <
+	    sizeof(struct visor_vbus_deviceinfo))
+		return -EINVAL;
+	return 0;
+}
+
+/*
+ * write_vbus_chp_info() - write the contents of <info> to the struct
+ *                         visor_vbus_channel.chp_info
+ * @chan:     identifies the s-Par channel that will be updated
+ * @hdr_info: used to find appropriate channel offset to write data
+ * @info:     contains the information to write
+ *
+ * Writes chipset info into the channel memory to be used for diagnostic
+ * purposes.
+ *
+ * Returns no value since this is debug information and not needed for
+ * device functionality.
+ */
+static void write_vbus_chp_info(struct visorchannel *chan,
+				struct visor_vbus_headerinfo *hdr_info,
+				struct visor_vbus_deviceinfo *info)
+{
+	int off;
+
+	if (hdr_info->chp_info_offset == 0)
+		return;
+
+	off = sizeof(struct channel_header) + hdr_info->chp_info_offset;
+	visorchannel_write(chan, off, info, sizeof(*info));
+}
+
+/*
+ * write_vbus_bus_info() - write the contents of <info> to the struct
+ *                         visor_vbus_channel.bus_info
+ * @chan:     identifies the s-Par channel that will be updated
+ * @hdr_info: used to find appropriate channel offset to write data
+ * @info:     contains the information to write
+ *
+ * Writes bus info into the channel memory to be used for diagnostic
+ * purposes.
+ *
+ * Returns no value since this is debug information and not needed for
+ * device functionality.
+ */
+static void write_vbus_bus_info(struct visorchannel *chan,
+				struct visor_vbus_headerinfo *hdr_info,
+				struct visor_vbus_deviceinfo *info)
+{
+	int off;
+
+	if (hdr_info->bus_info_offset == 0)
+		return;
+
+	off = sizeof(struct channel_header) + hdr_info->bus_info_offset;
+	visorchannel_write(chan, off, info, sizeof(*info));
+}
+
+/*
+ * write_vbus_dev_info() - write the contents of <info> to the struct
+ *                         visor_vbus_channel.dev_info[<devix>]
+ * @chan:     identifies the s-Par channel that will be updated
+ * @hdr_info: used to find appropriate channel offset to write data
+ * @info:     contains the information to write
+ * @devix:    the relative device number (0..n-1) of the device on the bus
+ *
+ * Writes device info into the channel memory to be used for diagnostic
+ * purposes.
+ *
+ * Returns no value since this is debug information and not needed for
+ * device functionality.
+ */
+static void write_vbus_dev_info(struct visorchannel *chan,
+				struct visor_vbus_headerinfo *hdr_info,
+				struct visor_vbus_deviceinfo *info,
+				unsigned int devix)
+{
+	int off;
+
+	if (hdr_info->dev_info_offset == 0)
+		return;
+	off = (sizeof(struct channel_header) + hdr_info->dev_info_offset) +
+	      (hdr_info->device_info_struct_bytes * devix);
+	visorchannel_write(chan, off, info, sizeof(*info));
+}
+
+static void bus_device_info_init(
+		struct visor_vbus_deviceinfo *bus_device_info_ptr,
+		const char *dev_type, const char *drv_name)
+{
+	memset(bus_device_info_ptr, 0, sizeof(struct visor_vbus_deviceinfo));
+	snprintf(bus_device_info_ptr->devtype,
+		 sizeof(bus_device_info_ptr->devtype),
+		 "%s", (dev_type) ? dev_type : "unknownType");
+	snprintf(bus_device_info_ptr->drvname,
+		 sizeof(bus_device_info_ptr->drvname),
+		 "%s", (drv_name) ? drv_name : "unknownDriver");
+	snprintf(bus_device_info_ptr->infostrs,
+		 sizeof(bus_device_info_ptr->infostrs), "kernel ver. %s",
+		 utsname()->release);
+}
+
+/*
+ * publish_vbus_dev_info() - for a child device just created on a client bus,
+ *			     fill in information about the driver that is
+ *			     controlling this device into the appropriate slot
+ *			     within the vbus channel of the bus instance
+ * @visordev: struct visor_device for the desired device
+ */
+static void publish_vbus_dev_info(struct visor_device *visordev)
+{
+	int i;
+	struct visor_device *bdev;
+	struct visor_driver *visordrv;
+	u32 bus_no = visordev->chipset_bus_no;
+	u32 dev_no = visordev->chipset_dev_no;
+	struct visor_vbus_deviceinfo dev_info;
+	const char *chan_type_name = NULL;
+	struct visor_vbus_headerinfo *hdr_info;
+
+	if (!visordev->device.driver)
+		return;
+	bdev = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
+	if (!bdev)
+		return;
+	hdr_info = (struct visor_vbus_headerinfo *)bdev->vbus_hdr_info;
+	if (!hdr_info)
+		return;
+	visordrv = to_visor_driver(visordev->device.driver);
+
+	/*
+	 * Within the list of device types (by GUID) that the driver
+	 * says it supports, find out which one of those types matches
+	 * the type of this device, so that we can include the device
+	 * type name
+	 */
+	for (i = 0; visordrv->channel_types[i].name; i++) {
+		if (guid_equal(&visordrv->channel_types[i].guid,
+			       &visordev->channel_type_guid)) {
+			chan_type_name = visordrv->channel_types[i].name;
+			break;
+		}
+	}
+	bus_device_info_init(&dev_info, chan_type_name, visordrv->name);
+	write_vbus_dev_info(bdev->visorchannel, hdr_info, &dev_info, dev_no);
+	write_vbus_chp_info(bdev->visorchannel, hdr_info, &chipset_driverinfo);
+	write_vbus_bus_info(bdev->visorchannel, hdr_info,
+			    &clientbus_driverinfo);
+}
+
+/*
+ * visordriver_probe_device() - handle new visor device coming online
+ * @xdev: struct device for the visor device being probed
+ *
+ * This is called automatically upon adding a visor_device (device_add), or
+ * adding a visor_driver (visorbus_register_visor_driver), but only after
+ * visorbus_match() has returned 1 to indicate a successful match between
+ * driver and device.
+ *
+ * If successful, a reference to the device will be held onto via get_device().
+ *
+ * Return: 0 if successful, meaning the function driver's probe() function
+ *         was successful with this device, otherwise a negative errno
+ *         value indicating failure reason
+ */
+static int visordriver_probe_device(struct device *xdev)
+{
+	int err;
+	struct visor_driver *drv = to_visor_driver(xdev->driver);
+	struct visor_device *dev = to_visor_device(xdev);
+
+	mutex_lock(&dev->visordriver_callback_lock);
+	dev->being_removed = false;
+	err = drv->probe(dev);
+	if (err) {
+		mutex_unlock(&dev->visordriver_callback_lock);
+		return err;
+	}
+	/* success: reference kept via unmatched get_device() */
+	get_device(&dev->device);
+	publish_vbus_dev_info(dev);
+	mutex_unlock(&dev->visordriver_callback_lock);
+	return 0;
+}
+
+/*
+ * visorbus_register_visor_driver() - registers the provided visor driver for
+ *				      handling one or more visor device
+ *                                    types (channel_types)
+ * @drv: the driver to register
+ *
+ * A visor function driver calls this function to register the driver. The
+ * caller MUST fill in the following fields within the #drv structure:
+ *     name, version, owner, channel_types, probe, remove, pause, resume
+ *
+ * Here's how the whole Linux bus / driver / device model works.
+ *
+ * At system start-up, the visorbus kernel module is loaded, which registers
+ * visorbus_type as a bus type, using bus_register().
+ *
+ * All kernel modules that support particular device types on a
+ * visorbus bus are loaded.  Each of these kernel modules calls
+ * visorbus_register_visor_driver() in their init functions, passing a
+ * visor_driver struct.  visorbus_register_visor_driver() in turn calls
+ * driver_register(&visor_driver.driver).  This .driver member is
+ * initialized with generic methods (like probe), whose sole responsibility
+ * is to act as a broker for the real methods, which are within the
+ * visor_driver struct.  (This is the way the subclass behavior is
+ * implemented, since visor_driver is essentially a subclass of the
+ * generic driver.)  Whenever a driver_register() happens, core bus code in
+ * the kernel does (see device_attach() in drivers/base/dd.c):
+ *
+ *     for each dev associated with the bus (the bus that driver is on) that
+ *     does not yet have a driver
+ *         if bus.match(dev,newdriver) == yes_matched  ** .match specified
+ *                                                ** during bus_register().
+ *             newdriver.probe(dev)  ** for visor drivers, this will call
+ *                   ** the generic driver.probe implemented in visorbus.c,
+ *                   ** which in turn calls the probe specified within the
+ *                   ** struct visor_driver (which was specified by the
+ *                   ** actual device driver as part of
+ *                   ** visorbus_register_visor_driver()).
+ *
+ * The above dance also happens when a new device appears.
+ * So the question is, how are devices created within the system?
+ * Basically, just call device_add(dev).  See pci_bus_add_devices().
+ * pci_scan_device() shows an example of how to build a device struct.  It
+ * returns the newly-created struct to pci_scan_single_device(), which adds it
+ * to the list of devices at PCIBUS.devices.  That list of devices is what
+ * is traversed by pci_bus_add_devices().
+ *
+ * Return: 0 on success, or a negative errno value on failure
+ */
+int visorbus_register_visor_driver(struct visor_driver *drv)
+{
+	/* can't register on a nonexistent bus */
+	if (!initialized)
+		return -ENODEV;
+	if (!drv->probe)
+		return -EINVAL;
+	if (!drv->remove)
+		return -EINVAL;
+	if (!drv->pause)
+		return -EINVAL;
+	if (!drv->resume)
+		return -EINVAL;
+
+	drv->driver.name = drv->name;
+	drv->driver.bus = &visorbus_type;
+	drv->driver.probe = visordriver_probe_device;
+	drv->driver.remove = visordriver_remove_device;
+	drv->driver.owner = drv->owner;
+	/*
+	 * driver_register does this:
+	 *   bus_add_driver(drv)
+	 *   ->if (drv.bus)  ** (bus_type) **
+	 *       driver_attach(drv)
+	 *         for each dev with bus type of drv.bus
+	 *           if (!dev.drv)  ** no driver assigned yet **
+	 *             if (bus.match(dev,drv))  [visorbus_match]
+	 *               dev.drv = drv
+	 *               if (!drv.probe(dev))   [visordriver_probe_device]
+	 *                 dev.drv = NULL
+	 */
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(visorbus_register_visor_driver);
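+
+/*
+ * Illustrative sketch only (not part of this driver): a minimal visor
+ * function driver supplying the fields that visorbus_register_visor_driver()
+ * checks for.  All names are hypothetical; example_channel_types stands for a
+ * channel-type table whose final entry has a NULL name (see
+ * publish_vbus_dev_info()), and the probe/remove/pause/resume prototypes are
+ * assumptions that follow the calls made from this file, including the
+ * (struct visor_device *, int) completion-callback signature.
+ *
+ *     static int example_probe(struct visor_device *dev)
+ *     {
+ *             return 0;
+ *     }
+ *
+ *     static void example_remove(struct visor_device *dev)
+ *     {
+ *     }
+ *
+ *     static int example_pause(struct visor_device *dev,
+ *                              void (*complete)(struct visor_device *, int))
+ *     {
+ *             complete(dev, 0);
+ *             return 0;
+ *     }
+ *
+ *     static int example_resume(struct visor_device *dev,
+ *                               void (*complete)(struct visor_device *, int))
+ *     {
+ *             complete(dev, 0);
+ *             return 0;
+ *     }
+ *
+ *     static struct visor_driver example_driver = {
+ *             .name = "example",
+ *             .owner = THIS_MODULE,
+ *             .channel_types = example_channel_types,
+ *             .probe = example_probe,
+ *             .remove = example_remove,
+ *             .pause = example_pause,
+ *             .resume = example_resume,
+ *     };
+ *
+ * The module's init function would then simply do:
+ *
+ *     return visorbus_register_visor_driver(&example_driver);
+ */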
+
+/*
+ * visorbus_create_instance() - create a device instance for the visorbus itself
+ * @dev: struct visor_device indicating the bus instance
+ *
+ * Return: 0 for success, otherwise negative errno value indicating reason for
+ *         failure
+ */
+int visorbus_create_instance(struct visor_device *dev)
+{
+	int id = dev->chipset_bus_no;
+	int err;
+	struct visor_vbus_headerinfo *hdr_info;
+
+	hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL);
+	if (!hdr_info)
+		return -ENOMEM;
+	dev_set_name(&dev->device, "visorbus%d", id);
+	dev->device.bus = &visorbus_type;
+	dev->device.groups = visorbus_groups;
+	dev->device.release = visorbus_release_busdevice;
+	dev->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
+					      visorbus_debugfs_dir);
+	dev->debugfs_bus_info = debugfs_create_file("client_bus_info", 0440,
+						    dev->debugfs_dir, dev,
+						    &bus_info_debugfs_fops);
+	dev_set_drvdata(&dev->device, dev);
+	err = get_vbus_header_info(dev->visorchannel, &dev->device, hdr_info);
+	if (err < 0)
+		goto err_debugfs_dir;
+	err = device_register(&dev->device);
+	if (err < 0)
+		goto err_debugfs_dir;
+	list_add_tail(&dev->list_all, &list_all_bus_instances);
+	dev->state.created = 1;
+	dev->vbus_hdr_info = (void *)hdr_info;
+	write_vbus_chp_info(dev->visorchannel, hdr_info, &chipset_driverinfo);
+	write_vbus_bus_info(dev->visorchannel, hdr_info, &clientbus_driverinfo);
+	visorbus_response(dev, err, CONTROLVM_BUS_CREATE);
+	return 0;
+
+err_debugfs_dir:
+	debugfs_remove_recursive(dev->debugfs_dir);
+	kfree(hdr_info);
+	dev_err(&dev->device, "%s failed: %d\n", __func__, err);
+	return err;
+}
+
+/*
+ * visorbus_remove_instance() - remove a device instance for the visorbus itself
+ * @dev: struct visor_device identifying the bus to remove
+ */
+void visorbus_remove_instance(struct visor_device *dev)
+{
+	/*
+	 * Note that this will result in the release method for
+	 * dev->device being called, which will call
+	 * visorbus_release_busdevice().  This is driven by the put_device()
+	 * done in device_unregister(): release() is invoked once the last
+	 * reference to the device is dropped.
+	 */
+	kfree(dev->vbus_hdr_info);
+	list_del(&dev->list_all);
+	if (dev->pending_msg_hdr)
+		visorbus_response(dev, 0, CONTROLVM_BUS_DESTROY);
+	device_unregister(&dev->device);
+}
+
+/*
+ * remove_all_visor_devices() - remove all child visorbus device instances
+ */
+static void remove_all_visor_devices(void)
+{
+	struct list_head *listentry, *listtmp;
+
+	list_for_each_safe(listentry, listtmp, &list_all_device_instances) {
+		struct visor_device *dev;
+
+		dev = list_entry(listentry, struct visor_device, list_all);
+		remove_visor_device(dev);
+	}
+}
+
+/*
+ * pause_state_change_complete() - the callback function to be called by a
+ *                                 visorbus function driver when a
+ *                                 pending "pause device" operation has
+ *                                 completed
+ * @dev: struct visor_device identifying the paused device
+ * @status: 0 iff the pause state change completed successfully, otherwise
+ *          a negative errno value indicating the reason for failure
+ */
+static void pause_state_change_complete(struct visor_device *dev, int status)
+{
+	if (!dev->pausing)
+		return;
+
+	dev->pausing = false;
+	visorbus_device_changestate_response(dev, status,
+					     segment_state_standby);
+}
+
+/*
+ * resume_state_change_complete() - the callback function to be called by a
+ *                                  visorbus function driver when a
+ *                                  pending "resume device" operation has
+ *                                  completed
+ * @dev: struct visor_device identifying the resumed device
+ * @status: 0 iff the resume state change completed successfully, otherwise
+ *          a negative errno value indicating the reason for failure
+ */
+static void resume_state_change_complete(struct visor_device *dev, int status)
+{
+	if (!dev->resuming)
+		return;
+
+	dev->resuming = false;
+	/*
+	 * Notify the chipset driver that the resume is complete,
+	 * which will presumably want to send some sort of response to
+	 * the initiator.
+	 */
+	visorbus_device_changestate_response(dev, status,
+					     segment_state_running);
+}
+
+/*
+ * visorchipset_initiate_device_pause_resume() - start a pause or resume
+ *                                               operation for a visor device
+ * @dev: struct visor_device identifying the device being paused or resumed
+ * @is_pause: true to indicate pause operation, false to indicate resume
+ *
+ * Tell the subordinate function driver for a specific device to pause
+ * or resume that device.  Success/failure result is returned asynchronously
+ * via a callback function; see pause_state_change_complete() and
+ * resume_state_change_complete().
+ */
+static int visorchipset_initiate_device_pause_resume(struct visor_device *dev,
+						     bool is_pause)
+{
+	int err;
+	struct visor_driver *drv;
+
+	/* No driver bound to the device: nothing to pause/resume */
+	if (!dev->device.driver)
+		return 0;
+	if (dev->pausing || dev->resuming)
+		return -EBUSY;
+
+	drv = to_visor_driver(dev->device.driver);
+	if (is_pause) {
+		dev->pausing = true;
+		err = drv->pause(dev, pause_state_change_complete);
+	} else {
+		/*
+		 * The vbus_dev_info structure in the channel has been
+		 * cleared, so it must be repopulated before resuming.
+		 */
+		publish_vbus_dev_info(dev);
+		dev->resuming = true;
+		err = drv->resume(dev, resume_state_change_complete);
+	}
+	return err;
+}
+
+/*
+ * visorchipset_device_pause() - start a pause operation for a visor device
+ * @dev_info: struct visor_device identifying the device being paused
+ *
+ * Tell the subordinate function driver for a specific device to pause
+ * that device.  Success/failure result is returned asynchronously
+ * via a callback function; see pause_state_change_complete().
+ */
+int visorchipset_device_pause(struct visor_device *dev_info)
+{
+	int err;
+
+	err = visorchipset_initiate_device_pause_resume(dev_info, true);
+	if (err < 0) {
+		dev_info->pausing = false;
+		return err;
+	}
+	return 0;
+}
+
+/*
+ * visorchipset_device_resume() - start a resume operation for a visor device
+ * @dev_info: struct visor_device identifying the device being resumed
+ *
+ * Tell the subordinate function driver for a specific device to resume
+ * that device.  Success/failure result is returned asynchronously
+ * via a callback function; see resume_state_change_complete().
+ */
+int visorchipset_device_resume(struct visor_device *dev_info)
+{
+	int err;
+
+	err = visorchipset_initiate_device_pause_resume(dev_info, false);
+	if (err < 0) {
+		dev_info->resuming = false;
+		return err;
+	}
+	return 0;
+}
+
+int visorbus_init(void)
+{
+	int err;
+
+	visorbus_debugfs_dir = debugfs_create_dir("visorbus", NULL);
+	bus_device_info_init(&clientbus_driverinfo, "clientbus", "visorbus");
+	err = bus_register(&visorbus_type);
+	if (err < 0)
+		return err;
+	initialized = true;
+	bus_device_info_init(&chipset_driverinfo, "chipset", "visorchipset");
+	return 0;
+}
+
+void visorbus_exit(void)
+{
+	struct list_head *listentry, *listtmp;
+
+	remove_all_visor_devices();
+	list_for_each_safe(listentry, listtmp, &list_all_bus_instances) {
+		struct visor_device *dev;
+
+		dev = list_entry(listentry, struct visor_device, list_all);
+		visorbus_remove_instance(dev);
+	}
+	bus_unregister(&visorbus_type);
+	initialized = false;
+	debugfs_remove_recursive(visorbus_debugfs_dir);
+}
diff --git a/marvell/linux/drivers/visorbus/visorbus_private.h b/marvell/linux/drivers/visorbus/visorbus_private.h
new file mode 100644
index 0000000..366380b
--- /dev/null
+++ b/marvell/linux/drivers/visorbus/visorbus_private.h
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
+ * All rights reserved.
+ */
+
+#ifndef __VISORBUS_PRIVATE_H__
+#define __VISORBUS_PRIVATE_H__
+
+#include <linux/uuid.h>
+#include <linux/utsname.h>
+#include <linux/visorbus.h>
+
+#include "controlvmchannel.h"
+#include "vbuschannel.h"
+
+struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
+					       struct visor_device *from);
+int visorbus_create_instance(struct visor_device *dev);
+void visorbus_remove_instance(struct visor_device *bus_info);
+int create_visor_device(struct visor_device *dev_info);
+void remove_visor_device(struct visor_device *dev_info);
+int visorchipset_device_pause(struct visor_device *dev_info);
+int visorchipset_device_resume(struct visor_device *dev_info);
+void visorbus_response(struct visor_device *p, int response, int controlvm_id);
+void visorbus_device_changestate_response(struct visor_device *p, int response,
+					  struct visor_segment_state state);
+int visorbus_init(void);
+void visorbus_exit(void);
+
+/* visorchannel access functions */
+struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
+					 const guid_t *guid, bool needs_lock);
+void visorchannel_destroy(struct visorchannel *channel);
+int visorchannel_read(struct visorchannel *channel, ulong offset,
+		      void *dest, ulong nbytes);
+int visorchannel_write(struct visorchannel *channel, ulong offset,
+		       void *dest, ulong nbytes);
+u64 visorchannel_get_physaddr(struct visorchannel *channel);
+ulong visorchannel_get_nbytes(struct visorchannel *channel);
+char *visorchannel_id(struct visorchannel *channel, char *s);
+char *visorchannel_zoneid(struct visorchannel *channel, char *s);
+u64 visorchannel_get_clientpartition(struct visorchannel *channel);
+int visorchannel_set_clientpartition(struct visorchannel *channel,
+				     u64 partition_handle);
+char *visorchannel_guid_id(const guid_t *guid, char *s);
+void *visorchannel_get_header(struct visorchannel *channel);
+#endif
diff --git a/marvell/linux/drivers/visorbus/visorchannel.c b/marvell/linux/drivers/visorbus/visorchannel.c
new file mode 100644
index 0000000..bd890e0
--- /dev/null
+++ b/marvell/linux/drivers/visorbus/visorchannel.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
+ * All rights reserved.
+ */
+
+/*
+ *  This provides s-Par channel communication primitives, which are
+ *  independent of the mechanism used to access the channel data.
+ */
+
+#include <linux/uuid.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/visorbus.h>
+
+#include "visorbus_private.h"
+#include "controlvmchannel.h"
+
+#define VISOR_DRV_NAME "visorchannel"
+
+#define VISOR_CONSOLEVIDEO_CHANNEL_GUID \
+	GUID_INIT(0x3cd6e705, 0xd6a2, 0x4aa5, \
+		  0xad, 0x5c, 0x7b, 0x8, 0x88, 0x9d, 0xff, 0xe2)
+
+static const guid_t visor_video_guid = VISOR_CONSOLEVIDEO_CHANNEL_GUID;
+
+struct visorchannel {
+	u64 physaddr;
+	ulong nbytes;
+	void *mapped;
+	bool requested;
+	struct channel_header chan_hdr;
+	guid_t guid;
+	/*
+	 * channel creator knows if more than one thread will be inserting or
+	 * removing
+	 */
+	bool needs_lock;
+	/* protect head writes in chan_hdr */
+	spinlock_t insert_lock;
+	/* protect tail writes in chan_hdr */
+	spinlock_t remove_lock;
+	guid_t type;
+	guid_t inst;
+};
+
+void visorchannel_destroy(struct visorchannel *channel)
+{
+	if (!channel)
+		return;
+
+	if (channel->mapped) {
+		memunmap(channel->mapped);
+		if (channel->requested)
+			release_mem_region(channel->physaddr, channel->nbytes);
+	}
+	kfree(channel);
+}
+
+u64 visorchannel_get_physaddr(struct visorchannel *channel)
+{
+	return channel->physaddr;
+}
+
+ulong visorchannel_get_nbytes(struct visorchannel *channel)
+{
+	return channel->nbytes;
+}
+
+char *visorchannel_guid_id(const guid_t *guid, char *s)
+{
+	sprintf(s, "%pUL", guid);
+	return s;
+}
+
+char *visorchannel_id(struct visorchannel *channel, char *s)
+{
+	return visorchannel_guid_id(&channel->guid, s);
+}
+
+char *visorchannel_zoneid(struct visorchannel *channel, char *s)
+{
+	return visorchannel_guid_id(&channel->chan_hdr.zone_guid, s);
+}
+
+u64 visorchannel_get_clientpartition(struct visorchannel *channel)
+{
+	return channel->chan_hdr.partition_handle;
+}
+
+int visorchannel_set_clientpartition(struct visorchannel *channel,
+				     u64 partition_handle)
+{
+	channel->chan_hdr.partition_handle = partition_handle;
+	return 0;
+}
+
+/**
+ * visorchannel_get_guid() - queries the GUID of the designated channel
+ * @channel: the channel to query
+ *
+ * Return: the GUID of the provided channel
+ */
+const guid_t *visorchannel_get_guid(struct visorchannel *channel)
+{
+	return &channel->guid;
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_guid);
+
+int visorchannel_read(struct visorchannel *channel, ulong offset, void *dest,
+		      ulong nbytes)
+{
+	if (offset + nbytes > channel->nbytes)
+		return -EIO;
+
+	memcpy(dest, channel->mapped + offset, nbytes);
+	return 0;
+}
+
+int visorchannel_write(struct visorchannel *channel, ulong offset, void *dest,
+		       ulong nbytes)
+{
+	size_t chdr_size = sizeof(struct channel_header);
+	size_t copy_size;
+
+	if (offset + nbytes > channel->nbytes)
+		return -EIO;
+
+	if (offset < chdr_size) {
+		copy_size = min(chdr_size - offset, nbytes);
+		memcpy(((char *)(&channel->chan_hdr)) + offset,
+		       dest, copy_size);
+	}
+	memcpy(channel->mapped + offset, dest, nbytes);
+	return 0;
+}
+
+void *visorchannel_get_header(struct visorchannel *channel)
+{
+	return &channel->chan_hdr;
+}
+
+/*
+ * Return offset of a specific SIGNAL_QUEUE_HEADER from the beginning of a
+ * channel header
+ */
+static int sig_queue_offset(struct channel_header *chan_hdr, int q)
+{
+	return ((chan_hdr)->ch_space_offset +
+	       ((q) * sizeof(struct signal_queue_header)));
+}
+
+/*
+ * Return offset of a specific queue entry (data) from the beginning of a
+ * channel header
+ */
+static int sig_data_offset(struct channel_header *chan_hdr, int q,
+			   struct signal_queue_header *sig_hdr, int slot)
+{
+	return (sig_queue_offset(chan_hdr, q) + sig_hdr->sig_base_offset +
+	       (slot * sig_hdr->signal_size));
+}
+
+/*
+ * Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back into
+ * host memory
+ */
+#define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
+	visorchannel_write(channel, \
+			   sig_queue_offset(&channel->chan_hdr, queue) + \
+			   offsetof(struct signal_queue_header, FIELD), \
+			   &((sig_hdr)->FIELD), \
+			   sizeof((sig_hdr)->FIELD))
+
+static int sig_read_header(struct visorchannel *channel, u32 queue,
+			   struct signal_queue_header *sig_hdr)
+{
+	if (channel->chan_hdr.ch_space_offset < sizeof(struct channel_header))
+		return -EINVAL;
+
+	/* Read the appropriate SIGNAL_QUEUE_HEADER into local memory. */
+	return visorchannel_read(channel,
+				 sig_queue_offset(&channel->chan_hdr, queue),
+				 sig_hdr, sizeof(struct signal_queue_header));
+}
+
+static int sig_read_data(struct visorchannel *channel, u32 queue,
+			 struct signal_queue_header *sig_hdr, u32 slot,
+			 void *data)
+{
+	int signal_data_offset = sig_data_offset(&channel->chan_hdr, queue,
+						 sig_hdr, slot);
+
+	return visorchannel_read(channel, signal_data_offset,
+				 data, sig_hdr->signal_size);
+}
+
+static int sig_write_data(struct visorchannel *channel, u32 queue,
+			  struct signal_queue_header *sig_hdr, u32 slot,
+			  void *data)
+{
+	int signal_data_offset = sig_data_offset(&channel->chan_hdr, queue,
+						 sig_hdr, slot);
+
+	return visorchannel_write(channel, signal_data_offset,
+				  data, sig_hdr->signal_size);
+}
+
+static int signalremove_inner(struct visorchannel *channel, u32 queue,
+			      void *msg)
+{
+	struct signal_queue_header sig_hdr;
+	int error;
+
+	error = sig_read_header(channel, queue, &sig_hdr);
+	if (error)
+		return error;
+	/* No signals to remove; have caller try again. */
+	if (sig_hdr.head == sig_hdr.tail)
+		return -EAGAIN;
+	sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;
+	error = sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg);
+	if (error)
+		return error;
+	sig_hdr.num_received++;
+	/*
+	 * For each data field in SIGNAL_QUEUE_HEADER that was modified, update
+	 * host memory. Required for channel sync.
+	 */
+	mb();
+	error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail);
+	if (error)
+		return error;
+	error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received);
+	if (error)
+		return error;
+	return 0;
+}
+
+/**
+ * visorchannel_signalremove() - removes a message from the designated
+ *                               channel/queue
+ * @channel: the channel the message will be removed from
+ * @queue:   the queue the message will be removed from
+ * @msg:     the buffer into which the removed message is copied
+ *
+ * Return: integer error code indicating the status of the removal
+ */
+int visorchannel_signalremove(struct visorchannel *channel, u32 queue,
+			      void *msg)
+{
+	int rc;
+	unsigned long flags;
+
+	if (channel->needs_lock) {
+		spin_lock_irqsave(&channel->remove_lock, flags);
+		rc = signalremove_inner(channel, queue, msg);
+		spin_unlock_irqrestore(&channel->remove_lock, flags);
+	} else {
+		rc = signalremove_inner(channel, queue, msg);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(visorchannel_signalremove);
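+
+/*
+ * Illustrative sketch of a consumer (the caller and handle_message() are
+ * hypothetical): keep removing messages until the -EAGAIN "nothing left to
+ * remove" indication described above is returned.
+ *
+ *     struct controlvm_message msg;
+ *
+ *     while (!visorchannel_signalremove(chan, CONTROLVM_QUEUE_EVENT, &msg))
+ *             handle_message(&msg);
+ */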
+
+static bool queue_empty(struct visorchannel *channel, u32 queue)
+{
+	struct signal_queue_header sig_hdr;
+
+	if (sig_read_header(channel, queue, &sig_hdr))
+		return true;
+	return (sig_hdr.head == sig_hdr.tail);
+}
+
+/**
+ * visorchannel_signalempty() - checks if the designated channel/queue contains
+ *				any messages
+ * @channel: the channel to query
+ * @queue:   the queue in the channel to query
+ *
+ * Return: true if the designated channel/queue contains no messages (or the
+ *         queue header cannot be read), false otherwise
+ */
+bool visorchannel_signalempty(struct visorchannel *channel, u32 queue)
+{
+	bool rc;
+	unsigned long flags;
+
+	if (!channel->needs_lock)
+		return queue_empty(channel, queue);
+	spin_lock_irqsave(&channel->remove_lock, flags);
+	rc = queue_empty(channel, queue);
+	spin_unlock_irqrestore(&channel->remove_lock, flags);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(visorchannel_signalempty);
+
+static int signalinsert_inner(struct visorchannel *channel, u32 queue,
+			      void *msg)
+{
+	struct signal_queue_header sig_hdr;
+	int err;
+
+	err = sig_read_header(channel, queue, &sig_hdr);
+	if (err)
+		return err;
+	sig_hdr.head = (sig_hdr.head + 1) % sig_hdr.max_slots;
+	if (sig_hdr.head == sig_hdr.tail) {
+		sig_hdr.num_overflows++;
+		err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_overflows);
+		if (err)
+			return err;
+		return -EIO;
+	}
+	err = sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg);
+	if (err)
+		return err;
+	sig_hdr.num_sent++;
+	/*
+	 * For each data field in SIGNAL_QUEUE_HEADER that was modified, update
+	 * host memory. Required for channel sync.
+	 */
+	mb();
+	err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, head);
+	if (err)
+		return err;
+	err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent);
+	if (err)
+		return err;
+	return 0;
+}
+
+/*
+ * visorchannel_create() - creates the struct visorchannel abstraction for a
+ *                         data area in memory, but does NOT modify this data
+ *                         area
+ * @physaddr:      physical address of start of channel
+ * @gfp:           gfp_t to use when allocating memory for the data struct
+ * @guid:          GUID that identifies the channel type
+ * @needs_lock:    must specify true if you have multiple threads of execution
+ *                 that will be calling visorchannel methods of this
+ *                 visorchannel at the same time
+ *
+ * Return: pointer to visorchannel that was created if successful,
+ *         otherwise NULL
+ */
+struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
+					 const guid_t *guid, bool needs_lock)
+{
+	struct visorchannel *channel;
+	int err;
+	size_t size = sizeof(struct channel_header);
+
+	if (physaddr == 0)
+		return NULL;
+
+	channel = kzalloc(sizeof(*channel), gfp);
+	if (!channel)
+		return NULL;
+	channel->needs_lock = needs_lock;
+	spin_lock_init(&channel->insert_lock);
+	spin_lock_init(&channel->remove_lock);
+	/*
+	 * The video driver contains the EFI framebuffer, so it will get a
+	 * resource conflict when requesting its full memory region. Since we
+	 * are only using the EFI framebuffer for video, we can ignore this.
+	 * Remember that we haven't requested the region, so we don't try to
+	 * release it later on.
+	 */
+	channel->requested = request_mem_region(physaddr, size, VISOR_DRV_NAME);
+	if (!channel->requested && !guid_equal(guid, &visor_video_guid))
+		/* we only care about errors if this is not the video channel */
+		goto err_destroy_channel;
+	channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
+	if (!channel->mapped) {
+		release_mem_region(physaddr, size);
+		goto err_destroy_channel;
+	}
+	channel->physaddr = physaddr;
+	channel->nbytes = size;
+	err = visorchannel_read(channel, 0, &channel->chan_hdr, size);
+	if (err)
+		goto err_destroy_channel;
+	size = (ulong)channel->chan_hdr.size;
+	memunmap(channel->mapped);
+	if (channel->requested)
+		release_mem_region(channel->physaddr, channel->nbytes);
+	channel->mapped = NULL;
+	channel->requested = request_mem_region(channel->physaddr, size,
+						VISOR_DRV_NAME);
+	if (!channel->requested && !guid_equal(guid, &visor_video_guid))
+		/* we only care about errors if this is not the video channel */
+		goto err_destroy_channel;
+	channel->mapped = memremap(channel->physaddr, size, MEMREMAP_WB);
+	if (!channel->mapped) {
+		release_mem_region(channel->physaddr, size);
+		goto err_destroy_channel;
+	}
+	channel->nbytes = size;
+	guid_copy(&channel->guid, guid);
+	return channel;
+
+err_destroy_channel:
+	visorchannel_destroy(channel);
+	return NULL;
+}
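+
+/*
+ * Illustrative usage sketch, mirroring the calls made from visorchipset.c;
+ * channel_addr and type_guid are placeholders, and the error handling shown
+ * is an assumption, not a prescription:
+ *
+ *     struct visorchannel *chan;
+ *
+ *     chan = visorchannel_create(channel_addr, GFP_KERNEL, &type_guid, true);
+ *     if (!chan)
+ *             return -ENOMEM;
+ *     ...
+ *     visorchannel_destroy(chan);
+ */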
+
+/**
+ * visorchannel_signalinsert() - inserts a message into the designated
+ *                               channel/queue
+ * @channel: the channel the message will be added to
+ * @queue:   the queue the message will be added to
+ * @msg:     the message to insert
+ *
+ * Return: integer error code indicating the status of the insertion
+ */
+int visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
+			      void *msg)
+{
+	int rc;
+	unsigned long flags;
+
+	if (channel->needs_lock) {
+		spin_lock_irqsave(&channel->insert_lock, flags);
+		rc = signalinsert_inner(channel, queue, msg);
+		spin_unlock_irqrestore(&channel->insert_lock, flags);
+	} else {
+		rc = signalinsert_inner(channel, queue, msg);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(visorchannel_signalinsert);
diff --git a/marvell/linux/drivers/visorbus/visorchipset.c b/marvell/linux/drivers/visorbus/visorchipset.c
new file mode 100644
index 0000000..5668cad
--- /dev/null
+++ b/marvell/linux/drivers/visorbus/visorchipset.c
@@ -0,0 +1,1691 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
+ * All rights reserved.
+ */
+
+#include <linux/acpi.h>
+#include <linux/crash_dump.h>
+#include <linux/visorbus.h>
+
+#include "visorbus_private.h"
+
+/* {72120008-4AAB-11DC-8530-444553544200} */
+#define VISOR_SIOVM_GUID GUID_INIT(0x72120008, 0x4AAB, 0x11DC, 0x85, 0x30, \
+				   0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
+
+static const guid_t visor_vhba_channel_guid = VISOR_VHBA_CHANNEL_GUID;
+static const guid_t visor_siovm_guid = VISOR_SIOVM_GUID;
+static const guid_t visor_controlvm_channel_guid = VISOR_CONTROLVM_CHANNEL_GUID;
+
+#define POLLJIFFIES_CONTROLVM_FAST 1
+#define POLLJIFFIES_CONTROLVM_SLOW 100
+
+#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
+
+#define UNISYS_VISOR_LEAF_ID 0x40000000
+
+/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
+#define UNISYS_VISOR_ID_EBX 0x73696e55
+#define UNISYS_VISOR_ID_ECX 0x70537379
+#define UNISYS_VISOR_ID_EDX 0x34367261
+
+/*
+ * When the controlvm channel is idle for at least MIN_IDLE_SECONDS, we switch
+ * to slow polling mode. As soon as we get a controlvm message, we switch back
+ * to fast polling mode.
+ */
+#define MIN_IDLE_SECONDS 10
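+
+/*
+ * Illustrative sketch (an assumption about the periodic worker, using fields
+ * of struct visorchipset_device defined below) of how that throttling can be
+ * implemented:
+ *
+ *     if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
+ *                             MIN_IDLE_SECONDS * HZ))
+ *             chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_SLOW;
+ *     else
+ *             chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
+ */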
+
+struct parser_context {
+	unsigned long allocbytes;
+	unsigned long param_bytes;
+	u8 *curr;
+	unsigned long bytes_remaining;
+	bool byte_stream;
+	struct visor_controlvm_parameters_header data;
+};
+
+/* VMCALL_CONTROLVM_ADDR: Used by all guests, not just IO. */
+#define VMCALL_CONTROLVM_ADDR 0x0501
+
+enum vmcall_result {
+	VMCALL_RESULT_SUCCESS = 0,
+	VMCALL_RESULT_INVALID_PARAM = 1,
+	VMCALL_RESULT_DATA_UNAVAILABLE = 2,
+	VMCALL_RESULT_FAILURE_UNAVAILABLE = 3,
+	VMCALL_RESULT_DEVICE_ERROR = 4,
+	VMCALL_RESULT_DEVICE_NOT_READY = 5
+};
+
+/*
+ * struct vmcall_io_controlvm_addr_params - Structure for IO VMCALLS. Has
+ *					    parameters to VMCALL_CONTROLVM_ADDR
+ *					    interface.
+ * @address:	   The Guest-relative physical address of the ControlVm channel.
+ *		   This VMCall fills this in with the appropriate address.
+ *		   Contents provided by this VMCALL (OUT).
+ * @channel_bytes: The size of the ControlVm channel in bytes. This VMCall
+ *		   fills this in with the appropriate size. Contents provided
+ *		   by this VMCALL (OUT).
+ * @unused:	   Unused Bytes in the 64-Bit Aligned Struct.
+ */
+struct vmcall_io_controlvm_addr_params {
+	u64 address;
+	u32 channel_bytes;
+	u8 unused[4];
+} __packed;
+
+struct visorchipset_device {
+	struct acpi_device *acpi_device;
+	unsigned long poll_jiffies;
+	/* when we got our last controlvm message */
+	unsigned long most_recent_message_jiffies;
+	struct delayed_work periodic_controlvm_work;
+	struct visorchannel *controlvm_channel;
+	unsigned long controlvm_payload_bytes_buffered;
+	/*
+	 * The following variables are used to handle the scenario where we are
+	 * unable to offload the payload from a controlvm message due to memory
+	 * requirements. In this scenario, we simply stash the controlvm
+	 * message, then attempt to process it again the next time
+	 * controlvm_periodic_work() runs.
+	 */
+	struct controlvm_message controlvm_pending_msg;
+	bool controlvm_pending_msg_valid;
+	struct vmcall_io_controlvm_addr_params controlvm_params;
+};
+
+static struct visorchipset_device *chipset_dev;
+
+struct parahotplug_request {
+	struct list_head list;
+	int id;
+	unsigned long expiration;
+	struct controlvm_message msg;
+};
+
+/* sysfs attribute definitions */
+static ssize_t toolaction_show(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	u8 tool_action = 0;
+	int err;
+
+	err = visorchannel_read(chipset_dev->controlvm_channel,
+				offsetof(struct visor_controlvm_channel,
+					 tool_action),
+				&tool_action, sizeof(u8));
+	if (err)
+		return err;
+	return sprintf(buf, "%u\n", tool_action);
+}
+
+static ssize_t toolaction_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	u8 tool_action;
+	int err;
+
+	if (kstrtou8(buf, 10, &tool_action))
+		return -EINVAL;
+	err = visorchannel_write(chipset_dev->controlvm_channel,
+				 offsetof(struct visor_controlvm_channel,
+					  tool_action),
+				 &tool_action, sizeof(u8));
+	if (err)
+		return err;
+	return count;
+}
+static DEVICE_ATTR_RW(toolaction);
+
+static ssize_t boottotool_show(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct efi_visor_indication efi_visor_indication;
+	int err;
+
+	err = visorchannel_read(chipset_dev->controlvm_channel,
+				offsetof(struct visor_controlvm_channel,
+					 efi_visor_ind),
+				&efi_visor_indication,
+				sizeof(struct efi_visor_indication));
+	if (err)
+		return err;
+	return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool);
+}
+
+static ssize_t boottotool_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	int val, err;
+	struct efi_visor_indication efi_visor_indication;
+
+	if (kstrtoint(buf, 10, &val))
+		return -EINVAL;
+	efi_visor_indication.boot_to_tool = val;
+	err = visorchannel_write(chipset_dev->controlvm_channel,
+				 offsetof(struct visor_controlvm_channel,
+					  efi_visor_ind),
+				 &(efi_visor_indication),
+				 sizeof(struct efi_visor_indication));
+	if (err)
+		return err;
+	return count;
+}
+static DEVICE_ATTR_RW(boottotool);
+
+static ssize_t error_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	u32 error = 0;
+	int err;
+
+	err = visorchannel_read(chipset_dev->controlvm_channel,
+				offsetof(struct visor_controlvm_channel,
+					 installation_error),
+				&error, sizeof(u32));
+	if (err)
+		return err;
+	return sprintf(buf, "%u\n", error);
+}
+
+static ssize_t error_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	u32 error;
+	int err;
+
+	if (kstrtou32(buf, 10, &error))
+		return -EINVAL;
+	err = visorchannel_write(chipset_dev->controlvm_channel,
+				 offsetof(struct visor_controlvm_channel,
+					  installation_error),
+				 &error, sizeof(u32));
+	if (err)
+		return err;
+	return count;
+}
+static DEVICE_ATTR_RW(error);
+
+static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	u32 text_id = 0;
+	int err;
+
+	err = visorchannel_read(chipset_dev->controlvm_channel,
+				offsetof(struct visor_controlvm_channel,
+					 installation_text_id),
+				&text_id, sizeof(u32));
+	if (err)
+		return err;
+	return sprintf(buf, "%u\n", text_id);
+}
+
+static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	u32 text_id;
+	int err;
+
+	if (kstrtou32(buf, 10, &text_id))
+		return -EINVAL;
+	err = visorchannel_write(chipset_dev->controlvm_channel,
+				 offsetof(struct visor_controlvm_channel,
+					  installation_text_id),
+				 &text_id, sizeof(u32));
+	if (err)
+		return err;
+	return count;
+}
+static DEVICE_ATTR_RW(textid);
+
+static ssize_t remaining_steps_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	u16 remaining_steps = 0;
+	int err;
+
+	err = visorchannel_read(chipset_dev->controlvm_channel,
+				offsetof(struct visor_controlvm_channel,
+					 installation_remaining_steps),
+				&remaining_steps, sizeof(u16));
+	if (err)
+		return err;
+	return sprintf(buf, "%hu\n", remaining_steps);
+}
+
+static ssize_t remaining_steps_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	u16 remaining_steps;
+	int err;
+
+	if (kstrtou16(buf, 10, &remaining_steps))
+		return -EINVAL;
+	err = visorchannel_write(chipset_dev->controlvm_channel,
+				 offsetof(struct visor_controlvm_channel,
+					  installation_remaining_steps),
+				 &remaining_steps, sizeof(u16));
+	if (err)
+		return err;
+	return count;
+}
+static DEVICE_ATTR_RW(remaining_steps);
+
+static void controlvm_init_response(struct controlvm_message *msg,
+				    struct controlvm_message_header *msg_hdr,
+				    int response)
+{
+	memset(msg, 0, sizeof(struct controlvm_message));
+	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
+	msg->hdr.payload_bytes = 0;
+	msg->hdr.payload_vm_offset = 0;
+	msg->hdr.payload_max_bytes = 0;
+	if (response < 0) {
+		msg->hdr.flags.failed = 1;
+		msg->hdr.completion_status = (u32)(-response);
+	}
+}
+
+static int controlvm_respond_chipset_init(
+				struct controlvm_message_header *msg_hdr,
+				int response,
+				enum visor_chipset_feature features)
+{
+	struct controlvm_message outmsg;
+
+	controlvm_init_response(&outmsg, msg_hdr, response);
+	outmsg.cmd.init_chipset.features = features;
+	return visorchannel_signalinsert(chipset_dev->controlvm_channel,
+					 CONTROLVM_QUEUE_REQUEST, &outmsg);
+}
+
+static int chipset_init(struct controlvm_message *inmsg)
+{
+	static int chipset_inited;
+	enum visor_chipset_feature features = 0;
+	int rc = CONTROLVM_RESP_SUCCESS;
+	int res = 0;
+
+	if (chipset_inited) {
+		rc = -CONTROLVM_RESP_ALREADY_DONE;
+		res = -EIO;
+		goto out_respond;
+	}
+	chipset_inited = 1;
+	/*
+	 * Set features to indicate we support parahotplug (if Command also
+	 * supports it). Set the "reply" bit so Command knows this is a
+	 * features-aware driver.
+	 */
+	features = inmsg->cmd.init_chipset.features &
+		   VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;
+	features |= VISOR_CHIPSET_FEATURE_REPLY;
+
+out_respond:
+	if (inmsg->hdr.flags.response_expected)
+		res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
+
+	return res;
+}
+
+static int controlvm_respond(struct controlvm_message_header *msg_hdr,
+			     int response, struct visor_segment_state *state)
+{
+	struct controlvm_message outmsg;
+
+	controlvm_init_response(&outmsg, msg_hdr, response);
+	if (outmsg.hdr.flags.test_message == 1)
+		return -EINVAL;
+	if (state) {
+		outmsg.cmd.device_change_state.state = *state;
+		outmsg.cmd.device_change_state.flags.phys_device = 1;
+	}
+	return visorchannel_signalinsert(chipset_dev->controlvm_channel,
+					 CONTROLVM_QUEUE_REQUEST, &outmsg);
+}
+
+enum crash_obj_type {
+	CRASH_DEV,
+	CRASH_BUS,
+};
+
+static int save_crash_message(struct controlvm_message *msg,
+			      enum crash_obj_type cr_type)
+{
+	u32 local_crash_msg_offset;
+	u16 local_crash_msg_count;
+	int err;
+
+	err = visorchannel_read(chipset_dev->controlvm_channel,
+				offsetof(struct visor_controlvm_channel,
+					 saved_crash_message_count),
+				&local_crash_msg_count, sizeof(u16));
+	if (err) {
+		dev_err(&chipset_dev->acpi_device->dev,
+			"failed to read message count\n");
+		return err;
+	}
+	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
+		dev_err(&chipset_dev->acpi_device->dev,
+			"invalid number of messages\n");
+		return -EIO;
+	}
+	err = visorchannel_read(chipset_dev->controlvm_channel,
+				offsetof(struct visor_controlvm_channel,
+					 saved_crash_message_offset),
+				&local_crash_msg_offset, sizeof(u32));
+	if (err) {
+		dev_err(&chipset_dev->acpi_device->dev,
+			"failed to read offset\n");
+		return err;
+	}
+	switch (cr_type) {
+	case CRASH_DEV:
+		local_crash_msg_offset += sizeof(struct controlvm_message);
+		err = visorchannel_write(chipset_dev->controlvm_channel,
+					 local_crash_msg_offset, msg,
+					 sizeof(struct controlvm_message));
+		if (err) {
+			dev_err(&chipset_dev->acpi_device->dev,
+				"failed to write dev msg\n");
+			return err;
+		}
+		break;
+	case CRASH_BUS:
+		err = visorchannel_write(chipset_dev->controlvm_channel,
+					 local_crash_msg_offset, msg,
+					 sizeof(struct controlvm_message));
+		if (err) {
+			dev_err(&chipset_dev->acpi_device->dev,
+				"failed to write bus msg\n");
+			return err;
+		}
+		break;
+	default:
+		dev_err(&chipset_dev->acpi_device->dev,
+			"Invalid crash_obj_type\n");
+		break;
+	}
+	return 0;
+}
+
+static int controlvm_responder(enum controlvm_id cmd_id,
+			       struct controlvm_message_header *pending_msg_hdr,
+			       int response)
+{
+	if (pending_msg_hdr->id != (u32)cmd_id)
+		return -EINVAL;
+
+	return controlvm_respond(pending_msg_hdr, response, NULL);
+}
+
+static int device_changestate_responder(enum controlvm_id cmd_id,
+					struct visor_device *p, int response,
+					struct visor_segment_state state)
+{
+	struct controlvm_message outmsg;
+
+	if (p->pending_msg_hdr->id != cmd_id)
+		return -EINVAL;
+
+	controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
+	outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
+	outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
+	outmsg.cmd.device_change_state.state = state;
+	return visorchannel_signalinsert(chipset_dev->controlvm_channel,
+					 CONTROLVM_QUEUE_REQUEST, &outmsg);
+}
+
+static int visorbus_create(struct controlvm_message *inmsg)
+{
+	struct controlvm_message_packet *cmd = &inmsg->cmd;
+	struct controlvm_message_header *pmsg_hdr;
+	u32 bus_no = cmd->create_bus.bus_no;
+	struct visor_device *bus_info;
+	struct visorchannel *visorchannel;
+	int err;
+
+	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
+	if (bus_info && bus_info->state.created == 1) {
+		dev_err(&chipset_dev->acpi_device->dev,
+			"failed %s: already exists\n", __func__);
+		err = -EEXIST;
+		goto err_respond;
+	}
+	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
+	if (!bus_info) {
+		err = -ENOMEM;
+		goto err_respond;
+	}
+	INIT_LIST_HEAD(&bus_info->list_all);
+	bus_info->chipset_bus_no = bus_no;
+	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
+	if (guid_equal(&cmd->create_bus.bus_inst_guid, &visor_siovm_guid)) {
+		err = save_crash_message(inmsg, CRASH_BUS);
+		if (err)
+			goto err_free_bus_info;
+	}
+	if (inmsg->hdr.flags.response_expected == 1) {
+		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+		if (!pmsg_hdr) {
+			err = -ENOMEM;
+			goto err_free_bus_info;
+		}
+		memcpy(pmsg_hdr, &inmsg->hdr,
+		       sizeof(struct controlvm_message_header));
+		bus_info->pending_msg_hdr = pmsg_hdr;
+	}
+	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
+					   GFP_KERNEL,
+					   &cmd->create_bus.bus_data_type_guid,
+					   false);
+	if (!visorchannel) {
+		err = -ENOMEM;
+		goto err_free_pending_msg;
+	}
+	bus_info->visorchannel = visorchannel;
+	/* Response will be handled by visorbus_create_instance on success */
+	err = visorbus_create_instance(bus_info);
+	if (err)
+		goto err_destroy_channel;
+	return 0;
+
+err_destroy_channel:
+	visorchannel_destroy(visorchannel);
+
+err_free_pending_msg:
+	kfree(bus_info->pending_msg_hdr);
+
+err_free_bus_info:
+	kfree(bus_info);
+
+err_respond:
+	if (inmsg->hdr.flags.response_expected == 1)
+		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
+	return err;
+}
+
+static int visorbus_destroy(struct controlvm_message *inmsg)
+{
+	struct controlvm_message_header *pmsg_hdr;
+	u32 bus_no = inmsg->cmd.destroy_bus.bus_no;
+	struct visor_device *bus_info;
+	int err;
+
+	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
+	if (!bus_info) {
+		err = -ENODEV;
+		goto err_respond;
+	}
+	if (bus_info->state.created == 0) {
+		err = -ENOENT;
+		goto err_respond;
+	}
+	if (bus_info->pending_msg_hdr) {
+		/* only non-NULL if dev is still waiting on a response */
+		err = -EEXIST;
+		goto err_respond;
+	}
+	if (inmsg->hdr.flags.response_expected == 1) {
+		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+		if (!pmsg_hdr) {
+			err = -ENOMEM;
+			goto err_respond;
+		}
+		memcpy(pmsg_hdr, &inmsg->hdr,
+		       sizeof(struct controlvm_message_header));
+		bus_info->pending_msg_hdr = pmsg_hdr;
+	}
+	/* Response will be handled by visorbus_remove_instance */
+	visorbus_remove_instance(bus_info);
+	return 0;
+
+err_respond:
+	if (inmsg->hdr.flags.response_expected == 1)
+		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
+	return err;
+}
+
+static const guid_t *parser_id_get(struct parser_context *ctx)
+{
+	return &ctx->data.id;
+}
+
+static void *parser_string_get(u8 *pscan, int nscan)
+{
+	int value_length;
+	void *value;
+
+	if (nscan == 0)
+		return NULL;
+
+	value_length = strnlen(pscan, nscan);
+	value = kzalloc(value_length + 1, GFP_KERNEL);
+	if (!value)
+		return NULL;
+	if (value_length > 0)
+		memcpy(value, pscan, value_length);
+	return value;
+}
+
+static void *parser_name_get(struct parser_context *ctx)
+{
+	struct visor_controlvm_parameters_header *phdr;
+
+	phdr = &ctx->data;
+	if ((unsigned long)phdr->name_offset +
+	    (unsigned long)phdr->name_length > ctx->param_bytes)
+		return NULL;
+	ctx->curr = (u8 *)phdr + phdr->name_offset;
+	ctx->bytes_remaining = phdr->name_length;
+	return parser_string_get(ctx->curr, phdr->name_length);
+}
+
+static int visorbus_configure(struct controlvm_message *inmsg,
+			      struct parser_context *parser_ctx)
+{
+	struct controlvm_message_packet *cmd = &inmsg->cmd;
+	u32 bus_no;
+	struct visor_device *bus_info;
+	int err = 0;
+
+	bus_no = cmd->configure_bus.bus_no;
+	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
+	if (!bus_info) {
+		err = -EINVAL;
+		goto err_respond;
+	}
+	if (bus_info->state.created == 0) {
+		err = -EINVAL;
+		goto err_respond;
+	}
+	if (bus_info->pending_msg_hdr) {
+		err = -EIO;
+		goto err_respond;
+	}
+	err = visorchannel_set_clientpartition(bus_info->visorchannel,
+					       cmd->configure_bus.guest_handle);
+	if (err)
+		goto err_respond;
+	if (parser_ctx) {
+		const guid_t *partition_guid = parser_id_get(parser_ctx);
+
+		guid_copy(&bus_info->partition_guid, partition_guid);
+		bus_info->name = parser_name_get(parser_ctx);
+	}
+	if (inmsg->hdr.flags.response_expected == 1)
+		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
+	return 0;
+
+err_respond:
+	dev_err(&chipset_dev->acpi_device->dev,
+		"%s exited with err: %d\n", __func__, err);
+	if (inmsg->hdr.flags.response_expected == 1)
+		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
+	return err;
+}
+
+static int visorbus_device_create(struct controlvm_message *inmsg)
+{
+	struct controlvm_message_packet *cmd = &inmsg->cmd;
+	struct controlvm_message_header *pmsg_hdr;
+	u32 bus_no = cmd->create_device.bus_no;
+	u32 dev_no = cmd->create_device.dev_no;
+	struct visor_device *dev_info;
+	struct visor_device *bus_info;
+	struct visorchannel *visorchannel;
+	int err;
+
+	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
+	if (!bus_info) {
+		dev_err(&chipset_dev->acpi_device->dev,
+			"failed to get bus by id: %d\n", bus_no);
+		err = -ENODEV;
+		goto err_respond;
+	}
+	if (bus_info->state.created == 0) {
+		dev_err(&chipset_dev->acpi_device->dev,
+			"bus not created, id: %d\n", bus_no);
+		err = -EINVAL;
+		goto err_respond;
+	}
+	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
+	if (dev_info && dev_info->state.created == 1) {
+		dev_err(&chipset_dev->acpi_device->dev,
+			"device already exists: %d/%d\n", bus_no, dev_no);
+		err = -EEXIST;
+		goto err_respond;
+	}
+
+	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
+	if (!dev_info) {
+		err = -ENOMEM;
+		goto err_respond;
+	}
+	dev_info->chipset_bus_no = bus_no;
+	dev_info->chipset_dev_no = dev_no;
+	guid_copy(&dev_info->inst, &cmd->create_device.dev_inst_guid);
+	dev_info->device.parent = &bus_info->device;
+	visorchannel = visorchannel_create(cmd->create_device.channel_addr,
+					   GFP_KERNEL,
+					   &cmd->create_device.data_type_guid,
+					   true);
+	if (!visorchannel) {
+		dev_err(&chipset_dev->acpi_device->dev,
+			"failed to create visorchannel: %d/%d\n",
+			bus_no, dev_no);
+		err = -ENOMEM;
+		goto err_free_dev_info;
+	}
+	dev_info->visorchannel = visorchannel;
+	guid_copy(&dev_info->channel_type_guid,
+		  &cmd->create_device.data_type_guid);
+	if (guid_equal(&cmd->create_device.data_type_guid,
+		       &visor_vhba_channel_guid)) {
+		err = save_crash_message(inmsg, CRASH_DEV);
+		if (err)
+			goto err_destroy_visorchannel;
+	}
+	if (inmsg->hdr.flags.response_expected == 1) {
+		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+		if (!pmsg_hdr) {
+			err = -ENOMEM;
+			goto err_destroy_visorchannel;
+		}
+		memcpy(pmsg_hdr, &inmsg->hdr,
+		       sizeof(struct controlvm_message_header));
+		dev_info->pending_msg_hdr = pmsg_hdr;
+	}
+	/* create_visor_device will send response */
+	err = create_visor_device(dev_info);
+	if (err)
+		goto err_destroy_visorchannel;
+
+	return 0;
+
+err_destroy_visorchannel:
+	visorchannel_destroy(visorchannel);
+
+err_free_dev_info:
+	kfree(dev_info);
+
+err_respond:
+	if (inmsg->hdr.flags.response_expected == 1)
+		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
+	return err;
+}
+
+static int visorbus_device_changestate(struct controlvm_message *inmsg)
+{
+	struct controlvm_message_packet *cmd = &inmsg->cmd;
+	struct controlvm_message_header *pmsg_hdr;
+	u32 bus_no = cmd->device_change_state.bus_no;
+	u32 dev_no = cmd->device_change_state.dev_no;
+	struct visor_segment_state state = cmd->device_change_state.state;
+	struct visor_device *dev_info;
+	int err = 0;
+
+	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
+	if (!dev_info) {
+		err = -ENODEV;
+		goto err_respond;
+	}
+	if (dev_info->state.created == 0) {
+		err = -EINVAL;
+		goto err_respond;
+	}
+	if (dev_info->pending_msg_hdr) {
+		/* only non-NULL if dev is still waiting on a response */
+		err = -EIO;
+		goto err_respond;
+	}
+
+	if (inmsg->hdr.flags.response_expected == 1) {
+		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+		if (!pmsg_hdr) {
+			err = -ENOMEM;
+			goto err_respond;
+		}
+		memcpy(pmsg_hdr, &inmsg->hdr,
+		       sizeof(struct controlvm_message_header));
+		dev_info->pending_msg_hdr = pmsg_hdr;
+	}
+	if (state.alive == segment_state_running.alive &&
+	    state.operating == segment_state_running.operating)
+		/* Response will be sent from visorchipset_device_resume */
+		err = visorchipset_device_resume(dev_info);
+	/* ServerNotReady / ServerLost / SegmentStateStandby */
+	else if (state.alive == segment_state_standby.alive &&
+		 state.operating == segment_state_standby.operating)
+		/*
+		 * Technically this is the standby case, where the server is lost.
+		 * Response will be sent from visorchipset_device_pause.
+		 */
+		err = visorchipset_device_pause(dev_info);
+	if (err)
+		goto err_respond;
+	return 0;
+
+err_respond:
+	dev_err(&chipset_dev->acpi_device->dev, "failed: %d\n", err);
+	if (inmsg->hdr.flags.response_expected == 1)
+		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
+	return err;
+}
+
+static int visorbus_device_destroy(struct controlvm_message *inmsg)
+{
+	struct controlvm_message_packet *cmd = &inmsg->cmd;
+	struct controlvm_message_header *pmsg_hdr;
+	u32 bus_no = cmd->destroy_device.bus_no;
+	u32 dev_no = cmd->destroy_device.dev_no;
+	struct visor_device *dev_info;
+	int err;
+
+	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
+	if (!dev_info) {
+		err = -ENODEV;
+		goto err_respond;
+	}
+	if (dev_info->state.created == 0) {
+		err = -EINVAL;
+		goto err_respond;
+	}
+	if (dev_info->pending_msg_hdr) {
+		/* only non-NULL if dev is still waiting on a response */
+		err = -EIO;
+		goto err_respond;
+	}
+	if (inmsg->hdr.flags.response_expected == 1) {
+		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+		if (!pmsg_hdr) {
+			err = -ENOMEM;
+			goto err_respond;
+		}
+
+		memcpy(pmsg_hdr, &inmsg->hdr,
+		       sizeof(struct controlvm_message_header));
+		dev_info->pending_msg_hdr = pmsg_hdr;
+	}
+	kfree(dev_info->name);
+	remove_visor_device(dev_info);
+	return 0;
+
+err_respond:
+	if (inmsg->hdr.flags.response_expected == 1)
+		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
+	return err;
+}
+
+/*
+ * The general parahotplug flow works as follows. The visorchipset receives
+ * a DEVICE_CHANGESTATE message from Command specifying a physical device
+ * to enable or disable. The CONTROLVM message handler calls
+ * parahotplug_process_message, which then adds the message to a global list
+ * and kicks off a udev event that causes a user-level script to enable or
+ * disable the specified device. The udev script then writes to
+ * /sys/devices/platform/visorchipset/parahotplug, which causes the
+ * parahotplug store functions to get called, at which point the
+ * appropriate CONTROLVM message is retrieved from the list and responded to.
+ */
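+
+/*
+ * Illustrative sketch (an assumption, not shipped with this driver) of the
+ * script side of that handshake, using the environment set up by
+ * parahotplug_request_kickoff() and the parahotplug attribute group defined
+ * below:
+ *
+ *     echo "$VISOR_PARAHOTPLUG_ID" > \
+ *         /sys/devices/platform/visorchipset/parahotplug/devicedisabled
+ */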
+
+#define PARAHOTPLUG_TIMEOUT_MS 2000
+
+/*
+ * parahotplug_next_id() - generate unique int to match an outstanding
+ *                         CONTROLVM message with a udev script /sys
+ *                         response
+ *
+ * Return: a unique integer value
+ */
+static int parahotplug_next_id(void)
+{
+	static atomic_t id = ATOMIC_INIT(0);
+
+	return atomic_inc_return(&id);
+}
+
+/*
+ * parahotplug_next_expiration() - returns the time (in jiffies) when a
+ *                                 CONTROLVM message on the list should expire
+ *                                 -- PARAHOTPLUG_TIMEOUT_MS in the future
+ *
+ * Return: expected expiration time (in jiffies)
+ */
+static unsigned long parahotplug_next_expiration(void)
+{
+	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
+}
+
+/*
+ * parahotplug_request_create() - create a parahotplug_request, which is
+ *                                basically a wrapper for a CONTROLVM_MESSAGE
+ *                                that we can stick on a list
+ * @msg: the message to insert in the request
+ *
+ * Return: the request containing the provided message
+ */
+static struct parahotplug_request *parahotplug_request_create(
+						struct controlvm_message *msg)
+{
+	struct parahotplug_request *req;
+
+	req = kmalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return NULL;
+	req->id = parahotplug_next_id();
+	req->expiration = parahotplug_next_expiration();
+	req->msg = *msg;
+	return req;
+}
+
+/*
+ * parahotplug_request_destroy() - free a parahotplug_request
+ * @req: the request to deallocate
+ */
+static void parahotplug_request_destroy(struct parahotplug_request *req)
+{
+	kfree(req);
+}
+
+static LIST_HEAD(parahotplug_request_list);
+/* lock for above */
+static DEFINE_SPINLOCK(parahotplug_request_list_lock);
+
+/*
+ * parahotplug_request_complete() - mark request as complete
+ * @id:     the id of the request
+ * @active: indicates whether the request is assigned to the active partition
+ *
+ * Called from the /sys handler, which means the user script has
+ * finished the enable/disable. Find the matching identifier, and
+ * respond to the CONTROLVM message with success.
+ *
+ * Return: 0 on success or -EINVAL on failure
+ */
+static int parahotplug_request_complete(int id, u16 active)
+{
+	struct list_head *pos;
+	struct list_head *tmp;
+	struct parahotplug_request *req;
+
+	spin_lock(&parahotplug_request_list_lock);
+	/* Look for a request matching "id". */
+	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
+		req = list_entry(pos, struct parahotplug_request, list);
+		if (req->id == id) {
+			/*
+			 * Found a match. Remove it from the list and
+			 * respond.
+			 */
+			list_del(pos);
+			spin_unlock(&parahotplug_request_list_lock);
+			req->msg.cmd.device_change_state.state.active = active;
+			if (req->msg.hdr.flags.response_expected)
+				controlvm_respond(
+				       &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
+				       &req->msg.cmd.device_change_state.state);
+			parahotplug_request_destroy(req);
+			return 0;
+		}
+	}
+	spin_unlock(&parahotplug_request_list_lock);
+	return -EINVAL;
+}
+
+/*
+ * devicedisabled_store() - disables the hotplug device
+ * @dev:   sysfs interface variable not utilized in this function
+ * @attr:  sysfs interface variable not utilized in this function
+ * @buf:   buffer containing the device id
+ * @count: the size of the buffer
+ *
+ * The parahotplug/devicedisabled interface gets called by our support script
+ * when an SR-IOV device has been shut down. The ID is passed to the script
+ * and then passed back when the device has been removed.
+ *
+ * Return: the size of the buffer for success or negative for error
+ */
+static ssize_t devicedisabled_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	unsigned int id;
+	int err;
+
+	if (kstrtouint(buf, 10, &id))
+		return -EINVAL;
+	err = parahotplug_request_complete(id, 0);
+	if (err < 0)
+		return err;
+	return count;
+}
+static DEVICE_ATTR_WO(devicedisabled);
+
+/*
+ * deviceenabled_store() - enables the hotplug device
+ * @dev:   sysfs interface variable not utilized in this function
+ * @attr:  sysfs interface variable not utilized in this function
+ * @buf:   buffer containing the device id
+ * @count: the size of the buffer
+ *
+ * The parahotplug/deviceenabled interface gets called by our support script
+ * when an SR-IOV device has been recovered. The ID is passed to the script
+ * and then passed back when the device has been brought back up.
+ *
+ * Return: the size of the buffer for success or negative for error
+ */
+static ssize_t deviceenabled_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	unsigned int id;
+
+	if (kstrtouint(buf, 10, &id))
+		return -EINVAL;
+	parahotplug_request_complete(id, 1);
+	return count;
+}
+static DEVICE_ATTR_WO(deviceenabled);
+
+static struct attribute *visorchipset_install_attrs[] = {
+	&dev_attr_toolaction.attr,
+	&dev_attr_boottotool.attr,
+	&dev_attr_error.attr,
+	&dev_attr_textid.attr,
+	&dev_attr_remaining_steps.attr,
+	NULL
+};
+
+static const struct attribute_group visorchipset_install_group = {
+	.name = "install",
+	.attrs = visorchipset_install_attrs
+};
+
+static struct attribute *visorchipset_parahotplug_attrs[] = {
+	&dev_attr_devicedisabled.attr,
+	&dev_attr_deviceenabled.attr,
+	NULL
+};
+
+static const struct attribute_group visorchipset_parahotplug_group = {
+	.name = "parahotplug",
+	.attrs = visorchipset_parahotplug_attrs
+};
+
+static const struct attribute_group *visorchipset_dev_groups[] = {
+	&visorchipset_install_group,
+	&visorchipset_parahotplug_group,
+	NULL
+};
+
+/*
+ * parahotplug_request_kickoff() - initiate parahotplug request
+ * @req: the request to initiate
+ *
+ * Cause a uevent to run the user-level script that performs the disable/enable
+ * specified in the parahotplug_request.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int parahotplug_request_kickoff(struct parahotplug_request *req)
+{
+	struct controlvm_message_packet *cmd = &req->msg.cmd;
+	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
+	     env_func[40];
+	char *envp[] = { env_cmd, env_id, env_state, env_bus, env_dev,
+			 env_func, NULL
+	};
+
+	sprintf(env_cmd, "VISOR_PARAHOTPLUG=1");
+	sprintf(env_id, "VISOR_PARAHOTPLUG_ID=%d", req->id);
+	sprintf(env_state, "VISOR_PARAHOTPLUG_STATE=%d",
+		cmd->device_change_state.state.active);
+	sprintf(env_bus, "VISOR_PARAHOTPLUG_BUS=%d",
+		cmd->device_change_state.bus_no);
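+	/* dev_no is split like a PCI devfn: slot in the upper bits, function in the low 3 bits */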
+	sprintf(env_dev, "VISOR_PARAHOTPLUG_DEVICE=%d",
+		cmd->device_change_state.dev_no >> 3);
+	sprintf(env_func, "VISOR_PARAHOTPLUG_FUNCTION=%d",
+		cmd->device_change_state.dev_no & 0x7);
+	return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
+				  KOBJ_CHANGE, envp);
+}
+
+/*
+ * parahotplug_process_message() - enables or disables a PCI device by kicking
+ *                                 off a udev script
+ * @inmsg: the message indicating whether to enable or disable
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int parahotplug_process_message(struct controlvm_message *inmsg)
+{
+	struct parahotplug_request *req;
+	int err;
+
+	req = parahotplug_request_create(inmsg);
+	if (!req)
+		return -ENOMEM;
+	/*
+	 * For enable messages, just respond with success right away; we don't
+	 * need to wait to see whether the enable succeeded.
+	 */
+	if (inmsg->cmd.device_change_state.state.active) {
+		err = parahotplug_request_kickoff(req);
+		if (err)
+			goto err_respond;
+		controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS,
+				  &inmsg->cmd.device_change_state.state);
+		parahotplug_request_destroy(req);
+		return 0;
+	}
+	/*
+	 * For disable messages, add the request to the request list before
+	 * kicking off the udev script. It won't get responded to until the
+	 * script has indicated it's done.
+	 */
+	spin_lock(&parahotplug_request_list_lock);
+	list_add_tail(&req->list, &parahotplug_request_list);
+	spin_unlock(&parahotplug_request_list_lock);
+	err = parahotplug_request_kickoff(req);
+	if (err)
+		goto err_respond;
+	return 0;
+
+err_respond:
+	controlvm_respond(&inmsg->hdr, err,
+			  &inmsg->cmd.device_change_state.state);
+	return err;
+}
+
+/*
+ * chipset_ready_uevent() - sends chipset_ready action
+ * @msg_hdr: controlvm message header to respond to, if a response is expected
+ *
+ * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
+{
+	int res;
+
+	res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj, KOBJ_ONLINE);
+	if (msg_hdr->flags.response_expected)
+		controlvm_respond(msg_hdr, res, NULL);
+	return res;
+}
+
+/*
+ * chipset_selftest_uevent() - sends chipset_selftest action
+ * @msg_hdr: controlvm message header to respond to, if a response is expected
+ *
+ * Send ACTION=change with SPARSP_SELFTEST=1 for
+ * DEVPATH=/sys/devices/platform/visorchipset.
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
+{
+	char env_selftest[20];
+	char *envp[] = { env_selftest, NULL };
+	int res;
+
+	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
+	res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
+				 KOBJ_CHANGE, envp);
+	if (msg_hdr->flags.response_expected)
+		controlvm_respond(msg_hdr, res, NULL);
+	return res;
+}
+
+/*
+ * chipset_notready_uevent() - sends chipset_notready action
+ * @msg_hdr: controlvm message header to respond to, if a response is expected
+ *
+ * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
+{
+	int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
+				 KOBJ_OFFLINE);
+
+	if (msg_hdr->flags.response_expected)
+		controlvm_respond(msg_hdr, res, NULL);
+	return res;
+}
+
+static int unisys_vmcall(unsigned long tuple, unsigned long param)
+{
+	int result = 0;
+	unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
+	unsigned long reg_ebx;
+	unsigned long reg_ecx;
+
+	reg_ebx = param & 0xFFFFFFFF;
+	reg_ecx = param >> 32;
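+	/*
+	 * CPUID leaf 1, ECX bit 31 is the hypervisor-present bit; VMCALL is
+	 * only meaningful when running under a hypervisor.
+	 */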
+	cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
+	if (!(cpuid_ecx & 0x80000000))
+		return -EPERM;
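+	/* 0x0f 0x01 0xc1 is the raw opcode encoding of the VMCALL instruction */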
+	__asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
+			     "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
+	if (result)
+		goto error;
+	return 0;
+
+/* Convert VMCALL error codes to Linux error codes */
+error:
+	switch (result) {
+	case VMCALL_RESULT_INVALID_PARAM:
+		return -EINVAL;
+	case VMCALL_RESULT_DATA_UNAVAILABLE:
+		return -ENODEV;
+	default:
+		return -EFAULT;
+	}
+}
+
+static int controlvm_channel_create(struct visorchipset_device *dev)
+{
+	struct visorchannel *chan;
+	u64 addr;
+	int err;
+
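+	/*
+	 * Ask the s-Par firmware, via VMCALL, for the location of the controlvm
+	 * channel; it is written back into controlvm_params, whose physical
+	 * address we pass as the parameter.
+	 */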
+	err = unisys_vmcall(VMCALL_CONTROLVM_ADDR,
+			    virt_to_phys(&dev->controlvm_params));
+	if (err)
+		return err;
+	addr = dev->controlvm_params.address;
+	chan = visorchannel_create(addr, GFP_KERNEL,
+				   &visor_controlvm_channel_guid, true);
+	if (!chan)
+		return -ENOMEM;
+	dev->controlvm_channel = chan;
+	return 0;
+}
+
+static void setup_crash_devices_work_queue(struct work_struct *work)
+{
+	struct controlvm_message local_crash_bus_msg;
+	struct controlvm_message local_crash_dev_msg;
+	struct controlvm_message msg = {
+		.hdr.id = CONTROLVM_CHIPSET_INIT,
+		.cmd.init_chipset = {
+			.bus_count = 23,
+			.switch_count = 0,
+		},
+	};
+	u32 local_crash_msg_offset;
+	u16 local_crash_msg_count;
+
+	/* send init chipset msg */
+	chipset_init(&msg);
+	/* get saved message count */
+	if (visorchannel_read(chipset_dev->controlvm_channel,
+			      offsetof(struct visor_controlvm_channel,
+				       saved_crash_message_count),
+			      &local_crash_msg_count, sizeof(u16)) < 0) {
+		dev_err(&chipset_dev->acpi_device->dev,
+			"failed to read channel\n");
+		return;
+	}
+	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
+		dev_err(&chipset_dev->acpi_device->dev, "invalid count\n");
+		return;
+	}
+	/* get saved crash message offset */
+	if (visorchannel_read(chipset_dev->controlvm_channel,
+			      offsetof(struct visor_controlvm_channel,
+				       saved_crash_message_offset),
+			      &local_crash_msg_offset, sizeof(u32)) < 0) {
+		dev_err(&chipset_dev->acpi_device->dev,
+			"failed to read channel\n");
+		return;
+	}
+	/* read create device message for storage bus offset */
+	if (visorchannel_read(chipset_dev->controlvm_channel,
+			      local_crash_msg_offset,
+			      &local_crash_bus_msg,
+			      sizeof(struct controlvm_message)) < 0) {
+		dev_err(&chipset_dev->acpi_device->dev,
+			"failed to read channel\n");
+		return;
+	}
+	/* read create device message for storage device */
+	if (visorchannel_read(chipset_dev->controlvm_channel,
+			      local_crash_msg_offset +
+			      sizeof(struct controlvm_message),
+			      &local_crash_dev_msg,
+			      sizeof(struct controlvm_message)) < 0) {
+		dev_err(&chipset_dev->acpi_device->dev,
+			"failed to read channel\n");
+		return;
+	}
+	/* reuse IOVM create bus message */
+	if (!local_crash_bus_msg.cmd.create_bus.channel_addr) {
+		dev_err(&chipset_dev->acpi_device->dev,
+			"no valid create_bus message\n");
+		return;
+	}
+	visorbus_create(&local_crash_bus_msg);
+	/* reuse create device message for storage device */
+	if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
+		dev_err(&chipset_dev->acpi_device->dev,
+			"no valid create_device message\n");
+		return;
+	}
+	visorbus_device_create(&local_crash_dev_msg);
+}
+
+void visorbus_response(struct visor_device *bus_info, int response,
+		       int controlvm_id)
+{
+	if (!bus_info->pending_msg_hdr)
+		return;
+
+	controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response);
+	kfree(bus_info->pending_msg_hdr);
+	bus_info->pending_msg_hdr = NULL;
+}
+
+void visorbus_device_changestate_response(struct visor_device *dev_info,
+					  int response,
+					  struct visor_segment_state state)
+{
+	if (!dev_info->pending_msg_hdr)
+		return;
+
+	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE, dev_info,
+				     response, state);
+	kfree(dev_info->pending_msg_hdr);
+	dev_info->pending_msg_hdr = NULL;
+}
+
+static void parser_done(struct parser_context *ctx)
+{
+	chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
+	kfree(ctx);
+}
+
+static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
+						 bool *retry)
+{
+	unsigned long allocbytes;
+	struct parser_context *ctx;
+	void *mapping;
+
+	*retry = false;
+	/*
+	 * Allocate room for the context struct plus the payload, with one extra
+	 * byte so the payload is always \0 terminated; the subtraction accounts
+	 * for the parameters header already embedded in struct parser_context.
+	 */
+	allocbytes = (unsigned long)bytes + 1 + (sizeof(struct parser_context) -
+		     sizeof(struct visor_controlvm_parameters_header));
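+	/* throttle if too much payload is already buffered; the caller should retry later */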
+	if ((chipset_dev->controlvm_payload_bytes_buffered + bytes) >
+	     MAX_CONTROLVM_PAYLOAD_BYTES) {
+		*retry = true;
+		return NULL;
+	}
+	ctx = kzalloc(allocbytes, GFP_KERNEL);
+	if (!ctx) {
+		*retry = true;
+		return NULL;
+	}
+	ctx->allocbytes = allocbytes;
+	ctx->param_bytes = bytes;
+	mapping = memremap(addr, bytes, MEMREMAP_WB);
+	if (!mapping)
+		goto err_finish_ctx;
+	memcpy(&ctx->data, mapping, bytes);
+	memunmap(mapping);
+	ctx->byte_stream = true;
+	chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
+	return ctx;
+
+err_finish_ctx:
+	kfree(ctx);
+	return NULL;
+}
+
+/*
+ * handle_command() - process a controlvm message
+ * @inmsg:        the message to process
+ * @channel_addr: address of the controlvm channel
+ *
+ * Return:
+ *	0	- Successfully processed the message
+ *	-EAGAIN - ControlVM message was not processed and should be retried
+ *		  before reading the next controlvm message; this can occur
+ *		  when we need to throttle the allocation of memory into
+ *		  which to copy out controlvm payload data.
+ *	< 0	- error: ControlVM message was processed but an error occurred.
+ */
+static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
+{
+	struct controlvm_message_packet *cmd = &inmsg.cmd;
+	u64 parm_addr;
+	u32 parm_bytes;
+	struct parser_context *parser_ctx = NULL;
+	struct controlvm_message ackmsg;
+	int err = 0;
+
+	/* create parsing context if necessary */
+	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
+	parm_bytes = inmsg.hdr.payload_bytes;
+	/*
+	 * Parameter and channel addresses within test messages actually lie
+	 * within our OS-controlled memory. We need to know that, because it
+	 * makes a difference in how we compute the virtual address.
+	 */
+	if (parm_bytes) {
+		bool retry;
+
+		parser_ctx = parser_init_stream(parm_addr, parm_bytes, &retry);
+		if (!parser_ctx && retry)
+			return -EAGAIN;
+	}
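+	/* ack receipt of the message before acting on it */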
+	controlvm_init_response(&ackmsg, &inmsg.hdr, CONTROLVM_RESP_SUCCESS);
+	err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
+					CONTROLVM_QUEUE_ACK, &ackmsg);
+	if (err)
+		return err;
+	switch (inmsg.hdr.id) {
+	case CONTROLVM_CHIPSET_INIT:
+		err = chipset_init(&inmsg);
+		break;
+	case CONTROLVM_BUS_CREATE:
+		err = visorbus_create(&inmsg);
+		break;
+	case CONTROLVM_BUS_DESTROY:
+		err = visorbus_destroy(&inmsg);
+		break;
+	case CONTROLVM_BUS_CONFIGURE:
+		err = visorbus_configure(&inmsg, parser_ctx);
+		break;
+	case CONTROLVM_DEVICE_CREATE:
+		err = visorbus_device_create(&inmsg);
+		break;
+	case CONTROLVM_DEVICE_CHANGESTATE:
+		if (cmd->device_change_state.flags.phys_device) {
+			err = parahotplug_process_message(&inmsg);
+		} else {
+			/*
+			 * save the hdr and cmd structures for later use when
+			 * sending back the response to Command
+			 */
+			err = visorbus_device_changestate(&inmsg);
+			break;
+		}
+		break;
+	case CONTROLVM_DEVICE_DESTROY:
+		err = visorbus_device_destroy(&inmsg);
+		break;
+	case CONTROLVM_DEVICE_CONFIGURE:
+		/* no-op: just respond with success */
+		if (inmsg.hdr.flags.response_expected)
+			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS,
+					  NULL);
+		break;
+	case CONTROLVM_CHIPSET_READY:
+		err = chipset_ready_uevent(&inmsg.hdr);
+		break;
+	case CONTROLVM_CHIPSET_SELFTEST:
+		err = chipset_selftest_uevent(&inmsg.hdr);
+		break;
+	case CONTROLVM_CHIPSET_STOP:
+		err = chipset_notready_uevent(&inmsg.hdr);
+		break;
+	default:
+		err = -ENOMSG;
+		if (inmsg.hdr.flags.response_expected)
+			controlvm_respond(&inmsg.hdr,
+					  -CONTROLVM_RESP_ID_UNKNOWN, NULL);
+		break;
+	}
+	if (parser_ctx) {
+		parser_done(parser_ctx);
+		parser_ctx = NULL;
+	}
+	return err;
+}
+
+/*
+ * read_controlvm_event() - retrieves the next message from the
+ *                          CONTROLVM_QUEUE_EVENT queue in the controlvm
+ *                          channel
+ * @msg: pointer to the retrieved message
+ *
+ * Return: 0 if valid message was retrieved or -error
+ */
+static int read_controlvm_event(struct controlvm_message *msg)
+{
+	int err = visorchannel_signalremove(chipset_dev->controlvm_channel,
+					    CONTROLVM_QUEUE_EVENT, msg);
+
+	if (err)
+		return err;
+	/* got a message; test messages are not valid events */
+	if (msg->hdr.flags.test_message == 1)
+		return -EINVAL;
+	return 0;
+}
+
+/*
+ * parahotplug_process_list() - remove any request from the list that's been on
+ *                              there too long and respond with an error
+ */
+static void parahotplug_process_list(void)
+{
+	struct list_head *pos;
+	struct list_head *tmp;
+
+	spin_lock(&parahotplug_request_list_lock);
+	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
+		struct parahotplug_request *req =
+		    list_entry(pos, struct parahotplug_request, list);
+
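+		/* skip requests that have not yet timed out */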
+		if (!time_after_eq(jiffies, req->expiration))
+			continue;
+		list_del(pos);
+		if (req->msg.hdr.flags.response_expected)
+			controlvm_respond(
+				&req->msg.hdr,
+				CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
+				&req->msg.cmd.device_change_state.state);
+		parahotplug_request_destroy(req);
+	}
+	spin_unlock(&parahotplug_request_list_lock);
+}
+
+static void controlvm_periodic_work(struct work_struct *work)
+{
+	struct controlvm_message inmsg;
+	int count = 0;
+	int err;
+
+	/* Drain the RESPONSE queue until it is empty */
+	do {
+		err = visorchannel_signalremove(chipset_dev->controlvm_channel,
+						CONTROLVM_QUEUE_RESPONSE,
+						&inmsg);
+	} while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));
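+	/*
+	 * signalremove returns -EAGAIN once the queue is empty; anything else
+	 * ends this poll cycle here.
+	 */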
+	if (err != -EAGAIN)
+		goto schedule_out;
+	if (chipset_dev->controlvm_pending_msg_valid) {
+		/*
+		 * we throttled processing of a prior msg, so try to process
+		 * it again rather than reading a new one
+		 */
+		inmsg = chipset_dev->controlvm_pending_msg;
+		chipset_dev->controlvm_pending_msg_valid = false;
+		err = 0;
+	} else {
+		err = read_controlvm_event(&inmsg);
+	}
+	while (!err) {
+		chipset_dev->most_recent_message_jiffies = jiffies;
+		err = handle_command(inmsg,
+				     visorchannel_get_physaddr
+				     (chipset_dev->controlvm_channel));
+		if (err == -EAGAIN) {
+			chipset_dev->controlvm_pending_msg = inmsg;
+			chipset_dev->controlvm_pending_msg_valid = true;
+			break;
+		}
+
+		err = read_controlvm_event(&inmsg);
+	}
+	/* time out any stale parahotplug requests */
+	parahotplug_process_list();
+
+/*
+ * The controlvm messages are sent in bulk. Once we start receiving messages,
+ * we want the polling to be fast. If we do not receive any message for
+ * MIN_IDLE_SECONDS, we can slow down the polling.
+ */
+schedule_out:
+	if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
+				(HZ * MIN_IDLE_SECONDS))) {
+		/*
+		 * it's been longer than MIN_IDLE_SECONDS since we processed
+		 * our last controlvm message; slow down the polling
+		 */
+		if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_SLOW)
+			chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_SLOW;
+	} else {
+		if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_FAST)
+			chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
+	}
+	schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
+			      chipset_dev->poll_jiffies);
+}
+
+static int visorchipset_init(struct acpi_device *acpi_device)
+{
+	int err = -ENOMEM;
+	struct visorchannel *controlvm_channel;
+
+	chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
+	if (!chipset_dev)
+		goto error;
+	err = controlvm_channel_create(chipset_dev);
+	if (err)
+		goto error_free_chipset_dev;
+	acpi_device->driver_data = chipset_dev;
+	chipset_dev->acpi_device = acpi_device;
+	chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
+	err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
+				  visorchipset_dev_groups);
+	if (err < 0)
+		goto error_destroy_channel;
+	controlvm_channel = chipset_dev->controlvm_channel;
+	if (!visor_check_channel(visorchannel_get_header(controlvm_channel),
+				 &chipset_dev->acpi_device->dev,
+				 &visor_controlvm_channel_guid,
+				 "controlvm",
+				 sizeof(struct visor_controlvm_channel),
+				 VISOR_CONTROLVM_CHANNEL_VERSIONID,
+				 VISOR_CHANNEL_SIGNATURE)) {
+		err = -ENODEV;
+		goto error_delete_groups;
+	}
+	/* if booting in a crash kernel */
+	if (is_kdump_kernel())
+		INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
+				  setup_crash_devices_work_queue);
+	else
+		INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
+				  controlvm_periodic_work);
+	chipset_dev->most_recent_message_jiffies = jiffies;
+	chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
+	schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
+			      chipset_dev->poll_jiffies);
+	err = visorbus_init();
+	if (err < 0)
+		goto error_cancel_work;
+	return 0;
+
+error_cancel_work:
+	cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
+
+error_delete_groups:
+	sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
+			    visorchipset_dev_groups);
+
+error_destroy_channel:
+	visorchannel_destroy(chipset_dev->controlvm_channel);
+
+error_free_chipset_dev:
+	kfree(chipset_dev);
+
+error:
+	dev_err(&acpi_device->dev, "failed with error %d\n", err);
+	return err;
+}
+
+static int visorchipset_exit(struct acpi_device *acpi_device)
+{
+	visorbus_exit();
+	cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
+	sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
+			    visorchipset_dev_groups);
+	visorchannel_destroy(chipset_dev->controlvm_channel);
+	kfree(chipset_dev);
+	return 0;
+}
+
+static const struct acpi_device_id unisys_device_ids[] = {
+	{"PNP0A07", 0},
+	{"", 0},
+};
+
+static struct acpi_driver unisys_acpi_driver = {
+	.name = "unisys_acpi",
+	.class = "unisys_acpi_class",
+	.owner = THIS_MODULE,
+	.ids = unisys_device_ids,
+	.ops = {
+		.add = visorchipset_init,
+		.remove = visorchipset_exit,
+	},
+};
+
+MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
+
+static __init int visorutil_spar_detect(void)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+		/* check the hypervisor vendor signature */
+		cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
+		return  (ebx == UNISYS_VISOR_ID_EBX) &&
+			(ecx == UNISYS_VISOR_ID_ECX) &&
+			(edx == UNISYS_VISOR_ID_EDX);
+	}
+	return 0;
+}
+
+static int __init init_unisys(void)
+{
+	int result;
+
+	if (!visorutil_spar_detect())
+		return -ENODEV;
+	result = acpi_bus_register_driver(&unisys_acpi_driver);
+	if (result)
+		return -ENODEV;
+	pr_info("Unisys Visorchipset Driver Loaded.\n");
+	return 0;
+}
+
+static void __exit exit_unisys(void)
+{
+	acpi_bus_unregister_driver(&unisys_acpi_driver);
+}
+
+module_init(init_unisys);
+module_exit(exit_unisys);
+
+MODULE_AUTHOR("Unisys");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");