Diffstat (limited to 'drivers/staging/unisys/visorbus')
 drivers/staging/unisys/visorbus/Kconfig                      |    9
 drivers/staging/unisys/visorbus/Makefile                     |   13
 drivers/staging/unisys/visorbus/controlvmchannel.h           |  485
 drivers/staging/unisys/visorbus/controlvmcompletionstatus.h  |   94
 drivers/staging/unisys/visorbus/iovmcall_gnuc.h              |   49
 drivers/staging/unisys/visorbus/periodic_work.c              |  204
 drivers/staging/unisys/visorbus/vbuschannel.h                |   94
 drivers/staging/unisys/visorbus/vbusdeviceinfo.h             |  213
 drivers/staging/unisys/visorbus/visorbus_main.c              | 1518
 drivers/staging/unisys/visorbus/visorbus_private.h           |   69
 drivers/staging/unisys/visorbus/visorchannel.c               |  613
 drivers/staging/unisys/visorbus/visorchipset.c               | 2437
 drivers/staging/unisys/visorbus/vmcallinterface.h            |  149
 13 files changed, 5947 insertions(+), 0 deletions(-)
diff --git a/drivers/staging/unisys/visorbus/Kconfig b/drivers/staging/unisys/visorbus/Kconfig
new file mode 100644
index 000000000000..9b299ac86015
--- /dev/null
+++ b/drivers/staging/unisys/visorbus/Kconfig
@@ -0,0 +1,9 @@
+#
+# Unisys visorbus configuration
+#
+
+config UNISYS_VISORBUS
+ tristate "Unisys visorbus driver"
+ depends on UNISYSSPAR
+ ---help---
+ If you say Y here, you will enable the Unisys visorbus driver.
diff --git a/drivers/staging/unisys/visorbus/Makefile b/drivers/staging/unisys/visorbus/Makefile
new file mode 100644
index 000000000000..fa27ee5f336c
--- /dev/null
+++ b/drivers/staging/unisys/visorbus/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for Unisys visorbus
+#
+
+obj-$(CONFIG_UNISYS_VISORBUS) += visorbus.o
+
+visorbus-y := visorbus_main.o
+visorbus-y += visorchannel.o
+visorbus-y += visorchipset.o
+visorbus-y += periodic_work.o
+
+ccflags-y += -Idrivers/staging/unisys/include
+ccflags-y += -Idrivers/staging/unisys/visorutil
diff --git a/drivers/staging/unisys/visorbus/controlvmchannel.h b/drivers/staging/unisys/visorbus/controlvmchannel.h
new file mode 100644
index 000000000000..a50d9cf4bed7
--- /dev/null
+++ b/drivers/staging/unisys/visorbus/controlvmchannel.h
@@ -0,0 +1,485 @@
+/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __CONTROLVMCHANNEL_H__
+#define __CONTROLVMCHANNEL_H__
+
+#include <linux/uuid.h>
+#include "channel.h"
+
+/* {2B3C2D10-7EF5-4ad8-B966-3448B7386B3D} */
+#define SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID \
+ UUID_LE(0x2b3c2d10, 0x7ef5, 0x4ad8, \
+ 0xb9, 0x66, 0x34, 0x48, 0xb7, 0x38, 0x6b, 0x3d)
+
+#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE \
+ ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+#define CONTROLVM_MESSAGE_MAX 64
+
+/* Must increment this whenever you insert or delete fields within
+ * this channel struct. Also increment whenever you change the meaning
+ * of fields within this channel struct so as to break pre-existing
+ * software. Note that you can usually add fields to the END of the
+ * channel struct withOUT needing to increment this.
+ */
+#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID 1
+
+#define SPAR_CONTROLVM_CHANNEL_OK_CLIENT(ch) \
+ spar_check_channel_client(ch, \
+ SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID, \
+ "controlvm", \
+ sizeof(struct spar_controlvm_channel_protocol), \
+ ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE)
+
+#define MAX_SERIAL_NUM 32
+
+/* Defines for various channel queues */
+#define CONTROLVM_QUEUE_REQUEST 0
+#define CONTROLVM_QUEUE_RESPONSE 1
+#define CONTROLVM_QUEUE_EVENT 2
+#define CONTROLVM_QUEUE_ACK 3
+
+/* Max num of messages stored during IOVM creation to be reused after crash */
+#define CONTROLVM_CRASHMSG_MAX 2
+
+struct spar_segment_state {
+ u16 enabled:1; /* Bit 0: May enter other states */
+ u16 active:1; /* Bit 1: Assigned to active partition */
+ u16 alive:1; /* Bit 2: Configure message sent to
+ * service/server */
+ u16 revoked:1; /* Bit 3: similar to partition state
+ * ShuttingDown */
+ u16 allocated:1; /* Bit 4: memory (device/port number)
+ * has been selected by Command */
+ u16 known:1; /* Bit 5: has been introduced to the
+ * service/guest partition */
+ u16 ready:1; /* Bit 6: service/Guest partition has
+ * responded to introduction */
+ u16 operating:1; /* Bit 7: resource is configured and
+ * operating */
+ /* Note: don't use high bit unless we need to switch to ushort
+ * which is non-compliant */
+};
+
+static const struct spar_segment_state segment_state_running = {
+ 1, 1, 1, 0, 1, 1, 1, 1
+};
+
+static const struct spar_segment_state segment_state_paused = {
+ 1, 1, 1, 0, 1, 1, 1, 0
+};
+
+static const struct spar_segment_state segment_state_standby = {
+ 1, 1, 0, 0, 1, 1, 1, 0
+};
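
These state values cross the partition boundary as raw bits, so the packing matters. A standalone userspace sketch (assuming GCC's LSB-first bitfield layout on little-endian x86, which is what this ABI relies on) showing where segment_state_running lands:

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct seg_state {			/* mirrors spar_segment_state above */
	uint16_t enabled:1, active:1, alive:1, revoked:1;
	uint16_t allocated:1, known:1, ready:1, operating:1;
};

int main(void)
{
	struct seg_state running = { 1, 1, 1, 0, 1, 1, 1, 1 };
	uint16_t raw = 0;

	memcpy(&raw, &running, sizeof(raw));
	/* bits 0-7 set except bit 3 ("revoked"): 1111 0111b */
	assert((raw & 0xff) == 0xf7);
	return 0;
}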
+
+/* Ids for commands that may appear in either queue of a ControlVm channel.
+ *
+ * Commands that are initiated by the command partition (CP), by an IO or
+ * console service partition (SP), or by a guest partition (GP) are:
+ * - issued on the RequestQueue queue (q #0) in the ControlVm channel
+ * - responded to on the ResponseQueue queue (q #1) in the ControlVm channel
+ *
+ * Events that are initiated by an IO or console service partition (SP) or
+ * by a guest partition (GP) are:
+ * - issued on the EventQueue queue (q #2) in the ControlVm channel
+ * - responded to on the EventAckQueue queue (q #3) in the ControlVm channel
+ */
+enum controlvm_id {
+ CONTROLVM_INVALID = 0,
+ /* SWITCH commands required Parameter: SwitchNumber */
+ /* BUS commands required Parameter: BusNumber */
+ CONTROLVM_BUS_CREATE = 0x101, /* CP --> SP, GP */
+ CONTROLVM_BUS_DESTROY = 0x102, /* CP --> SP, GP */
+ CONTROLVM_BUS_CONFIGURE = 0x104, /* CP --> SP */
+ CONTROLVM_BUS_CHANGESTATE = 0x105, /* CP --> SP, GP */
+ CONTROLVM_BUS_CHANGESTATE_EVENT = 0x106, /* SP, GP --> CP */
+/* DEVICE commands required Parameter: BusNumber, DeviceNumber */
+
+ CONTROLVM_DEVICE_CREATE = 0x201, /* CP --> SP, GP */
+ CONTROLVM_DEVICE_DESTROY = 0x202, /* CP --> SP, GP */
+ CONTROLVM_DEVICE_CONFIGURE = 0x203, /* CP --> SP */
+ CONTROLVM_DEVICE_CHANGESTATE = 0x204, /* CP --> SP, GP */
+ CONTROLVM_DEVICE_CHANGESTATE_EVENT = 0x205, /* SP, GP --> CP */
+ CONTROLVM_DEVICE_RECONFIGURE = 0x206, /* CP --> Boot */
+/* CHIPSET commands */
+ CONTROLVM_CHIPSET_INIT = 0x301, /* CP --> SP, GP */
+ CONTROLVM_CHIPSET_STOP = 0x302, /* CP --> SP, GP */
+ CONTROLVM_CHIPSET_READY = 0x304, /* CP --> SP */
+ CONTROLVM_CHIPSET_SELFTEST = 0x305, /* CP --> SP */
+
+};
+
+struct irq_info {
+ u64 reserved1;
+
+ /* specifies interrupt handle. It is used to retrieve the
+ * corresponding interrupt pin from Monitor; and the
+ * interrupt pin is used to connect to the corresponding
+ * interrupt. Used by IOPart-GP only.
+ */
+ u64 recv_irq_handle;
+
+ /* specifies interrupt vector. It, interrupt pin, and shared are
+ * used to connect to the corresponding interrupt. Used by
+ * IOPart-GP only.
+ */
+ u32 recv_irq_vector;
+
+ /* specifies whether the recvInterrupt is shared (0 = not shared;
+ * 1 = shared). It, the interrupt pin, and the vector are used to
+ * connect to the corresponding interrupt. Used by IOPart-GP only.
+ */
+ u8 recv_irq_shared;
+ u8 reserved[3]; /* Natural alignment purposes */
+};
+
+struct pci_id {
+ u16 domain;
+ u8 bus;
+ u8 slot;
+ u8 func;
+ u8 reserved[3]; /* Natural alignment purposes */
+};
+
+struct efi_spar_indication {
+ u64 boot_to_fw_ui:1; /* Bit 0: Stop in uefi ui */
+ u64 clear_nvram:1; /* Bit 1: Clear NVRAM */
+ u64 clear_cmos:1; /* Bit 2: Clear CMOS */
+ u64 boot_to_tool:1; /* Bit 3: Run install tool */
+ /* remaining bits are available */
+};
+
+enum ultra_chipset_feature {
+ ULTRA_CHIPSET_FEATURE_REPLY = 0x00000001,
+ ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG = 0x00000002,
+};
+
+/* This is the common structure that is at the beginning of every
+ * ControlVm message (both commands and responses) in any ControlVm
+ * queue. Commands are easily distinguished from responses by
+ * looking at the flags.response field.
+ */
+struct controlvm_message_header {
+ u32 id; /* See CONTROLVM_ID. */
+ /* For requests, indicates the message type. */
+ /* For responses, indicates the type of message we are responding to. */
+
+ u32 message_size; /* Includes size of this struct + size
+ * of message */
+ u32 segment_index; /* Index of segment containing Vm
+ * message/information */
+ u32 completion_status; /* Error status code or result of
+ * message completion */
+ struct {
+ u32 failed:1; /* =1 in a response to signify
+ * failure */
+ u32 response_expected:1; /* =1 in all messages that expect a
+ * response (Control ignores this
+ * bit) */
+ u32 server:1; /* =1 in all bus & device-related
+ * messages where the message
+ * receiver is to act as the bus or
+ * device server */
+ u32 test_message:1; /* =1 for testing use only
+ * (Control and Command ignore this
+ * bit) */
+ u32 partial_completion:1; /* =1 if there are forthcoming
+ * responses/acks associated
+ * with this message */
+ u32 preserve:1; /* =1 this is to let us know to
+ * preserve channel contents
+ * (for running guests)*/
+ u32 writer_in_diag:1; /* =1 the DiagWriter is active in the
+ * Diagnostic Partition*/
+ } flags;
+ u32 reserved; /* Natural alignment */
+ u64 message_handle; /* Identifies the particular message instance;
+ * used to match request instances with the
+ * corresponding response instance */
+ u64 payload_vm_offset; /* Offset of payload area from start of this
+ * instance of ControlVm segment */
+ u32 payload_max_bytes; /* Maximum bytes allocated in payload
+ * area of ControlVm segment */
+ u32 payload_bytes; /* Actual number of bytes of payload
+ * area to copy between IO/Command; */
+ /* if non-zero, there is a payload to copy. */
+};
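
Responses echo the request's id and message_handle. A minimal sketch (hypothetical helper; CONTROLVM_RESP_SUCCESS comes from controlvmcompletionstatus.h in this same patch) of how a recipient might seed a response header from the request it is answering:

static void controlvm_init_response(struct controlvm_message_header *resp,
				    const struct controlvm_message_header *req,
				    u32 status)
{
	memset(resp, 0, sizeof(*resp));
	resp->id = req->id;		/* type of message being answered */
	resp->message_size = sizeof(*resp);
	resp->completion_status = status;
	resp->flags.failed = (status != CONTROLVM_RESP_SUCCESS);
	resp->message_handle = req->message_handle; /* pairs req with resp */
}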
+
+struct controlvm_packet_device_create {
+ u32 bus_no; /* bus # (0..n-1) from the msg receiver's end */
+ u32 dev_no; /* bus-relative (0..n-1) device number */
+ u64 channel_addr; /* Guest physical address of the channel, which
+ * can be dereferenced by the receiver of this
+ * ControlVm command */
+ u64 channel_bytes; /* specifies size of the channel in bytes */
+ uuid_le data_type_uuid; /* specifies format of data in channel */
+ uuid_le dev_inst_uuid; /* instance guid for the device */
+ struct irq_info intr; /* specifies interrupt information */
+}; /* for CONTROLVM_DEVICE_CREATE */
+
+struct controlvm_packet_device_configure {
+ u32 bus_no; /* bus # (0..n-1) from the msg
+ * receiver's perspective */
+ /* Control uses header SegmentIndex field to access bus number... */
+ u32 dev_no; /* bus-relative (0..n-1) device number */
+}; /* for CONTROLVM_DEVICE_CONFIGURE */
+
+struct controlvm_message_device_create {
+ struct controlvm_message_header header;
+ struct controlvm_packet_device_create packet;
+}; /* total 128 bytes */
+
+struct controlvm_message_device_configure {
+ struct controlvm_message_header header;
+ struct controlvm_packet_device_configure packet;
+}; /* total 56 bytes */
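
Those byte totals are part of the cross-partition ABI, so a compile-time guard is cheap insurance; a sketch using C11 static_assert (kernel code would normally spell this BUILD_BUG_ON()):

#include <assert.h>

static_assert(sizeof(struct controlvm_message_device_create) == 128,
	      "DEVICE_CREATE message size is channel ABI");
static_assert(sizeof(struct controlvm_message_device_configure) == 56,
	      "DEVICE_CONFIGURE message size is channel ABI");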
+
+/* This is the format for a message in any ControlVm queue. */
+struct controlvm_message_packet {
+ union {
+ struct {
+ u32 bus_no; /* bus # (0..n-1) from the msg
+ * receiver's perspective */
+ u32 dev_count; /* indicates the max number of
+ * devices on this bus */
+ u64 channel_addr; /* Guest physical address of
+ * the channel, which can be
+ * dereferenced by the receiver
+ * of this ControlVm command */
+ u64 channel_bytes; /* size of the channel */
+ uuid_le bus_data_type_uuid; /* indicates format of
+ * data in bus channel*/
+ uuid_le bus_inst_uuid; /* instance uuid for the bus */
+ } create_bus; /* for CONTROLVM_BUS_CREATE */
+ struct {
+ u32 bus_no; /* bus # (0..n-1) from the msg
+ * receiver's perspective */
+ u32 reserved; /* Natural alignment purposes */
+ } destroy_bus; /* for CONTROLVM_BUS_DESTROY */
+ struct {
+ u32 bus_no; /* bus # (0..n-1) from the receiver's
+ * perspective */
+ u32 reserved1; /* for alignment purposes */
+ u64 guest_handle; /* This is used to convert
+ * guest physical address to
+ * physical address */
+ u64 recv_bus_irq_handle;
+ /* specifies interrupt info. It is used by SP
+ * to register to receive interrupts from the
+ * CP. This interrupt is used for bus level
+ * notifications. The corresponding
+ * sendBusInterruptHandle is kept in CP. */
+ } configure_bus; /* for CONTROLVM_BUS_CONFIGURE */
+ /* for CONTROLVM_DEVICE_CREATE */
+ struct controlvm_packet_device_create create_device;
+ struct {
+ u32 bus_no; /* bus # (0..n-1) from the msg
+ * receiver's perspective */
+ u32 dev_no; /* bus-relative (0..n-1) device # */
+ } destroy_device; /* for CONTROLVM_DEVICE_DESTROY */
+ /* for CONTROLVM_DEVICE_CONFIGURE */
+ struct controlvm_packet_device_configure configure_device;
+ struct {
+ u32 bus_no; /* bus # (0..n-1) from the msg
+ * receiver's perspective */
+ u32 dev_no; /* bus-relative (0..n-1) device # */
+ } reconfigure_device; /* for CONTROLVM_DEVICE_RECONFIGURE */
+ struct {
+ u32 bus_no;
+ struct spar_segment_state state;
+ u8 reserved[2]; /* Natural alignment purposes */
+ } bus_change_state; /* for CONTROLVM_BUS_CHANGESTATE */
+ struct {
+ u32 bus_no;
+ u32 dev_no;
+ struct spar_segment_state state;
+ struct {
+ u32 phys_device:1; /* =1 if message is for
+ * a physical device */
+ } flags;
+ u8 reserved[2]; /* Natural alignment purposes */
+ } device_change_state; /* for CONTROLVM_DEVICE_CHANGESTATE */
+ struct {
+ u32 bus_no;
+ u32 dev_no;
+ struct spar_segment_state state;
+ u8 reserved[6]; /* Natural alignment purposes */
+ } device_change_state_event;
+ /* for CONTROLVM_DEVICE_CHANGESTATE_EVENT */
+ struct {
+ u32 bus_count; /* indicates the max number of busses */
+ u32 switch_count; /* indicates the max number of
+ * switches if a service partition */
+ enum ultra_chipset_feature features;
+ u32 platform_number; /* Platform Number */
+ } init_chipset; /* for CONTROLVM_CHIPSET_INIT */
+ struct {
+ u32 options; /* reserved */
+ u32 test; /* bit 0 set to run embedded selftest */
+ } chipset_selftest; /* for CONTROLVM_CHIPSET_SELFTEST */
+ u64 addr; /* a physical address of something, that can be
+ * dereferenced by the receiver of this
+ * ControlVm command (depends on command id) */
+ u64 handle; /* a handle of something (depends on command
+ * id) */
+ };
+};
+
+/* All messages in any ControlVm queue have this layout. */
+struct controlvm_message {
+ struct controlvm_message_header hdr;
+ struct controlvm_message_packet cmd;
+};
+
+struct spar_controlvm_channel_protocol {
+ struct channel_header header;
+ u64 gp_controlvm; /* guest phys addr of this channel */
+ u64 gp_partition_tables;/* guest phys addr of partition tables */
+ u64 gp_diag_guest; /* guest phys addr of diagnostic channel */
+ u64 gp_boot_romdisk;/* guest phys addr of (read-only) Boot ROM disk */
+ u64 gp_boot_ramdisk;/* guest phys addr of writable Boot RAM disk */
+ u64 gp_acpi_table; /* guest phys addr of acpi table */
+ u64 gp_control_channel;/* guest phys addr of control channel */
+ u64 gp_diag_romdisk;/* guest phys addr of diagnostic ROM disk */
+ u64 gp_nvram; /* guest phys addr of NVRAM channel */
+ u64 request_payload_offset; /* Offset to request payload area */
+ u64 event_payload_offset; /* Offset to event payload area */
+ u32 request_payload_bytes; /* Bytes available in request payload
+ * area */
+ u32 event_payload_bytes;/* Bytes available in event payload area */
+ u32 control_channel_bytes;
+ u32 nvram_channel_bytes; /* Bytes in PartitionNvram segment */
+ u32 message_bytes; /* sizeof(CONTROLVM_MESSAGE) */
+ u32 message_count; /* CONTROLVM_MESSAGE_MAX */
+ u64 gp_smbios_table; /* guest phys addr of SMBIOS tables */
+ u64 gp_physical_smbios_table; /* guest phys addr of SMBIOS table */
+ /* ULTRA_MAX_GUESTS_PER_SERVICE */
+ char gp_reserved[2688];
+
+ /* guest physical address of EFI firmware image base */
+ u64 virtual_guest_firmware_image_base;
+
+ /* guest physical address of EFI firmware entry point */
+ u64 virtual_guest_firmware_entry_point;
+
+ /* guest EFI firmware image size */
+ u64 virtual_guest_firmware_image_size;
+
+ /* GPA = 1MB where EFI firmware image is copied to */
+ u64 virtual_guest_firmware_boot_base;
+ u64 virtual_guest_image_base;
+ u64 virtual_guest_image_size;
+ u64 prototype_control_channel_offset;
+ u64 virtual_guest_partition_handle;
+
+ u16 restore_action; /* Restore Action field to restore the guest
+ * partition */
+ u16 dump_action; /* For Windows guests it shows if the visordisk
+ * is running in dump mode */
+ u16 nvram_fail_count;
+ u16 saved_crash_message_count; /* = CONTROLVM_CRASHMSG_MAX */
+ u32 saved_crash_message_offset; /* Offset to request payload area needed
+ * for crash dump */
+ u32 installation_error; /* Type of error encountered during
+ * installation */
+ u32 installation_text_id; /* Id of string to display */
+ u16 installation_remaining_steps;/* Number of remaining installation
+ * steps (for progress bars) */
+ u8 tool_action; /* ULTRA_TOOL_ACTIONS Installation Action
+ * field */
+ u8 reserved; /* alignment */
+ struct efi_spar_indication efi_spar_ind;
+ struct efi_spar_indication efi_spar_ind_supported;
+ u32 sp_reserved;
+ u8 reserved2[28]; /* Force signals to begin on 128-byte cache
+ * line */
+ struct signal_queue_header request_queue;/* Service or guest partition
+ * uses this queue to send
+ * requests to Control */
+ struct signal_queue_header response_queue;/* Control uses this queue to
+ * respond to service or guest
+ * partition requests */
+ struct signal_queue_header event_queue; /* Control uses this queue to
+ * send events to service or
+ * guest partition */
+ struct signal_queue_header event_ack_queue;/* Service or guest partition
+ * uses this queue to ack
+ * Control events */
+
+ /* Request fixed-size message pool - does not include payload */
+ struct controlvm_message request_msg[CONTROLVM_MESSAGE_MAX];
+
+ /* Response fixed-size message pool - does not include payload */
+ struct controlvm_message response_msg[CONTROLVM_MESSAGE_MAX];
+
+ /* Event fixed-size message pool - does not include payload */
+ struct controlvm_message event_msg[CONTROLVM_MESSAGE_MAX];
+
+ /* Ack fixed-size message pool - does not include payload */
+ struct controlvm_message event_ack_msg[CONTROLVM_MESSAGE_MAX];
+
+ /* Message stored during IOVM creation to be reused after crash */
+ struct controlvm_message saved_crash_msg[CONTROLVM_CRASHMSG_MAX];
+};
+
+/* Offsets for VM channel attributes */
+#define VM_CH_REQ_QUEUE_OFFSET \
+ offsetof(struct spar_controlvm_channel_protocol, request_queue)
+#define VM_CH_RESP_QUEUE_OFFSET \
+ offsetof(struct spar_controlvm_channel_protocol, response_queue)
+#define VM_CH_EVENT_QUEUE_OFFSET \
+ offsetof(struct spar_controlvm_channel_protocol, event_queue)
+#define VM_CH_ACK_QUEUE_OFFSET \
+ offsetof(struct spar_controlvm_channel_protocol, event_ack_queue)
+#define VM_CH_REQ_MSG_OFFSET \
+ offsetof(struct spar_controlvm_channel_protocol, request_msg)
+#define VM_CH_RESP_MSG_OFFSET \
+ offsetof(struct spar_controlvm_channel_protocol, response_msg)
+#define VM_CH_EVENT_MSG_OFFSET \
+ offsetof(struct spar_controlvm_channel_protocol, event_msg)
+#define VM_CH_ACK_MSG_OFFSET \
+ offsetof(struct spar_controlvm_channel_protocol, event_ack_msg)
+#define VM_CH_CRASH_MSG_OFFSET \
+ offsetof(struct spar_controlvm_channel_protocol, saved_crash_msg)
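
A hypothetical helper showing how a partition with the channel mapped might resolve one of these queues from the channel base:

/* Hypothetical: locate the request queue header in a mapped channel. */
static inline struct signal_queue_header *
controlvm_request_queue(void *channel_base)
{
	return (struct signal_queue_header *)
		((char *)channel_base + VM_CH_REQ_QUEUE_OFFSET);
}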
+
+/* The following header will be located at the beginning of PayloadVmOffset for
+ * various ControlVm commands. The receiver of a ControlVm command with a
+ * PayloadVmOffset will dereference this address and then use connection_offset,
+ * initiator_offset, and target_offset to get the location of UTF-8 formatted
+ * strings that can be parsed to obtain command-specific information. The value
+ * of total_length should equal PayloadBytes. The format of the strings at
+ * PayloadVmOffset will take different forms depending on the message.
+ */
+struct spar_controlvm_parameters_header {
+ u32 total_length;
+ u32 header_length;
+ u32 connection_offset;
+ u32 connection_length;
+ u32 initiator_offset;
+ u32 initiator_length;
+ u32 target_offset;
+ u32 target_length;
+ u32 client_offset;
+ u32 client_length;
+ u32 name_offset;
+ u32 name_length;
+ uuid_le id;
+ u32 revision;
+ u32 reserved; /* Natural alignment */
+};
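
A sketch (hypothetical helper, not part of the patch) of pulling one of the UTF-8 strings out of the payload via its offset/length pair; note the strings are not NUL-terminated in the channel:

static int controlvm_copy_initiator(
		const struct spar_controlvm_parameters_header *ph,
		char *buf, size_t buflen)
{
	/* a real implementation would also validate the offset and
	 * length against total_length before trusting them */
	if (ph->initiator_length + 1 > buflen)
		return -1;	/* caller's buffer too small */
	memcpy(buf, (const char *)ph + ph->initiator_offset,
	       ph->initiator_length);
	buf[ph->initiator_length] = '\0';
	return 0;
}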
+
+#endif /* __CONTROLVMCHANNEL_H__ */
diff --git a/drivers/staging/unisys/visorbus/controlvmcompletionstatus.h b/drivers/staging/unisys/visorbus/controlvmcompletionstatus.h
new file mode 100644
index 000000000000..f74f5d8c2820
--- /dev/null
+++ b/drivers/staging/unisys/visorbus/controlvmcompletionstatus.h
@@ -0,0 +1,94 @@
+/* controlvmcompletionstatus.h
+ *
+ * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* Defines for all valid values returned in the response message header
+ * completionStatus field. See controlvmchannel.h for description of
+ * the header: _CONTROLVM_MESSAGE_HEADER.
+ */
+
+#ifndef __CONTROLVMCOMPLETIONSTATUS_H__
+#define __CONTROLVMCOMPLETIONSTATUS_H__
+
+/* General Errors------------------------------------------------------[0-99] */
+#define CONTROLVM_RESP_SUCCESS 0
+#define CONTROLVM_RESP_ERROR_ALREADY_DONE 1
+#define CONTROLVM_RESP_ERROR_IOREMAP_FAILED 2
+#define CONTROLVM_RESP_ERROR_KMALLOC_FAILED 3
+#define CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN 4
+#define CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT 5
+
+/* CONTROLVM_INIT_CHIPSET-------------------------------------------[100-199] */
+#define CONTROLVM_RESP_ERROR_CLIENT_SWITCHCOUNT_NONZERO 100
+#define CONTROLVM_RESP_ERROR_EXPECTED_CHIPSET_INIT 101
+
+/* Maximum Limit----------------------------------------------------[200-299] */
+#define CONTROLVM_RESP_ERROR_MAX_BUSES 201 /* BUS_CREATE */
+#define CONTROLVM_RESP_ERROR_MAX_DEVICES 202 /* DEVICE_CREATE */
+/* Payload and Parameter Related------------------------------------[400-499] */
+#define CONTROLVM_RESP_ERROR_PAYLOAD_INVALID 400 /* SWITCH_ATTACHEXTPORT,
+ * DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_ERROR_INITIATOR_PARAMETER_INVALID 401 /* Multiple */
+#define CONTROLVM_RESP_ERROR_TARGET_PARAMETER_INVALID 402 /* DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_ERROR_CLIENT_PARAMETER_INVALID 403 /* DEVICE_CONFIGURE */
+/* Specified[Packet Structure] Value-------------------------------[500-599] */
+#define CONTROLVM_RESP_ERROR_BUS_INVALID 500 /* SWITCH_ATTACHINTPORT,
+ * BUS_CONFIGURE,
+ * DEVICE_CREATE,
+ * DEVICE_CONFIG
+ * DEVICE_DESTROY */
+#define CONTROLVM_RESP_ERROR_DEVICE_INVALID 501 /* SWITCH_ATTACHINTPORT */
+ /* DEVICE_CREATE,
+ * DEVICE_CONFIGURE,
+ * DEVICE_DESTROY */
+#define CONTROLVM_RESP_ERROR_CHANNEL_INVALID 502 /* DEVICE_CREATE,
+ * DEVICE_CONFIGURE */
+/* Partition Driver Callback Interface----------------------[600-699] */
+#define CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE 604 /* BUS_CREATE,
+ * BUS_DESTROY,
+ * DEVICE_CREATE,
+ * DEVICE_DESTROY */
+/* Unable to invoke VIRTPCI callback */
+#define CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR 605
+ /* BUS_CREATE,
+ * BUS_DESTROY,
+ * DEVICE_CREATE,
+ * DEVICE_DESTROY */
+/* VIRTPCI Callback returned error */
+#define CONTROLVM_RESP_ERROR_GENERIC_DRIVER_CALLBACK_ERROR 606
+ /* SWITCH_ATTACHEXTPORT,
+ * SWITCH_DETACHEXTPORT
+ * DEVICE_CONFIGURE */
+
+/* generic device callback returned error */
+/* Bus Related------------------------------------------------------[700-799] */
+#define CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED 700 /* BUS_DESTROY */
+/* Channel Related--------------------------------------------------[800-899] */
+#define CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN 800 /* GET_CHANNELINFO,
+ * DEVICE_DESTROY */
+#define CONTROLVM_RESP_ERROR_CHANNEL_SIZE_TOO_SMALL 801 /* DEVICE_CREATE */
+/* Chipset Shutdown Related---------------------------------------[1000-1099] */
+#define CONTROLVM_RESP_ERROR_CHIPSET_SHUTDOWN_FAILED 1000
+#define CONTROLVM_RESP_ERROR_CHIPSET_SHUTDOWN_ALREADY_ACTIVE 1001
+
+/* Chipset Stop Related-------------------------------------------[1100-1199] */
+#define CONTROLVM_RESP_ERROR_CHIPSET_STOP_FAILED_BUS 1100
+#define CONTROLVM_RESP_ERROR_CHIPSET_STOP_FAILED_SWITCH 1101
+
+/* Device Related-------------------------------------------------[1400-1499] */
+#define CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT 1400
+
+#endif /* __CONTROLVMCOMPLETIONSTATUS_H__ not defined */
diff --git a/drivers/staging/unisys/visorbus/iovmcall_gnuc.h b/drivers/staging/unisys/visorbus/iovmcall_gnuc.h
new file mode 100644
index 000000000000..57dd93e0cc83
--- /dev/null
+++ b/drivers/staging/unisys/visorbus/iovmcall_gnuc.h
@@ -0,0 +1,49 @@
+/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* Linux GCC Version (32-bit and 64-bit) */
+static inline unsigned long
+__unisys_vmcall_gnuc(unsigned long tuple, unsigned long reg_ebx,
+ unsigned long reg_ecx)
+{
+ unsigned long result = 0;
+ unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
+
+ cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
+ if (!(cpuid_ecx & 0x80000000))
+ return -1;
+
+ __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
+ "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
+ return result;
+}
+
+static inline unsigned long
+__unisys_extended_vmcall_gnuc(unsigned long long tuple,
+ unsigned long long reg_ebx,
+ unsigned long long reg_ecx,
+ unsigned long long reg_edx)
+{
+ unsigned long result = 0;
+ unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
+
+ cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
+ if (!(cpuid_ecx & 0x80000000))
+ return -1;
+
+ __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
+ "a"(tuple), "b"(reg_ebx), "c"(reg_ecx), "d"(reg_edx));
+ return result;
+}
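
The ".byte 0x00f, 0x001, 0x0c1" sequences above hand-encode the VMCALL instruction (0f 01 c1), so the file assembles even on toolchains that lack the mnemonic; the cpuid check first verifies the hypervisor-present bit (CPUID leaf 1, ECX bit 31). A hypothetical call site (the tuple constant and parameters are invented here; real tuples live in vmcallinterface.h):

#define EXAMPLE_VMCALL_TUPLE 0x0100	/* hypothetical tuple id */

	unsigned long result;

	result = __unisys_vmcall_gnuc(EXAMPLE_VMCALL_TUPLE,
				      param_ebx, param_ecx);
	if (result == (unsigned long)-1)
		pr_err("vmcall unavailable: hypervisor bit not set\n");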
diff --git a/drivers/staging/unisys/visorbus/periodic_work.c b/drivers/staging/unisys/visorbus/periodic_work.c
new file mode 100644
index 000000000000..5e56088cf855
--- /dev/null
+++ b/drivers/staging/unisys/visorbus/periodic_work.c
@@ -0,0 +1,204 @@
+/* periodic_work.c
+ *
+ * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * Helper functions to schedule periodic work in Linux kernel mode.
+ */
+#include <linux/sched.h>
+
+#include "periodic_work.h"
+
+#define MYDRVNAME "periodic_work"
+
+struct periodic_work {
+ rwlock_t lock;
+ struct delayed_work work;
+ void (*workfunc)(void *);
+ void *workfuncarg;
+ bool is_scheduled;
+ bool want_to_stop;
+ ulong jiffy_interval;
+ struct workqueue_struct *workqueue;
+ const char *devnam;
+};
+
+static void periodic_work_func(struct work_struct *work)
+{
+ struct periodic_work *pw;
+
+ pw = container_of(work, struct periodic_work, work.work);
+ (*pw->workfunc)(pw->workfuncarg);
+}
+
+struct periodic_work *visor_periodic_work_create(ulong jiffy_interval,
+ struct workqueue_struct *workqueue,
+ void (*workfunc)(void *),
+ void *workfuncarg,
+ const char *devnam)
+{
+ struct periodic_work *pw;
+
+ pw = kzalloc(sizeof(*pw), GFP_KERNEL | __GFP_NORETRY);
+ if (!pw)
+ return NULL;
+
+ rwlock_init(&pw->lock);
+ pw->jiffy_interval = jiffy_interval;
+ pw->workqueue = workqueue;
+ pw->workfunc = workfunc;
+ pw->workfuncarg = workfuncarg;
+ pw->devnam = devnam;
+ return pw;
+}
+EXPORT_SYMBOL_GPL(visor_periodic_work_create);
+
+void visor_periodic_work_destroy(struct periodic_work *pw)
+{
+ kfree(pw);
+}
+EXPORT_SYMBOL_GPL(visor_periodic_work_destroy);
+
+/** Call this from your periodic work worker function to schedule the next
+ * call.
+ * If this function returns false, there was a failure and the
+ * periodic work is no longer scheduled
+ */
+bool visor_periodic_work_nextperiod(struct periodic_work *pw)
+{
+ bool rc = false;
+
+ write_lock(&pw->lock);
+ if (pw->want_to_stop) {
+ pw->is_scheduled = false;
+ pw->want_to_stop = false;
+ rc = true; /* yes, true; see visor_periodic_work_stop() */
+ goto unlock;
+ } else if (!queue_delayed_work(pw->workqueue, &pw->work,
+ pw->jiffy_interval)) {
+ pw->is_scheduled = false;
+ rc = false;
+ goto unlock;
+ }
+ rc = true;
+unlock:
+ write_unlock(&pw->lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visor_periodic_work_nextperiod);
+
+/** This function returns true iff new periodic work was actually started.
+ * If this function returns false, then no work was started
+ * (either because it was already started, or because of a failure).
+ */
+bool visor_periodic_work_start(struct periodic_work *pw)
+{
+ bool rc = false;
+
+ write_lock(&pw->lock);
+ if (pw->is_scheduled) {
+ rc = false;
+ goto unlock;
+ }
+ if (pw->want_to_stop) {
+ rc = false;
+ goto unlock;
+ }
+ INIT_DELAYED_WORK(&pw->work, &periodic_work_func);
+ if (!queue_delayed_work(pw->workqueue, &pw->work,
+ pw->jiffy_interval)) {
+ rc = false;
+ goto unlock;
+ }
+ pw->is_scheduled = true;
+ rc = true;
+unlock:
+ write_unlock(&pw->lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visor_periodic_work_start);
+
+/** This function returns true iff your call actually stopped the periodic
+ * work.
+ *
+ * -- PAY ATTENTION... this is important --
+ *
+ * NO NO #1
+ *
+ * Do NOT call this function from some function that is running on the
+ * same workqueue as the work you are trying to stop might be running
+ * on! If you violate this rule, visor_periodic_work_stop() MIGHT work,
+ * but it also MIGHT get hung up in an infinite loop saying
+ * "waiting for delayed work...". This will happen if the delayed work
+ * you are trying to cancel has been put in the workqueue list, but can't
+ * run yet because we are running that same workqueue thread right now.
+ *
+ * Bottom line: If you need to call visor_periodic_work_stop() from a
+ * workitem, be sure the workitem is on a DIFFERENT workqueue than the
+ * workitem that you are trying to cancel.
+ *
+ * If I could figure out some way to check for this "no no" condition in
+ * the code, I would. It would have saved me the trouble of writing this
+ * long comment. And also, don't think this is some "theoretical" race
+ * condition. It is REAL, as I have spent the day chasing it.
+ *
+ * NO NO #2
+ *
+ * Take close note of the locks that you own when you call this function.
+ * You must NOT own any locks that are needed by the periodic work
+ * function that is currently installed. If you DO, a deadlock may result,
+ * because stopping the periodic work often involves waiting for the last
+ * iteration of the periodic work function to complete. Again, if you hit
+ * this deadlock, you will get hung up in an infinite loop saying
+ * "waiting for delayed work...".
+ */
+bool visor_periodic_work_stop(struct periodic_work *pw)
+{
+ bool stopped_something = false;
+
+ write_lock(&pw->lock);
+ stopped_something = pw->is_scheduled && (!pw->want_to_stop);
+ while (pw->is_scheduled) {
+ pw->want_to_stop = true;
+ if (cancel_delayed_work(&pw->work)) {
+ /* We get here if the delayed work was pending as
+ * delayed work, but was NOT run.
+ */
+ WARN_ON(!pw->is_scheduled);
+ pw->is_scheduled = false;
+ } else {
+ /* If we get here, either the delayed work:
+ * - was run, OR,
+ * - is running RIGHT NOW on another processor, OR,
+ * - wasn't even scheduled (there is a minuscule
+ * timing window where this could be the case)
+ * flush_workqueue() would make sure it is finished
+ * executing, but that still isn't very useful, which
+ * explains the loop...
+ */
+ }
+ if (pw->is_scheduled) {
+ write_unlock(&pw->lock);
+ schedule_timeout_interruptible(msecs_to_jiffies(10));
+ write_lock(&pw->lock);
+ } else {
+ pw->want_to_stop = false;
+ }
+ }
+ write_unlock(&pw->lock);
+ return stopped_something;
+}
+EXPORT_SYMBOL_GPL(visor_periodic_work_stop);
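
Putting the API together, a minimal usage sketch (names hypothetical): the work function re-arms itself via visor_periodic_work_nextperiod(), and per the NO NOs above, the stop call must come from a context that is not running on the same workqueue:

static struct periodic_work *pw;

static void my_poll(void *ctx)
{
	/* ... do one iteration of the periodic work ... */
	visor_periodic_work_nextperiod(pw);	/* arm the next tick */
}

/* setup, e.g. at probe time */
pw = visor_periodic_work_create(msecs_to_jiffies(100), my_wq,
				my_poll, ctx, "mydev");
visor_periodic_work_start(pw);

/* teardown -- never from a workitem running on my_wq itself */
visor_periodic_work_stop(pw);
visor_periodic_work_destroy(pw);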
diff --git a/drivers/staging/unisys/visorbus/vbuschannel.h b/drivers/staging/unisys/visorbus/vbuschannel.h
new file mode 100644
index 000000000000..5ed83a3f1428
--- /dev/null
+++ b/drivers/staging/unisys/visorbus/vbuschannel.h
@@ -0,0 +1,94 @@
+/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __VBUSCHANNEL_H__
+#define __VBUSCHANNEL_H__
+
+/* The vbus channel is the channel area provided via the BUS_CREATE controlvm
+ * message for each virtual bus. This channel area is provided to both server
+ * and client ends of the bus. The channel header area is initialized by
+ * the server, and the remaining information is filled in by the client.
+ * We currently use this for the client to provide various information about
+ * the client devices and client drivers for the server end to see.
+ */
+#include <linux/uuid.h>
+#include "vbusdeviceinfo.h"
+#include "channel.h"
+
+/* {193b331b-c58f-11da-95a9-00e08161165f} */
+#define SPAR_VBUS_CHANNEL_PROTOCOL_UUID \
+ UUID_LE(0x193b331b, 0xc58f, 0x11da, \
+ 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
+static const uuid_le spar_vbus_channel_protocol_uuid =
+ SPAR_VBUS_CHANNEL_PROTOCOL_UUID;
+
+#define SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+
+/* Must increment this whenever you insert or delete fields within this channel
+* struct. Also increment whenever you change the meaning of fields within this
+* channel struct so as to break pre-existing software. Note that you can
+* usually add fields to the END of the channel struct withOUT needing to
+* increment this. */
+#define SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID 1
+
+#define SPAR_VBUS_CHANNEL_OK_CLIENT(ch) \
+ spar_check_channel_client(ch, \
+ spar_vbus_channel_protocol_uuid, \
+ "vbus", \
+ sizeof(struct spar_vbus_channel_protocol),\
+ SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID, \
+ SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE)
+
+#define SPAR_VBUS_CHANNEL_OK_SERVER(actual_bytes) \
+ (spar_check_channel_server(spar_vbus_channel_protocol_uuid, \
+ "vbus", \
+ sizeof(struct spar_vbus_channel_protocol),\
+ actual_bytes))
+
+#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
+struct spar_vbus_headerinfo {
+ u32 struct_bytes; /* size of this struct in bytes */
+ u32 device_info_struct_bytes; /* sizeof(ULTRA_VBUS_DEVICEINFO) */
+ u32 dev_info_count; /* num of items in DevInfo member */
+ /* (this is the allocated size) */
+ u32 chp_info_offset; /* byte offset from beginning of this struct */
+ /* to the ChpInfo struct (below) */
+ u32 bus_info_offset; /* byte offset from beginning of this struct */
+ /* to the BusInfo struct (below) */
+ u32 dev_info_offset; /* byte offset from beginning of this struct */
+ /* to the DevInfo array (below) */
+ u8 reserved[104];
+};
+
+struct spar_vbus_channel_protocol {
+ struct channel_header channel_header; /* initialized by server */
+ struct spar_vbus_headerinfo hdr_info; /* initialized by server */
+ /* the remainder of this channel is filled in by the client */
+ struct ultra_vbus_deviceinfo chp_info;
+ /* describes client chipset device and driver */
+ struct ultra_vbus_deviceinfo bus_info;
+ /* describes client bus device and driver */
+ struct ultra_vbus_deviceinfo dev_info[0];
+ /* describes client device and driver for each device on the bus */
+};
+
+#define VBUS_CH_SIZE_EXACT(MAXDEVICES) \
+ (sizeof(struct spar_vbus_channel_protocol) + ((MAXDEVICES) * \
+ sizeof(struct ultra_vbus_deviceinfo)))
+#define VBUS_CH_SIZE(MAXDEVICES) COVER(VBUS_CH_SIZE_EXACT(MAXDEVICES), 4096)
+
+#pragma pack(pop)
+
+#endif
diff --git a/drivers/staging/unisys/visorbus/vbusdeviceinfo.h b/drivers/staging/unisys/visorbus/vbusdeviceinfo.h
new file mode 100644
index 000000000000..9b6d3e69355c
--- /dev/null
+++ b/drivers/staging/unisys/visorbus/vbusdeviceinfo.h
@@ -0,0 +1,213 @@
+/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __VBUSDEVICEINFO_H__
+#define __VBUSDEVICEINFO_H__
+
+#include <linux/types.h>
+
+#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
+
+/* An array of this struct is present in the channel area for each vbus.
+ * (See vbuschannel.h.)
+ * It is filled in by the client side to provide info about the device
+ * and driver from the client's perspective.
+ */
+struct ultra_vbus_deviceinfo {
+ u8 devtype[16]; /* short string identifying the device type */
+ u8 drvname[16]; /* driver .sys file name */
+ u8 infostrs[96]; /* sequence of tab-delimited id strings: */
+ /* <DRIVER_REV> <DRIVER_VERTAG> <DRIVER_COMPILETIME> */
+ u8 reserved[128]; /* pad size to 256 bytes */
+};
+
+#pragma pack(pop)
+
+/* Reads chars from the buffer at <src> for <srcmax> bytes, and writes to
+ * the buffer at <p>, which is <remain> bytes long, ensuring never to
+ * overflow the buffer at <p>, using the following rules:
+ * - printable characters are simply copied from the buffer at <src> to the
+ * buffer at <p>
+ * - intervening streaks of non-printable characters in the buffer at <src>
+ * are replaced with a single space in the buffer at <p>
+ * Note that we pay no attention to '\0'-termination.
+ * Returns the number of bytes written to <p>.
+ *
+ * Pass <p> == NULL and <remain> == 0 for this special behavior. In this
+ * case, we simply return the number of bytes that WOULD HAVE been written
+ * to a buffer at <p>, had it been infinitely big.
+ */
+static inline int
+vbuschannel_sanitize_buffer(char *p, int remain, char *src, int srcmax)
+{
+ int chars = 0;
+ int nonprintable_streak = 0;
+
+ while (srcmax > 0) {
+ if ((*src >= ' ') && (*src < 0x7f)) {
+ if (nonprintable_streak) {
+ if (remain > 0) {
+ *p = ' ';
+ p++;
+ remain--;
+ chars++;
+ } else if (p == NULL) {
+ chars++;
+ }
+ nonprintable_streak = 0;
+ }
+ if (remain > 0) {
+ *p = *src;
+ p++;
+ remain--;
+ chars++;
+ } else if (p == NULL) {
+ chars++;
+ }
+ } else {
+ nonprintable_streak = 1;
+ }
+ src++;
+ srcmax--;
+ }
+ return chars;
+}
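
The NULL/0 mode supports the usual measure-then-copy pattern; a hypothetical sketch:

	/* First pass sizes the sanitized output; second pass fills it in. */
	int needed = vbuschannel_sanitize_buffer(NULL, 0, src, srclen);
	char *out = kmalloc(needed + 1, GFP_KERNEL);

	if (out) {
		int n = vbuschannel_sanitize_buffer(out, needed, src, srclen);

		out[n] = '\0';	/* the helper never writes a terminator */
	}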
+
+#define VBUSCHANNEL_ADDACHAR(ch, p, remain, chars) \
+ do { \
+ if (remain <= 0) \
+ break; \
+ *p = ch; \
+ p++; chars++; remain--; \
+ } while (0)
+
+/* Converts the non-negative value at <num> to an ascii decimal string
+ * at <p>, writing at most <remain> bytes. Note there is NO '\0' termination
+ * written to <p>.
+ *
+ * Returns the number of bytes written to <p>.
+ *
+ * Note that we create this function because we need to do this operation in
+ * an environment-independent way (since we are in a common header file).
+ */
+static inline int
+vbuschannel_itoa(char *p, int remain, int num)
+{
+ int digits = 0;
+ char s[32];
+ int i;
+
+ if (num == 0) {
+ /* '0' is a special case */
+ if (remain <= 0)
+ return 0;
+ *p = '0';
+ return 1;
+ }
+ /* form a backwards decimal ascii string in <s> */
+ while (num > 0) {
+ if (digits >= (int)sizeof(s))
+ return 0;
+ s[digits++] = (num % 10) + '0';
+ num = num / 10;
+ }
+ if (remain < digits) {
+ /* not enough room left at <p> to hold number, so fill with
+ * '?' */
+ for (i = 0; i < remain; i++, p++)
+ *p = '?';
+ return remain;
+ }
+ /* plug in the decimal ascii string representing the number, by */
+ /* reversing the string we just built in <s> */
+ i = digits;
+ while (i > 0) {
+ i--;
+ *p = s[i];
+ p++;
+ }
+ return digits;
+}
+
+/* Reads <devInfo>, and converts its contents to a printable string at <p>,
+ * writing at most <remain> bytes. Note there is NO '\0' termination
+ * written to <p>.
+ *
+ * Pass <devix> >= 0 if you want a device index presented.
+ *
+ * Returns the number of bytes written to <p>.
+ */
+static inline int
+vbuschannel_devinfo_to_string(struct ultra_vbus_deviceinfo *devinfo,
+ char *p, int remain, int devix)
+{
+ char *psrc;
+ int nsrc, x, i, pad;
+ int chars = 0;
+
+ psrc = &devinfo->devtype[0];
+ nsrc = sizeof(devinfo->devtype);
+ if (vbuschannel_sanitize_buffer(NULL, 0, psrc, nsrc) <= 0)
+ return 0;
+
+ /* emit device index */
+ if (devix >= 0) {
+ VBUSCHANNEL_ADDACHAR('[', p, remain, chars);
+ x = vbuschannel_itoa(p, remain, devix);
+ p += x;
+ remain -= x;
+ chars += x;
+ VBUSCHANNEL_ADDACHAR(']', p, remain, chars);
+ } else {
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ }
+
+ /* emit device type */
+ x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
+ p += x;
+ remain -= x;
+ chars += x;
+ pad = 15 - x; /* pad device type to be exactly 15 chars */
+ for (i = 0; i < pad; i++)
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+
+ /* emit driver name */
+ psrc = &devinfo->drvname[0];
+ nsrc = sizeof(devinfo->drvname);
+ x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
+ p += x;
+ remain -= x;
+ chars += x;
+ pad = 15 - x; /* pad driver name to be exactly 15 chars */
+ for (i = 0; i < pad; i++)
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+
+ /* emit strings */
+ psrc = &devinfo->infostrs[0];
+ nsrc = sizeof(devinfo->infostrs);
+ x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
+ p += x;
+ remain -= x;
+ chars += x;
+ VBUSCHANNEL_ADDACHAR('\n', p, remain, chars);
+
+ return chars;
+}
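
A hypothetical call and the shape of what it produces:

	char line[128];
	int n;

	n = vbuschannel_devinfo_to_string(&devinfo, line,
					  sizeof(line) - 1, 0);
	line[n] = '\0';		/* the helper does not NUL-terminate */
	/* line is now e.g. "[0]ultravnic       visornic        1.0...\n" */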
+
+#endif
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
new file mode 100644
index 000000000000..6db47196c189
--- /dev/null
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -0,0 +1,1518 @@
+/* visorbus_main.c
+ *
+ * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include <linux/uuid.h>
+
+#include "visorbus.h"
+#include "visorbus_private.h"
+#include "version.h"
+#include "periodic_work.h"
+#include "vbuschannel.h"
+#include "guestlinuxdebug.h"
+#include "vmcallinterface.h"
+
+#define MYDRVNAME "visorbus"
+
+/* module parameters */
+static int visorbus_debug;
+static int visorbus_forcematch;
+static int visorbus_forcenomatch;
+static int visorbus_debugref;
+#define SERIALLOOPBACKCHANADDR (100 * 1024 * 1024)
+
+#define CURRENT_FILE_PC VISOR_BUS_PC_visorbus_main_c
+#define POLLJIFFIES_TESTWORK 100
+#define POLLJIFFIES_NORMALCHANNEL 10
+
+static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env);
+static int visorbus_match(struct device *xdev, struct device_driver *xdrv);
+static void fix_vbus_dev_info(struct visor_device *visordev);
+
+/* BUS type attributes
+ *
+ * define & implement display of bus attributes under
+ * /sys/bus/visorbus.
+ *
+ */
+
+static ssize_t version_show(struct bus_type *bus, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", VERSION);
+}
+
+static BUS_ATTR_RO(version);
+
+static struct attribute *visorbus_bus_attrs[] = {
+ &bus_attr_version.attr,
+ NULL,
+};
+
+static const struct attribute_group visorbus_bus_group = {
+ .attrs = visorbus_bus_attrs,
+};
+
+static const struct attribute_group *visorbus_bus_groups[] = {
+ &visorbus_bus_group,
+ NULL,
+};
+
+/** This describes the TYPE of bus.
+ * (Don't confuse this with an INSTANCE of the bus.)
+ */
+struct bus_type visorbus_type = {
+ .name = "visorbus",
+ .match = visorbus_match,
+ .uevent = visorbus_uevent,
+ .bus_groups = visorbus_bus_groups,
+};
+
+static struct delayed_work periodic_work;
+
+/* YES, we need 2 workqueues.
+ * The reason is, workitems on the test queue may need to cancel
+ * workitems on the other queue. You will be in for trouble if you try to
+ * do this with workitems queued on the same workqueue.
+ */
+static struct workqueue_struct *periodic_test_workqueue;
+static struct workqueue_struct *periodic_dev_workqueue;
+static long long bus_count; /* number of bus instances; ever-increasing */
+
+static void chipset_bus_create(struct visor_device *bus_info);
+static void chipset_bus_destroy(struct visor_device *bus_info);
+static void chipset_device_create(struct visor_device *dev_info);
+static void chipset_device_destroy(struct visor_device *dev_info);
+static void chipset_device_pause(struct visor_device *dev_info);
+static void chipset_device_resume(struct visor_device *dev_info);
+
+/** These functions are implemented herein, and are called by the chipset
+ * driver to notify us about specific events.
+ */
+static struct visorchipset_busdev_notifiers chipset_notifiers = {
+ .bus_create = chipset_bus_create,
+ .bus_destroy = chipset_bus_destroy,
+ .device_create = chipset_device_create,
+ .device_destroy = chipset_device_destroy,
+ .device_pause = chipset_device_pause,
+ .device_resume = chipset_device_resume,
+};
+
+/** These functions are implemented in the chipset driver, and we call them
+ * herein when we want to acknowledge a specific event.
+ */
+static struct visorchipset_busdev_responders chipset_responders;
+
+/* filled in with info about parent chipset driver when we register with it */
+static struct ultra_vbus_deviceinfo chipset_driverinfo;
+/* filled in with info about this driver, wrt it servicing client busses */
+static struct ultra_vbus_deviceinfo clientbus_driverinfo;
+
+/** list of visor_device structs, linked via .list_all */
+static LIST_HEAD(list_all_bus_instances);
+/** list of visor_device structs, linked via .list_all */
+static LIST_HEAD(list_all_device_instances);
+
+static int
+visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env)
+{
+ if (add_uevent_var(env, "VERSION=%s", VERSION))
+ return -ENOMEM;
+ return 0;
+}
+
+/* This is called automatically upon adding a visor_device (device_add), or
+ * adding a visor_driver (visorbus_register_visor_driver), and returns nonzero iff the
+ * provided driver can control the specified device.
+ */
+static int
+visorbus_match(struct device *xdev, struct device_driver *xdrv)
+{
+ uuid_le channel_type;
+ int rc = 0;
+ int i;
+ struct visor_device *dev;
+ struct visor_driver *drv;
+
+ dev = to_visor_device(xdev);
+ drv = to_visor_driver(xdrv);
+ channel_type = visorchannel_get_uuid(dev->visorchannel);
+ if (visorbus_forcematch) {
+ rc = 1;
+ goto away;
+ }
+ if (visorbus_forcenomatch)
+ goto away;
+
+ if (!drv->channel_types)
+ goto away;
+ for (i = 0;
+ (uuid_le_cmp(drv->channel_types[i].guid, NULL_UUID_LE) != 0) ||
+ (drv->channel_types[i].name);
+ i++)
+ if (uuid_le_cmp(drv->channel_types[i].guid,
+ channel_type) == 0) {
+ rc = i + 1;
+ goto away;
+ }
+away:
+ return rc;
+}
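
For reference, the channel_types table a client driver registers is a sentinel-terminated array, matching the loop above; a hypothetical example (the GUID macro is invented; field names follow the drv->channel_types[i].guid / .name accesses):

/* Hypothetical client driver advertising a single channel type. */
static struct visor_channeltype_descriptor mydrv_channel_types[] = {
	{ .guid = MYDRV_CHANNEL_PROTOCOL_UUID, .name = "mydev" },
	{ .guid = NULL_UUID_LE, .name = NULL }	/* terminator */
};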
+
+/** This is called when device_unregister() is called for the bus device
+ * instance, after all other tasks involved with destroying the device
+ * are complete.
+ */
+static void
+visorbus_release_busdevice(struct device *xdev)
+{
+ struct visor_device *dev = dev_get_drvdata(xdev);
+
+ dev_set_drvdata(xdev, NULL);
+ kfree(dev);
+}
+
+/** This is called when device_unregister() is called for each child
+ * device instance.
+ */
+static void
+visorbus_release_device(struct device *xdev)
+{
+ struct visor_device *dev = to_visor_device(xdev);
+
+ if (dev->periodic_work) {
+ visor_periodic_work_destroy(dev->periodic_work);
+ dev->periodic_work = NULL;
+ }
+ if (dev->visorchannel) {
+ visorchannel_destroy(dev->visorchannel);
+ dev->visorchannel = NULL;
+ }
+ kfree(dev);
+}
+
+/* Implement publishing of device node attributes under:
+ *
+ * /sys/bus/visorbus<x>/dev<y>/devmajorminor
+ *
+ */
+
+#define to_devmajorminor_attr(_attr) \
+ container_of(_attr, struct devmajorminor_attribute, attr)
+#define to_visor_device_from_kobjdevmajorminor(obj) \
+ container_of(obj, struct visor_device, kobjdevmajorminor)
+
+struct devmajorminor_attribute {
+ struct attribute attr;
+ int slot;
+ ssize_t (*show)(struct visor_device *, int slot, char *buf);
+ ssize_t (*store)(struct visor_device *, int slot, const char *buf,
+ size_t count);
+};
+
+static ssize_t DEVMAJORMINOR_ATTR(struct visor_device *dev, int slot, char *buf)
+{
+ int maxdevnodes = ARRAY_SIZE(dev->devnodes);
+
+ if (slot < 0 || slot >= maxdevnodes)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "%d:%d\n",
+ dev->devnodes[slot].major, dev->devnodes[slot].minor);
+}
+
+static ssize_t
+devmajorminor_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct devmajorminor_attribute *devmajorminor_attr =
+ to_devmajorminor_attr(attr);
+ struct visor_device *dev = to_visor_device_from_kobjdevmajorminor(kobj);
+ ssize_t ret = 0;
+
+ if (devmajorminor_attr->show)
+ ret = devmajorminor_attr->show(dev,
+ devmajorminor_attr->slot, buf);
+ return ret;
+}
+
+static ssize_t
+devmajorminor_attr_store(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ struct devmajorminor_attribute *devmajorminor_attr =
+ to_devmajorminor_attr(attr);
+ struct visor_device *dev = to_visor_device_from_kobjdevmajorminor(kobj);
+ ssize_t ret = 0;
+
+ if (devmajorminor_attr->store)
+ ret = devmajorminor_attr->store(dev,
+ devmajorminor_attr->slot,
+ buf, count);
+ return ret;
+}
+
+static int register_devmajorminor_attributes(struct visor_device *dev);
+
+static int
+devmajorminor_create_file(struct visor_device *dev, const char *name,
+ int major, int minor)
+{
+ int maxdevnodes = ARRAY_SIZE(dev->devnodes);
+ struct devmajorminor_attribute *myattr = NULL;
+ int x = -1, rc = 0, slot = -1;
+
+ register_devmajorminor_attributes(dev);
+ for (slot = 0; slot < maxdevnodes; slot++)
+ if (!dev->devnodes[slot].attr)
+ break;
+ if (slot == maxdevnodes) {
+ rc = -ENOMEM;
+ goto away;
+ }
+ myattr = kzalloc(sizeof(*myattr), GFP_KERNEL);
+ if (!myattr) {
+ rc = -ENOMEM;
+ goto away;
+ }
+ myattr->show = DEVMAJORMINOR_ATTR;
+ myattr->store = NULL;
+ myattr->slot = slot;
+ myattr->attr.name = name;
+ myattr->attr.mode = S_IRUGO;
+ dev->devnodes[slot].attr = myattr;
+ dev->devnodes[slot].major = major;
+ dev->devnodes[slot].minor = minor;
+ x = sysfs_create_file(&dev->kobjdevmajorminor, &myattr->attr);
+ if (x < 0) {
+ rc = x;
+ goto away;
+ }
+ kobject_uevent(&dev->device.kobj, KOBJ_ONLINE);
+away:
+ if (rc < 0) {
+ kfree(myattr);
+ /* only clear the slot if we actually claimed one */
+ if (slot >= 0 && slot < maxdevnodes)
+ dev->devnodes[slot].attr = NULL;
+ }
+ return rc;
+}
+
+static void
+devmajorminor_remove_file(struct visor_device *dev, int slot)
+{
+ int maxdevnodes = ARRAY_SIZE(dev->devnodes);
+ struct devmajorminor_attribute *myattr = NULL;
+
+ if (slot < 0 || slot >= maxdevnodes)
+ return;
+ myattr = (struct devmajorminor_attribute *)(dev->devnodes[slot].attr);
+ if (!myattr)
+ return;
+ sysfs_remove_file(&dev->kobjdevmajorminor, &myattr->attr);
+ kobject_uevent(&dev->device.kobj, KOBJ_OFFLINE);
+ dev->devnodes[slot].attr = NULL;
+ kfree(myattr);
+}
+
+static void
+devmajorminor_remove_all_files(struct visor_device *dev)
+{
+ int i = 0;
+ int maxdevnodes = ARRAY_SIZE(dev->devnodes);
+
+ for (i = 0; i < maxdevnodes; i++)
+ devmajorminor_remove_file(dev, i);
+}
+
+static const struct sysfs_ops devmajorminor_sysfs_ops = {
+ .show = devmajorminor_attr_show,
+ .store = devmajorminor_attr_store,
+};
+
+static struct kobj_type devmajorminor_kobj_type = {
+ .sysfs_ops = &devmajorminor_sysfs_ops
+};
+
+static int
+register_devmajorminor_attributes(struct visor_device *dev)
+{
+ int rc = 0, x = 0;
+
+ if (dev->kobjdevmajorminor.parent)
+ goto away; /* already registered */
+ x = kobject_init_and_add(&dev->kobjdevmajorminor,
+ &devmajorminor_kobj_type, &dev->device.kobj,
+ "devmajorminor");
+ if (x < 0) {
+ rc = x;
+ goto away;
+ }
+
+ kobject_uevent(&dev->kobjdevmajorminor, KOBJ_ADD);
+
+away:
+ return rc;
+}
+
+static void
+unregister_devmajorminor_attributes(struct visor_device *dev)
+{
+ if (!dev->kobjdevmajorminor.parent)
+ return; /* already unregistered */
+ devmajorminor_remove_all_files(dev);
+
+ kobject_del(&dev->kobjdevmajorminor);
+ kobject_put(&dev->kobjdevmajorminor);
+ dev->kobjdevmajorminor.parent = NULL;
+}
+
+/* begin implementation of specific channel attributes to appear under
+* /sys/bus/visorbus<x>/dev<y>/channel
+*/
+static ssize_t physaddr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct visor_device *vdev = to_visor_device(dev);
+
+ if (!vdev->visorchannel)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "0x%Lx\n",
+ visorchannel_get_physaddr(vdev->visorchannel));
+}
+
+static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct visor_device *vdev = to_visor_device(dev);
+
+ if (!vdev->visorchannel)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "0x%lx\n",
+ visorchannel_get_nbytes(vdev->visorchannel));
+}
+
+static ssize_t clientpartition_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct visor_device *vdev = to_visor_device(dev);
+
+ if (!vdev->visorchannel)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "0x%Lx\n",
+ visorchannel_get_clientpartition(vdev->visorchannel));
+}
+
+static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct visor_device *vdev = to_visor_device(dev);
+ char s[99];
+
+ if (!vdev->visorchannel)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ visorchannel_id(vdev->visorchannel, s));
+}
+
+static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct visor_device *vdev = to_visor_device(dev);
+ char s[99];
+
+ if (!vdev->visorchannel)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ visorchannel_zoneid(vdev->visorchannel, s));
+}
+
+static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct visor_device *vdev = to_visor_device(dev);
+ int i = 0;
+ struct bus_type *xbus = dev->bus;
+ struct device_driver *xdrv = dev->driver;
+ struct visor_driver *drv = NULL;
+
+ if (!vdev->visorchannel || !xbus || !xdrv)
+ return 0;
+ i = xbus->match(dev, xdrv);
+ if (!i)
+ return 0;
+ drv = to_visor_driver(xdrv);
+ return snprintf(buf, PAGE_SIZE, "%s\n", drv->channel_types[i - 1].name);
+}
+
+static DEVICE_ATTR_RO(physaddr);
+static DEVICE_ATTR_RO(nbytes);
+static DEVICE_ATTR_RO(clientpartition);
+static DEVICE_ATTR_RO(typeguid);
+static DEVICE_ATTR_RO(zoneguid);
+static DEVICE_ATTR_RO(typename);
+
+static struct attribute *channel_attrs[] = {
+ &dev_attr_physaddr.attr,
+ &dev_attr_nbytes.attr,
+ &dev_attr_clientpartition.attr,
+ &dev_attr_typeguid.attr,
+ &dev_attr_zoneguid.attr,
+ &dev_attr_typename.attr,
+ NULL
+};
+
+static struct attribute_group channel_attr_grp = {
+ .name = "channel",
+ .attrs = channel_attrs,
+};
+
+static const struct attribute_group *visorbus_dev_groups[] = {
+ &channel_attr_grp,
+ NULL
+};
+
+/* end implementation of specific channel attributes */
+
+/* BUS instance attributes
+ *
+ * define & implement display of bus attributes under
+ * /sys/bus/visorbus/devices/visorbus<n>.
+ *
+ * This is a bit hokey because the kernel does not yet have the
+ * infrastructure to separate bus INSTANCE attributes from bus TYPE
+ * attributes... so we roll our own.
+ */
+
+static ssize_t partition_handle_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct visor_device *vdev = to_visor_device(dev);
+ u64 handle = visorchannel_get_clientpartition(vdev->visorchannel);
+
+ return snprintf(buf, PAGE_SIZE, "0x%Lx\n", handle);
+}
+
+static ssize_t partition_guid_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct visor_device *vdev = to_visor_device(dev);
+
+ return snprintf(buf, PAGE_SIZE, "{%pUb}\n", &vdev->partition_uuid);
+}
+
+static ssize_t partition_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct visor_device *vdev = to_visor_device(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", vdev->name);
+}
+
+static ssize_t channel_addr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct visor_device *vdev = to_visor_device(dev);
+ u64 addr = visorchannel_get_physaddr(vdev->visorchannel);
+
+ return snprintf(buf, PAGE_SIZE, "0x%Lx\n", addr);
+}
+
+static ssize_t channel_bytes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct visor_device *vdev = to_visor_device(dev);
+ u64 nbytes = visorchannel_get_nbytes(vdev->visorchannel);
+
+ return snprintf(buf, PAGE_SIZE, "0x%Lx\n", nbytes);
+}
+
+static ssize_t channel_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct visor_device *vdev = to_visor_device(dev);
+ int len = 0;
+
+ if (vdev->visorchannel) {
+ visorchannel_id(vdev->visorchannel, buf);
+ len = strlen(buf);
+ buf[len++] = '\n';
+ }
+ return len;
+}
+
+static ssize_t client_bus_info_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct visor_device *vdev = to_visor_device(dev);
+ struct visorchannel *channel = vdev->visorchannel;
+ int i, x, remain = PAGE_SIZE;
+ unsigned long off;
+ char *p = buf;
+ u8 *partition_name;
+ struct ultra_vbus_deviceinfo dev_info;
+
+ partition_name = "";
+ if (channel) {
+ if (vdev->name)
+ partition_name = vdev->name;
+ x = snprintf(p, remain,
+ "Client device / client driver info for %s partition (vbus #%d):\n",
+ partition_name, vdev->chipset_dev_no);
+ p += x;
+ remain -= x;
+ x = visorchannel_read(channel,
+ offsetof(struct
+ spar_vbus_channel_protocol,
+ chp_info),
+ &dev_info, sizeof(dev_info));
+ if (x >= 0) {
+ x = vbuschannel_devinfo_to_string(&dev_info, p,
+ remain, -1);
+ p += x;
+ remain -= x;
+ }
+ x = visorchannel_read(channel,
+ offsetof(struct
+ spar_vbus_channel_protocol,
+ bus_info),
+ &dev_info, sizeof(dev_info));
+ if (x >= 0) {
+ x = vbuschannel_devinfo_to_string(&dev_info, p,
+ remain, -1);
+ p += x;
+ remain -= x;
+ }
+ off = offsetof(struct spar_vbus_channel_protocol, dev_info);
+ i = 0;
+ while (off + sizeof(dev_info) <=
+ visorchannel_get_nbytes(channel)) {
+ x = visorchannel_read(channel,
+ off, &dev_info, sizeof(dev_info));
+ if (x >= 0) {
+ x = vbuschannel_devinfo_to_string
+ (&dev_info, p, remain, i);
+ p += x;
+ remain -= x;
+ }
+ off += sizeof(dev_info);
+ i++;
+ }
+ }
+ return PAGE_SIZE - remain;
+}
+
+static DEVICE_ATTR_RO(partition_handle);
+static DEVICE_ATTR_RO(partition_guid);
+static DEVICE_ATTR_RO(partition_name);
+static DEVICE_ATTR_RO(channel_addr);
+static DEVICE_ATTR_RO(channel_bytes);
+static DEVICE_ATTR_RO(channel_id);
+static DEVICE_ATTR_RO(client_bus_info);
+
+static struct attribute *dev_attrs[] = {
+ &dev_attr_partition_handle.attr,
+ &dev_attr_partition_guid.attr,
+ &dev_attr_partition_name.attr,
+ &dev_attr_channel_addr.attr,
+ &dev_attr_channel_bytes.attr,
+ &dev_attr_channel_id.attr,
+ &dev_attr_client_bus_info.attr,
+ NULL
+};
+
+static struct attribute_group dev_attr_grp = {
+ .attrs = dev_attrs,
+};
+
+static const struct attribute_group *visorbus_groups[] = {
+ &dev_attr_grp,
+ NULL
+};
+
+/* DRIVER attributes
+ *
+ * define & implement display of driver attributes under
+ * /sys/bus/visorbus/drivers/<drivername>.
+ *
+ */
+
+static ssize_t
+DRIVER_ATTR_version(struct device_driver *xdrv, char *buf)
+{
+ struct visor_driver *drv = to_visor_driver(xdrv);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", drv->version);
+}
+
+static int
+register_driver_attributes(struct visor_driver *drv)
+{
+ int rc;
+ struct driver_attribute version =
+ __ATTR(version, S_IRUGO, DRIVER_ATTR_version, NULL);
+
+ drv->version_attr = version;
+ rc = driver_create_file(&drv->driver, &drv->version_attr);
+ return rc;
+}
+
+static void
+unregister_driver_attributes(struct visor_driver *drv)
+{
+ driver_remove_file(&drv->driver, &drv->version_attr);
+}
+
+static void
+dev_periodic_work(void *xdev)
+{
+ struct visor_device *dev = (struct visor_device *)xdev;
+ struct visor_driver *drv = to_visor_driver(dev->device.driver);
+
+ down(&dev->visordriver_callback_lock);
+ if (drv->channel_interrupt)
+ drv->channel_interrupt(dev);
+ up(&dev->visordriver_callback_lock);
+ if (!visor_periodic_work_nextperiod(dev->periodic_work))
+ put_device(&dev->device);
+}
+
+static void
+dev_start_periodic_work(struct visor_device *dev)
+{
+ if (dev->being_removed)
+ return;
+ /* now up by at least 2 */
+ get_device(&dev->device);
+ if (!visor_periodic_work_start(dev->periodic_work))
+ put_device(&dev->device);
+}
+
+static void
+dev_stop_periodic_work(struct visor_device *dev)
+{
+ if (visor_periodic_work_stop(dev->periodic_work))
+ put_device(&dev->device);
+}
+
+/** This is called automatically upon adding a visor_device (device_add), or
+ * adding a visor_driver (visorbus_register_visor_driver), but only after
+ * visorbus_match has returned 1 to indicate a successful match between
+ * driver and device.
+ */
+static int
+visordriver_probe_device(struct device *xdev)
+{
+ int rc;
+ struct visor_driver *drv;
+ struct visor_device *dev;
+
+ drv = to_visor_driver(xdev->driver);
+ dev = to_visor_device(xdev);
+ down(&dev->visordriver_callback_lock);
+ dev->being_removed = false;
+ /*
+ * ensure that the dev->being_removed flag is cleared before
+ * we start the probe
+ */
+ wmb();
+ get_device(&dev->device);
+ if (!drv->probe) {
+ up(&dev->visordriver_callback_lock);
+ rc = -ENODEV;
+ goto away;
+ }
+ rc = drv->probe(dev);
+ if (rc < 0)
+ goto away;
+
+ fix_vbus_dev_info(dev);
+ up(&dev->visordriver_callback_lock);
+ rc = 0;
+away:
+ if (rc != 0)
+ put_device(&dev->device);
+ return rc;
+}
+
+/** This is called when device_unregister() is called for each child device
+ * instance, to notify the appropriate visorbus_driver that the device is
+ * going away, and to decrease the reference count of the device.
+ */
+static int
+visordriver_remove_device(struct device *xdev)
+{
+ struct visor_device *dev;
+ struct visor_driver *drv;
+
+ dev = to_visor_device(xdev);
+ drv = to_visor_driver(xdev->driver);
+ down(&dev->visordriver_callback_lock);
+ dev->being_removed = true;
+ /*
+ * ensure that the dev->being_removed flag is set before we start the
+ * actual removal
+ */
+ wmb();
+ if (drv) {
+ if (drv->remove)
+ drv->remove(dev);
+ }
+ up(&dev->visordriver_callback_lock);
+ dev_stop_periodic_work(dev);
+ devmajorminor_remove_all_files(dev);
+
+ put_device(&dev->device);
+
+ return 0;
+}
+
+/** A particular type of visor driver calls this function to register
+ * the driver. The caller MUST fill in the following fields within the
+ * #drv structure:
+ * name, version, owner, channel_types, probe, remove
+ *
+ * Here's how the whole Linux bus / driver / device model works.
+ *
+ * At system start-up, the visorbus kernel module is loaded, which registers
+ * visorbus_type as a bus type, using bus_register().
+ *
+ * All kernel modules that support particular device types on a
+ * visorbus bus are loaded. Each of these kernel modules calls
+ * visorbus_register_visor_driver() in their init functions, passing a
+ * visor_driver struct. visorbus_register_visor_driver() in turn calls
+ * driver_register(&visor_driver.driver). This .driver member is
+ * initialized with generic methods (like probe), whose sole responsibility
+ * is to act as a broker for the real methods, which are within the
+ * visor_driver struct. (This is the way the subclass behavior is
+ * implemented, since visor_driver is essentially a subclass of the
+ * generic driver.) Whenever a driver_register() happens, core bus code in
+ * the kernel does (see device_attach() in drivers/base/dd.c):
+ *
+ * for each dev associated with the bus (the bus that driver is on) that
+ * does not yet have a driver
+ * if bus.match(dev,newdriver) == yes_matched ** .match specified
+ * ** during bus_register().
+ * newdriver.probe(dev) ** for visor drivers, this will call
+ * ** the generic driver.probe implemented in visorbus.c,
+ * ** which in turn calls the probe specified within the
+ * ** struct visor_driver (which was specified by the
+ * ** actual device driver as part of
+ * ** visorbus_register_visor_driver()).
+ *
+ * The above dance also happens when a new device appears.
+ * So the question is, how are devices created within the system?
+ * Basically, just call device_add(dev). See pci_bus_add_devices().
+ * pci_scan_device() shows an example of how to build a device struct. It
+ * returns the newly-created struct to pci_scan_single_device(), which adds it
+ * to the list of devices at PCIBUS.devices. That list of devices is what
+ * is traversed by pci_bus_add_devices().
+ *
+ */
+int visorbus_register_visor_driver(struct visor_driver *drv)
+{
+ int rc = 0;
+
+ drv->driver.name = drv->name;
+ drv->driver.bus = &visorbus_type;
+ drv->driver.probe = visordriver_probe_device;
+ drv->driver.remove = visordriver_remove_device;
+ drv->driver.owner = drv->owner;
+
+ /* driver_register does this:
+ * bus_add_driver(drv)
+ * ->if (drv.bus) ** (bus_type) **
+ * driver_attach(drv)
+ * for each dev with bus type of drv.bus
+ * if (!dev.drv) ** no driver assigned yet **
+ * if (bus.match(dev,drv)) [visorbus_match]
+ * dev.drv = drv
+ * if (!drv.probe(dev)) [visordriver_probe_device]
+ * dev.drv = NULL
+ */
+
+ rc = driver_register(&drv->driver);
+ if (rc < 0)
+ return rc;
+ rc = register_driver_attributes(drv);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visorbus_register_visor_driver);
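+
+/* As an illustrative sketch only (EXAMPLE_CHANNEL_UUID, example_probe
+ * and example_remove are hypothetical, and the descriptor struct name
+ * is assumed to be the one declared in visorbus.h), a function driver
+ * would typically register itself like this, terminating its
+ * channel_types list with an entry whose name is NULL (which the
+ * matching code relies on):
+ *
+ *	static struct visor_channeltype_descriptor example_types[] = {
+ *		{ .guid = EXAMPLE_CHANNEL_UUID, .name = "example" },
+ *		{ .name = NULL }
+ *	};
+ *
+ *	static struct visor_driver example_driver = {
+ *		.name = "example",
+ *		.version = "1.0.0",
+ *		.owner = THIS_MODULE,
+ *		.channel_types = example_types,
+ *		.probe = example_probe,
+ *		.remove = example_remove,
+ *	};
+ *
+ *	static int __init example_init(void)
+ *	{
+ *		return visorbus_register_visor_driver(&example_driver);
+ *	}
+ *
+ * with visorbus_unregister_visor_driver(&example_driver) in the
+ * matching module_exit routine.
+ */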
+
+/** A particular type of visor driver calls this function to unregister
+ * the driver, i.e., within its module_exit function.
+ */
+void
+visorbus_unregister_visor_driver(struct visor_driver *drv)
+{
+ unregister_driver_attributes(drv);
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(visorbus_unregister_visor_driver);
+
+int
+visorbus_read_channel(struct visor_device *dev, unsigned long offset,
+ void *dest, unsigned long nbytes)
+{
+ return visorchannel_read(dev->visorchannel, offset, dest, nbytes);
+}
+EXPORT_SYMBOL_GPL(visorbus_read_channel);
+
+int
+visorbus_write_channel(struct visor_device *dev, unsigned long offset,
+ void *src, unsigned long nbytes)
+{
+ return visorchannel_write(dev->visorchannel, offset, src, nbytes);
+}
+EXPORT_SYMBOL_GPL(visorbus_write_channel);
+
+int
+visorbus_clear_channel(struct visor_device *dev, unsigned long offset, u8 ch,
+ unsigned long nbytes)
+{
+ return visorchannel_clear(dev->visorchannel, offset, ch, nbytes);
+}
+EXPORT_SYMBOL_GPL(visorbus_clear_channel);
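+
+/* Sketch of typical use by a function driver (struct example_hdr and
+ * the offset are illustrative): these helpers bounds-check the access
+ * against the channel size and return 0 on success, so a caller can do:
+ *
+ *	struct example_hdr hdr;
+ *
+ *	if (visorbus_read_channel(dev, 0, &hdr, sizeof(hdr)) < 0)
+ *		return -EIO;
+ */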
+
+int
+visorbus_registerdevnode(struct visor_device *dev,
+ const char *name, int major, int minor)
+{
+ return devmajorminor_create_file(dev, name, major, minor);
+}
+EXPORT_SYMBOL_GPL(visorbus_registerdevnode);
+
+/** We don't really have a real interrupt, so for now we just call the
+ * interrupt function periodically...
+ */
+void
+visorbus_enable_channel_interrupts(struct visor_device *dev)
+{
+ dev_start_periodic_work(dev);
+}
+EXPORT_SYMBOL_GPL(visorbus_enable_channel_interrupts);
+
+void
+visorbus_disable_channel_interrupts(struct visor_device *dev)
+{
+ dev_stop_periodic_work(dev);
+}
+EXPORT_SYMBOL_GPL(visorbus_disable_channel_interrupts);
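+
+/* Sketch (example_interrupt is hypothetical): a function driver that
+ * wants this polled pseudo-interrupt sets .channel_interrupt in its
+ * struct visor_driver:
+ *
+ *	static void example_interrupt(struct visor_device *dev)
+ *	{
+ *		// drain this device's signal queue here
+ *	}
+ *
+ * and then calls visorbus_enable_channel_interrupts(dev) from its
+ * probe() and visorbus_disable_channel_interrupts(dev) from its
+ * remove(). dev_periodic_work() above then invokes the callback every
+ * POLLJIFFIES_NORMALCHANNEL jiffies.
+ */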
+
+/** This is how everything starts from the device end.
+ * This function is called when a channel first appears via a ControlVM
+ * message. In response, this function allocates a visor_device to
+ * correspond to the new channel, and attempts to connect it to the appropriate
+ * driver. If the appropriate driver is found, the visor_driver.probe()
+ * function for that driver will be called, and will be passed the new
+ * visor_device that we just created.
+ *
+ * It's ok if the appropriate driver is not yet loaded, because in that case
+ * the new device struct will just stick around in the bus' list of devices.
+ * When the appropriate driver calls visorbus_register_visor_driver(), the
+ * visor_driver.probe() for the new driver will be called with the new
+ * device.
+ */
+static int
+create_visor_device(struct visor_device *dev)
+{
+ int rc = -1;
+ u32 chipset_bus_no = dev->chipset_bus_no;
+ u32 chipset_dev_no = dev->chipset_dev_no;
+
+ POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, chipset_dev_no, chipset_bus_no,
+ POSTCODE_SEVERITY_INFO);
+
+ sema_init(&dev->visordriver_callback_lock, 1); /* unlocked */
+ dev->device.bus = &visorbus_type;
+ dev->device.groups = visorbus_dev_groups;
+ device_initialize(&dev->device);
+ dev->device.release = visorbus_release_device;
+ /* keep a reference just for us (now 2) */
+ get_device(&dev->device);
+ dev->periodic_work =
+ visor_periodic_work_create(POLLJIFFIES_NORMALCHANNEL,
+ periodic_dev_workqueue,
+ dev_periodic_work,
+ dev, dev_name(&dev->device));
+ if (!dev->periodic_work) {
+ POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, chipset_dev_no,
+ DIAG_SEVERITY_ERR);
+ goto away;
+ }
+
+ /* bus_id must be a unique name with respect to this bus TYPE
+ * (NOT bus instance). That's why we need to include the bus
+ * number within the name.
+ */
+ dev_set_name(&dev->device, "vbus%u:dev%u",
+ chipset_bus_no, chipset_dev_no);
+
+ /* device_add does this:
+ * bus_add_device(dev)
+ * ->device_attach(dev)
+ * ->for each driver drv registered on the bus that dev is on
+ * if (dev.drv) ** device already has a driver **
+ * ** not sure we could ever get here... **
+ * else
+ * if (bus.match(dev,drv)) [visorbus_match]
+ * dev.drv = drv
+ * if (!drv.probe(dev)) [visordriver_probe_device]
+ * dev.drv = NULL
+ *
+ * Note that device_add does NOT fail even if no driver
+ * claims the device. The device will be linked onto
+ * bus_type.klist_devices regardless (use bus_for_each_dev).
+ */
+ rc = device_add(&dev->device);
+ if (rc < 0) {
+ POSTCODE_LINUX_3(DEVICE_ADD_PC, chipset_bus_no,
+ DIAG_SEVERITY_ERR);
+ goto away;
+ }
+
+ rc = register_devmajorminor_attributes(dev);
+ if (rc < 0) {
+ POSTCODE_LINUX_3(DEVICE_REGISTER_FAILURE_PC, chipset_dev_no,
+ DIAG_SEVERITY_ERR);
+ goto away_register;
+ }
+
+ list_add_tail(&dev->list_all, &list_all_device_instances);
+ return 0;
+
+away_register:
+ device_unregister(&dev->device);
+away:
+ put_device(&dev->device);
+ return rc;
+}
+
+static void
+remove_visor_device(struct visor_device *dev)
+{
+ list_del(&dev->list_all);
+ unregister_devmajorminor_attributes(dev);
+ put_device(&dev->device);
+ device_unregister(&dev->device);
+}
+
+static int
+get_vbus_header_info(struct visorchannel *chan,
+ struct spar_vbus_headerinfo *hdr_info)
+{
+ int rc = -1;
+
+ if (!SPAR_VBUS_CHANNEL_OK_CLIENT(visorchannel_get_header(chan)))
+ goto away;
+ if (visorchannel_read(chan, sizeof(struct channel_header), hdr_info,
+ sizeof(*hdr_info)) < 0) {
+ goto away;
+ }
+ if (hdr_info->struct_bytes < sizeof(struct spar_vbus_headerinfo))
+ goto away;
+ if (hdr_info->device_info_struct_bytes <
+ sizeof(struct ultra_vbus_deviceinfo)) {
+ goto away;
+ }
+ rc = 0;
+away:
+ return rc;
+}
+
+/* Write the contents of <info> to the struct
+ * spar_vbus_channel_protocol.chp_info.
+ */
+
+static int
+write_vbus_chp_info(struct visorchannel *chan,
+ struct spar_vbus_headerinfo *hdr_info,
+ struct ultra_vbus_deviceinfo *info)
+{
+ int off = sizeof(struct channel_header) + hdr_info->chp_info_offset;
+
+ if (hdr_info->chp_info_offset == 0)
+ return -1;
+
+ if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
+ return -1;
+ return 0;
+}
+
+/* Write the contents of <info> to the struct
+ * spar_vbus_channel_protocol.bus_info.
+ */
+
+static int
+write_vbus_bus_info(struct visorchannel *chan,
+ struct spar_vbus_headerinfo *hdr_info,
+ struct ultra_vbus_deviceinfo *info)
+{
+ int off = sizeof(struct channel_header) + hdr_info->bus_info_offset;
+
+ if (hdr_info->bus_info_offset == 0)
+ return -1;
+
+ if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
+ return -1;
+ return 0;
+}
+
+/* Write the contents of <info> to the
+ * struct spar_vbus_channel_protocol.dev_info[<devix>].
+ */
+static int
+write_vbus_dev_info(struct visorchannel *chan,
+ struct spar_vbus_headerinfo *hdr_info,
+ struct ultra_vbus_deviceinfo *info, int devix)
+{
+ int off =
+ (sizeof(struct channel_header) + hdr_info->dev_info_offset) +
+ (hdr_info->device_info_struct_bytes * devix);
+
+ if (hdr_info->dev_info_offset == 0)
+ return -1;
+
+ if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
+ return -1;
+ return 0;
+}
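+
+/* Worked example for the offset math above (values hypothetical): with
+ * dev_info_offset = 0x200 and device_info_struct_bytes = 0x100, the
+ * entry for devix = 3 lands at
+ * sizeof(struct channel_header) + 0x200 + (3 * 0x100)
+ * bytes from the start of the channel.
+ */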
+
+/* For a child device just created on a client bus, fill in
+ * information about the driver that is controlling this device into
+ * the appropriate slot within the vbus channel of the bus
+ * instance.
+ */
+static void
+fix_vbus_dev_info(struct visor_device *visordev)
+{
+ int i;
+ struct visor_device *bdev;
+ struct visor_driver *visordrv;
+ int bus_no = visordev->chipset_bus_no;
+ int dev_no = visordev->chipset_dev_no;
+ struct ultra_vbus_deviceinfo dev_info;
+ const char *chan_type_name = NULL;
+ struct spar_vbus_headerinfo *hdr_info;
+
+ if (!visordev->device.driver)
+ return;
+
+ hdr_info = (struct spar_vbus_headerinfo *)visordev->vbus_hdr_info;
+ if (!hdr_info)
+ return;
+
+ bdev = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
+ if (!bdev)
+ return;
+
+ visordrv = to_visor_driver(visordev->device.driver);
+
+ /* Within the list of device types (by GUID) that the driver
+ * says it supports, find out which one of those types matches
+ * the type of this device, so that we can include the device
+ * type name
+ */
+ for (i = 0; visordrv->channel_types[i].name; i++) {
+ if (memcmp(&visordrv->channel_types[i].guid,
+ &visordev->channel_type_guid,
+ sizeof(visordrv->channel_types[i].guid)) == 0) {
+ chan_type_name = visordrv->channel_types[i].name;
+ break;
+ }
+ }
+
+ bus_device_info_init(&dev_info, chan_type_name,
+ visordrv->name, visordrv->version,
+ visordrv->vertag);
+ write_vbus_dev_info(bdev->visorchannel, hdr_info, &dev_info, dev_no);
+
+ /* Re-write bus+chipset info, because it is possible that this
+ * was previously written by our evil counterpart, virtpci.
+ */
+ write_vbus_chp_info(bdev->visorchannel, hdr_info, &chipset_driverinfo);
+ write_vbus_bus_info(bdev->visorchannel, hdr_info,
+ &clientbus_driverinfo);
+}
+
+/** Create a device instance for the visor bus itself.
+ */
+static int
+create_bus_instance(struct visor_device *dev)
+{
+ int rc;
+ int id = dev->chipset_bus_no;
+ struct spar_vbus_headerinfo *hdr_info;
+
+ POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+
+ hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL);
+ if (!hdr_info) {
+ rc = -ENOMEM;
+ goto away;
+ }
+
+ dev_set_name(&dev->device, "visorbus%d", id);
+ dev->device.bus = &visorbus_type;
+ dev->device.groups = visorbus_groups;
+ dev->device.release = visorbus_release_busdevice;
+
+ rc = device_register(&dev->device);
+ if (rc < 0) {
+ POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, id,
+ POSTCODE_SEVERITY_ERR);
+ goto away_mem;
+ }
+
+ if (get_vbus_header_info(dev->visorchannel, hdr_info) >= 0) {
+ dev->vbus_hdr_info = (void *)hdr_info;
+ write_vbus_chp_info(dev->visorchannel, hdr_info,
+ &chipset_driverinfo);
+ write_vbus_bus_info(dev->visorchannel, hdr_info,
+ &clientbus_driverinfo);
+ } else {
+ kfree(hdr_info);
+ }
+ bus_count++;
+ list_add_tail(&dev->list_all, &list_all_bus_instances);
+ dev_set_drvdata(&dev->device, dev);
+ return 0;
+
+away_mem:
+ kfree(hdr_info);
+away:
+ return rc;
+}
+
+/** Remove a device instance for the visor bus itself.
+ */
+static void
+remove_bus_instance(struct visor_device *dev)
+{
+ /* Note that this will result in the release method for
+ * dev->device being called, which will call
+ * visorbus_release_busdevice(). This has something to do with
+ * the put_device() done in device_unregister(), but I have never
+ * successfully been able to trace through the code to see
+ * where/how release() gets called. But I know it does.
+ */
+ bus_count--;
+ if (dev->visorchannel) {
+ visorchannel_destroy(dev->visorchannel);
+ dev->visorchannel = NULL;
+ }
+ kfree(dev->vbus_hdr_info);
+ list_del(&dev->list_all);
+ device_unregister(&dev->device);
+}
+
+/** Create and register the one-and-only one instance of
+ * the visor bus type (visorbus_type).
+ */
+static int
+create_bus_type(void)
+{
+ int rc = 0;
+
+ rc = bus_register(&visorbus_type);
+ return rc;
+}
+
+/** Remove the one-and-only one instance of the visor bus type (visorbus_type).
+ */
+static void
+remove_bus_type(void)
+{
+ bus_unregister(&visorbus_type);
+}
+
+/** Remove all child visor bus device instances.
+ */
+static void
+remove_all_visor_devices(void)
+{
+ struct list_head *listentry, *listtmp;
+
+ list_for_each_safe(listentry, listtmp, &list_all_device_instances) {
+ struct visor_device *dev = list_entry(listentry,
+ struct visor_device,
+ list_all);
+ remove_visor_device(dev);
+ }
+}
+
+static void
+chipset_bus_create(struct visor_device *dev)
+{
+ int rc;
+ u32 bus_no = dev->chipset_bus_no;
+
+ POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
+ rc = create_bus_instance(dev);
+ POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
+
+ if (rc < 0)
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ else
+ POSTCODE_LINUX_3(CHIPSET_INIT_SUCCESS_PC, bus_no,
+ POSTCODE_SEVERITY_INFO);
+
+ if (chipset_responders.bus_create)
+ (*chipset_responders.bus_create)(dev, rc);
+}
+
+static void
+chipset_bus_destroy(struct visor_device *dev)
+{
+ remove_bus_instance(dev);
+ if (chipset_responders.bus_destroy)
+ (*chipset_responders.bus_destroy)(dev, 0);
+}
+
+static void
+chipset_device_create(struct visor_device *dev_info)
+{
+ int rc = -1;
+ u32 bus_no = dev_info->chipset_bus_no;
+ u32 dev_no = dev_info->chipset_dev_no;
+
+ POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_INFO);
+
+ rc = create_visor_device(dev_info);
+ if (chipset_responders.device_create)
+ chipset_responders.device_create(dev_info, rc);
+
+ if (rc < 0)
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ else
+ POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_INFO);
+}
+
+static void
+chipset_device_destroy(struct visor_device *dev_info)
+{
+ remove_visor_device(dev_info);
+
+ if (chipset_responders.device_destroy)
+ (*chipset_responders.device_destroy)(dev_info, 0);
+}
+
+/* This is the callback function specified for a function driver, to
+ * be called when a pending "pause device" operation has been
+ * completed.
+ */
+static void
+pause_state_change_complete(struct visor_device *dev, int status)
+{
+ if (!dev->pausing)
+ return;
+
+ dev->pausing = false;
+ if (!chipset_responders.device_pause) /* this can never happen! */
+ return;
+
+ /* Notify the chipset driver that the pause is complete, which
+ * will presumably want to send some sort of response to the
+ * initiator.
+ */
+ (*chipset_responders.device_pause)(dev, status);
+}
+
+/* This is the callback function specified for a function driver, to
+ * be called when a pending "resume device" operation has been
+ * completed.
+ */
+static void
+resume_state_change_complete(struct visor_device *dev, int status)
+{
+ if (!dev->resuming)
+ return;
+
+ dev->resuming = false;
+ if (!chipset_responders.device_resume) /* this can never happen! */
+ return;
+
+ /* Notify the chipset driver that the resume is complete,
+ * which will presumably want to send some sort of response to
+ * the initiator.
+ */
+ (*chipset_responders.device_resume)(dev, status);
+}
+
+/* Tell the subordinate function driver for a specific device to pause
+ * or resume that device. Result is returned asynchronously via a
+ * callback function.
+ */
+static void
+initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
+{
+ int rc = -1, x;
+ struct visor_driver *drv = NULL;
+ void (*notify_func)(struct visor_device *dev, int response) = NULL;
+
+ if (is_pause)
+ notify_func = chipset_responders.device_pause;
+ else
+ notify_func = chipset_responders.device_resume;
+ if (!notify_func)
+ goto away;
+
+ drv = to_visor_driver(dev->device.driver);
+ if (!drv)
+ goto away;
+
+ if (dev->pausing || dev->resuming)
+ goto away;
+
+ /* Note that even though both drv->pause() and drv->resume
+ * specify a callback function, it is NOT necessary for us to
+ * increment our local module usage count. Reason is, there
+ * is already a linkage dependency between child function
+ * drivers and visorbus, so it is already IMPOSSIBLE to unload
+ * visorbus while child function drivers are still running.
+ */
+ if (is_pause) {
+ if (!drv->pause)
+ goto away;
+
+ dev->pausing = true;
+ x = drv->pause(dev, pause_state_change_complete);
+ } else {
+ /* This should be done at BUS resume time, but an
+ * existing problem prevents us from ever getting a bus
+ * resume... This hack would fail to work should we
+ * ever have a bus that contains NO devices, since we
+ * would never even get here in that case.
+ */
+ fix_vbus_dev_info(dev);
+ if (!drv->resume)
+ goto away;
+
+ dev->resuming = true;
+ x = drv->resume(dev, resume_state_change_complete);
+ }
+ if (x < 0) {
+ if (is_pause)
+ dev->pausing = false;
+ else
+ dev->resuming = false;
+ goto away;
+ }
+ rc = 0;
+away:
+ if (rc < 0) {
+ if (notify_func)
+ (*notify_func)(dev, rc);
+ }
+}
+
+static void
+chipset_device_pause(struct visor_device *dev_info)
+{
+ initiate_chipset_device_pause_resume(dev_info, true);
+}
+
+static void
+chipset_device_resume(struct visor_device *dev_info)
+{
+ initiate_chipset_device_pause_resume(dev_info, false);
+}
+
+struct channel_size_info {
+ uuid_le guid;
+ unsigned long min_size;
+ unsigned long max_size;
+};
+
+int
+visorbus_init(void)
+{
+ int rc = 0;
+
+ POSTCODE_LINUX_3(DRIVER_ENTRY_PC, rc, POSTCODE_SEVERITY_INFO);
+ bus_device_info_init(&clientbus_driverinfo,
+ "clientbus", "visorbus",
+ VERSION, NULL);
+
+ rc = create_bus_type();
+ if (rc < 0) {
+ POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, DIAG_SEVERITY_ERR);
+ goto away;
+ }
+
+ periodic_dev_workqueue = create_singlethread_workqueue("visorbus_dev");
+ if (!periodic_dev_workqueue) {
+ POSTCODE_LINUX_2(CREATE_WORKQUEUE_PC, DIAG_SEVERITY_ERR);
+ rc = -ENOMEM;
+ goto away;
+ }
+
+ /* This enables us to receive notifications when devices appear for
+ * which this service partition is to be a server for.
+ */
+ visorchipset_register_busdev(&chipset_notifiers,
+ &chipset_responders,
+ &chipset_driverinfo);
+
+ rc = 0;
+
+away:
+ if (rc)
+ POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
+ POSTCODE_SEVERITY_ERR);
+ return rc;
+}
+
+void
+visorbus_exit(void)
+{
+ struct list_head *listentry, *listtmp;
+
+ visorchipset_register_busdev(NULL, NULL, NULL);
+ remove_all_visor_devices();
+
+ flush_workqueue(periodic_dev_workqueue); /* better not be any work! */
+ destroy_workqueue(periodic_dev_workqueue);
+ periodic_dev_workqueue = NULL;
+
+ if (periodic_test_workqueue) {
+ cancel_delayed_work(&periodic_work);
+ flush_workqueue(periodic_test_workqueue);
+ destroy_workqueue(periodic_test_workqueue);
+ periodic_test_workqueue = NULL;
+ }
+
+ list_for_each_safe(listentry, listtmp, &list_all_bus_instances) {
+ struct visor_device *dev = list_entry(listentry,
+ struct
+ visor_device,
+ list_all);
+ remove_bus_instance(dev);
+ }
+ remove_bus_type();
+}
+
+module_param_named(debug, visorbus_debug, int, S_IRUGO);
+MODULE_PARM_DESC(debug, "1 to debug");
+
+module_param_named(forcematch, visorbus_forcematch, int, S_IRUGO);
+MODULE_PARM_DESC(forcematch,
+ "1 to force a successful dev <--> drv match");
+
+module_param_named(forcenomatch, visorbus_forcenomatch, int, S_IRUGO);
+MODULE_PARM_DESC(forcenomatch,
+ "1 to force an UNsuccessful dev <--> drv match");
+
+module_param_named(debugref, visorbus_debugref, int, S_IRUGO);
+MODULE_PARM_DESC(debugref, "1 to debug reference counting");
diff --git a/drivers/staging/unisys/visorbus/visorbus_private.h b/drivers/staging/unisys/visorbus/visorbus_private.h
new file mode 100644
index 000000000000..2f12483e38ab
--- /dev/null
+++ b/drivers/staging/unisys/visorbus/visorbus_private.h
@@ -0,0 +1,69 @@
+/* visorbus_private.h
+ *
+ * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __VISORBUS_PRIVATE_H__
+#define __VISORBUS_PRIVATE_H__
+
+#include <linux/uuid.h>
+
+#include "controlvmchannel.h"
+#include "vbusdeviceinfo.h"
+#include "vbushelper.h"
+
+/* These functions will be called from within visorchipset when certain
+ * events happen. (The implementation of these functions is outside of
+ * visorchipset.)
+ */
+struct visorchipset_busdev_notifiers {
+ void (*bus_create)(struct visor_device *bus_info);
+ void (*bus_destroy)(struct visor_device *bus_info);
+ void (*device_create)(struct visor_device *bus_info);
+ void (*device_destroy)(struct visor_device *bus_info);
+ void (*device_pause)(struct visor_device *bus_info);
+ void (*device_resume)(struct visor_device *bus_info);
+};
+
+/* These functions live inside visorchipset, and will be called to indicate
+ * responses to specific events (by code outside of visorchipset).
+ * For now, the value for each response is simply either:
+ * 0 = it worked
+ * -1 = it failed
+ */
+struct visorchipset_busdev_responders {
+ void (*bus_create)(struct visor_device *p, int response);
+ void (*bus_destroy)(struct visor_device *p, int response);
+ void (*device_create)(struct visor_device *p, int response);
+ void (*device_destroy)(struct visor_device *p, int response);
+ void (*device_pause)(struct visor_device *p, int response);
+ void (*device_resume)(struct visor_device *p, int response);
+};
+
+/** Register functions (in the bus driver) to get called by visorchipset
+ * whenever a bus or device appears for which this guest is to be the
+ * client. visorchipset will fill in <responders>, to indicate
+ * functions the bus driver should call to indicate message responses.
+ */
+void
+visorchipset_register_busdev(
+ struct visorchipset_busdev_notifiers *notifiers,
+ struct visorchipset_busdev_responders *responders,
+ struct ultra_vbus_deviceinfo *driver_info);
+
+/* visorbus init and exit functions */
+int visorbus_init(void);
+void visorbus_exit(void);
+#endif
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
new file mode 100644
index 000000000000..20b63496e9f2
--- /dev/null
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -0,0 +1,613 @@
+/* visorchannel.c
+ *
+ * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * This provides Supervisor channel communication primitives, which are
+ * independent of the mechanism used to access the channel data.
+ */
+
+#include <linux/uuid.h>
+
+#include "version.h"
+#include "visorbus.h"
+#include "controlvmchannel.h"
+
+#define MYDRVNAME "visorchannel"
+
+#define SPAR_CONSOLEVIDEO_CHANNEL_PROTOCOL_GUID \
+ UUID_LE(0x3cd6e705, 0xd6a2, 0x4aa5, \
+ 0xad, 0x5c, 0x7b, 0x8, 0x88, 0x9d, 0xff, 0xe2)
+static const uuid_le spar_video_guid = SPAR_CONSOLEVIDEO_CHANNEL_PROTOCOL_GUID;
+
+struct visorchannel {
+ u64 physaddr;
+ ulong nbytes;
+ void __iomem *mapped;
+ bool requested;
+ struct channel_header chan_hdr;
+ uuid_le guid;
+ ulong size;
+ bool needs_lock; /* channel creator knows if more than one
+ * thread will be inserting or removing */
+ spinlock_t insert_lock; /* protect head writes in chan_hdr */
+ spinlock_t remove_lock; /* protect tail writes in chan_hdr */
+
+ struct {
+ struct signal_queue_header req_queue;
+ struct signal_queue_header rsp_queue;
+ struct signal_queue_header event_queue;
+ struct signal_queue_header ack_queue;
+ } safe_uis_queue;
+ uuid_le type;
+ uuid_le inst;
+};
+
+/* Creates the struct visorchannel abstraction for a data area in memory,
+ * but does NOT modify this data area.
+ */
+static struct visorchannel *
+visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
+ gfp_t gfp, unsigned long off,
+ uuid_le guid, bool needs_lock)
+{
+ struct visorchannel *channel;
+ int err;
+ size_t size = sizeof(struct channel_header);
+
+ if (physaddr == 0)
+ return NULL;
+
+ channel = kzalloc(sizeof(*channel), gfp);
+ if (!channel)
+ goto cleanup;
+
+ channel->needs_lock = needs_lock;
+ spin_lock_init(&channel->insert_lock);
+ spin_lock_init(&channel->remove_lock);
+
+ /* The video driver contains the efi framebuffer, so it will get a
+ * conflict resource when requesting its full mem region. Since
+ * we are only using the efi framebuffer for video, we can ignore
+ * this. Remember that we haven't requested it, so we don't try
+ * to release it later on.
+ */
+ channel->requested = request_mem_region(physaddr, size, MYDRVNAME);
+ if (!channel->requested) {
+ if (uuid_le_cmp(guid, spar_video_guid)) {
+ /* not the video channel; this failure matters */
+ goto cleanup;
+ }
+ }
+
+ channel->mapped = ioremap_cache(physaddr, size);
+ if (!channel->mapped) {
+ release_mem_region(physaddr, size);
+ goto cleanup;
+ }
+
+ channel->physaddr = physaddr;
+ channel->nbytes = size;
+
+ err = visorchannel_read(channel, 0, &channel->chan_hdr,
+ sizeof(struct channel_header));
+ if (err)
+ goto cleanup;
+
+ /* we had better be a CLIENT of this channel */
+ if (channel_bytes == 0)
+ channel_bytes = (ulong)channel->chan_hdr.size;
+ if (uuid_le_cmp(guid, NULL_UUID_LE) == 0)
+ guid = channel->chan_hdr.chtype;
+
+ iounmap(channel->mapped);
+ if (channel->requested)
+ release_mem_region(channel->physaddr, channel->nbytes);
+ channel->mapped = NULL;
+ channel->requested = request_mem_region(channel->physaddr,
+ channel_bytes, MYDRVNAME);
+ if (!channel->requested) {
+ if (uuid_le_cmp(guid, spar_video_guid)) {
+ /* not the video channel; this failure matters */
+ goto cleanup;
+ }
+ }
+
+ channel->mapped = ioremap_cache(channel->physaddr, channel_bytes);
+ if (!channel->mapped) {
+ release_mem_region(channel->physaddr, channel_bytes);
+ goto cleanup;
+ }
+
+ channel->nbytes = channel_bytes;
+
+ channel->size = channel_bytes;
+ channel->guid = guid;
+ return channel;
+
+cleanup:
+ visorchannel_destroy(channel);
+ return NULL;
+}
+
+struct visorchannel *
+visorchannel_create(u64 physaddr, unsigned long channel_bytes,
+ gfp_t gfp, uuid_le guid)
+{
+ return visorchannel_create_guts(physaddr, channel_bytes, gfp, 0, guid,
+ false);
+}
+EXPORT_SYMBOL_GPL(visorchannel_create);
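+
+/* Sketch of typical usage (physaddr is a placeholder for a
+ * guest-physical channel address supplied by the chipset): passing 0
+ * for channel_bytes and NULL_UUID_LE for guid makes the size and type
+ * default to whatever the channel header at physaddr declares:
+ *
+ *	struct visorchannel *ch;
+ *
+ *	ch = visorchannel_create(physaddr, 0, GFP_KERNEL, NULL_UUID_LE);
+ *	if (!ch)
+ *		return -ENOMEM;
+ *	...
+ *	visorchannel_destroy(ch);
+ */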
+
+struct visorchannel *
+visorchannel_create_with_lock(u64 physaddr, unsigned long channel_bytes,
+ gfp_t gfp, uuid_le guid)
+{
+ return visorchannel_create_guts(physaddr, channel_bytes, gfp, 0, guid,
+ true);
+}
+EXPORT_SYMBOL_GPL(visorchannel_create_with_lock);
+
+void
+visorchannel_destroy(struct visorchannel *channel)
+{
+ if (!channel)
+ return;
+ if (channel->mapped) {
+ iounmap(channel->mapped);
+ if (channel->requested)
+ release_mem_region(channel->physaddr, channel->nbytes);
+ }
+ kfree(channel);
+}
+EXPORT_SYMBOL_GPL(visorchannel_destroy);
+
+u64
+visorchannel_get_physaddr(struct visorchannel *channel)
+{
+ return channel->physaddr;
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_physaddr);
+
+ulong
+visorchannel_get_nbytes(struct visorchannel *channel)
+{
+ return channel->size;
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_nbytes);
+
+char *
+visorchannel_uuid_id(uuid_le *guid, char *s)
+{
+ sprintf(s, "%pUL", guid);
+ return s;
+}
+EXPORT_SYMBOL_GPL(visorchannel_uuid_id);
+
+char *
+visorchannel_id(struct visorchannel *channel, char *s)
+{
+ return visorchannel_uuid_id(&channel->guid, s);
+}
+EXPORT_SYMBOL_GPL(visorchannel_id);
+
+char *
+visorchannel_zoneid(struct visorchannel *channel, char *s)
+{
+ return visorchannel_uuid_id(&channel->chan_hdr.zone_uuid, s);
+}
+EXPORT_SYMBOL_GPL(visorchannel_zoneid);
+
+u64
+visorchannel_get_clientpartition(struct visorchannel *channel)
+{
+ return channel->chan_hdr.partition_handle;
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_clientpartition);
+
+int
+visorchannel_set_clientpartition(struct visorchannel *channel,
+ u64 partition_handle)
+{
+ channel->chan_hdr.partition_handle = partition_handle;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(visorchannel_set_clientpartition);
+
+uuid_le
+visorchannel_get_uuid(struct visorchannel *channel)
+{
+ return channel->guid;
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_uuid);
+
+int
+visorchannel_read(struct visorchannel *channel, ulong offset,
+ void *local, ulong nbytes)
+{
+ if (offset + nbytes > channel->nbytes)
+ return -EIO;
+
+ memcpy_fromio(local, channel->mapped + offset, nbytes);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(visorchannel_read);
+
+int
+visorchannel_write(struct visorchannel *channel, ulong offset,
+ void *local, ulong nbytes)
+{
+ size_t chdr_size = sizeof(struct channel_header);
+ size_t copy_size;
+
+ if (offset + nbytes > channel->nbytes)
+ return -EIO;
+
+ if (offset < chdr_size) {
+ copy_size = min(chdr_size - offset, nbytes);
+ memcpy(((u8 *)&channel->chan_hdr) + offset, local, copy_size);
+ }
+
+ memcpy_toio(channel->mapped + offset, local, nbytes);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(visorchannel_write);
+
+int
+visorchannel_clear(struct visorchannel *channel, ulong offset, u8 ch,
+ ulong nbytes)
+{
+ int err;
+ int bufsize = PAGE_SIZE;
+ int written = 0;
+ u8 *buf;
+
+ buf = (u8 *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memset(buf, ch, bufsize);
+
+ while (nbytes > 0) {
+ int thisbytes = bufsize;
+
+ if (nbytes < thisbytes)
+ thisbytes = nbytes;
+ err = visorchannel_write(channel, offset + written,
+ buf, thisbytes);
+ if (err)
+ goto cleanup;
+
+ written += thisbytes;
+ nbytes -= thisbytes;
+ }
+ err = 0;
+
+cleanup:
+ free_page((unsigned long)buf);
+ return err;
+}
+EXPORT_SYMBOL_GPL(visorchannel_clear);
+
+void __iomem *
+visorchannel_get_header(struct visorchannel *channel)
+{
+ return (void __iomem *)&channel->chan_hdr;
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_header);
+
+/** Return offset of a specific SIGNAL_QUEUE_HEADER from the beginning of a
+ * channel header
+ */
+#define SIG_QUEUE_OFFSET(chan_hdr, q) \
+ ((chan_hdr)->ch_space_offset + \
+ ((q) * sizeof(struct signal_queue_header)))
+
+/** Return offset of a specific queue entry (data) from the beginning of a
+ * channel header
+ */
+#define SIG_DATA_OFFSET(chan_hdr, q, sig_hdr, slot) \
+ (SIG_QUEUE_OFFSET(chan_hdr, q) + (sig_hdr)->sig_base_offset + \
+ ((slot) * (sig_hdr)->signal_size))
+
+/** Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back
+ * into host memory
+ */
+#define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
+ (visorchannel_write(channel, \
+ SIG_QUEUE_OFFSET(&channel->chan_hdr, queue) + \
+ offsetof(struct signal_queue_header, FIELD), \
+ &((sig_hdr)->FIELD), \
+ sizeof((sig_hdr)->FIELD)) >= 0)
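+
+/* Worked example of the macros above (values hypothetical): with
+ * ch_space_offset = 0x400, queue = 1, sig_base_offset = 0x80,
+ * signal_size = 0x40 and slot = 2:
+ *
+ *	SIG_QUEUE_OFFSET = 0x400 + (1 * sizeof(struct signal_queue_header))
+ *	SIG_DATA_OFFSET  = SIG_QUEUE_OFFSET + 0x80 + (2 * 0x40)
+ *
+ * i.e., the queue headers are laid out contiguously at ch_space_offset,
+ * and each queue's signal slots begin sig_base_offset bytes past that
+ * queue's own header offset.
+ */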
+
+static bool
+sig_read_header(struct visorchannel *channel, u32 queue,
+ struct signal_queue_header *sig_hdr)
+{
+ int err;
+
+ if (channel->chan_hdr.ch_space_offset < sizeof(struct channel_header))
+ return false;
+
+ /* Read the appropriate SIGNAL_QUEUE_HEADER into local memory. */
+ err = visorchannel_read(channel,
+ SIG_QUEUE_OFFSET(&channel->chan_hdr, queue),
+ sig_hdr, sizeof(struct signal_queue_header));
+ if (err)
+ return false;
+
+ return true;
+}
+
+static inline bool
+sig_read_data(struct visorchannel *channel, u32 queue,
+ struct signal_queue_header *sig_hdr, u32 slot, void *data)
+{
+ int err;
+ int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue,
+ sig_hdr, slot);
+
+ err = visorchannel_read(channel, signal_data_offset,
+ data, sig_hdr->signal_size);
+ if (err)
+ return false;
+
+ return true;
+}
+
+static inline bool
+sig_write_data(struct visorchannel *channel, u32 queue,
+ struct signal_queue_header *sig_hdr, u32 slot, void *data)
+{
+ int err;
+ int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue,
+ sig_hdr, slot);
+
+ err = visorchannel_write(channel, signal_data_offset,
+ data, sig_hdr->signal_size);
+ if (err)
+ return false;
+
+ return true;
+}
+
+static bool
+signalremove_inner(struct visorchannel *channel, u32 queue, void *msg)
+{
+ struct signal_queue_header sig_hdr;
+
+ if (!sig_read_header(channel, queue, &sig_hdr))
+ return false;
+ if (sig_hdr.head == sig_hdr.tail)
+ return false; /* no signals to remove */
+
+ sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;
+ if (!sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg))
+ return false;
+ sig_hdr.num_received++;
+
+ /* For each data field in SIGNAL_QUEUE_HEADER that was modified,
+ * update host memory.
+ */
+ mb(); /* required for channel synch */
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail))
+ return false;
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received))
+ return false;
+ return true;
+}
+
+bool
+visorchannel_signalremove(struct visorchannel *channel, u32 queue, void *msg)
+{
+ bool rc;
+
+ if (channel->needs_lock) {
+ spin_lock(&channel->remove_lock);
+ rc = signalremove_inner(channel, queue, msg);
+ spin_unlock(&channel->remove_lock);
+ } else {
+ rc = signalremove_inner(channel, queue, msg);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visorchannel_signalremove);
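+
+/* Sketch of a consumer loop (queue number and message type are
+ * illustrative): signalremove_inner() returns false once head == tail,
+ * so a caller can drain a queue with:
+ *
+ *	struct example_msg msg;
+ *
+ *	while (visorchannel_signalremove(channel, 0, &msg))
+ *		handle_msg(&msg);
+ */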
+
+static bool
+signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg)
+{
+ struct signal_queue_header sig_hdr;
+
+ if (!sig_read_header(channel, queue, &sig_hdr))
+ return false;
+
+ sig_hdr.head = ((sig_hdr.head + 1) % sig_hdr.max_slots);
+ if (sig_hdr.head == sig_hdr.tail) {
+ sig_hdr.num_overflows++;
+ visorchannel_write(channel,
+ SIG_QUEUE_OFFSET(&channel->chan_hdr, queue) +
+ offsetof(struct signal_queue_header,
+ num_overflows),
+ &(sig_hdr.num_overflows),
+ sizeof(sig_hdr.num_overflows));
+ return false;
+ }
+
+ if (!sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg))
+ return false;
+
+ sig_hdr.num_sent++;
+
+ /* For each data field in SIGNAL_QUEUE_HEADER that was modified,
+ * update host memory.
+ */
+ mb(); /* required for channel synch */
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, head))
+ return false;
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent))
+ return false;
+
+ return true;
+}
+
+bool
+visorchannel_signalinsert(struct visorchannel *channel, u32 queue, void *msg)
+{
+ bool rc;
+
+ if (channel->needs_lock) {
+ spin_lock(&channel->insert_lock);
+ rc = signalinsert_inner(channel, queue, msg);
+ spin_unlock(&channel->insert_lock);
+ } else {
+ rc = signalinsert_inner(channel, queue, msg);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visorchannel_signalinsert);
+
+int
+visorchannel_signalqueue_slots_avail(struct visorchannel *channel, u32 queue)
+{
+ struct signal_queue_header sig_hdr;
+ u32 slots_avail, slots_used;
+ u32 head, tail;
+
+ if (!sig_read_header(channel, queue, &sig_hdr))
+ return 0;
+ head = sig_hdr.head;
+ tail = sig_hdr.tail;
+ if (head < tail)
+ head = head + sig_hdr.max_slots;
+ slots_used = (head - tail);
+ slots_avail = sig_hdr.max_signals - slots_used;
+ return (int)slots_avail;
+}
+EXPORT_SYMBOL_GPL(visorchannel_signalqueue_slots_avail);
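+
+/* Worked example of the wraparound arithmetic above (values
+ * hypothetical): with max_slots = 16, head = 3 and tail = 10, head is
+ * first biased to 3 + 16 = 19, giving slots_used = 19 - 10 = 9; with
+ * max_signals = 15, slots_avail = 15 - 9 = 6.
+ */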
+
+int
+visorchannel_signalqueue_max_slots(struct visorchannel *channel, u32 queue)
+{
+ struct signal_queue_header sig_hdr;
+
+ if (!sig_read_header(channel, queue, &sig_hdr))
+ return 0;
+ return (int)sig_hdr.max_signals;
+}
+EXPORT_SYMBOL_GPL(visorchannel_signalqueue_max_slots);
+
+static void
+sigqueue_debug(struct signal_queue_header *q, int which, struct seq_file *seq)
+{
+ seq_printf(seq, "Signal Queue #%d\n", which);
+ seq_printf(seq, " VersionId = %lu\n", (ulong)q->version);
+ seq_printf(seq, " Type = %lu\n", (ulong)q->chtype);
+ seq_printf(seq, " oSignalBase = %llu\n",
+ (long long)q->sig_base_offset);
+ seq_printf(seq, " SignalSize = %lu\n", (ulong)q->signal_size);
+ seq_printf(seq, " MaxSignalSlots = %lu\n",
+ (ulong)q->max_slots);
+ seq_printf(seq, " MaxSignals = %lu\n", (ulong)q->max_signals);
+ seq_printf(seq, " FeatureFlags = %-16.16Lx\n",
+ (long long)q->features);
+ seq_printf(seq, " NumSignalsSent = %llu\n",
+ (long long)q->num_sent);
+ seq_printf(seq, " NumSignalsReceived = %llu\n",
+ (long long)q->num_received);
+ seq_printf(seq, " NumOverflows = %llu\n",
+ (long long)q->num_overflows);
+ seq_printf(seq, " Head = %lu\n", (ulong)q->head);
+ seq_printf(seq, " Tail = %lu\n", (ulong)q->tail);
+}
+
+void
+visorchannel_debug(struct visorchannel *channel, int num_queues,
+ struct seq_file *seq, u32 off)
+{
+ u64 addr = 0;
+ ulong nbytes = 0, nbytes_region = 0;
+ struct channel_header hdr;
+ struct channel_header *phdr = &hdr;
+ int i = 0;
+ int errcode = 0;
+
+ if (!channel)
+ return;
+
+ addr = visorchannel_get_physaddr(channel);
+ nbytes_region = visorchannel_get_nbytes(channel);
+ errcode = visorchannel_read(channel, off,
+ phdr, sizeof(struct channel_header));
+ if (errcode < 0) {
+ seq_printf(seq,
+ "Read of channel header failed with errcode=%d)\n",
+ errcode);
+ if (off == 0) {
+ phdr = &channel->chan_hdr;
+ seq_puts(seq, "(following data may be stale)\n");
+ } else {
+ return;
+ }
+ }
+ nbytes = (ulong)(phdr->size);
+ seq_printf(seq, "--- Begin channel @0x%-16.16Lx for 0x%lx bytes (region=0x%lx bytes) ---\n",
+ addr + off, nbytes, nbytes_region);
+ seq_printf(seq, "Type = %pUL\n", &phdr->chtype);
+ seq_printf(seq, "ZoneGuid = %pUL\n", &phdr->zone_uuid);
+ seq_printf(seq, "Signature = 0x%-16.16Lx\n",
+ (long long)phdr->signature);
+ seq_printf(seq, "LegacyState = %lu\n", (ulong)phdr->legacy_state);
+ seq_printf(seq, "SrvState = %lu\n", (ulong)phdr->srv_state);
+ seq_printf(seq, "CliStateBoot = %lu\n", (ulong)phdr->cli_state_boot);
+ seq_printf(seq, "CliStateOS = %lu\n", (ulong)phdr->cli_state_os);
+ seq_printf(seq, "HeaderSize = %lu\n", (ulong)phdr->header_size);
+ seq_printf(seq, "Size = %llu\n", (long long)phdr->size);
+ seq_printf(seq, "Features = 0x%-16.16llx\n",
+ (long long)phdr->features);
+ seq_printf(seq, "PartitionHandle = 0x%-16.16llx\n",
+ (long long)phdr->partition_handle);
+ seq_printf(seq, "Handle = 0x%-16.16llx\n",
+ (long long)phdr->handle);
+ seq_printf(seq, "VersionId = %lu\n", (ulong)phdr->version_id);
+ seq_printf(seq, "oChannelSpace = %llu\n",
+ (long long)phdr->ch_space_offset);
+ if ((phdr->ch_space_offset != 0) && (errcode >= 0)) {
+ for (i = 0; i < num_queues; i++) {
+ struct signal_queue_header q;
+
+ errcode = visorchannel_read(channel,
+ off +
+ phdr->ch_space_offset +
+ (i * sizeof(q)),
+ &q, sizeof(q));
+ if (errcode < 0) {
+ seq_printf(seq,
+ "failed to read signal queue #%d from channel @0x%-16.16Lx errcode=%d\n",
+ i, addr, errcode);
+ continue;
+ }
+ sigqueue_debug(&q, i, seq);
+ }
+ }
+ seq_printf(seq, "--- End channel @0x%-16.16Lx for 0x%lx bytes ---\n",
+ addr + off, nbytes);
+}
+EXPORT_SYMBOL_GPL(visorchannel_debug);
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
new file mode 100644
index 000000000000..bb8087e70127
--- /dev/null
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -0,0 +1,2437 @@
+/* visorchipset.c
+ *
+ * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/cdev.h>
+#include <linux/ctype.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/nls.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/uuid.h>
+#include <linux/crash_dump.h>
+
+#include "channel_guid.h"
+#include "controlvmchannel.h"
+#include "controlvmcompletionstatus.h"
+#include "guestlinuxdebug.h"
+#include "periodic_work.h"
+#include "version.h"
+#include "visorbus.h"
+#include "visorbus_private.h"
+#include "vmcallinterface.h"
+
+#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
+
+#define MAX_NAME_SIZE 128
+#define MAX_IP_SIZE 50
+#define MAXOUTSTANDINGCHANNELCOMMAND 256
+#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
+#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
+
+#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
+
+#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000
+
+#define UNISYS_SPAR_LEAF_ID 0x40000000
+
+/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
+#define UNISYS_SPAR_ID_EBX 0x73696e55
+#define UNISYS_SPAR_ID_ECX 0x70537379
+#define UNISYS_SPAR_ID_EDX 0x34367261
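+
+/* Sketch of how these constants would be used to detect s-Par (the
+ * helper is illustrative; cpuid() is the x86 primitive from
+ * <asm/processor.h>):
+ *
+ *	static bool running_on_spar(void)
+ *	{
+ *		unsigned int eax, ebx, ecx, edx;
+ *
+ *		cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
+ *		return ebx == UNISYS_SPAR_ID_EBX &&
+ *		       ecx == UNISYS_SPAR_ID_ECX &&
+ *		       edx == UNISYS_SPAR_ID_EDX;
+ *	}
+ */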
+
+/*
+ * Module parameters
+ */
+static int visorchipset_major;
+static int visorchipset_visorbusregwait = 1; /* default is on */
+static int visorchipset_holdchipsetready;
+static unsigned long controlvm_payload_bytes_buffered;
+
+static int
+visorchipset_open(struct inode *inode, struct file *file)
+{
+ unsigned int minor_number = iminor(inode);
+
+ if (minor_number)
+ return -ENODEV;
+ file->private_data = NULL;
+ return 0;
+}
+
+static int
+visorchipset_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
+ * we switch to slow polling mode. As soon as we get a controlvm
+ * message, we switch back to fast polling mode.
+ */
+#define MIN_IDLE_SECONDS 10
+static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+static unsigned long most_recent_message_jiffies; /* when we got our last
+ * controlvm message */
+static int visorbusregistered;
+
+#define MAX_CHIPSET_EVENTS 2
+static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
+
+struct parser_context {
+ unsigned long allocbytes;
+ unsigned long param_bytes;
+ u8 *curr;
+ unsigned long bytes_remaining;
+ bool byte_stream;
+ char data[0];
+};
+
+static struct delayed_work periodic_controlvm_work;
+static struct workqueue_struct *periodic_controlvm_workqueue;
+static DEFINE_SEMAPHORE(notifier_lock);
+
+static struct cdev file_cdev;
+static struct visorchannel **file_controlvm_channel;
+static struct controlvm_message_header g_chipset_msg_hdr;
+static struct controlvm_message_packet g_devicechangestate_packet;
+
+static LIST_HEAD(bus_info_list);
+static LIST_HEAD(dev_info_list);
+
+static struct visorchannel *controlvm_channel;
+
+/* Manages the request payload in the controlvm channel */
+struct visor_controlvm_payload_info {
+ u8 __iomem *ptr; /* pointer to base address of payload pool */
+ u64 offset; /* offset from beginning of controlvm
+ * channel to beginning of payload pool */
+ u32 bytes; /* number of bytes in payload pool */
+};
+
+static struct visor_controlvm_payload_info controlvm_payload_info;
+
+/* The following globals are used to handle the scenario where we are unable to
+ * offload the payload from a controlvm message due to memory requirements. In
+ * this scenario, we simply stash the controlvm message, then attempt to
+ * process it again the next time controlvm_periodic_work() runs.
+ */
+static struct controlvm_message controlvm_pending_msg;
+static bool controlvm_pending_msg_valid;
+
+/* This identifies a data buffer that has been received via a controlvm message
+ * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
+ */
+struct putfile_buffer_entry {
+ struct list_head next; /* putfile_buffer_entry list */
+ struct parser_context *parser_ctx; /* points to input data buffer */
+};
+
+/* List of struct putfile_request *, via next_putfile_request member.
+ * Each entry in this list identifies an outstanding TRANSMIT_FILE
+ * conversation.
+ */
+static LIST_HEAD(putfile_request_list);
+
+/* This describes a buffer and its current state of transfer (e.g., how many
+ * bytes have already been supplied as putfile data, and how many bytes are
+ * remaining) for a putfile_request.
+ */
+struct putfile_active_buffer {
+ /* a payload from a controlvm message, containing a file data buffer */
+ struct parser_context *parser_ctx;
+ /* points within data area of parser_ctx to next byte of data */
+ u8 *pnext;
+ /* # bytes left from <pnext> to the end of this data buffer */
+ size_t bytes_remaining;
+};
+
+#define PUTFILE_REQUEST_SIG 0x0906101302281211
+/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
+ * conversation. Structs of this type are dynamically linked into
+ * <Putfile_request_list>.
+ */
+struct putfile_request {
+ u64 sig; /* PUTFILE_REQUEST_SIG */
+
+ /* header from original TransmitFile request */
+ struct controlvm_message_header controlvm_header;
+ u64 file_request_number; /* from original TransmitFile request */
+
+ /* link to next struct putfile_request */
+ struct list_head next_putfile_request;
+
+ /* most-recent sequence number supplied via a controlvm message */
+ u64 data_sequence_number;
+
+ /* head of putfile_buffer_entry list, which describes the data to be
+ * supplied as putfile data;
+ * - this list is added to when controlvm messages come in that supply
+ * file data
+ * - this list is removed from via the hotplug program that is actually
+ * consuming these buffers to write as file data */
+ struct list_head input_buffer_list;
+ spinlock_t req_list_lock; /* lock for input_buffer_list */
+
+ /* waiters for input_buffer_list to go non-empty */
+ wait_queue_head_t input_buffer_wq;
+
+ /* data not yet read within current putfile_buffer_entry */
+ struct putfile_active_buffer active_buf;
+
+ /* <0 = failed, 0 = in-progress, >0 = successful;
+ * note that this must be set with req_list_lock, and if you set <0,
+ * it is your responsibility to also free up all of the other objects
+ * in this struct (like input_buffer_list, active_buf.parser_ctx)
+ * before releasing the lock
+ */
+ int completion_status;
+};
+
+struct parahotplug_request {
+ struct list_head list;
+ int id;
+ unsigned long expiration;
+ struct controlvm_message msg;
+};
+
+static LIST_HEAD(parahotplug_request_list);
+static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
+static void parahotplug_process_list(void);
+
+/* Functions (within the bus driver) for visorchipset to call when bus
+ * and device events occur; filled in via visorchipset_register_busdev().
+ */
+static struct visorchipset_busdev_notifiers busdev_notifiers;
+
+static void bus_create_response(struct visor_device *p, int response);
+static void bus_destroy_response(struct visor_device *p, int response);
+static void device_create_response(struct visor_device *p, int response);
+static void device_destroy_response(struct visor_device *p, int response);
+static void device_resume_response(struct visor_device *p, int response);
+
+static void visorchipset_device_pause_response(struct visor_device *p,
+ int response);
+
+static struct visorchipset_busdev_responders busdev_responders = {
+ .bus_create = bus_create_response,
+ .bus_destroy = bus_destroy_response,
+ .device_create = device_create_response,
+ .device_destroy = device_destroy_response,
+ .device_pause = visorchipset_device_pause_response,
+ .device_resume = device_resume_response,
+};
+
+/* info for /dev/visorchipset */
+static dev_t major_dev = -1; /* indicates major num for device */
+
+/* prototypes for attributes */
+static ssize_t toolaction_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t toolaction_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+static DEVICE_ATTR_RW(toolaction);
+
+static ssize_t boottotool_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t boottotool_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count);
+static DEVICE_ATTR_RW(boottotool);
+
+static ssize_t error_show(struct device *dev, struct device_attribute *attr,
+ char *buf);
+static ssize_t error_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
+static DEVICE_ATTR_RW(error);
+
+static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
+ char *buf);
+static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
+static DEVICE_ATTR_RW(textid);
+
+static ssize_t remaining_steps_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t remaining_steps_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+static DEVICE_ATTR_RW(remaining_steps);
+
+static ssize_t chipsetready_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+static DEVICE_ATTR_WO(chipsetready);
+
+static ssize_t devicedisabled_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+static DEVICE_ATTR_WO(devicedisabled);
+
+static ssize_t deviceenabled_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+static DEVICE_ATTR_WO(deviceenabled);
+
+static struct attribute *visorchipset_install_attrs[] = {
+ &dev_attr_toolaction.attr,
+ &dev_attr_boottotool.attr,
+ &dev_attr_error.attr,
+ &dev_attr_textid.attr,
+ &dev_attr_remaining_steps.attr,
+ NULL
+};
+
+static struct attribute_group visorchipset_install_group = {
+ .name = "install",
+ .attrs = visorchipset_install_attrs
+};
+
+static struct attribute *visorchipset_guest_attrs[] = {
+ &dev_attr_chipsetready.attr,
+ NULL
+};
+
+static struct attribute_group visorchipset_guest_group = {
+ .name = "guest",
+ .attrs = visorchipset_guest_attrs
+};
+
+static struct attribute *visorchipset_parahotplug_attrs[] = {
+ &dev_attr_devicedisabled.attr,
+ &dev_attr_deviceenabled.attr,
+ NULL
+};
+
+static struct attribute_group visorchipset_parahotplug_group = {
+ .name = "parahotplug",
+ .attrs = visorchipset_parahotplug_attrs
+};
+
+static const struct attribute_group *visorchipset_dev_groups[] = {
+ &visorchipset_install_group,
+ &visorchipset_guest_group,
+ &visorchipset_parahotplug_group,
+ NULL
+};
+
+static void visorchipset_dev_release(struct device *dev)
+{
+}
+
+/* /sys/devices/platform/visorchipset */
+static struct platform_device visorchipset_platform_device = {
+ .name = "visorchipset",
+ .id = -1,
+ .dev.groups = visorchipset_dev_groups,
+ .dev.release = visorchipset_dev_release,
+};
+
+/* Function prototypes */
+static void controlvm_respond(struct controlvm_message_header *msg_hdr,
+ int response);
+static void controlvm_respond_chipset_init(
+ struct controlvm_message_header *msg_hdr, int response,
+ enum ultra_chipset_feature features);
+static void controlvm_respond_physdev_changestate(
+ struct controlvm_message_header *msg_hdr, int response,
+ struct spar_segment_state state);
+
+static void parser_done(struct parser_context *ctx);
+
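+/* Allocate a parser_context and copy in a controlvm message payload of
+ * <bytes> bytes located at physical address <addr>. For local (test)
+ * messages the payload already lies in kernel memory; otherwise the region
+ * is ioremap'ed and copied out. If allocation must be throttled, *retry is
+ * set and NULL is returned so the caller can try again later.
+ */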
+static struct parser_context *
+parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
+{
+ int allocbytes = sizeof(struct parser_context) + bytes;
+ struct parser_context *rc = NULL;
+ struct parser_context *ctx = NULL;
+
+ if (retry)
+ *retry = false;
+
+	/* alloc one extra byte to ensure the payload is '\0'-terminated */
+	allocbytes++;
+ if ((controlvm_payload_bytes_buffered + bytes)
+ > MAX_CONTROLVM_PAYLOAD_BYTES) {
+ if (retry)
+ *retry = true;
+ rc = NULL;
+ goto cleanup;
+ }
+	ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
+ if (!ctx) {
+ if (retry)
+ *retry = true;
+ rc = NULL;
+ goto cleanup;
+ }
+
+ ctx->allocbytes = allocbytes;
+ ctx->param_bytes = bytes;
+ ctx->curr = NULL;
+ ctx->bytes_remaining = 0;
+ ctx->byte_stream = false;
+ if (local) {
+ void *p;
+
+ if (addr > virt_to_phys(high_memory - 1)) {
+ rc = NULL;
+ goto cleanup;
+ }
+		p = __va((unsigned long)addr);
+ memcpy(ctx->data, p, bytes);
+ } else {
+ void __iomem *mapping;
+
+ if (!request_mem_region(addr, bytes, "visorchipset")) {
+ rc = NULL;
+ goto cleanup;
+ }
+
+ mapping = ioremap_cache(addr, bytes);
+ if (!mapping) {
+ release_mem_region(addr, bytes);
+ rc = NULL;
+ goto cleanup;
+ }
+ memcpy_fromio(ctx->data, mapping, bytes);
+ release_mem_region(addr, bytes);
+ }
+
+ ctx->byte_stream = true;
+ rc = ctx;
+cleanup:
+ if (rc) {
+ controlvm_payload_bytes_buffered += ctx->param_bytes;
+ } else {
+ if (ctx) {
+ parser_done(ctx);
+ ctx = NULL;
+ }
+ }
+ return rc;
+}
+
+static uuid_le
+parser_id_get(struct parser_context *ctx)
+{
+ struct spar_controlvm_parameters_header *phdr = NULL;
+
+	if (!ctx)
+ return NULL_UUID_LE;
+ phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
+ return phdr->id;
+}
+
+/* Identifies which string field within the controlvm parameters payload the
+ * parser should position on (see parser_param_start()).
+ */
+enum PARSER_WHICH_STRING {
+ PARSERSTRING_INITIATOR,
+ PARSERSTRING_TARGET,
+ PARSERSTRING_CONNECTION,
+ PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
+};
+
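+/* Position the parser at the start of the requested string, using the
+ * offset/length pairs recorded in the controlvm parameters header.
+ */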
+static void
+parser_param_start(struct parser_context *ctx,
+ enum PARSER_WHICH_STRING which_string)
+{
+ struct spar_controlvm_parameters_header *phdr = NULL;
+
+	if (!ctx)
+		return;
+ phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
+ switch (which_string) {
+ case PARSERSTRING_INITIATOR:
+ ctx->curr = ctx->data + phdr->initiator_offset;
+ ctx->bytes_remaining = phdr->initiator_length;
+ break;
+ case PARSERSTRING_TARGET:
+ ctx->curr = ctx->data + phdr->target_offset;
+ ctx->bytes_remaining = phdr->target_length;
+ break;
+ case PARSERSTRING_CONNECTION:
+ ctx->curr = ctx->data + phdr->connection_offset;
+ ctx->bytes_remaining = phdr->connection_length;
+ break;
+ case PARSERSTRING_NAME:
+ ctx->curr = ctx->data + phdr->name_offset;
+ ctx->bytes_remaining = phdr->name_length;
+ break;
+ default:
+ break;
+ }
+}
+
+static void parser_done(struct parser_context *ctx)
+{
+ if (!ctx)
+ return;
+ controlvm_payload_bytes_buffered -= ctx->param_bytes;
+ kfree(ctx);
+}
+
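+/* Return a kmalloc'ed, '\0'-terminated copy of the string the parser is
+ * currently positioned on (at most bytes_remaining bytes); the caller is
+ * responsible for kfree'ing the result.
+ */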
+static void *
+parser_string_get(struct parser_context *ctx)
+{
+ u8 *pscan;
+ unsigned long nscan;
+ int value_length = -1;
+ void *value = NULL;
+ int i;
+
+ if (!ctx)
+ return NULL;
+ pscan = ctx->curr;
+ nscan = ctx->bytes_remaining;
+ if (nscan == 0)
+ return NULL;
+ if (!pscan)
+ return NULL;
+ for (i = 0, value_length = -1; i < nscan; i++)
+ if (pscan[i] == '\0') {
+ value_length = i;
+ break;
+ }
+ if (value_length < 0) /* '\0' was not included in the length */
+ value_length = nscan;
+	value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
+	if (!value)
+		return NULL;
+	if (value_length > 0)
+		memcpy(value, pscan, value_length);
+	((u8 *)value)[value_length] = '\0';
+ return value;
+}
+
+static ssize_t toolaction_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 tool_action;
+
+ visorchannel_read(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ tool_action), &tool_action, sizeof(u8));
+ return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
+}
+
+static ssize_t toolaction_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u8 tool_action;
+ int ret;
+
+ if (kstrtou8(buf, 10, &tool_action))
+ return -EINVAL;
+
+ ret = visorchannel_write(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ tool_action),
+ &tool_action, sizeof(u8));
+
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t boottotool_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efi_spar_indication efi_spar_indication;
+
+ visorchannel_read(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ efi_spar_ind), &efi_spar_indication,
+ sizeof(struct efi_spar_indication));
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ efi_spar_indication.boot_to_tool);
+}
+
+static ssize_t boottotool_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int val, ret;
+ struct efi_spar_indication efi_spar_indication;
+
+ if (kstrtoint(buf, 10, &val))
+ return -EINVAL;
+
+ efi_spar_indication.boot_to_tool = val;
+ ret = visorchannel_write(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ efi_spar_ind), &(efi_spar_indication),
+ sizeof(struct efi_spar_indication));
+
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t error_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u32 error;
+
+ visorchannel_read(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_error),
+ &error, sizeof(u32));
+ return scnprintf(buf, PAGE_SIZE, "%i\n", error);
+}
+
+static ssize_t error_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 error;
+ int ret;
+
+ if (kstrtou32(buf, 10, &error))
+ return -EINVAL;
+
+ ret = visorchannel_write(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_error),
+ &error, sizeof(u32));
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u32 text_id;
+
+ visorchannel_read(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_text_id),
+ &text_id, sizeof(u32));
+ return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
+}
+
+static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 text_id;
+ int ret;
+
+ if (kstrtou32(buf, 10, &text_id))
+ return -EINVAL;
+
+ ret = visorchannel_write(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_text_id),
+ &text_id, sizeof(u32));
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t remaining_steps_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u16 remaining_steps;
+
+ visorchannel_read(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_remaining_steps),
+ &remaining_steps, sizeof(u16));
+ return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
+}
+
+static ssize_t remaining_steps_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u16 remaining_steps;
+ int ret;
+
+ if (kstrtou16(buf, 10, &remaining_steps))
+ return -EINVAL;
+
+ ret = visorchannel_write(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_remaining_steps),
+ &remaining_steps, sizeof(u16));
+ if (ret)
+ return ret;
+ return count;
+}
+
+struct visor_busdev {
+ u32 bus_no;
+ u32 dev_no;
+};
+
+static int match_visorbus_dev_by_id(struct device *dev, void *data)
+{
+ struct visor_device *vdev = to_visor_device(dev);
+ struct visor_busdev *id = (struct visor_busdev *)data;
+ u32 bus_no = id->bus_no;
+ u32 dev_no = id->dev_no;
+
+ if ((vdev->chipset_bus_no == bus_no) &&
+ (vdev->chipset_dev_no == dev_no))
+ return 1;
+
+ return 0;
+}
+
+struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
+ struct visor_device *from)
+{
+ struct device *dev;
+ struct device *dev_start = NULL;
+ struct visor_device *vdev = NULL;
+ struct visor_busdev id = {
+ .bus_no = bus_no,
+ .dev_no = dev_no
+ };
+
+ if (from)
+ dev_start = &from->device;
+ dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
+ match_visorbus_dev_by_id);
+ if (dev)
+ vdev = to_visor_device(dev);
+ return vdev;
+}
+EXPORT_SYMBOL(visorbus_get_device_by_id);
+
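+/* Returns 1 only when every chipset event (see chipsetready_store()) has
+ * been reported, i.e. when the held CHIPSET_READY response may be sent.
+ */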
+static u8
+check_chipset_events(void)
+{
+ int i;
+ u8 send_msg = 1;
+ /* Check events to determine if response should be sent */
+ for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
+ send_msg &= chipset_events[i];
+ return send_msg;
+}
+
+static void
+clear_chipset_events(void)
+{
+ int i;
+ /* Clear chipset_events */
+ for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
+ chipset_events[i] = 0;
+}
+
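+/* Called by the visorbus driver to register (notifiers != NULL) or
+ * unregister (notifiers == NULL) its callbacks for bus/device events, and
+ * to retrieve the responder functions it must invoke when each operation
+ * completes.
+ */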
+void
+visorchipset_register_busdev(
+ struct visorchipset_busdev_notifiers *notifiers,
+ struct visorchipset_busdev_responders *responders,
+ struct ultra_vbus_deviceinfo *driver_info)
+{
+ down(&notifier_lock);
+ if (!notifiers) {
+ memset(&busdev_notifiers, 0,
+ sizeof(busdev_notifiers));
+ visorbusregistered = 0; /* clear flag */
+ } else {
+ busdev_notifiers = *notifiers;
+ visorbusregistered = 1; /* set flag */
+ }
+ if (responders)
+ *responders = busdev_responders;
+ if (driver_info)
+ bus_device_info_init(driver_info, "chipset", "visorchipset",
+ VERSION, NULL);
+
+ up(&notifier_lock);
+}
+EXPORT_SYMBOL_GPL(visorchipset_register_busdev);
+
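+/* Handle CONTROLVM_CHIPSET_INIT: may only run once; negotiates the
+ * parahotplug feature and sets the REPLY feature bit in the response.
+ */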
+static void
+chipset_init(struct controlvm_message *inmsg)
+{
+ static int chipset_inited;
+ enum ultra_chipset_feature features = 0;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ if (chipset_inited) {
+ rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ goto cleanup;
+ }
+ chipset_inited = 1;
+ POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
+
+	/* Set features to indicate we support parahotplug (if Command
+	 * also supports it).
+	 */
+	features = inmsg->cmd.init_chipset.features &
+		   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
+
+	/* Set the "reply" bit so Command knows this is a
+	 * features-aware driver.
+	 */
+ features |= ULTRA_CHIPSET_FEATURE_REPLY;
+
+cleanup:
+ if (inmsg->hdr.flags.response_expected)
+ controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
+}
+
+static void
+controlvm_init_response(struct controlvm_message *msg,
+ struct controlvm_message_header *msg_hdr, int response)
+{
+ memset(msg, 0, sizeof(struct controlvm_message));
+ memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
+ msg->hdr.payload_bytes = 0;
+ msg->hdr.payload_vm_offset = 0;
+ msg->hdr.payload_max_bytes = 0;
+ if (response < 0) {
+ msg->hdr.flags.failed = 1;
+		msg->hdr.completion_status = (u32)(-response);
+ }
+}
+
+static void
+controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
+{
+ struct controlvm_message outmsg;
+
+ controlvm_init_response(&outmsg, msg_hdr, response);
+ if (outmsg.hdr.flags.test_message == 1)
+ return;
+
+	visorchannel_signalinsert(controlvm_channel,
+				  CONTROLVM_QUEUE_REQUEST, &outmsg);
+}
+
+static void
+controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
+ int response,
+ enum ultra_chipset_feature features)
+{
+ struct controlvm_message outmsg;
+
+ controlvm_init_response(&outmsg, msg_hdr, response);
+ outmsg.cmd.init_chipset.features = features;
+	visorchannel_signalinsert(controlvm_channel,
+				  CONTROLVM_QUEUE_REQUEST, &outmsg);
+}
+
+static void controlvm_respond_physdev_changestate(
+ struct controlvm_message_header *msg_hdr, int response,
+ struct spar_segment_state state)
+{
+ struct controlvm_message outmsg;
+
+ controlvm_init_response(&outmsg, msg_hdr, response);
+ outmsg.cmd.device_change_state.state = state;
+ outmsg.cmd.device_change_state.flags.phys_device = 1;
+	visorchannel_signalinsert(controlvm_channel,
+				  CONTROLVM_QUEUE_REQUEST, &outmsg);
+}
+
+enum crash_obj_type {
+ CRASH_DEV,
+ CRASH_BUS,
+};
+
+static void
+bus_responder(enum controlvm_id cmd_id,
+ struct controlvm_message_header *pending_msg_hdr,
+ int response)
+{
+	if (!pending_msg_hdr)
+ return; /* no controlvm response needed */
+
+ if (pending_msg_hdr->id != (u32)cmd_id)
+ return;
+
+ controlvm_respond(pending_msg_hdr, response);
+}
+
+static void
+device_changestate_responder(enum controlvm_id cmd_id,
+ struct visor_device *p, int response,
+ struct spar_segment_state response_state)
+{
+ struct controlvm_message outmsg;
+ u32 bus_no = p->chipset_bus_no;
+ u32 dev_no = p->chipset_dev_no;
+
+	if (!p->pending_msg_hdr)
+ return; /* no controlvm response needed */
+ if (p->pending_msg_hdr->id != cmd_id)
+ return;
+
+ controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
+
+ outmsg.cmd.device_change_state.bus_no = bus_no;
+ outmsg.cmd.device_change_state.dev_no = dev_no;
+ outmsg.cmd.device_change_state.state = response_state;
+
+	visorchannel_signalinsert(controlvm_channel,
+				  CONTROLVM_QUEUE_REQUEST, &outmsg);
+}
+
+static void
+device_responder(enum controlvm_id cmd_id,
+ struct controlvm_message_header *pending_msg_hdr,
+ int response)
+{
+	if (!pending_msg_hdr)
+ return; /* no controlvm response needed */
+
+ if (pending_msg_hdr->id != (u32)cmd_id)
+ return;
+
+ controlvm_respond(pending_msg_hdr, response);
+}
+
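+/* Finish handling a bus-related controlvm message: stash a pending response
+ * header when a response is expected, invoke the registered bus notifier on
+ * success, and otherwise respond directly via bus_responder().
+ */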
+static void
+bus_epilog(struct visor_device *bus_info,
+ u32 cmd, struct controlvm_message_header *msg_hdr,
+ int response, bool need_response)
+{
+ bool notified = false;
+ struct controlvm_message_header *pmsg_hdr = NULL;
+
+ if (!bus_info) {
+		/* Relying on a valid passed-in response code;
+		 * reuse the incoming msg_hdr for this failure response.
+		 */
+ pmsg_hdr = msg_hdr;
+ goto away;
+ }
+
+ if (bus_info->pending_msg_hdr) {
+ /* only non-NULL if dev is still waiting on a response */
+ response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
+ pmsg_hdr = bus_info->pending_msg_hdr;
+ goto away;
+ }
+
+ if (need_response) {
+ pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+ if (!pmsg_hdr) {
+ response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto away;
+ }
+
+ memcpy(pmsg_hdr, msg_hdr,
+ sizeof(struct controlvm_message_header));
+ bus_info->pending_msg_hdr = pmsg_hdr;
+ }
+
+ down(&notifier_lock);
+ if (response == CONTROLVM_RESP_SUCCESS) {
+ switch (cmd) {
+ case CONTROLVM_BUS_CREATE:
+ if (busdev_notifiers.bus_create) {
+ (*busdev_notifiers.bus_create) (bus_info);
+ notified = true;
+ }
+ break;
+ case CONTROLVM_BUS_DESTROY:
+ if (busdev_notifiers.bus_destroy) {
+ (*busdev_notifiers.bus_destroy) (bus_info);
+ notified = true;
+ }
+ break;
+ }
+ }
+away:
+	/*
+	 * If a notifier was called above, it is responsible for calling the
+	 * appropriate visorchipset_busdev_responders function, which will
+	 * call bus_responder() and kfree() pmsg_hdr there. On this failure
+	 * path we respond directly and must not kfree(pmsg_hdr).
+	 */
+	if (!notified)
+		bus_responder(cmd, pmsg_hdr, response);
+	up(&notifier_lock);
+}
+
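+/* Device-side counterpart of bus_epilog(): invokes the registered device
+ * notifier (create/destroy, or pause/resume on a CHANGESTATE matching the
+ * standby/running segment states) or responds directly on failure.
+ */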
+static void
+device_epilog(struct visor_device *dev_info,
+ struct spar_segment_state state, u32 cmd,
+ struct controlvm_message_header *msg_hdr, int response,
+ bool need_response, bool for_visorbus)
+{
+ struct visorchipset_busdev_notifiers *notifiers;
+ bool notified = false;
+ struct controlvm_message_header *pmsg_hdr = NULL;
+
+ notifiers = &busdev_notifiers;
+
+ if (!dev_info) {
+		/* Relying on a valid passed-in response code;
+		 * reuse the incoming msg_hdr for this failure response.
+		 */
+ pmsg_hdr = msg_hdr;
+ goto away;
+ }
+
+ if (dev_info->pending_msg_hdr) {
+ /* only non-NULL if dev is still waiting on a response */
+ response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
+ pmsg_hdr = dev_info->pending_msg_hdr;
+ goto away;
+ }
+
+ if (need_response) {
+ pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+ if (!pmsg_hdr) {
+ response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto away;
+ }
+
+ memcpy(pmsg_hdr, msg_hdr,
+ sizeof(struct controlvm_message_header));
+ dev_info->pending_msg_hdr = pmsg_hdr;
+ }
+
+ down(&notifier_lock);
+ if (response >= 0) {
+ switch (cmd) {
+ case CONTROLVM_DEVICE_CREATE:
+ if (notifiers->device_create) {
+ (*notifiers->device_create) (dev_info);
+ notified = true;
+ }
+ break;
+ case CONTROLVM_DEVICE_CHANGESTATE:
+ /* ServerReady / ServerRunning / SegmentStateRunning */
+ if (state.alive == segment_state_running.alive &&
+ state.operating ==
+ segment_state_running.operating) {
+ if (notifiers->device_resume) {
+ (*notifiers->device_resume) (dev_info);
+ notified = true;
+ }
+ }
+ /* ServerNotReady / ServerLost / SegmentStateStandby */
+ else if (state.alive == segment_state_standby.alive &&
+ state.operating ==
+ segment_state_standby.operating) {
+ /* technically this is standby case
+ * where server is lost
+ */
+ if (notifiers->device_pause) {
+ (*notifiers->device_pause) (dev_info);
+ notified = true;
+ }
+ }
+ break;
+ case CONTROLVM_DEVICE_DESTROY:
+ if (notifiers->device_destroy) {
+ (*notifiers->device_destroy) (dev_info);
+ notified = true;
+ }
+ break;
+ }
+ }
+away:
+	/*
+	 * If a notifier was called above, it is responsible for calling the
+	 * appropriate visorchipset_busdev_responders function, which will
+	 * call device_responder() and kfree() pmsg_hdr there. On this failure
+	 * path we respond directly and must not kfree(pmsg_hdr).
+	 */
+	if (!notified)
+		device_responder(cmd, pmsg_hdr, response);
+	up(&notifier_lock);
+}
+
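+/* Handle CONTROLVM_BUS_CREATE: allocate a visor_device for the new bus and
+ * attach a visorchannel mapped from the address/size given in the message.
+ */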
+static void
+bus_create(struct controlvm_message *inmsg)
+{
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ u32 bus_no = cmd->create_bus.bus_no;
+ int rc = CONTROLVM_RESP_SUCCESS;
+ struct visor_device *bus_info;
+ struct visorchannel *visorchannel;
+
+ bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
+ if (bus_info && (bus_info->state.created == 1)) {
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ goto cleanup;
+ }
+ bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
+ if (!bus_info) {
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto cleanup;
+ }
+
+ INIT_LIST_HEAD(&bus_info->list_all);
+ bus_info->chipset_bus_no = bus_no;
+ bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
+
+ POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
+
+ visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
+ cmd->create_bus.channel_bytes,
+ GFP_KERNEL,
+ cmd->create_bus.bus_data_type_uuid);
+
+ if (!visorchannel) {
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ kfree(bus_info);
+ bus_info = NULL;
+ goto cleanup;
+ }
+ bus_info->visorchannel = visorchannel;
+
+ POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
+
+cleanup:
+ bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
+ rc, inmsg->hdr.flags.response_expected == 1);
+}
+
+static void
+bus_destroy(struct controlvm_message *inmsg)
+{
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ u32 bus_no = cmd->destroy_bus.bus_no;
+ struct visor_device *bus_info;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
+ if (!bus_info)
+ rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ else if (bus_info->state.created == 0)
+ rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+
+ bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
+ rc, inmsg->hdr.flags.response_expected == 1);
+
+ /* bus_info is freed as part of the busdevice_release function */
+}
+
+static void
+bus_configure(struct controlvm_message *inmsg,
+ struct parser_context *parser_ctx)
+{
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ u32 bus_no;
+ struct visor_device *bus_info;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ bus_no = cmd->configure_bus.bus_no;
+ POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
+ POSTCODE_SEVERITY_INFO);
+
+ bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
+ if (!bus_info) {
+ POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ } else if (bus_info->state.created == 0) {
+ POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ } else if (bus_info->pending_msg_hdr != NULL) {
+ POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
+ } else {
+ visorchannel_set_clientpartition(bus_info->visorchannel,
+ cmd->configure_bus.guest_handle);
+ bus_info->partition_uuid = parser_id_get(parser_ctx);
+ parser_param_start(parser_ctx, PARSERSTRING_NAME);
+ bus_info->name = parser_string_get(parser_ctx);
+
+ POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
+ POSTCODE_SEVERITY_INFO);
+ }
+ bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
+ rc, inmsg->hdr.flags.response_expected == 1);
+}
+
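+/* Handle CONTROLVM_DEVICE_CREATE: validate the parent bus, allocate the
+ * visor_device, and attach the device's visorchannel described in the
+ * message.
+ */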
+static void
+my_device_create(struct controlvm_message *inmsg)
+{
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ u32 bus_no = cmd->create_device.bus_no;
+ u32 dev_no = cmd->create_device.dev_no;
+ struct visor_device *dev_info = NULL;
+ struct visor_device *bus_info;
+ struct visorchannel *visorchannel;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
+ if (!bus_info) {
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ goto cleanup;
+ }
+
+ if (bus_info->state.created == 0) {
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ goto cleanup;
+ }
+
+ dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
+ if (dev_info && (dev_info->state.created == 1)) {
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ goto cleanup;
+ }
+
+ dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
+ if (!dev_info) {
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto cleanup;
+ }
+
+ dev_info->chipset_bus_no = bus_no;
+ dev_info->chipset_dev_no = dev_no;
+ dev_info->inst = cmd->create_device.dev_inst_uuid;
+
+	/* the bus is the natural parent for this device */
+ dev_info->device.parent = &bus_info->device;
+
+ POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_INFO);
+
+ visorchannel = visorchannel_create(cmd->create_device.channel_addr,
+ cmd->create_device.channel_bytes,
+ GFP_KERNEL,
+ cmd->create_device.data_type_uuid);
+
+ if (!visorchannel) {
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ kfree(dev_info);
+ dev_info = NULL;
+ goto cleanup;
+ }
+ dev_info->visorchannel = visorchannel;
+ dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
+ POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_INFO);
+cleanup:
+ device_epilog(dev_info, segment_state_running,
+ CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
+ inmsg->hdr.flags.response_expected == 1, 1);
+}
+
+static void
+my_device_changestate(struct controlvm_message *inmsg)
+{
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ u32 bus_no = cmd->device_change_state.bus_no;
+ u32 dev_no = cmd->device_change_state.dev_no;
+ struct spar_segment_state state = cmd->device_change_state.state;
+ struct visor_device *dev_info;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
+ if (!dev_info) {
+ POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ } else if (dev_info->state.created == 0) {
+ POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ }
+ if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
+ device_epilog(dev_info, state,
+ CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
+ inmsg->hdr.flags.response_expected == 1, 1);
+}
+
+static void
+my_device_destroy(struct controlvm_message *inmsg)
+{
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ u32 bus_no = cmd->destroy_device.bus_no;
+ u32 dev_no = cmd->destroy_device.dev_no;
+ struct visor_device *dev_info;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
+ if (!dev_info)
+ rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ else if (dev_info->state.created == 0)
+ rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+
+ if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
+ device_epilog(dev_info, segment_state_running,
+ CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
+ inmsg->hdr.flags.response_expected == 1, 1);
+}
+
+/* When provided with the physical address of the controlvm channel
+ * (phys_addr), the offset to the payload area we need to manage
+ * (offset), and the size of this payload area (bytes), fills in the
+ * controlvm_payload_info struct. Returns CONTROLVM_RESP_SUCCESS for
+ * success or a negative CONTROLVM_RESP_ERROR code for failure.
+ */
+static int
+initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
+ struct visor_controlvm_payload_info *info)
+{
+ u8 __iomem *payload = NULL;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ if (!info) {
+ rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
+ goto cleanup;
+ }
+ memset(info, 0, sizeof(struct visor_controlvm_payload_info));
+ if ((offset == 0) || (bytes == 0)) {
+ rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
+ goto cleanup;
+ }
+ payload = ioremap_cache(phys_addr + offset, bytes);
+ if (!payload) {
+ rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
+ goto cleanup;
+ }
+
+ info->offset = offset;
+ info->bytes = bytes;
+ info->ptr = payload;
+
+cleanup:
+ if (rc < 0) {
+ if (payload) {
+ iounmap(payload);
+ payload = NULL;
+ }
+ }
+ return rc;
+}
+
+static void
+destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
+{
+ if (info->ptr) {
+ iounmap(info->ptr);
+ info->ptr = NULL;
+ }
+ memset(info, 0, sizeof(struct visor_controlvm_payload_info));
+}
+
+static void
+initialize_controlvm_payload(void)
+{
+ u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
+ u64 payload_offset = 0;
+ u32 payload_bytes = 0;
+
+ if (visorchannel_read(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ request_payload_offset),
+ &payload_offset, sizeof(payload_offset)) < 0) {
+ POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+ if (visorchannel_read(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ request_payload_bytes),
+ &payload_bytes, sizeof(payload_bytes)) < 0) {
+ POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+ initialize_controlvm_payload_info(phys_addr,
+ payload_offset, payload_bytes,
+ &controlvm_payload_info);
+}
+
+/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
+ * Returns CONTROLVM_RESP_xxx code.
+ */
+static int
+visorchipset_chipset_ready(void)
+{
+ kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
+ return CONTROLVM_RESP_SUCCESS;
+}
+
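+/* Send a KOBJ_CHANGE uevent with SPARSP_SELFTEST=1 for
+ * DEVPATH=/sys/devices/platform/visorchipset.
+ * Returns CONTROLVM_RESP_xxx code.
+ */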
+static int
+visorchipset_chipset_selftest(void)
+{
+ char env_selftest[20];
+ char *envp[] = { env_selftest, NULL };
+
+ sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
+ kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
+ envp);
+ return CONTROLVM_RESP_SUCCESS;
+}
+
+/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
+ * Returns CONTROLVM_RESP_xxx code.
+ */
+static int
+visorchipset_chipset_notready(void)
+{
+ kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
+ return CONTROLVM_RESP_SUCCESS;
+}
+
+static void
+chipset_ready(struct controlvm_message_header *msg_hdr)
+{
+ int rc = visorchipset_chipset_ready();
+
+ if (rc != CONTROLVM_RESP_SUCCESS)
+ rc = -rc;
+ if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
+ controlvm_respond(msg_hdr, rc);
+ if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
+ /* Send CHIPSET_READY response when all modules have been loaded
+ * and disks mounted for the partition
+ */
+ g_chipset_msg_hdr = *msg_hdr;
+ }
+}
+
+static void
+chipset_selftest(struct controlvm_message_header *msg_hdr)
+{
+ int rc = visorchipset_chipset_selftest();
+
+ if (rc != CONTROLVM_RESP_SUCCESS)
+ rc = -rc;
+ if (msg_hdr->flags.response_expected)
+ controlvm_respond(msg_hdr, rc);
+}
+
+static void
+chipset_notready(struct controlvm_message_header *msg_hdr)
+{
+ int rc = visorchipset_chipset_notready();
+
+ if (rc != CONTROLVM_RESP_SUCCESS)
+ rc = -rc;
+ if (msg_hdr->flags.response_expected)
+ controlvm_respond(msg_hdr, rc);
+}
+
+/* This is your "one-stop" shop for grabbing the next message from the
+ * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
+ */
+static bool
+read_controlvm_event(struct controlvm_message *msg)
+{
+ if (visorchannel_signalremove(controlvm_channel,
+ CONTROLVM_QUEUE_EVENT, msg)) {
+ /* got a message */
+ if (msg->hdr.flags.test_message == 1)
+ return false;
+ return true;
+ }
+ return false;
+}
+
+/*
+ * The general parahotplug flow works as follows. The visorchipset
+ * driver receives a DEVICE_CHANGESTATE message from Command
+ * specifying a physical device to enable or disable. The CONTROLVM
+ * message handler calls parahotplug_process_message, which then adds
+ * the message to a global list and kicks off a udev event which
+ * causes a user level script to enable or disable the specified
+ * device. The udev script then writes to the parahotplug/deviceenabled
+ * or parahotplug/devicedisabled sysfs attribute, which causes
+ * parahotplug_request_complete to be called, at which point the
+ * appropriate CONTROLVM message is retrieved from the list and
+ * responded to.
+ */
+
+#define PARAHOTPLUG_TIMEOUT_MS 2000
+
+/*
+ * Generate a unique int to match an outstanding CONTROLVM message with a
+ * udev script response
+ */
+static int
+parahotplug_next_id(void)
+{
+ static atomic_t id = ATOMIC_INIT(0);
+
+ return atomic_inc_return(&id);
+}
+
+/*
+ * Returns the time (in jiffies) when a CONTROLVM message on the list
+ * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
+ */
+static unsigned long
+parahotplug_next_expiration(void)
+{
+ return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
+}
+
+/*
+ * Create a parahotplug_request, which is basically a wrapper for a
+ * CONTROLVM_MESSAGE that we can stick on a list
+ */
+static struct parahotplug_request *
+parahotplug_request_create(struct controlvm_message *msg)
+{
+ struct parahotplug_request *req;
+
+ req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
+ if (!req)
+ return NULL;
+
+ req->id = parahotplug_next_id();
+ req->expiration = parahotplug_next_expiration();
+ req->msg = *msg;
+
+ return req;
+}
+
+/*
+ * Free a parahotplug_request.
+ */
+static void
+parahotplug_request_destroy(struct parahotplug_request *req)
+{
+ kfree(req);
+}
+
+/*
+ * Cause uevent to run the user level script to do the disable/enable
+ * specified in (the CONTROLVM message in) the specified
+ * parahotplug_request
+ */
+static void
+parahotplug_request_kickoff(struct parahotplug_request *req)
+{
+ struct controlvm_message_packet *cmd = &req->msg.cmd;
+ char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
+ env_func[40];
+ char *envp[] = {
+ env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
+ };
+
+ sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
+ sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
+ sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
+ cmd->device_change_state.state.active);
+ sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
+ cmd->device_change_state.bus_no);
+ sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
+ cmd->device_change_state.dev_no >> 3);
+ sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
+ cmd->device_change_state.dev_no & 0x7);
+
+ kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
+ envp);
+}
+
+/*
+ * Remove any request from the list that's been on there too long and
+ * respond with an error.
+ */
+static void
+parahotplug_process_list(void)
+{
+ struct list_head *pos;
+ struct list_head *tmp;
+
+ spin_lock(&parahotplug_request_list_lock);
+
+ list_for_each_safe(pos, tmp, &parahotplug_request_list) {
+ struct parahotplug_request *req =
+ list_entry(pos, struct parahotplug_request, list);
+
+ if (!time_after_eq(jiffies, req->expiration))
+ continue;
+
+ list_del(pos);
+ if (req->msg.hdr.flags.response_expected)
+ controlvm_respond_physdev_changestate(
+ &req->msg.hdr,
+ CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
+ req->msg.cmd.device_change_state.state);
+ parahotplug_request_destroy(req);
+ }
+
+ spin_unlock(&parahotplug_request_list_lock);
+}
+
+/*
+ * Called from the devicedisabled/deviceenabled store handlers, which means
+ * the user script has finished the enable/disable. Find the matching
+ * identifier, and respond to the CONTROLVM message with success.
+ */
+static int
+parahotplug_request_complete(int id, u16 active)
+{
+ struct list_head *pos;
+ struct list_head *tmp;
+
+ spin_lock(&parahotplug_request_list_lock);
+
+ /* Look for a request matching "id". */
+ list_for_each_safe(pos, tmp, &parahotplug_request_list) {
+ struct parahotplug_request *req =
+ list_entry(pos, struct parahotplug_request, list);
+ if (req->id == id) {
+ /* Found a match. Remove it from the list and
+ * respond.
+ */
+ list_del(pos);
+ spin_unlock(&parahotplug_request_list_lock);
+ req->msg.cmd.device_change_state.state.active = active;
+ if (req->msg.hdr.flags.response_expected)
+ controlvm_respond_physdev_changestate(
+ &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
+ req->msg.cmd.device_change_state.state);
+ parahotplug_request_destroy(req);
+ return 0;
+ }
+ }
+
+ spin_unlock(&parahotplug_request_list_lock);
+ return -1;
+}
+
+/*
+ * Enables or disables a PCI device by kicking off a udev script
+ */
+static void
+parahotplug_process_message(struct controlvm_message *inmsg)
+{
+ struct parahotplug_request *req;
+
+ req = parahotplug_request_create(inmsg);
+
+ if (!req)
+ return;
+
+ if (inmsg->cmd.device_change_state.state.active) {
+ /* For enable messages, just respond with success
+ * right away. This is a bit of a hack, but there are
+ * issues with the early enable messages we get (with
+ * either the udev script not detecting that the device
+ * is up, or not getting called at all). Fortunately
+ * the messages that get lost don't matter anyway, as
+ * devices are automatically enabled at
+ * initialization.
+ */
+ parahotplug_request_kickoff(req);
+ controlvm_respond_physdev_changestate(&inmsg->hdr,
+ CONTROLVM_RESP_SUCCESS,
+ inmsg->cmd.device_change_state.state);
+ parahotplug_request_destroy(req);
+ } else {
+ /* For disable messages, add the request to the
+ * request list before kicking off the udev script. It
+ * won't get responded to until the script has
+ * indicated it's done.
+ */
+ spin_lock(&parahotplug_request_list_lock);
+ list_add_tail(&req->list, &parahotplug_request_list);
+ spin_unlock(&parahotplug_request_list_lock);
+
+ parahotplug_request_kickoff(req);
+ }
+}
+
+/* Process a controlvm message.
+ * Return result:
+ * false - this function will return false only in the case where the
+ * controlvm message was NOT processed, but processing must be
+ * retried before reading the next controlvm message; a
+ * scenario where this can occur is when we need to throttle
+ * the allocation of memory in which to copy out controlvm
+ * payload data
+ * true - processing of the controlvm message completed,
+ * either successfully or with an error.
+ */
+static bool
+handle_command(struct controlvm_message inmsg, u64 channel_addr)
+{
+ struct controlvm_message_packet *cmd = &inmsg.cmd;
+ u64 parm_addr;
+ u32 parm_bytes;
+ struct parser_context *parser_ctx = NULL;
+ bool local_addr;
+ struct controlvm_message ackmsg;
+
+ /* create parsing context if necessary */
+ local_addr = (inmsg.hdr.flags.test_message == 1);
+ if (channel_addr == 0)
+ return true;
+ parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
+ parm_bytes = inmsg.hdr.payload_bytes;
+
+ /* Parameter and channel addresses within test messages actually lie
+ * within our OS-controlled memory. We need to know that, because it
+ * makes a difference in how we compute the virtual address.
+ */
+ if (parm_addr && parm_bytes) {
+ bool retry = false;
+
+ parser_ctx =
+ parser_init_byte_stream(parm_addr, parm_bytes,
+ local_addr, &retry);
+ if (!parser_ctx && retry)
+ return false;
+ }
+
+ if (!local_addr) {
+ controlvm_init_response(&ackmsg, &inmsg.hdr,
+ CONTROLVM_RESP_SUCCESS);
+ if (controlvm_channel)
+ visorchannel_signalinsert(controlvm_channel,
+ CONTROLVM_QUEUE_ACK,
+ &ackmsg);
+ }
+ switch (inmsg.hdr.id) {
+ case CONTROLVM_CHIPSET_INIT:
+ chipset_init(&inmsg);
+ break;
+ case CONTROLVM_BUS_CREATE:
+ bus_create(&inmsg);
+ break;
+ case CONTROLVM_BUS_DESTROY:
+ bus_destroy(&inmsg);
+ break;
+ case CONTROLVM_BUS_CONFIGURE:
+ bus_configure(&inmsg, parser_ctx);
+ break;
+ case CONTROLVM_DEVICE_CREATE:
+ my_device_create(&inmsg);
+ break;
+	case CONTROLVM_DEVICE_CHANGESTATE:
+		if (cmd->device_change_state.flags.phys_device) {
+			parahotplug_process_message(&inmsg);
+		} else {
+			/* save the hdr and cmd structures for later use
+			 * when sending back the response to Command
+			 */
+			my_device_changestate(&inmsg);
+			g_devicechangestate_packet = inmsg.cmd;
+		}
+		break;
+ case CONTROLVM_DEVICE_DESTROY:
+ my_device_destroy(&inmsg);
+ break;
+ case CONTROLVM_DEVICE_CONFIGURE:
+		/* no-op for now; just respond that we succeeded */
+ if (inmsg.hdr.flags.response_expected)
+ controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
+ break;
+ case CONTROLVM_CHIPSET_READY:
+ chipset_ready(&inmsg.hdr);
+ break;
+ case CONTROLVM_CHIPSET_SELFTEST:
+ chipset_selftest(&inmsg.hdr);
+ break;
+ case CONTROLVM_CHIPSET_STOP:
+ chipset_notready(&inmsg.hdr);
+ break;
+ default:
+ if (inmsg.hdr.flags.response_expected)
+ controlvm_respond(&inmsg.hdr,
+ -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
+ break;
+ }
+
+ if (parser_ctx) {
+ parser_done(parser_ctx);
+ parser_ctx = NULL;
+ }
+ return true;
+}
+
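+/* Ask the hypervisor, via VMCALL, for the physical address and size of the
+ * controlvm channel.
+ */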
+static inline unsigned int
+issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
+{
+ struct vmcall_io_controlvm_addr_params params;
+ int result = VMCALL_SUCCESS;
+ u64 physaddr;
+
+ physaddr = virt_to_phys(&params);
+ ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
+ if (VMCALL_SUCCESSFUL(result)) {
+ *control_addr = params.address;
+ *control_bytes = params.channel_bytes;
+ }
+ return result;
+}
+
+static u64 controlvm_get_channel_address(void)
+{
+ u64 addr = 0;
+ u32 size = 0;
+
+ if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
+ return 0;
+
+ return addr;
+}
+
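+/* Main polling loop for the controlvm channel, run as delayed work: sends
+ * any held CHIPSET_READY response, processes queued controlvm messages
+ * (retrying one that was previously throttled), and slows the polling rate
+ * after MIN_IDLE_SECONDS without traffic.
+ */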
+static void
+controlvm_periodic_work(struct work_struct *work)
+{
+ struct controlvm_message inmsg;
+ bool got_command = false;
+ bool handle_command_failed = false;
+ static u64 poll_count;
+
+ /* make sure visorbus server is registered for controlvm callbacks */
+ if (visorchipset_visorbusregwait && !visorbusregistered)
+ goto cleanup;
+
+	poll_count++;
+	if (poll_count < 250)
+		goto cleanup;	/* skip processing until we have polled 250 times */
+
+ /* Check events to determine if response to CHIPSET_READY
+ * should be sent
+ */
+ if (visorchipset_holdchipsetready &&
+ (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
+ if (check_chipset_events() == 1) {
+ controlvm_respond(&g_chipset_msg_hdr, 0);
+ clear_chipset_events();
+ memset(&g_chipset_msg_hdr, 0,
+ sizeof(struct controlvm_message_header));
+ }
+ }
+
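+	/* drain and discard any messages on the response queue */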
+ while (visorchannel_signalremove(controlvm_channel,
+ CONTROLVM_QUEUE_RESPONSE,
+ &inmsg))
+ ;
+ if (!got_command) {
+ if (controlvm_pending_msg_valid) {
+ /* we throttled processing of a prior
+ * msg, so try to process it again
+ * rather than reading a new one
+ */
+ inmsg = controlvm_pending_msg;
+ controlvm_pending_msg_valid = false;
+ got_command = true;
+ } else {
+ got_command = read_controlvm_event(&inmsg);
+ }
+ }
+
+ handle_command_failed = false;
+ while (got_command && (!handle_command_failed)) {
+ most_recent_message_jiffies = jiffies;
+ if (handle_command(inmsg,
+ visorchannel_get_physaddr
+ (controlvm_channel)))
+ got_command = read_controlvm_event(&inmsg);
+ else {
+ /* this is a scenario where throttling
+ * is required, but probably NOT an
+ * error...; we stash the current
+ * controlvm msg so we will attempt to
+ * reprocess it on our next loop
+ */
+ handle_command_failed = true;
+ controlvm_pending_msg = inmsg;
+ controlvm_pending_msg_valid = true;
+ }
+ }
+
+ /* parahotplug_worker */
+ parahotplug_process_list();
+
+cleanup:
+
+ if (time_after(jiffies,
+ most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
+ /* it's been longer than MIN_IDLE_SECONDS since we
+ * processed our last controlvm message; slow down the
+ * polling
+ */
+ if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
+ poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
+ } else {
+ if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
+ poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ }
+
+ queue_delayed_work(periodic_controlvm_workqueue,
+ &periodic_controlvm_work, poll_jiffies);
+}
+
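+/* In a kdump kernel, replay the saved CREATE bus/device messages from the
+ * controlvm channel so that the storage device needed to write the dump is
+ * recreated.
+ */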
+static void
+setup_crash_devices_work_queue(struct work_struct *work)
+{
+ struct controlvm_message local_crash_bus_msg;
+ struct controlvm_message local_crash_dev_msg;
+ struct controlvm_message msg;
+ u32 local_crash_msg_offset;
+ u16 local_crash_msg_count;
+
+ /* make sure visorbus is registered for controlvm callbacks */
+ if (visorchipset_visorbusregwait && !visorbusregistered)
+ goto cleanup;
+
+ POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+
+ /* send init chipset msg */
+ msg.hdr.id = CONTROLVM_CHIPSET_INIT;
+ msg.cmd.init_chipset.bus_count = 23;
+ msg.cmd.init_chipset.switch_count = 0;
+
+ chipset_init(&msg);
+
+ /* get saved message count */
+ if (visorchannel_read(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ saved_crash_message_count),
+ &local_crash_msg_count, sizeof(u16)) < 0) {
+ POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
+ POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
+ local_crash_msg_count,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* get saved crash message offset */
+ if (visorchannel_read(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ saved_crash_message_offset),
+ &local_crash_msg_offset, sizeof(u32)) < 0) {
+ POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* read create device message for storage bus offset */
+ if (visorchannel_read(controlvm_channel,
+ local_crash_msg_offset,
+ &local_crash_bus_msg,
+ sizeof(struct controlvm_message)) < 0) {
+ POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* read create device message for storage device */
+ if (visorchannel_read(controlvm_channel,
+ local_crash_msg_offset +
+ sizeof(struct controlvm_message),
+ &local_crash_dev_msg,
+ sizeof(struct controlvm_message)) < 0) {
+ POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* reuse IOVM create bus message */
+ if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
+ bus_create(&local_crash_bus_msg);
+ } else {
+ POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* reuse create device message for storage device */
+ if (local_crash_dev_msg.cmd.create_device.channel_addr) {
+ my_device_create(&local_crash_dev_msg);
+ } else {
+ POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+ POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ return;
+
+cleanup:
+
+ poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
+
+ queue_delayed_work(periodic_controlvm_workqueue,
+ &periodic_controlvm_work, poll_jiffies);
+}
+
+static void
+bus_create_response(struct visor_device *bus_info, int response)
+{
+ if (response >= 0)
+ bus_info->state.created = 1;
+
+ bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
+ response);
+
+ kfree(bus_info->pending_msg_hdr);
+ bus_info->pending_msg_hdr = NULL;
+}
+
+static void
+bus_destroy_response(struct visor_device *bus_info, int response)
+{
+ bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
+ response);
+
+ kfree(bus_info->pending_msg_hdr);
+ bus_info->pending_msg_hdr = NULL;
+}
+
+static void
+device_create_response(struct visor_device *dev_info, int response)
+{
+ if (response >= 0)
+ dev_info->state.created = 1;
+
+ device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
+ response);
+
+	kfree(dev_info->pending_msg_hdr);
+	dev_info->pending_msg_hdr = NULL;
+}
+
+static void
+device_destroy_response(struct visor_device *dev_info, int response)
+{
+ device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
+ response);
+
+ kfree(dev_info->pending_msg_hdr);
+ dev_info->pending_msg_hdr = NULL;
+}
+
+static void
+visorchipset_device_pause_response(struct visor_device *dev_info,
+ int response)
+{
+ device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
+ dev_info, response,
+ segment_state_standby);
+
+ kfree(dev_info->pending_msg_hdr);
+ dev_info->pending_msg_hdr = NULL;
+}
+
+static void
+device_resume_response(struct visor_device *dev_info, int response)
+{
+ device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
+ dev_info, response,
+ segment_state_running);
+
+ kfree(dev_info->pending_msg_hdr);
+ dev_info->pending_msg_hdr = NULL;
+}
+
+static ssize_t chipsetready_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ char msgtype[64];
+
+ if (sscanf(buf, "%63s", msgtype) != 1)
+ return -EINVAL;
+
+ if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
+ chipset_events[0] = 1;
+ return count;
+ } else if (!strcmp(msgtype, "MODULES_LOADED")) {
+ chipset_events[1] = 1;
+ return count;
+ }
+ return -EINVAL;
+}
+
+/* The parahotplug/devicedisabled interface gets called by our support script
+ * when an SR-IOV device has been shut down. The ID is passed to the script
+ * and then passed back when the device has been removed.
+ */
+static ssize_t devicedisabled_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int id;
+
+ if (kstrtouint(buf, 10, &id))
+ return -EINVAL;
+
+ parahotplug_request_complete(id, 0);
+ return count;
+}
+
+/* The parahotplug/deviceenabled interface gets called by our support script
+ * when an SR-IOV device has been recovered. The ID is passed to the script
+ * and then passed back when the device has been brought back up.
+ */
+static ssize_t deviceenabled_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int id;
+
+ if (kstrtouint(buf, 10, &id))
+ return -EINVAL;
+
+ parahotplug_request_complete(id, 1);
+ return count;
+}
+
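+/* Map the GP control channel into user space; only the page-aligned
+ * VISORCHIPSET_MMAP_CONTROLCHANOFFSET offset is supported.
+ */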
+static int
+visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long physaddr = 0;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ u64 addr = 0;
+
+ if (offset & (PAGE_SIZE - 1))
+ return -ENXIO; /* need aligned offsets */
+
+ switch (offset) {
+ case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
+ vma->vm_flags |= VM_IO;
+ if (!*file_controlvm_channel)
+ return -ENXIO;
+
+ visorchannel_read(*file_controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ gp_control_channel),
+ &addr, sizeof(addr));
+ if (!addr)
+ return -ENXIO;
+
+ physaddr = (unsigned long)addr;
+ if (remap_pfn_range(vma, vma->vm_start,
+ physaddr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ /*pgprot_noncached */
+ (vma->vm_page_prot))) {
+ return -EAGAIN;
+ }
+ break;
+ default:
+ return -ENXIO;
+ }
+ return 0;
+}
+
+static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
+{
+ u64 result = VMCALL_SUCCESS;
+ u64 physaddr = 0;
+
+ ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
+ result);
+ return result;
+}
+
+static inline int issue_vmcall_update_physical_time(u64 adjustment)
+{
+ int result = VMCALL_SUCCESS;
+
+ ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
+ return result;
+}
+
+static long visorchipset_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ s64 adjustment;
+ s64 vrtc_offset;
+
+ switch (cmd) {
+ case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
+ /* get the physical rtc offset */
+ vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
+ if (copy_to_user((void __user *)arg, &vrtc_offset,
+ sizeof(vrtc_offset))) {
+ return -EFAULT;
+ }
+ return 0;
+ case VMCALL_UPDATE_PHYSICAL_TIME:
+ if (copy_from_user(&adjustment, (void __user *)arg,
+ sizeof(adjustment))) {
+ return -EFAULT;
+ }
+ return issue_vmcall_update_physical_time(adjustment);
+ default:
+ return -EFAULT;
+ }
+}
+
+static const struct file_operations visorchipset_fops = {
+	.owner = THIS_MODULE,
+	.open = visorchipset_open,
+	.unlocked_ioctl = visorchipset_ioctl,
+	.release = visorchipset_release,
+	.mmap = visorchipset_mmap,
+};
+
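+/* Register the /dev/visorchipset character device, allocating a dynamic
+ * major number when none was supplied via the 'major' module parameter.
+ */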
+static int
+visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
+{
+ int rc = 0;
+
+ file_controlvm_channel = controlvm_channel;
+ cdev_init(&file_cdev, &visorchipset_fops);
+ file_cdev.owner = THIS_MODULE;
+ if (MAJOR(major_dev) == 0) {
+ rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
+ /* dynamic major device number registration required */
+ if (rc < 0)
+ return rc;
+ } else {
+ /* static major device number registration required */
+ rc = register_chrdev_region(major_dev, 1, "visorchipset");
+ if (rc < 0)
+ return rc;
+ }
+ rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
+ if (rc < 0) {
+ unregister_chrdev_region(major_dev, 1);
+ return rc;
+ }
+ return 0;
+}
+
+static int
+visorchipset_init(struct acpi_device *acpi_device)
+{
+ int rc = 0;
+ u64 addr;
+ int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
+ uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
+
+ addr = controlvm_get_channel_address();
+ if (!addr)
+ return -ENODEV;
+
+ memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
+ memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
+
+	controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz,
+							  GFP_KERNEL, uuid);
+	if (!controlvm_channel)
+		return -ENODEV;
+
+	if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
+			visorchannel_get_header(controlvm_channel))) {
+ initialize_controlvm_payload();
+ } else {
+ visorchannel_destroy(controlvm_channel);
+ controlvm_channel = NULL;
+ return -ENODEV;
+ }
+
+ major_dev = MKDEV(visorchipset_major, 0);
+ rc = visorchipset_file_init(major_dev, &controlvm_channel);
+ if (rc < 0) {
+ POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
+ goto cleanup;
+ }
+
+ memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
+
+ /* if booting in a crash kernel */
+ if (is_kdump_kernel())
+ INIT_DELAYED_WORK(&periodic_controlvm_work,
+ setup_crash_devices_work_queue);
+ else
+ INIT_DELAYED_WORK(&periodic_controlvm_work,
+ controlvm_periodic_work);
+ periodic_controlvm_workqueue =
+ create_singlethread_workqueue("visorchipset_controlvm");
+
+ if (!periodic_controlvm_workqueue) {
+ POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
+ DIAG_SEVERITY_ERR);
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ most_recent_message_jiffies = jiffies;
+ poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ rc = queue_delayed_work(periodic_controlvm_workqueue,
+ &periodic_controlvm_work, poll_jiffies);
+ if (rc < 0) {
+ POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
+ DIAG_SEVERITY_ERR);
+ goto cleanup;
+ }
+
+ visorchipset_platform_device.dev.devt = major_dev;
+ if (platform_device_register(&visorchipset_platform_device) < 0) {
+ POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
+		rc = -ENODEV;
+ goto cleanup;
+ }
+ POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
+
+ rc = visorbus_init();
+cleanup:
+ if (rc) {
+ POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
+ POSTCODE_SEVERITY_ERR);
+ }
+ return rc;
+}
+
+static void
+visorchipset_file_cleanup(dev_t major_dev)
+{
+ if (file_cdev.ops)
+ cdev_del(&file_cdev);
+ file_cdev.ops = NULL;
+ unregister_chrdev_region(major_dev, 1);
+}
+
+static int
+visorchipset_exit(struct acpi_device *acpi_device)
+{
+ POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
+
+ visorbus_exit();
+
+ cancel_delayed_work(&periodic_controlvm_work);
+ flush_workqueue(periodic_controlvm_workqueue);
+ destroy_workqueue(periodic_controlvm_workqueue);
+ periodic_controlvm_workqueue = NULL;
+ destroy_controlvm_payload_info(&controlvm_payload_info);
+
+ memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
+
+ visorchannel_destroy(controlvm_channel);
+
+ visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
+ platform_device_unregister(&visorchipset_platform_device);
+ POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
+
+ return 0;
+}
+
+static const struct acpi_device_id unisys_device_ids[] = {
+ {"PNP0A07", 0},
+ {"", 0},
+};
+
+static struct acpi_driver unisys_acpi_driver = {
+ .name = "unisys_acpi",
+ .class = "unisys_acpi_class",
+ .owner = THIS_MODULE,
+ .ids = unisys_device_ids,
+ .ops = {
+ .add = visorchipset_init,
+ .remove = visorchipset_exit,
+ },
+};
+
+static __init u32 visorutil_spar_detect(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ if (cpu_has_hypervisor) {
+ /* check the ID */
+ cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
+ return (ebx == UNISYS_SPAR_ID_EBX) &&
+ (ecx == UNISYS_SPAR_ID_ECX) &&
+ (edx == UNISYS_SPAR_ID_EDX);
+ } else {
+ return 0;
+ }
+}
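+
+/* Note: UNISYS_SPAR_LEAF_ID and the UNISYS_SPAR_ID_{EBX,ECX,EDX}
+ * constants are defined elsewhere in this patch. By the usual x86
+ * hypervisor-detection convention, CPUID on a vendor-id leaf returns
+ * a fixed signature in EBX/ECX/EDX, so all three registers must match
+ * before this reports an sPAR partition; any other hypervisor fails
+ * the compare and 0 is returned.
+ */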
+
+static int init_unisys(void)
+{
+ int result;
+
+ if (!visorutil_spar_detect())
+ return -ENODEV;
+
+ result = acpi_bus_register_driver(&unisys_acpi_driver);
+ if (result)
+ return result;
+
+ pr_info("Unisys Visorchipset Driver Loaded.\n");
+ return 0;
+}
+
+static void exit_unisys(void)
+{
+ acpi_bus_unregister_driver(&unisys_acpi_driver);
+}
+
+module_param_named(major, visorchipset_major, int, S_IRUGO);
+MODULE_PARM_DESC(major,
+ "major device number to use for the device node");
+module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
+MODULE_PARM_DESC(visorbusregwait,
+ "1 to have the module wait for the visor bus to register");
+module_param_named(holdchipsetready, visorchipset_holdchipsetready,
+ int, S_IRUGO);
+MODULE_PARM_DESC(holdchipsetready,
+ "1 to hold response to CHIPSET_READY");
+
+module_init(init_unisys);
+module_exit(exit_unisys);
+
+MODULE_AUTHOR("Unisys");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
+ VERSION);
+MODULE_VERSION(VERSION);
diff --git a/drivers/staging/unisys/visorbus/vmcallinterface.h b/drivers/staging/unisys/visorbus/vmcallinterface.h
new file mode 100644
index 000000000000..7a53df00726a
--- /dev/null
+++ b/drivers/staging/unisys/visorbus/vmcallinterface.h
@@ -0,0 +1,149 @@
+/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __IOMONINTF_H__
+#define __IOMONINTF_H__
+
+/*
+ * This file contains all structures needed to support the VMCALLs for IO
+ * Virtualization. The VMCALLs are provided by the Monitor and used by IO
+ * code running on IO Partitions.
+ */
+
+#ifdef __GNUC__
+#include "iovmcall_gnuc.h"
+#endif /* __GNUC__ */
+#include "diagchannel.h"
+
+#ifdef VMCALL_IO_CONTROLVM_ADDR
+#undef VMCALL_IO_CONTROLVM_ADDR
+#endif /* VMCALL_IO_CONTROLVM_ADDR */
+
+/* define subsystem number for AppOS, used in uislib driver */
+#define MDS_APPOS 0x4000000000000000L /* subsystem = 62 - AppOS */
+enum vmcall_monitor_interface_method_tuple { /* VMCALL identification tuples */
+ /* Note: when a new VMCALL is added:
+ * - the 1st 2 hex digits correspond to one of the
+ * VMCALL_MONITOR_INTERFACE types and
+ * - the next 2 hex digits are the nth relative instance within that
+ * type
+ * E.g. for VMCALL_IO_CONTROLVM_ADDR (0x0501) below,
+ * - the 0x05 identifies it as a VMCALL_IO type and
+ * - the 0x01 identifies it as the 1st instance of a VMCALL_IO
+ * type of VMCALL
+ */
+
+ VMCALL_IO_CONTROLVM_ADDR = 0x0501, /* used by all Guests, not just
+ * IO */
+ VMCALL_IO_DIAG_ADDR = 0x0508,
+ VMCALL_IO_VISORSERIAL_ADDR = 0x0509,
+ VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET = 0x0708, /* Allow caller to
+ * query virtual time
+ * offset */
+ VMCALL_CHANNEL_VERSION_MISMATCH = 0x0709,
+ VMCALL_POST_CODE_LOGEVENT = 0x070B, /* LOGEVENT Post Code (RDX) with
+ * specified subsystem mask (RCX
+ * - monitor_subsystems.h) and
+ * severity (RBX) */
+ VMCALL_GENERIC_SURRENDER_QUANTUM_FOREVER = 0x0802, /* Yield the
+ * remainder & all
+ * future quantums of
+ * the caller */
+ VMCALL_MEASUREMENT_DO_NOTHING = 0x0901,
+ VMCALL_UPDATE_PHYSICAL_TIME = 0x0a02 /* Allow
+ * ULTRA_SERVICE_CAPABILITY_TIME
+ * capable guest to make
+ * VMCALL */
+};
+
+#define VMCALL_SUCCESS 0
+#define VMCALL_SUCCESSFUL(result) ((result) == 0)
+
+#ifdef __GNUC__
+#define unisys_vmcall(tuple, reg_ebx, reg_ecx) \
+ __unisys_vmcall_gnuc(tuple, reg_ebx, reg_ecx)
+#define unisys_extended_vmcall(tuple, reg_ebx, reg_ecx, reg_edx) \
+ __unisys_extended_vmcall_gnuc(tuple, reg_ebx, reg_ecx, reg_edx)
+#define ISSUE_IO_VMCALL(method, param, result) \
+ (result = unisys_vmcall(method, (param) & 0xFFFFFFFF, \
+ (param) >> 32))
+#define ISSUE_IO_EXTENDED_VMCALL(method, param1, param2, param3) \
+ unisys_extended_vmcall(method, param1, param2, param3)
+
+ /* The following uses the VMCALL_POST_CODE_LOGEVENT interface but is
+ * currently not used much */
+#define ISSUE_IO_VMCALL_POSTCODE_SEVERITY(postcode, severity) \
+ ISSUE_IO_EXTENDED_VMCALL(VMCALL_POST_CODE_LOGEVENT, severity, \
+ MDS_APPOS, postcode)
+#endif
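+
+/* A minimal sketch of how the macros above are used (a hypothetical
+ * caller; the local names here are made up, and the params struct is
+ * defined below):
+ *
+ * struct vmcall_io_controlvm_addr_params params;
+ * u64 physaddr = virt_to_phys(&params);
+ * int result = VMCALL_SUCCESS;
+ *
+ * ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
+ * if (VMCALL_SUCCESSFUL(result))
+ * pr_info("controlvm channel at 0x%llx\n", params.address);
+ */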
+
+/* Structures for IO VMCALLs */
+
+/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */
+/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
+#pragma pack(push, 1)
+/* Parameters to VMCALL_IO_CONTROLVM_ADDR interface */
+struct vmcall_io_controlvm_addr_params {
+ /* The Guest-relative physical address of the ControlVm channel.
+ * This VMCall fills this in with the appropriate address. */
+ u64 address; /* contents provided by this VMCALL (OUT) */
+ /* the size of the ControlVm channel in bytes. This VMCall fills this
+ * in with the appropriate size. */
+ u32 channel_bytes; /* contents provided by this VMCALL (OUT) */
+ u8 unused[4]; /* Unused Bytes in the 64-Bit Aligned Struct */
+};
+
+#pragma pack(pop)
+/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
+
+/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */
+/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
+#pragma pack(push, 1)
+/* Parameters to VMCALL_IO_DIAG_ADDR interface */
+struct vmcall_io_diag_addr_params {
+ /* The Guest-relative physical address of the diagnostic channel.
+ * This VMCall fills this in with the appropriate address. */
+ u64 address; /* contents provided by this VMCALL (OUT) */
+};
+
+#pragma pack(pop)
+/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
+
+/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */
+/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
+#pragma pack(push, 1)
+/* Parameters to VMCALL_IO_VISORSERIAL_ADDR interface */
+struct vmcall_io_visorserial_addr_params {
+ /* The Guest-relative physical address of the serial console
+ * channel. This VMCall fills this in with the appropriate
+ * address. */
+ u64 address; /* contents provided by this VMCALL (OUT) */
+};
+
+#pragma pack(pop)
+/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
+
+/* Parameters to VMCALL_CHANNEL_VERSION_MISMATCH interface */
+struct vmcall_channel_version_mismatch_params {
+ u8 chname[32]; /* Null terminated string giving name of channel
+ * (IN) */
+ u8 item_name[32]; /* Null terminated string giving name of
+ * mismatched item (IN) */
+ u32 line_no; /* line# where invoked. (IN) */
+ u8 file_name[36]; /* source code where invoked - Null terminated
+ * string (IN) */
+};
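+
+/* An illustrative (hypothetical) way a client might fill the struct
+ * above on detecting a mismatch; the actual VMCALL invocation is not
+ * shown in this patch:
+ *
+ * struct vmcall_channel_version_mismatch_params p;
+ *
+ * memset(&p, 0, sizeof(p));
+ * strncpy((char *)p.chname, "CONTROLVM", sizeof(p.chname) - 1);
+ * strncpy((char *)p.item_name, "signature", sizeof(p.item_name) - 1);
+ * p.line_no = __LINE__;
+ * strncpy((char *)p.file_name, __FILE__, sizeof(p.file_name) - 1);
+ */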
+
+#endif /* __IOMONINTF_H__ */