path: root/drivers/s390/cio/vfio_ccw_fsm.c
Diffstat (limited to 'drivers/s390/cio/vfio_ccw_fsm.c')
-rw-r--r--  drivers/s390/cio/vfio_ccw_fsm.c | 99
1 file changed, 82 insertions(+), 17 deletions(-)
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 8483a266051c..a59c758869f8 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -10,7 +10,8 @@
*/
#include <linux/vfio.h>
-#include <linux/mdev.h>
+
+#include <asm/isc.h>
#include "ioasm.h"
#include "vfio_ccw_private.h"
@@ -161,8 +162,12 @@ static void fsm_notoper(struct vfio_ccw_private *private,
{
struct subchannel *sch = private->sch;
- VFIO_CCW_TRACE_EVENT(2, "notoper");
- VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: notoper event %x state %x\n",
+ sch->schid.cssid,
+ sch->schid.ssid,
+ sch->schid.sch_no,
+ event,
+ private->state);
/*
* TODO:
@@ -170,6 +175,9 @@ static void fsm_notoper(struct vfio_ccw_private *private,
*/
css_sched_sch_todo(sch, SCH_TODO_UNREG);
private->state = VFIO_CCW_STATE_NOT_OPER;
+
+ /* This is usually handled during CLOSE event */
+ cp_free(&private->cp);
}
/*
@@ -242,7 +250,6 @@ static void fsm_io_request(struct vfio_ccw_private *private,
union orb *orb;
union scsw *scsw = &private->scsw;
struct ccw_io_region *io_region = private->io_region;
- struct mdev_device *mdev = private->mdev;
char *errstr = "request";
struct subchannel_id schid = get_schid(private);
@@ -256,8 +263,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
if (orb->tm.b) {
io_region->ret_code = -EOPNOTSUPP;
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): transport mode\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: transport mode\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
errstr = "transport mode";
goto err_out;
@@ -265,8 +272,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = cp_init(&private->cp, orb);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): cp_init=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: cp_init=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp init";
@@ -276,8 +283,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = cp_prefetch(&private->cp);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): cp_prefetch=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: cp_prefetch=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp prefetch";
@@ -289,8 +296,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = fsm_io_helper(private);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): fsm_io_helper=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: fsm_io_helper=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp fsm_io_helper";
@@ -300,16 +307,16 @@ static void fsm_io_request(struct vfio_ccw_private *private,
return;
} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): halt on io_region\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: halt on io_region\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
/* halt is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
goto err_out;
} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): clear on io_region\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: clear on io_region\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
/* clear is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
@@ -366,6 +373,54 @@ static void fsm_irq(struct vfio_ccw_private *private,
complete(private->completion);
}
+static void fsm_open(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+ int ret;
+
+ spin_lock_irq(sch->lock);
+ sch->isc = VFIO_CCW_ISC;
+ ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ if (ret)
+ goto err_unlock;
+
+ private->state = VFIO_CCW_STATE_IDLE;
+ spin_unlock_irq(sch->lock);
+ return;
+
+err_unlock:
+ spin_unlock_irq(sch->lock);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+}
+
+static void fsm_close(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+ int ret;
+
+ spin_lock_irq(sch->lock);
+
+ if (!sch->schib.pmcw.ena)
+ goto err_unlock;
+
+ ret = cio_disable_subchannel(sch);
+ if (ret == -EBUSY)
+ ret = vfio_ccw_sch_quiesce(sch);
+ if (ret)
+ goto err_unlock;
+
+ private->state = VFIO_CCW_STATE_STANDBY;
+ spin_unlock_irq(sch->lock);
+ cp_free(&private->cp);
+ return;
+
+err_unlock:
+ spin_unlock_irq(sch->lock);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+}
+
/*
* Device statemachine
*/
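Note: the new OPEN and CLOSE events are meant to be raised from the driver's device open/release paths rather than from within the FSM itself. A minimal sketch of that wiring follows; the callback names and the error-reporting convention are assumptions for illustration, not part of this patch.

	/* Hypothetical sketch (not part of this patch): raising the new
	 * events from the mdev open/release paths. Names and surrounding
	 * plumbing are assumptions. */
	static int vfio_ccw_mdev_open_device(struct vfio_ccw_private *private)
	{
		/* STANDBY + OPEN runs fsm_open() and moves the FSM to IDLE. */
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);

		/* fsm_open() reports failure by dropping to NOT_OPER. */
		if (private->state == VFIO_CCW_STATE_NOT_OPER)
			return -EINVAL;
		return 0;
	}

	static void vfio_ccw_mdev_close_device(struct vfio_ccw_private *private)
	{
		/* IDLE/CP_* + CLOSE runs fsm_close(): disable the subchannel
		 * and free the channel program via cp_free(). */
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
	}
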
@@ -375,29 +430,39 @@ fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_nop,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_nop,
},
[VFIO_CCW_STATE_STANDBY] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
- [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_open,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_notoper,
},
[VFIO_CCW_STATE_IDLE] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_request,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
[VFIO_CCW_STATE_CP_PROCESSING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_retry,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_retry,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
[VFIO_CCW_STATE_CP_PENDING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
};
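
For reference, events reach this table through the vfio_ccw_fsm_event() helper in vfio_ccw_private.h. A minimal sketch of that dispatch is shown below; the real helper may add tracing or other bookkeeping.

	/* Minimal sketch of the jump-table dispatch; details of the real
	 * helper in vfio_ccw_private.h may differ. */
	static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
					      enum vfio_ccw_event event)
	{
		vfio_ccw_jumptable[private->state][event](private, event);
	}

Because every state in the table above supplies a handler for every event (using fsm_nop or the error handlers where nothing is to be done), the dispatch needs no NULL checks.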