--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -18,5 +18,5 @@ obj-$(CONFIG_CCWGROUP) += ccwgroup.o
qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o
-vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o
+vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o
obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -64,54 +64,12 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
ret = cio_disable_subchannel(sch);
} while (ret == -EBUSY);
-
out_unlock:
+ private->state = VFIO_CCW_STATE_NOT_OPER;
spin_unlock_irq(sch->lock);
return ret;
}
-static int vfio_ccw_sch_io_helper(struct vfio_ccw_private *private)
-{
- struct subchannel *sch;
- union orb *orb;
- int ccode;
- __u8 lpm;
-
- sch = private->sch;
-
- orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
-
- /* Issue "Start Subchannel" */
- ccode = ssch(sch->schid, orb);
-
- switch (ccode) {
- case 0:
- /*
- * Initialize device status information
- */
- sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
- return 0;
- case 1: /* Status pending */
- case 2: /* Busy */
- return -EBUSY;
- case 3: /* Device/path not operational */
- {
- lpm = orb->cmd.lpm;
- if (lpm != 0)
- sch->lpm &= ~lpm;
- else
- sch->lpm = 0;
-
- if (cio_update_schib(sch))
- return -ENODEV;
-
- return sch->lpm ? -EACCES : -ENODEV;
- }
- default:
- return ccode;
- }
-}
-
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
struct vfio_ccw_private *private;
@@ -130,47 +88,9 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
if (private->io_trigger)
eventfd_signal(private->io_trigger, 1);
-}
-
-/* Deal with the ccw command request from the userspace. */
-int vfio_ccw_sch_cmd_request(struct vfio_ccw_private *private)
-{
- struct mdev_device *mdev = private->mdev;
- union orb *orb;
- union scsw *scsw = &private->scsw;
- struct ccw_io_region *io_region = &private->io_region;
- int ret;
-
- memcpy(scsw, io_region->scsw_area, sizeof(*scsw));
-
- if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
- orb = (union orb *)io_region->orb_area;
-
- ret = cp_init(&private->cp, &mdev->dev, orb);
- if (ret)
- return ret;
-
- ret = cp_prefetch(&private->cp);
- if (ret) {
- cp_free(&private->cp);
- return ret;
- }
-
- /* Start channel program and wait for I/O interrupt. */
- ret = vfio_ccw_sch_io_helper(private);
- if (!ret)
- cp_free(&private->cp);
- } else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
- /* XXX: Handle halt. */
- ret = -EOPNOTSUPP;
- } else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
- /* XXX: Handle clear. */
- ret = -EOPNOTSUPP;
- } else {
- ret = -EOPNOTSUPP;
- }
- return ret;
+ if (private->mdev)
+ private->state = VFIO_CCW_STATE_IDLE;
}
/*
@@ -227,20 +147,9 @@ static struct attribute_group vfio_subchannel_attr_group = {
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
- struct irb *irb;
inc_irq_stat(IRQIO_CIO);
-
- if (!private)
- return;
-
- irb = this_cpu_ptr(&cio_irb);
- memcpy(&private->irb, irb, sizeof(*irb));
-
- queue_work(vfio_ccw_work_q, &private->io_work);
-
- if (private->completion)
- complete(private->completion);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}
static int vfio_ccw_sch_probe(struct subchannel *sch)
@@ -262,6 +171,7 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
dev_set_drvdata(&sch->dev, private);
spin_lock_irq(sch->lock);
+ private->state = VFIO_CCW_STATE_NOT_OPER;
sch->isc = VFIO_CCW_ISC;
ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
spin_unlock_irq(sch->lock);
@@ -277,6 +187,7 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
goto out_rm_group;
INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
+ private->state = VFIO_CCW_STATE_STANDBY;
return 0;
@@ -324,6 +235,7 @@ static void vfio_ccw_sch_shutdown(struct subchannel *sch)
*/
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
+ struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
unsigned long flags;
spin_lock_irqsave(sch->lock, flags);
@@ -334,16 +246,16 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
goto out_unlock;
if (cio_update_schib(sch)) {
- /* Not operational. */
- css_sched_sch_todo(sch, SCH_TODO_UNREG);
-
- /*
- * TODO:
- * Probably we should send the machine check to the guest.
- */
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
goto out_unlock;
}
+ private = dev_get_drvdata(&sch->dev);
+ if (private->state == VFIO_CCW_STATE_NOT_OPER) {
+ private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
+ VFIO_CCW_STATE_STANDBY;
+ }
+
out_unlock:
spin_unlock_irqrestore(sch->lock, flags);
new file mode 100644
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -0,0 +1,210 @@
+/*
+ * Finite state machine for vfio-ccw device handling
+ *
+ * Copyright IBM Corp. 2017
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ */
+
+#include <linux/vfio.h>
+#include <linux/mdev.h>
+
+#include "ioasm.h"
+#include "vfio_ccw_private.h"
+
+static int fsm_io_helper(struct vfio_ccw_private *private)
+{
+ struct subchannel *sch;
+ union orb *orb;
+ int ccode;
+ __u8 lpm;
+ unsigned long flags;
+
+ sch = private->sch;
+
+ spin_lock_irqsave(sch->lock, flags);
+ private->state = VFIO_CCW_STATE_BUSY;
+ spin_unlock_irqrestore(sch->lock, flags);
+
+ orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
+
+ /* Issue "Start Subchannel" */
+ ccode = ssch(sch->schid, orb);
+
+ switch (ccode) {
+ case 0:
+ /*
+ * Initialize device status information
+ */
+ sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
+ return 0;
+ case 1: /* Status pending */
+ case 2: /* Busy */
+ return -EBUSY;
+ case 3: /* Device/path not operational */
+ {
+ lpm = orb->cmd.lpm;
+ if (lpm != 0)
+ sch->lpm &= ~lpm;
+ else
+ sch->lpm = 0;
+
+ if (cio_update_schib(sch))
+ return -ENODEV;
+
+ return sch->lpm ? -EACCES : -ENODEV;
+ }
+ default:
+ return ccode;
+ }
+}
+
+static void fsm_notoper(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+
+ /*
+ * TODO:
+ * Probably we should send the machine check to the guest.
+ */
+ css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ private->state = VFIO_CCW_STATE_NOT_OPER;
+}
+
+/*
+ * No operation action.
+ */
+static void fsm_nop(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+}
+
+static void fsm_io_error(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
+ private->io_region.ret_code = -EIO;
+}
+
+static void fsm_io_busy(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ private->io_region.ret_code = -EBUSY;
+}
+
+static void fsm_disabled_irq(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+
+ /*
+ * An interrupt in a disabled state means a previous disable was not
+ * successful - should not happen, but we try to disable again.
+ */
+ cio_disable_subchannel(sch);
+}
+
+/*
+ * Deal with the ccw command request from userspace.
+ */
+static void fsm_io_request(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ union orb *orb;
+ union scsw *scsw = &private->scsw;
+ struct ccw_io_region *io_region = &private->io_region;
+ struct mdev_device *mdev = private->mdev;
+
+ private->state = VFIO_CCW_STATE_BOXED;
+
+ memcpy(scsw, io_region->scsw_area, sizeof(*scsw));
+
+ if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
+ orb = (union orb *)io_region->orb_area;
+
+ io_region->ret_code = cp_init(&private->cp, &mdev->dev, orb);
+ if (io_region->ret_code)
+ goto err_out;
+
+ io_region->ret_code = cp_prefetch(&private->cp);
+ if (io_region->ret_code) {
+ cp_free(&private->cp);
+ goto err_out;
+ }
+
+ /* Start channel program and wait for I/O interrupt. */
+ io_region->ret_code = fsm_io_helper(private);
+ if (io_region->ret_code) {
+ cp_free(&private->cp);
+ goto err_out;
+ }
+ return;
+ } else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
+ /* XXX: Handle halt. */
+ io_region->ret_code = -EOPNOTSUPP;
+ goto err_out;
+ } else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
+ /* XXX: Handle clear. */
+ io_region->ret_code = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+err_out:
+ private->state = VFIO_CCW_STATE_IDLE;
+}
+
+/*
+ * Got an interrupt for a normal io (state busy).
+ */
+static void fsm_irq(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct irb *irb;
+
+ if (!private)
+ return;
+
+ irb = this_cpu_ptr(&cio_irb);
+ memcpy(&private->irb, irb, sizeof(*irb));
+
+ queue_work(vfio_ccw_work_q, &private->io_work);
+
+ if (private->completion)
+ complete(private->completion);
+}
+
+/*
+ * Device statemachine
+ */
+fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
+ [VFIO_CCW_STATE_NOT_OPER] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_nop,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
+ },
+ [VFIO_CCW_STATE_STANDBY] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ },
+ [VFIO_CCW_STATE_IDLE] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_request,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ },
+ [VFIO_CCW_STATE_BOXED] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ },
+ [VFIO_CCW_STATE_BUSY] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ },
+};
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -41,7 +41,11 @@ static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
if (ret)
return ret;
- return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ if (!ret)
+ private->state = VFIO_CCW_STATE_IDLE;
+
+ return ret;
}
static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
@@ -115,11 +119,15 @@ static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
struct vfio_ccw_private *private = dev_get_drvdata(mdev->parent->dev);
+ if (private->state == VFIO_CCW_STATE_NOT_OPER)
+ return -ENODEV;
+
/* Only support one mediated device for each physical subchannel. */
if (private->mdev)
return -EPERM;
private->mdev = mdev;
+ private->state = VFIO_CCW_STATE_IDLE;
available_instances--;
return 0;
@@ -130,10 +138,20 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
struct vfio_ccw_private *private = dev_get_drvdata(mdev->parent->dev);
int ret;
+ if (!private)
+ goto out;
+
+ if ((private->state == VFIO_CCW_STATE_NOT_OPER) ||
+ (private->state == VFIO_CCW_STATE_STANDBY))
+ goto out;
+
ret = vfio_ccw_mdev_reset(mdev);
if (ret)
return ret;
+ private->state = VFIO_CCW_STATE_STANDBY;
+
+out:
private->mdev = NULL;
available_instances++;
@@ -194,14 +212,18 @@ static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
private = dev_get_drvdata(mdev->dev.parent);
if (!private)
return -ENODEV;
+ if (private->state != VFIO_CCW_STATE_IDLE)
+ return -EACCES;
region = &private->io_region;
if (copy_from_user((void *)region + *ppos, buf, count))
return -EFAULT;
- region->ret_code = vfio_ccw_sch_cmd_request(private);
- if (region->ret_code != 0)
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
+ if (region->ret_code != 0) {
+ private->state = VFIO_CCW_STATE_IDLE;
return region->ret_code;
+ }
return count;
}
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -25,6 +25,7 @@
/**
* struct vfio_ccw_private
* @sch: pointor to the subchannel
+ * @state: internal state of the device
* @completion: synchronization helper of the I/O completion
* @mdev: pointor to the mediated device
* @nb: notifier for vfio events
@@ -37,6 +38,7 @@
*/
struct vfio_ccw_private {
struct subchannel *sch;
+ int state;
struct completion *completion;
struct mdev_device *mdev;
struct notifier_block nb;
@@ -54,6 +56,43 @@ extern int vfio_ccw_mdev_reg(struct subchannel *sch);
extern void vfio_ccw_mdev_unreg(struct subchannel *sch);
extern int vfio_ccw_sch_quiesce(struct subchannel *sch);
-extern int vfio_ccw_sch_cmd_request(struct vfio_ccw_private *private);
+
+/*
+ * States of the device statemachine.
+ */
+enum vfio_ccw_state {
+ VFIO_CCW_STATE_NOT_OPER,
+ VFIO_CCW_STATE_STANDBY,
+ VFIO_CCW_STATE_IDLE,
+ VFIO_CCW_STATE_BOXED,
+ VFIO_CCW_STATE_BUSY,
+ /* last element! */
+ NR_VFIO_CCW_STATES
+};
+
+/*
+ * Asynchronous events of the device statemachine.
+ */
+enum vfio_ccw_event {
+ VFIO_CCW_EVENT_NOT_OPER,
+ VFIO_CCW_EVENT_IO_REQ,
+ VFIO_CCW_EVENT_INTERRUPT,
+ /* last element! */
+ NR_VFIO_CCW_EVENTS
+};
+
+/*
+ * Action called through jumptable.
+ */
+typedef void (fsm_func_t)(struct vfio_ccw_private *, enum vfio_ccw_event);
+extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS];
+
+static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
+ int event)
+{
+ vfio_ccw_jumptable[private->state][event](private, event);
+}
+
+extern struct workqueue_struct *vfio_ccw_work_q;
#endif
The current implementation doesn't check whether the subchannel is in a
proper device state when handling an event. Let's introduce a finite
state machine to manage the state/event changes.

Signed-off-by: Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
---
 drivers/s390/cio/Makefile           |   2 +-
 drivers/s390/cio/vfio_ccw_drv.c     | 116 +++-----------------
 drivers/s390/cio/vfio_ccw_fsm.c     | 210 ++++++++++++++++++++++++++++++++++++
 drivers/s390/cio/vfio_ccw_ops.c     |  28 ++++-
 drivers/s390/cio/vfio_ccw_private.h |  41 ++++++-
 5 files changed, 290 insertions(+), 107 deletions(-)
 create mode 100644 drivers/s390/cio/vfio_ccw_fsm.c
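
For reference, the (state, event) -> action dispatch that vfio_ccw_fsm.c
uses can be sketched as a small standalone userspace program, shown below.
The identifiers in the sketch are simplified stand-ins chosen for
illustration only; they are not the driver's actual states, events, or
helpers.

/*
 * Minimal sketch of a table-driven state machine: every (state, event)
 * pair maps to an action routine, and the action decides the next state.
 * All names below are illustrative placeholders, not vfio-ccw symbols.
 */
#include <stdio.h>

enum state { ST_NOT_OPER, ST_IDLE, ST_BUSY, NR_STATES };
enum event { EV_IO_REQ, EV_INTERRUPT, NR_EVENTS };

struct device_ctx {
	enum state state;
};

typedef void (action_fn)(struct device_ctx *ctx, enum event ev);

static void act_start_io(struct device_ctx *ctx, enum event ev)
{
	/* Accept the request and stay busy until the interrupt arrives. */
	ctx->state = ST_BUSY;
	printf("I/O started\n");
}

static void act_busy(struct device_ctx *ctx, enum event ev)
{
	/* Reject a request that arrives while an I/O is in flight. */
	printf("rejected: device busy\n");
}

static void act_irq(struct device_ctx *ctx, enum event ev)
{
	/* The I/O completed; new requests can be accepted again. */
	ctx->state = ST_IDLE;
	printf("interrupt handled\n");
}

static void act_ignore(struct device_ctx *ctx, enum event ev)
{
	printf("event %d ignored in state %d\n", ev, ctx->state);
}

/* Jumptable: one action per (state, event) combination. */
static action_fn *jumptable[NR_STATES][NR_EVENTS] = {
	[ST_NOT_OPER] = {
		[EV_IO_REQ]	= act_ignore,
		[EV_INTERRUPT]	= act_ignore,
	},
	[ST_IDLE] = {
		[EV_IO_REQ]	= act_start_io,
		[EV_INTERRUPT]	= act_irq,
	},
	[ST_BUSY] = {
		[EV_IO_REQ]	= act_busy,
		[EV_INTERRUPT]	= act_irq,
	},
};

static void fsm_event(struct device_ctx *ctx, enum event ev)
{
	jumptable[ctx->state][ev](ctx, ev);
}

int main(void)
{
	struct device_ctx ctx = { .state = ST_IDLE };

	fsm_event(&ctx, EV_IO_REQ);	/* accepted, device goes busy */
	fsm_event(&ctx, EV_IO_REQ);	/* rejected: device busy      */
	fsm_event(&ctx, EV_INTERRUPT);	/* completion, back to idle   */
	return 0;
}

Compared with nested if/else checks, the table forces every (state, event)
combination to be handled explicitly, which is what lets the patch reject
requests that arrive while the device is in an improper state.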