path: root/drivers/s390/cio
author    Linus Torvalds <torvalds@linux-foundation.org>  2022-08-06 08:59:35 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-08-06 08:59:35 -0700
commit    a9cf69d0e7f2051cca1c08ed9b34fe79da951ee9 (patch)
tree      80af592fdcb2a815a35272cd8375aa03c7d2f04a /drivers/s390/cio
parent    Merge tag 'mm-stable-2022-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm (diff)
parent    vfio/pci: fix the wrong word (diff)
Merge tag 'vfio-v6.0-rc1' of https://github.com/awilliam/linux-vfio
Pull VFIO updates from Alex Williamson:

 - Cleanup use of extern in function prototypes (Alex Williamson)

 - Simplify bus_type usage and convert to device IOMMU interfaces (Robin Murphy)

 - Check missed return value and fix comment typos (Bo Liu)

 - Split migration ops from device ops and fix races in mlx5 migration support (Yishai Hadas)

 - Fix missed return value check in noiommu support (Liam Ni)

 - Hardening to clear buffer pointer to avoid use-after-free (Schspa Shi)

 - Remove requirement that only the same mm can unmap a previously mapped range (Li Zhe)

 - Adjust semaphore release vs device open counter (Yi Liu)

 - Remove unused arg from SPAPR support code (Deming Wang)

 - Rework vfio-ccw driver to better fit new mdev framework (Eric Farman, Michael Kawano)

 - Replace DMA unmap notifier with callbacks (Jason Gunthorpe)

 - Clarify SPAPR support comment relative to iommu_ops (Alexey Kardashevskiy)

 - Revise page pinning API towards compatibility with future iommufd support (Nicolin Chen)

 - Resolve issues in vfio-ccw, including use of DMA unmap callback (Eric Farman)

* tag 'vfio-v6.0-rc1' of https://github.com/awilliam/linux-vfio: (40 commits)
  vfio/pci: fix the wrong word
  vfio/ccw: Check return code from subchannel quiesce
  vfio/ccw: Remove FSM Close from remove handlers
  vfio/ccw: Add length to DMA_UNMAP checks
  vfio: Replace phys_pfn with pages for vfio_pin_pages()
  vfio/ccw: Add kmap_local_page() for memcpy
  vfio: Rename user_iova of vfio_dma_rw()
  vfio/ccw: Change pa_pfn list to pa_iova list
  vfio/ap: Change saved_pfn to saved_iova
  vfio: Pass in starting IOVA to vfio_pin/unpin_pages API
  vfio/ccw: Only pass in contiguous pages
  vfio/ap: Pass in physical address of ind to ap_aqic()
  drm/i915/gvt: Replace roundup with DIV_ROUND_UP
  vfio: Make vfio_unpin_pages() return void
  vfio/spapr_tce: Fix the comment
  vfio: Replace the iommu notifier with a device list
  vfio: Replace the DMA unmapping notifier with a callback
  vfio/ccw: Move FSM open/close to MDEV open/close
  vfio/ccw: Refactor vfio_ccw_mdev_reset
  vfio/ccw: Create a CLOSE FSM event
  ...
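The pinning rework named above ("vfio: Pass in starting IOVA to vfio_pin/unpin_pages API", "vfio: Replace phys_pfn with pages for vfio_pin_pages()") moves callers from pfn arrays to a starting IOVA plus an output array of struct page pointers. A minimal sketch of a caller under the new signatures, assuming a contiguous IOVA range; my_pin_buffer and its rollback policy are illustrative, not code from this tree:

	/* Pin the pages backing @len bytes of guest memory at @iova.
	 * vfio_pin_pages()/vfio_unpin_pages() are the reworked v6.0 APIs;
	 * everything named my_* is hypothetical. */
	#include <linux/vfio.h>
	#include <linux/iommu.h>

	static int my_pin_buffer(struct vfio_device *vdev, dma_addr_t iova,
				 size_t len, struct page **pages)
	{
		int npage = DIV_ROUND_UP((iova & ~PAGE_MASK) + len, PAGE_SIZE);
		int ret;

		/* One call covers the whole contiguous IOVA range. */
		ret = vfio_pin_pages(vdev, iova & PAGE_MASK, npage,
				     IOMMU_READ | IOMMU_WRITE, pages);
		if (ret < 0)
			return ret;
		if (ret != npage) {
			/* Partial pin: release what was pinned and bail. */
			vfio_unpin_pages(vdev, iova & PAGE_MASK, ret);
			return -EINVAL;
		}
		return npage;
	}

The vfio-ccw conversion below follows the same pattern, but additionally coalesces runs of contiguous IOVAs, since the guest IDAWs it translates need not be contiguous.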
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--drivers/s390/cio/vfio_ccw_async.c1
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c205
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.h12
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c58
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c99
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c114
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h13
7 files changed, 261 insertions, 241 deletions
diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c
index 7a838e3d7c0f..420d89ba7f83 100644
--- a/drivers/s390/cio/vfio_ccw_async.c
+++ b/drivers/s390/cio/vfio_ccw_async.c
@@ -8,7 +8,6 @@
*/
#include <linux/vfio.h>
-#include <linux/mdev.h>
#include "vfio_ccw_private.h"
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 0c2be9421ab7..7b02e97f4b29 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -11,6 +11,7 @@
#include <linux/ratelimit.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/vfio.h>
#include <asm/idals.h>
@@ -18,13 +19,11 @@
#include "vfio_ccw_cp.h"
#include "vfio_ccw_private.h"
-struct pfn_array {
- /* Starting guest physical I/O address. */
- unsigned long pa_iova;
- /* Array that stores PFNs of the pages need to pin. */
- unsigned long *pa_iova_pfn;
- /* Array that receives PFNs of the pages pinned. */
- unsigned long *pa_pfn;
+struct page_array {
+ /* Array that stores pages need to pin. */
+ dma_addr_t *pa_iova;
+ /* Array that receives the pinned pages. */
+ struct page **pa_page;
/* Number of pages pinned from @pa_iova. */
int pa_nr;
};
@@ -37,116 +36,158 @@ struct ccwchain {
/* Count of the valid ccws in chain. */
int ch_len;
/* Pinned PAGEs for the original data. */
- struct pfn_array *ch_pa;
+ struct page_array *ch_pa;
};
/*
- * pfn_array_alloc() - alloc memory for PFNs
- * @pa: pfn_array on which to perform the operation
+ * page_array_alloc() - alloc memory for page array
+ * @pa: page_array on which to perform the operation
* @iova: target guest physical address
* @len: number of bytes that should be pinned from @iova
*
- * Attempt to allocate memory for PFNs.
+ * Attempt to allocate memory for page array.
*
- * Usage of pfn_array:
- * We expect (pa_nr == 0) and (pa_iova_pfn == NULL), any field in
+ * Usage of page_array:
+ * We expect (pa_nr == 0) and (pa_iova == NULL), any field in
* this structure will be filled in by this function.
*
* Returns:
- * 0 if PFNs are allocated
- * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova_pfn is not NULL
+ * 0 if page array is allocated
+ * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova is not NULL
* -ENOMEM if alloc failed
*/
-static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
+static int page_array_alloc(struct page_array *pa, u64 iova, unsigned int len)
{
int i;
- if (pa->pa_nr || pa->pa_iova_pfn)
+ if (pa->pa_nr || pa->pa_iova)
return -EINVAL;
- pa->pa_iova = iova;
-
pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (!pa->pa_nr)
return -EINVAL;
- pa->pa_iova_pfn = kcalloc(pa->pa_nr,
- sizeof(*pa->pa_iova_pfn) +
- sizeof(*pa->pa_pfn),
- GFP_KERNEL);
- if (unlikely(!pa->pa_iova_pfn)) {
+ pa->pa_iova = kcalloc(pa->pa_nr,
+ sizeof(*pa->pa_iova) + sizeof(*pa->pa_page),
+ GFP_KERNEL);
+ if (unlikely(!pa->pa_iova)) {
pa->pa_nr = 0;
return -ENOMEM;
}
- pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
+ pa->pa_page = (struct page **)&pa->pa_iova[pa->pa_nr];
- pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
- pa->pa_pfn[0] = -1ULL;
+ pa->pa_iova[0] = iova;
+ pa->pa_page[0] = NULL;
for (i = 1; i < pa->pa_nr; i++) {
- pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
- pa->pa_pfn[i] = -1ULL;
+ pa->pa_iova[i] = pa->pa_iova[i - 1] + PAGE_SIZE;
+ pa->pa_page[i] = NULL;
}
return 0;
}
/*
- * pfn_array_pin() - Pin user pages in memory
- * @pa: pfn_array on which to perform the operation
+ * page_array_unpin() - Unpin user pages in memory
+ * @pa: page_array on which to perform the operation
+ * @vdev: the vfio device to perform the operation
+ * @pa_nr: number of user pages to unpin
+ *
+ * Only unpin if any pages were pinned to begin with, i.e. pa_nr > 0,
+ * otherwise only clear pa->pa_nr
+ */
+static void page_array_unpin(struct page_array *pa,
+ struct vfio_device *vdev, int pa_nr)
+{
+ int unpinned = 0, npage = 1;
+
+ while (unpinned < pa_nr) {
+ dma_addr_t *first = &pa->pa_iova[unpinned];
+ dma_addr_t *last = &first[npage];
+
+ if (unpinned + npage < pa_nr &&
+ *first + npage * PAGE_SIZE == *last) {
+ npage++;
+ continue;
+ }
+
+ vfio_unpin_pages(vdev, *first, npage);
+ unpinned += npage;
+ npage = 1;
+ }
+
+ pa->pa_nr = 0;
+}
+
+/*
+ * page_array_pin() - Pin user pages in memory
+ * @pa: page_array on which to perform the operation
 * @vdev: the vfio device to perform pin operations
*
* Returns number of pages pinned upon success.
* If the pin request partially succeeds, or fails completely,
* all pages are left unpinned and a negative error value is returned.
*/
-static int pfn_array_pin(struct pfn_array *pa, struct vfio_device *vdev)
+static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
{
+ int pinned = 0, npage = 1;
int ret = 0;
- ret = vfio_pin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr,
- IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
+ while (pinned < pa->pa_nr) {
+ dma_addr_t *first = &pa->pa_iova[pinned];
+ dma_addr_t *last = &first[npage];
- if (ret < 0) {
- goto err_out;
- } else if (ret > 0 && ret != pa->pa_nr) {
- vfio_unpin_pages(vdev, pa->pa_iova_pfn, ret);
- ret = -EINVAL;
- goto err_out;
+ if (pinned + npage < pa->pa_nr &&
+ *first + npage * PAGE_SIZE == *last) {
+ npage++;
+ continue;
+ }
+
+ ret = vfio_pin_pages(vdev, *first, npage,
+ IOMMU_READ | IOMMU_WRITE,
+ &pa->pa_page[pinned]);
+ if (ret < 0) {
+ goto err_out;
+ } else if (ret > 0 && ret != npage) {
+ pinned += ret;
+ ret = -EINVAL;
+ goto err_out;
+ }
+ pinned += npage;
+ npage = 1;
}
return ret;
err_out:
- pa->pa_nr = 0;
-
+ page_array_unpin(pa, vdev, pinned);
return ret;
}
/* Unpin the pages before releasing the memory. */
-static void pfn_array_unpin_free(struct pfn_array *pa, struct vfio_device *vdev)
+static void page_array_unpin_free(struct page_array *pa, struct vfio_device *vdev)
{
- /* Only unpin if any pages were pinned to begin with */
- if (pa->pa_nr)
- vfio_unpin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr);
- pa->pa_nr = 0;
- kfree(pa->pa_iova_pfn);
+ page_array_unpin(pa, vdev, pa->pa_nr);
+ kfree(pa->pa_iova);
}
-static bool pfn_array_iova_pinned(struct pfn_array *pa, unsigned long iova)
+static bool page_array_iova_pinned(struct page_array *pa, u64 iova, u64 length)
{
- unsigned long iova_pfn = iova >> PAGE_SHIFT;
+ u64 iova_pfn_start = iova >> PAGE_SHIFT;
+ u64 iova_pfn_end = (iova + length - 1) >> PAGE_SHIFT;
+ u64 pfn;
int i;
- for (i = 0; i < pa->pa_nr; i++)
- if (pa->pa_iova_pfn[i] == iova_pfn)
+ for (i = 0; i < pa->pa_nr; i++) {
+ pfn = pa->pa_iova[i] >> PAGE_SHIFT;
+ if (pfn >= iova_pfn_start && pfn <= iova_pfn_end)
return true;
+ }
return false;
}
-/* Create the list of IDAL words for a pfn_array. */
-static inline void pfn_array_idal_create_words(
- struct pfn_array *pa,
- unsigned long *idaws)
+/* Create the list of IDAL words for a page_array. */
+static inline void page_array_idal_create_words(struct page_array *pa,
+ unsigned long *idaws)
{
int i;
@@ -159,10 +200,10 @@ static inline void pfn_array_idal_create_words(
*/
for (i = 0; i < pa->pa_nr; i++)
- idaws[i] = pa->pa_pfn[i] << PAGE_SHIFT;
+ idaws[i] = page_to_phys(pa->pa_page[i]);
/* Adjust the first IDAW, since it may not start on a page boundary */
- idaws[0] += pa->pa_iova & (PAGE_SIZE - 1);
+ idaws[0] += pa->pa_iova[0] & (PAGE_SIZE - 1);
}
static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
@@ -194,24 +235,24 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
unsigned long n)
{
- struct pfn_array pa = {0};
- u64 from;
+ struct page_array pa = {0};
int i, ret;
unsigned long l, m;
- ret = pfn_array_alloc(&pa, iova, n);
+ ret = page_array_alloc(&pa, iova, n);
if (ret < 0)
return ret;
- ret = pfn_array_pin(&pa, vdev);
+ ret = page_array_pin(&pa, vdev);
if (ret < 0) {
- pfn_array_unpin_free(&pa, vdev);
+ page_array_unpin_free(&pa, vdev);
return ret;
}
l = n;
for (i = 0; i < pa.pa_nr; i++) {
- from = pa.pa_pfn[i] << PAGE_SHIFT;
+ void *from = kmap_local_page(pa.pa_page[i]);
+
m = PAGE_SIZE;
if (i == 0) {
from += iova & (PAGE_SIZE - 1);
@@ -219,14 +260,15 @@ static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
}
m = min(l, m);
- memcpy(to + (n - l), (void *)from, m);
+ memcpy(to + (n - l), from, m);
+ kunmap_local(from);
l -= m;
if (l == 0)
break;
}
- pfn_array_unpin_free(&pa, vdev);
+ page_array_unpin_free(&pa, vdev);
return l;
}
@@ -329,7 +371,7 @@ static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
chain->ch_ccw = (struct ccw1 *)data;
data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
- chain->ch_pa = (struct pfn_array *)data;
+ chain->ch_pa = (struct page_array *)data;
chain->ch_len = len;
@@ -513,7 +555,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
struct ccw1 *ccw;
- struct pfn_array *pa;
+ struct page_array *pa;
u64 iova;
unsigned long *idaws;
int ret;
@@ -547,13 +589,13 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
}
/*
- * Allocate an array of pfn's for pages to pin/translate.
+ * Allocate an array of pages to pin/translate.
* The number of pages is actually the count of the idaws
 * required for the data transfer, since we only support
* 4K IDAWs today.
*/
pa = chain->ch_pa + idx;
- ret = pfn_array_alloc(pa, iova, bytes);
+ ret = page_array_alloc(pa, iova, bytes);
if (ret < 0)
goto out_free_idaws;
@@ -564,21 +606,21 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
goto out_unpin;
/*
- * Copy guest IDAWs into pfn_array, in case the memory they
+ * Copy guest IDAWs into page_array, in case the memory they
* occupy is not contiguous.
*/
for (i = 0; i < idaw_nr; i++)
- pa->pa_iova_pfn[i] = idaws[i] >> PAGE_SHIFT;
+ pa->pa_iova[i] = idaws[i];
} else {
/*
- * No action is required here; the iova addresses in pfn_array
- * were initialized sequentially in pfn_array_alloc() beginning
+ * No action is required here; the iova addresses in page_array
+ * were initialized sequentially in page_array_alloc() beginning
* with the contents of ccw->cda.
*/
}
if (ccw_does_data_transfer(ccw)) {
- ret = pfn_array_pin(pa, vdev);
+ ret = page_array_pin(pa, vdev);
if (ret < 0)
goto out_unpin;
} else {
@@ -588,13 +630,13 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
ccw->cda = (__u32) virt_to_phys(idaws);
ccw->flags |= CCW_FLAG_IDA;
- /* Populate the IDAL with pinned/translated addresses from pfn */
- pfn_array_idal_create_words(pa, idaws);
+ /* Populate the IDAL with pinned/translated addresses from page */
+ page_array_idal_create_words(pa, idaws);
return 0;
out_unpin:
- pfn_array_unpin_free(pa, vdev);
+ page_array_unpin_free(pa, vdev);
out_free_idaws:
kfree(idaws);
out_init:
@@ -700,7 +742,7 @@ void cp_free(struct channel_program *cp)
cp->initialized = false;
list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++) {
- pfn_array_unpin_free(chain->ch_pa + i, vdev);
+ page_array_unpin_free(chain->ch_pa + i, vdev);
ccwchain_cda_free(chain, i);
}
ccwchain_free(chain);
@@ -862,11 +904,12 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
* cp_iova_pinned() - check if an iova is pinned for a ccw chain.
* @cp: channel_program on which to perform the operation
* @iova: the iova to check
+ * @length: the length to check from @iova
*
* If the @iova is currently pinned for the ccw chain, return true;
* else return false.
*/
-bool cp_iova_pinned(struct channel_program *cp, u64 iova)
+bool cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length)
{
struct ccwchain *chain;
int i;
@@ -876,7 +919,7 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova)
list_for_each_entry(chain, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++)
- if (pfn_array_iova_pinned(chain->ch_pa + i, iova))
+ if (page_array_iova_pinned(chain->ch_pa + i, iova, length))
return true;
}
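Because the reworked vfio_pin_pages() pins npage pages starting at a single IOVA, page_array_pin() and page_array_unpin() above both walk their per-page IOVA list and batch each maximal contiguous run into one call. A condensed sketch of that walk, with the driver's partial-pin rollback (page_array_unpin() on the error path) omitted; pin_runs is not a real symbol:

	/* Coalesce contiguous per-page IOVAs into single pin calls. */
	static int pin_runs(struct vfio_device *vdev, dma_addr_t *iova,
			    struct page **pages, int nr)
	{
		int pinned = 0, npage = 1, ret;

		while (pinned < nr) {
			/* Grow the run while the next entry is exactly one
			 * page beyond the start of the current run. */
			if (pinned + npage < nr &&
			    iova[pinned] + npage * PAGE_SIZE == iova[pinned + npage]) {
				npage++;
				continue;
			}

			ret = vfio_pin_pages(vdev, iova[pinned], npage,
					     IOMMU_READ | IOMMU_WRITE,
					     &pages[pinned]);
			if (ret != npage)
				return ret < 0 ? ret : -EINVAL;

			pinned += npage;
			npage = 1;
		}

		return pinned;
	}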
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index e4c436199b4c..54d26e242533 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -41,11 +41,11 @@ struct channel_program {
struct ccw1 *guest_cp;
};
-extern int cp_init(struct channel_program *cp, union orb *orb);
-extern void cp_free(struct channel_program *cp);
-extern int cp_prefetch(struct channel_program *cp);
-extern union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
-extern void cp_update_scsw(struct channel_program *cp, union scsw *scsw);
-extern bool cp_iova_pinned(struct channel_program *cp, u64 iova);
+int cp_init(struct channel_program *cp, union orb *orb);
+void cp_free(struct channel_program *cp);
+int cp_prefetch(struct channel_program *cp);
+union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
+void cp_update_scsw(struct channel_program *cp, union scsw *scsw);
+bool cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length);
#endif
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index ee182cfb467d..86d9e428357b 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
-#include <linux/uuid.h>
#include <linux/mdev.h>
#include <asm/isc.h>
@@ -42,13 +41,6 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
DECLARE_COMPLETION_ONSTACK(completion);
int iretry, ret = 0;
- spin_lock_irq(sch->lock);
- if (!sch->schib.pmcw.ena)
- goto out_unlock;
- ret = cio_disable_subchannel(sch);
- if (ret != -EBUSY)
- goto out_unlock;
-
iretry = 255;
do {
@@ -75,9 +67,7 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
spin_lock_irq(sch->lock);
ret = cio_disable_subchannel(sch);
} while (ret == -EBUSY);
-out_unlock:
- private->state = VFIO_CCW_STATE_NOT_OPER;
- spin_unlock_irq(sch->lock);
+
return ret;
}
@@ -107,9 +97,10 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
/*
* Reset to IDLE only if processing of a channel program
* has finished. Do not overwrite a possible processing
- * state if the final interrupt was for HSCH or CSCH.
+ * state if the interrupt was unsolicited, or if the final
+ * interrupt was for HSCH or CSCH.
*/
- if (private->mdev && cp_is_finished)
+ if (cp_is_finished)
private->state = VFIO_CCW_STATE_IDLE;
if (private->io_trigger)
@@ -147,7 +138,7 @@ static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)
private->sch = sch;
mutex_init(&private->io_mutex);
- private->state = VFIO_CCW_STATE_NOT_OPER;
+ private->state = VFIO_CCW_STATE_STANDBY;
INIT_LIST_HEAD(&private->crw);
INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
@@ -231,26 +222,15 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
dev_set_drvdata(&sch->dev, private);
- spin_lock_irq(sch->lock);
- sch->isc = VFIO_CCW_ISC;
- ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
- spin_unlock_irq(sch->lock);
+ ret = mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
if (ret)
goto out_free;
- private->state = VFIO_CCW_STATE_STANDBY;
-
- ret = vfio_ccw_mdev_reg(sch);
- if (ret)
- goto out_disable;
-
VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
sch->schid.cssid, sch->schid.ssid,
sch->schid.sch_no);
return 0;
-out_disable:
- cio_disable_subchannel(sch);
out_free:
dev_set_drvdata(&sch->dev, NULL);
vfio_ccw_free_private(private);
@@ -261,8 +241,7 @@ static void vfio_ccw_sch_remove(struct subchannel *sch)
{
struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
- vfio_ccw_sch_quiesce(sch);
- vfio_ccw_mdev_unreg(sch);
+ mdev_unregister_device(&sch->dev);
dev_set_drvdata(&sch->dev, NULL);
@@ -275,7 +254,10 @@ static void vfio_ccw_sch_remove(struct subchannel *sch)
static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
- vfio_ccw_sch_quiesce(sch);
+ struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}
/**
@@ -301,19 +283,11 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
if (work_pending(&sch->todo_work))
goto out_unlock;
- if (cio_update_schib(sch)) {
- vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
- rc = 0;
- goto out_unlock;
- }
-
- private = dev_get_drvdata(&sch->dev);
- if (private->state == VFIO_CCW_STATE_NOT_OPER) {
- private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
- VFIO_CCW_STATE_STANDBY;
- }
rc = 0;
+ if (cio_update_schib(sch))
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+
out_unlock:
spin_unlock_irqrestore(sch->lock, flags);
@@ -358,8 +332,8 @@ static int vfio_ccw_chp_event(struct subchannel *sch,
return 0;
trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
- VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): mask=0x%x event=%d\n",
- mdev_uuid(private->mdev), sch->schid.cssid,
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: mask=0x%x event=%d\n",
+ sch->schid.cssid,
sch->schid.ssid, sch->schid.sch_no,
mask, event);
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 8483a266051c..a59c758869f8 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -10,7 +10,8 @@
*/
#include <linux/vfio.h>
-#include <linux/mdev.h>
+
+#include <asm/isc.h>
#include "ioasm.h"
#include "vfio_ccw_private.h"
@@ -161,8 +162,12 @@ static void fsm_notoper(struct vfio_ccw_private *private,
{
struct subchannel *sch = private->sch;
- VFIO_CCW_TRACE_EVENT(2, "notoper");
- VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: notoper event %x state %x\n",
+ sch->schid.cssid,
+ sch->schid.ssid,
+ sch->schid.sch_no,
+ event,
+ private->state);
/*
* TODO:
@@ -170,6 +175,9 @@ static void fsm_notoper(struct vfio_ccw_private *private,
*/
css_sched_sch_todo(sch, SCH_TODO_UNREG);
private->state = VFIO_CCW_STATE_NOT_OPER;
+
+ /* This is usually handled during CLOSE event */
+ cp_free(&private->cp);
}
/*
@@ -242,7 +250,6 @@ static void fsm_io_request(struct vfio_ccw_private *private,
union orb *orb;
union scsw *scsw = &private->scsw;
struct ccw_io_region *io_region = private->io_region;
- struct mdev_device *mdev = private->mdev;
char *errstr = "request";
struct subchannel_id schid = get_schid(private);
@@ -256,8 +263,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
if (orb->tm.b) {
io_region->ret_code = -EOPNOTSUPP;
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): transport mode\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: transport mode\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
errstr = "transport mode";
goto err_out;
@@ -265,8 +272,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = cp_init(&private->cp, orb);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): cp_init=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: cp_init=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp init";
@@ -276,8 +283,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = cp_prefetch(&private->cp);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): cp_prefetch=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: cp_prefetch=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp prefetch";
@@ -289,8 +296,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = fsm_io_helper(private);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): fsm_io_helper=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: fsm_io_helper=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp fsm_io_helper";
@@ -300,16 +307,16 @@ static void fsm_io_request(struct vfio_ccw_private *private,
return;
} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): halt on io_region\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: halt on io_region\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
/* halt is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
goto err_out;
} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): clear on io_region\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: clear on io_region\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
/* clear is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
@@ -366,6 +373,54 @@ static void fsm_irq(struct vfio_ccw_private *private,
complete(private->completion);
}
+static void fsm_open(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+ int ret;
+
+ spin_lock_irq(sch->lock);
+ sch->isc = VFIO_CCW_ISC;
+ ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ if (ret)
+ goto err_unlock;
+
+ private->state = VFIO_CCW_STATE_IDLE;
+ spin_unlock_irq(sch->lock);
+ return;
+
+err_unlock:
+ spin_unlock_irq(sch->lock);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+}
+
+static void fsm_close(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+ int ret;
+
+ spin_lock_irq(sch->lock);
+
+ if (!sch->schib.pmcw.ena)
+ goto err_unlock;
+
+ ret = cio_disable_subchannel(sch);
+ if (ret == -EBUSY)
+ ret = vfio_ccw_sch_quiesce(sch);
+ if (ret)
+ goto err_unlock;
+
+ private->state = VFIO_CCW_STATE_STANDBY;
+ spin_unlock_irq(sch->lock);
+ cp_free(&private->cp);
+ return;
+
+err_unlock:
+ spin_unlock_irq(sch->lock);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+}
+
/*
* Device statemachine
*/
@@ -375,29 +430,39 @@ fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_nop,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_nop,
},
[VFIO_CCW_STATE_STANDBY] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
- [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_open,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_notoper,
},
[VFIO_CCW_STATE_IDLE] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_request,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
[VFIO_CCW_STATE_CP_PROCESSING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_retry,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_retry,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
[VFIO_CCW_STATE_CP_PENDING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
};
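The two new rows per state above (OPEN, CLOSE) slot into the existing state-by-event jumptable. A self-contained model of the dispatch pattern, in plain userspace C with invented states and handlers rather than kernel code:

	#include <stdio.h>

	enum state { ST_STANDBY, ST_IDLE, NR_STATES };
	enum event { EV_OPEN, EV_CLOSE, NR_EVENTS };

	struct dev { enum state state; };
	typedef void fsm_func(struct dev *, enum event);

	static void do_open(struct dev *d, enum event e)  { d->state = ST_IDLE; puts("opened"); }
	static void do_close(struct dev *d, enum event e) { d->state = ST_STANDBY; puts("closed"); }
	static void do_nop(struct dev *d, enum event e)   { puts("ignored"); }

	/* One handler per (state, event) pair, like vfio_ccw_jumptable. */
	static fsm_func *jumptable[NR_STATES][NR_EVENTS] = {
		[ST_STANDBY] = { [EV_OPEN] = do_open, [EV_CLOSE] = do_nop },
		[ST_IDLE]    = { [EV_OPEN] = do_nop,  [EV_CLOSE] = do_close },
	};

	static void fsm_event(struct dev *d, enum event e)
	{
		jumptable[d->state][e](d, e);   /* dispatch on current state */
	}

	int main(void)
	{
		struct dev d = { .state = ST_STANDBY };
		fsm_event(&d, EV_OPEN);   /* STANDBY + OPEN  -> IDLE */
		fsm_event(&d, EV_CLOSE);  /* IDLE    + CLOSE -> STANDBY */
		return 0;
	}

This mirrors how vfio_ccw_fsm_event() indexes vfio_ccw_jumptable, shown in vfio_ccw_private.h at the end of this diff.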
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index b49e2e9db2dc..4a806a2273b5 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -21,54 +21,28 @@ static const struct vfio_device_ops vfio_ccw_dev_ops;
static int vfio_ccw_mdev_reset(struct vfio_ccw_private *private)
{
- struct subchannel *sch;
- int ret;
-
- sch = private->sch;
/*
- * TODO:
- * In the cureent stage, some things like "no I/O running" and "no
- * interrupt pending" are clear, but we are not sure what other state
- * we need to care about.
- * There are still a lot more instructions need to be handled. We
- * should come back here later.
+ * If the FSM state is seen as Not Operational after closing
+ * and re-opening the mdev, return an error.
*/
- ret = vfio_ccw_sch_quiesce(sch);
- if (ret)
- return ret;
-
- ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
- if (!ret)
- private->state = VFIO_CCW_STATE_IDLE;
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
+ if (private->state == VFIO_CCW_STATE_NOT_OPER)
+ return -EINVAL;
- return ret;
+ return 0;
}
-static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
- unsigned long action,
- void *data)
+static void vfio_ccw_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
{
struct vfio_ccw_private *private =
- container_of(nb, struct vfio_ccw_private, nb);
-
- /*
- * Vendor drivers MUST unpin pages in response to an
- * invalidation.
- */
- if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
- struct vfio_iommu_type1_dma_unmap *unmap = data;
-
- if (!cp_iova_pinned(&private->cp, unmap->iova))
- return NOTIFY_OK;
+ container_of(vdev, struct vfio_ccw_private, vdev);
- if (vfio_ccw_mdev_reset(private))
- return NOTIFY_BAD;
+ /* Drivers MUST unpin pages in response to an invalidation. */
+ if (!cp_iova_pinned(&private->cp, iova, length))
+ return;
- cp_free(&private->cp);
- return NOTIFY_OK;
- }
-
- return NOTIFY_DONE;
+ vfio_ccw_mdev_reset(private);
}
static ssize_t name_show(struct mdev_type *mtype,
@@ -128,11 +102,8 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
vfio_init_group_dev(&private->vdev, &mdev->dev,
&vfio_ccw_dev_ops);
- private->mdev = mdev;
- private->state = VFIO_CCW_STATE_IDLE;
-
- VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: create\n",
- mdev_uuid(mdev), private->sch->schid.cssid,
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: create\n",
+ private->sch->schid.cssid,
private->sch->schid.ssid,
private->sch->schid.sch_no);
@@ -145,8 +116,6 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
err_atomic:
vfio_uninit_group_dev(&private->vdev);
atomic_inc(&private->avail);
- private->mdev = NULL;
- private->state = VFIO_CCW_STATE_IDLE;
return ret;
}
@@ -154,23 +123,14 @@ static void vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
struct vfio_ccw_private *private = dev_get_drvdata(mdev->dev.parent);
- VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: remove\n",
- mdev_uuid(mdev), private->sch->schid.cssid,
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: remove\n",
+ private->sch->schid.cssid,
private->sch->schid.ssid,
private->sch->schid.sch_no);
vfio_unregister_group_dev(&private->vdev);
- if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
- (private->state != VFIO_CCW_STATE_STANDBY)) {
- if (!vfio_ccw_sch_quiesce(private->sch))
- private->state = VFIO_CCW_STATE_STANDBY;
- /* The state will be NOT_OPER on error. */
- }
-
vfio_uninit_group_dev(&private->vdev);
- cp_free(&private->cp);
- private->mdev = NULL;
atomic_inc(&private->avail);
}
@@ -178,19 +138,15 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
{
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
- unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
int ret;
- private->nb.notifier_call = vfio_ccw_mdev_notifier;
-
- ret = vfio_register_notifier(vdev, VFIO_IOMMU_NOTIFY,
- &events, &private->nb);
- if (ret)
- return ret;
+ /* Device cannot simply be opened again from this state */
+ if (private->state == VFIO_CCW_STATE_NOT_OPER)
+ return -EINVAL;
ret = vfio_ccw_register_async_dev_regions(private);
if (ret)
- goto out_unregister;
+ return ret;
ret = vfio_ccw_register_schib_dev_regions(private);
if (ret)
@@ -200,11 +156,16 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
if (ret)
goto out_unregister;
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
+ if (private->state == VFIO_CCW_STATE_NOT_OPER) {
+ ret = -EINVAL;
+ goto out_unregister;
+ }
+
return ret;
out_unregister:
vfio_ccw_unregister_dev_regions(private);
- vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &private->nb);
return ret;
}
@@ -213,16 +174,8 @@ static void vfio_ccw_mdev_close_device(struct vfio_device *vdev)
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
- if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
- (private->state != VFIO_CCW_STATE_STANDBY)) {
- if (!vfio_ccw_mdev_reset(private))
- private->state = VFIO_CCW_STATE_STANDBY;
- /* The state will be NOT_OPER on error. */
- }
-
- cp_free(&private->cp);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
vfio_ccw_unregister_dev_regions(private);
- vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &private->nb);
}
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
@@ -645,6 +598,7 @@ static const struct vfio_device_ops vfio_ccw_dev_ops = {
.write = vfio_ccw_mdev_write,
.ioctl = vfio_ccw_mdev_ioctl,
.request = vfio_ccw_mdev_request,
+ .dma_unmap = vfio_ccw_dma_unmap,
};
struct mdev_driver vfio_ccw_mdev_driver = {
@@ -657,13 +611,3 @@ struct mdev_driver vfio_ccw_mdev_driver = {
.remove = vfio_ccw_mdev_remove,
.supported_type_groups = mdev_type_groups,
};
-
-int vfio_ccw_mdev_reg(struct subchannel *sch)
-{
- return mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
-}
-
-void vfio_ccw_mdev_unreg(struct subchannel *sch)
-{
- mdev_unregister_device(&sch->dev);
-}
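With the notifier path gone, unmap handling reduces to the .dma_unmap member of struct vfio_device_ops, wired up above as vfio_ccw_dma_unmap(). The general shape for any vfio driver, sketched with hypothetical my_* names:

	#include <linux/vfio.h>

	/* Called by the vfio core when userspace unmaps [iova, iova + length);
	 * the driver must drop any page pins that intersect that range. */
	static void my_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
	{
		/* e.g. check pinned ranges and release the affected pins */
	}

	static const struct vfio_device_ops my_dev_ops = {
		.name      = "my-vfio-dev",
		.dma_unmap = my_dma_unmap,
		/* ... open_device, close_device, read/write, ioctl, ... */
	};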
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index 7272eb788612..cd24b7fada91 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -73,8 +73,6 @@ struct vfio_ccw_crw {
* @state: internal state of the device
* @completion: synchronization helper of the I/O completion
* @avail: available for creating a mediated device
- * @mdev: pointer to the mediated device
- * @nb: notifier for vfio events
* @io_region: MMIO region to input/output I/O arguments/results
* @io_mutex: protect against concurrent update of I/O regions
* @region: additional regions for other subchannel operations
@@ -97,8 +95,6 @@ struct vfio_ccw_private {
int state;
struct completion *completion;
atomic_t avail;
- struct mdev_device *mdev;
- struct notifier_block nb;
struct ccw_io_region *io_region;
struct mutex io_mutex;
struct vfio_ccw_region *region;
@@ -119,10 +115,7 @@ struct vfio_ccw_private {
struct work_struct crw_work;
} __aligned(8);
-extern int vfio_ccw_mdev_reg(struct subchannel *sch);
-extern void vfio_ccw_mdev_unreg(struct subchannel *sch);
-
-extern int vfio_ccw_sch_quiesce(struct subchannel *sch);
+int vfio_ccw_sch_quiesce(struct subchannel *sch);
extern struct mdev_driver vfio_ccw_mdev_driver;
@@ -147,6 +140,8 @@ enum vfio_ccw_event {
VFIO_CCW_EVENT_IO_REQ,
VFIO_CCW_EVENT_INTERRUPT,
VFIO_CCW_EVENT_ASYNC_REQ,
+ VFIO_CCW_EVENT_OPEN,
+ VFIO_CCW_EVENT_CLOSE,
/* last element! */
NR_VFIO_CCW_EVENTS
};
@@ -158,7 +153,7 @@ typedef void (fsm_func_t)(struct vfio_ccw_private *, enum vfio_ccw_event);
extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS];
static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
- int event)
+ enum vfio_ccw_event event)
{
trace_vfio_ccw_fsm_event(private->sch->schid, private->state, event);
vfio_ccw_jumptable[private->state][event](private, event);