Diffstat (limited to 'drivers/xen/xen-scsiback.c')
 drivers/xen/xen-scsiback.c | 204
 1 file changed, 114 insertions(+), 90 deletions(-)
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index ba0942e481bc..6106ed93817d 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -33,8 +33,6 @@
#define pr_fmt(fmt) "xen-pvscsi: " fmt
-#include <stdarg.h>
-
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/interrupt.h>
@@ -91,7 +89,6 @@ struct vscsibk_info {
unsigned int irq;
struct vscsiif_back_ring ring;
- int ring_error;
spinlock_t ring_lock;
atomic_t nr_unreplied_reqs;
@@ -100,6 +97,8 @@ struct vscsibk_info {
struct list_head v2p_entry_lists;
wait_queue_head_t waiting_to_free;
+
+ struct gnttab_page_cache free_pages;
};
/* theoretical maximum of grants for one request */
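
Note: the global free-page pool (free_pages_lock/free_pages_num/scsiback_free_pages, removed below) is replaced by the generic grant page cache from include/xen/grant_table.h, kept per backend instance in this new field. A minimal usage sketch of that cache API under the signatures from that header; the my_backend_* helpers are illustrative, not part of this driver:

    #include <xen/grant_table.h>

    struct my_backend {
            struct gnttab_page_cache free_pages;    /* per-instance page cache */
    };

    static void my_backend_init(struct my_backend *be)
    {
            gnttab_page_cache_init(&be->free_pages);
    }

    static int my_backend_get_page(struct my_backend *be, struct page **pg)
    {
            /* Hands out a cached page, or falls back to gnttab_alloc_pages(). */
            return gnttab_page_cache_get(&be->free_pages, pg);
    }

    static void my_backend_put_pages(struct my_backend *be, struct page **pg,
                                     unsigned int n, unsigned int limit)
    {
            /* Give the pages back to the cache instead of freeing them... */
            gnttab_page_cache_put(&be->free_pages, pg, n);
            /* ...and trim the cache back to "limit" (scsiback passes its
             * scsiback_max_buffer_pages module parameter here). */
            gnttab_page_cache_shrink(&be->free_pages, limit);
    }
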
@@ -189,10 +188,6 @@ module_param_named(max_buffer_pages, scsiback_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in backend buffer");
-static DEFINE_SPINLOCK(free_pages_lock);
-static int free_pages_num;
-static LIST_HEAD(scsiback_free_pages);
-
/* Global spinlock to protect scsiback TPG list */
static DEFINE_MUTEX(scsiback_mutex);
static LIST_HEAD(scsiback_list);
@@ -208,41 +203,6 @@ static void scsiback_put(struct vscsibk_info *info)
wake_up(&info->waiting_to_free);
}
-static void put_free_pages(struct page **page, int num)
-{
- unsigned long flags;
- int i = free_pages_num + num, n = num;
-
- if (num == 0)
- return;
- if (i > scsiback_max_buffer_pages) {
- n = min(num, i - scsiback_max_buffer_pages);
- gnttab_free_pages(n, page + num - n);
- n = num - n;
- }
- spin_lock_irqsave(&free_pages_lock, flags);
- for (i = 0; i < n; i++)
- list_add(&page[i]->lru, &scsiback_free_pages);
- free_pages_num += n;
- spin_unlock_irqrestore(&free_pages_lock, flags);
-}
-
-static int get_free_page(struct page **page)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&free_pages_lock, flags);
- if (list_empty(&scsiback_free_pages)) {
- spin_unlock_irqrestore(&free_pages_lock, flags);
- return gnttab_alloc_pages(1, page);
- }
- page[0] = list_first_entry(&scsiback_free_pages, struct page, lru);
- list_del(&page[0]->lru);
- free_pages_num--;
- spin_unlock_irqrestore(&free_pages_lock, flags);
- return 0;
-}
-
static unsigned long vaddr_page(struct page *page)
{
unsigned long pfn = page_to_pfn(page);
@@ -260,10 +220,10 @@ static void scsiback_print_status(char *sense_buffer, int errors,
{
struct scsiback_tpg *tpg = pending_req->v2p->tpg;
- pr_err("[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x drv=%02x\n",
+ pr_err("[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x\n",
tpg->tport->tport_name, pending_req->v2p->lun,
- pending_req->cmnd[0], status_byte(errors), msg_byte(errors),
- host_byte(errors), driver_byte(errors));
+ pending_req->cmnd[0], errors & 0xff, COMMAND_COMPLETE,
+ host_byte(errors));
}
static void scsiback_fast_flush_area(struct vscsibk_pend *req)
@@ -303,7 +263,8 @@ static void scsiback_fast_flush_area(struct vscsibk_pend *req)
BUG_ON(err);
}
- put_free_pages(req->pages, req->n_grants);
+ gnttab_page_cache_put(&req->info->free_pages, req->pages,
+ req->n_grants);
req->n_grants = 0;
}
@@ -319,6 +280,70 @@ static void scsiback_free_translation_entry(struct kref *kref)
kfree(entry);
}
+static int32_t scsiback_result(int32_t result)
+{
+ int32_t host_status;
+
+ switch (XEN_VSCSIIF_RSLT_HOST(result)) {
+ case DID_OK:
+ host_status = XEN_VSCSIIF_RSLT_HOST_OK;
+ break;
+ case DID_NO_CONNECT:
+ host_status = XEN_VSCSIIF_RSLT_HOST_NO_CONNECT;
+ break;
+ case DID_BUS_BUSY:
+ host_status = XEN_VSCSIIF_RSLT_HOST_BUS_BUSY;
+ break;
+ case DID_TIME_OUT:
+ host_status = XEN_VSCSIIF_RSLT_HOST_TIME_OUT;
+ break;
+ case DID_BAD_TARGET:
+ host_status = XEN_VSCSIIF_RSLT_HOST_BAD_TARGET;
+ break;
+ case DID_ABORT:
+ host_status = XEN_VSCSIIF_RSLT_HOST_ABORT;
+ break;
+ case DID_PARITY:
+ host_status = XEN_VSCSIIF_RSLT_HOST_PARITY;
+ break;
+ case DID_ERROR:
+ host_status = XEN_VSCSIIF_RSLT_HOST_ERROR;
+ break;
+ case DID_RESET:
+ host_status = XEN_VSCSIIF_RSLT_HOST_RESET;
+ break;
+ case DID_BAD_INTR:
+ host_status = XEN_VSCSIIF_RSLT_HOST_BAD_INTR;
+ break;
+ case DID_PASSTHROUGH:
+ host_status = XEN_VSCSIIF_RSLT_HOST_PASSTHROUGH;
+ break;
+ case DID_SOFT_ERROR:
+ host_status = XEN_VSCSIIF_RSLT_HOST_SOFT_ERROR;
+ break;
+ case DID_IMM_RETRY:
+ host_status = XEN_VSCSIIF_RSLT_HOST_IMM_RETRY;
+ break;
+ case DID_REQUEUE:
+ host_status = XEN_VSCSIIF_RSLT_HOST_REQUEUE;
+ break;
+ case DID_TRANSPORT_DISRUPTED:
+ host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_DISRUPTED;
+ break;
+ case DID_TRANSPORT_FAILFAST:
+ host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST;
+ break;
+ case DID_TRANSPORT_MARGINAL:
+ host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL;
+ break;
+ default:
+ host_status = XEN_VSCSIIF_RSLT_HOST_ERROR;
+ break;
+ }
+
+ return (host_status << 16) | (result & 0x00ffff);
+}
+
static void scsiback_send_response(struct vscsibk_info *info,
char *sense_buffer, int32_t result, uint32_t resid,
uint16_t rqid)
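
Note: the backend no longer puts Linux-internal DID_* host codes on the ring; scsiback_result() above remaps them into the XEN_VSCSIIF_RSLT_HOST_* namespace and rebuilds the 32-bit result word as (host_status << 16) | status. A minimal decode sketch from a frontend's point of view, using only the XEN_VSCSIIF_RSLT_HOST() accessor seen above; the pvscsi_rslt_ok() helper is illustrative, not part of the protocol header:

    #include <linux/types.h>
    #include <xen/interface/io/vscsiif.h>

    /*
     * Result word produced by scsiback_result():
     *   bits 16-23: XEN_VSCSIIF_RSLT_HOST_* code
     *   bits  0-15: SCSI status (low byte is the SAM status byte)
     */
    static inline bool pvscsi_rslt_ok(int32_t rslt)
    {
            return XEN_VSCSIIF_RSLT_HOST(rslt) == XEN_VSCSIIF_RSLT_HOST_OK &&
                   (rslt & 0xff) == 0;  /* SAM_STAT_GOOD */
    }
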
@@ -334,7 +359,7 @@ static void scsiback_send_response(struct vscsibk_info *info,
ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
info->ring.rsp_prod_pvt++;
- ring_res->rslt = result;
+ ring_res->rslt = scsiback_result(result);
ring_res->rqid = rqid;
if (sense_buffer != NULL &&
@@ -397,21 +422,18 @@ static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
{
struct se_cmd *se_cmd = &pending_req->se_cmd;
struct se_session *sess = pending_req->v2p->tpg->tpg_nexus->tvn_se_sess;
- int rc;
scsiback_get(pending_req->info);
se_cmd->tag = pending_req->rqid;
- rc = target_submit_cmd_map_sgls(se_cmd, sess, pending_req->cmnd,
- pending_req->sense_buffer, pending_req->v2p->lun,
- pending_req->data_len, 0,
- pending_req->sc_data_direction, TARGET_SCF_ACK_KREF,
- pending_req->sgl, pending_req->n_sg,
- NULL, 0, NULL, 0);
- if (rc < 0) {
- transport_send_check_condition_and_sense(se_cmd,
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
- transport_generic_free_cmd(se_cmd, 0);
- }
+ target_init_cmd(se_cmd, sess, pending_req->sense_buffer,
+ pending_req->v2p->lun, pending_req->data_len, 0,
+ pending_req->sc_data_direction, TARGET_SCF_ACK_KREF);
+
+ if (target_submit_prep(se_cmd, pending_req->cmnd, pending_req->sgl,
+ pending_req->n_sg, NULL, 0, NULL, 0, GFP_KERNEL))
+ return;
+
+ target_submit(se_cmd);
}
static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
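
Note: with the split LIO submission API used above, a nonzero return from target_submit_prep() means the target core has already reported the failure and dropped its reference to the command, so the caller only bails out; that is why the old transport_send_check_condition_and_sense()/transport_generic_free_cmd() error path disappears. A condensed sketch of the pattern, assuming that behaviour of the core; submit_one() and its parameters are placeholders, not driver code:

    #include <target/target_core_base.h>
    #include <target/target_core_fabric.h>

    static void submit_one(struct se_cmd *se_cmd, struct se_session *sess,
                           unsigned char *cdb, unsigned char *sense, u64 lun,
                           u32 data_len, int dir, struct scatterlist *sgl,
                           u32 n_sg)
    {
            target_init_cmd(se_cmd, sess, sense, lun, data_len, 0, dir,
                            TARGET_SCF_ACK_KREF);

            /* A nonzero return means the core has already sent the error
             * status and released the command - just stop here. */
            if (target_submit_prep(se_cmd, cdb, sgl, n_sg, NULL, 0, NULL, 0,
                                   GFP_KERNEL))
                    return;

            target_submit(se_cmd);
    }
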
@@ -423,12 +445,12 @@ static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
return 0;
err = gnttab_map_refs(map, NULL, pg, cnt);
- BUG_ON(err);
for (i = 0; i < cnt; i++) {
if (unlikely(map[i].status != GNTST_okay)) {
pr_err("invalid buffer -- could not remap it\n");
map[i].handle = SCSIBACK_INVALID_HANDLE;
- err = -ENOMEM;
+ if (!err)
+ err = -ENOMEM;
} else {
get_page(pg[i]);
}
@@ -446,8 +468,8 @@ static int scsiback_gnttab_data_map_list(struct vscsibk_pend *pending_req,
struct vscsibk_info *info = pending_req->info;
for (i = 0; i < cnt; i++) {
- if (get_free_page(pg + mapcount)) {
- put_free_pages(pg, mapcount);
+ if (gnttab_page_cache_get(&info->free_pages, pg + mapcount)) {
+ gnttab_page_cache_put(&info->free_pages, pg, mapcount);
pr_err("no grant page\n");
return -ENOMEM;
}
@@ -597,7 +619,7 @@ static void scsiback_device_action(struct vscsibk_pend *pending_req,
struct scsiback_nexus *nexus = tpg->tpg_nexus;
struct se_cmd *se_cmd = &pending_req->se_cmd;
u64 unpacked_lun = pending_req->v2p->lun;
- int rc, err = FAILED;
+ int rc, err = XEN_VSCSIIF_RSLT_RESET_FAILED;
init_completion(&pending_req->tmr_done);
@@ -611,7 +633,7 @@ static void scsiback_device_action(struct vscsibk_pend *pending_req,
wait_for_completion(&pending_req->tmr_done);
err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
- SUCCESS : FAILED;
+ XEN_VSCSIIF_RSLT_RESET_SUCCESS : XEN_VSCSIIF_RSLT_RESET_FAILED;
scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
transport_generic_free_cmd(&pending_req->se_cmd, 0);
@@ -722,7 +744,8 @@ static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info,
return pending_req;
}
-static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+static int scsiback_do_cmd_fn(struct vscsibk_info *info,
+ unsigned int *eoi_flags)
{
struct vscsiif_back_ring *ring = &info->ring;
struct vscsiif_request ring_req;
@@ -739,11 +762,12 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
rc = ring->rsp_prod_pvt;
pr_warn("Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n",
info->domid, rp, rc, rp - rc);
- info->ring_error = 1;
- return 0;
+ return -EINVAL;
}
while ((rc != rp)) {
+ *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
+
if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
break;
@@ -757,10 +781,10 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
result = DID_NO_CONNECT;
break;
default:
- result = DRIVER_ERROR;
+ result = DID_ERROR;
break;
}
- scsiback_send_response(info, NULL, result << 24, 0,
+ scsiback_send_response(info, NULL, result << 16, 0,
ring_req.rqid);
return 1;
}
@@ -770,7 +794,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
scsiback_fast_flush_area(pending_req);
scsiback_do_resp_with_sense(NULL,
- DRIVER_ERROR << 24, 0, pending_req);
+ DID_ERROR << 16, 0, pending_req);
transport_generic_free_cmd(&pending_req->se_cmd, 0);
} else {
scsiback_cmd_exec(pending_req);
@@ -785,7 +809,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
break;
default:
pr_err_ratelimited("invalid request\n");
- scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24, 0,
+ scsiback_do_resp_with_sense(NULL, DID_ERROR << 16, 0,
pending_req);
transport_generic_free_cmd(&pending_req->se_cmd, 0);
break;
@@ -795,6 +819,8 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
cond_resched();
}
+ gnttab_page_cache_shrink(&info->free_pages, scsiback_max_buffer_pages);
+
RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
return more_to_do;
}
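
Note: instead of latching a ring_error flag, scsiback_do_cmd_fn() now reports a bogus producer index with -EINVAL and leaves the reaction to the interrupt handler below. A condensed view of the standard Xen back-ring consumer loop it implements (per-request dispatch elided); this is a sketch, not the driver's exact code:

    #include <linux/errno.h>
    #include <xen/interface/io/ring.h>
    #include <xen/interface/io/vscsiif.h>

    static int consume_ring(struct vscsiif_back_ring *ring)
    {
            struct vscsiif_request ring_req;
            RING_IDX rc, rp;
            int more_to_do;

            rc = ring->req_cons;
            rp = ring->sring->req_prod;
            rmb();  /* read the producer index before the request slots */

            if (RING_REQUEST_PROD_OVERFLOW(ring, rp))
                    return -EINVAL;  /* frontend supplied a bogus index */

            while (rc != rp) {
                    if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
                            break;
                    RING_COPY_REQUEST(ring, rc, &ring_req); /* private copy */
                    ring->req_cons = ++rc;
                    /* ... dispatch ring_req and send the response ... */
            }

            RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do);
            return more_to_do;
    }
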
@@ -802,13 +828,16 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
static irqreturn_t scsiback_irq_fn(int irq, void *dev_id)
{
struct vscsibk_info *info = dev_id;
+ int rc;
+ unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
- if (info->ring_error)
- return IRQ_HANDLED;
-
- while (scsiback_do_cmd_fn(info))
+ while ((rc = scsiback_do_cmd_fn(info, &eoi_flags)) > 0)
cond_resched();
+ /* In case of a ring error we keep the event channel masked. */
+ if (!rc)
+ xen_irq_lateeoi(irq, eoi_flags);
+
return IRQ_HANDLED;
}
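
Note: the handler above follows the lateeoi event-channel model: delivery is only re-enabled by an explicit xen_irq_lateeoi() call, XEN_EOI_FLAG_SPURIOUS is dropped once a genuine request was consumed, and a ring error (rc < 0) skips the EOI entirely so a misbehaving frontend keeps its event channel masked. The same pattern in isolation, with do_work() and struct my_backend as placeholders for scsiback_do_cmd_fn() and the backend state:

    #include <linux/interrupt.h>
    #include <linux/sched.h>
    #include <xen/events.h>

    struct my_backend;                                      /* placeholder */
    static int do_work(struct my_backend *be, unsigned int *eoi_flags);

    static irqreturn_t my_backend_irq(int irq, void *dev_id)
    {
            struct my_backend *be = dev_id;
            unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
            int rc;

            /* do_work() clears XEN_EOI_FLAG_SPURIOUS in *eoi_flags once it
             * has consumed a real request and returns <0 on a ring error. */
            while ((rc = do_work(be, &eoi_flags)) > 0)
                    cond_resched();

            /* No EOI on error: the event channel stays masked for this port. */
            if (!rc)
                    xen_irq_lateeoi(irq, eoi_flags);

            return IRQ_HANDLED;
    }
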
@@ -829,7 +858,7 @@ static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref,
sring = (struct vscsiif_sring *)area;
BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);
- err = bind_interdomain_evtchn_to_irq(info->domid, evtchn);
+ err = bind_interdomain_evtchn_to_irq_lateeoi(info->dev, evtchn);
if (err < 0)
goto unmap_page;
@@ -854,7 +883,8 @@ unmap_page:
static int scsiback_map(struct vscsibk_info *info)
{
struct xenbus_device *dev = info->dev;
- unsigned int ring_ref, evtchn;
+ unsigned int ring_ref;
+ evtchn_port_t evtchn;
int err;
err = xenbus_gather(XBT_NIL, dev->otherend,
@@ -1079,7 +1109,7 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
"%s: writing %s", __func__, state);
return;
}
- strlcpy(phy, val, VSCSI_NAMELEN);
+ strscpy(phy, val, VSCSI_NAMELEN);
kfree(val);
/* virtual SCSI device */
@@ -1184,7 +1214,7 @@ static void scsiback_frontend_changed(struct xenbus_device *dev,
xenbus_switch_state(dev, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
- /* fall through - if not online */
+ fallthrough; /* if not online */
case XenbusStateUnknown:
device_unregister(&dev->dev);
break;
@@ -1228,6 +1258,8 @@ static int scsiback_remove(struct xenbus_device *dev)
scsiback_release_translation_entry(info);
+ gnttab_page_cache_shrink(&info->free_pages, 0);
+
dev_set_drvdata(&dev->dev, NULL);
return 0;
@@ -1252,13 +1284,13 @@ static int scsiback_probe(struct xenbus_device *dev,
info->domid = dev->otherend_id;
spin_lock_init(&info->ring_lock);
- info->ring_error = 0;
atomic_set(&info->nr_unreplied_reqs, 0);
init_waitqueue_head(&info->waiting_to_free);
info->dev = dev;
info->irq = 0;
INIT_LIST_HEAD(&info->v2p_entry_lists);
spin_lock_init(&info->v2p_lock);
+ gnttab_page_cache_init(&info->free_pages);
err = xenbus_printf(XBT_NIL, dev->nodename, "feature-sg-grant", "%u",
SG_ALL);
@@ -1431,8 +1463,7 @@ static int scsiback_queue_status(struct se_cmd *se_cmd)
if (se_cmd->sense_buffer &&
((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
(se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE)))
- pending_req->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ pending_req->result = SAM_STAT_CHECK_CONDITION;
else
pending_req->result = se_cmd->scsi_status;
@@ -1875,13 +1906,6 @@ out:
static void __exit scsiback_exit(void)
{
- struct page *page;
-
- while (free_pages_num) {
- if (get_free_page(&page))
- BUG();
- gnttab_free_pages(1, &page);
- }
target_unregister_template(&scsiback_ops);
xenbus_unregister_driver(&scsiback_driver);
}