Diffstat (limited to 'drivers/target/target_core_user.c')
-rw-r--r--	drivers/target/target_core_user.c	| 421
1 file changed, 273 insertions(+), 148 deletions(-)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 7f96dfa32b9c..9cd404acdb82 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -83,14 +83,10 @@
#define DATA_BLOCK_SIZE PAGE_SIZE
#define DATA_BLOCK_SHIFT PAGE_SHIFT
#define DATA_BLOCK_BITS_DEF (256 * 1024)
-#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)
#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))
-/* The total size of the ring is 8M + 256K * PAGE_SIZE */
-#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
-
/*
* Default number of global data blocks(512K * PAGE_SIZE)
* when the unmap thread will be started.
@@ -98,6 +94,7 @@
#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)
static u8 tcmu_kern_cmd_reply_supported;
+static u8 tcmu_netlink_blocked;
static struct device *tcmu_root_device;
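As an aside, a quick compile-and-run check of the MB <-> block conversions above (a sketch assuming 4 KiB pages, i.e. DATA_BLOCK_SHIFT == PAGE_SHIFT == 12, with the macros reproduced locally):

#include <assert.h>

#define DATA_BLOCK_SHIFT	12	/* assumption: 4 KiB pages */
#define TCMU_MBS_TO_BLOCKS(_mbs)	((_mbs) << (20 - DATA_BLOCK_SHIFT))
#define TCMU_BLOCKS_TO_MBS(_blocks)	((_blocks) >> (20 - DATA_BLOCK_SHIFT))

int main(void)
{
	assert(TCMU_MBS_TO_BLOCKS(1) == 256);		/* 256 pages per MiB */
	assert(TCMU_BLOCKS_TO_MBS(256 * 1024) == 1024);	/* DATA_BLOCK_BITS_DEF: 1 GiB per device */
	assert(TCMU_BLOCKS_TO_MBS(512 * 1024) == 2048);	/* TCMU_GLOBAL_MAX_BLOCKS_DEF: 2 GiB global */
	return 0;
}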
@@ -107,9 +104,16 @@ struct tcmu_hba {
#define TCMU_CONFIG_LEN 256
+static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
+static LIST_HEAD(tcmu_nl_cmd_list);
+
+struct tcmu_dev;
+
struct tcmu_nl_cmd {
/* wake up thread waiting for reply */
struct completion complete;
+ struct list_head nl_list;
+ struct tcmu_dev *udev;
int cmd;
int status;
};
@@ -133,7 +137,7 @@ struct tcmu_dev {
struct inode *inode;
struct tcmu_mailbox *mb_addr;
- size_t dev_size;
+ uint64_t dev_size;
u32 cmdr_size;
u32 cmdr_last_cleaned;
/* Offset of data area from start of mb */
@@ -161,10 +165,7 @@ struct tcmu_dev {
struct list_head timedout_entry;
- spinlock_t nl_cmd_lock;
struct tcmu_nl_cmd curr_nl_cmd;
- /* wake up threads waiting on curr_nl_cmd */
- wait_queue_head_t nl_cmd_wq;
char dev_config[TCMU_CONFIG_LEN];
@@ -255,6 +256,92 @@ MODULE_PARM_DESC(global_max_data_area_mb,
"Max MBs allowed to be allocated to all the tcmu device's "
"data areas.");
+static int tcmu_get_block_netlink(char *buffer,
+ const struct kernel_param *kp)
+{
+ return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
+ "blocked" : "unblocked");
+}
+
+static int tcmu_set_block_netlink(const char *str,
+ const struct kernel_param *kp)
+{
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(str, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val > 1) {
+ pr_err("Invalid block netlink value %u\n", val);
+ return -EINVAL;
+ }
+
+ tcmu_netlink_blocked = val;
+ return 0;
+}
+
+static const struct kernel_param_ops tcmu_block_netlink_op = {
+ .set = tcmu_set_block_netlink,
+ .get = tcmu_get_block_netlink,
+};
+
+module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");
+
+static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
+{
+ struct tcmu_dev *udev = nl_cmd->udev;
+
+ if (!tcmu_netlink_blocked) {
+ pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
+ return -EBUSY;
+ }
+
+ if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
+ pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
+ nl_cmd->status = -EINTR;
+ list_del(&nl_cmd->nl_list);
+ complete(&nl_cmd->complete);
+ }
+ return 0;
+}
+
+static int tcmu_set_reset_netlink(const char *str,
+ const struct kernel_param *kp)
+{
+ struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(str, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != 1) {
+ pr_err("Invalid reset netlink value %u\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&tcmu_nl_cmd_mutex);
+ list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
+ ret = tcmu_fail_netlink_cmd(nl_cmd);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+
+ return ret;
+}
+
+static const struct kernel_param_ops tcmu_reset_netlink_op = {
+ .set = tcmu_set_reset_netlink,
+};
+
+module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
+MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");
+
/* multicast group */
enum tcmu_multicast_groups {
TCMU_MCGRP_CONFIG,
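Together, block_netlink and reset_netlink give an administrator a way out when a userspace handler dies with a netlink command outstanding: block the interface, fail the stuck commands (tcmu_fail_netlink_cmd() completes them with -EINTR), restart the daemon, then unblock. A minimal userspace sketch; the sysfs paths assume the module is loaded as target_core_user:

#include <stdio.h>

/* Hypothetical helper: write one value under
 * /sys/module/target_core_user/parameters/. */
static int write_param(const char *param, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/module/target_core_user/parameters/%s", param);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	write_param("block_netlink", "1");	/* stop new nl cmds */
	write_param("reset_netlink", "1");	/* abort outstanding ones */
	/* ...restart the userspace handler here... */
	write_param("block_netlink", "0");
	return 0;
}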
@@ -274,48 +361,50 @@ static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{
- struct se_device *dev;
- struct tcmu_dev *udev;
+ struct tcmu_dev *udev = NULL;
struct tcmu_nl_cmd *nl_cmd;
int dev_id, rc, ret = 0;
- bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE);
if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
!info->attrs[TCMU_ATTR_DEVICE_ID]) {
printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
- return -EINVAL;
+ return -EINVAL;
}
dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
- dev = target_find_device(dev_id, !is_removed);
- if (!dev) {
- printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n",
- completed_cmd, rc, dev_id);
- return -ENODEV;
+ mutex_lock(&tcmu_nl_cmd_mutex);
+ list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
+ if (nl_cmd->udev->se_dev.dev_index == dev_id) {
+ udev = nl_cmd->udev;
+ break;
+ }
}
- udev = TCMU_DEV(dev);
- spin_lock(&udev->nl_cmd_lock);
- nl_cmd = &udev->curr_nl_cmd;
+ if (!udev) {
+ pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
+ completed_cmd, rc, dev_id);
+ ret = -ENODEV;
+ goto unlock;
+ }
+ list_del(&nl_cmd->nl_list);
- pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id,
- nl_cmd->cmd, completed_cmd, rc);
+ pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
+ udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
+ nl_cmd->status);
if (nl_cmd->cmd != completed_cmd) {
- printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n",
- completed_cmd, nl_cmd->cmd);
+ pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
+ udev->name, completed_cmd, nl_cmd->cmd);
ret = -EINVAL;
- } else {
- nl_cmd->status = rc;
+ goto unlock;
}
- spin_unlock(&udev->nl_cmd_lock);
- if (!is_removed)
- target_undepend_item(&dev->dev_group.cg_item);
- if (!ret)
- complete(&nl_cmd->complete);
+ nl_cmd->status = rc;
+ complete(&nl_cmd->complete);
+unlock:
+ mutex_unlock(&tcmu_nl_cmd_mutex);
return ret;
}
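Note the completion path now walks tcmu_nl_cmd_list under tcmu_nl_cmd_mutex instead of pinning the device via target_find_device()/target_undepend_item(). The reply format itself is unchanged: userspace echoes back the device id and a status. A hedged sketch of that side using libnl-3 (family resolution and error handling omitted; the _DONE command and attribute values come from the tcmu uapi header):

#include <errno.h>
#include <stdint.h>
#include <netlink/genl/genl.h>
#include <linux/target_core_user.h>

/* Ack a TCMU_CMD_ADDED_DEVICE event; the kernel matches on
 * TCMU_ATTR_DEVICE_ID and wakes tcmu_wait_genl_cmd_reply(). */
static int tcmu_ack_added_device(struct nl_sock *sk, int family_id,
				 uint32_t dev_id, int status)
{
	struct nl_msg *msg = nlmsg_alloc();

	if (!msg)
		return -ENOMEM;
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family_id, 0, 0,
		    TCMU_CMD_ADDED_DEVICE_DONE, 2 /* family version */);
	nla_put_u32(msg, TCMU_ATTR_DEVICE_ID, dev_id);
	nla_put_s32(msg, TCMU_ATTR_CMD_STATUS, status);
	return nl_send_auto(sk, msg);
}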
@@ -656,7 +745,7 @@ static void scatter_data_area(struct tcmu_dev *udev,
}
static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
- bool bidi)
+ bool bidi, uint32_t read_len)
{
struct se_cmd *se_cmd = cmd->se_cmd;
int i, dbi;
@@ -689,7 +778,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
for_each_sg(data_sg, sg, data_nents, i) {
int sg_remaining = sg->length;
to = kmap_atomic(sg_page(sg)) + sg->offset;
- while (sg_remaining > 0) {
+ while (sg_remaining > 0 && read_len > 0) {
if (block_remaining == 0) {
if (from)
kunmap_atomic(from);
@@ -701,6 +790,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
}
copy_bytes = min_t(size_t, sg_remaining,
block_remaining);
+ if (read_len < copy_bytes)
+ copy_bytes = read_len;
offset = DATA_BLOCK_SIZE - block_remaining;
tcmu_flush_dcache_range(from, copy_bytes);
memcpy(to + sg->length - sg_remaining, from + offset,
@@ -708,8 +799,11 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
sg_remaining -= copy_bytes;
block_remaining -= copy_bytes;
+ read_len -= copy_bytes;
}
kunmap_atomic(to - sg->offset);
+ if (read_len == 0)
+ break;
}
if (from)
kunmap_atomic(from);
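In effect the gather loop now copies min(read_len, total scatter-gather length) bytes and stops as soon as the valid data is exhausted. A standalone model of that clamping (a sketch, not kernel code):

#include <assert.h>
#include <stddef.h>

/* Copy at most read_len bytes across a list of segment lengths,
 * mirroring the early exit added to gather_data_area() above. */
static size_t gather_clamped(const size_t *seg_len, size_t nents,
			     size_t read_len)
{
	size_t copied = 0;

	for (size_t i = 0; i < nents && read_len > 0; i++) {
		size_t chunk = seg_len[i] < read_len ? seg_len[i] : read_len;

		copied += chunk;
		read_len -= chunk;
	}
	return copied;
}

int main(void)
{
	size_t sgl[] = { 4096, 4096 };

	assert(gather_clamped(sgl, 2, 512) == 512);	/* short read */
	assert(gather_clamped(sgl, 2, 8192) == 8192);	/* full read */
	return 0;
}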
@@ -977,7 +1071,6 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
&udev->cmd_timer);
if (ret) {
tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
- mutex_unlock(&udev->cmdr_lock);
*scsi_err = TCM_OUT_OF_RESOURCES;
return -1;
@@ -1042,6 +1135,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
{
struct se_cmd *se_cmd = cmd->se_cmd;
struct tcmu_dev *udev = cmd->tcmu_dev;
+ bool read_len_valid = false;
+ uint32_t read_len = se_cmd->data_length;
/*
* cmd has been completed already from timeout, just reclaim
@@ -1056,13 +1151,28 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
cmd->se_cmd);
entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
- } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
+ goto done;
+ }
+
+ if (se_cmd->data_direction == DMA_FROM_DEVICE &&
+ (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
+ read_len_valid = true;
+ if (entry->rsp.read_len < read_len)
+ read_len = entry->rsp.read_len;
+ }
+
+ if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
- } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
+ if (!read_len_valid)
+ goto done;
+ else
+ se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
+ }
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
/* Get Data-In buffer before clean up */
- gather_data_area(udev, cmd, true);
+ gather_data_area(udev, cmd, true, read_len);
} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
- gather_data_area(udev, cmd, false);
+ gather_data_area(udev, cmd, false, read_len);
} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
/* TODO: */
} else if (se_cmd->data_direction != DMA_NONE) {
@@ -1070,7 +1180,13 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
se_cmd->data_direction);
}
- target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
+done:
+ if (read_len_valid) {
+ pr_debug("read_len = %d\n", read_len);
+ target_complete_cmd_with_length(cmd->se_cmd,
+ entry->rsp.scsi_status, read_len);
+ } else
+ target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
out:
cmd->se_cmd = NULL;
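On the userspace side, a handler that serviced a short READ advertises the valid length before ringing the doorbell; the kernel then gathers only that much and completes via target_complete_cmd_with_length(). A minimal sketch assuming the uapi additions that accompany this patch (TCMU_UFLAG_READ_LEN and rsp.read_len in include/uapi/linux/target_core_user.h); handlers should first check the TCMU_MAILBOX_FLAG_CAP_READ_LEN capability advertised further down in tcmu_configure_device():

#include <stdint.h>
#include <linux/target_core_user.h>	/* struct tcmu_cmd_entry */

/* Complete a DMA_FROM_DEVICE command that returned fewer bytes
 * than requested. */
static void tcmu_complete_short_read(struct tcmu_cmd_entry *entry,
				     uint32_t bytes_read)
{
	entry->hdr.uflags |= TCMU_UFLAG_READ_LEN;
	entry->rsp.read_len = bytes_read;
	entry->rsp.scsi_status = 0;	/* SAM_STAT_GOOD */
}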
@@ -1254,6 +1370,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
udev->max_blocks = DATA_BLOCK_BITS_DEF;
mutex_init(&udev->cmdr_lock);
+ INIT_LIST_HEAD(&udev->node);
INIT_LIST_HEAD(&udev->timedout_entry);
INIT_LIST_HEAD(&udev->cmdr_queue);
idr_init(&udev->commands);
@@ -1261,9 +1378,6 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
- init_waitqueue_head(&udev->nl_cmd_wq);
- spin_lock_init(&udev->nl_cmd_lock);
-
INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
return &udev->se_dev;
@@ -1537,38 +1651,48 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
return 0;
}
-static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
+static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
{
struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
if (!tcmu_kern_cmd_reply_supported)
- return;
+ return 0;
if (udev->nl_reply_supported <= 0)
- return;
+ return 0;
+
+ mutex_lock(&tcmu_nl_cmd_mutex);
-relock:
- spin_lock(&udev->nl_cmd_lock);
+ if (tcmu_netlink_blocked) {
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
+ udev->name);
+ return -EAGAIN;
+ }
if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
- spin_unlock(&udev->nl_cmd_lock);
- pr_debug("sleeping for open nl cmd\n");
- wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC));
- goto relock;
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ pr_warn("netlink cmd %d already executing on %s\n",
+ nl_cmd->cmd, udev->name);
+ return -EBUSY;
}
memset(nl_cmd, 0, sizeof(*nl_cmd));
nl_cmd->cmd = cmd;
+ nl_cmd->udev = udev;
init_completion(&nl_cmd->complete);
+ INIT_LIST_HEAD(&nl_cmd->nl_list);
+
+ list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);
- spin_unlock(&udev->nl_cmd_lock);
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ return 0;
}
static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
{
struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
int ret;
- DEFINE_WAIT(__wait);
if (!tcmu_kern_cmd_reply_supported)
return 0;
@@ -1579,13 +1703,10 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
pr_debug("sleeping for nl reply\n");
wait_for_completion(&nl_cmd->complete);
- spin_lock(&udev->nl_cmd_lock);
+ mutex_lock(&tcmu_nl_cmd_mutex);
nl_cmd->cmd = TCMU_CMD_UNSPEC;
ret = nl_cmd->status;
- nl_cmd->status = 0;
- spin_unlock(&udev->nl_cmd_lock);
-
- wake_up_all(&udev->nl_cmd_wq);
+ mutex_unlock(&tcmu_nl_cmd_mutex);
return ret;
}
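Net effect of the two functions above: the old sleep on nl_cmd_wq is gone. If the interface is blocked, tcmu_init_genl_cmd_reply() fails with -EAGAIN; if a command is already outstanding on the device, it fails with -EBUSY; and both errors now propagate to the caller through tcmu_netlink_event_send() below instead of stalling configfs operations indefinitely.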
@@ -1629,19 +1750,21 @@ free_skb:
static int tcmu_netlink_event_send(struct tcmu_dev *udev,
enum tcmu_genl_cmd cmd,
- struct sk_buff **buf, void **hdr)
+ struct sk_buff *skb, void *msg_header)
{
- int ret = 0;
- struct sk_buff *skb = *buf;
- void *msg_header = *hdr;
+ int ret;
genlmsg_end(skb, msg_header);
- tcmu_init_genl_cmd_reply(udev, cmd);
+ ret = tcmu_init_genl_cmd_reply(udev, cmd);
+ if (ret) {
+ nlmsg_free(skb);
+ return ret;
+ }
ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
TCMU_MCGRP_CONFIG, GFP_KERNEL);
- /* We don't care if no one is listening */
+ /* We don't care if no one is listening */
if (ret == -ESRCH)
ret = 0;
if (!ret)
@@ -1659,9 +1782,8 @@ static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
&msg_header);
if (ret < 0)
return ret;
- return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, &skb,
- &msg_header);
-
+ return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
+ msg_header);
}
static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
@@ -1675,7 +1797,7 @@ static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
if (ret < 0)
return ret;
return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
static int tcmu_update_uio_info(struct tcmu_dev *udev)
@@ -1717,9 +1839,11 @@ static int tcmu_configure_device(struct se_device *dev)
info = &udev->uio_info;
+ mutex_lock(&udev->cmdr_lock);
udev->data_bitmap = kcalloc(BITS_TO_LONGS(udev->max_blocks),
sizeof(unsigned long),
GFP_KERNEL);
+ mutex_unlock(&udev->cmdr_lock);
if (!udev->data_bitmap) {
ret = -ENOMEM;
goto err_bitmap_alloc;
@@ -1740,7 +1864,7 @@ static int tcmu_configure_device(struct se_device *dev)
/* Initialise the mailbox of the ring buffer */
mb = udev->mb_addr;
mb->version = TCMU_MAILBOX_VERSION;
- mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
+ mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
mb->cmdr_off = CMDR_OFF;
mb->cmdr_size = udev->cmdr_size;
@@ -1814,11 +1938,6 @@ err_bitmap_alloc:
return ret;
}
-static bool tcmu_dev_configured(struct tcmu_dev *udev)
-{
- return udev->uio_info.uio_dev ? true : false;
-}
-
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1925,45 +2044,76 @@ enum {
static match_table_t tokens = {
{Opt_dev_config, "dev_config=%s"},
- {Opt_dev_size, "dev_size=%u"},
- {Opt_hw_block_size, "hw_block_size=%u"},
- {Opt_hw_max_sectors, "hw_max_sectors=%u"},
+ {Opt_dev_size, "dev_size=%s"},
+ {Opt_hw_block_size, "hw_block_size=%d"},
+ {Opt_hw_max_sectors, "hw_max_sectors=%d"},
{Opt_nl_reply_supported, "nl_reply_supported=%d"},
- {Opt_max_data_area_mb, "max_data_area_mb=%u"},
+ {Opt_max_data_area_mb, "max_data_area_mb=%d"},
{Opt_err, NULL}
};
static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
{
- unsigned long tmp_ul;
- char *arg_p;
- int ret;
-
- arg_p = match_strdup(arg);
- if (!arg_p)
- return -ENOMEM;
+ int val, ret;
- ret = kstrtoul(arg_p, 0, &tmp_ul);
- kfree(arg_p);
+ ret = match_int(arg, &val);
if (ret < 0) {
- pr_err("kstrtoul() failed for dev attrib\n");
+ pr_err("match_int() failed for dev attrib. Error %d.\n",
+ ret);
return ret;
}
- if (!tmp_ul) {
- pr_err("dev attrib must be nonzero\n");
+
+ if (val <= 0) {
+ pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
+ val);
return -EINVAL;
}
- *dev_attrib = tmp_ul;
+ *dev_attrib = val;
return 0;
}
+static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
+{
+ int val, ret;
+
+ ret = match_int(arg, &val);
+ if (ret < 0) {
+ pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (val <= 0) {
+ pr_err("Invalid max_data_area %d.\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&udev->cmdr_lock);
+ if (udev->data_bitmap) {
+ pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
+ if (udev->max_blocks > tcmu_global_max_blocks) {
+ pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
+ val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
+ udev->max_blocks = tcmu_global_max_blocks;
+ }
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return ret;
+}
+
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
- char *orig, *ptr, *opts, *arg_p;
+ char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
- int ret = 0, token, tmpval;
+ int ret = 0, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -1986,15 +2136,10 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
break;
case Opt_dev_size:
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
- kfree(arg_p);
+ ret = match_u64(&args[0], &udev->dev_size);
if (ret < 0)
- pr_err("kstrtoul() failed for dev_size=\n");
+ pr_err("match_u64() failed for dev_size=. Error %d.\n",
+ ret);
break;
case Opt_hw_block_size:
ret = tcmu_set_dev_attrib(&args[0],
@@ -2005,48 +2150,13 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
&(dev->dev_attrib.hw_max_sectors));
break;
case Opt_nl_reply_supported:
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
- kfree(arg_p);
+ ret = match_int(&args[0], &udev->nl_reply_supported);
if (ret < 0)
- pr_err("kstrtoint() failed for nl_reply_supported=\n");
+ pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
+ ret);
break;
case Opt_max_data_area_mb:
- if (dev->export_count) {
- pr_err("Unable to set max_data_area_mb while exports exist\n");
- ret = -EINVAL;
- break;
- }
-
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoint(arg_p, 0, &tmpval);
- kfree(arg_p);
- if (ret < 0) {
- pr_err("kstrtoint() failed for max_data_area_mb=\n");
- break;
- }
-
- if (tmpval <= 0) {
- pr_err("Invalid max_data_area %d\n", tmpval);
- ret = -EINVAL;
- break;
- }
-
- udev->max_blocks = TCMU_MBS_TO_BLOCKS(tmpval);
- if (udev->max_blocks > tcmu_global_max_blocks) {
- pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
- tmpval,
- TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
- udev->max_blocks = tcmu_global_max_blocks;
- }
+ ret = tcmu_set_max_blocks_param(udev, &args[0]);
break;
default:
break;
@@ -2067,7 +2177,7 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
bl = sprintf(b + bl, "Config: %s ",
udev->dev_config[0] ? udev->dev_config : "NULL");
- bl += sprintf(b + bl, "Size: %zu ", udev->dev_size);
+ bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
TCMU_BLOCKS_TO_MBS(udev->max_blocks));
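For context, the options handled above arrive through the device's configfs control file as one comma-separated line written before the device is enabled, e.g. (hypothetical backstore name) writing dev_size=1099511627776,max_data_area_mb=128 to /sys/kernel/config/target/core/user_0/mydev/control. Parsing dev_size with match_u64() and printing it with %llu keeps sizes above 4 GiB intact on 32-bit kernels, where the old size_t field was only 32 bits wide.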
@@ -2194,7 +2304,7 @@ static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
@@ -2211,7 +2321,7 @@ static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
return -EINVAL;
/* Check if device has been configured before */
- if (tcmu_dev_configured(udev)) {
+ if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_dev_config_event(udev, page);
if (ret) {
pr_err("Unable to reconfigure device\n");
@@ -2236,7 +2346,7 @@ static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
- return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
+ return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
}
static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
@@ -2256,7 +2366,7 @@ static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
@@ -2273,7 +2383,7 @@ static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
return ret;
/* Check if device has been configured before */
- if (tcmu_dev_configured(udev)) {
+ if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_dev_size_event(udev, val);
if (ret) {
pr_err("Unable to reconfigure device\n");
@@ -2338,7 +2448,7 @@ static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
@@ -2355,7 +2465,7 @@ static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
return ret;
/* Check if device has been configured before */
- if (tcmu_dev_configured(udev)) {
+ if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_emulate_write_cache(udev, val);
if (ret) {
pr_err("Unable to reconfigure device\n");
@@ -2391,6 +2501,11 @@ static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
u8 val;
int ret;
+ if (!target_dev_configured(&udev->se_dev)) {
+ pr_err("Device is not configured.\n");
+ return -EINVAL;
+ }
+
ret = kstrtou8(page, 0, &val);
if (ret < 0)
return ret;
@@ -2418,6 +2533,11 @@ static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
u8 val;
int ret;
+ if (!target_dev_configured(&udev->se_dev)) {
+ pr_err("Device is not configured.\n");
+ return -EINVAL;
+ }
+
ret = kstrtou8(page, 0, &val);
if (ret < 0)
return ret;
@@ -2482,6 +2602,11 @@ static void find_free_blocks(void)
list_for_each_entry(udev, &root_udev, node) {
mutex_lock(&udev->cmdr_lock);
+ if (!target_dev_configured(&udev->se_dev)) {
+ mutex_unlock(&udev->cmdr_lock);
+ continue;
+ }
+
/* Try to complete the finished commands first */
tcmu_handle_completions(udev);