Diffstat (limited to 'drivers/staging/most')
-rw-r--r--  drivers/staging/most/aim-cdev/cdev.c        374
-rw-r--r--  drivers/staging/most/aim-network/networking.c 11
-rw-r--r--  drivers/staging/most/hdm-dim2/dim2_hal.c       4
-rw-r--r--  drivers/staging/most/hdm-dim2/dim2_hal.h       7
-rw-r--r--  drivers/staging/most/hdm-dim2/dim2_hdm.c      78
-rw-r--r--  drivers/staging/most/hdm-dim2/dim2_hdm.h       2
-rw-r--r--  drivers/staging/most/hdm-dim2/dim2_sysfs.c     4
-rw-r--r--  drivers/staging/most/hdm-usb/hdm_usb.c        21
-rw-r--r--  drivers/staging/most/mostcore/core.c         194
-rw-r--r--  drivers/staging/most/mostcore/mostcore.h       3
10 files changed, 305 insertions(+), 393 deletions(-)
diff --git a/drivers/staging/most/aim-cdev/cdev.c b/drivers/staging/most/aim-cdev/cdev.c
index dc3fb25b52aa..de4f76abfb47 100644
--- a/drivers/staging/most/aim-cdev/cdev.c
+++ b/drivers/staging/most/aim-cdev/cdev.c
@@ -32,6 +32,7 @@ static struct most_aim cdev_aim;
struct aim_channel {
wait_queue_head_t wq;
+ spinlock_t unlink; /* synchronization lock to unlink channels */
struct cdev cdev;
struct device *dev;
struct mutex io_mutex;
@@ -39,11 +40,9 @@ struct aim_channel {
struct most_channel_config *cfg;
unsigned int channel_id;
dev_t devno;
- bool keep_mbo;
- unsigned int mbo_offs;
- struct mbo *stacked_mbo;
+ size_t mbo_offs;
DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
- atomic_t access_ref;
+ int access_ref;
struct list_head list;
};
@@ -51,15 +50,26 @@ struct aim_channel {
static struct list_head channel_list;
static spinlock_t ch_list_lock;
+static inline bool ch_has_mbo(struct aim_channel *c)
+{
+ return channel_has_mbo(c->iface, c->channel_id, &cdev_aim) > 0;
+}
+
+static inline bool ch_get_mbo(struct aim_channel *c, struct mbo **mbo)
+{
+ *mbo = most_get_mbo(c->iface, c->channel_id, &cdev_aim);
+ return *mbo;
+}
+
static struct aim_channel *get_channel(struct most_interface *iface, int id)
{
- struct aim_channel *channel, *tmp;
+ struct aim_channel *c, *tmp;
unsigned long flags;
int found_channel = 0;
spin_lock_irqsave(&ch_list_lock, flags);
- list_for_each_entry_safe(channel, tmp, &channel_list, list) {
- if ((channel->iface == iface) && (channel->channel_id == id)) {
+ list_for_each_entry_safe(c, tmp, &channel_list, list) {
+ if ((c->iface == iface) && (c->channel_id == id)) {
found_channel = 1;
break;
}
@@ -67,7 +77,29 @@ static struct aim_channel *get_channel(struct most_interface *iface, int id)
spin_unlock_irqrestore(&ch_list_lock, flags);
if (!found_channel)
return NULL;
- return channel;
+ return c;
+}
+
+static void stop_channel(struct aim_channel *c)
+{
+ struct mbo *mbo;
+
+ while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
+ most_put_mbo(mbo);
+ most_stop_channel(c->iface, c->channel_id, &cdev_aim);
+}
+
+static void destroy_cdev(struct aim_channel *c)
+{
+ unsigned long flags;
+
+ device_destroy(aim_class, c->devno);
+ cdev_del(&c->cdev);
+ kfifo_free(&c->fifo);
+ spin_lock_irqsave(&ch_list_lock, flags);
+ list_del(&c->list);
+ spin_unlock_irqrestore(&ch_list_lock, flags);
+ ida_simple_remove(&minor_id, MINOR(c->devno));
}
/**
@@ -80,29 +112,38 @@ static struct aim_channel *get_channel(struct most_interface *iface, int id)
*/
static int aim_open(struct inode *inode, struct file *filp)
{
- struct aim_channel *channel;
+ struct aim_channel *c;
int ret;
- channel = to_channel(inode->i_cdev);
- filp->private_data = channel;
+ c = to_channel(inode->i_cdev);
+ filp->private_data = c;
- if (((channel->cfg->direction == MOST_CH_RX) &&
+ if (((c->cfg->direction == MOST_CH_RX) &&
((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
- ((channel->cfg->direction == MOST_CH_TX) &&
+ ((c->cfg->direction == MOST_CH_TX) &&
((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
pr_info("WARN: Access flags mismatch\n");
return -EACCES;
}
- if (!atomic_inc_and_test(&channel->access_ref)) {
+
+ mutex_lock(&c->io_mutex);
+ if (!c->dev) {
+ pr_info("WARN: Device is destroyed\n");
+ mutex_unlock(&c->io_mutex);
+ return -EBUSY;
+ }
+
+ if (c->access_ref) {
pr_info("WARN: Device is busy\n");
- atomic_dec(&channel->access_ref);
+ mutex_unlock(&c->io_mutex);
return -EBUSY;
}
- ret = most_start_channel(channel->iface, channel->channel_id,
- &cdev_aim);
- if (ret)
- atomic_dec(&channel->access_ref);
+ c->mbo_offs = 0;
+ ret = most_start_channel(c->iface, c->channel_id, &cdev_aim);
+ if (!ret)
+ c->access_ref = 1;
+ mutex_unlock(&c->io_mutex);
return ret;
}
@@ -115,33 +156,21 @@ static int aim_open(struct inode *inode, struct file *filp)
*/
static int aim_close(struct inode *inode, struct file *filp)
{
- int ret;
- struct mbo *mbo;
- struct aim_channel *channel = to_channel(inode->i_cdev);
-
- mutex_lock(&channel->io_mutex);
- if (!channel->dev) {
- mutex_unlock(&channel->io_mutex);
- atomic_dec(&channel->access_ref);
- device_destroy(aim_class, channel->devno);
- cdev_del(&channel->cdev);
- kfifo_free(&channel->fifo);
- list_del(&channel->list);
- ida_simple_remove(&minor_id, MINOR(channel->devno));
- wake_up_interruptible(&channel->wq);
- kfree(channel);
- return 0;
+ struct aim_channel *c = to_channel(inode->i_cdev);
+
+ mutex_lock(&c->io_mutex);
+ spin_lock(&c->unlink);
+ c->access_ref = 0;
+ spin_unlock(&c->unlink);
+ if (c->dev) {
+ stop_channel(c);
+ mutex_unlock(&c->io_mutex);
+ } else {
+ destroy_cdev(c);
+ mutex_unlock(&c->io_mutex);
+ kfree(c);
}
- mutex_unlock(&channel->io_mutex);
-
- while (kfifo_out((struct kfifo *)&channel->fifo, &mbo, 1))
- most_put_mbo(mbo);
- if (channel->keep_mbo)
- most_put_mbo(channel->stacked_mbo);
- ret = most_stop_channel(channel->iface, channel->channel_id, &cdev_aim);
- atomic_dec(&channel->access_ref);
- wake_up_interruptible(&channel->wq);
- return ret;
+ return 0;
}
/**
@@ -154,62 +183,48 @@ static int aim_close(struct inode *inode, struct file *filp)
static ssize_t aim_write(struct file *filp, const char __user *buf,
size_t count, loff_t *offset)
{
- int ret, err;
- size_t actual_len = 0;
- size_t max_len = 0;
- ssize_t retval;
- struct mbo *mbo;
- struct aim_channel *channel = filp->private_data;
-
- mutex_lock(&channel->io_mutex);
- if (unlikely(!channel->dev)) {
- mutex_unlock(&channel->io_mutex);
- return -EPIPE;
- }
- mutex_unlock(&channel->io_mutex);
+ int ret;
+ size_t actual_len;
+ size_t max_len;
+ struct mbo *mbo = NULL;
+ struct aim_channel *c = filp->private_data;
- mbo = most_get_mbo(channel->iface, channel->channel_id, &cdev_aim);
+ mutex_lock(&c->io_mutex);
+ while (c->dev && !ch_get_mbo(c, &mbo)) {
+ mutex_unlock(&c->io_mutex);
- if (!mbo) {
if ((filp->f_flags & O_NONBLOCK))
return -EAGAIN;
- if (wait_event_interruptible(
- channel->wq,
- (mbo = most_get_mbo(channel->iface,
- channel->channel_id,
- &cdev_aim)) ||
- (!channel->dev)))
+ if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
return -ERESTARTSYS;
+ mutex_lock(&c->io_mutex);
}
- mutex_lock(&channel->io_mutex);
- if (unlikely(!channel->dev)) {
- mutex_unlock(&channel->io_mutex);
- err = -EPIPE;
- goto error;
+ if (unlikely(!c->dev)) {
+ ret = -EPIPE;
+ goto unlock;
}
- mutex_unlock(&channel->io_mutex);
- max_len = channel->cfg->buffer_size;
+ max_len = c->cfg->buffer_size;
actual_len = min(count, max_len);
mbo->buffer_length = actual_len;
- retval = copy_from_user(mbo->virt_address, buf, mbo->buffer_length);
- if (retval) {
- err = -EIO;
- goto error;
+ if (copy_from_user(mbo->virt_address, buf, mbo->buffer_length)) {
+ ret = -EFAULT;
+ goto put_mbo;
}
ret = most_submit_mbo(mbo);
- if (ret) {
- pr_info("submitting MBO to core failed\n");
- err = ret;
- goto error;
- }
- return actual_len - retval;
-error:
+ if (ret)
+ goto put_mbo;
+
+ mutex_unlock(&c->io_mutex);
+ return actual_len;
+put_mbo:
most_put_mbo(mbo);
- return err;
+unlock:
+ mutex_unlock(&c->io_mutex);
+ return ret;
}
/**
@@ -222,59 +237,46 @@ error:
static ssize_t
aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
- ssize_t retval;
- size_t not_copied, proc_len;
+ size_t to_copy, not_copied, copied;
struct mbo *mbo;
- struct aim_channel *channel = filp->private_data;
+ struct aim_channel *c = filp->private_data;
- if (channel->keep_mbo) {
- mbo = channel->stacked_mbo;
- channel->keep_mbo = false;
- goto start_copy;
- }
- while ((!kfifo_out(&channel->fifo, &mbo, 1)) && (channel->dev)) {
+ mutex_lock(&c->io_mutex);
+ while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
+ mutex_unlock(&c->io_mutex);
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
- if (wait_event_interruptible(channel->wq,
- (!kfifo_is_empty(&channel->fifo) ||
- (!channel->dev))))
+ if (wait_event_interruptible(c->wq,
+ (!kfifo_is_empty(&c->fifo) ||
+ (!c->dev))))
return -ERESTARTSYS;
+ mutex_lock(&c->io_mutex);
}
-start_copy:
/* make sure we don't submit to gone devices */
- mutex_lock(&channel->io_mutex);
- if (unlikely(!channel->dev)) {
- mutex_unlock(&channel->io_mutex);
+ if (unlikely(!c->dev)) {
+ mutex_unlock(&c->io_mutex);
return -EIO;
}
- if (count < mbo->processed_length)
- channel->keep_mbo = true;
-
- proc_len = min((int)count,
- (int)(mbo->processed_length - channel->mbo_offs));
+ to_copy = min_t(size_t,
+ count,
+ mbo->processed_length - c->mbo_offs);
not_copied = copy_to_user(buf,
- mbo->virt_address + channel->mbo_offs,
- proc_len);
+ mbo->virt_address + c->mbo_offs,
+ to_copy);
- retval = not_copied ? proc_len - not_copied : proc_len;
+ copied = to_copy - not_copied;
- if (channel->keep_mbo) {
- channel->mbo_offs = retval;
- channel->stacked_mbo = mbo;
- } else {
+ c->mbo_offs += copied;
+ if (c->mbo_offs >= mbo->processed_length) {
+ kfifo_skip(&c->fifo);
most_put_mbo(mbo);
- channel->mbo_offs = 0;
+ c->mbo_offs = 0;
}
- mutex_unlock(&channel->io_mutex);
- return retval;
-}
-
-static inline bool __must_check IS_ERR_OR_FALSE(int x)
-{
- return x <= 0;
+ mutex_unlock(&c->io_mutex);
+ return copied;
}
static unsigned int aim_poll(struct file *filp, poll_table *wait)
@@ -288,7 +290,7 @@ static unsigned int aim_poll(struct file *filp, poll_table *wait)
if (!kfifo_is_empty(&c->fifo))
mask |= POLLIN | POLLRDNORM;
} else {
- if (!IS_ERR_OR_FALSE(channel_has_mbo(c->iface, c->channel_id)))
+ if (ch_has_mbo(c))
mask |= POLLOUT | POLLWRNORM;
}
return mask;
@@ -316,33 +318,29 @@ static const struct file_operations channel_fops = {
*/
static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
{
- struct aim_channel *channel;
- unsigned long flags;
+ struct aim_channel *c;
if (!iface) {
pr_info("Bad interface pointer\n");
return -EINVAL;
}
- channel = get_channel(iface, channel_id);
- if (!channel)
+ c = get_channel(iface, channel_id);
+ if (!c)
return -ENXIO;
- mutex_lock(&channel->io_mutex);
- channel->dev = NULL;
- mutex_unlock(&channel->io_mutex);
-
- if (atomic_read(&channel->access_ref)) {
- device_destroy(aim_class, channel->devno);
- cdev_del(&channel->cdev);
- kfifo_free(&channel->fifo);
- ida_simple_remove(&minor_id, MINOR(channel->devno));
- spin_lock_irqsave(&ch_list_lock, flags);
- list_del(&channel->list);
- spin_unlock_irqrestore(&ch_list_lock, flags);
- kfree(channel);
+ mutex_lock(&c->io_mutex);
+ spin_lock(&c->unlink);
+ c->dev = NULL;
+ spin_unlock(&c->unlink);
+ if (c->access_ref) {
+ stop_channel(c);
+ wake_up_interruptible(&c->wq);
+ mutex_unlock(&c->io_mutex);
} else {
- wake_up_interruptible(&channel->wq);
+ destroy_cdev(c);
+ mutex_unlock(&c->io_mutex);
+ kfree(c);
}
return 0;
}
@@ -356,21 +354,27 @@ static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
*/
static int aim_rx_completion(struct mbo *mbo)
{
- struct aim_channel *channel;
+ struct aim_channel *c;
if (!mbo)
return -EINVAL;
- channel = get_channel(mbo->ifp, mbo->hdm_channel_id);
- if (!channel)
+ c = get_channel(mbo->ifp, mbo->hdm_channel_id);
+ if (!c)
return -ENXIO;
- kfifo_in(&channel->fifo, &mbo, 1);
+ spin_lock(&c->unlink);
+ if (!c->access_ref || !c->dev) {
+ spin_unlock(&c->unlink);
+ return -EFAULT;
+ }
+ kfifo_in(&c->fifo, &mbo, 1);
+ spin_unlock(&c->unlink);
#ifdef DEBUG_MESG
- if (kfifo_is_full(&channel->fifo))
+ if (kfifo_is_full(&c->fifo))
pr_info("WARN: Fifo is full\n");
#endif
- wake_up_interruptible(&channel->wq);
+ wake_up_interruptible(&c->wq);
return 0;
}
@@ -383,7 +387,7 @@ static int aim_rx_completion(struct mbo *mbo)
*/
static int aim_tx_completion(struct most_interface *iface, int channel_id)
{
- struct aim_channel *channel;
+ struct aim_channel *c;
if (!iface) {
pr_info("Bad interface pointer\n");
@@ -394,15 +398,13 @@ static int aim_tx_completion(struct most_interface *iface, int channel_id)
return -EINVAL;
}
- channel = get_channel(iface, channel_id);
- if (!channel)
+ c = get_channel(iface, channel_id);
+ if (!c)
return -ENXIO;
- wake_up_interruptible(&channel->wq);
+ wake_up_interruptible(&c->wq);
return 0;
}
-static struct most_aim cdev_aim;
-
/**
* aim_probe - probe function of the driver module
* @iface: pointer to interface instance
@@ -419,7 +421,7 @@ static int aim_probe(struct most_interface *iface, int channel_id,
struct most_channel_config *cfg,
struct kobject *parent, char *name)
{
- struct aim_channel *channel;
+ struct aim_channel *c;
unsigned long cl_flags;
int retval;
int current_minor;
@@ -428,60 +430,60 @@ static int aim_probe(struct most_interface *iface, int channel_id,
pr_info("Probing AIM with bad arguments");
return -EINVAL;
}
- channel = get_channel(iface, channel_id);
- if (channel)
+ c = get_channel(iface, channel_id);
+ if (c)
return -EEXIST;
current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL);
if (current_minor < 0)
return current_minor;
- channel = kzalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel) {
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c) {
retval = -ENOMEM;
goto error_alloc_channel;
}
- channel->devno = MKDEV(major, current_minor);
- cdev_init(&channel->cdev, &channel_fops);
- channel->cdev.owner = THIS_MODULE;
- cdev_add(&channel->cdev, channel->devno, 1);
- channel->iface = iface;
- channel->cfg = cfg;
- channel->channel_id = channel_id;
- channel->mbo_offs = 0;
- atomic_set(&channel->access_ref, -1);
- INIT_KFIFO(channel->fifo);
- retval = kfifo_alloc(&channel->fifo, cfg->num_buffers, GFP_KERNEL);
+ c->devno = MKDEV(major, current_minor);
+ cdev_init(&c->cdev, &channel_fops);
+ c->cdev.owner = THIS_MODULE;
+ cdev_add(&c->cdev, c->devno, 1);
+ c->iface = iface;
+ c->cfg = cfg;
+ c->channel_id = channel_id;
+ c->access_ref = 0;
+ spin_lock_init(&c->unlink);
+ INIT_KFIFO(c->fifo);
+ retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
if (retval) {
pr_info("failed to alloc channel kfifo");
goto error_alloc_kfifo;
}
- init_waitqueue_head(&channel->wq);
- mutex_init(&channel->io_mutex);
+ init_waitqueue_head(&c->wq);
+ mutex_init(&c->io_mutex);
spin_lock_irqsave(&ch_list_lock, cl_flags);
- list_add_tail(&channel->list, &channel_list);
+ list_add_tail(&c->list, &channel_list);
spin_unlock_irqrestore(&ch_list_lock, cl_flags);
- channel->dev = device_create(aim_class,
+ c->dev = device_create(aim_class,
NULL,
- channel->devno,
+ c->devno,
NULL,
"%s", name);
- retval = IS_ERR(channel->dev);
- if (retval) {
+ if (IS_ERR(c->dev)) {
+ retval = PTR_ERR(c->dev);
pr_info("failed to create new device node %s\n", name);
goto error_create_device;
}
- kobject_uevent(&channel->dev->kobj, KOBJ_ADD);
+ kobject_uevent(&c->dev->kobj, KOBJ_ADD);
return 0;
error_create_device:
- kfifo_free(&channel->fifo);
- list_del(&channel->list);
+ kfifo_free(&c->fifo);
+ list_del(&c->list);
error_alloc_kfifo:
- cdev_del(&channel->cdev);
- kfree(channel);
+ cdev_del(&c->cdev);
+ kfree(c);
error_alloc_channel:
ida_simple_remove(&minor_id, current_minor);
return retval;
@@ -526,19 +528,15 @@ free_cdev:
static void __exit mod_exit(void)
{
- struct aim_channel *channel, *tmp;
+ struct aim_channel *c, *tmp;
pr_info("exit module\n");
most_deregister_aim(&cdev_aim);
- list_for_each_entry_safe(channel, tmp, &channel_list, list) {
- device_destroy(aim_class, channel->devno);
- cdev_del(&channel->cdev);
- kfifo_free(&channel->fifo);
- list_del(&channel->list);
- ida_simple_remove(&minor_id, MINOR(channel->devno));
- kfree(channel);
+ list_for_each_entry_safe(c, tmp, &channel_list, list) {
+ destroy_cdev(c);
+ kfree(c);
}
class_destroy(aim_class);
unregister_chrdev_region(aim_devno, 1);
diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/aim-network/networking.c
index 3c7beb03871d..2f42de44d051 100644
--- a/drivers/staging/most/aim-network/networking.c
+++ b/drivers/staging/most/aim-network/networking.c
@@ -431,6 +431,7 @@ static int aim_rx_data(struct mbo *mbo)
u32 len = mbo->processed_length;
struct sk_buff *skb;
struct net_device *dev;
+ unsigned int skb_len;
nd = get_net_dev_context(mbo->ifp);
if (!nd || !nd->channels_opened || nd->rx.ch_id != mbo->hdm_channel_id)
@@ -482,9 +483,13 @@ static int aim_rx_data(struct mbo *mbo)
memcpy(skb_put(skb, len), buf, len);
skb->protocol = eth_type_trans(skb, dev);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
- netif_rx(skb);
+ skb_len = skb->len;
+ if (netif_rx(skb) == NET_RX_SUCCESS) {
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb_len;
+ } else {
+ dev->stats.rx_dropped++;
+ }
out:
most_put_mbo(mbo);
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.c b/drivers/staging/most/hdm-dim2/dim2_hal.c
index 172257596f1f..3c524506ee22 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hal.c
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.c
@@ -84,7 +84,7 @@ static inline bool dim_on_error(u8 error_id, const char *error_message)
struct lld_global_vars_t {
bool dim_is_initialized;
bool mcm_is_initialized;
- struct dim2_regs *dim2; /* DIM2 core base address */
+ struct dim2_regs __iomem *dim2; /* DIM2 core base address */
u32 dbr_map[DBR_MAP_SIZE];
};
@@ -650,7 +650,7 @@ static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number)
/* -------------------------------------------------------------------------- */
/* API */
-u8 dim_startup(void *dim_base_address, u32 mlb_clock)
+u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock)
{
g.dim_is_initialized = false;
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.h b/drivers/staging/most/hdm-dim2/dim2_hal.h
index 48cdd9c8cde1..fc73d4f97734 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hal.h
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.h
@@ -16,6 +16,7 @@
#define _DIM2_HAL_H
#include <linux/types.h>
+#include "dim2_reg.h"
#ifdef __cplusplus
extern "C" {
@@ -65,7 +66,7 @@ struct dim_channel {
u16 done_sw_buffers_number; /*< Done software buffers number. */
};
-u8 dim_startup(void *dim_base_address, u32 mlb_clock);
+u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock);
void dim_shutdown(void);
@@ -103,9 +104,9 @@ bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number);
-u32 dimcb_io_read(u32 *ptr32);
+u32 dimcb_io_read(u32 __iomem *ptr32);
-void dimcb_io_write(u32 *ptr32, u32 value);
+void dimcb_io_write(u32 __iomem *ptr32, u32 value);
void dimcb_on_error(u8 error_id, const char *error_message);
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.c b/drivers/staging/most/hdm-dim2/dim2_hdm.c
index 327d738c7194..0dc86add7161 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.c
+++ b/drivers/staging/most/hdm-dim2/dim2_hdm.c
@@ -99,7 +99,7 @@ struct dim2_hdm {
struct most_channel_capability capabilities[DMA_CHANNELS];
struct most_interface most_iface;
char name[16 + sizeof "dim2-"];
- void *io_base;
+ void __iomem *io_base;
unsigned int irq_ahb0;
int clk_speed;
struct task_struct *netinfo_task;
@@ -138,9 +138,9 @@ bool dim2_sysfs_get_state_cb(void)
* dimcb_io_read - callback from HAL to read an I/O register
* @ptr32: register address
*/
-u32 dimcb_io_read(u32 *ptr32)
+u32 dimcb_io_read(u32 __iomem *ptr32)
{
- return __raw_readl(ptr32);
+ return readl(ptr32);
}
/**
@@ -148,9 +148,9 @@ u32 dimcb_io_read(u32 *ptr32)
* @ptr32: register address
* @value: value to write
*/
-void dimcb_io_write(u32 *ptr32, u32 value)
+void dimcb_io_write(u32 __iomem *ptr32, u32 value)
{
- __raw_writel(value, ptr32);
+ writel(value, ptr32);
}
/**
@@ -251,7 +251,7 @@ static int try_start_dim_transfer(struct hdm_channel *hdm_ch)
return -EAGAIN;
}
- mbo = list_entry(head->next, struct mbo, list);
+ mbo = list_first_entry(head, struct mbo, list);
buf_size = mbo->buffer_length;
BUG_ON(mbo->bus_address == 0);
@@ -362,7 +362,7 @@ static void service_done_flag(struct dim2_hdm *dev, int ch_idx)
break;
}
- mbo = list_entry(head->next, struct mbo, list);
+ mbo = list_first_entry(head, struct mbo, list);
list_del(head->next);
spin_unlock_irqrestore(&dim_lock, flags);
@@ -495,7 +495,7 @@ static void complete_all_mbos(struct list_head *head)
break;
}
- mbo = list_entry(head->next, struct mbo, list);
+ mbo = list_first_entry(head, struct mbo, list);
list_del(head->next);
spin_unlock_irqrestore(&dim_lock, flags);
@@ -736,7 +736,7 @@ static int dim2_probe(struct platform_device *pdev)
int ret, i;
struct kobject *kobj;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -747,47 +747,31 @@ static int dim2_probe(struct platform_device *pdev)
test_dev = dev;
#else
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- pr_err("no memory region defined\n");
- ret = -ENOENT;
- goto err_free_dev;
- }
-
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- pr_err("failed to request mem region\n");
- ret = -EBUSY;
- goto err_free_dev;
- }
-
- dev->io_base = ioremap(res->start, resource_size(res));
- if (!dev->io_base) {
- pr_err("failed to ioremap\n");
- ret = -ENOMEM;
- goto err_release_mem;
- }
+ dev->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->io_base))
+ return PTR_ERR(dev->io_base);
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
- pr_err("failed to get irq\n");
- goto err_unmap_io;
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return -ENODEV;
}
dev->irq_ahb0 = ret;
- ret = request_irq(dev->irq_ahb0, dim2_ahb_isr, 0, "mlb_ahb0", dev);
+ ret = devm_request_irq(&pdev->dev, dev->irq_ahb0, dim2_ahb_isr, 0,
+ "mlb_ahb0", dev);
if (ret) {
- pr_err("failed to request IRQ: %d, err: %d\n",
- dev->irq_ahb0, ret);
- goto err_unmap_io;
+ dev_err(&pdev->dev, "failed to request IRQ: %d, err: %d\n",
+ dev->irq_ahb0, ret);
+ return ret;
}
#endif
init_waitqueue_head(&dev->netinfo_waitq);
dev->deliver_netinfo = 0;
dev->netinfo_task = kthread_run(&deliver_netinfo_thread, (void *)dev,
"dim2_netinfo");
- if (IS_ERR(dev->netinfo_task)) {
+ if (IS_ERR(dev->netinfo_task))
ret = PTR_ERR(dev->netinfo_task);
- goto err_free_irq;
- }
for (i = 0; i < DMA_CHANNELS; i++) {
struct most_channel_capability *cap = dev->capabilities + i;
@@ -833,7 +817,7 @@ static int dim2_probe(struct platform_device *pdev)
kobj = most_register_interface(&dev->most_iface);
if (IS_ERR(kobj)) {
ret = PTR_ERR(kobj);
- pr_err("failed to register MOST interface\n");
+ dev_err(&pdev->dev, "failed to register MOST interface\n");
goto err_stop_thread;
}
@@ -843,7 +827,7 @@ static int dim2_probe(struct platform_device *pdev)
ret = startup_dim(pdev);
if (ret) {
- pr_err("failed to initialize DIM2\n");
+ dev_err(&pdev->dev, "failed to initialize DIM2\n");
goto err_destroy_bus;
}
@@ -855,16 +839,6 @@ err_unreg_iface:
most_deregister_interface(&dev->most_iface);
err_stop_thread:
kthread_stop(dev->netinfo_task);
-err_free_irq:
-#if !defined(ENABLE_HDM_TEST)
- free_irq(dev->irq_ahb0, dev);
-err_unmap_io:
- iounmap(dev->io_base);
-err_release_mem:
- release_mem_region(res->start, resource_size(res));
-err_free_dev:
-#endif
- kfree(dev);
return ret;
}
@@ -878,7 +852,6 @@ err_free_dev:
static int dim2_remove(struct platform_device *pdev)
{
struct dim2_hdm *dev = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct dim2_platform_data *pdata = pdev->dev.platform_data;
unsigned long flags;
@@ -892,13 +865,6 @@ static int dim2_remove(struct platform_device *pdev)
dim2_sysfs_destroy(&dev->bus);
most_deregister_interface(&dev->most_iface);
kthread_stop(dev->netinfo_task);
-#if !defined(ENABLE_HDM_TEST)
- free_irq(dev->irq_ahb0, dev);
- iounmap(dev->io_base);
- release_mem_region(res->start, resource_size(res));
-#endif
- kfree(dev);
- platform_set_drvdata(pdev, NULL);
/*
* break link to local platform_device_id struct
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.h b/drivers/staging/most/hdm-dim2/dim2_hdm.h
index 1c94e3355fcc..4050e7c764ed 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.h
+++ b/drivers/staging/most/hdm-dim2/dim2_hdm.h
@@ -18,7 +18,7 @@ struct device;
/* platform dependent data for dim2 interface */
struct dim2_platform_data {
- int (*init)(struct dim2_platform_data *pd, void *io_base,
+ int (*init)(struct dim2_platform_data *pd, void __iomem *io_base,
int clk_speed);
void (*destroy)(struct dim2_platform_data *pd);
void *priv;
diff --git a/drivers/staging/most/hdm-dim2/dim2_sysfs.c b/drivers/staging/most/hdm-dim2/dim2_sysfs.c
index c5b10c7d2fac..2b28e4a51131 100644
--- a/drivers/staging/most/hdm-dim2/dim2_sysfs.c
+++ b/drivers/staging/most/hdm-dim2/dim2_sysfs.c
@@ -63,7 +63,6 @@ static ssize_t bus_kobj_attr_show(struct kobject *kobj, struct attribute *attr,
static ssize_t bus_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
- ssize_t ret;
struct medialb_bus *bus =
container_of(kobj, struct medialb_bus, kobj_group);
struct bus_attr *xattr = container_of(attr, struct bus_attr, attr);
@@ -71,8 +70,7 @@ static ssize_t bus_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
if (!xattr->store)
return -EIO;
- ret = xattr->store(bus, buf, count);
- return ret;
+ return xattr->store(bus, buf, count);
}
static struct sysfs_ops const bus_kobj_sysfs_ops = {
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
index 41690f801fb8..aeae071f2823 100644
--- a/drivers/staging/most/hdm-usb/hdm_usb.c
+++ b/drivers/staging/most/hdm-usb/hdm_usb.c
@@ -40,7 +40,6 @@
#define MAX_SUFFIX_LEN 10
#define MAX_STRING_LEN 80
#define MAX_BUF_SIZE 0xFFFF
-#define CEILING(x, y) (((x) + (y) - 1) / (y))
#define USB_VENDOR_ID_SMSC 0x0424 /* VID: SMSC */
#define USB_DEV_ID_BRDG 0xC001 /* PID: USB Bridge */
@@ -137,7 +136,6 @@ struct most_dev {
#define to_mdev(d) container_of(d, struct most_dev, iface)
#define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)
-static struct workqueue_struct *schedule_usb_work;
static void wq_clear_halt(struct work_struct *wq_obj);
static void wq_netinfo(struct work_struct *wq_obj);
@@ -223,6 +221,7 @@ static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel)
}
spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
list_del(&anchor->list);
+ cancel_work_sync(&anchor->clear_work_obj);
kfree(anchor);
}
spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
@@ -411,7 +410,7 @@ static void hdm_write_completion(struct urb *urb)
mbo->status = MBO_E_INVAL;
usb_unlink_urb(urb);
INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
- queue_work(schedule_usb_work, &anchor->clear_work_obj);
+ schedule_work(&anchor->clear_work_obj);
return;
case -ENODEV:
case -EPROTO:
@@ -575,7 +574,7 @@ static void hdm_read_completion(struct urb *urb)
mbo->status = MBO_E_INVAL;
usb_unlink_urb(urb);
INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
- queue_work(schedule_usb_work, &anchor->clear_work_obj);
+ schedule_work(&anchor->clear_work_obj);
return;
case -ENODEV:
case -EPROTO:
@@ -785,7 +784,7 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
temp_size += tail_space;
/* calculate extra length to comply w/ HW padding */
- conf->extra_len = (CEILING(temp_size, USB_MTU) * USB_MTU)
+ conf->extra_len = (DIV_ROUND_UP(temp_size, USB_MTU) * USB_MTU)
- conf->buffer_size;
exit:
mdev->conf[channel] = *conf;
@@ -872,7 +871,7 @@ static void link_stat_timer_handler(unsigned long data)
{
struct most_dev *mdev = (struct most_dev *)data;
- queue_work(schedule_usb_work, &mdev->poll_work_obj);
+ schedule_work(&mdev->poll_work_obj);
mdev->link_stat_timer.expires = jiffies + (2 * HZ);
add_timer(&mdev->link_stat_timer);
}
@@ -1299,7 +1298,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
tmp_cap->num_buffers_streaming = BUF_CHAIN_SIZE;
tmp_cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
MOST_CH_ISOC_AVP | MOST_CH_SYNC;
- if (ep_desc->bEndpointAddress & USB_DIR_IN)
+ if (usb_endpoint_dir_in(ep_desc))
tmp_cap->direction = MOST_CH_RX;
else
tmp_cap->direction = MOST_CH_TX;
@@ -1415,19 +1414,13 @@ static int __init hdm_usb_init(void)
pr_err("could not register hdm_usb driver\n");
return -EIO;
}
- schedule_usb_work = create_workqueue("hdmu_work");
- if (!schedule_usb_work) {
- pr_err("could not create workqueue\n");
- usb_deregister(&hdm_usb);
- return -ENOMEM;
- }
+
return 0;
}
static void __exit hdm_usb_exit(void)
{
pr_info("hdm_usb_exit()\n");
- destroy_workqueue(schedule_usb_work);
usb_deregister(&hdm_usb);
}
diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c
index ed1ed25b6d1d..7c619feb12d3 100644
--- a/drivers/staging/most/mostcore/core.c
+++ b/drivers/staging/most/mostcore/core.c
@@ -35,7 +35,6 @@
static struct class *most_class;
static struct device *class_glue_dir;
static struct ida mdev_id;
-static int modref;
static int dummy_num_buffers;
struct most_c_aim_obj {
@@ -66,7 +65,6 @@ struct most_c_obj {
struct most_c_aim_obj aim1;
struct list_head trash_fifo;
struct task_struct *hdm_enqueue_task;
- struct mutex stop_task_mutex;
wait_queue_head_t hdm_fifo_wq;
};
@@ -74,7 +72,6 @@ struct most_c_obj {
struct most_inst_obj {
int dev_id;
- atomic_t tainted;
struct most_interface *iface;
struct list_head channel_list;
struct most_c_obj *channel[MAX_CHANNELS];
@@ -82,6 +79,14 @@ struct most_inst_obj {
struct list_head list;
};
+static const struct {
+ int most_ch_data_type;
+ char *name;
+} ch_data_type[] = { { MOST_CH_CONTROL, "control\n" },
+ { MOST_CH_ASYNC, "async\n" },
+ { MOST_CH_SYNC, "sync\n" },
+ { MOST_CH_ISOC_AVP, "isoc_avp\n"} };
+
#define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
/**
@@ -95,8 +100,6 @@ struct most_inst_obj {
_mbo; \
})
-static struct mutex deregister_mutex;
-
/* ___ ___
* ___C H A N N E L___
*/
@@ -414,14 +417,12 @@ static ssize_t show_set_datatype(struct most_c_obj *c,
struct most_c_attr *attr,
char *buf)
{
- if (c->cfg.data_type & MOST_CH_CONTROL)
- return snprintf(buf, PAGE_SIZE, "control\n");
- else if (c->cfg.data_type & MOST_CH_ASYNC)
- return snprintf(buf, PAGE_SIZE, "async\n");
- else if (c->cfg.data_type & MOST_CH_SYNC)
- return snprintf(buf, PAGE_SIZE, "sync\n");
- else if (c->cfg.data_type & MOST_CH_ISOC_AVP)
- return snprintf(buf, PAGE_SIZE, "isoc_avp\n");
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
+ if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
+ return snprintf(buf, PAGE_SIZE, ch_data_type[i].name);
+ }
return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}
@@ -430,15 +431,16 @@ static ssize_t store_set_datatype(struct most_c_obj *c,
const char *buf,
size_t count)
{
- if (!strcmp(buf, "control\n")) {
- c->cfg.data_type = MOST_CH_CONTROL;
- } else if (!strcmp(buf, "async\n")) {
- c->cfg.data_type = MOST_CH_ASYNC;
- } else if (!strcmp(buf, "sync\n")) {
- c->cfg.data_type = MOST_CH_SYNC;
- } else if (!strcmp(buf, "isoc_avp\n")) {
- c->cfg.data_type = MOST_CH_ISOC_AVP;
- } else {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
+ if (!strcmp(buf, ch_data_type[i].name)) {
+ c->cfg.data_type = ch_data_type[i].most_ch_data_type;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(ch_data_type)) {
pr_info("WARN: invalid attribute settings\n");
return -EINVAL;
}
@@ -551,29 +553,6 @@ create_most_c_obj(const char *name, struct kobject *parent)
return c;
}
-/**
- * destroy_most_c_obj - channel release function
- * @c: pointer to channel object
- *
- * This decrements the reference counter of the channel object.
- * If the reference count turns zero, its release function is called.
- */
-static void destroy_most_c_obj(struct most_c_obj *c)
-{
- if (c->aim0.ptr)
- c->aim0.ptr->disconnect_channel(c->iface, c->channel_id);
- if (c->aim1.ptr)
- c->aim1.ptr->disconnect_channel(c->iface, c->channel_id);
- c->aim0.ptr = NULL;
- c->aim1.ptr = NULL;
-
- mutex_lock(&deregister_mutex);
- flush_trash_fifo(c);
- flush_channel_fifos(c);
- mutex_unlock(&deregister_mutex);
- kobject_put(&c->kobj);
-}
-
/* ___ ___
* ___I N S T A N C E___
*/
@@ -761,12 +740,10 @@ static void destroy_most_inst_obj(struct most_inst_obj *inst)
{
struct most_c_obj *c, *tmp;
- /* need to destroy channels first, since
- * each channel incremented the
- * reference count of the inst->kobj
- */
list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
- destroy_most_c_obj(c);
+ flush_trash_fifo(c);
+ flush_channel_fifos(c);
+ kobject_put(&c->kobj);
}
kobject_put(&inst->kobj);
}
@@ -1006,11 +983,14 @@ static ssize_t store_add_link(struct most_aim_obj *aim_obj,
else
return -ENOSPC;
+ *aim_ptr = aim_obj->driver;
ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
&c->cfg, &c->kobj, mdev_devnod);
- if (ret)
+ if (ret) {
+ *aim_ptr = NULL;
return ret;
- *aim_ptr = aim_obj->driver;
+ }
+
return len;
}
@@ -1056,12 +1036,12 @@ static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
if (IS_ERR(c))
return -ENODEV;
+ if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
+ return -EIO;
if (c->aim0.ptr == aim_obj->driver)
c->aim0.ptr = NULL;
if (c->aim1.ptr == aim_obj->driver)
c->aim1.ptr = NULL;
- if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
- return -EIO;
return len;
}
@@ -1279,7 +1259,6 @@ static int arm_mbo_chain(struct most_c_obj *c, int dir,
for (i = 0; i < c->cfg.num_buffers; i++) {
mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
if (!mbo) {
- pr_info("WARN: Allocation of MBO failed.\n");
retval = i;
goto _exit;
}
@@ -1319,18 +1298,10 @@ _exit:
*/
int most_submit_mbo(struct mbo *mbo)
{
- struct most_c_obj *c;
- struct most_inst_obj *i;
-
if (unlikely((!mbo) || (!mbo->context))) {
pr_err("Bad MBO or missing channel reference\n");
return -EINVAL;
}
- c = mbo->context;
- i = c->inst;
-
- if (unlikely(atomic_read(&i->tainted)))
- return -ENODEV;
nq_hdm_mbo(mbo);
return 0;
@@ -1387,7 +1358,7 @@ most_c_obj *get_channel_by_iface(struct most_interface *iface, int id)
return i->channel[id];
}
-int channel_has_mbo(struct most_interface *iface, int id)
+int channel_has_mbo(struct most_interface *iface, int id, struct most_aim *aim)
{
struct most_c_obj *c = get_channel_by_iface(iface, id);
unsigned long flags;
@@ -1396,6 +1367,11 @@ int channel_has_mbo(struct most_interface *iface, int id)
if (unlikely(!c))
return -EINVAL;
+ if (c->aim0.refs && c->aim1.refs &&
+ ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
+ (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
+ return 0;
+
spin_lock_irqsave(&c->fifo_lock, flags);
empty = list_empty(&c->fifo);
spin_unlock_irqrestore(&c->fifo_lock, flags);
@@ -1456,17 +1432,8 @@ EXPORT_SYMBOL_GPL(most_get_mbo);
*/
void most_put_mbo(struct mbo *mbo)
{
- struct most_c_obj *c;
- struct most_inst_obj *i;
-
- c = mbo->context;
- i = c->inst;
+ struct most_c_obj *c = mbo->context;
- if (unlikely(atomic_read(&i->tainted))) {
- mbo->status = MBO_E_CLOSE;
- trash_mbo(mbo);
- return;
- }
if (c->cfg.direction == MOST_CH_TX) {
arm_mbo(mbo);
return;
@@ -1546,7 +1513,6 @@ int most_start_channel(struct most_interface *iface, int id,
mutex_unlock(&c->start_mutex);
return -ENOLCK;
}
- modref++;
c->cfg.extra_len = 0;
if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
@@ -1588,7 +1554,6 @@ out:
error:
module_put(iface->mod);
- modref--;
mutex_unlock(&c->start_mutex);
return ret;
}
@@ -1616,24 +1581,12 @@ int most_stop_channel(struct most_interface *iface, int id,
if (c->aim0.refs + c->aim1.refs >= 2)
goto out;
- mutex_lock(&c->stop_task_mutex);
if (c->hdm_enqueue_task)
kthread_stop(c->hdm_enqueue_task);
c->hdm_enqueue_task = NULL;
- mutex_unlock(&c->stop_task_mutex);
- mutex_lock(&deregister_mutex);
- if (atomic_read(&c->inst->tainted)) {
- mutex_unlock(&deregister_mutex);
- mutex_unlock(&c->start_mutex);
- return -ENODEV;
- }
- mutex_unlock(&deregister_mutex);
-
- if (iface->mod && modref) {
+ if (iface->mod)
module_put(iface->mod);
- modref--;
- }
c->is_poisoned = true;
if (c->iface->poison_channel(c->iface, c->channel_id)) {
@@ -1762,6 +1715,7 @@ struct kobject *most_register_interface(struct most_interface *iface)
inst = create_most_inst_obj(name);
if (!inst) {
pr_info("Failed to allocate interface instance\n");
+ ida_simple_remove(&mdev_id, id);
return ERR_PTR(-ENOMEM);
}
@@ -1769,7 +1723,6 @@ struct kobject *most_register_interface(struct most_interface *iface)
INIT_LIST_HEAD(&inst->channel_list);
inst->iface = iface;
inst->dev_id = id;
- atomic_set(&inst->tainted, 0);
list_add_tail(&inst->list, &instance_list);
for (i = 0; i < iface->num_channels; i++) {
@@ -1808,7 +1761,6 @@ struct kobject *most_register_interface(struct most_interface *iface)
init_completion(&c->cleanup);
atomic_set(&c->mbo_ref, 0);
mutex_init(&c->start_mutex);
- mutex_init(&c->stop_task_mutex);
list_add_tail(&c->list, &inst->channel_list);
}
pr_info("registered new MOST device mdev%d (%s)\n",
@@ -1818,6 +1770,7 @@ struct kobject *most_register_interface(struct most_interface *iface)
free_instance:
pr_info("Failed allocate channel(s)\n");
list_del(&inst->list);
+ ida_simple_remove(&mdev_id, id);
destroy_most_inst_obj(inst);
return ERR_PTR(-ENOMEM);
}
@@ -1835,37 +1788,24 @@ void most_deregister_interface(struct most_interface *iface)
struct most_inst_obj *i = iface->priv;
struct most_c_obj *c;
- mutex_lock(&deregister_mutex);
if (unlikely(!i)) {
pr_info("Bad Interface\n");
- mutex_unlock(&deregister_mutex);
return;
}
pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
iface->description);
- atomic_set(&i->tainted, 1);
- mutex_unlock(&deregister_mutex);
-
- while (modref) {
- if (iface->mod && modref)
- module_put(iface->mod);
- modref--;
- }
-
list_for_each_entry(c, &i->channel_list, list) {
- if (c->aim0.refs + c->aim1.refs <= 0)
- continue;
-
- mutex_lock(&c->stop_task_mutex);
- if (c->hdm_enqueue_task)
- kthread_stop(c->hdm_enqueue_task);
- c->hdm_enqueue_task = NULL;
- mutex_unlock(&c->stop_task_mutex);
-
- if (iface->poison_channel(iface, c->channel_id))
- pr_err("Can't poison channel %d\n", c->channel_id);
+ if (c->aim0.ptr)
+ c->aim0.ptr->disconnect_channel(c->iface,
+ c->channel_id);
+ if (c->aim1.ptr)
+ c->aim1.ptr->disconnect_channel(c->iface,
+ c->channel_id);
+ c->aim0.ptr = NULL;
+ c->aim1.ptr = NULL;
}
+
ida_simple_remove(&mdev_id, i->dev_id);
list_del(&i->list);
destroy_most_inst_obj(i);
@@ -1913,41 +1853,52 @@ EXPORT_SYMBOL_GPL(most_resume_enqueue);
static int __init most_init(void)
{
+ int err;
+
pr_info("init()\n");
INIT_LIST_HEAD(&instance_list);
INIT_LIST_HEAD(&aim_list);
- mutex_init(&deregister_mutex);
ida_init(&mdev_id);
- if (bus_register(&most_bus)) {
+ err = bus_register(&most_bus);
+ if (err) {
pr_info("Cannot register most bus\n");
- goto exit;
+ return err;
}
most_class = class_create(THIS_MODULE, "most");
if (IS_ERR(most_class)) {
pr_info("No udev support.\n");
+ err = PTR_ERR(most_class);
goto exit_bus;
}
- if (driver_register(&mostcore)) {
+
+ err = driver_register(&mostcore);
+ if (err) {
pr_info("Cannot register core driver\n");
goto exit_class;
}
class_glue_dir =
device_create(most_class, NULL, 0, NULL, "mostcore");
- if (!class_glue_dir)
+ if (IS_ERR(class_glue_dir)) {
+ err = PTR_ERR(class_glue_dir);
goto exit_driver;
+ }
most_aim_kset =
kset_create_and_add("aims", NULL, &class_glue_dir->kobj);
- if (!most_aim_kset)
+ if (!most_aim_kset) {
+ err = -ENOMEM;
goto exit_class_container;
+ }
most_inst_kset =
kset_create_and_add("devices", NULL, &class_glue_dir->kobj);
- if (!most_inst_kset)
+ if (!most_inst_kset) {
+ err = -ENOMEM;
goto exit_driver_kset;
+ }
return 0;
@@ -1961,8 +1912,7 @@ exit_class:
class_destroy(most_class);
exit_bus:
bus_unregister(&most_bus);
-exit:
- return -ENOMEM;
+ return err;
}
static void __exit most_exit(void)
diff --git a/drivers/staging/most/mostcore/mostcore.h b/drivers/staging/most/mostcore/mostcore.h
index bda3850d5435..60e018e499ef 100644
--- a/drivers/staging/most/mostcore/mostcore.h
+++ b/drivers/staging/most/mostcore/mostcore.h
@@ -310,7 +310,8 @@ int most_deregister_aim(struct most_aim *aim);
struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx,
struct most_aim *);
void most_put_mbo(struct mbo *mbo);
-int channel_has_mbo(struct most_interface *iface, int channel_idx);
+int channel_has_mbo(struct most_interface *iface, int channel_idx,
+ struct most_aim *aim);
int most_start_channel(struct most_interface *iface, int channel_idx,
struct most_aim *);
int most_stop_channel(struct most_interface *iface, int channel_idx,