Diffstat (limited to 'drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c')
-rw-r--r-- drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c | 345
1 file changed, 83 insertions(+), 262 deletions(-)
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
index a91ef6ea29ce..f5b5ead6347c 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
-#include <linux/btree.h>
#include <asm/cacheflush.h>
#include <media/videobuf2-vmalloc.h>
@@ -111,7 +110,11 @@ struct vchiq_mmal_instance;
/* normal message context */
struct mmal_msg_context {
struct vchiq_mmal_instance *instance;
- u32 handle;
+
+ /* Index in the context_map idr so that we can find the
+ * mmal_msg_context again when servicing the VCHI reply.
+ */
+ int handle;
union {
struct {
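Note: the handle field becomes a plain int because that is what idr_alloc() returns; negative values are errno codes, which the reworked get_msg_context() below propagates directly instead of mapping everything to -ENOMEM. In brief:

    int handle = idr_alloc(&idr, ptr, 0, 0, GFP_KERNEL);
    if (handle < 0)
        return handle;  /* -ENOMEM, -ENOSPC, ... */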
@@ -149,111 +152,28 @@ struct mmal_msg_context {
};
-struct vchiq_mmal_context_map {
- /* ensure serialized access to the btree(contention should be low) */
- struct mutex lock;
- struct btree_head32 btree_head;
- u32 last_handle;
-};
-
struct vchiq_mmal_instance {
VCHI_SERVICE_HANDLE_T handle;
/* ensure serialised access to service */
struct mutex vchiq_mutex;
- /* ensure serialised access to bulk operations */
- struct mutex bulk_mutex;
-
/* vmalloc page to receive scratch bulk xfers into */
void *bulk_scratch;
- /* mapping table between context handles and mmal_msg_contexts */
- struct vchiq_mmal_context_map context_map;
+ struct idr context_map;
+ spinlock_t context_map_lock;
/* component to use next */
int component_idx;
struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
};
-static int __must_check
-mmal_context_map_init(struct vchiq_mmal_context_map *context_map)
-{
- mutex_init(&context_map->lock);
- context_map->last_handle = 0;
- return btree_init32(&context_map->btree_head);
-}
-
-static void mmal_context_map_destroy(struct vchiq_mmal_context_map *context_map)
-{
- mutex_lock(&context_map->lock);
- btree_destroy32(&context_map->btree_head);
- mutex_unlock(&context_map->lock);
-}
-
-static u32
-mmal_context_map_create_handle(struct vchiq_mmal_context_map *context_map,
- struct mmal_msg_context *msg_context,
- gfp_t gfp)
-{
- u32 handle;
-
- mutex_lock(&context_map->lock);
-
- while (1) {
- /* just use a simple count for handles, but do not use 0 */
- context_map->last_handle++;
- if (!context_map->last_handle)
- context_map->last_handle++;
-
- handle = context_map->last_handle;
-
- /* check if the handle is already in use */
- if (!btree_lookup32(&context_map->btree_head, handle))
- break;
- }
-
- if (btree_insert32(&context_map->btree_head, handle,
- msg_context, gfp)) {
- /* probably out of memory */
- mutex_unlock(&context_map->lock);
- return 0;
- }
-
- mutex_unlock(&context_map->lock);
- return handle;
-}
-
-static struct mmal_msg_context *
-mmal_context_map_lookup_handle(struct vchiq_mmal_context_map *context_map,
- u32 handle)
-{
- struct mmal_msg_context *msg_context;
-
- if (!handle)
- return NULL;
-
- mutex_lock(&context_map->lock);
-
- msg_context = btree_lookup32(&context_map->btree_head, handle);
-
- mutex_unlock(&context_map->lock);
- return msg_context;
-}
-
-static void
-mmal_context_map_destroy_handle(struct vchiq_mmal_context_map *context_map,
- u32 handle)
-{
- mutex_lock(&context_map->lock);
- btree_remove32(&context_map->btree_head, handle);
- mutex_unlock(&context_map->lock);
-}
-
static struct mmal_msg_context *
get_msg_context(struct vchiq_mmal_instance *instance)
{
struct mmal_msg_context *msg_context;
+ int handle;
/* todo: should this be allocated from a pool to avoid kzalloc */
msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
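Note: the helpers deleted above (an open-coded handle allocator guarded by a mutex on top of btree_head32) reimplement what the kernel's generic ID allocator already provides. A minimal sketch of the struct idr pattern the patch switches to, with illustrative names not taken from this driver:

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(map_lock);
    static DEFINE_IDR(map);

    static int map_add(void *ptr)
    {
        int id;

        /* returns a fresh unused ID, or a negative errno */
        spin_lock(&map_lock);
        id = idr_alloc(&map, ptr, 0, 0, GFP_ATOMIC);
        spin_unlock(&map_lock);
        return id;
    }

    static void *map_find(int id)
    {
        /* lookups may run under rcu_read_lock() instead of the lock */
        return idr_find(&map, id);
    }

    static void map_del(int id)
    {
        spin_lock(&map_lock);
        idr_remove(&map, id);
        spin_unlock(&map_lock);
    }

Unlike the removed code, there is no search loop and no wraparound handling; idr_alloc() guarantees an unused ID.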
@@ -261,32 +181,40 @@ get_msg_context(struct vchiq_mmal_instance *instance)
if (!msg_context)
return ERR_PTR(-ENOMEM);
- msg_context->instance = instance;
- msg_context->handle =
- mmal_context_map_create_handle(&instance->context_map,
- msg_context,
- GFP_KERNEL);
+ /* Create an ID that will be passed along with our message so
+ * that when we service the VCHI reply, we can look up what
+ * message is being replied to.
+ */
+ spin_lock(&instance->context_map_lock);
+ handle = idr_alloc(&instance->context_map, msg_context,
+ 0, 0, GFP_KERNEL);
+ spin_unlock(&instance->context_map_lock);
- if (!msg_context->handle) {
+ if (handle < 0) {
kfree(msg_context);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(handle);
}
+ msg_context->instance = instance;
+ msg_context->handle = handle;
+
return msg_context;
}
static struct mmal_msg_context *
-lookup_msg_context(struct vchiq_mmal_instance *instance, u32 handle)
+lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
{
- return mmal_context_map_lookup_handle(&instance->context_map,
- handle);
+ return idr_find(&instance->context_map, handle);
}
static void
release_msg_context(struct mmal_msg_context *msg_context)
{
- mmal_context_map_destroy_handle(&msg_context->instance->context_map,
- msg_context->handle);
+ struct vchiq_mmal_instance *instance = msg_context->instance;
+
+ spin_lock(&instance->context_map_lock);
+ idr_remove(&instance->context_map, msg_context->handle);
+ spin_unlock(&instance->context_map_lock);
kfree(msg_context);
}
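Note: idr_alloc() is called here with GFP_KERNEL while context_map_lock is held, and a GFP_KERNEL allocation may sleep inside a spinlock. The documented way to keep the sleeping allocation outside the critical section is idr_preload(); a hedged sketch of that alternative (this is not what the patch does):

    idr_preload(GFP_KERNEL);            /* may sleep: outside the lock */
    spin_lock(&instance->context_map_lock);
    handle = idr_alloc(&instance->context_map, msg_context,
                       0, 0, GFP_NOWAIT);  /* will not sleep */
    spin_unlock(&instance->context_map_lock);
    idr_preload_end();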
@@ -321,8 +249,6 @@ static void buffer_work_cb(struct work_struct *work)
msg_context->u.bulk.dts,
msg_context->u.bulk.pts);
- /* release message context */
- release_msg_context(msg_context);
}
/* enqueue a bulk receive for a given message context */
@@ -331,23 +257,12 @@ static int bulk_receive(struct vchiq_mmal_instance *instance,
struct mmal_msg_context *msg_context)
{
unsigned long rd_len;
- unsigned long flags = 0;
int ret;
- /* bulk mutex stops other bulk operations while we have a
- * receive in progress - released in callback
- */
- ret = mutex_lock_interruptible(&instance->bulk_mutex);
- if (ret != 0)
- return ret;
-
rd_len = msg->u.buffer_from_host.buffer_header.length;
- /* take buffer from queue */
- spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
- if (list_empty(&msg_context->u.bulk.port->buffers)) {
- spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
- pr_err("buffer list empty trying to submit bulk receive\n");
+ if (!msg_context->u.bulk.buffer) {
+ pr_err("bulk.buffer not configured - error in buffer_from_host\n");
/* todo: this is a serious error, we should never have
* committed a buffer_to_host operation to the mmal
@@ -359,18 +274,9 @@ static int bulk_receive(struct vchiq_mmal_instance *instance,
* waiting bulk receive?
*/
- mutex_unlock(&instance->bulk_mutex);
-
return -EINVAL;
}
- msg_context->u.bulk.buffer =
- list_entry(msg_context->u.bulk.port->buffers.next,
- struct mmal_buffer, list);
- list_del(&msg_context->u.bulk.buffer->list);
-
- spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
-
/* ensure we do not overrun the available buffer */
if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
rd_len = msg_context->u.bulk.buffer->buffer_size;
@@ -401,11 +307,6 @@ static int bulk_receive(struct vchiq_mmal_instance *instance,
vchi_service_release(instance->handle);
- if (ret != 0) {
- /* callback will not be clearing the mutex */
- mutex_unlock(&instance->bulk_mutex);
- }
-
return ret;
}
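Note: dropping bulk_mutex works because the serialisation it enforced is no longer needed: every in-flight buffer now carries a private mmal_msg_context, and that context pointer accompanies each bulk transfer, so completions identify themselves. An illustrative sketch of the queueing call this hunk leaves in place (arguments approximate, per the staging VCHI header):

    /* msg_context rides along as the bulk handle; the VCHI service
     * callback hands it back, so concurrent receives demultiplex
     * without a driver-wide mutex.
     */
    ret = vchi_bulk_queue_receive(instance->handle,
                                  msg_context->u.bulk.buffer->buffer,
                                  (rd_len + 3) & ~3, /* rounding assumed */
                                  VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
                                  VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
                                  msg_context);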
@@ -415,13 +316,6 @@ static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
{
int ret;
- /* bulk mutex stops other bulk operations while we have a
- * receive in progress - released in callback
- */
- ret = mutex_lock_interruptible(&instance->bulk_mutex);
- if (ret != 0)
- return ret;
-
/* zero length indicates this was a dummy transfer */
msg_context->u.bulk.buffer_used = 0;
@@ -437,11 +331,6 @@ static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
vchi_service_release(instance->handle);
- if (ret != 0) {
- /* callback will not be clearing the mutex */
- mutex_unlock(&instance->bulk_mutex);
- }
-
return ret;
}
@@ -450,31 +339,6 @@ static int inline_receive(struct vchiq_mmal_instance *instance,
struct mmal_msg *msg,
struct mmal_msg_context *msg_context)
{
- unsigned long flags = 0;
-
- /* take buffer from queue */
- spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
- if (list_empty(&msg_context->u.bulk.port->buffers)) {
- spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
- pr_err("buffer list empty trying to receive inline\n");
-
- /* todo: this is a serious error, we should never have
- * committed a buffer_to_host operation to the mmal
- * port without the buffer to back it up (with
- * underflow handling) and there is no obvious way to
- * deal with this. Less bad than the bulk case as we
- * can just drop this on the floor but...unhelpful
- */
- return -EINVAL;
- }
-
- msg_context->u.bulk.buffer =
- list_entry(msg_context->u.bulk.port->buffers.next,
- struct mmal_buffer, list);
- list_del(&msg_context->u.bulk.buffer->list);
-
- spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
-
memcpy(msg_context->u.bulk.buffer->buffer,
msg->u.buffer_from_host.short_data,
msg->u.buffer_from_host.payload_in_message);
@@ -494,25 +358,23 @@ buffer_from_host(struct vchiq_mmal_instance *instance,
struct mmal_msg m;
int ret;
- pr_debug("instance:%p buffer:%p\n", instance->handle, buf);
+ if (!port->enabled)
+ return -EINVAL;
- /* bulk mutex stops other bulk operations while we
- * have a receive in progress
- */
- if (mutex_lock_interruptible(&instance->bulk_mutex))
- return -EINTR;
+ pr_debug("instance:%p buffer:%p\n", instance->handle, buf);
/* get context */
- msg_context = get_msg_context(instance);
- if (IS_ERR(msg_context)) {
- ret = PTR_ERR(msg_context);
- goto unlock;
+ if (!buf->msg_context) {
+ pr_err("%s: msg_context not allocated, buf %p\n", __func__,
+ buf);
+ return -EINVAL;
}
+ msg_context = buf->msg_context;
/* store bulk message context for when data arrives */
msg_context->u.bulk.instance = instance;
msg_context->u.bulk.port = port;
- msg_context->u.bulk.buffer = NULL; /* not valid until bulk xfer */
+ msg_context->u.bulk.buffer = buf;
msg_context->u.bulk.buffer_used = 0;
/* initialise work structure ready to schedule callback */
@@ -557,54 +419,8 @@ buffer_from_host(struct vchiq_mmal_instance *instance,
sizeof(struct mmal_msg_header) +
sizeof(m.u.buffer_from_host));
- if (ret != 0) {
- release_msg_context(msg_context);
- /* todo: is this correct error value? */
- }
-
vchi_service_release(instance->handle);
-unlock:
- mutex_unlock(&instance->bulk_mutex);
-
- return ret;
-}
-
-/* submit a buffer to the mmal sevice
- *
- * the buffer_from_host uses size data from the ports next available
- * mmal_buffer and deals with there being no buffer available by
- * incrementing the underflow for later
- */
-static int port_buffer_from_host(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port)
-{
- int ret;
- struct mmal_buffer *buf;
- unsigned long flags = 0;
-
- if (!port->enabled)
- return -EINVAL;
-
- /* peek buffer from queue */
- spin_lock_irqsave(&port->slock, flags);
- if (list_empty(&port->buffers)) {
- port->buffer_underflow++;
- spin_unlock_irqrestore(&port->slock, flags);
- return -ENOSPC;
- }
-
- buf = list_entry(port->buffers.next, struct mmal_buffer, list);
-
- spin_unlock_irqrestore(&port->slock, flags);
-
- /* issue buffer to mmal service */
- ret = buffer_from_host(instance, port, buf);
- if (ret) {
- pr_err("adding buffer header failed\n");
- /* todo: how should this be dealt with */
- }
-
return ret;
}
@@ -680,9 +496,6 @@ static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
msg->u.buffer_from_host.payload_in_message;
}
- /* replace the buffer header */
- port_buffer_from_host(instance, msg_context->u.bulk.port);
-
/* schedule the port callback */
schedule_work(&msg_context->u.bulk.work);
}
@@ -690,13 +503,6 @@ static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
struct mmal_msg_context *msg_context)
{
- /* bulk receive operation complete */
- mutex_unlock(&msg_context->u.bulk.instance->bulk_mutex);
-
- /* replace the buffer header */
- port_buffer_from_host(msg_context->u.bulk.instance,
- msg_context->u.bulk.port);
-
msg_context->u.bulk.status = 0;
/* schedule the port callback */
@@ -708,13 +514,6 @@ static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
{
pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
- /* bulk receive operation complete */
- mutex_unlock(&msg_context->u.bulk.instance->bulk_mutex);
-
- /* replace the buffer header */
- port_buffer_from_host(msg_context->u.bulk.instance,
- msg_context->u.bulk.port);
-
msg_context->u.bulk.status = -EINTR;
schedule_work(&msg_context->u.bulk.work);
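Note: with the callback hunks above, completion paths no longer re-arm the port by pulling the next buffer off port->buffers; a buffer goes back to the firmware only when the V4L2 side resubmits it. A hypothetical caller-side sketch (helper names, the camera_dev type, and the vb2 embedding in mmal_buffer are assumptions, not code from this patch):

    /* completion: hand the filled buffer to videobuf2 */
    static void camera_buffer_cb(struct vchiq_mmal_instance *instance,
                                 struct vchiq_mmal_port *port,
                                 int status, struct mmal_buffer *buf)
    {
        vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
        /* no port_buffer_from_host() here any more */
    }

    /* resubmission: driven by userspace QBUF via videobuf2 */
    static void camera_vb2_buffer_queue(struct vb2_buffer *vb)
    {
        struct camera_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
        struct mmal_buffer *buf =
            container_of(to_vb2_v4l2_buffer(vb), struct mmal_buffer, vb);

        vchiq_mmal_submit_buffer(dev->instance, dev->capture_port, buf);
    }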
@@ -1482,7 +1281,14 @@ static int port_disable(struct vchiq_mmal_instance *instance,
ret = port_action_port(instance, port,
MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
if (ret == 0) {
- /* drain all queued buffers on port */
+ /*
+ * Drain all queued buffers on the port. This should only
+ * apply to buffers that were queued before the port was
+ * enabled. Once the port has been enabled and buffers have
+ * been passed to it, they will have been removed from this
+ * list, and we will get the relevant callbacks via VCHIQ
+ * to release them.
+ */
spin_lock_irqsave(&port->slock, flags);
list_for_each_safe(buf_head, q, &port->buffers) {
@@ -1511,7 +1317,7 @@ static int port_enable(struct vchiq_mmal_instance *instance,
struct vchiq_mmal_port *port)
{
unsigned int hdr_count;
- struct list_head *buf_head;
+ struct list_head *q, *buf_head;
int ret;
if (port->enabled)
@@ -1537,7 +1343,7 @@ static int port_enable(struct vchiq_mmal_instance *instance,
if (port->buffer_cb) {
/* send buffer headers to videocore */
hdr_count = 1;
- list_for_each(buf_head, &port->buffers) {
+ list_for_each_safe(buf_head, q, &port->buffers) {
struct mmal_buffer *mmalbuf;
mmalbuf = list_entry(buf_head, struct mmal_buffer,
@@ -1546,6 +1352,7 @@ static int port_enable(struct vchiq_mmal_instance *instance,
if (ret)
goto done;
+ list_del(buf_head);
hdr_count++;
if (hdr_count > port->current_buffer.num)
break;
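Note: because the loop body now calls list_del() on the node being visited, the iterator must be list_for_each_safe(), which caches the next pointer in q before the body runs; plain list_for_each() would follow the next pointer of an already-detached (poisoned) entry. The difference in miniature:

    struct list_head *pos, *tmp;

    /* BROKEN once the body deletes pos: pos->next is stale
     * list_for_each(pos, &port->buffers)
     *         list_del(pos);
     */

    /* safe: tmp holds pos->next before list_del() touches it */
    list_for_each_safe(pos, tmp, &port->buffers)
            list_del(pos);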
@@ -1758,23 +1565,42 @@ int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
struct mmal_buffer *buffer)
{
unsigned long flags = 0;
+ int ret;
- spin_lock_irqsave(&port->slock, flags);
- list_add_tail(&buffer->list, &port->buffers);
- spin_unlock_irqrestore(&port->slock, flags);
-
- /* the port previously underflowed because it was missing a
- * mmal_buffer which has just been added, submit that buffer
- * to the mmal service.
- */
- if (port->buffer_underflow) {
- port_buffer_from_host(instance, port);
- port->buffer_underflow--;
+ ret = buffer_from_host(instance, port, buffer);
+ if (ret == -EINVAL) {
+ /* Port is disabled. Queue for when it is enabled. */
+ spin_lock_irqsave(&port->slock, flags);
+ list_add_tail(&buffer->list, &port->buffers);
+ spin_unlock_irqrestore(&port->slock, flags);
}
return 0;
}
+int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
+ struct mmal_buffer *buf)
+{
+ struct mmal_msg_context *msg_context = get_msg_context(instance);
+
+ if (IS_ERR(msg_context))
+ return PTR_ERR(msg_context);
+
+ buf->msg_context = msg_context;
+ return 0;
+}
+
+int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
+{
+ struct mmal_msg_context *msg_context = buf->msg_context;
+
+ if (msg_context)
+ release_msg_context(msg_context);
+ buf->msg_context = NULL;
+
+ return 0;
+}
+
/* Initialise a mmal component and its ports
*
*/
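Note: these two exports change the msg_context lifetime from one-per-submission to one-per-buffer: mmal_vchi_buffer_init() allocates the context (and its idr handle) once, buffer_from_host() reuses it on every round trip, and mmal_vchi_buffer_cleanup() frees it when the buffer dies. A hedged sketch of the expected pairing from a videobuf2 client (op names standard; surrounding types assumed):

    static int camera_vb2_buf_init(struct vb2_buffer *vb)
    {
        struct camera_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
        struct mmal_buffer *buf =
            container_of(to_vb2_v4l2_buffer(vb), struct mmal_buffer, vb);

        /* once per buffer, at allocation time */
        return mmal_vchi_buffer_init(dev->instance, buf);
    }

    static void camera_vb2_buf_cleanup(struct vb2_buffer *vb)
    {
        struct mmal_buffer *buf =
            container_of(to_vb2_v4l2_buffer(vb), struct mmal_buffer, vb);

        /* once per buffer, at teardown */
        mmal_vchi_buffer_cleanup(buf);
    }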
@@ -1965,7 +1791,7 @@ int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
vfree(instance->bulk_scratch);
- mmal_context_map_destroy(&instance->context_map);
+ idr_destroy(&instance->context_map);
kfree(instance);
@@ -2024,16 +1850,11 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
return -ENOMEM;
mutex_init(&instance->vchiq_mutex);
- mutex_init(&instance->bulk_mutex);
instance->bulk_scratch = vmalloc(PAGE_SIZE);
- status = mmal_context_map_init(&instance->context_map);
- if (status) {
- pr_err("Failed to init context map (status=%d)\n", status);
- kfree(instance);
- return status;
- }
+ spin_lock_init(&instance->context_map_lock);
+ idr_init_base(&instance->context_map, 1);
params.callback_param = instance;
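Note: idr_init_base(&instance->context_map, 1) sets the lowest allocatable ID to 1, so even though get_msg_context() passes start = 0 to idr_alloc(), handle 0 is never handed out. That preserves the removed map's "do not use 0" rule, keeping a zero handle in a VCHI message as an unambiguous "no context" sentinel:

    struct idr map;

    idr_init_base(&map, 1);
    /* a start of 0 is silently raised to the base, so this yields 1 */
    id = idr_alloc(&map, ptr, 0, 0, GFP_KERNEL);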