Diffstat (limited to 'drivers/dma-buf/dma-buf.c')
 drivers/dma-buf/dma-buf.c | 801
 1 file changed, 535 insertions(+), 266 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index c343c7c10b4c..dd0f83ee505b 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -20,6 +20,7 @@
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
+#include <linux/sync_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
@@ -29,6 +30,8 @@
#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>
+#include "dma-buf-sysfs-stats.h"
+
static inline int is_dma_buf_file(struct file *);
struct dma_buf_list {
@@ -45,17 +48,63 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
size_t ret = 0;
dmabuf = dentry->d_fsdata;
- dma_resv_lock(dmabuf->resv, NULL);
+ spin_lock(&dmabuf->name_lock);
if (dmabuf->name)
ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
- dma_resv_unlock(dmabuf->resv);
+ spin_unlock(&dmabuf->name_lock);
- return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
+ return dynamic_dname(buffer, buflen, "/%s:%s",
dentry->d_name.name, ret > 0 ? name : "");
}
+static void dma_buf_release(struct dentry *dentry)
+{
+ struct dma_buf *dmabuf;
+
+ dmabuf = dentry->d_fsdata;
+ if (unlikely(!dmabuf))
+ return;
+
+ BUG_ON(dmabuf->vmapping_counter);
+
+ /*
+ * If you hit this BUG() it could mean:
+ * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
+ * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
+ */
+ BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
+
+ dma_buf_stats_teardown(dmabuf);
+ dmabuf->ops->release(dmabuf);
+
+ if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
+ dma_resv_fini(dmabuf->resv);
+
+ WARN_ON(!list_empty(&dmabuf->attachments));
+ module_put(dmabuf->owner);
+ kfree(dmabuf->name);
+ kfree(dmabuf);
+}
+
+static int dma_buf_file_release(struct inode *inode, struct file *file)
+{
+ struct dma_buf *dmabuf;
+
+ if (!is_dma_buf_file(file))
+ return -EINVAL;
+
+ dmabuf = file->private_data;
+
+ mutex_lock(&db_list.lock);
+ list_del(&dmabuf->list_node);
+ mutex_unlock(&db_list.lock);
+
+ return 0;
+}
+
static const struct dentry_operations dma_buf_dentry_ops = {
.d_dname = dmabuffs_dname,
+ .d_release = dma_buf_release,
};
static struct vfsmount *dma_buf_mnt;
@@ -77,42 +126,6 @@ static struct file_system_type dma_buf_fs_type = {
.kill_sb = kill_anon_super,
};
-static int dma_buf_release(struct inode *inode, struct file *file)
-{
- struct dma_buf *dmabuf;
-
- if (!is_dma_buf_file(file))
- return -EINVAL;
-
- dmabuf = file->private_data;
-
- BUG_ON(dmabuf->vmapping_counter);
-
- /*
- * Any fences that a dma-buf poll can wait on should be signaled
- * before releasing dma-buf. This is the responsibility of each
- * driver that uses the reservation objects.
- *
- * If you hit this BUG() it means someone dropped their ref to the
- * dma-buf while still having pending operation to the buffer.
- */
- BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
-
- dmabuf->ops->release(dmabuf);
-
- mutex_lock(&db_list.lock);
- list_del(&dmabuf->list_node);
- mutex_unlock(&db_list.lock);
-
- if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
- dma_resv_fini(dmabuf->resv);
-
- module_put(dmabuf->owner);
- kfree(dmabuf->name);
- kfree(dmabuf);
- return 0;
-}
-
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
struct dma_buf *dmabuf;
@@ -161,11 +174,11 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
}
/**
- * DOC: fence polling
+ * DOC: implicit fence polling
*
* To support cross-device and cross-driver synchronization of buffer access
- * implicit fences (represented internally in the kernel with &struct fence) can
- * be attached to a &dma_buf. The glue for that and a few related things are
+ * implicit fences (represented internally in the kernel with &struct dma_fence)
+ * can be attached to a &dma_buf. The glue for that and a few related things are
* provided in the &dma_resv structure.
*
* Userspace can query the state of these implicitly tracked fences using poll()
@@ -180,27 +193,50 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
* Note that this only signals the completion of the respective fences, i.e. the
* DMA transfers are complete. Cache flushing and any other necessary
* preparations before CPU access can begin still need to happen.
+ *
+ * As an alternative to poll(), the set of fences on a DMA buffer can be
+ * exported as a &sync_file using the DMA_BUF_IOCTL_EXPORT_SYNC_FILE ioctl.
*/
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
+ struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
unsigned long flags;
spin_lock_irqsave(&dcb->poll->lock, flags);
wake_up_locked_poll(dcb->poll, dcb->active);
dcb->active = 0;
spin_unlock_irqrestore(&dcb->poll->lock, flags);
+ dma_fence_put(fence);
+ /* Paired with get_file in dma_buf_poll */
+ fput(dmabuf->file);
+}
+
+static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
+ struct dma_buf_poll_cb_t *dcb)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ int r;
+
+ dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
+ fence) {
+ dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+ if (!r)
+ return true;
+ dma_fence_put(fence);
+ }
+
+ return false;
}
static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
struct dma_buf *dmabuf;
struct dma_resv *resv;
- struct dma_resv_list *fobj;
- struct dma_fence *fence_excl;
__poll_t events;
- unsigned shared_count, seq;
dmabuf = file->private_data;
if (!dmabuf || !dmabuf->resv)
@@ -214,114 +250,66 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
if (!events)
return 0;
-retry:
- seq = read_seqcount_begin(&resv->seq);
- rcu_read_lock();
+ dma_resv_lock(resv, NULL);
- fobj = rcu_dereference(resv->fence);
- if (fobj)
- shared_count = fobj->shared_count;
- else
- shared_count = 0;
- fence_excl = rcu_dereference(resv->fence_excl);
- if (read_seqcount_retry(&resv->seq, seq)) {
- rcu_read_unlock();
- goto retry;
- }
-
- if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
- struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
- __poll_t pevents = EPOLLIN;
-
- if (shared_count == 0)
- pevents |= EPOLLOUT;
+ if (events & EPOLLOUT) {
+ struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
+ /* Check that callback isn't busy */
spin_lock_irq(&dmabuf->poll.lock);
- if (dcb->active) {
- dcb->active |= pevents;
- events &= ~pevents;
- } else
- dcb->active = pevents;
+ if (dcb->active)
+ events &= ~EPOLLOUT;
+ else
+ dcb->active = EPOLLOUT;
spin_unlock_irq(&dmabuf->poll.lock);
- if (events & pevents) {
- if (!dma_fence_get_rcu(fence_excl)) {
- /* force a recheck */
- events &= ~pevents;
- dma_buf_poll_cb(NULL, &dcb->cb);
- } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
- dma_buf_poll_cb)) {
- events &= ~pevents;
- dma_fence_put(fence_excl);
- } else {
- /*
- * No callback queued, wake up any additional
- * waiters.
- */
- dma_fence_put(fence_excl);
+ if (events & EPOLLOUT) {
+ /* Paired with fput in dma_buf_poll_cb */
+ get_file(dmabuf->file);
+
+ if (!dma_buf_poll_add_cb(resv, true, dcb))
+ /* No callback queued, wake up any other waiters */
dma_buf_poll_cb(NULL, &dcb->cb);
- }
+ else
+ events &= ~EPOLLOUT;
}
}
- if ((events & EPOLLOUT) && shared_count > 0) {
- struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
- int i;
+ if (events & EPOLLIN) {
+ struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
- /* Only queue a new callback if no event has fired yet */
+ /* Check that callback isn't busy */
spin_lock_irq(&dmabuf->poll.lock);
if (dcb->active)
- events &= ~EPOLLOUT;
+ events &= ~EPOLLIN;
else
- dcb->active = EPOLLOUT;
+ dcb->active = EPOLLIN;
spin_unlock_irq(&dmabuf->poll.lock);
- if (!(events & EPOLLOUT))
- goto out;
+ if (events & EPOLLIN) {
+ /* Paired with fput in dma_buf_poll_cb */
+ get_file(dmabuf->file);
- for (i = 0; i < shared_count; ++i) {
- struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
-
- if (!dma_fence_get_rcu(fence)) {
- /*
- * fence refcount dropped to zero, this means
- * that fobj has been freed
- *
- * call dma_buf_poll_cb and force a recheck!
- */
- events &= ~EPOLLOUT;
+ if (!dma_buf_poll_add_cb(resv, false, dcb))
+ /* No callback queued, wake up any other waiters */
dma_buf_poll_cb(NULL, &dcb->cb);
- break;
- }
- if (!dma_fence_add_callback(fence, &dcb->cb,
- dma_buf_poll_cb)) {
- dma_fence_put(fence);
- events &= ~EPOLLOUT;
- break;
- }
- dma_fence_put(fence);
+ else
+ events &= ~EPOLLIN;
}
-
- /* No callback queued, wake up any additional waiters. */
- if (i == shared_count)
- dma_buf_poll_cb(NULL, &dcb->cb);
}
-out:
- rcu_read_unlock();
+ dma_resv_unlock(resv);
return events;
}
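
A minimal userspace sketch of the poll() interface described above (the helper and fd names are illustrative, not part of this patch): EPOLLIN becomes ready once the fences gating read access have signalled, EPOLLOUT once all outstanding access, read and write, has completed.

    #include <poll.h>

    /* Block until it is safe for the CPU to read the dma-buf contents. */
    static int dmabuf_wait_readable(int dmabuf_fd)
    {
            struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };

            return poll(&pfd, 1, -1) < 0 ? -1 : 0;
    }
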
/**
* dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
- * The name of the dma-buf buffer can only be set when the dma-buf is not
- * attached to any devices. It could theoritically support changing the
- * name of the dma-buf if the same piece of memory is used for multiple
- * purpose between different devices.
+ * It could support changing the name of the dma-buf if the same
+ * piece of memory is used for multiple purposes between different devices.
*
- * @dmabuf [in] dmabuf buffer that will be renamed.
- * @buf: [in] A piece of userspace memory that contains the name of
- * the dma-buf.
+ * @dmabuf: [in] dmabuf buffer that will be renamed.
+ * @buf: [in] A piece of userspace memory that contains the name of
+ * the dma-buf.
*
* Returns 0 on success. If the dma-buf buffer is already attached to
* devices, return -EBUSY.
@@ -330,24 +318,112 @@ out:
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
- long ret = 0;
if (IS_ERR(name))
return PTR_ERR(name);
- dma_resv_lock(dmabuf->resv, NULL);
- if (!list_empty(&dmabuf->attachments)) {
- ret = -EBUSY;
- kfree(name);
- goto out_unlock;
- }
+ spin_lock(&dmabuf->name_lock);
kfree(dmabuf->name);
dmabuf->name = name;
+ spin_unlock(&dmabuf->name_lock);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_SYNC_FILE)
+static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
+ void __user *user_data)
+{
+ struct dma_buf_export_sync_file arg;
+ enum dma_resv_usage usage;
+ struct dma_fence *fence = NULL;
+ struct sync_file *sync_file;
+ int fd, ret;
+
+ if (copy_from_user(&arg, user_data, sizeof(arg)))
+ return -EFAULT;
+
+ if (arg.flags & ~DMA_BUF_SYNC_RW)
+ return -EINVAL;
+
+ if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
+ return -EINVAL;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
+ ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
+ if (ret)
+ goto err_put_fd;
+
+ if (!fence)
+ fence = dma_fence_get_stub();
+
+ sync_file = sync_file_create(fence);
+
+ dma_fence_put(fence);
+
+ if (!sync_file) {
+ ret = -ENOMEM;
+ goto err_put_fd;
+ }
+
+ arg.fd = fd;
+ if (copy_to_user(user_data, &arg, sizeof(arg))) {
+ ret = -EFAULT;
+ goto err_put_file;
+ }
+
+ fd_install(fd, sync_file->file);
+
+ return 0;
+
+err_put_file:
+ fput(sync_file->file);
+err_put_fd:
+ put_unused_fd(fd);
+ return ret;
+}
+
+static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
+ const void __user *user_data)
+{
+ struct dma_buf_import_sync_file arg;
+ struct dma_fence *fence;
+ enum dma_resv_usage usage;
+ int ret = 0;
+
+ if (copy_from_user(&arg, user_data, sizeof(arg)))
+ return -EFAULT;
+
+ if (arg.flags & ~DMA_BUF_SYNC_RW)
+ return -EINVAL;
+
+ if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
+ return -EINVAL;
+
+ fence = sync_file_get_fence(arg.fd);
+ if (!fence)
+ return -EINVAL;
+
+ usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
+ DMA_RESV_USAGE_READ;
+
+ dma_resv_lock(dmabuf->resv, NULL);
+
+ ret = dma_resv_reserve_fences(dmabuf->resv, 1);
+ if (!ret)
+ dma_resv_add_fence(dmabuf->resv, fence, usage);
-out_unlock:
dma_resv_unlock(dmabuf->resv);
+
+ dma_fence_put(fence);
+
return ret;
}
+#endif
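
For reference, a hedged userspace sketch of the export path added above, using the uapi struct dma_buf_export_sync_file (the helper name is illustrative): DMA_BUF_SYNC_RW snapshots both read and write fences into a single sync_file fd.

    #include <sys/ioctl.h>
    #include <linux/dma-buf.h>

    /* Snapshot the buffer's current fences into a pollable sync_file fd. */
    static int dmabuf_export_fences(int dmabuf_fd)
    {
            struct dma_buf_export_sync_file arg = {
                    .flags = DMA_BUF_SYNC_RW,
            };

            if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &arg) < 0)
                    return -1;

            return arg.fd;
    }
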
static long dma_buf_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
@@ -388,9 +464,17 @@ static long dma_buf_ioctl(struct file *file,
return ret;
- case DMA_BUF_SET_NAME:
+ case DMA_BUF_SET_NAME_A:
+ case DMA_BUF_SET_NAME_B:
return dma_buf_set_name(dmabuf, (const char __user *)arg);
+#if IS_ENABLED(CONFIG_SYNC_FILE)
+ case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
+ return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
+ case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
+ return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
+#endif
+
default:
return -ENOTTY;
}
@@ -404,14 +488,14 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
/* Don't count the temporary reference taken inside procfs seq_show */
seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
- dma_resv_lock(dmabuf->resv, NULL);
+ spin_lock(&dmabuf->name_lock);
if (dmabuf->name)
seq_printf(m, "name:\t%s\n", dmabuf->name);
- dma_resv_unlock(dmabuf->resv);
+ spin_unlock(&dmabuf->name_lock);
}
static const struct file_operations dma_buf_fops = {
- .release = dma_buf_release,
+ .release = dma_buf_file_release,
.mmap = dma_buf_mmap_internal,
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
@@ -430,6 +514,7 @@ static inline int is_dma_buf_file(struct file *file)
static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
+ static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
struct file *file;
struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
@@ -439,11 +524,18 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
inode->i_size = dmabuf->size;
inode_set_bytes(inode, dmabuf->size);
+ /*
+ * The ->i_ino acquired from get_next_ino() is not unique and thus
+ * not suitable for use as the dentry name by dmabuf stats.
+ * Override ->i_ino with a unique, dmabuffs-specific value.
+ */
+ inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
+ flags &= O_ACCMODE | O_NONBLOCK;
file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
flags, &dma_buf_fops);
if (IS_ERR(file))
goto err_alloc_file;
- file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
file->private_data = dmabuf;
file->f_path.dentry->d_fsdata = dmabuf;
@@ -466,7 +558,7 @@ err_alloc_file:
* as a file descriptor by calling dma_buf_fd().
*
* 2. Userspace passes this file-descriptors to all drivers it wants this buffer
- * to share with: First the filedescriptor is converted to a &dma_buf using
+ * to share with: First the file descriptor is converted to a &dma_buf using
* dma_buf_get(). Then the buffer is attached to the device using
* dma_buf_attach().
*
@@ -479,7 +571,7 @@ err_alloc_file:
*
* 4. Once a driver is done with a shared buffer it needs to call
* dma_buf_detach() (after cleaning up any mappings) and then release the
- * reference acquired with dma_buf_get by calling dma_buf_put().
+ * reference acquired with dma_buf_get() by calling dma_buf_put().
*
* For the detailed semantics exporters are expected to implement see
* &dma_buf_ops.
@@ -495,9 +587,10 @@ err_alloc_file:
* by the exporter. see &struct dma_buf_export_info
* for further details.
*
- * Returns, on success, a newly created dma_buf object, which wraps the
- * supplied private data and operations for dma_buf_ops. On either missing
- * ops, or error in allocating struct dma_buf, will return negative error.
+ * Returns, on success, a newly created struct dma_buf object, which wraps the
+ * supplied private data and operations for struct dma_buf_ops. On either
+ * missing ops, or error in allocating struct dma_buf, will return negative
+ * error.
*
* For most cases the easiest way to create @exp_info is through the
* %DEFINE_DMA_BUF_EXPORT_INFO macro.
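
A minimal exporter sketch along these lines, assuming a hypothetical my_dmabuf_ops and backing object (illustrative only); DEFINE_DMA_BUF_EXPORT_INFO() fills in exp_name and owner:

    static struct dma_buf *my_export(struct my_buffer *buf)
    {
            DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

            exp_info.ops   = &my_dmabuf_ops;  /* hypothetical exporter ops */
            exp_info.size  = buf->size;
            exp_info.flags = O_RDWR;
            exp_info.priv  = buf;

            return dma_buf_export(&exp_info);
    }
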
@@ -525,7 +618,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
}
if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
- exp_info->ops->dynamic_mapping))
+ (exp_info->ops->pin || exp_info->ops->unpin)))
+ return ERR_PTR(-EINVAL);
+
+ if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
return ERR_PTR(-EINVAL);
if (!try_module_get(exp_info->owner))
@@ -542,9 +638,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf->size = exp_info->size;
dmabuf->exp_name = exp_info->exp_name;
dmabuf->owner = exp_info->owner;
+ spin_lock_init(&dmabuf->name_lock);
init_waitqueue_head(&dmabuf->poll);
- dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
- dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+ dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
+ dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
if (!resv) {
resv = (struct dma_resv *)&dmabuf[1];
@@ -558,7 +655,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
goto err_dmabuf;
}
- file->f_mode |= FMODE_LSEEK;
dmabuf->file = file;
mutex_init(&dmabuf->lock);
@@ -568,18 +664,30 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
list_add(&dmabuf->list_node, &db_list.head);
mutex_unlock(&db_list.lock);
+ ret = dma_buf_stats_setup(dmabuf);
+ if (ret)
+ goto err_sysfs;
+
return dmabuf;
+err_sysfs:
+ /*
+ * Set file->f_path.dentry->d_fsdata to NULL so that when
+ * dma_buf_release() gets invoked by dentry_ops, it exits
+ * early before calling the release() dma_buf op.
+ */
+ file->f_path.dentry->d_fsdata = NULL;
+ fput(file);
err_dmabuf:
kfree(dmabuf);
err_module:
module_put(exp_info->owner);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(dma_buf_export);
+EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
/**
- * dma_buf_fd - returns a file descriptor for the given dma_buf
+ * dma_buf_fd - returns a file descriptor for the given struct dma_buf
* @dmabuf: [in] pointer to dma_buf for which fd is required.
* @flags: [in] flags to give to fd
*
@@ -600,13 +708,13 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags)
return fd;
}
-EXPORT_SYMBOL_GPL(dma_buf_fd);
+EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
/**
- * dma_buf_get - returns the dma_buf structure related to an fd
- * @fd: [in] fd associated with the dma_buf to be returned
+ * dma_buf_get - returns the struct dma_buf related to an fd
+ * @fd: [in] fd associated with the struct dma_buf to be returned
*
- * On success, returns the dma_buf structure associated with an fd; uses
+ * On success, returns the struct dma_buf associated with an fd; uses
* file's refcounting done by fget to increase refcount. returns ERR_PTR
* otherwise.
*/
@@ -626,7 +734,7 @@ struct dma_buf *dma_buf_get(int fd)
return file->private_data;
}
-EXPORT_SYMBOL_GPL(dma_buf_get);
+EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
/**
* dma_buf_put - decreases refcount of the buffer
@@ -645,18 +753,61 @@ void dma_buf_put(struct dma_buf *dmabuf)
fput(dmabuf->file);
}
-EXPORT_SYMBOL_GPL(dma_buf_put);
+EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
+
+static void mangle_sg_table(struct sg_table *sg_table)
+{
+#ifdef CONFIG_DMABUF_DEBUG
+ int i;
+ struct scatterlist *sg;
+
+ /* To catch abuse of the underlying struct page by importers mix
+ * up the bits, but take care to preserve the low SG_ bits to
+ * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
+ * before passing the sgt back to the exporter. */
+ for_each_sgtable_sg(sg_table, sg, i)
+ sg->page_link ^= ~0xffUL;
+#endif
+
+}
+
+static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct sg_table *sg_table;
+ signed long ret;
+
+ sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ if (IS_ERR_OR_NULL(sg_table))
+ return sg_table;
+
+ if (!dma_buf_attachment_is_dynamic(attach)) {
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0) {
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
+ direction);
+ return ERR_PTR(ret);
+ }
+ }
+
+ mangle_sg_table(sg_table);
+ return sg_table;
+}
/**
- * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
- * calls attach() of dma_buf_ops to allow device-specific attach functionality
+ * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
- * @dynamic_mapping: [in] calling convention for map/unmap
+ * @importer_ops: [in] importer operations for the attachment
+ * @importer_priv: [in] importer private pointer for the attachment
*
* Returns struct dma_buf_attachment pointer for this attachment. Attachments
* must be cleaned up by calling dma_buf_detach().
*
+ * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
+ * functionality.
+ *
* Returns:
*
* A pointer to newly created &dma_buf_attachment on success, or a negative
@@ -668,7 +819,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
*/
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
- bool dynamic_mapping)
+ const struct dma_buf_attach_ops *importer_ops,
+ void *importer_priv)
{
struct dma_buf_attachment *attach;
int ret;
@@ -676,13 +828,19 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
if (WARN_ON(!dmabuf || !dev))
return ERR_PTR(-EINVAL);
+ if (WARN_ON(importer_ops && !importer_ops->move_notify))
+ return ERR_PTR(-EINVAL);
+
attach = kzalloc(sizeof(*attach), GFP_KERNEL);
if (!attach)
return ERR_PTR(-ENOMEM);
attach->dev = dev;
attach->dmabuf = dmabuf;
- attach->dynamic_mapping = dynamic_mapping;
+ if (importer_ops)
+ attach->peer2peer = importer_ops->allow_peer2peer;
+ attach->importer_ops = importer_ops;
+ attach->importer_priv = importer_priv;
if (dmabuf->ops->attach) {
ret = dmabuf->ops->attach(dmabuf, attach);
@@ -701,15 +859,19 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
dma_buf_is_dynamic(dmabuf)) {
struct sg_table *sgt;
- if (dma_buf_is_dynamic(attach->dmabuf))
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_lock(attach->dmabuf->resv, NULL);
+ ret = dmabuf->ops->pin(attach);
+ if (ret)
+ goto err_unlock;
+ }
- sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
+ sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
if (!sgt)
sgt = ERR_PTR(-ENOMEM);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
- goto err_unlock;
+ goto err_unpin;
}
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_unlock(attach->dmabuf->resv);
@@ -723,6 +885,10 @@ err_attach:
kfree(attach);
return ERR_PTR(ret);
+err_unpin:
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dmabuf->ops->unpin(attach);
+
err_unlock:
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_unlock(attach->dmabuf->resv);
@@ -730,7 +896,7 @@ err_unlock:
dma_buf_detach(dmabuf, attach);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
+EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
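
A sketch of how a dynamic importer might wire up the new importer_ops argument (all my_* names are hypothetical); move_notify is mandatory for dynamic attachments and must invalidate any cached mappings:

    static void my_move_notify(struct dma_buf_attachment *attach)
    {
            struct my_importer *imp = attach->importer_priv;

            /* Mappings are now stale; drop them and remap on next use. */
            my_importer_invalidate_mappings(imp);
    }

    static const struct dma_buf_attach_ops my_importer_ops = {
            .allow_peer2peer = true,
            .move_notify = my_move_notify,
    };

    /* attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops, imp); */
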
/**
* dma_buf_attach - Wrapper for dma_buf_dynamic_attach
@@ -743,17 +909,28 @@ EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev)
{
- return dma_buf_dynamic_attach(dmabuf, dev, false);
+ return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
+
+static void __unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sg_table,
+ enum dma_data_direction direction)
+{
+ /* uses XOR, hence this unmangles */
+ mangle_sg_table(sg_table);
+
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}
-EXPORT_SYMBOL_GPL(dma_buf_attach);
/**
- * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
- * optionally calls detach() of dma_buf_ops for device-specific detach
+ * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
* @dmabuf: [in] buffer to detach from.
* @attach: [in] attachment to be detached; is free'd after this call.
*
* Clean up a device attachment obtained by calling dma_buf_attach().
+ *
+ * Optionally this calls &dma_buf_ops.detach for device-specific detach.
*/
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
@@ -764,10 +941,12 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_lock(attach->dmabuf->resv, NULL);
- dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
+ __unmap_dma_buf(attach, attach->sgt, attach->dir);
- if (dma_buf_is_dynamic(attach->dmabuf))
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
+ dmabuf->ops->unpin(attach);
dma_resv_unlock(attach->dmabuf->resv);
+ }
}
dma_resv_lock(dmabuf->resv, NULL);
@@ -778,7 +957,58 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
kfree(attach);
}
-EXPORT_SYMBOL_GPL(dma_buf_detach);
+EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
+
+/**
+ * dma_buf_pin - Lock down the DMA-buf
+ * @attach: [in] attachment which should be pinned
+ *
+ * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
+ * call this, and only for limited use cases like scanout and not for temporary
+ * pin operations. It is not permitted to allow userspace to pin arbitrary
+ * amounts of buffers through this interface.
+ *
+ * Buffers must be unpinned by calling dma_buf_unpin().
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int dma_buf_pin(struct dma_buf_attachment *attach)
+{
+ struct dma_buf *dmabuf = attach->dmabuf;
+ int ret = 0;
+
+ WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ if (dmabuf->ops->pin)
+ ret = dmabuf->ops->pin(attach);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
+
+/**
+ * dma_buf_unpin - Unpin a DMA-buf
+ * @attach: [in] attachment which should be unpinned
+ *
+ * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
+ * any mapping of @attach again and inform the importer through
+ * &dma_buf_attach_ops.move_notify.
+ */
+void dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+ struct dma_buf *dmabuf = attach->dmabuf;
+
+ WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ if (dmabuf->ops->unpin)
+ dmabuf->ops->unpin(attach);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
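
A brief sketch of the pin interface for a scanout-style use case (helper name is illustrative); both calls require the reservation lock, as the dma_resv_assert_held() above enforces:

    static int my_pin_for_scanout(struct dma_buf_attachment *attach)
    {
            int ret;

            dma_resv_lock(attach->dmabuf->resv, NULL);
            ret = dma_buf_pin(attach);
            dma_resv_unlock(attach->dmabuf->resv);

            return ret;
    }
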
/**
* dma_buf_map_attachment - Returns the scatterlist table of the attachment;
@@ -790,15 +1020,22 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
* Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
* on error. May return -EINTR if it is interrupted by a signal.
*
+ * On success, the DMA addresses and lengths in the returned scatterlist are
+ * PAGE_SIZE aligned.
+ *
* A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
* the underlying backing storage is pinned for as long as a mapping exists,
* therefore users/importers should not hold onto a mapping for undue amounts of
* time.
+ *
+ * Important: Dynamic importers must wait for the exclusive fence of the struct
+ * dma_resv attached to the DMA-BUF first.
*/
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
+ int r;
might_sleep();
@@ -820,21 +1057,48 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
return attach->sgt;
}
- if (dma_buf_is_dynamic(attach->dmabuf))
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_assert_held(attach->dmabuf->resv);
+ if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+ r = attach->dmabuf->ops->pin(attach);
+ if (r)
+ return ERR_PTR(r);
+ }
+ }
- sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ sg_table = __map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
+ if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+ attach->dmabuf->ops->unpin(attach);
+
if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
attach->sgt = sg_table;
attach->dir = direction;
}
+#ifdef CONFIG_DMA_API_DEBUG
+ if (!IS_ERR(sg_table)) {
+ struct scatterlist *sg;
+ u64 addr;
+ int len;
+ int i;
+
+ for_each_sgtable_dma_sg(sg_table, sg, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
+ pr_debug("%s: addr %llx or len %x is not page aligned!\n",
+ __func__, addr, len);
+ }
+ }
+ }
+#endif /* CONFIG_DMA_API_DEBUG */
return sg_table;
}
-EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
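
A sketch of the map/unmap cycle for a non-dynamic importer (function name is illustrative); dynamic importers must additionally hold the reservation lock and respect move_notify:

    static int my_do_dma(struct dma_buf_attachment *attach)
    {
            struct sg_table *sgt;

            sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
            if (IS_ERR(sgt))
                    return PTR_ERR(sgt);

            /* Program the device using sg_dma_address()/sg_dma_len(). */

            dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
            return 0;
    }
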
/**
* dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might
@@ -864,9 +1128,33 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_assert_held(attach->dmabuf->resv);
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+ __unmap_dma_buf(attach, sg_table, direction);
+
+ if (dma_buf_is_dynamic(attach->dmabuf) &&
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+ dma_buf_unpin(attach);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
+
+/**
+ * dma_buf_move_notify - notify attachments that DMA-buf is moving
+ *
+ * @dmabuf: [in] buffer which is moving
+ *
+ * Informs all attachments that they need to destroy and recreate all their
+ * mappings.
+ */
+void dma_buf_move_notify(struct dma_buf *dmabuf)
+{
+ struct dma_buf_attachment *attach;
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ list_for_each_entry(attach, &dmabuf->attachments, node)
+ if (attach->importer_ops)
+ attach->importer_ops->move_notify(attach);
}
-EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
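
On the exporter side, a hedged sketch of when dma_buf_move_notify() would be called (the eviction helper is hypothetical): notify importers under the reservation lock before the backing storage actually moves.

    static int my_evict(struct dma_buf *dmabuf)
    {
            dma_resv_assert_held(dmabuf->resv);

            /* Importers drop their mappings in their move_notify callback. */
            dma_buf_move_notify(dmabuf);

            return my_migrate_backing_storage(dmabuf);  /* hypothetical */
    }
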
/**
* DOC: cpu access
@@ -884,15 +1172,15 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
* vmalloc space might be limited and result in vmap calls failing.
*
* Interfaces::
- * void \*dma_buf_vmap(struct dma_buf \*dmabuf)
- * void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
+ *
+ * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
+ * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
*
* The vmap call can fail if there is no vmap support in the exporter, or if
- * it runs out of vmalloc space. Fallback to kmap should be implemented. Note
- * that the dma-buf layer keeps a reference count for all vmap access and
- * calls down into the exporter's vmap function only when no vmapping exists,
- * and only unmaps it once. Protection against concurrent vmap/vunmap calls is
- * provided by taking the dma_buf->lock mutex.
+ * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
+ * count for all vmap access and calls down into the exporter's vmap function
+ * only when no vmapping exists, and only unmaps it once. Protection against
+ * concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
*
* - For full compatibility on the importer side with existing userspace
* interfaces, which might already support mmap'ing buffers. This is needed in
@@ -944,11 +1232,12 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
* shootdowns would increase the complexity quite a bit.
*
* Interface::
+ *
* int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
* unsigned long);
*
* If the importing subsystem simply provides a special-purpose mmap call to
- * set up a mapping in userspace, calling do_mmap with dma_buf->file will
+ * set up a mapping in userspace, calling do_mmap with &dma_buf.file will
* equally achieve that for a dma-buf object.
*/
@@ -961,8 +1250,8 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
long ret;
/* Wait on any implicit rendering fences */
- ret = dma_resv_wait_timeout_rcu(resv, write, true,
- MAX_SCHEDULE_TIMEOUT);
+ ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
+ true, MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;
@@ -981,6 +1270,11 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
* dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
* it guaranteed to be coherent with other DMA access.
*
+ * This function will also wait for any DMA transactions tracked through
+ * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
+ * synchronization this function will only ensure cache coherency, callers must
+ * ensure synchronization with such DMA transactions on their own.
+ *
* Can return negative error values, returns 0 on success.
*/
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
@@ -991,6 +1285,8 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
if (WARN_ON(!dmabuf))
return -EINVAL;
+ might_lock(&dmabuf->resv->lock.base);
+
if (dmabuf->ops->begin_cpu_access)
ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
@@ -1003,7 +1299,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
return ret;
}
-EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
+EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
/**
* dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
@@ -1024,12 +1320,14 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
WARN_ON(!dmabuf);
+ might_lock(&dmabuf->resv->lock.base);
+
if (dmabuf->ops->end_cpu_access)
ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
return ret;
}
-EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
+EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
/**
@@ -1049,9 +1347,6 @@ EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
unsigned long pgoff)
{
- struct file *oldfile;
- int ret;
-
if (WARN_ON(!dmabuf || !vma))
return -EINVAL;
@@ -1069,108 +1364,99 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
return -EINVAL;
/* readjust the vma */
- get_file(dmabuf->file);
- oldfile = vma->vm_file;
- vma->vm_file = dmabuf->file;
+ vma_set_file(vma, dmabuf->file);
vma->vm_pgoff = pgoff;
- ret = dmabuf->ops->mmap(dmabuf, vma);
- if (ret) {
- /* restore old parameters on failure */
- vma->vm_file = oldfile;
- fput(dmabuf->file);
- } else {
- if (oldfile)
- fput(oldfile);
- }
- return ret;
-
+ return dmabuf->ops->mmap(dmabuf, vma);
}
-EXPORT_SYMBOL_GPL(dma_buf_mmap);
+EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
/**
* dma_buf_vmap - Create virtual mapping for the buffer object into kernel
* address space. Same restrictions as for vmap and friends apply.
* @dmabuf: [in] buffer to vmap
+ * @map: [out] returns the vmap pointer
*
* This call may fail due to lack of virtual mapping address space.
* These calls are optional in drivers. The intended use for them
* is for mapping objects linear in kernel space for high use objects.
- * Please attempt to use kmap/kunmap before thinking about these interfaces.
*
- * Returns NULL on error.
+ * To ensure coherency users must call dma_buf_begin_cpu_access() and
+ * dma_buf_end_cpu_access() around any cpu access performed through this
+ * mapping.
+ *
+ * Returns 0 on success, or a negative errno code otherwise.
*/
-void *dma_buf_vmap(struct dma_buf *dmabuf)
+int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
- void *ptr;
+ struct iosys_map ptr;
+ int ret = 0;
+
+ iosys_map_clear(map);
if (WARN_ON(!dmabuf))
- return NULL;
+ return -EINVAL;
if (!dmabuf->ops->vmap)
- return NULL;
+ return -EINVAL;
mutex_lock(&dmabuf->lock);
if (dmabuf->vmapping_counter) {
dmabuf->vmapping_counter++;
- BUG_ON(!dmabuf->vmap_ptr);
- ptr = dmabuf->vmap_ptr;
+ BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
+ *map = dmabuf->vmap_ptr;
goto out_unlock;
}
- BUG_ON(dmabuf->vmap_ptr);
+ BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
- ptr = dmabuf->ops->vmap(dmabuf);
- if (WARN_ON_ONCE(IS_ERR(ptr)))
- ptr = NULL;
- if (!ptr)
+ ret = dmabuf->ops->vmap(dmabuf, &ptr);
+ if (WARN_ON_ONCE(ret))
goto out_unlock;
dmabuf->vmap_ptr = ptr;
dmabuf->vmapping_counter = 1;
+ *map = dmabuf->vmap_ptr;
+
out_unlock:
mutex_unlock(&dmabuf->lock);
- return ptr;
+ return ret;
}
-EXPORT_SYMBOL_GPL(dma_buf_vmap);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
/**
* dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
* @dmabuf: [in] buffer to vunmap
- * @vaddr: [in] vmap to vunmap
+ * @map: [in] vmap pointer to vunmap
*/
-void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
if (WARN_ON(!dmabuf))
return;
- BUG_ON(!dmabuf->vmap_ptr);
+ BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
BUG_ON(dmabuf->vmapping_counter == 0);
- BUG_ON(dmabuf->vmap_ptr != vaddr);
+ BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
mutex_lock(&dmabuf->lock);
if (--dmabuf->vmapping_counter == 0) {
if (dmabuf->ops->vunmap)
- dmabuf->ops->vunmap(dmabuf, vaddr);
- dmabuf->vmap_ptr = NULL;
+ dmabuf->ops->vunmap(dmabuf, map);
+ iosys_map_clear(&dmabuf->vmap_ptr);
}
mutex_unlock(&dmabuf->lock);
}
-EXPORT_SYMBOL_GPL(dma_buf_vunmap);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
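
A sketch of the new iosys_map based vmap interface combined with the CPU-access bracketing required for coherency (driver and helper names are illustrative):

    static int my_cpu_fill(struct dma_buf *dmabuf, const void *src, size_t len)
    {
            struct iosys_map map;
            int ret;

            ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
            if (ret)
                    return ret;

            ret = dma_buf_vmap(dmabuf, &map);
            if (!ret) {
                    iosys_map_memcpy_to(&map, 0, src, len);
                    dma_buf_vunmap(dmabuf, &map);
            }

            dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
            return ret;
    }
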
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
- int ret;
struct dma_buf *buf_obj;
struct dma_buf_attachment *attach_obj;
- struct dma_resv *robj;
- struct dma_resv_list *fobj;
- struct dma_fence *fence;
- unsigned seq;
- int count = 0, attach_count, shared_count, i;
+ int count = 0, attach_count;
size_t size = 0;
+ int ret;
ret = mutex_lock_interruptible(&db_list.lock);
@@ -1178,7 +1464,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
return ret;
seq_puts(s, "\nDma-buf Objects:\n");
- seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
+ seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
"size", "flags", "mode", "count", "ino");
list_for_each_entry(buf_obj, &db_list.head, list_node) {
@@ -1187,42 +1473,18 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
if (ret)
goto error_unlock;
+
+ spin_lock(&buf_obj->name_lock);
seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
buf_obj->size,
buf_obj->file->f_flags, buf_obj->file->f_mode,
file_count(buf_obj->file),
buf_obj->exp_name,
file_inode(buf_obj->file)->i_ino,
- buf_obj->name ?: "");
-
- robj = buf_obj->resv;
- while (true) {
- seq = read_seqcount_begin(&robj->seq);
- rcu_read_lock();
- fobj = rcu_dereference(robj->fence);
- shared_count = fobj ? fobj->shared_count : 0;
- fence = rcu_dereference(robj->fence_excl);
- if (!read_seqcount_retry(&robj->seq, seq))
- break;
- rcu_read_unlock();
- }
+ buf_obj->name ?: "<none>");
+ spin_unlock(&buf_obj->name_lock);
- if (fence)
- seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
- dma_fence_is_signaled(fence) ? "" : "un");
- for (i = 0; i < shared_count; i++) {
- fence = rcu_dereference(fobj->shared[i]);
- if (!dma_fence_get_rcu(fence))
- continue;
- seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
- dma_fence_is_signaled(fence) ? "" : "un");
- dma_fence_put(fence);
- }
- rcu_read_unlock();
+ dma_resv_describe(buf_obj->resv, s);
seq_puts(s, "\tAttached Devices:\n");
attach_count = 0;
@@ -1293,6 +1555,12 @@ static inline void dma_buf_uninit_debugfs(void)
static int __init dma_buf_init(void)
{
+ int ret;
+
+ ret = dma_buf_init_sysfs_statistics();
+ if (ret)
+ return ret;
+
dma_buf_mnt = kern_mount(&dma_buf_fs_type);
if (IS_ERR(dma_buf_mnt))
return PTR_ERR(dma_buf_mnt);
@@ -1308,5 +1576,6 @@ static void __exit dma_buf_deinit(void)
{
dma_buf_uninit_debugfs();
kern_unmount(dma_buf_mnt);
+ dma_buf_uninit_sysfs_statistics();
}
__exitcall(dma_buf_deinit);