Diffstat (limited to 'drivers/dma-buf')
24 files changed, 4119 insertions, 1380 deletions
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index 0613bb7770f5..e4dc53a36428 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -5,8 +5,8 @@ config SYNC_FILE bool "Explicit Synchronization Framework" default n select DMA_SHARED_BUFFER - ---help--- - The Sync File Framework adds explicit syncronization via + help + The Sync File Framework adds explicit synchronization via userspace. It enables send/receive 'struct dma_fence' objects to/from userspace via Sync File fds for synchronization between drivers via userspace components. It has been ported from Android. @@ -22,7 +22,7 @@ config SW_SYNC default n depends on SYNC_FILE depends on DEBUG_FS - ---help--- + help A sync object driver that uses a 32bit counter to coordinate synchronization. Useful when there is no hardware primitive backing the synchronization. @@ -39,6 +39,27 @@ config UDMABUF A driver to let userspace turn memfd regions into dma-bufs. Qemu can use this to create host dmabufs for guest framebuffers. +config DMABUF_MOVE_NOTIFY + bool "Move notify between drivers (EXPERIMENTAL)" + default n + depends on DMA_SHARED_BUFFER + help + Don't pin buffers if the dynamic DMA-buf interface is available on + both the exporter as well as the importer. This fixes a security + problem where userspace is able to pin unrestricted amounts of memory + through DMA-buf. + This is marked experimental because we don't yet have a consistent + execution context and memory management between drivers. + +config DMABUF_DEBUG + bool "DMA-BUF debug checks" + depends on DMA_SHARED_BUFFER + default y if DMA_API_DEBUG + help + This option enables additional checks for DMA-BUF importers and + exporters. Specifically it validates that importers do not peek at the + underlying struct page when they import a buffer. + config DMABUF_SELFTESTS tristate "Selftests for the dma-buf interfaces" default n @@ -53,6 +74,21 @@ menuconfig DMABUF_HEAPS allows userspace to allocate dma-bufs that can be shared between drivers. +menuconfig DMABUF_SYSFS_STATS + bool "DMA-BUF sysfs statistics (DEPRECATED)" + depends on DMA_SHARED_BUFFER + help + Choose this option to enable DMA-BUF sysfs statistics + in location /sys/kernel/dmabuf/buffers. + + /sys/kernel/dmabuf/buffers/<inode_number> will contain + statistics for the DMA-BUF with the unique inode number + <inode_number>. + + This option is deprecated and should sooner or later be removed. + Android is the only user of this and it turned out that this resulted + in quite some performance problems. 
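The statistics described in the DMABUF_SYSFS_STATS help text are plain sysfs attribute files, so they can be read with ordinary file I/O from userspace. A minimal, hypothetical sketch (the inode number 12345 is a placeholder; real directory names come from scanning /sys/kernel/dmabuf/buffers):

#include <stdio.h>

/*
 * Sketch only: read the per-buffer statistics exposed when
 * CONFIG_DMABUF_SYSFS_STATS is enabled. The inode number is made up;
 * in practice the directory names are discovered with readdir().
 */
int main(void)
{
	char exporter[64];
	unsigned long size;
	FILE *f;

	f = fopen("/sys/kernel/dmabuf/buffers/12345/exporter_name", "r");
	if (f && fscanf(f, "%63s", exporter) == 1)
		printf("exporter: %s\n", exporter);
	if (f)
		fclose(f);

	f = fopen("/sys/kernel/dmabuf/buffers/12345/size", "r");
	if (f && fscanf(f, "%lu", &size) == 1)
		printf("size: %lu bytes\n", size);
	if (f)
		fclose(f);

	return 0;
}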
+ source "drivers/dma-buf/heaps/Kconfig" endmenu diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 9c190026bfab..70ec901edf2c 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,14 +1,18 @@ # SPDX-License-Identifier: GPL-2.0-only obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ - dma-resv.o seqno-fence.o + dma-fence-unwrap.o dma-resv.o obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o obj-$(CONFIG_DMABUF_HEAPS) += heaps/ obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o obj-$(CONFIG_UDMABUF) += udmabuf.o +obj-$(CONFIG_DMABUF_SYSFS_STATS) += dma-buf-sysfs-stats.o dmabuf_selftests-y := \ selftest.o \ - st-dma-fence.o + st-dma-fence.o \ + st-dma-fence-chain.o \ + st-dma-fence-unwrap.o \ + st-dma-resv.o obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c new file mode 100644 index 000000000000..2bba0babcb62 --- /dev/null +++ b/drivers/dma-buf/dma-buf-sysfs-stats.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * DMA-BUF sysfs statistics. + * + * Copyright (C) 2021 Google LLC. + */ + +#include <linux/dma-buf.h> +#include <linux/dma-resv.h> +#include <linux/kobject.h> +#include <linux/printk.h> +#include <linux/slab.h> +#include <linux/sysfs.h> + +#include "dma-buf-sysfs-stats.h" + +#define to_dma_buf_entry_from_kobj(x) container_of(x, struct dma_buf_sysfs_entry, kobj) + +/** + * DOC: overview + * + * ``/sys/kernel/debug/dma_buf/bufinfo`` provides an overview of every DMA-BUF + * in the system. However, since debugfs is not safe to be mounted in + * production, procfs and sysfs can be used to gather DMA-BUF statistics on + * production systems. + * + * The ``/proc/<pid>/fdinfo/<fd>`` files in procfs can be used to gather + * information about DMA-BUF fds. Detailed documentation about the interface + * is present in Documentation/filesystems/proc.rst. + * + * Unfortunately, the existing procfs interfaces can only provide information + * about the DMA-BUFs for which processes hold fds or have the buffers mmapped + * into their address space. This necessitated the creation of the DMA-BUF sysfs + * statistics interface to provide per-buffer information on production systems. + * + * The interface at ``/sys/kernel/dma-buf/buffers`` exposes information about + * every DMA-BUF when ``CONFIG_DMABUF_SYSFS_STATS`` is enabled. + * + * The following stats are exposed by the interface: + * + * * ``/sys/kernel/dmabuf/buffers/<inode_number>/exporter_name`` + * * ``/sys/kernel/dmabuf/buffers/<inode_number>/size`` + * + * The information in the interface can also be used to derive per-exporter + * statistics. The data from the interface can be gathered on error conditions + * or other important events to provide a snapshot of DMA-BUF usage. + * It can also be collected periodically by telemetry to monitor various metrics. + * + * Detailed documentation about the interface is present in + * Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers. 
+ */ + +struct dma_buf_stats_attribute { + struct attribute attr; + ssize_t (*show)(struct dma_buf *dmabuf, + struct dma_buf_stats_attribute *attr, char *buf); +}; +#define to_dma_buf_stats_attr(x) container_of(x, struct dma_buf_stats_attribute, attr) + +static ssize_t dma_buf_stats_attribute_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct dma_buf_stats_attribute *attribute; + struct dma_buf_sysfs_entry *sysfs_entry; + struct dma_buf *dmabuf; + + attribute = to_dma_buf_stats_attr(attr); + sysfs_entry = to_dma_buf_entry_from_kobj(kobj); + dmabuf = sysfs_entry->dmabuf; + + if (!dmabuf || !attribute->show) + return -EIO; + + return attribute->show(dmabuf, attribute, buf); +} + +static const struct sysfs_ops dma_buf_stats_sysfs_ops = { + .show = dma_buf_stats_attribute_show, +}; + +static ssize_t exporter_name_show(struct dma_buf *dmabuf, + struct dma_buf_stats_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%s\n", dmabuf->exp_name); +} + +static ssize_t size_show(struct dma_buf *dmabuf, + struct dma_buf_stats_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%zu\n", dmabuf->size); +} + +static struct dma_buf_stats_attribute exporter_name_attribute = + __ATTR_RO(exporter_name); +static struct dma_buf_stats_attribute size_attribute = __ATTR_RO(size); + +static struct attribute *dma_buf_stats_default_attrs[] = { + &exporter_name_attribute.attr, + &size_attribute.attr, + NULL, +}; +ATTRIBUTE_GROUPS(dma_buf_stats_default); + +static void dma_buf_sysfs_release(struct kobject *kobj) +{ + struct dma_buf_sysfs_entry *sysfs_entry; + + sysfs_entry = to_dma_buf_entry_from_kobj(kobj); + kfree(sysfs_entry); +} + +static struct kobj_type dma_buf_ktype = { + .sysfs_ops = &dma_buf_stats_sysfs_ops, + .release = dma_buf_sysfs_release, + .default_groups = dma_buf_stats_default_groups, +}; + +void dma_buf_stats_teardown(struct dma_buf *dmabuf) +{ + struct dma_buf_sysfs_entry *sysfs_entry; + + sysfs_entry = dmabuf->sysfs_entry; + if (!sysfs_entry) + return; + + kobject_del(&sysfs_entry->kobj); + kobject_put(&sysfs_entry->kobj); +} + + +/* Statistics files do not need to send uevents. 
*/ +static int dmabuf_sysfs_uevent_filter(struct kobject *kobj) +{ + return 0; +} + +static const struct kset_uevent_ops dmabuf_sysfs_no_uevent_ops = { + .filter = dmabuf_sysfs_uevent_filter, +}; + +static struct kset *dma_buf_stats_kset; +static struct kset *dma_buf_per_buffer_stats_kset; +int dma_buf_init_sysfs_statistics(void) +{ + dma_buf_stats_kset = kset_create_and_add("dmabuf", + &dmabuf_sysfs_no_uevent_ops, + kernel_kobj); + if (!dma_buf_stats_kset) + return -ENOMEM; + + dma_buf_per_buffer_stats_kset = kset_create_and_add("buffers", + &dmabuf_sysfs_no_uevent_ops, + &dma_buf_stats_kset->kobj); + if (!dma_buf_per_buffer_stats_kset) { + kset_unregister(dma_buf_stats_kset); + return -ENOMEM; + } + + return 0; +} + +void dma_buf_uninit_sysfs_statistics(void) +{ + kset_unregister(dma_buf_per_buffer_stats_kset); + kset_unregister(dma_buf_stats_kset); +} + +int dma_buf_stats_setup(struct dma_buf *dmabuf) +{ + struct dma_buf_sysfs_entry *sysfs_entry; + int ret; + + if (!dmabuf || !dmabuf->file) + return -EINVAL; + + if (!dmabuf->exp_name) { + pr_err("exporter name must not be empty if stats needed\n"); + return -EINVAL; + } + + sysfs_entry = kzalloc(sizeof(struct dma_buf_sysfs_entry), GFP_KERNEL); + if (!sysfs_entry) + return -ENOMEM; + + sysfs_entry->kobj.kset = dma_buf_per_buffer_stats_kset; + sysfs_entry->dmabuf = dmabuf; + + dmabuf->sysfs_entry = sysfs_entry; + + /* create the directory for buffer stats */ + ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_ktype, NULL, + "%lu", file_inode(dmabuf->file)->i_ino); + if (ret) + goto err_sysfs_dmabuf; + + return 0; + +err_sysfs_dmabuf: + kobject_put(&sysfs_entry->kobj); + dmabuf->sysfs_entry = NULL; + return ret; +} diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.h b/drivers/dma-buf/dma-buf-sysfs-stats.h new file mode 100644 index 000000000000..a49c6e2650cc --- /dev/null +++ b/drivers/dma-buf/dma-buf-sysfs-stats.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * DMA-BUF sysfs statistics. + * + * Copyright (C) 2021 Google LLC. 
+ */ + +#ifndef _DMA_BUF_SYSFS_STATS_H +#define _DMA_BUF_SYSFS_STATS_H + +#ifdef CONFIG_DMABUF_SYSFS_STATS + +int dma_buf_init_sysfs_statistics(void); +void dma_buf_uninit_sysfs_statistics(void); + +int dma_buf_stats_setup(struct dma_buf *dmabuf); + +void dma_buf_stats_teardown(struct dma_buf *dmabuf); +#else + +static inline int dma_buf_init_sysfs_statistics(void) +{ + return 0; +} + +static inline void dma_buf_uninit_sysfs_statistics(void) {} + +static inline int dma_buf_stats_setup(struct dma_buf *dmabuf) +{ + return 0; +} + +static inline void dma_buf_stats_teardown(struct dma_buf *dmabuf) {} +#endif +#endif // _DMA_BUF_SYSFS_STATS_H diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index c343c7c10b4c..dd0f83ee505b 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -20,6 +20,7 @@ #include <linux/debugfs.h> #include <linux/module.h> #include <linux/seq_file.h> +#include <linux/sync_file.h> #include <linux/poll.h> #include <linux/dma-resv.h> #include <linux/mm.h> @@ -29,6 +30,8 @@ #include <uapi/linux/dma-buf.h> #include <uapi/linux/magic.h> +#include "dma-buf-sysfs-stats.h" + static inline int is_dma_buf_file(struct file *); struct dma_buf_list { @@ -45,17 +48,63 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) size_t ret = 0; dmabuf = dentry->d_fsdata; - dma_resv_lock(dmabuf->resv, NULL); + spin_lock(&dmabuf->name_lock); if (dmabuf->name) ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN); - dma_resv_unlock(dmabuf->resv); + spin_unlock(&dmabuf->name_lock); - return dynamic_dname(dentry, buffer, buflen, "/%s:%s", + return dynamic_dname(buffer, buflen, "/%s:%s", dentry->d_name.name, ret > 0 ? name : ""); } +static void dma_buf_release(struct dentry *dentry) +{ + struct dma_buf *dmabuf; + + dmabuf = dentry->d_fsdata; + if (unlikely(!dmabuf)) + return; + + BUG_ON(dmabuf->vmapping_counter); + + /* + * If you hit this BUG() it could mean: + * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else + * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback + */ + BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active); + + dma_buf_stats_teardown(dmabuf); + dmabuf->ops->release(dmabuf); + + if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) + dma_resv_fini(dmabuf->resv); + + WARN_ON(!list_empty(&dmabuf->attachments)); + module_put(dmabuf->owner); + kfree(dmabuf->name); + kfree(dmabuf); +} + +static int dma_buf_file_release(struct inode *inode, struct file *file) +{ + struct dma_buf *dmabuf; + + if (!is_dma_buf_file(file)) + return -EINVAL; + + dmabuf = file->private_data; + + mutex_lock(&db_list.lock); + list_del(&dmabuf->list_node); + mutex_unlock(&db_list.lock); + + return 0; +} + static const struct dentry_operations dma_buf_dentry_ops = { .d_dname = dmabuffs_dname, + .d_release = dma_buf_release, }; static struct vfsmount *dma_buf_mnt; @@ -77,42 +126,6 @@ static struct file_system_type dma_buf_fs_type = { .kill_sb = kill_anon_super, }; -static int dma_buf_release(struct inode *inode, struct file *file) -{ - struct dma_buf *dmabuf; - - if (!is_dma_buf_file(file)) - return -EINVAL; - - dmabuf = file->private_data; - - BUG_ON(dmabuf->vmapping_counter); - - /* - * Any fences that a dma-buf poll can wait on should be signaled - * before releasing dma-buf. This is the responsibility of each - * driver that uses the reservation objects. - * - * If you hit this BUG() it means someone dropped their ref to the - * dma-buf while still having pending operation to the buffer. 
- */ - BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active); - - dmabuf->ops->release(dmabuf); - - mutex_lock(&db_list.lock); - list_del(&dmabuf->list_node); - mutex_unlock(&db_list.lock); - - if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) - dma_resv_fini(dmabuf->resv); - - module_put(dmabuf->owner); - kfree(dmabuf->name); - kfree(dmabuf); - return 0; -} - static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) { struct dma_buf *dmabuf; @@ -161,11 +174,11 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) } /** - * DOC: fence polling + * DOC: implicit fence polling * * To support cross-device and cross-driver synchronization of buffer access - * implicit fences (represented internally in the kernel with &struct fence) can - * be attached to a &dma_buf. The glue for that and a few related things are + * implicit fences (represented internally in the kernel with &struct dma_fence) + * can be attached to a &dma_buf. The glue for that and a few related things are * provided in the &dma_resv structure. * * Userspace can query the state of these implicitly tracked fences using poll() @@ -180,27 +193,50 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) * Note that this only signals the completion of the respective fences, i.e. the * DMA transfers are complete. Cache flushing and any other necessary * preparations before CPU access can begin still need to happen. + * + * As an alternative to poll(), the set of fences on DMA buffer can be + * exported as a &sync_file using &dma_buf_sync_file_export. */ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb; + struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll); unsigned long flags; spin_lock_irqsave(&dcb->poll->lock, flags); wake_up_locked_poll(dcb->poll, dcb->active); dcb->active = 0; spin_unlock_irqrestore(&dcb->poll->lock, flags); + dma_fence_put(fence); + /* Paired with get_file in dma_buf_poll */ + fput(dmabuf->file); +} + +static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write, + struct dma_buf_poll_cb_t *dcb) +{ + struct dma_resv_iter cursor; + struct dma_fence *fence; + int r; + + dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write), + fence) { + dma_fence_get(fence); + r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb); + if (!r) + return true; + dma_fence_put(fence); + } + + return false; } static __poll_t dma_buf_poll(struct file *file, poll_table *poll) { struct dma_buf *dmabuf; struct dma_resv *resv; - struct dma_resv_list *fobj; - struct dma_fence *fence_excl; __poll_t events; - unsigned shared_count, seq; dmabuf = file->private_data; if (!dmabuf || !dmabuf->resv) @@ -214,114 +250,66 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) if (!events) return 0; -retry: - seq = read_seqcount_begin(&resv->seq); - rcu_read_lock(); + dma_resv_lock(resv, NULL); - fobj = rcu_dereference(resv->fence); - if (fobj) - shared_count = fobj->shared_count; - else - shared_count = 0; - fence_excl = rcu_dereference(resv->fence_excl); - if (read_seqcount_retry(&resv->seq, seq)) { - rcu_read_unlock(); - goto retry; - } - - if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) { - struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl; - __poll_t pevents = EPOLLIN; - - if (shared_count == 0) - pevents |= EPOLLOUT; + if (events & EPOLLOUT) { + struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out; + /* Check that 
callback isn't busy */ spin_lock_irq(&dmabuf->poll.lock); - if (dcb->active) { - dcb->active |= pevents; - events &= ~pevents; - } else - dcb->active = pevents; + if (dcb->active) + events &= ~EPOLLOUT; + else + dcb->active = EPOLLOUT; spin_unlock_irq(&dmabuf->poll.lock); - if (events & pevents) { - if (!dma_fence_get_rcu(fence_excl)) { - /* force a recheck */ - events &= ~pevents; - dma_buf_poll_cb(NULL, &dcb->cb); - } else if (!dma_fence_add_callback(fence_excl, &dcb->cb, - dma_buf_poll_cb)) { - events &= ~pevents; - dma_fence_put(fence_excl); - } else { - /* - * No callback queued, wake up any additional - * waiters. - */ - dma_fence_put(fence_excl); + if (events & EPOLLOUT) { + /* Paired with fput in dma_buf_poll_cb */ + get_file(dmabuf->file); + + if (!dma_buf_poll_add_cb(resv, true, dcb)) + /* No callback queued, wake up any other waiters */ dma_buf_poll_cb(NULL, &dcb->cb); - } + else + events &= ~EPOLLOUT; } } - if ((events & EPOLLOUT) && shared_count > 0) { - struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared; - int i; + if (events & EPOLLIN) { + struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in; - /* Only queue a new callback if no event has fired yet */ + /* Check that callback isn't busy */ spin_lock_irq(&dmabuf->poll.lock); if (dcb->active) - events &= ~EPOLLOUT; + events &= ~EPOLLIN; else - dcb->active = EPOLLOUT; + dcb->active = EPOLLIN; spin_unlock_irq(&dmabuf->poll.lock); - if (!(events & EPOLLOUT)) - goto out; + if (events & EPOLLIN) { + /* Paired with fput in dma_buf_poll_cb */ + get_file(dmabuf->file); - for (i = 0; i < shared_count; ++i) { - struct dma_fence *fence = rcu_dereference(fobj->shared[i]); - - if (!dma_fence_get_rcu(fence)) { - /* - * fence refcount dropped to zero, this means - * that fobj has been freed - * - * call dma_buf_poll_cb and force a recheck! - */ - events &= ~EPOLLOUT; + if (!dma_buf_poll_add_cb(resv, false, dcb)) + /* No callback queued, wake up any other waiters */ dma_buf_poll_cb(NULL, &dcb->cb); - break; - } - if (!dma_fence_add_callback(fence, &dcb->cb, - dma_buf_poll_cb)) { - dma_fence_put(fence); - events &= ~EPOLLOUT; - break; - } - dma_fence_put(fence); + else + events &= ~EPOLLIN; } - - /* No callback queued, wake up any additional waiters. */ - if (i == shared_count) - dma_buf_poll_cb(NULL, &dcb->cb); } -out: - rcu_read_unlock(); + dma_resv_unlock(resv); return events; } /** * dma_buf_set_name - Set a name to a specific dma_buf to track the usage. - * The name of the dma-buf buffer can only be set when the dma-buf is not - * attached to any devices. It could theoritically support changing the - * name of the dma-buf if the same piece of memory is used for multiple - * purpose between different devices. + * It could support changing the name of the dma-buf if the same + * piece of memory is used for multiple purpose between different devices. * - * @dmabuf [in] dmabuf buffer that will be renamed. - * @buf: [in] A piece of userspace memory that contains the name of - * the dma-buf. + * @dmabuf: [in] dmabuf buffer that will be renamed. + * @buf: [in] A piece of userspace memory that contains the name of + * the dma-buf. * * Returns 0 on success. If the dma-buf buffer is already attached to * devices, return -EBUSY. 
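For reference, userspace sets this name through the DMA_BUF_SET_NAME ioctl on the dma-buf file descriptor. A hedged sketch, assuming dmabuf_fd is a valid dma-buf fd obtained from some exporter and that the uapi header provides DMA_BUF_SET_NAME:

#include <linux/dma-buf.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* Sketch only: label a dma-buf so it shows up by name in fdinfo,
 * debugfs and the sysfs statistics. */
static int label_dmabuf(int dmabuf_fd)
{
	if (ioctl(dmabuf_fd, DMA_BUF_SET_NAME, "camera-preview") < 0) {
		perror("DMA_BUF_SET_NAME");
		return -1;
	}
	return 0;
}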
@@ -330,24 +318,112 @@ out: static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf) { char *name = strndup_user(buf, DMA_BUF_NAME_LEN); - long ret = 0; if (IS_ERR(name)) return PTR_ERR(name); - dma_resv_lock(dmabuf->resv, NULL); - if (!list_empty(&dmabuf->attachments)) { - ret = -EBUSY; - kfree(name); - goto out_unlock; - } + spin_lock(&dmabuf->name_lock); kfree(dmabuf->name); dmabuf->name = name; + spin_unlock(&dmabuf->name_lock); + + return 0; +} + +#if IS_ENABLED(CONFIG_SYNC_FILE) +static long dma_buf_export_sync_file(struct dma_buf *dmabuf, + void __user *user_data) +{ + struct dma_buf_export_sync_file arg; + enum dma_resv_usage usage; + struct dma_fence *fence = NULL; + struct sync_file *sync_file; + int fd, ret; + + if (copy_from_user(&arg, user_data, sizeof(arg))) + return -EFAULT; + + if (arg.flags & ~DMA_BUF_SYNC_RW) + return -EINVAL; + + if ((arg.flags & DMA_BUF_SYNC_RW) == 0) + return -EINVAL; + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) + return fd; + + usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE); + ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence); + if (ret) + goto err_put_fd; + + if (!fence) + fence = dma_fence_get_stub(); + + sync_file = sync_file_create(fence); + + dma_fence_put(fence); + + if (!sync_file) { + ret = -ENOMEM; + goto err_put_fd; + } + + arg.fd = fd; + if (copy_to_user(user_data, &arg, sizeof(arg))) { + ret = -EFAULT; + goto err_put_file; + } + + fd_install(fd, sync_file->file); + + return 0; + +err_put_file: + fput(sync_file->file); +err_put_fd: + put_unused_fd(fd); + return ret; +} + +static long dma_buf_import_sync_file(struct dma_buf *dmabuf, + const void __user *user_data) +{ + struct dma_buf_import_sync_file arg; + struct dma_fence *fence; + enum dma_resv_usage usage; + int ret = 0; + + if (copy_from_user(&arg, user_data, sizeof(arg))) + return -EFAULT; + + if (arg.flags & ~DMA_BUF_SYNC_RW) + return -EINVAL; + + if ((arg.flags & DMA_BUF_SYNC_RW) == 0) + return -EINVAL; + + fence = sync_file_get_fence(arg.fd); + if (!fence) + return -EINVAL; + + usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? 
DMA_RESV_USAGE_WRITE : + DMA_RESV_USAGE_READ; + + dma_resv_lock(dmabuf->resv, NULL); + + ret = dma_resv_reserve_fences(dmabuf->resv, 1); + if (!ret) + dma_resv_add_fence(dmabuf->resv, fence, usage); -out_unlock: dma_resv_unlock(dmabuf->resv); + + dma_fence_put(fence); + return ret; } +#endif static long dma_buf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) @@ -388,9 +464,17 @@ static long dma_buf_ioctl(struct file *file, return ret; - case DMA_BUF_SET_NAME: + case DMA_BUF_SET_NAME_A: + case DMA_BUF_SET_NAME_B: return dma_buf_set_name(dmabuf, (const char __user *)arg); +#if IS_ENABLED(CONFIG_SYNC_FILE) + case DMA_BUF_IOCTL_EXPORT_SYNC_FILE: + return dma_buf_export_sync_file(dmabuf, (void __user *)arg); + case DMA_BUF_IOCTL_IMPORT_SYNC_FILE: + return dma_buf_import_sync_file(dmabuf, (const void __user *)arg); +#endif + default: return -ENOTTY; } @@ -404,14 +488,14 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file) /* Don't count the temporary reference taken inside procfs seq_show */ seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1); seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name); - dma_resv_lock(dmabuf->resv, NULL); + spin_lock(&dmabuf->name_lock); if (dmabuf->name) seq_printf(m, "name:\t%s\n", dmabuf->name); - dma_resv_unlock(dmabuf->resv); + spin_unlock(&dmabuf->name_lock); } static const struct file_operations dma_buf_fops = { - .release = dma_buf_release, + .release = dma_buf_file_release, .mmap = dma_buf_mmap_internal, .llseek = dma_buf_llseek, .poll = dma_buf_poll, @@ -430,6 +514,7 @@ static inline int is_dma_buf_file(struct file *file) static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags) { + static atomic64_t dmabuf_inode = ATOMIC64_INIT(0); struct file *file; struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb); @@ -439,11 +524,18 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags) inode->i_size = dmabuf->size; inode_set_bytes(inode, dmabuf->size); + /* + * The ->i_ino acquired from get_next_ino() is not unique thus + * not suitable for using it as dentry name by dmabuf stats. + * Override ->i_ino with the unique and dmabuffs specific + * value. + */ + inode->i_ino = atomic64_add_return(1, &dmabuf_inode); + flags &= O_ACCMODE | O_NONBLOCK; file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf", flags, &dma_buf_fops); if (IS_ERR(file)) goto err_alloc_file; - file->f_flags = flags & (O_ACCMODE | O_NONBLOCK); file->private_data = dmabuf; file->f_path.dentry->d_fsdata = dmabuf; @@ -466,7 +558,7 @@ err_alloc_file: * as a file descriptor by calling dma_buf_fd(). * * 2. Userspace passes this file-descriptors to all drivers it wants this buffer - * to share with: First the filedescriptor is converted to a &dma_buf using + * to share with: First the file descriptor is converted to a &dma_buf using * dma_buf_get(). Then the buffer is attached to the device using * dma_buf_attach(). * @@ -479,7 +571,7 @@ err_alloc_file: * * 4. Once a driver is done with a shared buffer it needs to call * dma_buf_detach() (after cleaning up any mappings) and then release the - * reference acquired with dma_buf_get by calling dma_buf_put(). + * reference acquired with dma_buf_get() by calling dma_buf_put(). * * For the detailed semantics exporters are expected to implement see * &dma_buf_ops. @@ -495,9 +587,10 @@ err_alloc_file: * by the exporter. see &struct dma_buf_export_info * for further details. 
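To illustrate the two sync-file ioctls added above, here is a hedged userspace sketch: one helper snapshots the buffer's current implicit fences as a sync_file fd, the other attaches a sync_file back as an implicit write fence. dmabuf_fd and sync_fd are assumed to be valid descriptors supplied by the caller:

#include <linux/dma-buf.h>
#include <sys/ioctl.h>

/* Sketch: export the current read/write fences of a dma-buf. Returns
 * a sync_file fd (close() it when done) or -1 on error. */
static int snapshot_fences(int dmabuf_fd)
{
	struct dma_buf_export_sync_file exp = { .flags = DMA_BUF_SYNC_RW };

	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &exp) < 0)
		return -1;
	return exp.fd;
}

/* Sketch: add the fence behind sync_fd as an implicit write fence. */
static int attach_write_fence(int dmabuf_fd, int sync_fd)
{
	struct dma_buf_import_sync_file imp = {
		.flags = DMA_BUF_SYNC_WRITE,
		.fd = sync_fd,
	};

	return ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &imp);
}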
* - * Returns, on success, a newly created dma_buf object, which wraps the - * supplied private data and operations for dma_buf_ops. On either missing - * ops, or error in allocating struct dma_buf, will return negative error. + * Returns, on success, a newly created struct dma_buf object, which wraps the + * supplied private data and operations for struct dma_buf_ops. On either + * missing ops, or error in allocating struct dma_buf, will return negative + * error. * * For most cases the easiest way to create @exp_info is through the * %DEFINE_DMA_BUF_EXPORT_INFO macro. @@ -525,7 +618,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) } if (WARN_ON(exp_info->ops->cache_sgt_mapping && - exp_info->ops->dynamic_mapping)) + (exp_info->ops->pin || exp_info->ops->unpin))) + return ERR_PTR(-EINVAL); + + if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin)) return ERR_PTR(-EINVAL); if (!try_module_get(exp_info->owner)) @@ -542,9 +638,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) dmabuf->size = exp_info->size; dmabuf->exp_name = exp_info->exp_name; dmabuf->owner = exp_info->owner; + spin_lock_init(&dmabuf->name_lock); init_waitqueue_head(&dmabuf->poll); - dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll; - dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; + dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll; + dmabuf->cb_in.active = dmabuf->cb_out.active = 0; if (!resv) { resv = (struct dma_resv *)&dmabuf[1]; @@ -558,7 +655,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) goto err_dmabuf; } - file->f_mode |= FMODE_LSEEK; dmabuf->file = file; mutex_init(&dmabuf->lock); @@ -568,18 +664,30 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) list_add(&dmabuf->list_node, &db_list.head); mutex_unlock(&db_list.lock); + ret = dma_buf_stats_setup(dmabuf); + if (ret) + goto err_sysfs; + return dmabuf; +err_sysfs: + /* + * Set file->f_path.dentry->d_fsdata to NULL so that when + * dma_buf_release() gets invoked by dentry_ops, it exits + * early before calling the release() dma_buf op. + */ + file->f_path.dentry->d_fsdata = NULL; + fput(file); err_dmabuf: kfree(dmabuf); err_module: module_put(exp_info->owner); return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(dma_buf_export); +EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF); /** - * dma_buf_fd - returns a file descriptor for the given dma_buf + * dma_buf_fd - returns a file descriptor for the given struct dma_buf * @dmabuf: [in] pointer to dma_buf for which fd is required. * @flags: [in] flags to give to fd * @@ -600,13 +708,13 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags) return fd; } -EXPORT_SYMBOL_GPL(dma_buf_fd); +EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF); /** - * dma_buf_get - returns the dma_buf structure related to an fd - * @fd: [in] fd associated with the dma_buf to be returned + * dma_buf_get - returns the struct dma_buf related to an fd + * @fd: [in] fd associated with the struct dma_buf to be returned * - * On success, returns the dma_buf structure associated with an fd; uses + * On success, returns the struct dma_buf associated with an fd; uses * file's refcounting done by fget to increase refcount. returns ERR_PTR * otherwise. 
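The exporter-side flow documented above typically boils down to filling a dma_buf_export_info and turning the resulting dma_buf into an fd. A minimal sketch, assuming a hypothetical my_dmabuf_ops implementing &dma_buf_ops:

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/fcntl.h>

/* Hypothetical exporter ops, defined elsewhere in the driver. */
extern const struct dma_buf_ops my_dmabuf_ops;

static int my_export(void *priv, size_t size)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int fd;

	exp_info.ops = &my_dmabuf_ops;
	exp_info.size = size;
	exp_info.flags = O_RDWR | O_CLOEXEC;
	exp_info.priv = priv;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);	/* drop the only reference on error */
	return fd;
}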
*/ @@ -626,7 +734,7 @@ struct dma_buf *dma_buf_get(int fd) return file->private_data; } -EXPORT_SYMBOL_GPL(dma_buf_get); +EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF); /** * dma_buf_put - decreases refcount of the buffer @@ -645,18 +753,61 @@ void dma_buf_put(struct dma_buf *dmabuf) fput(dmabuf->file); } -EXPORT_SYMBOL_GPL(dma_buf_put); +EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF); + +static void mangle_sg_table(struct sg_table *sg_table) +{ +#ifdef CONFIG_DMABUF_DEBUG + int i; + struct scatterlist *sg; + + /* To catch abuse of the underlying struct page by importers mix + * up the bits, but take care to preserve the low SG_ bits to + * not corrupt the sgt. The mixing is undone in __unmap_dma_buf + * before passing the sgt back to the exporter. */ + for_each_sgtable_sg(sg_table, sg, i) + sg->page_link ^= ~0xffUL; +#endif + +} +static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach, + enum dma_data_direction direction) +{ + struct sg_table *sg_table; + signed long ret; + + sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); + if (IS_ERR_OR_NULL(sg_table)) + return sg_table; + + if (!dma_buf_attachment_is_dynamic(attach)) { + ret = dma_resv_wait_timeout(attach->dmabuf->resv, + DMA_RESV_USAGE_KERNEL, true, + MAX_SCHEDULE_TIMEOUT); + if (ret < 0) { + attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, + direction); + return ERR_PTR(ret); + } + } + + mangle_sg_table(sg_table); + return sg_table; +} /** - * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally, - * calls attach() of dma_buf_ops to allow device-specific attach functionality + * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list * @dmabuf: [in] buffer to attach device to. * @dev: [in] device to be attached. - * @dynamic_mapping: [in] calling convention for map/unmap + * @importer_ops: [in] importer operations for the attachment + * @importer_priv: [in] importer private pointer for the attachment * * Returns struct dma_buf_attachment pointer for this attachment. Attachments * must be cleaned up by calling dma_buf_detach(). * + * Optionally this calls &dma_buf_ops.attach to allow device-specific attach + * functionality. 
+ * * Returns: * * A pointer to newly created &dma_buf_attachment on success, or a negative @@ -668,7 +819,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put); */ struct dma_buf_attachment * dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, - bool dynamic_mapping) + const struct dma_buf_attach_ops *importer_ops, + void *importer_priv) { struct dma_buf_attachment *attach; int ret; @@ -676,13 +828,19 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, if (WARN_ON(!dmabuf || !dev)) return ERR_PTR(-EINVAL); + if (WARN_ON(importer_ops && !importer_ops->move_notify)) + return ERR_PTR(-EINVAL); + attach = kzalloc(sizeof(*attach), GFP_KERNEL); if (!attach) return ERR_PTR(-ENOMEM); attach->dev = dev; attach->dmabuf = dmabuf; - attach->dynamic_mapping = dynamic_mapping; + if (importer_ops) + attach->peer2peer = importer_ops->allow_peer2peer; + attach->importer_ops = importer_ops; + attach->importer_priv = importer_priv; if (dmabuf->ops->attach) { ret = dmabuf->ops->attach(dmabuf, attach); @@ -701,15 +859,19 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, dma_buf_is_dynamic(dmabuf)) { struct sg_table *sgt; - if (dma_buf_is_dynamic(attach->dmabuf)) + if (dma_buf_is_dynamic(attach->dmabuf)) { dma_resv_lock(attach->dmabuf->resv, NULL); + ret = dmabuf->ops->pin(attach); + if (ret) + goto err_unlock; + } - sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL); + sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL); if (!sgt) sgt = ERR_PTR(-ENOMEM); if (IS_ERR(sgt)) { ret = PTR_ERR(sgt); - goto err_unlock; + goto err_unpin; } if (dma_buf_is_dynamic(attach->dmabuf)) dma_resv_unlock(attach->dmabuf->resv); @@ -723,6 +885,10 @@ err_attach: kfree(attach); return ERR_PTR(ret); +err_unpin: + if (dma_buf_is_dynamic(attach->dmabuf)) + dmabuf->ops->unpin(attach); + err_unlock: if (dma_buf_is_dynamic(attach->dmabuf)) dma_resv_unlock(attach->dmabuf->resv); @@ -730,7 +896,7 @@ err_unlock: dma_buf_detach(dmabuf, attach); return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach); +EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF); /** * dma_buf_attach - Wrapper for dma_buf_dynamic_attach @@ -743,17 +909,28 @@ EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach); struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, struct device *dev) { - return dma_buf_dynamic_attach(dmabuf, dev, false); + return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL); +} +EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF); + +static void __unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *sg_table, + enum dma_data_direction direction) +{ + /* uses XOR, hence this unmangles */ + mangle_sg_table(sg_table); + + attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); } -EXPORT_SYMBOL_GPL(dma_buf_attach); /** - * dma_buf_detach - Remove the given attachment from dmabuf's attachments list; - * optionally calls detach() of dma_buf_ops for device-specific detach + * dma_buf_detach - Remove the given attachment from dmabuf's attachments list * @dmabuf: [in] buffer to detach from. * @attach: [in] attachment to be detached; is free'd after this call. * * Clean up a device attachment obtained by calling dma_buf_attach(). + * + * Optionally this calls &dma_buf_ops.detach for device-specific detach. 
*/ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) { @@ -764,10 +941,12 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) if (dma_buf_is_dynamic(attach->dmabuf)) dma_resv_lock(attach->dmabuf->resv, NULL); - dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir); + __unmap_dma_buf(attach, attach->sgt, attach->dir); - if (dma_buf_is_dynamic(attach->dmabuf)) + if (dma_buf_is_dynamic(attach->dmabuf)) { + dmabuf->ops->unpin(attach); dma_resv_unlock(attach->dmabuf->resv); + } } dma_resv_lock(dmabuf->resv, NULL); @@ -778,7 +957,58 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) kfree(attach); } -EXPORT_SYMBOL_GPL(dma_buf_detach); +EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF); + +/** + * dma_buf_pin - Lock down the DMA-buf + * @attach: [in] attachment which should be pinned + * + * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may + * call this, and only for limited use cases like scanout and not for temporary + * pin operations. It is not permitted to allow userspace to pin arbitrary + * amounts of buffers through this interface. + * + * Buffers must be unpinned by calling dma_buf_unpin(). + * + * Returns: + * 0 on success, negative error code on failure. + */ +int dma_buf_pin(struct dma_buf_attachment *attach) +{ + struct dma_buf *dmabuf = attach->dmabuf; + int ret = 0; + + WARN_ON(!dma_buf_attachment_is_dynamic(attach)); + + dma_resv_assert_held(dmabuf->resv); + + if (dmabuf->ops->pin) + ret = dmabuf->ops->pin(attach); + + return ret; +} +EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF); + +/** + * dma_buf_unpin - Unpin a DMA-buf + * @attach: [in] attachment which should be unpinned + * + * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move + * any mapping of @attach again and inform the importer through + * &dma_buf_attach_ops.move_notify. + */ +void dma_buf_unpin(struct dma_buf_attachment *attach) +{ + struct dma_buf *dmabuf = attach->dmabuf; + + WARN_ON(!dma_buf_attachment_is_dynamic(attach)); + + dma_resv_assert_held(dmabuf->resv); + + if (dmabuf->ops->unpin) + dmabuf->ops->unpin(attach); +} +EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF); /** * dma_buf_map_attachment - Returns the scatterlist table of the attachment; @@ -790,15 +1020,22 @@ EXPORT_SYMBOL_GPL(dma_buf_detach); * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR * on error. May return -EINTR if it is interrupted by a signal. * + * On success, the DMA addresses and lengths in the returned scatterlist are + * PAGE_SIZE aligned. + * * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that * the underlying backing storage is pinned for as long as a mapping exists, * therefore users/importers should not hold onto a mapping for undue amounts of * time. + * + * Important: Dynamic importers must wait for the exclusive fence of the struct + * dma_resv attached to the DMA-BUF first. 
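Putting the dynamic-attachment pieces together, a hedged importer-side sketch: the importer supplies &dma_buf_attach_ops with a move_notify callback and maps the buffer while holding the reservation lock, as the dynamic path requires. my_move_notify() and the surrounding driver structures are assumptions, not real code:

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/err.h>

/* Called by the exporter (with the resv lock held) when the buffer
 * moves; the importer must invalidate its cached mappings here. */
static void my_move_notify(struct dma_buf_attachment *attach)
{
	/* tear down cached sg_table / device mappings of this attachment */
}

static const struct dma_buf_attach_ops my_importer_ops = {
	.allow_peer2peer = true,
	.move_notify = my_move_notify,
};

static struct sg_table *my_import(struct dma_buf *dmabuf, struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops, NULL);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* dynamic attachments map and unmap under the reservation lock */
	dma_resv_lock(dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	dma_resv_unlock(dmabuf->resv);

	if (IS_ERR(sgt))
		dma_buf_detach(dmabuf, attach);
	return sgt;
}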
*/ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, enum dma_data_direction direction) { struct sg_table *sg_table; + int r; might_sleep(); @@ -820,21 +1057,48 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, return attach->sgt; } - if (dma_buf_is_dynamic(attach->dmabuf)) + if (dma_buf_is_dynamic(attach->dmabuf)) { dma_resv_assert_held(attach->dmabuf->resv); + if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) { + r = attach->dmabuf->ops->pin(attach); + if (r) + return ERR_PTR(r); + } + } - sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); + sg_table = __map_dma_buf(attach, direction); if (!sg_table) sg_table = ERR_PTR(-ENOMEM); + if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) && + !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) + attach->dmabuf->ops->unpin(attach); + if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) { attach->sgt = sg_table; attach->dir = direction; } +#ifdef CONFIG_DMA_API_DEBUG + if (!IS_ERR(sg_table)) { + struct scatterlist *sg; + u64 addr; + int len; + int i; + + for_each_sgtable_dma_sg(sg_table, sg, i) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) { + pr_debug("%s: addr %llx or len %x is not page aligned!\n", + __func__, addr, len); + } + } + } +#endif /* CONFIG_DMA_API_DEBUG */ return sg_table; } -EXPORT_SYMBOL_GPL(dma_buf_map_attachment); +EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF); /** * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might @@ -864,9 +1128,33 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, if (dma_buf_is_dynamic(attach->dmabuf)) dma_resv_assert_held(attach->dmabuf->resv); - attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); + __unmap_dma_buf(attach, sg_table, direction); + + if (dma_buf_is_dynamic(attach->dmabuf) && + !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) + dma_buf_unpin(attach); +} +EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF); + +/** + * dma_buf_move_notify - notify attachments that DMA-buf is moving + * + * @dmabuf: [in] buffer which is moving + * + * Informs all attachmenst that they need to destroy and recreated all their + * mappings. + */ +void dma_buf_move_notify(struct dma_buf *dmabuf) +{ + struct dma_buf_attachment *attach; + + dma_resv_assert_held(dmabuf->resv); + + list_for_each_entry(attach, &dmabuf->attachments, node) + if (attach->importer_ops) + attach->importer_ops->move_notify(attach); } -EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment); +EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF); /** * DOC: cpu access @@ -884,15 +1172,15 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment); * vmalloc space might be limited and result in vmap calls failing. * * Interfaces:: - * void \*dma_buf_vmap(struct dma_buf \*dmabuf) - * void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr) + * + * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map) + * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map) * * The vmap call can fail if there is no vmap support in the exporter, or if - * it runs out of vmalloc space. Fallback to kmap should be implemented. Note - * that the dma-buf layer keeps a reference count for all vmap access and - * calls down into the exporter's vmap function only when no vmapping exists, - * and only unmaps it once. Protection against concurrent vmap/vunmap calls is - * provided by taking the dma_buf->lock mutex. + * it runs out of vmalloc space. 
Note that the dma-buf layer keeps a reference + * count for all vmap access and calls down into the exporter's vmap function + * only when no vmapping exists, and only unmaps it once. Protection against + * concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex. * * - For full compatibility on the importer side with existing userspace * interfaces, which might already support mmap'ing buffers. This is needed in @@ -944,11 +1232,12 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment); * shootdowns would increase the complexity quite a bit. * * Interface:: + * * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*, * unsigned long); * * If the importing subsystem simply provides a special-purpose mmap call to - * set up a mapping in userspace, calling do_mmap with dma_buf->file will + * set up a mapping in userspace, calling do_mmap with &dma_buf.file will * equally achieve that for a dma-buf object. */ @@ -961,8 +1250,8 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf, long ret; /* Wait on any implicit rendering fences */ - ret = dma_resv_wait_timeout_rcu(resv, write, true, - MAX_SCHEDULE_TIMEOUT); + ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write), + true, MAX_SCHEDULE_TIMEOUT); if (ret < 0) return ret; @@ -981,6 +1270,11 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf, * dma_buf_end_cpu_access(). Only when cpu access is braketed by both calls is * it guaranteed to be coherent with other DMA access. * + * This function will also wait for any DMA transactions tracked through + * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit + * synchronization this function will only ensure cache coherency, callers must + * ensure synchronization with such DMA transactions on their own. + * * Can return negative error values, returns 0 on success. 
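A hedged sketch of the CPU-access bracket just described, assuming cpu_addr is a CPU mapping of the buffer obtained elsewhere (for instance via dma_buf_vmap()):

#include <linux/dma-buf.h>
#include <linux/string.h>

/* Sketch: bracket CPU writes so implicit fences are waited for and
 * caches are flushed before the device sees the data. */
static int my_cpu_fill(struct dma_buf *dmabuf, void *cpu_addr, size_t len)
{
	int ret;

	ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
	if (ret)
		return ret;

	memset(cpu_addr, 0, len);	/* the actual CPU access */

	return dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
}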
*/ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, @@ -991,6 +1285,8 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, if (WARN_ON(!dmabuf)) return -EINVAL; + might_lock(&dmabuf->resv->lock.base); + if (dmabuf->ops->begin_cpu_access) ret = dmabuf->ops->begin_cpu_access(dmabuf, direction); @@ -1003,7 +1299,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, return ret; } -EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access); +EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF); /** * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the @@ -1024,12 +1320,14 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf, WARN_ON(!dmabuf); + might_lock(&dmabuf->resv->lock.base); + if (dmabuf->ops->end_cpu_access) ret = dmabuf->ops->end_cpu_access(dmabuf, direction); return ret; } -EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); +EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF); /** @@ -1049,9 +1347,6 @@ EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, unsigned long pgoff) { - struct file *oldfile; - int ret; - if (WARN_ON(!dmabuf || !vma)) return -EINVAL; @@ -1069,108 +1364,99 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, return -EINVAL; /* readjust the vma */ - get_file(dmabuf->file); - oldfile = vma->vm_file; - vma->vm_file = dmabuf->file; + vma_set_file(vma, dmabuf->file); vma->vm_pgoff = pgoff; - ret = dmabuf->ops->mmap(dmabuf, vma); - if (ret) { - /* restore old parameters on failure */ - vma->vm_file = oldfile; - fput(dmabuf->file); - } else { - if (oldfile) - fput(oldfile); - } - return ret; - + return dmabuf->ops->mmap(dmabuf, vma); } -EXPORT_SYMBOL_GPL(dma_buf_mmap); +EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF); /** * dma_buf_vmap - Create virtual mapping for the buffer object into kernel * address space. Same restrictions as for vmap and friends apply. * @dmabuf: [in] buffer to vmap + * @map: [out] returns the vmap pointer * * This call may fail due to lack of virtual mapping address space. * These calls are optional in drivers. The intended use for them * is for mapping objects linear in kernel space for high use objects. - * Please attempt to use kmap/kunmap before thinking about these interfaces. * - * Returns NULL on error. + * To ensure coherency users must call dma_buf_begin_cpu_access() and + * dma_buf_end_cpu_access() around any cpu access performed through this + * mapping. + * + * Returns 0 on success, or a negative errno code otherwise. 
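For the new iosys_map based interface, a minimal sketch of mapping a buffer into the kernel and reading from it; error handling and the begin/end CPU access bracket are kept to the essentials:

#include <linux/dma-buf.h>
#include <linux/iosys-map.h>
#include <linux/io.h>
#include <linux/string.h>

/* Sketch: vmap a dma-buf and copy out its first byte, honouring the
 * I/O vs. system memory distinction tracked by struct iosys_map. */
static int my_peek(struct dma_buf *dmabuf, u8 *out)
{
	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap(dmabuf, &map);
	if (ret)
		return ret;

	if (map.is_iomem)
		memcpy_fromio(out, map.vaddr_iomem, 1);
	else
		memcpy(out, map.vaddr, 1);

	dma_buf_vunmap(dmabuf, &map);
	return 0;
}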
*/ -void *dma_buf_vmap(struct dma_buf *dmabuf) +int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) { - void *ptr; + struct iosys_map ptr; + int ret = 0; + + iosys_map_clear(map); if (WARN_ON(!dmabuf)) - return NULL; + return -EINVAL; if (!dmabuf->ops->vmap) - return NULL; + return -EINVAL; mutex_lock(&dmabuf->lock); if (dmabuf->vmapping_counter) { dmabuf->vmapping_counter++; - BUG_ON(!dmabuf->vmap_ptr); - ptr = dmabuf->vmap_ptr; + BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr)); + *map = dmabuf->vmap_ptr; goto out_unlock; } - BUG_ON(dmabuf->vmap_ptr); + BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr)); - ptr = dmabuf->ops->vmap(dmabuf); - if (WARN_ON_ONCE(IS_ERR(ptr))) - ptr = NULL; - if (!ptr) + ret = dmabuf->ops->vmap(dmabuf, &ptr); + if (WARN_ON_ONCE(ret)) goto out_unlock; dmabuf->vmap_ptr = ptr; dmabuf->vmapping_counter = 1; + *map = dmabuf->vmap_ptr; + out_unlock: mutex_unlock(&dmabuf->lock); - return ptr; + return ret; } -EXPORT_SYMBOL_GPL(dma_buf_vmap); +EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF); /** * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap. * @dmabuf: [in] buffer to vunmap - * @vaddr: [in] vmap to vunmap + * @map: [in] vmap pointer to vunmap */ -void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) +void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) { if (WARN_ON(!dmabuf)) return; - BUG_ON(!dmabuf->vmap_ptr); + BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr)); BUG_ON(dmabuf->vmapping_counter == 0); - BUG_ON(dmabuf->vmap_ptr != vaddr); + BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map)); mutex_lock(&dmabuf->lock); if (--dmabuf->vmapping_counter == 0) { if (dmabuf->ops->vunmap) - dmabuf->ops->vunmap(dmabuf, vaddr); - dmabuf->vmap_ptr = NULL; + dmabuf->ops->vunmap(dmabuf, map); + iosys_map_clear(&dmabuf->vmap_ptr); } mutex_unlock(&dmabuf->lock); } -EXPORT_SYMBOL_GPL(dma_buf_vunmap); +EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF); #ifdef CONFIG_DEBUG_FS static int dma_buf_debug_show(struct seq_file *s, void *unused) { - int ret; struct dma_buf *buf_obj; struct dma_buf_attachment *attach_obj; - struct dma_resv *robj; - struct dma_resv_list *fobj; - struct dma_fence *fence; - unsigned seq; - int count = 0, attach_count, shared_count, i; + int count = 0, attach_count; size_t size = 0; + int ret; ret = mutex_lock_interruptible(&db_list.lock); @@ -1178,7 +1464,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) return ret; seq_puts(s, "\nDma-buf Objects:\n"); - seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n", + seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n", "size", "flags", "mode", "count", "ino"); list_for_each_entry(buf_obj, &db_list.head, list_node) { @@ -1187,42 +1473,18 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) if (ret) goto error_unlock; + + spin_lock(&buf_obj->name_lock); seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n", buf_obj->size, buf_obj->file->f_flags, buf_obj->file->f_mode, file_count(buf_obj->file), buf_obj->exp_name, file_inode(buf_obj->file)->i_ino, - buf_obj->name ?: ""); - - robj = buf_obj->resv; - while (true) { - seq = read_seqcount_begin(&robj->seq); - rcu_read_lock(); - fobj = rcu_dereference(robj->fence); - shared_count = fobj ? 
fobj->shared_count : 0; - fence = rcu_dereference(robj->fence_excl); - if (!read_seqcount_retry(&robj->seq, seq)) - break; - rcu_read_unlock(); - } + buf_obj->name ?: "<none>"); + spin_unlock(&buf_obj->name_lock); - if (fence) - seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n", - fence->ops->get_driver_name(fence), - fence->ops->get_timeline_name(fence), - dma_fence_is_signaled(fence) ? "" : "un"); - for (i = 0; i < shared_count; i++) { - fence = rcu_dereference(fobj->shared[i]); - if (!dma_fence_get_rcu(fence)) - continue; - seq_printf(s, "\tShared fence: %s %s %ssignalled\n", - fence->ops->get_driver_name(fence), - fence->ops->get_timeline_name(fence), - dma_fence_is_signaled(fence) ? "" : "un"); - dma_fence_put(fence); - } - rcu_read_unlock(); + dma_resv_describe(buf_obj->resv, s); seq_puts(s, "\tAttached Devices:\n"); attach_count = 0; @@ -1293,6 +1555,12 @@ static inline void dma_buf_uninit_debugfs(void) static int __init dma_buf_init(void) { + int ret; + + ret = dma_buf_init_sysfs_statistics(); + if (ret) + return ret; + dma_buf_mnt = kern_mount(&dma_buf_fs_type); if (IS_ERR(dma_buf_mnt)) return PTR_ERR(dma_buf_mnt); @@ -1308,5 +1576,6 @@ static void __exit dma_buf_deinit(void) { dma_buf_uninit_debugfs(); kern_unmount(dma_buf_mnt); + dma_buf_uninit_sysfs_statistics(); } __exitcall(dma_buf_deinit); diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c index d3fbd950be94..5c8a7084577b 100644 --- a/drivers/dma-buf/dma-fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -104,7 +104,11 @@ static bool dma_fence_array_signaled(struct dma_fence *fence) { struct dma_fence_array *array = to_dma_fence_array(fence); - return atomic_read(&array->num_pending) <= 0; + if (atomic_read(&array->num_pending) > 0) + return false; + + dma_fence_array_clear_pending_error(array); + return true; } static void dma_fence_array_release(struct dma_fence *fence) @@ -155,6 +159,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences, struct dma_fence_array *array; size_t size = sizeof(*array); + WARN_ON(!num_fences || !fences); + /* Allocate the callback structures behind the array. */ size += num_fences * sizeof(struct dma_fence_array_cb); array = kzalloc(size, GFP_KERNEL); @@ -172,6 +178,20 @@ struct dma_fence_array *dma_fence_array_create(int num_fences, array->base.error = PENDING_ERROR; + /* + * dma_fence_array objects should never contain any other fence + * containers or otherwise we run into recursion and potential kernel + * stack overflow on operations on the dma_fence_array. + * + * The correct way of handling this is to flatten out the array by the + * caller instead. + * + * Enforce this here by checking that we don't create a dma_fence_array + * with any container inside. 
+ */ + while (num_fences--) + WARN_ON(dma_fence_is_container(fences[num_fences])); + return array; } EXPORT_SYMBOL(dma_fence_array_create); @@ -201,3 +221,33 @@ bool dma_fence_match_context(struct dma_fence *fence, u64 context) return true; } EXPORT_SYMBOL(dma_fence_match_context); + +struct dma_fence *dma_fence_array_first(struct dma_fence *head) +{ + struct dma_fence_array *array; + + if (!head) + return NULL; + + array = to_dma_fence_array(head); + if (!array) + return head; + + if (!array->num_fences) + return NULL; + + return array->fences[0]; +} +EXPORT_SYMBOL(dma_fence_array_first); + +struct dma_fence *dma_fence_array_next(struct dma_fence *head, + unsigned int index) +{ + struct dma_fence_array *array = to_dma_fence_array(head); + + if (!array || index >= array->num_fences) + return NULL; + + return array->fences[index]; +} +EXPORT_SYMBOL(dma_fence_array_next); diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c index 44a741677d25..a0d920576ba6 100644 --- a/drivers/dma-buf/dma-fence-chain.c +++ b/drivers/dma-buf/dma-fence-chain.c @@ -62,7 +62,8 @@ struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence) replacement = NULL; } - tmp = cmpxchg((void **)&chain->prev, (void *)prev, (void *)replacement); + tmp = unrcu_pointer(cmpxchg(&chain->prev, RCU_INITIALIZER(prev), + RCU_INITIALIZER(replacement))); if (tmp == prev) dma_fence_put(tmp); else @@ -136,6 +137,7 @@ static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb) struct dma_fence_chain *chain; chain = container_of(cb, typeof(*chain), cb); + init_irq_work(&chain->work, dma_fence_chain_irq_work); irq_work_queue(&chain->work); dma_fence_put(f); } @@ -146,8 +148,7 @@ static bool dma_fence_chain_enable_signaling(struct dma_fence *fence) dma_fence_get(&head->base); dma_fence_chain_for_each(fence, &head->base) { - struct dma_fence_chain *chain = to_dma_fence_chain(fence); - struct dma_fence *f = chain ? chain->fence : fence; + struct dma_fence *f = dma_fence_chain_contained(fence); dma_fence_get(f); if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) { @@ -163,8 +164,7 @@ static bool dma_fence_chain_enable_signaling(struct dma_fence *fence) static bool dma_fence_chain_signaled(struct dma_fence *fence) { dma_fence_chain_for_each(fence, fence) { - struct dma_fence_chain *chain = to_dma_fence_chain(fence); - struct dma_fence *f = chain ? chain->fence : fence; + struct dma_fence *f = dma_fence_chain_contained(fence); if (!dma_fence_is_signaled(f)) { dma_fence_put(fence); @@ -221,6 +221,7 @@ EXPORT_SYMBOL(dma_fence_chain_ops); * @chain: the chain node to initialize * @prev: the previous fence * @fence: the current fence + * @seqno: the sequence number to use for the fence chain * * Initialize a new chain node and either start a new chain or add the node to * the existing chain of the previous fence. @@ -237,7 +238,6 @@ void dma_fence_chain_init(struct dma_fence_chain *chain, rcu_assign_pointer(chain->prev, prev); chain->fence = fence; chain->prev_seqno = 0; - init_irq_work(&chain->work, dma_fence_chain_irq_work); /* Try to reuse the context of the previous chain node. */ if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) { @@ -252,5 +252,14 @@ void dma_fence_chain_init(struct dma_fence_chain *chain, dma_fence_init(&chain->base, &dma_fence_chain_ops, &chain->lock, context, seqno); + + /* + * Chaining dma_fence_chain container together is only allowed through + * the prev fence and not through the contained fence. 
+ * + * The correct way of handling this is to flatten out the fence + * structure into a dma_fence_array by the caller instead. + */ + WARN_ON(dma_fence_is_chain(fence)); } EXPORT_SYMBOL(dma_fence_chain_init); diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c new file mode 100644 index 000000000000..7002bca792ff --- /dev/null +++ b/drivers/dma-buf/dma-fence-unwrap.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * dma-fence-util: misc functions for dma_fence objects + * + * Copyright (C) 2022 Advanced Micro Devices, Inc. + * Authors: + * Christian König <christian.koenig@amd.com> + */ + +#include <linux/dma-fence.h> +#include <linux/dma-fence-array.h> +#include <linux/dma-fence-chain.h> +#include <linux/dma-fence-unwrap.h> +#include <linux/slab.h> + +/* Internal helper to start new array iteration, don't use directly */ +static struct dma_fence * +__dma_fence_unwrap_array(struct dma_fence_unwrap *cursor) +{ + cursor->array = dma_fence_chain_contained(cursor->chain); + cursor->index = 0; + return dma_fence_array_first(cursor->array); +} + +/** + * dma_fence_unwrap_first - return the first fence from fence containers + * @head: the entrypoint into the containers + * @cursor: current position inside the containers + * + * Unwraps potential dma_fence_chain/dma_fence_array containers and return the + * first fence. + */ +struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head, + struct dma_fence_unwrap *cursor) +{ + cursor->chain = dma_fence_get(head); + return __dma_fence_unwrap_array(cursor); +} +EXPORT_SYMBOL_GPL(dma_fence_unwrap_first); + +/** + * dma_fence_unwrap_next - return the next fence from a fence containers + * @cursor: current position inside the containers + * + * Continue unwrapping the dma_fence_chain/dma_fence_array containers and return + * the next fence from them. + */ +struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor) +{ + struct dma_fence *tmp; + + ++cursor->index; + tmp = dma_fence_array_next(cursor->array, cursor->index); + if (tmp) + return tmp; + + cursor->chain = dma_fence_chain_walk(cursor->chain); + return __dma_fence_unwrap_array(cursor); +} +EXPORT_SYMBOL_GPL(dma_fence_unwrap_next); + +/* Implementation for the dma_fence_merge() marco, don't use directly */ +struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences, + struct dma_fence **fences, + struct dma_fence_unwrap *iter) +{ + struct dma_fence_array *result; + struct dma_fence *tmp, **array; + unsigned int i; + size_t count; + + count = 0; + for (i = 0; i < num_fences; ++i) { + dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) + if (!dma_fence_is_signaled(tmp)) + ++count; + } + + if (count == 0) + return dma_fence_get_stub(); + + array = kmalloc_array(count, sizeof(*array), GFP_KERNEL); + if (!array) + return NULL; + + /* + * This trashes the input fence array and uses it as position for the + * following merge loop. This works because the dma_fence_merge() + * wrapper macro is creating this temporary array on the stack together + * with the iterators. 
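The unwrap helpers above are meant to be used through the iterator and merge macros from <linux/dma-fence-unwrap.h>. A hedged sketch of both, waiting on every unsignaled leaf fence behind a container and merging two fences without nesting containers:

#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>

/* Sketch: walk every leaf fence behind a possibly nested
 * chain/array container and wait for the ones still pending. */
static void my_wait_all(struct dma_fence *head)
{
	struct dma_fence_unwrap cursor;
	struct dma_fence *f;

	dma_fence_unwrap_for_each(f, &cursor, head) {
		if (!dma_fence_is_signaled(f))
			dma_fence_wait(f, false);
	}
}

/* Sketch: dma_fence_merge() flattens its arguments, so the result
 * never ends up as an array containing other containers. */
static struct dma_fence *my_combine(struct dma_fence *a, struct dma_fence *b)
{
	return dma_fence_merge(a, b);
}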
+ */ + for (i = 0; i < num_fences; ++i) + fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]); + + count = 0; + do { + unsigned int sel; + +restart: + tmp = NULL; + for (i = 0; i < num_fences; ++i) { + struct dma_fence *next; + + while (fences[i] && dma_fence_is_signaled(fences[i])) + fences[i] = dma_fence_unwrap_next(&iter[i]); + + next = fences[i]; + if (!next) + continue; + + /* + * We can't guarantee that inpute fences are ordered by + * context, but it is still quite likely when this + * function is used multiple times. So attempt to order + * the fences by context as we pass over them and merge + * fences with the same context. + */ + if (!tmp || tmp->context > next->context) { + tmp = next; + sel = i; + + } else if (tmp->context < next->context) { + continue; + + } else if (dma_fence_is_later(tmp, next)) { + fences[i] = dma_fence_unwrap_next(&iter[i]); + goto restart; + } else { + fences[sel] = dma_fence_unwrap_next(&iter[sel]); + goto restart; + } + } + + if (tmp) { + array[count++] = dma_fence_get(tmp); + fences[sel] = dma_fence_unwrap_next(&iter[sel]); + } + } while (tmp); + + if (count == 0) { + tmp = dma_fence_get_stub(); + goto return_tmp; + } + + if (count == 1) { + tmp = array[0]; + goto return_tmp; + } + + result = dma_fence_array_create(count, array, + dma_fence_context_alloc(1), + 1, false); + if (!result) { + tmp = NULL; + goto return_tmp; + } + return &result->base; + +return_tmp: + kfree(array); + return tmp; +} +EXPORT_SYMBOL_GPL(__dma_fence_unwrap_merge); diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 052a41e2451c..406b4e26f538 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -15,6 +15,7 @@ #include <linux/atomic.h> #include <linux/dma-fence.h> #include <linux/sched/signal.h> +#include <linux/seq_file.h> #define CREATE_TRACE_POINTS #include <trace/events/dma_fence.h> @@ -64,6 +65,52 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1); * &dma_buf.resv pointer. */ +/** + * DOC: fence cross-driver contract + * + * Since &dma_fence provide a cross driver contract, all drivers must follow the + * same rules: + * + * * Fences must complete in a reasonable time. Fences which represent kernels + * and shaders submitted by userspace, which could run forever, must be backed + * up by timeout and gpu hang recovery code. Minimally that code must prevent + * further command submission and force complete all in-flight fences, e.g. + * when the driver or hardware do not support gpu reset, or if the gpu reset + * failed for some reason. Ideally the driver supports gpu recovery which only + * affects the offending userspace context, and no other userspace + * submissions. + * + * * Drivers may have different ideas of what completion within a reasonable + * time means. Some hang recovery code uses a fixed timeout, others a mix + * between observing forward progress and increasingly strict timeouts. + * Drivers should not try to second guess timeout handling of fences from + * other drivers. + * + * * To ensure there's no deadlocks of dma_fence_wait() against other locks + * drivers should annotate all code required to reach dma_fence_signal(), + * which completes the fences, with dma_fence_begin_signalling() and + * dma_fence_end_signalling(). + * + * * Drivers are allowed to call dma_fence_wait() while holding dma_resv_lock(). + * This means any code required for fence completion cannot acquire a + * &dma_resv lock. 
Note that this also pulls in the entire established + * locking hierarchy around dma_resv_lock() and dma_resv_unlock(). + * + * * Drivers are allowed to call dma_fence_wait() from their &shrinker + * callbacks. This means any code required for fence completion cannot + * allocate memory with GFP_KERNEL. + * + * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier + * respectively &mmu_interval_notifier callbacks. This means any code required + * for fence completeion cannot allocate memory with GFP_NOFS or GFP_NOIO. + * Only GFP_ATOMIC is permissible, which might fail. + * + * Note that only GPU drivers have a reasonable excuse for both requiring + * &mmu_interval_notifier and &shrinker callbacks at the same time as having to + * track asynchronous compute work using &dma_fence. No driver outside of + * drivers/gpu should ever call dma_fence_wait() in such contexts. + */ + static const char *dma_fence_stub_get_name(struct dma_fence *fence) { return "stub"; @@ -77,7 +124,9 @@ static const struct dma_fence_ops dma_fence_stub_ops = { /** * dma_fence_get_stub - return a signaled fence * - * Return a stub fence which is already signaled. + * Return a stub fence which is already signaled. The fence's + * timestamp corresponds to the first time after boot this + * function is called. */ struct dma_fence *dma_fence_get_stub(void) { @@ -87,6 +136,10 @@ struct dma_fence *dma_fence_get_stub(void) &dma_fence_stub_ops, &dma_fence_stub_lock, 0, 0); + + set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &dma_fence_stub.flags); + dma_fence_signal_locked(&dma_fence_stub); } spin_unlock(&dma_fence_stub_lock); @@ -96,6 +149,33 @@ struct dma_fence *dma_fence_get_stub(void) EXPORT_SYMBOL(dma_fence_get_stub); /** + * dma_fence_allocate_private_stub - return a private, signaled fence + * + * Return a newly allocated and signaled stub fence. + */ +struct dma_fence *dma_fence_allocate_private_stub(void) +{ + struct dma_fence *fence; + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + if (fence == NULL) + return ERR_PTR(-ENOMEM); + + dma_fence_init(fence, + &dma_fence_stub_ops, + &dma_fence_stub_lock, + 0, 0); + + set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &dma_fence_stub.flags); + + dma_fence_signal(fence); + + return fence; +} +EXPORT_SYMBOL(dma_fence_allocate_private_stub); + +/** * dma_fence_context_alloc - allocate an array of fence contexts * @num: amount of contexts to allocate * @@ -106,27 +186,185 @@ EXPORT_SYMBOL(dma_fence_get_stub); u64 dma_fence_context_alloc(unsigned num) { WARN_ON(!num); - return atomic64_add_return(num, &dma_fence_context_counter) - num; + return atomic64_fetch_add(num, &dma_fence_context_counter); } EXPORT_SYMBOL(dma_fence_context_alloc); /** - * dma_fence_signal_locked - signal completion of a fence + * DOC: fence signalling annotation + * + * Proving correctness of all the kernel code around &dma_fence through code + * review and testing is tricky for a few reasons: + * + * * It is a cross-driver contract, and therefore all drivers must follow the + * same rules for lock nesting order, calling contexts for various functions + * and anything else significant for in-kernel interfaces. But it is also + * impossible to test all drivers in a single machine, hence brute-force N vs. + * N testing of all combinations is impossible. Even just limiting to the + * possible combinations is infeasible. + * + * * There is an enormous amount of driver code involved. 
For render drivers + * there's the tail of command submission, after fences are published, + * scheduler code, interrupt and workers to process job completion, + * and timeout, gpu reset and gpu hang recovery code. Plus for integration + * with core mm with have &mmu_notifier, respectively &mmu_interval_notifier, + * and &shrinker. For modesetting drivers there's the commit tail functions + * between when fences for an atomic modeset are published, and when the + * corresponding vblank completes, including any interrupt processing and + * related workers. Auditing all that code, across all drivers, is not + * feasible. + * + * * Due to how many other subsystems are involved and the locking hierarchies + * this pulls in there is extremely thin wiggle-room for driver-specific + * differences. &dma_fence interacts with almost all of the core memory + * handling through page fault handlers via &dma_resv, dma_resv_lock() and + * dma_resv_unlock(). On the other side it also interacts through all + * allocation sites through &mmu_notifier and &shrinker. + * + * Furthermore lockdep does not handle cross-release dependencies, which means + * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught + * at runtime with some quick testing. The simplest example is one thread + * waiting on a &dma_fence while holding a lock:: + * + * lock(A); + * dma_fence_wait(B); + * unlock(A); + * + * while the other thread is stuck trying to acquire the same lock, which + * prevents it from signalling the fence the previous thread is stuck waiting + * on:: + * + * lock(A); + * unlock(A); + * dma_fence_signal(B); + * + * By manually annotating all code relevant to signalling a &dma_fence we can + * teach lockdep about these dependencies, which also helps with the validation + * headache since now lockdep can check all the rules for us:: + * + * cookie = dma_fence_begin_signalling(); + * lock(A); + * unlock(A); + * dma_fence_signal(B); + * dma_fence_end_signalling(cookie); + * + * For using dma_fence_begin_signalling() and dma_fence_end_signalling() to + * annotate critical sections the following rules need to be observed: + * + * * All code necessary to complete a &dma_fence must be annotated, from the + * point where a fence is accessible to other threads, to the point where + * dma_fence_signal() is called. Un-annotated code can contain deadlock issues, + * and due to the very strict rules and many corner cases it is infeasible to + * catch these just with review or normal stress testing. + * + * * &struct dma_resv deserves a special note, since the readers are only + * protected by rcu. This means the signalling critical section starts as soon + * as the new fences are installed, even before dma_resv_unlock() is called. + * + * * The only exception are fast paths and opportunistic signalling code, which + * calls dma_fence_signal() purely as an optimization, but is not required to + * guarantee completion of a &dma_fence. The usual example is a wait IOCTL + * which calls dma_fence_signal(), while the mandatory completion path goes + * through a hardware interrupt and possible job completion worker. + * + * * To aid composability of code, the annotations can be freely nested, as long + * as the overall locking hierarchy is consistent. The annotations also work + * both in interrupt and process context. Due to implementation details this + * requires that callers pass an opaque cookie from + * dma_fence_begin_signalling() to dma_fence_end_signalling(). 
+ * + * * Validation against the cross driver contract is implemented by priming + * lockdep with the relevant hierarchy at boot-up. This means even just + * testing with a single device is enough to validate a driver, at least as + * far as deadlocks with dma_fence_wait() against dma_fence_signal() are + * concerned. + */ +#ifdef CONFIG_LOCKDEP +static struct lockdep_map dma_fence_lockdep_map = { + .name = "dma_fence_map" +}; + +/** + * dma_fence_begin_signalling - begin a critical DMA fence signalling section + * + * Drivers should use this to annotate the beginning of any code section + * required to eventually complete &dma_fence by calling dma_fence_signal(). + * + * The end of these critical sections are annotated with + * dma_fence_end_signalling(). + * + * Returns: + * + * Opaque cookie needed by the implementation, which needs to be passed to + * dma_fence_end_signalling(). + */ +bool dma_fence_begin_signalling(void) +{ + /* explicitly nesting ... */ + if (lock_is_held_type(&dma_fence_lockdep_map, 1)) + return true; + + /* rely on might_sleep check for soft/hardirq locks */ + if (in_atomic()) + return true; + + /* ... and non-recursive readlock */ + lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _RET_IP_); + + return false; +} +EXPORT_SYMBOL(dma_fence_begin_signalling); + +/** + * dma_fence_end_signalling - end a critical DMA fence signalling section + * @cookie: opaque cookie from dma_fence_begin_signalling() + * + * Closes a critical section annotation opened by dma_fence_begin_signalling(). + */ +void dma_fence_end_signalling(bool cookie) +{ + if (cookie) + return; + + lock_release(&dma_fence_lockdep_map, _RET_IP_); +} +EXPORT_SYMBOL(dma_fence_end_signalling); + +void __dma_fence_might_wait(void) +{ + bool tmp; + + tmp = lock_is_held_type(&dma_fence_lockdep_map, 1); + if (tmp) + lock_release(&dma_fence_lockdep_map, _THIS_IP_); + lock_map_acquire(&dma_fence_lockdep_map); + lock_map_release(&dma_fence_lockdep_map); + if (tmp) + lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_); +} +#endif + + +/** + * dma_fence_signal_timestamp_locked - signal completion of a fence * @fence: the fence to signal + * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain * * Signal completion for software callbacks on a fence, this will unblock * dma_fence_wait() calls and run all the callbacks added with * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from the unsignaled to the signaled state and not back, it will - * only be effective the first time. + * only be effective the first time. Set the timestamp provided as the fence + * signal timestamp. * - * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock - * held. + * Unlike dma_fence_signal_timestamp(), this function must be called with + * &dma_fence.lock held. * * Returns 0 on success and a negative error value when @fence has been * signalled already. 
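To make the annotation pattern above concrete, a driver's job-completion path could look roughly like the sketch below. struct my_job and the teardown step are made-up placeholders for driver-specific state, not anything defined by this patch:

    #include <linux/dma-fence.h>
    #include <linux/workqueue.h>

    struct my_job {
            struct work_struct done_work;
            struct dma_fence *done_fence;
    };

    static void my_job_done_worker(struct work_struct *work)
    {
            struct my_job *job = container_of(work, struct my_job, done_work);
            bool cookie = dma_fence_begin_signalling();

            /*
             * Driver-specific teardown goes here; inside the annotated
             * region it must not wait on other fences or allocate with
             * GFP_KERNEL, per the cross-driver contract above.
             */
            dma_fence_signal(job->done_fence);

            dma_fence_end_signalling(cookie);
    }

With the lockdep map primed this way, taking a lock inside the annotated region that another thread holds around dma_fence_wait() shows up as a lockdep report during testing rather than as a rare runtime deadlock.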
*/ -int dma_fence_signal_locked(struct dma_fence *fence) +int dma_fence_signal_timestamp_locked(struct dma_fence *fence, + ktime_t timestamp) { struct dma_fence_cb *cur, *tmp; struct list_head cb_list; @@ -140,7 +378,7 @@ int dma_fence_signal_locked(struct dma_fence *fence) /* Stash the cb_list before replacing it with the timestamp */ list_replace(&fence->cb_list, &cb_list); - fence->timestamp = ktime_get(); + fence->timestamp = timestamp; set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); trace_dma_fence_signaled(fence); @@ -151,6 +389,59 @@ int dma_fence_signal_locked(struct dma_fence *fence) return 0; } +EXPORT_SYMBOL(dma_fence_signal_timestamp_locked); + +/** + * dma_fence_signal_timestamp - signal completion of a fence + * @fence: the fence to signal + * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain + * + * Signal completion for software callbacks on a fence, this will unblock + * dma_fence_wait() calls and run all the callbacks added with + * dma_fence_add_callback(). Can be called multiple times, but since a fence + * can only go from the unsignaled to the signaled state and not back, it will + * only be effective the first time. Set the timestamp provided as the fence + * signal timestamp. + * + * Returns 0 on success and a negative error value when @fence has been + * signalled already. + */ +int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp) +{ + unsigned long flags; + int ret; + + if (!fence) + return -EINVAL; + + spin_lock_irqsave(fence->lock, flags); + ret = dma_fence_signal_timestamp_locked(fence, timestamp); + spin_unlock_irqrestore(fence->lock, flags); + + return ret; +} +EXPORT_SYMBOL(dma_fence_signal_timestamp); + +/** + * dma_fence_signal_locked - signal completion of a fence + * @fence: the fence to signal + * + * Signal completion for software callbacks on a fence, this will unblock + * dma_fence_wait() calls and run all the callbacks added with + * dma_fence_add_callback(). Can be called multiple times, but since a fence + * can only go from the unsignaled to the signaled state and not back, it will + * only be effective the first time. + * + * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock + * held. + * + * Returns 0 on success and a negative error value when @fence has been + * signalled already. 
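For illustration, an interrupt handler that learns the actual completion time from hardware might propagate it with dma_fence_signal_timestamp() as sketched below. struct my_engine and its members are assumptions for the example; the timestamp must already be expressed in CLOCK_MONOTONIC:

    #include <linux/dma-fence.h>
    #include <linux/interrupt.h>
    #include <linux/ktime.h>

    struct my_engine {
            struct dma_fence *active_fence;
            ktime_t hw_done_ts;     /* HW completion time, converted to CLOCK_MONOTONIC */
    };

    static irqreturn_t my_done_irq(int irq, void *data)
    {
            struct my_engine *engine = data;

            /* Back-date the fence to when the hardware actually finished. */
            dma_fence_signal_timestamp(engine->active_fence, engine->hw_done_ts);
            return IRQ_HANDLED;
    }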
+ */ +int dma_fence_signal_locked(struct dma_fence *fence) +{ + return dma_fence_signal_timestamp_locked(fence, ktime_get()); +} EXPORT_SYMBOL(dma_fence_signal_locked); /** @@ -170,14 +461,19 @@ int dma_fence_signal(struct dma_fence *fence) { unsigned long flags; int ret; + bool tmp; if (!fence) return -EINVAL; + tmp = dma_fence_begin_signalling(); + spin_lock_irqsave(fence->lock, flags); - ret = dma_fence_signal_locked(fence); + ret = dma_fence_signal_timestamp_locked(fence, ktime_get()); spin_unlock_irqrestore(fence->lock, flags); + dma_fence_end_signalling(tmp); + return ret; } EXPORT_SYMBOL(dma_fence_signal); @@ -208,6 +504,12 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout) if (WARN_ON(timeout < 0)) return -EINVAL; + might_sleep(); + + __dma_fence_might_wait(); + + dma_fence_enable_sw_signaling(fence); + trace_dma_fence_wait_start(fence); if (fence->ops->wait) ret = fence->ops->wait(fence, intr, timeout); @@ -309,9 +611,6 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence) { unsigned long flags; - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - return; - spin_lock_irqsave(fence->lock, flags); __dma_fence_enable_signaling(fence); spin_unlock_irqrestore(fence->lock, flags); @@ -325,20 +624,17 @@ EXPORT_SYMBOL(dma_fence_enable_sw_signaling); * @cb: the callback to register * @func: the function to call * + * Add a software callback to the fence. The caller should keep a reference to + * the fence. + * * @cb will be initialized by dma_fence_add_callback(), no initialization * by the caller is required. Any number of callbacks can be registered * to a fence, but a callback can only be registered to one fence at a time. * - * Note that the callback can be called from an atomic context. If - * fence is already signaled, this function will return -ENOENT (and + * If fence is already signaled, this function will return -ENOENT (and * *not* call the callback). * - * Add a software callback to the fence. Same restrictions apply to - * refcount as it does to dma_fence_wait(), however the caller doesn't need to - * keep a refcount to fence afterward dma_fence_add_callback() has returned: - * when software access is enabled, the creator of the fence is required to keep - * the fence alive until after it signals with dma_fence_signal(). The callback - * itself can be called from irq context. + * Note that the callback can be called from an atomic context or irq context. * * Returns 0 in case of success, -ENOENT if the fence is already signaled * and -EINVAL in case of error. @@ -467,19 +763,16 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) unsigned long flags; signed long ret = timeout ? timeout : 1; - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - return ret; - spin_lock_irqsave(fence->lock, flags); + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + goto out; + if (intr && signal_pending(current)) { ret = -ERESTARTSYS; goto out; } - if (!__dma_fence_enable_signaling(fence)) - goto out; - if (!timeout) { ret = 0; goto out; } @@ -620,6 +913,22 @@ err_free_cb: EXPORT_SYMBOL(dma_fence_wait_any_timeout); /** + * dma_fence_describe - Dump fence description into seq_file + * @fence: the fence to describe + * @seq: the seq_file to put the textual description into + * + * Dump a textual description of the fence and its state into the seq_file.
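A debugfs or similar dumper can reuse this helper directly; a minimal sketch, where struct my_device and its last_fence member are hypothetical driver state:

    #include <linux/dma-fence.h>
    #include <linux/seq_file.h>

    struct my_device {
            struct dma_fence *last_fence;
    };

    /* seq_file show callback printing the driver's last submitted fence */
    static int my_fence_show(struct seq_file *m, void *unused)
    {
            struct my_device *mdev = m->private;

            dma_fence_describe(mdev->last_fence, m);
            return 0;
    }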
+ */ +void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq) +{ + seq_printf(seq, "%s %s seq %llu %ssignalled\n", + fence->ops->get_driver_name(fence), + fence->ops->get_timeline_name(fence), fence->seqno, + dma_fence_is_signaled(fence) ? "" : "un"); +} +EXPORT_SYMBOL(dma_fence_describe); + +/** * dma_fence_init - Initialize a custom fence. * @fence: the fence to initialize * @ops: the dma_fence_ops for operations on this fence diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c index afd22c9dbdcf..8f5848aa144f 100644 --- a/drivers/dma-buf/dma-heap.c +++ b/drivers/dma-buf/dma-heap.c @@ -14,6 +14,7 @@ #include <linux/xarray.h> #include <linux/list.h> #include <linux/slab.h> +#include <linux/nospec.h> #include <linux/uaccess.h> #include <linux/syscalls.h> #include <linux/dma-heap.h> @@ -52,6 +53,9 @@ static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len, unsigned int fd_flags, unsigned int heap_flags) { + struct dma_buf *dmabuf; + int fd; + /* * Allocations from all heaps have to begin * and end on page boundaries. @@ -60,7 +64,16 @@ static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len, if (!len) return -EINVAL; - return heap->ops->allocate(heap, len, fd_flags, heap_flags); + dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + fd = dma_buf_fd(dmabuf, fd_flags); + if (fd < 0) { + dma_buf_put(dmabuf); + /* just return, as put will call release and that will free */ + } + return fd; } static int dma_heap_open(struct inode *inode, struct file *file) @@ -123,6 +136,7 @@ static long dma_heap_ioctl(struct file *file, unsigned int ucmd, if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds)) return -EINVAL; + nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds)); /* Get the kernel ioctl cmd that matches */ kcmd = dma_heap_ioctl_cmds[nr]; @@ -190,6 +204,18 @@ void *dma_heap_get_drvdata(struct dma_heap *heap) return heap->priv; } +/** + * dma_heap_get_name() - get heap name + * @heap: DMA-Heap to retrieve private data for + * + * Returns: + * The char* for the heap name. + */ +const char *dma_heap_get_name(struct dma_heap *heap) +{ + return heap->name; +} + struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) { struct dma_heap *heap, *h, *err_ret; diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 4264e64788c4..e3885c90a3ac 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst) * @@ -33,56 +34,82 @@ */ #include <linux/dma-resv.h> +#include <linux/dma-fence-array.h> #include <linux/export.h> +#include <linux/mm.h> #include <linux/sched/mm.h> +#include <linux/mmu_notifier.h> +#include <linux/seq_file.h> /** * DOC: Reservation Object Overview * - * The reservation object provides a mechanism to manage shared and - * exclusive fences associated with a buffer. A reservation object - * can have attached one exclusive fence (normally associated with - * write operations) or N shared fences (read operations). The RCU - * mechanism is used to protect read access to fences from locked - * write-side updates. + * The reservation object provides a mechanism to manage a container of + * dma_fence object associated with a resource. A reservation object + * can have any number of fences attaches to it. Each fence carries an usage + * parameter determining how the operation represented by the fence is using the + * resource. 
The RCU mechanism is used to protect read access to fences from + * locked write-side updates. + * + * See struct dma_resv for more details. */ DEFINE_WD_CLASS(reservation_ww_class); EXPORT_SYMBOL(reservation_ww_class); -struct lock_class_key reservation_seqcount_class; -EXPORT_SYMBOL(reservation_seqcount_class); +/* Mask for the lower fence pointer bits */ +#define DMA_RESV_LIST_MASK 0x3 -const char reservation_seqcount_string[] = "reservation_seqcount"; -EXPORT_SYMBOL(reservation_seqcount_string); +struct dma_resv_list { + struct rcu_head rcu; + u32 num_fences, max_fences; + struct dma_fence __rcu *table[]; +}; -/** - * dma_resv_list_alloc - allocate fence list - * @shared_max: number of fences we need space for - * +/* Extract the fence and usage flags from an RCU protected entry in the list. */ +static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index, + struct dma_resv *resv, struct dma_fence **fence, + enum dma_resv_usage *usage) +{ + long tmp; + + tmp = (long)rcu_dereference_check(list->table[index], + resv ? dma_resv_held(resv) : true); + *fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK); + if (usage) + *usage = tmp & DMA_RESV_LIST_MASK; +} + +/* Set the fence and usage flags at the specific index in the list. */ +static void dma_resv_list_set(struct dma_resv_list *list, + unsigned int index, + struct dma_fence *fence, + enum dma_resv_usage usage) +{ + long tmp = ((long)fence) | usage; + + RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp); +} + +/* * Allocate a new dma_resv_list and make sure to correctly initialize - * shared_max. + * max_fences. */ -static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max) +static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences) { struct dma_resv_list *list; - list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL); + list = kmalloc(struct_size(list, table, max_fences), GFP_KERNEL); if (!list) return NULL; - list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) / - sizeof(*list->shared); + list->max_fences = (ksize(list) - offsetof(typeof(*list), table)) / + sizeof(*list->table); return list; } -/** - * dma_resv_list_free - free fence list - * @list: list to free - * - * Free a dma_resv_list and make sure to drop all references. - */ +/* Free a dma_resv_list and make sure to drop all references. 
*/ static void dma_resv_list_free(struct dma_resv_list *list) { unsigned int i; @@ -90,43 +117,15 @@ static void dma_resv_list_free(struct dma_resv_list *list) if (!list) return; - for (i = 0; i < list->shared_count; ++i) - dma_fence_put(rcu_dereference_protected(list->shared[i], true)); + for (i = 0; i < list->num_fences; ++i) { + struct dma_fence *fence; + dma_resv_list_entry(list, i, NULL, &fence, NULL); + dma_fence_put(fence); + } kfree_rcu(list, rcu); } -#if IS_ENABLED(CONFIG_LOCKDEP) -static int __init dma_resv_lockdep(void) -{ - struct mm_struct *mm = mm_alloc(); - struct ww_acquire_ctx ctx; - struct dma_resv obj; - int ret; - - if (!mm) - return -ENOMEM; - - dma_resv_init(&obj); - - down_read(&mm->mmap_sem); - ww_acquire_init(&ctx, &reservation_ww_class); - ret = dma_resv_lock(&obj, &ctx); - if (ret == -EDEADLK) - dma_resv_lock_slow(&obj, &ctx); - fs_reclaim_acquire(GFP_KERNEL); - fs_reclaim_release(GFP_KERNEL); - ww_mutex_unlock(&obj.lock); - ww_acquire_fini(&ctx); - up_read(&mm->mmap_sem); - - mmput(mm); - - return 0; -} -subsys_initcall(dma_resv_lockdep); -#endif - /** * dma_resv_init - initialize a reservation object * @obj: the reservation object @@ -135,10 +134,7 @@ void dma_resv_init(struct dma_resv *obj) { ww_mutex_init(&obj->lock, &reservation_ww_class); - __seqcount_init(&obj->seq, reservation_seqcount_string, - &reservation_seqcount_class); - RCU_INIT_POINTER(obj->fence, NULL); - RCU_INIT_POINTER(obj->fence_excl, NULL); + RCU_INIT_POINTER(obj->fences, NULL); } EXPORT_SYMBOL(dma_resv_init); @@ -148,52 +144,50 @@ EXPORT_SYMBOL(dma_resv_init); */ void dma_resv_fini(struct dma_resv *obj) { - struct dma_resv_list *fobj; - struct dma_fence *excl; - /* * This object should be dead and all references must have * been released to it, so no need to be protected with rcu. */ - excl = rcu_dereference_protected(obj->fence_excl, 1); - if (excl) - dma_fence_put(excl); - - fobj = rcu_dereference_protected(obj->fence, 1); - dma_resv_list_free(fobj); + dma_resv_list_free(rcu_dereference_protected(obj->fences, true)); ww_mutex_destroy(&obj->lock); } EXPORT_SYMBOL(dma_resv_fini); +/* Dereference the fences while ensuring RCU rules */ +static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj) +{ + return rcu_dereference_check(obj->fences, dma_resv_held(obj)); +} + /** - * dma_resv_reserve_shared - Reserve space to add shared fences to - * a dma_resv. + * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object. * @obj: reservation object * @num_fences: number of fences we want to add * - * Should be called before dma_resv_add_shared_fence(). Must - * be called with obj->lock held. + * Should be called before dma_resv_add_fence(). Must be called with @obj + * locked through dma_resv_lock(). + * + * Note that the preallocated slots need to be re-reserved if @obj is unlocked + * at any time before calling dma_resv_add_fence(). This is validated when + * CONFIG_DEBUG_MUTEXES is enabled. 
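Taken together with dma_resv_add_fence() further below, the usual pattern is reserve-then-add while the object stays locked. A minimal sketch, assuming the caller already owns references to resv and fence; the DMA_RESV_USAGE_WRITE choice is only an example:

    #include <linux/dma-resv.h>

    static int my_attach_write_fence(struct dma_resv *resv,
                                     struct dma_fence *fence)
    {
            int ret;

            ret = dma_resv_lock(resv, NULL);
            if (ret)
                    return ret;

            /* Reserve one slot, then fill it without dropping the lock. */
            ret = dma_resv_reserve_fences(resv, 1);
            if (!ret)
                    dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);

            dma_resv_unlock(resv);
            return ret;
    }

If the object has to be unlocked between the two calls, the reservation must be repeated; that is exactly what the CONFIG_DEBUG_MUTEXES check mentioned above is meant to catch.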
* * RETURNS * Zero for success, or -errno */ -int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences) +int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences) { struct dma_resv_list *old, *new; unsigned int i, j, k, max; dma_resv_assert_held(obj); - old = dma_resv_get_list(obj); - - if (old && old->shared_max) { - if ((old->shared_count + num_fences) <= old->shared_max) + old = dma_resv_fences_list(obj); + if (old && old->max_fences) { + if ((old->num_fences + num_fences) <= old->max_fences) return 0; - else - max = max(old->shared_count + num_fences, - old->shared_max * 2); + max = max(old->num_fences + num_fences, old->max_fences * 2); } else { - max = 4; + max = max(4ul, roundup_pow_of_two(num_fences)); } new = dma_resv_list_alloc(max); @@ -206,27 +200,27 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences) * references from the old struct are carried over to * the new. */ - for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) { + for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) { + enum dma_resv_usage usage; struct dma_fence *fence; - fence = rcu_dereference_protected(old->shared[i], - dma_resv_held(obj)); + dma_resv_list_entry(old, i, obj, &fence, &usage); if (dma_fence_is_signaled(fence)) - RCU_INIT_POINTER(new->shared[--k], fence); + RCU_INIT_POINTER(new->table[--k], fence); else - RCU_INIT_POINTER(new->shared[j++], fence); + dma_resv_list_set(new, j++, fence, usage); } - new->shared_count = j; + new->num_fences = j; /* * We are not changing the effective set of fences here so can * merely update the pointer to the new array; both existing * readers and new readers will see exactly the same set of - * active (unsignaled) shared fences. Individual fences and the + * active (unsignaled) fences. Individual fences and the * old array are protected by RCU and so will not vanish under * the gaze of the rcu_read_lock() readers. */ - rcu_assign_pointer(obj->fence, new); + rcu_assign_pointer(obj->fences, new); if (!old) return 0; @@ -235,7 +229,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences) for (i = k; i < max; ++i) { struct dma_fence *fence; - fence = rcu_dereference_protected(new->shared[i], + fence = rcu_dereference_protected(new->table[i], dma_resv_held(obj)); dma_fence_put(fence); } @@ -243,17 +237,43 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences) return 0; } -EXPORT_SYMBOL(dma_resv_reserve_shared); +EXPORT_SYMBOL(dma_resv_reserve_fences); +#ifdef CONFIG_DEBUG_MUTEXES /** - * dma_resv_add_shared_fence - Add a fence to a shared slot + * dma_resv_reset_max_fences - reset fences for debugging + * @obj: the dma_resv object to reset + * + * Reset the number of pre-reserved fence slots to test that drivers do + * correct slot allocation using dma_resv_reserve_fences(). See also + * &dma_resv_list.max_fences. + */ +void dma_resv_reset_max_fences(struct dma_resv *obj) +{ + struct dma_resv_list *fences = dma_resv_fences_list(obj); + + dma_resv_assert_held(obj); + + /* Test fence slot reservation */ + if (fences) + fences->max_fences = fences->num_fences; +} +EXPORT_SYMBOL(dma_resv_reset_max_fences); +#endif + +/** + * dma_resv_add_fence - Add a fence to the dma_resv obj * @obj: the reservation object - * @fence: the shared fence to add + * @fence: the fence to add + * @usage: how the fence is used, see enum dma_resv_usage * - * Add a fence to a shared slot, obj->lock must be held, and - * dma_resv_reserve_shared() has been called. 
+ * Add a fence to a slot, @obj must be locked with dma_resv_lock(), and + * dma_resv_reserve_fences() has been called. + * + * See also &dma_resv.fence for a discussion of the semantics. */ -void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence) +void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, + enum dma_resv_usage usage) { struct dma_resv_list *fobj; struct dma_fence *old; @@ -263,420 +283,492 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence) dma_resv_assert_held(obj); - fobj = dma_resv_get_list(obj); - count = fobj->shared_count; + /* Drivers should not add containers here, instead add each fence + * individually. + */ + WARN_ON(dma_fence_is_container(fence)); - preempt_disable(); - write_seqcount_begin(&obj->seq); + fobj = dma_resv_fences_list(obj); + count = fobj->num_fences; for (i = 0; i < count; ++i) { - - old = rcu_dereference_protected(fobj->shared[i], - dma_resv_held(obj)); - if (old->context == fence->context || - dma_fence_is_signaled(old)) - goto replace; + enum dma_resv_usage old_usage; + + dma_resv_list_entry(fobj, i, obj, &old, &old_usage); + if ((old->context == fence->context && old_usage >= usage && + dma_fence_is_later(fence, old)) || + dma_fence_is_signaled(old)) { + dma_resv_list_set(fobj, i, fence, usage); + dma_fence_put(old); + return; + } } - BUG_ON(fobj->shared_count >= fobj->shared_max); - old = NULL; + BUG_ON(fobj->num_fences >= fobj->max_fences); count++; -replace: - RCU_INIT_POINTER(fobj->shared[i], fence); - /* pointer update must be visible before we extend the shared_count */ - smp_store_mb(fobj->shared_count, count); - - write_seqcount_end(&obj->seq); - preempt_enable(); - dma_fence_put(old); + dma_resv_list_set(fobj, i, fence, usage); + /* pointer update must be visible before we extend the num_fences */ + smp_store_mb(fobj->num_fences, count); } -EXPORT_SYMBOL(dma_resv_add_shared_fence); +EXPORT_SYMBOL(dma_resv_add_fence); /** - * dma_resv_add_excl_fence - Add an exclusive fence. + * dma_resv_replace_fences - replace fences in the dma_resv obj * @obj: the reservation object - * @fence: the shared fence to add + * @context: the context of the fences to replace + * @replacement: the new fence to use instead + * @usage: how the new fence is used, see enum dma_resv_usage + * + * Replace fences with a specified context with a new fence. Only valid if the + * operation represented by the original fence has no longer access to the + * resources represented by the dma_resv object when the new fence completes. * - * Add a fence to the exclusive slot. The obj->lock must be held. + * And example for using this is replacing a preemption fence with a page table + * update fence which makes the resource inaccessible. 
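As a rough sketch of the preemption-fence case described above; the fence context value, the page-table fence and the DMA_RESV_USAGE_BOOKKEEP choice are all assumptions made for illustration, not mandated by the patch:

    #include <linux/dma-resv.h>

    static int my_swap_preempt_fence(struct dma_resv *resv, u64 preempt_context,
                                     struct dma_fence *pt_cleared_fence)
    {
            int ret;

            ret = dma_resv_lock(resv, NULL);
            if (ret)
                    return ret;

            /* All fences from the preemption context become the PT fence. */
            dma_resv_replace_fences(resv, preempt_context, pt_cleared_fence,
                                    DMA_RESV_USAGE_BOOKKEEP);

            dma_resv_unlock(resv);
            return 0;
    }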
*/ -void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) +void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, + struct dma_fence *replacement, + enum dma_resv_usage usage) { - struct dma_fence *old_fence = dma_resv_get_excl(obj); - struct dma_resv_list *old; - u32 i = 0; + struct dma_resv_list *list; + unsigned int i; dma_resv_assert_held(obj); - old = dma_resv_get_list(obj); - if (old) - i = old->shared_count; + list = dma_resv_fences_list(obj); + for (i = 0; list && i < list->num_fences; ++i) { + struct dma_fence *old; - if (fence) - dma_fence_get(fence); + dma_resv_list_entry(list, i, obj, &old, NULL); + if (old->context != context) + continue; - preempt_disable(); - write_seqcount_begin(&obj->seq); - /* write_seqcount_begin provides the necessary memory barrier */ - RCU_INIT_POINTER(obj->fence_excl, fence); - if (old) - old->shared_count = 0; - write_seqcount_end(&obj->seq); - preempt_enable(); - - /* inplace update, no shared fences */ - while (i--) - dma_fence_put(rcu_dereference_protected(old->shared[i], - dma_resv_held(obj))); - - dma_fence_put(old_fence); + dma_resv_list_set(list, i, dma_fence_get(replacement), usage); + dma_fence_put(old); + } } -EXPORT_SYMBOL(dma_resv_add_excl_fence); +EXPORT_SYMBOL(dma_resv_replace_fences); -/** -* dma_resv_copy_fences - Copy all fences from src to dst. -* @dst: the destination reservation object -* @src: the source reservation object -* -* Copy all fences from src to dst. dst-lock must be held. -*/ -int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) +/* Restart the unlocked iteration by initializing the cursor object. */ +static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor) { - struct dma_resv_list *src_list, *dst_list; - struct dma_fence *old, *new; - unsigned i; - - dma_resv_assert_held(dst); - - rcu_read_lock(); - src_list = rcu_dereference(src->fence); + cursor->index = 0; + cursor->num_fences = 0; + cursor->fences = dma_resv_fences_list(cursor->obj); + if (cursor->fences) + cursor->num_fences = cursor->fences->num_fences; + cursor->is_restarted = true; +} -retry: - if (src_list) { - unsigned shared_count = src_list->shared_count; +/* Walk to the next not signaled fence and grab a reference to it */ +static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor) +{ + if (!cursor->fences) + return; - rcu_read_unlock(); + do { + /* Drop the reference from the previous round */ + dma_fence_put(cursor->fence); - dst_list = dma_resv_list_alloc(shared_count); - if (!dst_list) - return -ENOMEM; + if (cursor->index >= cursor->num_fences) { + cursor->fence = NULL; + break; - rcu_read_lock(); - src_list = rcu_dereference(src->fence); - if (!src_list || src_list->shared_count > shared_count) { - kfree(dst_list); - goto retry; } - dst_list->shared_count = 0; - for (i = 0; i < src_list->shared_count; ++i) { - struct dma_fence *fence; + dma_resv_list_entry(cursor->fences, cursor->index++, + cursor->obj, &cursor->fence, + &cursor->fence_usage); + cursor->fence = dma_fence_get_rcu(cursor->fence); + if (!cursor->fence) { + dma_resv_iter_restart_unlocked(cursor); + continue; + } - fence = rcu_dereference(src_list->shared[i]); - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &fence->flags)) - continue; + if (!dma_fence_is_signaled(cursor->fence) && + cursor->usage >= cursor->fence_usage) + break; + } while (true); +} - if (!dma_fence_get_rcu(fence)) { - dma_resv_list_free(dst_list); - src_list = rcu_dereference(src->fence); - goto retry; - } +/** + * 
dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj. + * @cursor: the cursor with the current position + * + * Subsequent fences are iterated with dma_resv_iter_next_unlocked(). + * + * Beware that the iterator can be restarted. Code which accumulates statistics + * or similar needs to check for this with dma_resv_iter_is_restarted(). For + * this reason prefer the locked dma_resv_iter_first() whenver possible. + * + * Returns the first fence from an unlocked dma_resv obj. + */ +struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor) +{ + rcu_read_lock(); + do { + dma_resv_iter_restart_unlocked(cursor); + dma_resv_iter_walk_unlocked(cursor); + } while (dma_resv_fences_list(cursor->obj) != cursor->fences); + rcu_read_unlock(); - if (dma_fence_is_signaled(fence)) { - dma_fence_put(fence); - continue; - } + return cursor->fence; +} +EXPORT_SYMBOL(dma_resv_iter_first_unlocked); - rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence); - } - } else { - dst_list = NULL; - } +/** + * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj. + * @cursor: the cursor with the current position + * + * Beware that the iterator can be restarted. Code which accumulates statistics + * or similar needs to check for this with dma_resv_iter_is_restarted(). For + * this reason prefer the locked dma_resv_iter_next() whenver possible. + * + * Returns the next fence from an unlocked dma_resv obj. + */ +struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor) +{ + bool restart; - new = dma_fence_get_rcu_safe(&src->fence_excl); + rcu_read_lock(); + cursor->is_restarted = false; + restart = dma_resv_fences_list(cursor->obj) != cursor->fences; + do { + if (restart) + dma_resv_iter_restart_unlocked(cursor); + dma_resv_iter_walk_unlocked(cursor); + restart = true; + } while (dma_resv_fences_list(cursor->obj) != cursor->fences); rcu_read_unlock(); - src_list = dma_resv_get_list(dst); - old = dma_resv_get_excl(dst); + return cursor->fence; +} +EXPORT_SYMBOL(dma_resv_iter_next_unlocked); - preempt_disable(); - write_seqcount_begin(&dst->seq); - /* write_seqcount_begin provides the necessary memory barrier */ - RCU_INIT_POINTER(dst->fence_excl, new); - RCU_INIT_POINTER(dst->fence, dst_list); - write_seqcount_end(&dst->seq); - preempt_enable(); +/** + * dma_resv_iter_first - first fence from a locked dma_resv object + * @cursor: cursor to record the current position + * + * Subsequent fences are iterated with dma_resv_iter_next_unlocked(). + * + * Return the first fence in the dma_resv object while holding the + * &dma_resv.lock. + */ +struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor) +{ + struct dma_fence *fence; - dma_resv_list_free(src_list); - dma_fence_put(old); + dma_resv_assert_held(cursor->obj); - return 0; + cursor->index = 0; + cursor->fences = dma_resv_fences_list(cursor->obj); + + fence = dma_resv_iter_next(cursor); + cursor->is_restarted = true; + return fence; } -EXPORT_SYMBOL(dma_resv_copy_fences); +EXPORT_SYMBOL_GPL(dma_resv_iter_first); /** - * dma_resv_get_fences_rcu - Get an object's shared and exclusive - * fences without update side lock held - * @obj: the reservation object - * @pfence_excl: the returned exclusive fence (or NULL) - * @pshared_count: the number of shared fences returned - * @pshared: the array of shared fence ptrs returned (array is krealloc'd to - * the required size, and must be freed by caller) - * - * Retrieve all fences from the reservation object. 
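A typical consumer of the unlocked iterator looks like the sketch below; note how any accumulated state is thrown away whenever the iterator signals a restart:

    #include <linux/dma-resv.h>

    /* Count unsignaled fences of 'resv' without taking the reservation lock. */
    static unsigned int my_count_busy_fences(struct dma_resv *resv)
    {
            struct dma_resv_iter cursor;
            struct dma_fence *fence;
            unsigned int busy = 0;

            dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
            dma_resv_for_each_fence_unlocked(&cursor, fence) {
                    if (dma_resv_iter_is_restarted(&cursor))
                            busy = 0;       /* fence list changed, start over */
                    if (!dma_fence_is_signaled(fence))
                            busy++;
            }
            dma_resv_iter_end(&cursor);

            return busy;
    }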
If the pointer for the - * exclusive fence is not specified the fence is put into the array of the - * shared fences as well. Returns either zero or -ENOMEM. + * dma_resv_iter_next - next fence from a locked dma_resv object + * @cursor: cursor to record the current position + * + * Return the next fences from the dma_resv object while holding the + * &dma_resv.lock. */ -int dma_resv_get_fences_rcu(struct dma_resv *obj, - struct dma_fence **pfence_excl, - unsigned *pshared_count, - struct dma_fence ***pshared) +struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor) { - struct dma_fence **shared = NULL; - struct dma_fence *fence_excl; - unsigned int shared_count; - int ret = 1; + struct dma_fence *fence; - do { - struct dma_resv_list *fobj; - unsigned int i, seq; - size_t sz = 0; + dma_resv_assert_held(cursor->obj); - shared_count = i = 0; + cursor->is_restarted = false; - rcu_read_lock(); - seq = read_seqcount_begin(&obj->seq); + do { + if (!cursor->fences || + cursor->index >= cursor->fences->num_fences) + return NULL; - fence_excl = rcu_dereference(obj->fence_excl); - if (fence_excl && !dma_fence_get_rcu(fence_excl)) - goto unlock; + dma_resv_list_entry(cursor->fences, cursor->index++, + cursor->obj, &fence, &cursor->fence_usage); + } while (cursor->fence_usage > cursor->usage); - fobj = rcu_dereference(obj->fence); - if (fobj) - sz += sizeof(*shared) * fobj->shared_max; + return fence; +} +EXPORT_SYMBOL_GPL(dma_resv_iter_next); - if (!pfence_excl && fence_excl) - sz += sizeof(*shared); +/** + * dma_resv_copy_fences - Copy all fences from src to dst. + * @dst: the destination reservation object + * @src: the source reservation object + * + * Copy all fences from src to dst. dst-lock must be held. + */ +int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) +{ + struct dma_resv_iter cursor; + struct dma_resv_list *list; + struct dma_fence *f; - if (sz) { - struct dma_fence **nshared; + dma_resv_assert_held(dst); - nshared = krealloc(shared, sz, - GFP_NOWAIT | __GFP_NOWARN); - if (!nshared) { - rcu_read_unlock(); + list = NULL; - dma_fence_put(fence_excl); - fence_excl = NULL; + dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP); + dma_resv_for_each_fence_unlocked(&cursor, f) { - nshared = krealloc(shared, sz, GFP_KERNEL); - if (nshared) { - shared = nshared; - continue; - } + if (dma_resv_iter_is_restarted(&cursor)) { + dma_resv_list_free(list); - ret = -ENOMEM; - break; - } - shared = nshared; - shared_count = fobj ? 
fobj->shared_count : 0; - for (i = 0; i < shared_count; ++i) { - shared[i] = rcu_dereference(fobj->shared[i]); - if (!dma_fence_get_rcu(shared[i])) - break; + list = dma_resv_list_alloc(cursor.num_fences); + if (!list) { + dma_resv_iter_end(&cursor); + return -ENOMEM; } + list->num_fences = 0; } - if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) { - while (i--) - dma_fence_put(shared[i]); - dma_fence_put(fence_excl); - goto unlock; - } - - ret = 0; -unlock: - rcu_read_unlock(); - } while (ret); - - if (pfence_excl) - *pfence_excl = fence_excl; - else if (fence_excl) - shared[shared_count++] = fence_excl; - - if (!shared_count) { - kfree(shared); - shared = NULL; + dma_fence_get(f); + dma_resv_list_set(list, list->num_fences++, f, + dma_resv_iter_usage(&cursor)); } + dma_resv_iter_end(&cursor); - *pshared_count = shared_count; - *pshared = shared; - return ret; + list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst)); + dma_resv_list_free(list); + return 0; } -EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu); +EXPORT_SYMBOL(dma_resv_copy_fences); /** - * dma_resv_wait_timeout_rcu - Wait on reservation's objects - * shared and/or exclusive fences. + * dma_resv_get_fences - Get an object's fences + * fences without update side lock held * @obj: the reservation object - * @wait_all: if true, wait on all fences, else wait on just exclusive fence - * @intr: if true, do interruptible wait - * @timeout: timeout value in jiffies or zero to return immediately + * @usage: controls which fences to include, see enum dma_resv_usage. + * @num_fences: the number of fences returned + * @fences: the array of fence ptrs returned (array is krealloc'd to the + * required size, and must be freed by caller) * - * RETURNS - * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or - * greater than zer on success. + * Retrieve all fences from the reservation object. + * Returns either zero or -ENOMEM. */ -long dma_resv_wait_timeout_rcu(struct dma_resv *obj, - bool wait_all, bool intr, - unsigned long timeout) +int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage, + unsigned int *num_fences, struct dma_fence ***fences) { + struct dma_resv_iter cursor; struct dma_fence *fence; - unsigned seq, shared_count; - long ret = timeout ? 
timeout : 1; - int i; -retry: - shared_count = 0; - seq = read_seqcount_begin(&obj->seq); - rcu_read_lock(); - i = -1; + *num_fences = 0; + *fences = NULL; - fence = rcu_dereference(obj->fence_excl); - if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { - if (!dma_fence_get_rcu(fence)) - goto unlock_retry; + dma_resv_iter_begin(&cursor, obj, usage); + dma_resv_for_each_fence_unlocked(&cursor, fence) { - if (dma_fence_is_signaled(fence)) { - dma_fence_put(fence); - fence = NULL; - } + if (dma_resv_iter_is_restarted(&cursor)) { + unsigned int count; - } else { - fence = NULL; - } + while (*num_fences) + dma_fence_put((*fences)[--(*num_fences)]); - if (wait_all) { - struct dma_resv_list *fobj = rcu_dereference(obj->fence); + count = cursor.num_fences + 1; - if (fobj) - shared_count = fobj->shared_count; + /* Eventually re-allocate the array */ + *fences = krealloc_array(*fences, count, + sizeof(void *), + GFP_KERNEL); + if (count && !*fences) { + dma_resv_iter_end(&cursor); + return -ENOMEM; + } + } - for (i = 0; !fence && i < shared_count; ++i) { - struct dma_fence *lfence = rcu_dereference(fobj->shared[i]); + (*fences)[(*num_fences)++] = dma_fence_get(fence); + } + dma_resv_iter_end(&cursor); - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &lfence->flags)) - continue; + return 0; +} +EXPORT_SYMBOL_GPL(dma_resv_get_fences); - if (!dma_fence_get_rcu(lfence)) - goto unlock_retry; +/** + * dma_resv_get_singleton - Get a single fence for all the fences + * @obj: the reservation object + * @usage: controls which fences to include, see enum dma_resv_usage. + * @fence: the resulting fence + * + * Get a single fence representing all the fences inside the resv object. + * Returns either 0 for success or -ENOMEM. + * + * Warning: This can't be used like this when adding the fence back to the resv + * object since that can lead to stack corruption when finalizing the + * dma_fence_array. + * + * Returns 0 on success and negative error values on failure. + */ +int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage, + struct dma_fence **fence) +{ + struct dma_fence_array *array; + struct dma_fence **fences; + unsigned count; + int r; - if (dma_fence_is_signaled(lfence)) { - dma_fence_put(lfence); - continue; - } + r = dma_resv_get_fences(obj, usage, &count, &fences); + if (r) + return r; - fence = lfence; - break; - } + if (count == 0) { + *fence = NULL; + return 0; } - rcu_read_unlock(); - if (fence) { - if (read_seqcount_retry(&obj->seq, seq)) { - dma_fence_put(fence); - goto retry; - } + if (count == 1) { + *fence = fences[0]; + kfree(fences); + return 0; + } - ret = dma_fence_wait_timeout(fence, intr, ret); - dma_fence_put(fence); - if (ret > 0 && wait_all && (i + 1 < shared_count)) - goto retry; + array = dma_fence_array_create(count, fences, + dma_fence_context_alloc(1), + 1, false); + if (!array) { + while (count--) + dma_fence_put(fences[count]); + kfree(fences); + return -ENOMEM; } - return ret; -unlock_retry: - rcu_read_unlock(); - goto retry; + *fence = &array->base; + return 0; } -EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu); - +EXPORT_SYMBOL_GPL(dma_resv_get_singleton); -static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) +/** + * dma_resv_wait_timeout - Wait on reservation's objects fences + * @obj: the reservation object + * @usage: controls which fences to include, see enum dma_resv_usage. 
+ * @intr: if true, do interruptible wait + * @timeout: timeout value in jiffies or zero to return immediately + * + * Callers are not required to hold specific locks, but maybe hold + * dma_resv_lock() already + * RETURNS + * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or + * greater than zer on success. + */ +long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, + bool intr, unsigned long timeout) { - struct dma_fence *fence, *lfence = passed_fence; - int ret = 1; + long ret = timeout ? timeout : 1; + struct dma_resv_iter cursor; + struct dma_fence *fence; - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { - fence = dma_fence_get_rcu(lfence); - if (!fence) - return -1; + dma_resv_iter_begin(&cursor, obj, usage); + dma_resv_for_each_fence_unlocked(&cursor, fence) { - ret = !!dma_fence_is_signaled(fence); - dma_fence_put(fence); + ret = dma_fence_wait_timeout(fence, intr, ret); + if (ret <= 0) { + dma_resv_iter_end(&cursor); + return ret; + } } + dma_resv_iter_end(&cursor); + return ret; } +EXPORT_SYMBOL_GPL(dma_resv_wait_timeout); + /** - * dma_resv_test_signaled_rcu - Test if a reservation object's - * fences have been signaled. + * dma_resv_test_signaled - Test if a reservation object's fences have been + * signaled. * @obj: the reservation object - * @test_all: if true, test all fences, otherwise only test the exclusive - * fence + * @usage: controls which fences to include, see enum dma_resv_usage. + * + * Callers are not required to hold specific locks, but maybe hold + * dma_resv_lock() already. * * RETURNS - * true if all fences signaled, else false + * + * True if all fences signaled, else false. */ -bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all) +bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage) { - unsigned seq, shared_count; - int ret; - - rcu_read_lock(); -retry: - ret = true; - shared_count = 0; - seq = read_seqcount_begin(&obj->seq); - - if (test_all) { - unsigned i; + struct dma_resv_iter cursor; + struct dma_fence *fence; - struct dma_resv_list *fobj = rcu_dereference(obj->fence); + dma_resv_iter_begin(&cursor, obj, usage); + dma_resv_for_each_fence_unlocked(&cursor, fence) { + dma_resv_iter_end(&cursor); + return false; + } + dma_resv_iter_end(&cursor); + return true; +} +EXPORT_SYMBOL_GPL(dma_resv_test_signaled); - if (fobj) - shared_count = fobj->shared_count; +/** + * dma_resv_describe - Dump description of the resv object into seq_file + * @obj: the reservation object + * @seq: the seq_file to dump the description into + * + * Dump a textual description of the fences inside an dma_resv object into the + * seq_file. 
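Building on dma_resv_wait_timeout() and dma_resv_test_signaled() above, a driver preparing CPU access might wait for the attached fences as sketched here; the usage level and the 100 ms timeout are arbitrary example choices:

    #include <linux/dma-resv.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static int my_wait_for_idle(struct dma_resv *resv)
    {
            long ret;

            ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ, true,
                                        msecs_to_jiffies(100));
            if (ret == 0)
                    return -ETIMEDOUT;      /* wait timed out */

            return ret < 0 ? ret : 0;       /* interrupted or success */
    }

dma_resv_test_signaled(resv, DMA_RESV_USAGE_READ) is the non-blocking variant of the same check.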
+ */ +void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq) +{ + static const char *usage[] = { "kernel", "write", "read", "bookkeep" }; + struct dma_resv_iter cursor; + struct dma_fence *fence; - for (i = 0; i < shared_count; ++i) { - struct dma_fence *fence = rcu_dereference(fobj->shared[i]); + dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) { + seq_printf(seq, "\t%s fence:", + usage[dma_resv_iter_usage(&cursor)]); + dma_fence_describe(fence, seq); + } +} +EXPORT_SYMBOL_GPL(dma_resv_describe); - ret = dma_resv_test_signaled_single(fence); - if (ret < 0) - goto retry; - else if (!ret) - break; - } +#if IS_ENABLED(CONFIG_LOCKDEP) +static int __init dma_resv_lockdep(void) +{ + struct mm_struct *mm = mm_alloc(); + struct ww_acquire_ctx ctx; + struct dma_resv obj; + struct address_space mapping; + int ret; - if (read_seqcount_retry(&obj->seq, seq)) - goto retry; - } + if (!mm) + return -ENOMEM; - if (!shared_count) { - struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); + dma_resv_init(&obj); + address_space_init_once(&mapping); - if (fence_excl) { - ret = dma_resv_test_signaled_single(fence_excl); - if (ret < 0) - goto retry; + mmap_read_lock(mm); + ww_acquire_init(&ctx, &reservation_ww_class); + ret = dma_resv_lock(&obj, &ctx); + if (ret == -EDEADLK) + dma_resv_lock_slow(&obj, &ctx); + fs_reclaim_acquire(GFP_KERNEL); + /* for unmap_mapping_range on trylocked buffer objects in shrinkers */ + i_mmap_lock_write(&mapping); + i_mmap_unlock_write(&mapping); +#ifdef CONFIG_MMU_NOTIFIER + lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); + __dma_fence_might_wait(); + lock_map_release(&__mmu_notifier_invalidate_range_start_map); +#else + __dma_fence_might_wait(); +#endif + fs_reclaim_release(GFP_KERNEL); + ww_mutex_unlock(&obj.lock); + ww_acquire_fini(&ctx); + mmap_read_unlock(mm); - if (read_seqcount_retry(&obj->seq, seq)) - goto retry; - } - } + mmput(mm); - rcu_read_unlock(); - return ret; + return 0; } -EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu); +subsys_initcall(dma_resv_lockdep); +#endif diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile index 6e54cdec3da0..974467791032 100644 --- a/drivers/dma-buf/heaps/Makefile +++ b/drivers/dma-buf/heaps/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += heap-helpers.o obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index 626cf7fd033a..28fb04eccdd0 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -2,76 +2,311 @@ /* * DMABUF CMA heap exporter * - * Copyright (C) 2012, 2019 Linaro Ltd. + * Copyright (C) 2012, 2019, 2020 Linaro Ltd. * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson. + * + * Also utilizing parts of Andrew Davis' SRAM heap: + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. 
Davis <afd@ti.com> */ - #include <linux/cma.h> -#include <linux/device.h> #include <linux/dma-buf.h> #include <linux/dma-heap.h> -#include <linux/dma-contiguous.h> +#include <linux/dma-map-ops.h> #include <linux/err.h> -#include <linux/errno.h> #include <linux/highmem.h> +#include <linux/io.h> +#include <linux/mm.h> #include <linux/module.h> -#include <linux/slab.h> #include <linux/scatterlist.h> -#include <linux/sched/signal.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> -#include "heap-helpers.h" struct cma_heap { struct dma_heap *heap; struct cma *cma; }; -static void cma_heap_free(struct heap_helper_buffer *buffer) +struct cma_heap_buffer { + struct cma_heap *heap; + struct list_head attachments; + struct mutex lock; + unsigned long len; + struct page *cma_pages; + struct page **pages; + pgoff_t pagecount; + int vmap_cnt; + void *vaddr; +}; + +struct dma_heap_attachment { + struct device *dev; + struct sg_table table; + struct list_head list; + bool mapped; +}; + +static int cma_heap_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + int ret; + + a = kzalloc(sizeof(*a), GFP_KERNEL); + if (!a) + return -ENOMEM; + + ret = sg_alloc_table_from_pages(&a->table, buffer->pages, + buffer->pagecount, 0, + buffer->pagecount << PAGE_SHIFT, + GFP_KERNEL); + if (ret) { + kfree(a); + return ret; + } + + a->dev = attachment->dev; + INIT_LIST_HEAD(&a->list); + a->mapped = false; + + attachment->priv = a; + + mutex_lock(&buffer->lock); + list_add(&a->list, &buffer->attachments); + mutex_unlock(&buffer->lock); + + return 0; +} + +static void cma_heap_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a = attachment->priv; + + mutex_lock(&buffer->lock); + list_del(&a->list); + mutex_unlock(&buffer->lock); + + sg_free_table(&a->table); + kfree(a); +} + +static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + struct sg_table *table = &a->table; + int ret; + + ret = dma_map_sgtable(attachment->dev, table, direction, 0); + if (ret) + return ERR_PTR(-ENOMEM); + a->mapped = true; + return table; +} + +static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *table, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + + a->mapped = false; + dma_unmap_sgtable(attachment->dev, table, direction, 0); +} + +static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); + + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_cpu(a->dev, &a->table, direction); + } + mutex_unlock(&buffer->lock); + + return 0; +} + +static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + flush_kernel_vmap_range(buffer->vaddr, buffer->len); + + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + 
dma_sync_sgtable_for_device(a->dev, &a->table, direction); + } + mutex_unlock(&buffer->lock); + + return 0; +} + +static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct cma_heap_buffer *buffer = vma->vm_private_data; + + if (vmf->pgoff > buffer->pagecount) + return VM_FAULT_SIGBUS; + + vmf->page = buffer->pages[vmf->pgoff]; + get_page(vmf->page); + + return 0; +} + +static const struct vm_operations_struct dma_heap_vm_ops = { + .fault = cma_heap_vm_fault, +}; + +static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + + if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) + return -EINVAL; + + vma->vm_ops = &dma_heap_vm_ops; + vma->vm_private_data = buffer; + + return 0; +} + +static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer) +{ + void *vaddr; + + vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL); + if (!vaddr) + return ERR_PTR(-ENOMEM); + + return vaddr; +} + +static int cma_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + void *vaddr; + int ret = 0; + + mutex_lock(&buffer->lock); + if (buffer->vmap_cnt) { + buffer->vmap_cnt++; + iosys_map_set_vaddr(map, buffer->vaddr); + goto out; + } + + vaddr = cma_heap_do_vmap(buffer); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + goto out; + } + buffer->vaddr = vaddr; + buffer->vmap_cnt++; + iosys_map_set_vaddr(map, buffer->vaddr); +out: + mutex_unlock(&buffer->lock); + + return ret; +} + +static void cma_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) { - struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap); - unsigned long nr_pages = buffer->pagecount; - struct page *cma_pages = buffer->priv_virt; + struct cma_heap_buffer *buffer = dmabuf->priv; + + mutex_lock(&buffer->lock); + if (!--buffer->vmap_cnt) { + vunmap(buffer->vaddr); + buffer->vaddr = NULL; + } + mutex_unlock(&buffer->lock); + iosys_map_clear(map); +} + +static void cma_heap_dma_buf_release(struct dma_buf *dmabuf) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct cma_heap *cma_heap = buffer->heap; + + if (buffer->vmap_cnt > 0) { + WARN(1, "%s: buffer still mapped in the kernel\n", __func__); + vunmap(buffer->vaddr); + buffer->vaddr = NULL; + } /* free page list */ kfree(buffer->pages); /* release memory */ - cma_release(cma_heap->cma, cma_pages, nr_pages); + cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount); kfree(buffer); } -/* dmabuf heap CMA operations functions */ -static int cma_heap_allocate(struct dma_heap *heap, - unsigned long len, - unsigned long fd_flags, - unsigned long heap_flags) +static const struct dma_buf_ops cma_heap_buf_ops = { + .attach = cma_heap_attach, + .detach = cma_heap_detach, + .map_dma_buf = cma_heap_map_dma_buf, + .unmap_dma_buf = cma_heap_unmap_dma_buf, + .begin_cpu_access = cma_heap_dma_buf_begin_cpu_access, + .end_cpu_access = cma_heap_dma_buf_end_cpu_access, + .mmap = cma_heap_mmap, + .vmap = cma_heap_vmap, + .vunmap = cma_heap_vunmap, + .release = cma_heap_dma_buf_release, +}; + +static struct dma_buf *cma_heap_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) { struct cma_heap *cma_heap = dma_heap_get_drvdata(heap); - struct heap_helper_buffer *helper_buffer; - struct page *cma_pages; + struct cma_heap_buffer *buffer; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); size_t size = PAGE_ALIGN(len); - unsigned long nr_pages = size >> 
PAGE_SHIFT; + pgoff_t pagecount = size >> PAGE_SHIFT; unsigned long align = get_order(size); + struct page *cma_pages; struct dma_buf *dmabuf; int ret = -ENOMEM; pgoff_t pg; - if (align > CONFIG_CMA_ALIGNMENT) - align = CONFIG_CMA_ALIGNMENT; + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) + return ERR_PTR(-ENOMEM); - helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL); - if (!helper_buffer) - return -ENOMEM; + INIT_LIST_HEAD(&buffer->attachments); + mutex_init(&buffer->lock); + buffer->len = size; - init_heap_helper_buffer(helper_buffer, cma_heap_free); - helper_buffer->heap = heap; - helper_buffer->size = len; + if (align > CONFIG_CMA_ALIGNMENT) + align = CONFIG_CMA_ALIGNMENT; - cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false); + cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false); if (!cma_pages) - goto free_buf; + goto free_buffer; + /* Clear the cma pages */ if (PageHighMem(cma_pages)) { - unsigned long nr_clear_pages = nr_pages; + unsigned long nr_clear_pages = pagecount; struct page *page = cma_pages; while (nr_clear_pages > 0) { @@ -85,7 +320,6 @@ static int cma_heap_allocate(struct dma_heap *heap, */ if (fatal_signal_pending(current)) goto free_cma; - page++; nr_clear_pages--; } @@ -93,44 +327,40 @@ static int cma_heap_allocate(struct dma_heap *heap, memset(page_address(cma_pages), 0, size); } - helper_buffer->pagecount = nr_pages; - helper_buffer->pages = kmalloc_array(helper_buffer->pagecount, - sizeof(*helper_buffer->pages), - GFP_KERNEL); - if (!helper_buffer->pages) { + buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL); + if (!buffer->pages) { ret = -ENOMEM; goto free_cma; } - for (pg = 0; pg < helper_buffer->pagecount; pg++) - helper_buffer->pages[pg] = &cma_pages[pg]; + for (pg = 0; pg < pagecount; pg++) + buffer->pages[pg] = &cma_pages[pg]; + + buffer->cma_pages = cma_pages; + buffer->heap = cma_heap; + buffer->pagecount = pagecount; /* create the dmabuf */ - dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags); + exp_info.exp_name = dma_heap_get_name(heap); + exp_info.ops = &cma_heap_buf_ops; + exp_info.size = buffer->len; + exp_info.flags = fd_flags; + exp_info.priv = buffer; + dmabuf = dma_buf_export(&exp_info); if (IS_ERR(dmabuf)) { ret = PTR_ERR(dmabuf); goto free_pages; } - - helper_buffer->dmabuf = dmabuf; - helper_buffer->priv_virt = cma_pages; - - ret = dma_buf_fd(dmabuf, fd_flags); - if (ret < 0) { - dma_buf_put(dmabuf); - /* just return, as put will call release and that will free */ - return ret; - } - - return ret; + return dmabuf; free_pages: - kfree(helper_buffer->pages); + kfree(buffer->pages); free_cma: - cma_release(cma_heap->cma, cma_pages, nr_pages); -free_buf: - kfree(helper_buffer); - return ret; + cma_release(cma_heap->cma, cma_pages, pagecount); +free_buffer: + kfree(buffer); + + return ERR_PTR(ret); } static const struct dma_heap_ops cma_heap_ops = { diff --git a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c deleted file mode 100644 index 9f964ca3f59c..000000000000 --- a/drivers/dma-buf/heaps/heap-helpers.c +++ /dev/null @@ -1,271 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <linux/device.h> -#include <linux/dma-buf.h> -#include <linux/err.h> -#include <linux/highmem.h> -#include <linux/idr.h> -#include <linux/list.h> -#include <linux/slab.h> -#include <linux/uaccess.h> -#include <linux/vmalloc.h> -#include <uapi/linux/dma-heap.h> - -#include "heap-helpers.h" - -void init_heap_helper_buffer(struct heap_helper_buffer *buffer, - void 
(*free)(struct heap_helper_buffer *)) -{ - buffer->priv_virt = NULL; - mutex_init(&buffer->lock); - buffer->vmap_cnt = 0; - buffer->vaddr = NULL; - buffer->pagecount = 0; - buffer->pages = NULL; - INIT_LIST_HEAD(&buffer->attachments); - buffer->free = free; -} - -struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer, - int fd_flags) -{ - DEFINE_DMA_BUF_EXPORT_INFO(exp_info); - - exp_info.ops = &heap_helper_ops; - exp_info.size = buffer->size; - exp_info.flags = fd_flags; - exp_info.priv = buffer; - - return dma_buf_export(&exp_info); -} - -static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer) -{ - void *vaddr; - - vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL); - if (!vaddr) - return ERR_PTR(-ENOMEM); - - return vaddr; -} - -static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer) -{ - if (buffer->vmap_cnt > 0) { - WARN(1, "%s: buffer still mapped in the kernel\n", __func__); - vunmap(buffer->vaddr); - } - - buffer->free(buffer); -} - -static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer) -{ - void *vaddr; - - if (buffer->vmap_cnt) { - buffer->vmap_cnt++; - return buffer->vaddr; - } - vaddr = dma_heap_map_kernel(buffer); - if (IS_ERR(vaddr)) - return vaddr; - buffer->vaddr = vaddr; - buffer->vmap_cnt++; - return vaddr; -} - -static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer) -{ - if (!--buffer->vmap_cnt) { - vunmap(buffer->vaddr); - buffer->vaddr = NULL; - } -} - -struct dma_heaps_attachment { - struct device *dev; - struct sg_table table; - struct list_head list; -}; - -static int dma_heap_attach(struct dma_buf *dmabuf, - struct dma_buf_attachment *attachment) -{ - struct dma_heaps_attachment *a; - struct heap_helper_buffer *buffer = dmabuf->priv; - int ret; - - a = kzalloc(sizeof(*a), GFP_KERNEL); - if (!a) - return -ENOMEM; - - ret = sg_alloc_table_from_pages(&a->table, buffer->pages, - buffer->pagecount, 0, - buffer->pagecount << PAGE_SHIFT, - GFP_KERNEL); - if (ret) { - kfree(a); - return ret; - } - - a->dev = attachment->dev; - INIT_LIST_HEAD(&a->list); - - attachment->priv = a; - - mutex_lock(&buffer->lock); - list_add(&a->list, &buffer->attachments); - mutex_unlock(&buffer->lock); - - return 0; -} - -static void dma_heap_detach(struct dma_buf *dmabuf, - struct dma_buf_attachment *attachment) -{ - struct dma_heaps_attachment *a = attachment->priv; - struct heap_helper_buffer *buffer = dmabuf->priv; - - mutex_lock(&buffer->lock); - list_del(&a->list); - mutex_unlock(&buffer->lock); - - sg_free_table(&a->table); - kfree(a); -} - -static -struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment, - enum dma_data_direction direction) -{ - struct dma_heaps_attachment *a = attachment->priv; - struct sg_table *table; - - table = &a->table; - - if (!dma_map_sg(attachment->dev, table->sgl, table->nents, - direction)) - table = ERR_PTR(-ENOMEM); - return table; -} - -static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, - struct sg_table *table, - enum dma_data_direction direction) -{ - dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction); -} - -static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct heap_helper_buffer *buffer = vma->vm_private_data; - - if (vmf->pgoff > buffer->pagecount) - return VM_FAULT_SIGBUS; - - vmf->page = buffer->pages[vmf->pgoff]; - get_page(vmf->page); - - return 0; -} - -static const struct vm_operations_struct dma_heap_vm_ops = { - .fault = 
dma_heap_vm_fault, -}; - -static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) -{ - struct heap_helper_buffer *buffer = dmabuf->priv; - - if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) - return -EINVAL; - - vma->vm_ops = &dma_heap_vm_ops; - vma->vm_private_data = buffer; - - return 0; -} - -static void dma_heap_dma_buf_release(struct dma_buf *dmabuf) -{ - struct heap_helper_buffer *buffer = dmabuf->priv; - - dma_heap_buffer_destroy(buffer); -} - -static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, - enum dma_data_direction direction) -{ - struct heap_helper_buffer *buffer = dmabuf->priv; - struct dma_heaps_attachment *a; - int ret = 0; - - mutex_lock(&buffer->lock); - - if (buffer->vmap_cnt) - invalidate_kernel_vmap_range(buffer->vaddr, buffer->size); - - list_for_each_entry(a, &buffer->attachments, list) { - dma_sync_sg_for_cpu(a->dev, a->table.sgl, a->table.nents, - direction); - } - mutex_unlock(&buffer->lock); - - return ret; -} - -static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, - enum dma_data_direction direction) -{ - struct heap_helper_buffer *buffer = dmabuf->priv; - struct dma_heaps_attachment *a; - - mutex_lock(&buffer->lock); - - if (buffer->vmap_cnt) - flush_kernel_vmap_range(buffer->vaddr, buffer->size); - - list_for_each_entry(a, &buffer->attachments, list) { - dma_sync_sg_for_device(a->dev, a->table.sgl, a->table.nents, - direction); - } - mutex_unlock(&buffer->lock); - - return 0; -} - -static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf) -{ - struct heap_helper_buffer *buffer = dmabuf->priv; - void *vaddr; - - mutex_lock(&buffer->lock); - vaddr = dma_heap_buffer_vmap_get(buffer); - mutex_unlock(&buffer->lock); - - return vaddr; -} - -static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) -{ - struct heap_helper_buffer *buffer = dmabuf->priv; - - mutex_lock(&buffer->lock); - dma_heap_buffer_vmap_put(buffer); - mutex_unlock(&buffer->lock); -} - -const struct dma_buf_ops heap_helper_ops = { - .map_dma_buf = dma_heap_map_dma_buf, - .unmap_dma_buf = dma_heap_unmap_dma_buf, - .mmap = dma_heap_mmap, - .release = dma_heap_dma_buf_release, - .attach = dma_heap_attach, - .detach = dma_heap_detach, - .begin_cpu_access = dma_heap_dma_buf_begin_cpu_access, - .end_cpu_access = dma_heap_dma_buf_end_cpu_access, - .vmap = dma_heap_dma_buf_vmap, - .vunmap = dma_heap_dma_buf_vunmap, -}; diff --git a/drivers/dma-buf/heaps/heap-helpers.h b/drivers/dma-buf/heaps/heap-helpers.h deleted file mode 100644 index 805d2df88024..000000000000 --- a/drivers/dma-buf/heaps/heap-helpers.h +++ /dev/null @@ -1,53 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * DMABUF Heaps helper code - * - * Copyright (C) 2011 Google, Inc. - * Copyright (C) 2019 Linaro Ltd. 
- */ - -#ifndef _HEAP_HELPERS_H -#define _HEAP_HELPERS_H - -#include <linux/dma-heap.h> -#include <linux/list.h> - -/** - * struct heap_helper_buffer - helper buffer metadata - * @heap: back pointer to the heap the buffer came from - * @dmabuf: backing dma-buf for this buffer - * @size: size of the buffer - * @priv_virt pointer to heap specific private value - * @lock mutext to protect the data in this structure - * @vmap_cnt count of vmap references on the buffer - * @vaddr vmap'ed virtual address - * @pagecount number of pages in the buffer - * @pages list of page pointers - * @attachments list of device attachments - * - * @free heap callback to free the buffer - */ -struct heap_helper_buffer { - struct dma_heap *heap; - struct dma_buf *dmabuf; - size_t size; - - void *priv_virt; - struct mutex lock; - int vmap_cnt; - void *vaddr; - pgoff_t pagecount; - struct page **pages; - struct list_head attachments; - - void (*free)(struct heap_helper_buffer *buffer); -}; - -void init_heap_helper_buffer(struct heap_helper_buffer *buffer, - void (*free)(struct heap_helper_buffer *)); - -struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer, - int fd_flags); - -extern const struct dma_buf_ops heap_helper_ops; -#endif /* _HEAP_HELPERS_H */ diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c index 0bf688e3c023..fcf836ba9c1f 100644 --- a/drivers/dma-buf/heaps/system_heap.c +++ b/drivers/dma-buf/heaps/system_heap.c @@ -3,7 +3,11 @@ * DMABUF System heap exporter * * Copyright (C) 2011 Google, Inc. - * Copyright (C) 2019 Linaro Ltd. + * Copyright (C) 2019, 2020 Linaro Ltd. + * + * Portions based off of Andrew Davis' SRAM heap: + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. Davis <afd@ti.com> */ #include <linux/dma-buf.h> @@ -15,89 +19,403 @@ #include <linux/module.h> #include <linux/scatterlist.h> #include <linux/slab.h> -#include <linux/sched/signal.h> -#include <asm/page.h> +#include <linux/vmalloc.h> + +static struct dma_heap *sys_heap; -#include "heap-helpers.h" +struct system_heap_buffer { + struct dma_heap *heap; + struct list_head attachments; + struct mutex lock; + unsigned long len; + struct sg_table sg_table; + int vmap_cnt; + void *vaddr; +}; -struct dma_heap *sys_heap; +struct dma_heap_attachment { + struct device *dev; + struct sg_table *table; + struct list_head list; + bool mapped; +}; + +#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP) +#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN) +#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \ + | __GFP_NORETRY) & ~__GFP_RECLAIM) \ + | __GFP_COMP) +static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP}; +/* + * The selection of the orders used for allocation (1MB, 64K, 4K) is designed + * to match with the sizes often found in IOMMUs. Using order 4 pages instead + * of order 0 pages can significantly improve the performance of many IOMMUs + * by reducing TLB pressure and time spent updating page tables. 
+ */ +static const unsigned int orders[] = {8, 4, 0}; +#define NUM_ORDERS ARRAY_SIZE(orders) -static void system_heap_free(struct heap_helper_buffer *buffer) +static struct sg_table *dup_sg_table(struct sg_table *table) { - pgoff_t pg; + struct sg_table *new_table; + int ret, i; + struct scatterlist *sg, *new_sg; - for (pg = 0; pg < buffer->pagecount; pg++) - __free_page(buffer->pages[pg]); - kfree(buffer->pages); - kfree(buffer); + new_table = kzalloc(sizeof(*new_table), GFP_KERNEL); + if (!new_table) + return ERR_PTR(-ENOMEM); + + ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL); + if (ret) { + kfree(new_table); + return ERR_PTR(-ENOMEM); + } + + new_sg = new_table->sgl; + for_each_sgtable_sg(table, sg, i) { + sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset); + new_sg = sg_next(new_sg); + } + + return new_table; } -static int system_heap_allocate(struct dma_heap *heap, - unsigned long len, - unsigned long fd_flags, - unsigned long heap_flags) +static int system_heap_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) { - struct heap_helper_buffer *helper_buffer; - struct dma_buf *dmabuf; - int ret = -ENOMEM; - pgoff_t pg; + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + struct sg_table *table; - helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL); - if (!helper_buffer) + a = kzalloc(sizeof(*a), GFP_KERNEL); + if (!a) return -ENOMEM; - init_heap_helper_buffer(helper_buffer, system_heap_free); - helper_buffer->heap = heap; - helper_buffer->size = len; + table = dup_sg_table(&buffer->sg_table); + if (IS_ERR(table)) { + kfree(a); + return -ENOMEM; + } + + a->table = table; + a->dev = attachment->dev; + INIT_LIST_HEAD(&a->list); + a->mapped = false; + + attachment->priv = a; + + mutex_lock(&buffer->lock); + list_add(&a->list, &buffer->attachments); + mutex_unlock(&buffer->lock); + + return 0; +} + +static void system_heap_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a = attachment->priv; + + mutex_lock(&buffer->lock); + list_del(&a->list); + mutex_unlock(&buffer->lock); + + sg_free_table(a->table); + kfree(a->table); + kfree(a); +} + +static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + struct sg_table *table = a->table; + int ret; + + ret = dma_map_sgtable(attachment->dev, table, direction, 0); + if (ret) + return ERR_PTR(ret); + + a->mapped = true; + return table; +} + +static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *table, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + + a->mapped = false; + dma_unmap_sgtable(attachment->dev, table, direction, 0); +} + +static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); + + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_cpu(a->dev, a->table, direction); + } + mutex_unlock(&buffer->lock); + + return 0; +} + +static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct 
system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + flush_kernel_vmap_range(buffer->vaddr, buffer->len); + + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_device(a->dev, a->table, direction); + } + mutex_unlock(&buffer->lock); - helper_buffer->pagecount = len / PAGE_SIZE; - helper_buffer->pages = kmalloc_array(helper_buffer->pagecount, - sizeof(*helper_buffer->pages), - GFP_KERNEL); - if (!helper_buffer->pages) { - ret = -ENOMEM; - goto err0; + return 0; +} + +static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct sg_table *table = &buffer->sg_table; + unsigned long addr = vma->vm_start; + struct sg_page_iter piter; + int ret; + + for_each_sgtable_page(table, &piter, vma->vm_pgoff) { + struct page *page = sg_page_iter_page(&piter); + + ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, + vma->vm_page_prot); + if (ret) + return ret; + addr += PAGE_SIZE; + if (addr >= vma->vm_end) + return 0; } + return 0; +} - for (pg = 0; pg < helper_buffer->pagecount; pg++) { +static void *system_heap_do_vmap(struct system_heap_buffer *buffer) +{ + struct sg_table *table = &buffer->sg_table; + int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE; + struct page **pages = vmalloc(sizeof(struct page *) * npages); + struct page **tmp = pages; + struct sg_page_iter piter; + void *vaddr; + + if (!pages) + return ERR_PTR(-ENOMEM); + + for_each_sgtable_page(table, &piter, 0) { + WARN_ON(tmp - pages >= npages); + *tmp++ = sg_page_iter_page(&piter); + } + + vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL); + vfree(pages); + + if (!vaddr) + return ERR_PTR(-ENOMEM); + + return vaddr; +} + +static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + void *vaddr; + int ret = 0; + + mutex_lock(&buffer->lock); + if (buffer->vmap_cnt) { + buffer->vmap_cnt++; + iosys_map_set_vaddr(map, buffer->vaddr); + goto out; + } + + vaddr = system_heap_do_vmap(buffer); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + goto out; + } + + buffer->vaddr = vaddr; + buffer->vmap_cnt++; + iosys_map_set_vaddr(map, buffer->vaddr); +out: + mutex_unlock(&buffer->lock); + + return ret; +} + +static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + + mutex_lock(&buffer->lock); + if (!--buffer->vmap_cnt) { + vunmap(buffer->vaddr); + buffer->vaddr = NULL; + } + mutex_unlock(&buffer->lock); + iosys_map_clear(map); +} + +static void system_heap_dma_buf_release(struct dma_buf *dmabuf) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct sg_table *table; + struct scatterlist *sg; + int i; + + table = &buffer->sg_table; + for_each_sgtable_sg(table, sg, i) { + struct page *page = sg_page(sg); + + __free_pages(page, compound_order(page)); + } + sg_free_table(table); + kfree(buffer); +} + +static const struct dma_buf_ops system_heap_buf_ops = { + .attach = system_heap_attach, + .detach = system_heap_detach, + .map_dma_buf = system_heap_map_dma_buf, + .unmap_dma_buf = system_heap_unmap_dma_buf, + .begin_cpu_access = system_heap_dma_buf_begin_cpu_access, + .end_cpu_access = system_heap_dma_buf_end_cpu_access, + .mmap = system_heap_mmap, + .vmap = system_heap_vmap, + .vunmap = system_heap_vunmap, + .release = system_heap_dma_buf_release, +}; + +static struct page 
*alloc_largest_available(unsigned long size, + unsigned int max_order) +{ + struct page *page; + int i; + + for (i = 0; i < NUM_ORDERS; i++) { + if (size < (PAGE_SIZE << orders[i])) + continue; + if (max_order < orders[i]) + continue; + + page = alloc_pages(order_flags[i], orders[i]); + if (!page) + continue; + return page; + } + return NULL; +} + +static struct dma_buf *system_heap_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) +{ + struct system_heap_buffer *buffer; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + unsigned long size_remaining = len; + unsigned int max_order = orders[0]; + struct dma_buf *dmabuf; + struct sg_table *table; + struct scatterlist *sg; + struct list_head pages; + struct page *page, *tmp_page; + int i, ret = -ENOMEM; + + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&buffer->attachments); + mutex_init(&buffer->lock); + buffer->heap = heap; + buffer->len = len; + + INIT_LIST_HEAD(&pages); + i = 0; + while (size_remaining > 0) { /* * Avoid trying to allocate memory if the process - * has been killed by by SIGKILL + * has been killed by SIGKILL */ - if (fatal_signal_pending(current)) - goto err1; + if (fatal_signal_pending(current)) { + ret = -EINTR; + goto free_buffer; + } - helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!helper_buffer->pages[pg]) - goto err1; + page = alloc_largest_available(size_remaining, max_order); + if (!page) + goto free_buffer; + + list_add_tail(&page->lru, &pages); + size_remaining -= page_size(page); + max_order = compound_order(page); + i++; + } + + table = &buffer->sg_table; + if (sg_alloc_table(table, i, GFP_KERNEL)) + goto free_buffer; + + sg = table->sgl; + list_for_each_entry_safe(page, tmp_page, &pages, lru) { + sg_set_page(sg, page, page_size(page), 0); + sg = sg_next(sg); + list_del(&page->lru); } /* create the dmabuf */ - dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags); + exp_info.exp_name = dma_heap_get_name(heap); + exp_info.ops = &system_heap_buf_ops; + exp_info.size = buffer->len; + exp_info.flags = fd_flags; + exp_info.priv = buffer; + dmabuf = dma_buf_export(&exp_info); if (IS_ERR(dmabuf)) { ret = PTR_ERR(dmabuf); - goto err1; + goto free_pages; } + return dmabuf; - helper_buffer->dmabuf = dmabuf; +free_pages: + for_each_sgtable_sg(table, sg, i) { + struct page *p = sg_page(sg); - ret = dma_buf_fd(dmabuf, fd_flags); - if (ret < 0) { - dma_buf_put(dmabuf); - /* just return, as put will call release and that will free */ - return ret; + __free_pages(p, compound_order(p)); } + sg_free_table(table); +free_buffer: + list_for_each_entry_safe(page, tmp_page, &pages, lru) + __free_pages(page, compound_order(page)); + kfree(buffer); - return ret; - -err1: - while (pg > 0) - __free_page(helper_buffer->pages[--pg]); - kfree(helper_buffer->pages); -err0: - kfree(helper_buffer); - - return ret; + return ERR_PTR(ret); } static const struct dma_heap_ops system_heap_ops = { @@ -107,7 +425,6 @@ static const struct dma_heap_ops system_heap_ops = { static int system_heap_create(void) { struct dma_heap_export_info exp_info; - int ret = 0; exp_info.name = "system"; exp_info.ops = &system_heap_ops; @@ -115,9 +432,9 @@ static int system_heap_create(void) sys_heap = dma_heap_add(&exp_info); if (IS_ERR(sys_heap)) - ret = PTR_ERR(sys_heap); + return PTR_ERR(sys_heap); - return ret; + return 0; } module_init(system_heap_create); MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma-buf/selftests.h 
b/drivers/dma-buf/selftests.h index 5320386f02e5..851965867d9c 100644 --- a/drivers/dma-buf/selftests.h +++ b/drivers/dma-buf/selftests.h @@ -5,9 +5,12 @@ * a module parameter. It must be unique and legal for a C identifier. * * The function should be of type int function(void). It may be conditionally - * compiled using #if IS_ENABLED(DRM_I915_SELFTEST). + * compiled using #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST). * * Tests are executed in order by igt/dmabuf_selftest */ selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */ selftest(dma_fence, dma_fence) +selftest(dma_fence_chain, dma_fence_chain) +selftest(dma_fence_unwrap, dma_fence_unwrap) +selftest(dma_resv, dma_resv) diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c deleted file mode 100644 index bfe14e94c488..000000000000 --- a/drivers/dma-buf/seqno-fence.c +++ /dev/null @@ -1,71 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * seqno-fence, using a dma-buf to synchronize fencing - * - * Copyright (C) 2012 Texas Instruments - * Copyright (C) 2012-2014 Canonical Ltd - * Authors: - * Rob Clark <robdclark@gmail.com> - * Maarten Lankhorst <maarten.lankhorst@canonical.com> - */ - -#include <linux/slab.h> -#include <linux/export.h> -#include <linux/seqno-fence.h> - -static const char *seqno_fence_get_driver_name(struct dma_fence *fence) -{ - struct seqno_fence *seqno_fence = to_seqno_fence(fence); - - return seqno_fence->ops->get_driver_name(fence); -} - -static const char *seqno_fence_get_timeline_name(struct dma_fence *fence) -{ - struct seqno_fence *seqno_fence = to_seqno_fence(fence); - - return seqno_fence->ops->get_timeline_name(fence); -} - -static bool seqno_enable_signaling(struct dma_fence *fence) -{ - struct seqno_fence *seqno_fence = to_seqno_fence(fence); - - return seqno_fence->ops->enable_signaling(fence); -} - -static bool seqno_signaled(struct dma_fence *fence) -{ - struct seqno_fence *seqno_fence = to_seqno_fence(fence); - - return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence); -} - -static void seqno_release(struct dma_fence *fence) -{ - struct seqno_fence *f = to_seqno_fence(fence); - - dma_buf_put(f->sync_buf); - if (f->ops->release) - f->ops->release(fence); - else - dma_fence_free(&f->base); -} - -static signed long seqno_wait(struct dma_fence *fence, bool intr, - signed long timeout) -{ - struct seqno_fence *f = to_seqno_fence(fence); - - return f->ops->wait(fence, intr, timeout); -} - -const struct dma_fence_ops seqno_fence_ops = { - .get_driver_name = seqno_fence_get_driver_name, - .get_timeline_name = seqno_fence_get_timeline_name, - .enable_signaling = seqno_enable_signaling, - .signaled = seqno_signaled, - .wait = seqno_wait, - .release = seqno_release, -}; -EXPORT_SYMBOL(seqno_fence_ops); diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c new file mode 100644 index 000000000000..0a9b099d0518 --- /dev/null +++ b/drivers/dma-buf/st-dma-fence-chain.c @@ -0,0 +1,710 @@ +// SPDX-License-Identifier: MIT + +/* + * Copyright © 2019 Intel Corporation + */ + +#include <linux/delay.h> +#include <linux/dma-fence.h> +#include <linux/dma-fence-chain.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/mm.h> +#include <linux/sched/signal.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/random.h> + +#include "selftest.h" + +#define CHAIN_SZ (4 << 10) + +static struct kmem_cache *slab_fences; + +static inline struct mock_fence { + struct dma_fence base; + spinlock_t lock; +} 
*to_mock_fence(struct dma_fence *f) { + return container_of(f, struct mock_fence, base); +} + +static const char *mock_name(struct dma_fence *f) +{ + return "mock"; +} + +static void mock_fence_release(struct dma_fence *f) +{ + kmem_cache_free(slab_fences, to_mock_fence(f)); +} + +static const struct dma_fence_ops mock_ops = { + .get_driver_name = mock_name, + .get_timeline_name = mock_name, + .release = mock_fence_release, +}; + +static struct dma_fence *mock_fence(void) +{ + struct mock_fence *f; + + f = kmem_cache_alloc(slab_fences, GFP_KERNEL); + if (!f) + return NULL; + + spin_lock_init(&f->lock); + dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0); + + return &f->base; +} + +static struct dma_fence *mock_chain(struct dma_fence *prev, + struct dma_fence *fence, + u64 seqno) +{ + struct dma_fence_chain *f; + + f = dma_fence_chain_alloc(); + if (!f) + return NULL; + + dma_fence_chain_init(f, dma_fence_get(prev), dma_fence_get(fence), + seqno); + + return &f->base; +} + +static int sanitycheck(void *arg) +{ + struct dma_fence *f, *chain; + int err = 0; + + f = mock_fence(); + if (!f) + return -ENOMEM; + + chain = mock_chain(NULL, f, 1); + if (!chain) + err = -ENOMEM; + + dma_fence_enable_sw_signaling(chain); + + dma_fence_signal(f); + dma_fence_put(f); + + dma_fence_put(chain); + + return err; +} + +struct fence_chains { + unsigned int chain_length; + struct dma_fence **fences; + struct dma_fence **chains; + + struct dma_fence *tail; +}; + +static uint64_t seqno_inc(unsigned int i) +{ + return i + 1; +} + +static int fence_chains_init(struct fence_chains *fc, unsigned int count, + uint64_t (*seqno_fn)(unsigned int)) +{ + unsigned int i; + int err = 0; + + fc->chains = kvmalloc_array(count, sizeof(*fc->chains), + GFP_KERNEL | __GFP_ZERO); + if (!fc->chains) + return -ENOMEM; + + fc->fences = kvmalloc_array(count, sizeof(*fc->fences), + GFP_KERNEL | __GFP_ZERO); + if (!fc->fences) { + err = -ENOMEM; + goto err_chains; + } + + fc->tail = NULL; + for (i = 0; i < count; i++) { + fc->fences[i] = mock_fence(); + if (!fc->fences[i]) { + err = -ENOMEM; + goto unwind; + } + + fc->chains[i] = mock_chain(fc->tail, + fc->fences[i], + seqno_fn(i)); + if (!fc->chains[i]) { + err = -ENOMEM; + goto unwind; + } + + fc->tail = fc->chains[i]; + + dma_fence_enable_sw_signaling(fc->chains[i]); + } + + fc->chain_length = i; + return 0; + +unwind: + for (i = 0; i < count; i++) { + dma_fence_put(fc->fences[i]); + dma_fence_put(fc->chains[i]); + } + kvfree(fc->fences); +err_chains: + kvfree(fc->chains); + return err; +} + +static void fence_chains_fini(struct fence_chains *fc) +{ + unsigned int i; + + for (i = 0; i < fc->chain_length; i++) { + dma_fence_signal(fc->fences[i]); + dma_fence_put(fc->fences[i]); + } + kvfree(fc->fences); + + for (i = 0; i < fc->chain_length; i++) + dma_fence_put(fc->chains[i]); + kvfree(fc->chains); +} + +static int find_seqno(void *arg) +{ + struct fence_chains fc; + struct dma_fence *fence; + int err; + int i; + + err = fence_chains_init(&fc, 64, seqno_inc); + if (err) + return err; + + fence = dma_fence_get(fc.tail); + err = dma_fence_chain_find_seqno(&fence, 0); + dma_fence_put(fence); + if (err) { + pr_err("Reported %d for find_seqno(0)!\n", err); + goto err; + } + + for (i = 0; i < fc.chain_length; i++) { + fence = dma_fence_get(fc.tail); + err = dma_fence_chain_find_seqno(&fence, i + 1); + dma_fence_put(fence); + if (err) { + pr_err("Reported %d for find_seqno(%d:%d)!\n", + err, fc.chain_length + 1, i + 1); + goto err; + } + if (fence != fc.chains[i]) { + pr_err("Incorrect 
fence reported by find_seqno(%d:%d)\n", + fc.chain_length + 1, i + 1); + err = -EINVAL; + goto err; + } + + dma_fence_get(fence); + err = dma_fence_chain_find_seqno(&fence, i + 1); + dma_fence_put(fence); + if (err) { + pr_err("Error reported for finding self\n"); + goto err; + } + if (fence != fc.chains[i]) { + pr_err("Incorrect fence reported by find self\n"); + err = -EINVAL; + goto err; + } + + dma_fence_get(fence); + err = dma_fence_chain_find_seqno(&fence, i + 2); + dma_fence_put(fence); + if (!err) { + pr_err("Error not reported for future fence: find_seqno(%d:%d)!\n", + i + 1, i + 2); + err = -EINVAL; + goto err; + } + + dma_fence_get(fence); + err = dma_fence_chain_find_seqno(&fence, i); + dma_fence_put(fence); + if (err) { + pr_err("Error reported for previous fence!\n"); + goto err; + } + if (i > 0 && fence != fc.chains[i - 1]) { + pr_err("Incorrect fence reported by find_seqno(%d:%d)\n", + i + 1, i); + err = -EINVAL; + goto err; + } + } + +err: + fence_chains_fini(&fc); + return err; +} + +static int find_signaled(void *arg) +{ + struct fence_chains fc; + struct dma_fence *fence; + int err; + + err = fence_chains_init(&fc, 2, seqno_inc); + if (err) + return err; + + dma_fence_signal(fc.fences[0]); + + fence = dma_fence_get(fc.tail); + err = dma_fence_chain_find_seqno(&fence, 1); + dma_fence_put(fence); + if (err) { + pr_err("Reported %d for find_seqno()!\n", err); + goto err; + } + + if (fence && fence != fc.chains[0]) { + pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:1\n", + fence->seqno); + + dma_fence_get(fence); + err = dma_fence_chain_find_seqno(&fence, 1); + dma_fence_put(fence); + if (err) + pr_err("Reported %d for finding self!\n", err); + + err = -EINVAL; + } + +err: + fence_chains_fini(&fc); + return err; +} + +static int find_out_of_order(void *arg) +{ + struct fence_chains fc; + struct dma_fence *fence; + int err; + + err = fence_chains_init(&fc, 3, seqno_inc); + if (err) + return err; + + dma_fence_signal(fc.fences[1]); + + fence = dma_fence_get(fc.tail); + err = dma_fence_chain_find_seqno(&fence, 2); + dma_fence_put(fence); + if (err) { + pr_err("Reported %d for find_seqno()!\n", err); + goto err; + } + + /* + * We signaled the middle fence (2) of the 1-2-3 chain. The behavior + * of the dma-fence-chain is to make us wait for all the fences up to + * the point we want. Since fence 1 is still not signaled, this is what + * we should get as the fence to wait upon (fence 2 being garbage + * collected during the traversal of the chain). + */ + if (fence != fc.chains[0]) { + pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:2\n", + fence ? 
fence->seqno : 0); + + err = -EINVAL; + } + +err: + fence_chains_fini(&fc); + return err; +} + +static uint64_t seqno_inc2(unsigned int i) +{ + return 2 * i + 2; +} + +static int find_gap(void *arg) +{ + struct fence_chains fc; + struct dma_fence *fence; + int err; + int i; + + err = fence_chains_init(&fc, 64, seqno_inc2); + if (err) + return err; + + for (i = 0; i < fc.chain_length; i++) { + fence = dma_fence_get(fc.tail); + err = dma_fence_chain_find_seqno(&fence, 2 * i + 1); + dma_fence_put(fence); + if (err) { + pr_err("Reported %d for find_seqno(%d:%d)!\n", + err, fc.chain_length + 1, 2 * i + 1); + goto err; + } + if (fence != fc.chains[i]) { + pr_err("Incorrect fence.seqno:%lld reported by find_seqno(%d:%d)\n", + fence->seqno, + fc.chain_length + 1, + 2 * i + 1); + err = -EINVAL; + goto err; + } + + dma_fence_get(fence); + err = dma_fence_chain_find_seqno(&fence, 2 * i + 2); + dma_fence_put(fence); + if (err) { + pr_err("Error reported for finding self\n"); + goto err; + } + if (fence != fc.chains[i]) { + pr_err("Incorrect fence reported by find self\n"); + err = -EINVAL; + goto err; + } + } + +err: + fence_chains_fini(&fc); + return err; +} + +struct find_race { + struct fence_chains fc; + atomic_t children; +}; + +static int __find_race(void *arg) +{ + struct find_race *data = arg; + int err = 0; + + while (!kthread_should_stop()) { + struct dma_fence *fence = dma_fence_get(data->fc.tail); + int seqno; + + seqno = prandom_u32_max(data->fc.chain_length) + 1; + + err = dma_fence_chain_find_seqno(&fence, seqno); + if (err) { + pr_err("Failed to find fence seqno:%d\n", + seqno); + dma_fence_put(fence); + break; + } + if (!fence) + goto signal; + + /* + * We can only find ourselves if we are on fence we were + * looking for. + */ + if (fence->seqno == seqno) { + err = dma_fence_chain_find_seqno(&fence, seqno); + if (err) { + pr_err("Reported an invalid fence for find-self:%d\n", + seqno); + dma_fence_put(fence); + break; + } + } + + dma_fence_put(fence); + +signal: + seqno = prandom_u32_max(data->fc.chain_length - 1); + dma_fence_signal(data->fc.fences[seqno]); + cond_resched(); + } + + if (atomic_dec_and_test(&data->children)) + wake_up_var(&data->children); + return err; +} + +static int find_race(void *arg) +{ + struct find_race data; + int ncpus = num_online_cpus(); + struct task_struct **threads; + unsigned long count; + int err; + int i; + + err = fence_chains_init(&data.fc, CHAIN_SZ, seqno_inc); + if (err) + return err; + + threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL); + if (!threads) { + err = -ENOMEM; + goto err; + } + + atomic_set(&data.children, 0); + for (i = 0; i < ncpus; i++) { + threads[i] = kthread_run(__find_race, &data, "dmabuf/%d", i); + if (IS_ERR(threads[i])) { + ncpus = i; + break; + } + atomic_inc(&data.children); + get_task_struct(threads[i]); + } + + wait_var_event_timeout(&data.children, + !atomic_read(&data.children), + 5 * HZ); + + for (i = 0; i < ncpus; i++) { + int ret; + + ret = kthread_stop(threads[i]); + if (ret && !err) + err = ret; + put_task_struct(threads[i]); + } + kfree(threads); + + count = 0; + for (i = 0; i < data.fc.chain_length; i++) + if (dma_fence_is_signaled(data.fc.fences[i])) + count++; + pr_info("Completed %lu cycles\n", count); + +err: + fence_chains_fini(&data.fc); + return err; +} + +static int signal_forward(void *arg) +{ + struct fence_chains fc; + int err; + int i; + + err = fence_chains_init(&fc, 64, seqno_inc); + if (err) + return err; + + for (i = 0; i < fc.chain_length; i++) { + dma_fence_signal(fc.fences[i]); + 
+ if (!dma_fence_is_signaled(fc.chains[i])) { + pr_err("chain[%d] not signaled!\n", i); + err = -EINVAL; + goto err; + } + + if (i + 1 < fc.chain_length && + dma_fence_is_signaled(fc.chains[i + 1])) { + pr_err("chain[%d] is signaled!\n", i); + err = -EINVAL; + goto err; + } + } + +err: + fence_chains_fini(&fc); + return err; +} + +static int signal_backward(void *arg) +{ + struct fence_chains fc; + int err; + int i; + + err = fence_chains_init(&fc, 64, seqno_inc); + if (err) + return err; + + for (i = fc.chain_length; i--; ) { + dma_fence_signal(fc.fences[i]); + + if (i > 0 && dma_fence_is_signaled(fc.chains[i])) { + pr_err("chain[%d] is signaled!\n", i); + err = -EINVAL; + goto err; + } + } + + for (i = 0; i < fc.chain_length; i++) { + if (!dma_fence_is_signaled(fc.chains[i])) { + pr_err("chain[%d] was not signaled!\n", i); + err = -EINVAL; + goto err; + } + } + +err: + fence_chains_fini(&fc); + return err; +} + +static int __wait_fence_chains(void *arg) +{ + struct fence_chains *fc = arg; + + if (dma_fence_wait(fc->tail, false)) + return -EIO; + + return 0; +} + +static int wait_forward(void *arg) +{ + struct fence_chains fc; + struct task_struct *tsk; + int err; + int i; + + err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc); + if (err) + return err; + + tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait"); + if (IS_ERR(tsk)) { + err = PTR_ERR(tsk); + goto err; + } + get_task_struct(tsk); + yield_to(tsk, true); + + for (i = 0; i < fc.chain_length; i++) + dma_fence_signal(fc.fences[i]); + + err = kthread_stop(tsk); + put_task_struct(tsk); + +err: + fence_chains_fini(&fc); + return err; +} + +static int wait_backward(void *arg) +{ + struct fence_chains fc; + struct task_struct *tsk; + int err; + int i; + + err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc); + if (err) + return err; + + tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait"); + if (IS_ERR(tsk)) { + err = PTR_ERR(tsk); + goto err; + } + get_task_struct(tsk); + yield_to(tsk, true); + + for (i = fc.chain_length; i--; ) + dma_fence_signal(fc.fences[i]); + + err = kthread_stop(tsk); + put_task_struct(tsk); + +err: + fence_chains_fini(&fc); + return err; +} + +static void randomise_fences(struct fence_chains *fc) +{ + unsigned int count = fc->chain_length; + + /* Fisher-Yates shuffle courtesy of Knuth */ + while (--count) { + unsigned int swp; + + swp = prandom_u32_max(count + 1); + if (swp == count) + continue; + + swap(fc->fences[count], fc->fences[swp]); + } +} + +static int wait_random(void *arg) +{ + struct fence_chains fc; + struct task_struct *tsk; + int err; + int i; + + err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc); + if (err) + return err; + + randomise_fences(&fc); + + tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait"); + if (IS_ERR(tsk)) { + err = PTR_ERR(tsk); + goto err; + } + get_task_struct(tsk); + yield_to(tsk, true); + + for (i = 0; i < fc.chain_length; i++) + dma_fence_signal(fc.fences[i]); + + err = kthread_stop(tsk); + put_task_struct(tsk); + +err: + fence_chains_fini(&fc); + return err; +} + +int dma_fence_chain(void) +{ + static const struct subtest tests[] = { + SUBTEST(sanitycheck), + SUBTEST(find_seqno), + SUBTEST(find_signaled), + SUBTEST(find_out_of_order), + SUBTEST(find_gap), + SUBTEST(find_race), + SUBTEST(signal_forward), + SUBTEST(signal_backward), + SUBTEST(wait_forward), + SUBTEST(wait_backward), + SUBTEST(wait_random), + }; + int ret; + + pr_info("sizeof(dma_fence_chain)=%zu\n", + sizeof(struct dma_fence_chain)); + + slab_fences = KMEM_CACHE(mock_fence, + 
SLAB_TYPESAFE_BY_RCU | + SLAB_HWCACHE_ALIGN); + if (!slab_fences) + return -ENOMEM; + + ret = subtests(tests, NULL); + + kmem_cache_destroy(slab_fences); + return ret; +} diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c new file mode 100644 index 000000000000..f0cee984b6c7 --- /dev/null +++ b/drivers/dma-buf/st-dma-fence-unwrap.c @@ -0,0 +1,382 @@ +// SPDX-License-Identifier: MIT + +/* + * Copyright (C) 2022 Advanced Micro Devices, Inc. + */ + +#include <linux/dma-fence.h> +#include <linux/dma-fence-array.h> +#include <linux/dma-fence-chain.h> +#include <linux/dma-fence-unwrap.h> + +#include "selftest.h" + +#define CHAIN_SZ (4 << 10) + +struct mock_fence { + struct dma_fence base; + spinlock_t lock; +}; + +static const char *mock_name(struct dma_fence *f) +{ + return "mock"; +} + +static const struct dma_fence_ops mock_ops = { + .get_driver_name = mock_name, + .get_timeline_name = mock_name, +}; + +static struct dma_fence *mock_fence(void) +{ + struct mock_fence *f; + + f = kmalloc(sizeof(*f), GFP_KERNEL); + if (!f) + return NULL; + + spin_lock_init(&f->lock); + dma_fence_init(&f->base, &mock_ops, &f->lock, + dma_fence_context_alloc(1), 1); + + return &f->base; +} + +static struct dma_fence *mock_array(unsigned int num_fences, ...) +{ + struct dma_fence_array *array; + struct dma_fence **fences; + va_list valist; + int i; + + fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); + if (!fences) + goto error_put; + + va_start(valist, num_fences); + for (i = 0; i < num_fences; ++i) + fences[i] = va_arg(valist, typeof(*fences)); + va_end(valist); + + array = dma_fence_array_create(num_fences, fences, + dma_fence_context_alloc(1), + 1, false); + if (!array) + goto error_free; + return &array->base; + +error_free: + kfree(fences); + +error_put: + va_start(valist, num_fences); + for (i = 0; i < num_fences; ++i) + dma_fence_put(va_arg(valist, typeof(*fences))); + va_end(valist); + return NULL; +} + +static struct dma_fence *mock_chain(struct dma_fence *prev, + struct dma_fence *fence) +{ + struct dma_fence_chain *f; + + f = dma_fence_chain_alloc(); + if (!f) { + dma_fence_put(prev); + dma_fence_put(fence); + return NULL; + } + + dma_fence_chain_init(f, prev, fence, 1); + return &f->base; +} + +static int sanitycheck(void *arg) +{ + struct dma_fence *f, *chain, *array; + int err = 0; + + f = mock_fence(); + if (!f) + return -ENOMEM; + + dma_fence_enable_sw_signaling(f); + + array = mock_array(1, f); + if (!array) + return -ENOMEM; + + chain = mock_chain(NULL, array); + if (!chain) + return -ENOMEM; + + dma_fence_put(chain); + return err; +} + +static int unwrap_array(void *arg) +{ + struct dma_fence *fence, *f1, *f2, *array; + struct dma_fence_unwrap iter; + int err = 0; + + f1 = mock_fence(); + if (!f1) + return -ENOMEM; + + dma_fence_enable_sw_signaling(f1); + + f2 = mock_fence(); + if (!f2) { + dma_fence_put(f1); + return -ENOMEM; + } + + dma_fence_enable_sw_signaling(f2); + + array = mock_array(2, f1, f2); + if (!array) + return -ENOMEM; + + dma_fence_unwrap_for_each(fence, &iter, array) { + if (fence == f1) { + f1 = NULL; + } else if (fence == f2) { + f2 = NULL; + } else { + pr_err("Unexpected fence!\n"); + err = -EINVAL; + } + } + + if (f1 || f2) { + pr_err("Not all fences seen!\n"); + err = -EINVAL; + } + + dma_fence_put(array); + return err; +} + +static int unwrap_chain(void *arg) +{ + struct dma_fence *fence, *f1, *f2, *chain; + struct dma_fence_unwrap iter; + int err = 0; + + f1 = mock_fence(); + if (!f1) + return -ENOMEM; + + 
dma_fence_enable_sw_signaling(f1); + + f2 = mock_fence(); + if (!f2) { + dma_fence_put(f1); + return -ENOMEM; + } + + dma_fence_enable_sw_signaling(f2); + + chain = mock_chain(f1, f2); + if (!chain) + return -ENOMEM; + + dma_fence_unwrap_for_each(fence, &iter, chain) { + if (fence == f1) { + f1 = NULL; + } else if (fence == f2) { + f2 = NULL; + } else { + pr_err("Unexpected fence!\n"); + err = -EINVAL; + } + } + + if (f1 || f2) { + pr_err("Not all fences seen!\n"); + err = -EINVAL; + } + + dma_fence_put(chain); + return err; +} + +static int unwrap_chain_array(void *arg) +{ + struct dma_fence *fence, *f1, *f2, *array, *chain; + struct dma_fence_unwrap iter; + int err = 0; + + f1 = mock_fence(); + if (!f1) + return -ENOMEM; + + dma_fence_enable_sw_signaling(f1); + + f2 = mock_fence(); + if (!f2) { + dma_fence_put(f1); + return -ENOMEM; + } + + dma_fence_enable_sw_signaling(f2); + + array = mock_array(2, f1, f2); + if (!array) + return -ENOMEM; + + chain = mock_chain(NULL, array); + if (!chain) + return -ENOMEM; + + dma_fence_unwrap_for_each(fence, &iter, chain) { + if (fence == f1) { + f1 = NULL; + } else if (fence == f2) { + f2 = NULL; + } else { + pr_err("Unexpected fence!\n"); + err = -EINVAL; + } + } + + if (f1 || f2) { + pr_err("Not all fences seen!\n"); + err = -EINVAL; + } + + dma_fence_put(chain); + return err; +} + +static int unwrap_merge(void *arg) +{ + struct dma_fence *fence, *f1, *f2, *f3; + struct dma_fence_unwrap iter; + int err = 0; + + f1 = mock_fence(); + if (!f1) + return -ENOMEM; + + dma_fence_enable_sw_signaling(f1); + + f2 = mock_fence(); + if (!f2) { + err = -ENOMEM; + goto error_put_f1; + } + + dma_fence_enable_sw_signaling(f2); + + f3 = dma_fence_unwrap_merge(f1, f2); + if (!f3) { + err = -ENOMEM; + goto error_put_f2; + } + + dma_fence_unwrap_for_each(fence, &iter, f3) { + if (fence == f1) { + dma_fence_put(f1); + f1 = NULL; + } else if (fence == f2) { + dma_fence_put(f2); + f2 = NULL; + } else { + pr_err("Unexpected fence!\n"); + err = -EINVAL; + } + } + + if (f1 || f2) { + pr_err("Not all fences seen!\n"); + err = -EINVAL; + } + + dma_fence_put(f3); +error_put_f2: + dma_fence_put(f2); +error_put_f1: + dma_fence_put(f1); + return err; +} + +static int unwrap_merge_complex(void *arg) +{ + struct dma_fence *fence, *f1, *f2, *f3, *f4, *f5; + struct dma_fence_unwrap iter; + int err = -ENOMEM; + + f1 = mock_fence(); + if (!f1) + return -ENOMEM; + + dma_fence_enable_sw_signaling(f1); + + f2 = mock_fence(); + if (!f2) + goto error_put_f1; + + dma_fence_enable_sw_signaling(f2); + + f3 = dma_fence_unwrap_merge(f1, f2); + if (!f3) + goto error_put_f2; + + /* The resulting array has the fences in reverse */ + f4 = dma_fence_unwrap_merge(f2, f1); + if (!f4) + goto error_put_f3; + + /* Signaled fences should be filtered, the two arrays merged. 
*/ + f5 = dma_fence_unwrap_merge(f3, f4, dma_fence_get_stub()); + if (!f5) + goto error_put_f4; + + err = 0; + dma_fence_unwrap_for_each(fence, &iter, f5) { + if (fence == f1) { + dma_fence_put(f1); + f1 = NULL; + } else if (fence == f2) { + dma_fence_put(f2); + f2 = NULL; + } else { + pr_err("Unexpected fence!\n"); + err = -EINVAL; + } + } + + if (f1 || f2) { + pr_err("Not all fences seen!\n"); + err = -EINVAL; + } + + dma_fence_put(f5); +error_put_f4: + dma_fence_put(f4); +error_put_f3: + dma_fence_put(f3); +error_put_f2: + dma_fence_put(f2); +error_put_f1: + dma_fence_put(f1); + return err; +} + +int dma_fence_unwrap(void) +{ + static const struct subtest tests[] = { + SUBTEST(sanitycheck), + SUBTEST(unwrap_array), + SUBTEST(unwrap_chain), + SUBTEST(unwrap_chain_array), + SUBTEST(unwrap_merge), + SUBTEST(unwrap_merge_complex), + }; + + return subtests(tests, NULL); +} diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c index e593064341c8..fb6e0a6ae2c9 100644 --- a/drivers/dma-buf/st-dma-fence.c +++ b/drivers/dma-buf/st-dma-fence.c @@ -102,6 +102,8 @@ static int sanitycheck(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + dma_fence_signal(f); dma_fence_put(f); @@ -117,6 +119,8 @@ static int test_signaling(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + if (dma_fence_is_signaled(f)) { pr_err("Fence unexpectedly signaled on creation\n"); goto err_free; @@ -190,6 +194,8 @@ static int test_late_add_callback(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + dma_fence_signal(f); if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) { @@ -282,6 +288,8 @@ static int test_status(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + if (dma_fence_get_status(f)) { pr_err("Fence unexpectedly has signaled status on creation\n"); goto err_free; @@ -308,6 +316,8 @@ static int test_error(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + dma_fence_set_error(f, -EIO); if (dma_fence_get_status(f)) { @@ -337,6 +347,8 @@ static int test_wait(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + if (dma_fence_wait_timeout(f, false, 0) != -ETIME) { pr_err("Wait reported complete before being signaled\n"); goto err_free; @@ -379,6 +391,8 @@ static int test_wait_timeout(void *arg) if (!wt.f) return -ENOMEM; + dma_fence_enable_sw_signaling(wt.f); + if (dma_fence_wait_timeout(wt.f, false, 1) != -ETIME) { pr_err("Wait reported complete before being signaled\n"); goto err_free; @@ -458,6 +472,8 @@ static int thread_signal_callback(void *arg) break; } + dma_fence_enable_sw_signaling(f1); + rcu_assign_pointer(t->fences[t->id], f1); smp_wmb(); @@ -471,8 +487,11 @@ static int thread_signal_callback(void *arg) dma_fence_signal(f1); smp_store_mb(cb.seen, false); - if (!f2 || dma_fence_add_callback(f2, &cb.cb, simple_callback)) - miss++, cb.seen = true; + if (!f2 || + dma_fence_add_callback(f2, &cb.cb, simple_callback)) { + miss++; + cb.seen = true; + } if (!t->before) dma_fence_signal(f1); diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c new file mode 100644 index 000000000000..15dbea1462ed --- /dev/null +++ b/drivers/dma-buf/st-dma-resv.c @@ -0,0 +1,316 @@ +/* SPDX-License-Identifier: MIT */ + +/* +* Copyright © 2019 Intel Corporation +* Copyright © 2021 Advanced Micro Devices, Inc. 
+*/ + +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/dma-resv.h> + +#include "selftest.h" + +static struct spinlock fence_lock; + +static const char *fence_name(struct dma_fence *f) +{ + return "selftest"; +} + +static const struct dma_fence_ops fence_ops = { + .get_driver_name = fence_name, + .get_timeline_name = fence_name, +}; + +static struct dma_fence *alloc_fence(void) +{ + struct dma_fence *f; + + f = kmalloc(sizeof(*f), GFP_KERNEL); + if (!f) + return NULL; + + dma_fence_init(f, &fence_ops, &fence_lock, 0, 0); + return f; +} + +static int sanitycheck(void *arg) +{ + struct dma_resv resv; + struct dma_fence *f; + int r; + + f = alloc_fence(); + if (!f) + return -ENOMEM; + + dma_fence_enable_sw_signaling(f); + + dma_fence_signal(f); + dma_fence_put(f); + + dma_resv_init(&resv); + r = dma_resv_lock(&resv, NULL); + if (r) + pr_err("Resv locking failed\n"); + else + dma_resv_unlock(&resv); + dma_resv_fini(&resv); + return r; +} + +static int test_signaling(void *arg) +{ + enum dma_resv_usage usage = (unsigned long)arg; + struct dma_resv resv; + struct dma_fence *f; + int r; + + f = alloc_fence(); + if (!f) + return -ENOMEM; + + dma_fence_enable_sw_signaling(f); + + dma_resv_init(&resv); + r = dma_resv_lock(&resv, NULL); + if (r) { + pr_err("Resv locking failed\n"); + goto err_free; + } + + r = dma_resv_reserve_fences(&resv, 1); + if (r) { + pr_err("Resv shared slot allocation failed\n"); + goto err_unlock; + } + + dma_resv_add_fence(&resv, f, usage); + if (dma_resv_test_signaled(&resv, usage)) { + pr_err("Resv unexpectedly signaled\n"); + r = -EINVAL; + goto err_unlock; + } + dma_fence_signal(f); + if (!dma_resv_test_signaled(&resv, usage)) { + pr_err("Resv not reporting signaled\n"); + r = -EINVAL; + goto err_unlock; + } +err_unlock: + dma_resv_unlock(&resv); +err_free: + dma_resv_fini(&resv); + dma_fence_put(f); + return r; +} + +static int test_for_each(void *arg) +{ + enum dma_resv_usage usage = (unsigned long)arg; + struct dma_resv_iter cursor; + struct dma_fence *f, *fence; + struct dma_resv resv; + int r; + + f = alloc_fence(); + if (!f) + return -ENOMEM; + + dma_fence_enable_sw_signaling(f); + + dma_resv_init(&resv); + r = dma_resv_lock(&resv, NULL); + if (r) { + pr_err("Resv locking failed\n"); + goto err_free; + } + + r = dma_resv_reserve_fences(&resv, 1); + if (r) { + pr_err("Resv shared slot allocation failed\n"); + goto err_unlock; + } + + dma_resv_add_fence(&resv, f, usage); + + r = -ENOENT; + dma_resv_for_each_fence(&cursor, &resv, usage, fence) { + if (!r) { + pr_err("More than one fence found\n"); + r = -EINVAL; + goto err_unlock; + } + if (f != fence) { + pr_err("Unexpected fence\n"); + r = -EINVAL; + goto err_unlock; + } + if (dma_resv_iter_usage(&cursor) != usage) { + pr_err("Unexpected fence usage\n"); + r = -EINVAL; + goto err_unlock; + } + r = 0; + } + if (r) { + pr_err("No fence found\n"); + goto err_unlock; + } + dma_fence_signal(f); +err_unlock: + dma_resv_unlock(&resv); +err_free: + dma_resv_fini(&resv); + dma_fence_put(f); + return r; +} + +static int test_for_each_unlocked(void *arg) +{ + enum dma_resv_usage usage = (unsigned long)arg; + struct dma_resv_iter cursor; + struct dma_fence *f, *fence; + struct dma_resv resv; + int r; + + f = alloc_fence(); + if (!f) + return -ENOMEM; + + dma_fence_enable_sw_signaling(f); + + dma_resv_init(&resv); + r = dma_resv_lock(&resv, NULL); + if (r) { + pr_err("Resv locking failed\n"); + goto err_free; + } + + r = dma_resv_reserve_fences(&resv, 1); + if (r) { + pr_err("Resv shared slot allocation 
failed\n"); + dma_resv_unlock(&resv); + goto err_free; + } + + dma_resv_add_fence(&resv, f, usage); + dma_resv_unlock(&resv); + + r = -ENOENT; + dma_resv_iter_begin(&cursor, &resv, usage); + dma_resv_for_each_fence_unlocked(&cursor, fence) { + if (!r) { + pr_err("More than one fence found\n"); + r = -EINVAL; + goto err_iter_end; + } + if (!dma_resv_iter_is_restarted(&cursor)) { + pr_err("No restart flag\n"); + goto err_iter_end; + } + if (f != fence) { + pr_err("Unexpected fence\n"); + r = -EINVAL; + goto err_iter_end; + } + if (dma_resv_iter_usage(&cursor) != usage) { + pr_err("Unexpected fence usage\n"); + r = -EINVAL; + goto err_iter_end; + } + + /* We use r as state here */ + if (r == -ENOENT) { + r = -EINVAL; + /* That should trigger an restart */ + cursor.fences = (void*)~0; + } else if (r == -EINVAL) { + r = 0; + } + } + if (r) + pr_err("No fence found\n"); +err_iter_end: + dma_resv_iter_end(&cursor); + dma_fence_signal(f); +err_free: + dma_resv_fini(&resv); + dma_fence_put(f); + return r; +} + +static int test_get_fences(void *arg) +{ + enum dma_resv_usage usage = (unsigned long)arg; + struct dma_fence *f, **fences = NULL; + struct dma_resv resv; + int r, i; + + f = alloc_fence(); + if (!f) + return -ENOMEM; + + dma_fence_enable_sw_signaling(f); + + dma_resv_init(&resv); + r = dma_resv_lock(&resv, NULL); + if (r) { + pr_err("Resv locking failed\n"); + goto err_resv; + } + + r = dma_resv_reserve_fences(&resv, 1); + if (r) { + pr_err("Resv shared slot allocation failed\n"); + dma_resv_unlock(&resv); + goto err_resv; + } + + dma_resv_add_fence(&resv, f, usage); + dma_resv_unlock(&resv); + + r = dma_resv_get_fences(&resv, usage, &i, &fences); + if (r) { + pr_err("get_fences failed\n"); + goto err_free; + } + + if (i != 1 || fences[0] != f) { + pr_err("get_fences returned unexpected fence\n"); + goto err_free; + } + + dma_fence_signal(f); +err_free: + while (i--) + dma_fence_put(fences[i]); + kfree(fences); +err_resv: + dma_resv_fini(&resv); + dma_fence_put(f); + return r; +} + +int dma_resv(void) +{ + static const struct subtest tests[] = { + SUBTEST(sanitycheck), + SUBTEST(test_signaling), + SUBTEST(test_for_each), + SUBTEST(test_for_each_unlocked), + SUBTEST(test_get_fences), + }; + enum dma_resv_usage usage; + int r; + + spin_lock_init(&fence_lock); + for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_BOOKKEEP; + ++usage) { + r = subtests(tests, (void *)(unsigned long)usage); + if (r) + return r; + } + return 0; +} diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 5a5a1da01a00..af57799c86ce 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -5,6 +5,7 @@ * Copyright (C) 2012 Google, Inc. 
*/ +#include <linux/dma-fence-unwrap.h> #include <linux/export.h> #include <linux/file.h> #include <linux/fs.h> @@ -131,7 +132,7 @@ EXPORT_SYMBOL(sync_file_get_fence); char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len) { if (sync_file->user_name[0]) { - strlcpy(buf, sync_file->user_name, len); + strscpy(buf, sync_file->user_name, len); } else { struct dma_fence *fence = sync_file->fence; @@ -145,58 +146,6 @@ char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len) return buf; } -static int sync_file_set_fence(struct sync_file *sync_file, - struct dma_fence **fences, int num_fences) -{ - struct dma_fence_array *array; - - /* - * The reference for the fences in the new sync_file and held - * in add_fence() during the merge procedure, so for num_fences == 1 - * we already own a new reference to the fence. For num_fence > 1 - * we own the reference of the dma_fence_array creation. - */ - if (num_fences == 1) { - sync_file->fence = fences[0]; - kfree(fences); - } else { - array = dma_fence_array_create(num_fences, fences, - dma_fence_context_alloc(1), - 1, false); - if (!array) - return -ENOMEM; - - sync_file->fence = &array->base; - } - - return 0; -} - -static struct dma_fence **get_fences(struct sync_file *sync_file, - int *num_fences) -{ - if (dma_fence_is_array(sync_file->fence)) { - struct dma_fence_array *array = to_dma_fence_array(sync_file->fence); - - *num_fences = array->num_fences; - return array->fences; - } - - *num_fences = 1; - return &sync_file->fence; -} - -static void add_fence(struct dma_fence **fences, - int *i, struct dma_fence *fence) -{ - fences[*i] = fence; - - if (!dma_fence_is_signaled(fence)) { - dma_fence_get(fence); - (*i)++; - } -} - /** * sync_file_merge() - merge two sync_files * @name: name of new fence @@ -211,85 +160,20 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, struct sync_file *b) { struct sync_file *sync_file; - struct dma_fence **fences, **nfences, **a_fences, **b_fences; - int i, i_a, i_b, num_fences, a_num_fences, b_num_fences; + struct dma_fence *fence; sync_file = sync_file_alloc(); if (!sync_file) return NULL; - a_fences = get_fences(a, &a_num_fences); - b_fences = get_fences(b, &b_num_fences); - if (a_num_fences > INT_MAX - b_num_fences) - goto err; - - num_fences = a_num_fences + b_num_fences; - - fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); - if (!fences) - goto err; - - /* - * Assume sync_file a and b are both ordered and have no - * duplicates with the same context. - * - * If a sync_file can only be created with sync_file_merge - * and sync_file_create, this is a reasonable assumption. 
- */ - for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { - struct dma_fence *pt_a = a_fences[i_a]; - struct dma_fence *pt_b = b_fences[i_b]; - - if (pt_a->context < pt_b->context) { - add_fence(fences, &i, pt_a); - - i_a++; - } else if (pt_a->context > pt_b->context) { - add_fence(fences, &i, pt_b); - - i_b++; - } else { - if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno, - pt_a->ops)) - add_fence(fences, &i, pt_a); - else - add_fence(fences, &i, pt_b); - - i_a++; - i_b++; - } - } - - for (; i_a < a_num_fences; i_a++) - add_fence(fences, &i, a_fences[i_a]); - - for (; i_b < b_num_fences; i_b++) - add_fence(fences, &i, b_fences[i_b]); - - if (i == 0) - fences[i++] = dma_fence_get(a_fences[0]); - - if (num_fences > i) { - nfences = krealloc(fences, i * sizeof(*fences), - GFP_KERNEL); - if (!nfences) - goto err; - - fences = nfences; - } - - if (sync_file_set_fence(sync_file, fences, i) < 0) { - kfree(fences); - goto err; + fence = dma_fence_unwrap_merge(a->fence, b->fence); + if (!fence) { + fput(sync_file->file); + return NULL; } - - strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name)); + sync_file->fence = fence; + strscpy(sync_file->user_name, name, sizeof(sync_file->user_name)); return sync_file; - -err: - fput(sync_file->file); - return NULL; - } static int sync_file_release(struct inode *inode, struct file *file) @@ -378,9 +262,9 @@ err_put_fd: static int sync_fill_fence_info(struct dma_fence *fence, struct sync_fence_info *info) { - strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), + strscpy(info->obj_name, fence->ops->get_timeline_name(fence), sizeof(info->obj_name)); - strlcpy(info->driver_name, fence->ops->get_driver_name(fence), + strscpy(info->driver_name, fence->ops->get_driver_name(fence), sizeof(info->driver_name)); info->status = dma_fence_get_status(fence); @@ -398,11 +282,13 @@ static int sync_fill_fence_info(struct dma_fence *fence, static long sync_file_ioctl_fence_info(struct sync_file *sync_file, unsigned long arg) { - struct sync_file_info info; struct sync_fence_info *fence_info = NULL; - struct dma_fence **fences; + struct dma_fence_unwrap iter; + struct sync_file_info info; + unsigned int num_fences; + struct dma_fence *fence; + int ret; __u32 size; - int num_fences, ret, i; if (copy_from_user(&info, (void __user *)arg, sizeof(info))) return -EFAULT; @@ -410,7 +296,9 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, if (info.flags || info.pad) return -EINVAL; - fences = get_fences(sync_file, &num_fences); + num_fences = 0; + dma_fence_unwrap_for_each(fence, &iter, sync_file->fence) + ++num_fences; /* * Passing num_fences = 0 means that userspace doesn't want to @@ -433,8 +321,11 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, if (!fence_info) return -ENOMEM; - for (i = 0; i < num_fences; i++) { - int status = sync_fill_fence_info(fences[i], &fence_info[i]); + num_fences = 0; + dma_fence_unwrap_for_each(fence, &iter, sync_file->fence) { + int status; + + status = sync_fill_fence_info(fence, &fence_info[num_fences++]); info.status = info.status <= 0 ? 
info.status : status; } diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index acb26c627d27..2bcdb935a3ac 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -11,9 +11,15 @@ #include <linux/shmem_fs.h> #include <linux/slab.h> #include <linux/udmabuf.h> +#include <linux/hugetlb.h> -static const u32 list_limit = 1024; /* udmabuf_create_list->count limit */ -static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */ +static int list_limit = 1024; +module_param(list_limit, int, 0644); +MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024."); + +static int size_limit_mb = 64; +module_param(size_limit_mb, int, 0644); +MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64."); struct udmabuf { pgoff_t pagecount; @@ -26,8 +32,11 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct udmabuf *ubuf = vma->vm_private_data; + pgoff_t pgoff = vmf->pgoff; - vmf->page = ubuf->pages[vmf->pgoff]; + if (pgoff >= ubuf->pagecount) + return VM_FAULT_SIGBUS; + vmf->page = ubuf->pages[pgoff]; get_page(vmf->page); return 0; } @@ -63,10 +72,9 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf, GFP_KERNEL); if (ret < 0) goto err; - if (!dma_map_sg(dev, sg->sgl, sg->nents, direction)) { - ret = -EINVAL; + ret = dma_map_sgtable(dev, sg, direction, 0); + if (ret < 0) goto err; - } return sg; err: @@ -78,7 +86,7 @@ err: static void put_sg_table(struct device *dev, struct sg_table *sg, enum dma_data_direction direction) { - dma_unmap_sg(dev, sg->sgl, sg->nents, direction); + dma_unmap_sgtable(dev, sg, direction, 0); sg_free_table(sg); kfree(sg); } @@ -116,17 +124,20 @@ static int begin_cpu_udmabuf(struct dma_buf *buf, { struct udmabuf *ubuf = buf->priv; struct device *dev = ubuf->device->this_device; + int ret = 0; if (!ubuf->sg) { ubuf->sg = get_sg_table(dev, buf, direction); - if (IS_ERR(ubuf->sg)) - return PTR_ERR(ubuf->sg); + if (IS_ERR(ubuf->sg)) { + ret = PTR_ERR(ubuf->sg); + ubuf->sg = NULL; + } } else { dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents, direction); } - return 0; + return ret; } static int end_cpu_udmabuf(struct dma_buf *buf, @@ -161,10 +172,13 @@ static long udmabuf_create(struct miscdevice *device, { DEFINE_DMA_BUF_EXPORT_INFO(exp_info); struct file *memfd = NULL; + struct address_space *mapping = NULL; struct udmabuf *ubuf; struct dma_buf *buf; pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit; - struct page *page; + struct page *page, *hpage = NULL; + pgoff_t subpgoff, maxsubpgs; + struct hstate *hpstate; int seals, ret = -EINVAL; u32 i, flags; @@ -182,6 +196,10 @@ static long udmabuf_create(struct miscdevice *device, if (ubuf->pagecount > pglimit) goto err; } + + if (!ubuf->pagecount) + goto err; + ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages), GFP_KERNEL); if (!ubuf->pages) { @@ -195,7 +213,8 @@ static long udmabuf_create(struct miscdevice *device, memfd = fget(list[i].memfd); if (!memfd) goto err; - if (!shmem_mapping(file_inode(memfd)->i_mapping)) + mapping = memfd->f_mapping; + if (!shmem_mapping(mapping) && !is_file_hugepages(memfd)) goto err; seals = memfd_fcntl(memfd, F_GET_SEALS, 0); if (seals == -EINVAL) @@ -206,17 +225,48 @@ static long udmabuf_create(struct miscdevice *device, goto err; pgoff = list[i].offset >> PAGE_SHIFT; pgcnt = list[i].size >> PAGE_SHIFT; + if (is_file_hugepages(memfd)) { + hpstate = hstate_file(memfd); + pgoff = list[i].offset >> 
huge_page_shift(hpstate); + subpgoff = (list[i].offset & + ~huge_page_mask(hpstate)) >> PAGE_SHIFT; + maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT; + } for (pgidx = 0; pgidx < pgcnt; pgidx++) { - page = shmem_read_mapping_page( - file_inode(memfd)->i_mapping, pgoff + pgidx); - if (IS_ERR(page)) { - ret = PTR_ERR(page); - goto err; + if (is_file_hugepages(memfd)) { + if (!hpage) { + hpage = find_get_page_flags(mapping, pgoff, + FGP_ACCESSED); + if (!hpage) { + ret = -EINVAL; + goto err; + } + } + page = hpage + subpgoff; + get_page(page); + subpgoff++; + if (subpgoff == maxsubpgs) { + put_page(hpage); + hpage = NULL; + subpgoff = 0; + pgoff++; + } + } else { + page = shmem_read_mapping_page(mapping, + pgoff + pgidx); + if (IS_ERR(page)) { + ret = PTR_ERR(page); + goto err; + } } ubuf->pages[pgbuf++] = page; } fput(memfd); memfd = NULL; + if (hpage) { + put_page(hpage); + hpage = NULL; + } } exp_info.ops = &udmabuf_ops; @@ -308,6 +358,9 @@ static long udmabuf_ioctl(struct file *filp, unsigned int ioctl, static const struct file_operations udmabuf_fops = { .owner = THIS_MODULE, .unlocked_ioctl = udmabuf_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = udmabuf_ioctl, +#endif }; static struct miscdevice udmabuf_misc = { @@ -318,7 +371,23 @@ static struct miscdevice udmabuf_misc = { static int __init udmabuf_dev_init(void) { - return misc_register(&udmabuf_misc); + int ret; + + ret = misc_register(&udmabuf_misc); + if (ret < 0) { + pr_err("Could not initialize udmabuf device\n"); + return ret; + } + + ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device, + DMA_BIT_MASK(64)); + if (ret < 0) { + pr_err("Could not setup DMA mask for udmabuf device\n"); + misc_deregister(&udmabuf_misc); + return ret; + } + + return 0; } static void __exit udmabuf_dev_exit(void) |
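
A few illustrative notes on the interfaces exercised above; these are editor sketches under stated assumptions, not part of the patches themselves. First, the st-dma-resv.c selftests added above exercise both the locked and the unlocked reservation-object iterators. A minimal sketch of how a driver might wait on every fence attached to a dma_resv with the unlocked iterator, assuming DMA_RESV_USAGE_READ is the usage of interest (the function name is hypothetical, and in practice dma_resv_wait_timeout() already wraps this pattern):

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/sched.h>

static long example_wait_resv(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	long ret = 1;

	/*
	 * The unlocked iterator takes its own fence references and
	 * restarts by itself if the reservation object changes
	 * underneath it, as test_for_each_unlocked() above checks.
	 */
	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		ret = dma_fence_wait_timeout(fence, false,
					     MAX_SCHEDULE_TIMEOUT);
		if (ret < 0)
			break;
	}
	dma_resv_iter_end(&cursor);

	return ret < 0 ? ret : 0;
}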
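Second, the dma_fence_unwrap_merge()/dma_fence_unwrap_for_each() pair that the new sync_file_merge() and sync_file_ioctl_fence_info() rely on above is usable by any code that needs a flat view of a fence container. A hedged sketch, mirroring the counting loop in the ioctl path (the function and its counting policy are made up for illustration):

#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>

static unsigned int example_count_pending(struct dma_fence *a,
					  struct dma_fence *b)
{
	struct dma_fence_unwrap iter;
	struct dma_fence *merged, *f;
	unsigned int pending = 0;

	/*
	 * Flatten both inputs (plain fences, fence arrays or fence
	 * chains) into a single container; NULL means allocation
	 * failure, just as in sync_file_merge() above.
	 */
	merged = dma_fence_unwrap_merge(a, b);
	if (!merged)
		return 0;

	/* Walk every component fence of the merged result. */
	dma_fence_unwrap_for_each(f, &iter, merged)
		if (!dma_fence_is_signaled(f))
			pending++;

	dma_fence_put(merged);
	return pending;
}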
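Finally, on the udmabuf side, the list_limit/size_limit_mb module parameters and the hugetlb path above only matter to callers of the UDMABUF_CREATE ioctl. A rough userspace sketch of that call for a single memfd region, assuming page-aligned sizes; names and error handling are illustrative only:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

int example_memfd_to_dmabuf(size_t size)
{
	struct udmabuf_create create = { 0 };
	int memfd, devfd, buffd = -1;

	/* The driver requires a sealable memfd that cannot shrink. */
	memfd = memfd_create("udmabuf-example", MFD_ALLOW_SEALING);
	if (memfd < 0)
		return -1;
	if (ftruncate(memfd, size) < 0 ||
	    fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK) < 0)
		goto out_memfd;

	devfd = open("/dev/udmabuf", O_RDWR);
	if (devfd < 0)
		goto out_memfd;

	create.memfd  = memfd;
	create.flags  = UDMABUF_FLAGS_CLOEXEC;
	create.offset = 0;	/* must be page aligned */
	create.size   = size;	/* must be a multiple of PAGE_SIZE */

	/* Returns a new dma-buf fd backed by the memfd's pages. */
	buffd = ioctl(devfd, UDMABUF_CREATE, &create);

	close(devfd);
out_memfd:
	close(memfd);
	return buffd;
}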