From fd8be27e50e04f6e80af0f3e327cced525558256 Mon Sep 17 00:00:00 2001
From: Michal Suchanek
Date: Fri, 25 Feb 2022 21:51:35 +0100
Subject: efifb: Remove redundant efifb_setup_from_dmi stub

efifb is the only user of efifb_setup_from_dmi, which is provided by
sysfb, which is in turn selected by efifb. That makes the stub
redundant.

Signed-off-by: Michal Suchanek
Reviewed-by: Javier Martinez Canillas
Signed-off-by: Thomas Zimmermann
Link: https://patchwork.freedesktop.org/patch/msgid/7416c439d68e9e96068ea5c77e05c99c7df41750.1645822213.git.msuchanek@suse.de
---
 include/linux/efi.h | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/efi.h b/include/linux/efi.h
index ccd4d3f91c98..0cbbc4103632 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1329,10 +1329,6 @@ static inline struct efi_mokvar_table_entry *efi_mokvar_entry_find(
 }
 #endif

-#ifdef CONFIG_SYSFB
 extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
-#else
-static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt) { }
-#endif

 #endif /* _LINUX_EFI_H */
--
cgit v1.2.3-59-g8ed1b


From 548e7432dc2da475a18077b612e8d55b8ff51891 Mon Sep 17 00:00:00 2001
From: Christian König
Date: Fri, 24 Sep 2021 10:55:45 +0200
Subject: dma-buf: add dma_resv_replace_fences v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This function allows replacing fences in the shared fence list when we
can guarantee either that the operation represented by the original
fence has finished, or that the resources protected by the dma_resv
object are no longer accessed by the time the new fence finishes.

Then use this function in the amdkfd code when BOs are unmapped from
the process.

v2: add an example of when this is useful.

Signed-off-by: Christian König
Reviewed-by: Felix Kuehling
Reviewed-by: Daniel Vetter
Link: https://patchwork.freedesktop.org/patch/msgid/20220321135856.1331-1-christian.koenig@amd.com
---
 drivers/dma-buf/dma-resv.c                       | 45 ++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 49 ++++--------------------
 include/linux/dma-resv.h                         |  2 +
 3 files changed, 54 insertions(+), 42 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index b51416405e86..509060861cf3 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -289,6 +289,51 @@ replace:
 }
 EXPORT_SYMBOL(dma_resv_add_shared_fence);

+/**
+ * dma_resv_replace_fences - replace fences in the dma_resv obj
+ * @obj: the reservation object
+ * @context: the context of the fences to replace
+ * @replacement: the new fence to use instead
+ *
+ * Replace all fences that have a specified context with a new fence. Only
+ * valid if the operation represented by the original fence no longer has
+ * access to the resources represented by the dma_resv object when the new
+ * fence completes.
+ *
+ * An example of using this is replacing a preemption fence with a page table
+ * update fence which makes the resource inaccessible.
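+ *
+ * A minimal usage sketch (illustrative only; "ctx" and "pt_fence" are
+ * hypothetical names for the preemption fence context and the page table
+ * update fence):
+ *
+ *	dma_resv_lock(obj, NULL);
+ *	dma_resv_replace_fences(obj, ctx, pt_fence);
+ *	dma_resv_unlock(obj);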
+ */
+void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
+			     struct dma_fence *replacement)
+{
+	struct dma_resv_list *list;
+	struct dma_fence *old;
+	unsigned int i;
+
+	dma_resv_assert_held(obj);
+
+	write_seqcount_begin(&obj->seq);
+
+	old = dma_resv_excl_fence(obj);
+	if (old->context == context) {
+		RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement));
+		dma_fence_put(old);
+	}
+
+	list = dma_resv_shared_list(obj);
+	for (i = 0; list && i < list->shared_count; ++i) {
+		old = rcu_dereference_protected(list->shared[i],
+						dma_resv_held(obj));
+		if (old->context != context)
+			continue;
+
+		rcu_assign_pointer(list->shared[i], dma_fence_get(replacement));
+		dma_fence_put(old);
+	}
+
+	write_seqcount_end(&obj->seq);
+}
+EXPORT_SYMBOL(dma_resv_replace_fences);
+
 /**
  * dma_resv_add_excl_fence - Add an exclusive fence.
  * @obj: the reservation object
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index f9bab963a948..b6f266f612ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -253,53 +253,18 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 					struct amdgpu_amdkfd_fence *ef)
 {
-	struct dma_resv *resv = bo->tbo.base.resv;
-	struct dma_resv_list *old, *new;
-	unsigned int i, j, k;
+	struct dma_fence *replacement;

 	if (!ef)
 		return -EINVAL;

-	old = dma_resv_shared_list(resv);
-	if (!old)
-		return 0;
-
-	new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
-	if (!new)
-		return -ENOMEM;
-
-	/* Go through all the shared fences in the resevation object and sort
-	 * the interesting ones to the end of the list.
+	/* TODO: Instead of blocking before, we should use the fence of the
+	 * page table update and TLB flush here directly.
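+	 * Hypothetically, once such a fence is available (name illustrative):
+	 *
+	 *   dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
+	 *			    pt_update_fence);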
*/ - for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) { - struct dma_fence *f; - - f = rcu_dereference_protected(old->shared[i], - dma_resv_held(resv)); - - if (f->context == ef->base.context) - RCU_INIT_POINTER(new->shared[--j], f); - else - RCU_INIT_POINTER(new->shared[k++], f); - } - new->shared_max = old->shared_max; - new->shared_count = k; - - /* Install the new fence list, seqcount provides the barriers */ - write_seqcount_begin(&resv->seq); - RCU_INIT_POINTER(resv->fence, new); - write_seqcount_end(&resv->seq); - - /* Drop the references to the removed fences or move them to ef_list */ - for (i = j; i < old->shared_count; ++i) { - struct dma_fence *f; - - f = rcu_dereference_protected(new->shared[i], - dma_resv_held(resv)); - dma_fence_put(f); - } - kfree_rcu(old, rcu); - + replacement = dma_fence_get_stub(); + dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context, + replacement); + dma_fence_put(replacement); return 0; } diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index afdfdfac729f..3f53177bdb46 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -468,6 +468,8 @@ void dma_resv_init(struct dma_resv *obj); void dma_resv_fini(struct dma_resv *obj); int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); +void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, + struct dma_fence *fence); void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); int dma_resv_get_fences(struct dma_resv *obj, bool write, unsigned int *num_fences, struct dma_fence ***fences); -- cgit v1.2.3-59-g8ed1b From 8938d48451f5d7cb565dfa68aa0bd0e81985da09 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 24 Sep 2021 14:19:22 +0200 Subject: dma-buf: finally make the dma_resv_list private v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drivers should never touch this directly. v2: drop kerneldoc for now internal handling Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220321135856.1331-2-christian.koenig@amd.com --- drivers/dma-buf/dma-resv.c | 11 +++++++++++ include/linux/dma-resv.h | 26 +------------------------- 2 files changed, 12 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 509060861cf3..5001e9b4420a 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -56,6 +56,12 @@ DEFINE_WD_CLASS(reservation_ww_class); EXPORT_SYMBOL(reservation_ww_class); +struct dma_resv_list { + struct rcu_head rcu; + u32 shared_count, shared_max; + struct dma_fence __rcu *shared[]; +}; + /** * dma_resv_list_alloc - allocate fence list * @shared_max: number of fences we need space for @@ -133,6 +139,11 @@ void dma_resv_fini(struct dma_resv *obj) } EXPORT_SYMBOL(dma_resv_fini); +static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj) +{ + return rcu_dereference_check(obj->fence, dma_resv_held(obj)); +} + /** * dma_resv_reserve_shared - Reserve space to add shared fences to * a dma_resv. 
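With the list structure private, code outside dma-resv.c is expected to go
through the iterator API instead. A minimal sketch of the locked iteration
pattern (illustrative only; "resv" stands for some reservation object held
via dma_resv_lock()):

	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, resv, true, fence) {
		/* "true" includes the shared fences, not just the exclusive one */
	}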
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 3f53177bdb46..202cc65d0621 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -47,18 +47,7 @@

 extern struct ww_class reservation_ww_class;

-/**
- * struct dma_resv_list - a list of shared fences
- * @rcu: for internal use
- * @shared_count: table of shared fences
- * @shared_max: for growing shared fence table
- * @shared: shared fence table
- */
-struct dma_resv_list {
-	struct rcu_head rcu;
-	u32 shared_count, shared_max;
-	struct dma_fence __rcu *shared[];
-};
+struct dma_resv_list;

 /**
  * struct dma_resv - a reservation object manages fences for a buffer
@@ -451,19 +440,6 @@ dma_resv_excl_fence(struct dma_resv *obj)
 	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
 }

-/**
- * dma_resv_shared_list - get the reservation object's shared fence list
- * @obj: the reservation object
- *
- * Returns the shared fence list. Caller must either hold the objects
- * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
- * or one of the variants of each
- */
-static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
-{
-	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
-}
-
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
--
cgit v1.2.3-59-g8ed1b


From 15325e3c1013035c2e3e266ba79a0c3bef905f25 Mon Sep 17 00:00:00 2001
From: Christian König
Date: Thu, 11 Nov 2021 15:18:34 +0100
Subject: dma-buf: drop the DAG approach for the dma_resv object v3
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

So far we had the approach of using a directed acyclic graph with the
dma_resv obj.

This turned out to have many downsides; in particular, it means that
every single driver and user of this interface needs to be aware of
this restriction when adding fences. If the rules for the DAG are not
followed we end up with potentially hard-to-debug memory corruption,
information leaks or even gaping security holes, because we allow
userspace to access freed-up memory.

Since we already took a step back from that by always looking at all
fences, we now go a step further and stop dropping the shared fences
when a new exclusive one is added.

v2: Drop some now superfluous documentation
v3: Add some more documentation for the new handling.

Signed-off-by: Christian König
Reviewed-by: Daniel Vetter
Link: https://patchwork.freedesktop.org/patch/msgid/20220321135856.1331-11-christian.koenig@amd.com
---
 drivers/dma-buf/dma-resv.c | 16 +---------------
 include/linux/dma-buf.h    |  4 +---
 include/linux/dma-resv.h   | 22 +++++-----------------
 3 files changed, 7 insertions(+), 35 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 5001e9b4420a..be65522f0f47 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -351,35 +351,21 @@ EXPORT_SYMBOL(dma_resv_replace_fences);
  * @fence: the exclusive fence to add
  *
  * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
- * Note that this function replaces all fences attached to @obj, see also
- * &dma_resv.fence_excl for a discussion of the semantics.
+ * See also &dma_resv.fence_excl for a discussion of the semantics.
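+ *
+ * An illustrative call site (hypothetical; the new fence must signal only
+ * after the previous exclusive fence):
+ *
+ *	dma_resv_lock(obj, NULL);
+ *	dma_resv_add_excl_fence(obj, fence);
+ *	dma_resv_unlock(obj);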
*/ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) { struct dma_fence *old_fence = dma_resv_excl_fence(obj); - struct dma_resv_list *old; - u32 i = 0; dma_resv_assert_held(obj); - old = dma_resv_shared_list(obj); - if (old) - i = old->shared_count; - dma_fence_get(fence); write_seqcount_begin(&obj->seq); /* write_seqcount_begin provides the necessary memory barrier */ RCU_INIT_POINTER(obj->fence_excl, fence); - if (old) - old->shared_count = 0; write_seqcount_end(&obj->seq); - /* inplace update, no shared fences */ - while (i--) - dma_fence_put(rcu_dereference_protected(old->shared[i], - dma_resv_held(obj))); - dma_fence_put(old_fence); } EXPORT_SYMBOL(dma_resv_add_excl_fence); diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 7ab50076e7a6..85ab5554425e 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -424,9 +424,7 @@ struct dma_buf { * IMPORTANT: * * All drivers must obey the struct dma_resv rules, specifically the - * rules for updating fences, see &dma_resv.fence_excl and - * &dma_resv.fence. If these dependency rules are broken access tracking - * can be lost resulting in use after free issues. + * rules for updating and obeying fences. */ struct dma_resv *resv; diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 202cc65d0621..dccaf7b1663e 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -93,23 +93,11 @@ struct dma_resv { * * The exclusive fence, if there is one currently. * - * There are two ways to update this fence: - * - * - First by calling dma_resv_add_excl_fence(), which replaces all - * fences attached to the reservation object. To guarantee that no - * fences are lost, this new fence must signal only after all previous - * fences, both shared and exclusive, have signalled. In some cases it - * is convenient to achieve that by attaching a struct dma_fence_array - * with all the new and old fences. - * - * - Alternatively the fence can be set directly, which leaves the - * shared fences unchanged. To guarantee that no fences are lost, this - * new fence must signal only after the previous exclusive fence has - * signalled. Since the shared fences are staying intact, it is not - * necessary to maintain any ordering against those. If semantically - * only a new access is added without actually treating the previous - * one as a dependency the exclusive fences can be strung together - * using struct dma_fence_chain. + * To guarantee that no fences are lost, this new fence must signal + * only after the previous exclusive fence has signalled. If + * semantically only a new access is added without actually treating the + * previous one as a dependency the exclusive fences can be strung + * together using struct dma_fence_chain. * * Note that actual semantics of what an exclusive or shared fence mean * is defined by the user, for reservation objects shared across drivers -- cgit v1.2.3-59-g8ed1b From 92cedee6a6a3e6fcc3ffc0e3866baae5f6f76ac1 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 3 Nov 2021 10:02:08 +0100 Subject: dma-buf: add dma_resv_get_singleton v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a function to simplify getting a single fence for all the fences in the dma_resv object. 
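A minimal usage sketch (illustrative only; "resv" stands for some
reservation object, and the caller is responsible for dropping the
returned reference):

	struct dma_fence *fence;
	int r;

	r = dma_resv_get_singleton(resv, true, &fence);
	if (r)
		return r;

	if (fence) {
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
	}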
v2: fix ref leak in error handling Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220321135856.1331-3-christian.koenig@amd.com --- drivers/dma-buf/dma-resv.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/dma-resv.h | 2 ++ 2 files changed, 56 insertions(+) (limited to 'include/linux') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index be65522f0f47..ff16da0a54ec 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -34,6 +34,7 @@ */ #include +#include #include #include #include @@ -636,6 +637,59 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write, } EXPORT_SYMBOL_GPL(dma_resv_get_fences); +/** + * dma_resv_get_singleton - Get a single fence for all the fences + * @obj: the reservation object + * @write: true if we should return all fences + * @fence: the resulting fence + * + * Get a single fence representing all the fences inside the resv object. + * Returns either 0 for success or -ENOMEM. + * + * Warning: This can't be used like this when adding the fence back to the resv + * object since that can lead to stack corruption when finalizing the + * dma_fence_array. + * + * Returns 0 on success and negative error values on failure. + */ +int dma_resv_get_singleton(struct dma_resv *obj, bool write, + struct dma_fence **fence) +{ + struct dma_fence_array *array; + struct dma_fence **fences; + unsigned count; + int r; + + r = dma_resv_get_fences(obj, write, &count, &fences); + if (r) + return r; + + if (count == 0) { + *fence = NULL; + return 0; + } + + if (count == 1) { + *fence = fences[0]; + kfree(fences); + return 0; + } + + array = dma_fence_array_create(count, fences, + dma_fence_context_alloc(1), + 1, false); + if (!array) { + while (count--) + dma_fence_put(fences[count]); + kfree(fences); + return -ENOMEM; + } + + *fence = &array->base; + return 0; +} +EXPORT_SYMBOL_GPL(dma_resv_get_singleton); + /** * dma_resv_wait_timeout - Wait on reservation's objects * shared and/or exclusive fences. diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index dccaf7b1663e..233ed4f14d9e 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -437,6 +437,8 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); int dma_resv_get_fences(struct dma_resv *obj, bool write, unsigned int *num_fences, struct dma_fence ***fences); +int dma_resv_get_singleton(struct dma_resv *obj, bool write, + struct dma_fence **fence); int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, unsigned long timeout); -- cgit v1.2.3-59-g8ed1b From 71d637823cac7748079a912e0373476c7cf6f985 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 3 Nov 2021 13:35:14 +0100 Subject: dma-buf: finally make dma_resv_excl_fence private v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drivers should never touch this directly. 
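Instead of peeking at the exclusive slot, code outside dma-resv.c can walk
the object with the iterators. An illustrative sketch (with "resv" locked
by the caller; passing false restricts the walk to the exclusive fence):

	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, resv, false, fence) {
		/* only the exclusive fence, if any, is visited here */
	}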
v2: fix rebase clash Signed-off-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20220321135856.1331-10-christian.koenig@amd.com Reviewed-by: Daniel Vetter --- drivers/dma-buf/dma-resv.c | 6 ++++++ include/linux/dma-resv.h | 17 ----------------- 2 files changed, 6 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index ff16da0a54ec..15ffac35439d 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -140,6 +140,12 @@ void dma_resv_fini(struct dma_resv *obj) } EXPORT_SYMBOL(dma_resv_fini); +static inline struct dma_fence * +dma_resv_excl_fence(struct dma_resv *obj) +{ + return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj)); +} + static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj) { return rcu_dereference_check(obj->fence, dma_resv_held(obj)); diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 233ed4f14d9e..ecb697d4d861 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -411,23 +411,6 @@ static inline void dma_resv_unlock(struct dma_resv *obj) ww_mutex_unlock(&obj->lock); } -/** - * dma_resv_excl_fence - return the object's exclusive fence - * @obj: the reservation object - * - * Returns the exclusive fence (if any). Caller must either hold the objects - * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(), - * or one of the variants of each - * - * RETURNS - * The exclusive fence or NULL - */ -static inline struct dma_fence * -dma_resv_excl_fence(struct dma_resv *obj) -{ - return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj)); -} - void dma_resv_init(struct dma_resv *obj); void dma_resv_fini(struct dma_resv *obj); int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); -- cgit v1.2.3-59-g8ed1b From fe696ccb277d332dc4e625b5b20b988b04d16c04 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 3 Apr 2022 15:53:54 -0700 Subject: gpu: host1x: Fix a kernel-doc warning Add @cache description to eliminate a kernel-doc warning. include/linux/host1x.h:104: warning: Function parameter or member 'cache' not described in 'host1x_client' Fixes: 1f39b1dfa53c ("drm/tegra: Implement buffer object cache") Signed-off-by: Randy Dunlap Cc: Thierry Reding Cc: linux-tegra@vger.kernel.org Cc: David Airlie Cc: Daniel Vetter Cc: dri-devel@lists.freedesktop.org Signed-off-by: Thierry Reding --- include/linux/host1x.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/host1x.h b/include/linux/host1x.h index e8dc5bc41f79..00278853eadf 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -81,6 +81,7 @@ struct host1x_client_ops { * @parent: pointer to parent structure * @usecount: reference count for this structure * @lock: mutex for mutually exclusive concurrency + * @cache: host1x buffer object cache */ struct host1x_client { struct list_head list; -- cgit v1.2.3-59-g8ed1b From 3e9c4584336149146fe15cb5703fc10a2ca2d2a0 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Thu, 24 Mar 2022 11:30:25 +0100 Subject: gpu: host1x: Do not use mapping cache for job submissions Buffer mappings used in job submissions are usually small and not rapidly reused as opposed to framebuffers (which are usually large and rapidly reused, for example when page-flipping between double-buffered framebuffers). 
Avoid going through the mapping cache for these buffers since the cache would also lead to leaks if nobody is ever releasing the cache's last reference. For DRM/KMS these last references are dropped when the framebuffers are removed and therefore no longer needed. While at it, also add a note about the need to explicitly remove the final reference to the mapping in the cache. Reviewed-by: Jon Hunter Tested-by: Jon Hunter Signed-off-by: Thierry Reding --- drivers/gpu/host1x/job.c | 4 ++-- include/linux/host1x.h | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index 5e8c183167b7..b2761aa03b95 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -175,7 +175,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto unpin; } - map = host1x_bo_pin(dev, bo, direction, &client->cache); + map = host1x_bo_pin(dev, bo, direction, NULL); if (IS_ERR(map)) { err = PTR_ERR(map); goto unpin; @@ -222,7 +222,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto unpin; } - map = host1x_bo_pin(host->dev, g->bo, DMA_TO_DEVICE, &host->cache); + map = host1x_bo_pin(host->dev, g->bo, DMA_TO_DEVICE, NULL); if (IS_ERR(map)) { err = PTR_ERR(map); goto unpin; diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 00278853eadf..c0bf4e581fe9 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -31,6 +31,11 @@ u64 host1x_get_dma_mask(struct host1x *host1x); * struct host1x_bo_cache - host1x buffer object cache * @mappings: list of mappings * @lock: synchronizes accesses to the list of mappings + * + * Note that entries are not periodically evicted from this cache and instead need to be + * explicitly released. This is used primarily for DRM/KMS where the cache's reference is + * released when the last reference to a buffer object represented by a mapping in this + * cache is dropped. */ struct host1x_bo_cache { struct list_head mappings; -- cgit v1.2.3-59-g8ed1b From c8d4c18bfbc4ab467188dbe45cc8155759f49d9e Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 16 Nov 2021 15:20:45 +0100 Subject: dma-buf/drivers: make reserving a shared slot mandatory v4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Audit all the users of dma_resv_add_excl_fence() and make sure they reserve a shared slot also when only trying to add an exclusive fence. This is the next step towards handling the exclusive fence like a shared one. 
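The resulting pattern at the call sites is uniform. A minimal sketch
(illustrative only; "resv" and "fence" stand for some reservation object
and the fence about to be added):

	int ret;

	dma_resv_lock(resv, NULL);
	ret = dma_resv_reserve_fences(resv, 1);
	if (!ret)
		dma_resv_add_excl_fence(resv, fence);
	dma_resv_unlock(resv);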
v2: fix missed case in amdgpu v3: and two more radeon, rename function v4: add one more case to TTM, fix i915 after rebase Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220406075132.3263-2-christian.koenig@amd.com --- drivers/dma-buf/dma-resv.c | 10 ++-- drivers/dma-buf/st-dma-resv.c | 64 ++++++++++------------ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 8 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 2 +- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 8 +-- drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 3 +- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 10 ++-- drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c | 6 +- .../gpu/drm/i915/gem/selftests/i915_gem_migrate.c | 5 +- drivers/gpu/drm/i915/i915_vma.c | 10 +++- .../gpu/drm/i915/selftests/intel_memory_region.c | 7 +++ drivers/gpu/drm/lima/lima_gem.c | 10 ++-- drivers/gpu/drm/msm/msm_gem_submit.c | 18 +++--- drivers/gpu/drm/nouveau/nouveau_fence.c | 8 +-- drivers/gpu/drm/panfrost/panfrost_job.c | 4 ++ drivers/gpu/drm/qxl/qxl_release.c | 2 +- drivers/gpu/drm/radeon/radeon_cs.c | 4 ++ drivers/gpu/drm/radeon/radeon_object.c | 8 +++ drivers/gpu/drm/radeon/radeon_vm.c | 2 +- drivers/gpu/drm/ttm/ttm_bo.c | 8 ++- drivers/gpu/drm/ttm/ttm_bo_util.c | 12 +++- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 15 +++-- drivers/gpu/drm/v3d/v3d_gem.c | 15 +++-- drivers/gpu/drm/vc4/vc4_gem.c | 2 +- drivers/gpu/drm/vgem/vgem_fence.c | 12 ++-- drivers/gpu/drm/virtio/virtgpu_gem.c | 9 +++ drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 16 ++++-- include/linux/dma-resv.h | 4 +- 30 files changed, 176 insertions(+), 114 deletions(-) (limited to 'include/linux') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 15ffac35439d..8c650b96357a 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -152,7 +152,7 @@ static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj) } /** - * dma_resv_reserve_shared - Reserve space to add shared fences to + * dma_resv_reserve_fences - Reserve space to add shared fences to * a dma_resv. * @obj: reservation object * @num_fences: number of fences we want to add @@ -167,7 +167,7 @@ static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj) * RETURNS * Zero for success, or -errno */ -int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences) +int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences) { struct dma_resv_list *old, *new; unsigned int i, j, k, max; @@ -230,7 +230,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences) return 0; } -EXPORT_SYMBOL(dma_resv_reserve_shared); +EXPORT_SYMBOL(dma_resv_reserve_fences); #ifdef CONFIG_DEBUG_MUTEXES /** @@ -238,7 +238,7 @@ EXPORT_SYMBOL(dma_resv_reserve_shared); * @obj: the dma_resv object to reset * * Reset the number of pre-reserved shared slots to test that drivers do - * correct slot allocation using dma_resv_reserve_shared(). See also + * correct slot allocation using dma_resv_reserve_fences(). See also * &dma_resv_list.shared_max. */ void dma_resv_reset_shared_max(struct dma_resv *obj) @@ -260,7 +260,7 @@ EXPORT_SYMBOL(dma_resv_reset_shared_max); * @fence: the shared fence to add * * Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and - * dma_resv_reserve_shared() has been called. + * dma_resv_reserve_fences() has been called. 
* * See also &dma_resv.fence for a discussion of the semantics. */ diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c index cbe999c6e7a6..d2e61f6ae989 100644 --- a/drivers/dma-buf/st-dma-resv.c +++ b/drivers/dma-buf/st-dma-resv.c @@ -75,17 +75,16 @@ static int test_signaling(void *arg, bool shared) goto err_free; } - if (shared) { - r = dma_resv_reserve_shared(&resv, 1); - if (r) { - pr_err("Resv shared slot allocation failed\n"); - goto err_unlock; - } + r = dma_resv_reserve_fences(&resv, 1); + if (r) { + pr_err("Resv shared slot allocation failed\n"); + goto err_unlock; + } + if (shared) dma_resv_add_shared_fence(&resv, f); - } else { + else dma_resv_add_excl_fence(&resv, f); - } if (dma_resv_test_signaled(&resv, shared)) { pr_err("Resv unexpectedly signaled\n"); @@ -134,17 +133,16 @@ static int test_for_each(void *arg, bool shared) goto err_free; } - if (shared) { - r = dma_resv_reserve_shared(&resv, 1); - if (r) { - pr_err("Resv shared slot allocation failed\n"); - goto err_unlock; - } + r = dma_resv_reserve_fences(&resv, 1); + if (r) { + pr_err("Resv shared slot allocation failed\n"); + goto err_unlock; + } + if (shared) dma_resv_add_shared_fence(&resv, f); - } else { + else dma_resv_add_excl_fence(&resv, f); - } r = -ENOENT; dma_resv_for_each_fence(&cursor, &resv, shared, fence) { @@ -206,18 +204,17 @@ static int test_for_each_unlocked(void *arg, bool shared) goto err_free; } - if (shared) { - r = dma_resv_reserve_shared(&resv, 1); - if (r) { - pr_err("Resv shared slot allocation failed\n"); - dma_resv_unlock(&resv); - goto err_free; - } + r = dma_resv_reserve_fences(&resv, 1); + if (r) { + pr_err("Resv shared slot allocation failed\n"); + dma_resv_unlock(&resv); + goto err_free; + } + if (shared) dma_resv_add_shared_fence(&resv, f); - } else { + else dma_resv_add_excl_fence(&resv, f); - } dma_resv_unlock(&resv); r = -ENOENT; @@ -290,18 +287,17 @@ static int test_get_fences(void *arg, bool shared) goto err_resv; } - if (shared) { - r = dma_resv_reserve_shared(&resv, 1); - if (r) { - pr_err("Resv shared slot allocation failed\n"); - dma_resv_unlock(&resv); - goto err_resv; - } + r = dma_resv_reserve_fences(&resv, 1); + if (r) { + pr_err("Resv shared slot allocation failed\n"); + dma_resv_unlock(&resv); + goto err_resv; + } + if (shared) dma_resv_add_shared_fence(&resv, f); - } else { + else dma_resv_add_excl_fence(&resv, f); - } dma_resv_unlock(&resv); r = dma_resv_get_fences(&resv, shared, &i, &fences); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 900ed2a7483b..98b1736bb221 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1233,7 +1233,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, AMDGPU_FENCE_OWNER_KFD, false); if (ret) goto wait_pd_fail; - ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1); + ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1); if (ret) goto reserve_shared_fail; amdgpu_bo_fence(vm->root.bo, @@ -2571,7 +2571,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem * Add process eviction fence to bo so they can * evict each other. 
*/ - ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1); + ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1); if (ret) goto reserve_shared_fail; amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 25731719c627..6f57a2fd5fe3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1388,6 +1388,14 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, bool shared) { struct dma_resv *resv = bo->tbo.base.resv; + int r; + + r = dma_resv_reserve_fences(resv, 1); + if (r) { + /* As last resort on OOM we block for the fence */ + dma_fence_wait(fence, false); + return; + } if (shared) dma_resv_add_shared_fence(resv, fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 5d11978c162e..b13451255e8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2926,7 +2926,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) if (r) goto error_free_root; - r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1); + r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1); if (r) goto error_unreserve; @@ -3369,7 +3369,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, value = 0; } - r = dma_resv_reserve_shared(root->tbo.base.resv, 1); + r = dma_resv_reserve_fences(root->tbo.base.resv, 1); if (r) { pr_debug("failed %d to reserve fence slot\n", r); goto error_unlock; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 3b8856b4cece..b3fc3e958227 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -548,7 +548,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange, goto reserve_bo_failed; } - r = dma_resv_reserve_shared(bo->tbo.base.resv, 1); + r = dma_resv_reserve_fences(bo->tbo.base.resv, 1); if (r) { pr_debug("failed %d to reserve bo\n", r); amdgpu_bo_unreserve(bo); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 5f502c49aec2..53f7c78628a4 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -179,11 +179,9 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit) struct etnaviv_gem_submit_bo *bo = &submit->bos[i]; struct dma_resv *robj = bo->obj->base.resv; - if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) { - ret = dma_resv_reserve_shared(robj, 1); - if (ret) - return ret; - } + ret = dma_resv_reserve_fences(robj, 1); + if (ret) + return ret; if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT) continue; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c index ce91b23385cf..1fd0cc9ca213 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c @@ -108,7 +108,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, trace_i915_gem_object_clflush(obj); clflush = NULL; - if (!(flags & I915_CLFLUSH_SYNC)) + if (!(flags & I915_CLFLUSH_SYNC) && + dma_resv_reserve_fences(obj->base.resv, 1) == 0) clflush = clflush_work_create(obj); if (clflush) { i915_sw_fence_await_reservation(&clflush->base.chain, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index d42f437149c9..78f8797853ce 100644 --- 
a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -998,11 +998,9 @@ static int eb_validate_vmas(struct i915_execbuffer *eb) } } - if (!(ev->flags & EXEC_OBJECT_WRITE)) { - err = dma_resv_reserve_shared(vma->obj->base.resv, 1); - if (err) - return err; - } + err = dma_resv_reserve_fences(vma->obj->base.resv, 1); + if (err) + return err; GEM_BUG_ON(drm_mm_node_allocated(&vma->node) && eb_vma_misplaced(&eb->exec[i], vma, ev->flags)); @@ -2303,7 +2301,7 @@ static int eb_parse(struct i915_execbuffer *eb) if (IS_ERR(batch)) return PTR_ERR(batch); - err = dma_resv_reserve_shared(shadow->obj->base.resv, 1); + err = dma_resv_reserve_fences(shadow->obj->base.resv, 1); if (err) return err; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c index 1ebe6e4086a1..432ac74ff225 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c @@ -611,7 +611,11 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, assert_object_held(src); i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); - ret = dma_resv_reserve_shared(src_bo->base.resv, 1); + ret = dma_resv_reserve_fences(src_bo->base.resv, 1); + if (ret) + return ret; + + ret = dma_resv_reserve_fences(dst_bo->base.resv, 1); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c index d534141b2cf7..0e52eb87cd55 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c @@ -216,7 +216,10 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt, i915_gem_object_is_lmem(obj), 0xdeadbeaf, &rq); if (rq) { - dma_resv_add_excl_fence(obj->base.resv, &rq->fence); + err = dma_resv_reserve_fences(obj->base.resv, 1); + if (!err) + dma_resv_add_excl_fence(obj->base.resv, + &rq->fence); i915_gem_object_set_moving_fence(obj, &rq->fence); i915_request_put(rq); } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 94fcdb7bd21d..bae3423f58e8 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -1819,6 +1819,12 @@ int _i915_vma_move_to_active(struct i915_vma *vma, intel_frontbuffer_put(front); } + if (!(flags & __EXEC_OBJECT_NO_RESERVE)) { + err = dma_resv_reserve_fences(vma->obj->base.resv, 1); + if (unlikely(err)) + return err; + } + if (fence) { dma_resv_add_excl_fence(vma->obj->base.resv, fence); obj->write_domain = I915_GEM_DOMAIN_RENDER; @@ -1826,7 +1832,7 @@ int _i915_vma_move_to_active(struct i915_vma *vma, } } else { if (!(flags & __EXEC_OBJECT_NO_RESERVE)) { - err = dma_resv_reserve_shared(vma->obj->base.resv, 1); + err = dma_resv_reserve_fences(vma->obj->base.resv, 1); if (unlikely(err)) return err; } @@ -2044,7 +2050,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm) if (!obj->mm.rsgt) return -EBUSY; - err = dma_resv_reserve_shared(obj->base.resv, 1); + err = dma_resv_reserve_fences(obj->base.resv, 1); if (err) return -EBUSY; diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index ba32893e0873..6114e013092b 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -1043,6 +1043,13 @@ static int igt_lmem_write_cpu(void *arg) } i915_gem_object_lock(obj, NULL); + + err = dma_resv_reserve_fences(obj->base.resv, 1); + 
if (err) { + i915_gem_object_unlock(obj); + goto out_put; + } + /* Put the pages into a known state -- from the gpu for added fun */ intel_engine_pm_get(engine); err = intel_context_migrate_clear(engine->gt->migrate.context, NULL, diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 55bb1ec3c4f7..e0a11ee0e86d 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -257,13 +257,11 @@ int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset) static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo, bool write, bool explicit) { - int err = 0; + int err; - if (!write) { - err = dma_resv_reserve_shared(lima_bo_resv(bo), 1); - if (err) - return err; - } + err = dma_resv_reserve_fences(lima_bo_resv(bo), 1); + if (err) + return err; /* explicit sync use user passed dep fence */ if (explicit) diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index c6d60c8d286d..3164db8be893 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -320,16 +320,14 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) struct drm_gem_object *obj = &submit->bos[i].obj->base; bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; - if (!write) { - /* NOTE: _reserve_shared() must happen before - * _add_shared_fence(), which makes this a slightly - * strange place to call it. OTOH this is a - * convenient can-fail point to hook it in. - */ - ret = dma_resv_reserve_shared(obj->resv, 1); - if (ret) - return ret; - } + /* NOTE: _reserve_shared() must happen before + * _add_shared_fence(), which makes this a slightly + * strange place to call it. OTOH this is a + * convenient can-fail point to hook it in. + */ + ret = dma_resv_reserve_fences(obj->resv, 1); + if (ret) + return ret; /* exclusive fences must be ordered */ if (no_implicit && !write) diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index a3a04e0d76ec..0268259e97eb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -346,11 +346,9 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, struct dma_resv *resv = nvbo->bo.base.resv; int i, ret; - if (!exclusive) { - ret = dma_resv_reserve_shared(resv, 1); - if (ret) - return ret; - } + ret = dma_resv_reserve_fences(resv, 1); + if (ret) + return ret; /* Waiting for the exclusive fence first causes performance regressions * under some circumstances. So manually wait for the shared ones first. 
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index a6925dbb6224..c34114560e49 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -247,6 +247,10 @@ static int panfrost_acquire_object_fences(struct drm_gem_object **bos, int i, ret; for (i = 0; i < bo_count; i++) { + ret = dma_resv_reserve_fences(bos[i]->resv, 1); + if (ret) + return ret; + /* panfrost always uses write mode in its current uapi */ ret = drm_sched_job_add_implicit_dependencies(job, bos[i], true); diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 469979cd0341..cde1e8ddaeaa 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -200,7 +200,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo) return ret; } - ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1); + ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1); if (ret) return ret; diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 9ed2b2700e0a..446f7bae54c4 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -535,6 +535,10 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p, return r; radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update); + + r = dma_resv_reserve_fences(bo->tbo.base.resv, 1); + if (r) + return r; } return radeon_vm_clear_invalids(rdev, vm); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 91a72cd14304..7ffd2e90f325 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -782,6 +782,14 @@ void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, bool shared) { struct dma_resv *resv = bo->tbo.base.resv; + int r; + + r = dma_resv_reserve_fences(resv, 1); + if (r) { + /* As last resort on OOM we block for the fence */ + dma_fence_wait(&fence->base, false); + return; + } if (shared) dma_resv_add_shared_fence(resv, &fence->base); diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index bb53016f3138..987cabbf1318 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -831,7 +831,7 @@ static int radeon_vm_update_ptes(struct radeon_device *rdev, int r; radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true); - r = dma_resv_reserve_shared(pt->tbo.base.resv, 1); + r = dma_resv_reserve_fences(pt->tbo.base.resv, 1); if (r) return r; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index e5fd0f2c0299..c49996cf25d0 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -151,6 +151,10 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, } } + ret = dma_resv_reserve_fences(bo->base.resv, 1); + if (ret) + goto out_err; + ret = bdev->funcs->move(bo, evict, ctx, mem, hop); if (ret) { if (ret == -EMULTIHOP) @@ -735,7 +739,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, dma_resv_add_shared_fence(bo->base.resv, fence); - ret = dma_resv_reserve_shared(bo->base.resv, 1); + ret = dma_resv_reserve_fences(bo->base.resv, 1); if (unlikely(ret)) { dma_fence_put(fence); return ret; @@ -794,7 +798,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, bool type_found = false; int i, ret; - ret = dma_resv_reserve_shared(bo->base.resv, 1); + ret = dma_resv_reserve_fences(bo->base.resv, 1); if (unlikely(ret)) return ret; diff --git 
a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 219dd81bbeab..1b96b91bf81b 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -221,9 +221,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, fbo->base = *bo; - ttm_bo_get(bo); - fbo->bo = bo; - /** * Fix up members that we shouldn't copy directly: * TODO: Explicit member copy would probably be better here. @@ -250,6 +247,15 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, ret = dma_resv_trylock(&fbo->base.base._resv); WARN_ON(!ret); + ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1); + if (ret) { + kfree(fbo); + return ret; + } + + ttm_bo_get(bo); + fbo->bo = bo; + ttm_bo_move_to_lru_tail_unlocked(&fbo->base); *new_obj = &fbo->base; diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 071c48d672c6..789c645f004e 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -90,6 +90,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; + unsigned int num_fences; ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket); if (ret == -EALREADY && dups) { @@ -100,12 +101,10 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, continue; } + num_fences = min(entry->num_shared, 1u); if (!ret) { - if (!entry->num_shared) - continue; - - ret = dma_resv_reserve_shared(bo->base.resv, - entry->num_shared); + ret = dma_resv_reserve_fences(bo->base.resv, + num_fences); if (!ret) continue; } @@ -120,9 +119,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, ret = ttm_bo_reserve_slowpath(bo, intr, ticket); } - if (!ret && entry->num_shared) - ret = dma_resv_reserve_shared(bo->base.resv, - entry->num_shared); + if (!ret) + ret = dma_resv_reserve_fences(bo->base.resv, + num_fences); if (unlikely(ret != 0)) { if (ticket) { diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 92bc0faee84f..961812d33827 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -259,16 +259,21 @@ v3d_lock_bo_reservations(struct v3d_job *job, return ret; for (i = 0; i < job->bo_count; i++) { + ret = dma_resv_reserve_fences(job->bo[i]->resv, 1); + if (ret) + goto fail; + ret = drm_sched_job_add_implicit_dependencies(&job->base, job->bo[i], true); - if (ret) { - drm_gem_unlock_reservations(job->bo, job->bo_count, - acquire_ctx); - return ret; - } + if (ret) + goto fail; } return 0; + +fail: + drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); + return ret; } /** diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 4abf10b66fe8..594bd6bb00d2 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -644,7 +644,7 @@ retry: for (i = 0; i < exec->bo_count; i++) { bo = &exec->bo[i]->base; - ret = dma_resv_reserve_shared(bo->resv, 1); + ret = dma_resv_reserve_fences(bo->resv, 1); if (ret) { vc4_unlock_bo_reservations(dev, exec, acquire_ctx); return ret; diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index bd6f75285fd9..2ddbebca87d9 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c @@ -157,12 +157,14 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, } /* Expose the fence via the dma-buf */ - ret = 0; dma_resv_lock(resv, NULL); - if (arg->flags & VGEM_FENCE_WRITE) - 
dma_resv_add_excl_fence(resv, fence); - else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0) - dma_resv_add_shared_fence(resv, fence); + ret = dma_resv_reserve_fences(resv, 1); + if (!ret) { + if (arg->flags & VGEM_FENCE_WRITE) + dma_resv_add_excl_fence(resv, fence); + else + dma_resv_add_shared_fence(resv, fence); + } dma_resv_unlock(resv); /* Record the fence in our idr for later signaling */ diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c index 48d3c9955f0d..1820ca6cf673 100644 --- a/drivers/gpu/drm/virtio/virtgpu_gem.c +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c @@ -214,6 +214,7 @@ void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs, int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs) { + unsigned int i; int ret; if (objs->nents == 1) { @@ -222,6 +223,14 @@ int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs) ret = drm_gem_lock_reservations(objs->objs, objs->nents, &objs->ticket); } + if (ret) + return ret; + + for (i = 0; i < objs->nents; ++i) { + ret = dma_resv_reserve_fences(objs->objs[i]->resv, 1); + if (ret) + return ret; + } return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 31aecc46624b..fe13aa8b4a64 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -747,16 +747,22 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo, struct vmw_fence_obj *fence) { struct ttm_device *bdev = bo->bdev; - struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); + int ret; - if (fence == NULL) { + if (fence == NULL) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); + else + dma_fence_get(&fence->base); + + ret = dma_resv_reserve_fences(bo->base.resv, 1); + if (!ret) dma_resv_add_excl_fence(bo->base.resv, &fence->base); - dma_fence_put(&fence->base); - } else - dma_resv_add_excl_fence(bo->base.resv, &fence->base); + else + /* Last resort fallback when we are OOM */ + dma_fence_wait(&fence->base, false); + dma_fence_put(&fence->base); } diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index ecb697d4d861..5fa04d0fccad 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -117,7 +117,7 @@ struct dma_resv { * A new fence is added by calling dma_resv_add_shared_fence(). Since * this often needs to be done past the point of no return in command * submission it cannot fail, and therefore sufficient slots need to be - * reserved by calling dma_resv_reserve_shared(). + * reserved by calling dma_resv_reserve_fences(). 
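+ *
+ * An illustrative pairing for the shared slots (hypothetical driver code):
+ *
+ *	ret = dma_resv_reserve_fences(obj, 1);
+ *	if (!ret)
+ *		dma_resv_add_shared_fence(obj, fence);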
 *
 * Note that actual semantics of what an exclusive or shared fence mean
 * is defined by the user, for reservation objects shared across drivers
@@ -413,7 +413,7 @@ static inline void dma_resv_unlock(struct dma_resv *obj)

 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
-int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
 void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
 void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
 			     struct dma_fence *fence);
--
cgit v1.2.3-59-g8ed1b


From 7bc80a5462c37eab58a9ea386064307c0f447fd1 Mon Sep 17 00:00:00 2001
From: Christian König
Date: Tue, 9 Nov 2021 11:08:18 +0100
Subject: dma-buf: add enum dma_resv_usage v4
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This change adds the dma_resv_usage enum and allows us to specify why a
dma_resv object is queried for its containing fences.

In addition to that, a dma_resv_usage_rw() helper function is added to
aid in retrieving the fences for a read or write userspace submission.

This is then deployed to the different query functions of the dma_resv
object and all of their users. Where the write parameter was previously
true we now use DMA_RESV_USAGE_WRITE, and DMA_RESV_USAGE_READ otherwise.

v2: add KERNEL/OTHER in separate patch
v3: some kerneldoc suggestions by Daniel
v4: some more kerneldoc suggestions by Daniel, fix missing cases lost in
    the rebase pointed out by Bas.

Signed-off-by: Christian König
Reviewed-by: Daniel Vetter
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-2-christian.koenig@amd.com
---
 drivers/dma-buf/dma-buf.c                          |  6 +-
 drivers/dma-buf/dma-resv.c                         | 35 +++++------
 drivers/dma-buf/st-dma-resv.c                      | 48 +++++++-------
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c             |  4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c        |  3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c            |  3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c            |  5 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c             |  4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c         |  4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c           |  3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c            |  3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c            |  3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c             |  7 ++-
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  |  3 +-
 drivers/gpu/drm/drm_gem.c                          |  3 +-
 drivers/gpu/drm/drm_gem_atomic_helper.c            |  2 +-
 drivers/gpu/drm/etnaviv/etnaviv_gem.c              |  6 +-
 drivers/gpu/drm/i915/display/intel_atomic_plane.c  |  3 +-
 drivers/gpu/drm/i915/gem/i915_gem_busy.c           |  4 +-
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c           |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c        |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_wait.c           |  6 +-
 .../gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c   |  3 +-
 drivers/gpu/drm/i915/i915_request.c                |  3 +-
 drivers/gpu/drm/i915/i915_sw_fence.c               |  2 +-
 drivers/gpu/drm/msm/msm_gem.c                      |  3 +-
 drivers/gpu/drm/nouveau/dispnv50/wndw.c            |  3 +-
 drivers/gpu/drm/nouveau/nouveau_bo.c               |  8 +--
 drivers/gpu/drm/nouveau/nouveau_fence.c            |  8 ++-
 drivers/gpu/drm/nouveau/nouveau_gem.c              |  3 +-
 drivers/gpu/drm/panfrost/panfrost_drv.c            |  3 +-
 drivers/gpu/drm/qxl/qxl_debugfs.c                  |  3 +-
 drivers/gpu/drm/radeon/radeon_display.c            |  3 +-
 drivers/gpu/drm/radeon/radeon_gem.c                |  9 ++-
 drivers/gpu/drm/radeon/radeon_mn.c                 |  4 +-
 drivers/gpu/drm/radeon/radeon_sync.c               |  2 +-
 drivers/gpu/drm/radeon/radeon_uvd.c                |  4 +-
 drivers/gpu/drm/scheduler/sched_main.c             |  3 +-
 drivers/gpu/drm/ttm/ttm_bo.c                       | 18 +++---
drivers/gpu/drm/vgem/vgem_fence.c | 4 +- drivers/gpu/drm/virtio/virtgpu_ioctl.c | 5 +- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 4 +- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 3 +- drivers/infiniband/core/umem_dmabuf.c | 3 +- include/linux/dma-buf.h | 8 ++- include/linux/dma-resv.h | 73 ++++++++++++++++++---- 46 files changed, 215 insertions(+), 126 deletions(-) (limited to 'include/linux') diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 775d3afb4169..1cddb65eafda 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -216,7 +216,8 @@ static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write, struct dma_fence *fence; int r; - dma_resv_for_each_fence(&cursor, resv, write, fence) { + dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write), + fence) { dma_fence_get(fence); r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb); if (!r) @@ -1124,7 +1125,8 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf, long ret; /* Wait on any implicit rendering fences */ - ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT); + ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write), + true, MAX_SCHEDULE_TIMEOUT); if (ret < 0) return ret; diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 8c650b96357a..17237e6ee30c 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -384,7 +384,7 @@ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor) cursor->seq = read_seqcount_begin(&cursor->obj->seq); cursor->index = -1; cursor->shared_count = 0; - if (cursor->all_fences) { + if (cursor->usage >= DMA_RESV_USAGE_READ) { cursor->fences = dma_resv_shared_list(cursor->obj); if (cursor->fences) cursor->shared_count = cursor->fences->shared_count; @@ -496,7 +496,7 @@ struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor) dma_resv_assert_held(cursor->obj); cursor->index = 0; - if (cursor->all_fences) + if (cursor->usage >= DMA_RESV_USAGE_READ) cursor->fences = dma_resv_shared_list(cursor->obj); else cursor->fences = NULL; @@ -551,7 +551,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) list = NULL; excl = NULL; - dma_resv_iter_begin(&cursor, src, true); + dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_READ); dma_resv_for_each_fence_unlocked(&cursor, f) { if (dma_resv_iter_is_restarted(&cursor)) { @@ -597,7 +597,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences); * dma_resv_get_fences - Get an object's shared and exclusive * fences without update side lock held * @obj: the reservation object - * @write: true if we should return all fences + * @usage: controls which fences to include, see enum dma_resv_usage. * @num_fences: the number of fences returned * @fences: the array of fence ptrs returned (array is krealloc'd to the * required size, and must be freed by caller) @@ -605,7 +605,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences); * Retrieve all fences from the reservation object. * Returns either zero or -ENOMEM. 
*/ -int dma_resv_get_fences(struct dma_resv *obj, bool write, +int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage, unsigned int *num_fences, struct dma_fence ***fences) { struct dma_resv_iter cursor; @@ -614,7 +614,7 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write, *num_fences = 0; *fences = NULL; - dma_resv_iter_begin(&cursor, obj, write); + dma_resv_iter_begin(&cursor, obj, usage); dma_resv_for_each_fence_unlocked(&cursor, fence) { if (dma_resv_iter_is_restarted(&cursor)) { @@ -646,7 +646,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences); /** * dma_resv_get_singleton - Get a single fence for all the fences * @obj: the reservation object - * @write: true if we should return all fences + * @usage: controls which fences to include, see enum dma_resv_usage. * @fence: the resulting fence * * Get a single fence representing all the fences inside the resv object. @@ -658,7 +658,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences); * * Returns 0 on success and negative error values on failure. */ -int dma_resv_get_singleton(struct dma_resv *obj, bool write, +int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage, struct dma_fence **fence) { struct dma_fence_array *array; @@ -666,7 +666,7 @@ int dma_resv_get_singleton(struct dma_resv *obj, bool write, unsigned count; int r; - r = dma_resv_get_fences(obj, write, &count, &fences); + r = dma_resv_get_fences(obj, usage, &count, &fences); if (r) return r; @@ -700,7 +700,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_singleton); * dma_resv_wait_timeout - Wait on reservation's objects * shared and/or exclusive fences. * @obj: the reservation object - * @wait_all: if true, wait on all fences, else wait on just exclusive fence + * @usage: controls which fences to include, see enum dma_resv_usage. * @intr: if true, do interruptible wait * @timeout: timeout value in jiffies or zero to return immediately * @@ -710,14 +710,14 @@ EXPORT_SYMBOL_GPL(dma_resv_get_singleton); * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or * greater than zer on success. */ -long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, - unsigned long timeout) +long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, + bool intr, unsigned long timeout) { long ret = timeout ? timeout : 1; struct dma_resv_iter cursor; struct dma_fence *fence; - dma_resv_iter_begin(&cursor, obj, wait_all); + dma_resv_iter_begin(&cursor, obj, usage); dma_resv_for_each_fence_unlocked(&cursor, fence) { ret = dma_fence_wait_timeout(fence, intr, ret); @@ -737,8 +737,7 @@ EXPORT_SYMBOL_GPL(dma_resv_wait_timeout); * dma_resv_test_signaled - Test if a reservation object's fences have been * signaled. * @obj: the reservation object - * @test_all: if true, test all fences, otherwise only test the exclusive - * fence + * @usage: controls which fences to include, see enum dma_resv_usage. * * Callers are not required to hold specific locks, but maybe hold * dma_resv_lock() already. @@ -747,12 +746,12 @@ EXPORT_SYMBOL_GPL(dma_resv_wait_timeout); * * True if all fences signaled, else false. 
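+ *
+ * An illustrative fast path (hypothetical caller; dma_resv_usage_rw() maps
+ * a read/write submission to the matching usage):
+ *
+ *	if (dma_resv_test_signaled(obj, dma_resv_usage_rw(write)))
+ *		return true;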
*/ -bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all) +bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage) { struct dma_resv_iter cursor; struct dma_fence *fence; - dma_resv_iter_begin(&cursor, obj, test_all); + dma_resv_iter_begin(&cursor, obj, usage); dma_resv_for_each_fence_unlocked(&cursor, fence) { dma_resv_iter_end(&cursor); return false; @@ -775,7 +774,7 @@ void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq) struct dma_resv_iter cursor; struct dma_fence *fence; - dma_resv_for_each_fence(&cursor, obj, true, fence) { + dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) { seq_printf(seq, "\t%s fence:", dma_resv_iter_is_exclusive(&cursor) ? "Exclusive" : "Shared"); diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c index d2e61f6ae989..d097981061b1 100644 --- a/drivers/dma-buf/st-dma-resv.c +++ b/drivers/dma-buf/st-dma-resv.c @@ -58,7 +58,7 @@ static int sanitycheck(void *arg) return r; } -static int test_signaling(void *arg, bool shared) +static int test_signaling(void *arg, enum dma_resv_usage usage) { struct dma_resv resv; struct dma_fence *f; @@ -81,18 +81,18 @@ static int test_signaling(void *arg, bool shared) goto err_unlock; } - if (shared) + if (usage >= DMA_RESV_USAGE_READ) dma_resv_add_shared_fence(&resv, f); else dma_resv_add_excl_fence(&resv, f); - if (dma_resv_test_signaled(&resv, shared)) { + if (dma_resv_test_signaled(&resv, usage)) { pr_err("Resv unexpectedly signaled\n"); r = -EINVAL; goto err_unlock; } dma_fence_signal(f); - if (!dma_resv_test_signaled(&resv, shared)) { + if (!dma_resv_test_signaled(&resv, usage)) { pr_err("Resv not reporting signaled\n"); r = -EINVAL; goto err_unlock; @@ -107,15 +107,15 @@ err_free: static int test_excl_signaling(void *arg) { - return test_signaling(arg, false); + return test_signaling(arg, DMA_RESV_USAGE_WRITE); } static int test_shared_signaling(void *arg) { - return test_signaling(arg, true); + return test_signaling(arg, DMA_RESV_USAGE_READ); } -static int test_for_each(void *arg, bool shared) +static int test_for_each(void *arg, enum dma_resv_usage usage) { struct dma_resv_iter cursor; struct dma_fence *f, *fence; @@ -139,13 +139,13 @@ static int test_for_each(void *arg, bool shared) goto err_unlock; } - if (shared) + if (usage >= DMA_RESV_USAGE_READ) dma_resv_add_shared_fence(&resv, f); else dma_resv_add_excl_fence(&resv, f); r = -ENOENT; - dma_resv_for_each_fence(&cursor, &resv, shared, fence) { + dma_resv_for_each_fence(&cursor, &resv, usage, fence) { if (!r) { pr_err("More than one fence found\n"); r = -EINVAL; @@ -156,7 +156,8 @@ static int test_for_each(void *arg, bool shared) r = -EINVAL; goto err_unlock; } - if (dma_resv_iter_is_exclusive(&cursor) != !shared) { + if (dma_resv_iter_is_exclusive(&cursor) != + (usage >= DMA_RESV_USAGE_READ)) { pr_err("Unexpected fence usage\n"); r = -EINVAL; goto err_unlock; @@ -178,15 +179,15 @@ err_free: static int test_excl_for_each(void *arg) { - return test_for_each(arg, false); + return test_for_each(arg, DMA_RESV_USAGE_WRITE); } static int test_shared_for_each(void *arg) { - return test_for_each(arg, true); + return test_for_each(arg, DMA_RESV_USAGE_READ); } -static int test_for_each_unlocked(void *arg, bool shared) +static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage) { struct dma_resv_iter cursor; struct dma_fence *f, *fence; @@ -211,14 +212,14 @@ static int test_for_each_unlocked(void *arg, bool shared) goto err_free; } - if (shared) + if (usage >= DMA_RESV_USAGE_READ) 
dma_resv_add_shared_fence(&resv, f); else dma_resv_add_excl_fence(&resv, f); dma_resv_unlock(&resv); r = -ENOENT; - dma_resv_iter_begin(&cursor, &resv, shared); + dma_resv_iter_begin(&cursor, &resv, usage); dma_resv_for_each_fence_unlocked(&cursor, fence) { if (!r) { pr_err("More than one fence found\n"); @@ -234,7 +235,8 @@ static int test_for_each_unlocked(void *arg, bool shared) r = -EINVAL; goto err_iter_end; } - if (dma_resv_iter_is_exclusive(&cursor) != !shared) { + if (dma_resv_iter_is_exclusive(&cursor) != + (usage >= DMA_RESV_USAGE_READ)) { pr_err("Unexpected fence usage\n"); r = -EINVAL; goto err_iter_end; @@ -262,15 +264,15 @@ err_free: static int test_excl_for_each_unlocked(void *arg) { - return test_for_each_unlocked(arg, false); + return test_for_each_unlocked(arg, DMA_RESV_USAGE_WRITE); } static int test_shared_for_each_unlocked(void *arg) { - return test_for_each_unlocked(arg, true); + return test_for_each_unlocked(arg, DMA_RESV_USAGE_READ); } -static int test_get_fences(void *arg, bool shared) +static int test_get_fences(void *arg, enum dma_resv_usage usage) { struct dma_fence *f, **fences = NULL; struct dma_resv resv; @@ -294,13 +296,13 @@ static int test_get_fences(void *arg, bool shared) goto err_resv; } - if (shared) + if (usage >= DMA_RESV_USAGE_READ) dma_resv_add_shared_fence(&resv, f); else dma_resv_add_excl_fence(&resv, f); dma_resv_unlock(&resv); - r = dma_resv_get_fences(&resv, shared, &i, &fences); + r = dma_resv_get_fences(&resv, usage, &i, &fences); if (r) { pr_err("get_fences failed\n"); goto err_free; @@ -324,12 +326,12 @@ err_resv: static int test_excl_get_fences(void *arg) { - return test_get_fences(arg, false); + return test_get_fences(arg, DMA_RESV_USAGE_WRITE); } static int test_shared_get_fences(void *arg) { - return test_get_fences(arg, true); + return test_get_fences(arg, DMA_RESV_USAGE_READ); } int dma_resv(void) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index e85e347eb670..413f32c3fd63 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1288,7 +1288,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, * * TODO: Remove together with dma_resv rework. 
*/ - dma_resv_for_each_fence(&cursor, resv, false, fence) { + dma_resv_for_each_fence(&cursor, resv, + DMA_RESV_USAGE_WRITE, + fence) { break; } dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index fae5c1debfad..7a6908d71820 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -200,8 +200,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, goto unpin; } - /* TODO: Unify this with other drivers */ - r = dma_resv_get_fences(new_abo->tbo.base.resv, true, + r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE, &work->shared_count, &work->shared); if (unlikely(r != 0)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 57b74d35052f..84a53758e18e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -526,7 +526,8 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, return -ENOENT; } robj = gem_to_amdgpu_bo(gobj); - ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout); + ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ, + true, timeout); /* ret == 0 means not signaled, * ret > 0 means signaled diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 81207737c716..65998cbcd7f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -111,7 +111,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv, struct dma_fence *fence; int r; - r = dma_resv_get_singleton(resv, true, &fence); + r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_READ, &fence); if (r) goto fallback; @@ -139,7 +139,8 @@ fallback: /* Not enough memory for the delayed delete, as last resort * block for all the fences to complete. 
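This amdgpu_pasid path (collapse the whole reservation into one fence for asynchronous cleanup, and only block when that fails) is a pattern worth calling out. A sketch of the shape, with the async hand-off left abstract; queue_async_release() is a hypothetical helper:

	static void release_when_idle(struct dma_resv *resv)
	{
		struct dma_fence *fence;

		if (!dma_resv_get_singleton(resv, DMA_RESV_USAGE_READ, &fence)
		    && fence) {
			queue_async_release(fence); /* consumes the reference */
			return;
		}
		/* No memory for the fence array: block as the last resort. */
		dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ, false,
				      MAX_SCHEDULE_TIMEOUT);
	}
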
*/ - dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT); + dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ, + false, MAX_SCHEDULE_TIMEOUT); amdgpu_pasid_free(pasid); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 4b153daf283d..86f5248676b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni, mmu_interval_set_seq(mni, cur_seq); - r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ, + false, MAX_SCHEDULE_TIMEOUT); mutex_unlock(&adev->notifier_lock); if (r <= 0) DRM_ERROR("(%ld) failed to wait for user bo\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 6f57a2fd5fe3..a7f39f8ab7be 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -768,8 +768,8 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) return 0; } - r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_WRITE, + false, MAX_SCHEDULE_TIMEOUT); if (r < 0) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 40e06745fae9..744e144e5fc2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -259,7 +259,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, if (resv == NULL) return -EINVAL; - dma_resv_for_each_fence(&cursor, resv, true, f) { + /* TODO: Use DMA_RESV_USAGE_READ here */ + dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f) { dma_fence_chain_for_each(f, f) { struct dma_fence *tmp = dma_fence_chain_contained(f); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index f7f149588432..5db5066e74b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1344,7 +1344,8 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, * If true, then return false as any KFD process needs all its BOs to * be resident to run successfully */ - dma_resv_for_each_fence(&resv_cursor, bo->base.resv, true, f) { + dma_resv_for_each_fence(&resv_cursor, bo->base.resv, + DMA_RESV_USAGE_READ, f) { if (amdkfd_fence_check_mm(f, current->mm)) return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 39c74d9fa7cc..3654326219e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1163,7 +1163,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ib->length_dw = 16; if (direct) { - r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, + r = dma_resv_wait_timeout(bo->tbo.base.resv, + DMA_RESV_USAGE_WRITE, false, msecs_to_jiffies(10)); if (r == 0) r = -ETIMEDOUT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b13451255e8b..a0376fd36a82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2059,7 +2059,7 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) struct dma_resv_iter cursor; struct dma_fence *fence; - 
dma_resv_for_each_fence(&cursor, resv, true, fence) { + dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, fence) { /* Add a callback for each fence in the reservation object */ amdgpu_vm_prt_get(adev); amdgpu_vm_add_prt_cb(adev, fence); @@ -2665,7 +2665,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo) return true; /* Don't evict VM page tables while they are busy */ - if (!dma_resv_test_signaled(bo->tbo.base.resv, true)) + if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_READ)) return false; /* Try to block ongoing updates */ @@ -2845,7 +2845,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, */ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) { - timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true, + timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, + DMA_RESV_USAGE_READ, true, timeout); if (timeout <= 0) return timeout; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index b30656959fd8..9e24b1e616af 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -9236,7 +9236,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * deadlock during GPU reset when this fence will not signal * but we hold reservation lock for the BO. */ - r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false, + r = dma_resv_wait_timeout(abo->tbo.base.resv, + DMA_RESV_USAGE_WRITE, false, msecs_to_jiffies(5000)); if (unlikely(r <= 0)) DRM_ERROR("Waiting for fences timed out!"); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 133dfae06fab..eb0c2d041f13 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -771,7 +771,8 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, return -EINVAL; } - ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout); + ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all), + true, timeout); if (ret == 0) ret = -ETIME; else if (ret > 0) diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c index 9338ddb7edff..a6d89aed0bda 100644 --- a/drivers/gpu/drm/drm_gem_atomic_helper.c +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -151,7 +151,7 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st return 0; obj = drm_gem_fb_get_obj(state->fb, 0); - ret = dma_resv_get_singleton(obj->resv, false, &fence); + ret = dma_resv_get_singleton(obj->resv, DMA_RESV_USAGE_WRITE, &fence); if (ret) return ret; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index d5314aa28ff7..507172e2780b 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -380,12 +380,14 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, } if (op & ETNA_PREP_NOSYNC) { - if (!dma_resv_test_signaled(obj->resv, write)) + if (!dma_resv_test_signaled(obj->resv, + dma_resv_usage_rw(write))) return -EBUSY; } else { unsigned long remain = etnaviv_timeout_to_jiffies(timeout); - ret = dma_resv_wait_timeout(obj->resv, write, true, remain); + ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write), + true, remain); if (ret <= 0) return ret == 0 ? 
-ETIMEDOUT : ret; } diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 5712688232fb..03e86e836a17 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -997,7 +997,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane, if (ret < 0) goto unpin_fb; - dma_resv_iter_begin(&cursor, obj->base.resv, false); + dma_resv_iter_begin(&cursor, obj->base.resv, + DMA_RESV_USAGE_WRITE); dma_resv_for_each_fence_unlocked(&cursor, fence) { add_rps_boost_after_vblank(new_plane_state->hw.crtc, fence); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c index 470fdfd61a0f..14a1c0ad8c3c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -138,12 +138,12 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * Alternatively, we can trade that extra information on read/write * activity with * args->busy = - * !dma_resv_test_signaled(obj->resv, true); + * !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ); * to report the overall busyness. This is what the wait-ioctl does. * */ args->busy = 0; - dma_resv_iter_begin(&cursor, obj->base.resv, true); + dma_resv_iter_begin(&cursor, obj->base.resv, DMA_RESV_USAGE_READ); dma_resv_for_each_fence_unlocked(&cursor, fence) { if (dma_resv_iter_is_restarted(&cursor)) args->busy = 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c index 444f8268b9c5..a200d3e66573 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c @@ -66,7 +66,7 @@ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) struct intel_memory_region *mr = READ_ONCE(obj->mm.region); #ifdef CONFIG_LOCKDEP - GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true) && + GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_READ) && i915_gem_object_evictable(obj)); #endif return mr && (mr->type == INTEL_MEMORY_LOCAL || diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 6d1a71d6404c..644fe237601c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -86,7 +86,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni, return true; /* we will unbind on next submission, still have userptr pins */ - r = dma_resv_wait_timeout(obj->base.resv, true, false, + r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_READ, false, MAX_SCHEDULE_TIMEOUT); if (r <= 0) drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index dab3d30c09a0..319936f91ac5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -40,7 +40,8 @@ i915_gem_object_wait_reservation(struct dma_resv *resv, struct dma_fence *fence; long ret = timeout ?: 1; - dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL); + dma_resv_iter_begin(&cursor, resv, + dma_resv_usage_rw(flags & I915_WAIT_ALL)); dma_resv_for_each_fence_unlocked(&cursor, fence) { ret = i915_gem_object_wait_fence(fence, flags, timeout); if (ret <= 0) @@ -117,7 +118,8 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, struct dma_resv_iter cursor; struct dma_fence *fence; - dma_resv_iter_begin(&cursor, obj->base.resv, flags & I915_WAIT_ALL); + 
dma_resv_iter_begin(&cursor, obj->base.resv, + dma_resv_usage_rw(flags & I915_WAIT_ALL)); dma_resv_for_each_fence_unlocked(&cursor, fence) i915_gem_fence_wait_priority(fence, attr); dma_resv_iter_end(&cursor); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c index b071a58dd6da..b4275b55e5b8 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c @@ -219,7 +219,8 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915, goto out_detach; } - timeout = dma_resv_wait_timeout(dmabuf->resv, false, true, 5 * HZ); + timeout = dma_resv_wait_timeout(dmabuf->resv, DMA_RESV_USAGE_WRITE, + true, 5 * HZ); if (!timeout) { pr_err("dmabuf wait for exclusive fence timed out.\n"); timeout = -ETIME; diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 582770360ad1..73d5195146b0 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1598,7 +1598,8 @@ i915_request_await_object(struct i915_request *to, struct dma_fence *fence; int ret = 0; - dma_resv_for_each_fence(&cursor, obj->base.resv, write, fence) { + dma_resv_for_each_fence(&cursor, obj->base.resv, + dma_resv_usage_rw(write), fence) { ret = i915_request_await_dma_fence(to, fence); if (ret) break; diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 2a74a9a1cafe..ae984c66c48a 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -585,7 +585,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, debug_fence_assert(fence); might_sleep_if(gfpflags_allow_blocking(gfp)); - dma_resv_iter_begin(&cursor, resv, write); + dma_resv_iter_begin(&cursor, resv, dma_resv_usage_rw(write)); dma_resv_for_each_fence_unlocked(&cursor, f) { pending = i915_sw_fence_await_dma_fence(fence, f, timeout, gfp); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 02b9ae65a96a..01bbb5f2d462 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -848,7 +848,8 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout); long ret; - ret = dma_resv_wait_timeout(obj->resv, write, true, remain); + ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write), + true, remain); if (ret == 0) return remain == 0 ? 
-EBUSY : -ETIMEDOUT; else if (ret < 0) diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index e2faf92e4831..8642b84ea20c 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -558,7 +558,8 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) asyw->image.handle[0] = ctxdma->object.handle; } - ret = dma_resv_get_singleton(nvbo->bo.base.resv, false, + ret = dma_resv_get_singleton(nvbo->bo.base.resv, + DMA_RESV_USAGE_WRITE, &asyw->state.fence); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 74f8652d2bd3..c6bb4dbcd735 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -962,11 +962,11 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, struct dma_fence *fence; int ret; - /* TODO: This is actually a memory management dependency */ - ret = dma_resv_get_singleton(bo->base.resv, false, &fence); + ret = dma_resv_get_singleton(bo->base.resv, DMA_RESV_USAGE_WRITE, + &fence); if (ret) - dma_resv_wait_timeout(bo->base.resv, false, false, - MAX_SCHEDULE_TIMEOUT); + dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_WRITE, + false, MAX_SCHEDULE_TIMEOUT); nv10_bo_put_tile_region(dev, *old_tile, fence); *old_tile = new_tile; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 0268259e97eb..d5e81ccee01c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -350,14 +350,16 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, if (ret) return ret; - /* Waiting for the exclusive fence first causes performance regressions - * under some circumstances. So manually wait for the shared ones first. + /* Waiting for the writes first causes performance regressions + * under some circumstances. So manually wait for the reads first. */ for (i = 0; i < 2; ++i) { struct dma_resv_iter cursor; struct dma_fence *fence; - dma_resv_for_each_fence(&cursor, resv, exclusive, fence) { + dma_resv_for_each_fence(&cursor, resv, + dma_resv_usage_rw(exclusive), + fence) { struct nouveau_fence *f; if (i == 0 && dma_resv_iter_is_exclusive(&cursor)) diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 9416bee92141..fab542a758ff 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -962,7 +962,8 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, return -ENOENT; nvbo = nouveau_gem_object(gem); - lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true, + lret = dma_resv_wait_timeout(nvbo->bo.base.resv, + dma_resv_usage_rw(write), true, no_wait ? 0 : 30 * HZ); if (!lret) ret = -EBUSY; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 94b6f0a19c83..7fcbc2a5b6cd 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -316,7 +316,8 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data, if (!gem_obj) return -ENOENT; - ret = dma_resv_wait_timeout(gem_obj->resv, true, true, timeout); + ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_READ, + true, timeout); if (!ret) ret = timeout ? 
-ETIMEDOUT : -EBUSY; diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c index 6a36b0fd845c..33e5889d6608 100644 --- a/drivers/gpu/drm/qxl/qxl_debugfs.c +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c @@ -61,7 +61,8 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data) struct dma_fence *fence; int rel = 0; - dma_resv_iter_begin(&cursor, bo->tbo.base.resv, true); + dma_resv_iter_begin(&cursor, bo->tbo.base.resv, + DMA_RESV_USAGE_READ); dma_resv_for_each_fence_unlocked(&cursor, fence) { if (dma_resv_iter_is_restarted(&cursor)) rel = 0; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index f60e826cd292..57ff2b723c87 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -533,7 +533,8 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc, DRM_ERROR("failed to pin new rbo buffer before flip\n"); goto cleanup; } - r = dma_resv_get_singleton(new_rbo->tbo.base.resv, false, &work->fence); + r = dma_resv_get_singleton(new_rbo->tbo.base.resv, DMA_RESV_USAGE_WRITE, + &work->fence); if (r) { radeon_bo_unreserve(new_rbo); DRM_ERROR("failed to get new rbo buffer fences\n"); diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index f563284a7fac..6616a828f40b 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -162,7 +162,9 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj, } if (domain == RADEON_GEM_DOMAIN_CPU) { /* Asking for cpu access wait for object idle */ - r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ); + r = dma_resv_wait_timeout(robj->tbo.base.resv, + DMA_RESV_USAGE_READ, + true, 30 * HZ); if (!r) r = -EBUSY; @@ -524,7 +526,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, } robj = gem_to_radeon_bo(gobj); - r = dma_resv_test_signaled(robj->tbo.base.resv, true); + r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ); if (r == 0) r = -EBUSY; else @@ -553,7 +555,8 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, } robj = gem_to_radeon_bo(gobj); - ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ); + ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ, + true, 30 * HZ); if (ret == 0) r = -EBUSY; else if (ret < 0) diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index 9fa88549c89e..68ebeb1bdfff 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -66,8 +66,8 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn, return true; } - r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ, + false, MAX_SCHEDULE_TIMEOUT); if (r <= 0) DRM_ERROR("(%ld) failed to wait for user bo\n", r); diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c index b991ba1bcd51..49bbb2266c0f 100644 --- a/drivers/gpu/drm/radeon/radeon_sync.c +++ b/drivers/gpu/drm/radeon/radeon_sync.c @@ -96,7 +96,7 @@ int radeon_sync_resv(struct radeon_device *rdev, struct dma_fence *f; int r = 0; - dma_resv_for_each_fence(&cursor, resv, shared, f) { + dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(shared), f) { fence = to_radeon_fence(f); if (fence && fence->rdev == rdev) radeon_sync_fence(sync, fence); diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c 
index bc0f44299bb9..a50750740ab0 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -478,8 +478,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, return -EINVAL; } - r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_WRITE, + false, MAX_SCHEDULE_TIMEOUT); if (r <= 0) { DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r); return r ? r : -ETIME; diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index c5660b066554..76fd2904c7c6 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -705,7 +705,8 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, dma_resv_assert_held(obj->resv); - dma_resv_for_each_fence(&cursor, obj->resv, write, fence) { + dma_resv_for_each_fence(&cursor, obj->resv, dma_resv_usage_rw(write), + fence) { /* Make sure to grab an additional ref on the added fence */ dma_fence_get(fence); ret = drm_sched_job_add_dependency(job, fence); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index c49996cf25d0..cff05b62f3f7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -223,7 +223,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) struct dma_resv_iter cursor; struct dma_fence *fence; - dma_resv_iter_begin(&cursor, resv, true); + dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ); dma_resv_for_each_fence_unlocked(&cursor, fence) { if (!fence->ops->signaled) dma_fence_enable_sw_signaling(fence); @@ -252,7 +252,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, struct dma_resv *resv = &bo->base._resv; int ret; - if (dma_resv_test_signaled(resv, true)) + if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_READ)) ret = 0; else ret = -EBUSY; @@ -264,7 +264,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, dma_resv_unlock(bo->base.resv); spin_unlock(&bo->bdev->lru_lock); - lret = dma_resv_wait_timeout(resv, true, interruptible, + lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ, + interruptible, 30 * HZ); if (lret < 0) @@ -367,7 +368,8 @@ static void ttm_bo_release(struct kref *kref) /* Last resort, if we fail to allocate memory for the * fences block for the BO to become idle */ - dma_resv_wait_timeout(bo->base.resv, true, false, + dma_resv_wait_timeout(bo->base.resv, + DMA_RESV_USAGE_READ, false, 30 * HZ); } @@ -378,7 +380,7 @@ static void ttm_bo_release(struct kref *kref) ttm_mem_io_free(bdev, bo->resource); } - if (!dma_resv_test_signaled(bo->base.resv, true) || + if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ) || !dma_resv_trylock(bo->base.resv)) { /* The BO is not idle, resurrect it for delayed destroy */ ttm_bo_flush_all_fences(bo); @@ -1044,14 +1046,14 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, long timeout = 15 * HZ; if (no_wait) { - if (dma_resv_test_signaled(bo->base.resv, true)) + if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ)) return 0; else return -EBUSY; } - timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible, - timeout); + timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ, + interruptible, timeout); if (timeout < 0) return timeout; diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index 2ddbebca87d9..91fc4940c65a 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ 
b/drivers/gpu/drm/vgem/vgem_fence.c @@ -130,6 +130,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, struct vgem_file *vfile = file->driver_priv; struct dma_resv *resv; struct drm_gem_object *obj; + enum dma_resv_usage usage; struct dma_fence *fence; int ret; @@ -151,7 +152,8 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, /* Check for a conflicting fence */ resv = obj->resv; - if (!dma_resv_test_signaled(resv, arg->flags & VGEM_FENCE_WRITE)) { + usage = dma_resv_usage_rw(arg->flags & VGEM_FENCE_WRITE); + if (!dma_resv_test_signaled(resv, usage)) { ret = -EBUSY; goto err_fence; } diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 77743fd2c61a..f8d83358d2a0 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -518,9 +518,10 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data, return -ENOENT; if (args->flags & VIRTGPU_WAIT_NOWAIT) { - ret = dma_resv_test_signaled(obj->resv, true); + ret = dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ); } else { - ret = dma_resv_wait_timeout(obj->resv, true, true, timeout); + ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, + true, timeout); } if (ret == 0) ret = -EBUSY; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index fe13aa8b4a64..b96884f7d03d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -528,8 +528,8 @@ static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo, if (flags & drm_vmw_synccpu_allow_cs) { long lret; - lret = dma_resv_wait_timeout(bo->base.resv, true, true, - nonblock ? 0 : + lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ, + true, nonblock ? 0 : MAX_SCHEDULE_TIMEOUT); if (!lret) return -EBUSY; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 626067104751..a84d1d5628d0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -1164,7 +1164,8 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start, if (bo->moving) dma_fence_put(bo->moving); - return dma_resv_get_singleton(bo->base.resv, false, + return dma_resv_get_singleton(bo->base.resv, + DMA_RESV_USAGE_WRITE, &bo->moving); } diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c index d32cd7538835..f9901d273b8e 100644 --- a/drivers/infiniband/core/umem_dmabuf.c +++ b/drivers/infiniband/core/umem_dmabuf.c @@ -67,7 +67,8 @@ wait_fence: * may not be up-to-date. Wait for the exporter to finish * the migration. */ - return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv, false, + return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv, + DMA_RESV_USAGE_WRITE, false, MAX_SCHEDULE_TIMEOUT); } EXPORT_SYMBOL(ib_umem_dmabuf_map_pages); diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 6fb91956ab8d..a297397743a2 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -408,6 +408,9 @@ struct dma_buf { * pipelining across drivers. These do not set any fences for their * access. An example here is v4l. * + * - Drivers should use dma_resv_usage_rw() when retrieving fences as a + * dependency for implicit synchronization. 
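That last rule is the one every dma_resv_usage_rw(write) call site in this series follows; schematically, for a hypothetical job submission (add_job_dependency() and struct hypothetical_job are invented for illustration):

	/* Sketch: gathering implicit-sync dependencies for a new job. The
	 * apparent inversion is deliberate: a writer must wait for readers
	 * *and* writers, so it queries the larger READ set, while a reader
	 * only needs the write fences. Assumes the resv lock is held. */
	static void gather_implicit_deps(struct hypothetical_job *job,
					 struct dma_resv *resv, bool job_writes)
	{
		struct dma_resv_iter cursor;
		struct dma_fence *fence;

		dma_resv_for_each_fence(&cursor, resv,
					dma_resv_usage_rw(job_writes), fence)
			add_job_dependency(job, fence);
	}
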
+ * * DYNAMIC IMPORTER RULES: * * Dynamic importers, see dma_buf_attachment_is_dynamic(), have @@ -423,8 +426,9 @@ struct dma_buf { * * IMPORTANT: * - * All drivers must obey the struct dma_resv rules, specifically the - * rules for updating and obeying fences. + * All drivers and memory management related functions must obey the + * struct dma_resv rules, specifically the rules for updating and + * obeying fences. See enum dma_resv_usage for further descriptions. */ struct dma_resv *resv; diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 5fa04d0fccad..92cd8023980f 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -49,6 +49,53 @@ extern struct ww_class reservation_ww_class; struct dma_resv_list; +/** + * enum dma_resv_usage - how the fences from a dma_resv obj are used + * + * This enum describes the different use cases for a dma_resv object and + * controls which fences are returned when queried. + * + * An important fact is that there is the order WRITE<READ and when the + * dma_resv object is asked for fences for one use case the fences for the + * lower use case are returned as well. + */ +enum dma_resv_usage { + /** + * @DMA_RESV_USAGE_WRITE: Implicit write synchronization. + * + * This should only be used for userspace command submissions which add + * an implicit write dependency. + */ + DMA_RESV_USAGE_WRITE, + + /** + * @DMA_RESV_USAGE_READ: Implicit read synchronization. + * + * This should only be used for userspace command submissions which add + * an implicit read dependency. + */ + DMA_RESV_USAGE_READ +}; + +/** + * dma_resv_usage_rw - helper for implicit sync + * @write: true if we create a new implicit sync write + * + * This returns the implicit synchronization usage for write or read accesses, + * see enum dma_resv_usage. + */ +static inline enum dma_resv_usage dma_resv_usage_rw(bool write) +{ + /* This looks confusing at first sight, but is indeed correct. + * + * The rationale is that new write operations need to wait for the + * existing read and write operations to finish. + * But a new read operation only needs to wait for the existing write + * operations to finish. + */ + return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE; +} + @@ struct dma_resv_iter { - /** @all_fences: If all fences should be returned */ - bool all_fences; + /** @usage: Return fences with this usage or lower. */ + enum dma_resv_usage usage; @@ static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor, struct dma_resv *obj, - bool all_fences) + enum dma_resv_usage usage) { cursor->obj = obj; - cursor->all_fences = all_fences; + cursor->usage = usage; cursor->fence = NULL; } @@ -241,7 +288,7 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor) * dma_resv_for_each_fence - fence iterator * @cursor: a struct dma_resv_iter pointer * @obj: a dma_resv object pointer - * @all_fences: true if all fences should be returned + * @usage: controls which fences to return * @fence: the current fence * * Iterate over the fences in a struct dma_resv object while holding the @@ -250,8 +297,8 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor) * valid as long as the lock is held and so no extra reference to the fence is * taken. */ -#define dma_resv_for_each_fence(cursor, obj, all_fences, fence) \ - for (dma_resv_iter_begin(cursor, obj, all_fences), \ +#define dma_resv_for_each_fence(cursor, obj, usage, fence) \ + for (dma_resv_iter_begin(cursor, obj, usage), \ fence = dma_resv_iter_first(cursor); fence; \ fence = dma_resv_iter_next(cursor)) @@ -418,14 +465,14 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, struct dma_fence *fence); void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); -int dma_resv_get_fences(struct dma_resv *obj, bool write, +int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage, unsigned int *num_fences, struct dma_fence ***fences); -int dma_resv_get_singleton(struct dma_resv *obj, bool write, +int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage, struct dma_fence **fence); int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); -long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, - unsigned long timeout); -bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all); +long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, + bool intr, unsigned long timeout); +bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage); void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq); #endif /* _LINUX_RESERVATION_H */ -- cgit v1.2.3-59-g8ed1b From 73511edf8b196e6f1ccda0fdf294ff57aa2dc9db Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 9 Nov 2021 11:08:18 +0100 Subject: dma-buf: specify usage while adding fences to dma_resv obj v7 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of distinguishing between shared and exclusive 
fences specify the fence usage while adding fences. Rework all drivers to use this interface instead and deprecate the old one. v2: some kerneldoc comments suggested by Daniel v3: fix a missing case in radeon v4: rebase on nouveau changes, fix lockdep and temporarily disable warning v5: more documentation updates v6: separate internal dma_resv changes from this patch, avoids disabling the warning temporarily, rebase on upstream changes v7: fix missed case in lima driver, minimize changes to i915_gem_busy_ioctl Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-3-christian.koenig@amd.com --- drivers/dma-buf/dma-resv.c | 48 +++++++--- drivers/dma-buf/st-dma-resv.c | 101 ++++++--------------- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 6 +- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 10 +- drivers/gpu/drm/i915/gem/i915_gem_busy.c | 6 +- drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 3 +- drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c | 5 +- .../gpu/drm/i915/gem/selftests/i915_gem_migrate.c | 4 +- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 3 +- drivers/gpu/drm/i915/i915_vma.c | 8 +- .../gpu/drm/i915/selftests/intel_memory_region.c | 3 +- drivers/gpu/drm/lima/lima_gem.c | 7 +- drivers/gpu/drm/msm/msm_gem_submit.c | 6 +- drivers/gpu/drm/nouveau/nouveau_bo.c | 9 +- drivers/gpu/drm/nouveau/nouveau_fence.c | 4 +- drivers/gpu/drm/panfrost/panfrost_job.c | 2 +- drivers/gpu/drm/qxl/qxl_release.c | 3 +- drivers/gpu/drm/radeon/radeon_object.c | 6 +- drivers/gpu/drm/ttm/ttm_bo.c | 2 +- drivers/gpu/drm/ttm/ttm_bo_util.c | 5 +- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 6 +- drivers/gpu/drm/v3d/v3d_gem.c | 4 +- drivers/gpu/drm/vc4/vc4_gem.c | 2 +- drivers/gpu/drm/vgem/vgem_fence.c | 9 +- drivers/gpu/drm/virtio/virtgpu_gem.c | 3 +- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 3 +- include/linux/dma-buf.h | 16 ++-- include/linux/dma-resv.h | 25 +++-- 30 files changed, 149 insertions(+), 166 deletions(-) (limited to 'include/linux') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 17237e6ee30c..543dae6566d2 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -234,14 +234,14 @@ EXPORT_SYMBOL(dma_resv_reserve_fences); #ifdef CONFIG_DEBUG_MUTEXES /** - * dma_resv_reset_shared_max - reset shared fences for debugging + * dma_resv_reset_max_fences - reset shared fences for debugging * @obj: the dma_resv object to reset * * Reset the number of pre-reserved shared slots to test that drivers do * correct slot allocation using dma_resv_reserve_fences(). See also * &dma_resv_list.shared_max. */ -void dma_resv_reset_shared_max(struct dma_resv *obj) +void dma_resv_reset_max_fences(struct dma_resv *obj) { struct dma_resv_list *fences = dma_resv_shared_list(obj); @@ -251,7 +251,7 @@ void dma_resv_reset_shared_max(struct dma_resv *obj) if (fences) fences->shared_max = fences->shared_count; } -EXPORT_SYMBOL(dma_resv_reset_shared_max); +EXPORT_SYMBOL(dma_resv_reset_max_fences); #endif /** @@ -264,7 +264,8 @@ EXPORT_SYMBOL(dma_resv_reset_shared_max); * * See also &dma_resv.fence for a discussion of the semantics. 
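Every driver hunk that follows is the same mechanical rewrite of the fence-publishing side; schematically (hypothetical driver code, not taken from the patch):

	/* Before: the slot chosen by the caller encoded the usage. */
	if (write)
		dma_resv_add_excl_fence(obj->resv, fence);
	else
		dma_resv_add_shared_fence(obj->resv, fence);

	/* After: one entry point, the usage is stated explicitly. */
	dma_resv_add_fence(obj->resv, fence,
			   write ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
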
*/ -void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence) +static void dma_resv_add_shared_fence(struct dma_resv *obj, + struct dma_fence *fence) { struct dma_resv_list *fobj; struct dma_fence *old; @@ -305,13 +306,13 @@ replace: write_seqcount_end(&obj->seq); dma_fence_put(old); } -EXPORT_SYMBOL(dma_resv_add_shared_fence); /** * dma_resv_replace_fences - replace fences in the dma_resv obj * @obj: the reservation object * @context: the context of the fences to replace * @replacement: the new fence to use instead + * @usage: how the new fence is used, see enum dma_resv_usage * * Replace fences with a specified context with a new fence. Only valid if the * operation represented by the original fence no longer has access to the @@ -321,12 +322,16 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence); * update fence which makes the resource inaccessible. */ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, - struct dma_fence *replacement) + struct dma_fence *replacement, + enum dma_resv_usage usage) { struct dma_resv_list *list; struct dma_fence *old; unsigned int i; + /* Only readers supported for now */ + WARN_ON(usage != DMA_RESV_USAGE_READ); + dma_resv_assert_held(obj); write_seqcount_begin(&obj->seq); @@ -360,7 +365,8 @@ EXPORT_SYMBOL(dma_resv_replace_fences); * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock(). * See also &dma_resv.fence_excl for a discussion of the semantics. */ -void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) +static void dma_resv_add_excl_fence(struct dma_resv *obj, + struct dma_fence *fence) { struct dma_fence *old_fence = dma_resv_excl_fence(obj); @@ -375,7 +381,27 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) dma_fence_put(old_fence); } -EXPORT_SYMBOL(dma_resv_add_excl_fence); + +/** + * dma_resv_add_fence - Add a fence to the dma_resv obj + * @obj: the reservation object + * @fence: the fence to add + * @usage: how the fence is used, see enum dma_resv_usage + * + * Add a fence to a slot, @obj must be locked with dma_resv_lock(), and + * dma_resv_reserve_fences() has been called. + * + * See also &dma_resv.fence for a discussion of the semantics. + */ +void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, + enum dma_resv_usage usage) +{ + if (usage == DMA_RESV_USAGE_WRITE) + dma_resv_add_excl_fence(obj, fence); + else + dma_resv_add_shared_fence(obj, fence); +} +EXPORT_SYMBOL(dma_resv_add_fence); /* Restart the iterator by initializing all the necessary fields, but not the * relation to the dma_resv object. */ @@ -574,7 +600,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) } dma_fence_get(f); - if (dma_resv_iter_is_exclusive(&cursor)) + if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_WRITE) excl = f; else RCU_INIT_POINTER(list->shared[list->shared_count++], f); @@ -771,13 +797,13 @@ EXPORT_SYMBOL_GPL(dma_resv_test_signaled); */ void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq) { + static const char *usage[] = { "write", "read" }; struct dma_resv_iter cursor; struct dma_fence *fence; dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) { seq_printf(seq, "\t%s fence:", - dma_resv_iter_is_exclusive(&cursor) ? 
- "Exclusive" : "Shared"); + usage[dma_resv_iter_usage(&cursor)]); dma_fence_describe(fence, seq); } } diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c index d097981061b1..d0f7c2bfd4f0 100644 --- a/drivers/dma-buf/st-dma-resv.c +++ b/drivers/dma-buf/st-dma-resv.c @@ -58,8 +58,9 @@ static int sanitycheck(void *arg) return r; } -static int test_signaling(void *arg, enum dma_resv_usage usage) +static int test_signaling(void *arg) { + enum dma_resv_usage usage = (unsigned long)arg; struct dma_resv resv; struct dma_fence *f; int r; @@ -81,11 +82,7 @@ static int test_signaling(void *arg, enum dma_resv_usage usage) goto err_unlock; } - if (usage >= DMA_RESV_USAGE_READ) - dma_resv_add_shared_fence(&resv, f); - else - dma_resv_add_excl_fence(&resv, f); - + dma_resv_add_fence(&resv, f, usage); if (dma_resv_test_signaled(&resv, usage)) { pr_err("Resv unexpectedly signaled\n"); r = -EINVAL; @@ -105,18 +102,9 @@ err_free: return r; } -static int test_excl_signaling(void *arg) -{ - return test_signaling(arg, DMA_RESV_USAGE_WRITE); -} - -static int test_shared_signaling(void *arg) -{ - return test_signaling(arg, DMA_RESV_USAGE_READ); -} - -static int test_for_each(void *arg, enum dma_resv_usage usage) +static int test_for_each(void *arg) { + enum dma_resv_usage usage = (unsigned long)arg; struct dma_resv_iter cursor; struct dma_fence *f, *fence; struct dma_resv resv; @@ -139,10 +127,7 @@ static int test_for_each(void *arg, enum dma_resv_usage usage) goto err_unlock; } - if (usage >= DMA_RESV_USAGE_READ) - dma_resv_add_shared_fence(&resv, f); - else - dma_resv_add_excl_fence(&resv, f); + dma_resv_add_fence(&resv, f, usage); r = -ENOENT; dma_resv_for_each_fence(&cursor, &resv, usage, fence) { @@ -156,8 +141,7 @@ static int test_for_each(void *arg, enum dma_resv_usage usage) r = -EINVAL; goto err_unlock; } - if (dma_resv_iter_is_exclusive(&cursor) != - (usage >= DMA_RESV_USAGE_READ)) { + if (dma_resv_iter_usage(&cursor) != usage) { pr_err("Unexpected fence usage\n"); r = -EINVAL; goto err_unlock; @@ -177,18 +161,9 @@ err_free: return r; } -static int test_excl_for_each(void *arg) -{ - return test_for_each(arg, DMA_RESV_USAGE_WRITE); -} - -static int test_shared_for_each(void *arg) -{ - return test_for_each(arg, DMA_RESV_USAGE_READ); -} - -static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage) +static int test_for_each_unlocked(void *arg) { + enum dma_resv_usage usage = (unsigned long)arg; struct dma_resv_iter cursor; struct dma_fence *f, *fence; struct dma_resv resv; @@ -212,10 +187,7 @@ static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage) goto err_free; } - if (usage >= DMA_RESV_USAGE_READ) - dma_resv_add_shared_fence(&resv, f); - else - dma_resv_add_excl_fence(&resv, f); + dma_resv_add_fence(&resv, f, usage); dma_resv_unlock(&resv); r = -ENOENT; @@ -235,8 +207,7 @@ static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage) r = -EINVAL; goto err_iter_end; } - if (dma_resv_iter_is_exclusive(&cursor) != - (usage >= DMA_RESV_USAGE_READ)) { + if (dma_resv_iter_usage(&cursor) != usage) { pr_err("Unexpected fence usage\n"); r = -EINVAL; goto err_iter_end; @@ -262,18 +233,9 @@ err_free: return r; } -static int test_excl_for_each_unlocked(void *arg) -{ - return test_for_each_unlocked(arg, DMA_RESV_USAGE_WRITE); -} - -static int test_shared_for_each_unlocked(void *arg) -{ - return test_for_each_unlocked(arg, DMA_RESV_USAGE_READ); -} - -static int test_get_fences(void *arg, enum dma_resv_usage usage) +static int test_get_fences(void *arg) { + 
enum dma_resv_usage usage = (unsigned long)arg; struct dma_fence *f, **fences = NULL; struct dma_resv resv; int r, i; @@ -296,10 +258,7 @@ static int test_get_fences(void *arg, enum dma_resv_usage usage) goto err_resv; } - if (usage >= DMA_RESV_USAGE_READ) - dma_resv_add_shared_fence(&resv, f); - else - dma_resv_add_excl_fence(&resv, f); + dma_resv_add_fence(&resv, f, usage); dma_resv_unlock(&resv); r = dma_resv_get_fences(&resv, usage, &i, &fences); @@ -324,30 +283,24 @@ err_resv: return r; } -static int test_excl_get_fences(void *arg) -{ - return test_get_fences(arg, DMA_RESV_USAGE_WRITE); -} - -static int test_shared_get_fences(void *arg) -{ - return test_get_fences(arg, DMA_RESV_USAGE_READ); -} - int dma_resv(void) { static const struct subtest tests[] = { SUBTEST(sanitycheck), - SUBTEST(test_excl_signaling), - SUBTEST(test_shared_signaling), - SUBTEST(test_excl_for_each), - SUBTEST(test_shared_for_each), - SUBTEST(test_excl_for_each_unlocked), - SUBTEST(test_shared_for_each_unlocked), - SUBTEST(test_excl_get_fences), - SUBTEST(test_shared_get_fences), + SUBTEST(test_signaling), + SUBTEST(test_for_each), + SUBTEST(test_for_each_unlocked), + SUBTEST(test_get_fences), }; + enum dma_resv_usage usage; + int r; spin_lock_init(&fence_lock); - return subtests(tests, NULL); + for (usage = DMA_RESV_USAGE_WRITE; usage <= DMA_RESV_USAGE_READ; + ++usage) { + r = subtests(tests, (void *)(unsigned long)usage); + if (r) + return r; + } + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 98b1736bb221..5031e26e6716 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -263,7 +263,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, */ replacement = dma_fence_get_stub(); dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context, - replacement); + replacement, DMA_RESV_USAGE_READ); dma_fence_put(replacement); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 413f32c3fd63..76fd916424d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -55,8 +55,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); p->uf_entry.priority = 0; p->uf_entry.tv.bo = &bo->tbo; - /* One for TTM and one for the CS job */ - p->uf_entry.tv.num_shared = 2; + /* One for TTM and two for the CS job */ + p->uf_entry.tv.num_shared = 3; drm_gem_object_put(gobj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index a7f39f8ab7be..a3cdf8a24377 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1397,10 +1397,8 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, return; } - if (shared) - dma_resv_add_shared_fence(resv, fence); - else - dma_resv_add_excl_fence(resv, fence); + dma_resv_add_fence(resv, fence, shared ? 
DMA_RESV_USAGE_READ : + DMA_RESV_USAGE_WRITE); } /** diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 53f7c78628a4..98bb5c9239de 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -202,14 +202,10 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit) for (i = 0; i < submit->nr_bos; i++) { struct drm_gem_object *obj = &submit->bos[i].obj->base; + bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE; - if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE) - dma_resv_add_excl_fence(obj->resv, - submit->out_fence); - else - dma_resv_add_shared_fence(obj->resv, - submit->out_fence); - + dma_resv_add_fence(obj->resv, submit->out_fence, write ? + DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ); submit_unlock_object(submit, i); } } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c index 14a1c0ad8c3c..ddda468241ef 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -148,11 +148,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, if (dma_resv_iter_is_restarted(&cursor)) args->busy = 0; - if (dma_resv_iter_is_exclusive(&cursor)) - /* Translate the exclusive fence to the READ *and* WRITE engine */ + if (dma_resv_iter_usage(&cursor) <= DMA_RESV_USAGE_WRITE) + /* Translate the write fences to the READ *and* WRITE engine */ args->busy |= busy_check_writer(fence); else - /* Translate shared fences to READ set of engines */ + /* Translate read fences to READ set of engines */ args->busy |= busy_check_reader(fence); } dma_resv_iter_end(&cursor); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c index 1fd0cc9ca213..f5f2b8b115ea 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c @@ -116,7 +116,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, obj->base.resv, NULL, true, i915_fence_timeout(i915), I915_FENCE_GFP); - dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma); + dma_resv_add_fence(obj->base.resv, &clflush->base.dma, + DMA_RESV_USAGE_WRITE); dma_fence_work_commit(&clflush->base); /* * We must have successfully populated the pages(since we are diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c index 432ac74ff225..438b8a95b3d1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c @@ -637,9 +637,8 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, if (IS_ERR_OR_NULL(copy_fence)) return PTR_ERR_OR_ZERO(copy_fence); - dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence); - dma_resv_add_shared_fence(src_bo->base.resv, copy_fence); - + dma_resv_add_fence(dst_bo->base.resv, copy_fence, DMA_RESV_USAGE_WRITE); + dma_resv_add_fence(src_bo->base.resv, copy_fence, DMA_RESV_USAGE_READ); dma_fence_put(copy_fence); return 0; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c index 0e52eb87cd55..4997ed18b6e4 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c @@ -218,8 +218,8 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt, if (rq) { err = dma_resv_reserve_fences(obj->base.resv, 1); if (!err) - dma_resv_add_excl_fence(obj->base.resv, - &rq->fence); + 
dma_resv_add_fence(obj->base.resv, &rq->fence, + DMA_RESV_USAGE_WRITE); i915_gem_object_set_moving_fence(obj, &rq->fence); i915_request_put(rq); } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index a132e241c3ee..3a6e3f6d239f 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -1220,7 +1220,8 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements, expand32(POISON_INUSE), &rq); i915_gem_object_unpin_pages(obj); if (rq) { - dma_resv_add_excl_fence(obj->base.resv, &rq->fence); + dma_resv_add_fence(obj->base.resv, &rq->fence, + DMA_RESV_USAGE_WRITE); i915_gem_object_set_moving_fence(obj, &rq->fence); i915_request_put(rq); } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index bae3423f58e8..524477d8939e 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -1826,7 +1826,8 @@ int _i915_vma_move_to_active(struct i915_vma *vma, } if (fence) { - dma_resv_add_excl_fence(vma->obj->base.resv, fence); + dma_resv_add_fence(vma->obj->base.resv, fence, + DMA_RESV_USAGE_WRITE); obj->write_domain = I915_GEM_DOMAIN_RENDER; obj->read_domains = 0; } @@ -1838,7 +1839,8 @@ int _i915_vma_move_to_active(struct i915_vma *vma, } if (fence) { - dma_resv_add_shared_fence(vma->obj->base.resv, fence); + dma_resv_add_fence(vma->obj->base.resv, fence, + DMA_RESV_USAGE_READ); obj->write_domain = 0; } } @@ -2078,7 +2080,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm) goto out_rpm; } - dma_resv_add_shared_fence(obj->base.resv, fence); + dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ); dma_fence_put(fence); out_rpm: diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index 6114e013092b..73eb53edb8de 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -1056,7 +1056,8 @@ static int igt_lmem_write_cpu(void *arg) obj->mm.pages->sgl, I915_CACHE_NONE, true, 0xdeadbeaf, &rq); if (rq) { - dma_resv_add_excl_fence(obj->base.resv, &rq->fence); + dma_resv_add_fence(obj->base.resv, &rq->fence, + DMA_RESV_USAGE_WRITE); i915_request_put(rq); } diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index e0a11ee0e86d..0f1ca0b0db49 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -364,10 +364,9 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit) fence = lima_sched_context_queue_task(submit->task); for (i = 0; i < submit->nr_bos; i++) { - if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE) - dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence); - else - dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence); + dma_resv_add_fence(lima_bo_resv(bos[i]), fence, + submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE ? 
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ); } drm_gem_unlock_reservations((struct drm_gem_object **)bos, diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 3164db8be893..8d1eef914ba8 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -395,9 +395,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit) struct drm_gem_object *obj = &submit->bos[i].obj->base; if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) - dma_resv_add_excl_fence(obj->resv, submit->user_fence); + dma_resv_add_fence(obj->resv, submit->user_fence, + DMA_RESV_USAGE_WRITE); else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ) - dma_resv_add_shared_fence(obj->resv, submit->user_fence); + dma_resv_add_fence(obj->resv, submit->user_fence, + DMA_RESV_USAGE_READ); } } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index c6bb4dbcd735..05076e530e7d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1308,10 +1308,11 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool excl { struct dma_resv *resv = nvbo->bo.base.resv; - if (exclusive) - dma_resv_add_excl_fence(resv, &fence->base); - else if (fence) - dma_resv_add_shared_fence(resv, &fence->base); + if (!fence) + return; + + dma_resv_add_fence(resv, &fence->base, exclusive ? + DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ); } static void diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index d5e81ccee01c..7f01dcf81fab 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -360,9 +360,11 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(exclusive), fence) { + enum dma_resv_usage usage; struct nouveau_fence *f; - if (i == 0 && dma_resv_iter_is_exclusive(&cursor)) + usage = dma_resv_iter_usage(&cursor); + if (i == 0 && usage == DMA_RESV_USAGE_WRITE) continue; f = nouveau_local_fence(fence, chan->drm); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index c34114560e49..fda5871aebe3 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -268,7 +268,7 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos, int i; for (i = 0; i < bo_count; i++) - dma_resv_add_excl_fence(bos[i]->resv, fence); + dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE); } int panfrost_job_push(struct panfrost_job *job) diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index cde1e8ddaeaa..368d26da0d6a 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -429,7 +429,8 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) list_for_each_entry(entry, &release->bos, head) { bo = entry->bo; - dma_resv_add_shared_fence(bo->base.resv, &release->base); + dma_resv_add_fence(bo->base.resv, &release->base, + DMA_RESV_USAGE_READ); ttm_bo_move_to_lru_tail_unlocked(bo); dma_resv_unlock(bo->base.resv); } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 7ffd2e90f325..cb5c4aa45cef 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -791,8 +791,6 @@ void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, return; } - if (shared) - 
dma_resv_add_shared_fence(resv, &fence->base); - else - dma_resv_add_excl_fence(resv, &fence->base); + dma_resv_add_fence(resv, &fence->base, shared ? + DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE); } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index cff05b62f3f7..d74f9eea855e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -739,7 +739,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, return ret; } - dma_resv_add_shared_fence(bo->base.resv, fence); + dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE); ret = dma_resv_reserve_fences(bo->base.resv, 1); if (unlikely(ret)) { diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 1b96b91bf81b..7a96a1db13a7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -507,7 +507,8 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo, if (ret) return ret; - dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); + dma_resv_add_fence(&ghost_obj->base._resv, fence, + DMA_RESV_USAGE_WRITE); /** * If we're not moving to fixed memory, the TTM object @@ -561,7 +562,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); int ret = 0; - dma_resv_add_excl_fence(bo->base.resv, fence); + dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE); if (!evict) ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt); else if (!from->use_tt && pipeline) diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 789c645f004e..0eb995d25df1 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -154,10 +154,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; - if (entry->num_shared) - dma_resv_add_shared_fence(bo->base.resv, fence); - else - dma_resv_add_excl_fence(bo->base.resv, fence); + dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ? + DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE); ttm_bo_move_to_lru_tail_unlocked(bo); dma_resv_unlock(bo->base.resv); } diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 961812d33827..2352e9640922 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -550,8 +550,8 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv, for (i = 0; i < job->bo_count; i++) { /* XXX: Use shared fences for read-only objects. 
*/
- dma_resv_add_excl_fence(job->bo[i]->resv,
- job->done_fence);
+ dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
+ DMA_RESV_USAGE_WRITE);
}
drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 594bd6bb00d2..38550317e025 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -546,7 +546,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
bo = to_vc4_bo(&exec->bo[i]->base);
bo->seqno = seqno;
- dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
+ dma_resv_add_fence(bo->base.base.resv, exec->fence, DMA_RESV_USAGE_READ);
}
list_for_each_entry(bo, &exec->unref_list, unref_head) {
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 91fc4940c65a..c2a879734d40 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -161,12 +161,9 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
/* Expose the fence via the dma-buf */
dma_resv_lock(resv, NULL);
ret = dma_resv_reserve_fences(resv, 1);
- if (!ret) {
- if (arg->flags & VGEM_FENCE_WRITE)
- dma_resv_add_excl_fence(resv, fence);
- else
- dma_resv_add_shared_fence(resv, fence);
- }
+ if (!ret)
+ dma_resv_add_fence(resv, fence, arg->flags & VGEM_FENCE_WRITE ?
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
dma_resv_unlock(resv);
/* Record the fence in our idr for later signaling */
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 1820ca6cf673..580a78809836 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -250,7 +250,8 @@ void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
int i;
for (i = 0; i < objs->nents; i++)
- dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
+ dma_resv_add_fence(objs->objs[i]->resv, fence,
+ DMA_RESV_USAGE_WRITE);
}
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index b96884f7d03d..bec50223efe5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -758,7 +758,8 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (!ret)
- dma_resv_add_excl_fence(bo->base.resv, &fence->base);
+ dma_resv_add_fence(bo->base.resv, &fence->base,
+ DMA_RESV_USAGE_WRITE);
else
/* Last resort fallback when we are OOM */
dma_fence_wait(&fence->base, false);
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index a297397743a2..71731796c8c3 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -393,15 +393,15 @@ struct dma_buf {
* e.g. exposed in `Implicit Fence Poll Support`_ must follow the
* below rules.
*
- * - Drivers must add a shared fence through dma_resv_add_shared_fence()
- * for anything the userspace API considers a read access. This highly
- * depends upon the API and window system.
+ * - Drivers must add a read fence through dma_resv_add_fence() with the
+ * DMA_RESV_USAGE_READ flag for anything the userspace API considers a
+ * read access. This highly depends upon the API and window system.
*
- * - Similarly drivers must set the exclusive fence through
- * dma_resv_add_excl_fence() for anything the userspace API considers
- * write access.
+ * - Similarly drivers must add a write fence through + * dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for + * anything the userspace API considers write access. * - * - Drivers may just always set the exclusive fence, since that only + * - Drivers may just always add a write fence, since that only * causes unecessarily synchronization, but no correctness issues. * * - Some drivers only expose a synchronous userspace API with no @@ -416,7 +416,7 @@ struct dma_buf { * Dynamic importers, see dma_buf_attachment_is_dynamic(), have * additional constraints on how they set up fences: * - * - Dynamic importers must obey the exclusive fence and wait for it to + * - Dynamic importers must obey the write fences and wait for them to * signal before allowing access to the buffer's underlying storage * through the device. * diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 92cd8023980f..98dc5234b487 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -195,6 +195,9 @@ struct dma_resv_iter { /** @fence: the currently handled fence */ struct dma_fence *fence; + /** @fence_usage: the usage of the current fence */ + enum dma_resv_usage fence_usage; + /** @seq: sequence number to check for modifications */ unsigned int seq; @@ -244,14 +247,15 @@ static inline void dma_resv_iter_end(struct dma_resv_iter *cursor) } /** - * dma_resv_iter_is_exclusive - test if the current fence is the exclusive one + * dma_resv_iter_usage - Return the usage of the current fence * @cursor: the cursor of the current position * - * Returns true if the currently returned fence is the exclusive one. + * Returns the usage of the currently processed fence. */ -static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor) +static inline enum dma_resv_usage +dma_resv_iter_usage(struct dma_resv_iter *cursor) { - return cursor->index == 0; + return cursor->fence_usage; } /** @@ -306,9 +310,9 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor) #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) #ifdef CONFIG_DEBUG_MUTEXES -void dma_resv_reset_shared_max(struct dma_resv *obj); +void dma_resv_reset_max_fences(struct dma_resv *obj); #else -static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {} +static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {} #endif /** @@ -454,17 +458,18 @@ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj) */ static inline void dma_resv_unlock(struct dma_resv *obj) { - dma_resv_reset_shared_max(obj); + dma_resv_reset_max_fences(obj); ww_mutex_unlock(&obj->lock); } void dma_resv_init(struct dma_resv *obj); void dma_resv_fini(struct dma_resv *obj); int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences); -void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); +void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, + enum dma_resv_usage usage); void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, - struct dma_fence *fence); -void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); + struct dma_fence *fence, + enum dma_resv_usage usage); int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage, unsigned int *num_fences, struct dma_fence ***fences); int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage, -- cgit v1.2.3-59-g8ed1b From 047a1b877ed48098bed71fcfb1d4891e1b54441d Mon Sep 17 00:00:00 2001 From: Christian König 
Date: Tue, 23 Nov 2021 09:33:07 +0100 Subject: dma-buf & drm/amdgpu: remove dma_resv workaround MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rework the internals of the dma_resv object to allow adding more than one write fence and remember for each fence what purpose it had. This allows removing the workaround from amdgpu which used a container for this instead. Signed-off-by: Christian König Reviewed-by: Daniel Vetter Cc: amd-gfx@lists.freedesktop.org Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-4-christian.koenig@amd.com --- drivers/dma-buf/dma-resv.c | 353 +++++++++++----------------- drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 53 +---- include/linux/dma-resv.h | 47 +--- 4 files changed, 157 insertions(+), 297 deletions(-) (limited to 'include/linux') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 543dae6566d2..378d47e1cfea 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -44,12 +44,12 @@ /** * DOC: Reservation Object Overview * - * The reservation object provides a mechanism to manage shared and - * exclusive fences associated with a buffer. A reservation object - * can have attached one exclusive fence (normally associated with - * write operations) or N shared fences (read operations). The RCU - * mechanism is used to protect read access to fences from locked - * write-side updates. + * The reservation object provides a mechanism to manage a container of + * dma_fence object associated with a resource. A reservation object + * can have any number of fences attaches to it. Each fence carries an usage + * parameter determining how the operation represented by the fence is using the + * resource. The RCU mechanism is used to protect read access to fences from + * locked write-side updates. * * See struct dma_resv for more details. */ @@ -57,39 +57,59 @@ DEFINE_WD_CLASS(reservation_ww_class); EXPORT_SYMBOL(reservation_ww_class); +/* Mask for the lower fence pointer bits */ +#define DMA_RESV_LIST_MASK 0x3 + struct dma_resv_list { struct rcu_head rcu; - u32 shared_count, shared_max; - struct dma_fence __rcu *shared[]; + u32 num_fences, max_fences; + struct dma_fence __rcu *table[]; }; -/** - * dma_resv_list_alloc - allocate fence list - * @shared_max: number of fences we need space for - * +/* Extract the fence and usage flags from an RCU protected entry in the list. */ +static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index, + struct dma_resv *resv, struct dma_fence **fence, + enum dma_resv_usage *usage) +{ + long tmp; + + tmp = (long)rcu_dereference_check(list->table[index], + resv ? dma_resv_held(resv) : true); + *fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK); + if (usage) + *usage = tmp & DMA_RESV_LIST_MASK; +} + +/* Set the fence and usage flags at the specific index in the list. */ +static void dma_resv_list_set(struct dma_resv_list *list, + unsigned int index, + struct dma_fence *fence, + enum dma_resv_usage usage) +{ + long tmp = ((long)fence) | usage; + + RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp); +} + +/* * Allocate a new dma_resv_list and make sure to correctly initialize - * shared_max. + * max_fences. 
*/ -static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max) +static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences) { struct dma_resv_list *list; - list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL); + list = kmalloc(struct_size(list, table, max_fences), GFP_KERNEL); if (!list) return NULL; - list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) / - sizeof(*list->shared); + list->max_fences = (ksize(list) - offsetof(typeof(*list), table)) / + sizeof(*list->table); return list; } -/** - * dma_resv_list_free - free fence list - * @list: list to free - * - * Free a dma_resv_list and make sure to drop all references. - */ +/* Free a dma_resv_list and make sure to drop all references. */ static void dma_resv_list_free(struct dma_resv_list *list) { unsigned int i; @@ -97,9 +117,12 @@ static void dma_resv_list_free(struct dma_resv_list *list) if (!list) return; - for (i = 0; i < list->shared_count; ++i) - dma_fence_put(rcu_dereference_protected(list->shared[i], true)); + for (i = 0; i < list->num_fences; ++i) { + struct dma_fence *fence; + dma_resv_list_entry(list, i, NULL, &fence, NULL); + dma_fence_put(fence); + } kfree_rcu(list, rcu); } @@ -112,8 +135,7 @@ void dma_resv_init(struct dma_resv *obj) ww_mutex_init(&obj->lock, &reservation_ww_class); seqcount_ww_mutex_init(&obj->seq, &obj->lock); - RCU_INIT_POINTER(obj->fence, NULL); - RCU_INIT_POINTER(obj->fence_excl, NULL); + RCU_INIT_POINTER(obj->fences, NULL); } EXPORT_SYMBOL(dma_resv_init); @@ -123,46 +145,32 @@ EXPORT_SYMBOL(dma_resv_init); */ void dma_resv_fini(struct dma_resv *obj) { - struct dma_resv_list *fobj; - struct dma_fence *excl; - /* * This object should be dead and all references must have * been released to it, so no need to be protected with rcu. */ - excl = rcu_dereference_protected(obj->fence_excl, 1); - if (excl) - dma_fence_put(excl); - - fobj = rcu_dereference_protected(obj->fence, 1); - dma_resv_list_free(fobj); + dma_resv_list_free(rcu_dereference_protected(obj->fences, true)); ww_mutex_destroy(&obj->lock); } EXPORT_SYMBOL(dma_resv_fini); -static inline struct dma_fence * -dma_resv_excl_fence(struct dma_resv *obj) -{ - return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj)); -} - -static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj) +/* Dereference the fences while ensuring RCU rules */ +static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj) { - return rcu_dereference_check(obj->fence, dma_resv_held(obj)); + return rcu_dereference_check(obj->fences, dma_resv_held(obj)); } /** - * dma_resv_reserve_fences - Reserve space to add shared fences to - * a dma_resv. + * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object. * @obj: reservation object * @num_fences: number of fences we want to add * - * Should be called before dma_resv_add_shared_fence(). Must - * be called with @obj locked through dma_resv_lock(). + * Should be called before dma_resv_add_fence(). Must be called with @obj + * locked through dma_resv_lock(). * * Note that the preallocated slots need to be re-reserved if @obj is unlocked - * at any time before calling dma_resv_add_shared_fence(). This is validated - * when CONFIG_DEBUG_MUTEXES is enabled. + * at any time before calling dma_resv_add_fence(). This is validated when + * CONFIG_DEBUG_MUTEXES is enabled. 
* * RETURNS * Zero for success, or -errno @@ -174,11 +182,11 @@ int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences) dma_resv_assert_held(obj); - old = dma_resv_shared_list(obj); - if (old && old->shared_max) { - if ((old->shared_count + num_fences) <= old->shared_max) + old = dma_resv_fences_list(obj); + if (old && old->max_fences) { + if ((old->num_fences + num_fences) <= old->max_fences) return 0; - max = max(old->shared_count + num_fences, old->shared_max * 2); + max = max(old->num_fences + num_fences, old->max_fences * 2); } else { max = max(4ul, roundup_pow_of_two(num_fences)); } @@ -193,27 +201,27 @@ int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences) * references from the old struct are carried over to * the new. */ - for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) { + for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) { + enum dma_resv_usage usage; struct dma_fence *fence; - fence = rcu_dereference_protected(old->shared[i], - dma_resv_held(obj)); + dma_resv_list_entry(old, i, obj, &fence, &usage); if (dma_fence_is_signaled(fence)) - RCU_INIT_POINTER(new->shared[--k], fence); + RCU_INIT_POINTER(new->table[--k], fence); else - RCU_INIT_POINTER(new->shared[j++], fence); + dma_resv_list_set(new, j++, fence, usage); } - new->shared_count = j; + new->num_fences = j; /* * We are not changing the effective set of fences here so can * merely update the pointer to the new array; both existing * readers and new readers will see exactly the same set of - * active (unsignaled) shared fences. Individual fences and the + * active (unsignaled) fences. Individual fences and the * old array are protected by RCU and so will not vanish under * the gaze of the rcu_read_lock() readers. */ - rcu_assign_pointer(obj->fence, new); + rcu_assign_pointer(obj->fences, new); if (!old) return 0; @@ -222,7 +230,7 @@ int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences) for (i = k; i < max; ++i) { struct dma_fence *fence; - fence = rcu_dereference_protected(new->shared[i], + fence = rcu_dereference_protected(new->table[i], dma_resv_held(obj)); dma_fence_put(fence); } @@ -234,38 +242,39 @@ EXPORT_SYMBOL(dma_resv_reserve_fences); #ifdef CONFIG_DEBUG_MUTEXES /** - * dma_resv_reset_max_fences - reset shared fences for debugging + * dma_resv_reset_max_fences - reset fences for debugging * @obj: the dma_resv object to reset * - * Reset the number of pre-reserved shared slots to test that drivers do + * Reset the number of pre-reserved fence slots to test that drivers do * correct slot allocation using dma_resv_reserve_fences(). See also - * &dma_resv_list.shared_max. + * &dma_resv_list.max_fences. 
*/ void dma_resv_reset_max_fences(struct dma_resv *obj) { - struct dma_resv_list *fences = dma_resv_shared_list(obj); + struct dma_resv_list *fences = dma_resv_fences_list(obj); dma_resv_assert_held(obj); - /* Test shared fence slot reservation */ + /* Test fence slot reservation */ if (fences) - fences->shared_max = fences->shared_count; + fences->max_fences = fences->num_fences; } EXPORT_SYMBOL(dma_resv_reset_max_fences); #endif /** - * dma_resv_add_shared_fence - Add a fence to a shared slot + * dma_resv_add_fence - Add a fence to the dma_resv obj * @obj: the reservation object - * @fence: the shared fence to add + * @fence: the fence to add + * @usage: how the fence is used, see enum dma_resv_usage * - * Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and + * Add a fence to a slot, @obj must be locked with dma_resv_lock(), and * dma_resv_reserve_fences() has been called. * * See also &dma_resv.fence for a discussion of the semantics. */ -static void dma_resv_add_shared_fence(struct dma_resv *obj, - struct dma_fence *fence) +void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, + enum dma_resv_usage usage) { struct dma_resv_list *fobj; struct dma_fence *old; @@ -280,32 +289,33 @@ static void dma_resv_add_shared_fence(struct dma_resv *obj, */ WARN_ON(dma_fence_is_container(fence)); - fobj = dma_resv_shared_list(obj); - count = fobj->shared_count; + fobj = dma_resv_fences_list(obj); + count = fobj->num_fences; write_seqcount_begin(&obj->seq); for (i = 0; i < count; ++i) { + enum dma_resv_usage old_usage; - old = rcu_dereference_protected(fobj->shared[i], - dma_resv_held(obj)); - if (old->context == fence->context || + dma_resv_list_entry(fobj, i, obj, &old, &old_usage); + if ((old->context == fence->context && old_usage >= usage) || dma_fence_is_signaled(old)) goto replace; } - BUG_ON(fobj->shared_count >= fobj->shared_max); + BUG_ON(fobj->num_fences >= fobj->max_fences); old = NULL; count++; replace: - RCU_INIT_POINTER(fobj->shared[i], fence); - /* pointer update must be visible before we extend the shared_count */ - smp_store_mb(fobj->shared_count, count); + dma_resv_list_set(fobj, i, fence, usage); + /* pointer update must be visible before we extend the num_fences */ + smp_store_mb(fobj->num_fences, count); write_seqcount_end(&obj->seq); dma_fence_put(old); } +EXPORT_SYMBOL(dma_resv_add_fence); /** * dma_resv_replace_fences - replace fences in the dma_resv obj @@ -326,128 +336,63 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, enum dma_resv_usage usage) { struct dma_resv_list *list; - struct dma_fence *old; unsigned int i; - /* Only readers supported for now */ - WARN_ON(usage != DMA_RESV_USAGE_READ); - dma_resv_assert_held(obj); + list = dma_resv_fences_list(obj); write_seqcount_begin(&obj->seq); + for (i = 0; list && i < list->num_fences; ++i) { + struct dma_fence *old; - old = dma_resv_excl_fence(obj); - if (old->context == context) { - RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement)); - dma_fence_put(old); - } - - list = dma_resv_shared_list(obj); - for (i = 0; list && i < list->shared_count; ++i) { - old = rcu_dereference_protected(list->shared[i], - dma_resv_held(obj)); + dma_resv_list_entry(list, i, obj, &old, NULL); if (old->context != context) continue; - rcu_assign_pointer(list->shared[i], dma_fence_get(replacement)); + dma_resv_list_set(list, i, replacement, usage); dma_fence_put(old); } - write_seqcount_end(&obj->seq); } EXPORT_SYMBOL(dma_resv_replace_fences); -/** - * dma_resv_add_excl_fence - 
Add an exclusive fence. - * @obj: the reservation object - * @fence: the exclusive fence to add - * - * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock(). - * See also &dma_resv.fence_excl for a discussion of the semantics. - */ -static void dma_resv_add_excl_fence(struct dma_resv *obj, - struct dma_fence *fence) -{ - struct dma_fence *old_fence = dma_resv_excl_fence(obj); - - dma_resv_assert_held(obj); - - dma_fence_get(fence); - - write_seqcount_begin(&obj->seq); - /* write_seqcount_begin provides the necessary memory barrier */ - RCU_INIT_POINTER(obj->fence_excl, fence); - write_seqcount_end(&obj->seq); - - dma_fence_put(old_fence); -} - -/** - * dma_resv_add_fence - Add a fence to the dma_resv obj - * @obj: the reservation object - * @fence: the fence to add - * @usage: how the fence is used, see enum dma_resv_usage - * - * Add a fence to a slot, @obj must be locked with dma_resv_lock(), and - * dma_resv_reserve_fences() has been called. - * - * See also &dma_resv.fence for a discussion of the semantics. - */ -void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, - enum dma_resv_usage usage) -{ - if (usage == DMA_RESV_USAGE_WRITE) - dma_resv_add_excl_fence(obj, fence); - else - dma_resv_add_shared_fence(obj, fence); -} -EXPORT_SYMBOL(dma_resv_add_fence); - -/* Restart the iterator by initializing all the necessary fields, but not the - * relation to the dma_resv object. */ +/* Restart the unlocked iteration by initializing the cursor object. */ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor) { cursor->seq = read_seqcount_begin(&cursor->obj->seq); - cursor->index = -1; - cursor->shared_count = 0; - if (cursor->usage >= DMA_RESV_USAGE_READ) { - cursor->fences = dma_resv_shared_list(cursor->obj); - if (cursor->fences) - cursor->shared_count = cursor->fences->shared_count; - } else { - cursor->fences = NULL; - } + cursor->index = 0; + cursor->num_fences = 0; + cursor->fences = dma_resv_fences_list(cursor->obj); + if (cursor->fences) + cursor->num_fences = cursor->fences->num_fences; cursor->is_restarted = true; } /* Walk to the next not signaled fence and grab a reference to it */ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor) { - struct dma_resv *obj = cursor->obj; + if (!cursor->fences) + return; do { /* Drop the reference from the previous round */ dma_fence_put(cursor->fence); - if (cursor->index == -1) { - cursor->fence = dma_resv_excl_fence(obj); - cursor->index++; - if (!cursor->fence) - continue; - - } else if (!cursor->fences || - cursor->index >= cursor->shared_count) { + if (cursor->index >= cursor->num_fences) { cursor->fence = NULL; break; - } else { - struct dma_resv_list *fences = cursor->fences; - unsigned int idx = cursor->index++; - - cursor->fence = rcu_dereference(fences->shared[idx]); } + + dma_resv_list_entry(cursor->fences, cursor->index++, + cursor->obj, &cursor->fence, + &cursor->fence_usage); cursor->fence = dma_fence_get_rcu(cursor->fence); - if (!cursor->fence || !dma_fence_is_signaled(cursor->fence)) + if (!cursor->fence) + break; + + if (!dma_fence_is_signaled(cursor->fence) && + cursor->usage >= cursor->fence_usage) break; } while (true); } @@ -522,15 +467,9 @@ struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor) dma_resv_assert_held(cursor->obj); cursor->index = 0; - if (cursor->usage >= DMA_RESV_USAGE_READ) - cursor->fences = dma_resv_shared_list(cursor->obj); - else - cursor->fences = NULL; - - fence = dma_resv_excl_fence(cursor->obj); - if (!fence) - 
fence = dma_resv_iter_next(cursor); + cursor->fences = dma_resv_fences_list(cursor->obj); + fence = dma_resv_iter_next(cursor); cursor->is_restarted = true; return fence; } @@ -545,17 +484,22 @@ EXPORT_SYMBOL_GPL(dma_resv_iter_first); */ struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor) { - unsigned int idx; + struct dma_fence *fence; dma_resv_assert_held(cursor->obj); cursor->is_restarted = false; - if (!cursor->fences || cursor->index >= cursor->fences->shared_count) - return NULL; - idx = cursor->index++; - return rcu_dereference_protected(cursor->fences->shared[idx], - dma_resv_held(cursor->obj)); + do { + if (!cursor->fences || + cursor->index >= cursor->fences->num_fences) + return NULL; + + dma_resv_list_entry(cursor->fences, cursor->index++, + cursor->obj, &fence, &cursor->fence_usage); + } while (cursor->fence_usage > cursor->usage); + + return fence; } EXPORT_SYMBOL_GPL(dma_resv_iter_next); @@ -570,57 +514,43 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) { struct dma_resv_iter cursor; struct dma_resv_list *list; - struct dma_fence *f, *excl; + struct dma_fence *f; dma_resv_assert_held(dst); list = NULL; - excl = NULL; dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_READ); dma_resv_for_each_fence_unlocked(&cursor, f) { if (dma_resv_iter_is_restarted(&cursor)) { dma_resv_list_free(list); - dma_fence_put(excl); - - if (cursor.shared_count) { - list = dma_resv_list_alloc(cursor.shared_count); - if (!list) { - dma_resv_iter_end(&cursor); - return -ENOMEM; - } - list->shared_count = 0; - - } else { - list = NULL; + list = dma_resv_list_alloc(cursor.num_fences); + if (!list) { + dma_resv_iter_end(&cursor); + return -ENOMEM; } - excl = NULL; + list->num_fences = 0; } dma_fence_get(f); - if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_WRITE) - excl = f; - else - RCU_INIT_POINTER(list->shared[list->shared_count++], f); + dma_resv_list_set(list, list->num_fences++, f, + dma_resv_iter_usage(&cursor)); } dma_resv_iter_end(&cursor); write_seqcount_begin(&dst->seq); - excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst)); - list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst)); + list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst)); write_seqcount_end(&dst->seq); dma_resv_list_free(list); - dma_fence_put(excl); - return 0; } EXPORT_SYMBOL(dma_resv_copy_fences); /** - * dma_resv_get_fences - Get an object's shared and exclusive + * dma_resv_get_fences - Get an object's fences * fences without update side lock held * @obj: the reservation object * @usage: controls which fences to include, see enum dma_resv_usage. @@ -649,7 +579,7 @@ int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage, while (*num_fences) dma_fence_put((*fences)[--(*num_fences)]); - count = cursor.shared_count + 1; + count = cursor.num_fences + 1; /* Eventually re-allocate the array */ *fences = krealloc_array(*fences, count, @@ -723,8 +653,7 @@ int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage, EXPORT_SYMBOL_GPL(dma_resv_get_singleton); /** - * dma_resv_wait_timeout - Wait on reservation's objects - * shared and/or exclusive fences. + * dma_resv_wait_timeout - Wait on reservation's objects fences * @obj: the reservation object * @usage: controls which fences to include, see enum dma_resv_usage. 
* @intr: if true, do interruptible wait diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index 044b41f0bfd9..529d52a204cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -34,7 +34,6 @@ struct amdgpu_fpriv; struct amdgpu_bo_list_entry { struct ttm_validate_buffer tv; struct amdgpu_bo_va *bo_va; - struct dma_fence_chain *chain; uint32_t priority; struct page **user_pages; bool user_invalidated; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 76fd916424d6..8de283997769 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -574,14 +574,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); e->bo_va = amdgpu_vm_bo_find(vm, bo); - - if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) { - e->chain = dma_fence_chain_alloc(); - if (!e->chain) { - r = -ENOMEM; - goto error_validate; - } - } } /* Move fence waiting after getting reservation lock of @@ -642,13 +634,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, } error_validate: - if (r) { - amdgpu_bo_list_for_each_entry(e, p->bo_list) { - dma_fence_chain_free(e->chain); - e->chain = NULL; - } + if (r) ttm_eu_backoff_reservation(&p->ticket, &p->validated); - } out: return r; } @@ -688,17 +675,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, { unsigned i; - if (error && backoff) { - struct amdgpu_bo_list_entry *e; - - amdgpu_bo_list_for_each_entry(e, parser->bo_list) { - dma_fence_chain_free(e->chain); - e->chain = NULL; - } - + if (error && backoff) ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); - } for (i = 0; i < parser->num_post_deps; i++) { drm_syncobj_put(parser->post_deps[i].syncobj); @@ -1272,31 +1251,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm); - amdgpu_bo_list_for_each_entry(e, p->bo_list) { - struct dma_resv *resv = e->tv.bo->base.resv; - struct dma_fence_chain *chain = e->chain; - struct dma_resv_iter cursor; - struct dma_fence *fence; - - if (!chain) - continue; - - /* - * Temporary workaround dma_resv shortcommings by wrapping up - * the submission in a dma_fence_chain and add it as exclusive - * fence. - * - * TODO: Remove together with dma_resv rework. - */ - dma_resv_for_each_fence(&cursor, resv, - DMA_RESV_USAGE_WRITE, - fence) { - break; - } - dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1); - rcu_assign_pointer(resv->fence_excl, &chain->base); - e->chain = NULL; - } + /* Make sure all BOs are remembered as writers */ + amdgpu_bo_list_for_each_entry(e, p->bo_list) + e->tv.num_shared = 0; ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); mutex_unlock(&p->adev->notifier_lock); diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 98dc5234b487..7bb7e7edbb6f 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -99,8 +99,8 @@ static inline enum dma_resv_usage dma_resv_usage_rw(bool write) /** * struct dma_resv - a reservation object manages fences for a buffer * - * There are multiple uses for this, with sometimes slightly different rules in - * how the fence slots are used. + * This is a container for dma_fence objects which needs to handle multiple use + * cases. 
* * One use is to synchronize cross-driver access to a struct dma_buf, either for * dynamic buffer management or just to handle implicit synchronization between @@ -130,47 +130,22 @@ struct dma_resv { * @seq: * * Sequence count for managing RCU read-side synchronization, allows - * read-only access to @fence_excl and @fence while ensuring we take a - * consistent snapshot. + * read-only access to @fences while ensuring we take a consistent + * snapshot. */ seqcount_ww_mutex_t seq; /** - * @fence_excl: + * @fences: * - * The exclusive fence, if there is one currently. + * Array of fences which where added to the dma_resv object * - * To guarantee that no fences are lost, this new fence must signal - * only after the previous exclusive fence has signalled. If - * semantically only a new access is added without actually treating the - * previous one as a dependency the exclusive fences can be strung - * together using struct dma_fence_chain. - * - * Note that actual semantics of what an exclusive or shared fence mean - * is defined by the user, for reservation objects shared across drivers - * see &dma_buf.resv. - */ - struct dma_fence __rcu *fence_excl; - - /** - * @fence: - * - * List of current shared fences. - * - * There are no ordering constraints of shared fences against the - * exclusive fence slot. If a waiter needs to wait for all access, it - * has to wait for both sets of fences to signal. - * - * A new fence is added by calling dma_resv_add_shared_fence(). Since - * this often needs to be done past the point of no return in command + * A new fence is added by calling dma_resv_add_fence(). Since this + * often needs to be done past the point of no return in command * submission it cannot fail, and therefore sufficient slots need to be * reserved by calling dma_resv_reserve_fences(). - * - * Note that actual semantics of what an exclusive or shared fence mean - * is defined by the user, for reservation objects shared across drivers - * see &dma_buf.resv. */ - struct dma_resv_list __rcu *fence; + struct dma_resv_list __rcu *fences; }; /** @@ -207,8 +182,8 @@ struct dma_resv_iter { /** @fences: the shared fences; private, *MUST* not dereference */ struct dma_resv_list *fences; - /** @shared_count: number of shared fences */ - unsigned int shared_count; + /** @num_fences: number of fences */ + unsigned int num_fences; /** @is_restarted: true if this is the first returned fence */ bool is_restarted; -- cgit v1.2.3-59-g8ed1b From b29895e18304feb7e8afc6388db7ece60327b23c Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 26 Nov 2021 14:12:42 +0100 Subject: dma-buf: add DMA_RESV_USAGE_KERNEL v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add an usage for kernel submissions. Waiting for those are mandatory for dynamic DMA-bufs. As a precaution this patch also changes all occurrences where fences are added as part of memory management in TTM, VMWGFX and i915 to use the new value because it now becomes possible for drivers to ignore fences with the WRITE usage. v2: use "must" in documentation, fix whitespaces v3: separate out some driver changes and better document why some changes should still be part of this patch. 
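A minimal sketch of the intended pattern, assuming a TTM based driver; the
my_*() helper names are invented for illustration, only the dma_resv_*()
calls, DMA_RESV_USAGE_KERNEL and MAX_SCHEDULE_TIMEOUT are the real interface:

    /*
     * Illustrative sketch only, not part of this patch. A driver
     * publishes the fence of a kernel-internal copy (e.g. a buffer
     * move) with KERNEL usage, which every other access must wait for.
     */
    static int my_publish_move_fence(struct ttm_buffer_object *bo,
                                     struct dma_fence *move_fence)
    {
            int ret;

            dma_resv_assert_held(bo->base.resv);

            /* Slots must be reserved before the point of no return. */
            ret = dma_resv_reserve_fences(bo->base.resv, 1);
            if (ret)
                    return ret;

            dma_resv_add_fence(bo->base.resv, move_fence,
                               DMA_RESV_USAGE_KERNEL);
            return 0;
    }

    static long my_wait_for_kernel_fences(struct ttm_buffer_object *bo)
    {
            /*
             * Because of the usage ordering, asking for KERNEL only
             * waits for kernel fences and ignores the WRITE and READ
             * fences of ordinary submissions.
             */
            return dma_resv_wait_timeout(bo->base.resv,
                                         DMA_RESV_USAGE_KERNEL,
                                         false, MAX_SCHEDULE_TIMEOUT);
    }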
Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-5-christian.koenig@amd.com --- drivers/dma-buf/dma-resv.c | 2 +- drivers/dma-buf/st-dma-resv.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 2 +- drivers/gpu/drm/ttm/ttm_bo.c | 2 +- drivers/gpu/drm/ttm/ttm_bo_util.c | 4 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 2 +- include/linux/dma-resv.h | 24 +++++++++++++++++++++--- 7 files changed, 28 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 378d47e1cfea..f4860e5f2d8b 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -726,7 +726,7 @@ EXPORT_SYMBOL_GPL(dma_resv_test_signaled); */ void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq) { - static const char *usage[] = { "write", "read" }; + static const char *usage[] = { "kernel", "write", "read" }; struct dma_resv_iter cursor; struct dma_fence *fence; diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c index d0f7c2bfd4f0..062b57d63fa6 100644 --- a/drivers/dma-buf/st-dma-resv.c +++ b/drivers/dma-buf/st-dma-resv.c @@ -296,7 +296,7 @@ int dma_resv(void) int r; spin_lock_init(&fence_lock); - for (usage = DMA_RESV_USAGE_WRITE; usage <= DMA_RESV_USAGE_READ; + for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_READ; ++usage) { r = subtests(tests, (void *)(unsigned long)usage); if (r) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c index f5f2b8b115ea..0512afdd20d8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c @@ -117,7 +117,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, i915_fence_timeout(i915), I915_FENCE_GFP); dma_resv_add_fence(obj->base.resv, &clflush->base.dma, - DMA_RESV_USAGE_WRITE); + DMA_RESV_USAGE_KERNEL); dma_fence_work_commit(&clflush->base); /* * We must have successfully populated the pages(since we are diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index d74f9eea855e..6bf3fb1c8045 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -739,7 +739,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, return ret; } - dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE); + dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL); ret = dma_resv_reserve_fences(bo->base.resv, 1); if (unlikely(ret)) { diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 7a96a1db13a7..99deb45894f4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -508,7 +508,7 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo, return ret; dma_resv_add_fence(&ghost_obj->base._resv, fence, - DMA_RESV_USAGE_WRITE); + DMA_RESV_USAGE_KERNEL); /** * If we're not moving to fixed memory, the TTM object @@ -562,7 +562,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); int ret = 0; - dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE); + dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL); if (!evict) ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt); else if (!from->use_tt && pipeline) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index bec50223efe5..408ede1f967f 100644 --- 
a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -759,7 +759,7 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (!ret)
dma_resv_add_fence(bo->base.resv, &fence->base,
- DMA_RESV_USAGE_WRITE);
+ DMA_RESV_USAGE_KERNEL);
else
/* Last resort fallback when we are OOM */
dma_fence_wait(&fence->base, false);
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 7bb7e7edbb6f..a749f229ae91 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -55,11 +55,29 @@ struct dma_resv_list;
* This enum describes the different use cases for a dma_resv object and
* controls which fences are returned when queried.
*
- * An important fact is that there is the order WRITE<READ and when the
- * dma_resv object is asked for fences for one use case the fences for the
- * lower use case are returned as well.
+ * An important fact is that there is the order KERNEL<WRITE<READ and
+ * when the dma_resv object is asked for fences for one use case the fences
+ * for the lower use case are returned as well.
+ *
+ * For example when asking for WRITE fences then the KERNEL fences are
+ * returned as well. Similarly when asked for READ fences then both WRITE
+ * and KERNEL fences are returned as well.
*/
enum dma_resv_usage {
+ /**
+ * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only.
+ *
+ * This should only be used for things like copying or clearing memory
+ * with a DMA hardware engine for the purpose of kernel memory
+ * management.
+ *
+ * Drivers *always* must wait for these fences before accessing the
+ * resource protected by the dma_resv object. The only exception for
+ * that is when the resource is known to be locked down in place by
+ * pinning it previously.
+ */
+ DMA_RESV_USAGE_KERNEL,
+
/**
* @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
*
-- cgit v1.2.3-59-g8ed1b

From: Christian König
Date: Tue, 9 Nov 2021 11:08:18 +0100
Subject: dma-buf: add DMA_RESV_USAGE_BOOKKEEP v3
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add a usage for submissions independent of implicit sync but still
interesting for memory management.

v2: cleanup the kerneldoc a bit
v3: separate amdgpu changes from this

Signed-off-by: Christian König
Reviewed-by: Daniel Vetter
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-10-christian.koenig@amd.com
---
drivers/dma-buf/dma-resv.c | 4 ++--
drivers/dma-buf/st-dma-resv.c | 2 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 4 ++--
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 2 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 2 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 +++---
drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 2 +-
drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 2 +-
drivers/gpu/drm/qxl/qxl_debugfs.c | 2 +-
drivers/gpu/drm/radeon/radeon_gem.c | 2 +-
drivers/gpu/drm/radeon/radeon_mn.c | 2 +-
drivers/gpu/drm/ttm/ttm_bo.c | 14 +++++++-------
include/linux/dma-resv.h | 13 ++++++++++++-
14 files changed, 35 insertions(+), 24 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index f4860e5f2d8b..5b64aa554c36 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -520,7 +520,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
list = NULL;
- dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_READ);
+ dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP);
dma_resv_for_each_fence_unlocked(&cursor, f) {
if (dma_resv_iter_is_restarted(&cursor)) {
@@ -726,7 +726,7 @@ EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
*/
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
- static const char *usage[] = { "kernel", "write", "read" };
+ static const char *usage[] = { "kernel", "write", "read", "bookkeep" };
struct dma_resv_iter cursor;
struct dma_fence *fence;
diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
index 062b57d63fa6..8ace9e84c845 100644
--- a/drivers/dma-buf/st-dma-resv.c
+++ b/drivers/dma-buf/st-dma-resv.c
@@ -296,7 +296,7 @@ int dma_resv(void)
int r;
spin_lock_init(&fence_lock);
- for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_READ;
+ for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_BOOKKEEP;
++usage) {
r = subtests(tests, (void *)(unsigned long)usage);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 65998cbcd7f7..4ba4b54092f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -111,7 +111,7 @@ void
amdgpu_pasid_free_delayed(struct dma_resv *resv, struct dma_fence *fence; int r; - r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_READ, &fence); + r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence); if (r) goto fallback; @@ -139,7 +139,7 @@ fallback: /* Not enough memory for the delayed delete, as last resort * block for all the fences to complete. */ - dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ, + dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, false, MAX_SCHEDULE_TIMEOUT); amdgpu_pasid_free(pasid); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 86f5248676b0..b86c0b8252a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -75,7 +75,7 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni, mmu_interval_set_seq(mni, cur_seq); - r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ, + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, false, MAX_SCHEDULE_TIMEOUT); mutex_unlock(&adev->notifier_lock); if (r <= 0) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 744e144e5fc2..11c46b3e4c60 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -260,7 +260,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, return -EINVAL; /* TODO: Use DMA_RESV_USAGE_READ here */ - dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f) { + dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) { dma_fence_chain_for_each(f, f) { struct dma_fence *tmp = dma_fence_chain_contained(f); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 5db5066e74b4..49ffad312d5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1345,7 +1345,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, * be resident to run successfully */ dma_resv_for_each_fence(&resv_cursor, bo->base.resv, - DMA_RESV_USAGE_READ, f) { + DMA_RESV_USAGE_BOOKKEEP, f) { if (amdkfd_fence_check_mm(f, current->mm)) return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index a0376fd36a82..5277c10d901d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2059,7 +2059,7 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) struct dma_resv_iter cursor; struct dma_fence *fence; - dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, fence) { + dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) { /* Add a callback for each fence in the reservation object */ amdgpu_vm_prt_get(adev); amdgpu_vm_add_prt_cb(adev, fence); @@ -2665,7 +2665,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo) return true; /* Don't evict VM page tables while they are busy */ - if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_READ)) + if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP)) return false; /* Try to block ongoing updates */ @@ -2846,7 +2846,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) { timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, - DMA_RESV_USAGE_READ, + DMA_RESV_USAGE_BOOKKEEP, true, timeout); if (timeout <= 0) return timeout; diff --git 
a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c index a200d3e66573..4115a222a853 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c @@ -66,7 +66,7 @@ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) struct intel_memory_region *mr = READ_ONCE(obj->mm.region); #ifdef CONFIG_LOCKDEP - GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_READ) && + GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) && i915_gem_object_evictable(obj)); #endif return mr && (mr->type == INTEL_MEMORY_LOCAL || diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 644fe237601c..094f06b4ce33 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -86,7 +86,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni, return true; /* we will unbind on next submission, still have userptr pins */ - r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_READ, false, + r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false, MAX_SCHEDULE_TIMEOUT); if (r <= 0) drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r); diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c index 33e5889d6608..2d9ed3b94574 100644 --- a/drivers/gpu/drm/qxl/qxl_debugfs.c +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c @@ -62,7 +62,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data) int rel = 0; dma_resv_iter_begin(&cursor, bo->tbo.base.resv, - DMA_RESV_USAGE_READ); + DMA_RESV_USAGE_BOOKKEEP); dma_resv_for_each_fence_unlocked(&cursor, fence) { if (dma_resv_iter_is_restarted(&cursor)) rel = 0; diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 6616a828f40b..8c01a7f0e027 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -163,7 +163,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj, if (domain == RADEON_GEM_DOMAIN_CPU) { /* Asking for cpu access wait for object idle */ r = dma_resv_wait_timeout(robj->tbo.base.resv, - DMA_RESV_USAGE_READ, + DMA_RESV_USAGE_BOOKKEEP, true, 30 * HZ); if (!r) r = -EBUSY; diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index 68ebeb1bdfff..29fe8423bd90 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -66,7 +66,7 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn, return true; } - r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ, + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, false, MAX_SCHEDULE_TIMEOUT); if (r <= 0) DRM_ERROR("(%ld) failed to wait for user bo\n", r); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 6bf3fb1c8045..360f980c7e10 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -223,7 +223,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) struct dma_resv_iter cursor; struct dma_fence *fence; - dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ); + dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP); dma_resv_for_each_fence_unlocked(&cursor, fence) { if (!fence->ops->signaled) dma_fence_enable_sw_signaling(fence); @@ -252,7 +252,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, struct dma_resv *resv = &bo->base._resv; int ret; - if 
(dma_resv_test_signaled(resv, DMA_RESV_USAGE_READ))
+ if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
ret = 0;
else
ret = -EBUSY;
@@ -264,7 +264,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
dma_resv_unlock(bo->base.resv);
spin_unlock(&bo->bdev->lru_lock);
- lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ,
+ lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
interruptible,
30 * HZ);
@@ -369,7 +369,7 @@ static void ttm_bo_release(struct kref *kref)
* fences block for the BO to become idle
*/
dma_resv_wait_timeout(bo->base.resv,
- DMA_RESV_USAGE_READ, false,
+ DMA_RESV_USAGE_BOOKKEEP, false,
30 * HZ);
}
@@ -380,7 +380,7 @@ static void ttm_bo_release(struct kref *kref)
ttm_mem_io_free(bdev, bo->resource);
}
- if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ) ||
+ if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) ||
!dma_resv_trylock(bo->base.resv)) {
/* The BO is not idle, resurrect it for delayed destroy */
ttm_bo_flush_all_fences(bo);
@@ -1046,13 +1046,13 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
long timeout = 15 * HZ;
if (no_wait) {
- if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ))
+ if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
return 0;
else
return -EBUSY;
}
- timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
+ timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
interruptible, timeout);
if (timeout < 0)
return timeout;
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index a749f229ae91..1db759eacc98 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -55,7 +55,7 @@ struct dma_resv_list;
* This enum describes the different use cases for a dma_resv object and
* controls which fences are returned when queried.
*
- * An important fact is that there is the order KERNEL<WRITE<READ and
+ * An important fact is that there is the order KERNEL<WRITE<READ<BOOKKEEP
* when the dma_resv object is asked for fences for one use case the fences
* for the lower use case are returned as well.
*/
-- cgit v1.2.3-59-g8ed1b

From: Christian König
Date: Mon, 4 Apr 2022 14:58:37 +0200
Subject: dma-buf: drop seq count based update
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This should be possible now since we don't have the distinction between
exclusive and shared fences any more.

The only possible pitfall is that a dma_fence would be reused during the
RCU grace period, but even that could be handled with a single extra check.
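Condensed from the dma-resv.c changes below, the unlocked iteration now
detects concurrent updates with a plain pointer comparison instead of a
seqcount; a writer installs a new list with rcu_assign_pointer() and frees
the old one with kfree_rcu(), so the list pointer cannot be recycled while
a reader is still inside its RCU read-side section:

    /* Sketch of the new retry loop, as used by dma_resv_iter_first_unlocked(). */
    rcu_read_lock();
    do {
            dma_resv_iter_restart_unlocked(cursor);
            dma_resv_iter_walk_unlocked(cursor);
    } while (dma_resv_fences_list(cursor->obj) != cursor->fences);
    rcu_read_unlock();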
Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-15-christian.koenig@amd.com --- drivers/dma-buf/dma-resv.c | 33 ++++++++++++--------------------- drivers/dma-buf/st-dma-resv.c | 2 +- include/linux/dma-resv.h | 12 ------------ 3 files changed, 13 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 5b64aa554c36..0cce6e4ec946 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -133,7 +133,6 @@ static void dma_resv_list_free(struct dma_resv_list *list) void dma_resv_init(struct dma_resv *obj) { ww_mutex_init(&obj->lock, &reservation_ww_class); - seqcount_ww_mutex_init(&obj->seq, &obj->lock); RCU_INIT_POINTER(obj->fences, NULL); } @@ -292,28 +291,24 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, fobj = dma_resv_fences_list(obj); count = fobj->num_fences; - write_seqcount_begin(&obj->seq); - for (i = 0; i < count; ++i) { enum dma_resv_usage old_usage; dma_resv_list_entry(fobj, i, obj, &old, &old_usage); if ((old->context == fence->context && old_usage >= usage) || - dma_fence_is_signaled(old)) - goto replace; + dma_fence_is_signaled(old)) { + dma_resv_list_set(fobj, i, fence, usage); + dma_fence_put(old); + return; + } } BUG_ON(fobj->num_fences >= fobj->max_fences); - old = NULL; count++; -replace: dma_resv_list_set(fobj, i, fence, usage); /* pointer update must be visible before we extend the num_fences */ smp_store_mb(fobj->num_fences, count); - - write_seqcount_end(&obj->seq); - dma_fence_put(old); } EXPORT_SYMBOL(dma_resv_add_fence); @@ -341,7 +336,6 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, dma_resv_assert_held(obj); list = dma_resv_fences_list(obj); - write_seqcount_begin(&obj->seq); for (i = 0; list && i < list->num_fences; ++i) { struct dma_fence *old; @@ -352,14 +346,12 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, dma_resv_list_set(list, i, replacement, usage); dma_fence_put(old); } - write_seqcount_end(&obj->seq); } EXPORT_SYMBOL(dma_resv_replace_fences); /* Restart the unlocked iteration by initializing the cursor object. 
*/ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor) { - cursor->seq = read_seqcount_begin(&cursor->obj->seq); cursor->index = 0; cursor->num_fences = 0; cursor->fences = dma_resv_fences_list(cursor->obj); @@ -388,8 +380,10 @@ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor) cursor->obj, &cursor->fence, &cursor->fence_usage); cursor->fence = dma_fence_get_rcu(cursor->fence); - if (!cursor->fence) - break; + if (!cursor->fence) { + dma_resv_iter_restart_unlocked(cursor); + continue; + } if (!dma_fence_is_signaled(cursor->fence) && cursor->usage >= cursor->fence_usage) @@ -415,7 +409,7 @@ struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor) do { dma_resv_iter_restart_unlocked(cursor); dma_resv_iter_walk_unlocked(cursor); - } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq)); + } while (dma_resv_fences_list(cursor->obj) != cursor->fences); rcu_read_unlock(); return cursor->fence; @@ -438,13 +432,13 @@ struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor) rcu_read_lock(); cursor->is_restarted = false; - restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq); + restart = dma_resv_fences_list(cursor->obj) != cursor->fences; do { if (restart) dma_resv_iter_restart_unlocked(cursor); dma_resv_iter_walk_unlocked(cursor); restart = true; - } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq)); + } while (dma_resv_fences_list(cursor->obj) != cursor->fences); rcu_read_unlock(); return cursor->fence; @@ -540,10 +534,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) } dma_resv_iter_end(&cursor); - write_seqcount_begin(&dst->seq); list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst)); - write_seqcount_end(&dst->seq); - dma_resv_list_free(list); return 0; } diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c index 8ace9e84c845..813779e3c9be 100644 --- a/drivers/dma-buf/st-dma-resv.c +++ b/drivers/dma-buf/st-dma-resv.c @@ -217,7 +217,7 @@ static int test_for_each_unlocked(void *arg) if (r == -ENOENT) { r = -EINVAL; /* That should trigger a restart */ - cursor.seq--; + cursor.fences = (void*)~0; } else if (r == -EINVAL) { r = 0; } diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 1db759eacc98..c8ccbc94d5d2 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -155,15 +155,6 @@ struct dma_resv { */ struct ww_mutex lock; - /** - * @seq: - * - * Sequence count for managing RCU read-side synchronization, allows - * read-only access to @fences while ensuring we take a consistent - * snapshot. - */ - seqcount_ww_mutex_t seq; - /** * @fences: * @@ -202,9 +193,6 @@ struct dma_resv_iter { /** @fence_usage: the usage of the current fence */ enum dma_resv_usage fence_usage; - /** @seq: sequence number to check for modifications */ - unsigned int seq; - /** @index: index into the shared fences */ unsigned int index; -- cgit v1.2.3-59-g8ed1b From e84815cbbc767617221e6891e77f2486c9199dfa Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 7 Apr 2022 10:20:55 +0200 Subject: seqlock: drop seqcount_ww_mutex_t MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Daniel pointed out that this series removes the last user of seqcount_ww_mutex_t, so let's drop this.
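For reference, the mutex flavour of this family stays. A minimal, hypothetical user of the surviving seqcount_mutex_t API would look like this (struct foo and its members are made-up names):

struct foo {
	struct mutex lock;
	seqcount_mutex_t seq;	/* associated with @lock for lockdep */
	u64 value;
};

static void foo_init(struct foo *f)
{
	mutex_init(&f->lock);
	seqcount_mutex_init(&f->seq, &f->lock);
}

static void foo_update(struct foo *f, u64 v)
{
	mutex_lock(&f->lock);
	write_seqcount_begin(&f->seq);
	f->value = v;	/* readers retry via read_seqcount_begin()/retry() */
	write_seqcount_end(&f->seq);
	mutex_unlock(&f->lock);
}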
Signed-off-by: Christian König Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Will Deacon Cc: Waiman Long Cc: Boqun Feng Cc: linux-kernel@vger.kernel.org Acked-by: Peter Zijlstra (Intel) Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-16-christian.koenig@amd.com --- include/linux/seqlock.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 37ded6b8fee6..3926e9027947 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -17,7 +17,6 @@ #include #include #include -#include <linux/ww_mutex.h> #include #include @@ -164,7 +163,7 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) * static initializer or init function. This enables lockdep to validate * that the write side critical section is properly serialized. * - * LOCKNAME: raw_spinlock, spinlock, rwlock, mutex, or ww_mutex. + * LOCKNAME: raw_spinlock, spinlock, rwlock or mutex */ /* @@ -184,7 +183,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) #define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock) #define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock) #define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex) -#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex) /* * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers @@ -277,7 +275,6 @@ SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_s SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock)) SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock)) SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock)) -SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mutex, ww_mutex_lock(s->lock, NULL)) /* * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t @@ -304,8 +301,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu __seqprop_case((s), raw_spinlock, prop), \ __seqprop_case((s), spinlock, prop), \ __seqprop_case((s), rwlock, prop), \ - __seqprop_case((s), mutex, prop), \ - __seqprop_case((s), ww_mutex, prop)) + __seqprop_case((s), mutex, prop)) #define seqprop_ptr(s) __seqprop(s, ptr) #define seqprop_sequence(s) __seqprop(s, sequence) -- cgit v1.2.3-59-g8ed1b From 6b2060cf9138a2cd5f3468a949d3869abed049ef Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Apr 2022 23:03:26 +0200 Subject: fb: Delete fb_info->queue It was only used by fbcon, which has now switched to its own, private work.
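The replacement is simply a work item embedded in driver-private state instead of the shared fb_info field. A rough sketch of the pattern (the mydrv names are illustrative; this is not fbcon's actual code):

struct mydrv_par {
	struct fb_info *info;
	struct work_struct damage_work;	/* private, instead of fb_info->queue */
};

static void mydrv_damage_worker(struct work_struct *work)
{
	struct mydrv_par *par = container_of(work, struct mydrv_par, damage_work);

	/* flush pending damage for par->info here */
}

/* at init time: INIT_WORK(&par->damage_work, mydrv_damage_worker); */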
Acked-by: Sam Ravnborg Acked-by: Thomas Zimmermann Signed-off-by: Daniel Vetter Cc: Helge Deller Cc: linux-fbdev@vger.kernel.org Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-9-daniel.vetter@ffwll.ch --- include/linux/fb.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/fb.h b/include/linux/fb.h index 9a77ab615c36..f95da1af9ff6 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -450,7 +450,6 @@ struct fb_info { struct fb_var_screeninfo var; /* Current var */ struct fb_fix_screeninfo fix; /* Current fix */ struct fb_monspecs monspecs; /* Current Monitor specs */ - struct work_struct queue; /* Framebuffer event queue */ struct fb_pixmap pixmap; /* Image hardware mapper */ struct fb_pixmap sprite; /* Cursor hardware mapper */ struct fb_cmap cmap; /* Current cmap */ -- cgit v1.2.3-59-g8ed1b From 6c7f98b334a32df5cac8abac8983ac4ce17cab57 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 11 Apr 2022 16:13:59 +0200 Subject: vfio/mdev: Remove vfio_mdev.c Now that all mdev drivers directly create their own mdev_device driver and directly register with the vfio core's vfio_device_ops this is all dead code. Delete vfio_mdev.c and the mdev_parent_ops members that are connected to it. Signed-off-by: Jason Gunthorpe Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-31-hch@lst.de Reviewed-by: Kirti Wankhede Reviewed-by: Zhi Wang --- Documentation/driver-api/vfio-mediated-device.rst | 3 - drivers/vfio/mdev/Makefile | 2 +- drivers/vfio/mdev/mdev_core.c | 40 +----- drivers/vfio/mdev/mdev_driver.c | 10 -- drivers/vfio/mdev/mdev_private.h | 2 - drivers/vfio/mdev/vfio_mdev.c | 152 ---------------------- include/linux/mdev.h | 48 +------ 7 files changed, 6 insertions(+), 251 deletions(-) delete mode 100644 drivers/vfio/mdev/vfio_mdev.c (limited to 'include/linux') diff --git a/Documentation/driver-api/vfio-mediated-device.rst b/Documentation/driver-api/vfio-mediated-device.rst index 9f26079cacae..5a6e18a651a1 100644 --- a/Documentation/driver-api/vfio-mediated-device.rst +++ b/Documentation/driver-api/vfio-mediated-device.rst @@ -138,9 +138,6 @@ The structures in the mdev_parent_ops structure are as follows: * supported_config: attributes to define supported configurations * device_driver: device driver to bind for mediated device instances -The mdev_parent_ops also still has various functions pointers. Theses exist -for historical reasons only and shall not be used for new drivers. 
- When a driver wants to add the GUID creation sysfs to an existing device it has probe'd to then it should call:: diff --git a/drivers/vfio/mdev/Makefile b/drivers/vfio/mdev/Makefile index ff9ecd802125..7c236ba1b90e 100644 --- a/drivers/vfio/mdev/Makefile +++ b/drivers/vfio/mdev/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -mdev-y := mdev_core.o mdev_sysfs.o mdev_driver.o vfio_mdev.o +mdev-y := mdev_core.o mdev_sysfs.o mdev_driver.o obj-$(CONFIG_VFIO_MDEV) += mdev.o diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c index b314101237fe..8b1e86b9e8bc 100644 --- a/drivers/vfio/mdev/mdev_core.c +++ b/drivers/vfio/mdev/mdev_core.c @@ -89,17 +89,10 @@ void mdev_release_parent(struct kref *kref) static void mdev_device_remove_common(struct mdev_device *mdev) { struct mdev_parent *parent = mdev->type->parent; - int ret; mdev_remove_sysfs_files(mdev); device_del(&mdev->dev); lockdep_assert_held(&parent->unreg_sem); - if (parent->ops->remove) { - ret = parent->ops->remove(mdev); - if (ret) - dev_err(&mdev->dev, "Remove failed: err=%d\n", ret); - } - /* Balances with device_initialize() */ put_device(&mdev->dev); } @@ -131,7 +124,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) /* check for mandatory ops */ if (!ops || !ops->supported_type_groups) return -EINVAL; - if (!ops->device_driver && (!ops->create || !ops->remove)) + if (!ops->device_driver) return -EINVAL; dev = get_device(dev); @@ -297,18 +290,10 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid) goto out_put_device; } - if (parent->ops->create) { - ret = parent->ops->create(mdev); - if (ret) - goto out_unlock; - } - ret = device_add(&mdev->dev); if (ret) - goto out_remove; + goto out_unlock; - if (!drv) - drv = &vfio_mdev_driver; ret = device_driver_attach(&drv->driver, &mdev->dev); if (ret) goto out_del; @@ -325,9 +310,6 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid) out_del: device_del(&mdev->dev); -out_remove: - if (parent->ops->remove) - parent->ops->remove(mdev); out_unlock: up_read(&parent->unreg_sem); out_put_device: @@ -370,28 +352,14 @@ int mdev_device_remove(struct mdev_device *mdev) static int __init mdev_init(void) { - int rc; - - rc = mdev_bus_register(); - if (rc) - return rc; - rc = mdev_register_driver(&vfio_mdev_driver); - if (rc) - goto err_bus; - return 0; -err_bus: - mdev_bus_unregister(); - return rc; + return bus_register(&mdev_bus_type); } static void __exit mdev_exit(void) { - mdev_unregister_driver(&vfio_mdev_driver); - if (mdev_bus_compat_class) class_compat_unregister(mdev_bus_compat_class); - - mdev_bus_unregister(); + bus_unregister(&mdev_bus_type); } subsys_initcall(mdev_init) diff --git a/drivers/vfio/mdev/mdev_driver.c b/drivers/vfio/mdev/mdev_driver.c index 7927ed4f1711..9c2af59809e2 100644 --- a/drivers/vfio/mdev/mdev_driver.c +++ b/drivers/vfio/mdev/mdev_driver.c @@ -74,13 +74,3 @@ void mdev_unregister_driver(struct mdev_driver *drv) driver_unregister(&drv->driver); } EXPORT_SYMBOL(mdev_unregister_driver); - -int mdev_bus_register(void) -{ - return bus_register(&mdev_bus_type); -} - -void mdev_bus_unregister(void) -{ - bus_unregister(&mdev_bus_type); -} diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h index afbad7b0a14a..6999c89db7b1 100644 --- a/drivers/vfio/mdev/mdev_private.h +++ b/drivers/vfio/mdev/mdev_private.h @@ -37,8 +37,6 @@ struct mdev_type { #define to_mdev_type(_kobj) \ container_of(_kobj, struct mdev_type, kobj) -extern struct mdev_driver 
vfio_mdev_driver; - int parent_create_sysfs_files(struct mdev_parent *parent); void parent_remove_sysfs_files(struct mdev_parent *parent); diff --git a/drivers/vfio/mdev/vfio_mdev.c b/drivers/vfio/mdev/vfio_mdev.c deleted file mode 100644 index a90e24b0c851..000000000000 --- a/drivers/vfio/mdev/vfio_mdev.c +++ /dev/null @@ -1,152 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * VFIO based driver for Mediated device - * - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * Author: Neo Jia - * Kirti Wankhede - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "mdev_private.h" - -static int vfio_mdev_open_device(struct vfio_device *core_vdev) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (unlikely(!parent->ops->open_device)) - return 0; - - return parent->ops->open_device(mdev); -} - -static void vfio_mdev_close_device(struct vfio_device *core_vdev) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (likely(parent->ops->close_device)) - parent->ops->close_device(mdev); -} - -static long vfio_mdev_unlocked_ioctl(struct vfio_device *core_vdev, - unsigned int cmd, unsigned long arg) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (unlikely(!parent->ops->ioctl)) - return 0; - - return parent->ops->ioctl(mdev, cmd, arg); -} - -static ssize_t vfio_mdev_read(struct vfio_device *core_vdev, char __user *buf, - size_t count, loff_t *ppos) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (unlikely(!parent->ops->read)) - return -EINVAL; - - return parent->ops->read(mdev, buf, count, ppos); -} - -static ssize_t vfio_mdev_write(struct vfio_device *core_vdev, - const char __user *buf, size_t count, - loff_t *ppos) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (unlikely(!parent->ops->write)) - return -EINVAL; - - return parent->ops->write(mdev, buf, count, ppos); -} - -static int vfio_mdev_mmap(struct vfio_device *core_vdev, - struct vm_area_struct *vma) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (unlikely(!parent->ops->mmap)) - return -EINVAL; - - return parent->ops->mmap(mdev, vma); -} - -static void vfio_mdev_request(struct vfio_device *core_vdev, unsigned int count) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (parent->ops->request) - parent->ops->request(mdev, count); - else if (count == 0) - dev_notice(mdev_dev(mdev), - "No mdev vendor driver request callback support, blocked until released by user\n"); -} - -static const struct vfio_device_ops vfio_mdev_dev_ops = { - .name = "vfio-mdev", - .open_device = vfio_mdev_open_device, - .close_device = vfio_mdev_close_device, - .ioctl = vfio_mdev_unlocked_ioctl, - .read = vfio_mdev_read, - .write = vfio_mdev_write, - .mmap = vfio_mdev_mmap, - .request = vfio_mdev_request, -}; - -static int vfio_mdev_probe(struct mdev_device *mdev) -{ - struct vfio_device *vdev; - int ret; - - vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); - if (!vdev) - return -ENOMEM; - - vfio_init_group_dev(vdev, &mdev->dev, &vfio_mdev_dev_ops); - ret = vfio_register_emulated_iommu_dev(vdev); - if (ret) - goto 
out_uninit; - - dev_set_drvdata(&mdev->dev, vdev); - return 0; - -out_uninit: - vfio_uninit_group_dev(vdev); - kfree(vdev); - return ret; -} - -static void vfio_mdev_remove(struct mdev_device *mdev) -{ - struct vfio_device *vdev = dev_get_drvdata(&mdev->dev); - - vfio_unregister_group_dev(vdev); - vfio_uninit_group_dev(vdev); - kfree(vdev); -} - -struct mdev_driver vfio_mdev_driver = { - .driver = { - .name = "vfio_mdev", - .owner = THIS_MODULE, - .mod_name = KBUILD_MODNAME, - }, - .probe = vfio_mdev_probe, - .remove = vfio_mdev_remove, -}; diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 15d03f6532d0..192aed656116 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -40,40 +40,7 @@ struct device *mtype_get_parent_dev(struct mdev_type *mtype); * @mdev_attr_groups: Attributes of the mediated device. * @supported_type_groups: Attributes to define supported types. It is mandatory * to provide supported types. - * @create: Called to allocate basic resources in parent device's - * driver for a particular mediated device. It is - * mandatory to provide create ops. - * @mdev: mdev_device structure on of mediated device - * that is being created - * Returns integer: success (0) or error (< 0) - * @remove: Called to free resources in parent device's driver for - * a mediated device. It is mandatory to provide 'remove' - * ops. - * @mdev: mdev_device device structure which is being - * destroyed - * Returns integer: success (0) or error (< 0) - * @read: Read emulation callback - * @mdev: mediated device structure - * @buf: read buffer - * @count: number of bytes to read - * @ppos: address. - * Retuns number on bytes read on success or error. - * @write: Write emulation callback - * @mdev: mediated device structure - * @buf: write buffer - * @count: number of bytes to be written - * @ppos: address. - * Retuns number on bytes written on success or error. - * @ioctl: IOCTL callback - * @mdev: mediated device structure - * @cmd: ioctl command - * @arg: arguments to ioctl - * @mmap: mmap callback - * @mdev: mediated device structure - * @vma: vma structure - * @request: request callback to release device - * @mdev: mediated device structure - * @count: request sequence number + * * Parent device that support mediated device should be registered with mdev * module with mdev_parent_ops structure. **/ @@ -83,19 +50,6 @@ struct mdev_parent_ops { const struct attribute_group **dev_attr_groups; const struct attribute_group **mdev_attr_groups; struct attribute_group **supported_type_groups; - - int (*create)(struct mdev_device *mdev); - int (*remove)(struct mdev_device *mdev); - int (*open_device)(struct mdev_device *mdev); - void (*close_device)(struct mdev_device *mdev); - ssize_t (*read)(struct mdev_device *mdev, char __user *buf, - size_t count, loff_t *ppos); - ssize_t (*write)(struct mdev_device *mdev, const char __user *buf, - size_t count, loff_t *ppos); - long (*ioctl)(struct mdev_device *mdev, unsigned int cmd, - unsigned long arg); - int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma); - void (*request)(struct mdev_device *mdev, unsigned int count); }; /* interface for exporting mdev supported type attributes */ -- cgit v1.2.3-59-g8ed1b From e6486939d8ea22569af942a1888aa3824f59cc5e Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 11 Apr 2022 16:14:00 +0200 Subject: vfio/mdev: Remove mdev_parent_ops dev_attr_groups This is only used by one sample to print a fixed string that is pointless. 
In general, having a device driver attach sysfs attributes to the parent is horrific. This should never happen, and always leads to some kind of lifetime bug as it becomes very difficult for the sysfs attribute to go back to any data owned by the device driver. Remove the general mechanism to create this abuse. Signed-off-by: Jason Gunthorpe Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-32-hch@lst.de Reviewed-by: Kirti Wankhede Reviewed-by: Zhi Wang --- drivers/vfio/mdev/mdev_sysfs.c | 12 ++---------- include/linux/mdev.h | 2 -- samples/vfio-mdev/mtty.c | 30 +----------------------------- 3 files changed, 3 insertions(+), 41 deletions(-) (limited to 'include/linux') diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c index f5cf1931c54e..66eef08833a4 100644 --- a/drivers/vfio/mdev/mdev_sysfs.c +++ b/drivers/vfio/mdev/mdev_sysfs.c @@ -197,7 +197,6 @@ void parent_remove_sysfs_files(struct mdev_parent *parent) remove_mdev_supported_type(type); } - sysfs_remove_groups(&parent->dev->kobj, parent->ops->dev_attr_groups); kset_unregister(parent->mdev_types_kset); } @@ -213,17 +212,10 @@ int parent_create_sysfs_files(struct mdev_parent *parent) INIT_LIST_HEAD(&parent->type_list); - ret = sysfs_create_groups(&parent->dev->kobj, - parent->ops->dev_attr_groups); - if (ret) - goto create_err; - ret = add_mdev_supported_type_groups(parent); if (ret) - sysfs_remove_groups(&parent->dev->kobj, - parent->ops->dev_attr_groups); - else - return ret; + goto create_err; + return 0; create_err: kset_unregister(parent->mdev_types_kset); diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 192aed656116..69df1fa2cb6f 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -36,7 +36,6 @@ struct device *mtype_get_parent_dev(struct mdev_type *mtype); * * @owner: The module owner. * @device_driver: Which device driver to probe() on newly created devices - * @dev_attr_groups: Attributes of the parent device. * @mdev_attr_groups: Attributes of the mediated device. * @supported_type_groups: Attributes to define supported types. It is mandatory * to provide supported types.
@@ -47,7 +46,6 @@ struct device *mtype_get_parent_dev(struct mdev_type *mtype); struct mdev_parent_ops { struct module *owner; struct mdev_driver *device_driver; - const struct attribute_group **dev_attr_groups; const struct attribute_group **mdev_attr_groups; struct attribute_group **supported_type_groups; }; diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c index a0e1a469bd47..4f227dc26785 100644 --- a/samples/vfio-mdev/mtty.c +++ b/samples/vfio-mdev/mtty.c @@ -1207,38 +1207,11 @@ static long mtty_ioctl(struct vfio_device *vdev, unsigned int cmd, return -ENOTTY; } -static ssize_t -sample_mtty_dev_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - return sprintf(buf, "This is phy device\n"); -} - -static DEVICE_ATTR_RO(sample_mtty_dev); - -static struct attribute *mtty_dev_attrs[] = { - &dev_attr_sample_mtty_dev.attr, - NULL, -}; - -static const struct attribute_group mtty_dev_group = { - .name = "mtty_dev", - .attrs = mtty_dev_attrs, -}; - -static const struct attribute_group *mtty_dev_groups[] = { - &mtty_dev_group, - NULL, -}; - static ssize_t sample_mdev_dev_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (mdev_from_dev(dev)) - return sprintf(buf, "This is MDEV %s\n", dev_name(dev)); - - return sprintf(buf, "\n"); + return sprintf(buf, "This is MDEV %s\n", dev_name(dev)); } static DEVICE_ATTR_RO(sample_mdev_dev); @@ -1333,7 +1306,6 @@ static struct mdev_driver mtty_driver = { static const struct mdev_parent_ops mdev_fops = { .owner = THIS_MODULE, .device_driver = &mtty_driver, - .dev_attr_groups = mtty_dev_groups, .supported_type_groups = mdev_type_groups, }; -- cgit v1.2.3-59-g8ed1b From 6b42f491e17ce13f5ff7f2d1f49c73a0f4c47b20 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 11 Apr 2022 16:14:01 +0200 Subject: vfio/mdev: Remove mdev_parent_ops The last useful member in this struct is the supported_type_groups, move it to the mdev_driver and delete mdev_parent_ops. 
Replace it with mdev_driver as an argument to mdev_register_device() Signed-off-by: Jason Gunthorpe Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-33-hch@lst.de Reviewed-by: Kirti Wankhede Reviewed-by: Zhi Wang --- Documentation/driver-api/vfio-mediated-device.rst | 24 +++++----------------- drivers/gpu/drm/i915/gvt/kvmgt.c | 7 +------ drivers/s390/cio/vfio_ccw_ops.c | 7 +------ drivers/s390/crypto/vfio_ap_ops.c | 9 ++------ drivers/vfio/mdev/mdev_core.c | 13 +++++------- drivers/vfio/mdev/mdev_private.h | 2 +- drivers/vfio/mdev/mdev_sysfs.c | 6 +++--- include/linux/mdev.h | 25 ++++------------------- samples/vfio-mdev/mbochs.c | 9 ++------ samples/vfio-mdev/mdpy.c | 9 ++------ samples/vfio-mdev/mtty.c | 9 ++------ 11 files changed, 28 insertions(+), 92 deletions(-) (limited to 'include/linux') diff --git a/Documentation/driver-api/vfio-mediated-device.rst b/Documentation/driver-api/vfio-mediated-device.rst index 5a6e18a651a1..784bbeb22adc 100644 --- a/Documentation/driver-api/vfio-mediated-device.rst +++ b/Documentation/driver-api/vfio-mediated-device.rst @@ -105,6 +105,7 @@ structure to represent a mediated device's driver:: struct mdev_driver { int (*probe) (struct mdev_device *dev); void (*remove) (struct mdev_device *dev); + struct attribute_group **supported_type_groups; struct device_driver driver; }; @@ -119,30 +120,15 @@ to register and unregister itself with the core driver: extern void mdev_unregister_driver(struct mdev_driver *drv); -The mediated bus driver is responsible for adding mediated devices to the VFIO -group when devices are bound to the driver and removing mediated devices from -the VFIO when devices are unbound from the driver. - - -Physical Device Driver Interface --------------------------------- - -The physical device driver interface provides the mdev_parent_ops[3] structure -to define the APIs to manage work in the mediated core driver that is related -to the physical device. - -The structures in the mdev_parent_ops structure are as follows: - -* dev_attr_groups: attributes of the parent device -* mdev_attr_groups: attributes of the mediated device -* supported_config: attributes to define supported configurations -* device_driver: device driver to bind for mediated device instances +The mediated bus driver's probe function should create a vfio_device on top of +the mdev_device and connect it to an appropriate implementation of +vfio_device_ops. When a driver wants to add the GUID creation sysfs to an existing device it has probe'd to then it should call:: extern int mdev_register_device(struct device *dev, - const struct mdev_parent_ops *ops); + struct mdev_driver *mdev_driver); This will provide the 'mdev_supported_types/XX/create' files which can then be used to trigger the creation of a mdev_device. 
The created mdev_device will be diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index f033031c676d..0787ba5c301f 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1723,12 +1723,7 @@ static struct mdev_driver intel_vgpu_mdev_driver = { }, .probe = intel_vgpu_probe, .remove = intel_vgpu_remove, -}; - -static const struct mdev_parent_ops intel_vgpu_mdev_ops = { - .owner = THIS_MODULE, .supported_type_groups = gvt_vgpu_type_groups, - .device_driver = &intel_vgpu_mdev_driver, }; int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) @@ -2131,7 +2126,7 @@ static int intel_gvt_init_device(struct drm_i915_private *i915) if (ret) goto out_destroy_idle_vgpu; - ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_ops); + ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_driver); if (ret) goto out_cleanup_vgpu_type_groups; diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index d8589afac272..c4d60cdbf247 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -656,17 +656,12 @@ struct mdev_driver vfio_ccw_mdev_driver = { }, .probe = vfio_ccw_mdev_probe, .remove = vfio_ccw_mdev_remove, -}; - -static const struct mdev_parent_ops vfio_ccw_mdev_ops = { - .owner = THIS_MODULE, - .device_driver = &vfio_ccw_mdev_driver, .supported_type_groups = mdev_type_groups, }; int vfio_ccw_mdev_reg(struct subchannel *sch) { - return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops); + return mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver); } void vfio_ccw_mdev_unreg(struct subchannel *sch) diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 6e08d04b605d..ee0a3bf8f476 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -1496,12 +1496,7 @@ static struct mdev_driver vfio_ap_matrix_driver = { }, .probe = vfio_ap_mdev_probe, .remove = vfio_ap_mdev_remove, -}; - -static const struct mdev_parent_ops vfio_ap_matrix_ops = { - .owner = THIS_MODULE, - .device_driver = &vfio_ap_matrix_driver, - .supported_type_groups = vfio_ap_mdev_type_groups, + .supported_type_groups = vfio_ap_mdev_type_groups, }; int vfio_ap_mdev_register(void) @@ -1514,7 +1509,7 @@ int vfio_ap_mdev_register(void) if (ret) return ret; - ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops); + ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_driver); if (ret) goto err_driver; return 0; diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c index 8b1e86b9e8bc..19a20ff420b7 100644 --- a/drivers/vfio/mdev/mdev_core.c +++ b/drivers/vfio/mdev/mdev_core.c @@ -109,12 +109,12 @@ static int mdev_device_remove_cb(struct device *dev, void *data) /* * mdev_register_device : Register a device * @dev: device structure representing parent device. - * @ops: Parent device operation structure to be registered. + * @mdev_driver: Device driver to bind to the newly created mdev * * Add device to list of registered parent devices. * Returns a negative value on error, otherwise 0. 
*/ -int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) +int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver) { int ret; struct mdev_parent *parent; @@ -122,9 +122,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) char *envp[] = { env_string, NULL }; /* check for mandatory ops */ - if (!ops || !ops->supported_type_groups) - return -EINVAL; - if (!ops->device_driver) + if (!mdev_driver->supported_type_groups) return -EINVAL; dev = get_device(dev); @@ -151,7 +149,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) init_rwsem(&parent->unreg_sem); parent->dev = dev; - parent->ops = ops; + parent->mdev_driver = mdev_driver; if (!mdev_bus_compat_class) { mdev_bus_compat_class = class_compat_register("mdev_bus"); @@ -249,7 +247,7 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid) int ret; struct mdev_device *mdev, *tmp; struct mdev_parent *parent = type->parent; - struct mdev_driver *drv = parent->ops->device_driver; + struct mdev_driver *drv = parent->mdev_driver; mutex_lock(&mdev_list_lock); @@ -271,7 +269,6 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid) mdev->dev.parent = parent->dev; mdev->dev.bus = &mdev_bus_type; mdev->dev.release = mdev_device_release; - mdev->dev.groups = parent->ops->mdev_attr_groups; mdev->type = type; /* Pairs with the put in mdev_device_release() */ kobject_get(&type->kobj); diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h index 6999c89db7b1..5a7ffd4e770f 100644 --- a/drivers/vfio/mdev/mdev_private.h +++ b/drivers/vfio/mdev/mdev_private.h @@ -15,7 +15,7 @@ void mdev_bus_unregister(void); struct mdev_parent { struct device *dev; - const struct mdev_parent_ops *ops; + struct mdev_driver *mdev_driver; struct kref ref; struct list_head next; struct kset *mdev_types_kset; diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c index 66eef08833a4..5a3873d1a275 100644 --- a/drivers/vfio/mdev/mdev_sysfs.c +++ b/drivers/vfio/mdev/mdev_sysfs.c @@ -97,7 +97,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent, { struct mdev_type *type; struct attribute_group *group = - parent->ops->supported_type_groups[type_group_id]; + parent->mdev_driver->supported_type_groups[type_group_id]; int ret; if (!group->name) { @@ -154,7 +154,7 @@ attr_create_failed: static void remove_mdev_supported_type(struct mdev_type *type) { struct attribute_group *group = - type->parent->ops->supported_type_groups[type->type_group_id]; + type->parent->mdev_driver->supported_type_groups[type->type_group_id]; sysfs_remove_files(&type->kobj, (const struct attribute **)group->attrs); @@ -168,7 +168,7 @@ static int add_mdev_supported_type_groups(struct mdev_parent *parent) { int i; - for (i = 0; parent->ops->supported_type_groups[i]; i++) { + for (i = 0; parent->mdev_driver->supported_type_groups[i]; i++) { struct mdev_type *type; type = add_mdev_supported_type(parent, i); diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 69df1fa2cb6f..1f6f57a3c316 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -30,26 +30,6 @@ unsigned int mdev_get_type_group_id(struct mdev_device *mdev); unsigned int mtype_get_type_group_id(struct mdev_type *mtype); struct device *mtype_get_parent_dev(struct mdev_type *mtype); -/** - * struct mdev_parent_ops - Structure to be registered for each parent device to - * register the device to mdev module. 
- * - * @owner: The module owner. - * @device_driver: Which device driver to probe() on newly created devices - * @mdev_attr_groups: Attributes of the mediated device. - * @supported_type_groups: Attributes to define supported types. It is mandatory - * to provide supported types. - * - * Parent device that support mediated device should be registered with mdev - * module with mdev_parent_ops structure. - **/ -struct mdev_parent_ops { - struct module *owner; - struct mdev_driver *device_driver; - const struct attribute_group **mdev_attr_groups; - struct attribute_group **supported_type_groups; -}; - /* interface for exporting mdev supported type attributes */ struct mdev_type_attribute { struct attribute attr; @@ -74,12 +54,15 @@ struct mdev_type_attribute mdev_type_attr_##_name = \ * struct mdev_driver - Mediated device driver * @probe: called when new device created * @remove: called when device removed + * @supported_type_groups: Attributes to define supported types. It is mandatory + * to provide supported types. * @driver: device driver structure * **/ struct mdev_driver { int (*probe)(struct mdev_device *dev); void (*remove)(struct mdev_device *dev); + struct attribute_group **supported_type_groups; struct device_driver driver; }; @@ -98,7 +81,7 @@ static inline const guid_t *mdev_uuid(struct mdev_device *mdev) extern struct bus_type mdev_bus_type; -int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops); +int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver); void mdev_unregister_device(struct device *dev); int mdev_register_driver(struct mdev_driver *drv); diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c index e90c8552cc31..344c2901a82b 100644 --- a/samples/vfio-mdev/mbochs.c +++ b/samples/vfio-mdev/mbochs.c @@ -1412,12 +1412,7 @@ static struct mdev_driver mbochs_driver = { }, .probe = mbochs_probe, .remove = mbochs_remove, -}; - -static const struct mdev_parent_ops mdev_fops = { - .owner = THIS_MODULE, - .device_driver = &mbochs_driver, - .supported_type_groups = mdev_type_groups, + .supported_type_groups = mdev_type_groups, }; static const struct file_operations vd_fops = { @@ -1462,7 +1457,7 @@ static int __init mbochs_dev_init(void) if (ret) goto err_class; - ret = mdev_register_device(&mbochs_dev, &mdev_fops); + ret = mdev_register_device(&mbochs_dev, &mbochs_driver); if (ret) goto err_device; diff --git a/samples/vfio-mdev/mdpy.c b/samples/vfio-mdev/mdpy.c index fe5d43e797b6..e8c46eb2e246 100644 --- a/samples/vfio-mdev/mdpy.c +++ b/samples/vfio-mdev/mdpy.c @@ -723,12 +723,7 @@ static struct mdev_driver mdpy_driver = { }, .probe = mdpy_probe, .remove = mdpy_remove, -}; - -static const struct mdev_parent_ops mdev_fops = { - .owner = THIS_MODULE, - .device_driver = &mdpy_driver, - .supported_type_groups = mdev_type_groups, + .supported_type_groups = mdev_type_groups, }; static const struct file_operations vd_fops = { @@ -771,7 +766,7 @@ static int __init mdpy_dev_init(void) if (ret) goto err_class; - ret = mdev_register_device(&mdpy_dev, &mdev_fops); + ret = mdev_register_device(&mdpy_dev, &mdpy_driver); if (ret) goto err_device; diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c index 4f227dc26785..f42a59ed2e3f 100644 --- a/samples/vfio-mdev/mtty.c +++ b/samples/vfio-mdev/mtty.c @@ -1301,12 +1301,7 @@ static struct mdev_driver mtty_driver = { }, .probe = mtty_probe, .remove = mtty_remove, -}; - -static const struct mdev_parent_ops mdev_fops = { - .owner = THIS_MODULE, - .device_driver = 
&mtty_driver, - .supported_type_groups = mdev_type_groups, + .supported_type_groups = mdev_type_groups, }; static void mtty_device_release(struct device *dev) @@ -1357,7 +1352,7 @@ static int __init mtty_dev_init(void) if (ret) goto err_class; - ret = mdev_register_device(&mtty_dev.dev, &mdev_fops); + ret = mdev_register_device(&mtty_dev.dev, &mtty_driver); if (ret) goto err_device; return 0; -- cgit v1.2.3-59-g8ed1b From 2917f53113be3b7a0f374e02cebe6d6b749366b5 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 11 Apr 2022 16:14:03 +0200 Subject: vfio/mdev: Remove mdev drvdata This is no longer used; remove it. All usages were moved over to either use container_of() from a vfio_device or to use dev_drvdata() directly on the mdev. Signed-off-by: Jason Gunthorpe Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-35-hch@lst.de Reviewed-by: Kirti Wankhede Reviewed-by: Zhi Wang --- include/linux/mdev.h | 9 --------- 1 file changed, 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 1f6f57a3c316..bb539794f54a 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -15,7 +15,6 @@ struct mdev_type; struct mdev_device { struct device dev; guid_t uuid; - void *driver_data; struct list_head next; struct mdev_type *type; bool active; @@ -66,14 +65,6 @@ struct mdev_driver { struct device_driver driver; }; -static inline void *mdev_get_drvdata(struct mdev_device *mdev) -{ - return mdev->driver_data; -} -static inline void mdev_set_drvdata(struct mdev_device *mdev, void *data) -{ - mdev->driver_data = data; -} static inline const guid_t *mdev_uuid(struct mdev_device *mdev) { return &mdev->uuid; -- cgit v1.2.3-59-g8ed1b From 1e3dc1d8622b2699e6cf1cc06885105b13c9c514 Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Tue, 19 Apr 2022 12:33:08 -0700 Subject: drm/i915/gsc: add gsc as a mei auxiliary device GSC is a graphics system controller; it provides a chassis controller for discrete graphics cards. There are two MEI interfaces in GSC: HECI1 and HECI2. Both interfaces are on BAR0 at offsets 0x00258000 and 0x00259000. GSC is a GT Engine (class 4: instance 6). The HECI1 interrupt is signaled via bit 15 and HECI2 via bit 14 in the interrupt register. This patch exports GSC as an auxiliary device for the mei driver to bind to for the HECI2 interface, and prepares for the HECI1 interface, which will follow soon.
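For context, the consumer side is expected to be an ordinary auxiliary-bus driver in the mei subsystem. A hedged sketch of such a binding follows; the auxiliary bus matches on "<parent module>.<device name>", so the "i915.mei-gscfi" id string and the mei_gsc_* names are assumptions for illustration, not part of this patch:

#include <linux/auxiliary_bus.h>
#include <linux/mei_aux.h>
#include <linux/module.h>

static int mei_gsc_probe(struct auxiliary_device *aux_dev,
			 const struct auxiliary_device_id *id)
{
	struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev);

	/* adev->bar and adev->irq describe the HECI slice of BAR0 */
	return 0;
}

static void mei_gsc_remove(struct auxiliary_device *aux_dev)
{
}

static const struct auxiliary_device_id mei_gsc_id_table[] = {
	{ .name = "i915.mei-gscfi" },
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, mei_gsc_id_table);

static struct auxiliary_driver mei_gsc_driver = {
	.probe = mei_gsc_probe,
	.remove = mei_gsc_remove,
	.id_table = mei_gsc_id_table,
};
module_auxiliary_driver(mei_gsc_driver);
MODULE_LICENSE("GPL");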
CC: Rodrigo Vivi Signed-off-by: Tomas Winkler Signed-off-by: Vitaly Lubart Signed-off-by: Alexander Usyskin Acked-by: Tvrtko Ursulin Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Daniele Ceraolo Spurio Link: https://patchwork.freedesktop.org/patch/msgid/20220419193314.526966-2-daniele.ceraolospurio@intel.com --- MAINTAINERS | 1 + drivers/gpu/drm/i915/Kconfig | 1 + drivers/gpu/drm/i915/Makefile | 3 + drivers/gpu/drm/i915/gt/intel_gsc.c | 204 +++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/gt/intel_gsc.h | 37 ++++++ drivers/gpu/drm/i915/gt/intel_gt.c | 3 + drivers/gpu/drm/i915/gt/intel_gt.h | 5 + drivers/gpu/drm/i915/gt/intel_gt_irq.c | 13 ++ drivers/gpu/drm/i915/gt/intel_gt_regs.h | 1 + drivers/gpu/drm/i915/gt/intel_gt_types.h | 2 + drivers/gpu/drm/i915/i915_drv.h | 8 ++ drivers/gpu/drm/i915/i915_pci.c | 3 +- drivers/gpu/drm/i915/i915_reg.h | 2 + drivers/gpu/drm/i915/intel_device_info.h | 2 + include/linux/mei_aux.h | 19 +++ 15 files changed, 303 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/i915/gt/intel_gsc.c create mode 100644 drivers/gpu/drm/i915/gt/intel_gsc.h create mode 100644 include/linux/mei_aux.h (limited to 'include/linux') diff --git a/MAINTAINERS b/MAINTAINERS index 9b2b0dc44506..2cc358f4564e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9996,6 +9996,7 @@ S: Supported F: Documentation/driver-api/mei/* F: drivers/misc/mei/ F: drivers/watchdog/mei_wdt.c +F: include/linux/mei_aux.h F: include/linux/mei_cl_bus.h F: include/uapi/linux/mei.h F: samples/mei/* diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index b8d45d259337..aa1e7f0b1fe4 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -30,6 +30,7 @@ config DRM_I915 select VMAP_PFN select DRM_TTM select DRM_BUDDY + select AUXILIARY_BUS help Choose this option if you have a system that has "Intel Graphics Media Accelerator" or "HD Graphics" integrated graphics, diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 7e37455ba88d..cd0bf6806228 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -204,6 +204,9 @@ i915-y += gt/uc/intel_uc.o \ gt/uc/intel_huc_debugfs.o \ gt/uc/intel_huc_fw.o +# graphics system controller (GSC) support +i915-y += gt/intel_gsc.o + # modesetting core code i915-y += \ display/hsw_ips.o \ diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c new file mode 100644 index 000000000000..21e860861f0b --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gsc.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright(c) 2019-2022, Intel Corporation. All rights reserved. + */ + +#include <linux/irq.h> +#include <linux/mei_aux.h> +#include "i915_drv.h" +#include "i915_reg.h" +#include "gt/intel_gsc.h" +#include "gt/intel_gt.h" + +#define GSC_BAR_LENGTH 0x00000FFC + +static void gsc_irq_mask(struct irq_data *d) +{ + /* generic irq handling */ +} + +static void gsc_irq_unmask(struct irq_data *d) +{ + /* generic irq handling */ +} + +static struct irq_chip gsc_irq_chip = { + .name = "gsc_irq_chip", + .irq_mask = gsc_irq_mask, + .irq_unmask = gsc_irq_unmask, +}; + +static int gsc_irq_init(int irq) +{ + irq_set_chip_and_handler_name(irq, &gsc_irq_chip, + handle_simple_irq, "gsc_irq_handler"); + + return irq_set_chip_data(irq, NULL); +} + +struct gsc_def { + const char *name; + unsigned long bar; + size_t bar_size; +}; + +/* gsc resources and definitions (HECI1 and HECI2) */ +static const struct gsc_def gsc_def_dg1[] = { + { + /* HECI1 not yet implemented.
*/ + }, + { + .name = "mei-gscfi", + .bar = DG1_GSC_HECI2_BASE, + .bar_size = GSC_BAR_LENGTH, + } +}; + +static void gsc_release_dev(struct device *dev) +{ + struct auxiliary_device *aux_dev = to_auxiliary_dev(dev); + struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev); + + kfree(adev); +} + +static void gsc_destroy_one(struct intel_gsc_intf *intf) +{ + if (intf->adev) { + auxiliary_device_delete(&intf->adev->aux_dev); + auxiliary_device_uninit(&intf->adev->aux_dev); + intf->adev = NULL; + } + if (intf->irq >= 0) + irq_free_desc(intf->irq); + intf->irq = -1; +} + +static void gsc_init_one(struct drm_i915_private *i915, + struct intel_gsc_intf *intf, + unsigned int intf_id) +{ + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + struct mei_aux_device *adev; + struct auxiliary_device *aux_dev; + const struct gsc_def *def; + int ret; + + intf->irq = -1; + intf->id = intf_id; + + if (intf_id == 0 && !HAS_HECI_PXP(i915)) + return; + + def = &gsc_def_dg1[intf_id]; + + if (!def->name) { + drm_warn_once(&i915->drm, "HECI%d is not implemented!\n", intf_id + 1); + return; + } + + intf->irq = irq_alloc_desc(0); + if (intf->irq < 0) { + drm_err(&i915->drm, "gsc irq error %d\n", intf->irq); + return; + } + + ret = gsc_irq_init(intf->irq); + if (ret < 0) { + drm_err(&i915->drm, "gsc irq init failed %d\n", ret); + goto fail; + } + + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + goto fail; + + adev->irq = intf->irq; + adev->bar.parent = &pdev->resource[0]; + adev->bar.start = def->bar + pdev->resource[0].start; + adev->bar.end = adev->bar.start + def->bar_size - 1; + adev->bar.flags = IORESOURCE_MEM; + adev->bar.desc = IORES_DESC_NONE; + + aux_dev = &adev->aux_dev; + aux_dev->name = def->name; + aux_dev->id = (pci_domain_nr(pdev->bus) << 16) | + PCI_DEVID(pdev->bus->number, pdev->devfn); + aux_dev->dev.parent = &pdev->dev; + aux_dev->dev.release = gsc_release_dev; + + ret = auxiliary_device_init(aux_dev); + if (ret < 0) { + drm_err(&i915->drm, "gsc aux init failed %d\n", ret); + kfree(adev); + goto fail; + } + + ret = auxiliary_device_add(aux_dev); + if (ret < 0) { + drm_err(&i915->drm, "gsc aux add failed %d\n", ret); + /* adev will be freed with the put_device() and .release sequence */ + auxiliary_device_uninit(aux_dev); + goto fail; + } + intf->adev = adev; + + return; +fail: + gsc_destroy_one(intf); +} + +static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id) +{ + int ret; + + if (intf_id >= INTEL_GSC_NUM_INTERFACES) { + drm_warn_once(&gt->i915->drm, "GSC irq: intf_id %d is out of range", intf_id); + return; + } + + if (!HAS_HECI_GSC(gt->i915)) { + drm_warn_once(&gt->i915->drm, "GSC irq: not supported"); + return; + } + + if (gt->gsc.intf[intf_id].irq < 0) { + drm_err_ratelimited(&gt->i915->drm, "GSC irq: irq not set"); + return; + } + + ret = generic_handle_irq(gt->gsc.intf[intf_id].irq); + if (ret) + drm_err_ratelimited(&gt->i915->drm, "error handling GSC irq: %d\n", ret); +} + +void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir) +{ + if (iir & GSC_IRQ_INTF(0)) + gsc_irq_handler(gt, 0); + if (iir & GSC_IRQ_INTF(1)) + gsc_irq_handler(gt, 1); +} + +void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915) +{ + unsigned int i; + + if (!HAS_HECI_GSC(i915)) + return; + + for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++) + gsc_init_one(i915, &gsc->intf[i], i); +} + +void intel_gsc_fini(struct intel_gsc *gsc) +{ + struct intel_gt *gt = gsc_to_gt(gsc); + unsigned int i; + + if (!HAS_HECI_GSC(gt->i915)) + return; + + for (i = 0; i <
INTEL_GSC_NUM_INTERFACES; i++) + gsc_destroy_one(&gsc->intf[i]); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h new file mode 100644 index 000000000000..68582f912b21 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gsc.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright(c) 2019-2022, Intel Corporation. All rights reserved. + */ +#ifndef __INTEL_GSC_DEV_H__ +#define __INTEL_GSC_DEV_H__ + +#include <linux/types.h> + +struct drm_i915_private; +struct intel_gt; +struct mei_aux_device; + +#define INTEL_GSC_NUM_INTERFACES 2 +/* + * The HECI1 bit corresponds to bit15 and HECI2 to bit14. + * The reason for this is to allow growth for more interfaces in the future. + */ +#define GSC_IRQ_INTF(_x) BIT(15 - (_x)) + +/** + * struct intel_gsc - graphics security controller + * @intf : gsc interface + */ +struct intel_gsc { + struct intel_gsc_intf { + struct mei_aux_device *adev; + int irq; + unsigned int id; + } intf[INTEL_GSC_NUM_INTERFACES]; +}; + +void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *dev_priv); +void intel_gsc_fini(struct intel_gsc *gsc); +void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir); + +#endif /* __INTEL_GSC_DEV_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index f0014c5072c9..92394f13b42f 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -456,6 +456,8 @@ void intel_gt_chipset_flush(struct intel_gt *gt) void intel_gt_driver_register(struct intel_gt *gt) { + intel_gsc_init(&gt->gsc, gt->i915); + intel_rps_driver_register(&gt->rps); intel_gt_debugfs_register(gt); @@ -784,6 +786,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt) intel_wakeref_t wakeref; intel_rps_driver_unregister(&gt->rps); + intel_gsc_fini(&gt->gsc); intel_pxp_fini(&gt->pxp); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 0163bba0959e..44c6cb63ccbc 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -46,6 +46,11 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc) return container_of(huc, struct intel_gt, uc.huc); } +static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc) +{ + return container_of(gsc, struct intel_gt, gsc); +} + void intel_root_gt_init_early(struct drm_i915_private *i915); int intel_gt_assign_ggtt(struct intel_gt *gt); int intel_gt_init_mmio(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index e443ac4c8059..88b4becfcb17 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -68,6 +68,9 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance, if (instance == OTHER_KCR_INSTANCE) return intel_pxp_irq_handler(&gt->pxp, iir); + if (instance == OTHER_GSC_INSTANCE) + return intel_gsc_irq_handler(gt, iir); + WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", instance, iir); } @@ -184,6 +187,8 @@ void gen11_gt_irq_reset(struct intel_gt *gt) intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0); if (CCS_MASK(gt)) intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, 0); + if (HAS_HECI_GSC(gt->i915)) + intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE, 0); /* Restore masks irqs on RCS, BCS, VCS and VECS engines.
*/ intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0); @@ -201,6 +206,8 @@ void gen11_gt_irq_reset(struct intel_gt *gt) intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~0); if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3)) intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~0); + if (HAS_HECI_GSC(gt->i915)) + intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~0); intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); @@ -215,6 +222,7 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt) { struct intel_uncore *uncore = gt->uncore; u32 irqs = GT_RENDER_USER_INTERRUPT; + const u32 gsc_mask = GSC_IRQ_INTF(0) | GSC_IRQ_INTF(1); u32 dmask; u32 smask; @@ -233,6 +241,9 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt) intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask); if (CCS_MASK(gt)) intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, smask); + if (HAS_HECI_GSC(gt->i915)) + intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE, + gsc_mask); /* Unmask irqs on RCS, BCS, VCS and VECS engines. */ intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask); @@ -250,6 +261,8 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt) intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~dmask); if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3)) intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~dmask); + if (HAS_HECI_GSC(gt->i915)) + intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~gsc_mask); /* * RPS interrupts will get enabled/disabled on demand when RPS itself diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h index 12d892851684..a39718a40cc3 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h @@ -1502,6 +1502,7 @@ #define OTHER_GUC_INSTANCE 0 #define OTHER_GTPM_INSTANCE 1 #define OTHER_KCR_INSTANCE 4 +#define OTHER_GSC_INSTANCE 6 #define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4)) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 937b2e1a305e..b06611c1d4ad 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -16,6 +16,7 @@ #include #include "uc/intel_uc.h" +#include "intel_gsc.h" #include "i915_vma.h" #include "intel_engine_types.h" @@ -73,6 +74,7 @@ struct intel_gt { struct i915_ggtt *ggtt; struct intel_uc uc; + struct intel_gsc gsc; struct mutex tlb_invalidate_lock; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9274417cd87a..a6cf9716d6aa 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1308,6 +1308,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_DMC(dev_priv) (INTEL_INFO(dev_priv)->display.has_dmc) +#define HAS_HECI_PXP(dev_priv) \ + (INTEL_INFO(dev_priv)->has_heci_pxp) + +#define HAS_HECI_GSCFI(dev_priv) \ + (INTEL_INFO(dev_priv)->has_heci_gscfi) + +#define HAS_HECI_GSC(dev_priv) (HAS_HECI_PXP(dev_priv) || HAS_HECI_GSCFI(dev_priv)) + #define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12) #define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 9e077929ed67..37cc8d180f60 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -901,7 +901,8 @@ static const struct intel_device_info rkl_info = { .has_llc = 0, \ .has_pxp = 0, \ .has_snoop = 1, \ - .is_dgfx = 1 + 
.is_dgfx = 1, \ + .has_heci_gscfi = 1 static const struct intel_device_info dg1_info = { GEN12_FEATURES, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index fef71b242706..1dd7b7de6002 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -976,6 +976,8 @@ #define GEN12_COMPUTE2_RING_BASE 0x1e000 #define GEN12_COMPUTE3_RING_BASE 0x26000 #define BLT_RING_BASE 0x22000 +#define DG1_GSC_HECI1_BASE 0x00258000 +#define DG1_GSC_HECI2_BASE 0x00259000 diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index f9b955810593..576d15a04c9e 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -141,6 +141,8 @@ enum intel_ppgtt_type { func(has_flat_ccs); \ func(has_global_mocs); \ func(has_gt_uc); \ + func(has_heci_pxp); \ + func(has_heci_gscfi); \ func(has_guc_deprivilege); \ func(has_l3_dpf); \ func(has_llc); \ diff --git a/include/linux/mei_aux.h b/include/linux/mei_aux.h new file mode 100644 index 000000000000..587f25128848 --- /dev/null +++ b/include/linux/mei_aux.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Intel Corporation. All rights reserved. + */ +#ifndef _LINUX_MEI_AUX_H +#define _LINUX_MEI_AUX_H + +#include <linux/auxiliary_bus.h> + +struct mei_aux_device { + struct auxiliary_device aux_dev; + int irq; + struct resource bar; +}; + +#define auxiliary_dev_to_mei_aux_dev(auxiliary_dev) \ + container_of(auxiliary_dev, struct mei_aux_device, aux_dev) + +#endif /* _LINUX_MEI_AUX_H */ -- cgit v1.2.3-59-g8ed1b From 56c134f7f1b58be08bdb0ca8372474a4a5165f31 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 29 Apr 2022 12:08:31 +0200 Subject: fbdev: Track deferred-I/O pages in pageref struct Store the per-page state for fbdev's deferred I/O in struct fb_deferred_io_pageref. Maintain a list of pagerefs for the pages that have to be written back to video memory. Update all affected drivers. As with pages before, fbdev acquires a pageref when an mmaped page of the framebuffer is being written to. It holds the pageref in a list of all currently written pagerefs until it flushes the written pages to video memory. Writeback occurs periodically. After writeback fbdev releases all pagerefs and builds up a new dirty list until the next writeback occurs. Using pagerefs has a number of benefits. For pages of the framebuffer, the deferred I/O code used struct page.lru as an entry into the list of dirty pages. The lru field is owned by the page cache, which makes deferred I/O incompatible with some memory pages (most notably DRM's GEM SHMEM allocator). struct fb_deferred_io_pageref now provides an entry into a list of dirty framebuffer pages, freeing lru for use with the page cache. Drivers also assumed that struct page.index is the page offset into the framebuffer. This is not true for DRM buffers, which are located at various offsets within a mapped area. struct fb_deferred_io_pageref explicitly stores an offset into the framebuffer. struct page.index is now only the page offset into the mapped area. These changes will allow DRM to use fbdev deferred I/O without an intermediate shadow buffer.
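The resulting driver-side pattern is visible in the updates below: deferred_io callbacks walk a list of pagerefs and read pageref->offset instead of computing page->index << PAGE_SHIFT. Condensed into a sketch (mydrv_deferred_io is a made-up name):

static void mydrv_deferred_io(struct fb_info *info, struct list_head *pagelist)
{
	struct fb_deferred_io_pageref *pageref;
	unsigned long start, end, min = ULONG_MAX, max = 0;

	list_for_each_entry(pageref, pagelist, list) {
		start = pageref->offset;	/* byte offset into the framebuffer */
		end = start + PAGE_SIZE;
		min = min(min, start);
		max = max(max, end);
	}

	/* flush the dirty range [min, max) to video memory here */
}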
v3: * use pageref->offset for sorting * fix grammar in comment v2: * minor fixes in commit message Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20220429100834.18898-3-tzimmermann@suse.de --- drivers/gpu/drm/drm_fb_helper.c | 6 +- drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 5 +- drivers/staging/fbtft/fbtft-core.c | 5 +- drivers/video/fbdev/broadsheetfb.c | 5 +- drivers/video/fbdev/core/fb_defio.c | 156 ++++++++++++++++++++++----------- drivers/video/fbdev/hyperv_fb.c | 5 +- drivers/video/fbdev/metronomefb.c | 5 +- drivers/video/fbdev/sh_mobile_lcdcfb.c | 6 +- drivers/video/fbdev/smscufx.c | 5 +- drivers/video/fbdev/udlfb.c | 5 +- drivers/video/fbdev/xen-fbfront.c | 5 +- include/linux/fb.h | 11 ++- 12 files changed, 145 insertions(+), 74 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index d06ce0e92d66..dd1d72d58b35 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -717,13 +717,13 @@ void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagelist) { unsigned long start, end, min, max; - struct page *page; + struct fb_deferred_io_pageref *pageref; struct drm_rect damage_area; min = ULONG_MAX; max = 0; - list_for_each_entry(page, pagelist, lru) { - start = page->index << PAGE_SHIFT; + list_for_each_entry(pageref, pagelist, list) { + start = pageref->offset; end = start + PAGE_SIZE; min = min(min, start); max = max(max, end); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 02a4a4fa3da3..db02e17e12b6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -322,12 +322,13 @@ static void vmw_deferred_io(struct fb_info *info, struct vmw_fb_par *par = info->par; unsigned long start, end, min, max; unsigned long flags; - struct page *page; + struct fb_deferred_io_pageref *pageref; int y1, y2; min = ULONG_MAX; max = 0; - list_for_each_entry(page, pagelist, lru) { + list_for_each_entry(pageref, pagelist, list) { + struct page *page = pageref->page; start = page->index << PAGE_SHIFT; end = start + PAGE_SIZE - 1; min = min(min, start); diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index d4e14f7c9d57..e9662822e1ef 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -326,7 +326,7 @@ static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagelist) { struct fbtft_par *par = info->par; unsigned int dirty_lines_start, dirty_lines_end; - struct page *page; + struct fb_deferred_io_pageref *pageref; unsigned long index; unsigned int y_low = 0, y_high = 0; int count = 0; @@ -340,7 +340,8 @@ static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagelist) spin_unlock(&par->dirty_lock); /* Mark display lines as dirty */ - list_for_each_entry(page, pagelist, lru) { + list_for_each_entry(pageref, pagelist, list) { + struct page *page = pageref->page; count++; index = page->index << PAGE_SHIFT; y_low = index / info->fix.line_length; diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c index 528bc0902338..6afc6ef4cb5e 100644 --- a/drivers/video/fbdev/broadsheetfb.c +++ b/drivers/video/fbdev/broadsheetfb.c @@ -934,7 +934,7 @@ static void broadsheetfb_dpy_deferred_io(struct fb_info *info, { u16 y1 = 0, h = 0; int prev_index = -1; - struct page *cur; + struct fb_deferred_io_pageref *pageref; struct fb_deferred_io 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 02a4a4fa3da3..db02e17e12b6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -322,12 +322,13 @@ static void vmw_deferred_io(struct fb_info *info,
 	struct vmw_fb_par *par = info->par;
 	unsigned long start, end, min, max;
 	unsigned long flags;
-	struct page *page;
+	struct fb_deferred_io_pageref *pageref;
 	int y1, y2;
 
 	min = ULONG_MAX;
 	max = 0;
-	list_for_each_entry(page, pagelist, lru) {
+	list_for_each_entry(pageref, pagelist, list) {
+		struct page *page = pageref->page;
 		start = page->index << PAGE_SHIFT;
 		end = start + PAGE_SIZE - 1;
 		min = min(min, start);

diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index d4e14f7c9d57..e9662822e1ef 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -326,7 +326,7 @@ static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagelist)
 {
 	struct fbtft_par *par = info->par;
 	unsigned int dirty_lines_start, dirty_lines_end;
-	struct page *page;
+	struct fb_deferred_io_pageref *pageref;
 	unsigned long index;
 	unsigned int y_low = 0, y_high = 0;
 	int count = 0;
@@ -340,7 +340,8 @@ static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagelist)
 	spin_unlock(&par->dirty_lock);
 
 	/* Mark display lines as dirty */
-	list_for_each_entry(page, pagelist, lru) {
+	list_for_each_entry(pageref, pagelist, list) {
+		struct page *page = pageref->page;
 		count++;
 		index = page->index << PAGE_SHIFT;
 		y_low = index / info->fix.line_length;

diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c
index 528bc0902338..6afc6ef4cb5e 100644
--- a/drivers/video/fbdev/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
@@ -934,7 +934,7 @@ static void broadsheetfb_dpy_deferred_io(struct fb_info *info,
 {
 	u16 y1 = 0, h = 0;
 	int prev_index = -1;
-	struct page *cur;
+	struct fb_deferred_io_pageref *pageref;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
 	int h_inc;
 	u16 yres = info->var.yres;
@@ -944,7 +944,8 @@ static void broadsheetfb_dpy_deferred_io(struct fb_info *info,
 	h_inc = DIV_ROUND_UP(PAGE_SIZE , xres);
 
 	/* walk the written page list and swizzle the data */
-	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+	list_for_each_entry(pageref, &fbdefio->pagelist, list) {
+		struct page *cur = pageref->page;
 		if (prev_index < 0) {
 			/* just starting so assign first page */
 			y1 = (cur->index << PAGE_SHIFT) / xres;

diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 6924d489a289..1bdef5c241f7 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -36,6 +36,60 @@ static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs
 	return page;
 }
 
+static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info,
+								 unsigned long offset,
+								 struct page *page)
+{
+	struct fb_deferred_io *fbdefio = info->fbdefio;
+	struct list_head *pos = &fbdefio->pagelist;
+	unsigned long pgoff = offset >> PAGE_SHIFT;
+	struct fb_deferred_io_pageref *pageref, *cur;
+
+	if (WARN_ON_ONCE(pgoff >= info->npagerefs))
+		return NULL; /* incorrect allocation size */
+
+	/* 1:1 mapping between pageref and page offset */
+	pageref = &info->pagerefs[pgoff];
+
+	/*
+	 * This check is to catch the case where a new process could start
+	 * writing to the same page through a new PTE. This new access
+	 * can cause a call to .page_mkwrite even if the original process'
+	 * PTE is marked writable.
+	 */
+	if (!list_empty(&pageref->list))
+		goto pageref_already_added;
+
+	pageref->page = page;
+	pageref->offset = pgoff << PAGE_SHIFT;
+
+	if (unlikely(fbdefio->sort_pagelist)) {
+		/*
+		 * We loop through the list of pagerefs before adding in
+		 * order to keep the pagerefs sorted. This has significant
+		 * overhead of O(n^2) with n being the number of written
+		 * pages. If possible, drivers should try to work with
+		 * unsorted page lists instead.
+		 */
+		list_for_each_entry(cur, &info->fbdefio->pagelist, list) {
+			if (cur->offset > pageref->offset)
+				break;
+		}
+		pos = &cur->list;
+	}
+
+	list_add_tail(&pageref->list, pos);
+
+pageref_already_added:
+	return pageref;
+}
+
+static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
+				       struct fb_info *info)
+{
+	list_del_init(&pageref->list);
+}
+
 /* this is to find and return the vmalloc-ed fb pages */
 static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
 {
@@ -59,7 +113,7 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
 		printk(KERN_ERR "no mapping available\n");
 
 	BUG_ON(!page->mapping);
-	page->index = vmf->pgoff;
+	page->index = vmf->pgoff; /* for page_mkclean() */
 
 	vmf->page = page;
 	return 0;
@@ -95,7 +149,11 @@ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
 	struct page *page = vmf->page;
 	struct fb_info *info = vmf->vma->vm_private_data;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct list_head *pos = &fbdefio->pagelist;
+	struct fb_deferred_io_pageref *pageref;
+	unsigned long offset;
+	vm_fault_t ret;
+
+	offset = (vmf->address - vmf->vma->vm_start);
 
 	/* this is a callback we get when userspace first tries to
 	write to the page. we schedule a workqueue. that workqueue
@@ -112,6 +170,12 @@ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
 	if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
 		fbdefio->first_io(info);
 
+	pageref = fb_deferred_io_pageref_get(info, offset, page);
+	if (WARN_ON_ONCE(!pageref)) {
+		ret = VM_FAULT_OOM;
+		goto err_mutex_unlock;
+	}
+
 	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid page_mkclean() being called
@@ -120,47 +184,17 @@ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
 	 * Do this by locking the page here and informing the caller
 	 * about it with VM_FAULT_LOCKED.
 	 */
-	lock_page(page);
-
-	/*
-	 * This check is to catch the case where a new process could start
-	 * writing to the same page through a new PTE. This new access
-	 * can cause a call to .page_mkwrite even if the original process'
-	 * PTE is marked writable.
-	 *
-	 * TODO: The lru field is owned by the page cache; hence the name.
-	 *       We dequeue in fb_deferred_io_work() after flushing the
-	 *       page's content into video memory. Instead of lru, fbdefio
-	 *       should have it's own field.
-	 */
-	if (!list_empty(&page->lru))
-		goto page_already_added;
-
-	if (unlikely(fbdefio->sort_pagelist)) {
-		/*
-		 * We loop through the pagelist before adding in order to
-		 * keep the pagelist sorted. This has significant overhead
-		 * of O(n^2) with n being the number of written pages. If
-		 * possible, drivers should try to work with unsorted page
-		 * lists instead.
-		 */
-		struct page *cur;
-
-		list_for_each_entry(cur, &fbdefio->pagelist, lru) {
-			if (cur->index > page->index)
-				break;
-		}
-		pos = &cur->lru;
-	}
-
-	list_add_tail(&page->lru, pos);
+	lock_page(pageref->page);
 
-page_already_added:
 	mutex_unlock(&fbdefio->lock);
 
 	/* come back after delay to process the deferred IO */
 	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
 
 	return VM_FAULT_LOCKED;
+
+err_mutex_unlock:
+	mutex_unlock(&fbdefio->lock);
+	return ret;
 }
 
 static const struct vm_operations_struct fb_deferred_io_vm_ops = {
@@ -186,15 +220,14 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);
 /* workqueue callback */
 static void fb_deferred_io_work(struct work_struct *work)
 {
-	struct fb_info *info = container_of(work, struct fb_info,
-					    deferred_work.work);
-	struct list_head *node, *next;
-	struct page *cur;
+	struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
+	struct fb_deferred_io_pageref *pageref, *next;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
 
 	/* here we mkclean the pages, then do all deferred IO */
 	mutex_lock(&fbdefio->lock);
-	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+	list_for_each_entry(pageref, &fbdefio->pagelist, list) {
+		struct page *cur = pageref->page;
 		lock_page(cur);
 		page_mkclean(cur);
 		unlock_page(cur);
@@ -204,30 +237,48 @@ static void fb_deferred_io_work(struct work_struct *work)
 	fbdefio->deferred_io(info, &fbdefio->pagelist);
 
 	/* clear the list */
-	list_for_each_safe(node, next, &fbdefio->pagelist) {
-		list_del_init(node);
-	}
+	list_for_each_entry_safe(pageref, next, &fbdefio->pagelist, list)
+		fb_deferred_io_pageref_put(pageref, info);
+
 	mutex_unlock(&fbdefio->lock);
 }
 
-void fb_deferred_io_init(struct fb_info *info)
+int fb_deferred_io_init(struct fb_info *info)
 {
 	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct page *page;
-	unsigned int i;
+	struct fb_deferred_io_pageref *pagerefs;
+	unsigned long npagerefs, i;
+	int ret;
 
 	BUG_ON(!fbdefio);
+
+	if (WARN_ON(!info->fix.smem_len))
+		return -EINVAL;
+
 	mutex_init(&fbdefio->lock);
 	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
 	INIT_LIST_HEAD(&fbdefio->pagelist);
 	if (fbdefio->delay == 0) /* set a default of 1 s */
 		fbdefio->delay = HZ;
 
-	/* initialize all the page lists one time */
-	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
-		page = fb_deferred_io_page(info, i);
-		INIT_LIST_HEAD(&page->lru);
+	npagerefs = DIV_ROUND_UP(info->fix.smem_len, PAGE_SIZE);
+
+	/* alloc a page ref for each page of the display memory */
+	pagerefs = kvcalloc(npagerefs, sizeof(*pagerefs), GFP_KERNEL);
+	if (!pagerefs) {
+		ret = -ENOMEM;
+		goto err;
 	}
+	for (i = 0; i < npagerefs; ++i)
+		INIT_LIST_HEAD(&pagerefs[i].list);
+	info->npagerefs = npagerefs;
+	info->pagerefs = pagerefs;
+
+	return 0;
+
+err:
+	mutex_destroy(&fbdefio->lock);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_init);
 
@@ -254,6 +305,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
 		page->mapping = NULL;
 	}
 
+	kvfree(info->pagerefs);
 	mutex_destroy(&fbdefio->lock);
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
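The heart of the new scheme is a flat table with one pageref per framebuffer page: lookup is O(1) array indexing, and list_empty() doubles as the membership test because list_del_init() leaves a node pointing at itself. A condensed model of the idea, stripped of locking and the sorted-insert path (names are illustrative; this is a sketch, not the kernel code verbatim):

    /* Assumes <linux/list.h>. One pageref exists per framebuffer page,
     * preallocated in a flat array, so offset -> pageref is plain indexing.
     */
    struct pageref {
        unsigned long offset;       /* byte offset into the framebuffer */
        struct list_head list;      /* node on the dirty list */
    };

    static struct pageref *track_page(struct pageref *table, unsigned long npages,
                                      struct list_head *dirty, unsigned long offset)
    {
        unsigned long pgoff = offset >> PAGE_SHIFT;

        if (pgoff >= npages)
            return NULL;            /* table was sized too small */
        /* list_del_init() resets a node to point at itself, so
         * list_empty() means "not currently tracked" and prevents
         * double insertion when a second PTE faults on the same page.
         */
        if (list_empty(&table[pgoff].list)) {
            table[pgoff].offset = pgoff << PAGE_SHIFT;
            list_add_tail(&table[pgoff].list, dirty);
        }
        return &table[pgoff];
    }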
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index d7f6abf827b9..51eb57ea68f7 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -424,7 +424,7 @@ static void synthvid_deferred_io(struct fb_info *p,
 			  struct list_head *pagelist)
 {
 	struct hvfb_par *par = p->par;
-	struct page *page;
+	struct fb_deferred_io_pageref *pageref;
 	unsigned long start, end;
 	int y1, y2, miny, maxy;
 
@@ -437,7 +437,8 @@ static void synthvid_deferred_io(struct fb_info *p,
 	 * in synthvid_update function by clamping the y2
 	 * value to yres.
 	 */
-	list_for_each_entry(page, pagelist, lru) {
+	list_for_each_entry(pageref, pagelist, list) {
+		struct page *page = pageref->page;
 		start = page->index << PAGE_SHIFT;
 		end = start + PAGE_SIZE - 1;
 		y1 = start / p->fix.line_length;
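The driver-side half of the conversion follows one uniform pattern across all of these hunks: iterate pageref nodes through their list member and pull the struct page out of the pageref. Schematically, with process() standing in for whatever per-page work a driver does:

    /* before: pages were chained through page->lru */
    list_for_each_entry(page, pagelist, lru)
        process(page->index << PAGE_SHIFT);

    /* after: pagerefs are chained through pageref->list; the byte
     * offset is stored directly, and pageref->page recovers the page
     */
    list_for_each_entry(pageref, pagelist, list)
        process(pageref->offset);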
diff --git a/drivers/video/fbdev/metronomefb.c b/drivers/video/fbdev/metronomefb.c
index 2541f2fe065b..8352fa3f4cef 100644
--- a/drivers/video/fbdev/metronomefb.c
+++ b/drivers/video/fbdev/metronomefb.c
@@ -469,12 +469,13 @@ static void metronomefb_dpy_deferred_io(struct fb_info *info,
 				struct list_head *pagelist)
 {
 	u16 cksum;
-	struct page *cur;
+	struct fb_deferred_io_pageref *pageref;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
 	struct metronomefb_par *par = info->par;
 
 	/* walk the written page list and swizzle the data */
-	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+	list_for_each_entry(pageref, &fbdefio->pagelist, list) {
+		struct page *cur = pageref->page;
 		cksum = metronomefb_dpy_update_page(par,
 					(cur->index << PAGE_SHIFT));
 		par->metromem_img_csum -= par->csum_table[cur->index];

diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index ad034eb0e7a0..aea0660ba0c3 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -440,13 +440,15 @@ static int sh_mobile_lcdc_sginit(struct fb_info *info,
 {
 	struct sh_mobile_lcdc_chan *ch = info->par;
 	unsigned int nr_pages_max = ch->fb_size >> PAGE_SHIFT;
-	struct page *page;
+	struct fb_deferred_io_pageref *pageref;
 	int nr_pages = 0;
 
 	sg_init_table(ch->sglist, nr_pages_max);
 
-	list_for_each_entry(page, pagelist, lru)
+	list_for_each_entry(pageref, pagelist, list) {
+		struct page *page = pageref->page;
 		sg_set_page(&ch->sglist[nr_pages++], page, PAGE_SIZE, 0);
+	}
 
 	return nr_pages;
 }

diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index 9383f8d0914c..eb108f56ea04 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -958,7 +958,7 @@ static void ufx_ops_fillrect(struct fb_info *info,
 static void ufx_dpy_deferred_io(struct fb_info *info,
 				struct list_head *pagelist)
 {
-	struct page *cur;
+	struct fb_deferred_io_pageref *pageref;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
 	struct ufx_data *dev = info->par;
 
@@ -969,9 +969,10 @@ static void ufx_dpy_deferred_io(struct fb_info *info,
 		return;
 
 	/* walk the written page list and render each to device */
-	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+	list_for_each_entry(pageref, &fbdefio->pagelist, list) {
 		/* create a rectangle of full screen width that encloses the
 		 * entire dirty framebuffer page */
+		struct page *cur = pageref->page;
 		const int x = 0;
 		const int width = dev->info->var.xres;
 		const int y = (cur->index << PAGE_SHIFT) / (width * 2);

diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 179b30ba09f5..f70b9cc8e855 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -784,7 +784,7 @@ static void dlfb_ops_fillrect(struct fb_info *info,
 static void dlfb_dpy_deferred_io(struct fb_info *info,
 				struct list_head *pagelist)
 {
-	struct page *cur;
+	struct fb_deferred_io_pageref *pageref;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
 	struct dlfb_data *dlfb = info->par;
 	struct urb *urb;
@@ -811,7 +811,8 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
 	cmd = urb->transfer_buffer;
 
 	/* walk the written page list and render each to device */
-	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+	list_for_each_entry(pageref, &fbdefio->pagelist, list) {
+		struct page *cur = pageref->page;
 		if (dlfb_render_hline(dlfb, &urb, (char *) info->fix.smem_start,
 				      &cmd, cur->index << PAGE_SHIFT,

diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 6c8846eba2fb..608fcde767d3 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -185,13 +185,14 @@ static void xenfb_deferred_io(struct fb_info *fb_info,
 			      struct list_head *pagelist)
 {
 	struct xenfb_info *info = fb_info->par;
-	struct page *page;
+	struct fb_deferred_io_pageref *pageref;
 	unsigned long beg, end;
 	int y1, y2, miny, maxy;
 
 	miny = INT_MAX;
 	maxy = 0;
-	list_for_each_entry(page, pagelist, lru) {
+	list_for_each_entry(pageref, pagelist, list) {
+		struct page *page = pageref->page;
 		beg = page->index << PAGE_SHIFT;
 		end = beg + PAGE_SIZE - 1;
 		y1 = beg / fb_info->fix.line_length;
diff --git a/include/linux/fb.h b/include/linux/fb.h
index f95da1af9ff6..a332590c0fae 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -201,6 +201,13 @@ struct fb_pixmap {
 };
 
 #ifdef CONFIG_FB_DEFERRED_IO
+struct fb_deferred_io_pageref {
+	struct page *page;
+	unsigned long offset;
+	/* private */
+	struct list_head list;
+};
+
 struct fb_deferred_io {
 	/* delay between mkwrite and deferred handler */
 	unsigned long delay;
@@ -468,6 +475,8 @@ struct fb_info {
 #endif
 #ifdef CONFIG_FB_DEFERRED_IO
 	struct delayed_work deferred_work;
+	unsigned long npagerefs;
+	struct fb_deferred_io_pageref *pagerefs;
 	struct fb_deferred_io *fbdefio;
 #endif
 
@@ -661,7 +670,7 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
 
 /* drivers/video/fb_defio.c */
 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
-extern void fb_deferred_io_init(struct fb_info *info);
+extern int fb_deferred_io_init(struct fb_info *info);
 extern void fb_deferred_io_open(struct fb_info *info,
 				struct inode *inode,
 				struct file *file);
-- 
cgit v1.2.3-59-g8ed1b
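With this commit, fb_deferred_io_init() allocates one pageref per page of display memory; for example, a 4,096,000-byte framebuffer (1280x800 at 32 bpp) needs DIV_ROUND_UP(4096000, 4096) = 1000 pagerefs. Because the allocation can fail, the function now returns int, and callers are expected to check it. A sketch of the call site in a driver probe path (the surrounding identifiers are hypothetical, not taken from the patch):

    info->fbdefio = &mydrv_defio;      /* hypothetical per-driver config */

    ret = fb_deferred_io_init(info);   /* may now return -EINVAL or -ENOMEM */
    if (ret) {
        dev_err(dev, "deferred I/O init failed: %d\n", ret);
        goto err_release_framebuffer;  /* unwind earlier setup */
    }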
00:00:00 2001
From: Thomas Zimmermann
Date: Fri, 29 Apr 2022 12:08:33 +0200
Subject: fbdev: Rename pagelist to pagereflist for deferred I/O

Rename various instances of pagelist to pagereflist. The list now stores
pageref structures, so the new name is more appropriate.

In their write-back helpers, several fbdev drivers refer to the pageref
list in struct fb_deferred_io instead of using the one supplied as
argument to the function. Convert them over to the supplied one. It's
the same instance, so no change of behavior occurs.

v4:
	* fix commit message (Javier)

Suggested-by: Sam Ravnborg
Signed-off-by: Thomas Zimmermann
Reviewed-by: Javier Martinez Canillas
Link: https://patchwork.freedesktop.org/patch/msgid/20220429100834.18898-5-tzimmermann@suse.de
---
 drivers/gpu/drm/drm_fb_helper.c        |  7 +++----
 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c     |  5 ++---
 drivers/hid/hid-picolcd_fb.c           |  2 +-
 drivers/staging/fbtft/fbtft-core.c     | 10 +++++-----
 drivers/video/fbdev/broadsheetfb.c     | 12 +++++-------
 drivers/video/fbdev/core/fb_defio.c    | 18 +++++++++---------
 drivers/video/fbdev/hecubafb.c         |  3 +--
 drivers/video/fbdev/hyperv_fb.c        |  5 ++---
 drivers/video/fbdev/metronomefb.c      | 12 +++++-------
 drivers/video/fbdev/sh_mobile_lcdcfb.c | 16 +++++++---------
 drivers/video/fbdev/smscufx.c          |  8 +++-----
 drivers/video/fbdev/ssd1307fb.c        |  3 +--
 drivers/video/fbdev/udlfb.c            |  8 +++-----
 drivers/video/fbdev/xen-fbfront.c      |  5 ++---
 include/drm/drm_fb_helper.h            |  3 +--
 include/linux/fb.h                     |  6 +++---
 16 files changed, 53 insertions(+), 70 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index dd1d72d58b35..5ad2b6a2778c 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -708,13 +708,12 @@ static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off,
 /**
  * drm_fb_helper_deferred_io() - fbdev deferred_io callback function
  * @info: fb_info struct pointer
- * @pagelist: list of mmap framebuffer pages that have to be flushed
+ * @pagereflist: list of mmap framebuffer pages that have to be flushed
  *
  * This function is used as the &fb_deferred_io.deferred_io
 * callback function for flushing the fbdev mmap writes.
 */
-void drm_fb_helper_deferred_io(struct fb_info *info,
-			       struct list_head *pagelist)
+void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist)
 {
 	unsigned long start, end, min, max;
 	struct fb_deferred_io_pageref *pageref;
@@ -722,7 +721,7 @@ void drm_fb_helper_deferred_io(struct fb_info *info,
 	min = ULONG_MAX;
 	max = 0;
-	list_for_each_entry(pageref, pagelist, list) {
+	list_for_each_entry(pageref, pagereflist, list) {
 		start = pageref->offset;
 		end = start + PAGE_SIZE;
 		min = min(min, start);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index db02e17e12b6..9a3dc5c4eec8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -316,8 +316,7 @@ static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
 	return 0;
 }
 
-static void vmw_deferred_io(struct fb_info *info,
-			    struct list_head *pagelist)
+static void vmw_deferred_io(struct fb_info *info, struct list_head *pagereflist)
 {
 	struct vmw_fb_par *par = info->par;
 	unsigned long start, end, min, max;
@@ -327,7 +326,7 @@ static void vmw_deferred_io(struct fb_info *info,
 	min = ULONG_MAX;
 	max = 0;
-	list_for_each_entry(pageref, pagelist, list) {
+	list_for_each_entry(pageref, pagereflist, list) {
 		struct page *page = pageref->page;
 		start = page->index << PAGE_SHIFT;
 		end = start + PAGE_SIZE - 1;

diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
index 78515e6bacf0..de61c08fabea 100644
--- a/drivers/hid/hid-picolcd_fb.c
+++ b/drivers/hid/hid-picolcd_fb.c
@@ -433,7 +433,7 @@ static const struct fb_ops picolcdfb_ops = {
 
 /* Callback from deferred IO workqueue */
-static void picolcd_fb_deferred_io(struct fb_info *info, struct list_head *pagelist)
+static void picolcd_fb_deferred_io(struct fb_info *info, struct list_head *pagereflist)
 {
 	picolcd_fb_update(info);
 }

diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index e9662822e1ef..8774bffe94cc 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -322,7 +322,7 @@ static void fbtft_mkdirty(struct fb_info *info, int y, int height)
 	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
 }
 
-static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagelist)
+static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagereflist)
 {
 	struct fbtft_par *par = info->par;
 	unsigned int dirty_lines_start, dirty_lines_end;
@@ -340,7 +340,7 @@ static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagelist)
 	spin_unlock(&par->dirty_lock);
 
 	/* Mark display lines as dirty */
-	list_for_each_entry(pageref, pagelist, list) {
+	list_for_each_entry(pageref, pagereflist, list) {
 		struct page *page = pageref->page;
 		count++;
 		index = page->index << PAGE_SHIFT;
@@ -655,9 +655,9 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
 	fbops->fb_blank     = fbtft_fb_blank;
 	fbops->fb_mmap      = fb_deferred_io_mmap;
 
-	fbdefio->delay            = HZ / fps;
-	fbdefio->sort_pagelist    = true;
-	fbdefio->deferred_io      = fbtft_deferred_io;
+	fbdefio->delay            = HZ / fps;
+	fbdefio->sort_pagereflist = true;
+	fbdefio->deferred_io      = fbtft_deferred_io;
 	fb_deferred_io_init(info);
 
 	snprintf(info->fix.id, sizeof(info->fix.id), "%s", dev->driver->name);
diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c
index 6afc6ef4cb5e..883a3ac03189 100644
--- a/drivers/video/fbdev/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
@@ -929,13 +929,11 @@ static void broadsheetfb_dpy_update(struct broadsheetfb_par *par)
 }
 
 /* this is called back from the deferred io workqueue */
-static void broadsheetfb_dpy_deferred_io(struct fb_info *info,
-				struct list_head *pagelist)
+static void broadsheetfb_dpy_deferred_io(struct fb_info *info, struct list_head *pagereflist)
 {
 	u16 y1 = 0, h = 0;
 	int prev_index = -1;
 	struct fb_deferred_io_pageref *pageref;
-	struct fb_deferred_io *fbdefio = info->fbdefio;
 	int h_inc;
 	u16 yres = info->var.yres;
 	u16 xres = info->var.xres;
@@ -944,7 +942,7 @@ static void broadsheetfb_dpy_deferred_io(struct fb_info *info,
 	h_inc = DIV_ROUND_UP(PAGE_SIZE , xres);
 
 	/* walk the written page list and swizzle the data */
-	list_for_each_entry(pageref, &fbdefio->pagelist, list) {
+	list_for_each_entry(pageref, pagereflist, list) {
 		struct page *cur = pageref->page;
 		if (prev_index < 0) {
 			/* just starting so assign first page */
@@ -1060,9 +1058,9 @@ static const struct fb_ops broadsheetfb_ops = {
 };
 
 static struct fb_deferred_io broadsheetfb_defio = {
-	.delay		= HZ/4,
-	.sort_pagelist	= true,
-	.deferred_io	= broadsheetfb_dpy_deferred_io,
+	.delay		= HZ/4,
+	.sort_pagereflist = true,
+	.deferred_io	= broadsheetfb_dpy_deferred_io,
 };
 
 static int broadsheetfb_probe(struct platform_device *dev)
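broadsheetfb shows the behavioral half of this commit: the callback drops its private fbdefio pointer and walks the list passed in as argument. Both currently name the same instance, but using the argument keeps drivers decoupled from the core's internals. The preferred shape, sketched with hypothetical driver names:

    static void mydrv_deferred_io(struct fb_info *info, struct list_head *pagereflist)
    {
        struct fb_deferred_io_pageref *pageref;

        /* walk the supplied list, not info->fbdefio->pagereflist */
        list_for_each_entry(pageref, pagereflist, list)
            mydrv_flush_page(info, pageref->offset);    /* hypothetical helper */
    }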
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 3f9bb4725119..c730253ab85c 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -41,7 +41,7 @@ static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info
 						 struct page *page)
 {
 	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct list_head *pos = &fbdefio->pagelist;
+	struct list_head *pos = &fbdefio->pagereflist;
 	unsigned long pgoff = offset >> PAGE_SHIFT;
 	struct fb_deferred_io_pageref *pageref, *cur;
 
@@ -63,7 +63,7 @@ static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info
 	pageref->page = page;
 	pageref->offset = pgoff << PAGE_SHIFT;
 
-	if (unlikely(fbdefio->sort_pagelist)) {
+	if (unlikely(fbdefio->sort_pagereflist)) {
 		/*
 		 * We loop through the list of pagerefs before adding in
 		 * order to keep the pagerefs sorted. This has significant
@@ -71,7 +71,7 @@ static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info
 		 * pages. If possible, drivers should try to work with
 		 * unsorted page lists instead.
 		 */
-		list_for_each_entry(cur, &info->fbdefio->pagelist, list) {
+		list_for_each_entry(cur, &fbdefio->pagereflist, list) {
 			if (cur->offset > pageref->offset)
 				break;
 		}
@@ -158,7 +158,7 @@ static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long
 	mutex_lock(&fbdefio->lock);
 
 	/* first write in this cycle, notify the driver */
-	if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
+	if (fbdefio->first_io && list_empty(&fbdefio->pagereflist))
 		fbdefio->first_io(info);
 
 	pageref = fb_deferred_io_pageref_get(info, offset, page);
@@ -249,18 +249,18 @@ static void fb_deferred_io_work(struct work_struct *work)
 
 	/* here we mkclean the pages, then do all deferred IO */
 	mutex_lock(&fbdefio->lock);
-	list_for_each_entry(pageref, &fbdefio->pagelist, list) {
+	list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
 		struct page *cur = pageref->page;
 		lock_page(cur);
 		page_mkclean(cur);
 		unlock_page(cur);
 	}
 
-	/* driver's callback with pagelist */
-	fbdefio->deferred_io(info, &fbdefio->pagelist);
+	/* driver's callback with pagereflist */
+	fbdefio->deferred_io(info, &fbdefio->pagereflist);
 
 	/* clear the list */
-	list_for_each_entry_safe(pageref, next, &fbdefio->pagelist, list)
+	list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
 		fb_deferred_io_pageref_put(pageref, info);
 
 	mutex_unlock(&fbdefio->lock);
@@ -280,7 +280,7 @@ int fb_deferred_io_init(struct fb_info *info)
 
 	mutex_init(&fbdefio->lock);
 	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
-	INIT_LIST_HEAD(&fbdefio->pagelist);
+	INIT_LIST_HEAD(&fbdefio->pagereflist);
 	if (fbdefio->delay == 0) /* set a default of 1 s */
 		fbdefio->delay = HZ;
diff --git a/drivers/video/fbdev/hecubafb.c b/drivers/video/fbdev/hecubafb.c
index 2c483e2cf9ec..eb1eaadc1bbb 100644
--- a/drivers/video/fbdev/hecubafb.c
+++ b/drivers/video/fbdev/hecubafb.c
@@ -115,8 +115,7 @@ static void hecubafb_dpy_update(struct hecubafb_par *par)
 }
 
 /* this is called back from the deferred io workqueue */
-static void hecubafb_dpy_deferred_io(struct fb_info *info,
-				struct list_head *pagelist)
+static void hecubafb_dpy_deferred_io(struct fb_info *info, struct list_head *pagereflist)
 {
 	hecubafb_dpy_update(info->par);
 }

diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 51eb57ea68f7..550cf0990070 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -420,8 +420,7 @@ static void hvfb_docopy(struct hvfb_par *par,
 }
 
 /* Deferred IO callback */
-static void synthvid_deferred_io(struct fb_info *p,
-				 struct list_head *pagelist)
+static void synthvid_deferred_io(struct fb_info *p, struct list_head *pagereflist)
 {
 	struct hvfb_par *par = p->par;
 	struct fb_deferred_io_pageref *pageref;
@@ -437,7 +436,7 @@ static void synthvid_deferred_io(struct fb_info *p,
 	 * in synthvid_update function by clamping the y2
 	 * value to yres.
 	 */
-	list_for_each_entry(pageref, pagelist, list) {
+	list_for_each_entry(pageref, pagereflist, list) {
 		struct page *page = pageref->page;
 		start = page->index << PAGE_SHIFT;
 		end = start + PAGE_SIZE - 1;

diff --git a/drivers/video/fbdev/metronomefb.c b/drivers/video/fbdev/metronomefb.c
index 8352fa3f4cef..f581c73d39df 100644
--- a/drivers/video/fbdev/metronomefb.c
+++ b/drivers/video/fbdev/metronomefb.c
@@ -465,16 +465,14 @@ static u16 metronomefb_dpy_update_page(struct metronomefb_par *par, int index)
 }
 
 /* this is called back from the deferred io workqueue */
-static void metronomefb_dpy_deferred_io(struct fb_info *info,
-				struct list_head *pagelist)
+static void metronomefb_dpy_deferred_io(struct fb_info *info, struct list_head *pagereflist)
 {
 	u16 cksum;
 	struct fb_deferred_io_pageref *pageref;
-	struct fb_deferred_io *fbdefio = info->fbdefio;
 	struct metronomefb_par *par = info->par;
 
 	/* walk the written page list and swizzle the data */
-	list_for_each_entry(pageref, &fbdefio->pagelist, list) {
+	list_for_each_entry(pageref, pagereflist, list) {
 		struct page *cur = pageref->page;
 		cksum = metronomefb_dpy_update_page(par,
 					(cur->index << PAGE_SHIFT));
@@ -569,9 +567,9 @@ static const struct fb_ops metronomefb_ops = {
 };
 
 static struct fb_deferred_io metronomefb_defio = {
-	.delay		= HZ,
-	.sort_pagelist	= true,
-	.deferred_io	= metronomefb_dpy_deferred_io,
+	.delay		= HZ,
+	.sort_pagereflist = true,
+	.deferred_io	= metronomefb_dpy_deferred_io,
 };
 
 static int metronomefb_probe(struct platform_device *dev)
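As in metronomefb above, drivers that must see pages in ascending address order (typical for e-paper controllers that swizzle data) opt in through the renamed sort_pagereflist flag in their static fb_deferred_io. A sketch of such a configuration (the callback name is hypothetical):

    static struct fb_deferred_io mydrv_defio = {
        .delay            = HZ / 4,              /* batch writes for 250 ms */
        .sort_pagereflist = true,                /* deliver pagerefs sorted by offset */
        .deferred_io      = mydrv_deferred_io,   /* write-back callback */
    };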
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index aea0660ba0c3..2b060eca4360 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -435,8 +435,7 @@ static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
 	.read_data	= lcdc_sys_read_data,
 };
 
-static int sh_mobile_lcdc_sginit(struct fb_info *info,
-				struct list_head *pagelist)
+static int sh_mobile_lcdc_sginit(struct fb_info *info, struct list_head *pagereflist)
 {
 	struct sh_mobile_lcdc_chan *ch = info->par;
 	unsigned int nr_pages_max = ch->fb_size >> PAGE_SHIFT;
@@ -445,7 +444,7 @@ static int sh_mobile_lcdc_sginit(struct fb_info *info,
 
 	sg_init_table(ch->sglist, nr_pages_max);
 
-	list_for_each_entry(pageref, pagelist, list) {
+	list_for_each_entry(pageref, pagereflist, list) {
 		struct page *page = pageref->page;
 		sg_set_page(&ch->sglist[nr_pages++], page, PAGE_SIZE, 0);
 	}
@@ -453,8 +452,7 @@ static int sh_mobile_lcdc_sginit(struct fb_info *info,
 	return nr_pages;
 }
 
-static void sh_mobile_lcdc_deferred_io(struct fb_info *info,
-				       struct list_head *pagelist)
+static void sh_mobile_lcdc_deferred_io(struct fb_info *info, struct list_head *pagereflist)
 {
 	struct sh_mobile_lcdc_chan *ch = info->par;
 	const struct sh_mobile_lcdc_panel_cfg *panel = &ch->cfg->panel_cfg;
@@ -463,7 +461,7 @@ static void sh_mobile_lcdc_deferred_io(struct fb_info *info,
 	sh_mobile_lcdc_clk_on(ch->lcdc);
 
 	/*
-	 * It's possible to get here without anything on the pagelist via
+	 * It's possible to get here without anything on the pagereflist via
 	 * sh_mobile_lcdc_deferred_io_touch() or via a userspace fsync()
 	 * invocation. In the former case, the acceleration routines are
 	 * stepped in to when using the framebuffer console causing the
@@ -473,12 +471,12 @@ static void sh_mobile_lcdc_deferred_io(struct fb_info *info,
 	 * acceleration routines have their own methods for writing in
 	 * that still need to be updated.
 	 *
-	 * The fsync() and empty pagelist case could be optimized for,
+	 * The fsync() and empty pagereflist case could be optimized for,
 	 * but we don't bother, as any application exhibiting such
 	 * behaviour is fundamentally broken anyways.
 	 */
-	if (!list_empty(pagelist)) {
-		unsigned int nr_pages = sh_mobile_lcdc_sginit(info, pagelist);
+	if (!list_empty(pagereflist)) {
+		unsigned int nr_pages = sh_mobile_lcdc_sginit(info, pagereflist);
 
 		/* trigger panel update */
 		dma_map_sg(ch->lcdc->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
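sh_mobile_lcdcfb is the one conversion that feeds the pageref list into DMA: each dirty page becomes one scatter-gather entry. The same pattern, reduced to its essentials (table sizing and the dma_map_sg() step omitted; sglist and nr_pages_max are assumed to be set up as in the driver):

    unsigned int nr_pages = 0;

    sg_init_table(sglist, nr_pages_max);
    list_for_each_entry(pageref, pagereflist, list)
        sg_set_page(&sglist[nr_pages++], pageref->page, PAGE_SIZE, 0);
    /* dma_map_sg() then hands nr_pages entries to the DMA engine */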
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index eb108f56ea04..7f00605bc0d1 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -955,12 +955,10 @@ static void ufx_ops_fillrect(struct fb_info *info,
  * Touching ANY framebuffer memory that triggers a page fault
  * in fb_defio will cause a deadlock, when it also tries to
  * grab the same mutex. */
-static void ufx_dpy_deferred_io(struct fb_info *info,
-				struct list_head *pagelist)
+static void ufx_dpy_deferred_io(struct fb_info *info, struct list_head *pagereflist)
 {
-	struct fb_deferred_io_pageref *pageref;
-	struct fb_deferred_io *fbdefio = info->fbdefio;
 	struct ufx_data *dev = info->par;
+	struct fb_deferred_io_pageref *pageref;
 
 	if (!fb_defio)
 		return;
@@ -969,7 +967,7 @@ static void ufx_dpy_deferred_io(struct fb_info *info,
 		return;
 
 	/* walk the written page list and render each to device */
-	list_for_each_entry(pageref, &fbdefio->pagelist, list) {
+	list_for_each_entry(pageref, pagereflist, list) {
 		/* create a rectangle of full screen width that encloses the
 		 * entire dirty framebuffer page */
 		struct page *cur = pageref->page;

diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 7547b4628afc..5c765655d000 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -371,8 +371,7 @@ static const struct fb_ops ssd1307fb_ops = {
 	.fb_mmap	= fb_deferred_io_mmap,
 };
 
-static void ssd1307fb_deferred_io(struct fb_info *info,
-				  struct list_head *pagelist)
+static void ssd1307fb_deferred_io(struct fb_info *info, struct list_head *pagereflist)
 {
 	ssd1307fb_update_display(info->par);
 }

diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index f70b9cc8e855..66eefd317431 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -781,11 +781,9 @@ static void dlfb_ops_fillrect(struct fb_info *info,
 * in fb_defio will cause a deadlock, when it also tries to
 * grab the same mutex.
 */
-static void dlfb_dpy_deferred_io(struct fb_info *info,
-				struct list_head *pagelist)
+static void dlfb_dpy_deferred_io(struct fb_info *info, struct list_head *pagereflist)
 {
 	struct fb_deferred_io_pageref *pageref;
-	struct fb_deferred_io *fbdefio = info->fbdefio;
 	struct dlfb_data *dlfb = info->par;
 	struct urb *urb;
 	char *cmd;
@@ -811,7 +809,7 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
 	cmd = urb->transfer_buffer;
 
 	/* walk the written page list and render each to device */
-	list_for_each_entry(pageref, &fbdefio->pagelist, list) {
+	list_for_each_entry(pageref, pagereflist, list) {
 		struct page *cur = pageref->page;
 		if (dlfb_render_hline(dlfb, &urb, (char *) info->fix.smem_start,
@@ -984,7 +982,7 @@ static int dlfb_ops_open(struct fb_info *info, int user)
 
 	if (fbdefio) {
 		fbdefio->delay = DL_DEFIO_WRITE_DELAY;
-		fbdefio->sort_pagelist = true;
+		fbdefio->sort_pagereflist = true;
 		fbdefio->deferred_io = dlfb_dpy_deferred_io;
 	}

diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 608fcde767d3..bc8244b9ce03 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -181,8 +181,7 @@ static void xenfb_refresh(struct xenfb_info *info,
 	xenfb_do_update(info, x1, y1, x2 - x1 + 1, y2 - y1 + 1);
 }
 
-static void xenfb_deferred_io(struct fb_info *fb_info,
-			      struct list_head *pagelist)
+static void xenfb_deferred_io(struct fb_info *fb_info, struct list_head *pagereflist)
 {
 	struct xenfb_info *info = fb_info->par;
 	struct fb_deferred_io_pageref *pageref;
@@ -191,7 +190,7 @@ static void xenfb_deferred_io(struct fb_info *fb_info,
 	miny = INT_MAX;
 	maxy = 0;
-	list_for_each_entry(pageref, pagelist, list) {
+	list_for_each_entry(pageref, pagereflist, list) {
 		struct page *page = pageref->page;
 		beg = page->index << PAGE_SHIFT;
 		end = beg + PAGE_SIZE - 1;

diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 3af4624368d8..329607ca65c0 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -229,8 +229,7 @@ void drm_fb_helper_fill_info(struct fb_info *info,
 			     struct drm_fb_helper *fb_helper,
 			     struct drm_fb_helper_surface_size *sizes);
 
-void drm_fb_helper_deferred_io(struct fb_info *info,
-			       struct list_head *pagelist);
+void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist);
 
 ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
 			       size_t count, loff_t *ppos);

diff --git a/include/linux/fb.h b/include/linux/fb.h
index a332590c0fae..69c67c70fa78 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -211,9 +211,9 @@ struct fb_deferred_io_pageref {
 struct fb_deferred_io {
 	/* delay between mkwrite and deferred handler */
 	unsigned long delay;
-	bool sort_pagelist;	/* sort pagelist by offset */
-	struct mutex lock;	/* mutex that protects the page list */
-	struct list_head pagelist;	/* list of touched pages */
+	bool sort_pagereflist;	/* sort pagelist by offset */
+	struct mutex lock;	/* mutex that protects the pageref list */
+	struct list_head pagereflist;	/* list of pagerefs for touched pages */
 	/* callback */
 	void (*first_io)(struct fb_info *info);
 	void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
-- 
cgit v1.2.3-59-g8ed1b

From 26817fb7b066b21c66cd41d75ed2137e046045be Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Wed, 4 May 2022 09:27:10 -0400
Subject: Revert "fbdev: fbmem: add a helper to determine if an aperture is used by a fw fb"

This reverts commit 9a45ac2320d0a6ae01880a30d4b86025fce4061b.
This added a helper for amdgpu to work around a runtime pm regression
caused by a runtime pm fix in efifb. We now have a better workaround
in amdgpu in commit f95af4a9236695 ("drm/amdgpu: don't runtime suspend
if there are displays attached (v3)") so this workaround is no longer
necessary. Since amdgpu was the only user of this interface, we can
remove it.

Reviewed-by: Javier Martinez Canillas
Acked-by: Daniel Vetter
Signed-off-by: Alex Deucher
---
 drivers/video/fbdev/core/fbmem.c | 47 ----------------------------------------
 include/linux/fb.h               |  1 -
 2 files changed, 48 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index ad9aac06427a..700ac4a83329 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1783,53 +1783,6 @@ int remove_conflicting_framebuffers(struct apertures_struct *a,
 }
 EXPORT_SYMBOL(remove_conflicting_framebuffers);
 
-/**
- * is_firmware_framebuffer - detect if firmware-configured framebuffer matches
- * @a: memory range, users of which are to be checked
- *
- * This function checks framebuffer devices (initialized by firmware/bootloader)
- * which use memory range described by @a. If @a matchesm the function returns
- * true, otherwise false.
- */
-bool is_firmware_framebuffer(struct apertures_struct *a)
-{
-	bool do_free = false;
-	bool found = false;
-	int i;
-
-	if (!a) {
-		a = alloc_apertures(1);
-		if (!a)
-			return false;
-
-		a->ranges[0].base = 0;
-		a->ranges[0].size = ~0;
-		do_free = true;
-	}
-
-	mutex_lock(&registration_lock);
-	/* check all firmware fbs and kick off if the base addr overlaps */
-	for_each_registered_fb(i) {
-		struct apertures_struct *gen_aper;
-
-		if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE))
-			continue;
-
-		gen_aper = registered_fb[i]->apertures;
-		if (fb_do_apertures_overlap(gen_aper, a)) {
-			found = true;
-			break;
-		}
-	}
-	mutex_unlock(&registration_lock);
-
-	if (do_free)
-		kfree(a);
-
-	return found;
-}
-EXPORT_SYMBOL(is_firmware_framebuffer);
-
 /**
  * remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
  * @pdev: PCI device

diff --git a/include/linux/fb.h b/include/linux/fb.h
index 9a77ab615c36..147d582dab41 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -612,7 +612,6 @@ extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
 					       const char *name);
 extern int remove_conflicting_framebuffers(struct apertures_struct *a,
 					   const char *name, bool primary);
-extern bool is_firmware_framebuffer(struct apertures_struct *a);
 extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
 extern int fb_show_logo(struct fb_info *fb_info, int rotate);
 extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
-- 
cgit v1.2.3-59-g8ed1b