Diffstat (limited to 'include/linux/dma-resv.h')
-rw-r--r-- | include/linux/dma-resv.h | 386 |
1 file changed, 288 insertions, 98 deletions
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index ee50d10f052b..0637659a702c 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -46,55 +46,278 @@
 #include <linux/rcupdate.h>
 
 extern struct ww_class reservation_ww_class;
-extern struct lock_class_key reservation_seqcount_class;
-extern const char reservation_seqcount_string[];
+
+struct dma_resv_list;
 
 /**
- * struct dma_resv_list - a list of shared fences
- * @rcu: for internal use
- * @shared_count: table of shared fences
- * @shared_max: for growing shared fence table
- * @shared: shared fence table
+ * enum dma_resv_usage - how the fences from a dma_resv obj are used
+ *
+ * This enum describes the different use cases for a dma_resv object and
+ * controls which fences are returned when queried.
+ *
+ * An important fact is that there is the order KERNEL<WRITE<READ<BOOKKEEP and
+ * when the dma_resv object is asked for fences for one use case the fences
+ * for the lower use cases are returned as well.
+ *
+ * For example, when asking for WRITE fences the KERNEL fences are returned as
+ * well. Similarly, when asked for READ fences both the WRITE and KERNEL
+ * fences are returned as well.
+ *
+ * Already used fences can be promoted in the sense that a fence with
+ * DMA_RESV_USAGE_BOOKKEEP could become DMA_RESV_USAGE_READ by adding it again
+ * with this usage. But fences can never be degraded in the sense that a fence
+ * with DMA_RESV_USAGE_WRITE could become DMA_RESV_USAGE_READ.
  */
-struct dma_resv_list {
-	struct rcu_head rcu;
-	u32 shared_count, shared_max;
-	struct dma_fence __rcu *shared[];
+enum dma_resv_usage {
+	/**
+	 * @DMA_RESV_USAGE_KERNEL: For in-kernel memory management only.
+	 *
+	 * This should only be used for things like copying or clearing memory
+	 * with a DMA hardware engine for the purpose of kernel memory
+	 * management.
+	 *
+	 * Drivers must *always* wait for those fences before accessing the
+	 * resource protected by the dma_resv object. The only exception to
+	 * that is when the resource is known to be locked down in place by
+	 * pinning it previously.
+	 */
+	DMA_RESV_USAGE_KERNEL,
+
+	/**
+	 * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
+	 *
+	 * This should only be used for userspace command submissions which add
+	 * an implicit write dependency.
+	 */
+	DMA_RESV_USAGE_WRITE,
+
+	/**
+	 * @DMA_RESV_USAGE_READ: Implicit read synchronization.
+	 *
+	 * This should only be used for userspace command submissions which add
+	 * an implicit read dependency.
+	 */
+	DMA_RESV_USAGE_READ,
+
+	/**
+	 * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
+	 *
+	 * This should be used by submissions which don't want to participate
+	 * in any implicit synchronization.
+	 *
+	 * The most common cases are preemption fences, page table updates, TLB
+	 * flushes as well as explicitly synced user submissions.
+	 *
+	 * Explicitly synced user submissions can be promoted to
+	 * DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as needed using
+	 * dma_buf_import_sync_file() when implicit synchronization should
+	 * become necessary after initial adding of the fence.
+	 */
+	DMA_RESV_USAGE_BOOKKEEP
 };
 
 /**
+ * dma_resv_usage_rw - helper for implicit sync
+ * @write: true if we create a new implicit sync write
+ *
+ * This returns the implicit synchronization usage for write or read accesses,
+ * see enum dma_resv_usage and &dma_buf.resv.
+ */
+static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
+{
+	/* This looks confusing at first sight, but is indeed correct.
+	 *
+	 * The rationale is that new write operations need to wait for the
+	 * existing read and write operations to finish.
+	 * But a new read operation only needs to wait for the existing write
+	 * operations to finish.
+	 */
+	return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
+}
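The inverted return values of dma_resv_usage_rw() deserve a concrete illustration. A minimal sketch of a driver-side wait under implicit synchronization, assuming a hypothetical job_sync_implicit() helper that is not part of this patch (dma_resv_wait_timeout() is declared further down in this header):

	/* Hypothetical helper: wait for the fences a new job depends on under
	 * implicit synchronization. A writing job must wait for the READ
	 * fences (all existing readers and writers), while a reading job only
	 * waits for the WRITE fences (existing writers).
	 */
	static long job_sync_implicit(struct dma_resv *resv, bool job_writes)
	{
		enum dma_resv_usage usage = dma_resv_usage_rw(job_writes);

		/* Interruptible wait, at most 100 ms. */
		return dma_resv_wait_timeout(resv, usage, true,
					     msecs_to_jiffies(100));
	}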
+
+/**
  * struct dma_resv - a reservation object manages fences for a buffer
- * @lock: update side lock
- * @seq: sequence count for managing RCU read-side synchronization
- * @fence_excl: the exclusive fence, if there is one currently
- * @fence: list of current shared fences
+ *
+ * This is a container for dma_fence objects which needs to handle multiple use
+ * cases.
+ *
+ * One use is to synchronize cross-driver access to a struct dma_buf, either
+ * for dynamic buffer management or just to handle implicit synchronization
+ * between different users of the buffer in userspace. See &dma_buf.resv for a
+ * more in-depth discussion.
+ *
+ * The other major use is to manage access and locking within a driver in a
+ * buffer based memory manager. struct ttm_buffer_object is the canonical
+ * example here, since this is where reservation objects originated from. But
+ * use in drivers is spreading and some drivers also manage struct
+ * drm_gem_object with the same scheme.
  */
 struct dma_resv {
+	/**
+	 * @lock:
+	 *
+	 * Update side lock. Don't use directly, instead use the wrapper
+	 * functions like dma_resv_lock() and dma_resv_unlock().
+	 *
+	 * Drivers which use the reservation object to manage memory
+	 * dynamically also use this lock to protect buffer object state like
+	 * placement, allocation policies or throughout command submission.
+	 */
 	struct ww_mutex lock;
-	seqcount_t seq;
 
-	struct dma_fence __rcu *fence_excl;
-	struct dma_resv_list __rcu *fence;
+	/**
+	 * @fences:
+	 *
+	 * Array of fences which were added to the dma_resv object.
+	 *
+	 * A new fence is added by calling dma_resv_add_fence(). Since this
+	 * often needs to be done past the point of no return in command
+	 * submission it cannot fail, and therefore sufficient slots need to be
+	 * reserved by calling dma_resv_reserve_fences().
+	 */
+	struct dma_resv_list __rcu *fences;
 };
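The @fences documentation above implies a reserve-then-add pattern: slots are reserved while the submission can still fail, and the fence is added past the point of no return. A sketch with a hypothetical submit_fence() helper (not part of this patch):

	/* Reserve a fence slot while errors can still be handled; once past
	 * the point of no return, dma_resv_add_fence() itself cannot fail.
	 */
	static int submit_fence(struct dma_resv *resv, struct dma_fence *fence,
				bool writes)
	{
		int ret;

		ret = dma_resv_lock(resv, NULL);
		if (ret)
			return ret;

		ret = dma_resv_reserve_fences(resv, 1);
		if (!ret)
			dma_resv_add_fence(resv, fence,
					   writes ? DMA_RESV_USAGE_WRITE
						  : DMA_RESV_USAGE_READ);

		dma_resv_unlock(resv);
		return ret;
	}

Reserving more than one slot up front lets a single submission add several fences later without any further error handling.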
 
-#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
-#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
+/**
+ * struct dma_resv_iter - current position into the dma_resv fences
+ *
+ * Don't touch this directly in the driver, use the accessor functions
+ * instead.
+ *
+ * IMPORTANT
+ *
+ * When using the lockless iterators like dma_resv_iter_next_unlocked() or
+ * dma_resv_for_each_fence_unlocked() beware that the iterator can be
+ * restarted. Code which accumulates statistics or similar needs to check for
+ * this with dma_resv_iter_is_restarted().
+ */
+struct dma_resv_iter {
+	/** @obj: The dma_resv object we iterate over */
+	struct dma_resv *obj;
+
+	/** @usage: Return fences with this usage or lower. */
+	enum dma_resv_usage usage;
+
+	/** @fence: the currently handled fence */
+	struct dma_fence *fence;
+
+	/** @fence_usage: the usage of the current fence */
+	enum dma_resv_usage fence_usage;
+
+	/** @index: index into the shared fences */
+	unsigned int index;
+
+	/** @fences: the shared fences; private, *MUST* not dereference */
+	struct dma_resv_list *fences;
+
+	/** @num_fences: number of fences */
+	unsigned int num_fences;
+
+	/** @is_restarted: true if this is the first returned fence */
+	bool is_restarted;
+};
+
+struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
 
 /**
- * dma_resv_get_list - get the reservation object's
- * shared fence list, with update-side lock held
- * @obj: the reservation object
+ * dma_resv_iter_begin - initialize a dma_resv_iter object
+ * @cursor: The dma_resv_iter object to initialize
+ * @obj: The dma_resv object which we want to iterate over
+ * @usage: controls which fences to include, see enum dma_resv_usage.
+ */
+static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
+				       struct dma_resv *obj,
+				       enum dma_resv_usage usage)
+{
+	cursor->obj = obj;
+	cursor->usage = usage;
+	cursor->fence = NULL;
+}
+
+/**
+ * dma_resv_iter_end - cleanup a dma_resv_iter object
+ * @cursor: the dma_resv_iter object which should be cleaned up
+ *
+ * Make sure that the reference to the fence in the cursor is properly
+ * dropped.
+ */
+static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
+{
+	dma_fence_put(cursor->fence);
+}
+
+/**
+ * dma_resv_iter_usage - Return the usage of the current fence
+ * @cursor: the cursor of the current position
+ *
+ * Returns the usage of the currently processed fence.
+ */
+static inline enum dma_resv_usage
+dma_resv_iter_usage(struct dma_resv_iter *cursor)
+{
+	return cursor->fence_usage;
+}
+
+/**
+ * dma_resv_iter_is_restarted - test if this is the first fence after a restart
+ * @cursor: the cursor with the current position
  *
- * Returns the shared fence list. Does NOT take references to
- * the fence. The obj->lock must be held.
+ * Return true if this is the first fence in an iteration after a restart.
  */
-static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
+static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
 {
-	return rcu_dereference_protected(obj->fence,
-					 dma_resv_held(obj));
+	return cursor->is_restarted;
 }
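The restart semantics called out as IMPORTANT above are easiest to see in code. A sketch of a lockless query with a hypothetical count_unsignaled() helper (not part of this patch), which resets its accumulated state whenever the iteration restarts:

	/* Count unsignaled fences without taking the reservation lock. The
	 * unlocked iteration can restart when the dma_resv object is modified
	 * concurrently, so the accumulated count must be reset on restart.
	 */
	static unsigned int count_unsignaled(struct dma_resv *resv)
	{
		struct dma_resv_iter cursor;
		struct dma_fence *fence;
		unsigned int count = 0;

		dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
		for (fence = dma_resv_iter_first_unlocked(&cursor); fence;
		     fence = dma_resv_iter_next_unlocked(&cursor)) {
			if (dma_resv_iter_is_restarted(&cursor))
				count = 0;
			if (!dma_fence_is_signaled(fence))
				count++;
		}
		dma_resv_iter_end(&cursor);

		return count;
	}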
 
 /**
+ * dma_resv_for_each_fence_unlocked - unlocked fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object without holding the
+ * &dma_resv.lock, using RCU instead. The cursor needs to be initialized with
+ * dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside the
+ * iterator a reference to the dma_fence is held and the RCU lock dropped.
+ *
+ * Beware that the iterator can be restarted when the struct dma_resv for
+ * @cursor is modified. Code which accumulates statistics or similar needs to
+ * check for this with dma_resv_iter_is_restarted(). For this reason prefer
+ * the locked iterator dma_resv_for_each_fence() whenever possible.
+ */
+#define dma_resv_for_each_fence_unlocked(cursor, fence)		\
+	for (fence = dma_resv_iter_first_unlocked(cursor);	\
+	     fence; fence = dma_resv_iter_next_unlocked(cursor))
+
+/**
+ * dma_resv_for_each_fence - fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @obj: a dma_resv object pointer
+ * @usage: controls which fences to return
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object while holding the
+ * &dma_resv.lock. @usage controls which fences are returned. The cursor
+ * initialisation is part of the iterator and the fence stays valid as long as
+ * the lock is held and so no extra reference to the fence is taken.
+ */
+#define dma_resv_for_each_fence(cursor, obj, usage, fence)	\
+	for (dma_resv_iter_begin(cursor, obj, usage),		\
+	     fence = dma_resv_iter_first(cursor); fence;	\
+	     fence = dma_resv_iter_next(cursor))
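Since the lock is held, the locked iterator is simpler: no restarts and no reference counting. A sketch dumping the fences a new implicitly synced writer would wait for (hypothetical debug helper, not part of this patch):

	/* List the fences an implicitly synced write would have to wait for.
	 * The caller must hold the reservation lock; fences stay valid while
	 * it is held, so no references are taken.
	 */
	static void dump_write_deps(struct dma_resv *resv)
	{
		struct dma_resv_iter cursor;
		struct dma_fence *fence;

		dma_resv_assert_held(resv);

		dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(true),
					fence)
			pr_info("dep %llu:%llu usage=%d\n", fence->context,
				fence->seqno, dma_resv_iter_usage(&cursor));
	}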
+
+#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
+#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
+
+#ifdef CONFIG_DEBUG_MUTEXES
+void dma_resv_reset_max_fences(struct dma_resv *obj);
+#else
+static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
+#endif
+
+/**
  * dma_resv_lock - lock the reservation object
  * @obj: the reservation object
  * @ctx: the locking context
@@ -108,6 +331,13 @@ static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
  * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
  * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
  * object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
+ *
+ * See also dma_resv_lock_interruptible() for the interruptible variant.
  */
 static inline int dma_resv_lock(struct dma_resv *obj,
 				struct ww_acquire_ctx *ctx)
@@ -129,6 +359,12 @@ static inline int dma_resv_lock(struct dma_resv *obj,
  * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
  * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
  * object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on
+ * @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
  */
 static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
 					      struct ww_acquire_ctx *ctx)
@@ -144,6 +380,8 @@ static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
  * Acquires the reservation object after a die case. This function
  * will sleep until the lock becomes available. See dma_resv_lock() as
  * well.
+ *
+ * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
  */
 static inline void dma_resv_lock_slow(struct dma_resv *obj,
 				      struct ww_acquire_ctx *ctx)
@@ -177,13 +415,13 @@ static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
  * if they overlap with a writer.
  *
  * Also note that since no context is provided, no deadlock protection is
- * possible.
+ * possible, which is also not needed for a trylock.
  *
  * Returns true if the lock was acquired, false otherwise.
  */
 static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
 {
-	return ww_mutex_trylock(&obj->lock);
+	return ww_mutex_trylock(&obj->lock, NULL);
 }
 
 /**
@@ -203,6 +441,11 @@ static inline bool dma_resv_is_locked(struct dma_resv *obj)
  *
  * Returns the context used to lock a reservation object or NULL if no context
  * was used or the object is not locked at all.
+ *
+ * WARNING: This interface is pretty horrible, but TTM needs it because it
+ * doesn't pass the struct ww_acquire_ctx around in some very long callchains.
+ * Everyone else just uses it to check whether they're holding a reservation or
+ * not.
  */
 static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
 {
@@ -217,79 +460,26 @@ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
  */
 static inline void dma_resv_unlock(struct dma_resv *obj)
 {
-#ifdef CONFIG_DEBUG_MUTEXES
-	/* Test shared fence slot reservation */
-	if (rcu_access_pointer(obj->fence)) {
-		struct dma_resv_list *fence = dma_resv_get_list(obj);
-
-		fence->shared_max = fence->shared_count;
-	}
-#endif
+	dma_resv_reset_max_fences(obj);
 	ww_mutex_unlock(&obj->lock);
 }
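The -EDEADLK handling documented for dma_resv_lock() above, as a sketch for two reservation objects (a hypothetical lock_pair() helper, not part of this patch; real drivers usually run the backoff in a loop since the retry can hit -EDEADLK again):

	static int lock_pair(struct dma_resv *a, struct dma_resv *b,
			     struct ww_acquire_ctx *ctx)
	{
		int ret;

		ww_acquire_init(ctx, &reservation_ww_class);

		ret = dma_resv_lock(a, ctx);
		if (ret)
			goto err_fini;

		ret = dma_resv_lock(b, ctx);
		if (ret == -EDEADLK) {
			/* Back off: drop all locks held under @ctx, then
			 * sleep on the contended object before retrying.
			 */
			dma_resv_unlock(a);
			dma_resv_lock_slow(b, ctx);
			ret = dma_resv_lock(a, ctx); /* may deadlock again */
			if (ret)
				dma_resv_unlock(b);
		} else if (ret) {
			dma_resv_unlock(a);
		}
		if (ret)
			goto err_fini;

		ww_acquire_done(ctx);
		return 0;

	err_fini:
		ww_acquire_fini(ctx);
		return ret;
	}

Passing a NULL @ctx skips this whole dance at the cost of deadlock avoidance, as noted in the dma_resv_lock() documentation.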
 
-/**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any). Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *
-dma_resv_get_excl(struct dma_resv *obj)
-{
-	return rcu_dereference_protected(obj->fence_excl,
-					 dma_resv_held(obj));
-}
-
-/**
- * dma_resv_get_excl_rcu - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments it's
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *
-dma_resv_get_excl_rcu(struct dma_resv *obj)
-{
-	struct dma_fence *fence;
-
-	if (!rcu_access_pointer(obj->fence_excl))
-		return NULL;
-
-	rcu_read_lock();
-	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
-	rcu_read_unlock();
-
-	return fence;
-}
-
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
-int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
-			    struct dma_fence **pfence_excl,
-			    unsigned *pshared_count,
-			    struct dma_fence ***pshared);
-
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+			enum dma_resv_usage usage);
+void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
+			     struct dma_fence *fence,
+			     enum dma_resv_usage usage);
+int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
+			unsigned int *num_fences, struct dma_fence ***fences);
+int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
+			   struct dma_fence **fence);
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
-			       unsigned long timeout);
-
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+			   bool intr, unsigned long timeout);
+bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
+void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
 
 #endif /* _LINUX_RESERVATION_H */
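Taken together, a typical consumer of the query functions declared above first polls and only then blocks. A closing sketch with a hypothetical buffer_make_idle() helper (not part of this patch):

	/* Wait until every fence, including BOOKKEEP ones, has signaled. */
	static long buffer_make_idle(struct dma_resv *resv)
	{
		if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
			return 0;

		/* Interruptible wait, up to one second. */
		return dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
					     true, HZ);
	}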