From dda510890498b9a2f4b2142192f6d516c6c1e2e5 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 6 Dec 2019 14:24:35 +0100 Subject: dmaengine: Remove spaces before TABs Signed-off-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20191206132435.29139-1-geert+renesas@glider.be Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux/dmaengine.h') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 8fcdee1c0cf9..dfd2d35b64af 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -481,7 +481,7 @@ struct dmaengine_unmap_data { * @cookie: tracking cookie for this transaction, set to -EBUSY if * this tx is sitting on a dependency list * @flags: flags to augment operation preparation, control completion, and - * communicate status + * communicate status * @phys: physical address of the descriptor * @chan: target channel for this operation * @tx_submit: accept the descriptor, assign ordered cookie and mark the -- cgit v1.2.3-59-g8ed1b From dae7a589c18a4d979d5f14b09374e871b995ceb1 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Mon, 16 Dec 2019 12:01:16 -0700 Subject: dmaengine: Store module owner in dma_device struct dma_chan_to_owner() dereferences the driver from the struct device to obtain the owner and call module_[get|put](). However, if the backing device is unbound before the dma_device is unregistered, the driver will be cleared and this will cause a NULL pointer dereference. Instead, store a pointer to the owner module in the dma_device struct so the module reference can be properly put when the channel is put, even if the backing device was destroyed first. This change helps to support a safer unbind of DMA engines. If the dma_device is unregistered in the driver's remove function, there's no guarantee that there are no existing clients, and a user's action may trigger the WARN_ONCE in dma_async_device_unregister(), which is unlikely to leave the system in a consistent state. Instead, a better approach is to allow the backing driver to go away and fail any subsequent requests to it.
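Not part of the patch, but as a rough sketch of what this buys a driver (the foo_* names are hypothetical): the core captures the owner module from dev->driver->owner at registration time, so the driver sets nothing extra and the reference remains valid even after the backing device is unbound.

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

struct foo_dma {
        struct dma_device ddev;
        /* channels, registers, ... */
};

static int foo_dma_probe(struct platform_device *pdev)
{
        struct foo_dma *fd = devm_kzalloc(&pdev->dev, sizeof(*fd), GFP_KERNEL);

        if (!fd)
                return -ENOMEM;

        fd->ddev.dev = &pdev->dev;      /* owner is derived from this dev */
        dma_cap_set(DMA_SLAVE, fd->ddev.cap_mask);
        /* ... add channels to fd->ddev.channels, set mandatory callbacks ... */

        /* dma_async_device_register() records fd->ddev.owner internally */
        return dma_async_device_register(&fd->ddev);
}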
Signed-off-by: Logan Gunthorpe Link: https://lore.kernel.org/r/20191216190120.21374-2-logang@deltatee.com Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 4 +++- include/linux/dmaengine.h | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include/linux/dmaengine.h') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 03ac4b96117c..4b604086b1b3 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -179,7 +179,7 @@ __dma_device_satisfies_mask(struct dma_device *device, static struct module *dma_chan_to_owner(struct dma_chan *chan) { - return chan->device->dev->driver->owner; + return chan->device->owner; } /** @@ -919,6 +919,8 @@ int dma_async_device_register(struct dma_device *device) return -EIO; } + device->owner = device->dev->driver->owner; + if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) { dev_err(device->dev, "Device claims capability %s, but op is not defined\n", diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index dfd2d35b64af..11b15a2e97a0 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -674,6 +674,7 @@ struct dma_filter { * @fill_align: alignment shift for memset operations * @dev_id: unique device ID * @dev: struct device reference for dma mapping api + * @owner: owner module (automatically set based on the provided dev) * @src_addr_widths: bit mask of src addr widths the device supports * Width is specified in bytes, e.g. for a device supporting * a width of 4 the mask should have BIT(4) set. @@ -737,6 +738,7 @@ struct dma_device { int dev_id; struct device *dev; + struct module *owner; u32 src_addr_widths; u32 dst_addr_widths; -- cgit v1.2.3-59-g8ed1b From 8ad342a863590b24ce77681b7e081363fb3333f7 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Mon, 16 Dec 2019 12:01:19 -0700 Subject: dmaengine: Add reference counting to dma_device struct Adding a reference count helps drivers to properly implement the unbind while in use case. References are taken and put every time a channel is allocated or freed. Once the final reference is put, the device is removed from the dma_device_list and a release callback function is called to signal the driver to free the memory. 
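Not from the patch itself — a minimal sketch, assuming a hypothetical foo_dma driver, of how the new release callback is meant to be used: the dma_device (and whatever embeds it) must stay allocated until the core drops its last reference and calls device_release.

#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_dma {
        struct dma_device ddev;
        /* ... */
};

/* Called by the dmaengine core once the last reference to ddev is dropped,
 * i.e. some time after dma_async_device_unregister(). */
static void foo_dma_release(struct dma_device *ddev)
{
        struct foo_dma *fd = container_of(ddev, struct foo_dma, ddev);

        kfree(fd);
}

static int foo_dma_probe(struct platform_device *pdev)
{
        /* Not devm-allocated: the lifetime is now controlled by the kref. */
        struct foo_dma *fd = kzalloc(sizeof(*fd), GFP_KERNEL);
        int ret;

        if (!fd)
                return -ENOMEM;

        fd->ddev.dev = &pdev->dev;
        fd->ddev.device_release = foo_dma_release;
        /* ... channels, capabilities, callbacks ... */

        ret = dma_async_device_register(&fd->ddev);
        if (ret) {
                kfree(fd);
                return ret;
        }

        platform_set_drvdata(pdev, fd);
        return 0;
}

static int foo_dma_remove(struct platform_device *pdev)
{
        struct foo_dma *fd = platform_get_drvdata(pdev);

        /* May return while clients still hold channels; foo_dma_release()
         * runs only after the last reference to the dma_device is dropped. */
        dma_async_device_unregister(&fd->ddev);
        return 0;
}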
Signed-off-by: Logan Gunthorpe Link: https://lore.kernel.org/r/20191216190120.21374-5-logang@deltatee.com Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 57 ++++++++++++++++++++++++++++++++++++++++------- include/linux/dmaengine.h | 8 ++++++- 2 files changed, 56 insertions(+), 9 deletions(-) (limited to 'include/linux/dmaengine.h') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 1f9a6293f15a..e316abe3672d 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -342,6 +342,23 @@ static void balance_ref_count(struct dma_chan *chan) } } +static void dma_device_release(struct kref *ref) +{ + struct dma_device *device = container_of(ref, struct dma_device, ref); + + list_del_rcu(&device->global_node); + dma_channel_rebalance(); + + if (device->device_release) + device->device_release(device); +} + +static void dma_device_put(struct dma_device *device) +{ + lockdep_assert_held(&dma_list_mutex); + kref_put(&device->ref, dma_device_release); +} + /** * dma_chan_get - try to grab a dma channel's parent driver module * @chan - channel to grab @@ -362,6 +379,12 @@ static int dma_chan_get(struct dma_chan *chan) if (!try_module_get(owner)) return -ENODEV; + ret = kref_get_unless_zero(&chan->device->ref); + if (!ret) { + ret = -ENODEV; + goto module_put_out; + } + /* allocate upon first client reference */ if (chan->device->device_alloc_chan_resources) { ret = chan->device->device_alloc_chan_resources(chan); @@ -377,6 +400,8 @@ out: return 0; err_out: + dma_device_put(chan->device); +module_put_out: module_put(owner); return ret; } @@ -402,6 +427,7 @@ static void dma_chan_put(struct dma_chan *chan) chan->device->device_free_chan_resources(chan); } + dma_device_put(chan->device); module_put(dma_chan_to_owner(chan)); /* If the channel is used via a DMA request router, free the mapping */ @@ -837,14 +863,14 @@ EXPORT_SYMBOL(dmaengine_get); */ void dmaengine_put(void) { - struct dma_device *device; + struct dma_device *device, *_d; struct dma_chan *chan; mutex_lock(&dma_list_mutex); dmaengine_ref_count--; BUG_ON(dmaengine_ref_count < 0); /* drop channel references */ - list_for_each_entry(device, &dma_device_list, global_node) { + list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) continue; list_for_each_entry(chan, &device->channels, device_node) @@ -906,6 +932,10 @@ static int get_dma_id(struct dma_device *device) /** * dma_async_device_register - registers DMA devices found * @device: &dma_device + * + * After calling this routine the structure should not be freed except in the + * device_release() callback which will be called after + * dma_async_device_unregister() is called and no further references are taken. 
*/ int dma_async_device_register(struct dma_device *device) { @@ -999,6 +1029,12 @@ int dma_async_device_register(struct dma_device *device) return -EIO; } + if (!device->device_release) + dev_warn(device->dev, + "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n"); + + kref_init(&device->ref); + /* note: this only matters in the * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case */ @@ -1115,13 +1151,8 @@ void dma_async_device_unregister(struct dma_device *device) { struct dma_chan *chan; - mutex_lock(&dma_list_mutex); - list_del_rcu(&device->global_node); - dma_channel_rebalance(); - mutex_unlock(&dma_list_mutex); - list_for_each_entry(chan, &device->channels, device_node) { - WARN_ONCE(chan->client_count, + WARN_ONCE(!device->device_release && chan->client_count, "%s called while %d clients hold a reference\n", __func__, chan->client_count); mutex_lock(&dma_list_mutex); @@ -1130,6 +1161,16 @@ void dma_async_device_unregister(struct dma_device *device) device_unregister(&chan->dev->device); free_percpu(chan->local); } + + mutex_lock(&dma_list_mutex); + /* + * setting DMA_PRIVATE ensures the device being torn down will not + * be used in the channel_table + */ + dma_cap_set(DMA_PRIVATE, device->cap_mask); + dma_channel_rebalance(); + dma_device_put(device); + mutex_unlock(&dma_list_mutex); } EXPORT_SYMBOL(dma_async_device_unregister); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 11b15a2e97a0..7927731e3716 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -719,9 +719,14 @@ struct dma_filter { * will just return a simple status code * @device_issue_pending: push pending transactions to hardware * @descriptor_reuse: a submitted transfer can be resubmitted after completion + * @device_release: called sometime after dma_async_device_unregister() is + * called and there are no further references to this structure. This + * must be implemented to free resources however many existing drivers + * do not and are therefore not safe to unbind while in use. + * */ struct dma_device { - + struct kref ref; unsigned int chancnt; unsigned int privatecnt; struct list_head channels; @@ -802,6 +807,7 @@ struct dma_device { dma_cookie_t cookie, struct dma_tx_state *txstate); void (*device_issue_pending)(struct dma_chan *chan); + void (*device_release)(struct dma_device *dev); }; static inline int dmaengine_slave_config(struct dma_chan *chan, -- cgit v1.2.3-59-g8ed1b From 4db8fd32ed2be7cc510e51e43ec3349aa64074a9 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Mon, 23 Dec 2019 13:04:44 +0200 Subject: dmaengine: Add metadata_ops for dma_async_tx_descriptor The metadata is best described as side-band data or parameters traveling alongside the data DMA'd by the DMA engine. It is data which is understood by the peripheral and the peripheral driver only; the DMA engine sees it only as a data block and does not interpret it in any way. The metadata can be different per descriptor as it is a parameter for the data being transferred. If the DMA engine supports per-descriptor metadata, it can implement the attach and get_ptr/set_len callbacks. Client drivers must only use either attach or get_ptr/set_len to avoid misconfiguration. Client drivers can check whether a given metadata mode is supported by the channel at probe time with dmaengine_is_metadata_mode_supported(chan, DESC_METADATA_CLIENT); dmaengine_is_metadata_mode_supported(chan, DESC_METADATA_ENGINE); and based on this information can use either mode.
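As an illustration only (not part of the patch; the foo_* names are hypothetical), the probe-time check described above plus the DESC_METADATA_CLIENT attach step could look roughly like this on the client side, using the wrappers introduced below:

#include <linux/dmaengine.h>

struct foo_client {
        struct dma_chan *chan;
        bool engine_md;         /* true: DESC_METADATA_ENGINE, false: DESC_METADATA_CLIENT */
};

static int foo_pick_metadata_mode(struct foo_client *fc)
{
        /* Pick exactly one mode; mixing modes on a descriptor is rejected. */
        if (dmaengine_is_metadata_mode_supported(fc->chan, DESC_METADATA_ENGINE))
                fc->engine_md = true;
        else if (dmaengine_is_metadata_mode_supported(fc->chan, DESC_METADATA_CLIENT))
                fc->engine_md = false;
        else
                return -ENOTSUPP;

        return 0;
}

/* DESC_METADATA_CLIENT, MEM_TO_DEV: metadata is built in the client's buffer
 * and attached before the descriptor is submitted. */
static int foo_send(struct foo_client *fc, struct dma_async_tx_descriptor *desc,
                    void *md, size_t md_len)
{
        int ret = dmaengine_desc_attach_metadata(desc, md, md_len);

        if (ret)
                return ret;

        dmaengine_submit(desc);
        dma_async_issue_pending(fc->chan);
        return 0;
}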
Wrappers are also added for the metadata_ops. To be used in DESC_METADATA_CLIENT mode: dmaengine_desc_attach_metadata() To be used in DESC_METADATA_ENGINE mode: dmaengine_desc_get_metadata_ptr() dmaengine_desc_set_metadata_len() Signed-off-by: Peter Ujfalusi Reviewed-by: Tero Kristo Tested-by: Keerthy Reviewed-by: Grygorii Strashko Link: https://lore.kernel.org/r/20191223110458.30766-5-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 73 ++++++++++++++++++++++++++++++ include/linux/dmaengine.h | 112 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 185 insertions(+) (limited to 'include/linux/dmaengine.h') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 4ac77456e830..158aeb1b6a8a 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1348,6 +1348,79 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, } EXPORT_SYMBOL(dma_async_tx_descriptor_init); +static inline int desc_check_and_set_metadata_mode( + struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode) +{ + /* Make sure that the metadata mode is not mixed */ + if (!desc->desc_metadata_mode) { + if (dmaengine_is_metadata_mode_supported(desc->chan, mode)) + desc->desc_metadata_mode = mode; + else + return -ENOTSUPP; + } else if (desc->desc_metadata_mode != mode) { + return -EINVAL; + } + + return 0; +} + +int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc, + void *data, size_t len) +{ + int ret; + + if (!desc) + return -EINVAL; + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT); + if (ret) + return ret; + + if (!desc->metadata_ops || !desc->metadata_ops->attach) + return -ENOTSUPP; + + return desc->metadata_ops->attach(desc, data, len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata); + +void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc, + size_t *payload_len, size_t *max_len) +{ + int ret; + + if (!desc) + return ERR_PTR(-EINVAL); + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); + if (ret) + return ERR_PTR(ret); + + if (!desc->metadata_ops || !desc->metadata_ops->get_ptr) + return ERR_PTR(-ENOTSUPP); + + return desc->metadata_ops->get_ptr(desc, payload_len, max_len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr); + +int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, + size_t payload_len) +{ + int ret; + + if (!desc) + return -EINVAL; + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); + if (ret) + return ret; + + if (!desc->metadata_ops || !desc->metadata_ops->set_len) + return -ENOTSUPP; + + return desc->metadata_ops->set_len(desc, payload_len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len); + /* dma_wait_for_async_tx - spin wait for a transaction to complete * @tx: in-flight transaction to wait on */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 7927731e3716..68b361891a61 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -219,6 +219,62 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; * @bytes_transferred: byte counter */ +/** + * enum dma_desc_metadata_mode - per descriptor metadata mode types supported + * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the + * client driver and it is attached (via the dmaengine_desc_attach_metadata() + * helper) to the descriptor. 
+ * + * Client drivers interested to use this mode can follow: + * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM: + * 1. prepare the descriptor (dmaengine_prep_*) + * construct the metadata in the client's buffer + * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the + * descriptor + * 3. submit the transfer + * - DMA_DEV_TO_MEM: + * 1. prepare the descriptor (dmaengine_prep_*) + * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the + * descriptor + * 3. submit the transfer + * 4. when the transfer is completed, the metadata should be available in the + * attached buffer + * + * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA + * driver. The client driver can ask for the pointer, maximum size and the + * currently used size of the metadata and can directly update or read it. + * dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() is + * provided as helper functions. + * + * Note: the metadata area for the descriptor is no longer valid after the + * transfer has been completed (valid up to the point when the completion + * callback returns if used). + * + * Client drivers interested to use this mode can follow: + * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM: + * 1. prepare the descriptor (dmaengine_prep_*) + * 2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's + * metadata area + * 3. update the metadata at the pointer + * 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount + * of data the client has placed into the metadata buffer + * 5. submit the transfer + * - DMA_DEV_TO_MEM: + * 1. prepare the descriptor (dmaengine_prep_*) + * 2. submit the transfer + * 3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the + * pointer to the engine's metadata area + * 4. Read out the metadata from the pointer + * + * Note: the two mode is not compatible and clients must use one mode for a + * descriptor. + */ +enum dma_desc_metadata_mode { + DESC_METADATA_NONE = 0, + DESC_METADATA_CLIENT = BIT(0), + DESC_METADATA_ENGINE = BIT(1), +}; + struct dma_chan_percpu { /* stats */ unsigned long memcpy_count; @@ -475,6 +531,18 @@ struct dmaengine_unmap_data { dma_addr_t addr[0]; }; +struct dma_async_tx_descriptor; + +struct dma_descriptor_metadata_ops { + int (*attach)(struct dma_async_tx_descriptor *desc, void *data, + size_t len); + + void *(*get_ptr)(struct dma_async_tx_descriptor *desc, + size_t *payload_len, size_t *max_len); + int (*set_len)(struct dma_async_tx_descriptor *desc, + size_t payload_len); +}; + /** * struct dma_async_tx_descriptor - async transaction descriptor * ---dma generic offload fields--- @@ -488,6 +556,11 @@ struct dmaengine_unmap_data { * descriptor pending. To be pushed on .issue_pending() call * @callback: routine to call after this operation is complete * @callback_param: general parameter to pass to the callback routine + * @desc_metadata_mode: core managed metadata mode to protect mixed use of + * DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. 
Otherwise + * DESC_METADATA_NONE + * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the + * DMA driver if metadata mode is supported with the descriptor * ---async_tx api specific fields--- * @next: at completion submit this descriptor * @parent: pointer to the next level up in the dependency chain @@ -504,6 +577,8 @@ struct dma_async_tx_descriptor { dma_async_tx_callback_result callback_result; void *callback_param; struct dmaengine_unmap_data *unmap; + enum dma_desc_metadata_mode desc_metadata_mode; + struct dma_descriptor_metadata_ops *metadata_ops; #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH struct dma_async_tx_descriptor *next; struct dma_async_tx_descriptor *parent; @@ -666,6 +741,7 @@ struct dma_filter { * @global_node: list_head for global dma_device_list * @filter: information for device/slave to filter function/param mapping * @cap_mask: one or more dma_capability flags + * @desc_metadata_modes: supported metadata modes by the DMA device * @max_xor: maximum number of xor sources, 0 if no capability * @max_pq: maximum number of PQ sources and PQ-continue capability * @copy_align: alignment shift for memcpy operations @@ -733,6 +809,7 @@ struct dma_device { struct list_head global_node; struct dma_filter filter; dma_cap_mask_t cap_mask; + enum dma_desc_metadata_mode desc_metadata_modes; unsigned short max_xor; unsigned short max_pq; enum dmaengine_alignment copy_align; @@ -910,6 +987,41 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy( len, flags); } +static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan, + enum dma_desc_metadata_mode mode) +{ + if (!chan) + return false; + + return !!(chan->device->desc_metadata_modes & mode); +} + +#ifdef CONFIG_DMA_ENGINE +int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc, + void *data, size_t len); +void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc, + size_t *payload_len, size_t *max_len); +int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, + size_t payload_len); +#else /* CONFIG_DMA_ENGINE */ +static inline int dmaengine_desc_attach_metadata( + struct dma_async_tx_descriptor *desc, void *data, size_t len) +{ + return -EINVAL; +} +static inline void *dmaengine_desc_get_metadata_ptr( + struct dma_async_tx_descriptor *desc, size_t *payload_len, + size_t *max_len) +{ + return NULL; +} +static inline int dmaengine_desc_set_metadata_len( + struct dma_async_tx_descriptor *desc, size_t payload_len) +{ + return -EINVAL; +} +#endif /* CONFIG_DMA_ENGINE */ + /** * dmaengine_terminate_all() - Terminate all active DMA transfers * @chan: The channel for which to terminate the transfers -- cgit v1.2.3-59-g8ed1b From 6755ec06d1333765d2b935e4e4a5bd011332bac6 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Mon, 23 Dec 2019 13:04:45 +0200 Subject: dmaengine: Add support for reporting DMA cached data amount A DMA hardware can have big cache or FIFO and the amount of data sitting in the DMA fabric can be an interest for the clients. 
For example, in audio we want to know the delay in the data flow, and in case the DMA has a significantly large FIFO/cache it can affect the latency/delay. Signed-off-by: Peter Ujfalusi Reviewed-by: Tero Kristo Tested-by: Keerthy Reviewed-by: Grygorii Strashko Link: https://lore.kernel.org/r/20191223110458.30766-6-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.h | 8 ++++++++ include/linux/dmaengine.h | 2 ++ 2 files changed, 10 insertions(+) (limited to 'include/linux/dmaengine.h') diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h index 501c0b063f85..b0b97475707a 100644 --- a/drivers/dma/dmaengine.h +++ b/drivers/dma/dmaengine.h @@ -77,6 +77,7 @@ static inline enum dma_status dma_cookie_status(struct dma_chan *chan, state->last = complete; state->used = used; state->residue = 0; + state->in_flight_bytes = 0; } return dma_async_is_complete(cookie, complete, used); } @@ -87,6 +88,13 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue) state->residue = residue; } +static inline void dma_set_in_flight_bytes(struct dma_tx_state *state, + u32 in_flight_bytes) +{ + if (state) + state->in_flight_bytes = in_flight_bytes; +} + struct dmaengine_desc_callback { dma_async_tx_callback callback; dma_async_tx_callback_result callback_result; diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 68b361891a61..b44b9c608709 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -686,11 +686,13 @@ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descr * @residue: the remaining number of bytes left to transmit * on the selected transfer for states DMA_IN_PROGRESS and * DMA_PAUSED if this is implemented in the driver, else 0 + * @in_flight_bytes: amount of data in bytes cached by the DMA. */ struct dma_tx_state { dma_cookie_t last; dma_cookie_t used; u32 residue; + u32 in_flight_bytes; }; /** -- cgit v1.2.3-59-g8ed1b From 816ebf48442eef1c61db26d2ec055f5c8ac83b21 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Mon, 23 Dec 2019 13:04:46 +0200 Subject: dmaengine: Add helper function to convert direction value to text dmaengine_get_direction_text() can be useful when the direction is printed out. The text is easier to comprehend than the number.
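A trivial usage sketch (not part of the patch; foo_* is a hypothetical driver): a prep callback can now log the direction by name rather than by number.

#include <linux/dmaengine.h>

static struct dma_async_tx_descriptor *
foo_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                  unsigned int sg_len, enum dma_transfer_direction dir,
                  unsigned long flags, void *context)
{
        if (!is_slave_direction(dir)) {
                dev_err(chan->device->dev, "%s: bad direction %s\n",
                        __func__, dmaengine_get_direction_text(dir));
                return NULL;
        }

        /* ... build and return the descriptor; elided in this sketch ... */
        return NULL;
}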
Signed-off-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20191223110458.30766-7-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'include/linux/dmaengine.h') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index b44b9c608709..62225d46908b 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1545,4 +1545,23 @@ static inline struct dma_chan return __dma_request_channel(mask, fn, fn_param, NULL); } + +static inline char * +dmaengine_get_direction_text(enum dma_transfer_direction dir) +{ + switch (dir) { + case DMA_DEV_TO_MEM: + return "DEV_TO_MEM"; + case DMA_MEM_TO_DEV: + return "MEM_TO_DEV"; + case DMA_MEM_TO_MEM: + return "MEM_TO_MEM"; + case DMA_DEV_TO_DEV: + return "DEV_TO_DEV"; + default: + break; + } + + return "invalid"; +} #endif /* DMAENGINE_H */ -- cgit v1.2.3-59-g8ed1b From 71ca5b78235e79c36f774773491064d7921d1942 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 21 Jan 2020 10:33:10 +0100 Subject: dmaengine: Remove dma_request_slave_channel_compat() wrapper At its original introduction, dma_request_slave_channel_compat() used a wrapper, to accommodate filter functions that modify the mask passed. Filter functions can no longer modify masks, and the mask parameter was made const in commit a53e28da574a40bc ("dma: Make the 'mask' parameter of __dma_request_channel const") consecutively. Hence remove the wrapper, and rename __dma_request_slave_channel_compat() to dma_request_slave_channel_compat(), to get rid of one more function name starting with a double underscore. Signed-off-by: Geert Uytterhoeven Acked-by: Arnd Bergmann Link: https://lore.kernel.org/r/20200121093311.28639-3-geert+renesas@glider.be Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'include/linux/dmaengine.h') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 62225d46908b..230d50ef7360 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1526,11 +1526,9 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); #define dma_request_channel(mask, x, y) \ __dma_request_channel(&(mask), x, y, NULL) -#define dma_request_slave_channel_compat(mask, x, y, dev, name) \ - __dma_request_slave_channel_compat(&(mask), x, y, dev, name) static inline struct dma_chan -*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask, +*dma_request_slave_channel_compat(const dma_cap_mask_t mask, dma_filter_fn fn, void *fn_param, struct device *dev, const char *name) { @@ -1543,7 +1541,7 @@ static inline struct dma_chan if (!fn || !fn_param) return NULL; - return __dma_request_channel(mask, fn, fn_param, NULL); + return __dma_request_channel(&mask, fn, fn_param, NULL); } static inline char * -- cgit v1.2.3-59-g8ed1b From c3c431de99c068e3f64d01335c1532b22e4b1d1b Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 21 Jan 2020 10:33:11 +0100 Subject: dmaengine: Move dma_get_{,any_}slave_channel() to private dmaengine.h The functions dma_get_slave_channel() and dma_get_any_slave_channel() are called from DMA engine drivers only. Hence move their declarations from the public header file to the private header file drivers/dma/dmaengine.h. 
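For context, a sketch (not from the patch; the foo_* names are hypothetical) of the kind of in-tree user this affects: a controller driver under drivers/dma/ that hands out channels from its of_xlate callback now picks up dma_get_slave_channel() from the private header.

#include <linux/dmaengine.h>
#include <linux/of_dma.h>

#include "dmaengine.h"          /* private header: dma_get_slave_channel() */

struct foo_dma_chan {
        struct dma_chan common;
        /* ... */
};

struct foo_dma {
        struct dma_device ddev;
        unsigned int nr_channels;
        struct foo_dma_chan *chans;
};

static struct dma_chan *foo_of_xlate(struct of_phandle_args *dma_spec,
                                     struct of_dma *ofdma)
{
        struct foo_dma *fd = ofdma->of_dma_data;
        unsigned int id = dma_spec->args[0];

        if (id >= fd->nr_channels)
                return NULL;

        return dma_get_slave_channel(&fd->chans[id].common);
}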
Signed-off-by: Geert Uytterhoeven Acked-by: Arnd Bergmann Link: https://lore.kernel.org/r/20200121093311.28639-4-geert+renesas@glider.be Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.h | 3 +++ drivers/dma/of-dma.c | 2 ++ include/linux/dmaengine.h | 2 -- 3 files changed, 5 insertions(+), 2 deletions(-) (limited to 'include/linux/dmaengine.h') diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h index b0b97475707a..e8a320c9e57c 100644 --- a/drivers/dma/dmaengine.h +++ b/drivers/dma/dmaengine.h @@ -179,4 +179,7 @@ dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb) return (cb->callback) ? true : false; } +struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); +struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); + #endif diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index c2d779daa4b5..b2c2b5e8093c 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -15,6 +15,8 @@ #include #include +#include "dmaengine.h" + static LIST_HEAD(of_dma_list); static DEFINE_MUTEX(of_dma_lock); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 230d50ef7360..9cc0e70e7c35 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1522,8 +1522,6 @@ int dma_async_device_register(struct dma_device *device); int dmaenginem_async_device_register(struct dma_device *device); void dma_async_device_unregister(struct dma_device *device); void dma_run_dependencies(struct dma_async_tx_descriptor *tx); -struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); -struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); #define dma_request_channel(mask, x, y) \ __dma_request_channel(&(mask), x, y, NULL) -- cgit v1.2.3-59-g8ed1b From e81274cd6b5264809384066e09a5253708822522 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Tue, 21 Jan 2020 16:43:53 -0700 Subject: dmaengine: add support to dynamic register/unregister of channels With the channel registration routines broken out, now add support code to allow independent registering and unregistering of channels in a hotplug fashion. 
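A rough sketch (not part of the patch, reusing the hypothetical foo_dma/foo_dma_chan types from the earlier sketches) of hot-adding and hot-removing one channel on an already registered dma_device; the channel-list linkage assumed here mirrors what the unregister path in the diff below undoes.

static int foo_hotadd_channel(struct foo_dma *fd, struct foo_dma_chan *fc)
{
        struct dma_chan *chan = &fc->common;
        int rc;

        chan->device = &fd->ddev;
        list_add_tail(&chan->device_node, &fd->ddev.channels);

        rc = dma_async_device_channel_register(&fd->ddev, chan);
        if (rc)
                list_del(&chan->device_node);

        return rc;
}

static void foo_hotremove_channel(struct foo_dma *fd, struct foo_dma_chan *fc)
{
        /* Removes the channel from fd->ddev.channels and unregisters its sysfs device. */
        dma_async_device_channel_unregister(&fd->ddev, &fc->common);
}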
Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/157965023364.73301.7821862091077299040.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 34 ++++++++++++++++++++++++++-------- include/linux/dmaengine.h | 4 ++++ 2 files changed, 30 insertions(+), 8 deletions(-) (limited to 'include/linux/dmaengine.h') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 2daf2ee9bebd..51a2f2b1b2de 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -986,6 +986,20 @@ static int __dma_async_device_channel_register(struct dma_device *device, return rc; } +int dma_async_device_channel_register(struct dma_device *device, + struct dma_chan *chan) +{ + int rc; + + rc = __dma_async_device_channel_register(device, chan, -1); + if (rc < 0) + return rc; + + dma_channel_rebalance(); + return 0; +} +EXPORT_SYMBOL_GPL(dma_async_device_channel_register); + static void __dma_async_device_channel_unregister(struct dma_device *device, struct dma_chan *chan) { @@ -993,12 +1007,22 @@ static void __dma_async_device_channel_unregister(struct dma_device *device, "%s called while %d clients hold a reference\n", __func__, chan->client_count); mutex_lock(&dma_list_mutex); + list_del(&chan->device_node); + device->chancnt--; chan->dev->chan = NULL; mutex_unlock(&dma_list_mutex); device_unregister(&chan->dev->device); free_percpu(chan->local); } +void dma_async_device_channel_unregister(struct dma_device *device, + struct dma_chan *chan) +{ + __dma_async_device_channel_unregister(device, chan); + dma_channel_rebalance(); +} +EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister); + /** * dma_async_device_register - registers DMA devices found * @device: &dma_device @@ -1121,12 +1145,6 @@ int dma_async_device_register(struct dma_device *device) goto err_out; } - if (!device->chancnt) { - dev_err(device->dev, "%s: device has no channels!\n", __func__); - rc = -ENODEV; - goto err_out; - } - mutex_lock(&dma_list_mutex); /* take references on public channels */ if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) @@ -1181,9 +1199,9 @@ EXPORT_SYMBOL(dma_async_device_register); */ void dma_async_device_unregister(struct dma_device *device) { - struct dma_chan *chan; + struct dma_chan *chan, *n; - list_for_each_entry(chan, &device->channels, device_node) + list_for_each_entry_safe(chan, n, &device->channels, device_node) __dma_async_device_channel_unregister(device, chan); mutex_lock(&dma_list_mutex); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 9cc0e70e7c35..f52f274773ed 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1521,6 +1521,10 @@ static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc) int dma_async_device_register(struct dma_device *device); int dmaenginem_async_device_register(struct dma_device *device); void dma_async_device_unregister(struct dma_device *device); +int dma_async_device_channel_register(struct dma_device *device, + struct dma_chan *chan); +void dma_async_device_channel_unregister(struct dma_device *device, + struct dma_chan *chan); void dma_run_dependencies(struct dma_async_tx_descriptor *tx); #define dma_request_channel(mask, x, y) \ __dma_request_channel(&(mask), x, y, NULL) -- cgit v1.2.3-59-g8ed1b From 71723a96b8b1367fefc18f60025dae792477d602 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 17 Jan 2020 16:30:56 +0100 Subject: dmaengine: Create symlinks between DMA channels and slaves MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit Currently it is not easy to find out which DMA channels are in use, and which slave devices are using which channels. Fix this by creating two symlinks between the DMA channel and the actual slave device when a channel is requested: 1. A "slave" symlink from DMA channel to slave device, 2. A "dma:" symlink slave device to DMA channel. When the channel is released, the symlinks are removed again. The latter requires keeping track of the slave device and the channel name in the dma_chan structure. Note that this is limited to channel request functions for requesting an exclusive slave channel that take a device pointer (dma_request_chan() and dma_request_slave_channel*()). Signed-off-by: Geert Uytterhoeven Tested-by: Niklas Söderlund Link: https://lore.kernel.org/r/20200117153056.31363-1-geert+renesas@glider.be Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 37 +++++++++++++++++++++++++++++++------ include/linux/dmaengine.h | 4 ++++ 2 files changed, 35 insertions(+), 6 deletions(-) (limited to 'include/linux/dmaengine.h') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 51a2f2b1b2de..f3ef4edd4de1 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -60,6 +60,8 @@ static long dmaengine_ref_count; /* --- sysfs implementation --- */ +#define DMA_SLAVE_NAME "slave" + /** * dev_to_dma_chan - convert a device pointer to its sysfs container object * @dev - device node @@ -730,11 +732,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name) if (has_acpi_companion(dev) && !chan) chan = acpi_dma_request_slave_chan_by_name(dev, name); - if (chan) { - /* Valid channel found or requester needs to be deferred */ - if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER) - return chan; - } + if (PTR_ERR(chan) == -EPROBE_DEFER) + return chan; + + if (!IS_ERR_OR_NULL(chan)) + goto found; /* Try to find the channel via the DMA filter map(s) */ mutex_lock(&dma_list_mutex); @@ -754,7 +756,23 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name) } mutex_unlock(&dma_list_mutex); - return chan ? 
chan : ERR_PTR(-EPROBE_DEFER); + if (!IS_ERR_OR_NULL(chan)) + goto found; + + return ERR_PTR(-EPROBE_DEFER); + +found: + chan->slave = dev; + chan->name = kasprintf(GFP_KERNEL, "dma:%s", name); + if (!chan->name) + return ERR_PTR(-ENOMEM); + + if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj, + DMA_SLAVE_NAME)) + dev_err(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME); + if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name)) + dev_err(dev, "Cannot create DMA %s symlink\n", chan->name); + return chan; } EXPORT_SYMBOL_GPL(dma_request_chan); @@ -812,6 +830,13 @@ void dma_release_channel(struct dma_chan *chan) /* drop PRIVATE cap enabled by __dma_request_channel() */ if (--chan->device->privatecnt == 0) dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); + if (chan->slave) { + sysfs_remove_link(&chan->slave->kobj, chan->name); + kfree(chan->name); + chan->name = NULL; + chan->slave = NULL; + } + sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME); mutex_unlock(&dma_list_mutex); } EXPORT_SYMBOL_GPL(dma_release_channel); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index f52f274773ed..fef69a9c5824 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -294,10 +294,12 @@ struct dma_router { /** * struct dma_chan - devices supply DMA channels, clients use them * @device: ptr to the dma device who supplies this channel, always !%NULL + * @slave: ptr to the device using this channel * @cookie: last cookie value returned to client * @completed_cookie: last completed cookie for this channel * @chan_id: channel ID for sysfs * @dev: class device for sysfs + * @name: backlink name for sysfs * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu * @client_count: how many clients are using this channel @@ -308,12 +310,14 @@ struct dma_router { */ struct dma_chan { struct dma_device *device; + struct device *slave; dma_cookie_t cookie; dma_cookie_t completed_cookie; /* sysfs */ int chan_id; struct dma_chan_dev *dev; + const char *name; struct list_head device_node; struct dma_chan_percpu __percpu *local; -- cgit v1.2.3-59-g8ed1b
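Finally, a consumer-side sketch (not part of the patch; the foo_* client and the "rx" channel name are hypothetical): any driver that requests a named slave channel via dma_request_chan() now gets the two symlinks for free, and dma_release_channel() removes them again.

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

static int foo_client_probe(struct platform_device *pdev)
{
        struct dma_chan *chan = dma_request_chan(&pdev->dev, "rx");

        if (IS_ERR(chan))
                return PTR_ERR(chan);

        /*
         * At this point /sys/class/dma/dmaXchanY/slave points at &pdev->dev,
         * and the client device has a "dma:rx" link back to the channel.
         */
        platform_set_drvdata(pdev, chan);
        return 0;
}

static int foo_client_remove(struct platform_device *pdev)
{
        struct dma_chan *chan = platform_get_drvdata(pdev);

        dma_release_channel(chan);      /* both symlinks are removed here */
        return 0;
}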