author     Baolin Wang <baolin.wang@linaro.org>    2019-05-20 19:32:14 +0800
committer  Vinod Koul <vkoul@kernel.org>           2019-05-27 11:23:52 +0530
commit     f5151311c3f37f6edc85b2253ccf6d3e2a4c4c26 (patch)
tree       9c9a6c10e2377a16f1eb678605546875f210c8e8
parent     dmaengine: xilinx_dma: Remove set but unused 'tail_desc' (diff)
dmaengine: Add matching device node validation in __dma_request_channel()
When a user requests a DMA channel with __dma_request_channel(), the core does not check whether the channel actually belongs to the intended DMA device, so every DMA engine driver that needs this check has to validate the device node in its own filter function. Add the matching device-node validation to the DMA engine core so that this per-driver validation can be removed.

Tested-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Signed-off-by: Baolin Wang <baolin.wang@linaro.org>
Signed-off-by: Vinod Koul <vkoul@kernel.org>
-rw-r--r--  drivers/dma/dmaengine.c    10
-rw-r--r--  drivers/dma/of-dma.c        4
-rw-r--r--  include/linux/dmaengine.h  12
3 files changed, 18 insertions(+), 8 deletions(-)
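
To illustrate what the new np argument enables, here is a hedged sketch (the foo_* names, the filter data structure, and the slave-id check are invented for illustration and are not part of this patch): a driver-supplied filter no longer needs to compare chan->device->dev->of_node itself, because the core now skips any DMA device whose node does not match the one passed to __dma_request_channel().

#include <linux/dmaengine.h>
#include <linux/of.h>

/* Hypothetical filter data: only driver-specific criteria remain. */
struct foo_filter_data {
	unsigned int slave_id;
};

static bool foo_dma_filter(struct dma_chan *chan, void *param)
{
	struct foo_filter_data *data = param;

	/*
	 * Before this patch the filter also had to compare
	 * chan->device->dev->of_node against the consumer's controller
	 * node; the core performs that match now.
	 */
	return data->slave_id == chan->chan_id;
}

static struct dma_chan *foo_request_chan(struct foo_filter_data *data,
					 struct device_node *np)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* np restricts the search to the matching DMA controller. */
	return __dma_request_channel(&mask, foo_dma_filter, data, np);
}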
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 3a11b1092e80..610080c629bb 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -641,11 +641,13 @@ EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
* @mask: capabilities that the channel must satisfy
* @fn: optional callback to disposition available channels
* @fn_param: opaque parameter to pass to dma_filter_fn
+ * @np: device node to look for DMA channels
*
* Returns pointer to appropriate DMA channel on success or NULL.
*/
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
- dma_filter_fn fn, void *fn_param)
+ dma_filter_fn fn, void *fn_param,
+ struct device_node *np)
{
struct dma_device *device, *_d;
struct dma_chan *chan = NULL;
@@ -653,6 +655,10 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
/* Find a channel */
mutex_lock(&dma_list_mutex);
list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
+ /* Finds a DMA controller with matching device node */
+ if (np && device->dev->of_node && np != device->dev->of_node)
+ continue;
+
chan = find_candidate(device, mask, fn, fn_param);
if (!IS_ERR(chan))
break;
@@ -769,7 +775,7 @@ struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
if (!mask)
return ERR_PTR(-ENODEV);
- chan = __dma_request_channel(mask, NULL, NULL);
+ chan = __dma_request_channel(mask, NULL, NULL, NULL);
if (!chan) {
mutex_lock(&dma_list_mutex);
if (list_empty(&dma_device_list))
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 91fd395c90c4..6b43d04da05d 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -316,8 +316,8 @@ struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
if (count != 1)
return NULL;
- return dma_request_channel(info->dma_cap, info->filter_fn,
- &dma_spec->args[0]);
+ return __dma_request_channel(&info->dma_cap, info->filter_fn,
+ &dma_spec->args[0], dma_spec->np);
}
EXPORT_SYMBOL_GPL(of_dma_simple_xlate);
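
For context, a controller driver that relies on of_dma_simple_xlate() needs no changes on its side; dma_spec->np already identifies the requested controller node, and the xlate now simply forwards it to the core. The sketch below (assumed foo_* names, trivial filter) only shows where that translation routine gets hooked up.

#include <linux/dmaengine.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>

static bool foo_dma_filter(struct dma_chan *chan, void *param)
{
	/* Driver-specific matching only; node matching is done by the core. */
	return true;
}

static struct of_dma_filter_info foo_dma_info;

static int foo_dma_probe(struct platform_device *pdev)
{
	dma_cap_zero(foo_dma_info.dma_cap);
	dma_cap_set(DMA_SLAVE, foo_dma_info.dma_cap);
	foo_dma_info.filter_fn = foo_dma_filter;

	/* of_dma_simple_xlate() passes dma_spec->np down to the core. */
	return of_dma_controller_register(pdev->dev.of_node,
					  of_dma_simple_xlate, &foo_dma_info);
}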
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index d49ec5c31944..504085b2bf21 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -1314,7 +1314,8 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
- dma_filter_fn fn, void *fn_param);
+ dma_filter_fn fn, void *fn_param,
+ struct device_node *np);
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
struct dma_chan *dma_request_chan(struct device *dev, const char *name);
@@ -1339,7 +1340,9 @@ static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
- dma_filter_fn fn, void *fn_param)
+ dma_filter_fn fn,
+ void *fn_param,
+ struct device_node *np)
{
return NULL;
}
@@ -1411,7 +1414,8 @@ void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
-#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
+#define dma_request_channel(mask, x, y) \
+ __dma_request_channel(&(mask), x, y, NULL)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
__dma_request_slave_channel_compat(&(mask), x, y, dev, name)
@@ -1429,6 +1433,6 @@ static inline struct dma_chan
if (!fn || !fn_param)
return NULL;
- return __dma_request_channel(mask, fn, fn_param);
+ return __dma_request_channel(mask, fn, fn_param, NULL);
}
#endif /* DMAENGINE_H */
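
Existing callers of the dma_request_channel() macro are unaffected, since it now forwards NULL for the new device-node argument; a minimal sketch with a hypothetical caller:

#include <linux/dmaengine.h>

/*
 * Hypothetical caller: the macro expands to
 * __dma_request_channel(&mask, NULL, NULL, NULL), so no node matching
 * is applied unless __dma_request_channel() is used directly.
 */
static struct dma_chan *foo_get_memcpy_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	return dma_request_channel(mask, NULL, NULL);
}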