path: root/include/linux/dmaengine.h
author     Linus Torvalds <torvalds@linux-foundation.org>  2020-01-27 10:55:50 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-01-27 10:55:50 -0800
commit     a5b871c91d470326eed3ae0ebd2fc07f3aee9050 (patch)
tree       95b46574591a6eb56e2d7e62219c2629cf064f03 /include/linux/dmaengine.h
parent     Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/livepatching/livepatching (diff)
parent     dmaengine: Create symlinks between DMA channels and slaves (diff)
Merge tag 'dmaengine-5.6-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
 "This time we have a bunch of core changes to support dynamic channels,
  hotplug of controllers, new APIs for metadata ops etc. along with new
  drivers for Intel data accelerators, TI K3 UDMA, PLX DMA engine and
  hisilicon Kunpeng DMA engine. Also the usual assorted updates to drivers.

  Core:
   - Support for dynamic channels
   - Removal of various slave wrappers
   - Make a few slave request APIs private to dmaengine
   - Symlinks between channels and slaves
   - Support for hotplug of controllers
   - Support for metadata_ops for dma_async_tx_descriptor
   - Reporting DMA cached data amount
   - Virtual dma channel locking updates

  New drivers/device/feature support:
   - Driver for Intel data accelerators
   - Driver for TI K3 UDMA
   - Driver for PLX DMA engine
   - Driver for hisilicon Kunpeng DMA engine
   - eDMA support for QorIQ LS1028A in the fsl edma driver
   - Support for cyclic dma in the sun4i driver
   - Support for X1830 in the JZ4780 driver"

* tag 'dmaengine-5.6-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (62 commits)
  dmaengine: Create symlinks between DMA channels and slaves
  dmaengine: hisilicon: Add Kunpeng DMA engine support
  dmaengine: idxd: add char driver to expose submission portal to userland
  dmaengine: idxd: connect idxd to dmaengine subsystem
  dmaengine: idxd: add descriptor manipulation routines
  dmaengine: idxd: add sysfs ABI for idxd driver
  dmaengine: idxd: add configuration component of driver
  dmaengine: idxd: Init and probe for Intel data accelerators
  dmaengine: add support to dynamic register/unregister of channels
  dmaengine: break out channel registration
  x86/asm: add iosubmit_cmds512() based on MOVDIR64B CPU instruction
  dmaengine: ti: k3-udma: fix spelling mistake "limted" -> "limited"
  dmaengine: s3c24xx-dma: fix spelling mistake "to" -> "too"
  dmaengine: Move dma_get_{,any_}slave_channel() to private dmaengine.h
  dmaengine: Remove dma_request_slave_channel_compat() wrapper
  dmaengine: Remove dma_device_satisfies_mask() wrapper
  dt-bindings: fsl-imx-sdma: Add i.MX8MM/i.MX8MN/i.MX8MP compatible string
  dmaengine: zynqmp_dma: fix burst length configuration
  dmaengine: sun4i: Add support for cyclic requests with dedicated DMA
  dmaengine: fsl-qdma: fix duplicated argument to &&
  ...
Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r--  include/linux/dmaengine.h | 161
1 file changed, 153 insertions(+), 8 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index dad4a68fa009..64461fc64e1b 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -219,6 +219,62 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
* @bytes_transferred: byte counter
*/
+/**
+ * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
+ * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
+ * client driver and attached (via the dmaengine_desc_attach_metadata()
+ * helper) to the descriptor.
+ *
+ * Client drivers interested in using this mode can follow these steps:
+ * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*) and construct the metadata
+ *    in the client's buffer
+ * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ * descriptor
+ * 3. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ * descriptor
+ * 3. submit the transfer
+ * 4. when the transfer is completed, the metadata should be available in the
+ * attached buffer
+ *
+ * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA
+ * driver. The client driver can ask for the pointer, maximum size and the
+ * currently used size of the metadata and can directly update or read it.
+ * dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() are
+ * provided as helper functions.
+ *
+ * Note: the metadata area for the descriptor is no longer valid after the
+ * transfer has been completed (valid up to the point when the completion
+ * callback returns if used).
+ *
+ * Client drivers interested in using this mode can follow these steps:
+ * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's
+ * metadata area
+ * 3. update the metadata at the pointer
+ * 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount
+ * of data the client has placed into the metadata buffer
+ * 5. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. submit the transfer
+ * 3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the
+ * pointer to the engine's metadata area
+ * 4. read out the metadata from the pointer
+ *
+ * Note: the two modes are not compatible and clients must use only one of
+ * them for a descriptor.
+ */
+enum dma_desc_metadata_mode {
+ DESC_METADATA_NONE = 0,
+ DESC_METADATA_CLIENT = BIT(0),
+ DESC_METADATA_ENGINE = BIT(1),
+};
+
struct dma_chan_percpu {
/* stats */
unsigned long memcpy_count;
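
As a minimal sketch of the DESC_METADATA_CLIENT MEM_TO_DEV flow documented in
the hunk above (chan, buf, buf_len, md and md_len are assumed client state,
not part of this patch):

    struct dma_async_tx_descriptor *desc;
    int ret;

    /* step 1: prepare the descriptor; the client builds the metadata in md */
    desc = dmaengine_prep_slave_single(chan, buf, buf_len,
                                       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
    if (!desc)
        return -ENOMEM;

    /* step 2: attach the client-owned metadata buffer to the descriptor */
    ret = dmaengine_desc_attach_metadata(desc, md, md_len);
    if (ret)
        return ret;

    /* step 3: submit the transfer */
    dmaengine_submit(desc);
    dma_async_issue_pending(chan);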
@@ -238,10 +294,12 @@ struct dma_router {
/**
* struct dma_chan - devices supply DMA channels, clients use them
* @device: ptr to the dma device who supplies this channel, always !%NULL
+ * @slave: ptr to the device using this channel
* @cookie: last cookie value returned to client
* @completed_cookie: last completed cookie for this channel
* @chan_id: channel ID for sysfs
* @dev: class device for sysfs
+ * @name: backlink name for sysfs
* @device_node: used to add this to the device chan list
* @local: per-cpu pointer to a struct dma_chan_percpu
* @client_count: how many clients are using this channel
@@ -252,12 +310,14 @@ struct dma_router {
*/
struct dma_chan {
struct dma_device *device;
+ struct device *slave;
dma_cookie_t cookie;
dma_cookie_t completed_cookie;
/* sysfs */
int chan_id;
struct dma_chan_dev *dev;
+ const char *name;
struct list_head device_node;
struct dma_chan_percpu __percpu *local;
@@ -475,19 +535,36 @@ struct dmaengine_unmap_data {
dma_addr_t addr[0];
};
+struct dma_async_tx_descriptor;
+
+struct dma_descriptor_metadata_ops {
+ int (*attach)(struct dma_async_tx_descriptor *desc, void *data,
+ size_t len);
+
+ void *(*get_ptr)(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len);
+ int (*set_len)(struct dma_async_tx_descriptor *desc,
+ size_t payload_len);
+};
+
/**
* struct dma_async_tx_descriptor - async transaction descriptor
* ---dma generic offload fields---
* @cookie: tracking cookie for this transaction, set to -EBUSY if
* this tx is sitting on a dependency list
* @flags: flags to augment operation preparation, control completion, and
- * communicate status
+ * communicate status
* @phys: physical address of the descriptor
* @chan: target channel for this operation
* @tx_submit: accept the descriptor, assign ordered cookie and mark the
* descriptor pending. To be pushed on .issue_pending() call
* @callback: routine to call after this operation is complete
* @callback_param: general parameter to pass to the callback routine
+ * @desc_metadata_mode: core-managed metadata mode, used to guard against
+ * mixed use of DESC_METADATA_CLIENT and DESC_METADATA_ENGINE; otherwise
+ * DESC_METADATA_NONE
+ * @metadata_ops: metadata-mode ops provided by the DMA driver; must be set
+ * by the DMA driver if metadata mode is supported with the descriptor
* ---async_tx api specific fields---
* @next: at completion submit this descriptor
* @parent: pointer to the next level up in the dependency chain
@@ -504,6 +581,8 @@ struct dma_async_tx_descriptor {
dma_async_tx_callback_result callback_result;
void *callback_param;
struct dmaengine_unmap_data *unmap;
+ enum dma_desc_metadata_mode desc_metadata_mode;
+ struct dma_descriptor_metadata_ops *metadata_ops;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent;
@@ -611,11 +690,13 @@ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descr
* @residue: the remaining number of bytes left to transmit
* on the selected transfer for states DMA_IN_PROGRESS and
* DMA_PAUSED if this is implemented in the driver, else 0
+ * @in_flight_bytes: amount of data, in bytes, cached by the DMA engine.
*/
struct dma_tx_state {
dma_cookie_t last;
dma_cookie_t used;
u32 residue;
+ u32 in_flight_bytes;
};
/**
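
A sketch of how a client might consume the new @in_flight_bytes field,
assuming chan and cookie come from an earlier dmaengine_submit():

    struct dma_tx_state state;
    enum dma_status status;

    status = dmaengine_tx_status(chan, cookie, &state);
    if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
        /* residue: not yet transferred; in_flight_bytes: cached by the DMA */
        pr_debug("residue=%u in_flight=%u\n",
                 state.residue, state.in_flight_bytes);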
@@ -666,6 +747,7 @@ struct dma_filter {
* @global_node: list_head for global dma_device_list
* @filter: information for device/slave to filter function/param mapping
* @cap_mask: one or more dma_capability flags
+ * @desc_metadata_modes: supported metadata modes by the DMA device
* @max_xor: maximum number of xor sources, 0 if no capability
* @max_pq: maximum number of PQ sources and PQ-continue capability
* @copy_align: alignment shift for memcpy operations
@@ -674,6 +756,7 @@ struct dma_filter {
* @fill_align: alignment shift for memset operations
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
+ * @owner: owner module (automatically set based on the provided dev)
* @src_addr_widths: bit mask of src addr widths the device supports
* Width is specified in bytes, e.g. for a device supporting
* a width of 4 the mask should have BIT(4) set.
@@ -718,15 +801,21 @@ struct dma_filter {
* will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
* @descriptor_reuse: a submitted transfer can be resubmitted after completion
+ * @device_release: called some time after dma_async_device_unregister() is
+ * called and there are no further references to this structure. This
+ * must be implemented to free resources; however, many existing drivers
+ * do not, and are therefore not safe to unbind while in use.
+ *
*/
struct dma_device {
-
+ struct kref ref;
unsigned int chancnt;
unsigned int privatecnt;
struct list_head channels;
struct list_head global_node;
struct dma_filter filter;
dma_cap_mask_t cap_mask;
+ enum dma_desc_metadata_mode desc_metadata_modes;
unsigned short max_xor;
unsigned short max_pq;
enum dmaengine_alignment copy_align;
@@ -737,6 +826,7 @@ struct dma_device {
int dev_id;
struct device *dev;
+ struct module *owner;
u32 src_addr_widths;
u32 dst_addr_widths;
@@ -800,6 +890,7 @@ struct dma_device {
dma_cookie_t cookie,
struct dma_tx_state *txstate);
void (*device_issue_pending)(struct dma_chan *chan);
+ void (*device_release)(struct dma_device *dev);
};
static inline int dmaengine_slave_config(struct dma_chan *chan,
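
A sketch of how a provider that is safe to unbind might wire up the new
@device_release hook; struct foo_dma_dev and foo_dma_release() are
hypothetical names, not part of this patch:

    struct foo_dma_dev {
        struct dma_device ddev;
        /* ... driver-private state ... */
    };

    static void foo_dma_release(struct dma_device *ddev)
    {
        struct foo_dma_dev *fdev =
            container_of(ddev, struct foo_dma_dev, ddev);

        /* the last reference (struct kref ref above) is gone; safe to free */
        kfree(fdev);
    }

    /* in probe, before dma_async_device_register(&fdev->ddev): */
    fdev->ddev.device_release = foo_dma_release;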
@@ -902,6 +993,41 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
len, flags);
}
+static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
+ enum dma_desc_metadata_mode mode)
+{
+ if (!chan)
+ return false;
+
+ return !!(chan->device->desc_metadata_modes & mode);
+}
+
+#ifdef CONFIG_DMA_ENGINE
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len);
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len);
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len);
+#else /* CONFIG_DMA_ENGINE */
+static inline int dmaengine_desc_attach_metadata(
+ struct dma_async_tx_descriptor *desc, void *data, size_t len)
+{
+ return -EINVAL;
+}
+static inline void *dmaengine_desc_get_metadata_ptr(
+ struct dma_async_tx_descriptor *desc, size_t *payload_len,
+ size_t *max_len)
+{
+ return NULL;
+}
+static inline int dmaengine_desc_set_metadata_len(
+ struct dma_async_tx_descriptor *desc, size_t payload_len)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_DMA_ENGINE */
+
/**
* dmaengine_terminate_all() - Terminate all active DMA transfers
* @chan: The channel for which to terminate the transfers
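
A corresponding DESC_METADATA_ENGINE sketch for the MEM_TO_DEV case, built on
the helpers declared in this hunk (chan, buf, buf_len, client_md and the
size_t client_md_len are assumed client state):

    struct dma_async_tx_descriptor *desc;
    void *md;
    size_t md_used, md_max;

    /* bail out early if the channel cannot do engine-managed metadata */
    if (!dmaengine_is_metadata_mode_supported(chan, DESC_METADATA_ENGINE))
        return -ENXIO;

    desc = dmaengine_prep_slave_single(chan, buf, buf_len,
                                       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
    if (!desc)
        return -ENOMEM;

    /* step 2: get a pointer to the engine's metadata area */
    md = dmaengine_desc_get_metadata_ptr(desc, &md_used, &md_max);
    if (IS_ERR(md))
        return PTR_ERR(md);

    /* steps 3-4: write the metadata, then report how much was placed */
    memcpy(md, client_md, min(md_max, client_md_len));
    dmaengine_desc_set_metadata_len(desc, min(md_max, client_md_len));

    /* step 5: submit */
    dmaengine_submit(desc);
    dma_async_issue_pending(chan);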
@@ -1402,16 +1528,16 @@ static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
int dma_async_device_register(struct dma_device *device);
int dmaenginem_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
+int dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan);
+void dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
-struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
-struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
#define dma_request_channel(mask, x, y) \
__dma_request_channel(&(mask), x, y, NULL)
-#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
- __dma_request_slave_channel_compat(&(mask), x, y, dev, name)
static inline struct dma_chan
-*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
+*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
dma_filter_fn fn, void *fn_param,
struct device *dev, const char *name)
{
@@ -1424,6 +1550,25 @@ static inline struct dma_chan
if (!fn || !fn_param)
return NULL;
- return __dma_request_channel(mask, fn, fn_param, NULL);
+ return __dma_request_channel(&mask, fn, fn_param, NULL);
+}
+
+static inline char *
+dmaengine_get_direction_text(enum dma_transfer_direction dir)
+{
+ switch (dir) {
+ case DMA_DEV_TO_MEM:
+ return "DEV_TO_MEM";
+ case DMA_MEM_TO_DEV:
+ return "MEM_TO_DEV";
+ case DMA_MEM_TO_MEM:
+ return "MEM_TO_MEM";
+ case DMA_DEV_TO_DEV:
+ return "DEV_TO_DEV";
+ default:
+ break;
+ }
+
+ return "invalid";
}
#endif /* DMAENGINE_H */
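
Finally, a sketch of the dynamic-channel API from the last hunks: hot-adding
and later removing a channel on an already registered device (fdev and its
chans[] array are hypothetical provider state):

    struct dma_chan *chan = &fdev->chans[i].chan;
    int ret;

    chan->device = &fdev->ddev;
    ret = dma_async_device_channel_register(&fdev->ddev, chan);
    if (ret)
        return ret;

    /* ... later, when the channel goes away: */
    dma_async_device_channel_unregister(&fdev->ddev, chan);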