author     Linus Torvalds <torvalds@linux-foundation.org>  2019-07-20 12:09:52 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-07-20 12:09:52 -0700
commit     ac60602a6d8f6830dee89f4b87ee005f62eb7171 (patch)
tree       ea8810e0d7abc82755c8db00904015ecbf99a8b4 /include/linux/dma-mapping.h
parent     Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip (diff)
parent     dma-direct: correct the physical addr in dma_direct_sync_sg_for_cpu/device (diff)
Merge tag 'dma-mapping-5.3-1' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:
 "Fix various regressions:

   - force unencrypted dma-coherent buffers if encryption bit can't fit
     into the dma coherent mask (Tom Lendacky)

   - avoid limiting request size if swiotlb is not used (me)

   - fix swiotlb handling in dma_direct_sync_sg_for_cpu/device
     (Fugang Duan)"

* tag 'dma-mapping-5.3-1' of git://git.infradead.org/users/hch/dma-mapping:
  dma-direct: correct the physical addr in dma_direct_sync_sg_for_cpu/device
  dma-direct: only limit the mapping size if swiotlb could be used
  dma-mapping: add a dma_addressing_limited helper
  dma-direct: Force unencrypted DMA under SME for certain DMA masks
Diffstat (limited to 'include/linux/dma-mapping.h')
-rw-r--r--  include/linux/dma-mapping.h  14
1 file changed, 14 insertions, 0 deletions
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 8d13e28a8e07..e11b115dd0e4 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -679,6 +679,20 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
 	return dma_set_mask_and_coherent(dev, mask);
 }
 
+/**
+ * dma_addressing_limited - return if the device is addressing limited
+ * @dev: device to check
+ *
+ * Return %true if the device's DMA mask is too small to address all memory in
+ * the system, else %false. Lack of addressing bits is the prime reason for
+ * bounce buffering, but might not be the only one.
+ */
+static inline bool dma_addressing_limited(struct device *dev)
+{
+	return min_not_zero(*dev->dma_mask, dev->bus_dma_mask) <
+			dma_get_required_mask(dev);
+}
+
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		const struct iommu_ops *iommu, bool coherent);
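
For context, a minimal usage sketch (not part of this merge or the patch above): foo_setup_dma() and the 32-bit mask are invented for illustration, while dma_set_mask_and_coherent(), DMA_BIT_MASK(), dev_info() and the dma_addressing_limited() helper added above are existing kernel interfaces.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical driver setup path: set the DMA mask, then check whether
 * the device can reach all of system memory.
 */
static int foo_setup_dma(struct device *dev)
{
	int ret;

	/* Assume the hardware can only generate 32-bit DMA addresses. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/*
	 * dma_addressing_limited() compares the smaller of the device and
	 * bus DMA masks against dma_get_required_mask(); true means some
	 * system memory is not directly addressable and streaming mappings
	 * may end up bounce buffered.
	 */
	if (dma_addressing_limited(dev))
		dev_info(dev, "DMA addressing limited, bounce buffering possible\n");

	return 0;
}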