From cde14bbfb3aa79b479db35bd29e6c083513d8614 Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Mon, 5 Feb 2007 18:46:40 -0800
Subject: [IA64] swiotlb bug fixes

This patch fixes
 - marking I-cache clean of pages DMAed to is now only done for IA64
 - the broken multiple-inclusion guard in include/asm-x86_64/swiotlb.h
 - a missing call to mark_clean in swiotlb_sync_sg()
 - a (perhaps only theoretical) issue in swiotlb_dma_supported() when
   io_tlb_end is exactly at the end of memory

Signed-off-by: Jan Beulich
Signed-off-by: Andrew Morton
Signed-off-by: Tony Luck
---
 arch/ia64/mm/init.c          | 19 +++++++++++++++++++
 include/asm-ia64/dma.h       |  2 ++
 include/asm-x86_64/swiotlb.h |  7 ++++---
 lib/swiotlb.c                | 33 ++++++++-------------------------
 4 files changed, 33 insertions(+), 28 deletions(-)

diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 8b7599808dd5..faaca21a3718 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -129,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+	unsigned long pg_addr, end;
+
+	pg_addr = PAGE_ALIGN((unsigned long) addr);
+	end = (unsigned long) addr + size;
+	while (pg_addr + PAGE_SIZE <= end) {
+		struct page *page = virt_to_page(pg_addr);
+		set_bit(PG_arch_1, &page->flags);
+		pg_addr += PAGE_SIZE;
+	}
+}
+
 inline void
 ia64_set_rbs_bot (void)
 {
diff --git a/include/asm-ia64/dma.h b/include/asm-ia64/dma.h
index dad3a735df8b..4d97f60f1ef5 100644
--- a/include/asm-ia64/dma.h
+++ b/include/asm-ia64/dma.h
@@ -19,4 +19,6 @@ extern unsigned long MAX_DMA_ADDRESS;
 
 #define free_dma(x)
 
+void dma_mark_clean(void *addr, size_t size);
+
 #endif /* _ASM_IA64_DMA_H */
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index ba94ab3d2673..f9c589539a82 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -1,6 +1,5 @@
 #ifndef _ASM_SWIOTLB_H
-#define _ASM_SWTIOLB_H 1
-
+#define _ASM_SWIOTLB_H 1
 
 #include <asm/dma-mapping.h>
 
@@ -52,4 +51,6 @@ extern int swiotlb;
 
 extern void pci_swiotlb_init(void);
 
-#endif /* _ASM_SWTIOLB_H */
+static inline void dma_mark_clean(void *addr, size_t size) {}
+
+#endif /* _ASM_SWIOTLB_H */
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 10625785eefd..34278338aad0 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -557,25 +557,6 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 	return dev_addr;
 }
 
-/*
- * Since DMA is i-cache coherent, any (complete) pages that were written via
- * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
- * flush them when they get mapped into an executable vm-area.
- */
-static void
-mark_clean(void *addr, size_t size)
-{
-	unsigned long pg_addr, end;
-
-	pg_addr = PAGE_ALIGN((unsigned long) addr);
-	end = (unsigned long) addr + size;
-	while (pg_addr + PAGE_SIZE <= end) {
-		struct page *page = virt_to_page(pg_addr);
-		set_bit(PG_arch_1, &page->flags);
-		pg_addr += PAGE_SIZE;
-	}
-}
-
 /*
  * Unmap a single streaming mode DMA translation.  The dma_addr and size must
  * match what was provided for in a previous swiotlb_map_single call.  All
@@ -594,7 +575,7 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		unmap_single(hwdev, dma_addr, size, dir);
 	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+		dma_mark_clean(dma_addr, size);
 }
 
 /*
@@ -617,7 +598,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+		dma_mark_clean(dma_addr, size);
 }
 
 void
@@ -648,7 +629,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+		dma_mark_clean(dma_addr, size);
 }
 
 void
@@ -698,7 +679,6 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 		dev_addr = virt_to_phys(addr);
 		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
 			void *map = map_single(hwdev, addr, sg->length, dir);
-			sg->dma_address = virt_to_bus(map);
 			if (!map) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
@@ -707,6 +687,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 				sg[0].dma_length = 0;
 				return 0;
 			}
+			sg->dma_address = virt_to_bus(map);
 		} else
 			sg->dma_address = dev_addr;
 		sg->dma_length = sg->length;
@@ -730,7 +711,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
 			unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir);
 		else if (dir == DMA_FROM_DEVICE)
-			mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 }
 
 /*
@@ -752,6 +733,8 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
 			sync_single(hwdev, (void *) sg->dma_address,
 				    sg->dma_length, dir, target);
+		else if (dir == DMA_FROM_DEVICE)
+			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 }
 
 void
@@ -783,7 +766,7 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 int
 swiotlb_dma_supported (struct device *hwdev, u64 mask)
 {
-	return (virt_to_phys (io_tlb_end) - 1) <= mask;
+	return virt_to_phys(io_tlb_end - 1) <= mask;
 }
 
 EXPORT_SYMBOL(swiotlb_init);
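
For reference, the dma_mark_clean() loop only touches pages that lie wholly
inside the DMAed region: PAGE_ALIGN() rounds the start address up to the first
page boundary, and the loop stops before any partial tail page, since a page
only partially written by DMA may still hold stale instruction-cache lines
elsewhere. A minimal user-space sketch of that rounding (PAGE_SIZE and
PAGE_ALIGN mirror the kernel macros; the buffer address and length are
invented for illustration):

/* Sketch of the page-rounding logic in dma_mark_clean(). */
#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x10000800UL;   /* hypothetical DMA buffer */
	unsigned long size = 3 * PAGE_SIZE;  /* hypothetical length */
	unsigned long pg_addr = PAGE_ALIGN(addr);  /* first complete page */
	unsigned long end = addr + size;

	/* Only pages wholly inside [addr, addr + size) qualify; the
	 * partial head and tail pages are skipped. */
	while (pg_addr + PAGE_SIZE <= end) {
		printf("would set PG_arch_1 on page at %#lx\n", pg_addr);
		pg_addr += PAGE_SIZE;
	}
	return 0;
}

With the values above, the three-page span contains only two complete pages,
so only those two would be marked clean.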
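The "multiple inclusion" fix is the guard typo visible in the
asm-x86_64/swiotlb.h hunk: the header tested _ASM_SWIOTLB_H but defined
_ASM_SWTIOLB_H, so the tested macro was never set and every inclusion
re-entered the header body. A contrived header showing the same failure mode
(macro and struct names invented):

/* broken_guard.h -- the tested macro and the defined macro disagree,
 * so the #ifndef passes on every inclusion. */
#ifndef EXAMPLE_GUARD_H
#define EXAMPLE_GAURD_H 1   /* typo: EXAMPLE_GUARD_H is never defined */

struct example {            /* redefinition error if included twice */
	int x;
};

#endif

Including such a header twice in one translation unit redefines struct
example and fails to compile; with a matching guard macro the second
inclusion is a no-op.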
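The swiotlb_map_sg() hunks fix a use-before-check: sg->dma_address was
computed from the return value of map_single() before that value was tested
for NULL, so a failed mapping still deposited a bogus bus address in the
scatterlist entry. The assignment now happens only after the check. A
self-contained sketch of the pattern (alloc_slot() and bus_addr() are
hypothetical stand-ins for map_single() and virt_to_bus()):

/* "Check before use": validate an allocation before consuming it. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static void *alloc_slot(size_t len)
{
	return len > 1024 ? NULL : malloc(len);  /* fails for big requests */
}

static uintptr_t bus_addr(void *p)
{
	return (uintptr_t)p;  /* identity "translation" for the sketch */
}

static int map_one(size_t len, uintptr_t *out)
{
	void *buf = alloc_slot(len);

	/* Fixed ordering: test first, consume after.  The pre-fix code
	 * did *out = bus_addr(buf) before this test, storing a bogus
	 * address whenever buf was NULL. */
	if (!buf)
		return -1;
	*out = bus_addr(buf);
	free(buf);
	return 0;
}

int main(void)
{
	uintptr_t addr;

	printf("small: %d\n", map_one(64, &addr));    /* 0: mapped */
	printf("large: %d\n", map_one(4096, &addr));  /* -1: handled */
	return 0;
}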
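Finally, the swiotlb_dma_supported() change moves the "- 1" inside the
translation. io_tlb_end points one byte past the bounce buffer; if the buffer
ends exactly at the end of memory, that one-past-the-end address has no
backing page, so translating it is not guaranteed to yield a meaningful
physical address, whereas translating the last valid byte (io_tlb_end - 1)
always is. A toy model of the difference (this virt_to_phys() is a stand-in
that rejects addresses outside a fake RAM window; all constants are invented):

/* Toy model of the swiotlb_dma_supported() operand-order fix. */
#include <assert.h>
#include <stdio.h>

#define RAM_VIRT_BASE 0x80000000UL  /* hypothetical kernel mapping */
#define RAM_PHYS_BASE 0x00000000UL
#define RAM_SIZE      0x10000000UL  /* 256 MB of fake RAM */

static unsigned long virt_to_phys(unsigned long vaddr)
{
	/* Model "nothing mapped past the end of RAM" by refusing
	 * out-of-range translations. */
	assert(vaddr >= RAM_VIRT_BASE && vaddr < RAM_VIRT_BASE + RAM_SIZE);
	return vaddr - RAM_VIRT_BASE + RAM_PHYS_BASE;
}

int main(void)
{
	/* Bounce buffer ending exactly at the end of RAM. */
	unsigned long io_tlb_end = RAM_VIRT_BASE + RAM_SIZE;
	unsigned long long mask = 0xffffffffULL;

	/* New form: translate the last valid byte -- always in range. */
	printf("supported: %d\n", virt_to_phys(io_tlb_end - 1) <= mask);

	/* The old form, virt_to_phys(io_tlb_end) - 1, would translate
	 * the one-past-the-end address and trip the assertion above. */
	return 0;
}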