author	Maciej W. Rozycki <macro@linux-mips.org>	2014-07-05 15:14:22 +0100
committer	David S. Miller <davem@davemloft.net>	2014-07-08 15:30:10 -0700
commit	d68ab591f874cf752101ac77b08c01123b6f3a2e
tree	65d6ad13d4d7a4b2de150eb84280a2e6ba674563 /drivers/net
parent	Merge branch 'sctp_command_queue'
defxx: Correct the receive DMA map size
Receive DMA maps are oversized: their size calculation includes the EISA legacy 128-byte alignment padding, even though this padding is never used for data.  Worse yet, if the skb's data area has indeed been realigned, then data beyond the end of the buffer will be synchronised from the receive DMA bounce buffer, possibly corrupting data structures residing in memory beyond the actual end of this data buffer.

Therefore switch to using PI_RCV_DATA_K_SIZE_MAX rather than NEW_SKB_SIZE in the DMA mapping; the value the former macro expands to is written to the receive ring DMA descriptor of the PDQ DMA chip and determines the maximum amount of data the PDQ will ever transfer to the corresponding data buffer, including all headers and padding.

Reported-by: Robert Coerver <Robert.Coerver@ll.mit.edu>
Tested-by: Robert Coerver <Robert.Coerver@ll.mit.edu>
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
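For illustration, below is a minimal user-space sketch of the size relationship the commit message describes. It assumes the defxx.h definitions of that era, where NEW_SKB_SIZE adds 128 bytes of EISA alignment slack on top of PI_RCV_DATA_K_SIZE_MAX and my_skb_align() advances the data pointer by at most 127 bytes to reach a 128-byte boundary; the constants are copied here only to make the arithmetic concrete and are not authoritative.

/*
 * Minimal sketch (assumption: constants mirror defxx.h of that era).
 * Shows why mapping NEW_SKB_SIZE bytes from the 128-byte-aligned data
 * pointer can reach past the end of the allocated data area, while
 * PI_RCV_DATA_K_SIZE_MAX always stays inside it.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PI_RCV_DATA_K_SIZE_MAX	4608				/* max bytes the PDQ will ever DMA */
#define NEW_SKB_SIZE		(PI_RCV_DATA_K_SIZE_MAX + 128)	/* allocation incl. alignment slack */

int main(void)
{
	/* Stand-in for the skb data area allocated with dev_alloc_skb(NEW_SKB_SIZE). */
	static unsigned char buf[NEW_SKB_SIZE];
	uintptr_t start = (uintptr_t)buf;

	/* my_skb_align(newskb, 128) moves skb->data up to the next
	 * 128-byte boundary, i.e. forward by at most 127 bytes. */
	uintptr_t aligned = (start + 127) & ~(uintptr_t)127;
	size_t slack = (size_t)(aligned - start);

	/* Old mapping: NEW_SKB_SIZE bytes from the aligned pointer end
	 * 'slack' bytes past the end of the data area whenever slack > 0. */
	size_t old_map_end = slack + NEW_SKB_SIZE;

	/* New mapping: PI_RCV_DATA_K_SIZE_MAX bytes always fit, because
	 * slack <= 127 < 128. */
	size_t new_map_end = slack + PI_RCV_DATA_K_SIZE_MAX;

	printf("slack=%zu old_end=%zu new_end=%zu data_area=%d\n",
	       slack, old_map_end, new_map_end, NEW_SKB_SIZE);
	assert(new_map_end <= NEW_SKB_SIZE);
	return 0;
}

The same PI_RCV_DATA_K_SIZE_MAX size is then used for dma_unmap_single() in dfx_rcv_queue_process(), keeping the map and unmap sizes consistent, as the diff below shows.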
Diffstat (limited to 'drivers/net')
 drivers/net/fddi/defxx.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index eb78203cd58e..4dcfb32983d9 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -2936,7 +2936,7 @@ static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
my_skb_align(newskb, 128);
bp->descr_block_virt->rcv_data[i + j].long_1 =
(u32)dma_map_single(bp->bus_dev, newskb->data,
- NEW_SKB_SIZE,
+ PI_RCV_DATA_K_SIZE_MAX,
DMA_FROM_DEVICE);
/*
* p_rcv_buff_va is only used inside the
@@ -3053,14 +3053,14 @@ static void dfx_rcv_queue_process(
skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
dma_unmap_single(bp->bus_dev,
bp->descr_block_virt->rcv_data[entry].long_1,
- NEW_SKB_SIZE,
+ PI_RCV_DATA_K_SIZE_MAX,
DMA_FROM_DEVICE);
skb_reserve(skb, RCV_BUFF_K_PADDING);
bp->p_rcv_buff_va[entry] = (char *)newskb;
bp->descr_block_virt->rcv_data[entry].long_1 =
(u32)dma_map_single(bp->bus_dev,
newskb->data,
- NEW_SKB_SIZE,
+ PI_RCV_DATA_K_SIZE_MAX,
DMA_FROM_DEVICE);
} else
skb = NULL;