From e8b6c54f6d57822e228027d41a1edb317034a08c Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Tue, 24 Mar 2020 13:23:34 +0000
Subject: net: xilinx: temac: Relax Kconfig dependencies

Similar to axienet, the temac driver is now architecture agnostic and
can at least be compiled for several architectures. Especially the fact
that this is a soft IP for implementing in FPGAs makes the current
restriction rather pointless, as it could literally appear on any
architecture, as long as an FPGA is connected to the bus.

The driver hasn't actually been tried on any hardware; it is just a
drive-by patch made while doing the same for axienet (a similar patch
for axienet is already merged).

Both drivers (temac and axienet) have been compile-tested for:
alpha hppa64 microblaze mips64 powerpc powerpc64 riscv64 s390 sparc64
(using kernel.org cross compilers).

Signed-off-by: Andre Przywara
Reviewed-by: Radhey Shyam Pandey
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/xilinx/Kconfig | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 6304ebd8b5c6..0810af8193cb 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -32,7 +32,6 @@ config XILINX_AXI_EMAC
 
 config XILINX_LL_TEMAC
 	tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
-	depends on PPC || MICROBLAZE || X86 || COMPILE_TEST
 	select PHYLIB
 	---help---
 	  This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
-- 
cgit v1.2.3-59-g8ed1b

From 24201a64770afe2e17050b2ab9e8c0e24e9c23b2 Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Tue, 24 Mar 2020 13:23:35 +0000
Subject: net: axienet: Convert DMA error handler to a work queue

The DMA error handler routine is currently a tasklet, scheduled to run
after the DMA error IRQ was handled. However it needs to take the MDIO
mutex, which it is not allowed to do in a tasklet. A kernel with debug
options enabled consequently complains:

[  614.050361] net eth0: DMA Tx error 0x174019
[  614.064002] net eth0: Current BD is at: 0x8f84aa0ce
[  614.080195] BUG: sleeping function called from invalid context at kernel/locking/mutex.c:935
[  614.109484] in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 40, name: kworker/u4:4
[  614.135428] 3 locks held by kworker/u4:4/40:
[  614.149075]  #0: ffff000879863328 ((wq_completion)rpciod){....}, at: process_one_work+0x1f0/0x6a8
[  614.177528]  #1: ffff80001251bdf8 ((work_completion)(&task->u.tk_work)){....}, at: process_one_work+0x1f0/0x6a8
[  614.209033]  #2: ffff0008784e0110 (sk_lock-AF_INET-RPC){....}, at: tcp_sendmsg+0x24/0x58
[  614.235429] CPU: 0 PID: 40 Comm: kworker/u4:4 Not tainted 5.6.0-rc3-00926-g4a165a9d5921 #26
[  614.260854] Hardware name: ARM Test FPGA (DT)
[  614.274734] Workqueue: rpciod rpc_async_schedule
[  614.289022] Call trace:
[  614.296871]  dump_backtrace+0x0/0x1a0
[  614.308311]  show_stack+0x14/0x20
[  614.318751]  dump_stack+0xbc/0x100
[  614.329403]  ___might_sleep+0xf0/0x140
[  614.341018]  __might_sleep+0x4c/0x80
[  614.352201]  __mutex_lock+0x5c/0x8a8
[  614.363348]  mutex_lock_nested+0x1c/0x28
[  614.375654]  axienet_dma_err_handler+0x38/0x388
[  614.389999]  tasklet_action_common.isra.15+0x160/0x1a8
[  614.405894]  tasklet_action+0x24/0x30
[  614.417297]  efi_header_end+0xe0/0x494
[  614.429020]  irq_exit+0xd0/0xd8
[  614.439047]  __handle_domain_irq+0x60/0xb0
[  614.451877]  gic_handle_irq+0xdc/0x2d0
[  614.463486]  el1_irq+0xcc/0x180
[  614.473451]  __tcp_transmit_skb+0x41c/0xb58
[  614.486513]  tcp_write_xmit+0x224/0x10a0
[  614.498792]  __tcp_push_pending_frames+0x38/0xc8
[  614.513126]  tcp_rcv_established+0x41c/0x820
[  614.526301]  tcp_v4_do_rcv+0x8c/0x218
[  614.537784]  __release_sock+0x5c/0x108
[  614.549466]  release_sock+0x34/0xa0
[  614.560318]  tcp_sendmsg+0x40/0x58
[  614.571053]  inet_sendmsg+0x40/0x68
[  614.582061]  sock_sendmsg+0x18/0x30
[  614.593074]  xs_sendpages+0x218/0x328
[  614.604506]  xs_tcp_send_request+0xa0/0x1b8
[  614.617461]  xprt_transmit+0xc8/0x4f0
[  614.628943]  call_transmit+0x8c/0xa0
[  614.640028]  __rpc_execute+0xbc/0x6f8
[  614.651380]  rpc_async_schedule+0x28/0x48
[  614.663846]  process_one_work+0x298/0x6a8
[  614.676299]  worker_thread+0x40/0x490
[  614.687687]  kthread+0x134/0x138
[  614.697804]  ret_from_fork+0x10/0x18
[  614.717319] xilinx_axienet 7fe00000.ethernet eth0: Link is Down
[  615.748343] xilinx_axienet 7fe00000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off

Since tasklets are not really popular anymore anyway, let's convert
this over to a work queue, which can sleep and thus can take the MDIO
mutex.

Signed-off-by: Andre Przywara
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/xilinx/xilinx_axienet.h      |  2 +-
 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 24 +++++++++++------------
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 2dacfc85b3ba..04e51af32178 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -435,7 +435,7 @@ struct axienet_local {
 	void __iomem *regs;
 	void __iomem *dma_regs;
 
-	struct tasklet_struct dma_err_tasklet;
+	struct work_struct dma_err_task;
 
 	int tx_irq;
 	int rx_irq;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index e2f3e2b0cec7..d4c872425065 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -806,7 +806,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
 		/* Write to the Rx channel control register */
 		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 
-		tasklet_schedule(&lp->dma_err_tasklet);
+		schedule_work(&lp->dma_err_task);
 		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
 	}
 out:
@@ -855,7 +855,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
 		/* write to the Rx channel control register */
 		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 
-		tasklet_schedule(&lp->dma_err_tasklet);
+		schedule_work(&lp->dma_err_task);
 		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
 	}
 out:
@@ -891,7 +891,7 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
 	return IRQ_HANDLED;
 }
 
-static void axienet_dma_err_handler(unsigned long data);
+static void axienet_dma_err_handler(struct work_struct *work);
 
 /**
  * axienet_open - Driver open routine.
@@ -935,9 +935,8 @@ static int axienet_open(struct net_device *ndev)
 
 	phylink_start(lp->phylink);
 
-	/* Enable tasklets for Axi DMA error handling */
-	tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
-		     (unsigned long) lp);
+	/* Enable worker thread for Axi DMA error handling */
+	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
 
 	/* Enable interrupts for Axi DMA Tx */
 	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
@@ -966,7 +965,7 @@ err_rx_irq:
 err_tx_irq:
 	phylink_stop(lp->phylink);
 	phylink_disconnect_phy(lp->phylink);
-	tasklet_kill(&lp->dma_err_tasklet);
+	cancel_work_sync(&lp->dma_err_task);
 	dev_err(lp->dev, "request_irq() failed\n");
 	return ret;
 }
@@ -1025,7 +1024,7 @@ static int axienet_stop(struct net_device *ndev)
 	axienet_mdio_enable(lp);
 	mutex_unlock(&lp->mii_bus->mdio_lock);
 
-	tasklet_kill(&lp->dma_err_tasklet);
+	cancel_work_sync(&lp->dma_err_task);
 
 	if (lp->eth_irq > 0)
 		free_irq(lp->eth_irq, ndev);
@@ -1484,17 +1483,18 @@ static const struct phylink_mac_ops axienet_phylink_ops = {
 };
 
 /**
- * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
- * @data:	Data passed
+ * axienet_dma_err_handler - Work queue task for Axi DMA Error
+ * @work:	pointer to work_struct
  *
  * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
  * Tx/Rx BDs.
  */
-static void axienet_dma_err_handler(unsigned long data)
+static void axienet_dma_err_handler(struct work_struct *work)
 {
 	u32 axienet_status;
 	u32 cr, i;
-	struct axienet_local *lp = (struct axienet_local *) data;
+	struct axienet_local *lp = container_of(work, struct axienet_local,
+						dma_err_task);
 	struct net_device *ndev = lp->ndev;
 	struct axidma_bd *cur_p;
-- 
cgit v1.2.3-59-g8ed1b
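
The conversion above is an instance of a standard recipe: work items run
in process context and may sleep, while tasklets may not. A minimal,
self-contained sketch of the pattern (the foo_* names are illustrative,
not taken from the driver):

    #include <linux/workqueue.h>
    #include <linux/mutex.h>

    struct foo_device {
            struct mutex lock;          /* sleeping lock - illegal in a tasklet */
            struct work_struct err_task;
    };

    static void foo_err_handler(struct work_struct *work)
    {
            /* Work items run in process context, so taking a mutex is fine. */
            struct foo_device *dev = container_of(work, struct foo_device,
                                                  err_task);

            mutex_lock(&dev->lock);
            /* ... recover the hardware ... */
            mutex_unlock(&dev->lock);
    }

    /* setup (e.g. in .ndo_open):  INIT_WORK(&dev->err_task, foo_err_handler);
     * from the IRQ handler:       schedule_work(&dev->err_task);
     * teardown (e.g. .ndo_stop):  cancel_work_sync(&dev->err_task);
     */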
From ee44d0b78839b21591501424fd3cb3648cc803b5 Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Tue, 24 Mar 2020 13:23:36 +0000
Subject: net: axienet: Propagate failure of DMA descriptor setup

When we fail allocating the DMA buffers in axienet_dma_bd_init(), we
report this error, but carry on with initialisation nevertheless. This
leads to a kernel panic when the driver later wants to send a packet,
as it uses uninitialised data structures.

Make the axienet_device_reset() routine return an error value, as it
contains the DMA buffer initialisation. Make sure we propagate the
error up the chain and eventually fail the driver initialisation, to
avoid relying on non-initialised buffers.

Signed-off-by: Andre Przywara
Reviewed-by: Radhey Shyam Pandey
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 26 +++++++++++++++++------
 1 file changed, 19 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index d4c872425065..a25423e6fb36 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -437,9 +437,10 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
 	lp->options |= options;
 }
 
-static void __axienet_device_reset(struct axienet_local *lp)
+static int __axienet_device_reset(struct axienet_local *lp)
 {
 	u32 timeout;
+
 	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
 	 * process of Axi DMA takes a while to complete as all pending
 	 * commands/transfers will be flushed or completed during this
@@ -455,9 +456,11 @@ static void __axienet_device_reset(struct axienet_local *lp)
 		if (--timeout == 0) {
 			netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
 				   __func__);
-			break;
+			return -ETIMEDOUT;
 		}
 	}
+
+	return 0;
 }
 
 /**
@@ -470,13 +473,17 @@ static void __axienet_device_reset(struct axienet_local *lp)
 * areconnected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
+ * Returns 0 on success or a negative error number otherwise.
 */
-static void axienet_device_reset(struct net_device *ndev)
+static int axienet_device_reset(struct net_device *ndev)
 {
 	u32 axienet_status;
 	struct axienet_local *lp = netdev_priv(ndev);
+	int ret;
 
-	__axienet_device_reset(lp);
+	ret = __axienet_device_reset(lp);
+	if (ret)
+		return ret;
 
 	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
 	lp->options |= XAE_OPTION_VLAN;
@@ -491,9 +498,11 @@ static void axienet_device_reset(struct net_device *ndev)
 		lp->options |= XAE_OPTION_JUMBO;
 	}
 
-	if (axienet_dma_bd_init(ndev)) {
+	ret = axienet_dma_bd_init(ndev);
+	if (ret) {
 		netdev_err(ndev, "%s: descriptor allocation failed\n",
 			   __func__);
+		return ret;
 	}
 
 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
@@ -518,6 +527,8 @@ static void axienet_device_reset(struct net_device *ndev)
 	axienet_setoptions(ndev, lp->options);
 
 	netif_trans_update(ndev);
+
+	return 0;
 }
 
 /**
@@ -921,8 +932,9 @@ static int axienet_open(struct net_device *ndev)
 	 */
 	mutex_lock(&lp->mii_bus->mdio_lock);
 	axienet_mdio_disable(lp);
-	axienet_device_reset(ndev);
-	ret = axienet_mdio_enable(lp);
+	ret = axienet_device_reset(ndev);
+	if (ret == 0)
+		ret = axienet_mdio_enable(lp);
 	mutex_unlock(&lp->mii_bus->mdio_lock);
 	if (ret < 0)
 		return ret;
-- 
cgit v1.2.3-59-g8ed1b

From f26667a373f34ace925c90a1e881b1774d640dc8 Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Tue, 24 Mar 2020 13:23:37 +0000
Subject: net: axienet: Fix DMA descriptor cleanup path

When axienet_dma_bd_init() bails out during the initialisation process,
it might do so with parts of the structure already allocated and
initialised, while other parts have not been touched yet. Before
returning in this case, we call axienet_dma_bd_release(), which does
not take care of this corner case.

This is most obvious by the first loop happily dereferencing
lp->rx_bd_v, which we actually check to be non-NULL *afterwards*.

Make sure we only unmap or free already allocated structures, by:
- directly returning with -ENOMEM if nothing has been allocated at all
- checking for lp->rx_bd_v to be non-NULL *before* using it
- only unmapping allocated DMA RX regions

This avoids NULL pointer dereferences when initialisation fails.

Signed-off-by: Andre Przywara
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 43 +++++++++++++++--------
 1 file changed, 28 insertions(+), 15 deletions(-)

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index a25423e6fb36..415179cbdc51 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -160,24 +160,37 @@ static void axienet_dma_bd_release(struct net_device *ndev)
 	int i;
 	struct axienet_local *lp = netdev_priv(ndev);
 
+	/* If we end up here, tx_bd_v must have been DMA allocated. */
+	dma_free_coherent(ndev->dev.parent,
+			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+			  lp->tx_bd_v,
+			  lp->tx_bd_p);
+
+	if (!lp->rx_bd_v)
+		return;
+
 	for (i = 0; i < lp->rx_bd_num; i++) {
-		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
-				 lp->max_frm_size, DMA_FROM_DEVICE);
+		/* A NULL skb means this descriptor has not been initialised
+		 * at all.
+		 */
+		if (!lp->rx_bd_v[i].skb)
+			break;
+
 		dev_kfree_skb(lp->rx_bd_v[i].skb);
-	}
-	if (lp->rx_bd_v) {
-		dma_free_coherent(ndev->dev.parent,
-				  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
-				  lp->rx_bd_v,
-				  lp->rx_bd_p);
-	}
-	if (lp->tx_bd_v) {
-		dma_free_coherent(ndev->dev.parent,
-				  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
-				  lp->tx_bd_v,
-				  lp->tx_bd_p);
+
+		/* For each descriptor, we programmed cntrl with the (non-zero)
+		 * descriptor size, after it had been successfully allocated.
+		 * So a non-zero value in there means we need to unmap it.
+		 */
+		if (lp->rx_bd_v[i].cntrl)
+			dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
+					 lp->max_frm_size, DMA_FROM_DEVICE);
 	}
+
+	dma_free_coherent(ndev->dev.parent,
+			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+			  lp->rx_bd_v,
+			  lp->rx_bd_p);
 }
 
 /**
@@ -207,7 +220,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
 					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 					 &lp->tx_bd_p, GFP_KERNEL);
 	if (!lp->tx_bd_v)
-		goto out;
+		return -ENOMEM;
 
 	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
 					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
-- 
cgit v1.2.3-59-g8ed1b

From e7fea0b9d09e2f7d32776f5198192dfc2572a5b9 Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Tue, 24 Mar 2020 13:23:38 +0000
Subject: net: axienet: Improve DMA error handling

Since 0 is a valid DMA address, we cannot use the physical address to
check whether a TX descriptor is valid and is holding a DMA mapping.
Use the "cntrl" member of the descriptor to make this decision, as it
contains at least the length of the buffer, so 0 points to an
uninitialised buffer.

Signed-off-by: Andre Przywara
Reviewed-by: Radhey Shyam Pandey
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 415179cbdc51..82ec7deacdfe 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -571,7 +571,7 @@ static void axienet_start_xmit_done(struct net_device *ndev)
 				DMA_TO_DEVICE);
 		if (cur_p->skb)
 			dev_consume_skb_irq(cur_p->skb);
-		/*cur_p->phys = 0;*/
+		cur_p->cntrl = 0;
 		cur_p->app0 = 0;
 		cur_p->app1 = 0;
 		cur_p->app2 = 0;
@@ -1539,7 +1539,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
 
 	for (i = 0; i < lp->tx_bd_num; i++) {
 		cur_p = &lp->tx_bd_v[i];
-		if (cur_p->phys)
+		if (cur_p->cntrl)
 			dma_unmap_single(ndev->dev.parent, cur_p->phys,
 					 (cur_p->cntrl &
 					  XAXIDMA_BD_CTRL_LENGTH_MASK),
-- 
cgit v1.2.3-59-g8ed1b
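
The two fixes above both rest on an ordering invariant established in
axienet_dma_bd_init(): each RX descriptor is filled in a fixed sequence
(skb allocated, buffer DMA-mapped, cntrl written last), so teardown can
read the fields back as progress markers. A condensed sketch of that
idea, with illustrative foo_* names rather than the driver's own:

    #include <linux/netdevice.h>
    #include <linux/dma-mapping.h>

    struct foo_desc {                   /* stand-in for struct axidma_bd */
            struct sk_buff *skb;
            dma_addr_t phys;
            u32 cntrl;
    };

    /* Teardown mirrors the allocation order: skb is set first, the buffer
     * is mapped second, and cntrl is written last. Each field therefore
     * tells us how far initialisation got for this descriptor.
     */
    static void foo_release_rx_ring(struct device *dev, struct foo_desc *desc,
                                    int num, size_t buf_size)
    {
            int i;

            for (i = 0; i < num; i++) {
                    if (!desc[i].skb)       /* never initialised: stop here */
                            break;
                    dev_kfree_skb(desc[i].skb);
                    if (desc[i].cntrl)      /* non-zero only once mapped */
                            dma_unmap_single(dev, desc[i].phys, buf_size,
                                             DMA_FROM_DEVICE);
            }
    }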
From ab365c3393664f32116aa22fe322cb04a93fab31 Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Tue, 24 Mar 2020 13:23:39 +0000
Subject: net: axienet: Factor out TX descriptor chain cleanup

Factor out the code that cleans up a number of connected TX
descriptors, as we will need it to properly roll back a failed _xmit()
call.

There are subtle differences between cleaning up a successfully sent
chain (unknown number of involved descriptors, total data size needed)
and a chain that was about to be set up (number of descriptors known),
so cater for those variations with some extra parameters.

Signed-off-by: Andre Przywara
Reviewed-by: Radhey Shyam Pandey
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 79 ++++++++++++++++-------
 1 file changed, 57 insertions(+), 22 deletions(-)

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 82ec7deacdfe..5c1b53944771 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -545,32 +545,46 @@ static int axienet_device_reset(struct net_device *ndev)
 }
 
 /**
- * axienet_start_xmit_done - Invoked once a transmit is completed by the
- * Axi DMA Tx channel.
+ * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
  * @ndev:	Pointer to the net_device structure
+ * @first_bd:	Index of first descriptor to clean up
+ * @nr_bds:	Number of descriptors to clean up, can be -1 if unknown.
+ * @sizep:	Pointer to a u32 filled with the total sum of all bytes
+ *		in all cleaned-up descriptors. Ignored if NULL.
  *
- * This function is invoked from the Axi DMA Tx isr to notify the completion
- * of transmit operation. It clears fields in the corresponding Tx BDs and
- * unmaps the corresponding buffer so that CPU can regain ownership of the
- * buffer. It finally invokes "netif_wake_queue" to restart transmission if
- * required.
+ * Would either be called after a successful transmit operation, or after
+ * there was an error when setting up the chain.
+ * Returns the number of descriptors handled.
  */
-static void axienet_start_xmit_done(struct net_device *ndev)
+static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
+				 int nr_bds, u32 *sizep)
 {
-	u32 size = 0;
-	u32 packets = 0;
 	struct axienet_local *lp = netdev_priv(ndev);
 	struct axidma_bd *cur_p;
-	unsigned int status = 0;
+	int max_bds = nr_bds;
+	unsigned int status;
+	int i;
+
+	if (max_bds == -1)
+		max_bds = lp->tx_bd_num;
+
+	for (i = 0; i < max_bds; i++) {
+		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
+		status = cur_p->status;
+
+		/* If no number is given, clean up *all* descriptors that have
+		 * been completed by the MAC.
+		 */
+		if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
+			break;
 
-	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
-	status = cur_p->status;
-	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
 		dma_unmap_single(ndev->dev.parent, cur_p->phys,
 				(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
 				DMA_TO_DEVICE);
-		if (cur_p->skb)
+
+		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
 			dev_consume_skb_irq(cur_p->skb);
+
 		cur_p->cntrl = 0;
 		cur_p->app0 = 0;
 		cur_p->app1 = 0;
@@ -579,15 +593,36 @@ static void axienet_start_xmit_done(struct net_device *ndev)
 		cur_p->status = 0;
 		cur_p->skb = NULL;
 
-		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
-		packets++;
-
-		if (++lp->tx_bd_ci >= lp->tx_bd_num)
-			lp->tx_bd_ci = 0;
-		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
-		status = cur_p->status;
+		if (sizep)
+			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
 	}
 
+	return i;
+}
+
+/**
+ * axienet_start_xmit_done - Invoked once a transmit is completed by the
+ * Axi DMA Tx channel.
+ * @ndev:	Pointer to the net_device structure
+ *
+ * This function is invoked from the Axi DMA Tx isr to notify the completion
+ * of transmit operation. It clears fields in the corresponding Tx BDs and
+ * unmaps the corresponding buffer so that CPU can regain ownership of the
+ * buffer. It finally invokes "netif_wake_queue" to restart transmission if
+ * required.
+ */
+static void axienet_start_xmit_done(struct net_device *ndev)
+{
+	struct axienet_local *lp = netdev_priv(ndev);
+	u32 packets = 0;
+	u32 size = 0;
+
+	packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);
+
+	lp->tx_bd_ci += packets;
+	if (lp->tx_bd_ci >= lp->tx_bd_num)
+		lp->tx_bd_ci -= lp->tx_bd_num;
+
 	ndev->stats.tx_packets += packets;
 	ndev->stats.tx_bytes += size;
-- 
cgit v1.2.3-59-g8ed1b
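
The helper is deliberately dual-purpose. Condensed from this patch and
the next one in the series, the two intended call modes look like this
(fragments, with surrounding error handling elided):

    /* 1) TX completion: clean up an unknown number of descriptors, i.e.
     *    everything the MAC has marked complete, collecting byte counts:
     */
    packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);

    /* 2) Rollback after a failed DMA mapping in _xmit(): the number of
     *    descriptors set up so far (ii + 1) is known, and no byte count
     *    is needed:
     */
    axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1, NULL);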
From 71791dc8bdea55eeb2a0caefe98a0b7450c6e0af Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Tue, 24 Mar 2020 13:23:40 +0000
Subject: net: axienet: Check for DMA mapping errors

Especially with the default 32-bit DMA mask, DMA buffers are a limited
resource, so their allocation can fail. So as the DMA API documentation
requires, add error checking code after dma_map_single() calls to catch
the case where we run out of "low" memory.

Signed-off-by: Andre Przywara
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 31 ++++++++++++++++++++++-
 1 file changed, 30 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 5c1b53944771..736ac1b7a052 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -248,6 +248,11 @@ static int axienet_dma_bd_init(struct net_device *ndev)
 						     skb->data,
 						     lp->max_frm_size,
 						     DMA_FROM_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, lp->rx_bd_v[i].phys)) {
+			netdev_err(ndev, "DMA mapping error\n");
+			goto out;
+		}
+
 		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
 	}
 
@@ -679,6 +684,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	dma_addr_t tail_p;
 	struct axienet_local *lp = netdev_priv(ndev);
 	struct axidma_bd *cur_p;
+	u32 orig_tail_ptr = lp->tx_bd_tail;
 
 	num_frag = skb_shinfo(skb)->nr_frags;
 	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -714,9 +720,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
 	}
 
-	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
 	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
 				     skb_headlen(skb), DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+		if (net_ratelimit())
+			netdev_err(ndev, "TX DMA mapping error\n");
+		ndev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
 
 	for (ii = 0; ii < num_frag; ii++) {
 		if (++lp->tx_bd_tail >= lp->tx_bd_num)
@@ -727,6 +739,16 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 					     skb_frag_address(frag),
 					     skb_frag_size(frag),
 					     DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+			if (net_ratelimit())
+				netdev_err(ndev, "TX DMA mapping error\n");
+			ndev->stats.tx_dropped++;
+			axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
+					      NULL);
+			lp->tx_bd_tail = orig_tail_ptr;
+
+			return NETDEV_TX_OK;
+		}
 		cur_p->cntrl = skb_frag_size(frag);
 	}
 
@@ -807,6 +829,13 @@ static void axienet_recv(struct net_device *ndev)
 		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
 					     lp->max_frm_size,
 					     DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+			if (net_ratelimit())
+				netdev_err(ndev, "RX DMA mapping error\n");
+			dev_kfree_skb(new_skb);
+			return;
+		}
+
 		cur_p->cntrl = lp->max_frm_size;
 		cur_p->status = 0;
 		cur_p->skb = new_skb;
-- 
cgit v1.2.3-59-g8ed1b
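
The checks added here follow the general contract of the kernel DMA
API: a dma_map_single() result must be validated with
dma_mapping_error() before the address is handed to the device. A
minimal sketch of the required pattern, with an illustrative helper
name:

    #include <linux/dma-mapping.h>

    /* Map "buf" for device-bound TX and validate the handle. */
    static int foo_map_tx_buf(struct device *dev, void *buf, size_t len,
                              dma_addr_t *handle)
    {
            *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *handle))
                    return -ENOMEM; /* never hand *handle to hardware */

            return 0;
    }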
From d6349e3e14c7f7b7e5cfec09a873af8c192f1271 Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Tue, 24 Mar 2020 13:23:41 +0000
Subject: net: axienet: Mark eth_irq as optional

According to the DT binding, the Ethernet core interrupt is optional.
Use platform_get_irq_optional() to avoid the error message when the
IRQ is not specified.

Signed-off-by: Andre Przywara
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 736ac1b7a052..60c8cde365f8 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1851,7 +1851,7 @@ static int axienet_probe(struct platform_device *pdev)
 		lp->rx_irq = irq_of_parse_and_map(np, 1);
 		lp->tx_irq = irq_of_parse_and_map(np, 0);
 		of_node_put(np);
-		lp->eth_irq = platform_get_irq(pdev, 0);
+		lp->eth_irq = platform_get_irq_optional(pdev, 0);
 	} else {
 		/* Check for these resources directly on the Ethernet node. */
 		struct resource *res = platform_get_resource(pdev,
							     IORESOURCE_MEM, 1);
 		lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
 		lp->rx_irq = platform_get_irq(pdev, 1);
 		lp->tx_irq = platform_get_irq(pdev, 0);
-		lp->eth_irq = platform_get_irq(pdev, 2);
+		lp->eth_irq = platform_get_irq_optional(pdev, 2);
 	}
 	if (IS_ERR(lp->dma_regs)) {
 		dev_err(&pdev->dev, "could not map DMA regs\n");
-- 
cgit v1.2.3-59-g8ed1b

From c30cb8f0bec69d56e1fbc7fb65bd735c729a69e4 Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Tue, 24 Mar 2020 13:23:42 +0000
Subject: net: axienet: Drop MDIO interrupt registers from ethtools dump

Newer revisions of the IP don't have these registers. Since we don't
really use them, just drop them from the ethtool dump.

Signed-off-by: Andre Przywara
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/xilinx/xilinx_axienet.h      | 7 -------
 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 4 ----
 2 files changed, 11 deletions(-)

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 04e51af32178..fb7450ca5c53 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -165,13 +165,6 @@
 #define XAE_MDIO_MCR_OFFSET	0x00000504 /* MII Management Control */
 #define XAE_MDIO_MWD_OFFSET	0x00000508 /* MII Management Write Data */
 #define XAE_MDIO_MRD_OFFSET	0x0000050C /* MII Management Read Data */
-#define XAE_MDIO_MIS_OFFSET	0x00000600 /* MII Management Interrupt Status */
-/* MII Mgmt Interrupt Pending register offset */
-#define XAE_MDIO_MIP_OFFSET	0x00000620
-/* MII Management Interrupt Enable register offset */
-#define XAE_MDIO_MIE_OFFSET	0x00000640
-/* MII Management Interrupt Clear register offset. */
-#define XAE_MDIO_MIC_OFFSET	0x00000660
 #define XAE_UAW0_OFFSET		0x00000700 /* Unicast address word 0 */
 #define XAE_UAW1_OFFSET		0x00000704 /* Unicast address word 1 */
 #define XAE_FMI_OFFSET		0x00000708 /* Filter Mask Index */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 60c8cde365f8..217f58bed2c5 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1258,10 +1258,6 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
 	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
 	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
 	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
-	data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
-	data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
-	data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
-	data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
 	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
 	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
 	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
-- 
cgit v1.2.3-59-g8ed1b

From 2a9b65ea511cf301150181bef91bb0097dc5bf0b Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Tue, 24 Mar 2020 13:23:43 +0000
Subject: net: axienet: Add mii-tool support

mii-tool is useful for debugging, and all it requires to work is to
wire up the ioctl ops function pointer. Add this to the axienet driver
to enable mii-tool.

Signed-off-by: Andre Przywara
Reviewed-by: Andrew Lunn
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 217f58bed2c5..dc533d7a090c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1171,6 +1171,16 @@ static void axienet_poll_controller(struct net_device *ndev)
 }
 #endif
 
+static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct axienet_local *lp = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	return phylink_mii_ioctl(lp->phylink, rq, cmd);
+}
+
 static const struct net_device_ops axienet_netdev_ops = {
 	.ndo_open = axienet_open,
 	.ndo_stop = axienet_stop,
@@ -1178,6 +1188,7 @@ static const struct net_device_ops axienet_netdev_ops = {
 	.ndo_change_mtu	= axienet_change_mtu,
 	.ndo_set_mac_address = netdev_set_mac_address,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_do_ioctl = axienet_ioctl,
 	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = axienet_poll_controller,
-- 
cgit v1.2.3-59-g8ed1b
From 6a00d0dd3fcfa2ef200973479fbeee62f3681130 Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Tue, 24 Mar 2020 13:23:44 +0000
Subject: net: axienet: Wrap DMA pointer writes to prepare for 64 bit

Newer versions of the Xilinx DMA IP support buses with more than 32
address bits, by introducing an MSB word for the registers holding DMA
pointers (tail/current, RX/TX descriptor addresses). On IP configured
for more than 32 bits, it is also *required* to write both words, to
let the IP recognise this as a start condition for an MM2S request,
for instance.

Wrap the DMA pointer writes with a separate function, to add this
functionality later. For now we stick to the lower 32 bits.

Signed-off-by: Andre Przywara
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 26 ++++++++++++++---------
 1 file changed, 16 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index dc533d7a090c..e7469eb241ad 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -147,6 +147,12 @@ static inline void axienet_dma_out32(struct axienet_local *lp,
 	iowrite32(value, lp->dma_regs + reg);
 }
 
+static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
+				 dma_addr_t addr)
+{
+	axienet_dma_out32(lp, reg, lower_32_bits(addr));
+}
+
 /**
  * axienet_dma_bd_release - Release buffer descriptor rings
  * @ndev:	Pointer to the net_device structure
@@ -285,18 +291,18 @@ static int axienet_dma_bd_init(struct net_device *ndev)
 	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
 	 * halted state. This will make the Rx side ready for reception.
 	 */
-	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
 	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
 			  cr | XAXIDMA_CR_RUNSTOP_MASK);
-	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
-			  (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
 
 	/* Write to the RS (Run-stop) bit in the Tx channel control register.
 	 * Tx channel is now ready to run. But only after we write to the
 	 * tail pointer register that the Tx channel will start transmitting.
 	 */
-	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
 	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
 			  cr | XAXIDMA_CR_RUNSTOP_MASK);
@@ -757,7 +763,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 	/* Start the transfer */
-	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
+	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
 	if (++lp->tx_bd_tail >= lp->tx_bd_num)
 		lp->tx_bd_tail = 0;
 
@@ -849,7 +855,7 @@ static void axienet_recv(struct net_device *ndev)
 	ndev->stats.rx_bytes += size;
 
 	if (tail_p)
-		axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
 }
 
 /**
@@ -1671,18 +1677,18 @@ static void axienet_dma_err_handler(struct work_struct *work)
 	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
 	 * halted state. This will make the Rx side ready for reception.
 	 */
-	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
 	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
 			  cr | XAXIDMA_CR_RUNSTOP_MASK);
-	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
-			  (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
 
 	/* Write to the RS (Run-stop) bit in the Tx channel control register.
 	 * Tx channel is now ready to run. But only after we write to the
 	 * tail pointer register that the Tx channel will start transmitting
 	 */
-	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
 	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
 			  cr | XAXIDMA_CR_RUNSTOP_MASK);
-- 
cgit v1.2.3-59-g8ed1b

From 4e958f33ee8f404787711416fe0f78cce2b2f4e2 Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Tue, 24 Mar 2020 13:23:45 +0000
Subject: net: axienet: Upgrade descriptors to hold 64-bit addresses

Newer revisions of the AXI DMA IP (>= v7.1) support 64-bit addresses,
both for the descriptors themselves, as well as for the buffers they
are pointing to. This is realised by adding "MSB" words for the next
and phys pointers right behind the existing address word, now named
"LSB". These MSB words live in formerly reserved areas of the
descriptor.

If the hardware supports it, write both words when setting an address.
The buffer address is handled by two wrapper functions; the two
occasions where we set the next pointers are open coded.

For now this is guarded by a flag which we don't set yet.

Signed-off-by: Andre Przywara
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/xilinx/xilinx_axienet.h      |   9 +-
 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 113 +++++++++++++++-------
 2 files changed, 83 insertions(+), 39 deletions(-)

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index fb7450ca5c53..84c4c3655516 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -328,6 +328,7 @@
 #define XAE_FEATURE_PARTIAL_TX_CSUM	(1 << 1)
 #define XAE_FEATURE_FULL_RX_CSUM	(1 << 2)
 #define XAE_FEATURE_FULL_TX_CSUM	(1 << 3)
+#define XAE_FEATURE_DMA_64BIT		(1 << 4)
 
 #define XAE_NO_CSUM_OFFLOAD		0
 
@@ -340,9 +341,9 @@
 /**
  * struct axidma_bd - Axi Dma buffer descriptor layout
  * @next:         MM2S/S2MM Next Descriptor Pointer
- * @reserved1:    Reserved and not used
+ * @next_msb:     MM2S/S2MM Next Descriptor Pointer (high 32 bits)
  * @phys:         MM2S/S2MM Buffer Address
- * @reserved2:    Reserved and not used
+ * @phys_msb:     MM2S/S2MM Buffer Address (high 32 bits)
  * @reserved3:    Reserved and not used
  * @reserved4:    Reserved and not used
  * @cntrl:        MM2S/S2MM Control value
@@ -355,9 +356,9 @@
 */
 struct axidma_bd {
 	u32 next;	/* Physical address of next buffer descriptor */
-	u32 reserved1;
+	u32 next_msb;	/* high 32 bits for IP >= v7.1, reserved on older IP */
 	u32 phys;
-	u32 reserved2;
+	u32 phys_msb;	/* for IP >= v7.1, reserved for older IP */
 	u32 reserved3;
 	u32 reserved4;
 	u32 cntrl;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index e7469eb241ad..6ecd1bb5f81d 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -153,6 +153,25 @@ static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
 	axienet_dma_out32(lp, reg, lower_32_bits(addr));
 }
 
+static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
+			       struct axidma_bd *desc)
+{
+	desc->phys = lower_32_bits(addr);
+	if (lp->features & XAE_FEATURE_DMA_64BIT)
+		desc->phys_msb = upper_32_bits(addr);
+}
+
+static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
+				     struct axidma_bd *desc)
+{
+	dma_addr_t ret = desc->phys;
+
+	if (lp->features & XAE_FEATURE_DMA_64BIT)
+		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
+
+	return ret;
+}
+
 /**
  * axienet_dma_bd_release - Release buffer descriptor rings
  * @ndev:	Pointer to the net_device structure
@@ -176,6 +195,8 @@ static void axienet_dma_bd_release(struct net_device *ndev)
 		return;
 
 	for (i = 0; i < lp->rx_bd_num; i++) {
+		dma_addr_t phys;
+
 		/* A NULL skb means this descriptor has not been initialised
 		 * at all.
 		 */
@@ -188,9 +209,11 @@ static void axienet_dma_bd_release(struct net_device *ndev)
 		 * descriptor size, after it had been successfully allocated.
 		 * So a non-zero value in there means we need to unmap it.
 		 */
-		if (lp->rx_bd_v[i].cntrl)
-			dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
+		if (lp->rx_bd_v[i].cntrl) {
+			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
+			dma_unmap_single(ndev->dev.parent, phys,
 					 lp->max_frm_size, DMA_FROM_DEVICE);
+		}
 	}
 
 	dma_free_coherent(ndev->dev.parent,
@@ -235,29 +258,36 @@ static int axienet_dma_bd_init(struct net_device *ndev)
 		goto out;
 
 	for (i = 0; i < lp->tx_bd_num; i++) {
-		lp->tx_bd_v[i].next = lp->tx_bd_p +
-				      sizeof(*lp->tx_bd_v) *
-				      ((i + 1) % lp->tx_bd_num);
+		dma_addr_t addr = lp->tx_bd_p +
+				  sizeof(*lp->tx_bd_v) *
+				  ((i + 1) % lp->tx_bd_num);
+
+		lp->tx_bd_v[i].next = lower_32_bits(addr);
+		if (lp->features & XAE_FEATURE_DMA_64BIT)
+			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
 	}
 
 	for (i = 0; i < lp->rx_bd_num; i++) {
-		lp->rx_bd_v[i].next = lp->rx_bd_p +
-				      sizeof(*lp->rx_bd_v) *
-				      ((i + 1) % lp->rx_bd_num);
+		dma_addr_t addr;
+
+		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
+			((i + 1) % lp->rx_bd_num);
+		lp->rx_bd_v[i].next = lower_32_bits(addr);
+		if (lp->features & XAE_FEATURE_DMA_64BIT)
+			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
 
 		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
 		if (!skb)
 			goto out;
 
 		lp->rx_bd_v[i].skb = skb;
-		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
-						     skb->data,
-						     lp->max_frm_size,
-						     DMA_FROM_DEVICE);
-		if (dma_mapping_error(ndev->dev.parent, lp->rx_bd_v[i].phys)) {
+		addr = dma_map_single(ndev->dev.parent, skb->data,
+				      lp->max_frm_size, DMA_FROM_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, addr)) {
 			netdev_err(ndev, "DMA mapping error\n");
 			goto out;
 		}
+		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
 
 		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
 	}
@@ -574,6 +604,7 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
 	struct axidma_bd *cur_p;
 	int max_bds = nr_bds;
 	unsigned int status;
+	dma_addr_t phys;
 	int i;
 
 	if (max_bds == -1)
@@ -589,9 +620,10 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
 		if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
 			break;
 
-		dma_unmap_single(ndev->dev.parent, cur_p->phys,
-				(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
-				DMA_TO_DEVICE);
+		phys = desc_get_phys_addr(lp, cur_p);
+		dma_unmap_single(ndev->dev.parent, phys,
+				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
+				 DMA_TO_DEVICE);
 
 		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
 			dev_consume_skb_irq(cur_p->skb);
@@ -687,7 +719,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	u32 csum_start_off;
 	u32 csum_index_off;
 	skb_frag_t *frag;
-	dma_addr_t tail_p;
+	dma_addr_t tail_p, phys;
 	struct axienet_local *lp = netdev_priv(ndev);
 	struct axidma_bd *cur_p;
 	u32 orig_tail_ptr = lp->tx_bd_tail;
@@ -726,14 +758,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
 	}
 
-	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
-				     skb_headlen(skb), DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+	phys = dma_map_single(ndev->dev.parent, skb->data,
+			      skb_headlen(skb), DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
 		if (net_ratelimit())
 			netdev_err(ndev, "TX DMA mapping error\n");
 		ndev->stats.tx_dropped++;
 		return NETDEV_TX_OK;
 	}
+	desc_set_phys_addr(lp, phys, cur_p);
 	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
 
 	for (ii = 0; ii < num_frag; ii++) {
@@ -741,11 +774,11 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			lp->tx_bd_tail = 0;
 		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 		frag = &skb_shinfo(skb)->frags[ii];
-		cur_p->phys = dma_map_single(ndev->dev.parent,
-					     skb_frag_address(frag),
-					     skb_frag_size(frag),
-					     DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+		phys = dma_map_single(ndev->dev.parent,
+				      skb_frag_address(frag),
+				      skb_frag_size(frag),
+				      DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
 			if (net_ratelimit())
 				netdev_err(ndev, "TX DMA mapping error\n");
 			ndev->stats.tx_dropped++;
@@ -755,6 +788,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 			return NETDEV_TX_OK;
 		}
+		desc_set_phys_addr(lp, phys, cur_p);
 		cur_p->cntrl = skb_frag_size(frag);
 	}
 
@@ -793,10 +827,12 @@ static void axienet_recv(struct net_device *ndev)
 	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
 
 	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+		dma_addr_t phys;
+
 		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
 
-		dma_unmap_single(ndev->dev.parent, cur_p->phys,
-				 lp->max_frm_size,
+		phys = desc_get_phys_addr(lp, cur_p);
+		dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
 				 DMA_FROM_DEVICE);
 
 		skb = cur_p->skb;
@@ -832,15 +868,16 @@ static void axienet_recv(struct net_device *ndev)
 		if (!new_skb)
 			return;
 
-		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
-					     lp->max_frm_size,
-					     DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+		phys = dma_map_single(ndev->dev.parent, new_skb->data,
+				      lp->max_frm_size,
+				      DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
 			if (net_ratelimit())
 				netdev_err(ndev, "RX DMA mapping error\n");
 			dev_kfree_skb(new_skb);
 			return;
 		}
+		desc_set_phys_addr(lp, phys, cur_p);
 
 		cur_p->cntrl = lp->max_frm_size;
 		cur_p->status = 0;
@@ -885,7 +922,8 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
 		return IRQ_NONE;
 	if (status & XAXIDMA_IRQ_ERROR_MASK) {
 		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
-		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+		dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
+			(lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
 			(lp->tx_bd_v[lp->tx_bd_ci]).phys);
 
 		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
@@ -934,7 +972,8 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
 		return IRQ_NONE;
 	if (status & XAXIDMA_IRQ_ERROR_MASK) {
 		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
-		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+		dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
+			(lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
 			(lp->rx_bd_v[lp->rx_bd_ci]).phys);
 
 		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
@@ -1616,14 +1655,18 @@ static void axienet_dma_err_handler(struct work_struct *work)
 
 	for (i = 0; i < lp->tx_bd_num; i++) {
 		cur_p = &lp->tx_bd_v[i];
-		if (cur_p->cntrl)
-			dma_unmap_single(ndev->dev.parent, cur_p->phys,
+		if (cur_p->cntrl) {
+			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
+
+			dma_unmap_single(ndev->dev.parent, addr,
 					 (cur_p->cntrl &
 					  XAXIDMA_BD_CTRL_LENGTH_MASK),
 					 DMA_TO_DEVICE);
+		}
 		if (cur_p->skb)
 			dev_kfree_skb_irq(cur_p->skb);
 		cur_p->phys = 0;
+		cur_p->phys_msb = 0;
 		cur_p->cntrl = 0;
 		cur_p->status = 0;
 		cur_p->app0 = 0;
-- 
cgit v1.2.3-59-g8ed1b
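
One detail worth noting in desc_get_phys_addr() above: the MSB word is
shifted left by 16 twice instead of by 32 in one go. The commit message
does not spell out the reason, but the usual motivation for this idiom
is that dma_addr_t may be only 32 bits wide (when
CONFIG_ARCH_DMA_ADDR_T_64BIT is not set); a single 32-bit shift of a
32-bit type is undefined behaviour and provokes a compiler warning,
even though the guarded branch is unreachable on such configurations:

    /* Well-defined for both 32-bit and 64-bit dma_addr_t. When the type
     * is 32 bits wide this evaluates to 0 (and XAE_FEATURE_DMA_64BIT is
     * never set on such a system anyway); when it is 64 bits wide this
     * is equivalent to "<< 32".
     */
    ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;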
From f735c40ed93ccaeb52d026def47ac1a423df7133 Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Tue, 24 Mar 2020 13:23:46 +0000
Subject: net: axienet: Autodetect 64-bit DMA capability

When newer revisions of the Axienet IP are configured for a 64-bit bus,
we *need* to write to the MSB part of the address registers, otherwise
the IP won't recognise this as a DMA start condition. This is even true
when the actual DMA address comes from the lower 4 GB.

To autodetect this configuration, at probe time we write all 1's to
such an MSB register, and see if any bits stick. If this is configured
for a 32-bit bus, those MSB registers are RES0, so reading back 0
indicates that no MSB writes are necessary. On the other hand, reading
anything other than 0 indicates the need to write the MSB registers,
so we set the respective flag.

The actual DMA mask stays at 32 bits for now. To help bisecting, a
separate patch will enable allocations from higher addresses.

Signed-off-by: Andre Przywara
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/xilinx/xilinx_axienet.h      |  1 +
 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 26 +++++++++++++++++++++++
 2 files changed, 27 insertions(+)

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 84c4c3655516..fbaf3c987d9c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -161,6 +161,7 @@
 #define XAE_FCC_OFFSET		0x0000040C /* Flow Control Configuration */
 #define XAE_EMMC_OFFSET		0x00000410 /* EMAC mode configuration */
 #define XAE_PHYC_OFFSET		0x00000414 /* RGMII/SGMII configuration */
+#define XAE_ID_OFFSET		0x000004F8 /* Identification register */
 #define XAE_MDIO_MC_OFFSET	0x00000500 /* MII Management Config */
 #define XAE_MDIO_MCR_OFFSET	0x00000504 /* MII Management Control */
 #define XAE_MDIO_MWD_OFFSET	0x00000508 /* MII Management Write Data */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 6ecd1bb5f81d..a54a5c754da0 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -151,6 +151,9 @@ static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
 				 dma_addr_t addr)
 {
 	axienet_dma_out32(lp, reg, lower_32_bits(addr));
+
+	if (lp->features & XAE_FEATURE_DMA_64BIT)
+		axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
 }
 
 static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
@@ -1928,6 +1931,29 @@ static int axienet_probe(struct platform_device *pdev)
 		goto free_netdev;
 	}
 
+	/* Autodetect the need for 64-bit DMA pointers.
+	 * When the IP is configured for a bus width bigger than 32 bits,
+	 * writing the MSB registers is mandatory, even if they are all 0.
+	 * We can detect this case by writing all 1's to one such register
+	 * and see if that sticks: when the IP is configured for 32 bits
+	 * only, those registers are RES0.
+	 * Those MSB registers were introduced in IP v7.1, which we check first.
+	 */
+	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
+		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
+
+		iowrite32(0x0, desc);
+		if (ioread32(desc) == 0) {	/* sanity check */
+			iowrite32(0xffffffff, desc);
+			if (ioread32(desc) > 0) {
+				lp->features |= XAE_FEATURE_DMA_64BIT;
+				dev_info(&pdev->dev,
+					 "autodetected 64-bit DMA range\n");
+			}
+			iowrite32(0x0, desc);
+		}
+	}
+
 	/* Check for Ethernet core IRQ (optional) */
 	if (lp->eth_irq <= 0)
 		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
-- 
cgit v1.2.3-59-g8ed1b

From 5fff0151b3244dc1ef9df7a5d1c0f79eb42599d7 Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Tue, 24 Mar 2020 13:23:47 +0000
Subject: net: axienet: Allow DMA to beyond 4GB

With all DMA address accesses wrapped, we can actually support 64-bit
DMA if this option was chosen at IP integration time.

If the IP has been configured for an address width greater than 32
bits, we assume the full 64-bit DMA width is working. In practice this
will be limited by the actual system address bus width, which will
ideally be the same as the DMA IP address width. If this is not the
case, the actual width can still be configured using a dma-ranges
property in the parent of the MAC node.

This increases the DMA mask on those systems to let the kernel choose
buffers from memory at higher addresses.

Signed-off-by: Andre Przywara
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index a54a5c754da0..fa5dc2993520 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1780,6 +1780,7 @@ static int axienet_probe(struct platform_device *pdev)
 	struct net_device *ndev;
 	const void *mac_addr;
 	struct resource *ethres;
+	int addr_width = 32;
 	u32 value;
 
 	ndev = alloc_etherdev(sizeof(*lp));
@@ -1947,6 +1948,7 @@ static int axienet_probe(struct platform_device *pdev)
 			iowrite32(0xffffffff, desc);
 			if (ioread32(desc) > 0) {
 				lp->features |= XAE_FEATURE_DMA_64BIT;
+				addr_width = 64;
 				dev_info(&pdev->dev,
 					 "autodetected 64-bit DMA range\n");
 			}
@@ -1954,6 +1956,12 @@ static int axienet_probe(struct platform_device *pdev)
 		}
 	}
 
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
+	if (ret) {
+		dev_err(&pdev->dev, "No suitable DMA available\n");
+		goto free_netdev;
+	}
+
 	/* Check for Ethernet core IRQ (optional) */
 	if (lp->eth_irq <= 0)
 		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
-- 
cgit v1.2.3-59-g8ed1b
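
Taken together, the last two patches amount to a probe-time handshake:
first detect whether the IP wants MSB writes, then widen the DMA mask
accordingly, so buffers above 4 GB are only handed out when the IP can
actually address them. A condensed sketch of the combined flow;
detect_dma_64bit() is an illustrative name, not a function in the
driver, and the IP version check from the real code is omitted here:

    static int detect_dma_64bit(struct axienet_local *lp, struct device *dev)
    {
            /* MSB half of the TX current-descriptor register (LSB reg + 4) */
            void __iomem *msb = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
            int addr_width = 32;

            iowrite32(0xffffffff, msb);
            if (ioread32(msb)) {            /* bits stick: register exists */
                    lp->features |= XAE_FEATURE_DMA_64BIT;
                    addr_width = 64;
            }
            iowrite32(0x0, msb);

            /* Only now widen the mask to match the detected capability. */
            return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(addr_width));
    }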