Diffstat (limited to 'drivers/net/ethernet/xilinx/ll_temac_main.c')
-rw-r--r--	drivers/net/ethernet/xilinx/ll_temac_main.c	359
1 file changed, 236 insertions(+), 123 deletions(-)
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9461acec6f70..1066420d6a83 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -58,8 +58,11 @@
 
 #include "ll_temac.h"
 
-#define TX_BD_NUM   64
-#define RX_BD_NUM   128
+/* Descriptors defines for Tx and Rx DMA */
+#define TX_BD_NUM_DEFAULT		64
+#define RX_BD_NUM_DEFAULT		1024
+#define TX_BD_NUM_MAX			4096
+#define RX_BD_NUM_MAX			4096
 
 /* ---------------------------------------------------------------------
  * Low level register access functions
@@ -103,7 +106,7 @@ static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
  */
 #define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
 
-/**
+/*
 * temac_indirect_busywait - Wait for current indirect register access
 * to complete.
 */
@@ -114,11 +117,11 @@ int temac_indirect_busywait(struct temac_local *lp)
 	spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
 	if (WARN_ON(!hard_acs_rdy(lp)))
 		return -ETIMEDOUT;
-	else
-		return 0;
+
+	return 0;
 }
 
-/**
+/*
 * temac_indirect_in32 - Indirect register read access. This function
 * must be called without lp->indirect_lock being held.
 */
@@ -133,7 +136,7 @@ u32 temac_indirect_in32(struct temac_local *lp, int reg)
 	return val;
 }
 
-/**
+/*
 * temac_indirect_in32_locked - Indirect register read access. This
 * function must be called with lp->indirect_lock being held. Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
@@ -161,7 +164,7 @@ u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
 	return temac_ior(lp, XTE_LSW0_OFFSET);
 }
 
-/**
+/*
 * temac_indirect_out32 - Indirect register write access. This function
 * must be called without lp->indirect_lock being held.
 */
@@ -174,7 +177,7 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
 	spin_unlock_irqrestore(lp->indirect_lock, flags);
 }
 
-/**
+/*
 * temac_indirect_out32_locked - Indirect register write access. This
 * function must be called with lp->indirect_lock being held. Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
@@ -199,7 +202,7 @@ void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
 	WARN_ON(temac_indirect_busywait(lp));
 }
 
-/**
+/*
 * temac_dma_in32_* - Memory mapped DMA read, these function expects a
 * register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses. To be assigned to
@@ -215,7 +218,7 @@ static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
 	return ioread32(lp->sdma_regs + (reg << 2));
 }
 
-/**
+/*
 * temac_dma_out32_* - Memory mapped DMA read, these function expects
 * a register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses. To be assigned to
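As the comments above spell out, the *_locked accessors leave locking to the caller so that several indirect register accesses can share one critical section. A sketch of that calling pattern (the helper function is illustrative, not code from this driver):

    /* Sketch: read-modify-write of an indirect register under
     * lp->indirect_lock, following the *_locked calling convention
     * described in the comments above. Function name is illustrative.
     */
    static void example_indirect_rmw(struct temac_local *lp, u32 set_mask)
    {
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(lp->indirect_lock, flags);
        val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
        temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, val | set_mask);
        spin_unlock_irqrestore(lp->indirect_lock, flags);
    }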
@@ -237,7 +240,7 @@ static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
 */
 #ifdef CONFIG_PPC_DCR
 
-/**
+/*
 * temac_dma_dcr_in32 - DCR based DMA read
 */
 static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
@@ -245,7 +248,7 @@ static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
 	return dcr_read(lp->sdma_dcrs, reg);
 }
 
-/**
+/*
 * temac_dma_dcr_out32 - DCR based DMA write
 */
 static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
@@ -253,12 +256,12 @@ static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
 	dcr_write(lp->sdma_dcrs, reg, value);
 }
 
-/**
+/*
 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
 * I/O functions
 */
 static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
-			  struct device_node *np)
+			   struct device_node *np)
 {
 	unsigned int dcrs;
 
@@ -283,14 +286,14 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
 * such as with MicroBlaze and x86
 */
 static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
-			  struct device_node *np)
+			   struct device_node *np)
 {
 	return -1;
 }
 
 #endif
 
-/**
+/*
 * temac_dma_bd_release - Release buffer descriptor rings
 */
 static void temac_dma_bd_release(struct net_device *ndev)
@@ -301,26 +304,24 @@ static void temac_dma_bd_release(struct net_device *ndev)
 	/* Reset Local Link (DMA) */
 	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
 
-	for (i = 0; i < RX_BD_NUM; i++) {
+	for (i = 0; i < lp->rx_bd_num; i++) {
 		if (!lp->rx_skb[i])
 			break;
-		else {
-			dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
-					XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
-			dev_kfree_skb(lp->rx_skb[i]);
-		}
+		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
+				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
+		dev_kfree_skb(lp->rx_skb[i]);
 	}
 	if (lp->rx_bd_v)
 		dma_free_coherent(ndev->dev.parent,
-				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
-				  lp->rx_bd_v, lp->rx_bd_p);
+				  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+				  lp->rx_bd_v, lp->rx_bd_p);
 	if (lp->tx_bd_v)
 		dma_free_coherent(ndev->dev.parent,
-				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
-				  lp->tx_bd_v, lp->tx_bd_p);
+				  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+				  lp->tx_bd_v, lp->tx_bd_p);
 }
 
-/**
+/*
 * temac_dma_bd_init - Setup buffer descriptor rings
 */
 static int temac_dma_bd_init(struct net_device *ndev)
@@ -330,36 +331,37 @@ static int temac_dma_bd_init(struct net_device *ndev)
 	dma_addr_t skb_dma_addr;
 	int i;
 
-	lp->rx_skb = devm_kcalloc(&ndev->dev, RX_BD_NUM, sizeof(*lp->rx_skb),
-				  GFP_KERNEL);
+	lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
+				  sizeof(*lp->rx_skb), GFP_KERNEL);
 	if (!lp->rx_skb)
 		goto out;
 
 	/* allocate the tx and rx ring buffer descriptors. */
 	/* returns a virtual address and a physical address. */
 	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
-					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 					 &lp->tx_bd_p, GFP_KERNEL);
 	if (!lp->tx_bd_v)
 		goto out;
 
 	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
-					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 					 &lp->rx_bd_p, GFP_KERNEL);
 	if (!lp->rx_bd_v)
 		goto out;
 
-	for (i = 0; i < TX_BD_NUM; i++) {
+	for (i = 0; i < lp->tx_bd_num; i++) {
 		lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
-				+ sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM));
+				+ sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
 	}
 
-	for (i = 0; i < RX_BD_NUM; i++) {
+	for (i = 0; i < lp->rx_bd_num; i++) {
 		lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
-				+ sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM));
+				+ sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));
 
-		skb = netdev_alloc_skb_ip_align(ndev,
-						XTE_MAX_JUMBO_FRAME_SIZE);
+		skb = __netdev_alloc_skb_ip_align(ndev,
+						  XTE_MAX_JUMBO_FRAME_SIZE,
+						  GFP_KERNEL);
 		if (!skb)
 			goto out;
@@ -376,21 +378,22 @@ static int temac_dma_bd_init(struct net_device *ndev)
 	}
 
 	/* Configure DMA channel (irq setup) */
-	lp->dma_out(lp, TX_CHNL_CTRL, lp->tx_chnl_ctrl |
+	lp->dma_out(lp, TX_CHNL_CTRL,
+		    lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
 		    0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
 		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
 		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
-	lp->dma_out(lp, RX_CHNL_CTRL, lp->rx_chnl_ctrl |
+	lp->dma_out(lp, RX_CHNL_CTRL,
+		    lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
 		    CHNL_CTRL_IRQ_IOE |
 		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
 		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
 
 	/* Init descriptor indexes */
 	lp->tx_bd_ci = 0;
-	lp->tx_bd_next = 0;
 	lp->tx_bd_tail = 0;
 	lp->rx_bd_ci = 0;
-	lp->rx_bd_tail = RX_BD_NUM - 1;
+	lp->rx_bd_tail = lp->rx_bd_num - 1;
 
 	/* Enable RX DMA transfers */
 	wmb();
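The two dma_out() calls above pack the new coalesce fields into fixed positions of the channel-control word: the delay timer into bits 31:24 and the frame count into bits 23:16, with the CHNL_CTRL_IRQ_* enables in the low bits. As a sketch (the helper name is illustrative, not part of the driver):

    /* Sketch: packing of TX_CHNL_CTRL/RX_CHNL_CTRL as written in
     * temac_dma_bd_init() above.
     */
    static u32 example_chnl_ctrl(u8 delay, u8 count, u32 irq_flags)
    {
        return (u32)delay << 24 |   /* IRQ delay timer, bits 31:24 */
               (u32)count << 16 |   /* IRQ coalesce count, bits 23:16 */
               irq_flags;           /* CHNL_CTRL_IRQ_* enable bits */
    }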
@@ -425,7 +428,8 @@ static void temac_do_set_mac_address(struct net_device *ndev)
 				    (ndev->dev_addr[2] << 16) |
 				    (ndev->dev_addr[3] << 24));
 	/* There are reserved bits in EUAW1
-	 * so don't affect them Set MAC bits [47:32] in EUAW1 */
+	 * so don't affect them Set MAC bits [47:32] in EUAW1
+	 */
 	temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
 				    (ndev->dev_addr[4] & 0x000000ff) |
 				    (ndev->dev_addr[5] << 8));
@@ -434,7 +438,7 @@ static int temac_init_mac_address(struct net_device *ndev, const void *address)
 {
-	ether_addr_copy(ndev->dev_addr, address);
+	eth_hw_addr_set(ndev, address);
 	if (!is_valid_ether_addr(ndev->dev_addr))
 		eth_hw_addr_random(ndev);
 	temac_do_set_mac_address(ndev);
@@ -447,7 +451,7 @@ static int temac_set_mac_address(struct net_device *ndev, void *p)
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
-	memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
+	eth_hw_addr_set(ndev, addr->sa_data);
 	temac_do_set_mac_address(ndev);
 	return 0;
 }
@@ -525,71 +529,71 @@ static struct temac_option {
 	{
 		.opt = XTE_OPTION_JUMBO,
 		.reg = XTE_RXC1_OFFSET,
-		.m_or =XTE_RXC1_RXJMBO_MASK,
+		.m_or = XTE_RXC1_RXJMBO_MASK,
 	},
 	/* Turn on VLAN packet support for both Rx and Tx */
 	{
 		.opt = XTE_OPTION_VLAN,
 		.reg = XTE_TXC_OFFSET,
-		.m_or =XTE_TXC_TXVLAN_MASK,
+		.m_or = XTE_TXC_TXVLAN_MASK,
 	},
 	{
 		.opt = XTE_OPTION_VLAN,
 		.reg = XTE_RXC1_OFFSET,
-		.m_or =XTE_RXC1_RXVLAN_MASK,
+		.m_or = XTE_RXC1_RXVLAN_MASK,
 	},
 	/* Turn on FCS stripping on receive packets */
 	{
 		.opt = XTE_OPTION_FCS_STRIP,
 		.reg = XTE_RXC1_OFFSET,
-		.m_or =XTE_RXC1_RXFCS_MASK,
+		.m_or = XTE_RXC1_RXFCS_MASK,
 	},
 	/* Turn on FCS insertion on transmit packets */
 	{
 		.opt = XTE_OPTION_FCS_INSERT,
 		.reg = XTE_TXC_OFFSET,
-		.m_or =XTE_TXC_TXFCS_MASK,
+		.m_or = XTE_TXC_TXFCS_MASK,
 	},
 	/* Turn on length/type field checking on receive packets */
 	{
 		.opt = XTE_OPTION_LENTYPE_ERR,
 		.reg = XTE_RXC1_OFFSET,
-		.m_or =XTE_RXC1_RXLT_MASK,
+		.m_or = XTE_RXC1_RXLT_MASK,
 	},
 	/* Turn on flow control */
 	{
 		.opt = XTE_OPTION_FLOW_CONTROL,
 		.reg = XTE_FCC_OFFSET,
-		.m_or =XTE_FCC_RXFLO_MASK,
+		.m_or = XTE_FCC_RXFLO_MASK,
 	},
 	/* Turn on flow control */
 	{
 		.opt = XTE_OPTION_FLOW_CONTROL,
 		.reg = XTE_FCC_OFFSET,
-		.m_or =XTE_FCC_TXFLO_MASK,
+		.m_or = XTE_FCC_TXFLO_MASK,
 	},
 	/* Turn on promiscuous frame filtering (all frames are received ) */
 	{
 		.opt = XTE_OPTION_PROMISC,
 		.reg = XTE_AFM_OFFSET,
-		.m_or =XTE_AFM_EPPRM_MASK,
+		.m_or = XTE_AFM_EPPRM_MASK,
 	},
 	/* Enable transmitter if not already enabled */
 	{
 		.opt = XTE_OPTION_TXEN,
 		.reg = XTE_TXC_OFFSET,
-		.m_or =XTE_TXC_TXEN_MASK,
+		.m_or = XTE_TXC_TXEN_MASK,
 	},
 	/* Enable receiver? */
 	{
 		.opt = XTE_OPTION_RXEN,
 		.reg = XTE_RXC1_OFFSET,
-		.m_or =XTE_RXC1_RXEN_MASK,
+		.m_or = XTE_RXC1_RXEN_MASK,
 	},
 	{}
 };
 
-/**
+/*
 * temac_setoptions
 */
 static u32 temac_setoptions(struct net_device *ndev, u32 options)
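temac_setoptions() consumes the table above, applying each entry's m_or mask when the corresponding option bit is requested. Roughly (a sketch of the table-driven pattern, assuming the array is named temac_options; the literal function body differs):

    /* Sketch: table-driven option application over the temac_option
     * table above, under the indirect-access lock.
     */
    static u32 example_setoptions(struct temac_local *lp, u32 options)
    {
        struct temac_option *tp = &temac_options[0];
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(lp->indirect_lock, flags);
        while (tp->opt) {
            reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
            if (options & tp->opt)
                reg |= tp->m_or;
            temac_indirect_out32_locked(lp, tp->reg, reg);
            tp++;
        }
        spin_unlock_irqrestore(lp->indirect_lock, flags);
        return 0;
    }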
@@ -636,7 +640,7 @@ static void temac_device_reset(struct net_device *ndev)
 		udelay(1);
 		if (--timeout == 0) {
 			dev_err(&ndev->dev,
-				"temac_device_reset RX reset timeout!!\n");
+				"%s RX reset timeout!!\n", __func__);
 			break;
 		}
 	}
@@ -648,7 +652,7 @@ static void temac_device_reset(struct net_device *ndev)
 		udelay(1);
 		if (--timeout == 0) {
 			dev_err(&ndev->dev,
-				"temac_device_reset TX reset timeout!!\n");
+				"%s TX reset timeout!!\n", __func__);
 			break;
 		}
 	}
@@ -667,7 +671,7 @@ static void temac_device_reset(struct net_device *ndev)
 		udelay(1);
 		if (--timeout == 0) {
 			dev_err(&ndev->dev,
-				"temac_device_reset DMA reset timeout!!\n");
+				"%s DMA reset timeout!!\n", __func__);
 			break;
 		}
 	}
@@ -675,7 +679,7 @@ static void temac_device_reset(struct net_device *ndev)
 
 	if (temac_dma_bd_init(ndev)) {
 		dev_err(&ndev->dev,
-			"temac_device_reset descriptor allocation failed\n");
+			"%s descriptor allocation failed\n", __func__);
 	}
 
 	spin_lock_irqsave(lp->indirect_lock, flags);
@@ -686,7 +690,8 @@ static void temac_device_reset(struct net_device *ndev)
 	spin_unlock_irqrestore(lp->indirect_lock, flags);
 
 	/* Sync default options with HW
-	 * but leave receiver and transmitter disabled. */
+	 * but leave receiver and transmitter disabled.
+	 */
 	temac_setoptions(ndev,
 			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
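The RX, TX and DMA reset paths above all poll with the same bounded busy-wait idiom. Condensed into one hedged sketch (the helper and its timeout bound are illustrative, not driver code):

    /* Sketch of the reset polling idiom repeated above: spin on a
     * busy bit with udelay(1) and a bounded iteration count.
     */
    static int example_wait_reset(struct temac_local *lp, int reg, u32 busy)
    {
        int timeout = 1000;    /* illustrative bound */

        while (temac_indirect_in32(lp, reg) & busy) {
            udelay(1);
            if (--timeout == 0)
                return -ETIMEDOUT;
        }
        return 0;
    }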
@@ -718,9 +723,15 @@ static void temac_adjust_link(struct net_device *ndev)
 		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;
 
 		switch (phy->speed) {
-		case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
-		case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
-		case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
+		case SPEED_1000:
+			mii_speed |= XTE_EMCFG_LINKSPD_1000;
+			break;
+		case SPEED_100:
+			mii_speed |= XTE_EMCFG_LINKSPD_100;
+			break;
+		case SPEED_10:
+			mii_speed |= XTE_EMCFG_LINKSPD_10;
+			break;
 		}
 
 		/* Write new speed setting out to TEMAC */
@@ -770,12 +781,15 @@ static void temac_start_xmit_done(struct net_device *ndev)
 	stat = be32_to_cpu(cur_p->app0);
 
 	while (stat & STS_CTRL_APP0_CMPLT) {
+		/* Make sure that the other fields are read after bd is
+		 * released by dma
+		 */
+		rmb();
 		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
 				 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
 		skb = (struct sk_buff *)ptr_from_txbd(cur_p);
 		if (skb)
 			dev_consume_skb_irq(skb);
-		cur_p->app0 = 0;
 		cur_p->app1 = 0;
 		cur_p->app2 = 0;
 		cur_p->app3 = 0;
@@ -784,8 +798,14 @@ static void temac_start_xmit_done(struct net_device *ndev)
 		ndev->stats.tx_packets++;
 		ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
 
+		/* app0 must be visible last, as it is used to flag
+		 * availability of the bd
+		 */
+		smp_mb();
+		cur_p->app0 = 0;
+
 		lp->tx_bd_ci++;
-		if (lp->tx_bd_ci >= TX_BD_NUM)
+		if (lp->tx_bd_ci >= lp->tx_bd_num)
 			lp->tx_bd_ci = 0;
 
 		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
@@ -810,8 +830,11 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
 		if (cur_p->app0)
 			return NETDEV_TX_BUSY;
 
+		/* Make sure to read next bd app0 after this one */
+		rmb();
+
 		tail++;
-		if (tail >= TX_BD_NUM)
+		if (tail >= lp->tx_bd_num)
 			tail = 0;
 
 		cur_p = &lp->tx_bd_v[tail];
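Every TX_BD_NUM/RX_BD_NUM use converted above keeps the same circular-ring arithmetic: indexes advance modulo the ring size, and occupancy is the signed distance between a consumer index and the tail, wrapped once. ll_temac_recv_buffers_available() further down shows the RX variant; as a standalone sketch (names are illustrative):

    /* Sketch: descriptors available between consumer index ci and
     * tail in a ring of ring_size entries, per the RX computation
     * in ll_temac_recv_buffers_available() below.
     */
    static int example_ring_available(int ci, int tail, int ring_size)
    {
        int available = 1 + tail - ci;

        if (available <= 0)
            available += ring_size;
        return available;
    }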
@@ -826,14 +849,13 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct temac_local *lp = netdev_priv(ndev);
 	struct cdmac_bd *cur_p;
-	dma_addr_t start_p, tail_p, skb_dma_addr;
+	dma_addr_t tail_p, skb_dma_addr;
 	int ii;
 	unsigned long num_frag;
 	skb_frag_t *frag;
 
 	num_frag = skb_shinfo(skb)->nr_frags;
 	frag = &skb_shinfo(skb)->frags[0];
-	start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 
 	if (temac_check_tx_bd_space(lp, num_frag + 1)) {
@@ -846,7 +868,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		smp_mb();
 
 		/* Space might have just been freed - check again */
-		if (temac_check_tx_bd_space(lp, num_frag))
+		if (temac_check_tx_bd_space(lp, num_frag + 1))
 			return NETDEV_TX_BUSY;
 
 		netif_wake_queue(ndev);
@@ -873,10 +895,9 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		return NETDEV_TX_OK;
 	}
 	cur_p->phys = cpu_to_be32(skb_dma_addr);
-	ptr_to_txbd((void *)skb, cur_p);
 
 	for (ii = 0; ii < num_frag; ii++) {
-		if (++lp->tx_bd_tail >= TX_BD_NUM)
+		if (++lp->tx_bd_tail >= lp->tx_bd_num)
 			lp->tx_bd_tail = 0;
 
 		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -886,7 +907,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 					      DMA_TO_DEVICE);
 		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
 			if (--lp->tx_bd_tail < 0)
-				lp->tx_bd_tail = TX_BD_NUM - 1;
+				lp->tx_bd_tail = lp->tx_bd_num - 1;
 			cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 			while (--ii >= 0) {
 				--frag;
@@ -895,7 +916,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 						       skb_frag_size(frag),
 						       DMA_TO_DEVICE);
 				if (--lp->tx_bd_tail < 0)
-					lp->tx_bd_tail = TX_BD_NUM - 1;
+					lp->tx_bd_tail = lp->tx_bd_num - 1;
 				cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 			}
 			dma_unmap_single(ndev->dev.parent,
@@ -912,9 +933,14 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
 
+	/* Mark last fragment with skb address, so it can be consumed
+	 * in temac_start_xmit_done()
+	 */
+	ptr_to_txbd((void *)skb, cur_p);
+
 	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 	lp->tx_bd_tail++;
-	if (lp->tx_bd_tail >= TX_BD_NUM)
+	if (lp->tx_bd_tail >= lp->tx_bd_num)
 		lp->tx_bd_tail = 0;
 
 	skb_tx_timestamp(skb);
@@ -923,6 +949,9 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	wmb();
 	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
 
+	if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+		netif_stop_queue(ndev);
+
 	return NETDEV_TX_OK;
 }
@@ -934,7 +963,7 @@ static int ll_temac_recv_buffers_available(struct temac_local *lp)
 		return 0;
 	available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
 	if (available <= 0)
-		available += RX_BD_NUM;
+		available += lp->rx_bd_num;
 	return available;
 }
@@ -984,9 +1013,8 @@ static void ll_temac_recv(struct net_device *ndev)
 		if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
 		    (skb->protocol == htons(ETH_P_IP)) &&
 		    (skb->len > 64)) {
-
 			/* Convert from device endianness (be32) to cpu
-			 * endiannes, and if necessary swap the bytes
+			 * endianness, and if necessary swap the bytes
 			 * (back) for proper IP checksum byte order
 			 * (be16).
 			 */
@@ -1003,7 +1031,7 @@ static void ll_temac_recv(struct net_device *ndev)
 		ndev->stats.rx_bytes += length;
 
 		rx_bd = lp->rx_bd_ci;
-		if (++lp->rx_bd_ci >= RX_BD_NUM)
+		if (++lp->rx_bd_ci >= lp->rx_bd_num)
 			lp->rx_bd_ci = 0;
 	} while (rx_bd != lp->rx_bd_tail);
@@ -1034,7 +1062,7 @@ static void ll_temac_recv(struct net_device *ndev)
 		dma_addr_t skb_dma_addr;
 
 		rx_bd = lp->rx_bd_tail + 1;
-		if (rx_bd >= RX_BD_NUM)
+		if (rx_bd >= lp->rx_bd_num)
 			rx_bd = 0;
 		bd = &lp->rx_bd_v[rx_bd];
@@ -1215,7 +1243,7 @@ static const struct net_device_ops temac_netdev_ops = {
 	.ndo_set_rx_mode = temac_set_multicast_list,
 	.ndo_set_mac_address = temac_set_mac_address,
 	.ndo_validate_addr = eth_validate_addr,
-	.ndo_do_ioctl = phy_do_ioctl_running,
+	.ndo_eth_ioctl = phy_do_ioctl_running,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = temac_poll_controller,
 #endif
@@ -1250,13 +1278,108 @@ static const struct attribute_group temac_attr_group = {
 	.attrs = temac_device_attrs,
 };
 
-/* ethtool support */
+/* ---------------------------------------------------------------------
+ * ethtool support
+ */
+
+static void
+ll_temac_ethtools_get_ringparam(struct net_device *ndev,
+				struct ethtool_ringparam *ering,
+				struct kernel_ethtool_ringparam *kernel_ering,
+				struct netlink_ext_ack *extack)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+
+	ering->rx_max_pending = RX_BD_NUM_MAX;
+	ering->rx_mini_max_pending = 0;
+	ering->rx_jumbo_max_pending = 0;
+	ering->tx_max_pending = TX_BD_NUM_MAX;
+	ering->rx_pending = lp->rx_bd_num;
+	ering->rx_mini_pending = 0;
+	ering->rx_jumbo_pending = 0;
+	ering->tx_pending = lp->tx_bd_num;
+}
+
+static int
+ll_temac_ethtools_set_ringparam(struct net_device *ndev,
+				struct ethtool_ringparam *ering,
+				struct kernel_ethtool_ringparam *kernel_ering,
+				struct netlink_ext_ack *extack)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+
+	if (ering->rx_pending > RX_BD_NUM_MAX ||
+	    ering->rx_mini_pending ||
+	    ering->rx_jumbo_pending ||
+	    ering->rx_pending > TX_BD_NUM_MAX)
+		return -EINVAL;
+
+	if (netif_running(ndev))
+		return -EBUSY;
+
+	lp->rx_bd_num = ering->rx_pending;
+	lp->tx_bd_num = ering->tx_pending;
+	return 0;
+}
+
+static int
+ll_temac_ethtools_get_coalesce(struct net_device *ndev,
+			       struct ethtool_coalesce *ec,
+			       struct kernel_ethtool_coalesce *kernel_coal,
+			       struct netlink_ext_ack *extack)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+
+	ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
+	ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
+	ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
+	ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
+	return 0;
+}
+
+static int
+ll_temac_ethtools_set_coalesce(struct net_device *ndev,
+			       struct ethtool_coalesce *ec,
+			       struct kernel_ethtool_coalesce *kernel_coal,
+			       struct netlink_ext_ack *extack)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		netdev_err(ndev,
+			   "Please stop netif before applying configuration\n");
+		return -EFAULT;
+	}
+
+	if (ec->rx_max_coalesced_frames)
+		lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
+	if (ec->tx_max_coalesced_frames)
+		lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
+	/* With typical LocalLink clock speed of 200 MHz and
+	 * C_PRESCALAR=1023, each delay count corresponds to 5.12 us.
+	 */
+	if (ec->rx_coalesce_usecs)
+		lp->coalesce_delay_rx =
+			min(255U, (ec->rx_coalesce_usecs * 100) / 512);
+	if (ec->tx_coalesce_usecs)
+		lp->coalesce_delay_tx =
+			min(255U, (ec->tx_coalesce_usecs * 100) / 512);
+
+	return 0;
+}
+
 static const struct ethtool_ops temac_ethtool_ops = {
+	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+				     ETHTOOL_COALESCE_MAX_FRAMES,
 	.nway_reset = phy_ethtool_nway_reset,
 	.get_link = ethtool_op_get_link,
 	.get_ts_info = ethtool_op_get_ts_info,
 	.get_link_ksettings = phy_ethtool_get_link_ksettings,
 	.set_link_ksettings = phy_ethtool_set_link_ksettings,
+	.get_ringparam = ll_temac_ethtools_get_ringparam,
+	.set_ringparam = ll_temac_ethtools_set_ringparam,
+	.get_coalesce = ll_temac_ethtools_get_coalesce,
+	.set_coalesce = ll_temac_ethtools_set_coalesce,
 };
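These ops are reached through the standard ethtool plumbing, e.g. `ethtool -g`/`-G` and `ethtool -c`/`-C` (note both set handlers refuse a running interface). The get path can also be exercised directly over SIOCETHTOOL; a minimal userspace sketch, where "eth0" is an example interface name:

    /* Minimal userspace sketch: query ring sizes via SIOCETHTOOL,
     * which for this driver lands in ll_temac_ethtools_get_ringparam().
     */
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>
    #include <net/if.h>
    #include <unistd.h>

    int main(void)
    {
        struct ethtool_ringparam ering = { .cmd = ETHTOOL_GRINGPARAM };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&ering;
        if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
            printf("rx %u/%u tx %u/%u\n",
                   ering.rx_pending, ering.rx_max_pending,
                   ering.tx_pending, ering.tx_max_pending);
        close(fd);
        return 0;
    }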
 
 static int temac_probe(struct platform_device *pdev)
@@ -1265,8 +1388,7 @@
 {
 	struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
 	struct temac_local *lp;
 	struct net_device *ndev;
-	struct resource *res;
-	const void *addr;
+	u8 addr[ETH_ALEN];
 	__be32 *p;
 	bool little_endian;
 	int rc = 0;
@@ -1300,6 +1422,8 @@ static int temac_probe(struct platform_device *pdev)
 	lp->ndev = ndev;
 	lp->dev = &pdev->dev;
 	lp->options = XTE_OPTION_DEFAULTS;
+	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
+	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
 	spin_lock_init(&lp->rx_lock);
 	INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
@@ -1315,16 +1439,16 @@ static int temac_probe(struct platform_device *pdev)
 		lp->indirect_lock = devm_kmalloc(&pdev->dev,
 						 sizeof(*lp->indirect_lock),
 						 GFP_KERNEL);
+		if (!lp->indirect_lock)
+			return -ENOMEM;
 		spin_lock_init(lp->indirect_lock);
 	}
 
 	/* map device registers */
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	lp->regs = devm_ioremap(&pdev->dev, res->start,
-				resource_size(res));
+	lp->regs = devm_platform_ioremap_resource_byname(pdev, 0);
 	if (IS_ERR(lp->regs)) {
 		dev_err(&pdev->dev, "could not map TEMAC registers\n");
-		return PTR_ERR(lp->regs);
+		return -ENOMEM;
 	}
@@ -1364,6 +1488,14 @@ static int temac_probe(struct platform_device *pdev)
 	/* Can checksum TCP/UDP over IPv4. */
 	ndev->features |= NETIF_F_IP_CSUM;
 
+	/* Defaults for IRQ delay/coalescing setup. These are
+	 * configuration values, so does not belong in device-tree.
+	 */
+	lp->coalesce_delay_tx = 0x10;
+	lp->coalesce_count_tx = 0x22;
+	lp->coalesce_delay_rx = 0xff;
+	lp->coalesce_count_rx = 0x07;
+
 	/* Setup LocalLink DMA */
 	if (temac_np) {
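Given the 5.12 us per delay count stated in ll_temac_ethtools_set_coalesce(), the probe-time defaults above work out to roughly 81 us / 34 frames on TX and 1305 us / 7 frames on RX. The conversion, as a sketch (helper names are illustrative):

    /* Sketch: delay-count <-> microseconds conversion used by the
     * coalesce handlers above (5.12 us per count, i.e. *512/100).
     */
    static unsigned int example_count_to_usecs(u8 count)
    {
        return (count * 512) / 100;    /* 0x10 -> 81 us, 0xff -> 1305 us */
    }

    static u8 example_usecs_to_count(unsigned int usecs)
    {
        return min(255U, (usecs * 100) / 512);    /* 500 us -> 97 */
    }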
@@ -1388,7 +1520,7 @@ static int temac_probe(struct platform_device *pdev)
 			of_node_put(dma_np);
 			return PTR_ERR(lp->sdma_regs);
 		}
-		if (of_get_property(dma_np, "little-endian", NULL)) {
+		if (of_property_read_bool(dma_np, "little-endian")) {
 			lp->dma_in = temac_dma_in32_le;
 			lp->dma_out = temac_dma_out32_le;
 		} else {
@@ -1402,21 +1534,11 @@ static int temac_probe(struct platform_device *pdev)
 		lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
 		lp->tx_irq = irq_of_parse_and_map(dma_np, 1);
 
-		/* Use defaults for IRQ delay/coalescing setup. These
-		 * are configuration values, so does not belong in
-		 * device-tree.
-		 */
-		lp->tx_chnl_ctrl = 0x10220000;
-		lp->rx_chnl_ctrl = 0xff070000;
-		lp->coalesce_count_rx = 0x07;
-
 		/* Finished with the DMA node; drop the reference */
 		of_node_put(dma_np);
 	} else if (pdata) {
 		/* 2nd memory resource specifies DMA registers */
-		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-		lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
-					     resource_size(res));
+		lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
 		if (IS_ERR(lp->sdma_regs)) {
 			dev_err(&pdev->dev,
 				"could not map DMA registers\n");
@@ -1435,37 +1557,28 @@ static int temac_probe(struct platform_device *pdev)
 		lp->tx_irq = platform_get_irq(pdev, 1);
 
 		/* IRQ delay/coalescing setup */
-		if (pdata->tx_irq_timeout || pdata->tx_irq_count)
-			lp->tx_chnl_ctrl = (pdata->tx_irq_timeout << 24) |
-				(pdata->tx_irq_count << 16);
-		else
-			lp->tx_chnl_ctrl = 0x10220000;
+		if (pdata->tx_irq_timeout || pdata->tx_irq_count) {
+			lp->coalesce_delay_tx = pdata->tx_irq_timeout;
+			lp->coalesce_count_tx = pdata->tx_irq_count;
+		}
 		if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
-			lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
-				(pdata->rx_irq_count << 16);
+			lp->coalesce_delay_rx = pdata->rx_irq_timeout;
 			lp->coalesce_count_rx = pdata->rx_irq_count;
-		} else {
-			lp->rx_chnl_ctrl = 0xff070000;
-			lp->coalesce_count_rx = 0x07;
 		}
 	}
 
 	/* Error handle returned DMA RX and TX interrupts */
-	if (lp->rx_irq < 0) {
-		if (lp->rx_irq != -EPROBE_DEFER)
-			dev_err(&pdev->dev, "could not get DMA RX irq\n");
-		return lp->rx_irq;
-	}
-	if (lp->tx_irq < 0) {
-		if (lp->tx_irq != -EPROBE_DEFER)
-			dev_err(&pdev->dev, "could not get DMA TX irq\n");
-		return lp->tx_irq;
-	}
+	if (lp->rx_irq < 0)
+		return dev_err_probe(&pdev->dev, lp->rx_irq,
+				     "could not get DMA RX irq\n");
+	if (lp->tx_irq < 0)
+		return dev_err_probe(&pdev->dev, lp->tx_irq,
+				     "could not get DMA TX irq\n");
 
 	if (temac_np) {
 		/* Retrieve the MAC address */
-		addr = of_get_mac_address(temac_np);
-		if (IS_ERR(addr)) {
+		rc = of_get_mac_address(temac_np, addr);
+		if (rc) {
 			dev_err(&pdev->dev, "could not find MAC address\n");
 			return -ENODEV;
 		}