Diffstat (limited to 'drivers/net/ethernet/broadcom/tg3.c')
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c  298
1 file changed, 151 insertions(+), 147 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bf4074167d6a..cf36312ac5ac 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -194,7 +194,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
#else
-#define TG3_RX_OFFSET(tp) 0
+#define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
@@ -1706,18 +1706,12 @@ static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
u8 cap = 0;
- if (lcladv & ADVERTISE_1000XPAUSE) {
- if (lcladv & ADVERTISE_1000XPSE_ASYM) {
- if (rmtadv & LPA_1000XPAUSE)
- cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
- else if (rmtadv & LPA_1000XPAUSE_ASYM)
- cap = FLOW_CTRL_RX;
- } else {
- if (rmtadv & LPA_1000XPAUSE)
- cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
- }
- } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
- if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
+ if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
+ cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+ } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
+ if (lcladv & ADVERTISE_1000XPAUSE)
+ cap = FLOW_CTRL_RX;
+ if (rmtadv & ADVERTISE_1000XPAUSE)
cap = FLOW_CTRL_TX;
}
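
The rewritten resolver above mirrors mii_resolve_flowctrl_fdx() from drivers/net/mii.c, applied to the 1000BASE-X pause bits. Below is a stand-alone sketch of the same resolution logic; the bit values are copied from linux/mii.h, and the truth-table main() is purely illustrative:

#include <stdio.h>

#define ADVERTISE_1000XPAUSE	0x0080	/* symmetric pause (linux/mii.h) */
#define ADVERTISE_1000XPSE_ASYM	0x0100	/* asymmetric pause */
#define FLOW_CTRL_TX		0x01
#define FLOW_CTRL_RX		0x02

/* Same shape as the new tg3_resolve_flowctrl_1000X() above. */
static unsigned char resolve_flowctrl_1000x(unsigned short lcladv,
					    unsigned short rmtadv)
{
	unsigned char cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;	/* symmetric pause */
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;	/* asymmetric, RX side only */
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;	/* asymmetric, TX side only */
	}

	return cap;
}

int main(void)
{
	int l, r;

	/* bit 0 -> PAUSE (0x0080), bit 1 -> ASYM (0x0100) */
	for (l = 0; l < 4; l++)
		for (r = 0; r < 4; r++)
			printf("lcl=%#06x rmt=%#06x -> cap=%#x\n", l << 7,
			       r << 7, resolve_flowctrl_1000x(l << 7, r << 7));
	return 0;
}
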
@@ -3594,15 +3588,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
u32 val, new_adv;
new_adv = ADVERTISE_CSMA;
- if (advertise & ADVERTISED_10baseT_Half)
- new_adv |= ADVERTISE_10HALF;
- if (advertise & ADVERTISED_10baseT_Full)
- new_adv |= ADVERTISE_10FULL;
- if (advertise & ADVERTISED_100baseT_Half)
- new_adv |= ADVERTISE_100HALF;
- if (advertise & ADVERTISED_100baseT_Full)
- new_adv |= ADVERTISE_100FULL;
-
+ new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
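
ethtool_adv_to_mii_adv_t() and ethtool_adv_to_mii_ctrl1000_t(), used here and in the hunks below (the fiber path further down uses the 1000BASE-X flavor, ethtool_adv_to_mii_adv_x()), are generic helpers from include/linux/mii.h that replace these open-coded ADVERTISED_*-to-ADVERTISE_* translations. Roughly what the first one does, renamed here to avoid clashing with the real inline (see mii.h for the authoritative body); masking its result with ADVERTISE_ALL, as the patch does, keeps only the 10/100 speed bits so the pause bits stay under tg3_advert_flowctrl_1000T()'s control:

#include <linux/mii.h>
#include <linux/ethtool.h>

/* Approximate body of ethtool_adv_to_mii_adv_t(): map ethtool
 * ADVERTISED_* link-mode bits onto MII_ADVERTISE register bits. */
static u32 adv_to_mii_adv_t(u32 ethadv)
{
	u32 result = 0;

	if (ethadv & ADVERTISED_10baseT_Half)
		result |= ADVERTISE_10HALF;
	if (ethadv & ADVERTISED_10baseT_Full)
		result |= ADVERTISE_10FULL;
	if (ethadv & ADVERTISED_100baseT_Half)
		result |= ADVERTISE_100HALF;
	if (ethadv & ADVERTISED_100baseT_Full)
		result |= ADVERTISE_100FULL;
	if (ethadv & ADVERTISED_Pause)
		result |= ADVERTISE_PAUSE_CAP;
	if (ethadv & ADVERTISED_Asym_Pause)
		result |= ADVERTISE_PAUSE_ASYM;

	return result;
}
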
@@ -3612,11 +3598,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
goto done;
- new_adv = 0;
- if (advertise & ADVERTISED_1000baseT_Half)
- new_adv |= ADVERTISE_1000HALF;
- if (advertise & ADVERTISED_1000baseT_Full)
- new_adv |= ADVERTISE_1000FULL;
+ new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
@@ -3790,14 +3772,7 @@ static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
{
u32 adv_reg, all_mask = 0;
- if (mask & ADVERTISED_10baseT_Half)
- all_mask |= ADVERTISE_10HALF;
- if (mask & ADVERTISED_10baseT_Full)
- all_mask |= ADVERTISE_10FULL;
- if (mask & ADVERTISED_100baseT_Half)
- all_mask |= ADVERTISE_100HALF;
- if (mask & ADVERTISED_100baseT_Full)
- all_mask |= ADVERTISE_100FULL;
+ all_mask = ethtool_adv_to_mii_adv_t(mask) & ADVERTISE_ALL;
if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
return 0;
@@ -3808,11 +3783,7 @@ static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
u32 tg3_ctrl;
- all_mask = 0;
- if (mask & ADVERTISED_1000baseT_Half)
- all_mask |= ADVERTISE_1000HALF;
- if (mask & ADVERTISED_1000baseT_Full)
- all_mask |= ADVERTISE_1000FULL;
+ all_mask = ethtool_adv_to_mii_ctrl1000_t(mask);
if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
return 0;
@@ -3961,6 +3932,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
current_link_up = 0;
current_speed = SPEED_INVALID;
current_duplex = DUPLEX_INVALID;
+ tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
err = tg3_phy_auxctl_read(tp,
@@ -4033,8 +4005,22 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
}
if (current_link_up == 1 &&
- tp->link_config.active_duplex == DUPLEX_FULL)
+ tp->link_config.active_duplex == DUPLEX_FULL) {
+ u32 reg, bit;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+ reg = MII_TG3_FET_GEN_STAT;
+ bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
+ } else {
+ reg = MII_TG3_EXT_STAT;
+ bit = MII_TG3_EXT_STAT_MDIX;
+ }
+
+ if (!tg3_readphy(tp, reg, &val) && (val & bit))
+ tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
+
tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+ }
}
relink:
@@ -4903,23 +4889,19 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
(tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
/* do nothing, just check for link up at the end */
} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
- u32 adv, new_adv;
+ u32 adv, newadv;
err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
- new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
- ADVERTISE_1000XPAUSE |
- ADVERTISE_1000XPSE_ASYM |
- ADVERTISE_SLCT);
+ newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
+ ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XPSE_ASYM |
+ ADVERTISE_SLCT);
- new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+ newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+ newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
- if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
- new_adv |= ADVERTISE_1000XHALF;
- if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
- new_adv |= ADVERTISE_1000XFULL;
-
- if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
- tg3_writephy(tp, MII_ADVERTISE, new_adv);
+ if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
+ tg3_writephy(tp, MII_ADVERTISE, newadv);
bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
tg3_writephy(tp, MII_BMCR, bmcr);
@@ -5320,6 +5302,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
u32 sw_idx = tnapi->tx_cons;
struct netdev_queue *txq;
int index = tnapi - tp->napi;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
if (tg3_flag(tp, ENABLE_TSS))
index--;
@@ -5370,6 +5353,9 @@ static void tg3_tx(struct tg3_napi *tnapi)
sw_idx = NEXT_TX(sw_idx);
}
+ pkts_compl++;
+ bytes_compl += skb->len;
+
dev_kfree_skb(skb);
if (unlikely(tx_bug)) {
@@ -5378,6 +5364,8 @@ static void tg3_tx(struct tg3_napi *tnapi)
}
}
+ netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
+
tnapi->tx_cons = sw_idx;
/* Need to make the tx_cons update visible to tg3_start_xmit()
@@ -5397,15 +5385,15 @@ static void tg3_tx(struct tg3_napi *tnapi)
}
}
-static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
- if (!ri->skb)
+ if (!ri->data)
return;
pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
map_sz, PCI_DMA_FROMDEVICE);
- dev_kfree_skb_any(ri->skb);
- ri->skb = NULL;
+ kfree(ri->data);
+ ri->data = NULL;
}
/* Returns size of skb allocated or < 0 on error.
@@ -5419,28 +5407,28 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
* buffers the cpu only reads the last cacheline of the RX descriptor
* (to fetch the error flags, vlan tag, checksum, and opaque cookie).
*/
-static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
u32 opaque_key, u32 dest_idx_unmasked)
{
struct tg3_rx_buffer_desc *desc;
struct ring_info *map;
- struct sk_buff *skb;
+ u8 *data;
dma_addr_t mapping;
- int skb_size, dest_idx;
+ int skb_size, data_size, dest_idx;
switch (opaque_key) {
case RXD_OPAQUE_RING_STD:
dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
desc = &tpr->rx_std[dest_idx];
map = &tpr->rx_std_buffers[dest_idx];
- skb_size = tp->rx_pkt_map_sz;
+ data_size = tp->rx_pkt_map_sz;
break;
case RXD_OPAQUE_RING_JUMBO:
dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
desc = &tpr->rx_jmb[dest_idx].std;
map = &tpr->rx_jmb_buffers[dest_idx];
- skb_size = TG3_RX_JMB_MAP_SZ;
+ data_size = TG3_RX_JMB_MAP_SZ;
break;
default:
@@ -5453,31 +5441,33 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
* Callers depend upon this behavior and assume that
* we leave everything unchanged if we fail.
*/
- skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
- if (skb == NULL)
+ skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ data = kmalloc(skb_size, GFP_ATOMIC);
+ if (!data)
return -ENOMEM;
- skb_reserve(skb, TG3_RX_OFFSET(tp));
-
- mapping = pci_map_single(tp->pdev, skb->data, skb_size,
+ mapping = pci_map_single(tp->pdev,
+ data + TG3_RX_OFFSET(tp),
+ data_size,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(tp->pdev, mapping)) {
- dev_kfree_skb(skb);
+ kfree(data);
return -EIO;
}
- map->skb = skb;
+ map->data = data;
dma_unmap_addr_set(map, mapping, mapping);
desc->addr_hi = ((u64)mapping >> 32);
desc->addr_lo = ((u64)mapping & 0xffffffff);
- return skb_size;
+ return data_size;
}
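
With this conversion the RX rings hold bare kmalloc() buffers instead of fully-built skbs; an sk_buff head is attached only when a packet arrives, via build_skb() in tg3_rx() below (the build_skb() of this kernel generation takes just the buffer pointer). Because kmalloc() provides no implicit NET_SKB_PAD headroom, TG3_RX_OFFSET()/rx_offset now carry it explicitly (see the first and last hunks of this patch), and the allocation must leave room for the skb_shared_info footer after the aligned headroom-plus-payload region, which is what the SKB_DATA_ALIGN arithmetic above computes. Sketch of the sizing rule, with alloc_rx_buffer() as a hypothetical helper:

#include <linux/skbuff.h>
#include <linux/slab.h>

/* Hypothetical helper showing the layout build_skb() expects:
 * [ headroom | payload ... padded to alignment | struct skb_shared_info ]
 */
static u8 *alloc_rx_buffer(unsigned int headroom, unsigned int data_size)
{
	unsigned int size = SKB_DATA_ALIGN(headroom + data_size) +
			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Until build_skb() attaches a head, this buffer is freed with
	 * kfree(), not dev_kfree_skb() -- hence tg3_rx_data_free() above. */
	return kmalloc(size, GFP_ATOMIC);
}
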
/* We only need to move over in the address because the other
* members of the RX descriptor are invariant. See notes above
- * tg3_alloc_rx_skb for full details.
+ * tg3_alloc_rx_data for full details.
*/
static void tg3_recycle_rx(struct tg3_napi *tnapi,
struct tg3_rx_prodring_set *dpr,
@@ -5511,7 +5501,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
return;
}
- dest_map->skb = src_map->skb;
+ dest_map->data = src_map->data;
dma_unmap_addr_set(dest_map, mapping,
dma_unmap_addr(src_map, mapping));
dest_desc->addr_hi = src_desc->addr_hi;
@@ -5522,7 +5512,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
*/
smp_wmb();
- src_map->skb = NULL;
+ src_map->data = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
@@ -5576,19 +5566,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
struct sk_buff *skb;
dma_addr_t dma_addr;
u32 opaque_key, desc_idx, *post_ptr;
+ u8 *data;
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
if (opaque_key == RXD_OPAQUE_RING_STD) {
ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
dma_addr = dma_unmap_addr(ri, mapping);
- skb = ri->skb;
+ data = ri->data;
post_ptr = &std_prod_idx;
rx_std_posted++;
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
dma_addr = dma_unmap_addr(ri, mapping);
- skb = ri->skb;
+ data = ri->data;
post_ptr = &jmb_prod_idx;
} else
goto next_pkt_nopost;
@@ -5606,13 +5597,14 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
goto next_pkt;
}
+ prefetch(data + TG3_RX_OFFSET(tp));
len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
ETH_FCS_LEN;
if (len > TG3_RX_COPY_THRESH(tp)) {
int skb_size;
- skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
+ skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
*post_ptr);
if (skb_size < 0)
goto drop_it;
@@ -5620,35 +5612,37 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
pci_unmap_single(tp->pdev, dma_addr, skb_size,
PCI_DMA_FROMDEVICE);
- /* Ensure that the update to the skb happens
+ skb = build_skb(data);
+ if (!skb) {
+ kfree(data);
+ goto drop_it_no_recycle;
+ }
+ skb_reserve(skb, TG3_RX_OFFSET(tp));
+ /* Ensure that the update to the data happens
* after the usage of the old DMA mapping.
*/
smp_wmb();
- ri->skb = NULL;
+ ri->data = NULL;
- skb_put(skb, len);
} else {
- struct sk_buff *copy_skb;
-
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
- copy_skb = netdev_alloc_skb(tp->dev, len +
- TG3_RAW_IP_ALIGN);
- if (copy_skb == NULL)
+ skb = netdev_alloc_skb(tp->dev,
+ len + TG3_RAW_IP_ALIGN);
+ if (skb == NULL)
goto drop_it_no_recycle;
- skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
- skb_put(copy_skb, len);
+ skb_reserve(skb, TG3_RAW_IP_ALIGN);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
- skb_copy_from_linear_data(skb, copy_skb->data, len);
+ memcpy(skb->data,
+ data + TG3_RX_OFFSET(tp),
+ len);
pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
-
- /* We'll reuse the original ring buffer. */
- skb = copy_skb;
}
+ skb_put(skb, len);
if ((tp->dev->features & NETIF_F_RXCSUM) &&
(desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
(((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
@@ -5787,7 +5781,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
di = dpr->rx_std_prod_idx;
for (i = di; i < di + cpycnt; i++) {
- if (dpr->rx_std_buffers[i].skb) {
+ if (dpr->rx_std_buffers[i].data) {
cpycnt = i - di;
err = -ENOSPC;
break;
@@ -5845,7 +5839,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
di = dpr->rx_jmb_prod_idx;
for (i = di; i < di + cpycnt; i++) {
- if (dpr->rx_jmb_buffers[i].skb) {
+ if (dpr->rx_jmb_buffers[i].data) {
cpycnt = i - di;
err = -ENOSPC;
break;
@@ -6816,6 +6810,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
skb_tx_timestamp(skb);
+ netdev_sent_queue(tp->dev, skb->len);
/* Packets are ready, update Tx producer idx local and on card. */
tw32_tx_mbox(tnapi->prodmbox, entry);
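
netdev_sent_queue() here pairs with the netdev_completed_queue() call added to tg3_tx() and the netdev_reset_queue() added to tg3_free_rings(); the three feed Byte Queue Limits (BQL), which dynamically caps how many bytes may sit unacknowledged in the hardware TX queue. The general calling pattern, sketched for a hypothetical driver "foo":

#include <linux/netdevice.h>

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... post the skb to the hardware TX ring ... */
	netdev_sent_queue(dev, skb->len);	/* bytes handed to hardware */
	return NETDEV_TX_OK;
}

static void foo_tx_complete(struct net_device *dev)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* ... for each completed descriptor:
	 *	pkts_compl++; bytes_compl += skb->len; free the skb ... */

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void foo_free_rings(struct net_device *dev)
{
	/* ... drop every skb still pending in the rings ... */
	netdev_reset_queue(dev);	/* resync, or BQL keeps counting
					 * the flushed bytes as in flight */
}
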
@@ -6968,7 +6963,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
return 0;
}
-static void tg3_set_loopback(struct net_device *dev, u32 features)
+static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
struct tg3 *tp = netdev_priv(dev);
@@ -6994,7 +6989,8 @@ static void tg3_set_loopback(struct net_device *dev, u32 features)
}
}
-static u32 tg3_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t tg3_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct tg3 *tp = netdev_priv(dev);
@@ -7004,9 +7000,9 @@ static u32 tg3_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int tg3_set_features(struct net_device *dev, u32 features)
+static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
tg3_set_loopback(dev, features);
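
The u32-to-netdev_features_t changes track the core networking conversion of the feature mask to a 64-bit typedef; a driver that kept passing u32 would silently truncate any feature bit above bit 31. The adjusted hook signatures, sketched for a hypothetical driver "foo":

#include <linux/netdevice.h>

/* Feature hooks must use netdev_features_t end to end; u32 locals
 * would drop high feature bits. */
static netdev_features_t foo_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	/* illustrative dependency: no TSO without checksum offload */
	if (!(features & NETIF_F_IP_CSUM))
		features &= ~NETIF_F_TSO;
	return features;
}

static int foo_set_features(struct net_device *dev,
			    netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_LOOPBACK) {
		/* ... reprogram hardware loopback ... */
	}
	return 0;
}
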
@@ -7082,14 +7078,14 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
if (tpr != &tp->napi[0].prodring) {
for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
i = (i + 1) & tp->rx_std_ring_mask)
- tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
tp->rx_pkt_map_sz);
if (tg3_flag(tp, JUMBO_CAPABLE)) {
for (i = tpr->rx_jmb_cons_idx;
i != tpr->rx_jmb_prod_idx;
i = (i + 1) & tp->rx_jmb_ring_mask) {
- tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
TG3_RX_JMB_MAP_SZ);
}
}
@@ -7098,12 +7094,12 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
}
for (i = 0; i <= tp->rx_std_ring_mask; i++)
- tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
tp->rx_pkt_map_sz);
if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
- tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
TG3_RX_JMB_MAP_SZ);
}
}
@@ -7159,7 +7155,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
/* Now allocate fresh SKBs for each rx ring. */
for (i = 0; i < tp->rx_pending; i++) {
- if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
+ if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
netdev_warn(tp->dev,
"Using a smaller RX standard ring. Only "
"%d out of %d buffers were allocated "
@@ -7191,7 +7187,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
}
for (i = 0; i < tp->rx_jumbo_pending; i++) {
- if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
+ if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
netdev_warn(tp->dev,
"Using a smaller RX jumbo ring. Only %d "
"out of %d buffers were allocated "
@@ -7297,6 +7293,7 @@ static void tg3_free_rings(struct tg3 *tp)
dev_kfree_skb_any(skb);
}
}
+ netdev_reset_queue(tp->dev);
}
/* Initialize tx/rx rings for packet processing.
@@ -7626,15 +7623,11 @@ static void tg3_restore_pci_state(struct tg3 *tp)
pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
- if (tg3_flag(tp, PCI_EXPRESS))
- pcie_set_readrq(tp->pdev, tp->pcie_readrq);
- else {
- pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
- tp->pci_cacheline_sz);
- pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
- tp->pci_lat_timer);
- }
+ if (!tg3_flag(tp, PCI_EXPRESS)) {
+ pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+ tp->pci_cacheline_sz);
+ pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+ tp->pci_lat_timer);
}
/* Make sure PCI-X relaxed ordering bit is clear. */
@@ -7819,8 +7812,6 @@ static int tg3_chip_reset(struct tg3 *tp)
pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
val16);
- pcie_set_readrq(tp->pdev, tp->pcie_readrq);
-
/* Clear error status */
pci_write_config_word(tp->pdev,
pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
@@ -8197,7 +8188,8 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
if (!tg3_flag(tp, 5750_PLUS) ||
tg3_flag(tp, 5780_CLASS) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ tg3_flag(tp, 57765_PLUS))
bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
@@ -8217,10 +8209,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
return;
- if (!tg3_flag(tp, 5705_PLUS))
- bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
- else
- bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
+ bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
@@ -8581,10 +8570,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
}
if (tg3_flag(tp, 57765_PLUS)) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
- val = TG3_RX_STD_MAX_SIZE_5700;
- else
- val = TG3_RX_STD_MAX_SIZE_5717;
+ val = TG3_RX_STD_RING_SIZE(tp);
val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
val |= (TG3_RX_STD_DMA_SZ << 2);
} else
@@ -10321,9 +10307,16 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (netif_running(dev)) {
ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
cmd->duplex = tp->link_config.active_duplex;
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+ if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
+ cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ else
+ cmd->eth_tp_mdix = ETH_TP_MDI;
+ }
} else {
ethtool_cmd_speed_set(cmd, SPEED_INVALID);
cmd->duplex = DUPLEX_INVALID;
+ cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
}
cmd->phy_address = tp->phy_addr;
cmd->transceiver = XCVR_INTERNAL;
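
The MDI-X state cached in phy_flags by tg3_setup_copper_phy() above surfaces to user space through the eth_tp_mdix field of struct ethtool_cmd. A minimal user-space reader via the legacy SIOCETHTOOL/ETHTOOL_GSET ioctl; the interface name is hard-coded for illustration:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

int main(void)
{
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* illustrative name */
	ifr.ifr_data = (void *)&ecmd;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		return 1;
	}

	printf("MDI-X: %s\n",
	       ecmd.eth_tp_mdix == ETH_TP_MDI_X ? "on (crossover)" :
	       ecmd.eth_tp_mdix == ETH_TP_MDI   ? "off (straight)" :
						  "unknown");
	close(fd);
	return 0;
}
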
@@ -10428,10 +10421,10 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tg3 *tp = netdev_priv(dev);
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
- strcpy(info->fw_version, tp->fw_ver);
- strcpy(info->bus_info, pci_name(tp->pdev));
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
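
strlcpy() bounds each copy by the destination size and guarantees NUL termination, so an overlong firmware-version string can no longer overrun the fixed-size ethtool_drvinfo fields. The contrast, as a short sketch:

#include <linux/string.h>

/* strcpy() trusts the source length; strlcpy() trusts the destination
 * size, truncating if needed and always NUL-terminating. */
static void fill_fw_version(char *dst, size_t dst_len, const char *fw_ver)
{
	strlcpy(dst, fw_ver, dst_len);
}
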
@@ -11400,8 +11393,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
u32 budget;
- struct sk_buff *skb, *rx_skb;
- u8 *tx_data;
+ struct sk_buff *skb;
+ u8 *tx_data, *rx_data;
dma_addr_t map;
int num_pkts, tx_len, rx_len, i, err;
struct tg3_rx_buffer_desc *desc;
@@ -11569,11 +11562,11 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
}
if (opaque_key == RXD_OPAQUE_RING_STD) {
- rx_skb = tpr->rx_std_buffers[desc_idx].skb;
+ rx_data = tpr->rx_std_buffers[desc_idx].data;
map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
mapping);
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
- rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
+ rx_data = tpr->rx_jmb_buffers[desc_idx].data;
map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
mapping);
} else
@@ -11582,15 +11575,16 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
PCI_DMA_FROMDEVICE);
+ rx_data += TG3_RX_OFFSET(tp);
for (i = data_off; i < rx_len; i++, val++) {
- if (*(rx_skb->data + i) != (u8) (val & 0xff))
+ if (*(rx_data + i) != (u8) (val & 0xff))
goto out;
}
}
err = 0;
- /* tg3_free_rings will unmap and free the rx_skb */
+ /* tg3_free_rings will unmap and free the rx_data */
out:
return err;
}
@@ -13218,8 +13212,7 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
- u32 adv = ADVERTISED_Autoneg |
- ADVERTISED_Pause;
+ u32 adv = ADVERTISED_Autoneg;
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
adv |= ADVERTISED_1000baseT_Half |
@@ -13997,9 +13990,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3) ||
- (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
+ tp->fw_needed) {
+ /* For firmware TSO, assume ASF is disabled.
+ * We'll disable TSO later if we discover ASF
+ * is enabled in tg3_get_eeprom_hw_cfg().
+ */
tg3_flag_set(tp, TSO_CAPABLE);
- else {
+ } else {
tg3_flag_clear(tp, TSO_CAPABLE);
tg3_flag_clear(tp, TSO_BUG);
tp->fw_needed = NULL;
@@ -14036,7 +14033,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
tg3_flag_set(tp, 4K_FIFO_LIMIT);
- if (tg3_flag(tp, 5717_PLUS))
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
tg3_flag_set(tp, LRG_PROD_RING_CAP);
if (tg3_flag(tp, 57765_PLUS) &&
@@ -14056,12 +14055,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, PCI_EXPRESS);
- tp->pcie_readrq = 4096;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
- tp->pcie_readrq = 2048;
-
- pcie_set_readrq(tp->pdev, tp->pcie_readrq);
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
+ int readrq = pcie_get_readrq(tp->pdev);
+ if (readrq > 2048)
+ pcie_set_readrq(tp->pdev, 2048);
+ }
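
Rather than unconditionally programming the PCIe Max Read Request Size at probe time and again after every chip reset (the hunks removed from tg3_restore_pci_state() and tg3_chip_reset() above), the driver now only clamps MRRS on the one problematic revision and otherwise leaves the PCI core's or firmware's setting alone. The clamp, as a sketch with a hypothetical quirky_rev flag:

#include <linux/pci.h>

static void foo_clamp_readrq(struct pci_dev *pdev, bool quirky_rev)
{
	/* cap Max Read Request Size at 2048 bytes on the affected
	 * revision; never touch the configuration elsewhere */
	if (quirky_rev && pcie_get_readrq(pdev) > 2048)
		pcie_set_readrq(pdev, 2048);
}
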
pci_read_config_word(tp->pdev,
pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
@@ -14273,6 +14271,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
*/
tg3_get_eeprom_hw_cfg(tp);
+ if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ tg3_flag_clear(tp, TSO_BUG);
+ tp->fw_needed = NULL;
+ }
+
if (tg3_flag(tp, ENABLE_APE)) {
/* Allow reads and writes to the
* APE register and memory space.
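
This hunk pairs with the TSO_CAPABLE change above: firmware-based TSO is claimed optimistically during early probe and revoked here, because firmware TSO and ASF management firmware are mutually exclusive, and ASF's presence is only known once tg3_get_eeprom_hw_cfg() has read the NVRAM configuration. The two-phase probe, as a self-contained sketch (all names invented):

#include <stdbool.h>

struct probe_state {
	bool hw_tso;		/* TSO handled by hardware */
	bool fw_tso;		/* TSO needs downloadable firmware */
	bool asf_enabled;	/* learned while reading NVRAM config */
	bool tso_capable;
};

/* stand-in for tg3_get_eeprom_hw_cfg(): may discover ASF */
static void read_nvram_config(struct probe_state *ps)
{
	ps->asf_enabled = true;
}

static void probe_tso(struct probe_state *ps)
{
	/* Phase 1: assume ASF is off; claim TSO if any path exists */
	ps->tso_capable = ps->hw_tso || ps->fw_tso;

	read_nvram_config(ps);

	/* Phase 2: firmware TSO and ASF cannot coexist */
	if (ps->fw_tso && ps->asf_enabled)
		ps->tso_capable = false;
}
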
@@ -14548,11 +14552,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
else
tg3_flag_clear(tp, POLL_SERDES);
- tp->rx_offset = NET_IP_ALIGN;
+ tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
tg3_flag(tp, PCIX_MODE)) {
- tp->rx_offset = 0;
+ tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
tp->rx_copy_thresh = ~(u16)0;
#endif
@@ -15313,7 +15317,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
u32 sndmbx, rcvmbx, intmbx;
char str[40];
u64 dma_mask, persist_dma_mask;
- u32 features = 0;
+ netdev_features_t features = 0;
printk_once(KERN_INFO "%s\n", version);