Diffstat (limited to 'drivers/net/ethernet/intel/igc')
-rw-r--r--   drivers/net/ethernet/intel/igc/igc.h             1
-rw-r--r--   drivers/net/ethernet/intel/igc/igc_base.c        2
-rw-r--r--   drivers/net/ethernet/intel/igc/igc_defines.h     1
-rw-r--r--   drivers/net/ethernet/intel/igc/igc_hw.h          9
-rw-r--r--   drivers/net/ethernet/intel/igc/igc_i225.c       11
-rw-r--r--   drivers/net/ethernet/intel/igc/igc_mac.c         2
-rw-r--r--   drivers/net/ethernet/intel/igc/igc_main.c      242
-rw-r--r--   drivers/net/ethernet/intel/igc/igc_phy.c        24
-rw-r--r--   drivers/net/ethernet/intel/igc/igc_phy.h         2
-rw-r--r--   drivers/net/ethernet/intel/igc/igc_ptp.c        16
-rw-r--r--   drivers/net/ethernet/intel/igc/igc_regs.h        8
11 files changed, 154 insertions, 164 deletions
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 3e386c38d016..1e7e7071f64d 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -264,7 +264,6 @@ int igc_reinit_queues(struct igc_adapter *adapter);
void igc_write_rss_indir_tbl(struct igc_adapter *adapter);
bool igc_has_link(struct igc_adapter *adapter);
void igc_reset(struct igc_adapter *adapter);
-int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
void igc_update_stats(struct igc_adapter *adapter);
void igc_disable_rx_ring(struct igc_ring *ring);
void igc_enable_rx_ring(struct igc_ring *ring);
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index f068b66b8025..a15927e77272 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -182,8 +182,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
igc_check_for_copper_link(hw);
- phy->type = igc_phy_i225;
-
out:
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 5c66b97c0cfa..4f9d7f013a95 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -610,7 +610,6 @@
#define IGC_MDIC_OP_WRITE 0x04000000
#define IGC_MDIC_OP_READ 0x08000000
#define IGC_MDIC_READY 0x10000000
-#define IGC_MDIC_INT_EN 0x20000000
#define IGC_MDIC_ERROR 0x40000000
#define IGC_N0_QUEUE -1
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index b1e72ec5f131..88680e3d613d 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -53,11 +53,6 @@ enum igc_mac_type {
igc_num_macs /* List is 1-based, so subtract 1 for true count. */
};
-enum igc_phy_type {
- igc_phy_unknown = 0,
- igc_phy_i225,
-};
-
enum igc_media_type {
igc_media_type_unknown = 0,
igc_media_type_copper = 1,
@@ -94,8 +89,6 @@ struct igc_mac_info {
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
- u8 forced_speed_duplex;
-
bool asf_firmware_present;
bool arc_subsystem_valid;
@@ -138,8 +131,6 @@ struct igc_nvm_info {
struct igc_phy_info {
struct igc_phy_operations ops;
- enum igc_phy_type type;
-
u32 addr;
u32 id;
u32 reset_delay_us; /* in usec */
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index 66ea566488d1..59d5c467ea6e 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igc_get_hw_semaphore_i225(hw))
- ; /* Empty */
+ /* Releasing the resource requires first getting the HW semaphore.
+ * If we fail to get the semaphore, there is nothing we can do,
+ * except log an error and quit. We are not allowed to hang here
+ * indefinitely, as it may cause denial of service or system crash.
+ */
+ if (igc_get_hw_semaphore_i225(hw)) {
+ hw_dbg("Failed to release SW_FW_SYNC.\n");
+ return;
+ }
swfw_sync = rd32(IGC_SW_FW_SYNC);
swfw_sync &= ~mask;
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index 67b8ffd21d8a..a5c4b19d71a2 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -193,7 +193,7 @@ s32 igc_force_mac_fc(struct igc_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and TX flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 2f17f36e94fd..34889be63e78 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -505,6 +505,9 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
u8 index = rx_ring->queue_index;
int size, desc_len, res;
+ /* XDP RX-queue info */
+ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
rx_ring->q_vector->napi.napi_id);
if (res < 0) {
@@ -2126,65 +2129,102 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
return ok;
}
-static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
- struct xdp_frame *xdpf,
- struct igc_ring *ring)
-{
- dma_addr_t dma;
-
- dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
- if (dma_mapping_error(ring->dev, dma)) {
- netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
- return -ENOMEM;
- }
-
- buffer->type = IGC_TX_BUFFER_TYPE_XDP;
- buffer->xdpf = xdpf;
- buffer->protocol = 0;
- buffer->bytecount = xdpf->len;
- buffer->gso_segs = 1;
- buffer->time_stamp = jiffies;
- dma_unmap_len_set(buffer, len, xdpf->len);
- dma_unmap_addr_set(buffer, dma, dma);
- return 0;
-}
-
/* This function requires __netif_tx_lock is held by the caller. */
static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
struct xdp_frame *xdpf)
{
- struct igc_tx_buffer *buffer;
- union igc_adv_tx_desc *desc;
- u32 cmd_type, olinfo_status;
- int err;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 count, index = ring->next_to_use;
+ struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
+ struct igc_tx_buffer *buffer = head;
+ union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
+ u32 olinfo_status, len = xdpf->len, cmd_type;
+ void *data = xdpf->data;
+ u16 i;
- if (!igc_desc_unused(ring))
- return -EBUSY;
+ count = TXD_USE_COUNT(len);
+ for (i = 0; i < nr_frags; i++)
+ count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
- buffer = &ring->tx_buffer_info[ring->next_to_use];
- err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
- if (err)
- return err;
+ if (igc_maybe_stop_tx(ring, count + 3)) {
+ /* this is a hard error */
+ return -EBUSY;
+ }
- cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
- IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
- buffer->bytecount;
- olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+ i = 0;
+ head->bytecount = xdp_get_frame_len(xdpf);
+ head->type = IGC_TX_BUFFER_TYPE_XDP;
+ head->gso_segs = 1;
+ head->xdpf = xdpf;
- desc = IGC_TX_DESC(ring, ring->next_to_use);
- desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
desc->read.olinfo_status = cpu_to_le32(olinfo_status);
- desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
- netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
+ for (;;) {
+ dma_addr_t dma;
+
+ dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring->dev, dma)) {
+ netdev_err_once(ring->netdev,
+ "Failed to map DMA for TX\n");
+ goto unmap;
+ }
+
+ dma_unmap_len_set(buffer, len, len);
+ dma_unmap_addr_set(buffer, dma, dma);
+
+ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS | len;
+
+ desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ desc->read.buffer_addr = cpu_to_le64(dma);
+
+ buffer->protocol = 0;
+
+ if (++index == ring->count)
+ index = 0;
- buffer->next_to_watch = desc;
+ if (i == nr_frags)
+ break;
- ring->next_to_use++;
- if (ring->next_to_use == ring->count)
- ring->next_to_use = 0;
+ buffer = &ring->tx_buffer_info[index];
+ desc = IGC_TX_DESC(ring, index);
+ desc->read.olinfo_status = 0;
+
+ data = skb_frag_address(&sinfo->frags[i]);
+ len = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
+ desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);
+
+ netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
+ /* set the timestamp */
+ head->time_stamp = jiffies;
+ /* set next_to_watch value indicating a packet is present */
+ head->next_to_watch = desc;
+ ring->next_to_use = index;
return 0;
+
+unmap:
+ for (;;) {
+ buffer = &ring->tx_buffer_info[index];
+ if (dma_unmap_len(buffer, len))
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(buffer, dma),
+ dma_unmap_len(buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(buffer, len, 0);
+ if (buffer == head)
+ break;
+
+ if (!index)
+ index += ring->count;
+ index--;
+ }
+
+ return -ENOMEM;
}
static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
@@ -2366,6 +2406,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
igc_rx_offset(rx_ring) + pkt_offset,
size, true);
+ xdp_buff_clear_frags_flag(&xdp);
skb = igc_xdp_run_prog(adapter, &xdp);
}
@@ -2446,19 +2487,20 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
struct xdp_buff *xdp)
{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
- unsigned int totalsize = metasize + datasize;
struct sk_buff *skb;
- skb = __napi_alloc_skb(&ring->q_vector->napi,
- xdp->data_end - xdp->data_hard_start,
+ net_prefetch(xdp->data_meta);
+
+ skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
- memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
if (metasize) {
skb_metadata_set(skb, metasize);
__skb_pull(skb, metasize);
@@ -4352,8 +4394,7 @@ static int igc_alloc_q_vector(struct igc_adapter *adapter,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- igc_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
@@ -5809,9 +5850,10 @@ static bool validate_schedule(struct igc_adapter *adapter,
return false;
for (n = 0; n < qopt->num_entries; n++) {
- const struct tc_taprio_sched_entry *e;
+ const struct tc_taprio_sched_entry *e, *prev;
int i;
+ prev = n ? &qopt->entries[n - 1] : NULL;
e = &qopt->entries[n];
/* i225 only supports "global" frame preemption
@@ -5824,7 +5866,12 @@ static bool validate_schedule(struct igc_adapter *adapter,
if (e->gate_mask & BIT(i))
queue_uses[i]++;
- if (queue_uses[i] > 1)
+ /* There are limitations: A single queue cannot be
+ * opened and closed multiple times per cycle unless the
+ * gate stays open. Check for it.
+ */
+ if (queue_uses[i] > 1 &&
+ !(prev->gate_mask & BIT(i)))
return false;
}
}
@@ -5868,6 +5915,7 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
static int igc_save_qbv_schedule(struct igc_adapter *adapter,
struct tc_taprio_qopt_offload *qopt)
{
+ bool queue_configured[IGC_MAX_TX_QUEUES] = { };
u32 start_time = 0, end_time = 0;
size_t n;
@@ -5883,9 +5931,6 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
adapter->cycle_time = qopt->cycle_time;
adapter->base_time = qopt->base_time;
- /* FIXME: be a little smarter about cases when the gate for a
- * queue stays open for more than one entry.
- */
for (n = 0; n < qopt->num_entries; n++) {
struct tc_taprio_sched_entry *e = &qopt->entries[n];
int i;
@@ -5898,8 +5943,15 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (!(e->gate_mask & BIT(i)))
continue;
- ring->start_time = start_time;
+ /* Check whether a queue stays open for more than one
+ * entry. If so, keep the start and advance the end
+ * time.
+ */
+ if (!queue_configured[i])
+ ring->start_time = start_time;
ring->end_time = end_time;
+
+ queue_configured[i] = true;
}
start_time += e->interval;
@@ -6167,6 +6219,9 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
u32 value = 0;
+ if (IGC_REMOVED(hw_addr))
+ return ~value;
+
value = readl(&hw_addr[reg]);
/* reads should not return all F's */
@@ -6183,56 +6238,6 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
return value;
}
-int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
-{
- struct igc_mac_info *mac = &adapter->hw.mac;
-
- mac->autoneg = false;
-
- /* Make sure dplx is at most 1 bit and lsb of speed is not set
- * for the switch() below to work
- */
- if ((spd & 1) || (dplx & ~1))
- goto err_inval;
-
- switch (spd + dplx) {
- case SPEED_10 + DUPLEX_HALF:
- mac->forced_speed_duplex = ADVERTISE_10_HALF;
- break;
- case SPEED_10 + DUPLEX_FULL:
- mac->forced_speed_duplex = ADVERTISE_10_FULL;
- break;
- case SPEED_100 + DUPLEX_HALF:
- mac->forced_speed_duplex = ADVERTISE_100_HALF;
- break;
- case SPEED_100 + DUPLEX_FULL:
- mac->forced_speed_duplex = ADVERTISE_100_FULL;
- break;
- case SPEED_1000 + DUPLEX_FULL:
- mac->autoneg = true;
- adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
- break;
- case SPEED_1000 + DUPLEX_HALF: /* not supported */
- goto err_inval;
- case SPEED_2500 + DUPLEX_FULL:
- mac->autoneg = true;
- adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
- break;
- case SPEED_2500 + DUPLEX_HALF: /* not supported */
- default:
- goto err_inval;
- }
-
- /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
- adapter->hw.phy.mdix = AUTO_ALL_MODES;
-
- return 0;
-
-err_inval:
- netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n");
- return -EINVAL;
-}
-
/**
* igc_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -6251,23 +6256,17 @@ static int igc_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct igc_hw *hw;
const struct igc_info *ei = igc_info_tbl[ent->driver_data];
- int err, pci_using_dac;
+ int err;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, igc_driver_name);
@@ -6367,8 +6366,7 @@ static int igc_probe(struct pci_dev *pdev,
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= netdev->features;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 5cad31c3c7b0..53b77c969c85 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -141,24 +141,14 @@ void igc_power_down_phy_copper(struct igc_hw *hw)
* igc_check_downshift - Checks whether a downshift in speed occurred
* @hw: pointer to the HW structure
*
- * Success returns 0, Failure returns 1
- *
* A downshift is detected by querying the PHY link health.
*/
-s32 igc_check_downshift(struct igc_hw *hw)
+void igc_check_downshift(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
- s32 ret_val;
-
- switch (phy->type) {
- case igc_phy_i225:
- default:
- /* speed downshift not supported */
- phy->speed_downgraded = false;
- ret_val = 0;
- }
- return ret_val;
+ /* speed downshift not supported */
+ phy->speed_downgraded = false;
}
/**
@@ -581,7 +571,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
- usleep_range(500, 1000);
+ udelay(50);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
@@ -638,7 +628,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
- usleep_range(500, 1000);
+ udelay(50);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
@@ -746,8 +736,6 @@ s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
if (ret_val)
return ret_val;
ret_val = igc_write_phy_reg_mdic(hw, offset, data);
- if (ret_val)
- return ret_val;
hw->phy.ops.release(hw);
} else {
ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr,
@@ -779,8 +767,6 @@ s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
if (ret_val)
return ret_val;
ret_val = igc_read_phy_reg_mdic(hw, offset, data);
- if (ret_val)
- return ret_val;
hw->phy.ops.release(hw);
} else {
ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr,
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.h b/drivers/net/ethernet/intel/igc/igc_phy.h
index 1b031372d206..832a7e359f18 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.h
+++ b/drivers/net/ethernet/intel/igc/igc_phy.h
@@ -11,7 +11,7 @@ s32 igc_phy_hw_reset(struct igc_hw *hw);
s32 igc_get_phy_id(struct igc_hw *hw);
s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations,
u32 usec_interval, bool *success);
-s32 igc_check_downshift(struct igc_hw *hw);
+void igc_check_downshift(struct igc_hw *hw);
s32 igc_setup_copper_link(struct igc_hw *hw);
void igc_power_up_phy_copper(struct igc_hw *hw);
void igc_power_down_phy_copper(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 0d6e3215e98f..8dbb9f903ca7 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -15,7 +15,6 @@
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
-#define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
#define IGC_PTP_TX_TIMEOUT (HZ * 15)
#define IGC_PTM_STAT_SLEEP 2
@@ -992,6 +991,17 @@ static void igc_ptp_time_restore(struct igc_adapter *adapter)
igc_ptp_write_i225(adapter, &ts);
}
+static void igc_ptm_stop(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ ctrl = rd32(IGC_PTM_CTRL);
+ ctrl &= ~IGC_PTM_CTRL_EN;
+
+ wr32(IGC_PTM_CTRL, ctrl);
+}
+
/**
* igc_ptp_suspend - Disable PTP work items and prepare for suspend
* @adapter: Board private structure
@@ -1009,8 +1019,10 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
- if (pci_device_is_present(adapter->pdev))
+ if (pci_device_is_present(adapter->pdev)) {
igc_ptp_time_save(adapter);
+ igc_ptm_stop(adapter);
+ }
}
/**
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index e197a33d93a0..c0d8214148d1 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -59,9 +59,6 @@
#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */
-/* MSI-X Table Register Descriptions */
-#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
-
/* RSS registers */
#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */
@@ -306,7 +303,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg);
#define wr32(reg, val) \
do { \
u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
- writel((val), &hw_addr[(reg)]); \
+ if (!IGC_REMOVED(hw_addr)) \
+ writel((val), &hw_addr[(reg)]); \
} while (0)
#define rd32(reg) (igc_rd32(hw, reg))
@@ -318,4 +316,6 @@ do { \
#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2)))
+#define IGC_REMOVED(h) unlikely(!(h))
+
#endif