Diffstat (limited to 'drivers/net/sfc')
-rw-r--r--  drivers/net/sfc/efx.c          |  75
-rw-r--r--  drivers/net/sfc/efx.h          |   4
-rw-r--r--  drivers/net/sfc/falcon.c       |   8
-rw-r--r--  drivers/net/sfc/mcdi_phy.c     |  21
-rw-r--r--  drivers/net/sfc/net_driver.h   |  46
-rw-r--r--  drivers/net/sfc/nic.c          |  55
-rw-r--r--  drivers/net/sfc/nic.h          |   4
-rw-r--r--  drivers/net/sfc/rx.c           | 393
-rw-r--r--  drivers/net/sfc/selftest.c     |  28
-rw-r--r--  drivers/net/sfc/siena.c        |   4
-rw-r--r--  drivers/net/sfc/workarounds.h  |   2
11 files changed, 321 insertions(+), 319 deletions(-)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 156460527231..8ad476a19d95 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -27,6 +27,7 @@
#include "nic.h"
#include "mcdi.h"
+#include "workarounds.h"
/**************************************************************************
*
@@ -92,13 +93,6 @@ const char *efx_reset_type_names[] = {
#define EFX_MAX_MTU (9 * 1024)
-/* RX slow fill workqueue. If memory allocation fails in the fast path,
- * a work item is pushed onto this work queue to retry the allocation later,
- * to avoid the NIC being starved of RX buffers. Since this is a per cpu
- * workqueue, there is nothing to be gained in making it per NIC
- */
-static struct workqueue_struct *refill_workqueue;
-
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
* queued onto this work queue. This is not a per-nic work queue, because
* efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
@@ -475,7 +469,8 @@ static void efx_init_channels(struct efx_nic *efx)
efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
- efx->rx_buffer_order = get_order(efx->rx_buffer_len);
+ efx->rx_buffer_order = get_order(efx->rx_buffer_len +
+ sizeof(struct efx_rx_page_state));
/* Initialise the channels */
efx_for_each_channel(channel, efx) {
@@ -515,11 +510,11 @@ static void efx_start_channel(struct efx_channel *channel)
channel->enabled = true;
smp_wmb();
- napi_enable(&channel->napi_str);
-
- /* Load up RX descriptors */
+ /* Fill the queues before enabling NAPI */
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fast_push_rx_descriptors(rx_queue);
+
+ napi_enable(&channel->napi_str);
}
/* This disables event queue processing and packet transmission.
@@ -528,8 +523,6 @@ static void efx_start_channel(struct efx_channel *channel)
*/
static void efx_stop_channel(struct efx_channel *channel)
{
- struct efx_rx_queue *rx_queue;
-
if (!channel->enabled)
return;
@@ -537,12 +530,6 @@ static void efx_stop_channel(struct efx_channel *channel)
channel->enabled = false;
napi_disable(&channel->napi_str);
-
- /* Ensure that any worker threads have exited or will be no-ops */
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- spin_lock_bh(&rx_queue->add_lock);
- spin_unlock_bh(&rx_queue->add_lock);
- }
}
static void efx_fini_channels(struct efx_nic *efx)
@@ -556,10 +543,18 @@ static void efx_fini_channels(struct efx_nic *efx)
BUG_ON(efx->port_enabled);
rc = efx_nic_flush_queues(efx);
- if (rc)
+ if (rc && EFX_WORKAROUND_7803(efx)) {
+ /* Schedule a reset to recover from the flush failure. The
+ * descriptor caches reference memory we're about to free,
+ * but falcon_reconfigure_mac_wrapper() won't reconnect
+ * the MACs because of the pending reset. */
+ EFX_ERR(efx, "Resetting to recover from flush failure\n");
+ efx_schedule_reset(efx, RESET_TYPE_ALL);
+ } else if (rc) {
EFX_ERR(efx, "failed to flush queues\n");
- else
+ } else {
EFX_LOG(efx, "successfully flushed all queues\n");
+ }
efx_for_each_channel(channel, efx) {
EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
@@ -586,9 +581,9 @@ static void efx_remove_channel(struct efx_channel *channel)
efx_remove_eventq(channel);
}
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
- queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
+ mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}
/**************************************************************************
@@ -1233,15 +1228,8 @@ static void efx_start_all(struct efx_nic *efx)
* since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
- struct efx_rx_queue *rx_queue;
-
/* Make sure the hardware monitor is stopped */
cancel_delayed_work_sync(&efx->monitor_work);
-
- /* Ensure that all RX slow refills are complete. */
- efx_for_each_rx_queue(rx_queue, efx)
- cancel_delayed_work_sync(&rx_queue->work);
-
/* Stop scheduled port reconfigurations */
cancel_work_sync(&efx->mac_work);
}
@@ -1504,11 +1492,11 @@ static int efx_net_stop(struct net_device *net_dev)
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
-static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
+static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_mac_stats *mac_stats = &efx->mac_stats;
- struct net_device_stats *stats = &net_dev->stats;
+ struct rtnl_link_stats64 *stats = &net_dev->stats64;
spin_lock_bh(&efx->stats_lock);
efx->type->update_stats(efx);
@@ -1530,11 +1518,8 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
stats->tx_window_errors = mac_stats->tx_late_collision;
stats->rx_errors = (stats->rx_length_errors +
- stats->rx_over_errors +
stats->rx_crc_errors +
stats->rx_frame_errors +
- stats->rx_fifo_errors +
- stats->rx_missed_errors +
mac_stats->rx_symbol_error);
stats->tx_errors = (stats->tx_window_errors +
mac_stats->tx_bad);
@@ -1645,7 +1630,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
- .ndo_get_stats = efx_net_stats,
+ .ndo_get_stats64 = efx_net_stats,
.ndo_tx_timeout = efx_watchdog,
.ndo_start_xmit = efx_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
@@ -1886,6 +1871,9 @@ static void efx_reset_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
+ if (efx->reset_pending == RESET_TYPE_NONE)
+ return;
+
/* If we're not RUNNING then don't reset. Leave the reset_pending
* flag set so that efx_pci_probe_main will be retried */
if (efx->state != STATE_RUNNING) {
@@ -2052,8 +2040,8 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
rx_queue->queue = i;
rx_queue->channel = &efx->channel[0]; /* for safety */
rx_queue->buffer = NULL;
- spin_lock_init(&rx_queue->add_lock);
- INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
+ setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
+ (unsigned long)rx_queue);
}
efx->type = type;
@@ -2332,6 +2320,9 @@ static int efx_pm_thaw(struct device *dev)
efx->type->resume_wol(efx);
+ /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
+ queue_work(reset_workqueue, &efx->reset_work);
+
return 0;
}
@@ -2421,11 +2412,6 @@ static int __init efx_init_module(void)
if (rc)
goto err_notifier;
- refill_workqueue = create_workqueue("sfc_refill");
- if (!refill_workqueue) {
- rc = -ENOMEM;
- goto err_refill;
- }
reset_workqueue = create_singlethread_workqueue("sfc_reset");
if (!reset_workqueue) {
rc = -ENOMEM;
@@ -2441,8 +2427,6 @@ static int __init efx_init_module(void)
err_pci:
destroy_workqueue(reset_workqueue);
err_reset:
- destroy_workqueue(refill_workqueue);
- err_refill:
unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
return rc;
@@ -2454,7 +2438,6 @@ static void __exit efx_exit_module(void)
pci_unregister_driver(&efx_pci_driver);
destroy_workqueue(reset_workqueue);
- destroy_workqueue(refill_workqueue);
unregister_netdevice_notifier(&efx_netdev_notifier);
}
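
The efx.c changes above replace the driver-global "sfc_refill" workqueue with a per-queue timer: when an atomic allocation fails, efx_schedule_slow_fill() arms a 100ms timer, and the timer handler only generates a fill event so that the refill itself runs inside the NAPI handler, which is why rx_queue->add_lock can go away. A minimal sketch of that lifecycle, assuming the pre-4.15 timer API that this patch uses (setup_timer() with an unsigned long context); all demo_* names are hypothetical:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_rxq {
	struct timer_list slow_fill;
	unsigned long slow_fill_count;
};

static struct demo_rxq rxq;

static void demo_slow_fill(unsigned long context)
{
	struct demo_rxq *q = (struct demo_rxq *)context;

	/* Timer (softirq) context: the real handler does not touch the
	 * descriptor ring here; it posts a FILL event so the refill runs
	 * in the NAPI handler, which is the ring's only writer. */
	q->slow_fill_count++;
}

static int __init demo_init(void)
{
	setup_timer(&rxq.slow_fill, demo_slow_fill, (unsigned long)&rxq);
	/* What efx_schedule_slow_fill() now does on allocation failure: */
	mod_timer(&rxq.slow_fill, jiffies + msecs_to_jiffies(100));
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&rxq.slow_fill);	/* cf. efx_fini_rx_queue() */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
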
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index ffd708c5304a..e1e448887dfc 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -47,12 +47,12 @@ extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_rx_strategy(struct efx_channel *channel);
extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
-extern void efx_rx_work(struct work_struct *data);
+extern void efx_rx_slow_fill(unsigned long context);
extern void __efx_rx_packet(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf, bool checksummed);
extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int len, bool checksummed, bool discard);
-extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
+extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_RXQ_SIZE 1024
#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 655b697b45b2..8558865ff380 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -548,7 +548,9 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
struct efx_link_state *link_state = &efx->link_state;
efx_oword_t reg;
- int link_speed;
+ int link_speed, isolate;
+
+ isolate = (efx->reset_pending != RESET_TYPE_NONE);
switch (link_state->speed) {
case 10000: link_speed = 3; break;
@@ -570,7 +572,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
* discarded. */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
- !link_state->up);
+ !link_state->up || isolate);
}
efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
@@ -584,7 +586,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
/* Unisolate the MAC -> RX */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 6032c0e1f1f8..86e43b1f7689 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -20,7 +20,7 @@
#include "nic.h"
#include "selftest.h"
-struct efx_mcdi_phy_cfg {
+struct efx_mcdi_phy_data {
u32 flags;
u32 type;
u32 supported_cap;
@@ -35,7 +35,7 @@ struct efx_mcdi_phy_cfg {
};
static int
-efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg)
+efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
{
u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
size_t outlen;
@@ -259,7 +259,7 @@ static u32 ethtool_to_mcdi_cap(u32 cap)
static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
enum efx_phy_mode mode, supported;
u32 flags;
@@ -307,7 +307,7 @@ static u32 mcdi_to_ethtool_media(u32 media)
static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
- struct efx_mcdi_phy_cfg *phy_data;
+ struct efx_mcdi_phy_data *phy_data;
u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
u32 caps;
int rc;
@@ -395,6 +395,7 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
efx->wanted_fc |= EFX_FC_AUTO;
+ efx_link_set_wanted_fc(efx, efx->wanted_fc);
return 0;
@@ -405,7 +406,7 @@ fail:
int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps = (efx->link_advertising ?
ethtool_to_mcdi_cap(efx->link_advertising) :
phy_cfg->forced_cap);
@@ -446,7 +447,7 @@ void efx_mcdi_phy_decode_link(struct efx_nic *efx,
*/
void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 rmtadv;
/* The link partner capabilities are only relevant if the
@@ -505,7 +506,7 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx)
static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
int rc;
@@ -535,7 +536,7 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps;
int rc;
@@ -674,7 +675,7 @@ out:
static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
unsigned flags)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 mode;
int rc;
@@ -712,7 +713,7 @@ static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
if (index == 0)
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 2e6fd89f2a72..ba636e086fc3 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -18,6 +18,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/timer.h>
#include <linux/mdio.h>
#include <linux/list.h>
#include <linux/pci.h>
@@ -221,7 +222,6 @@ struct efx_tx_queue {
* If both this and skb are %NULL, the buffer slot is currently free.
* @data: Pointer to ethernet header
* @len: Buffer length, in bytes.
- * @unmap_addr: DMA address to unmap
*/
struct efx_rx_buffer {
dma_addr_t dma_addr;
@@ -229,7 +229,24 @@ struct efx_rx_buffer {
struct page *page;
char *data;
unsigned int len;
- dma_addr_t unmap_addr;
+};
+
+/**
+ * struct efx_rx_page_state - Page-based rx buffer state
+ *
+ * Inserted at the start of every page allocated for receive buffers.
+ * Used to facilitate sharing dma mappings between recycled rx buffers
+ * and those passed up to the kernel.
+ *
+ * @refcnt: Number of struct efx_rx_buffer's referencing this page.
+ * When refcnt falls to zero, the page is unmapped for dma
+ * @dma_addr: The dma address of this page.
+ */
+struct efx_rx_page_state {
+ unsigned refcnt;
+ dma_addr_t dma_addr;
+
+ unsigned int __pad[0] ____cacheline_aligned;
};
/**
@@ -242,10 +259,6 @@ struct efx_rx_buffer {
* @added_count: Number of buffers added to the receive queue.
* @notified_count: Number of buffers given to NIC (<= @added_count).
* @removed_count: Number of buffers removed from the receive queue.
- * @add_lock: Receive queue descriptor add spin lock.
- * This lock must be held in order to add buffers to the RX
- * descriptor ring (rxd and buffer) and to update added_count (but
- * not removed_count).
* @max_fill: RX descriptor maximum fill level (<= ring size)
* @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
* (<= @max_fill)
@@ -259,12 +272,7 @@ struct efx_rx_buffer {
* overflow was observed. It should never be set.
* @alloc_page_count: RX allocation strategy counter.
* @alloc_skb_count: RX allocation strategy counter.
- * @work: Descriptor push work thread
- * @buf_page: Page for next RX buffer.
- * We can use a single page for multiple RX buffers. This tracks
- * the remaining space in the allocation.
- * @buf_dma_addr: Page's DMA address.
- * @buf_data: Page's host address.
+ * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
* @flushed: Use when handling queue flushing
*/
struct efx_rx_queue {
@@ -277,7 +285,6 @@ struct efx_rx_queue {
int added_count;
int notified_count;
int removed_count;
- spinlock_t add_lock;
unsigned int max_fill;
unsigned int fast_fill_trigger;
unsigned int fast_fill_limit;
@@ -285,12 +292,9 @@ struct efx_rx_queue {
unsigned int min_overfill;
unsigned int alloc_page_count;
unsigned int alloc_skb_count;
- struct delayed_work work;
+ struct timer_list slow_fill;
unsigned int slow_fill_count;
- struct page *buf_page;
- dma_addr_t buf_dma_addr;
- char *buf_data;
enum efx_flush_state flushed;
};
@@ -336,7 +340,7 @@ enum efx_rx_alloc_method {
* @eventq: Event queue buffer
* @eventq_read_ptr: Event queue read pointer
* @last_eventq_read_ptr: Last event queue read pointer value.
- * @eventq_magic: Event queue magic value for driver-generated test events
+ * @magic_count: Event queue test event count
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
* @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -367,7 +371,7 @@ struct efx_channel {
struct efx_special_buffer eventq;
unsigned int eventq_read_ptr;
unsigned int last_eventq_read_ptr;
- unsigned int eventq_magic;
+ unsigned int magic_count;
unsigned int irq_count;
unsigned int irq_mod_score;
@@ -645,6 +649,7 @@ union efx_multicast_hash {
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
* @pci_dev: The PCI device
+ * @port_num: Index of this host port within the controller
* @type: Controller type attributes
* @legacy_irq: IRQ number
* @workqueue: Workqueue for port reconfigures and the HW monitor.
@@ -728,6 +733,7 @@ union efx_multicast_hash {
struct efx_nic {
char name[IFNAMSIZ];
struct pci_dev *pci_dev;
+ unsigned port_num;
const struct efx_nic_type *type;
int legacy_irq;
struct workqueue_struct *workqueue;
@@ -830,7 +836,7 @@ static inline const char *efx_dev_name(struct efx_nic *efx)
static inline unsigned int efx_port_num(struct efx_nic *efx)
{
- return PCI_FUNC(efx->pci_dev->devfn);
+ return efx->net_dev->dev_id;
}
/**
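
The efx_rx_page_state header added above changes the receive-page layout: the state struct occupies the start of each DMA-mapped page, and up to two buffers follow it, one per half page. The standalone model below checks that arithmetic under stated assumptions (4096-byte pages, a 64-byte cacheline so the ____cacheline_aligned pad rounds the header to 64 bytes, and an EFX_PAGE_IP_ALIGN of 2); the DEMO_* constants are illustrative, not quoted from the driver:

#include <assert.h>
#include <stdio.h>

/* Assumed constants -- none of these values are quoted from the driver */
#define DEMO_PAGE_SIZE     4096u  /* PAGE_SIZE on most architectures */
#define DEMO_CACHELINE       64u  /* ____cacheline_aligned pads to this */
#define DEMO_IP_ALIGN         2u  /* EFX_PAGE_IP_ALIGN on most arches */

/* The __pad[0] member rounds the header up to a full cacheline */
#define DEMO_STATE_SIZE    DEMO_CACHELINE
#define DEMO_RX_HALF_PAGE  ((DEMO_PAGE_SIZE >> 1) - DEMO_STATE_SIZE)

int main(void)
{
	unsigned rx_buffer_len = DEMO_IP_ALIGN + 1514 + 16; /* align + frame + padding */
	unsigned buf0 = DEMO_STATE_SIZE + DEMO_IP_ALIGN;    /* first buffer offset */
	unsigned buf1 = buf0 + (DEMO_PAGE_SIZE >> 1);       /* second buffer offset */

	/* The split in efx_init_rx_buffers_page() is only taken when a
	 * buffer fits into half a page after the state header... */
	assert(rx_buffer_len <= DEMO_RX_HALF_PAGE);
	/* ...and then the two buffer offsets differ only in bit 11, which
	 * is what makes the XOR-based buffer recovery in rx.c possible. */
	assert((buf0 ^ (DEMO_PAGE_SIZE >> 1)) == buf1);

	printf("state 0..%u, buf0 @%u, buf1 @%u\n",
	       DEMO_STATE_SIZE - 1, buf0, buf1);
	return 0;
}
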
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 5d3aaec58556..0ee6fd367e6f 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -79,6 +79,14 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4
+/* Generated event code for efx_generate_test_event() */
+#define EFX_CHANNEL_MAGIC_TEST(_channel) \
+ (0x00010100 + (_channel)->channel)
+
+/* Generated event code for efx_generate_fill_event() */
+#define EFX_CHANNEL_MAGIC_FILL(_channel) \
+ (0x00010200 + (_channel)->channel)
+
/**************************************************************************
*
* Solarstorm hardware access
@@ -850,6 +858,26 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
checksummed, discard);
}
+static void
+efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned code;
+
+ code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
+ if (code == EFX_CHANNEL_MAGIC_TEST(channel))
+ ++channel->magic_count;
+ else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
+ /* The queue must be empty, so we won't receive any rx
+ * events, so efx_process_channel() won't refill the
+ * queue. Refill it here */
+ efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+ else
+ EFX_LOG(efx, "channel %d received generated "
+ "event "EFX_QWORD_FMT"\n", channel->channel,
+ EFX_QWORD_VAL(*event));
+}
+
/* Global events are basically PHY events */
static void
efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
@@ -993,11 +1021,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
}
break;
case FSE_AZ_EV_CODE_DRV_GEN_EV:
- channel->eventq_magic = EFX_QWORD_FIELD(
- event, FSF_AZ_DRV_GEN_EV_MAGIC);
- EFX_LOG(channel->efx, "channel %d received generated "
- "event "EFX_QWORD_FMT"\n", channel->channel,
- EFX_QWORD_VAL(event));
+ efx_handle_generated_event(channel, &event);
break;
case FSE_AZ_EV_CODE_GLOBAL_EV:
efx_handle_global_event(channel, &event);
@@ -1088,12 +1112,20 @@ void efx_nic_remove_eventq(struct efx_channel *channel)
}
-/* Generates a test event on the event queue. A subsequent call to
- * process_eventq() should pick up the event and place the value of
- * "magic" into channel->eventq_magic;
- */
-void efx_nic_generate_test_event(struct efx_channel *channel, unsigned int magic)
+void efx_nic_generate_test_event(struct efx_channel *channel)
{
+ unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
+ efx_qword_t test_event;
+
+ EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
+ FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+ efx_generate_event(channel, &test_event);
+}
+
+void efx_nic_generate_fill_event(struct efx_channel *channel)
+{
+ unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
efx_qword_t test_event;
EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
@@ -1219,9 +1251,6 @@ int efx_nic_flush_queues(struct efx_nic *efx)
rx_queue->flushed = FLUSH_DONE;
}
- if (EFX_WORKAROUND_7803(efx))
- return 0;
-
return -ETIMEDOUT;
}
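
efx_handle_generated_event() above dispatches on a magic value that now encodes both the event's purpose and its channel: the constants appear to keep the purpose in bits 8-15 (0x01 for test, 0x02 for fill) and the channel number in the low byte, so the scheme assumes fewer than 256 channels to avoid collisions. A hedged illustration of that dispatch (the two constants match the patch; decode() is hypothetical):

#include <stdio.h>

/* Constants copied from the patch; decode() is hypothetical */
#define MAGIC_TEST(channel)  (0x00010100 + (channel))
#define MAGIC_FILL(channel)  (0x00010200 + (channel))

static const char *decode(unsigned code, unsigned channel)
{
	if (code == MAGIC_TEST(channel))
		return "test event: bump magic_count";
	if (code == MAGIC_FILL(channel))
		return "fill event: push rx descriptors";
	return "unexpected generated event: log it";
}

int main(void)
{
	printf("%s\n", decode(0x00010203, 3));  /* fill event for channel 3 */
	printf("%s\n", decode(0x00010100, 0));  /* test event for channel 0 */
	return 0;
}
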
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index bbc2c0c2f843..95770e15115d 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -190,8 +190,8 @@ extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
/* Interrupts and test events */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
extern void efx_nic_enable_interrupts(struct efx_nic *efx);
-extern void efx_nic_generate_test_event(struct efx_channel *channel,
- unsigned int magic);
+extern void efx_nic_generate_test_event(struct efx_channel *channel);
+extern void efx_nic_generate_fill_event(struct efx_channel *channel);
extern void efx_nic_generate_interrupt(struct efx_nic *efx);
extern void efx_nic_disable_interrupts(struct efx_nic *efx);
extern void efx_nic_fini_interrupt(struct efx_nic *efx);
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index e308818b9f55..9fb698e3519d 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -25,6 +25,9 @@
/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH 8
+/* Maximum size of a buffer sharing a page */
+#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
+
/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 64u
@@ -98,155 +101,138 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
return PAGE_SIZE << efx->rx_buffer_order;
}
-
/**
- * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
+ * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
*
* @rx_queue: Efx RX queue
- * @rx_buf: RX buffer structure to populate
*
- * This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information. Return a negative error code or 0 on success.
+ * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
+ * struct efx_rx_buffer for each one. Return a negative error code or 0
+ * on success. May fail having only inserted fewer than EFX_RX_BATCH
+ * buffers.
*/
-static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
struct net_device *net_dev = efx->net_dev;
+ struct efx_rx_buffer *rx_buf;
int skb_len = efx->rx_buffer_len;
+ unsigned index, count;
- rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
- if (unlikely(!rx_buf->skb))
- return -ENOMEM;
+ for (count = 0; count < EFX_RX_BATCH; ++count) {
+ index = rx_queue->added_count & EFX_RXQ_MASK;
+ rx_buf = efx_rx_buffer(rx_queue, index);
- /* Adjust the SKB for padding and checksum */
- skb_reserve(rx_buf->skb, NET_IP_ALIGN);
- rx_buf->len = skb_len - NET_IP_ALIGN;
- rx_buf->data = (char *)rx_buf->skb->data;
- rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+ rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
+ if (unlikely(!rx_buf->skb))
+ return -ENOMEM;
+ rx_buf->page = NULL;
- rx_buf->dma_addr = pci_map_single(efx->pci_dev,
- rx_buf->data, rx_buf->len,
- PCI_DMA_FROMDEVICE);
+ /* Adjust the SKB for padding and checksum */
+ skb_reserve(rx_buf->skb, NET_IP_ALIGN);
+ rx_buf->len = skb_len - NET_IP_ALIGN;
+ rx_buf->data = (char *)rx_buf->skb->data;
+ rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+ rx_buf->data, rx_buf->len,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(efx->pci_dev,
+ rx_buf->dma_addr))) {
+ dev_kfree_skb_any(rx_buf->skb);
+ rx_buf->skb = NULL;
+ return -EIO;
+ }
- if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
- dev_kfree_skb_any(rx_buf->skb);
- rx_buf->skb = NULL;
- return -EIO;
+ ++rx_queue->added_count;
+ ++rx_queue->alloc_skb_count;
}
return 0;
}
/**
- * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
+ * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
*
* @rx_queue: Efx RX queue
- * @rx_buf: RX buffer structure to populate
*
- * This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information. Return a negative error code or 0 on success.
+ * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
+ * and populates a struct efx_rx_buffer for each one. Return a negative error
+ * code or 0 on success. If a single page can be split between two buffers,
+ * then the page will either be inserted fully, or not at all.
*/
-static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
- int bytes, space, offset;
-
- bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
-
- /* If there is space left in the previously allocated page,
- * then use it. Otherwise allocate a new one */
- rx_buf->page = rx_queue->buf_page;
- if (rx_buf->page == NULL) {
- dma_addr_t dma_addr;
-
- rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
- efx->rx_buffer_order);
- if (unlikely(rx_buf->page == NULL))
+ struct efx_rx_buffer *rx_buf;
+ struct page *page;
+ void *page_addr;
+ struct efx_rx_page_state *state;
+ dma_addr_t dma_addr;
+ unsigned index, count;
+
+ /* We can split a page between two buffers */
+ BUILD_BUG_ON(EFX_RX_BATCH & 1);
+
+ for (count = 0; count < EFX_RX_BATCH; ++count) {
+ page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+ efx->rx_buffer_order);
+ if (unlikely(page == NULL))
return -ENOMEM;
-
- dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
- 0, efx_rx_buf_size(efx),
+ dma_addr = pci_map_page(efx->pci_dev, page, 0,
+ efx_rx_buf_size(efx),
PCI_DMA_FROMDEVICE);
-
if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
- __free_pages(rx_buf->page, efx->rx_buffer_order);
- rx_buf->page = NULL;
+ __free_pages(page, efx->rx_buffer_order);
return -EIO;
}
-
- rx_queue->buf_page = rx_buf->page;
- rx_queue->buf_dma_addr = dma_addr;
- rx_queue->buf_data = (page_address(rx_buf->page) +
- EFX_PAGE_IP_ALIGN);
- }
-
- rx_buf->len = bytes;
- rx_buf->data = rx_queue->buf_data;
- offset = efx_rx_buf_offset(rx_buf);
- rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
-
- /* Try to pack multiple buffers per page */
- if (efx->rx_buffer_order == 0) {
- /* The next buffer starts on the next 512 byte boundary */
- rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
- offset += ((bytes + 0x1ff) & ~0x1ff);
-
- space = efx_rx_buf_size(efx) - offset;
- if (space >= bytes) {
- /* Refs dropped on kernel releasing each skb */
- get_page(rx_queue->buf_page);
- goto out;
+ page_addr = page_address(page);
+ state = page_addr;
+ state->refcnt = 0;
+ state->dma_addr = dma_addr;
+
+ page_addr += sizeof(struct efx_rx_page_state);
+ dma_addr += sizeof(struct efx_rx_page_state);
+
+ split:
+ index = rx_queue->added_count & EFX_RXQ_MASK;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->skb = NULL;
+ rx_buf->page = page;
+ rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+ ++rx_queue->added_count;
+ ++rx_queue->alloc_page_count;
+ ++state->refcnt;
+
+ if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
+ /* Use the second half of the page */
+ get_page(page);
+ dma_addr += (PAGE_SIZE >> 1);
+ page_addr += (PAGE_SIZE >> 1);
+ ++count;
+ goto split;
}
}
- /* This is the final RX buffer for this page, so mark it for
- * unmapping */
- rx_queue->buf_page = NULL;
- rx_buf->unmap_addr = rx_queue->buf_dma_addr;
-
- out:
return 0;
}
-/* This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information.
- */
-static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *new_rx_buf)
-{
- int rc = 0;
-
- if (rx_queue->channel->rx_alloc_push_pages) {
- new_rx_buf->skb = NULL;
- rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
- rx_queue->alloc_page_count++;
- } else {
- new_rx_buf->page = NULL;
- rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
- rx_queue->alloc_skb_count++;
- }
-
- if (unlikely(rc < 0))
- EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
- rx_queue->queue, rc);
- return rc;
-}
-
static void efx_unmap_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf)
{
if (rx_buf->page) {
+ struct efx_rx_page_state *state;
+
EFX_BUG_ON_PARANOID(rx_buf->skb);
- if (rx_buf->unmap_addr) {
- pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
+
+ state = page_address(rx_buf->page);
+ if (--state->refcnt == 0) {
+ pci_unmap_page(efx->pci_dev,
+ state->dma_addr,
efx_rx_buf_size(efx),
PCI_DMA_FROMDEVICE);
- rx_buf->unmap_addr = 0;
}
} else if (likely(rx_buf->skb)) {
pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
@@ -273,31 +259,84 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
efx_free_rx_buffer(rx_queue->efx, rx_buf);
}
+/* Attempt to resurrect the other receive buffer that used to share this page,
+ * which had previously been passed up to the kernel and freed. */
+static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct efx_rx_page_state *state = page_address(rx_buf->page);
+ struct efx_rx_buffer *new_buf;
+ unsigned fill_level, index;
+
+ /* +1 because efx_rx_packet() incremented removed_count. +1 because
+ * we'd like to insert an additional descriptor whilst leaving
+ * EFX_RXD_HEAD_ROOM for the non-recycle path */
+ fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
+ if (unlikely(fill_level >= EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM)) {
+ /* We could place "state" on a list, and drain the list in
+ * efx_fast_push_rx_descriptors(). For now, this will do. */
+ return;
+ }
+
+ ++state->refcnt;
+ get_page(rx_buf->page);
+
+ index = rx_queue->added_count & EFX_RXQ_MASK;
+ new_buf = efx_rx_buffer(rx_queue, index);
+ new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
+ new_buf->skb = NULL;
+ new_buf->page = rx_buf->page;
+ new_buf->data = (void *)
+ ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
+ new_buf->len = rx_buf->len;
+ ++rx_queue->added_count;
+}
+
+/* Recycle the given rx buffer directly back into the rx_queue. There is
+ * always room to add this buffer, because we've just popped a buffer. */
+static void efx_recycle_rx_buffer(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel];
+ struct efx_rx_buffer *new_buf;
+ unsigned index;
+
+ if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+ page_count(rx_buf->page) == 1)
+ efx_resurrect_rx_buffer(rx_queue, rx_buf);
+
+ index = rx_queue->added_count & EFX_RXQ_MASK;
+ new_buf = efx_rx_buffer(rx_queue, index);
+
+ memcpy(new_buf, rx_buf, sizeof(*new_buf));
+ rx_buf->page = NULL;
+ rx_buf->skb = NULL;
+ ++rx_queue->added_count;
+}
+
/**
* efx_fast_push_rx_descriptors - push new RX descriptors quickly
* @rx_queue: RX descriptor queue
- * @retry: Recheck the fill level
* This will aim to fill the RX descriptor queue up to
* @rx_queue->@fast_fill_limit. If there is insufficient atomic
- * memory to do so, the caller should retry.
+ * memory to do so, a slow fill will be scheduled.
+ *
+ * The caller must provide serialisation (none is used here). In practice,
+ * this means this function must run from the NAPI handler, or be called
+ * when NAPI is disabled.
*/
-static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
- int retry)
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
- struct efx_rx_buffer *rx_buf;
- unsigned fill_level, index;
- int i, space, rc = 0;
+ struct efx_channel *channel = rx_queue->channel;
+ unsigned fill_level;
+ int space, rc = 0;
- /* Calculate current fill level. Do this outside the lock,
- * because most of the time we'll end up not wanting to do the
- * fill anyway.
- */
+ /* Calculate current fill level, and exit if we don't need to fill */
fill_level = (rx_queue->added_count - rx_queue->removed_count);
EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
-
- /* Don't fill if we don't need to */
if (fill_level >= rx_queue->fast_fill_trigger)
- return 0;
+ goto out;
/* Record minimum fill level */
if (unlikely(fill_level < rx_queue->min_fill)) {
@@ -305,34 +344,25 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
rx_queue->min_fill = fill_level;
}
- /* Acquire RX add lock. If this lock is contended, then a fast
- * fill must already be in progress (e.g. in the refill
- * tasklet), so we don't need to do anything
- */
- if (!spin_trylock_bh(&rx_queue->add_lock))
- return -1;
-
- retry:
- /* Recalculate current fill level now that we have the lock */
- fill_level = (rx_queue->added_count - rx_queue->removed_count);
- EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
space = rx_queue->fast_fill_limit - fill_level;
if (space < EFX_RX_BATCH)
- goto out_unlock;
+ goto out;
EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
" level %d to level %d using %s allocation\n",
rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
- rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");
+ channel->rx_alloc_push_pages ? "page" : "skb");
do {
- for (i = 0; i < EFX_RX_BATCH; ++i) {
- index = rx_queue->added_count & EFX_RXQ_MASK;
- rx_buf = efx_rx_buffer(rx_queue, index);
- rc = efx_init_rx_buffer(rx_queue, rx_buf);
- if (unlikely(rc))
- goto out;
- ++rx_queue->added_count;
+ if (channel->rx_alloc_push_pages)
+ rc = efx_init_rx_buffers_page(rx_queue);
+ else
+ rc = efx_init_rx_buffers_skb(rx_queue);
+ if (unlikely(rc)) {
+ /* Ensure that we don't leave the rx queue empty */
+ if (rx_queue->added_count == rx_queue->removed_count)
+ efx_schedule_slow_fill(rx_queue);
+ goto out;
}
} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
@@ -341,63 +371,18 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
rx_queue->added_count - rx_queue->removed_count);
out:
- /* Send write pointer to card. */
- efx_nic_notify_rx_desc(rx_queue);
-
- /* If the fast fill is running inside from the refill tasklet, then
- * for SMP systems it may be running on a different CPU to
- * RX event processing, which means that the fill level may now be
- * out of date. */
- if (unlikely(retry && (rc == 0)))
- goto retry;
-
- out_unlock:
- spin_unlock_bh(&rx_queue->add_lock);
-
- return rc;
-}
-
-/**
- * efx_fast_push_rx_descriptors - push new RX descriptors quickly
- * @rx_queue: RX descriptor queue
- *
- * This will aim to fill the RX descriptor queue up to
- * @rx_queue->@fast_fill_limit. If there is insufficient memory to do so,
- * it will schedule a work item to immediately continue the fast fill
- */
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
-{
- int rc;
-
- rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
- if (unlikely(rc)) {
- /* Schedule the work item to run immediately. The hope is
- * that work is immediately pending to free some memory
- * (e.g. an RX event or TX completion)
- */
- efx_schedule_slow_fill(rx_queue, 0);
- }
+ if (rx_queue->notified_count != rx_queue->added_count)
+ efx_nic_notify_rx_desc(rx_queue);
}
-void efx_rx_work(struct work_struct *data)
+void efx_rx_slow_fill(unsigned long context)
{
- struct efx_rx_queue *rx_queue;
- int rc;
-
- rx_queue = container_of(data, struct efx_rx_queue, work.work);
-
- if (unlikely(!rx_queue->channel->enabled))
- return;
-
- EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
- "%d\n", rx_queue->queue, raw_smp_processor_id());
+ struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
+ struct efx_channel *channel = rx_queue->channel;
+ /* Post an event to cause NAPI to run and refill the queue */
+ efx_nic_generate_fill_event(channel);
++rx_queue->slow_fill_count;
- /* Push new RX descriptors, allowing at least 1 jiffy for
- * the kernel to free some more memory. */
- rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
- if (rc)
- efx_schedule_slow_fill(rx_queue, 1);
}
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
@@ -498,6 +483,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int len, bool checksummed, bool discard)
{
struct efx_nic *efx = rx_queue->efx;
+ struct efx_channel *channel = rx_queue->channel;
struct efx_rx_buffer *rx_buf;
bool leak_packet = false;
@@ -525,12 +511,13 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
/* Discard packet, if instructed to do so */
if (unlikely(discard)) {
if (unlikely(leak_packet))
- rx_queue->channel->n_skbuff_leaks++;
+ channel->n_skbuff_leaks++;
else
- /* We haven't called efx_unmap_rx_buffer yet,
- * so fini the entire rx_buffer here */
- efx_fini_rx_buffer(rx_queue, rx_buf);
- return;
+ efx_recycle_rx_buffer(channel, rx_buf);
+
+ /* Don't hold off the previous receive */
+ rx_buf = NULL;
+ goto out;
}
/* Release card resources - assumes all RX buffers consumed in-order
@@ -547,6 +534,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
* prefetched into cache.
*/
rx_buf->len = len;
+out:
if (rx_queue->channel->rx_pkt)
__efx_rx_packet(rx_queue->channel,
rx_queue->channel->rx_pkt,
@@ -682,6 +670,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
+ del_timer_sync(&rx_queue->slow_fill);
efx_nic_fini_rx(rx_queue);
/* Release RX buffers NB start at index 0 not current HW ptr */
@@ -691,16 +680,6 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
efx_fini_rx_buffer(rx_queue, rx_buf);
}
}
-
- /* For a page that is part-way through splitting into RX buffers */
- if (rx_queue->buf_page != NULL) {
- pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
- efx_rx_buf_size(rx_queue->efx),
- PCI_DMA_FROMDEVICE);
- __free_pages(rx_queue->buf_page,
- rx_queue->efx->rx_buffer_order);
- rx_queue->buf_page = NULL;
- }
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
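
The recycle path above leans on one arithmetic fact: the two buffers sharing a page have DMA and virtual addresses that differ only in bit 11, so XOR with PAGE_SIZE >> 1 turns either address into its sibling's. efx_resurrect_rx_buffer() uses exactly this to rebuild the buffer the kernel has already freed, without storing a second pointer anywhere. A worked example with hypothetical addresses (4KiB page, 64-byte state header, and 2-byte IP alignment assumed):

#include <stdio.h>

int main(void)
{
	const unsigned long half = 4096ul >> 1;        /* PAGE_SIZE >> 1 */
	/* hypothetical DMA mapping of one page (page-aligned by definition) */
	unsigned long buf0 = 0x10000000ul + 64 + 2;    /* page + state + align */
	unsigned long buf1 = buf0 + half;              /* its sibling */

	/* the expression used by efx_resurrect_rx_buffer() */
	printf("%#lx <-> %#lx\n", buf0 ^ half, buf1 ^ half);
	return 0;
}
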
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 371e86cc090f..1f83404af63b 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -38,7 +38,7 @@ struct efx_loopback_payload {
struct udphdr udp;
__be16 iteration;
const char msg[64];
-} __attribute__ ((packed));
+} __packed;
/* Loopback test source MAC address */
static const unsigned char payload_source[ETH_ALEN] = {
@@ -161,23 +161,17 @@ static int efx_test_interrupts(struct efx_nic *efx,
static int efx_test_eventq_irq(struct efx_channel *channel,
struct efx_self_tests *tests)
{
- unsigned int magic, count;
-
- /* Channel specific code, limited to 20 bits */
- magic = (0x00010150 + channel->channel);
- EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
- channel->channel, magic);
+ unsigned int magic_count, count;
tests->eventq_dma[channel->channel] = -1;
tests->eventq_int[channel->channel] = -1;
tests->eventq_poll[channel->channel] = -1;
- /* Reset flag and zero magic word */
+ magic_count = channel->magic_count;
channel->efx->last_irq_cpu = -1;
- channel->eventq_magic = 0;
smp_wmb();
- efx_nic_generate_test_event(channel, magic);
+ efx_nic_generate_test_event(channel);
/* Wait for arrival of interrupt */
count = 0;
@@ -187,7 +181,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
if (channel->work_pending)
efx_process_channel_now(channel);
- if (channel->eventq_magic == magic)
+ if (channel->magic_count != magic_count)
goto eventq_ok;
} while (++count < 2);
@@ -204,7 +198,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
/* Check to see if event was received even if interrupt wasn't */
efx_process_channel_now(channel);
- if (channel->eventq_magic == magic) {
+ if (channel->magic_count != magic_count) {
EFX_ERR(channel->efx, "channel %d event was generated, but "
"failed to trigger an interrupt\n", channel->channel);
tests->eventq_dma[channel->channel] = 1;
@@ -545,7 +539,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
static int efx_wait_for_link(struct efx_nic *efx)
{
struct efx_link_state *link_state = &efx->link_state;
- int count;
+ int count, link_up_count = 0;
bool link_up;
for (count = 0; count < 40; count++) {
@@ -567,8 +561,12 @@ static int efx_wait_for_link(struct efx_nic *efx)
link_up = !efx->mac_op->check_fault(efx);
mutex_unlock(&efx->mac_lock);
- if (link_up)
- return 0;
+ if (link_up) {
+ if (++link_up_count == 2)
+ return 0;
+ } else {
+ link_up_count = 0;
+ }
}
return -ETIMEDOUT;
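
The efx_wait_for_link() change at the end requires the link to poll up twice in a row before the loopback test proceeds, filtering the transient link-up readings seen while the MAC is still settling after reconfiguration. A standalone sketch of that debounce; poll_link_up() is a hypothetical stand-in for the driver's mutex-protected check_fault() poll:

#include <stdbool.h>
#include <stdio.h>

/* scripted link state: one transient blip, then a solid link */
static bool poll_link_up(void)
{
	static const bool samples[] = { false, true, false, true, true };
	static unsigned i;
	return samples[i < 5 ? i++ : 4];
}

static bool wait_for_stable_link(int max_polls)
{
	int up_count = 0;

	for (int i = 0; i < max_polls; i++) {
		if (poll_link_up()) {
			if (++up_count == 2)	/* up on 2 consecutive polls */
				return true;
		} else {
			up_count = 0;		/* transient up: start again */
		}
	}
	return false;	/* the driver returns -ETIMEDOUT here */
}

int main(void)
{
	printf("link %s\n", wait_for_stable_link(40) ? "stable" : "timed out");
	return 0;
}
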
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 727b4228e081..f2b1e6180753 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -206,6 +206,7 @@ static int siena_probe_nic(struct efx_nic *efx)
{
struct siena_nic_data *nic_data;
bool already_attached = 0;
+ efx_oword_t reg;
int rc;
/* Allocate storage for hardware specific data */
@@ -220,6 +221,9 @@ static int siena_probe_nic(struct efx_nic *efx)
goto fail1;
}
+ efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
+ efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
+
efx_mcdi_init(efx);
/* Recover from a failed assertion before probing */
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 518f7fc91473..782e45a613d6 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -54,7 +54,7 @@
/* Increase filter depth to avoid RX_RESET */
#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
/* Flushes may never complete */
-#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_A
+#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB
/* Leak overlength packets rather than free */
#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
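
Extending EFX_WORKAROUND_7803 from FALCON_A to FALCON_AB makes the flush-failure reset added in efx_fini_channels() apply to Falcon rev B as well as rev A, matching the removal of the workaround check from efx_nic_flush_queues() above. For orientation, hedged reconstructions of what these revision predicates conventionally expand to (assumptions, not quotations; the real definitions near the top of workarounds.h are the authority):

/* Assumed reconstructions -- treat the driver's own definitions as final */
#define EFX_WORKAROUND_ALWAYS(efx)    1
#define EFX_WORKAROUND_FALCON_A(efx)  (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
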