Diffstat (limited to 'drivers/net/ehea/ehea_main.c')
-rw-r--r--  drivers/net/ehea/ehea_main.c  20
1 files changed, 12 insertions, 8 deletions
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index f0319f1e8e05..869e1604b16e 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -136,7 +136,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 	struct ehea_port *port = netdev_priv(dev);
 	struct net_device_stats *stats = &port->stats;
 	struct hcp_ehea_port_cb2 *cb2;
-	u64 hret, rx_packets;
+	u64 hret, rx_packets, tx_packets;
 	int i;
 
 	memset(stats, 0, sizeof(*stats));
@@ -162,7 +162,11 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 	for (i = 0; i < port->num_def_qps; i++)
 		rx_packets += port->port_res[i].rx_packets;
 
-	stats->tx_packets = cb2->txucp + cb2->txmcp + cb2->txbcp;
+	tx_packets = 0;
+	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+		tx_packets += port->port_res[i].tx_packets;
+
+	stats->tx_packets = tx_packets;
 	stats->multicast = cb2->rxmcp;
 	stats->rx_errors = cb2->rxuerr;
 	stats->rx_bytes = cb2->rxo;
@@ -406,11 +410,6 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
 	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
 		pr->p_stats.err_frame_crc++;
 
-	if (netif_msg_rx_err(pr->port)) {
-		ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr);
-		ehea_dump(cqe, sizeof(*cqe), "CQE");
-	}
-
 	if (rq == 2) {
 		*processed_rq2 += 1;
 		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
@@ -422,7 +421,11 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
 	}
 
 	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
-		ehea_error("Critical receive error. Resetting port.");
+		if (netif_msg_rx_err(pr->port)) {
+			ehea_error("Critical receive error for QP %d. "
+				   "Resetting port.", pr->qp->init_attr.qp_nr);
+			ehea_dump(cqe, sizeof(*cqe), "CQE");
+		}
 		schedule_work(&pr->port->reset_task);
 		return 1;
 	}
@@ -2000,6 +2003,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	ehea_post_swqe(pr->qp, swqe);
+	pr->tx_packets++;
 
 	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
 		spin_lock_irqsave(&pr->netif_queue, flags);