Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4/sge.c')
 drivers/net/ethernet/chelsio/cxgb4/sge.c | 173 ++++++++++++++++++++++++++++--
 1 file changed, 167 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index f05f0d400324..ede12209f20b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -52,6 +52,7 @@
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
+#include "cxgb4_ptp.h"
/*
* Rx buffer size. We use largish buffers if possible but settle for single
@@ -1162,7 +1163,7 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
*/
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- u32 wr_mid, ctrl0;
+ u32 wr_mid, ctrl0, op;
u64 cntrl, *end;
int qidx, credits;
unsigned int flits, ndesc;
@@ -1175,6 +1176,7 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t addr[MAX_SKB_FRAGS + 1];
bool immediate = false;
int len, max_pkt_len;
+ bool ptp_enabled = is_ptp_enabled(skb, dev);
#ifdef CONFIG_CHELSIO_T4_FCOE
int err;
#endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1198,15 +1200,31 @@ out_free: dev_kfree_skb_any(skb);
pi = netdev_priv(dev);
adap = pi->adapter;
qidx = skb_get_queue_mapping(skb);
- q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ if (ptp_enabled) {
+ spin_lock(&adap->ptp_lock);
+ if (!(adap->ptp_tx_skb)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ adap->ptp_tx_skb = skb_get(skb);
+ } else {
+ spin_unlock(&adap->ptp_lock);
+ goto out_free;
+ }
+ q = &adap->sge.ptptxq;
+ } else {
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ }
+ skb_tx_timestamp(skb);
reclaim_completed_tx(adap, &q->q, true);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
- if (unlikely(err == -ENOTSUPP))
+ if (unlikely(err == -ENOTSUPP)) {
+ if (ptp_enabled)
+ spin_unlock(&adap->ptp_lock);
goto out_free;
+ }
#endif /* CONFIG_CHELSIO_T4_FCOE */
flits = calc_tx_flits(skb);
@@ -1218,6 +1236,8 @@ out_free: dev_kfree_skb_any(skb);
dev_err(adap->pdev_dev,
"%s: Tx ring %u full while queue awake!\n",
dev->name, qidx);
+ if (ptp_enabled)
+ spin_unlock(&adap->ptp_lock);
return NETDEV_TX_BUSY;
}
@@ -1227,6 +1247,8 @@ out_free: dev_kfree_skb_any(skb);
if (!immediate &&
unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
+ if (ptp_enabled)
+ spin_unlock(&adap->ptp_lock);
goto out_free;
}
@@ -1279,7 +1301,11 @@ out_free: dev_kfree_skb_any(skb);
q->tx_cso += ssi->gso_segs;
} else {
len += sizeof(*cpl);
- wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+ if (ptp_enabled)
+ op = FW_PTP_TX_PKT_WR;
+ else
+ op = FW_ETH_TX_PKT_WR;
+ wr->op_immdlen = htonl(FW_WR_OP_V(op) |
FW_WR_IMMDLEN_V(len));
cpl = (void *)(wr + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -1301,6 +1327,8 @@ out_free: dev_kfree_skb_any(skb);
ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
TXPKT_PF_V(adap->pf);
+ if (ptp_enabled)
+ ctrl0 |= TXPKT_TSTAMP_F;
#ifdef CONFIG_CHELSIO_T4_DCB
if (is_t4(adap->params.chip))
ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
@@ -1332,6 +1360,8 @@ out_free: dev_kfree_skb_any(skb);
txq_advance(&q->q, ndesc);
ring_tx_db(adap, &q->q, ndesc);
+ if (ptp_enabled)
+ spin_unlock(&adap->ptp_lock);
return NETDEV_TX_OK;
}
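
The transmit-side hunks above follow the kernel's usual single-outstanding-skb convention for deferred hardware Tx timestamping: mark the skb with SKBTX_IN_PROGRESS, take an extra reference with skb_get(), and reject further PTP packets until the pending timestamp has been delivered. A minimal sketch of that admission step, assuming the driver-private struct adapter from cxgb4.h; ptp_tx_begin() is a hypothetical name, since the patch open-codes this logic directly in t4_eth_xmit():

#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include "cxgb4.h"			/* struct adapter (driver-private) */

/* Hypothetical helper mirroring the admission logic open-coded above;
 * on success it returns with adap->ptp_lock still held, exactly as
 * t4_eth_xmit() keeps it held across the send. */
static int ptp_tx_begin(struct adapter *adap, struct sk_buff *skb)
{
	spin_lock(&adap->ptp_lock);
	if (adap->ptp_tx_skb) {			/* one PTP skb in flight */
		spin_unlock(&adap->ptp_lock);
		return -EBUSY;			/* caller drops the packet */
	}
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	adap->ptp_tx_skb = skb_get(skb);	/* held until the TS arrives */
	return 0;
}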
@@ -2023,6 +2053,92 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
rxq->stats.rx_cso++;
}
+enum {
+ RX_NON_PTP_PKT = 0,
+ RX_PTP_PKT_SUC = 1,
+ RX_PTP_PKT_ERR = 2
+};
+
+/**
+ * t4_systim_to_hwstamp - read hardware time stamp
+ * @adapter: the adapter
+ * @skb: the packet
+ *
+ * Read the timestamp from the MPS packet header and insert it into the
+ * skb, which is then forwarded to the PTP application.
+ */
+static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
+ struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *hwtstamps;
+ struct cpl_rx_mps_pkt *cpl = NULL;
+ unsigned char *data;
+ int offset;
+
+ cpl = (struct cpl_rx_mps_pkt *)skb->data;
+ if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) &
+ X_CPL_RX_MPS_PKT_TYPE_PTP))
+ return RX_PTP_PKT_ERR;
+
+ data = skb->data + sizeof(*cpl);
+ skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt));
+ offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN;
+ if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short))
+ return RX_PTP_PKT_ERR;
+
+ hwtstamps = skb_hwtstamps(skb);
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data)));
+
+ return RX_PTP_PKT_SUC;
+}
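
The stamping step at the end of t4_systim_to_hwstamp() is worth isolating: the 64 bits that follow the CPL are a big-endian nanosecond counter, and all the function ultimately does with them is fill the skb's shared hwtstamps area. An illustrative distillation (stamp_rx_skb() is not a name from this patch):

#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/ktime.h>

/* 'ns' is the raw big-endian nanosecond counter read from the MPS
 * header; the PTP stack picks the result up via skb_hwtstamps(). */
static void stamp_rx_skb(struct sk_buff *skb, __be64 ns)
{
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(ns));
}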
+
+/**
+ * t4_rx_hststamp - Recv PTP Event Message
+ * @adapter: the adapter
+ * @rsp: the response queue descriptor holding the RX_PKT message
+ * @rxq: the Rx queue, used to count drops on a bad PTP packet
+ * @skb: the packet
+ *
+ * If PTP is enabled and this is an MPS packet, read the HW timestamp
+ */
+static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
+ struct sge_eth_rxq *rxq, struct sk_buff *skb)
+{
+ int ret;
+
+ if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) &&
+ !is_t4(adapter->params.chip))) {
+ ret = t4_systim_to_hwstamp(adapter, skb);
+ if (ret == RX_PTP_PKT_ERR) {
+ kfree_skb(skb);
+ rxq->stats.rx_drops++;
+ }
+ return ret;
+ }
+ return RX_NON_PTP_PKT;
+}
+
+/**
+ * t4_tx_hststamp - Loopback PTP Transmit Event Message
+ * @adapter: the adapter
+ * @skb: the packet
+ * @dev: the ingress net device
+ *
+ * Read the hardware timestamp for the looped-back PTP Tx event message.
+ */
+static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) {
+ cxgb4_ptp_read_hwstamp(adapter, pi);
+ kfree_skb(skb);
+ return 0;
+ }
+ return 1;
+}
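
cxgb4_ptp_read_hwstamp() itself is implemented in cxgb4_ptp.c, not in this file. What a Tx-timestamp completion of this kind conventionally does is fetch the timestamp from hardware, attach it to the held skb via skb_tstamp_tx(), and drop the reference taken in t4_eth_xmit(). A hedged sketch only, with the nanosecond value assumed to have been read from the adapter by the caller and locking elided:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ktime.h>
#include "cxgb4.h"			/* struct adapter (driver-private) */

/* Sketch only -- not the actual cxgb4_ptp_read_hwstamp() body. */
static void ptp_tx_complete(struct adapter *adap, u64 ns)
{
	struct skb_shared_hwtstamps hwtstamps = {
		.hwtstamp = ns_to_ktime(ns),
	};

	skb_tstamp_tx(adap->ptp_tx_skb, &hwtstamps);	/* deliver to socket */
	dev_kfree_skb_any(adap->ptp_tx_skb);		/* drop our reference */
	adap->ptp_tx_skb = NULL;			/* reopen PTP Tx path */
}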
+
/**
* t4_ethrx_handler - process an ingress ethernet packet
* @q: the response queue that received the packet
@@ -2038,11 +2154,13 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
struct sk_buff *skb;
const struct cpl_rx_pkt *pkt;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+ struct adapter *adapter = q->adap;
struct sge *s = &q->adap->sge;
int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
u16 err_vec;
struct port_info *pi;
+ int ret = 0;
if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
return handle_trace_pkt(q->adap, si);
@@ -2068,8 +2186,25 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
rxq->stats.rx_drops++;
return 0;
}
+ pi = netdev_priv(q->netdev);
+
+ /* Handle PTP Event Rx packet */
+ if (unlikely(pi->ptp_enable)) {
+ ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
+ if (ret == RX_PTP_PKT_ERR)
+ return 0;
+ }
+ if (likely(!ret))
+ __skb_pull(skb, s->pktshift); /* remove ethernet header pad */
+
+ /* Handle the PTP Event Tx Loopback packet */
+ if (unlikely(pi->ptp_enable && !ret &&
+ (pkt->l2info & htonl(RXF_UDP_F)) &&
+ cxgb4_ptp_is_ptp_rx(skb))) {
+ if (!t4_tx_hststamp(adapter, skb, q->netdev))
+ return 0;
+ }
- __skb_pull(skb, s->pktshift); /* remove ethernet header padding */
skb->protocol = eth_type_trans(skb, q->netdev);
skb_record_rx_queue(skb, q->idx);
if (skb->dev->features & NETIF_F_RXHASH)
@@ -2078,7 +2213,6 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
rxq->stats.pkts++;
- pi = netdev_priv(skb->dev);
if (pi->rxtstamp)
cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
si->sgetstamp);
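
The pi->ptp_enable checks above are the driver-side half of the standard Linux timestamping contract; the other half is an application (ptp4l, for instance) switching hardware timestamping on through the SIOCSHWTSTAMP ioctl, which is what would flip that flag in the driver's ioctl handler. A standalone userspace sketch of that request, independent of this patch:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* 'fd' is any open socket on the interface, e.g. an AF_INET UDP socket. */
int enable_hwtstamp(int fd, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
}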
@@ -2502,6 +2636,20 @@ static void sge_tx_timer_cb(unsigned long data)
tasklet_schedule(&txq->qresume_tsk);
}
+ if (!is_t4(adap->params.chip)) {
+ struct sge_eth_txq *q = &s->ptptxq;
+ int avail;
+
+ spin_lock(&adap->ptp_lock);
+ avail = reclaimable(&q->q);
+
+ if (avail) {
+ free_tx_desc(adap, &q->q, avail, false);
+ q->q.in_use -= avail;
+ }
+ spin_unlock(&adap->ptp_lock);
+ }
+
budget = MAX_TIMER_TX_RECLAIM;
i = s->ethtxq_rover;
do {
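
Note the lock flavors around ptp_lock: sge_tx_timer_cb() runs in softirq context, so the plain spin_lock() above is sufficient, while the teardown path added later in this patch (t4_free_sge_resources()) runs in process context and therefore takes spin_lock_bh(), so this very timer cannot interrupt it on the same CPU while the lock is held. Schematically (function names illustrative):

#include <linux/spinlock.h>
#include "cxgb4.h"			/* struct adapter (driver-private) */

static void from_timer_cb(struct adapter *adap)		/* softirq context */
{
	spin_lock(&adap->ptp_lock);
	/* ... reclaim completed PTP Tx descriptors ... */
	spin_unlock(&adap->ptp_lock);
}

static void from_process_ctx(struct adapter *adap)	/* e.g. teardown */
{
	spin_lock_bh(&adap->ptp_lock);	/* blocks BHs, incl. this timer */
	/* ... free whatever is still queued ... */
	spin_unlock_bh(&adap->ptp_lock);
}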
@@ -3068,6 +3216,19 @@ void t4_free_sge_resources(struct adapter *adap)
if (adap->sge.intrq.desc)
free_rspq_fl(adap, &adap->sge.intrq, NULL);
+ if (!is_t4(adap->params.chip)) {
+ etq = &adap->sge.ptptxq;
+ if (etq->q.desc) {
+ t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
+ etq->q.cntxt_id);
+ spin_lock_bh(&adap->ptp_lock);
+ free_tx_desc(adap, &etq->q, etq->q.in_use, true);
+ spin_unlock_bh(&adap->ptp_lock);
+ kfree(etq->q.sdesc);
+ free_txq(adap, &etq->q);
+ }
+ }
+
/* clear the reverse egress queue map */
memset(adap->sge.egr_map, 0,
adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
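
For completeness: the timestamps this patch records eventually reach userspace as SCM_TIMESTAMPING control messages on the socket. Reading one back looks roughly like this (standalone illustration, not driver code; assumes the socket was configured with SO_TIMESTAMPING):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/errqueue.h>	/* struct scm_timestamping */

static void print_hwtstamp(struct msghdr *msg)
{
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMPING) {
			struct scm_timestamping *ts =
				(struct scm_timestamping *)CMSG_DATA(cmsg);

			/* ts->ts[2] is the raw hardware timestamp */
			printf("hw ts: %lld.%09ld\n",
			       (long long)ts->ts[2].tv_sec,
			       ts->ts[2].tv_nsec);
		}
	}
}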