Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c')
-rw-r--r--	drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c	| 177
1 file changed, 154 insertions(+), 23 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 0cc6353254bf..ef10aef3cda0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -19,6 +19,12 @@
 #include "cn10k.h"
 
 #define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
+#define PTP_PORT	0x13F
+/* PTPv2 header Original Timestamp starts at byte offset 34 and
+ * contains 6 byte seconds field and 4 byte nano seconds field.
+ */
+#define PTP_SYNC_SEC_OFFSET	34
+
 static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
 				     struct bpf_prog *prog,
 				     struct nix_cqe_rx_s *cqe,
@@ -148,6 +154,7 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
 	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
 		timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
 		if (timestamp != 1) {
+			timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
 			err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
 			if (!err) {
 				memset(&ts, 0, sizeof(ts));
@@ -167,14 +174,15 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
 static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
 			      struct sk_buff *skb, void *data)
 {
-	u64 tsns;
+	u64 timestamp, tsns;
 	int err;
 
 	if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
 		return;
 
+	timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data);
 	/* The first 8 bytes is the timestamp */
-	err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns);
+	err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
 	if (err)
 		return;
 
@@ -433,6 +441,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
 				struct otx2_cq_queue *cq, int budget)
 {
 	int tx_pkts = 0, tx_bytes = 0, qidx;
+	struct otx2_snd_queue *sq;
 	struct nix_cqe_tx_s *cqe;
 	int processed_cqe = 0;
 
@@ -443,6 +452,9 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
 		return 0;
 
 process_cqe:
+	qidx = cq->cq_idx - pfvf->hw.rx_queues;
+	sq = &pfvf->qset.sq[qidx];
+
 	while (likely(processed_cqe < budget) && cq->pend_cqe) {
 		cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
 		if (unlikely(!cqe)) {
@@ -450,18 +462,20 @@ process_cqe:
 				return 0;
 			break;
 		}
+
 		if (cq->cq_type == CQ_XDP) {
-			qidx = cq->cq_idx - pfvf->hw.rx_queues;
-			otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx],
-						 cqe);
+			otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
 		} else {
-			otx2_snd_pkt_handler(pfvf, cq,
-					     &pfvf->qset.sq[cq->cint_idx],
-					     cqe, budget, &tx_pkts, &tx_bytes);
+			otx2_snd_pkt_handler(pfvf, cq, sq, cqe, budget,
+					     &tx_pkts, &tx_bytes);
 		}
+
 		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
 		processed_cqe++;
 		cq->pend_cqe--;
+
+		sq->cons_head++;
+		sq->cons_head &= (sq->sqe_cnt - 1);
 	}
 
 	/* Free CQEs to HW */
@@ -482,6 +496,18 @@ process_cqe:
 	return 0;
 }
 
+static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
+{
+	struct dim_sample dim_sample;
+	u64 rx_frames, rx_bytes;
+
+	rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
+		OTX2_GET_RX_STATS(RX_UCAST);
+	rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
+	dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample);
+	net_dim(&cq_poll->dim, dim_sample);
+}
+
 int otx2_napi_handler(struct napi_struct *napi, int budget)
 {
 	struct otx2_cq_queue *rx_cq = NULL;
@@ -519,6 +545,17 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
 		if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
 			return workdone;
 
+		/* Check for adaptive interrupt coalesce */
+		if (workdone != 0 &&
+		    ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
+		     OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
+			/* Adjust irq coalese using net_dim */
+			otx2_adjust_adaptive_coalese(pfvf, cq_poll);
+			/* Update irq coalescing */
+			for (i = 0; i < pfvf->hw.cint_cnt; i++)
+				otx2_config_irq_coalescing(pfvf, i);
+		}
+
 		/* Re-enable interrupts */
 		otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
 			     BIT_ULL(0));
@@ -599,7 +636,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
 	ext->subdc = NIX_SUBDC_EXT;
 	if (skb_shinfo(skb)->gso_size) {
 		ext->lso = 1;
-		ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		ext->lso_sb = skb_tcp_all_headers(skb);
 		ext->lso_mps = skb_shinfo(skb)->gso_size;
 
 		/* Only TSOv4 and TSOv6 GSO offloads are supported */
@@ -661,7 +698,8 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
 }
 
 static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
-			     int alg, u64 iova)
+			     int alg, u64 iova, int ptp_offset,
+			     u64 base_ns, int udp_csum)
 {
 	struct nix_sqe_mem_s *mem;
 
@@ -671,6 +709,13 @@ static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
 	mem->wmem = 1; /* wait for the memory operation */
 	mem->addr = iova;
 
+	if (ptp_offset) {
+		mem->start_offset = ptp_offset;
+		mem->udp_csum_crt = udp_csum;
+		mem->base_ns = base_ns;
+		mem->step_type = 1;
+	}
+
 	*offset += sizeof(*mem);
 }
 
@@ -906,7 +951,7 @@ static bool is_hw_tso_supported(struct otx2_nic *pfvf,
 	 * be correctly modified, hence don't offload such TSO segments.
 	 */
 
-	payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+	payload_len = skb->len - skb_tcp_all_headers(skb);
 	last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
 	if (last_seg_size && last_seg_size < 16)
 		return false;
@@ -927,16 +972,102 @@ static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
 	return skb_shinfo(skb)->gso_segs;
 }
 
+static bool otx2_validate_network_transport(struct sk_buff *skb)
+{
+	if ((ip_hdr(skb)->protocol == IPPROTO_UDP) ||
+	    (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) {
+		struct udphdr *udph = udp_hdr(skb);
+
+		if (udph->source == htons(PTP_PORT) &&
+		    udph->dest == htons(PTP_PORT))
+			return true;
+	}
+
+	return false;
+}
+
+static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, int *udp_csum)
+{
+	struct ethhdr *eth = (struct ethhdr *)(skb->data);
+	u16 nix_offload_hlen = 0, inner_vhlen = 0;
+	u8 *data = skb->data, *msgtype;
+	__be16 proto = eth->h_proto;
+	int network_depth = 0;
+
+	/* NIX is programmed to offload outer VLAN header
+	 * in case of single vlan protocol field holds Network header ETH_IP/V6
+	 * in case of stacked vlan protocol field holds Inner vlan (8100)
+	 */
+	if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
+	    skb->dev->features & NETIF_F_HW_VLAN_STAG_TX) {
+		if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+			/* Get vlan protocol */
+			proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
+			/* SKB APIs like skb_transport_offset does not include
+			 * offloaded vlan header length. Need to explicitly add
+			 * the length
+			 */
+			nix_offload_hlen = VLAN_HLEN;
+			inner_vhlen = VLAN_HLEN;
+		} else if (skb->vlan_proto == htons(ETH_P_8021Q)) {
+			nix_offload_hlen = VLAN_HLEN;
+		}
+	} else if (eth_type_vlan(eth->h_proto)) {
+		proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+	}
+
+	switch (ntohs(proto)) {
+	case ETH_P_1588:
+		if (network_depth)
+			*offset = network_depth;
+		else
+			*offset = ETH_HLEN + nix_offload_hlen +
+				  inner_vhlen;
+		break;
+	case ETH_P_IP:
+	case ETH_P_IPV6:
+		if (!otx2_validate_network_transport(skb))
+			return false;
+
+		*udp_csum = 1;
+		*offset = nix_offload_hlen + skb_transport_offset(skb) +
+			  sizeof(struct udphdr);
+	}
+
+	msgtype = data + *offset;
+
+	/* Check PTP messageId is SYNC or not */
+	return (*msgtype & 0xf) == 0;
+}
+
 static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
 			      struct otx2_snd_queue *sq, int *offset)
 {
+	struct ptpv2_tstamp *origin_tstamp;
+	int ptp_offset = 0, udp_csum = 0;
+	struct timespec64 ts;
 	u64 iova;
 
-	if (!skb_shinfo(skb)->gso_size &&
-	    skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
-		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+	if (unlikely(!skb_shinfo(skb)->gso_size &&
+		     (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
+		if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)) {
+			if (otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum)) {
+				origin_tstamp = (struct ptpv2_tstamp *)
+						((u8 *)skb->data + ptp_offset +
+						 PTP_SYNC_SEC_OFFSET);
+				ts = ns_to_timespec64(pfvf->ptp->tstamp);
+				origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff);
+				origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff);
+				origin_tstamp->nanoseconds = htonl(ts.tv_nsec);
+				/* Point to correction field in PTP packet */
+				ptp_offset += 8;
+			}
+		} else {
+			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		}
 		iova = sq->timestamps->iova + (sq->head * sizeof(u64));
-		otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
+		otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova,
+				 ptp_offset, pfvf->ptp->base_ns, udp_csum);
 	} else {
 		skb_tx_timestamp(skb);
 	}
@@ -947,17 +1078,17 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
 {
 	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
 	struct otx2_nic *pfvf = netdev_priv(netdev);
-	int offset, num_segs, free_sqe;
+	int offset, num_segs, free_desc;
 	struct nix_sqe_hdr_s *sqe_hdr;
 
-	/* Check if there is room for new SQE.
-	 * 'Num of SQBs freed to SQ's pool - SQ's Aura count'
-	 * will give free SQE count.
-	 */
-	free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+	/* Check if there is enough room between producer
+	 * and consumer index.
+	 */
+	free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1);
+	if (free_desc < sq->sqe_thresh)
+		return false;
 
-	if (free_sqe < sq->sqe_thresh ||
-	    free_sqe < otx2_get_sqe_count(pfvf, skb))
+	if (free_desc < otx2_get_sqe_count(pfvf, skb))
 		return false;
 
 	num_segs = skb_shinfo(skb)->nr_frags + 1;
@@ -1198,7 +1329,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
 		put_page(page);
 		break;
 	default:
-		bpf_warn_invalid_xdp_action(act);
+		bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
 		break;
 	case XDP_ABORTED:
 		trace_xdp_exception(pfvf->netdev, prog, act);
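
The new free_desc computation in otx2_sq_append_skb() is standard power-of-two ring arithmetic over the producer index (sq->head) and the consumer index (sq->cons_head) that the Tx NAPI handler above now advances per completed CQE. A minimal standalone sketch with worked values, assuming sqe_cnt is a power of two as the masking requires; this is illustrative code, not part of the driver:

	/* Sketch of the free-descriptor math in otx2_sq_append_skb(). */
	#include <stdio.h>

	static unsigned int free_desc(unsigned int cons_head, unsigned int head,
				      unsigned int sqe_cnt)
	{
		/* One slot stays unused so head == cons_head means "empty",
		 * never "full"; adding sqe_cnt before masking keeps the
		 * subtraction from going negative across wraparound.
		 */
		return (cons_head - head - 1 + sqe_cnt) & (sqe_cnt - 1);
	}

	int main(void)
	{
		printf("%u\n", free_desc(0, 0, 256));   /* empty ring: 255 */
		printf("%u\n", free_desc(250, 10, 256)); /* wrapped: 239 */
		printf("%u\n", free_desc(10, 9, 256));  /* full ring: 0 */
		return 0;
	}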
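otx2_adjust_adaptive_coalese() only feeds cumulative frame and byte counters into net_dim(); applying the chosen moderation profile happens asynchronously, because net_dim() schedules dim->work when its state machine decides to move. A sketch of what such a work handler looks like, not this diff's code: the driver's real handler lives elsewhere in the series, and the clamping bounds here are assumptions for illustration:

	static void otx2_dim_work(struct work_struct *w)
	{
		struct dim_cq_moder cur_moder;
		struct otx2_cq_poll *cq_poll;
		struct otx2_nic *pfvf;
		struct dim *dim;

		dim = container_of(w, struct dim, work);
		/* Profile selected by the net_dim state machine */
		cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
		cq_poll = container_of(dim, struct otx2_cq_poll, dim);
		pfvf = (struct otx2_nic *)cq_poll->dev;
		/* Clamp and stash the new targets; otx2_config_irq_coalescing()
		 * programs them into hardware on the next NAPI pass.
		 */
		pfvf->hw.cq_time_wait = min_t(u16, cur_moder.usec, CQ_TIMER_THRESH_MAX);
		pfvf->hw.cq_ecount_wait = min_t(u16, cur_moder.pkts, NAPI_POLL_WEIGHT);
		dim->state = DIM_START_MEASURE;
	}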
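The two LSO/TSO hunks swap the open-coded header-length sum for skb_tcp_all_headers(). The helper, from include/linux/tcp.h, is exactly that sum, so the change is behavior-neutral:

	static inline int skb_tcp_all_headers(const struct sk_buff *skb)
	{
		return skb_transport_offset(skb) + tcp_hdrlen(skb);
	}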
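The offsets used by the one-step path follow directly from the IEEE 1588v2 message layout: the common header is 34 bytes, so originTimestamp begins at PTP_SYNC_SEC_OFFSET (34) with a 48-bit seconds field split as 16 + 32 bits, the correctionField sits at byte offset 8 (hence "ptp_offset += 8"), Sync has messageType 0 (hence testing the low nibble of byte 0), and PTP_PORT 0x13F is UDP port 319, the PTP event port. A reference sketch of the layout; the driver's real struct ptpv2_tstamp is declared in its PTP header, and the field names here are illustrative:

	/* IEEE 1588v2 common header (34 bytes), followed for Sync by
	 * originTimestamp, i.e. PTP_SYNC_SEC_OFFSET == 34.
	 */
	struct ptpv2_hdr_sketch {
		u8	tsmt;		/* byte 0:  transportSpecific | messageType */
		u8	ver;		/* byte 1:  reserved | versionPTP */
		__be16	msg_len;	/* bytes 2-3 */
		u8	domain;		/* byte 4 */
		u8	rsvd1;		/* byte 5 */
		__be16	flags;		/* bytes 6-7 */
		__be64	correction;	/* bytes 8-15: correctionField */
		__be32	rsvd2;		/* bytes 16-19 */
		u8	clock_id[8];	/* bytes 20-27: sourcePortIdentity */
		__be16	port_num;	/* bytes 28-29 */
		__be16	seq_id;		/* bytes 30-31 */
		u8	control;	/* byte 32 */
		u8	log_interval;	/* byte 33 */
		/* byte 34: originTimestamp starts here */
		__be16	seconds_msb;	/* bytes 34-35: top 16 of 48-bit seconds */
		__be32	seconds_lsb;	/* bytes 36-39: low 32 bits of seconds */
		__be32	nanoseconds;	/* bytes 40-43 */
	} __packed;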