/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 *
 * RMNET Data virtual network driver
 *
 */

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"

/* RX/TX Fixup */

void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	/* The u64_stats syncp guards 64-bit counters against torn reads on
	 * 32-bit architectures; on 64-bit builds it compiles away.
	 */
	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.rx_pkts++;
	pcpu_ptr->stats.rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.tx_pkts++;
	pcpu_ptr->stats.tx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

/* Network Device Operations */

static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct rmnet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->real_dev) {
		rmnet_egress_handler(skb);
	} else {
		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
		kfree_skb(skb);
	}
	return NETDEV_TX_OK;
}

static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
		return -EINVAL;

	rmnet_dev->mtu = new_mtu;
	return 0;
}

static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	return priv->real_dev->ifindex;
}

static int rmnet_vnd_init(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	int err;

	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
	if (!priv->pcpu_stats)
		return -ENOMEM;

	err = gro_cells_init(&priv->gro_cells, dev);
	if (err) {
		free_percpu(priv->pcpu_stats);
		return err;
	}

	return 0;
}

static void rmnet_vnd_uninit(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	gro_cells_destroy(&priv->gro_cells);
	free_percpu(priv->pcpu_stats);
}

static void rmnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *s)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_vnd_stats total_stats;
	struct rmnet_pcpu_stats *pcpu_ptr;
	unsigned int cpu, start;

	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));

	for_each_possible_cpu(cpu) {
		/* Must be per_cpu_ptr() here: this_cpu_ptr() would read the
		 * local CPU's counters on every iteration instead of walking
		 * each CPU's copy.
		 */
		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);

		do {
			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
			total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
			total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
			total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
			total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));

		total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
	}

	s->rx_packets = total_stats.rx_pkts;
	s->rx_bytes = total_stats.rx_bytes;
	s->tx_packets = total_stats.tx_pkts;
	s->tx_bytes = total_stats.tx_bytes;
	s->tx_dropped = total_stats.tx_drops;
}

static const struct net_device_ops rmnet_vnd_ops = {
	.ndo_start_xmit = rmnet_vnd_start_xmit,
	.ndo_change_mtu = rmnet_vnd_change_mtu,
	.ndo_get_iflink = rmnet_vnd_get_iflink,
	.ndo_add_slave = rmnet_add_bridge,
	.ndo_del_slave = rmnet_del_bridge,
	.ndo_init = rmnet_vnd_init,
	.ndo_uninit = rmnet_vnd_uninit,
	.ndo_get_stats64 = rmnet_get_stats64,
};

/* Called by kernel whenever a new rmnet device is created. Sets MTU,
 * flags, ARP type, needed headroom, etc...
 */
void rmnet_vnd_setup(struct net_device *rmnet_dev)
{
	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
	random_ether_addr(rmnet_dev->dev_addr);
	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;

	/* Raw IP mode */
	rmnet_dev->header_ops = NULL;  /* No header */
	rmnet_dev->type = ARPHRD_RAWIP;
	rmnet_dev->hard_header_len = 0;
	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

	rmnet_dev->needs_free_netdev = true;
}

/* Exposed API */

int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
		      struct rmnet_port *port,
		      struct net_device *real_dev,
		      struct rmnet_endpoint *ep)
{
	struct rmnet_priv *priv;
	int rc;

	if (ep->egress_dev)
		return -EINVAL;

	rc = register_netdevice(rmnet_dev);
	if (!rc) {
		ep->egress_dev = rmnet_dev;
		ep->mux_id = id;
		port->nr_rmnet_devs++;

		rmnet_dev->rtnl_link_ops = &rmnet_link_ops;

		priv = netdev_priv(rmnet_dev);
		priv->mux_id = id;
		priv->real_dev = real_dev;

		netdev_dbg(rmnet_dev, "rmnet dev created\n");
	}

	return rc;
}

int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
		      struct rmnet_endpoint *ep)
{
	if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
		return -EINVAL;

	ep->egress_dev = NULL;
	port->nr_rmnet_devs--;
	return 0;
}

u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
{
	struct rmnet_priv *priv;

	priv = netdev_priv(rmnet_dev);
	return priv->mux_id;
}

int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);

	/* Although we expect similar number of enable/disable
	 * commands, optimize for the disable. That is more
	 * latency sensitive than enable
	 */
	if (unlikely(enable))
		netif_wake_queue(rmnet_dev);
	else
		netif_stop_queue(rmnet_dev);

	return 0;
}
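
/* Usage sketch (illustrative comment, not part of the driver): a virtual
 * device backed by this file is normally created over a real WWAN netdev
 * through the rmnet rtnl link type, which ends up in rmnet_vnd_newlink()
 * above. With iproute2, for example:
 *
 *   ip link add link wwan0 name rmnet0 type rmnet mux_id 1
 *   ip link set rmnet0 up
 *
 * "wwan0" and "rmnet0" are assumed example interface names; mux_id selects
 * the MAP multiplexing channel stored in priv->mux_id, which
 * rmnet_vnd_get_mux() later reports to the egress/ingress handlers.
 */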