author    Anton Blanchard <anton@samba.org>    2011-10-14 05:31:05 +0000
committer David S. Miller <davem@davemloft.net>    2011-10-17 19:00:55 -0400
commit    d695c335f9165cb73f9389479cce755e8207b5f4 (patch)
tree      23e72cf67fbe555e23d00001983a857a2e9666dc /drivers/net/ethernet/ibm
parent    ehea: Allocate large enough skbs to avoid partial cacheline DMA writes (diff)
ehea: Simplify ehea_xmit2 and ehea_xmit3
Based on a patch from Michael Ellerman, clean up a significant portion of the transmit path. There was a lot of duplication here. Even worse, we were always checksumming tx packets and ignoring the skb->ip_summed field.

Also remove NETIF_F_FRAGLIST from dev->features, I'm not sure why it was enabled.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
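The core of the fix is that hardware checksum insertion is now requested only when the stack actually deferred the checksum to the device. Below is a minimal illustrative sketch of that decision, assuming the usual kernel headers; the helper name is hypothetical, and the patch itself performs the equivalent checks inline in the new xmit_common():

/* Illustration only -- not part of this patch. Decide whether a transmit
 * skb asked for hardware L4 checksum insertion on this IPv4-only path. */
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>

static bool my_skb_wants_l4_csum_offload(const struct sk_buff *skb)
{
	/* Only IPv4 frames are handled by this offload path. */
	if (skb->protocol != htons(ETH_P_IP))
		return false;

	/* CHECKSUM_PARTIAL means the stack left the checksum for the
	 * device to fill in; any other value must be sent untouched. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;

	/* The hardware fills in TCP and UDP checksums. */
	return ip_hdr(skb)->protocol == IPPROTO_TCP ||
	       ip_hdr(skb)->protocol == IPPROTO_UDP;
}

On that basis, xmit_common() in the diff below sets EHEA_SWQE_IP_CHECKSUM and EHEA_SWQE_TCP_CHECKSUM only for CHECKSUM_PARTIAL skbs, rather than unconditionally as the old ehea_xmit2()/ehea_xmit3() did.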
Diffstat (limited to 'drivers/net/ethernet/ibm')
-rw-r--r--    drivers/net/ethernet/ibm/ehea/ehea_main.c    137
1 file changed, 36 insertions, 101 deletions
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 13218092769c..77aafba8272c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1676,37 +1676,6 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
return ret;
}
-/*
- * The write_* functions store information in swqe which is used by
- * the hardware to calculate the ip/tcp/udp checksum
- */
-
-static inline void write_ip_start_end(struct ehea_swqe *swqe,
- const struct sk_buff *skb)
-{
- swqe->ip_start = skb_network_offset(skb);
- swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
-}
-
-static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
- const struct sk_buff *skb)
-{
- swqe->tcp_offset =
- (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));
-
- swqe->tcp_end = (u16)skb->len - 1;
-}
-
-static inline void write_udp_offset_end(struct ehea_swqe *swqe,
- const struct sk_buff *skb)
-{
- swqe->tcp_offset =
- (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));
-
- swqe->tcp_end = (u16)skb->len - 1;
-}
-
-
static void write_swqe2_TSO(struct sk_buff *skb,
struct ehea_swqe *swqe, u32 lkey)
{
@@ -2105,41 +2074,46 @@ static int ehea_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
- struct ehea_swqe *swqe, u32 lkey)
+static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
- if (skb->protocol == htons(ETH_P_IP)) {
- const struct iphdr *iph = ip_hdr(skb);
+ swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
- /* IPv4 */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_TCP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT
- | EHEA_SWQE_DESCRIPTORS_PRESENT;
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
- write_ip_start_end(swqe, skb);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;
- if (iph->protocol == IPPROTO_UDP) {
- if ((iph->frag_off & IP_MF) ||
- (iph->frag_off & IP_OFFSET))
- /* IP fragment, so don't change cs */
- swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
- else
- write_udp_offset_end(swqe, skb);
- } else if (iph->protocol == IPPROTO_TCP) {
- write_tcp_offset_end(swqe, skb);
- }
+ swqe->ip_start = skb_network_offset(skb);
+ swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
- /* icmp (big data) and ip segmentation packets (all other ip
- packets) do not require any special handling */
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_UDP:
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
- } else {
- /* Other Ethernet Protocol */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IMM_DATA_PRESENT
- | EHEA_SWQE_DESCRIPTORS_PRESENT;
+ swqe->tcp_offset = swqe->ip_end + 1 +
+ offsetof(struct udphdr, check);
+ swqe->tcp_end = skb->len - 1;
+ break;
+
+ case IPPROTO_TCP:
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
+
+ swqe->tcp_offset = swqe->ip_end + 1 +
+ offsetof(struct tcphdr, check);
+ swqe->tcp_end = skb->len - 1;
+ break;
}
+}
+
+static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
+ struct ehea_swqe *swqe, u32 lkey)
+{
+ swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;
+
+ xmit_common(skb, swqe);
write_swqe2_data(skb, dev, swqe, lkey);
}
@@ -2152,51 +2126,11 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
skb_frag_t *frag;
int i;
- if (skb->protocol == htons(ETH_P_IP)) {
- const struct iphdr *iph = ip_hdr(skb);
+ xmit_common(skb, swqe);
- /* IPv4 */
- write_ip_start_end(swqe, skb);
-
- if (iph->protocol == IPPROTO_TCP) {
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_TCP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT;
-
- write_tcp_offset_end(swqe, skb);
-
- } else if (iph->protocol == IPPROTO_UDP) {
- if ((iph->frag_off & IP_MF) ||
- (iph->frag_off & IP_OFFSET))
- /* IP fragment, so don't change cs */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IMM_DATA_PRESENT;
- else {
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_TCP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT;
-
- write_udp_offset_end(swqe, skb);
- }
- } else {
- /* icmp (big data) and
- ip segmentation packets (all other ip packets) */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT;
- }
- } else {
- /* Other Ethernet Protocol */
- swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
- }
- /* copy (immediate) data */
if (nfrags == 0) {
- /* data is in a single piece */
skb_copy_from_linear_data(skb, imm_data, skb->len);
} else {
- /* first copy data from the skb->data buffer ... */
skb_copy_from_linear_data(skb, imm_data,
skb_headlen(skb));
imm_data += skb_headlen(skb);
@@ -2208,6 +2142,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
imm_data += frag->size;
}
}
+
swqe->immediate_data_length = skb->len;
dev_kfree_skb(skb);
}
@@ -3184,7 +3119,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
dev->netdev_ops = &ehea_netdev_ops;
ehea_set_ethtool_ops(dev);
- dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
+ dev->hw_features = NETIF_F_SG | NETIF_F_TSO
| NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
| NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX