Diffstat (limited to 'net')
-rw-r--r--  net/802/fc.c | 2
-rw-r--r--  net/802/fddi.c | 12
-rw-r--r--  net/802/hippi.c | 2
-rw-r--r--  net/802/tr.c | 2
-rw-r--r--  net/8021q/vlan.c | 6
-rw-r--r--  net/8021q/vlan.h | 2
-rw-r--r--  net/8021q/vlan_core.c | 16
-rw-r--r--  net/8021q/vlan_dev.c | 13
-rw-r--r--  net/9p/client.c | 4
-rw-r--r--  net/9p/trans_fd.c | 2
-rw-r--r--  net/atm/common.c | 2
-rw-r--r--  net/atm/lec.c | 1
-rw-r--r--  net/ax25/af_ax25.c | 2
-rw-r--r--  net/ax25/ax25_route.c | 4
-rw-r--r--  net/bluetooth/af_bluetooth.c | 5
-rw-r--r--  net/bluetooth/rfcomm/core.c | 4
-rw-r--r--  net/bridge/br_if.c | 29
-rw-r--r--  net/bridge/br_input.c | 2
-rw-r--r--  net/bridge/br_netfilter.c | 107
-rw-r--r--  net/caif/caif_dev.c | 24
-rw-r--r--  net/caif/caif_socket.c | 26
-rw-r--r--  net/caif/cfcnfg.c | 49
-rw-r--r--  net/caif/cfctrl.c | 59
-rw-r--r--  net/caif/cfdbgl.c | 4
-rw-r--r--  net/caif/cfdgml.c | 11
-rw-r--r--  net/caif/cffrml.c | 14
-rw-r--r--  net/caif/cfmuxl.c | 14
-rw-r--r--  net/caif/cfpkt_skbuff.c | 48
-rw-r--r--  net/caif/cfrfml.c | 12
-rw-r--r--  net/caif/cfserl.c | 4
-rw-r--r--  net/caif/cfsrvl.c | 17
-rw-r--r--  net/caif/cfutill.c | 12
-rw-r--r--  net/caif/cfveil.c | 11
-rw-r--r--  net/caif/cfvidl.c | 6
-rw-r--r--  net/caif/chnl_net.c | 47
-rw-r--r--  net/can/raw.c | 4
-rw-r--r--  net/core/datagram.c | 5
-rw-r--r--  net/core/dev.c | 342
-rw-r--r--  net/core/ethtool.c | 88
-rw-r--r--  net/core/fib_rules.c | 11
-rw-r--r--  net/core/filter.c | 10
-rw-r--r--  net/core/flow.c | 82
-rw-r--r--  net/core/gen_estimator.c | 4
-rw-r--r--  net/core/iovec.c | 6
-rw-r--r--  net/core/neighbour.c | 10
-rw-r--r--  net/core/net-sysfs.c | 37
-rw-r--r--  net/core/net-sysfs.h | 4
-rw-r--r--  net/core/pktgen.c | 12
-rw-r--r--  net/core/rtnetlink.c | 31
-rw-r--r--  net/core/skbuff.c | 92
-rw-r--r--  net/core/sock.c | 4
-rw-r--r--  net/core/utils.c | 15
-rw-r--r--  net/dccp/ccid.h | 46
-rw-r--r--  net/dccp/ccids/Kconfig | 31
-rw-r--r--  net/dccp/ccids/ccid2.c | 287
-rw-r--r--  net/dccp/ccids/ccid2.h | 35
-rw-r--r--  net/dccp/ccids/ccid3.c | 253
-rw-r--r--  net/dccp/ccids/ccid3.h | 51
-rw-r--r--  net/dccp/ccids/lib/loss_interval.c | 2
-rw-r--r--  net/dccp/ccids/lib/packet_history.c | 39
-rw-r--r--  net/dccp/ccids/lib/packet_history.h | 22
-rw-r--r--  net/dccp/ccids/lib/tfrc.h | 1
-rw-r--r--  net/dccp/ccids/lib/tfrc_equation.c | 14
-rw-r--r--  net/dccp/options.c | 25
-rw-r--r--  net/decnet/dn_nsp_out.c | 8
-rw-r--r--  net/econet/af_econet.c | 6
-rw-r--r--  net/ethernet/eth.c | 8
-rw-r--r--  net/ipv4/Kconfig | 8
-rw-r--r--  net/ipv4/Makefile | 1
-rw-r--r--  net/ipv4/af_inet.c | 8
-rw-r--r--  net/ipv4/arp.c | 231
-rw-r--r--  net/ipv4/datagram.c | 2
-rw-r--r--  net/ipv4/devinet.c | 4
-rw-r--r--  net/ipv4/fib_frontend.c | 35
-rw-r--r--  net/ipv4/fib_trie.c | 55
-rw-r--r--  net/ipv4/gre.c | 151
-rw-r--r--  net/ipv4/icmp.c | 4
-rw-r--r--  net/ipv4/inet_diag.c | 2
-rw-r--r--  net/ipv4/ip_fragment.c | 6
-rw-r--r--  net/ipv4/ip_gre.c | 236
-rw-r--r--  net/ipv4/ip_options.c | 3
-rw-r--r--  net/ipv4/ip_output.c | 24
-rw-r--r--  net/ipv4/ipip.c | 215
-rw-r--r--  net/ipv4/ipmr.c | 428
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 31
-rw-r--r--  net/ipv4/protocol.c | 31
-rw-r--r--  net/ipv4/raw.c | 2
-rw-r--r--  net/ipv4/route.c | 83
-rw-r--r--  net/ipv4/tcp.c | 11
-rw-r--r--  net/ipv4/tcp_input.c | 29
-rw-r--r--  net/ipv4/tcp_ipv4.c | 2
-rw-r--r--  net/ipv4/tcp_minisocks.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 31
-rw-r--r--  net/ipv4/tcp_timer.c | 37
-rw-r--r--  net/ipv4/tcp_westwood.c | 2
-rw-r--r--  net/ipv4/tunnel4.c | 19
-rw-r--r--  net/ipv4/udp.c | 4
-rw-r--r--  net/ipv4/xfrm4_tunnel.c | 4
-rw-r--r--  net/ipv6/addrconf.c | 5
-rw-r--r--  net/ipv6/addrlabel.c | 5
-rw-r--r--  net/ipv6/af_inet6.c | 6
-rw-r--r--  net/ipv6/exthdrs_core.c | 4
-rw-r--r--  net/ipv6/ip6_output.c | 6
-rw-r--r--  net/ipv6/ip6_tunnel.c | 160
-rw-r--r--  net/ipv6/ip6mr.c | 4
-rw-r--r--  net/ipv6/ndisc.c | 26
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 14
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 2
-rw-r--r--  net/ipv6/protocol.c | 32
-rw-r--r--  net/ipv6/raw.c | 12
-rw-r--r--  net/ipv6/reassembly.c | 2
-rw-r--r--  net/ipv6/route.c | 24
-rw-r--r--  net/ipv6/sit.c | 168
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/ipv6/tunnel6.c | 17
-rw-r--r--  net/ipv6/xfrm6_policy.c | 2
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 4
-rw-r--r--  net/irda/af_irda.c | 14
-rw-r--r--  net/irda/discovery.c | 2
-rw-r--r--  net/irda/ircomm/ircomm_tty.c | 4
-rw-r--r--  net/irda/irlan/irlan_eth.c | 32
-rw-r--r--  net/irda/irlan/irlan_event.c | 2
-rw-r--r--  net/irda/irlmp.c | 2
-rw-r--r--  net/irda/irlmp_frame.c | 2
-rw-r--r--  net/irda/irnet/irnet_irda.c | 22
-rw-r--r--  net/irda/irnet/irnet_ppp.c | 8
-rw-r--r--  net/irda/irnet/irnet_ppp.h | 3
-rw-r--r--  net/key/af_key.c | 4
-rw-r--r--  net/l2tp/l2tp_eth.c | 1
-rw-r--r--  net/l2tp/l2tp_ppp.c | 2
-rw-r--r--  net/mac80211/aes_ccm.c | 6
-rw-r--r--  net/mac80211/aes_cmac.c | 6
-rw-r--r--  net/mac80211/agg-rx.c | 22
-rw-r--r--  net/mac80211/cfg.c | 145
-rw-r--r--  net/mac80211/chan.c | 2
-rw-r--r--  net/mac80211/debugfs.c | 7
-rw-r--r--  net/mac80211/debugfs_key.c | 55
-rw-r--r--  net/mac80211/debugfs_netdev.c | 3
-rw-r--r--  net/mac80211/debugfs_sta.c | 2
-rw-r--r--  net/mac80211/driver-ops.h | 14
-rw-r--r--  net/mac80211/driver-trace.h | 42
-rw-r--r--  net/mac80211/ht.c | 28
-rw-r--r--  net/mac80211/ibss.c | 12
-rw-r--r--  net/mac80211/ieee80211_i.h | 109
-rw-r--r--  net/mac80211/iface.c | 401
-rw-r--r--  net/mac80211/key.c | 113
-rw-r--r--  net/mac80211/key.h | 10
-rw-r--r--  net/mac80211/main.c | 182
-rw-r--r--  net/mac80211/mlme.c | 122
-rw-r--r--  net/mac80211/offchannel.c | 26
-rw-r--r--  net/mac80211/pm.c | 3
-rw-r--r--  net/mac80211/rate.c | 11
-rw-r--r--  net/mac80211/rc80211_pid_debugfs.c | 2
-rw-r--r--  net/mac80211/rx.c | 757
-rw-r--r--  net/mac80211/scan.c | 69
-rw-r--r--  net/mac80211/sta_info.c | 40
-rw-r--r--  net/mac80211/sta_info.h | 16
-rw-r--r--  net/mac80211/status.c | 12
-rw-r--r--  net/mac80211/tx.c | 68
-rw-r--r--  net/mac80211/util.c | 82
-rw-r--r--  net/mac80211/wep.c | 2
-rw-r--r--  net/mac80211/work.c | 39
-rw-r--r--  net/mac80211/wpa.c | 34
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 17
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 13
-rw-r--r--  net/netfilter/ipvs/ip_vs_sched.c | 22
-rw-r--r--  net/netfilter/xt_hashlimit.c | 15
-rw-r--r--  net/packet/af_packet.c | 4
-rw-r--r--  net/phonet/Kconfig | 11
-rw-r--r--  net/phonet/af_phonet.c | 17
-rw-r--r--  net/phonet/datagram.c | 13
-rw-r--r--  net/phonet/pep.c | 477
-rw-r--r--  net/phonet/pn_dev.c | 5
-rw-r--r--  net/phonet/socket.c | 190
-rw-r--r--  net/rds/af_rds.c | 26
-rw-r--r--  net/rds/bind.c | 82
-rw-r--r--  net/rds/cong.c | 8
-rw-r--r--  net/rds/connection.c | 157
-rw-r--r--  net/rds/ib.c | 194
-rw-r--r--  net/rds/ib.h | 100
-rw-r--r--  net/rds/ib_cm.c | 184
-rw-r--r--  net/rds/ib_rdma.c | 318
-rw-r--r--  net/rds/ib_recv.c | 549
-rw-r--r--  net/rds/ib_send.c | 682
-rw-r--r--  net/rds/ib_stats.c | 2
-rw-r--r--  net/rds/ib_sysctl.c | 17
-rw-r--r--  net/rds/info.c | 12
-rw-r--r--  net/rds/iw.c | 4
-rw-r--r--  net/rds/iw.h | 11
-rw-r--r--  net/rds/iw_cm.c | 14
-rw-r--r--  net/rds/iw_rdma.c | 5
-rw-r--r--  net/rds/iw_recv.c | 24
-rw-r--r--  net/rds/iw_send.c | 93
-rw-r--r--  net/rds/iw_sysctl.c | 4
-rw-r--r--  net/rds/loop.c | 31
-rw-r--r--  net/rds/message.c | 118
-rw-r--r--  net/rds/page.c | 5
-rw-r--r--  net/rds/rdma.c | 339
-rw-r--r--  net/rds/rdma.h | 85
-rw-r--r--  net/rds/rdma_transport.c | 42
-rw-r--r--  net/rds/rds.h | 187
-rw-r--r--  net/rds/recv.c | 9
-rw-r--r--  net/rds/send.c | 544
-rw-r--r--  net/rds/stats.c | 6
-rw-r--r--  net/rds/sysctl.c | 4
-rw-r--r--  net/rds/tcp.c | 8
-rw-r--r--  net/rds/tcp.h | 9
-rw-r--r--  net/rds/tcp_connect.c | 2
-rw-r--r--  net/rds/tcp_listen.c | 6
-rw-r--r--  net/rds/tcp_recv.c | 14
-rw-r--r--  net/rds/tcp_send.c | 66
-rw-r--r--  net/rds/threads.c | 69
-rw-r--r--  net/rds/transport.c | 19
-rw-r--r--  net/rds/xlist.h | 80
-rw-r--r--  net/rfkill/input.c | 2
-rw-r--r--  net/rose/rose_link.c | 4
-rw-r--r--  net/sched/Kconfig | 10
-rw-r--r--  net/sched/Makefile | 1
-rw-r--r--  net/sched/act_csum.c | 595
-rw-r--r--  net/sched/cls_flow.c | 74
-rw-r--r--  net/sched/em_meta.c | 6
-rw-r--r--  net/sched/sch_api.c | 16
-rw-r--r--  net/sched/sch_generic.c | 8
-rw-r--r--  net/sched/sch_sfq.c | 33
-rw-r--r--  net/sctp/associola.c | 2
-rw-r--r--  net/sctp/chunk.c | 2
-rw-r--r--  net/sctp/inqueue.c | 2
-rw-r--r--  net/sctp/ipv6.c | 4
-rw-r--r--  net/sctp/objcnt.c | 5
-rw-r--r--  net/sctp/output.c | 2
-rw-r--r--  net/sctp/outqueue.c | 34
-rw-r--r--  net/sctp/probe.c | 4
-rw-r--r--  net/sctp/protocol.c | 19
-rw-r--r--  net/sctp/sm_make_chunk.c | 2
-rw-r--r--  net/sctp/sm_sideeffect.c | 21
-rw-r--r--  net/sctp/sm_statefuns.c | 20
-rw-r--r--  net/sctp/sm_statetable.c | 42
-rw-r--r--  net/sctp/socket.c | 85
-rw-r--r--  net/sctp/transport.c | 9
-rw-r--r--  net/socket.c | 30
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 2
-rw-r--r--  net/sunrpc/auth_gss/gss_generic_token.c | 44
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seqnum.c | 2
-rw-r--r--  net/sunrpc/auth_gss/gss_mech_switch.c | 2
-rw-r--r--  net/sunrpc/sched.c | 2
-rw-r--r--  net/tipc/addr.c | 2
-rw-r--r--  net/tipc/bcast.c | 41
-rw-r--r--  net/tipc/bearer.c | 2
-rw-r--r--  net/tipc/core.c | 6
-rw-r--r--  net/tipc/dbg.c | 4
-rw-r--r--  net/tipc/discover.c | 8
-rw-r--r--  net/tipc/eth_media.c | 48
-rw-r--r--  net/tipc/link.c | 31
-rw-r--r--  net/tipc/link.h | 16
-rw-r--r--  net/tipc/msg.h | 6
-rw-r--r--  net/tipc/name_table.c | 50
-rw-r--r--  net/tipc/net.c | 1
-rw-r--r--  net/tipc/node.c | 28
-rw-r--r--  net/tipc/node.h | 2
-rw-r--r--  net/tipc/port.c | 19
-rw-r--r--  net/tipc/port.h | 2
-rw-r--r--  net/tipc/socket.c | 83
-rw-r--r--  net/tipc/subscr.c | 2
-rw-r--r--  net/unix/af_unix.c | 5
-rw-r--r--  net/wireless/core.c | 66
-rw-r--r--  net/wireless/core.h | 32
-rw-r--r--  net/wireless/ibss.c | 19
-rw-r--r--  net/wireless/mlme.c | 152
-rw-r--r--  net/wireless/nl80211.c | 228
-rw-r--r--  net/wireless/nl80211.h | 14
-rw-r--r--  net/wireless/radiotap.c | 3
-rw-r--r--  net/wireless/reg.c | 22
-rw-r--r--  net/wireless/sme.c | 9
-rw-r--r--  net/wireless/sysfs.c | 9
-rw-r--r--  net/wireless/util.c | 28
-rw-r--r--  net/wireless/wext-core.c | 2
-rw-r--r--  net/wireless/wext-sme.c | 2
-rw-r--r--  net/x25/af_x25.c | 34
279 files changed, 8738 insertions(+), 5133 deletions(-)
diff --git a/net/802/fc.c b/net/802/fc.c
index 34cf1ee014b8..1e49f2d4ea96 100644
--- a/net/802/fc.c
+++ b/net/802/fc.c
@@ -70,7 +70,7 @@ static int fc_header(struct sk_buff *skb, struct net_device *dev,
if(daddr)
{
memcpy(fch->daddr,daddr,dev->addr_len);
- return(hdr_len);
+ return hdr_len;
}
return -hdr_len;
}
diff --git a/net/802/fddi.c b/net/802/fddi.c
index 3ef0ab0a543a..94b3ad08f39a 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -82,10 +82,10 @@ static int fddi_header(struct sk_buff *skb, struct net_device *dev,
if (daddr != NULL)
{
memcpy(fddi->daddr, daddr, dev->addr_len);
- return(hl);
+ return hl;
}
- return(-hl);
+ return -hl;
}
@@ -108,7 +108,7 @@ static int fddi_rebuild_header(struct sk_buff *skb)
{
printk("%s: Don't know how to resolve type %04X addresses.\n",
skb->dev->name, ntohs(fddi->hdr.llc_snap.ethertype));
- return(0);
+ return 0;
}
}
@@ -162,7 +162,7 @@ __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev)
/* Assume 802.2 SNAP frames, for now */
- return(type);
+ return type;
}
EXPORT_SYMBOL(fddi_type_trans);
@@ -170,9 +170,9 @@ EXPORT_SYMBOL(fddi_type_trans);
int fddi_change_mtu(struct net_device *dev, int new_mtu)
{
if ((new_mtu < FDDI_K_SNAP_HLEN) || (new_mtu > FDDI_K_SNAP_DLEN))
- return(-EINVAL);
+ return -EINVAL;
dev->mtu = new_mtu;
- return(0);
+ return 0;
}
EXPORT_SYMBOL(fddi_change_mtu);
diff --git a/net/802/hippi.c b/net/802/hippi.c
index cd3e8e929529..91aca8780fd0 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -152,7 +152,7 @@ int hippi_change_mtu(struct net_device *dev, int new_mtu)
if ((new_mtu < 68) || (new_mtu > 65280))
return -EINVAL;
dev->mtu = new_mtu;
- return(0);
+ return 0;
}
EXPORT_SYMBOL(hippi_change_mtu);
diff --git a/net/802/tr.c b/net/802/tr.c
index 1c6e596074df..5e20cf8a074b 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -145,7 +145,7 @@ static int tr_header(struct sk_buff *skb, struct net_device *dev,
{
memcpy(trh->daddr,daddr,dev->addr_len);
tr_source_route(skb, trh, dev);
- return(hdr_len);
+ return hdr_len;
}
return -hdr_len;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a2ad15250575..25c21332e9c3 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -321,7 +321,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
if (new_dev == NULL)
return -ENOBUFS;
- new_dev->real_num_tx_queues = real_dev->real_num_tx_queues;
+ netif_copy_real_num_queues(new_dev, real_dev);
dev_net_set(new_dev, net);
/* need 4 bytes for extra VLAN header info,
* hope the underlying device can handle it.
@@ -525,6 +525,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
break;
case NETDEV_UNREGISTER:
+ /* twiddle thumbs on netns device moves */
+ if (dev->reg_state != NETREG_UNREGISTERING)
+ break;
+
/* Delete all VLANs for this dev. */
grp->killall = 1;
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 8d9503ad01da..b26ce343072c 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -25,6 +25,7 @@ struct vlan_priority_tci_mapping {
* @rx_multicast: number of received multicast packets
* @syncp: synchronization point for 64bit counters
* @rx_errors: number of errors
+ * @rx_dropped: number of dropped packets
*/
struct vlan_rx_stats {
u64 rx_packets;
@@ -32,6 +33,7 @@ struct vlan_rx_stats {
u64 rx_multicast;
struct u64_stats_sync syncp;
unsigned long rx_errors;
+ unsigned long rx_dropped;
};
/**
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 0eb96f7e44be..b6d55a9304f2 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -30,7 +30,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
skb->pkt_type = PACKET_OTHERHOST;
}
- return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+ return polling ? netif_receive_skb(skb) : netif_rx(skb);
drop:
dev_kfree_skb_any(skb);
@@ -38,12 +38,12 @@ drop:
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);
-int vlan_hwaccel_do_receive(struct sk_buff *skb)
+void vlan_hwaccel_do_receive(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct vlan_rx_stats *rx_stats;
- skb->dev = vlan_dev_info(dev)->real_dev;
+ skb->dev = vlan_dev_real_dev(dev);
netif_nit_deliver(skb);
skb->dev = dev;
@@ -72,7 +72,6 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb)
break;
}
u64_stats_update_end(&rx_stats->syncp);
- return 0;
}
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
@@ -112,9 +111,12 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
}
for (p = napi->gro_list; p; p = p->next) {
- NAPI_GRO_CB(p)->same_flow =
- p->dev == skb->dev && !compare_ether_header(
- skb_mac_header(p), skb_gro_mac_header(skb));
+ unsigned long diffs;
+
+ diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
+ diffs |= compare_ether_header(skb_mac_header(p),
+ skb_gro_mac_header(skb));
+ NAPI_GRO_CB(p)->same_flow = !diffs;
NAPI_GRO_CB(p)->flush = 0;
}
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 3bccdd12a264..f6fbcc0f1af9 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -177,8 +177,8 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
} else {
skb->dev = vlan_dev;
- rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
- smp_processor_id());
+ rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats);
+
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;
rx_stats->rx_bytes += skb->len;
@@ -225,7 +225,10 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
}
}
- netif_rx(skb);
+ if (unlikely(netif_rx(skb) == NET_RX_DROP)) {
+ if (rx_stats)
+ rx_stats->rx_dropped++;
+ }
rcu_read_unlock();
return NET_RX_SUCCESS;
@@ -843,13 +846,15 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
accum.rx_packets += rxpackets;
accum.rx_bytes += rxbytes;
accum.rx_multicast += rxmulticast;
- /* rx_errors is an ulong, not protected by syncp */
+ /* rx_errors, rx_dropped are ulong, not protected by syncp */
accum.rx_errors += p->rx_errors;
+ accum.rx_dropped += p->rx_dropped;
}
stats->rx_packets = accum.rx_packets;
stats->rx_bytes = accum.rx_bytes;
stats->rx_errors = accum.rx_errors;
stats->multicast = accum.rx_multicast;
+ stats->rx_dropped = accum.rx_dropped;
}
return stats;
}
diff --git a/net/9p/client.c b/net/9p/client.c
index dc6f2f26d023..f34b9f510818 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -61,13 +61,13 @@ static const match_table_t tokens = {
inline int p9_is_proto_dotl(struct p9_client *clnt)
{
- return (clnt->proto_version == p9_proto_2000L);
+ return clnt->proto_version == p9_proto_2000L;
}
EXPORT_SYMBOL(p9_is_proto_dotl);
inline int p9_is_proto_dotu(struct p9_client *clnt)
{
- return (clnt->proto_version == p9_proto_2000u);
+ return clnt->proto_version == p9_proto_2000u;
}
EXPORT_SYMBOL(p9_is_proto_dotu);
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index c85109d809ca..078eb162d9bf 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -222,7 +222,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
}
}
-static unsigned int
+static int
p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
{
int ret, n;
diff --git a/net/atm/common.c b/net/atm/common.c
index 940404a73b3d..1b9c52a02cd3 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -792,7 +792,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
default:
if (level == SOL_SOCKET)
return -EINVAL;
- break;
+ break;
}
if (!vcc->dev || !vcc->dev->ops->getsockopt)
return -EINVAL;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index d98bde1a0ac8..181d70c73d70 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -220,7 +220,6 @@ static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc)
static int lec_open(struct net_device *dev)
{
netif_start_queue(dev);
- memset(&dev->stats, 0, sizeof(struct net_device_stats));
return 0;
}
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index cfdfd7e2a172..26eaebf4aaa9 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1103,7 +1103,7 @@ done:
out:
release_sock(sk);
- return 0;
+ return err;
}
/*
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 7805945a5fd6..a1690845dc6e 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -412,7 +412,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
{
ax25_uid_assoc *user;
ax25_route *ax25_rt;
- int err;
+ int err = 0;
if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL)
return -EHOSTUNREACH;
@@ -453,7 +453,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
put:
ax25_put_route(ax25_rt);
- return 0;
+ return err;
}
struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src,
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 421c45bd1b95..ed0f22f57668 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -297,13 +297,12 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
mask |= POLLERR;
if (sk->sk_shutdown & RCV_SHUTDOWN)
- mask |= POLLRDHUP;
+ mask |= POLLRDHUP | POLLIN | POLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
- if (!skb_queue_empty(&sk->sk_receive_queue) ||
- (sk->sk_shutdown & RCV_SHUTDOWN))
+ if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
if (sk->sk_state == BT_CLOSED)
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 7dca91bb8c57..15ea84ba344e 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -179,13 +179,13 @@ static unsigned char rfcomm_crc_table[256] = {
/* FCS on 2 bytes */
static inline u8 __fcs(u8 *data)
{
- return (0xff - __crc(data));
+ return 0xff - __crc(data);
}
/* FCS on 3 bytes */
static inline u8 __fcs2(u8 *data)
{
- return (0xff - rfcomm_crc_table[__crc(data) ^ data[2]]);
+ return 0xff - rfcomm_crc_table[__crc(data) ^ data[2]];
}
/* Check FCS */
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index c03d2c3ff03e..89ad25a76202 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -61,30 +61,27 @@ static int port_cost(struct net_device *dev)
}
-/*
- * Check for port carrier transistions.
- * Called from work queue to allow for calling functions that
- * might sleep (such as speed check), and to debounce.
- */
+/* Check for port carrier transistions. */
void br_port_carrier_check(struct net_bridge_port *p)
{
struct net_device *dev = p->dev;
struct net_bridge *br = p->br;
- if (netif_carrier_ok(dev))
+ if (netif_running(dev) && netif_carrier_ok(dev))
p->path_cost = port_cost(dev);
- if (netif_running(br->dev)) {
- spin_lock_bh(&br->lock);
- if (netif_carrier_ok(dev)) {
- if (p->state == BR_STATE_DISABLED)
- br_stp_enable_port(p);
- } else {
- if (p->state != BR_STATE_DISABLED)
- br_stp_disable_port(p);
- }
- spin_unlock_bh(&br->lock);
+ if (!netif_running(br->dev))
+ return;
+
+ spin_lock_bh(&br->lock);
+ if (netif_running(dev) && netif_carrier_ok(dev)) {
+ if (p->state == BR_STATE_DISABLED)
+ br_stp_enable_port(p);
+ } else {
+ if (p->state != BR_STATE_DISABLED)
+ br_stp_disable_port(p);
}
+ spin_unlock_bh(&br->lock);
}
static void release_nbp(struct kobject *kobj)
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 826cd5221536..6d04cfdf4541 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -141,7 +141,7 @@ struct sk_buff *br_handle_frame(struct sk_buff *skb)
const unsigned char *dest = eth_hdr(skb)->h_dest;
int (*rhook)(struct sk_buff *skb);
- if (skb->pkt_type == PACKET_LOOPBACK)
+ if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
return skb;
if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 137f23259a93..77f7b5fda45a 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -209,6 +209,72 @@ static inline void nf_bridge_update_protocol(struct sk_buff *skb)
skb->protocol = htons(ETH_P_PPP_SES);
}
+/* When handing a packet over to the IP layer
+ * check whether we have a skb that is in the
+ * expected format
+ */
+
+int br_parse_ip_options(struct sk_buff *skb)
+{
+ struct ip_options *opt;
+ struct iphdr *iph;
+ struct net_device *dev = skb->dev;
+ u32 len;
+
+ iph = ip_hdr(skb);
+ opt = &(IPCB(skb)->opt);
+
+ /* Basic sanity checks */
+ if (iph->ihl < 5 || iph->version != 4)
+ goto inhdr_error;
+
+ if (!pskb_may_pull(skb, iph->ihl*4))
+ goto inhdr_error;
+
+ iph = ip_hdr(skb);
+ if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
+ goto inhdr_error;
+
+ len = ntohs(iph->tot_len);
+ if (skb->len < len) {
+ IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
+ goto drop;
+ } else if (len < (iph->ihl*4))
+ goto inhdr_error;
+
+ if (pskb_trim_rcsum(skb, len)) {
+ IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
+ goto drop;
+ }
+
+ /* Zero out the CB buffer if no options present */
+ if (iph->ihl == 5) {
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ return 0;
+ }
+
+ opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
+ if (ip_options_compile(dev_net(dev), opt, skb))
+ goto inhdr_error;
+
+ /* Check correct handling of SRR option */
+ if (unlikely(opt->srr)) {
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
+ if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
+ goto drop;
+
+ if (ip_options_rcv_srr(skb))
+ goto drop;
+ }
+
+ return 0;
+
+inhdr_error:
+ IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
+drop:
+ return -1;
+}
+
/* Fill in the header for fragmented IP packets handled by
* the IPv4 connection tracking code.
*/
@@ -549,7 +615,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
{
struct net_bridge_port *p;
struct net_bridge *br;
- struct iphdr *iph;
__u32 len = nf_bridge_encap_header_len(skb);
if (unlikely(!pskb_may_pull(skb, len)))
@@ -578,28 +643,9 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
nf_bridge_pull_encap_header_rcsum(skb);
- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
- goto inhdr_error;
-
- iph = ip_hdr(skb);
- if (iph->ihl < 5 || iph->version != 4)
- goto inhdr_error;
-
- if (!pskb_may_pull(skb, 4 * iph->ihl))
- goto inhdr_error;
-
- iph = ip_hdr(skb);
- if (ip_fast_csum((__u8 *) iph, iph->ihl) != 0)
- goto inhdr_error;
-
- len = ntohs(iph->tot_len);
- if (skb->len < len || len < 4 * iph->ihl)
- goto inhdr_error;
-
- pskb_trim_rcsum(skb, len);
-
- /* BUG: Should really parse the IP options here. */
- memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ if (br_parse_ip_options(skb))
+ /* Drop invalid packet */
+ goto out;
nf_bridge_put(skb->nf_bridge);
if (!nf_bridge_alloc(skb))
@@ -614,8 +660,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
return NF_STOLEN;
-inhdr_error:
-// IP_INC_STATS_BH(IpInHdrErrors);
out:
return NF_DROP;
}
@@ -759,14 +803,19 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
+ int ret;
+
if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
!skb_is_gso(skb)) {
- /* BUG: Should really parse the IP options here. */
- memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
- return ip_fragment(skb, br_dev_queue_push_xmit);
+ if (br_parse_ip_options(skb))
+ /* Drop invalid packet */
+ return NF_DROP;
+ ret = ip_fragment(skb, br_dev_queue_push_xmit);
} else
- return br_dev_queue_push_xmit(skb);
+ ret = br_dev_queue_push_xmit(skb);
+
+ return ret;
}
#else
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 0b586e9d1378..b99369a055d1 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -9,6 +9,8 @@
* and Sakari Ailus <sakari.ailus@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -171,7 +173,7 @@ static int receive(struct sk_buff *skb, struct net_device *dev,
net = dev_net(dev);
pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
caifd = caif_get(dev);
- if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+ if (!caifd || !caifd->layer.up || !caifd->layer.up->receive)
return NET_RX_DROP;
if (caifd->layer.up->receive(caifd->layer.up, pkt))
@@ -214,7 +216,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
switch (what) {
case NETDEV_REGISTER:
- pr_info("CAIF: %s():register %s\n", __func__, dev->name);
+ netdev_info(dev, "register\n");
caifd = caif_device_alloc(dev);
if (caifd == NULL)
break;
@@ -225,14 +227,13 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
break;
case NETDEV_UP:
- pr_info("CAIF: %s(): up %s\n", __func__, dev->name);
+ netdev_info(dev, "up\n");
caifd = caif_get(dev);
if (caifd == NULL)
break;
caifdev = netdev_priv(dev);
if (atomic_read(&caifd->state) == NETDEV_UP) {
- pr_info("CAIF: %s():%s already up\n",
- __func__, dev->name);
+ netdev_info(dev, "already up\n");
break;
}
atomic_set(&caifd->state, what);
@@ -273,7 +274,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
caifd = caif_get(dev);
if (caifd == NULL)
break;
- pr_info("CAIF: %s():going down %s\n", __func__, dev->name);
+ netdev_info(dev, "going down\n");
if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN ||
atomic_read(&caifd->state) == NETDEV_DOWN)
@@ -295,11 +296,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
caifd = caif_get(dev);
if (caifd == NULL)
break;
- pr_info("CAIF: %s(): down %s\n", __func__, dev->name);
+ netdev_info(dev, "down\n");
if (atomic_read(&caifd->in_use))
- pr_warning("CAIF: %s(): "
- "Unregistering an active CAIF device: %s\n",
- __func__, dev->name);
+ netdev_warn(dev,
+ "Unregistering an active CAIF device\n");
cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
dev_put(dev);
atomic_set(&caifd->state, what);
@@ -307,7 +307,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
case NETDEV_UNREGISTER:
caifd = caif_get(dev);
- pr_info("CAIF: %s(): unregister %s\n", __func__, dev->name);
+ netdev_info(dev, "unregister\n");
atomic_set(&caifd->state, what);
caif_device_destroy(dev);
break;
@@ -391,7 +391,7 @@ static int __init caif_device_init(void)
int result;
cfg = cfcnfg_create();
if (!cfg) {
- pr_warning("CAIF: %s(): can't create cfcnfg.\n", __func__);
+ pr_warn("can't create cfcnfg\n");
goto err_cfcnfg_create_failed;
}
result = register_pernet_device(&caif_net_ops);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 8ce904786116..4d918f8f4e67 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -28,9 +30,6 @@
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(AF_CAIF);
-#define CAIF_DEF_SNDBUF (4096*10)
-#define CAIF_DEF_RCVBUF (4096*100)
-
/*
* CAIF state is re-using the TCP socket states.
* caif_states stored in sk_state reflect the state as reported by
@@ -157,9 +156,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
(unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
- trace_printk("CAIF: %s():"
- " sending flow OFF (queue len = %d %d)\n",
- __func__,
+ pr_debug("sending flow OFF (queue len = %d %d)\n",
atomic_read(&cf_sk->sk.sk_rmem_alloc),
sk_rcvbuf_lowwater(cf_sk));
set_rx_flow_off(cf_sk);
@@ -172,9 +169,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
return err;
if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
set_rx_flow_off(cf_sk);
- trace_printk("CAIF: %s():"
- " sending flow OFF due to rmem_schedule\n",
- __func__);
+ pr_debug("sending flow OFF due to rmem_schedule\n");
dbfs_atomic_inc(&cnt.num_rx_flow_off);
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
@@ -275,8 +270,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
break;
default:
- pr_debug("CAIF: %s(): Unexpected flow command %d\n",
- __func__, flow);
+ pr_debug("Unexpected flow command %d\n", flow);
}
}
@@ -536,8 +530,7 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
/* Slight paranoia, probably not needed. */
if (unlikely(loopcnt++ > 1000)) {
- pr_warning("CAIF: %s(): transmit retries failed,"
- " error = %d\n", __func__, ret);
+ pr_warn("transmit retries failed, error = %d\n", ret);
break;
}
@@ -902,8 +895,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
cf_sk->maxframe = dev->mtu - (headroom + tailroom);
dev_put(dev);
if (cf_sk->maxframe < 1) {
- pr_warning("CAIF: %s(): CAIF Interface MTU too small (%d)\n",
- __func__, dev->mtu);
+ pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
err = -ENODEV;
goto out;
}
@@ -1123,10 +1115,6 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
/* Store the protocol */
sk->sk_protocol = (unsigned char) protocol;
- /* Sendbuf dictates the amount of outbound packets not yet sent */
- sk->sk_sndbuf = CAIF_DEF_SNDBUF;
- sk->sk_rcvbuf = CAIF_DEF_RCVBUF;
-
/*
* Lock in order to try to stop someone from opening the socket
* too early.
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 1c29189b344d..41adafd18914 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -3,6 +3,9 @@
* Author: Sjur Brendeland/sjur.brandeland@stericsson.com
* License terms: GNU General Public License (GPL) version 2
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/slab.h>
@@ -78,7 +81,7 @@ struct cfcnfg *cfcnfg_create(void)
/* Initiate this layer */
this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
if (!this) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
this->mux = cfmuxl_create();
@@ -106,7 +109,7 @@ struct cfcnfg *cfcnfg_create(void)
layer_set_up(this->ctrl, this);
return this;
out_of_mem:
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
kfree(this->mux);
kfree(this->ctrl);
kfree(this);
@@ -194,7 +197,7 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
caif_assert(adap_layer != NULL);
channel_id = adap_layer->id;
if (adap_layer->dn == NULL || channel_id == 0) {
- pr_err("CAIF: %s():adap_layer->id is 0\n", __func__);
+ pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n");
ret = -ENOTCONN;
goto end;
}
@@ -204,9 +207,8 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
layer_set_up(servl, NULL);
ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
if (servl == NULL) {
- pr_err("CAIF: %s(): PROTOCOL ERROR "
- "- Error removing service_layer Channel_Id(%d)",
- __func__, channel_id);
+ pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)",
+ channel_id);
ret = -EINVAL;
goto end;
}
@@ -216,18 +218,14 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
if (phyinfo == NULL) {
- pr_warning("CAIF: %s(): "
- "No interface to send disconnect to\n",
- __func__);
+ pr_warn("No interface to send disconnect to\n");
ret = -ENODEV;
goto end;
}
if (phyinfo->id != phyid ||
phyinfo->phy_layer->id != phyid ||
phyinfo->frm_layer->id != phyid) {
- pr_err("CAIF: %s(): "
- "Inconsistency in phy registration\n",
- __func__);
+ pr_err("Inconsistency in phy registration\n");
ret = -EINVAL;
goto end;
}
@@ -276,21 +274,20 @@ int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
{
struct cflayer *frml;
if (adap_layer == NULL) {
- pr_err("CAIF: %s(): adap_layer is zero", __func__);
+ pr_err("adap_layer is zero\n");
return -EINVAL;
}
if (adap_layer->receive == NULL) {
- pr_err("CAIF: %s(): adap_layer->receive is NULL", __func__);
+ pr_err("adap_layer->receive is NULL\n");
return -EINVAL;
}
if (adap_layer->ctrlcmd == NULL) {
- pr_err("CAIF: %s(): adap_layer->ctrlcmd == NULL", __func__);
+ pr_err("adap_layer->ctrlcmd == NULL\n");
return -EINVAL;
}
frml = cnfg->phy_layers[param->phyid].frm_layer;
if (frml == NULL) {
- pr_err("CAIF: %s(): Specified PHY type does not exist!",
- __func__);
+ pr_err("Specified PHY type does not exist!\n");
return -ENODEV;
}
caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id);
@@ -330,9 +327,7 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
struct net_device *netdev;
if (adapt_layer == NULL) {
- pr_debug("CAIF: %s(): link setup response "
- "but no client exist, send linkdown back\n",
- __func__);
+ pr_debug("link setup response but no client exist, send linkdown back\n");
cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
return;
}
@@ -374,13 +369,11 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
break;
default:
- pr_err("CAIF: %s(): Protocol error. "
- "Link setup response - unknown channel type\n",
- __func__);
+ pr_err("Protocol error. Link setup response - unknown channel type\n");
return;
}
if (!servicel) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return;
}
layer_set_dn(servicel, cnfg->mux);
@@ -418,7 +411,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
}
}
if (*phyid == 0) {
- pr_err("CAIF: %s(): No Available PHY ID\n", __func__);
+ pr_err("No Available PHY ID\n");
return;
}
@@ -427,7 +420,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
phy_driver =
cfserl_create(CFPHYTYPE_FRAG, *phyid, stx);
if (!phy_driver) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return;
}
@@ -436,7 +429,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
phy_driver = NULL;
break;
default:
- pr_err("CAIF: %s(): %d", __func__, phy_type);
+ pr_err("%d\n", phy_type);
return;
break;
}
@@ -455,7 +448,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
phy_layer->type = phy_type;
frml = cffrml_create(*phyid, fcs);
if (!frml) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return;
}
cnfg->phy_layers[*phyid].frm_layer = frml;
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 563145fdc4c3..08f267a109aa 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -36,7 +38,7 @@ struct cflayer *cfctrl_create(void)
struct cfctrl *this =
kmalloc(sizeof(struct cfctrl), GFP_ATOMIC);
if (!this) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
@@ -132,9 +134,7 @@ struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
if (cfctrl_req_eq(req, p)) {
if (p != first)
- pr_warning("CAIF: %s(): Requests are not "
- "received in order\n",
- __func__);
+ pr_warn("Requests are not received in order\n");
atomic_set(&ctrl->rsp_seq_no,
p->sequence_no);
@@ -177,7 +177,7 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
int ret;
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
if (!pkt) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return;
}
caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
@@ -189,8 +189,7 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
ret =
cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
if (ret < 0) {
- pr_err("CAIF: %s(): Could not transmit enum message\n",
- __func__);
+ pr_err("Could not transmit enum message\n");
cfpkt_destroy(pkt);
}
}
@@ -208,7 +207,7 @@ int cfctrl_linkup_request(struct cflayer *layer,
char utility_name[16];
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
if (!pkt) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return -ENOMEM;
}
cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
@@ -253,13 +252,13 @@ int cfctrl_linkup_request(struct cflayer *layer,
param->u.utility.paramlen);
break;
default:
- pr_warning("CAIF: %s():Request setup of bad link type = %d\n",
- __func__, param->linktype);
+ pr_warn("Request setup of bad link type = %d\n",
+ param->linktype);
return -EINVAL;
}
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return -ENOMEM;
}
req->client_layer = user_layer;
@@ -276,8 +275,7 @@ int cfctrl_linkup_request(struct cflayer *layer,
ret =
cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
if (ret < 0) {
- pr_err("CAIF: %s(): Could not transmit linksetup request\n",
- __func__);
+ pr_err("Could not transmit linksetup request\n");
cfpkt_destroy(pkt);
return -ENODEV;
}
@@ -291,7 +289,7 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
struct cfctrl *cfctrl = container_obj(layer);
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
if (!pkt) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return -ENOMEM;
}
cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
@@ -300,8 +298,7 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
ret =
cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
if (ret < 0) {
- pr_err("CAIF: %s(): Could not transmit link-down request\n",
- __func__);
+ pr_err("Could not transmit link-down request\n");
cfpkt_destroy(pkt);
}
return ret;
@@ -313,7 +310,7 @@ void cfctrl_sleep_req(struct cflayer *layer)
struct cfctrl *cfctrl = container_obj(layer);
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
if (!pkt) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return;
}
cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP);
@@ -330,7 +327,7 @@ void cfctrl_wake_req(struct cflayer *layer)
struct cfctrl *cfctrl = container_obj(layer);
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
if (!pkt) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return;
}
cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE);
@@ -347,7 +344,7 @@ void cfctrl_getstartreason_req(struct cflayer *layer)
struct cfctrl *cfctrl = container_obj(layer);
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
if (!pkt) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return;
}
cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON);
@@ -364,12 +361,11 @@ void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
struct cfctrl_request_info *p, *tmp;
struct cfctrl *ctrl = container_obj(layr);
spin_lock(&ctrl->info_list_lock);
- pr_warning("CAIF: %s(): enter\n", __func__);
+ pr_warn("enter\n");
list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
if (p->client_layer == adap_layer) {
- pr_warning("CAIF: %s(): cancel req :%d\n", __func__,
- p->sequence_no);
+ pr_warn("cancel req :%d\n", p->sequence_no);
list_del(&p->list);
kfree(p);
}
@@ -520,9 +516,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
cfpkt_extr_head(pkt, &param, len);
break;
default:
- pr_warning("CAIF: %s(): Request setup "
- "- invalid link type (%d)",
- __func__, serv);
+ pr_warn("Request setup - invalid link type (%d)\n",
+ serv);
goto error;
}
@@ -532,9 +527,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
cfpkt_erroneous(pkt)) {
- pr_err("CAIF: %s(): Invalid O/E bit or parse "
- "error on CAIF control channel",
- __func__);
+ pr_err("Invalid O/E bit or parse error on CAIF control channel\n");
cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
0,
req ? req->client_layer
@@ -556,8 +549,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
break;
case CFCTRL_CMD_LINK_ERR:
- pr_err("CAIF: %s(): Frame Error Indication received\n",
- __func__);
+ pr_err("Frame Error Indication received\n");
cfctrl->res.linkerror_ind();
break;
case CFCTRL_CMD_ENUM:
@@ -576,7 +568,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
cfctrl->res.radioset_rsp();
break;
default:
- pr_err("CAIF: %s(): Unrecognized Control Frame\n", __func__);
+ pr_err("Unrecognized Control Frame\n");
goto error;
break;
}
@@ -595,8 +587,7 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
case CAIF_CTRLCMD_FLOW_OFF_IND:
spin_lock(&this->info_list_lock);
if (!list_empty(&this->list)) {
- pr_debug("CAIF: %s(): Received flow off in "
- "control layer", __func__);
+ pr_debug("Received flow off in control layer\n");
}
spin_unlock(&this->info_list_lock);
break;
@@ -620,7 +611,7 @@ static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
if (!ctrl->loop_linkused[linkid])
goto found;
spin_unlock(&ctrl->loop_linkid_lock);
- pr_err("CAIF: %s(): Out of link-ids\n", __func__);
+ pr_err("Out of link-ids\n");
return -EINVAL;
found:
if (!ctrl->loop_linkused[linkid])
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 676648cac8dd..496fda9ac66f 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/stddef.h>
#include <linux/slab.h>
#include <net/caif/caif_layer.h>
@@ -17,7 +19,7 @@ struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info)
{
struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
if (!dbg) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index ed9d53aff280..d3ed264ad6c4 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -26,7 +28,7 @@ struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info)
{
struct cfsrvl *dgm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
if (!dgm) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
@@ -49,14 +51,14 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
caif_assert(layr->ctrlcmd != NULL);
if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
if ((cmd & DGM_CMD_BIT) == 0) {
if (cfpkt_extr_head(pkt, &dgmhdr, 3) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
@@ -75,8 +77,7 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
return 0;
default:
cfpkt_destroy(pkt);
- pr_info("CAIF: %s(): Unknown datagram control %d (0x%x)\n",
- __func__, cmd, cmd);
+ pr_info("Unknown datagram control %d (0x%x)\n", cmd, cmd);
return -EPROTO;
}
}
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index e86a4ca3b217..a445043931ae 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -6,6 +6,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -32,7 +34,7 @@ struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
{
struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC);
if (!this) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
caif_assert(offsetof(struct cffrml, layer) == 0);
@@ -83,7 +85,7 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
if (cfpkt_setlen(pkt, len) < 0) {
++cffrml_rcv_error;
- pr_err("CAIF: %s():Framing length error (%d)\n", __func__, len);
+ pr_err("Framing length error (%d)\n", len);
cfpkt_destroy(pkt);
return -EPROTO;
}
@@ -99,14 +101,14 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
cfpkt_add_trail(pkt, &tmp, 2);
++cffrml_rcv_error;
++cffrml_rcv_checsum_error;
- pr_info("CAIF: %s(): Frame checksum error "
- "(0x%x != 0x%x)\n", __func__, hdrchks, pktchks);
+ pr_info("Frame checksum error (0x%x != 0x%x)\n",
+ hdrchks, pktchks);
return -EILSEQ;
}
}
if (cfpkt_erroneous(pkt)) {
++cffrml_rcv_error;
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
@@ -132,7 +134,7 @@ static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
cfpkt_add_head(pkt, &tmp, 2);
cfpkt_info(pkt)->hdr_len += 2;
if (cfpkt_erroneous(pkt)) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ pr_err("Packet is erroneous!\n");
return -EPROTO;
}
ret = layr->dn->transmit(layr->dn, pkt);
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 80c8d332b258..46f34b2e0478 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -3,6 +3,9 @@
* Author: Sjur Brendeland/sjur.brandeland@stericsson.com
* License terms: GNU General Public License (GPL) version 2
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -190,7 +193,7 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
u8 id;
struct cflayer *up;
if (cfpkt_extr_head(pkt, &id, 1) < 0) {
- pr_err("CAIF: %s(): erroneous Caif Packet\n", __func__);
+ pr_err("erroneous Caif Packet\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
@@ -199,8 +202,8 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
up = get_up(muxl, id);
spin_unlock(&muxl->receive_lock);
if (up == NULL) {
- pr_info("CAIF: %s():Received data on unknown link ID = %d "
- "(0x%x) up == NULL", __func__, id, id);
+ pr_info("Received data on unknown link ID = %d (0x%x) up == NULL",
+ id, id);
cfpkt_destroy(pkt);
/*
* Don't return ERROR, since modem misbehaves and sends out
@@ -223,9 +226,8 @@ static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
struct caif_payload_info *info = cfpkt_info(pkt);
dn = get_dn(muxl, cfpkt_info(pkt)->dev_info);
if (dn == NULL) {
- pr_warning("CAIF: %s(): Send data on unknown phy "
- "ID = %d (0x%x)\n",
- __func__, info->dev_info->id, info->dev_info->id);
+ pr_warn("Send data on unknown phy ID = %d (0x%x)\n",
+ info->dev_info->id, info->dev_info->id);
return -ENOTCONN;
}
info->hdr_len += 1;
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index c49a6695793a..d7e865e2ff65 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/hardirq.h>
@@ -12,11 +14,12 @@
#define PKT_PREFIX 48
#define PKT_POSTFIX 2
#define PKT_LEN_WHEN_EXTENDING 128
-#define PKT_ERROR(pkt, errmsg) do { \
- cfpkt_priv(pkt)->erronous = true; \
- skb_reset_tail_pointer(&pkt->skb); \
- pr_warning("CAIF: " errmsg);\
- } while (0)
+#define PKT_ERROR(pkt, errmsg) \
+do { \
+ cfpkt_priv(pkt)->erronous = true; \
+ skb_reset_tail_pointer(&pkt->skb); \
+ pr_warn(errmsg); \
+} while (0)
struct cfpktq {
struct sk_buff_head head;
@@ -130,13 +133,13 @@ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
return -EPROTO;
if (unlikely(len > skb->len)) {
- PKT_ERROR(pkt, "cfpkt_extr_head read beyond end of packet\n");
+ PKT_ERROR(pkt, "read beyond end of packet\n");
return -EPROTO;
}
if (unlikely(len > skb_headlen(skb))) {
if (unlikely(skb_linearize(skb) != 0)) {
- PKT_ERROR(pkt, "cfpkt_extr_head linearize failed\n");
+ PKT_ERROR(pkt, "linearize failed\n");
return -EPROTO;
}
}
@@ -156,11 +159,11 @@ int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
return -EPROTO;
if (unlikely(skb_linearize(skb) != 0)) {
- PKT_ERROR(pkt, "cfpkt_extr_trail linearize failed\n");
+ PKT_ERROR(pkt, "linearize failed\n");
return -EPROTO;
}
if (unlikely(skb->data + len > skb_tail_pointer(skb))) {
- PKT_ERROR(pkt, "cfpkt_extr_trail read beyond end of packet\n");
+ PKT_ERROR(pkt, "read beyond end of packet\n");
return -EPROTO;
}
from = skb_tail_pointer(skb) - len;
@@ -202,7 +205,7 @@ int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
/* Make sure data is writable */
if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) {
- PKT_ERROR(pkt, "cfpkt_add_body: cow failed\n");
+ PKT_ERROR(pkt, "cow failed\n");
return -EPROTO;
}
/*
@@ -211,8 +214,7 @@ int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
* lengths of the top SKB.
*/
if (lastskb != skb) {
- pr_warning("CAIF: %s(): Packet is non-linear\n",
- __func__);
+ pr_warn("Packet is non-linear\n");
skb->len += len;
skb->data_len += len;
}
@@ -242,14 +244,14 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
if (unlikely(is_erronous(pkt)))
return -EPROTO;
if (unlikely(skb_headroom(skb) < len)) {
- PKT_ERROR(pkt, "cfpkt_add_head: no headroom\n");
+ PKT_ERROR(pkt, "no headroom\n");
return -EPROTO;
}
/* Make sure data is writable */
ret = skb_cow_data(skb, 0, &lastskb);
if (unlikely(ret < 0)) {
- PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n");
+ PKT_ERROR(pkt, "cow failed\n");
return ret;
}
@@ -283,7 +285,7 @@ inline u16 cfpkt_iterate(struct cfpkt *pkt,
if (unlikely(is_erronous(pkt)))
return -EPROTO;
if (unlikely(skb_linearize(&pkt->skb) != 0)) {
- PKT_ERROR(pkt, "cfpkt_iterate: linearize failed\n");
+ PKT_ERROR(pkt, "linearize failed\n");
return -EPROTO;
}
return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
@@ -309,7 +311,7 @@ int cfpkt_setlen(struct cfpkt *pkt, u16 len)
/* Need to expand SKB */
if (unlikely(!cfpkt_pad_trail(pkt, len - skb->len)))
- PKT_ERROR(pkt, "cfpkt_setlen: skb_pad_trail failed\n");
+ PKT_ERROR(pkt, "skb_pad_trail failed\n");
return cfpkt_getlen(pkt);
}
@@ -380,8 +382,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
return NULL;
if (skb->data + pos > skb_tail_pointer(skb)) {
- PKT_ERROR(pkt,
- "cfpkt_split: trying to split beyond end of packet");
+ PKT_ERROR(pkt, "trying to split beyond end of packet\n");
return NULL;
}
@@ -455,17 +456,17 @@ int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen)
return -EPROTO;
/* Make sure SKB is writable */
if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
- PKT_ERROR(pkt, "cfpkt_raw_append: skb_cow_data failed\n");
+ PKT_ERROR(pkt, "skb_cow_data failed\n");
return -EPROTO;
}
if (unlikely(skb_linearize(skb) != 0)) {
- PKT_ERROR(pkt, "cfpkt_raw_append: linearize failed\n");
+ PKT_ERROR(pkt, "linearize failed\n");
return -EPROTO;
}
if (unlikely(skb_tailroom(skb) < buflen)) {
- PKT_ERROR(pkt, "cfpkt_raw_append: buffer too short - failed\n");
+ PKT_ERROR(pkt, "buffer too short - failed\n");
return -EPROTO;
}
@@ -483,14 +484,13 @@ int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen)
return -EPROTO;
if (unlikely(buflen > skb->len)) {
- PKT_ERROR(pkt, "cfpkt_raw_extract: buflen too large "
- "- failed\n");
+ PKT_ERROR(pkt, "buflen too large - failed\n");
return -EPROTO;
}
if (unlikely(buflen > skb_headlen(skb))) {
if (unlikely(skb_linearize(skb) != 0)) {
- PKT_ERROR(pkt, "cfpkt_raw_extract: linearize failed\n");
+ PKT_ERROR(pkt, "linearize failed\n");
return -EPROTO;
}
}
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 9a699242d104..bde8481e8d25 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -48,7 +50,7 @@ struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
if (!this) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
@@ -178,9 +180,7 @@ out:
cfpkt_destroy(rfml->incomplete_frm);
rfml->incomplete_frm = NULL;
- pr_info("CAIF: %s(): "
- "Connection error %d triggered on RFM link\n",
- __func__, err);
+ pr_info("Connection error %d triggered on RFM link\n", err);
/* Trigger connection error upon failure.*/
layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
@@ -280,9 +280,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
out:
if (err != 0) {
- pr_info("CAIF: %s(): "
- "Connection error %d triggered on RFM link\n",
- __func__, err);
+ pr_info("Connection error %d triggered on RFM link\n", err);
/* Trigger connection error upon failure.*/
layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index a11fbd68a13d..9297f7dea9d8 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -34,7 +36,7 @@ struct cflayer *cfserl_create(int type, int instance, bool use_stx)
{
struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC);
if (!this) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
caif_assert(offsetof(struct cfserl, layer) == 0);
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index f40939a91211..ab5e542526bf 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -79,8 +81,7 @@ static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
layr->up->ctrlcmd(layr->up, ctrl, phyid);
break;
default:
- pr_warning("CAIF: %s(): "
- "Unexpected ctrl in cfsrvl (%d)\n", __func__, ctrl);
+ pr_warn("Unexpected ctrl in cfsrvl (%d)\n", ctrl);
/* We have both modem and phy flow on, send flow on */
layr->up->ctrlcmd(layr->up, ctrl, phyid);
service->phy_flow_on = true;
@@ -107,14 +108,12 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
u8 flow_on = SRVL_FLOW_ON;
pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
if (!pkt) {
- pr_warning("CAIF: %s(): Out of memory\n",
- __func__);
+ pr_warn("Out of memory\n");
return -ENOMEM;
}
if (cfpkt_add_head(pkt, &flow_on, 1) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n",
- __func__);
+ pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
@@ -131,14 +130,12 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
u8 flow_off = SRVL_FLOW_OFF;
pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
if (!pkt) {
- pr_warning("CAIF: %s(): Out of memory\n",
- __func__);
+ pr_warn("Out of memory\n");
return -ENOMEM;
}
if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n",
- __func__);
+ pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 02795aff57a4..efad410e4c82 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
@@ -26,7 +28,7 @@ struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info)
{
struct cfsrvl *util = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
if (!util) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
@@ -47,7 +49,7 @@ static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt)
caif_assert(layr->up->receive != NULL);
caif_assert(layr->up->ctrlcmd != NULL);
if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
@@ -64,16 +66,14 @@ static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt)
cfpkt_destroy(pkt);
return 0;
case UTIL_REMOTE_SHUTDOWN: /* Remote Shutdown Request */
- pr_err("CAIF: %s(): REMOTE SHUTDOWN REQUEST RECEIVED\n",
- __func__);
+ pr_err("REMOTE SHUTDOWN REQUEST RECEIVED\n");
layr->ctrlcmd(layr, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 0);
service->open = false;
cfpkt_destroy(pkt);
return 0;
default:
cfpkt_destroy(pkt);
- pr_warning("CAIF: %s(): Unknown service control %d (0x%x)\n",
- __func__, cmd, cmd);
+ pr_warn("Unknown service control %d (0x%x)\n", cmd, cmd);
return -EPROTO;
}
}
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 77cc09faac9a..3b425b189a99 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/stddef.h>
#include <linux/slab.h>
#include <net/caif/caif_layer.h>
@@ -25,7 +27,7 @@ struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
{
struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
if (!vei) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
@@ -47,7 +49,7 @@ static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt)
if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
@@ -67,8 +69,7 @@ static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt)
cfpkt_destroy(pkt);
return 0;
default: /* SET RS232 PIN */
- pr_warning("CAIF: %s():Unknown VEI control packet %d (0x%x)!\n",
- __func__, cmd, cmd);
+ pr_warn("Unknown VEI control packet %d (0x%x)!\n", cmd, cmd);
cfpkt_destroy(pkt);
return -EPROTO;
}
@@ -86,7 +87,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
caif_assert(layr->dn->transmit != NULL);
if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ pr_err("Packet is erroneous!\n");
return -EPROTO;
}
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index ada6ee2d48f5..bf6fef2a0eff 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
@@ -21,7 +23,7 @@ struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info)
{
struct cfsrvl *vid = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
if (!vid) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
@@ -38,7 +40,7 @@ static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt)
{
u32 videoheader;
if (cfpkt_extr_head(pkt, &videoheader, 4) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 4293e190ec53..84a422c98941 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -5,6 +5,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/version.h>
#include <linux/fs.h>
#include <linux/init.h>
@@ -28,9 +30,6 @@
#define CONNECT_TIMEOUT (5 * HZ)
#define CAIF_NET_DEFAULT_QUEUE_LEN 500
-#undef pr_debug
-#define pr_debug pr_warning
-
/*This list is protected by the rtnl lock. */
static LIST_HEAD(chnl_net_list);
@@ -142,8 +141,7 @@ static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
int phyid)
{
struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
- pr_debug("CAIF: %s(): NET flowctrl func called flow: %s\n",
- __func__,
+ pr_debug("NET flowctrl func called flow: %s\n",
flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :
flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" :
flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" :
@@ -196,12 +194,12 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
priv = netdev_priv(dev);
if (skb->len > priv->netdev->mtu) {
- pr_warning("CAIF: %s(): Size of skb exceeded MTU\n", __func__);
+ pr_warn("Size of skb exceeded MTU\n");
return -ENOSPC;
}
if (!priv->flowenabled) {
- pr_debug("CAIF: %s(): dropping packets flow off\n", __func__);
+ pr_debug("dropping packets flow off\n");
return NETDEV_TX_BUSY;
}
@@ -237,7 +235,7 @@ static int chnl_net_open(struct net_device *dev)
ASSERT_RTNL();
priv = netdev_priv(dev);
if (!priv) {
- pr_debug("CAIF: %s(): chnl_net_open: no priv\n", __func__);
+ pr_debug("chnl_net_open: no priv\n");
return -ENODEV;
}
@@ -246,18 +244,17 @@ static int chnl_net_open(struct net_device *dev)
result = caif_connect_client(&priv->conn_req, &priv->chnl,
&llifindex, &headroom, &tailroom);
if (result != 0) {
- pr_debug("CAIF: %s(): err: "
- "Unable to register and open device,"
- " Err:%d\n",
- __func__,
- result);
+ pr_debug("err: "
+ "Unable to register and open device,"
+ " Err:%d\n",
+ result);
goto error;
}
lldev = dev_get_by_index(dev_net(dev), llifindex);
if (lldev == NULL) {
- pr_debug("CAIF: %s(): no interface?\n", __func__);
+ pr_debug("no interface?\n");
result = -ENODEV;
goto error;
}
@@ -279,9 +276,7 @@ static int chnl_net_open(struct net_device *dev)
dev_put(lldev);
if (mtu < 100) {
- pr_warning("CAIF: %s(): "
- "CAIF Interface MTU too small (%d)\n",
- __func__, mtu);
+ pr_warn("CAIF Interface MTU too small (%d)\n", mtu);
result = -ENODEV;
goto error;
}
@@ -296,33 +291,32 @@ static int chnl_net_open(struct net_device *dev)
rtnl_lock();
if (result == -ERESTARTSYS) {
- pr_debug("CAIF: %s(): wait_event_interruptible"
- " woken by a signal\n", __func__);
+ pr_debug("wait_event_interruptible woken by a signal\n");
result = -ERESTARTSYS;
goto error;
}
if (result == 0) {
- pr_debug("CAIF: %s(): connect timeout\n", __func__);
+ pr_debug("connect timeout\n");
caif_disconnect_client(&priv->chnl);
priv->state = CAIF_DISCONNECTED;
- pr_debug("CAIF: %s(): state disconnected\n", __func__);
+ pr_debug("state disconnected\n");
result = -ETIMEDOUT;
goto error;
}
if (priv->state != CAIF_CONNECTED) {
- pr_debug("CAIF: %s(): connect failed\n", __func__);
+ pr_debug("connect failed\n");
result = -ECONNREFUSED;
goto error;
}
- pr_debug("CAIF: %s(): CAIF Netdevice connected\n", __func__);
+ pr_debug("CAIF Netdevice connected\n");
return 0;
error:
caif_disconnect_client(&priv->chnl);
priv->state = CAIF_DISCONNECTED;
- pr_debug("CAIF: %s(): state disconnected\n", __func__);
+ pr_debug("state disconnected\n");
return result;
}
@@ -413,7 +407,7 @@ static void caif_netlink_parms(struct nlattr *data[],
struct caif_connect_request *conn_req)
{
if (!data) {
- pr_warning("CAIF: %s: no params data found\n", __func__);
+ pr_warn("no params data found\n");
return;
}
if (data[IFLA_CAIF_IPV4_CONNID])
@@ -442,8 +436,7 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
ret = register_netdevice(dev);
if (ret)
- pr_warning("CAIF: %s(): device rtml registration failed\n",
- __func__);
+ pr_warn("device rtml registration failed\n");
return ret;
}
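/*
 * The CAIF conversion above relies on pr_warn()/pr_err()/pr_debug() expanding
 * their format string through the per-file pr_fmt() macro, so the module name
 * and function name no longer have to be spelled out at every call site.
 * A minimal userspace sketch (not part of the patch) of how that composition
 * works, with printf() standing in for printk() and "caif" for KBUILD_MODNAME:
 */

#include <stdio.h>

#define KBUILD_MODNAME "caif"				/* stand-in value */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
#define pr_warn(fmt, ...) printf("warning: " pr_fmt(fmt), ##__VA_ARGS__)

static void *cfserl_create_demo(void)
{
	void *this = NULL;	/* pretend the allocation failed */

	if (!this) {
		/* prints: warning: caif:cfserl_create_demo(): Out of memory */
		pr_warn("Out of memory\n");
		return NULL;
	}
	return this;
}

int main(void)
{
	cfserl_create_demo();
	return 0;
}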
diff --git a/net/can/raw.c b/net/can/raw.c
index a10e3338f084..7d77e67e57af 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -647,12 +647,12 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
if (err < 0)
goto free_skb;
- err = sock_tx_timestamp(msg, sk, skb_tx(skb));
+ err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
if (err < 0)
goto free_skb;
/* to be able to check the received tx sock reference in raw_rcv() */
- skb_tx(skb)->prevent_sk_orphan = 1;
+ skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
skb->dev = dev;
skb->sk = sk;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 251997a95483..4df1b7a6c1bf 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -746,13 +746,12 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
mask |= POLLERR;
if (sk->sk_shutdown & RCV_SHUTDOWN)
- mask |= POLLRDHUP;
+ mask |= POLLRDHUP | POLLIN | POLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
/* readable? */
- if (!skb_queue_empty(&sk->sk_receive_queue) ||
- (sk->sk_shutdown & RCV_SHUTDOWN))
+ if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
/* Connection-based need to check for termination and startup */
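/*
 * The datagram_poll() change above makes a socket whose receive side has been
 * shut down report POLLIN | POLLRDNORM directly, so a blocked poller wakes up
 * and can read the pending EOF instead of waiting for data that will never
 * arrive. A standalone sketch of the resulting mask logic (illustration, not
 * kernel code); RCV_SHUTDOWN/SHUTDOWN_MASK carry assumed values here:
 */

#define _GNU_SOURCE		/* for POLLRDHUP on glibc */
#include <poll.h>

#define RCV_SHUTDOWN	1	/* assumed values for the sketch */
#define SEND_SHUTDOWN	2
#define SHUTDOWN_MASK	(RCV_SHUTDOWN | SEND_SHUTDOWN)

static unsigned int datagram_poll_mask(int shutdown, int rx_queue_empty)
{
	unsigned int mask = 0;

	if (shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (!rx_queue_empty)		/* readable: data is queued */
		mask |= POLLIN | POLLRDNORM;

	return mask;
}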
diff --git a/net/core/dev.c b/net/core/dev.c
index 660dd41aaaa6..a313bab1b754 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -129,6 +129,7 @@
#include <linux/random.h>
#include <trace/events/napi.h>
#include <linux/pci.h>
+#include <linux/inetdevice.h>
#include "net-sysfs.h"
@@ -371,6 +372,14 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
* --ANK (980803)
*/
+static inline struct list_head *ptype_head(const struct packet_type *pt)
+{
+ if (pt->type == htons(ETH_P_ALL))
+ return &ptype_all;
+ else
+ return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
+}
+
/**
* dev_add_pack - add packet handler
* @pt: packet type declaration
@@ -386,16 +395,11 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
void dev_add_pack(struct packet_type *pt)
{
- int hash;
+ struct list_head *head = ptype_head(pt);
- spin_lock_bh(&ptype_lock);
- if (pt->type == htons(ETH_P_ALL))
- list_add_rcu(&pt->list, &ptype_all);
- else {
- hash = ntohs(pt->type) & PTYPE_HASH_MASK;
- list_add_rcu(&pt->list, &ptype_base[hash]);
- }
- spin_unlock_bh(&ptype_lock);
+ spin_lock(&ptype_lock);
+ list_add_rcu(&pt->list, head);
+ spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
@@ -414,15 +418,10 @@ EXPORT_SYMBOL(dev_add_pack);
*/
void __dev_remove_pack(struct packet_type *pt)
{
- struct list_head *head;
+ struct list_head *head = ptype_head(pt);
struct packet_type *pt1;
- spin_lock_bh(&ptype_lock);
-
- if (pt->type == htons(ETH_P_ALL))
- head = &ptype_all;
- else
- head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
+ spin_lock(&ptype_lock);
list_for_each_entry(pt1, head, list) {
if (pt == pt1) {
@@ -433,7 +432,7 @@ void __dev_remove_pack(struct packet_type *pt)
printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
- spin_unlock_bh(&ptype_lock);
+ spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);
@@ -1568,6 +1567,41 @@ void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
+#ifdef CONFIG_RPS
+/**
+ * netif_set_real_num_rx_queues - set actual number of RX queues used
+ * @dev: Network device
+ * @rxq: Actual number of RX queues
+ *
+ * This must be called either with the rtnl_lock held or before
+ * registration of the net device. Returns 0 on success, or a
+ * negative error code. If called before registration, it also
+ * sets the maximum number of queues, and always succeeds.
+ */
+int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
+{
+ int rc;
+
+ if (dev->reg_state == NETREG_REGISTERED) {
+ ASSERT_RTNL();
+
+ if (rxq > dev->num_rx_queues)
+ return -EINVAL;
+
+ rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
+ rxq);
+ if (rc)
+ return rc;
+ } else {
+ dev->num_rx_queues = rxq;
+ }
+
+ dev->real_num_rx_queues = rxq;
+ return 0;
+}
+EXPORT_SYMBOL(netif_set_real_num_rx_queues);
+#endif
+
static inline void __netif_reschedule(struct Qdisc *q)
{
struct softnet_data *sd;
@@ -1902,14 +1936,14 @@ static int dev_gso_segment(struct sk_buff *skb)
/*
* Try to orphan skb early, right before transmission by the device.
- * We cannot orphan skb if tx timestamp is requested, since
- * drivers need to call skb_tstamp_tx() to send the timestamp.
+ * We cannot orphan skb if tx timestamp is requested or the sk-reference
+ * is needed on driver level for other reasons, e.g. see net/can/raw.c
*/
static inline void skb_orphan_try(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
- if (sk && !skb_tx(skb)->flags) {
+ if (sk && !skb_shinfo(skb)->tx_flags) {
/* skb_tx_hash() wont be able to get sk.
* We copy sk_hash into skb->rxhash
*/
@@ -1930,7 +1964,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb,
struct net_device *dev)
{
return skb_is_nonlinear(skb) &&
- ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
+ ((skb_has_frag_list(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
(skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
illegal_highdma(dev, skb))));
}
@@ -2143,6 +2177,9 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
return rc;
}
+static DEFINE_PER_CPU(int, xmit_recursion);
+#define RECURSION_LIMIT 3
+
/**
* dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
@@ -2208,10 +2245,15 @@ int dev_queue_xmit(struct sk_buff *skb)
if (txq->xmit_lock_owner != cpu) {
+ if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
+ goto recursion_alert;
+
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_tx_queue_stopped(txq)) {
+ __this_cpu_inc(xmit_recursion);
rc = dev_hard_start_xmit(skb, dev, txq);
+ __this_cpu_dec(xmit_recursion);
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
goto out;
@@ -2223,7 +2265,9 @@ int dev_queue_xmit(struct sk_buff *skb)
"queue packet!\n", dev->name);
} else {
/* Recursion is detected! It is possible,
- * unfortunately */
+ * unfortunately
+ */
+recursion_alert:
if (net_ratelimit())
printk(KERN_CRIT "Dead loop on virtual device "
"%s, fix it urgently!\n", dev->name);
@@ -2259,69 +2303,44 @@ static inline void ____napi_schedule(struct softnet_data *sd,
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
-#ifdef CONFIG_RPS
-
-/* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
-EXPORT_SYMBOL(rps_sock_flow_table);
-
/*
- * get_rps_cpu is called from netif_receive_skb and returns the target
- * CPU from the RPS map of the receiving queue for a given skb.
- * rcu_read_lock must be held on entry.
+ * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
+ * and src/dst port numbers. Returns a non-zero hash number on success
+ * and 0 on failure.
*/
-static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
- struct rps_dev_flow **rflowp)
+__u32 __skb_get_rxhash(struct sk_buff *skb)
{
+ int nhoff, hash = 0, poff;
struct ipv6hdr *ip6;
struct iphdr *ip;
- struct netdev_rx_queue *rxqueue;
- struct rps_map *map;
- struct rps_dev_flow_table *flow_table;
- struct rps_sock_flow_table *sock_flow_table;
- int cpu = -1;
u8 ip_proto;
- u16 tcpu;
u32 addr1, addr2, ihl;
union {
u32 v32;
u16 v16[2];
} ports;
- if (skb_rx_queue_recorded(skb)) {
- u16 index = skb_get_rx_queue(skb);
- if (unlikely(index >= dev->num_rx_queues)) {
- WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
- "on queue %u, but number of RX queues is %u\n",
- dev->name, index, dev->num_rx_queues);
- goto done;
- }
- rxqueue = dev->_rx + index;
- } else
- rxqueue = dev->_rx;
-
- if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
- goto done;
-
- if (skb->rxhash)
- goto got_hash; /* Skip hash computation on packet header */
+ nhoff = skb_network_offset(skb);
switch (skb->protocol) {
case __constant_htons(ETH_P_IP):
- if (!pskb_may_pull(skb, sizeof(*ip)))
+ if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
goto done;
- ip = (struct iphdr *) skb->data;
- ip_proto = ip->protocol;
+ ip = (struct iphdr *) (skb->data + nhoff);
+ if (ip->frag_off & htons(IP_MF | IP_OFFSET))
+ ip_proto = 0;
+ else
+ ip_proto = ip->protocol;
addr1 = (__force u32) ip->saddr;
addr2 = (__force u32) ip->daddr;
ihl = ip->ihl;
break;
case __constant_htons(ETH_P_IPV6):
- if (!pskb_may_pull(skb, sizeof(*ip6)))
+ if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
goto done;
- ip6 = (struct ipv6hdr *) skb->data;
+ ip6 = (struct ipv6hdr *) (skb->data + nhoff);
ip_proto = ip6->nexthdr;
addr1 = (__force u32) ip6->saddr.s6_addr32[3];
addr2 = (__force u32) ip6->daddr.s6_addr32[3];
@@ -2330,33 +2349,81 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
default:
goto done;
}
- switch (ip_proto) {
- case IPPROTO_TCP:
- case IPPROTO_UDP:
- case IPPROTO_DCCP:
- case IPPROTO_ESP:
- case IPPROTO_AH:
- case IPPROTO_SCTP:
- case IPPROTO_UDPLITE:
- if (pskb_may_pull(skb, (ihl * 4) + 4)) {
- ports.v32 = * (__force u32 *) (skb->data + (ihl * 4));
+
+ ports.v32 = 0;
+ poff = proto_ports_offset(ip_proto);
+ if (poff >= 0) {
+ nhoff += ihl * 4 + poff;
+ if (pskb_may_pull(skb, nhoff + 4)) {
+ ports.v32 = * (__force u32 *) (skb->data + nhoff);
if (ports.v16[1] < ports.v16[0])
swap(ports.v16[0], ports.v16[1]);
- break;
}
- default:
- ports.v32 = 0;
- break;
}
/* get a consistent hash (same value on both flow directions) */
if (addr2 < addr1)
swap(addr1, addr2);
- skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
- if (!skb->rxhash)
- skb->rxhash = 1;
-got_hash:
+ hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
+ if (!hash)
+ hash = 1;
+
+done:
+ return hash;
+}
+EXPORT_SYMBOL(__skb_get_rxhash);
+
+#ifdef CONFIG_RPS
+
+/* One global table that all flow-based protocols share. */
+struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+EXPORT_SYMBOL(rps_sock_flow_table);
+
+/*
+ * get_rps_cpu is called from netif_receive_skb and returns the target
+ * CPU from the RPS map of the receiving queue for a given skb.
+ * rcu_read_lock must be held on entry.
+ */
+static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ struct rps_dev_flow **rflowp)
+{
+ struct netdev_rx_queue *rxqueue;
+ struct rps_map *map = NULL;
+ struct rps_dev_flow_table *flow_table;
+ struct rps_sock_flow_table *sock_flow_table;
+ int cpu = -1;
+ u16 tcpu;
+
+ if (skb_rx_queue_recorded(skb)) {
+ u16 index = skb_get_rx_queue(skb);
+ if (unlikely(index >= dev->real_num_rx_queues)) {
+ WARN_ONCE(dev->real_num_rx_queues > 1,
+ "%s received packet on queue %u, but number "
+ "of RX queues is %u\n",
+ dev->name, index, dev->real_num_rx_queues);
+ goto done;
+ }
+ rxqueue = dev->_rx + index;
+ } else
+ rxqueue = dev->_rx;
+
+ if (rxqueue->rps_map) {
+ map = rcu_dereference(rxqueue->rps_map);
+ if (map && map->len == 1) {
+ tcpu = map->cpus[0];
+ if (cpu_online(tcpu))
+ cpu = tcpu;
+ goto done;
+ }
+ } else if (!rxqueue->rps_flow_table) {
+ goto done;
+ }
+
+ skb_reset_network_header(skb);
+ if (!skb_get_rxhash(skb))
+ goto done;
+
flow_table = rcu_dereference(rxqueue->rps_flow_table);
sock_flow_table = rcu_dereference(rps_sock_flow_table);
if (flow_table && sock_flow_table) {
@@ -2396,7 +2463,6 @@ got_hash:
}
}
- map = rcu_dereference(rxqueue->rps_map);
if (map) {
tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
@@ -2654,7 +2720,7 @@ static int ing_filter(struct sk_buff *skb)
skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
- rxq = &dev->rx_queue;
+ rxq = &dev->ingress_queue;
q = rxq->qdisc;
if (q != &noop_qdisc) {
@@ -2671,7 +2737,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
struct packet_type **pt_prev,
int *ret, struct net_device *orig_dev)
{
- if (skb->dev->rx_queue.qdisc == &noop_qdisc)
+ if (skb->dev->ingress_queue.qdisc == &noop_qdisc)
goto out;
if (*pt_prev) {
@@ -2828,8 +2894,8 @@ static int __netif_receive_skb(struct sk_buff *skb)
if (!netdev_tstamp_prequeue)
net_timestamp_check(skb);
- if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
- return NET_RX_SUCCESS;
+ if (vlan_tx_tag_present(skb))
+ vlan_hwaccel_do_receive(skb);
/* if we've gotten here through NAPI, check netpoll */
if (netpoll_receive_skb(skb))
@@ -3050,7 +3116,7 @@ out:
return netif_receive_skb(skb);
}
-static void napi_gro_flush(struct napi_struct *napi)
+inline void napi_gro_flush(struct napi_struct *napi)
{
struct sk_buff *skb, *next;
@@ -3063,6 +3129,7 @@ static void napi_gro_flush(struct napi_struct *napi)
napi->gro_count = 0;
napi->gro_list = NULL;
}
+EXPORT_SYMBOL(napi_gro_flush);
enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
@@ -3077,7 +3144,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
goto normal;
- if (skb_is_gso(skb) || skb_has_frags(skb))
+ if (skb_is_gso(skb) || skb_has_frag_list(skb))
goto normal;
rcu_read_lock();
@@ -3156,16 +3223,18 @@ normal:
}
EXPORT_SYMBOL(dev_gro_receive);
-static gro_result_t
+static inline gro_result_t
__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
struct sk_buff *p;
for (p = napi->gro_list; p; p = p->next) {
- NAPI_GRO_CB(p)->same_flow =
- (p->dev == skb->dev) &&
- !compare_ether_header(skb_mac_header(p),
+ unsigned long diffs;
+
+ diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
+ diffs |= compare_ether_header(skb_mac_header(p),
skb_gro_mac_header(skb));
+ NAPI_GRO_CB(p)->same_flow = !diffs;
NAPI_GRO_CB(p)->flush = 0;
}
@@ -4871,7 +4940,7 @@ static void __netdev_init_queue_locks_one(struct net_device *dev,
static void netdev_init_queue_locks(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
- __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
+ __netdev_init_queue_locks_one(dev, &dev->ingress_queue, NULL);
}
unsigned long netdev_fix_features(unsigned long features, const char *name)
@@ -4941,6 +5010,34 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
+static int netif_alloc_rx_queues(struct net_device *dev)
+{
+#ifdef CONFIG_RPS
+ unsigned int i, count = dev->num_rx_queues;
+
+ if (count) {
+ struct netdev_rx_queue *rx;
+
+ rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
+ if (!rx) {
+ pr_err("netdev: Unable to allocate %u rx queues.\n",
+ count);
+ return -ENOMEM;
+ }
+ dev->_rx = rx;
+ atomic_set(&rx->count, count);
+
+ /*
+ * Set a pointer to first element in the array which holds the
+ * reference count.
+ */
+ for (i = 0; i < count; i++)
+ rx[i].first = rx;
+ }
+#endif
+ return 0;
+}
+
/**
* register_netdevice - register a network device
* @dev: device to register
@@ -4978,24 +5075,10 @@ int register_netdevice(struct net_device *dev)
dev->iflink = -1;
-#ifdef CONFIG_RPS
- if (!dev->num_rx_queues) {
- /*
- * Allocate a single RX queue if driver never called
- * alloc_netdev_mq
- */
-
- dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
- if (!dev->_rx) {
- ret = -ENOMEM;
- goto out;
- }
+ ret = netif_alloc_rx_queues(dev);
+ if (ret)
+ goto out;
- dev->_rx->first = dev->_rx;
- atomic_set(&dev->_rx->count, 1);
- dev->num_rx_queues = 1;
- }
-#endif
/* Init, if this function is available */
if (dev->netdev_ops->ndo_init) {
ret = dev->netdev_ops->ndo_init(dev);
@@ -5035,6 +5118,12 @@ int register_netdevice(struct net_device *dev)
if (dev->features & NETIF_F_SG)
dev->features |= NETIF_F_GSO;
+ /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
+ * vlan_dev_init() will do the dev->features check, so these features
+ * are enabled only if supported by underlying device.
+ */
+ dev->vlan_features |= (NETIF_F_GRO | NETIF_F_HIGHDMA);
+
ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
ret = notifier_to_errno(ret);
if (ret)
@@ -5264,7 +5353,7 @@ void netdev_run_todo(void)
/* paranoia */
BUG_ON(atomic_read(&dev->refcnt));
- WARN_ON(dev->ip_ptr);
+ WARN_ON(rcu_dereference_raw(dev->ip_ptr));
WARN_ON(dev->ip6_ptr);
WARN_ON(dev->dn_ptr);
@@ -5363,7 +5452,7 @@ static void netdev_init_one_queue(struct net_device *dev,
static void netdev_init_queues(struct net_device *dev)
{
- netdev_init_one_queue(dev, &dev->rx_queue, NULL);
+ netdev_init_one_queue(dev, &dev->ingress_queue, NULL);
netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
spin_lock_init(&dev->tx_global_lock);
}
@@ -5386,10 +5475,6 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
struct net_device *dev;
size_t alloc_size;
struct net_device *p;
-#ifdef CONFIG_RPS
- struct netdev_rx_queue *rx;
- int i;
-#endif
BUG_ON(strlen(name) >= sizeof(dev->name));
@@ -5415,29 +5500,12 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
goto free_p;
}
-#ifdef CONFIG_RPS
- rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
- if (!rx) {
- printk(KERN_ERR "alloc_netdev: Unable to allocate "
- "rx queues.\n");
- goto free_tx;
- }
-
- atomic_set(&rx->count, queue_count);
-
- /*
- * Set a pointer to first element in the array which holds the
- * reference count.
- */
- for (i = 0; i < queue_count; i++)
- rx[i].first = rx;
-#endif
dev = PTR_ALIGN(p, NETDEV_ALIGN);
dev->padded = (char *)dev - (char *)p;
if (dev_addr_init(dev))
- goto free_rx;
+ goto free_tx;
dev_mc_init(dev);
dev_uc_init(dev);
@@ -5449,8 +5517,8 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
dev->real_num_tx_queues = queue_count;
#ifdef CONFIG_RPS
- dev->_rx = rx;
dev->num_rx_queues = queue_count;
+ dev->real_num_rx_queues = queue_count;
#endif
dev->gso_max_size = GSO_MAX_SIZE;
@@ -5467,11 +5535,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
strcpy(dev->name, name);
return dev;
-free_rx:
-#ifdef CONFIG_RPS
- kfree(rx);
free_tx:
-#endif
kfree(tx);
free_p:
kfree(p);
@@ -5658,6 +5722,10 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
/* Notify protocols, that we are about to destroy
this device. They should clean all the things.
+
+ Note that dev->reg_state stays at NETREG_REGISTERED.
+ This is wanted because this way 8021q and macvlan know
+ the device is just moving and can keep their slaves up.
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
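/*
 * __skb_get_rxhash() above factors the RPS flow hash out of get_rps_cpu():
 * it hashes the source/destination addresses plus, when proto_ports_offset()
 * knows where the ports live for the L4 protocol, the port pair, and it
 * canonicalizes the tuple (smaller value first) so both directions of a flow
 * produce the same hash, reserving 0 to mean "no hash computed". A small
 * sketch of that canonicalization (illustrative, outside the patch), with a
 * toy mixer standing in for the kernel's jhash_3words():
 */

#include <stdint.h>

static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c)
{
	/* toy stand-in for jhash_3words(), not the kernel function */
	a ^= b * 2654435761u;
	a ^= c * 2246822519u;
	a ^= a >> 16;
	return a * 2654435761u;
}

static uint32_t flow_rxhash(uint32_t saddr, uint32_t daddr,
			    uint16_t sport, uint16_t dport)
{
	uint32_t hash, ports;

	/* order the tuple so both flow directions hash identically */
	if (dport < sport) {
		uint16_t tp = sport; sport = dport; dport = tp;
	}
	if (daddr < saddr) {
		uint32_t ta = saddr; saddr = daddr; daddr = ta;
	}

	ports = ((uint32_t)dport << 16) | sport;
	hash = mix3(saddr, daddr, ports);

	return hash ? hash : 1;		/* 0 is reserved for "no hash" */
}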
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 7a85367b3c2f..7d7e572cedc7 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -19,6 +19,7 @@
#include <linux/netdevice.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
#include <linux/slab.h>
/*
@@ -205,18 +206,24 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo info;
const struct ethtool_ops *ops = dev->ethtool_ops;
- if (!ops->get_drvinfo)
- return -EOPNOTSUPP;
-
memset(&info, 0, sizeof(info));
info.cmd = ETHTOOL_GDRVINFO;
- ops->get_drvinfo(dev, &info);
+ if (ops && ops->get_drvinfo) {
+ ops->get_drvinfo(dev, &info);
+ } else if (dev->dev.parent && dev->dev.parent->driver) {
+ strlcpy(info.bus_info, dev_name(dev->dev.parent),
+ sizeof(info.bus_info));
+ strlcpy(info.driver, dev->dev.parent->driver->name,
+ sizeof(info.driver));
+ } else {
+ return -EOPNOTSUPP;
+ }
/*
* this method of obtaining string set info is deprecated;
* Use ETHTOOL_GSSET_INFO instead.
*/
- if (ops->get_sset_count) {
+ if (ops && ops->get_sset_count) {
int rc;
rc = ops->get_sset_count(dev, ETH_SS_TEST);
@@ -229,9 +236,9 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
if (rc >= 0)
info.n_priv_flags = rc;
}
- if (ops->get_regs_len)
+ if (ops && ops->get_regs_len)
info.regdump_len = ops->get_regs_len(dev);
- if (ops->get_eeprom_len)
+ if (ops && ops->get_eeprom_len)
info.eedump_len = ops->get_eeprom_len(dev);
if (copy_to_user(useraddr, &info, sizeof(info)))
@@ -479,6 +486,38 @@ static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
list->count++;
}
+/*
+ * ethtool does not (or did not) set masks for flow parameters that are
+ * not specified, so if both value and mask are 0 then this must be
+ * treated as equivalent to a mask with all bits set. Implement that
+ * here rather than in drivers.
+ */
+static void rx_ntuple_fix_masks(struct ethtool_rx_ntuple_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *entry = &fs->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *mask = &fs->m_u.tcp_ip4_spec;
+
+ if (fs->flow_type != TCP_V4_FLOW &&
+ fs->flow_type != UDP_V4_FLOW &&
+ fs->flow_type != SCTP_V4_FLOW)
+ return;
+
+ if (!(entry->ip4src | mask->ip4src))
+ mask->ip4src = htonl(0xffffffff);
+ if (!(entry->ip4dst | mask->ip4dst))
+ mask->ip4dst = htonl(0xffffffff);
+ if (!(entry->psrc | mask->psrc))
+ mask->psrc = htons(0xffff);
+ if (!(entry->pdst | mask->pdst))
+ mask->pdst = htons(0xffff);
+ if (!(entry->tos | mask->tos))
+ mask->tos = 0xff;
+ if (!(fs->vlan_tag | fs->vlan_tag_mask))
+ fs->vlan_tag_mask = 0xffff;
+ if (!(fs->data | fs->data_mask))
+ fs->data_mask = 0xffffffffffffffffULL;
+}
+
static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
void __user *useraddr)
{
@@ -493,6 +532,8 @@ static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
return -EFAULT;
+ rx_ntuple_fix_masks(&cmd.fs);
+
/*
* Cache filter in dev struct for GET operation only if
* the underlying driver doesn't have its own GET operation, and
@@ -667,19 +708,19 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
break;
case IP_USER_FLOW:
sprintf(p, "\tSrc IP addr: 0x%x\n",
- fsc->fs.h_u.raw_ip4_spec.ip4src);
+ fsc->fs.h_u.usr_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tSrc IP mask: 0x%x\n",
- fsc->fs.m_u.raw_ip4_spec.ip4src);
+ fsc->fs.m_u.usr_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP addr: 0x%x\n",
- fsc->fs.h_u.raw_ip4_spec.ip4dst);
+ fsc->fs.h_u.usr_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP mask: 0x%x\n",
- fsc->fs.m_u.raw_ip4_spec.ip4dst);
+ fsc->fs.m_u.usr_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
break;
@@ -775,7 +816,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
if (regs.len > reglen)
regs.len = reglen;
- regbuf = kmalloc(reglen, GFP_USER);
+ regbuf = vmalloc(reglen);
if (!regbuf)
return -ENOMEM;
@@ -790,7 +831,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
ret = 0;
out:
- kfree(regbuf);
+ vfree(regbuf);
return ret;
}
@@ -1175,8 +1216,11 @@ static int ethtool_set_gro(struct net_device *dev, char __user *useraddr)
return -EFAULT;
if (edata.data) {
- if (!dev->ethtool_ops->get_rx_csum ||
- !dev->ethtool_ops->get_rx_csum(dev))
+ u32 rxcsum = dev->ethtool_ops->get_rx_csum ?
+ dev->ethtool_ops->get_rx_csum(dev) :
+ ethtool_op_get_rx_csum(dev);
+
+ if (!rxcsum)
return -EINVAL;
dev->features |= NETIF_F_GRO;
} else
@@ -1402,14 +1446,22 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
if (!dev || !netif_device_present(dev))
return -ENODEV;
- if (!dev->ethtool_ops)
- return -EOPNOTSUPP;
-
if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
return -EFAULT;
+ if (!dev->ethtool_ops) {
+ /* ETHTOOL_GDRVINFO does not require any driver support.
+ * It is also unprivileged and does not change anything,
+ * so we can take a shortcut to it. */
+ if (ethcmd == ETHTOOL_GDRVINFO)
+ return ethtool_get_drvinfo(dev, useraddr);
+ else
+ return -EOPNOTSUPP;
+ }
+
/* Allow some commands to be done by anyone */
switch (ethcmd) {
+ case ETHTOOL_GSET:
case ETHTOOL_GDRVINFO:
case ETHTOOL_GMSGLVL:
case ETHTOOL_GCOALESCE:
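/*
 * rx_ntuple_fix_masks() above encodes the convention that an n-tuple field
 * whose value and mask are both zero was simply not specified, and rewrites
 * its mask to all ones (fully masked out, i.e. "don't care") before the
 * filter reaches the driver. A sketch of that rule for one 32-bit field
 * (illustration only):
 */

#include <stdint.h>
#include <stdio.h>

static void fix_mask32(uint32_t value, uint32_t *mask)
{
	if (!(value | *mask))
		*mask = 0xffffffff;	/* unspecified -> wildcard */
}

int main(void)
{
	uint32_t ip4src = 0, ip4src_mask = 0;		/* not specified */
	uint32_t ip4dst = 0x0a000001, ip4dst_mask = 0;	/* exact 10.0.0.1 */

	fix_mask32(ip4src, &ip4src_mask);
	fix_mask32(ip4dst, &ip4dst_mask);

	printf("src mask %#x, dst mask %#x\n", ip4src_mask, ip4dst_mask);
	return 0;
}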
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 42e84e08a1be..332c2e31d048 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -182,7 +182,8 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
{
int ret = 0;
- if (rule->iifindex && (rule->iifindex != fl->iif))
+ if (rule->iifindex && (rule->iifindex != fl->iif) &&
+ !(fl->flags & FLOWI_FLAG_MATCH_ANY_IIF))
goto out;
if (rule->oifindex && (rule->oifindex != fl->oif))
@@ -225,9 +226,11 @@ jumped:
err = ops->action(rule, fl, flags, arg);
if (err != -EAGAIN) {
- fib_rule_get(rule);
- arg->rule = rule;
- goto out;
+ if (likely(atomic_inc_not_zero(&rule->refcnt))) {
+ arg->rule = rule;
+ goto out;
+ }
+ break;
}
}
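/*
 * The fib_rules change above takes the reference with atomic_inc_not_zero()
 * instead of an unconditional fib_rule_get(), so a rule whose refcount has
 * already dropped to zero (it is being torn down) is never handed back to the
 * caller. A userspace sketch of that "get a reference only while the object
 * is still live" pattern, using C11 atomics (illustrative, not the kernel's
 * implementation):
 */

#include <stdatomic.h>
#include <stdbool.h>

struct rule {
	atomic_int refcnt;
	/* ... rule data ... */
};

static bool rule_tryget(struct rule *r)
{
	int old = atomic_load(&r->refcnt);

	while (old != 0) {
		/* on failure the CAS reloads 'old' with the current value */
		if (atomic_compare_exchange_weak(&r->refcnt, &old, old + 1))
			return true;
	}
	return false;
}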
diff --git a/net/core/filter.c b/net/core/filter.c
index 52b051f82a01..7adf50352918 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -638,10 +638,9 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
return err;
}
- rcu_read_lock_bh();
- old_fp = rcu_dereference_bh(sk->sk_filter);
+ old_fp = rcu_dereference_protected(sk->sk_filter,
+ sock_owned_by_user(sk));
rcu_assign_pointer(sk->sk_filter, fp);
- rcu_read_unlock_bh();
if (old_fp)
sk_filter_delayed_uncharge(sk, old_fp);
@@ -654,14 +653,13 @@ int sk_detach_filter(struct sock *sk)
int ret = -ENOENT;
struct sk_filter *filter;
- rcu_read_lock_bh();
- filter = rcu_dereference_bh(sk->sk_filter);
+ filter = rcu_dereference_protected(sk->sk_filter,
+ sock_owned_by_user(sk));
if (filter) {
rcu_assign_pointer(sk->sk_filter, NULL);
sk_filter_delayed_uncharge(sk, filter);
ret = 0;
}
- rcu_read_unlock_bh();
return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
diff --git a/net/core/flow.c b/net/core/flow.c
index f67dcbfe54ef..127c8a7ffd61 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -53,8 +53,7 @@ struct flow_flush_info {
struct flow_cache {
u32 hash_shift;
- unsigned long order;
- struct flow_cache_percpu *percpu;
+ struct flow_cache_percpu __percpu *percpu;
struct notifier_block hotcpu_notifier;
int low_watermark;
int high_watermark;
@@ -64,7 +63,7 @@ struct flow_cache {
atomic_t flow_cache_genid = ATOMIC_INIT(0);
EXPORT_SYMBOL(flow_cache_genid);
static struct flow_cache flow_cache_global;
-static struct kmem_cache *flow_cachep;
+static struct kmem_cache *flow_cachep __read_mostly;
static DEFINE_SPINLOCK(flow_cache_gc_lock);
static LIST_HEAD(flow_cache_gc_list);
@@ -177,15 +176,11 @@ static u32 flow_hash_code(struct flow_cache *fc,
{
u32 *k = (u32 *) key;
- return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
- & (flow_cache_hash_size(fc) - 1));
+ return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
+ & (flow_cache_hash_size(fc) - 1);
}
-#if (BITS_PER_LONG == 64)
-typedef u64 flow_compare_t;
-#else
-typedef u32 flow_compare_t;
-#endif
+typedef unsigned long flow_compare_t;
/* I hear what you're saying, use memcmp. But memcmp cannot make
* important assumptions that we can here, such as alignment and
@@ -357,62 +352,73 @@ void flow_cache_flush(void)
put_online_cpus();
}
-static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
- struct flow_cache_percpu *fcp)
+static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
- fcp->hash_table = (struct hlist_head *)
- __get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
- if (!fcp->hash_table)
- panic("NET: failed to allocate flow cache order %lu\n", fc->order);
+ struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
+ size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);
- fcp->hash_rnd_recalc = 1;
- fcp->hash_count = 0;
- tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
+ if (!fcp->hash_table) {
+ fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
+ if (!fcp->hash_table) {
+ pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
+ return -ENOMEM;
+ }
+ fcp->hash_rnd_recalc = 1;
+ fcp->hash_count = 0;
+ tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
+ }
+ return 0;
}
-static int flow_cache_cpu(struct notifier_block *nfb,
+static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
- int cpu = (unsigned long) hcpu;
+ int res, cpu = (unsigned long) hcpu;
struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ res = flow_cache_cpu_prepare(fc, cpu);
+ if (res)
+ return notifier_from_errno(res);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
__flow_cache_shrink(fc, fcp, 0);
+ break;
+ }
return NOTIFY_OK;
}
-static int flow_cache_init(struct flow_cache *fc)
+static int __init flow_cache_init(struct flow_cache *fc)
{
- unsigned long order;
int i;
fc->hash_shift = 10;
fc->low_watermark = 2 * flow_cache_hash_size(fc);
fc->high_watermark = 4 * flow_cache_hash_size(fc);
- for (order = 0;
- (PAGE_SIZE << order) <
- (sizeof(struct hlist_head)*flow_cache_hash_size(fc));
- order++)
- /* NOTHING */;
- fc->order = order;
fc->percpu = alloc_percpu(struct flow_cache_percpu);
+ if (!fc->percpu)
+ return -ENOMEM;
- setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
- (unsigned long) fc);
- fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
- add_timer(&fc->rnd_timer);
-
- for_each_possible_cpu(i)
- flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i));
-
+ for_each_online_cpu(i) {
+ if (flow_cache_cpu_prepare(fc, i))
+ return -ENOMEM;
+ }
fc->hotcpu_notifier = (struct notifier_block){
.notifier_call = flow_cache_cpu,
};
register_hotcpu_notifier(&fc->hotcpu_notifier);
+ setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
+ (unsigned long) fc);
+ fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
+ add_timer(&fc->rnd_timer);
+
return 0;
}
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 6743146e4d6b..7c2373321b74 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -274,9 +274,9 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
while ((e = gen_find_node(bstats, rate_est))) {
rb_erase(&e->node, &est_root);
- write_lock_bh(&est_lock);
+ write_lock(&est_lock);
e->bstats = NULL;
- write_unlock_bh(&est_lock);
+ write_unlock(&est_lock);
list_del_rcu(&e->list);
call_rcu(&e->e_rcu, __gen_kill_estimator);
diff --git a/net/core/iovec.c b/net/core/iovec.c
index e6b133b77ccb..72aceb1fe4fa 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -42,7 +42,9 @@ long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
if (m->msg_namelen) {
if (mode == VERIFY_READ) {
- err = move_addr_to_kernel(m->msg_name, m->msg_namelen,
+ void __user *namep;
+ namep = (void __user __force *) m->msg_name;
+ err = move_addr_to_kernel(namep, m->msg_namelen,
address);
if (err < 0)
return err;
@@ -53,7 +55,7 @@ long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
}
size = m->msg_iovlen * sizeof(struct iovec);
- if (copy_from_user(iov, m->msg_iov, size))
+ if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
return -EFAULT;
m->msg_iov = iov;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index a4e0a7482c2b..b142a0d76072 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -122,7 +122,7 @@ static void neigh_cleanup_and_release(struct neighbour *neigh)
unsigned long neigh_rand_reach_time(unsigned long base)
{
- return (base ? (net_random() % base) + (base >> 1) : 0);
+ return base ? (net_random() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
@@ -766,9 +766,9 @@ next_elt:
static __inline__ int neigh_max_probes(struct neighbour *n)
{
struct neigh_parms *p = n->parms;
- return (n->nud_state & NUD_PROBE ?
+ return (n->nud_state & NUD_PROBE) ?
p->ucast_probes :
- p->ucast_probes + p->app_probes + p->mcast_probes);
+ p->ucast_probes + p->app_probes + p->mcast_probes;
}
static void neigh_invalidate(struct neighbour *neigh)
@@ -1210,7 +1210,9 @@ int neigh_resolve_output(struct sk_buff *skb)
if (!neigh_event_send(neigh, skb)) {
int err;
struct net_device *dev = neigh->dev;
- if (dev->header_ops->cache && !dst->hh) {
+ if (dev->header_ops->cache &&
+ !dst->hh &&
+ !(dst->flags & DST_NOCACHE)) {
write_lock_bh(&neigh->lock);
if (!dst->hh)
neigh_hh_init(neigh, dst, dst->ops->protocol);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index af4dfbadf2a0..fa81fd0a488f 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -515,7 +515,7 @@ static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
return attribute->store(queue, attribute, buf, count);
}
-static struct sysfs_ops rx_queue_sysfs_ops = {
+static const struct sysfs_ops rx_queue_sysfs_ops = {
.show = rx_queue_attr_show,
.store = rx_queue_attr_store,
};
@@ -742,34 +742,38 @@ static int rx_queue_add_kobject(struct net_device *net, int index)
return error;
}
-static int rx_queue_register_kobjects(struct net_device *net)
+int
+net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
int i;
int error = 0;
- net->queues_kset = kset_create_and_add("queues",
- NULL, &net->dev.kobj);
- if (!net->queues_kset)
- return -ENOMEM;
- for (i = 0; i < net->num_rx_queues; i++) {
+ for (i = old_num; i < new_num; i++) {
error = rx_queue_add_kobject(net, i);
- if (error)
+ if (error) {
+ new_num = old_num;
break;
+ }
}
- if (error)
- while (--i >= 0)
- kobject_put(&net->_rx[i].kobj);
+ while (--i >= new_num)
+ kobject_put(&net->_rx[i].kobj);
return error;
}
-static void rx_queue_remove_kobjects(struct net_device *net)
+static int rx_queue_register_kobjects(struct net_device *net)
{
- int i;
+ net->queues_kset = kset_create_and_add("queues",
+ NULL, &net->dev.kobj);
+ if (!net->queues_kset)
+ return -ENOMEM;
+ return net_rx_queue_update_kobjects(net, 0, net->real_num_rx_queues);
+}
- for (i = 0; i < net->num_rx_queues; i++)
- kobject_put(&net->_rx[i].kobj);
+static void rx_queue_remove_kobjects(struct net_device *net)
+{
+ net_rx_queue_update_kobjects(net, net->real_num_rx_queues, 0);
kset_unregister(net->queues_kset);
}
#endif /* CONFIG_RPS */
@@ -789,12 +793,13 @@ static const void *net_netlink_ns(struct sock *sk)
return sock_net(sk);
}
-static struct kobj_ns_type_operations net_ns_type_operations = {
+struct kobj_ns_type_operations net_ns_type_operations = {
.type = KOBJ_NS_TYPE_NET,
.current_ns = net_current_ns,
.netlink_ns = net_netlink_ns,
.initial_ns = net_initial_ns,
};
+EXPORT_SYMBOL_GPL(net_ns_type_operations);
static void net_kobj_ns_exit(struct net *net)
{
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h
index 805555e8b187..778e1571548d 100644
--- a/net/core/net-sysfs.h
+++ b/net/core/net-sysfs.h
@@ -4,4 +4,8 @@
int netdev_kobject_init(void);
int netdev_register_kobject(struct net_device *);
void netdev_unregister_kobject(struct net_device *);
+#ifdef CONFIG_RPS
+int net_rx_queue_update_kobjects(struct net_device *, int old_num, int new_num);
+#endif
+
#endif
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 10a1ea72010d..2c0df0f95b3d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -729,16 +729,14 @@ static int hex32_arg(const char __user *user_buffer, unsigned long maxlen,
*num = 0;
for (; i < maxlen; i++) {
+ int value;
char c;
*num <<= 4;
if (get_user(c, &user_buffer[i]))
return -EFAULT;
- if ((c >= '0') && (c <= '9'))
- *num |= c - '0';
- else if ((c >= 'a') && (c <= 'f'))
- *num |= c - 'a' + 10;
- else if ((c >= 'A') && (c <= 'F'))
- *num |= c - 'A' + 10;
+ value = hex_to_bin(c);
+ if (value >= 0)
+ *num |= value;
else
break;
}
@@ -3907,8 +3905,6 @@ static void __exit pg_cleanup(void)
{
struct pktgen_thread *t;
struct list_head *q, *n;
- wait_queue_head_t queue;
- init_waitqueue_head(&queue);
/* Stop all interfaces & threads */
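/*
 * hex32_arg() above now decodes each digit with hex_to_bin(), which returns
 * the digit's value or a negative number for anything that is not a hex
 * character, instead of open-coding the three character ranges. A userspace
 * sketch of the parsing loop, with a local equivalent of the lib/hexdump.c
 * helper (the kernel function itself is not available here):
 */

#include <stdint.h>
#include <stdio.h>

static int hex_to_bin_demo(char ch)	/* stand-in for hex_to_bin() */
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	if (ch >= 'A' && ch <= 'F')
		return ch - 'A' + 10;
	return -1;
}

/* Consume up to maxlen hex digits, stopping at the first non-hex char. */
static int hex32_arg_demo(const char *s, unsigned long maxlen, uint32_t *num)
{
	unsigned long i;

	*num = 0;
	for (i = 0; i < maxlen; i++) {
		int value = hex_to_bin_demo(s[i]);

		if (value < 0)
			break;
		*num = (*num << 4) | value;
	}
	return i;
}

int main(void)
{
	uint32_t v;

	hex32_arg_demo("00ff0a02", 8, &v);
	printf("%#x\n", v);	/* prints 0xff0a02 */
	return 0;
}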
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f78d821bd935..b2a718dfd720 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -612,36 +612,7 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
{
- struct rtnl_link_stats64 a;
-
- a.rx_packets = b->rx_packets;
- a.tx_packets = b->tx_packets;
- a.rx_bytes = b->rx_bytes;
- a.tx_bytes = b->tx_bytes;
- a.rx_errors = b->rx_errors;
- a.tx_errors = b->tx_errors;
- a.rx_dropped = b->rx_dropped;
- a.tx_dropped = b->tx_dropped;
-
- a.multicast = b->multicast;
- a.collisions = b->collisions;
-
- a.rx_length_errors = b->rx_length_errors;
- a.rx_over_errors = b->rx_over_errors;
- a.rx_crc_errors = b->rx_crc_errors;
- a.rx_frame_errors = b->rx_frame_errors;
- a.rx_fifo_errors = b->rx_fifo_errors;
- a.rx_missed_errors = b->rx_missed_errors;
-
- a.tx_aborted_errors = b->tx_aborted_errors;
- a.tx_carrier_errors = b->tx_carrier_errors;
- a.tx_fifo_errors = b->tx_fifo_errors;
- a.tx_heartbeat_errors = b->tx_heartbeat_errors;
- a.tx_window_errors = b->tx_window_errors;
-
- a.rx_compressed = b->rx_compressed;
- a.tx_compressed = b->tx_compressed;
- memcpy(v, &a, sizeof(a));
+ memcpy(v, b, sizeof(*b));
}
/* All VF info */
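/*
 * copy_rtnl_link_stats64() above collapses the field-by-field copy into one
 * memcpy(): the destination is the raw netlink attribute payload, but it is
 * laid out exactly as a struct rtnl_link_stats64, so copying the whole struct
 * is equivalent and avoids the temporary on the stack. A tiny illustration of
 * the pattern, with a cut-down stand-in struct (not the real uapi layout):
 */

#include <string.h>

struct link_stats64_demo {		/* stand-in, not the uapi struct */
	unsigned long long rx_packets;
	unsigned long long tx_packets;
	unsigned long long rx_bytes;
	unsigned long long tx_bytes;
	/* ... remaining counters ... */
};

/* dst may be an unaligned attribute payload, hence the void * argument */
static void copy_link_stats64_demo(void *dst,
				   const struct link_stats64_demo *src)
{
	memcpy(dst, src, sizeof(*src));
}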
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c83b421341c0..752c1972b3a7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -202,8 +202,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
skb->data = data;
skb_reset_tail_pointer(skb);
skb->end = skb->tail + size;
- kmemcheck_annotate_bitfield(skb, flags1);
- kmemcheck_annotate_bitfield(skb, flags2);
#ifdef NET_SKBUFF_DATA_USES_OFFSET
skb->mac_header = ~0U;
#endif
@@ -340,7 +338,7 @@ static void skb_release_data(struct sk_buff *skb)
put_page(skb_shinfo(skb)->frags[i].page);
}
- if (skb_has_frags(skb))
+ if (skb_has_frag_list(skb))
skb_drop_fraglist(skb);
kfree(skb->head);
@@ -685,16 +683,10 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
- int headerlen = skb->data - skb->head;
- /*
- * Allocate the copy buffer
- */
- struct sk_buff *n;
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
- n = alloc_skb(skb->end + skb->data_len, gfp_mask);
-#else
- n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
-#endif
+ int headerlen = skb_headroom(skb);
+ unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
+ struct sk_buff *n = alloc_skb(size, gfp_mask);
+
if (!n)
return NULL;
@@ -726,20 +718,14 @@ EXPORT_SYMBOL(skb_copy);
struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
- /*
- * Allocate the copy buffer
- */
- struct sk_buff *n;
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
- n = alloc_skb(skb->end, gfp_mask);
-#else
- n = alloc_skb(skb->end - skb->head, gfp_mask);
-#endif
+ unsigned int size = skb_end_pointer(skb) - skb->head;
+ struct sk_buff *n = alloc_skb(size, gfp_mask);
+
if (!n)
goto out;
/* Set the data pointer */
- skb_reserve(n, skb->data - skb->head);
+ skb_reserve(n, skb_headroom(skb));
/* Set the tail pointer and length */
skb_put(n, skb_headlen(skb));
/* Copy the bytes */
@@ -759,7 +745,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
skb_shinfo(n)->nr_frags = i;
}
- if (skb_has_frags(skb)) {
+ if (skb_has_frag_list(skb)) {
skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
skb_clone_fraglist(n);
}
@@ -791,12 +777,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
{
int i;
u8 *data;
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
- int size = nhead + skb->end + ntail;
-#else
- int size = nhead + (skb->end - skb->head) + ntail;
-#endif
+ int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
long off;
+ bool fastpath;
BUG_ON(nhead < 0);
@@ -810,23 +793,36 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
goto nodata;
/* Copy only real data... and, alas, header. This should be
- * optimized for the cases when header is void. */
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
- memcpy(data + nhead, skb->head, skb->tail);
-#else
- memcpy(data + nhead, skb->head, skb->tail - skb->head);
-#endif
- memcpy(data + size, skb_end_pointer(skb),
+ * optimized for the cases when header is void.
+ */
+ memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
+
+ memcpy((struct skb_shared_info *)(data + size),
+ skb_shinfo(skb),
offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- get_page(skb_shinfo(skb)->frags[i].page);
+ /* Check if we can avoid taking references on fragments if we own
+ * the last reference on skb->head. (see skb_release_data())
+ */
+ if (!skb->cloned)
+ fastpath = true;
+ else {
+ int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
- if (skb_has_frags(skb))
- skb_clone_fraglist(skb);
+ fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
+ }
- skb_release_data(skb);
+ if (fastpath) {
+ kfree(skb->head);
+ } else {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ get_page(skb_shinfo(skb)->frags[i].page);
+ if (skb_has_frag_list(skb))
+ skb_clone_fraglist(skb);
+
+ skb_release_data(skb);
+ }
off = (data + nhead) - skb->head;
skb->head = data;
@@ -1099,7 +1095,7 @@ drop_pages:
for (; i < nfrags; i++)
put_page(skb_shinfo(skb)->frags[i].page);
- if (skb_has_frags(skb))
+ if (skb_has_frag_list(skb))
skb_drop_fraglist(skb);
goto done;
}
@@ -1194,7 +1190,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
/* Optimization: no fragments, no reasons to preestimate
* size of pulled pages. Superb.
*/
- if (!skb_has_frags(skb))
+ if (!skb_has_frag_list(skb))
goto pull_pages;
/* Estimate size of pulled pages. */
@@ -2323,7 +2319,7 @@ next_skb:
st->frag_data = NULL;
}
- if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) {
+ if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
st->frag_idx = 0;
goto next_skb;
@@ -2893,7 +2889,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
return -ENOMEM;
/* Easy case. Most of packets will go this way. */
- if (!skb_has_frags(skb)) {
+ if (!skb_has_frag_list(skb)) {
/* A little of trouble, not enough of space for trailer.
* This should not happen, when stack is tuned to generate
* good frames. OK, on miss we reallocate and reserve even more
@@ -2928,7 +2924,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
if (skb1->next == NULL && tailbits) {
if (skb_shinfo(skb1)->nr_frags ||
- skb_has_frags(skb1) ||
+ skb_has_frag_list(skb1) ||
skb_tailroom(skb1) < tailbits)
ntail = tailbits + 128;
}
@@ -2937,7 +2933,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
skb_cloned(skb1) ||
ntail ||
skb_shinfo(skb1)->nr_frags ||
- skb_has_frags(skb1)) {
+ skb_has_frag_list(skb1)) {
struct sk_buff *skb2;
/* Fuck, we are miserable poor guys... */
@@ -3020,7 +3016,7 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
} else {
/*
* no hardware time stamps available,
- * so keep the skb_shared_tx and only
+ * so keep the shared tx_flags and only
* store software time stamp
*/
skb->tstamp = ktime_get_real();
diff --git a/net/core/sock.c b/net/core/sock.c
index ef30e9d286e7..42365deeba27 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1557,6 +1557,8 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
EXPORT_SYMBOL(sock_alloc_send_skb);
static void __lock_sock(struct sock *sk)
+ __releases(&sk->sk_lock.slock)
+ __acquires(&sk->sk_lock.slock)
{
DEFINE_WAIT(wait);
@@ -1573,6 +1575,8 @@ static void __lock_sock(struct sock *sk)
}
static void __release_sock(struct sock *sk)
+ __releases(&sk->sk_lock.slock)
+ __acquires(&sk->sk_lock.slock)
{
struct sk_buff *skb = sk->sk_backlog.head;
diff --git a/net/core/utils.c b/net/core/utils.c
index f41854470539..5fea0ab21902 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -75,7 +75,7 @@ __be32 in_aton(const char *str)
str++;
}
}
- return(htonl(l));
+ return htonl(l);
}
EXPORT_SYMBOL(in_aton);
@@ -92,18 +92,19 @@ EXPORT_SYMBOL(in_aton);
static inline int xdigit2bin(char c, int delim)
{
+ int val;
+
if (c == delim || c == '\0')
return IN6PTON_DELIM;
if (c == ':')
return IN6PTON_COLON_MASK;
if (c == '.')
return IN6PTON_DOT;
- if (c >= '0' && c <= '9')
- return (IN6PTON_XDIGIT | IN6PTON_DIGIT| (c - '0'));
- if (c >= 'a' && c <= 'f')
- return (IN6PTON_XDIGIT | (c - 'a' + 10));
- if (c >= 'A' && c <= 'F')
- return (IN6PTON_XDIGIT | (c - 'A' + 10));
+
+ val = hex_to_bin(c);
+ if (val >= 0)
+ return val | IN6PTON_XDIGIT | (val < 10 ? IN6PTON_DIGIT : 0);
+
if (delim == -1)
return IN6PTON_DELIM;
return IN6PTON_UNKNOWN;
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 6df6f8ac9636..6d16a9070ff0 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -62,18 +62,14 @@ struct ccid_operations {
void (*ccid_hc_tx_exit)(struct sock *sk);
void (*ccid_hc_rx_packet_recv)(struct sock *sk,
struct sk_buff *skb);
- int (*ccid_hc_rx_parse_options)(struct sock *sk,
- unsigned char option,
- unsigned char len, u16 idx,
- unsigned char* value);
+ int (*ccid_hc_rx_parse_options)(struct sock *sk, u8 pkt,
+ u8 opt, u8 *val, u8 len);
int (*ccid_hc_rx_insert_options)(struct sock *sk,
struct sk_buff *skb);
void (*ccid_hc_tx_packet_recv)(struct sock *sk,
struct sk_buff *skb);
- int (*ccid_hc_tx_parse_options)(struct sock *sk,
- unsigned char option,
- unsigned char len, u16 idx,
- unsigned char* value);
+ int (*ccid_hc_tx_parse_options)(struct sock *sk, u8 pkt,
+ u8 opt, u8 *val, u8 len);
int (*ccid_hc_tx_send_packet)(struct sock *sk,
struct sk_buff *skb);
void (*ccid_hc_tx_packet_sent)(struct sock *sk,
@@ -168,27 +164,31 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk,
ccid->ccid_ops->ccid_hc_tx_packet_recv(sk, skb);
}
+/**
+ * ccid_hc_tx_parse_options - Parse CCID-specific options sent by the receiver
+ * @pkt: type of packet that @opt appears on (RFC 4340, 5.1)
+ * @opt: the CCID-specific option type (RFC 4340, 5.8 and 10.3)
+ * @val: value of @opt
+ * @len: length of @val in bytes
+ */
static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
- unsigned char option,
- unsigned char len, u16 idx,
- unsigned char* value)
+ u8 pkt, u8 opt, u8 *val, u8 len)
{
- int rc = 0;
- if (ccid->ccid_ops->ccid_hc_tx_parse_options != NULL)
- rc = ccid->ccid_ops->ccid_hc_tx_parse_options(sk, option, len, idx,
- value);
- return rc;
+ if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL)
+ return 0;
+ return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len);
}
+/**
+ * ccid_hc_rx_parse_options - Parse CCID-specific options sent by the sender
+ * Arguments are analogous to ccid_hc_tx_parse_options()
+ */
static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
- unsigned char option,
- unsigned char len, u16 idx,
- unsigned char* value)
+ u8 pkt, u8 opt, u8 *val, u8 len)
{
- int rc = 0;
- if (ccid->ccid_ops->ccid_hc_rx_parse_options != NULL)
- rc = ccid->ccid_ops->ccid_hc_rx_parse_options(sk, option, len, idx, value);
- return rc;
+ if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL)
+ return 0;
+ return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len);
}
static inline int ccid_hc_rx_insert_options(struct ccid *ccid, struct sock *sk,
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
index 8408398cd44e..0581143cb800 100644
--- a/net/dccp/ccids/Kconfig
+++ b/net/dccp/ccids/Kconfig
@@ -47,37 +47,6 @@ config IP_DCCP_CCID3_DEBUG
If in doubt, say N.
-config IP_DCCP_CCID3_RTO
- int "Use higher bound for nofeedback timer"
- default 100
- depends on IP_DCCP_CCID3 && EXPERIMENTAL
- ---help---
- Use higher lower bound for nofeedback timer expiration.
-
- The TFRC nofeedback timer normally expires after the maximum of 4
- RTTs and twice the current send interval (RFC 3448, 4.3). On LANs
- with a small RTT this can mean a high processing load and reduced
- performance, since then the nofeedback timer is triggered very
- frequently.
-
- This option enables to set a higher lower bound for the nofeedback
- value. Values in units of milliseconds can be set here.
-
- A value of 0 disables this feature by enforcing the value specified
- in RFC 3448. The following values have been suggested as bounds for
- experimental use:
- * 16-20ms to match the typical multimedia inter-frame interval
- * 100ms as a reasonable compromise [default]
- * 1000ms corresponds to the lower TCP RTO bound (RFC 2988, 2.4)
-
- The default of 100ms is a compromise between a large value for
- efficient DCCP implementations, and a small value to avoid disrupting
- the network in times of congestion.
-
- The purpose of the nofeedback timer is to slow DCCP down when there
- is serious network congestion: experimenting with larger values should
- therefore not be performed on WANs.
-
config IP_DCCP_TFRC_LIB
def_bool y if IP_DCCP_CCID3
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 9b3ae9922be1..dc18172b1e59 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -25,59 +25,14 @@
*/
#include <linux/slab.h>
#include "../feat.h"
-#include "../ccid.h"
-#include "../dccp.h"
#include "ccid2.h"
#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
static int ccid2_debug;
#define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a)
-
-static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hc)
-{
- int len = 0;
- int pipe = 0;
- struct ccid2_seq *seqp = hc->tx_seqh;
-
- /* there is data in the chain */
- if (seqp != hc->tx_seqt) {
- seqp = seqp->ccid2s_prev;
- len++;
- if (!seqp->ccid2s_acked)
- pipe++;
-
- while (seqp != hc->tx_seqt) {
- struct ccid2_seq *prev = seqp->ccid2s_prev;
-
- len++;
- if (!prev->ccid2s_acked)
- pipe++;
-
- /* packets are sent sequentially */
- BUG_ON(dccp_delta_seqno(seqp->ccid2s_seq,
- prev->ccid2s_seq ) >= 0);
- BUG_ON(time_before(seqp->ccid2s_sent,
- prev->ccid2s_sent));
-
- seqp = prev;
- }
- }
-
- BUG_ON(pipe != hc->tx_pipe);
- ccid2_pr_debug("len of chain=%d\n", len);
-
- do {
- seqp = seqp->ccid2s_prev;
- len++;
- } while (seqp != hc->tx_seqh);
-
- ccid2_pr_debug("total len=%d\n", len);
- BUG_ON(len != hc->tx_seqbufc * CCID2_SEQBUF_LEN);
-}
#else
#define ccid2_pr_debug(format, a...)
-#define ccid2_hc_tx_check_sanity(hc)
#endif
static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
@@ -156,19 +111,10 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
dp->dccps_l_ack_ratio = val;
}
-static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hc, long val)
-{
- ccid2_pr_debug("change SRTT to %ld\n", val);
- hc->tx_srtt = val;
-}
-
-static void ccid2_start_rto_timer(struct sock *sk);
-
static void ccid2_hc_tx_rto_expire(unsigned long data)
{
struct sock *sk = (struct sock *)data;
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
- long s;
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
@@ -178,23 +124,19 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
ccid2_pr_debug("RTO_EXPIRE\n");
- ccid2_hc_tx_check_sanity(hc);
-
/* back-off timer */
hc->tx_rto <<= 1;
+ if (hc->tx_rto > DCCP_RTO_MAX)
+ hc->tx_rto = DCCP_RTO_MAX;
- s = hc->tx_rto / HZ;
- if (s > 60)
- hc->tx_rto = 60 * HZ;
-
- ccid2_start_rto_timer(sk);
+ sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
/* adjust pipe, cwnd etc */
hc->tx_ssthresh = hc->tx_cwnd / 2;
if (hc->tx_ssthresh < 2)
hc->tx_ssthresh = 2;
- hc->tx_cwnd = 1;
- hc->tx_pipe = 0;
+ hc->tx_cwnd = 1;
+ hc->tx_pipe = 0;
/* clear state about stuff we sent */
hc->tx_seqt = hc->tx_seqh;
@@ -204,22 +146,11 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
hc->tx_rpseq = 0;
hc->tx_rpdupack = -1;
ccid2_change_l_ack_ratio(sk, 1);
- ccid2_hc_tx_check_sanity(hc);
out:
bh_unlock_sock(sk);
sock_put(sk);
}
-static void ccid2_start_rto_timer(struct sock *sk)
-{
- struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
-
- ccid2_pr_debug("setting RTO timeout=%ld\n", hc->tx_rto);
-
- BUG_ON(timer_pending(&hc->tx_rtotimer));
- sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
-}
-
static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
{
struct dccp_sock *dp = dccp_sk(sk);
@@ -230,7 +161,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
hc->tx_seqh->ccid2s_seq = dp->dccps_gss;
hc->tx_seqh->ccid2s_acked = 0;
- hc->tx_seqh->ccid2s_sent = jiffies;
+ hc->tx_seqh->ccid2s_sent = ccid2_time_stamp;
next = hc->tx_seqh->ccid2s_next;
/* check if we need to alloc more space */
@@ -296,23 +227,20 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
}
#endif
- /* setup RTO timer */
- if (!timer_pending(&hc->tx_rtotimer))
- ccid2_start_rto_timer(sk);
+ sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
do {
struct ccid2_seq *seqp = hc->tx_seqt;
while (seqp != hc->tx_seqh) {
- ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n",
+ ccid2_pr_debug("out seq=%llu acked=%d time=%u\n",
(unsigned long long)seqp->ccid2s_seq,
seqp->ccid2s_acked, seqp->ccid2s_sent);
seqp = seqp->ccid2s_next;
}
} while (0);
ccid2_pr_debug("=========\n");
- ccid2_hc_tx_check_sanity(hc);
#endif
}
@@ -378,17 +306,87 @@ out_invalid_option:
return -1;
}
-static void ccid2_hc_tx_kill_rto_timer(struct sock *sk)
+/**
+ * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm
+ * This code is almost identical with TCP's tcp_rtt_estimator(), since
+ * - it has a higher sampling frequency (recommended by RFC 1323),
+ * - the RTO does not collapse into RTT due to RTTVAR going towards zero,
+ * - it is simple (cf. more complex proposals such as Eifel timer or research
+ * which suggests that the gain should be set according to window size),
+ * - in tests it was found to work well with CCID2 [gerrit].
+ */
+static void ccid2_rtt_estimator(struct sock *sk, const long mrtt)
{
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+ long m = mrtt ? : 1;
- sk_stop_timer(sk, &hc->tx_rtotimer);
- ccid2_pr_debug("deleted RTO timer\n");
+ if (hc->tx_srtt == 0) {
+ /* First measurement m */
+ hc->tx_srtt = m << 3;
+ hc->tx_mdev = m << 1;
+
+ hc->tx_mdev_max = max(hc->tx_mdev, tcp_rto_min(sk));
+ hc->tx_rttvar = hc->tx_mdev_max;
+
+ hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss;
+ } else {
+ /* Update scaled SRTT as SRTT += 1/8 * (m - SRTT) */
+ m -= (hc->tx_srtt >> 3);
+ hc->tx_srtt += m;
+
+ /* Similarly, update scaled mdev with regard to |m| */
+ if (m < 0) {
+ m = -m;
+ m -= (hc->tx_mdev >> 2);
+ /*
+ * This neutralises RTO increase when RTT < SRTT - mdev
+ * (see P. Sarolahti, A. Kuznetsov,"Congestion Control
+ * in Linux TCP", USENIX 2002, pp. 49-62).
+ */
+ if (m > 0)
+ m >>= 3;
+ } else {
+ m -= (hc->tx_mdev >> 2);
+ }
+ hc->tx_mdev += m;
+
+ if (hc->tx_mdev > hc->tx_mdev_max) {
+ hc->tx_mdev_max = hc->tx_mdev;
+ if (hc->tx_mdev_max > hc->tx_rttvar)
+ hc->tx_rttvar = hc->tx_mdev_max;
+ }
+
+ /*
+ * Decay RTTVAR at most once per flight, exploiting that
+ * 1) pipe <= cwnd <= Sequence_Window = W (RFC 4340, 7.5.2)
+ * 2) AWL = GSS-W+1 <= GAR <= GSS (RFC 4340, 7.5.1)
+ * GAR is a useful bound for FlightSize = pipe.
+ * AWL is probably too low here, as it over-estimates pipe.
+ */
+ if (after48(dccp_sk(sk)->dccps_gar, hc->tx_rtt_seq)) {
+ if (hc->tx_mdev_max < hc->tx_rttvar)
+ hc->tx_rttvar -= (hc->tx_rttvar -
+ hc->tx_mdev_max) >> 2;
+ hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss;
+ hc->tx_mdev_max = tcp_rto_min(sk);
+ }
+ }
+
+ /*
+ * Set RTO from SRTT and RTTVAR
+ * As in TCP, 4 * RTTVAR >= TCP_RTO_MIN, giving a minimum RTO of 200 ms.
+ * This agrees with RFC 4341, 5:
+ * "Because DCCP does not retransmit data, DCCP does not require
+ * TCP's recommended minimum timeout of one second".
+ */
+ hc->tx_rto = (hc->tx_srtt >> 3) + hc->tx_rttvar;
+
+ if (hc->tx_rto > DCCP_RTO_MAX)
+ hc->tx_rto = DCCP_RTO_MAX;
}
-static inline void ccid2_new_ack(struct sock *sk,
- struct ccid2_seq *seqp,
- unsigned int *maxincr)
+static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
+ unsigned int *maxincr)
{
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
@@ -402,93 +400,27 @@ static inline void ccid2_new_ack(struct sock *sk,
hc->tx_cwnd += 1;
hc->tx_packets_acked = 0;
}
-
- /* update RTO */
- if (hc->tx_srtt == -1 ||
- time_after(jiffies, hc->tx_lastrtt + hc->tx_srtt)) {
- unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent;
- int s;
-
- /* first measurement */
- if (hc->tx_srtt == -1) {
- ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
- r, jiffies,
- (unsigned long long)seqp->ccid2s_seq);
- ccid2_change_srtt(hc, r);
- hc->tx_rttvar = r >> 1;
- } else {
- /* RTTVAR */
- long tmp = hc->tx_srtt - r;
- long srtt;
-
- if (tmp < 0)
- tmp *= -1;
-
- tmp >>= 2;
- hc->tx_rttvar *= 3;
- hc->tx_rttvar >>= 2;
- hc->tx_rttvar += tmp;
-
- /* SRTT */
- srtt = hc->tx_srtt;
- srtt *= 7;
- srtt >>= 3;
- tmp = r >> 3;
- srtt += tmp;
- ccid2_change_srtt(hc, srtt);
- }
- s = hc->tx_rttvar << 2;
- /* clock granularity is 1 when based on jiffies */
- if (!s)
- s = 1;
- hc->tx_rto = hc->tx_srtt + s;
-
- /* must be at least a second */
- s = hc->tx_rto / HZ;
- /* DCCP doesn't require this [but I like it cuz my code sux] */
-#if 1
- if (s < 1)
- hc->tx_rto = HZ;
-#endif
- /* max 60 seconds */
- if (s > 60)
- hc->tx_rto = HZ * 60;
-
- hc->tx_lastrtt = jiffies;
-
- ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n",
- hc->tx_srtt, hc->tx_rttvar,
- hc->tx_rto, HZ, r);
- }
-
- /* we got a new ack, so re-start RTO timer */
- ccid2_hc_tx_kill_rto_timer(sk);
- ccid2_start_rto_timer(sk);
-}
-
-static void ccid2_hc_tx_dec_pipe(struct sock *sk)
-{
- struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
-
- if (hc->tx_pipe == 0)
- DCCP_BUG("pipe == 0");
- else
- hc->tx_pipe--;
-
- if (hc->tx_pipe == 0)
- ccid2_hc_tx_kill_rto_timer(sk);
+ /*
+ * FIXME: RTT is sampled several times per acknowledgment (for each
+ * entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
+ * This causes the RTT to be over-estimated, since the older entries
+ * in the Ack Vector have earlier sending times.
+ * The cleanest solution is to not use the ccid2s_sent field at all
+ * and instead use DCCP timestamps: requires changes in other places.
+ */
+ ccid2_rtt_estimator(sk, ccid2_time_stamp - seqp->ccid2s_sent);
}
static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
{
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
- if (time_before(seqp->ccid2s_sent, hc->tx_last_cong)) {
+ if ((s32)(seqp->ccid2s_sent - hc->tx_last_cong) < 0) {
ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
return;
}
- hc->tx_last_cong = jiffies;
+ hc->tx_last_cong = ccid2_time_stamp;
hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U;
hc->tx_ssthresh = max(hc->tx_cwnd, 2U);
@@ -510,7 +442,6 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
int done = 0;
unsigned int maxincr = 0;
- ccid2_hc_tx_check_sanity(hc);
/* check reverse path congestion */
seqno = DCCP_SKB_CB(skb)->dccpd_seq;
@@ -620,7 +551,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
seqp->ccid2s_acked = 1;
ccid2_pr_debug("Got ack for %llu\n",
(unsigned long long)seqp->ccid2s_seq);
- ccid2_hc_tx_dec_pipe(sk);
+ hc->tx_pipe--;
}
if (seqp == hc->tx_seqt) {
done = 1;
@@ -677,7 +608,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
* one ack vector.
*/
ccid2_congestion_event(sk, seqp);
- ccid2_hc_tx_dec_pipe(sk);
+ hc->tx_pipe--;
}
if (seqp == hc->tx_seqt)
break;
@@ -695,7 +626,11 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
hc->tx_seqt = hc->tx_seqt->ccid2s_next;
}
- ccid2_hc_tx_check_sanity(hc);
+ /* restart RTO timer if not all outstanding data has been acked */
+ if (hc->tx_pipe == 0)
+ sk_stop_timer(sk, &hc->tx_rtotimer);
+ else
+ sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
}
static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
@@ -707,12 +642,8 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
/* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */
hc->tx_ssthresh = ~0U;
- /*
- * RFC 4341, 5: "The cwnd parameter is initialized to at most four
- * packets for new connections, following the rules from [RFC3390]".
- * We need to convert the bytes of RFC3390 into the packets of RFC 4341.
- */
- hc->tx_cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U);
+ /* Use larger initial windows (RFC 4341, section 5). */
+ hc->tx_cwnd = rfc3390_bytes_to_packets(dp->dccps_mss_cache);
/* Make sure that Ack Ratio is enabled and within bounds. */
max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2);
@@ -723,15 +654,11 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
if (ccid2_hc_tx_alloc_seq(hc))
return -ENOMEM;
- hc->tx_rto = 3 * HZ;
- ccid2_change_srtt(hc, -1);
- hc->tx_rttvar = -1;
+ hc->tx_rto = DCCP_TIMEOUT_INIT;
hc->tx_rpdupack = -1;
- hc->tx_last_cong = jiffies;
+ hc->tx_last_cong = ccid2_time_stamp;
setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
(unsigned long)sk);
-
- ccid2_hc_tx_check_sanity(hc);
return 0;
}
@@ -740,7 +667,7 @@ static void ccid2_hc_tx_exit(struct sock *sk)
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
int i;
- ccid2_hc_tx_kill_rto_timer(sk);
+ sk_stop_timer(sk, &hc->tx_rtotimer);
for (i = 0; i < hc->tx_seqbufc; i++)
kfree(hc->tx_seqbuf[i]);
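Illustrative note, not part of the patch: the new ccid2_rtt_estimator() replaces the home-grown SRTT update with the RFC 2988 fixed-point scheme that TCP uses (SRTT scaled by 8, mean deviation scaled by 4). The following is a minimal user-space sketch of that scheme; it drops the per-flight mdev_max/RTTVAR decay and uses made-up RTO bounds, so it is a simplification rather than the kernel code.

#include <stdio.h>
#include <stdint.h>

#define RTO_MIN	200	/* ms, assumed lower bound (tcp_rto_min() analogue) */
#define RTO_MAX	60000	/* ms, assumed DCCP_RTO_MAX analogue */

struct rtt_state {
	uint32_t srtt;	/* smoothed RTT, scaled by 8 */
	uint32_t mdev;	/* mean deviation, scaled by 4 */
	uint32_t rto;	/* retransmission timeout in ms */
};

static void rtt_sample(struct rtt_state *s, long m /* measured RTT in ms */)
{
	if (m <= 0)
		m = 1;
	if (s->srtt == 0) {			/* first measurement */
		s->srtt = m << 3;
		s->mdev = m << 1;
	} else {
		m -= (s->srtt >> 3);		/* m is now the error term */
		s->srtt += m;			/* srtt += 1/8 * error */
		if (m < 0)
			m = -m;
		m -= (s->mdev >> 2);
		s->mdev += m;			/* mdev += 1/4 * (|error| - mdev) */
	}
	s->rto = (s->srtt >> 3) + (s->mdev > RTO_MIN ? s->mdev : RTO_MIN);
	if (s->rto > RTO_MAX)
		s->rto = RTO_MAX;
}

int main(void)
{
	struct rtt_state s = { 0 };
	long samples[] = { 100, 120, 80, 300, 90 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		rtt_sample(&s, samples[i]);
		printf("sample=%ldms srtt=%ums rto=%ums\n",
		       samples[i], s.srtt >> 3, s.rto);
	}
	return 0;
}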
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index 1ec6a30103bb..9731c2dc1487 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -18,18 +18,23 @@
#ifndef _DCCP_CCID2_H_
#define _DCCP_CCID2_H_
-#include <linux/dccp.h>
#include <linux/timer.h>
#include <linux/types.h>
#include "../ccid.h"
+#include "../dccp.h"
+
+/*
+ * CCID-2 timestamping faces the same issues as TCP timestamping.
+ * Hence we reuse/share as much of the code as possible.
+ */
+#define ccid2_time_stamp tcp_time_stamp
+
/* NUMDUPACK parameter from RFC 4341, p. 6 */
#define NUMDUPACK 3
-struct sock;
-
struct ccid2_seq {
u64 ccid2s_seq;
- unsigned long ccid2s_sent;
+ u32 ccid2s_sent;
int ccid2s_acked;
struct ccid2_seq *ccid2s_prev;
struct ccid2_seq *ccid2s_next;
@@ -42,7 +47,12 @@ struct ccid2_seq {
* struct ccid2_hc_tx_sock - CCID2 TX half connection
* @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5
* @tx_packets_acked: Ack counter for deriving cwnd growth (RFC 3465)
- * @tx_lastrtt: time RTT was last measured
+ * @tx_srtt: smoothed RTT estimate, scaled by 2^3
+ * @tx_mdev: smoothed RTT variation, scaled by 2^2
+ * @tx_mdev_max: maximum of @mdev during one flight
+ * @tx_rttvar: moving average/maximum of @mdev_max
+ * @tx_rto: RTO value deriving from SRTT and RTTVAR (RFC 2988)
+ * @tx_rtt_seq: to decay RTTVAR at most once per flight
* @tx_rpseq: last consecutive seqno
* @tx_rpdupack: dupacks since rpseq
*/
@@ -55,14 +65,19 @@ struct ccid2_hc_tx_sock {
int tx_seqbufc;
struct ccid2_seq *tx_seqh;
struct ccid2_seq *tx_seqt;
- long tx_rto;
- long tx_srtt;
- long tx_rttvar;
- unsigned long tx_lastrtt;
+
+ /* RTT measurement: variables/principles are the same as in TCP */
+ u32 tx_srtt,
+ tx_mdev,
+ tx_mdev_max,
+ tx_rttvar,
+ tx_rto;
+ u64 tx_rtt_seq:48;
struct timer_list tx_rtotimer;
+
u64 tx_rpseq;
int tx_rpdupack;
- unsigned long tx_last_cong;
+ u32 tx_last_cong;
u64 tx_high_ack;
};
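Illustrative note, not part of the patch: because ccid2s_sent shrinks from jiffies (unsigned long) to the 32-bit ccid2_time_stamp, time_before() is replaced by the signed-difference comparison seen in ccid2_congestion_event(). A small sketch of why that idiom survives wrap-around (the values are arbitrary):

#include <stdio.h>
#include <stdint.h>

/* True if timestamp a is earlier than b, even across u32 wrap-around. */
static int ts_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	uint32_t near_wrap  = 0xFFFFFFF0u;	/* just before the counter wraps */
	uint32_t after_wrap = 0x00000010u;	/* just after the counter wraps */

	printf("%d\n", ts_before(near_wrap, after_wrap));	/* 1: still ordered */
	printf("%d\n", ts_before(after_wrap, near_wrap));	/* 0 */
	return 0;
}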
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 95f752986497..c3f3a25bbd7a 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -54,7 +54,6 @@ static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state)
[TFRC_SSTATE_NO_SENT] = "NO_SENT",
[TFRC_SSTATE_NO_FBACK] = "NO_FBACK",
[TFRC_SSTATE_FBACK] = "FBACK",
- [TFRC_SSTATE_TERM] = "TERM",
};
return ccid3_state_names[state];
@@ -91,19 +90,16 @@ static inline u64 rfc3390_initial_rate(struct sock *sk)
return scaled_div(w_init << 6, hc->tx_rtt);
}
-/*
- * Recalculate t_ipi and delta (should be called whenever X changes)
+/**
+ * ccid3_update_send_interval - Calculate new t_ipi = s / X_inst
+ * This respects the granularity of X_inst (64 * bytes/second).
*/
static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc)
{
- /* Calculate new t_ipi = s / X_inst (X_inst is in 64 * bytes/second) */
hc->tx_t_ipi = scaled_div32(((u64)hc->tx_s) << 6, hc->tx_x);
- /* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */
- hc->tx_delta = min_t(u32, hc->tx_t_ipi / 2, TFRC_OPSYS_HALF_TIME_GRAN);
-
- ccid3_pr_debug("t_ipi=%u, delta=%u, s=%u, X=%u\n", hc->tx_t_ipi,
- hc->tx_delta, hc->tx_s, (unsigned)(hc->tx_x >> 6));
+ ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi,
+ hc->tx_s, (unsigned)(hc->tx_x >> 6));
}
static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
@@ -211,16 +207,19 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
ccid3_pr_debug("%s(%p, state=%s) - entry\n", dccp_role(sk), sk,
ccid3_tx_state_name(hc->tx_state));
+ /* Ignore and do not restart after leaving the established state */
+ if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
+ goto out;
+
+ /* Reset feedback state to "no feedback received" */
if (hc->tx_state == TFRC_SSTATE_FBACK)
ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
- else if (hc->tx_state != TFRC_SSTATE_NO_FBACK)
- goto out;
/*
* Determine new allowed sending rate X as per draft rfc3448bis-00, 4.4
+ * RTO is 0 if and only if no feedback has been received yet.
*/
- if (hc->tx_t_rto == 0 || /* no feedback received yet */
- hc->tx_p == 0) {
+ if (hc->tx_t_rto == 0 || hc->tx_p == 0) {
/* halve send rate directly */
hc->tx_x = max(hc->tx_x / 2,
@@ -256,7 +255,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
* Set new timeout for the nofeedback timer.
* See comments in packet_recv() regarding the value of t_RTO.
*/
- if (unlikely(hc->tx_t_rto == 0)) /* no feedback yet */
+ if (unlikely(hc->tx_t_rto == 0)) /* no feedback received yet */
t_nfb = TFRC_INITIAL_TIMEOUT;
else
t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi);
@@ -290,8 +289,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
if (unlikely(skb->len == 0))
return -EBADMSG;
- switch (hc->tx_state) {
- case TFRC_SSTATE_NO_SENT:
+ if (hc->tx_state == TFRC_SSTATE_NO_SENT) {
sk_reset_timer(sk, &hc->tx_no_feedback_timer, (jiffies +
usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)));
hc->tx_last_win_count = 0;
@@ -326,27 +324,22 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
ccid3_update_send_interval(hc);
ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
- break;
- case TFRC_SSTATE_NO_FBACK:
- case TFRC_SSTATE_FBACK:
+
+ } else {
delay = ktime_us_delta(hc->tx_t_nom, now);
ccid3_pr_debug("delay=%ld\n", (long)delay);
/*
- * Scheduling of packet transmissions [RFC 3448, 4.6]
+ * Scheduling of packet transmissions (RFC 5348, 8.3)
*
* if (t_now > t_nom - delta)
* // send the packet now
* else
* // send the packet in (t_nom - t_now) milliseconds.
*/
- if (delay - (s64)hc->tx_delta >= 1000)
- return (u32)delay / 1000L;
+ if (delay >= TFRC_T_DELTA)
+ return (u32)delay / USEC_PER_MSEC;
ccid3_hc_tx_update_win_count(hc, now);
- break;
- case TFRC_SSTATE_TERM:
- DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
- return -EINVAL;
}
/* prepare to send now (add options etc.) */
@@ -372,48 +365,34 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more,
static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
- struct ccid3_options_received *opt_recv;
+ struct tfrc_tx_hist_entry *acked;
ktime_t now;
unsigned long t_nfb;
- u32 pinv, r_sample;
+ u32 r_sample;
/* we are only interested in ACKs */
if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK ||
DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK))
return;
- /* ... and only in the established state */
- if (hc->tx_state != TFRC_SSTATE_FBACK &&
- hc->tx_state != TFRC_SSTATE_NO_FBACK)
- return;
-
- opt_recv = &hc->tx_options_received;
- now = ktime_get_real();
-
- /* Estimate RTT from history if ACK number is valid */
- r_sample = tfrc_tx_hist_rtt(hc->tx_hist,
- DCCP_SKB_CB(skb)->dccpd_ack_seq, now);
- if (r_sample == 0) {
- DCCP_WARN("%s(%p): %s with bogus ACK-%llu\n", dccp_role(sk), sk,
- dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type),
- (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq);
- return;
- }
-
- /* Update receive rate in units of 64 * bytes/second */
- hc->tx_x_recv = opt_recv->ccid3or_receive_rate;
- hc->tx_x_recv <<= 6;
-
- /* Update loss event rate (which is scaled by 1e6) */
- pinv = opt_recv->ccid3or_loss_event_rate;
- if (pinv == ~0U || pinv == 0) /* see RFC 4342, 8.5 */
- hc->tx_p = 0;
- else /* can not exceed 100% */
- hc->tx_p = scaled_div(1, pinv);
/*
- * Validate new RTT sample and update moving average
+ * Locate the acknowledged packet in the TX history.
+ *
+ * Returning "entry not found" here can happen, for instance, when
+ * - the host has not sent out anything (e.g. a passive server),
+ * - the Ack is outdated (packet with higher Ack number was received),
+ * - it is a bogus Ack (for a packet not sent on this connection).
*/
- r_sample = dccp_sample_rtt(sk, r_sample);
+ acked = tfrc_tx_hist_find_entry(hc->tx_hist, dccp_hdr_ack_seq(skb));
+ if (acked == NULL)
+ return;
+ /* For the sake of RTT sampling, ignore/remove all older entries */
+ tfrc_tx_hist_purge(&acked->next);
+
+ /* Update the moving average for the RTT estimate (RFC 3448, 4.3) */
+ now = ktime_get_real();
+ r_sample = dccp_sample_rtt(sk, ktime_us_delta(now, acked->stamp));
hc->tx_rtt = tfrc_ewma(hc->tx_rtt, r_sample, 9);
+
/*
* Update allowed sending rate X as per draft rfc3448bis-00, 4.2/3
*/
@@ -461,13 +440,12 @@ done_computing_x:
sk->sk_write_space(sk);
/*
- * Update timeout interval for the nofeedback timer.
- * We use a configuration option to increase the lower bound.
- * This can help avoid triggering the nofeedback timer too
- * often ('spinning') on LANs with small RTTs.
+ * Update timeout interval for the nofeedback timer. In order to control
+ * rate halving on networks with very low RTTs (<= 1 ms), use the
+ * per-route tunable RTAX_RTO_MIN value as the lower bound.
*/
- hc->tx_t_rto = max_t(u32, 4 * hc->tx_rtt, (CONFIG_IP_DCCP_CCID3_RTO *
- (USEC_PER_SEC / 1000)));
+ hc->tx_t_rto = max_t(u32, 4 * hc->tx_rtt,
+ USEC_PER_SEC/HZ * tcp_rto_min(sk));
/*
* Schedule no feedback timer to expire in
* max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi)
@@ -482,66 +460,41 @@ done_computing_x:
jiffies + usecs_to_jiffies(t_nfb));
}
-static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
- unsigned char len, u16 idx,
- unsigned char *value)
+static int ccid3_hc_tx_parse_options(struct sock *sk, u8 packet_type,
+ u8 option, u8 *optval, u8 optlen)
{
- int rc = 0;
- const struct dccp_sock *dp = dccp_sk(sk);
struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
- struct ccid3_options_received *opt_recv;
__be32 opt_val;
- opt_recv = &hc->tx_options_received;
-
- if (opt_recv->ccid3or_seqno != dp->dccps_gsr) {
- opt_recv->ccid3or_seqno = dp->dccps_gsr;
- opt_recv->ccid3or_loss_event_rate = ~0;
- opt_recv->ccid3or_loss_intervals_idx = 0;
- opt_recv->ccid3or_loss_intervals_len = 0;
- opt_recv->ccid3or_receive_rate = 0;
- }
-
switch (option) {
+ case TFRC_OPT_RECEIVE_RATE:
case TFRC_OPT_LOSS_EVENT_RATE:
- if (unlikely(len != 4)) {
- DCCP_WARN("%s(%p), invalid len %d "
- "for TFRC_OPT_LOSS_EVENT_RATE\n",
- dccp_role(sk), sk, len);
- rc = -EINVAL;
- } else {
- opt_val = get_unaligned((__be32 *)value);
- opt_recv->ccid3or_loss_event_rate = ntohl(opt_val);
- ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n",
- dccp_role(sk), sk,
- opt_recv->ccid3or_loss_event_rate);
+ /* Must be ignored on Data packets, cf. RFC 4342 8.3 and 8.5 */
+ if (packet_type == DCCP_PKT_DATA)
+ break;
+ if (unlikely(optlen != 4)) {
+ DCCP_WARN("%s(%p), invalid len %d for %u\n",
+ dccp_role(sk), sk, optlen, option);
+ return -EINVAL;
}
- break;
- case TFRC_OPT_LOSS_INTERVALS:
- opt_recv->ccid3or_loss_intervals_idx = idx;
- opt_recv->ccid3or_loss_intervals_len = len;
- ccid3_pr_debug("%s(%p), LOSS_INTERVALS=(%u, %u)\n",
- dccp_role(sk), sk,
- opt_recv->ccid3or_loss_intervals_idx,
- opt_recv->ccid3or_loss_intervals_len);
- break;
- case TFRC_OPT_RECEIVE_RATE:
- if (unlikely(len != 4)) {
- DCCP_WARN("%s(%p), invalid len %d "
- "for TFRC_OPT_RECEIVE_RATE\n",
- dccp_role(sk), sk, len);
- rc = -EINVAL;
- } else {
- opt_val = get_unaligned((__be32 *)value);
- opt_recv->ccid3or_receive_rate = ntohl(opt_val);
+ opt_val = ntohl(get_unaligned((__be32 *)optval));
+
+ if (option == TFRC_OPT_RECEIVE_RATE) {
+ /* Receive Rate is kept in units of 64 bytes/second */
+ hc->tx_x_recv = opt_val;
+ hc->tx_x_recv <<= 6;
+
ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n",
- dccp_role(sk), sk,
- opt_recv->ccid3or_receive_rate);
+ dccp_role(sk), sk, opt_val);
+ } else {
+ /* Update the fixpoint Loss Event Rate fraction */
+ hc->tx_p = tfrc_invert_loss_event_rate(opt_val);
+
+ ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n",
+ dccp_role(sk), sk, opt_val);
}
- break;
}
-
- return rc;
+ return 0;
}
static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
@@ -559,42 +512,36 @@ static void ccid3_hc_tx_exit(struct sock *sk)
{
struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
- ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM);
sk_stop_timer(sk, &hc->tx_no_feedback_timer);
-
tfrc_tx_hist_purge(&hc->tx_hist);
}
static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
{
- struct ccid3_hc_tx_sock *hc;
-
- /* Listen socks doesn't have a private CCID block */
- if (sk->sk_state == DCCP_LISTEN)
- return;
-
- hc = ccid3_hc_tx_sk(sk);
- info->tcpi_rto = hc->tx_t_rto;
- info->tcpi_rtt = hc->tx_rtt;
+ info->tcpi_rto = ccid3_hc_tx_sk(sk)->tx_t_rto;
+ info->tcpi_rtt = ccid3_hc_tx_sk(sk)->tx_rtt;
}
static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
u32 __user *optval, int __user *optlen)
{
- const struct ccid3_hc_tx_sock *hc;
+ const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
+ struct tfrc_tx_info tfrc;
const void *val;
- /* Listen socks doesn't have a private CCID block */
- if (sk->sk_state == DCCP_LISTEN)
- return -EINVAL;
-
- hc = ccid3_hc_tx_sk(sk);
switch (optname) {
case DCCP_SOCKOPT_CCID_TX_INFO:
- if (len < sizeof(hc->tx_tfrc))
+ if (len < sizeof(tfrc))
return -EINVAL;
- len = sizeof(hc->tx_tfrc);
- val = &hc->tx_tfrc;
+ tfrc.tfrctx_x = hc->tx_x;
+ tfrc.tfrctx_x_recv = hc->tx_x_recv;
+ tfrc.tfrctx_x_calc = hc->tx_x_calc;
+ tfrc.tfrctx_rtt = hc->tx_rtt;
+ tfrc.tfrctx_p = hc->tx_p;
+ tfrc.tfrctx_rto = hc->tx_t_rto;
+ tfrc.tfrctx_ipi = hc->tx_t_ipi;
+ len = sizeof(tfrc);
+ val = &tfrc;
break;
default:
return -ENOPROTOOPT;
@@ -624,7 +571,6 @@ static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state)
static const char *const ccid3_rx_state_names[] = {
[TFRC_RSTATE_NO_DATA] = "NO_DATA",
[TFRC_RSTATE_DATA] = "DATA",
- [TFRC_RSTATE_TERM] = "TERM",
};
return ccid3_rx_state_names[state];
@@ -650,14 +596,9 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
{
struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
- ktime_t now;
+ ktime_t now = ktime_get_real();
s64 delta = 0;
- if (unlikely(hc->rx_state == TFRC_RSTATE_TERM))
- return;
-
- now = ktime_get_real();
-
switch (fbtype) {
case CCID3_FBACK_INITIAL:
hc->rx_x_recv = 0;
@@ -701,14 +642,12 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
{
- const struct ccid3_hc_rx_sock *hc;
+ const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
__be32 x_recv, pinv;
if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
return 0;
- hc = ccid3_hc_rx_sk(sk);
-
if (dccp_packet_without_ack(skb))
return 0;
@@ -749,10 +688,11 @@ static u32 ccid3_first_li(struct sock *sk)
x_recv = scaled_div32(hc->rx_bytes_recv, delta);
if (x_recv == 0) { /* would also trigger divide-by-zero */
DCCP_WARN("X_recv==0\n");
- if ((x_recv = hc->rx_x_recv) == 0) {
+ if (hc->rx_x_recv == 0) {
DCCP_BUG("stored value of X_recv is zero");
return ~0U;
}
+ x_recv = hc->rx_x_recv;
}
fval = scaled_div(hc->rx_s, hc->rx_rtt);
@@ -862,46 +802,31 @@ static void ccid3_hc_rx_exit(struct sock *sk)
{
struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
- ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM);
-
tfrc_rx_hist_purge(&hc->rx_hist);
tfrc_lh_cleanup(&hc->rx_li_hist);
}
static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
{
- const struct ccid3_hc_rx_sock *hc;
-
- /* Listen socks doesn't have a private CCID block */
- if (sk->sk_state == DCCP_LISTEN)
- return;
-
- hc = ccid3_hc_rx_sk(sk);
- info->tcpi_ca_state = hc->rx_state;
+ info->tcpi_ca_state = ccid3_hc_rx_sk(sk)->rx_state;
info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
- info->tcpi_rcv_rtt = hc->rx_rtt;
+ info->tcpi_rcv_rtt = ccid3_hc_rx_sk(sk)->rx_rtt;
}
static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
u32 __user *optval, int __user *optlen)
{
- const struct ccid3_hc_rx_sock *hc;
+ const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
struct tfrc_rx_info rx_info;
const void *val;
- /* Listen socks doesn't have a private CCID block */
- if (sk->sk_state == DCCP_LISTEN)
- return -EINVAL;
-
- hc = ccid3_hc_rx_sk(sk);
switch (optname) {
case DCCP_SOCKOPT_CCID_RX_INFO:
if (len < sizeof(rx_info))
return -EINVAL;
rx_info.tfrcrx_x_recv = hc->rx_x_recv;
rx_info.tfrcrx_rtt = hc->rx_rtt;
- rx_info.tfrcrx_p = hc->rx_pinv == 0 ? ~0U :
- scaled_div(1, hc->rx_pinv);
+ rx_info.tfrcrx_p = tfrc_invert_loss_event_rate(hc->rx_pinv);
len = sizeof(rx_info);
val = &rx_info;
break;
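Illustrative note, not part of the patch: packet_recv() now derives the RTT sample from the TX-history entry of the acknowledged packet and folds it into tx_rtt with a weight-9 moving average, and t_RTO is kept at no less than 4 * RTT. Below is a user-space sketch of that averaging; the formula mirrors the tfrc_ewma() helper in lib/tfrc.h and the sample values are invented.

#include <stdio.h>
#include <stdint.h>

/* Moving average with weight w in tenths: avg <- (w*avg + (10-w)*new) / 10 */
static uint32_t tfrc_ewma(uint32_t avg, uint32_t newval, uint8_t weight)
{
	return avg ? (weight * avg + (10 - weight) * newval) / 10 : newval;
}

int main(void)
{
	uint32_t rtt = 0;				/* in microseconds */
	uint32_t samples[] = { 40000, 52000, 38000, 41000 };

	for (unsigned i = 0; i < 4; i++) {
		rtt = tfrc_ewma(rtt, samples[i], 9);
		printf("sample=%uus rtt=%uus t_RTO>=%uus\n",
		       samples[i], rtt, 4 * rtt);
	}
	return 0;
}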
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h
index 032635776653..1a9933c29672 100644
--- a/net/dccp/ccids/ccid3.h
+++ b/net/dccp/ccids/ccid3.h
@@ -42,35 +42,36 @@
#include "lib/tfrc.h"
#include "../ccid.h"
-/* Two seconds as per RFC 3448 4.2 */
+/* Two seconds as per RFC 5348, 4.2 */
#define TFRC_INITIAL_TIMEOUT (2 * USEC_PER_SEC)
-/* In usecs - half the scheduling granularity as per RFC3448 4.6 */
-#define TFRC_OPSYS_HALF_TIME_GRAN (USEC_PER_SEC / (2 * HZ))
-
/* Parameter t_mbi from [RFC 3448, 4.3]: backoff interval in seconds */
#define TFRC_T_MBI 64
+/*
+ * The t_delta parameter (RFC 5348, 8.3): delays of less than %USEC_PER_MSEC are
+ * rounded down to 0, since sk_reset_timer() here uses millisecond granularity.
+ * Hence we can use a constant t_delta = %USEC_PER_MSEC when HZ >= 500. A coarse
+ * resolution of HZ < 500 means that the error is below one timer tick (t_gran)
+ * when using the constant t_delta = t_gran / 2 = %USEC_PER_SEC / (2 * HZ).
+ */
+#if (HZ >= 500)
+# define TFRC_T_DELTA USEC_PER_MSEC
+#else
+# define TFRC_T_DELTA (USEC_PER_SEC / (2 * HZ))
+#endif
+
enum ccid3_options {
TFRC_OPT_LOSS_EVENT_RATE = 192,
TFRC_OPT_LOSS_INTERVALS = 193,
TFRC_OPT_RECEIVE_RATE = 194,
};
-struct ccid3_options_received {
- u64 ccid3or_seqno:48,
- ccid3or_loss_intervals_idx:16;
- u16 ccid3or_loss_intervals_len;
- u32 ccid3or_loss_event_rate;
- u32 ccid3or_receive_rate;
-};
-
/* TFRC sender states */
enum ccid3_hc_tx_states {
TFRC_SSTATE_NO_SENT = 1,
TFRC_SSTATE_NO_FBACK,
TFRC_SSTATE_FBACK,
- TFRC_SSTATE_TERM,
};
/**
@@ -90,19 +91,16 @@ enum ccid3_hc_tx_states {
* @tx_no_feedback_timer: Handle to no feedback timer
* @tx_t_ld: Time last doubled during slow start
* @tx_t_nom: Nominal send time of next packet
- * @tx_delta: Send timer delta (RFC 3448, 4.6) in usecs
* @tx_hist: Packet history
- * @tx_options_received: Parsed set of retrieved options
*/
struct ccid3_hc_tx_sock {
- struct tfrc_tx_info tx_tfrc;
-#define tx_x tx_tfrc.tfrctx_x
-#define tx_x_recv tx_tfrc.tfrctx_x_recv
-#define tx_x_calc tx_tfrc.tfrctx_x_calc
-#define tx_rtt tx_tfrc.tfrctx_rtt
-#define tx_p tx_tfrc.tfrctx_p
-#define tx_t_rto tx_tfrc.tfrctx_rto
-#define tx_t_ipi tx_tfrc.tfrctx_ipi
+ u64 tx_x;
+ u64 tx_x_recv;
+ u32 tx_x_calc;
+ u32 tx_rtt;
+ u32 tx_p;
+ u32 tx_t_rto;
+ u32 tx_t_ipi;
u16 tx_s;
enum ccid3_hc_tx_states tx_state:8;
u8 tx_last_win_count;
@@ -110,9 +108,7 @@ struct ccid3_hc_tx_sock {
struct timer_list tx_no_feedback_timer;
ktime_t tx_t_ld;
ktime_t tx_t_nom;
- u32 tx_delta;
struct tfrc_tx_hist_entry *tx_hist;
- struct ccid3_options_received tx_options_received;
};
static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
@@ -126,21 +122,16 @@ static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
enum ccid3_hc_rx_states {
TFRC_RSTATE_NO_DATA = 1,
TFRC_RSTATE_DATA,
- TFRC_RSTATE_TERM = 127,
};
/**
* struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket
- * @rx_x_recv: Receiver estimate of send rate (RFC 3448 4.3)
- * @rx_rtt: Receiver estimate of rtt (non-standard)
- * @rx_p: Current loss event rate (RFC 3448 5.4)
* @rx_last_counter: Tracks window counter (RFC 4342, 8.1)
* @rx_state: Receiver state, one of %ccid3_hc_rx_states
* @rx_bytes_recv: Total sum of DCCP payload bytes
* @rx_x_recv: Receiver estimate of send rate (RFC 3448, sec. 4.3)
* @rx_rtt: Receiver estimate of RTT
* @rx_tstamp_last_feedback: Time at which last feedback was sent
- * @rx_tstamp_last_ack: Time at which last feedback was sent
* @rx_hist: Packet history (loss detection + RTT sampling)
* @rx_li_hist: Loss Interval database
* @rx_s: Received packet size in bytes
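Illustrative note, not part of the patch: with tx_delta removed, the send-now-or-wait decision in ccid3_hc_tx_send_packet() compares the remaining delay until t_nom against the compile-time TFRC_T_DELTA introduced above. A small sketch of that decision, with HZ and the delay values assumed; times are in microseconds and the returned wait is in milliseconds, matching the CCID send_packet() convention.

#include <stdio.h>

#define HZ		1000			/* assumed kernel tick rate */
#define USEC_PER_SEC	1000000L
#define USEC_PER_MSEC	1000L

#if (HZ >= 500)
# define TFRC_T_DELTA	USEC_PER_MSEC
#else
# define TFRC_T_DELTA	(USEC_PER_SEC / (2 * HZ))
#endif

/* Return 0 to send immediately, otherwise the milliseconds left to wait. */
static long tfrc_schedule(long delay_us)	/* delay_us = t_nom - t_now */
{
	if (delay_us >= TFRC_T_DELTA)
		return delay_us / USEC_PER_MSEC;
	return 0;
}

int main(void)
{
	printf("t_delta = %ldus\n", (long)TFRC_T_DELTA);
	printf("delay 1500us -> wait %ldms\n", tfrc_schedule(1500));	/* 1 */
	printf("delay  900us -> wait %ldms\n", tfrc_schedule(900));	/* 0, send now */
	return 0;
}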
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 8fc3cbf79071..497723c4d4bb 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -116,7 +116,7 @@ u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *skb)
cur->li_length = len;
tfrc_lh_calc_i_mean(lh);
- return (lh->i_mean < old_i_mean);
+ return lh->i_mean < old_i_mean;
}
/* Determine if `new_loss' does begin a new loss interval [RFC 4342, 10.2] */
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index 3a4f414e94a0..de8fe294bf0b 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -38,18 +38,6 @@
#include "packet_history.h"
#include "../../dccp.h"
-/**
- * tfrc_tx_hist_entry - Simple singly-linked TX history list
- * @next: next oldest entry (LIFO order)
- * @seqno: sequence number of this entry
- * @stamp: send time of packet with sequence number @seqno
- */
-struct tfrc_tx_hist_entry {
- struct tfrc_tx_hist_entry *next;
- u64 seqno;
- ktime_t stamp;
-};
-
/*
* Transmitter History Routines
*/
@@ -71,15 +59,6 @@ void tfrc_tx_packet_history_exit(void)
}
}
-static struct tfrc_tx_hist_entry *
- tfrc_tx_hist_find_entry(struct tfrc_tx_hist_entry *head, u64 seqno)
-{
- while (head != NULL && head->seqno != seqno)
- head = head->next;
-
- return head;
-}
-
int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno)
{
struct tfrc_tx_hist_entry *entry = kmem_cache_alloc(tfrc_tx_hist_slab, gfp_any());
@@ -107,24 +86,6 @@ void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp)
*headp = NULL;
}
-u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno,
- const ktime_t now)
-{
- u32 rtt = 0;
- struct tfrc_tx_hist_entry *packet = tfrc_tx_hist_find_entry(head, seqno);
-
- if (packet != NULL) {
- rtt = ktime_us_delta(now, packet->stamp);
- /*
- * Garbage-collect older (irrelevant) entries:
- */
- tfrc_tx_hist_purge(&packet->next);
- }
-
- return rtt;
-}
-
-
/*
* Receiver History Routines
*/
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 7df6c5299999..7ee4a9d9d335 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -40,12 +40,28 @@
#include <linux/slab.h>
#include "tfrc.h"
-struct tfrc_tx_hist_entry;
+/**
+ * tfrc_tx_hist_entry - Simple singly-linked TX history list
+ * @next: next oldest entry (LIFO order)
+ * @seqno: sequence number of this entry
+ * @stamp: send time of packet with sequence number @seqno
+ */
+struct tfrc_tx_hist_entry {
+ struct tfrc_tx_hist_entry *next;
+ u64 seqno;
+ ktime_t stamp;
+};
+
+static inline struct tfrc_tx_hist_entry *
+ tfrc_tx_hist_find_entry(struct tfrc_tx_hist_entry *head, u64 seqno)
+{
+ while (head != NULL && head->seqno != seqno)
+ head = head->next;
+ return head;
+}
extern int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno);
extern void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp);
-extern u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head,
- const u64 seqno, const ktime_t now);
/* Subtraction a-b modulo-16, respects circular wrap-around */
#define SUB16(a, b) (((a) + 16 - (b)) & 0xF)
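Illustrative note, not part of the patch: moving tfrc_tx_hist_entry and tfrc_tx_hist_find_entry() into this header lets CCID-3 locate the acknowledged packet itself and purge everything older, as ccid3_hc_tx_packet_recv() now does. A self-contained user-space sketch of that newest-first history list (the names and timestamps are invented):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Newest-first (LIFO) send history, one entry per packet sent. */
struct hist_entry {
	struct hist_entry *next;
	uint64_t seqno;
	long stamp;			/* stand-in for the ktime_t send time */
};

static int hist_add(struct hist_entry **headp, uint64_t seqno, long stamp)
{
	struct hist_entry *e = malloc(sizeof(*e));

	if (!e)
		return -1;
	e->seqno = seqno;
	e->stamp = stamp;
	e->next  = *headp;		/* push in front of the newest entry */
	*headp   = e;
	return 0;
}

static struct hist_entry *hist_find(struct hist_entry *head, uint64_t seqno)
{
	while (head && head->seqno != seqno)
		head = head->next;
	return head;
}

static void hist_purge(struct hist_entry **headp)
{
	struct hist_entry *head = *headp, *next;

	for (; head; head = next) {
		next = head->next;
		free(head);
	}
	*headp = NULL;
}

int main(void)
{
	struct hist_entry *hist = NULL, *acked;

	for (uint64_t s = 1; s <= 5; s++)
		hist_add(&hist, s, 100 * (long)s);

	acked = hist_find(hist, 4);		/* an Ack for seqno 4 arrives */
	if (acked) {
		printf("RTT sample from seqno %llu, stamp %ld\n",
		       (unsigned long long)acked->seqno, acked->stamp);
		hist_purge(&acked->next);	/* drop all older entries */
	}
	hist_purge(&hist);
	return 0;
}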
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index 01bb48e96c2e..f8ee3f549770 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -57,6 +57,7 @@ static inline u32 tfrc_ewma(const u32 avg, const u32 newval, const u8 weight)
extern u32 tfrc_calc_x(u16 s, u32 R, u32 p);
extern u32 tfrc_calc_x_reverse_lookup(u32 fvalue);
+extern u32 tfrc_invert_loss_event_rate(u32 loss_event_rate);
extern int tfrc_tx_packet_history_init(void);
extern void tfrc_tx_packet_history_exit(void);
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index 22ca1cf0eb55..a052a4377e26 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -687,3 +687,17 @@ u32 tfrc_calc_x_reverse_lookup(u32 fvalue)
index = tfrc_binsearch(fvalue, 0);
return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE;
}
+
+/**
+ * tfrc_invert_loss_event_rate - Compute p so that 10^6 corresponds to 100%
+ * When @loss_event_rate is large, there is a chance that p is truncated to 0.
+ * To avoid re-entering slow-start in that case, we set p = TFRC_SMALLEST_P > 0.
+ */
+u32 tfrc_invert_loss_event_rate(u32 loss_event_rate)
+{
+ if (loss_event_rate == UINT_MAX) /* see RFC 4342, 8.5 */
+ return 0;
+ if (unlikely(loss_event_rate == 0)) /* map 1/0 into 100% */
+ return 1000000;
+ return max_t(u32, scaled_div(1, loss_event_rate), TFRC_SMALLEST_P);
+}
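Illustrative note, not part of the patch: tfrc_invert_loss_event_rate() maps the inverse loss-event rate carried in the option to the fixpoint p used by the sender, with 10^6 meaning 100%. A user-space sketch of that mapping follows; the TFRC_SMALLEST_P value below is a made-up placeholder, whereas the kernel derives it from its lookup table.

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define TFRC_SMALLEST_P	40	/* placeholder for the smallest representable p */

/* Map the inverse loss-event rate to p, where 10^6 corresponds to 100%. */
static uint32_t invert_loss_event_rate(uint32_t pinv)
{
	uint32_t p;

	if (pinv == UINT_MAX)		/* "no loss observed", RFC 4342, 8.5 */
		return 0;
	if (pinv == 0)			/* map 1/0 into 100% */
		return 1000000;
	p = 1000000 / pinv;
	return p ? p : TFRC_SMALLEST_P;	/* avoid truncating p to zero */
}

int main(void)
{
	uint32_t pinvs[] = { UINT_MAX, 0, 1, 100, 5000000 };

	for (unsigned i = 0; i < 5; i++)
		printf("pinv=%u -> p=%u\n", pinvs[i],
		       invert_loss_event_rate(pinvs[i]));
	return 0;
}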
diff --git a/net/dccp/options.c b/net/dccp/options.c
index bfda087bd90d..92718511eac5 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -96,18 +96,11 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
}
/*
- * CCID-Specific Options (from RFC 4340, sec. 10.3):
- *
- * Option numbers 128 through 191 are for options sent from the
- * HC-Sender to the HC-Receiver; option numbers 192 through 255
- * are for options sent from the HC-Receiver to the HC-Sender.
- *
* CCID-specific options are ignored during connection setup, as
* negotiation may still be in progress (see RFC 4340, 10.3).
* The same applies to Ack Vectors, as these depend on the CCID.
- *
*/
- if (dreq != NULL && (opt >= 128 ||
+ if (dreq != NULL && (opt >= DCCPO_MIN_RX_CCID_SPECIFIC ||
opt == DCCPO_ACK_VECTOR_0 || opt == DCCPO_ACK_VECTOR_1))
goto ignore_option;
@@ -226,23 +219,15 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
dccp_pr_debug("%s rx opt: ELAPSED_TIME=%d\n",
dccp_role(sk), elapsed_time);
break;
- case 128 ... 191: {
- const u16 idx = value - options;
-
+ case DCCPO_MIN_RX_CCID_SPECIFIC ... DCCPO_MAX_RX_CCID_SPECIFIC:
if (ccid_hc_rx_parse_options(dp->dccps_hc_rx_ccid, sk,
- opt, len, idx,
- value) != 0)
+ pkt_type, opt, value, len))
goto out_invalid_option;
- }
break;
- case 192 ... 255: {
- const u16 idx = value - options;
-
+ case DCCPO_MIN_TX_CCID_SPECIFIC ... DCCPO_MAX_TX_CCID_SPECIFIC:
if (ccid_hc_tx_parse_options(dp->dccps_hc_tx_ccid, sk,
- opt, len, idx,
- value) != 0)
+ pkt_type, opt, value, len))
goto out_invalid_option;
- }
break;
default:
DCCP_CRIT("DCCP(%p): option %d(len=%d) not "
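Illustrative note, not part of the patch: the rewritten switch names the two CCID-specific ranges from RFC 4340, 10.3 instead of using bare numbers; options 128..191 go to the local RX half-connection CCID, 192..255 to the TX half-connection CCID. A tiny sketch of that dispatch, using the RFC values directly and the same GCC case-range extension the kernel relies on:

#include <stdio.h>
#include <stdint.h>

enum { MIN_RX_CCID_SPECIFIC = 128, MAX_RX_CCID_SPECIFIC = 191,
       MIN_TX_CCID_SPECIFIC = 192, MAX_TX_CCID_SPECIFIC = 255 };

static const char *dispatch(uint8_t opt)
{
	switch (opt) {
	case MIN_RX_CCID_SPECIFIC ... MAX_RX_CCID_SPECIFIC:
		return "rx half-connection CCID";	/* sent by the peer's HC-Sender */
	case MIN_TX_CCID_SPECIFIC ... MAX_TX_CCID_SPECIFIC:
		return "tx half-connection CCID";	/* sent by the peer's HC-Receiver */
	default:
		return "core DCCP option";
	}
}

int main(void)
{
	uint8_t opts[] = { 44, 128, 192, 194, 255 };

	for (unsigned i = 0; i < 5; i++)
		printf("option %3u -> %s\n", opts[i], dispatch(opts[i]));
	return 0;
}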
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index baeb1eaf011b..2ef115277bea 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -693,22 +693,22 @@ void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
aux = scp->accessdata.acc_userl;
*skb_put(skb, 1) = aux;
if (aux > 0)
- memcpy(skb_put(skb, aux), scp->accessdata.acc_user, aux);
+ memcpy(skb_put(skb, aux), scp->accessdata.acc_user, aux);
aux = scp->accessdata.acc_passl;
*skb_put(skb, 1) = aux;
if (aux > 0)
- memcpy(skb_put(skb, aux), scp->accessdata.acc_pass, aux);
+ memcpy(skb_put(skb, aux), scp->accessdata.acc_pass, aux);
aux = scp->accessdata.acc_accl;
*skb_put(skb, 1) = aux;
if (aux > 0)
- memcpy(skb_put(skb, aux), scp->accessdata.acc_acc, aux);
+ memcpy(skb_put(skb, aux), scp->accessdata.acc_acc, aux);
aux = (__u8)le16_to_cpu(scp->conndata_out.opt_optl);
*skb_put(skb, 1) = aux;
if (aux > 0)
- memcpy(skb_put(skb,aux), scp->conndata_out.opt_data, aux);
+ memcpy(skb_put(skb, aux), scp->conndata_out.opt_data, aux);
scp->persist = dn_nsp_persist(sk);
scp->persist_fxn = dn_nsp_retrans_conninit;
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index dc54bd0d083b..f8c1ae4b41f0 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -392,7 +392,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
dev_queue_xmit(skb);
dev_put(dev);
mutex_unlock(&econet_mutex);
- return(len);
+ return len;
out_free:
kfree_skb(skb);
@@ -637,7 +637,7 @@ static int econet_create(struct net *net, struct socket *sock, int protocol,
eo->num = protocol;
econet_insert_socket(&econet_sklist, sk);
- return(0);
+ return 0;
out:
return err;
}
@@ -1009,7 +1009,6 @@ static int __init aun_udp_initialise(void)
struct sockaddr_in sin;
skb_queue_head_init(&aun_queue);
- spin_lock_init(&aun_queue_lock);
setup_timer(&ab_cleanup_timer, ab_cleanup, 0);
ab_cleanup_timer.expires = jiffies + (HZ*2);
add_timer(&ab_cleanup_timer);
@@ -1167,7 +1166,6 @@ static int __init econet_proto_init(void)
goto out;
sock_register(&econet_family_ops);
#ifdef CONFIG_ECONET_AUNUDP
- spin_lock_init(&aun_queue_lock);
aun_udp_initialise();
#endif
#ifdef CONFIG_ECONET_NATIVE
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 215c83986a9d..f00ef2f1d814 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -367,7 +367,7 @@ struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
EXPORT_SYMBOL(alloc_etherdev_mq);
static size_t _format_mac_addr(char *buf, int buflen,
- const unsigned char *addr, int len)
+ const unsigned char *addr, int len)
{
int i;
char *cp = buf;
@@ -376,7 +376,7 @@ static size_t _format_mac_addr(char *buf, int buflen,
cp += scnprintf(cp, buflen - (cp - buf), "%02x", addr[i]);
if (i == len - 1)
break;
- cp += strlcpy(cp, ":", buflen - (cp - buf));
+ cp += scnprintf(cp, buflen - (cp - buf), ":");
}
return cp - buf;
}
@@ -386,7 +386,7 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
size_t l;
l = _format_mac_addr(buf, PAGE_SIZE, addr, len);
- l += strlcpy(buf + l, "\n", PAGE_SIZE - l);
- return ((ssize_t) l);
+ l += scnprintf(buf + l, PAGE_SIZE - l, "\n");
+ return (ssize_t)l;
}
EXPORT_SYMBOL(sysfs_format_mac);
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 7cd7760144f7..e848e6c062cd 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -215,9 +215,15 @@ config NET_IPIP
be inserted in and removed from the running kernel whenever you
want). Most people won't need this and can say N.
+config NET_IPGRE_DEMUX
+ tristate "IP: GRE demultiplexer"
+ help
+ This is a helper module that demultiplexes GRE packets on the GRE version field.
+ It is required by the ip_gre and pptp modules.
+
config NET_IPGRE
tristate "IP: GRE tunnels over IP"
- depends on IPV6 || IPV6=n
+ depends on (IPV6 || IPV6=n) && NET_IPGRE_DEMUX
help
Tunneling means encapsulating data of one protocol type within
another protocol and sending it over a channel that understands the
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 80ff87ce43aa..4978d22f9a75 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
obj-$(CONFIG_IP_MROUTE) += ipmr.o
obj-$(CONFIG_NET_IPIP) += ipip.o
+obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o
obj-$(CONFIG_NET_IPGRE) += ip_gre.o
obj-$(CONFIG_SYN_COOKIES) += syncookies.o
obj-$(CONFIG_INET_AH) += ah4.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 6a1100c25a9f..f581f77d1097 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -227,18 +227,16 @@ EXPORT_SYMBOL(inet_ehash_secret);
/*
* inet_ehash_secret must be set exactly once
- * Instead of using a dedicated spinlock, we (ab)use inetsw_lock
*/
void build_ehash_secret(void)
{
u32 rnd;
+
do {
get_random_bytes(&rnd, sizeof(rnd));
} while (rnd == 0);
- spin_lock_bh(&inetsw_lock);
- if (!inet_ehash_secret)
- inet_ehash_secret = rnd;
- spin_unlock_bh(&inetsw_lock);
+
+ cmpxchg(&inet_ehash_secret, 0, rnd);
}
EXPORT_SYMBOL(build_ehash_secret);
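Illustrative note, not part of the patch: build_ehash_secret() now relies on cmpxchg() so that only the first non-zero random value is stored, without (ab)using inetsw_lock. A user-space sketch of the same store-once idiom using the GCC __sync builtin (the names here are invented):

#include <stdio.h>
#include <stdint.h>

static uint32_t ehash_secret;

/* Set the secret exactly once; later callers leave the first value in place. */
static void set_secret_once(uint32_t rnd)
{
	if (rnd == 0)
		return;
	/* Atomically: if (ehash_secret == 0) ehash_secret = rnd; */
	__sync_val_compare_and_swap(&ehash_secret, 0, rnd);
}

int main(void)
{
	set_secret_once(0xdeadbeef);
	set_secret_once(0x12345678);	/* loses the race, has no effect */
	printf("secret=%08x\n", ehash_secret);
	return 0;
}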
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 96c1955b3e2f..d9031ad67826 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -55,7 +55,7 @@
* Stuart Cheshire : Metricom and grat arp fixes
* *** FOR 2.1 clean this up ***
* Lawrence V. Stefani: (08/12/96) Added FDDI support.
- * Alan Cox : Took the AP1000 nasty FDDI hack and
+ * Alan Cox : Took the AP1000 nasty FDDI hack and
* folded into the mainstream FDDI code.
* Ack spit, Linus how did you allow that
* one in...
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(clip_tbl_hook);
#endif
#include <asm/system.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/netfilter_arp.h>
@@ -161,7 +161,7 @@ static const struct neigh_ops arp_direct_ops = {
.queue_xmit = dev_queue_xmit,
};
-const struct neigh_ops arp_broken_ops = {
+static const struct neigh_ops arp_broken_ops = {
.family = AF_INET,
.solicit = arp_solicit,
.error_report = arp_error_report,
@@ -170,35 +170,34 @@ const struct neigh_ops arp_broken_ops = {
.hh_output = dev_queue_xmit,
.queue_xmit = dev_queue_xmit,
};
-EXPORT_SYMBOL(arp_broken_ops);
struct neigh_table arp_tbl = {
- .family = AF_INET,
- .entry_size = sizeof(struct neighbour) + 4,
- .key_len = 4,
- .hash = arp_hash,
- .constructor = arp_constructor,
- .proxy_redo = parp_redo,
- .id = "arp_cache",
- .parms = {
- .tbl = &arp_tbl,
- .base_reachable_time = 30 * HZ,
- .retrans_time = 1 * HZ,
- .gc_staletime = 60 * HZ,
- .reachable_time = 30 * HZ,
- .delay_probe_time = 5 * HZ,
- .queue_len = 3,
- .ucast_probes = 3,
- .mcast_probes = 3,
- .anycast_delay = 1 * HZ,
- .proxy_delay = (8 * HZ) / 10,
- .proxy_qlen = 64,
- .locktime = 1 * HZ,
+ .family = AF_INET,
+ .entry_size = sizeof(struct neighbour) + 4,
+ .key_len = 4,
+ .hash = arp_hash,
+ .constructor = arp_constructor,
+ .proxy_redo = parp_redo,
+ .id = "arp_cache",
+ .parms = {
+ .tbl = &arp_tbl,
+ .base_reachable_time = 30 * HZ,
+ .retrans_time = 1 * HZ,
+ .gc_staletime = 60 * HZ,
+ .reachable_time = 30 * HZ,
+ .delay_probe_time = 5 * HZ,
+ .queue_len = 3,
+ .ucast_probes = 3,
+ .mcast_probes = 3,
+ .anycast_delay = 1 * HZ,
+ .proxy_delay = (8 * HZ) / 10,
+ .proxy_qlen = 64,
+ .locktime = 1 * HZ,
},
- .gc_interval = 30 * HZ,
- .gc_thresh1 = 128,
- .gc_thresh2 = 512,
- .gc_thresh3 = 1024,
+ .gc_interval = 30 * HZ,
+ .gc_thresh1 = 128,
+ .gc_thresh2 = 512,
+ .gc_thresh3 = 1024,
};
EXPORT_SYMBOL(arp_tbl);
@@ -233,7 +232,7 @@ static u32 arp_hash(const void *pkey, const struct net_device *dev)
static int arp_constructor(struct neighbour *neigh)
{
- __be32 addr = *(__be32*)neigh->primary_key;
+ __be32 addr = *(__be32 *)neigh->primary_key;
struct net_device *dev = neigh->dev;
struct in_device *in_dev;
struct neigh_parms *parms;
@@ -296,16 +295,19 @@ static int arp_constructor(struct neighbour *neigh)
neigh->ops = &arp_broken_ops;
neigh->output = neigh->ops->output;
return 0;
+#else
+ break;
#endif
- ;}
+ }
#endif
if (neigh->type == RTN_MULTICAST) {
neigh->nud_state = NUD_NOARP;
arp_mc_map(addr, neigh->ha, dev, 1);
- } else if (dev->flags&(IFF_NOARP|IFF_LOOPBACK)) {
+ } else if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) {
neigh->nud_state = NUD_NOARP;
memcpy(neigh->ha, dev->dev_addr, dev->addr_len);
- } else if (neigh->type == RTN_BROADCAST || dev->flags&IFF_POINTOPOINT) {
+ } else if (neigh->type == RTN_BROADCAST ||
+ (dev->flags & IFF_POINTOPOINT)) {
neigh->nud_state = NUD_NOARP;
memcpy(neigh->ha, dev->broadcast, dev->addr_len);
}
@@ -315,7 +317,7 @@ static int arp_constructor(struct neighbour *neigh)
else
neigh->ops = &arp_generic_ops;
- if (neigh->nud_state&NUD_VALID)
+ if (neigh->nud_state & NUD_VALID)
neigh->output = neigh->ops->connected_output;
else
neigh->output = neigh->ops->output;
@@ -334,7 +336,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
__be32 saddr = 0;
u8 *dst_ha = NULL;
struct net_device *dev = neigh->dev;
- __be32 target = *(__be32*)neigh->primary_key;
+ __be32 target = *(__be32 *)neigh->primary_key;
int probes = atomic_read(&neigh->probes);
struct in_device *in_dev;
@@ -347,7 +349,8 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
default:
case 0: /* By default announce any local IP */
- if (skb && inet_addr_type(dev_net(dev), ip_hdr(skb)->saddr) == RTN_LOCAL)
+ if (skb && inet_addr_type(dev_net(dev),
+ ip_hdr(skb)->saddr) == RTN_LOCAL)
saddr = ip_hdr(skb)->saddr;
break;
case 1: /* Restrict announcements of saddr in same subnet */
@@ -369,16 +372,21 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
if (!saddr)
saddr = inet_select_addr(dev, target, RT_SCOPE_LINK);
- if ((probes -= neigh->parms->ucast_probes) < 0) {
- if (!(neigh->nud_state&NUD_VALID))
- printk(KERN_DEBUG "trying to ucast probe in NUD_INVALID\n");
+ probes -= neigh->parms->ucast_probes;
+ if (probes < 0) {
+ if (!(neigh->nud_state & NUD_VALID))
+ printk(KERN_DEBUG
+ "trying to ucast probe in NUD_INVALID\n");
dst_ha = neigh->ha;
read_lock_bh(&neigh->lock);
- } else if ((probes -= neigh->parms->app_probes) < 0) {
+ } else {
+ probes -= neigh->parms->app_probes;
+ if (probes < 0) {
#ifdef CONFIG_ARPD
- neigh_app_ns(neigh);
+ neigh_app_ns(neigh);
#endif
- return;
+ return;
+ }
}
arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
@@ -451,7 +459,8 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
* is allowed to use this function, it is scheduled to be removed. --ANK
*/
-static int arp_set_predefined(int addr_hint, unsigned char * haddr, __be32 paddr, struct net_device * dev)
+static int arp_set_predefined(int addr_hint, unsigned char *haddr,
+ __be32 paddr, struct net_device *dev)
{
switch (addr_hint) {
case RTN_LOCAL:
@@ -483,7 +492,8 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb)
paddr = skb_rtable(skb)->rt_gateway;
- if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr, paddr, dev))
+ if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr,
+ paddr, dev))
return 0;
n = __neigh_lookup(&arp_tbl, &paddr, dev, 1);
@@ -515,13 +525,14 @@ int arp_bind_neighbour(struct dst_entry *dst)
return -EINVAL;
if (n == NULL) {
__be32 nexthop = ((struct rtable *)dst)->rt_gateway;
- if (dev->flags&(IFF_LOOPBACK|IFF_POINTOPOINT))
+ if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
nexthop = 0;
n = __neigh_lookup_errno(
#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
- dev->type == ARPHRD_ATM ? clip_tbl_hook :
+ dev->type == ARPHRD_ATM ?
+ clip_tbl_hook :
#endif
- &arp_tbl, &nexthop, dev);
+ &arp_tbl, &nexthop, dev);
if (IS_ERR(n))
return PTR_ERR(n);
dst->neighbour = n;
@@ -543,8 +554,8 @@ static inline int arp_fwd_proxy(struct in_device *in_dev,
if (!IN_DEV_PROXY_ARP(in_dev))
return 0;
-
- if ((imi = IN_DEV_MEDIUM_ID(in_dev)) == 0)
+ imi = IN_DEV_MEDIUM_ID(in_dev);
+ if (imi == 0)
return 1;
if (imi == -1)
return 0;
@@ -555,7 +566,7 @@ static inline int arp_fwd_proxy(struct in_device *in_dev,
if (out_dev)
omi = IN_DEV_MEDIUM_ID(out_dev);
- return (omi != imi && omi != -1);
+ return omi != imi && omi != -1;
}
/*
@@ -685,7 +696,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
arp->ar_pln = 4;
arp->ar_op = htons(type);
- arp_ptr=(unsigned char *)(arp+1);
+ arp_ptr = (unsigned char *)(arp + 1);
memcpy(arp_ptr, src_hw, dev->addr_len);
arp_ptr += dev->addr_len;
@@ -735,9 +746,8 @@ void arp_send(int type, int ptype, __be32 dest_ip,
skb = arp_create(type, ptype, dest_ip, dev, src_ip,
dest_hw, src_hw, target_hw);
- if (skb == NULL) {
+ if (skb == NULL)
return;
- }
arp_xmit(skb);
}
@@ -815,7 +825,7 @@ static int arp_process(struct sk_buff *skb)
/*
* Extract fields
*/
- arp_ptr= (unsigned char *)(arp+1);
+ arp_ptr = (unsigned char *)(arp + 1);
sha = arp_ptr;
arp_ptr += dev->addr_len;
memcpy(&sip, arp_ptr, 4);
@@ -869,16 +879,17 @@ static int arp_process(struct sk_buff *skb)
addr_type = rt->rt_type;
if (addr_type == RTN_LOCAL) {
- int dont_send = 0;
+ int dont_send;
- if (!dont_send)
- dont_send |= arp_ignore(in_dev,sip,tip);
+ dont_send = arp_ignore(in_dev, sip, tip);
if (!dont_send && IN_DEV_ARPFILTER(in_dev))
- dont_send |= arp_filter(sip,tip,dev);
+ dont_send |= arp_filter(sip, tip, dev);
if (!dont_send) {
n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
if (n) {
- arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
+ arp_send(ARPOP_REPLY, ETH_P_ARP, sip,
+ dev, tip, sha, dev->dev_addr,
+ sha);
neigh_release(n);
}
}
@@ -887,8 +898,7 @@ static int arp_process(struct sk_buff *skb)
if (addr_type == RTN_UNICAST &&
(arp_fwd_proxy(in_dev, dev, rt) ||
arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
- pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))
- {
+ pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
if (n)
neigh_release(n);
@@ -896,9 +906,12 @@ static int arp_process(struct sk_buff *skb)
if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
skb->pkt_type == PACKET_HOST ||
in_dev->arp_parms->proxy_delay == 0) {
- arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
+ arp_send(ARPOP_REPLY, ETH_P_ARP, sip,
+ dev, tip, sha, dev->dev_addr,
+ sha);
} else {
- pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb);
+ pneigh_enqueue(&arp_tbl,
+ in_dev->arp_parms, skb);
return 0;
}
goto out;
@@ -939,7 +952,8 @@ static int arp_process(struct sk_buff *skb)
if (arp->ar_op != htons(ARPOP_REPLY) ||
skb->pkt_type != PACKET_HOST)
state = NUD_STALE;
- neigh_update(n, sha, state, override ? NEIGH_UPDATE_F_OVERRIDE : 0);
+ neigh_update(n, sha, state,
+ override ? NEIGH_UPDATE_F_OVERRIDE : 0);
neigh_release(n);
}
@@ -975,7 +989,8 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
arp->ar_pln != 4)
goto freeskb;
- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (skb == NULL)
goto out_of_mem;
memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
@@ -1019,7 +1034,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
return -EINVAL;
if (!dev && (r->arp_flags & ATF_COM)) {
dev = dev_getbyhwaddr(net, r->arp_ha.sa_family,
- r->arp_ha.sa_data);
+ r->arp_ha.sa_data);
if (!dev)
return -ENODEV;
}
@@ -1033,7 +1048,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
}
static int arp_req_set(struct net *net, struct arpreq *r,
- struct net_device * dev)
+ struct net_device *dev)
{
__be32 ip;
struct neighbour *neigh;
@@ -1046,10 +1061,11 @@ static int arp_req_set(struct net *net, struct arpreq *r,
if (r->arp_flags & ATF_PERM)
r->arp_flags |= ATF_COM;
if (dev == NULL) {
- struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ip,
- .tos = RTO_ONLINK } } };
- struct rtable * rt;
- if ((err = ip_route_output_key(net, &rt, &fl)) != 0)
+ struct flowi fl = { .nl_u.ip4_u = { .daddr = ip,
+ .tos = RTO_ONLINK } };
+ struct rtable *rt;
+ err = ip_route_output_key(net, &rt, &fl);
+ if (err != 0)
return err;
dev = rt->dst.dev;
ip_rt_put(rt);
@@ -1083,9 +1099,9 @@ static int arp_req_set(struct net *net, struct arpreq *r,
unsigned state = NUD_STALE;
if (r->arp_flags & ATF_PERM)
state = NUD_PERMANENT;
- err = neigh_update(neigh, (r->arp_flags&ATF_COM) ?
+ err = neigh_update(neigh, (r->arp_flags & ATF_COM) ?
r->arp_ha.sa_data : NULL, state,
- NEIGH_UPDATE_F_OVERRIDE|
+ NEIGH_UPDATE_F_OVERRIDE |
NEIGH_UPDATE_F_ADMIN);
neigh_release(neigh);
}
@@ -1094,12 +1110,12 @@ static int arp_req_set(struct net *net, struct arpreq *r,
static unsigned arp_state_to_flags(struct neighbour *neigh)
{
- unsigned flags = 0;
if (neigh->nud_state&NUD_PERMANENT)
- flags = ATF_PERM|ATF_COM;
+ return ATF_PERM | ATF_COM;
else if (neigh->nud_state&NUD_VALID)
- flags = ATF_COM;
- return flags;
+ return ATF_COM;
+ else
+ return 0;
}
/*
@@ -1142,7 +1158,7 @@ static int arp_req_delete_public(struct net *net, struct arpreq *r,
}
static int arp_req_delete(struct net *net, struct arpreq *r,
- struct net_device * dev)
+ struct net_device *dev)
{
int err;
__be32 ip;
@@ -1153,10 +1169,11 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
if (dev == NULL) {
- struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ip,
- .tos = RTO_ONLINK } } };
- struct rtable * rt;
- if ((err = ip_route_output_key(net, &rt, &fl)) != 0)
+ struct flowi fl = { .nl_u.ip4_u = { .daddr = ip,
+ .tos = RTO_ONLINK } };
+ struct rtable *rt;
+ err = ip_route_output_key(net, &rt, &fl);
+ if (err != 0)
return err;
dev = rt->dst.dev;
ip_rt_put(rt);
@@ -1166,7 +1183,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
err = -ENXIO;
neigh = neigh_lookup(&arp_tbl, &ip, dev);
if (neigh) {
- if (neigh->nud_state&~NUD_NOARP)
+ if (neigh->nud_state & ~NUD_NOARP)
err = neigh_update(neigh, NULL, NUD_FAILED,
NEIGH_UPDATE_F_OVERRIDE|
NEIGH_UPDATE_F_ADMIN);
@@ -1186,24 +1203,24 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
struct net_device *dev = NULL;
switch (cmd) {
- case SIOCDARP:
- case SIOCSARP:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- case SIOCGARP:
- err = copy_from_user(&r, arg, sizeof(struct arpreq));
- if (err)
- return -EFAULT;
- break;
- default:
- return -EINVAL;
+ case SIOCDARP:
+ case SIOCSARP:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ case SIOCGARP:
+ err = copy_from_user(&r, arg, sizeof(struct arpreq));
+ if (err)
+ return -EFAULT;
+ break;
+ default:
+ return -EINVAL;
}
if (r.arp_pa.sa_family != AF_INET)
return -EPFNOSUPPORT;
if (!(r.arp_flags & ATF_PUBL) &&
- (r.arp_flags & (ATF_NETMASK|ATF_DONTPUB)))
+ (r.arp_flags & (ATF_NETMASK | ATF_DONTPUB)))
return -EINVAL;
if (!(r.arp_flags & ATF_NETMASK))
((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
@@ -1211,7 +1228,8 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
rtnl_lock();
if (r.arp_dev[0]) {
err = -ENODEV;
- if ((dev = __dev_get_by_name(net, r.arp_dev)) == NULL)
+ dev = __dev_get_by_name(net, r.arp_dev);
+ if (dev == NULL)
goto out;
/* Mmmm... It is wrong... ARPHRD_NETROM==0 */
@@ -1243,7 +1261,8 @@ out:
return err;
}
-static int arp_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
+static int arp_netdev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
{
struct net_device *dev = ptr;
@@ -1311,12 +1330,13 @@ static char *ax2asc2(ax25_address *a, char *buf)
for (n = 0, s = buf; n < 6; n++) {
c = (a->ax25_call[n] >> 1) & 0x7F;
- if (c != ' ') *s++ = c;
+ if (c != ' ')
+ *s++ = c;
}
*s++ = '-';
-
- if ((n = ((a->ax25_call[6] >> 1) & 0x0F)) > 9) {
+ n = (a->ax25_call[6] >> 1) & 0x0F;
+ if (n > 9) {
*s++ = '1';
n -= 10;
}
@@ -1325,10 +1345,9 @@ static char *ax2asc2(ax25_address *a, char *buf)
*s++ = '\0';
if (*buf == '\0' || *buf == '-')
- return "*";
+ return "*";
return buf;
-
}
#endif /* CONFIG_AX25 */
@@ -1408,10 +1427,10 @@ static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
/* ------------------------------------------------------------------------ */
static const struct seq_operations arp_seq_ops = {
- .start = arp_seq_start,
- .next = neigh_seq_next,
- .stop = neigh_seq_stop,
- .show = arp_seq_show,
+ .start = arp_seq_start,
+ .next = neigh_seq_next,
+ .stop = neigh_seq_stop,
+ .show = arp_seq_show,
};
static int arp_seq_open(struct inode *inode, struct file *file)
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 721a8a37b45c..174be6caa5c8 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -73,6 +73,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
inet->inet_id = jiffies;
sk_dst_set(sk, &rt->dst);
- return(0);
+ return 0;
}
EXPORT_SYMBOL(ip4_datagram_connect);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index da14c49284f4..c2ff48fa18c7 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -209,7 +209,7 @@ static void inetdev_destroy(struct in_device *in_dev)
inet_free_ifa(ifa);
}
- dev->ip_ptr = NULL;
+ rcu_assign_pointer(dev->ip_ptr, NULL);
devinet_sysctl_unregister(in_dev);
neigh_parms_release(&arp_tbl, in_dev->arp_parms);
@@ -1059,7 +1059,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
switch (event) {
case NETDEV_REGISTER:
printk(KERN_DEBUG "inetdev_event: bug\n");
- dev->ip_ptr = NULL;
+ rcu_assign_pointer(dev->ip_ptr, NULL);
break;
case NETDEV_UP:
if (!inetdev_valid_mtu(dev->mtu))
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 7d02a9f999fa..4a69a957872b 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -147,35 +147,40 @@ static void fib_flush(struct net *net)
rt_cache_flush(net, -1);
}
-/*
- * Find the first device with a given source address.
+/**
+ * __ip_dev_find - find the first device with a given source address.
+ * @net: the net namespace
+ * @addr: the source address
+ * @devref: if true, take a reference on the found device
+ *
+ * If a caller uses devref=false, it should be protected by RCU
*/
-
-struct net_device * ip_dev_find(struct net *net, __be32 addr)
+struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
{
- struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } };
- struct fib_result res;
+ struct flowi fl = {
+ .nl_u = {
+ .ip4_u = {
+ .daddr = addr
+ }
+ },
+ .flags = FLOWI_FLAG_MATCH_ANY_IIF
+ };
+ struct fib_result res = { 0 };
struct net_device *dev = NULL;
- struct fib_table *local_table;
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- res.r = NULL;
-#endif
-
- local_table = fib_get_table(net, RT_TABLE_LOCAL);
- if (!local_table || fib_table_lookup(local_table, &fl, &res))
+ if (fib_lookup(net, &fl, &res))
return NULL;
if (res.type != RTN_LOCAL)
goto out;
dev = FIB_RES_DEV(res);
- if (dev)
+ if (dev && devref)
dev_hold(dev);
out:
fib_res_put(&res);
return dev;
}
-EXPORT_SYMBOL(ip_dev_find);
+EXPORT_SYMBOL(__ip_dev_find);
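For illustration only (not part of this patch): a caller passing devref=false must itself hold the RCU read lock, as the kernel-doc above requires. In the fragment below, net, addr and ifindex stand for the caller's own variables:

	rcu_read_lock();
	dev = __ip_dev_find(net, addr, false);
	if (dev)
		ifindex = dev->ifindex;	/* no reference held: use dev here, do not sleep */
	rcu_read_unlock();

A caller that needs the device after unlocking should pass devref=true and release it with dev_put() when done.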
/*
* Find address type as if only "dev" was present in the system. If
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 4a8e370862bc..a96e5ec211a0 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -186,9 +186,7 @@ static inline struct tnode *node_parent_rcu(struct node *node)
{
struct tnode *ret = node_parent(node);
- return rcu_dereference_check(ret,
- rcu_read_lock_held() ||
- lockdep_rtnl_is_held());
+ return rcu_dereference_rtnl(ret);
}
/* Same as rcu_assign_pointer
@@ -211,9 +209,7 @@ static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
{
struct node *ret = tnode_get_child(tn, i);
- return rcu_dereference_check(ret,
- rcu_read_lock_held() ||
- lockdep_rtnl_is_held());
+ return rcu_dereference_rtnl(ret);
}
static inline int tnode_child_length(const struct tnode *tn)
@@ -459,8 +455,8 @@ static struct tnode *tnode_new(t_key key, int pos, int bits)
tn->empty_children = 1<<bits;
}
- pr_debug("AT %p s=%u %lu\n", tn, (unsigned int) sizeof(struct tnode),
- (unsigned long) (sizeof(struct node) << bits));
+ pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
+ sizeof(struct node) << bits);
return tn;
}
@@ -609,11 +605,10 @@ static struct node *resize(struct trie *t, struct tnode *tn)
/* Keep root node larger */
- if (!node_parent((struct node*) tn)) {
+ if (!node_parent((struct node *)tn)) {
inflate_threshold_use = inflate_threshold_root;
halve_threshold_use = halve_threshold_root;
- }
- else {
+ } else {
inflate_threshold_use = inflate_threshold;
halve_threshold_use = halve_threshold;
}
@@ -639,7 +634,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
check_tnode(tn);
/* Return if at least one inflate is run */
- if( max_work != MAX_WORK)
+ if (max_work != MAX_WORK)
return (struct node *) tn;
/*
@@ -966,9 +961,7 @@ fib_find_node(struct trie *t, u32 key)
struct node *n;
pos = 0;
- n = rcu_dereference_check(t->trie,
- rcu_read_lock_held() ||
- lockdep_rtnl_is_held());
+ n = rcu_dereference_rtnl(t->trie);
while (n != NULL && NODE_TYPE(n) == T_TNODE) {
tn = (struct tnode *) n;
@@ -1748,16 +1741,14 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
/* Node empty, walk back up to parent */
c = (struct node *) p;
- } while ( (p = node_parent_rcu(c)) != NULL);
+ } while ((p = node_parent_rcu(c)) != NULL);
return NULL; /* Root of trie */
}
static struct leaf *trie_firstleaf(struct trie *t)
{
- struct tnode *n = (struct tnode *) rcu_dereference_check(t->trie,
- rcu_read_lock_held() ||
- lockdep_rtnl_is_held());
+ struct tnode *n = (struct tnode *)rcu_dereference_rtnl(t->trie);
if (!n)
return NULL;
@@ -2043,14 +2034,14 @@ struct fib_trie_iter {
struct seq_net_private p;
struct fib_table *tb;
struct tnode *tnode;
- unsigned index;
- unsigned depth;
+ unsigned int index;
+ unsigned int depth;
};
static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
{
struct tnode *tn = iter->tnode;
- unsigned cindex = iter->index;
+ unsigned int cindex = iter->index;
struct tnode *p;
/* A single entry routing table */
@@ -2159,7 +2150,7 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
*/
static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
{
- unsigned i, max, pointers, bytes, avdepth;
+ unsigned int i, max, pointers, bytes, avdepth;
if (stat->leaves)
avdepth = stat->totdepth*100 / stat->leaves;
@@ -2356,7 +2347,8 @@ static void fib_trie_seq_stop(struct seq_file *seq, void *v)
static void seq_indent(struct seq_file *seq, int n)
{
- while (n-- > 0) seq_puts(seq, " ");
+ while (n-- > 0)
+ seq_puts(seq, " ");
}
static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
@@ -2388,7 +2380,7 @@ static const char *const rtn_type_names[__RTN_MAX] = {
[RTN_XRESOLVE] = "XRESOLVE",
};
-static inline const char *rtn_type(char *buf, size_t len, unsigned t)
+static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
{
if (t < __RTN_MAX && rtn_type_names[t])
return rtn_type_names[t];
@@ -2544,13 +2536,12 @@ static void fib_route_seq_stop(struct seq_file *seq, void *v)
rcu_read_unlock();
}
-static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
+static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
{
- static unsigned type2flags[RTN_MAX + 1] = {
- [7] = RTF_REJECT, [8] = RTF_REJECT,
- };
- unsigned flags = type2flags[type];
+ unsigned int flags = 0;
+ if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
+ flags = RTF_REJECT;
if (fi && fi->fib_nh->nh_gw)
flags |= RTF_GATEWAY;
if (mask == htonl(0xFFFFFFFF))
@@ -2562,7 +2553,7 @@ static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
/*
* This outputs /proc/net/route.
* The format of the file is not supposed to be changed
- * and needs to be same as fib_hash output to avoid breaking
+ * and needs to be same as fib_hash output to avoid breaking
* legacy utilities
*/
static int fib_route_seq_show(struct seq_file *seq, void *v)
@@ -2587,7 +2578,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
list_for_each_entry_rcu(fa, &li->falh, fa_list) {
const struct fib_info *fi = fa->fa_info;
- unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
+ unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
int len;
if (fa->fa_type == RTN_BROADCAST
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
new file mode 100644
index 000000000000..caea6885fdbd
--- /dev/null
+++ b/net/ipv4/gre.c
@@ -0,0 +1,151 @@
+/*
+ * GRE over IPv4 demultiplexer driver
+ *
+ * Authors: Dmitry Kozlov (xeb@mail.ru)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <linux/version.h>
+#include <linux/spinlock.h>
+#include <net/protocol.h>
+#include <net/gre.h>
+
+
+static const struct gre_protocol *gre_proto[GREPROTO_MAX] __read_mostly;
+static DEFINE_SPINLOCK(gre_proto_lock);
+
+int gre_add_protocol(const struct gre_protocol *proto, u8 version)
+{
+ if (version >= GREPROTO_MAX)
+ goto err_out;
+
+ spin_lock(&gre_proto_lock);
+ if (gre_proto[version])
+ goto err_out_unlock;
+
+ rcu_assign_pointer(gre_proto[version], proto);
+ spin_unlock(&gre_proto_lock);
+ return 0;
+
+err_out_unlock:
+ spin_unlock(&gre_proto_lock);
+err_out:
+ return -1;
+}
+EXPORT_SYMBOL_GPL(gre_add_protocol);
+
+int gre_del_protocol(const struct gre_protocol *proto, u8 version)
+{
+ if (version >= GREPROTO_MAX)
+ goto err_out;
+
+ spin_lock(&gre_proto_lock);
+ if (gre_proto[version] != proto)
+ goto err_out_unlock;
+ rcu_assign_pointer(gre_proto[version], NULL);
+ spin_unlock(&gre_proto_lock);
+ synchronize_rcu();
+ return 0;
+
+err_out_unlock:
+ spin_unlock(&gre_proto_lock);
+err_out:
+ return -1;
+}
+EXPORT_SYMBOL_GPL(gre_del_protocol);
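For illustration only (hypothetical module, not in this patch): a GRE version-1 consumer such as a PPTP driver would be expected to hook into this demultiplexer along these lines, assuming GREPROTO_PPTP is the version-1 slot in <net/gre.h> and pptp_gre_rcv()/pptp_gre_err() are the module's own handlers:

	static const struct gre_protocol pptp_gre_protocol = {
		.handler     = pptp_gre_rcv,	/* hypothetical rx handler */
		.err_handler = pptp_gre_err,	/* hypothetical ICMP error handler */
	};

	/* module init */
	if (gre_add_protocol(&pptp_gre_protocol, GREPROTO_PPTP) < 0)
		return -EAGAIN;

	/* module exit */
	gre_del_protocol(&pptp_gre_protocol, GREPROTO_PPTP);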
+
+static int gre_rcv(struct sk_buff *skb)
+{
+ const struct gre_protocol *proto;
+ u8 ver;
+ int ret;
+
+ if (!pskb_may_pull(skb, 12))
+ goto drop;
+
+ ver = skb->data[1]&0x7f;
+ if (ver >= GREPROTO_MAX)
+ goto drop;
+
+ rcu_read_lock();
+ proto = rcu_dereference(gre_proto[ver]);
+ if (!proto || !proto->handler)
+ goto drop_unlock;
+ ret = proto->handler(skb);
+ rcu_read_unlock();
+ return ret;
+
+drop_unlock:
+ rcu_read_unlock();
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static void gre_err(struct sk_buff *skb, u32 info)
+{
+ const struct gre_protocol *proto;
+ u8 ver;
+
+ if (!pskb_may_pull(skb, 12))
+ goto drop;
+
+ ver = skb->data[1]&0x7f;
+ if (ver >= GREPROTO_MAX)
+ goto drop;
+
+ rcu_read_lock();
+ proto = rcu_dereference(gre_proto[ver]);
+ if (!proto || !proto->err_handler)
+ goto drop_unlock;
+ proto->err_handler(skb, info);
+ rcu_read_unlock();
+ return;
+
+drop_unlock:
+ rcu_read_unlock();
+drop:
+ kfree_skb(skb);
+}
+
+static const struct net_protocol net_gre_protocol = {
+ .handler = gre_rcv,
+ .err_handler = gre_err,
+ .netns_ok = 1,
+};
+
+static int __init gre_init(void)
+{
+	pr_info("GRE over IPv4 demultiplexer driver\n");
+
+ if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
+ pr_err("gre: can't add protocol\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void __exit gre_exit(void)
+{
+ inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
+}
+
+module_init(gre_init);
+module_exit(gre_exit);
+
+MODULE_DESCRIPTION("GRE over IPv4 demultiplexer driver");
+MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
+MODULE_LICENSE("GPL");
+
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index a0d847c7cba5..96bc7f9475a3 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -379,7 +379,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
inet->tos = ip_hdr(skb)->tos;
daddr = ipc.addr = rt->rt_src;
ipc.opt = NULL;
- ipc.shtx.flags = 0;
+ ipc.tx_flags = 0;
if (icmp_param->replyopts.optlen) {
ipc.opt = &icmp_param->replyopts;
if (ipc.opt->srr)
@@ -538,7 +538,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
inet_sk(sk)->tos = tos;
ipc.addr = iph->saddr;
ipc.opt = &icmp_param.replyopts;
- ipc.shtx.flags = 0;
+ ipc.tx_flags = 0;
{
struct flowi fl = {
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index e5fa2ddce320..ba8042665849 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -425,7 +425,7 @@ static int inet_diag_bc_run(const void *bc, int len,
bc += op->no;
}
}
- return (len == 0);
+ return len == 0;
}
static int valid_cc(const void *bc, int len, int cc)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b7c41654dde5..168440834ade 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -116,11 +116,11 @@ static int ip4_frag_match(struct inet_frag_queue *q, void *a)
struct ip4_create_arg *arg = a;
qp = container_of(q, struct ipq, q);
- return (qp->id == arg->iph->id &&
+ return qp->id == arg->iph->id &&
qp->saddr == arg->iph->saddr &&
qp->daddr == arg->iph->daddr &&
qp->protocol == arg->iph->protocol &&
- qp->user == arg->user);
+ qp->user == arg->user;
}
/* Memory Tracking Functions. */
@@ -542,7 +542,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
/* If the first fragment is fragmented itself, we split
* it to two chunks: the first with data and paged part
* and the second, holding only fragments. */
- if (skb_has_frags(head)) {
+ if (skb_has_frag_list(head)) {
struct sk_buff *clone;
int i, plen = 0;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 35c93e8b6a46..fbe2c473a06a 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -44,6 +44,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
+#include <net/gre.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
@@ -63,13 +64,13 @@
We cannot track such dead loops during route installation,
it is infeasible task. The most general solutions would be
to keep skb->encapsulation counter (sort of local ttl),
- and silently drop packet when it expires. It is the best
+ and silently drop packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.
- Current solution: HARD_TX_LOCK lock breaks dead loops.
-
-
+ Current solution: xmit_recursion breaks dead loops. This is a percpu
+ counter, since when we enter the first ndo_xmit(), cpu migration is
+ forbidden. We force an exit if this counter reaches RECURSION_LIMIT
   2. Networking dead loops would not kill routers, but would really
   kill the network. IP hop limit plays the role of "t->recursion" in this case,
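The xmit_recursion counter referred to above lives in the core transmit path (presumably net/core/dev.c in this same series, per the diffstat); conceptually it is just a per-cpu depth counter wrapped around the device transmit call. A rough sketch, with the limit value shown purely for illustration:

	#define RECURSION_LIMIT 3
	static DEFINE_PER_CPU(int, xmit_recursion);

	if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
		goto recursion_alert;		/* drop rather than loop forever */
	__this_cpu_inc(xmit_recursion);
	rc = dev_hard_start_xmit(skb, dev, txq);
	__this_cpu_dec(xmit_recursion);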
@@ -128,7 +129,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev);
static int ipgre_net_id __read_mostly;
struct ipgre_net {
- struct ip_tunnel *tunnels[4][HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];
struct net_device *fb_tunnel_dev;
};
@@ -158,13 +159,40 @@ struct ipgre_net {
#define tunnels_l tunnels[1]
#define tunnels_wc tunnels[0]
/*
- * Locking : hash tables are protected by RCU and a spinlock
+ * Locking : hash tables are protected by RCU and RTNL
*/
-static DEFINE_SPINLOCK(ipgre_lock);
#define for_each_ip_tunnel_rcu(start) \
for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+/* often modified stats are per cpu, others are shared (netdev->stats) */
+struct pcpu_tstats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+};
+
+static struct net_device_stats *ipgre_get_stats(struct net_device *dev)
+{
+ struct pcpu_tstats sum = { 0 };
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+
+ sum.rx_packets += tstats->rx_packets;
+ sum.rx_bytes += tstats->rx_bytes;
+ sum.tx_packets += tstats->tx_packets;
+ sum.tx_bytes += tstats->tx_bytes;
+ }
+ dev->stats.rx_packets = sum.rx_packets;
+ dev->stats.rx_bytes = sum.rx_bytes;
+ dev->stats.tx_packets = sum.tx_packets;
+ dev->stats.tx_bytes = sum.tx_bytes;
+ return &dev->stats;
+}
+
/* Given src, dst and key, find appropriate for input tunnel. */
static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
@@ -173,8 +201,8 @@ static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
{
struct net *net = dev_net(dev);
int link = dev->ifindex;
- unsigned h0 = HASH(remote);
- unsigned h1 = HASH(key);
+ unsigned int h0 = HASH(remote);
+ unsigned int h1 = HASH(key);
struct ip_tunnel *t, *cand = NULL;
struct ipgre_net *ign = net_generic(net, ipgre_net_id);
int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
@@ -289,13 +317,13 @@ static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
return NULL;
}
-static struct ip_tunnel **__ipgre_bucket(struct ipgre_net *ign,
+static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
struct ip_tunnel_parm *parms)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
__be32 key = parms->i_key;
- unsigned h = HASH(key);
+ unsigned int h = HASH(key);
int prio = 0;
if (local)
@@ -308,7 +336,7 @@ static struct ip_tunnel **__ipgre_bucket(struct ipgre_net *ign,
return &ign->tunnels[prio][h];
}
-static inline struct ip_tunnel **ipgre_bucket(struct ipgre_net *ign,
+static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
struct ip_tunnel *t)
{
return __ipgre_bucket(ign, &t->parms);
@@ -316,23 +344,22 @@ static inline struct ip_tunnel **ipgre_bucket(struct ipgre_net *ign,
static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
{
- struct ip_tunnel **tp = ipgre_bucket(ign, t);
+ struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);
- spin_lock_bh(&ipgre_lock);
- t->next = *tp;
+ rcu_assign_pointer(t->next, rtnl_dereference(*tp));
rcu_assign_pointer(*tp, t);
- spin_unlock_bh(&ipgre_lock);
}
static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
{
- struct ip_tunnel **tp;
-
- for (tp = ipgre_bucket(ign, t); *tp; tp = &(*tp)->next) {
- if (t == *tp) {
- spin_lock_bh(&ipgre_lock);
- *tp = t->next;
- spin_unlock_bh(&ipgre_lock);
+ struct ip_tunnel __rcu **tp;
+ struct ip_tunnel *iter;
+
+ for (tp = ipgre_bucket(ign, t);
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+ if (t == iter) {
+ rcu_assign_pointer(*tp, t->next);
break;
}
}
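The unlink above runs under RTNL; the matching reader side needs only rcu_read_lock() around the existing for_each_ip_tunnel_rcu() walk. A simplified sketch (real lookups also match key, link and device type):

	struct ip_tunnel *t;

	rcu_read_lock();
	for_each_ip_tunnel_rcu(ign->tunnels_wc[0])
		if (t->dev->flags & IFF_UP)
			break;	/* t stays valid until rcu_read_unlock() */
	rcu_read_unlock();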
@@ -346,10 +373,13 @@ static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
__be32 local = parms->iph.saddr;
__be32 key = parms->i_key;
int link = parms->link;
- struct ip_tunnel *t, **tp;
+ struct ip_tunnel *t;
+ struct ip_tunnel __rcu **tp;
struct ipgre_net *ign = net_generic(net, ipgre_net_id);
- for (tp = __ipgre_bucket(ign, parms); (t = *tp) != NULL; tp = &t->next)
+ for (tp = __ipgre_bucket(ign, parms);
+ (t = rtnl_dereference(*tp)) != NULL;
+ tp = &t->next)
if (local == t->parms.iph.saddr &&
remote == t->parms.iph.daddr &&
key == t->parms.i_key &&
@@ -360,7 +390,7 @@ static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
return t;
}
-static struct ip_tunnel * ipgre_tunnel_locate(struct net *net,
+static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
struct ip_tunnel_parm *parms, int create)
{
struct ip_tunnel *t, *nt;
@@ -582,7 +612,7 @@ static int ipgre_rcv(struct sk_buff *skb)
if ((tunnel = ipgre_tunnel_lookup(skb->dev,
iph->saddr, iph->daddr, key,
gre_proto))) {
- struct net_device_stats *stats = &tunnel->dev->stats;
+ struct pcpu_tstats *tstats;
secpath_reset(skb);
@@ -606,22 +636,22 @@ static int ipgre_rcv(struct sk_buff *skb)
/* Looped back packet, drop it! */
if (skb_rtable(skb)->fl.iif == 0)
goto drop;
- stats->multicast++;
+ tunnel->dev->stats.multicast++;
skb->pkt_type = PACKET_BROADCAST;
}
#endif
if (((flags&GRE_CSUM) && csum) ||
(!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
- stats->rx_crc_errors++;
- stats->rx_errors++;
+ tunnel->dev->stats.rx_crc_errors++;
+ tunnel->dev->stats.rx_errors++;
goto drop;
}
if (tunnel->parms.i_flags&GRE_SEQ) {
if (!(flags&GRE_SEQ) ||
(tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
- stats->rx_fifo_errors++;
- stats->rx_errors++;
+ tunnel->dev->stats.rx_fifo_errors++;
+ tunnel->dev->stats.rx_errors++;
goto drop;
}
tunnel->i_seqno = seqno + 1;
@@ -630,8 +660,8 @@ static int ipgre_rcv(struct sk_buff *skb)
/* Warning: All skb pointers will be invalidated! */
if (tunnel->dev->type == ARPHRD_ETHER) {
if (!pskb_may_pull(skb, ETH_HLEN)) {
- stats->rx_length_errors++;
- stats->rx_errors++;
+ tunnel->dev->stats.rx_length_errors++;
+ tunnel->dev->stats.rx_errors++;
goto drop;
}
@@ -640,14 +670,20 @@ static int ipgre_rcv(struct sk_buff *skb)
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
}
- skb_tunnel_rx(skb, tunnel->dev);
+ tstats = this_cpu_ptr(tunnel->dev->tstats);
+ tstats->rx_packets++;
+ tstats->rx_bytes += skb->len;
+
+ __skb_tunnel_rx(skb, tunnel->dev);
skb_reset_network_header(skb);
ipgre_ecn_decapsulate(iph, skb);
- netif_rx(skb);
+ if (netif_rx(skb) == NET_RX_DROP)
+ tunnel->dev->stats.rx_dropped++;
+
rcu_read_unlock();
- return(0);
+ return 0;
}
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
@@ -655,20 +691,19 @@ drop:
rcu_read_unlock();
drop_nolock:
kfree_skb(skb);
- return(0);
+ return 0;
}
static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+ struct pcpu_tstats *tstats;
struct iphdr *old_iph = ip_hdr(skb);
struct iphdr *tiph;
u8 tos;
__be16 df;
struct rtable *rt; /* Route to the other host */
- struct net_device *tdev; /* Device to other host */
+ struct net_device *tdev; /* Device to other host */
struct iphdr *iph; /* Our new IP header */
unsigned int max_headroom; /* The extra header space needed */
int gre_hlen;
@@ -690,7 +725,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
/* NBMA tunnel */
if (skb_dst(skb) == NULL) {
- stats->tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
goto tx_error;
}
@@ -736,14 +771,20 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
}
{
- struct flowi fl = { .oif = tunnel->parms.link,
- .nl_u = { .ip4_u =
- { .daddr = dst,
- .saddr = tiph->saddr,
- .tos = RT_TOS(tos) } },
- .proto = IPPROTO_GRE };
+ struct flowi fl = {
+ .oif = tunnel->parms.link,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = dst,
+ .saddr = tiph->saddr,
+ .tos = RT_TOS(tos)
+ }
+ },
+ .proto = IPPROTO_GRE
+		};
if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
- stats->tx_carrier_errors++;
+ dev->stats.tx_carrier_errors++;
goto tx_error;
}
}
@@ -751,7 +792,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
if (tdev == dev) {
ip_rt_put(rt);
- stats->collisions++;
+ dev->stats.collisions++;
goto tx_error;
}
@@ -814,7 +855,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
dev->needed_headroom = max_headroom;
if (!new_skb) {
ip_rt_put(rt);
- txq->tx_dropped++;
+ dev->stats.tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -881,15 +922,15 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
}
nf_reset(skb);
-
- IPTUNNEL_XMIT();
+ tstats = this_cpu_ptr(dev->tstats);
+ __IPTUNNEL_XMIT(tstats, &dev->stats);
return NETDEV_TX_OK;
tx_error_icmp:
dst_link_failure(skb);
tx_error:
- stats->tx_errors++;
+ dev->stats.tx_errors++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -909,13 +950,19 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
/* Guess output device to choose reasonable mtu and needed_headroom */
if (iph->daddr) {
- struct flowi fl = { .oif = tunnel->parms.link,
- .nl_u = { .ip4_u =
- { .daddr = iph->daddr,
- .saddr = iph->saddr,
- .tos = RT_TOS(iph->tos) } },
- .proto = IPPROTO_GRE };
+ struct flowi fl = {
+ .oif = tunnel->parms.link,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = iph->daddr,
+ .saddr = iph->saddr,
+ .tos = RT_TOS(iph->tos)
+ }
+ },
+ .proto = IPPROTO_GRE
+ };
struct rtable *rt;
+
if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
tdev = rt->dst.dev;
ip_rt_put(rt);
@@ -1012,7 +1059,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
break;
}
} else {
- unsigned nflags = 0;
+ unsigned int nflags = 0;
t = netdev_priv(dev);
@@ -1125,7 +1172,7 @@ static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
- const void *daddr, const void *saddr, unsigned len)
+ const void *daddr, const void *saddr, unsigned int len)
{
struct ip_tunnel *t = netdev_priv(dev);
struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
@@ -1167,13 +1214,19 @@ static int ipgre_open(struct net_device *dev)
struct ip_tunnel *t = netdev_priv(dev);
if (ipv4_is_multicast(t->parms.iph.daddr)) {
- struct flowi fl = { .oif = t->parms.link,
- .nl_u = { .ip4_u =
- { .daddr = t->parms.iph.daddr,
- .saddr = t->parms.iph.saddr,
- .tos = RT_TOS(t->parms.iph.tos) } },
- .proto = IPPROTO_GRE };
+ struct flowi fl = {
+ .oif = t->parms.link,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = t->parms.iph.daddr,
+ .saddr = t->parms.iph.saddr,
+ .tos = RT_TOS(t->parms.iph.tos)
+ }
+ },
+ .proto = IPPROTO_GRE
+ };
struct rtable *rt;
+
if (ip_route_output_key(dev_net(dev), &rt, &fl))
return -EADDRNOTAVAIL;
dev = rt->dst.dev;
@@ -1213,12 +1266,19 @@ static const struct net_device_ops ipgre_netdev_ops = {
.ndo_start_xmit = ipgre_tunnel_xmit,
.ndo_do_ioctl = ipgre_tunnel_ioctl,
.ndo_change_mtu = ipgre_tunnel_change_mtu,
+ .ndo_get_stats = ipgre_get_stats,
};
+static void ipgre_dev_free(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+ free_netdev(dev);
+}
+
static void ipgre_tunnel_setup(struct net_device *dev)
{
dev->netdev_ops = &ipgre_netdev_ops;
- dev->destructor = free_netdev;
+ dev->destructor = ipgre_dev_free;
dev->type = ARPHRD_IPGRE;
dev->needed_headroom = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
@@ -1256,6 +1316,10 @@ static int ipgre_tunnel_init(struct net_device *dev)
} else
dev->header_ops = &ipgre_header_ops;
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
return 0;
}
@@ -1274,14 +1338,13 @@ static void ipgre_fb_tunnel_init(struct net_device *dev)
tunnel->hlen = sizeof(struct iphdr) + 4;
dev_hold(dev);
- ign->tunnels_wc[0] = tunnel;
+ rcu_assign_pointer(ign->tunnels_wc[0], tunnel);
}
-static const struct net_protocol ipgre_protocol = {
- .handler = ipgre_rcv,
- .err_handler = ipgre_err,
- .netns_ok = 1,
+static const struct gre_protocol ipgre_protocol = {
+ .handler = ipgre_rcv,
+ .err_handler = ipgre_err,
};
static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
@@ -1291,11 +1354,13 @@ static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
for (prio = 0; prio < 4; prio++) {
int h;
for (h = 0; h < HASH_SIZE; h++) {
- struct ip_tunnel *t = ign->tunnels[prio][h];
+ struct ip_tunnel *t;
+
+ t = rtnl_dereference(ign->tunnels[prio][h]);
while (t != NULL) {
unregister_netdevice_queue(t->dev, head);
- t = t->next;
+ t = rtnl_dereference(t->next);
}
}
}
@@ -1441,6 +1506,10 @@ static int ipgre_tap_init(struct net_device *dev)
ipgre_tunnel_bind_dev(dev);
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
return 0;
}
@@ -1451,6 +1520,7 @@ static const struct net_device_ops ipgre_tap_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ipgre_tunnel_change_mtu,
+ .ndo_get_stats = ipgre_get_stats,
};
static void ipgre_tap_setup(struct net_device *dev)
@@ -1459,7 +1529,7 @@ static void ipgre_tap_setup(struct net_device *dev)
ether_setup(dev);
dev->netdev_ops = &ipgre_tap_netdev_ops;
- dev->destructor = free_netdev;
+ dev->destructor = ipgre_dev_free;
dev->iflink = 0;
dev->features |= NETIF_F_NETNS_LOCAL;
@@ -1487,6 +1557,10 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nla
if (!tb[IFLA_MTU])
dev->mtu = mtu;
+ /* Can use a lockless transmit, unless we generate output sequences */
+ if (!(nt->parms.o_flags & GRE_SEQ))
+ dev->features |= NETIF_F_LLTX;
+
err = register_netdevice(dev);
if (err)
goto out;
@@ -1522,7 +1596,7 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
t = nt;
if (dev->type != ARPHRD_ETHER) {
- unsigned nflags = 0;
+ unsigned int nflags = 0;
if (ipv4_is_multicast(p.iph.daddr))
nflags = IFF_BROADCAST;
@@ -1663,7 +1737,7 @@ static int __init ipgre_init(void)
if (err < 0)
return err;
- err = inet_add_protocol(&ipgre_protocol, IPPROTO_GRE);
+ err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
if (err < 0) {
printk(KERN_INFO "ipgre init: can't add protocol\n");
goto add_proto_failed;
@@ -1683,7 +1757,7 @@ out:
tap_ops_failed:
rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
- inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
+ gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
unregister_pernet_device(&ipgre_net_ops);
goto out;
@@ -1693,7 +1767,7 @@ static void __exit ipgre_fini(void)
{
rtnl_link_unregister(&ipgre_tap_ops);
rtnl_link_unregister(&ipgre_link_ops);
- if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
+ if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
printk(KERN_INFO "ipgre close: can't remove protocol\n");
unregister_pernet_device(&ipgre_net_ops);
}
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index ba9836c488ed..1906fa35860c 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -466,7 +466,7 @@ error:
}
return -EINVAL;
}
-
+EXPORT_SYMBOL(ip_options_compile);
/*
* Undo all the changes done by ip_options_compile().
@@ -646,3 +646,4 @@ int ip_options_rcv_srr(struct sk_buff *skb)
}
return 0;
}
+EXPORT_SYMBOL(ip_options_rcv_srr);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 7649d7750075..439d2a34ee44 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -487,7 +487,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
* LATER: this step can be merged to real generation of fragments,
* we can switch to copy when see the first bad fragment.
*/
- if (skb_has_frags(skb)) {
+ if (skb_has_frag_list(skb)) {
struct sk_buff *frag, *frag2;
int first_len = skb_pagelen(skb);
@@ -844,10 +844,9 @@ int ip_append_data(struct sock *sk,
inet->cork.length = 0;
sk->sk_sndmsg_page = NULL;
sk->sk_sndmsg_off = 0;
- if ((exthdrlen = rt->dst.header_len) != 0) {
- length += exthdrlen;
- transhdrlen += exthdrlen;
- }
+ exthdrlen = rt->dst.header_len;
+ length += exthdrlen;
+ transhdrlen += exthdrlen;
} else {
rt = (struct rtable *)inet->cork.dst;
if (inet->cork.flags & IPCORK_OPT)
@@ -934,16 +933,19 @@ alloc_new_skb:
!(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
else
- alloclen = datalen + fragheaderlen;
+ alloclen = fraglen;
/* The last fragment gets additional space at tail.
* Note, with MSG_MORE we overallocate on fragments,
* because we have no idea what fragment will be
* the last.
*/
- if (datalen == length + fraggap)
+ if (datalen == length + fraggap) {
alloclen += rt->dst.trailer_len;
-
+ /* make sure mtu is not reached */
+ if (datalen > mtu - fragheaderlen - rt->dst.trailer_len)
+ datalen -= ALIGN(rt->dst.trailer_len, 8);
+ }
if (transhdrlen) {
skb = sock_alloc_send_skb(sk,
alloclen + hh_len + 15,
@@ -960,7 +962,7 @@ alloc_new_skb:
else
/* only the initial fragment is
time stamped */
- ipc->shtx.flags = 0;
+ ipc->tx_flags = 0;
}
if (skb == NULL)
goto error;
@@ -971,7 +973,7 @@ alloc_new_skb:
skb->ip_summed = csummode;
skb->csum = 0;
skb_reserve(skb, hh_len);
- *skb_tx(skb) = ipc->shtx;
+ skb_shinfo(skb)->tx_flags = ipc->tx_flags;
/*
* Find where to start putting bytes.
@@ -1391,7 +1393,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
daddr = ipc.addr = rt->rt_src;
ipc.opt = NULL;
- ipc.shtx.flags = 0;
+ ipc.tx_flags = 0;
if (replyopts.opt.optlen) {
ipc.opt = &replyopts.opt;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ec036731a70b..6ad46c28ede2 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -122,31 +122,59 @@
static int ipip_net_id __read_mostly;
struct ipip_net {
- struct ip_tunnel *tunnels_r_l[HASH_SIZE];
- struct ip_tunnel *tunnels_r[HASH_SIZE];
- struct ip_tunnel *tunnels_l[HASH_SIZE];
- struct ip_tunnel *tunnels_wc[1];
- struct ip_tunnel **tunnels[4];
+ struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_wc[1];
+ struct ip_tunnel __rcu **tunnels[4];
struct net_device *fb_tunnel_dev;
};
-static void ipip_tunnel_init(struct net_device *dev);
+static int ipip_tunnel_init(struct net_device *dev);
static void ipip_tunnel_setup(struct net_device *dev);
+static void ipip_dev_free(struct net_device *dev);
/*
- * Locking : hash tables are protected by RCU and a spinlock
+ * Locking : hash tables are protected by RCU and RTNL
*/
-static DEFINE_SPINLOCK(ipip_lock);
#define for_each_ip_tunnel_rcu(start) \
for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+/* often modified stats are per cpu, others are shared (netdev->stats) */
+struct pcpu_tstats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+};
+
+static struct net_device_stats *ipip_get_stats(struct net_device *dev)
+{
+ struct pcpu_tstats sum = { 0 };
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+
+ sum.rx_packets += tstats->rx_packets;
+ sum.rx_bytes += tstats->rx_bytes;
+ sum.tx_packets += tstats->tx_packets;
+ sum.tx_bytes += tstats->tx_bytes;
+ }
+ dev->stats.rx_packets = sum.rx_packets;
+ dev->stats.rx_bytes = sum.rx_bytes;
+ dev->stats.tx_packets = sum.tx_packets;
+ dev->stats.tx_bytes = sum.tx_bytes;
+ return &dev->stats;
+}
+
static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
__be32 remote, __be32 local)
{
- unsigned h0 = HASH(remote);
- unsigned h1 = HASH(local);
+ unsigned int h0 = HASH(remote);
+ unsigned int h1 = HASH(local);
struct ip_tunnel *t;
struct ipip_net *ipn = net_generic(net, ipip_net_id);
@@ -169,12 +197,12 @@ static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
return NULL;
}
-static struct ip_tunnel **__ipip_bucket(struct ipip_net *ipn,
+static struct ip_tunnel __rcu **__ipip_bucket(struct ipip_net *ipn,
struct ip_tunnel_parm *parms)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
- unsigned h = 0;
+ unsigned int h = 0;
int prio = 0;
if (remote) {
@@ -188,7 +216,7 @@ static struct ip_tunnel **__ipip_bucket(struct ipip_net *ipn,
return &ipn->tunnels[prio][h];
}
-static inline struct ip_tunnel **ipip_bucket(struct ipip_net *ipn,
+static inline struct ip_tunnel __rcu **ipip_bucket(struct ipip_net *ipn,
struct ip_tunnel *t)
{
return __ipip_bucket(ipn, &t->parms);
@@ -196,13 +224,14 @@ static inline struct ip_tunnel **ipip_bucket(struct ipip_net *ipn,
static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
{
- struct ip_tunnel **tp;
-
- for (tp = ipip_bucket(ipn, t); *tp; tp = &(*tp)->next) {
- if (t == *tp) {
- spin_lock_bh(&ipip_lock);
- *tp = t->next;
- spin_unlock_bh(&ipip_lock);
+ struct ip_tunnel __rcu **tp;
+ struct ip_tunnel *iter;
+
+ for (tp = ipip_bucket(ipn, t);
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+ if (t == iter) {
+ rcu_assign_pointer(*tp, t->next);
break;
}
}
@@ -210,12 +239,10 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
{
- struct ip_tunnel **tp = ipip_bucket(ipn, t);
+ struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);
- spin_lock_bh(&ipip_lock);
- t->next = *tp;
+ rcu_assign_pointer(t->next, rtnl_dereference(*tp));
rcu_assign_pointer(*tp, t);
- spin_unlock_bh(&ipip_lock);
}
static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
@@ -223,12 +250,15 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
- struct ip_tunnel *t, **tp, *nt;
+ struct ip_tunnel *t, *nt;
+ struct ip_tunnel __rcu **tp;
struct net_device *dev;
char name[IFNAMSIZ];
struct ipip_net *ipn = net_generic(net, ipip_net_id);
- for (tp = __ipip_bucket(ipn, parms); (t = *tp) != NULL; tp = &t->next) {
+ for (tp = __ipip_bucket(ipn, parms);
+ (t = rtnl_dereference(*tp)) != NULL;
+ tp = &t->next) {
if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
return t;
}
@@ -238,7 +268,7 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
if (parms->name[0])
strlcpy(name, parms->name, IFNAMSIZ);
else
- sprintf(name, "tunl%%d");
+ strcpy(name, "tunl%d");
dev = alloc_netdev(sizeof(*t), name, ipip_tunnel_setup);
if (dev == NULL)
@@ -254,7 +284,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
nt = netdev_priv(dev);
nt->parms = *parms;
- ipip_tunnel_init(dev);
+ if (ipip_tunnel_init(dev) < 0)
+ goto failed_free;
if (register_netdevice(dev) < 0)
goto failed_free;
@@ -264,20 +295,19 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
return nt;
failed_free:
- free_netdev(dev);
+ ipip_dev_free(dev);
return NULL;
}
+/* called with RTNL */
static void ipip_tunnel_uninit(struct net_device *dev)
{
struct net *net = dev_net(dev);
struct ipip_net *ipn = net_generic(net, ipip_net_id);
- if (dev == ipn->fb_tunnel_dev) {
- spin_lock_bh(&ipip_lock);
- ipn->tunnels_wc[0] = NULL;
- spin_unlock_bh(&ipip_lock);
- } else
+ if (dev == ipn->fb_tunnel_dev)
+ rcu_assign_pointer(ipn->tunnels_wc[0], NULL);
+ else
ipip_tunnel_unlink(ipn, netdev_priv(dev));
dev_put(dev);
}
@@ -359,8 +389,10 @@ static int ipip_rcv(struct sk_buff *skb)
const struct iphdr *iph = ip_hdr(skb);
rcu_read_lock();
- if ((tunnel = ipip_tunnel_lookup(dev_net(skb->dev),
- iph->saddr, iph->daddr)) != NULL) {
+ tunnel = ipip_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
+ if (tunnel != NULL) {
+ struct pcpu_tstats *tstats;
+
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
rcu_read_unlock();
kfree_skb(skb);
@@ -374,10 +406,17 @@ static int ipip_rcv(struct sk_buff *skb)
skb->protocol = htons(ETH_P_IP);
skb->pkt_type = PACKET_HOST;
- skb_tunnel_rx(skb, tunnel->dev);
+ tstats = this_cpu_ptr(tunnel->dev->tstats);
+ tstats->rx_packets++;
+ tstats->rx_bytes += skb->len;
+
+ __skb_tunnel_rx(skb, tunnel->dev);
ipip_ecn_decapsulate(iph, skb);
- netif_rx(skb);
+
+ if (netif_rx(skb) == NET_RX_DROP)
+ tunnel->dev->stats.rx_dropped++;
+
rcu_read_unlock();
return 0;
}
@@ -394,13 +433,12 @@ static int ipip_rcv(struct sk_buff *skb)
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+ struct pcpu_tstats *tstats;
struct iphdr *tiph = &tunnel->parms.iph;
u8 tos = tunnel->parms.iph.tos;
__be16 df = tiph->frag_off;
struct rtable *rt; /* Route to the other host */
- struct net_device *tdev; /* Device to other host */
+ struct net_device *tdev; /* Device to other host */
struct iphdr *old_iph = ip_hdr(skb);
struct iphdr *iph; /* Our new IP header */
unsigned int max_headroom; /* The extra header space needed */
@@ -410,13 +448,13 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->protocol != htons(ETH_P_IP))
goto tx_error;
- if (tos&1)
+ if (tos & 1)
tos = old_iph->tos;
if (!dst) {
/* NBMA tunnel */
if ((rt = skb_rtable(skb)) == NULL) {
- stats->tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
goto tx_error;
}
if ((dst = rt->rt_gateway) == 0)
@@ -424,14 +462,20 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
}
{
- struct flowi fl = { .oif = tunnel->parms.link,
- .nl_u = { .ip4_u =
- { .daddr = dst,
- .saddr = tiph->saddr,
- .tos = RT_TOS(tos) } },
- .proto = IPPROTO_IPIP };
+ struct flowi fl = {
+ .oif = tunnel->parms.link,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = dst,
+ .saddr = tiph->saddr,
+ .tos = RT_TOS(tos)
+ }
+ },
+ .proto = IPPROTO_IPIP
+ };
+
if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
- stats->tx_carrier_errors++;
+ dev->stats.tx_carrier_errors++;
goto tx_error_icmp;
}
}
@@ -439,7 +483,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
if (tdev == dev) {
ip_rt_put(rt);
- stats->collisions++;
+ dev->stats.collisions++;
goto tx_error;
}
@@ -449,7 +493,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
if (mtu < 68) {
- stats->collisions++;
+ dev->stats.collisions++;
ip_rt_put(rt);
goto tx_error;
}
@@ -485,7 +529,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
if (!new_skb) {
ip_rt_put(rt);
- txq->tx_dropped++;
+ dev->stats.tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -522,14 +566,14 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
iph->ttl = old_iph->ttl;
nf_reset(skb);
-
- IPTUNNEL_XMIT();
+ tstats = this_cpu_ptr(dev->tstats);
+ __IPTUNNEL_XMIT(tstats, &dev->stats);
return NETDEV_TX_OK;
tx_error_icmp:
dst_link_failure(skb);
tx_error:
- stats->tx_errors++;
+ dev->stats.tx_errors++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -544,13 +588,19 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
iph = &tunnel->parms.iph;
if (iph->daddr) {
- struct flowi fl = { .oif = tunnel->parms.link,
- .nl_u = { .ip4_u =
- { .daddr = iph->daddr,
- .saddr = iph->saddr,
- .tos = RT_TOS(iph->tos) } },
- .proto = IPPROTO_IPIP };
+ struct flowi fl = {
+ .oif = tunnel->parms.link,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = iph->daddr,
+ .saddr = iph->saddr,
+ .tos = RT_TOS(iph->tos)
+ }
+ },
+ .proto = IPPROTO_IPIP
+ };
struct rtable *rt;
+
if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
tdev = rt->dst.dev;
ip_rt_put(rt);
@@ -696,13 +746,19 @@ static const struct net_device_ops ipip_netdev_ops = {
.ndo_start_xmit = ipip_tunnel_xmit,
.ndo_do_ioctl = ipip_tunnel_ioctl,
.ndo_change_mtu = ipip_tunnel_change_mtu,
-
+ .ndo_get_stats = ipip_get_stats,
};
+static void ipip_dev_free(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+ free_netdev(dev);
+}
+
static void ipip_tunnel_setup(struct net_device *dev)
{
dev->netdev_ops = &ipip_netdev_ops;
- dev->destructor = free_netdev;
+ dev->destructor = ipip_dev_free;
dev->type = ARPHRD_TUNNEL;
dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
@@ -711,10 +767,11 @@ static void ipip_tunnel_setup(struct net_device *dev)
dev->iflink = 0;
dev->addr_len = 4;
dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->features |= NETIF_F_LLTX;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
-static void ipip_tunnel_init(struct net_device *dev)
+static int ipip_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
@@ -725,9 +782,15 @@ static void ipip_tunnel_init(struct net_device *dev)
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
ipip_tunnel_bind_dev(dev);
+
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ return 0;
}
-static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
+static int __net_init ipip_fb_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
@@ -740,11 +803,16 @@ static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
iph->protocol = IPPROTO_IPIP;
iph->ihl = 5;
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
dev_hold(dev);
- ipn->tunnels_wc[0] = tunnel;
+ rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
+ return 0;
}
-static struct xfrm_tunnel ipip_handler = {
+static struct xfrm_tunnel ipip_handler __read_mostly = {
.handler = ipip_rcv,
.err_handler = ipip_err,
.priority = 1,
@@ -760,11 +828,12 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
for (prio = 1; prio < 4; prio++) {
int h;
for (h = 0; h < HASH_SIZE; h++) {
- struct ip_tunnel *t = ipn->tunnels[prio][h];
+ struct ip_tunnel *t;
+ t = rtnl_dereference(ipn->tunnels[prio][h]);
while (t != NULL) {
unregister_netdevice_queue(t->dev, head);
- t = t->next;
+ t = rtnl_dereference(t->next);
}
}
}
@@ -789,7 +858,9 @@ static int __net_init ipip_init_net(struct net *net)
}
dev_net_set(ipn->fb_tunnel_dev, net);
- ipip_fb_tunnel_init(ipn->fb_tunnel_dev);
+ err = ipip_fb_tunnel_init(ipn->fb_tunnel_dev);
+ if (err)
+ goto err_reg_dev;
if ((err = register_netdev(ipn->fb_tunnel_dev)))
goto err_reg_dev;
@@ -797,7 +868,7 @@ static int __net_init ipip_init_net(struct net *net)
return 0;
err_reg_dev:
- free_netdev(ipn->fb_tunnel_dev);
+ ipip_dev_free(ipn->fb_tunnel_dev);
err_alloc_dev:
/* nothing */
return err;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 179fcab866fc..86dd5691af46 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -75,7 +75,7 @@ struct mr_table {
struct net *net;
#endif
u32 id;
- struct sock *mroute_sk;
+ struct sock __rcu *mroute_sk;
struct timer_list ipmr_expire_timer;
struct list_head mfc_unres_queue;
struct list_head mfc_cache_array[MFC_LINES];
@@ -98,7 +98,7 @@ struct ipmr_result {
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
- Note that the changes are semaphored via rtnl_lock.
+ * Note that the changes are semaphored via rtnl_lock.
*/
static DEFINE_RWLOCK(mrt_lock);
@@ -113,11 +113,11 @@ static DEFINE_RWLOCK(mrt_lock);
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to original Alan's scheme. Hash table of resolved
- entries is changed only in process context and protected
- with weak lock mrt_lock. Queue of unresolved entries is protected
- with strong spinlock mfc_unres_lock.
-
- In this case data path is free of exclusive locks at all.
+ * entries is changed only in process context and protected
+ * with weak lock mrt_lock. Queue of unresolved entries is protected
+ * with strong spinlock mfc_unres_lock.
+ *
+ * In this case data path is free of exclusive locks at all.
*/
static struct kmem_cache *mrt_cachep __read_mostly;
@@ -396,9 +396,9 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
set_fs(KERNEL_DS);
err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
set_fs(oldfs);
- } else
+ } else {
err = -EOPNOTSUPP;
-
+ }
dev = NULL;
if (err == 0 &&
@@ -495,7 +495,8 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
dev->iflink = 0;
rcu_read_lock();
- if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
+ in_dev = __in_dev_get_rcu(dev);
+ if (!in_dev) {
rcu_read_unlock();
goto failure;
}
@@ -552,9 +553,10 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
mrt->mroute_reg_vif_num = -1;
#endif
- if (vifi+1 == mrt->maxvif) {
+ if (vifi + 1 == mrt->maxvif) {
int tmp;
- for (tmp=vifi-1; tmp>=0; tmp--) {
+
+ for (tmp = vifi - 1; tmp >= 0; tmp--) {
if (VIF_EXISTS(mrt, tmp))
break;
}
@@ -565,25 +567,33 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
dev_set_allmulti(dev, -1);
- if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
+ in_dev = __in_dev_get_rtnl(dev);
+ if (in_dev) {
IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
ip_rt_multicast_event(in_dev);
}
- if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify)
+ if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
unregister_netdevice_queue(dev, head);
dev_put(dev);
return 0;
}
-static inline void ipmr_cache_free(struct mfc_cache *c)
+static void ipmr_cache_free_rcu(struct rcu_head *head)
{
+ struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);
+
kmem_cache_free(mrt_cachep, c);
}
+static inline void ipmr_cache_free(struct mfc_cache *c)
+{
+ call_rcu(&c->rcu, ipmr_cache_free_rcu);
+}
+
/* Destroy an unresolved cache entry, killing queued skbs
- and reporting error to netlink readers.
+ * and reporting error to netlink readers.
*/
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
@@ -605,8 +615,9 @@ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
memset(&e->msg, 0, sizeof(e->msg));
rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
- } else
+ } else {
kfree_skb(skb);
+ }
}
ipmr_cache_free(c);
@@ -724,13 +735,13 @@ static int vif_add(struct net *net, struct mr_table *mrt,
case 0:
if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
- if (dev && dev->ip_ptr == NULL) {
+ if (dev && __in_dev_get_rtnl(dev) == NULL) {
dev_put(dev);
return -EADDRNOTAVAIL;
}
- } else
+ } else {
dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
-
+ }
if (!dev)
return -EADDRNOTAVAIL;
err = dev_set_allmulti(dev, 1);
@@ -743,16 +754,16 @@ static int vif_add(struct net *net, struct mr_table *mrt,
return -EINVAL;
}
- if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
+ in_dev = __in_dev_get_rtnl(dev);
+ if (!in_dev) {
dev_put(dev);
return -EADDRNOTAVAIL;
}
IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
ip_rt_multicast_event(in_dev);
- /*
- * Fill in the VIF structures
- */
+ /* Fill in the VIF structures */
+
v->rate_limit = vifc->vifc_rate_limit;
v->local = vifc->vifc_lcl_addr.s_addr;
v->remote = vifc->vifc_rmt_addr.s_addr;
@@ -765,14 +776,14 @@ static int vif_add(struct net *net, struct mr_table *mrt,
v->pkt_in = 0;
v->pkt_out = 0;
v->link = dev->ifindex;
- if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
+ if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
v->link = dev->iflink;
/* And finish update writing critical data */
write_lock_bh(&mrt_lock);
v->dev = dev;
#ifdef CONFIG_IP_PIMSM
- if (v->flags&VIFF_REGISTER)
+ if (v->flags & VIFF_REGISTER)
mrt->mroute_reg_vif_num = vifi;
#endif
if (vifi+1 > mrt->maxvif)
@@ -781,6 +792,7 @@ static int vif_add(struct net *net, struct mr_table *mrt,
return 0;
}
+/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
__be32 origin,
__be32 mcastgrp)
@@ -788,7 +800,7 @@ static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
int line = MFC_HASH(mcastgrp, origin);
struct mfc_cache *c;
- list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
+ list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
return c;
}
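With the resolved cache now on an RCU-protected list, callers such as the multicast forwarding fast path are expected to bracket the lookup, and the forward that uses the entry, with rcu_read_lock(). A simplified sketch, assuming the caller's usual local variables (net, mrt, skb, local):

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
	if (cache) {
		/* the entry cannot be freed before rcu_read_unlock() */
		ip_mr_forward(net, mrt, skb, cache, local);
	}
	rcu_read_unlock();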
@@ -801,19 +813,20 @@ static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
static struct mfc_cache *ipmr_cache_alloc(void)
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
- if (c == NULL)
- return NULL;
- c->mfc_un.res.minvif = MAXVIFS;
+
+ if (c)
+ c->mfc_un.res.minvif = MAXVIFS;
return c;
}
static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
- if (c == NULL)
- return NULL;
- skb_queue_head_init(&c->mfc_un.unres.unresolved);
- c->mfc_un.unres.expires = jiffies + 10*HZ;
+
+ if (c) {
+ skb_queue_head_init(&c->mfc_un.unres.unresolved);
+ c->mfc_un.unres.expires = jiffies + 10*HZ;
+ }
return c;
}
@@ -827,17 +840,15 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
struct sk_buff *skb;
struct nlmsgerr *e;
- /*
- * Play the pending entries through our router
- */
+ /* Play the pending entries through our router */
while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
if (ip_hdr(skb)->version == 0) {
struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
- nlh->nlmsg_len = (skb_tail_pointer(skb) -
- (u8 *)nlh);
+ nlh->nlmsg_len = skb_tail_pointer(skb) -
+ (u8 *)nlh;
} else {
nlh->nlmsg_type = NLMSG_ERROR;
nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
@@ -848,8 +859,9 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
}
rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
- } else
+ } else {
ip_mr_forward(net, mrt, skb, c, 0);
+ }
}
}
@@ -867,6 +879,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
const int ihl = ip_hdrlen(pkt);
struct igmphdr *igmp;
struct igmpmsg *msg;
+ struct sock *mroute_sk;
int ret;
#ifdef CONFIG_IP_PIMSM
@@ -882,9 +895,9 @@ static int ipmr_cache_report(struct mr_table *mrt,
#ifdef CONFIG_IP_PIMSM
if (assert == IGMPMSG_WHOLEPKT) {
/* Ugly, but we have no choice with this interface.
- Duplicate old header, fix ihl, length etc.
- And all this only to mangle msg->im_msgtype and
- to set msg->im_mbz to "mbz" :-)
+ * Duplicate old header, fix ihl, length etc.
+ * And all this only to mangle msg->im_msgtype and
+ * to set msg->im_mbz to "mbz" :-)
*/
skb_push(skb, sizeof(struct iphdr));
skb_reset_network_header(skb);
@@ -901,39 +914,38 @@ static int ipmr_cache_report(struct mr_table *mrt,
#endif
{
- /*
- * Copy the IP header
- */
+ /* Copy the IP header */
skb->network_header = skb->tail;
skb_put(skb, ihl);
skb_copy_to_linear_data(skb, pkt->data, ihl);
- ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */
+ ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */
msg = (struct igmpmsg *)skb_network_header(skb);
msg->im_vif = vifi;
skb_dst_set(skb, dst_clone(skb_dst(pkt)));
- /*
- * Add our header
- */
+ /* Add our header */
- igmp=(struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
+ igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
igmp->type =
msg->im_msgtype = assert;
- igmp->code = 0;
- ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
+ igmp->code = 0;
+ ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
skb->transport_header = skb->network_header;
}
- if (mrt->mroute_sk == NULL) {
+ rcu_read_lock();
+ mroute_sk = rcu_dereference(mrt->mroute_sk);
+ if (mroute_sk == NULL) {
+ rcu_read_unlock();
kfree_skb(skb);
return -EINVAL;
}
- /*
- * Deliver to mrouted
- */
- ret = sock_queue_rcv_skb(mrt->mroute_sk, skb);
+ /* Deliver to mrouted */
+
+ ret = sock_queue_rcv_skb(mroute_sk, skb);
+ rcu_read_unlock();
if (ret < 0) {
if (net_ratelimit())
printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
@@ -965,9 +977,7 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
}
if (!found) {
- /*
- * Create a new entry if allowable
- */
+ /* Create a new entry if allowable */
if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
(c = ipmr_cache_alloc_unres()) == NULL) {
@@ -977,16 +987,14 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
return -ENOBUFS;
}
- /*
- * Fill in the new cache entry
- */
+ /* Fill in the new cache entry */
+
c->mfc_parent = -1;
c->mfc_origin = iph->saddr;
c->mfc_mcastgrp = iph->daddr;
- /*
- * Reflect first query at mrouted.
- */
+ /* Reflect first query at mrouted. */
+
err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
if (err < 0) {
/* If the report failed throw the cache entry
@@ -1006,10 +1014,9 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
}
- /*
- * See if we can append the packet
- */
- if (c->mfc_un.unres.unresolved.qlen>3) {
+ /* See if we can append the packet */
+
+ if (c->mfc_un.unres.unresolved.qlen > 3) {
kfree_skb(skb);
err = -ENOBUFS;
} else {
@@ -1035,9 +1042,7 @@ static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
- write_lock_bh(&mrt_lock);
- list_del(&c->list);
- write_unlock_bh(&mrt_lock);
+ list_del_rcu(&c->list);
ipmr_cache_free(c);
return 0;
@@ -1090,9 +1095,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
if (!mrtsock)
c->mfc_flags |= MFC_STATIC;
- write_lock_bh(&mrt_lock);
- list_add(&c->list, &mrt->mfc_cache_array[line]);
- write_unlock_bh(&mrt_lock);
+ list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);
/*
* Check to see if we resolved a queued list. If so we
@@ -1130,26 +1133,21 @@ static void mroute_clean_tables(struct mr_table *mrt)
LIST_HEAD(list);
struct mfc_cache *c, *next;
- /*
- * Shut down all active vif entries
- */
+ /* Shut down all active vif entries */
+
for (i = 0; i < mrt->maxvif; i++) {
- if (!(mrt->vif_table[i].flags&VIFF_STATIC))
+ if (!(mrt->vif_table[i].flags & VIFF_STATIC))
vif_delete(mrt, i, 0, &list);
}
unregister_netdevice_many(&list);
- /*
- * Wipe the cache
- */
+ /* Wipe the cache */
+
for (i = 0; i < MFC_LINES; i++) {
list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
- if (c->mfc_flags&MFC_STATIC)
+ if (c->mfc_flags & MFC_STATIC)
continue;
- write_lock_bh(&mrt_lock);
- list_del(&c->list);
- write_unlock_bh(&mrt_lock);
-
+ list_del_rcu(&c->list);
ipmr_cache_free(c);
}
}
@@ -1164,6 +1162,9 @@ static void mroute_clean_tables(struct mr_table *mrt)
}
}
+/* called from ip_ra_control(), before an RCU grace period,
+ * we don't need to call synchronize_rcu() here
+ */
static void mrtsock_destruct(struct sock *sk)
{
struct net *net = sock_net(sk);
@@ -1171,13 +1172,9 @@ static void mrtsock_destruct(struct sock *sk)
rtnl_lock();
ipmr_for_each_table(mrt, net) {
- if (sk == mrt->mroute_sk) {
+ if (sk == rtnl_dereference(mrt->mroute_sk)) {
IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
-
- write_lock_bh(&mrt_lock);
- mrt->mroute_sk = NULL;
- write_unlock_bh(&mrt_lock);
-
+ rcu_assign_pointer(mrt->mroute_sk, NULL);
mroute_clean_tables(mrt);
}
}
@@ -1204,7 +1201,8 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
return -ENOENT;
if (optname != MRT_INIT) {
- if (sk != mrt->mroute_sk && !capable(CAP_NET_ADMIN))
+ if (sk != rcu_dereference_raw(mrt->mroute_sk) &&
+ !capable(CAP_NET_ADMIN))
return -EACCES;
}
@@ -1217,23 +1215,20 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
return -ENOPROTOOPT;
rtnl_lock();
- if (mrt->mroute_sk) {
+ if (rtnl_dereference(mrt->mroute_sk)) {
rtnl_unlock();
return -EADDRINUSE;
}
ret = ip_ra_control(sk, 1, mrtsock_destruct);
if (ret == 0) {
- write_lock_bh(&mrt_lock);
- mrt->mroute_sk = sk;
- write_unlock_bh(&mrt_lock);
-
+ rcu_assign_pointer(mrt->mroute_sk, sk);
IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
}
rtnl_unlock();
return ret;
case MRT_DONE:
- if (sk != mrt->mroute_sk)
+ if (sk != rcu_dereference_raw(mrt->mroute_sk))
return -EACCES;
return ip_ra_control(sk, 0, NULL);
case MRT_ADD_VIF:
@@ -1246,7 +1241,8 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
return -ENFILE;
rtnl_lock();
if (optname == MRT_ADD_VIF) {
- ret = vif_add(net, mrt, &vif, sk == mrt->mroute_sk);
+ ret = vif_add(net, mrt, &vif,
+ sk == rtnl_dereference(mrt->mroute_sk));
} else {
ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
}
@@ -1267,7 +1263,8 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
if (optname == MRT_DEL_MFC)
ret = ipmr_mfc_delete(mrt, &mfc);
else
- ret = ipmr_mfc_add(net, mrt, &mfc, sk == mrt->mroute_sk);
+ ret = ipmr_mfc_add(net, mrt, &mfc,
+ sk == rtnl_dereference(mrt->mroute_sk));
rtnl_unlock();
return ret;
/*
@@ -1276,7 +1273,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
case MRT_ASSERT:
{
int v;
- if (get_user(v,(int __user *)optval))
+ if (get_user(v, (int __user *)optval))
return -EFAULT;
mrt->mroute_do_assert = (v) ? 1 : 0;
return 0;
@@ -1286,7 +1283,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
{
int v;
- if (get_user(v,(int __user *)optval))
+ if (get_user(v, (int __user *)optval))
return -EFAULT;
v = (v) ? 1 : 0;
@@ -1309,14 +1306,16 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
return -EINVAL;
if (get_user(v, (u32 __user *)optval))
return -EFAULT;
- if (sk == mrt->mroute_sk)
- return -EBUSY;
rtnl_lock();
ret = 0;
- if (!ipmr_new_table(net, v))
- ret = -ENOMEM;
- raw_sk(sk)->ipmr_table = v;
+ if (sk == rtnl_dereference(mrt->mroute_sk)) {
+ ret = -EBUSY;
+ } else {
+ if (!ipmr_new_table(net, v))
+ ret = -ENOMEM;
+ raw_sk(sk)->ipmr_table = v;
+ }
rtnl_unlock();
return ret;
}
@@ -1347,9 +1346,9 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
- optname!=MRT_PIM &&
+ optname != MRT_PIM &&
#endif
- optname!=MRT_ASSERT)
+ optname != MRT_ASSERT)
return -ENOPROTOOPT;
if (get_user(olr, optlen))
@@ -1416,19 +1415,19 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
if (copy_from_user(&sr, arg, sizeof(sr)))
return -EFAULT;
- read_lock(&mrt_lock);
+ rcu_read_lock();
c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
if (c) {
sr.pktcnt = c->mfc_un.res.pkt;
sr.bytecnt = c->mfc_un.res.bytes;
sr.wrong_if = c->mfc_un.res.wrong_if;
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
if (copy_to_user(arg, &sr, sizeof(sr)))
return -EFAULT;
return 0;
}
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
return -EADDRNOTAVAIL;
default:
return -ENOIOCTLCMD;
@@ -1465,7 +1464,7 @@ static struct notifier_block ip_mr_notifier = {
};
/*
- * Encapsulate a packet by attaching a valid IPIP header to it.
+ * Encapsulate a packet by attaching a valid IPIP header to it.
* This avoids tunnel drivers and other mess and gives us the speed so
* important for multicast video.
*/
@@ -1480,7 +1479,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
skb_reset_network_header(skb);
iph = ip_hdr(skb);
- iph->version = 4;
+ iph->version = 4;
iph->tos = old_iph->tos;
iph->ttl = old_iph->ttl;
iph->frag_off = 0;
@@ -1498,7 +1497,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
static inline int ipmr_forward_finish(struct sk_buff *skb)
{
- struct ip_options * opt = &(IPCB(skb)->opt);
+ struct ip_options *opt = &(IPCB(skb)->opt);
IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
@@ -1535,22 +1534,34 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
}
#endif
- if (vif->flags&VIFF_TUNNEL) {
- struct flowi fl = { .oif = vif->link,
- .nl_u = { .ip4_u =
- { .daddr = vif->remote,
- .saddr = vif->local,
- .tos = RT_TOS(iph->tos) } },
- .proto = IPPROTO_IPIP };
+ if (vif->flags & VIFF_TUNNEL) {
+ struct flowi fl = {
+ .oif = vif->link,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = vif->remote,
+ .saddr = vif->local,
+ .tos = RT_TOS(iph->tos)
+ }
+ },
+ .proto = IPPROTO_IPIP
+ };
+
if (ip_route_output_key(net, &rt, &fl))
goto out_free;
encap = sizeof(struct iphdr);
} else {
- struct flowi fl = { .oif = vif->link,
- .nl_u = { .ip4_u =
- { .daddr = iph->daddr,
- .tos = RT_TOS(iph->tos) } },
- .proto = IPPROTO_IPIP };
+ struct flowi fl = {
+ .oif = vif->link,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = iph->daddr,
+ .tos = RT_TOS(iph->tos)
+ }
+ },
+ .proto = IPPROTO_IPIP
+ };
+
if (ip_route_output_key(net, &rt, &fl))
goto out_free;
}
@@ -1559,8 +1570,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
/* Do not fragment multicasts. Alas, IPv4 does not
- allow to send ICMP, so that packets will disappear
- to blackhole.
+ * allow to send ICMP, so that packets will disappear
+ * to blackhole.
*/
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
@@ -1583,7 +1594,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
ip_decrease_ttl(ip_hdr(skb));
/* FIXME: forward and output firewalls used to be called here.
- * What do we do with netfilter? -- RR */
+ * What do we do with netfilter? -- RR
+ */
if (vif->flags & VIFF_TUNNEL) {
ip_encap(skb, vif->local, vif->remote);
/* FIXME: extra output firewall step used to be here. --RR */
@@ -1644,15 +1656,15 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
if (skb_rtable(skb)->fl.iif == 0) {
/* It is our own packet, looped back.
- Very complicated situation...
-
- The best workaround until routing daemons will be
- fixed is not to redistribute packet, if it was
- send through wrong interface. It means, that
- multicast applications WILL NOT work for
- (S,G), which have default multicast route pointing
- to wrong oif. In any case, it is not a good
- idea to use multicasting applications on router.
+ * Very complicated situation...
+ *
+ * The best workaround until routing daemons will be
+ * fixed is not to redistribute packet, if it was
+ * send through wrong interface. It means, that
+ * multicast applications WILL NOT work for
+ * (S,G), which have default multicast route pointing
+ * to wrong oif. In any case, it is not a good
+ * idea to use multicasting applications on router.
*/
goto dont_forward;
}
@@ -1662,9 +1674,9 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
if (true_vifi >= 0 && mrt->mroute_do_assert &&
/* pimsm uses asserts, when switching from RPT to SPT,
- so that we cannot check that packet arrived on an oif.
- It is bad, but otherwise we would need to move pretty
- large chunk of pimd to kernel. Ough... --ANK
+ * so that we cannot check that packet arrived on an oif.
+ * It is bad, but otherwise we would need to move pretty
+ * large chunk of pimd to kernel. Ough... --ANK
*/
(mrt->mroute_do_pim ||
cache->mfc_un.res.ttls[true_vifi] < 255) &&
@@ -1682,10 +1694,12 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
/*
* Forward the frame
*/
- for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
+ for (ct = cache->mfc_un.res.maxvif - 1;
+ ct >= cache->mfc_un.res.minvif; ct--) {
if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
if (psend != -1) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
if (skb2)
ipmr_queue_xmit(net, mrt, skb2, cache,
psend);
@@ -1696,6 +1710,7 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
if (psend != -1) {
if (local) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
if (skb2)
ipmr_queue_xmit(net, mrt, skb2, cache, psend);
} else {
@@ -1713,6 +1728,7 @@ dont_forward:
/*
* Multicast packets for forwarding arrive here
+ * Called with rcu_read_lock();
*/
int ip_mr_input(struct sk_buff *skb)
@@ -1724,9 +1740,9 @@ int ip_mr_input(struct sk_buff *skb)
int err;
/* Packet is looped back after forward, it should not be
- forwarded second time, but still can be delivered locally.
+ * forwarded second time, but still can be delivered locally.
*/
- if (IPCB(skb)->flags&IPSKB_FORWARDED)
+ if (IPCB(skb)->flags & IPSKB_FORWARDED)
goto dont_forward;
err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
@@ -1736,28 +1752,28 @@ int ip_mr_input(struct sk_buff *skb)
}
if (!local) {
- if (IPCB(skb)->opt.router_alert) {
- if (ip_call_ra_chain(skb))
- return 0;
- } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP){
- /* IGMPv1 (and broken IGMPv2 implementations sort of
- Cisco IOS <= 11.2(8)) do not put router alert
- option to IGMP packets destined to routable
- groups. It is very bad, because it means
- that we can forward NO IGMP messages.
- */
- read_lock(&mrt_lock);
- if (mrt->mroute_sk) {
- nf_reset(skb);
- raw_rcv(mrt->mroute_sk, skb);
- read_unlock(&mrt_lock);
- return 0;
- }
- read_unlock(&mrt_lock);
+ if (IPCB(skb)->opt.router_alert) {
+ if (ip_call_ra_chain(skb))
+ return 0;
+ } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
+ /* IGMPv1 (and broken IGMPv2 implementations sort of
+ * Cisco IOS <= 11.2(8)) do not put router alert
+ * option to IGMP packets destined to routable
+ * groups. It is very bad, because it means
+ * that we can forward NO IGMP messages.
+ */
+ struct sock *mroute_sk;
+
+ mroute_sk = rcu_dereference(mrt->mroute_sk);
+ if (mroute_sk) {
+ nf_reset(skb);
+ raw_rcv(mroute_sk, skb);
+ return 0;
+ }
}
}
- read_lock(&mrt_lock);
+ /* already under rcu_read_lock() */
cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
/*
@@ -1769,13 +1785,12 @@ int ip_mr_input(struct sk_buff *skb)
if (local) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
ip_local_deliver(skb);
- if (skb2 == NULL) {
- read_unlock(&mrt_lock);
+ if (skb2 == NULL)
return -ENOBUFS;
- }
skb = skb2;
}
+ read_lock(&mrt_lock);
vif = ipmr_find_vif(mrt, skb->dev);
if (vif >= 0) {
int err2 = ipmr_cache_unresolved(mrt, vif, skb);
@@ -1788,8 +1803,8 @@ int ip_mr_input(struct sk_buff *skb)
return -ENODEV;
}
+ read_lock(&mrt_lock);
ip_mr_forward(net, mrt, skb, cache, local);
-
read_unlock(&mrt_lock);
if (local)
@@ -1805,6 +1820,7 @@ dont_forward:
}
#ifdef CONFIG_IP_PIMSM
+/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
unsigned int pimlen)
{
@@ -1813,10 +1829,10 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
/*
- Check that:
- a. packet is really destinted to a multicast group
- b. packet is not a NULL-REGISTER
- c. packet is not truncated
+ * Check that:
+ * a. packet is really sent to a multicast group
+ * b. packet is not a NULL-REGISTER
+ * c. packet is not truncated
*/
if (!ipv4_is_multicast(encap->daddr) ||
encap->tot_len == 0 ||
@@ -1826,26 +1842,23 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
read_lock(&mrt_lock);
if (mrt->mroute_reg_vif_num >= 0)
reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
- if (reg_dev)
- dev_hold(reg_dev);
read_unlock(&mrt_lock);
if (reg_dev == NULL)
return 1;
skb->mac_header = skb->network_header;
- skb_pull(skb, (u8*)encap - skb->data);
+ skb_pull(skb, (u8 *)encap - skb->data);
skb_reset_network_header(skb);
skb->protocol = htons(ETH_P_IP);
- skb->ip_summed = 0;
+ skb->ip_summed = CHECKSUM_NONE;
skb->pkt_type = PACKET_HOST;
skb_tunnel_rx(skb, reg_dev);
netif_rx(skb);
- dev_put(reg_dev);
- return 0;
+ return NET_RX_SUCCESS;
}
#endif
@@ -1854,7 +1867,7 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
* Handle IGMP messages of PIMv1
*/
-int pim_rcv_v1(struct sk_buff * skb)
+int pim_rcv_v1(struct sk_buff *skb)
{
struct igmphdr *pim;
struct net *net = dev_net(skb->dev);
@@ -1881,7 +1894,7 @@ drop:
#endif
#ifdef CONFIG_IP_PIMSM_V2
-static int pim_rcv(struct sk_buff * skb)
+static int pim_rcv(struct sk_buff *skb)
{
struct pimreghdr *pim;
struct net *net = dev_net(skb->dev);
@@ -1891,8 +1904,8 @@ static int pim_rcv(struct sk_buff * skb)
goto drop;
pim = (struct pimreghdr *)skb_transport_header(skb);
- if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
- (pim->flags&PIM_NULL_REGISTER) ||
+ if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
+ (pim->flags & PIM_NULL_REGISTER) ||
(ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
csum_fold(skb_checksum(skb, 0, skb->len, 0))))
goto drop;
@@ -1958,28 +1971,33 @@ int ipmr_get_route(struct net *net,
if (mrt == NULL)
return -ENOENT;
- read_lock(&mrt_lock);
+ rcu_read_lock();
cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst);
if (cache == NULL) {
struct sk_buff *skb2;
struct iphdr *iph;
struct net_device *dev;
- int vif;
+ int vif = -1;
if (nowait) {
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
return -EAGAIN;
}
dev = skb->dev;
- if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) {
+ read_lock(&mrt_lock);
+ if (dev)
+ vif = ipmr_find_vif(mrt, dev);
+ if (vif < 0) {
read_unlock(&mrt_lock);
+ rcu_read_unlock();
return -ENODEV;
}
skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) {
read_unlock(&mrt_lock);
+ rcu_read_unlock();
return -ENOMEM;
}
@@ -1992,13 +2010,16 @@ int ipmr_get_route(struct net *net,
iph->version = 0;
err = ipmr_cache_unresolved(mrt, vif, skb2);
read_unlock(&mrt_lock);
+ rcu_read_unlock();
return err;
}
- if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
+ read_lock(&mrt_lock);
+ if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
cache->mfc_flags |= MFC_NOTIFY;
err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
read_unlock(&mrt_lock);
+ rcu_read_unlock();
return err;
}
@@ -2050,14 +2071,14 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
s_h = cb->args[1];
s_e = cb->args[2];
- read_lock(&mrt_lock);
+ rcu_read_lock();
ipmr_for_each_table(mrt, net) {
if (t < s_t)
goto next_table;
if (t > s_t)
s_h = 0;
for (h = s_h; h < MFC_LINES; h++) {
- list_for_each_entry(mfc, &mrt->mfc_cache_array[h], list) {
+ list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
if (e < s_e)
goto next_entry;
if (ipmr_fill_mroute(mrt, skb,
@@ -2075,7 +2096,7 @@ next_table:
t++;
}
done:
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
cb->args[2] = e;
cb->args[1] = h;
@@ -2086,7 +2107,8 @@ done:
#ifdef CONFIG_PROC_FS
/*
- * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
+ * The /proc interfaces to multicast routing:
+ * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
*/
struct ipmr_vif_iter {
struct seq_net_private p;
@@ -2208,14 +2230,14 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
struct mr_table *mrt = it->mrt;
struct mfc_cache *mfc;
- read_lock(&mrt_lock);
+ rcu_read_lock();
for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
it->cache = &mrt->mfc_cache_array[it->ct];
- list_for_each_entry(mfc, it->cache, list)
+ list_for_each_entry_rcu(mfc, it->cache, list)
if (pos-- == 0)
return mfc;
}
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
spin_lock_bh(&mfc_unres_lock);
it->cache = &mrt->mfc_unres_queue;
@@ -2274,7 +2296,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
/* exhausted cache_array, show unresolved */
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
it->cache = &mrt->mfc_unres_queue;
it->ct = 0;
@@ -2282,7 +2304,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
if (!list_empty(it->cache))
return list_first_entry(it->cache, struct mfc_cache, list);
- end_of_list:
+end_of_list:
spin_unlock_bh(&mfc_unres_lock);
it->cache = NULL;
@@ -2297,7 +2319,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
if (it->cache == &mrt->mfc_unres_queue)
spin_unlock_bh(&mfc_unres_lock);
else if (it->cache == &mrt->mfc_cache_array[it->ct])
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
@@ -2323,7 +2345,7 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
mfc->mfc_un.res.bytes,
mfc->mfc_un.res.wrong_if);
for (n = mfc->mfc_un.res.minvif;
- n < mfc->mfc_un.res.maxvif; n++ ) {
+ n < mfc->mfc_un.res.maxvif; n++) {
if (VIF_EXISTS(mrt, n) &&
mfc->mfc_un.res.ttls[n] < 255)
seq_printf(seq,
@@ -2421,7 +2443,7 @@ int __init ip_mr_init(void)
mrt_cachep = kmem_cache_create("ip_mrt_cache",
sizeof(struct mfc_cache),
- 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
NULL);
if (!mrt_cachep)
return -ENOMEM;
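The ipmr.c changes above follow one RCU conversion recipe throughout: writers update mrt->mroute_sk and the mfc_cache_array chains under RTNL with rcu_assign_pointer()/list_add_rcu(), while readers in the packet path drop read_lock(&mrt_lock) in favour of an RCU read-side critical section. A minimal sketch of that pattern, using hypothetical names (my_sk, my_publish, my_lookup) rather than code from the patch:

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <net/sock.h>

static struct sock __rcu *my_sk;	/* stands in for mrt->mroute_sk */

/* writer side: runs under RTNL, so plain rtnl_dereference() reads suffice */
static void my_publish(struct sock *sk)
{
	ASSERT_RTNL();
	rcu_assign_pointer(my_sk, sk);	/* pairs with rcu_dereference() below */
}

/* reader side: packet path, no mrt_lock needed any more */
static int my_lookup(void)
{
	struct sock *sk;
	int ret = -EINVAL;

	rcu_read_lock();
	sk = rcu_dereference(my_sk);
	if (sk)
		ret = 0;	/* e.g. hand the skb to sock_queue_rcv_skb(sk, skb) */
	rcu_read_unlock();
	return ret;
}

The same split is why mrtsock_destruct() can simply clear the pointer: it is called from ip_ra_control() before an RCU grace period, so no extra synchronize_rcu() is needed.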
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index e8f4f9a57f12..8b642f152468 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -72,7 +72,7 @@ static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
for (i = 0; i < len; i++)
ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i];
- return (ret != 0);
+ return ret != 0;
}
/*
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 3a43cf36db87..1e26a4897655 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -29,6 +29,7 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/net_namespace.h>
#include <net/checksum.h>
+#include <net/ip.h>
#define CLUSTERIP_VERSION "0.8"
@@ -231,24 +232,22 @@ clusterip_hashfn(const struct sk_buff *skb,
{
const struct iphdr *iph = ip_hdr(skb);
unsigned long hashval;
- u_int16_t sport, dport;
- const u_int16_t *ports;
-
- switch (iph->protocol) {
- case IPPROTO_TCP:
- case IPPROTO_UDP:
- case IPPROTO_UDPLITE:
- case IPPROTO_SCTP:
- case IPPROTO_DCCP:
- case IPPROTO_ICMP:
- ports = (const void *)iph+iph->ihl*4;
- sport = ports[0];
- dport = ports[1];
- break;
- default:
+ u_int16_t sport = 0, dport = 0;
+ int poff;
+
+ poff = proto_ports_offset(iph->protocol);
+ if (poff >= 0) {
+ const u_int16_t *ports;
+ u16 _ports[2];
+
+ ports = skb_header_pointer(skb, iph->ihl * 4 + poff, 4, _ports);
+ if (ports) {
+ sport = ports[0];
+ dport = ports[1];
+ }
+ } else {
if (net_ratelimit())
pr_info("unknown protocol %u\n", iph->protocol);
- sport = dport = 0;
}
switch (config->hash_mode) {
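The CLUSTERIP hashing rewrite above leans on proto_ports_offset() (added to include/net/ip.h in the same series) returning the byte offset of the 16-bit port pair inside the transport header, or a negative value for protocols that carry no ports; skb_header_pointer() then copies the ports out so non-linear skb data still hashes correctly. A simplified stand-in for that contract — the upstream table covers more protocols (SCTP, DCCP, UDP-Lite and the AH/ESP SPIs), so treat the exact case list here as an assumption:

#include <linux/in.h>

/* Illustrative stand-in, not the upstream helper. */
static inline int ports_offset_sketch(int protocol)
{
	switch (protocol) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return 0;	/* sport/dport are the first four bytes of the L4 header */
	default:
		return -1;	/* no ports: hashing falls back to sport = dport = 0 */
	}
}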
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index f2d297351405..65699c24411c 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -28,8 +28,7 @@
#include <linux/spinlock.h>
#include <net/protocol.h>
-const struct net_protocol *inet_protos[MAX_INET_PROTOS] ____cacheline_aligned_in_smp;
-static DEFINE_SPINLOCK(inet_proto_lock);
+const struct net_protocol *inet_protos[MAX_INET_PROTOS] __read_mostly;
/*
* Add a protocol handler to the hash tables
@@ -37,20 +36,9 @@ static DEFINE_SPINLOCK(inet_proto_lock);
int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
{
- int hash, ret;
+ int hash = protocol & (MAX_INET_PROTOS - 1);
- hash = protocol & (MAX_INET_PROTOS - 1);
-
- spin_lock_bh(&inet_proto_lock);
- if (inet_protos[hash]) {
- ret = -1;
- } else {
- inet_protos[hash] = prot;
- ret = 0;
- }
- spin_unlock_bh(&inet_proto_lock);
-
- return ret;
+ return !cmpxchg(&inet_protos[hash], NULL, prot) ? 0 : -1;
}
EXPORT_SYMBOL(inet_add_protocol);
@@ -60,18 +48,9 @@ EXPORT_SYMBOL(inet_add_protocol);
int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
{
- int hash, ret;
-
- hash = protocol & (MAX_INET_PROTOS - 1);
+ int ret, hash = protocol & (MAX_INET_PROTOS - 1);
- spin_lock_bh(&inet_proto_lock);
- if (inet_protos[hash] == prot) {
- inet_protos[hash] = NULL;
- ret = 0;
- } else {
- ret = -1;
- }
- spin_unlock_bh(&inet_proto_lock);
+ ret = (cmpxchg(&inet_protos[hash], prot, NULL) == prot) ? 0 : -1;
synchronize_net();
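inet_add_protocol() and inet_del_protocol() can drop inet_proto_lock because each inet_protos[] slot only ever moves NULL -> handler -> NULL, a transition a single cmpxchg() performs atomically. A stand-alone sketch of the idiom (foo_slot, foo_publish and foo_unpublish are illustrative names, not kernel symbols):

#include <linux/kernel.h>
#include <net/protocol.h>

static const struct net_protocol *foo_slot;	/* plays the role of inet_protos[hash] */

static int foo_publish(const struct net_protocol *prot)
{
	/* Succeeds only while the slot is still NULL; a concurrent
	 * registration loses the race and gets -1, as in inet_add_protocol().
	 */
	return !cmpxchg(&foo_slot, NULL, prot) ? 0 : -1;
}

static int foo_unpublish(const struct net_protocol *prot)
{
	/* Only the current owner may clear the slot; callers still need
	 * synchronize_net() before freeing it, as inet_del_protocol() does.
	 */
	return cmpxchg(&foo_slot, prot, NULL) == prot ? 0 : -1;
}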
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 009a7b2aa1ef..1f85ef289895 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -505,7 +505,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
ipc.addr = inet->inet_saddr;
ipc.opt = NULL;
- ipc.shtx.flags = 0;
+ ipc.tx_flags = 0;
ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ac6559cb54f9..04e0df82b88c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1107,6 +1107,7 @@ restart:
* on the route gc list.
*/
+ rt->dst.flags |= DST_NOCACHE;
if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
int err = arp_bind_neighbour(&rt->dst);
if (err) {
@@ -1268,18 +1269,11 @@ skip_hashing:
void rt_bind_peer(struct rtable *rt, int create)
{
- static DEFINE_SPINLOCK(rt_peer_lock);
struct inet_peer *peer;
peer = inet_getpeer(rt->rt_dst, create);
- spin_lock_bh(&rt_peer_lock);
- if (rt->peer == NULL) {
- rt->peer = peer;
- peer = NULL;
- }
- spin_unlock_bh(&rt_peer_lock);
- if (peer)
+ if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
inet_putpeer(peer);
}
@@ -2365,9 +2359,8 @@ static int __mkroute_output(struct rtable **result,
struct rtable *rth;
struct in_device *in_dev;
u32 tos = RT_FL_TOS(oldflp);
- int err = 0;
- if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
+ if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags & IFF_LOOPBACK))
return -EINVAL;
if (fl->fl4_dst == htonl(0xFFFFFFFF))
@@ -2380,11 +2373,12 @@ static int __mkroute_output(struct rtable **result,
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
- /* get work reference to inet device */
- in_dev = in_dev_get(dev_out);
- if (!in_dev)
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(dev_out);
+ if (!in_dev) {
+ rcu_read_unlock();
return -EINVAL;
-
+ }
if (res->type == RTN_BROADCAST) {
flags |= RTCF_BROADCAST | RTCF_LOCAL;
if (res->fi) {
@@ -2392,13 +2386,13 @@ static int __mkroute_output(struct rtable **result,
res->fi = NULL;
}
} else if (res->type == RTN_MULTICAST) {
- flags |= RTCF_MULTICAST|RTCF_LOCAL;
+ flags |= RTCF_MULTICAST | RTCF_LOCAL;
if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
oldflp->proto))
flags &= ~RTCF_LOCAL;
/* If multicast route do not exist use
- default one, but do not gateway in this case.
- Yes, it is hack.
+ * default one, but do not gateway in this case.
+ * Yes, it is hack.
*/
if (res->fi && res->prefixlen < 4) {
fib_info_put(res->fi);
@@ -2409,9 +2403,12 @@ static int __mkroute_output(struct rtable **result,
rth = dst_alloc(&ipv4_dst_ops);
if (!rth) {
- err = -ENOBUFS;
- goto cleanup;
+ rcu_read_unlock();
+ return -ENOBUFS;
}
+ in_dev_hold(in_dev);
+ rcu_read_unlock();
+ rth->idev = in_dev;
atomic_set(&rth->dst.__refcnt, 1);
rth->dst.flags= DST_HOST;
@@ -2432,7 +2429,6 @@ static int __mkroute_output(struct rtable **result,
cache entry */
rth->dst.dev = dev_out;
dev_hold(dev_out);
- rth->idev = in_dev_get(dev_out);
rth->rt_gateway = fl->fl4_dst;
rth->rt_spec_dst= fl->fl4_src;
@@ -2467,13 +2463,8 @@ static int __mkroute_output(struct rtable **result,
rt_set_nexthop(rth, res, 0);
rth->rt_flags = flags;
-
*result = rth;
- cleanup:
- /* release work reference to inet device */
- in_dev_put(in_dev);
-
- return err;
+ return 0;
}
static int ip_mkroute_output(struct rtable **rp,
@@ -2497,6 +2488,7 @@ static int ip_mkroute_output(struct rtable **rp,
/*
* Major route resolver routine.
+ * called with rcu_read_lock();
*/
static int ip_route_output_slow(struct net *net, struct rtable **rp,
@@ -2515,7 +2507,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
.iif = net->loopback_dev->ifindex,
.oif = oldflp->oif };
struct fib_result res;
- unsigned flags = 0;
+ unsigned int flags = 0;
struct net_device *dev_out = NULL;
int free_res = 0;
int err;
@@ -2545,7 +2537,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
(ipv4_is_multicast(oldflp->fl4_dst) ||
oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
- dev_out = ip_dev_find(net, oldflp->fl4_src);
+ dev_out = __ip_dev_find(net, oldflp->fl4_src, false);
if (dev_out == NULL)
goto out;
@@ -2570,26 +2562,21 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
- dev_out = ip_dev_find(net, oldflp->fl4_src);
- if (dev_out == NULL)
+ if (!__ip_dev_find(net, oldflp->fl4_src, false))
goto out;
- dev_put(dev_out);
- dev_out = NULL;
}
}
if (oldflp->oif) {
- dev_out = dev_get_by_index(net, oldflp->oif);
+ dev_out = dev_get_by_index_rcu(net, oldflp->oif);
err = -ENODEV;
if (dev_out == NULL)
goto out;
/* RACE: Check return value of inet_select_addr instead. */
- if (__in_dev_get_rtnl(dev_out) == NULL) {
- dev_put(dev_out);
+ if (rcu_dereference(dev_out->ip_ptr) == NULL)
goto out; /* Wrong error code */
- }
if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
@@ -2612,10 +2599,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
fl.fl4_dst = fl.fl4_src;
if (!fl.fl4_dst)
fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
- if (dev_out)
- dev_put(dev_out);
dev_out = net->loopback_dev;
- dev_hold(dev_out);
fl.oif = net->loopback_dev->ifindex;
res.type = RTN_LOCAL;
flags |= RTCF_LOCAL;
@@ -2649,8 +2633,6 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
res.type = RTN_UNICAST;
goto make_route;
}
- if (dev_out)
- dev_put(dev_out);
err = -ENETUNREACH;
goto out;
}
@@ -2659,10 +2641,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
if (res.type == RTN_LOCAL) {
if (!fl.fl4_src)
fl.fl4_src = fl.fl4_dst;
- if (dev_out)
- dev_put(dev_out);
dev_out = net->loopback_dev;
- dev_hold(dev_out);
fl.oif = dev_out->ifindex;
if (res.fi)
fib_info_put(res.fi);
@@ -2682,28 +2661,23 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
if (!fl.fl4_src)
fl.fl4_src = FIB_RES_PREFSRC(res);
- if (dev_out)
- dev_put(dev_out);
dev_out = FIB_RES_DEV(res);
- dev_hold(dev_out);
fl.oif = dev_out->ifindex;
make_route:
err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
-
if (free_res)
fib_res_put(&res);
- if (dev_out)
- dev_put(dev_out);
out: return err;
}
int __ip_route_output_key(struct net *net, struct rtable **rp,
const struct flowi *flp)
{
- unsigned hash;
+ unsigned int hash;
+ int res;
struct rtable *rth;
if (!rt_caching(net))
@@ -2734,7 +2708,10 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
rcu_read_unlock_bh();
slow_output:
- return ip_route_output_slow(net, rp, flp);
+ rcu_read_lock();
+ res = ip_route_output_slow(net, rp, flp);
+ rcu_read_unlock();
+ return res;
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
@@ -2798,7 +2775,7 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
dst_release(&(*rp)->dst);
*rp = rt;
- return (rt ? 0 : -ENOMEM);
+ return rt ? 0 : -ENOMEM;
}
int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f115ea68a4ef..1664a0590bb8 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2392,7 +2392,12 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
err = tp->af_specific->md5_parse(sk, optval, optlen);
break;
#endif
-
+ case TCP_USER_TIMEOUT:
+ /* Cap the max timeout in ms TCP will retry/retrans
+ * before giving up and aborting (ETIMEDOUT) a connection.
+ */
+ icsk->icsk_user_timeout = msecs_to_jiffies(val);
+ break;
default:
err = -ENOPROTOOPT;
break;
@@ -2611,6 +2616,10 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
case TCP_THIN_DUPACK:
val = tp->thin_dupack;
break;
+
+ case TCP_USER_TIMEOUT:
+ val = jiffies_to_msecs(icsk->icsk_user_timeout);
+ break;
default:
return -ENOPROTOOPT;
}
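From user space the new option is set per socket and expressed in milliseconds; a value of 0 keeps the default retransmission-count based behaviour. A hedged usage sketch — the fallback #define matches the value this series assigns, but check your installed headers:

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_USER_TIMEOUT
#define TCP_USER_TIMEOUT 18	/* value introduced by this series */
#endif

/* Abort the connection with ETIMEDOUT once transmitted data has stayed
 * unacknowledged for roughly timeout_ms milliseconds.
 */
static int set_tcp_user_timeout(int fd, unsigned int timeout_ms)
{
	return setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
			  &timeout_ms, sizeof(timeout_ms));
}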
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b55f60f6fcbe..f6fdd727a23d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -182,7 +182,7 @@ static void tcp_incr_quickack(struct sock *sk)
icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
}
-void tcp_enter_quickack_mode(struct sock *sk)
+static void tcp_enter_quickack_mode(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_incr_quickack(sk);
@@ -805,25 +805,12 @@ void tcp_update_metrics(struct sock *sk)
}
}
-/* Numbers are taken from RFC3390.
- *
- * John Heffner states:
- *
- * The RFC specifies a window of no more than 4380 bytes
- * unless 2*MSS > 4380. Reading the pseudocode in the RFC
- * is a bit misleading because they use a clamp at 4380 bytes
- * rather than use a multiplier in the relevant range.
- */
__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
{
__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
- if (!cwnd) {
- if (tp->mss_cache > 1460)
- cwnd = 2;
- else
- cwnd = (tp->mss_cache > 1095) ? 3 : 4;
- }
+ if (!cwnd)
+ cwnd = rfc3390_bytes_to_packets(tp->mss_cache);
return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}
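Both this hunk and the tcp_select_initial_window() change in tcp_output.c below replace open-coded MSS thresholds with rfc3390_bytes_to_packets() from include/net/tcp.h. To the best of my recollection the helper implements the RFC 3390 table roughly as follows — treat the exact thresholds as an assumption rather than a quote of the header:

/* RFC 3390 initial window in packets as a function of sender MSS:
 * 4 for MSS <= 1095, 3 up to 2190, 2 above that.
 */
static inline u32 rfc3390_bytes_to_packets(const u32 smss)
{
	return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
}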
@@ -2314,7 +2301,7 @@ static inline int tcp_dupack_heuristics(struct tcp_sock *tp)
static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
{
- return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
+ return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
}
static inline int tcp_head_timedout(struct sock *sk)
@@ -3412,8 +3399,8 @@ static void tcp_ack_probe(struct sock *sk)
static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
{
- return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
- inet_csk(sk)->icsk_ca_state != TCP_CA_Open);
+ return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
+ inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
}
static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
@@ -3430,9 +3417,9 @@ static inline int tcp_may_update_window(const struct tcp_sock *tp,
const u32 ack, const u32 ack_seq,
const u32 nwin)
{
- return (after(ack, tp->snd_una) ||
+ return after(ack, tp->snd_una) ||
after(ack_seq, tp->snd_wl1) ||
- (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd));
+ (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
}
/* Update our send window.
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 020766292bb0..a0232f3a358b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2571,7 +2571,6 @@ struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
return tcp_gro_receive(head, skb);
}
-EXPORT_SYMBOL(tcp4_gro_receive);
int tcp4_gro_complete(struct sk_buff *skb)
{
@@ -2584,7 +2583,6 @@ int tcp4_gro_complete(struct sk_buff *skb)
return tcp_gro_complete(skb);
}
-EXPORT_SYMBOL(tcp4_gro_complete);
struct proto tcp_prot = {
.name = "TCP",
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f25b56cb85cb..43cf901d7659 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -55,7 +55,7 @@ static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
return 1;
if (after(end_seq, s_win) && before(seq, e_win))
return 1;
- return (seq == e_win && seq == end_seq);
+ return seq == e_win && seq == end_seq;
}
/*
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index de3bd8458588..05b1ecf36763 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -224,16 +224,10 @@ void tcp_select_initial_window(int __space, __u32 mss,
}
}
- /* Set initial window to value enough for senders,
- * following RFC2414. Senders, not following this RFC,
- * will be satisfied with 2.
- */
+ /* Set initial window to value enough for senders, following RFC5681. */
if (mss > (1 << *rcv_wscale)) {
- int init_cwnd = 4;
- if (mss > 1460 * 3)
- init_cwnd = 2;
- else if (mss > 1460)
- init_cwnd = 3;
+ int init_cwnd = rfc3390_bytes_to_packets(mss);
+
/* when initializing use the value from init_rcv_wnd
* rather than the default from above
*/
@@ -1376,9 +1370,9 @@ static inline int tcp_nagle_check(const struct tcp_sock *tp,
const struct sk_buff *skb,
unsigned mss_now, int nonagle)
{
- return (skb->len < mss_now &&
+ return skb->len < mss_now &&
((nonagle & TCP_NAGLE_CORK) ||
- (!nonagle && tp->packets_out && tcp_minshall_check(tp))));
+ (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
}
/* Return non-zero if the Nagle test allows this packet to be
@@ -1449,10 +1443,10 @@ int tcp_may_send_now(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = tcp_send_head(sk);
- return (skb &&
+ return skb &&
tcp_snd_test(sk, skb, tcp_current_mss(sk),
(tcp_skb_is_last(sk, skb) ?
- tp->nonagle : TCP_NAGLE_PUSH)));
+ tp->nonagle : TCP_NAGLE_PUSH));
}
/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
@@ -2429,6 +2423,12 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
__u8 rcv_wscale;
/* Set this up on the first call only */
req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
+
+	/* limit the window selection if the user enforces a smaller rx buffer */
+ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
+ (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
+ req->window_clamp = tcp_full_space(sk);
+
/* tcp_full_space because it is guaranteed to be the first packet */
tcp_select_initial_window(tcp_full_space(sk),
mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
@@ -2555,6 +2555,11 @@ static void tcp_connect_init(struct sock *sk)
tcp_initialize_rcv_mss(sk);
+	/* limit the window selection if the user enforces a smaller rx buffer */
+ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
+ (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
+ tp->window_clamp = tcp_full_space(sk);
+
tcp_select_initial_window(tcp_full_space(sk),
tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
&tp->rcv_wnd,
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 74c54b30600f..f3c8c6c019ae 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -140,10 +140,10 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
*/
static bool retransmits_timed_out(struct sock *sk,
unsigned int boundary,
+ unsigned int timeout,
bool syn_set)
{
- unsigned int timeout, linear_backoff_thresh;
- unsigned int start_ts;
+ unsigned int linear_backoff_thresh, start_ts;
unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
if (!inet_csk(sk)->icsk_retransmits)
@@ -154,14 +154,15 @@ static bool retransmits_timed_out(struct sock *sk,
else
start_ts = tcp_sk(sk)->retrans_stamp;
- linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
-
- if (boundary <= linear_backoff_thresh)
- timeout = ((2 << boundary) - 1) * rto_base;
- else
- timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
- (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
+ if (likely(timeout == 0)) {
+ linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
+ if (boundary <= linear_backoff_thresh)
+ timeout = ((2 << boundary) - 1) * rto_base;
+ else
+ timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
+ (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
+ }
return (tcp_time_stamp - start_ts) >= timeout;
}
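With timeout == 0 the bound is still derived from exponential backoff exactly as before. As a worked example, take rto_base = TCP_RTO_MIN (200 ms) and boundary = 3, which is below linear_backoff_thresh: timeout = ((2 << 3) - 1) * 200 ms = 15 * 200 ms = 3 s, i.e. the sum 200 + 400 + 800 + 1600 ms of the doubled RTOs. A non-zero timeout, now fed in from icsk_user_timeout, simply overrides that computed bound.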
@@ -178,7 +179,7 @@ static int tcp_write_timeout(struct sock *sk)
retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
syn_set = 1;
} else {
- if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) {
+ if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
/* Black hole detection */
tcp_mtu_probing(icsk, sk);
@@ -191,14 +192,15 @@ static int tcp_write_timeout(struct sock *sk)
retry_until = tcp_orphan_retries(sk, alive);
do_reset = alive ||
- !retransmits_timed_out(sk, retry_until, 0);
+ !retransmits_timed_out(sk, retry_until, 0, 0);
if (tcp_out_of_resources(sk, do_reset))
return 1;
}
}
- if (retransmits_timed_out(sk, retry_until, syn_set)) {
+ if (retransmits_timed_out(sk, retry_until,
+ syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
/* Has it gone just too far? */
tcp_write_err(sk);
return 1;
@@ -440,7 +442,7 @@ out_reset_timer:
icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
- if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0))
+ if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
__sk_dst_reset(sk);
out:;
@@ -560,7 +562,14 @@ static void tcp_keepalive_timer (unsigned long data)
elapsed = keepalive_time_elapsed(tp);
if (elapsed >= keepalive_time_when(tp)) {
- if (icsk->icsk_probes_out >= keepalive_probes(tp)) {
+ /* If the TCP_USER_TIMEOUT option is enabled, use that
+ * to determine when to timeout instead.
+ */
+ if ((icsk->icsk_user_timeout != 0 &&
+ elapsed >= icsk->icsk_user_timeout &&
+ icsk->icsk_probes_out > 0) ||
+ (icsk->icsk_user_timeout == 0 &&
+ icsk->icsk_probes_out >= keepalive_probes(tp))) {
tcp_send_active_reset(sk, GFP_ATOMIC);
tcp_write_err(sk);
goto out;
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index 20151d6a6241..a534dda5456e 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -80,7 +80,7 @@ static void tcp_westwood_init(struct sock *sk)
*/
static inline u32 westwood_do_filter(u32 a, u32 b)
{
- return (((7 * a) + b) >> 3);
+ return ((7 * a) + b) >> 3;
}
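westwood_do_filter() is a fixed-point exponentially weighted moving average that keeps 7/8 of the previous estimate: new = (7*a + b) / 8. For example, an old bandwidth estimate a = 800 with a new sample b = 1600 gives (7*800 + 1600) >> 3 = 900, so one sample moves the estimate by only an eighth of the difference.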
static void westwood_filter(struct westwood *w, u32 delta)
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index 59186ca7808a..9a17bd2a0a37 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -14,8 +14,8 @@
#include <net/protocol.h>
#include <net/xfrm.h>
-static struct xfrm_tunnel *tunnel4_handlers;
-static struct xfrm_tunnel *tunnel64_handlers;
+static struct xfrm_tunnel *tunnel4_handlers __read_mostly;
+static struct xfrm_tunnel *tunnel64_handlers __read_mostly;
static DEFINE_MUTEX(tunnel4_mutex);
static inline struct xfrm_tunnel **fam_handlers(unsigned short family)
@@ -39,7 +39,7 @@ int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
}
handler->next = *pprev;
- *pprev = handler;
+ rcu_assign_pointer(*pprev, handler);
ret = 0;
@@ -73,6 +73,11 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
}
EXPORT_SYMBOL(xfrm4_tunnel_deregister);
+#define for_each_tunnel_rcu(head, handler) \
+ for (handler = rcu_dereference(head); \
+ handler != NULL; \
+ handler = rcu_dereference(handler->next)) \
+
static int tunnel4_rcv(struct sk_buff *skb)
{
struct xfrm_tunnel *handler;
@@ -80,7 +85,7 @@ static int tunnel4_rcv(struct sk_buff *skb)
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto drop;
- for (handler = tunnel4_handlers; handler; handler = handler->next)
+ for_each_tunnel_rcu(tunnel4_handlers, handler)
if (!handler->handler(skb))
return 0;
@@ -99,7 +104,7 @@ static int tunnel64_rcv(struct sk_buff *skb)
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto drop;
- for (handler = tunnel64_handlers; handler; handler = handler->next)
+ for_each_tunnel_rcu(tunnel64_handlers, handler)
if (!handler->handler(skb))
return 0;
@@ -115,7 +120,7 @@ static void tunnel4_err(struct sk_buff *skb, u32 info)
{
struct xfrm_tunnel *handler;
- for (handler = tunnel4_handlers; handler; handler = handler->next)
+ for_each_tunnel_rcu(tunnel4_handlers, handler)
if (!handler->err_handler(skb, info))
break;
}
@@ -125,7 +130,7 @@ static void tunnel64_err(struct sk_buff *skb, u32 info)
{
struct xfrm_tunnel *handler;
- for (handler = tunnel64_handlers; handler; handler = handler->next)
+ for_each_tunnel_rcu(tunnel64_handlers, handler)
if (!handler->err_handler(skb, info))
break;
}
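After this change the tunnel handler chains are an RCU-protected singly linked list: registration still serialises on tunnel4_mutex and publishes the new head with rcu_assign_pointer(), while the receive and error paths walk the chain with for_each_tunnel_rcu() under the RCU read side already held on the IP input path. Roughly what the macro unrolls to for one traversal — a sketch, not the full tunnel4_rcv() (which also frees the skb and sends ICMP on fall-through):

static int tunnel4_rcv_sketch(struct sk_buff *skb)
{
	struct xfrm_tunnel *handler;

	for (handler = rcu_dereference(tunnel4_handlers);
	     handler != NULL;
	     handler = rcu_dereference(handler->next))
		if (!handler->handler(skb))
			return 0;	/* first handler that accepts the packet wins */

	return -1;			/* nobody claimed it */
}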
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index fb23c2e63b52..b3f7e8cf18ac 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -797,7 +797,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
return -EOPNOTSUPP;
ipc.opt = NULL;
- ipc.shtx.flags = 0;
+ ipc.tx_flags = 0;
if (up->pending) {
/*
@@ -845,7 +845,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
ipc.addr = inet->inet_saddr;
ipc.oif = sk->sk_bound_dev_if;
- err = sock_tx_timestamp(msg, sk, &ipc.shtx);
+ err = sock_tx_timestamp(sk, &ipc.tx_flags);
if (err)
return err;
if (msg->msg_controllen) {
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index 41f5982d2087..82806455e859 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -58,14 +58,14 @@ static int xfrm_tunnel_err(struct sk_buff *skb, u32 info)
return -ENOENT;
}
-static struct xfrm_tunnel xfrm_tunnel_handler = {
+static struct xfrm_tunnel xfrm_tunnel_handler __read_mostly = {
.handler = xfrm_tunnel_rcv,
.err_handler = xfrm_tunnel_err,
.priority = 2,
};
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static struct xfrm_tunnel xfrm64_tunnel_handler = {
+static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = {
.handler = xfrm_tunnel_rcv,
.err_handler = xfrm_tunnel_err,
.priority = 2,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 324fac3b6c16..8c88340278f5 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -243,7 +243,7 @@ static inline bool addrconf_qdisc_ok(const struct net_device *dev)
/* Check if a route is valid prefix route */
static inline int addrconf_is_prefix_route(const struct rt6_info *rt)
{
- return ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0);
+ return (rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0;
}
static void addrconf_del_timer(struct inet6_ifaddr *ifp)
@@ -2964,7 +2964,8 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
start sending router solicitations.
*/
- if (ifp->idev->cnf.forwarding == 0 &&
+ if ((ifp->idev->cnf.forwarding == 0 ||
+ ifp->idev->cnf.forwarding == 2) &&
ifp->idev->cnf.rtr_solicits > 0 &&
(dev->flags&IFF_LOOPBACK) == 0 &&
(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 8175f802651b..c8993e5a337c 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -518,10 +518,9 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
static inline int ip6addrlbl_msgsize(void)
{
- return (NLMSG_ALIGN(sizeof(struct ifaddrlblmsg))
+ return NLMSG_ALIGN(sizeof(struct ifaddrlblmsg))
+ nla_total_size(16) /* IFAL_ADDRESS */
- + nla_total_size(4) /* IFAL_LABEL */
- );
+ + nla_total_size(4); /* IFAL_LABEL */
}
static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 56b9bf2516f4..60220985bb80 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -467,7 +467,7 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
sin->sin6_scope_id = sk->sk_bound_dev_if;
*uaddr_len = sizeof(*sin);
- return(0);
+ return 0;
}
EXPORT_SYMBOL(inet6_getname);
@@ -488,7 +488,7 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case SIOCADDRT:
case SIOCDELRT:
- return(ipv6_route_ioctl(net, cmd, (void __user *)arg));
+ return ipv6_route_ioctl(net, cmd, (void __user *)arg);
case SIOCSIFADDR:
return addrconf_add_ifaddr(net, (void __user *) arg);
@@ -502,7 +502,7 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
return sk->sk_prot->ioctl(sk, cmd, arg);
}
/*NOTREACHED*/
- return(0);
+ return 0;
}
EXPORT_SYMBOL(inet6_ioctl);
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index e1caa5d526c2..14ed0a955b56 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -13,12 +13,12 @@ int ipv6_ext_hdr(u8 nexthdr)
/*
* find out if nexthdr is an extension header or a protocol
*/
- return ( (nexthdr == NEXTHDR_HOP) ||
+ return (nexthdr == NEXTHDR_HOP) ||
(nexthdr == NEXTHDR_ROUTING) ||
(nexthdr == NEXTHDR_FRAGMENT) ||
(nexthdr == NEXTHDR_AUTH) ||
(nexthdr == NEXTHDR_NONE) ||
- (nexthdr == NEXTHDR_DEST) );
+ (nexthdr == NEXTHDR_DEST);
}
/*
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 980912ed7a38..99157b4cd56e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -637,7 +637,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
}
mtu -= hlen + sizeof(struct frag_hdr);
- if (skb_has_frags(skb)) {
+ if (skb_has_frag_list(skb)) {
int first_len = skb_pagelen(skb);
struct sk_buff *frag2;
@@ -878,8 +878,8 @@ static inline int ip6_rt_check(struct rt6key *rt_key,
struct in6_addr *fl_addr,
struct in6_addr *addr_cache)
{
- return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
- (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)));
+ return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
+ (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
}
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0fd027f3f47e..8be3c452af90 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -75,7 +75,7 @@ MODULE_LICENSE("GPL");
(addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
(HASH_SIZE - 1))
-static void ip6_tnl_dev_init(struct net_device *dev);
+static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
static int ip6_tnl_net_id __read_mostly;
@@ -83,15 +83,42 @@ struct ip6_tnl_net {
/* the IPv6 tunnel fallback device */
struct net_device *fb_tnl_dev;
/* lists for storing tunnels in use */
- struct ip6_tnl *tnls_r_l[HASH_SIZE];
- struct ip6_tnl *tnls_wc[1];
- struct ip6_tnl **tnls[2];
+ struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
+ struct ip6_tnl __rcu *tnls_wc[1];
+ struct ip6_tnl __rcu **tnls[2];
};
+/* often modified stats are per cpu, others are shared (netdev->stats) */
+struct pcpu_tstats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+};
+
+static struct net_device_stats *ip6_get_stats(struct net_device *dev)
+{
+ struct pcpu_tstats sum = { 0 };
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+
+ sum.rx_packets += tstats->rx_packets;
+ sum.rx_bytes += tstats->rx_bytes;
+ sum.tx_packets += tstats->tx_packets;
+ sum.tx_bytes += tstats->tx_bytes;
+ }
+ dev->stats.rx_packets = sum.rx_packets;
+ dev->stats.rx_bytes = sum.rx_bytes;
+ dev->stats.tx_packets = sum.tx_packets;
+ dev->stats.tx_bytes = sum.tx_bytes;
+ return &dev->stats;
+}
+
/*
- * Locking : hash tables are protected by RCU and a spinlock
+ * Locking : hash tables are protected by RCU and RTNL
*/
-static DEFINE_SPINLOCK(ip6_tnl_lock);
static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
{
@@ -138,8 +165,8 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
{
- unsigned h0 = HASH(remote);
- unsigned h1 = HASH(local);
+ unsigned int h0 = HASH(remote);
+ unsigned int h1 = HASH(local);
struct ip6_tnl *t;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
@@ -167,7 +194,7 @@ ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
* Return: head of IPv6 tunnel list
**/
-static struct ip6_tnl **
+static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, struct ip6_tnl_parm *p)
{
struct in6_addr *remote = &p->raddr;
@@ -190,12 +217,10 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n, struct ip6_tnl_parm *p)
static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
- struct ip6_tnl **tp = ip6_tnl_bucket(ip6n, &t->parms);
+ struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
- spin_lock_bh(&ip6_tnl_lock);
- t->next = *tp;
+	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
rcu_assign_pointer(*tp, t);
- spin_unlock_bh(&ip6_tnl_lock);
}
/**
@@ -206,18 +231,25 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
- struct ip6_tnl **tp;
-
- for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) {
- if (t == *tp) {
- spin_lock_bh(&ip6_tnl_lock);
- *tp = t->next;
- spin_unlock_bh(&ip6_tnl_lock);
+ struct ip6_tnl __rcu **tp;
+ struct ip6_tnl *iter;
+
+ for (tp = ip6_tnl_bucket(ip6n, &t->parms);
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+ if (t == iter) {
+ rcu_assign_pointer(*tp, t->next);
break;
}
}
}
+static void ip6_dev_free(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+ free_netdev(dev);
+}
+
/**
* ip6_tnl_create() - create a new tunnel
* @p: tunnel parameters
@@ -256,7 +288,9 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
t = netdev_priv(dev);
t->parms = *p;
- ip6_tnl_dev_init(dev);
+ err = ip6_tnl_dev_init(dev);
+ if (err < 0)
+ goto failed_free;
if ((err = register_netdevice(dev)) < 0)
goto failed_free;
@@ -266,7 +300,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
return t;
failed_free:
- free_netdev(dev);
+ ip6_dev_free(dev);
failed:
return NULL;
}
@@ -290,10 +324,13 @@ static struct ip6_tnl *ip6_tnl_locate(struct net *net,
{
struct in6_addr *remote = &p->raddr;
struct in6_addr *local = &p->laddr;
+ struct ip6_tnl __rcu **tp;
struct ip6_tnl *t;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
- for (t = *ip6_tnl_bucket(ip6n, p); t; t = t->next) {
+ for (tp = ip6_tnl_bucket(ip6n, p);
+ (t = rtnl_dereference(*tp)) != NULL;
+ tp = &t->next) {
if (ipv6_addr_equal(local, &t->parms.laddr) &&
ipv6_addr_equal(remote, &t->parms.raddr))
return t;
@@ -318,13 +355,10 @@ ip6_tnl_dev_uninit(struct net_device *dev)
struct net *net = dev_net(dev);
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
- if (dev == ip6n->fb_tnl_dev) {
- spin_lock_bh(&ip6_tnl_lock);
- ip6n->tnls_wc[0] = NULL;
- spin_unlock_bh(&ip6_tnl_lock);
- } else {
+ if (dev == ip6n->fb_tnl_dev)
+ rcu_assign_pointer(ip6n->tnls_wc[0], NULL);
+ else
ip6_tnl_unlink(ip6n, t);
- }
ip6_tnl_dst_reset(t);
dev_put(dev);
}
@@ -702,6 +736,8 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
&ipv6h->daddr)) != NULL) {
+ struct pcpu_tstats *tstats;
+
if (t->parms.proto != ipproto && t->parms.proto != 0) {
rcu_read_unlock();
goto discard;
@@ -724,10 +760,17 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
skb->pkt_type = PACKET_HOST;
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
- skb_tunnel_rx(skb, t->dev);
+ tstats = this_cpu_ptr(t->dev->tstats);
+ tstats->rx_packets++;
+ tstats->rx_bytes += skb->len;
+
+ __skb_tunnel_rx(skb, t->dev);
dscp_ecn_decapsulate(t, ipv6h, skb);
- netif_rx(skb);
+
+ if (netif_rx(skb) == NET_RX_DROP)
+ t->dev->stats.rx_dropped++;
+
rcu_read_unlock();
return 0;
}
@@ -934,8 +977,10 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
err = ip6_local_out(skb);
if (net_xmit_eval(err) == 0) {
- stats->tx_bytes += pkt_len;
- stats->tx_packets++;
+ struct pcpu_tstats *tstats = this_cpu_ptr(t->dev->tstats);
+
+ tstats->tx_bytes += pkt_len;
+ tstats->tx_packets++;
} else {
stats->tx_errors++;
stats->tx_aborted_errors++;
@@ -1300,12 +1345,14 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
static const struct net_device_ops ip6_tnl_netdev_ops = {
- .ndo_uninit = ip6_tnl_dev_uninit,
+ .ndo_uninit = ip6_tnl_dev_uninit,
.ndo_start_xmit = ip6_tnl_xmit,
- .ndo_do_ioctl = ip6_tnl_ioctl,
+ .ndo_do_ioctl = ip6_tnl_ioctl,
.ndo_change_mtu = ip6_tnl_change_mtu,
+ .ndo_get_stats = ip6_get_stats,
};
+
/**
* ip6_tnl_dev_setup - setup virtual tunnel device
* @dev: virtual device associated with tunnel
@@ -1317,7 +1364,7 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
static void ip6_tnl_dev_setup(struct net_device *dev)
{
dev->netdev_ops = &ip6_tnl_netdev_ops;
- dev->destructor = free_netdev;
+ dev->destructor = ip6_dev_free;
dev->type = ARPHRD_TUNNEL6;
dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
@@ -1333,12 +1380,17 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
* @dev: virtual device associated with tunnel
**/
-static inline void
+static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
+
t->dev = dev;
strcpy(t->parms.name, dev->name);
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+ return 0;
}
/**
@@ -1346,11 +1398,15 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
* @dev: virtual device associated with tunnel
**/
-static void ip6_tnl_dev_init(struct net_device *dev)
+static int ip6_tnl_dev_init(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- ip6_tnl_dev_init_gen(dev);
+ int err = ip6_tnl_dev_init_gen(dev);
+
+ if (err)
+ return err;
ip6_tnl_link_config(t);
+ return 0;
}
/**
@@ -1360,25 +1416,29 @@ static void ip6_tnl_dev_init(struct net_device *dev)
* Return: 0
**/
-static void __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
+static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
struct net *net = dev_net(dev);
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+ int err = ip6_tnl_dev_init_gen(dev);
+
+ if (err)
+ return err;
- ip6_tnl_dev_init_gen(dev);
t->parms.proto = IPPROTO_IPV6;
dev_hold(dev);
- ip6n->tnls_wc[0] = t;
+ rcu_assign_pointer(ip6n->tnls_wc[0], t);
+ return 0;
}
-static struct xfrm6_tunnel ip4ip6_handler = {
+static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
.handler = ip4ip6_rcv,
.err_handler = ip4ip6_err,
.priority = 1,
};
-static struct xfrm6_tunnel ip6ip6_handler = {
+static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
.handler = ip6ip6_rcv,
.err_handler = ip6ip6_err,
.priority = 1,
@@ -1391,14 +1451,14 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
LIST_HEAD(list);
for (h = 0; h < HASH_SIZE; h++) {
- t = ip6n->tnls_r_l[h];
+ t = rtnl_dereference(ip6n->tnls_r_l[h]);
while (t != NULL) {
unregister_netdevice_queue(t->dev, &list);
- t = t->next;
+ t = rtnl_dereference(t->next);
}
}
- t = ip6n->tnls_wc[0];
+ t = rtnl_dereference(ip6n->tnls_wc[0]);
unregister_netdevice_queue(t->dev, &list);
unregister_netdevice_many(&list);
}
@@ -1419,7 +1479,9 @@ static int __net_init ip6_tnl_init_net(struct net *net)
goto err_alloc_dev;
dev_net_set(ip6n->fb_tnl_dev, net);
- ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
+ err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
+ if (err < 0)
+ goto err_register;
err = register_netdev(ip6n->fb_tnl_dev);
if (err < 0)
@@ -1427,7 +1489,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
return 0;
err_register:
- free_netdev(ip6n->fb_tnl_dev);
+ ip6_dev_free(ip6n->fb_tnl_dev);
err_alloc_dev:
return err;
}
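The ip6_tunnel.c conversion shows the per-CPU device statistics pattern end to end: allocate dev->tstats with alloc_percpu() at init time, bump the current CPU's counters via this_cpu_ptr() on the RX/TX hot paths with no locking or cache-line bouncing, fold everything into dev->stats in ->ndo_get_stats (ip6_get_stats() above), and release the per-CPU memory in the device destructor. A condensed sketch of that lifetime, reusing the pcpu_tstats layout the patch defines:

#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>

static int sketch_dev_init(struct net_device *dev)
{
	dev->tstats = alloc_percpu(struct pcpu_tstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void sketch_count_rx(struct net_device *dev, const struct sk_buff *skb)
{
	struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);

	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
}

/* paired with free_netdev(), exactly like ip6_dev_free() above */
static void sketch_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}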
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 66078dad7fe8..2640c9be589d 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -666,7 +666,9 @@ static int pim6_rcv(struct sk_buff *skb)
skb_tunnel_rx(skb, reg_dev);
- netif_rx(skb);
+ if (netif_rx(skb) == NET_RX_DROP)
+ reg_dev->stats.rx_dropped++;
+
dev_put(reg_dev);
return 0;
drop:
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 58841c4ae947..b3dd844cd34f 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -228,12 +228,12 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur,
do {
cur = ((void *)cur) + (cur->nd_opt_len << 3);
} while(cur < end && cur->nd_opt_type != type);
- return (cur <= end && cur->nd_opt_type == type ? cur : NULL);
+ return cur <= end && cur->nd_opt_type == type ? cur : NULL;
}
static inline int ndisc_is_useropt(struct nd_opt_hdr *opt)
{
- return (opt->nd_opt_type == ND_OPT_RDNSS);
+ return opt->nd_opt_type == ND_OPT_RDNSS;
}
static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
@@ -244,7 +244,7 @@ static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
do {
cur = ((void *)cur) + (cur->nd_opt_len << 3);
} while(cur < end && !ndisc_is_useropt(cur));
- return (cur <= end && ndisc_is_useropt(cur) ? cur : NULL);
+ return cur <= end && ndisc_is_useropt(cur) ? cur : NULL;
}
static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
@@ -319,7 +319,7 @@ static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p,
int prepad = ndisc_addr_option_pad(dev->type);
if (lladdrlen != NDISC_OPT_SPACE(dev->addr_len + prepad))
return NULL;
- return (lladdr + prepad);
+ return lladdr + prepad;
}
int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int dir)
@@ -1105,6 +1105,18 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_ND_USEROPT, err);
}
+static inline int accept_ra(struct inet6_dev *in6_dev)
+{
+ /*
+ * If forwarding is enabled, RA are not accepted unless the special
+ * hybrid mode (accept_ra=2) is enabled.
+ */
+ if (in6_dev->cnf.forwarding && in6_dev->cnf.accept_ra < 2)
+ return 0;
+
+ return in6_dev->cnf.accept_ra;
+}
+
static void ndisc_router_discovery(struct sk_buff *skb)
{
struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb);
@@ -1158,8 +1170,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
return;
}
- /* skip route and link configuration on routers */
- if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra)
+ if (!accept_ra(in6_dev))
goto skip_linkparms;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
@@ -1309,8 +1320,7 @@ skip_linkparms:
NEIGH_UPDATE_F_ISROUTER);
}
- /* skip route and link configuration on routers */
- if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra)
+ if (!accept_ra(in6_dev))
goto out;
#ifdef CONFIG_IPV6_ROUTE_INFO
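(Editor's sketch, not part of the patch: the accept_ra() helper above encodes the usual 0/1/2 sysctl meaning — 0 ignore RAs, 1 accept only when not forwarding, 2 hybrid mode that accepts RAs even on a forwarding interface. A minimal userspace analogue, with illustrative names:)

#include <stdio.h>

static int accept_ra(int forwarding, int accept_ra_sysctl)
{
        /* a forwarding interface only honours RAs in hybrid mode (2) */
        if (forwarding && accept_ra_sysctl < 2)
                return 0;
        return accept_ra_sysctl;
}

int main(void)
{
        printf("host,   accept_ra=1 -> %d\n", accept_ra(0, 1)); /* 1: process RA */
        printf("router, accept_ra=1 -> %d\n", accept_ra(1, 1)); /* 0: skip RA    */
        printf("router, accept_ra=2 -> %d\n", accept_ra(1, 2)); /* 2: hybrid     */
        return 0;
}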
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 8e754be92c24..6b331e9b5706 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -82,13 +82,13 @@ EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
int
ip6t_ext_hdr(u8 nexthdr)
{
- return ( (nexthdr == IPPROTO_HOPOPTS) ||
- (nexthdr == IPPROTO_ROUTING) ||
- (nexthdr == IPPROTO_FRAGMENT) ||
- (nexthdr == IPPROTO_ESP) ||
- (nexthdr == IPPROTO_AH) ||
- (nexthdr == IPPROTO_NONE) ||
- (nexthdr == IPPROTO_DSTOPTS) );
+ return (nexthdr == IPPROTO_HOPOPTS) ||
+ (nexthdr == IPPROTO_ROUTING) ||
+ (nexthdr == IPPROTO_FRAGMENT) ||
+ (nexthdr == IPPROTO_ESP) ||
+ (nexthdr == IPPROTO_AH) ||
+ (nexthdr == IPPROTO_NONE) ||
+ (nexthdr == IPPROTO_DSTOPTS);
}
/* Returns whether matches rule or not. */
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 578f3c1a16db..138a8b362706 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -363,7 +363,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
/* If the first fragment is fragmented itself, we split
* it to two chunks: the first with data and paged part
* and the second, holding only fragments. */
- if (skb_has_frags(head)) {
+ if (skb_has_frag_list(head)) {
struct sk_buff *clone;
int i, plen = 0;
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 1fa3468f0f32..9bb936ae2452 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -25,28 +25,14 @@
#include <linux/spinlock.h>
#include <net/protocol.h>
-const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS];
-static DEFINE_SPINLOCK(inet6_proto_lock);
-
+const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS] __read_mostly;
int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
{
- int ret, hash = protocol & (MAX_INET_PROTOS - 1);
-
- spin_lock_bh(&inet6_proto_lock);
-
- if (inet6_protos[hash]) {
- ret = -1;
- } else {
- inet6_protos[hash] = prot;
- ret = 0;
- }
-
- spin_unlock_bh(&inet6_proto_lock);
+ int hash = protocol & (MAX_INET_PROTOS - 1);
- return ret;
+ return !cmpxchg(&inet6_protos[hash], NULL, prot) ? 0 : -1;
}
-
EXPORT_SYMBOL(inet6_add_protocol);
/*
@@ -57,20 +43,10 @@ int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol
{
int ret, hash = protocol & (MAX_INET_PROTOS - 1);
- spin_lock_bh(&inet6_proto_lock);
-
- if (inet6_protos[hash] != prot) {
- ret = -1;
- } else {
- inet6_protos[hash] = NULL;
- ret = 0;
- }
-
- spin_unlock_bh(&inet6_proto_lock);
+ ret = (cmpxchg(&inet6_protos[hash], prot, NULL) == prot) ? 0 : -1;
synchronize_net();
return ret;
}
-
EXPORT_SYMBOL(inet6_del_protocol);
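(Editor's sketch, not part of the patch: the spinlock around protocol registration is replaced by a single cmpxchg() on the handler slot — add succeeds only if the slot was NULL, delete only if it still holds the expected handler, and synchronize_net() then waits out in-flight readers. A minimal userspace analogue of that slot discipline using C11 atomics; names are illustrative:)

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct proto_ops { const char *name; };

static _Atomic(struct proto_ops *) slot;        /* one hash bucket */

static int add_protocol(struct proto_ops *p)
{
        struct proto_ops *expected = NULL;
        /* succeeds only when the slot is still empty, as in inet6_add_protocol() */
        return atomic_compare_exchange_strong(&slot, &expected, p) ? 0 : -1;
}

static int del_protocol(struct proto_ops *p)
{
        struct proto_ops *expected = p;
        /* succeeds only when the slot still holds this handler */
        return atomic_compare_exchange_strong(&slot, &expected, NULL) ? 0 : -1;
}

int main(void)
{
        struct proto_ops a = { "a" }, b = { "b" };

        printf("add a: %d\n", add_protocol(&a));  /* 0  */
        printf("add b: %d\n", add_protocol(&b));  /* -1 */
        printf("del b: %d\n", del_protocol(&b));  /* -1 */
        printf("del a: %d\n", del_protocol(&a));  /* 0  */
        return 0;
}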
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index e677937a07fc..45e6efb7f171 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -764,7 +764,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
return -EINVAL;
if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
- return(-EAFNOSUPPORT);
+ return -EAFNOSUPPORT;
/* port is the proto value [0..255] carried in nexthdr */
proto = ntohs(sin6->sin6_port);
@@ -772,10 +772,10 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
if (!proto)
proto = inet->inet_num;
else if (proto != inet->inet_num)
- return(-EINVAL);
+ return -EINVAL;
if (proto > 255)
- return(-EINVAL);
+ return -EINVAL;
daddr = &sin6->sin6_addr;
if (np->sndflow) {
@@ -985,7 +985,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
/* You may get strange result with a positive odd offset;
RFC2292bis agrees with me. */
if (val > 0 && (val&1))
- return(-EINVAL);
+ return -EINVAL;
if (val < 0) {
rp->checksum = 0;
} else {
@@ -997,7 +997,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
break;
default:
- return(-ENOPROTOOPT);
+ return -ENOPROTOOPT;
}
}
@@ -1190,7 +1190,7 @@ static int rawv6_init_sk(struct sock *sk)
default:
break;
}
- return(0);
+ return 0;
}
struct proto rawv6_prot = {
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 64cfef1b0a4c..c7ba3149633f 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -458,7 +458,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
/* If the first fragment is fragmented itself, we split
* it to two chunks: the first with data and paged part
* and the second, holding only fragments. */
- if (skb_has_frags(head)) {
+ if (skb_has_frag_list(head)) {
struct sk_buff *clone;
int i, plen = 0;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a275c6e1e25c..17e217933885 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -217,14 +217,14 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
static __inline__ int rt6_check_expired(const struct rt6_info *rt)
{
- return (rt->rt6i_flags & RTF_EXPIRES &&
- time_after(jiffies, rt->rt6i_expires));
+ return (rt->rt6i_flags & RTF_EXPIRES) &&
+ time_after(jiffies, rt->rt6i_expires);
}
static inline int rt6_need_strict(struct in6_addr *daddr)
{
- return (ipv6_addr_type(daddr) &
- (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK));
+ return ipv6_addr_type(daddr) &
+ (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
}
/*
@@ -440,7 +440,7 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
__func__, match);
net = dev_net(rt0->rt6i_dev);
- return (match ? match : net->ipv6.ip6_null_entry);
+ return match ? match : net->ipv6.ip6_null_entry;
}
#ifdef CONFIG_IPV6_ROUTE_INFO
@@ -859,7 +859,7 @@ int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl
dst_release(*dstp);
*dstp = new;
- return (new ? 0 : -ENOMEM);
+ return new ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(ip6_dst_blackhole);
@@ -1070,7 +1070,7 @@ static int ip6_dst_gc(struct dst_ops *ops)
net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
out:
net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
- return (atomic_read(&ops->entries) > rt_max_size);
+ return atomic_read(&ops->entries) > rt_max_size;
}
/* Clean host part of a prefix. Not necessary in radix tree,
@@ -1169,6 +1169,8 @@ int ip6_route_add(struct fib6_config *cfg)
if (addr_type & IPV6_ADDR_MULTICAST)
rt->dst.input = ip6_mc_input;
+ else if (cfg->fc_flags & RTF_LOCAL)
+ rt->dst.input = ip6_input;
else
rt->dst.input = ip6_forward;
@@ -1190,7 +1192,8 @@ int ip6_route_add(struct fib6_config *cfg)
they would result in kernel looping; promote them to reject routes
*/
if ((cfg->fc_flags & RTF_REJECT) ||
- (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) {
+ (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK)
+ && !(cfg->fc_flags&RTF_LOCAL))) {
/* hold loopback dev/idev if we haven't done so. */
if (dev != net->loopback_dev) {
if (dev) {
@@ -2102,6 +2105,9 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
if (rtm->rtm_type == RTN_UNREACHABLE)
cfg->fc_flags |= RTF_REJECT;
+ if (rtm->rtm_type == RTN_LOCAL)
+ cfg->fc_flags |= RTF_LOCAL;
+
cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
cfg->fc_nlinfo.nlh = nlh;
cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
@@ -2222,6 +2228,8 @@ static int rt6_fill_node(struct net *net,
NLA_PUT_U32(skb, RTA_TABLE, table);
if (rt->rt6i_flags&RTF_REJECT)
rtm->rtm_type = RTN_UNREACHABLE;
+ else if (rt->rt6i_flags&RTF_LOCAL)
+ rtm->rtm_type = RTN_LOCAL;
else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
rtm->rtm_type = RTN_LOCAL;
else
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 4699cd3c3118..d7701782b639 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -63,36 +63,63 @@
#define HASH_SIZE 16
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
-static void ipip6_tunnel_init(struct net_device *dev);
+static int ipip6_tunnel_init(struct net_device *dev);
static void ipip6_tunnel_setup(struct net_device *dev);
+static void ipip6_dev_free(struct net_device *dev);
static int sit_net_id __read_mostly;
struct sit_net {
- struct ip_tunnel *tunnels_r_l[HASH_SIZE];
- struct ip_tunnel *tunnels_r[HASH_SIZE];
- struct ip_tunnel *tunnels_l[HASH_SIZE];
- struct ip_tunnel *tunnels_wc[1];
- struct ip_tunnel **tunnels[4];
+ struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_wc[1];
+ struct ip_tunnel __rcu **tunnels[4];
struct net_device *fb_tunnel_dev;
};
/*
- * Locking : hash tables are protected by RCU and a spinlock
+ * Locking : hash tables are protected by RCU and RTNL
*/
-static DEFINE_SPINLOCK(ipip6_lock);
#define for_each_ip_tunnel_rcu(start) \
for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+/* often modified stats are per cpu, other are shared (netdev->stats) */
+struct pcpu_tstats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+};
+
+static struct net_device_stats *ipip6_get_stats(struct net_device *dev)
+{
+ struct pcpu_tstats sum = { 0 };
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+
+ sum.rx_packets += tstats->rx_packets;
+ sum.rx_bytes += tstats->rx_bytes;
+ sum.tx_packets += tstats->tx_packets;
+ sum.tx_bytes += tstats->tx_bytes;
+ }
+ dev->stats.rx_packets = sum.rx_packets;
+ dev->stats.rx_bytes = sum.rx_bytes;
+ dev->stats.tx_packets = sum.tx_packets;
+ dev->stats.tx_bytes = sum.tx_bytes;
+ return &dev->stats;
+}
/*
* Must be invoked with rcu_read_lock
*/
static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
struct net_device *dev, __be32 remote, __be32 local)
{
- unsigned h0 = HASH(remote);
- unsigned h1 = HASH(local);
+ unsigned int h0 = HASH(remote);
+ unsigned int h1 = HASH(local);
struct ip_tunnel *t;
struct sit_net *sitn = net_generic(net, sit_net_id);
@@ -121,12 +148,12 @@ static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
return NULL;
}
-static struct ip_tunnel **__ipip6_bucket(struct sit_net *sitn,
+static struct ip_tunnel __rcu **__ipip6_bucket(struct sit_net *sitn,
struct ip_tunnel_parm *parms)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
- unsigned h = 0;
+ unsigned int h = 0;
int prio = 0;
if (remote) {
@@ -140,7 +167,7 @@ static struct ip_tunnel **__ipip6_bucket(struct sit_net *sitn,
return &sitn->tunnels[prio][h];
}
-static inline struct ip_tunnel **ipip6_bucket(struct sit_net *sitn,
+static inline struct ip_tunnel __rcu **ipip6_bucket(struct sit_net *sitn,
struct ip_tunnel *t)
{
return __ipip6_bucket(sitn, &t->parms);
@@ -148,13 +175,14 @@ static inline struct ip_tunnel **ipip6_bucket(struct sit_net *sitn,
static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
{
- struct ip_tunnel **tp;
-
- for (tp = ipip6_bucket(sitn, t); *tp; tp = &(*tp)->next) {
- if (t == *tp) {
- spin_lock_bh(&ipip6_lock);
- *tp = t->next;
- spin_unlock_bh(&ipip6_lock);
+ struct ip_tunnel __rcu **tp;
+ struct ip_tunnel *iter;
+
+ for (tp = ipip6_bucket(sitn, t);
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+ if (t == iter) {
+ rcu_assign_pointer(*tp, t->next);
break;
}
}
@@ -162,12 +190,10 @@ static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
{
- struct ip_tunnel **tp = ipip6_bucket(sitn, t);
+ struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);
- spin_lock_bh(&ipip6_lock);
- t->next = *tp;
+ rcu_assign_pointer(t->next, rtnl_dereference(*tp));
rcu_assign_pointer(*tp, t);
- spin_unlock_bh(&ipip6_lock);
}
static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
@@ -187,17 +213,20 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
#endif
}
-static struct ip_tunnel * ipip6_tunnel_locate(struct net *net,
+static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
struct ip_tunnel_parm *parms, int create)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
- struct ip_tunnel *t, **tp, *nt;
+ struct ip_tunnel *t, *nt;
+ struct ip_tunnel __rcu **tp;
struct net_device *dev;
char name[IFNAMSIZ];
struct sit_net *sitn = net_generic(net, sit_net_id);
- for (tp = __ipip6_bucket(sitn, parms); (t = *tp) != NULL; tp = &t->next) {
+ for (tp = __ipip6_bucket(sitn, parms);
+ (t = rtnl_dereference(*tp)) != NULL;
+ tp = &t->next) {
if (local == t->parms.iph.saddr &&
remote == t->parms.iph.daddr &&
parms->link == t->parms.link) {
@@ -213,7 +242,7 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct net *net,
if (parms->name[0])
strlcpy(name, parms->name, IFNAMSIZ);
else
- sprintf(name, "sit%%d");
+ strcpy(name, "sit%d");
dev = alloc_netdev(sizeof(*t), name, ipip6_tunnel_setup);
if (dev == NULL)
@@ -229,7 +258,8 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct net *net,
nt = netdev_priv(dev);
nt->parms = *parms;
- ipip6_tunnel_init(dev);
+ if (ipip6_tunnel_init(dev) < 0)
+ goto failed_free;
ipip6_tunnel_clone_6rd(dev, sitn);
if (parms->i_flags & SIT_ISATAP)
@@ -244,7 +274,7 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct net *net,
return nt;
failed_free:
- free_netdev(dev);
+ ipip6_dev_free(dev);
failed:
return NULL;
}
@@ -340,7 +370,7 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
ASSERT_RTNL();
- for (p = t->prl; p; p = p->next) {
+ for (p = rtnl_dereference(t->prl); p; p = rtnl_dereference(p->next)) {
if (p->addr == a->addr) {
if (chg) {
p->flags = a->flags;
@@ -451,15 +481,12 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
struct sit_net *sitn = net_generic(net, sit_net_id);
if (dev == sitn->fb_tunnel_dev) {
- spin_lock_bh(&ipip6_lock);
- sitn->tunnels_wc[0] = NULL;
- spin_unlock_bh(&ipip6_lock);
- dev_put(dev);
+ rcu_assign_pointer(sitn->tunnels_wc[0], NULL);
} else {
ipip6_tunnel_unlink(sitn, netdev_priv(dev));
ipip6_tunnel_del_prl(netdev_priv(dev), NULL);
- dev_put(dev);
}
+ dev_put(dev);
}
@@ -548,6 +575,8 @@ static int ipip6_rcv(struct sk_buff *skb)
tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
iph->saddr, iph->daddr);
if (tunnel != NULL) {
+ struct pcpu_tstats *tstats;
+
secpath_reset(skb);
skb->mac_header = skb->network_header;
skb_reset_network_header(skb);
@@ -563,10 +592,17 @@ static int ipip6_rcv(struct sk_buff *skb)
return 0;
}
- skb_tunnel_rx(skb, tunnel->dev);
+ tstats = this_cpu_ptr(tunnel->dev->tstats);
+ tstats->rx_packets++;
+ tstats->rx_bytes += skb->len;
+
+ __skb_tunnel_rx(skb, tunnel->dev);
ipip6_ecn_decapsulate(iph, skb);
- netif_rx(skb);
+
+ if (netif_rx(skb) == NET_RX_DROP)
+ tunnel->dev->stats.rx_dropped++;
+
rcu_read_unlock();
return 0;
}
@@ -590,7 +626,7 @@ __be32 try_6rd(struct in6_addr *v6dst, struct ip_tunnel *tunnel)
#ifdef CONFIG_IPV6_SIT_6RD
if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix,
tunnel->ip6rd.prefixlen)) {
- unsigned pbw0, pbi0;
+ unsigned int pbw0, pbi0;
int pbi1;
u32 d;
@@ -625,14 +661,13 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+ struct pcpu_tstats *tstats;
struct iphdr *tiph = &tunnel->parms.iph;
struct ipv6hdr *iph6 = ipv6_hdr(skb);
u8 tos = tunnel->parms.iph.tos;
__be16 df = tiph->frag_off;
struct rtable *rt; /* Route to the other host */
- struct net_device *tdev; /* Device to other host */
+ struct net_device *tdev; /* Device to other host */
struct iphdr *iph; /* Our new IP header */
unsigned int max_headroom; /* The extra header space needed */
__be32 dst = tiph->daddr;
@@ -703,20 +738,20 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
.oif = tunnel->parms.link,
.proto = IPPROTO_IPV6 };
if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
- stats->tx_carrier_errors++;
+ dev->stats.tx_carrier_errors++;
goto tx_error_icmp;
}
}
if (rt->rt_type != RTN_UNICAST) {
ip_rt_put(rt);
- stats->tx_carrier_errors++;
+ dev->stats.tx_carrier_errors++;
goto tx_error_icmp;
}
tdev = rt->dst.dev;
if (tdev == dev) {
ip_rt_put(rt);
- stats->collisions++;
+ dev->stats.collisions++;
goto tx_error;
}
@@ -724,7 +759,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
if (mtu < 68) {
- stats->collisions++;
+ dev->stats.collisions++;
ip_rt_put(rt);
goto tx_error;
}
@@ -763,7 +798,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
if (!new_skb) {
ip_rt_put(rt);
- txq->tx_dropped++;
+ dev->stats.tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -799,14 +834,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
iph->ttl = iph6->hop_limit;
nf_reset(skb);
-
- IPTUNNEL_XMIT();
+ tstats = this_cpu_ptr(dev->tstats);
+ __IPTUNNEL_XMIT(tstats, &dev->stats);
return NETDEV_TX_OK;
tx_error_icmp:
dst_link_failure(skb);
tx_error:
- stats->tx_errors++;
+ dev->stats.tx_errors++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1083,12 +1118,19 @@ static const struct net_device_ops ipip6_netdev_ops = {
.ndo_start_xmit = ipip6_tunnel_xmit,
.ndo_do_ioctl = ipip6_tunnel_ioctl,
.ndo_change_mtu = ipip6_tunnel_change_mtu,
+ .ndo_get_stats = ipip6_get_stats,
};
+static void ipip6_dev_free(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+ free_netdev(dev);
+}
+
static void ipip6_tunnel_setup(struct net_device *dev)
{
dev->netdev_ops = &ipip6_netdev_ops;
- dev->destructor = free_netdev;
+ dev->destructor = ipip6_dev_free;
dev->type = ARPHRD_SIT;
dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
@@ -1098,9 +1140,10 @@ static void ipip6_tunnel_setup(struct net_device *dev)
dev->iflink = 0;
dev->addr_len = 4;
dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->features |= NETIF_F_LLTX;
}
-static void ipip6_tunnel_init(struct net_device *dev)
+static int ipip6_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
@@ -1111,9 +1154,14 @@ static void ipip6_tunnel_init(struct net_device *dev)
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
ipip6_tunnel_bind_dev(dev);
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ return 0;
}
-static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
+static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
@@ -1128,11 +1176,15 @@ static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
iph->ihl = 5;
iph->ttl = 64;
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
dev_hold(dev);
sitn->tunnels_wc[0] = tunnel;
+ return 0;
}
-static struct xfrm_tunnel sit_handler = {
+static struct xfrm_tunnel sit_handler __read_mostly = {
.handler = ipip6_rcv,
.err_handler = ipip6_err,
.priority = 1,
@@ -1173,7 +1225,10 @@ static int __net_init sit_init_net(struct net *net)
}
dev_net_set(sitn->fb_tunnel_dev, net);
- ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
+ err = ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
+ if (err)
+ goto err_dev_free;
+
ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
if ((err = register_netdev(sitn->fb_tunnel_dev)))
@@ -1183,7 +1238,8 @@ static int __net_init sit_init_net(struct net *net)
err_reg_dev:
dev_put(sitn->fb_tunnel_dev);
- free_netdev(sitn->fb_tunnel_dev);
+err_dev_free:
+ ipip6_dev_free(sitn->fb_tunnel_dev);
err_alloc_dev:
return err;
}
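(Editor's sketch, not part of the patch: sit now keeps rx/tx packet and byte counters in per-CPU memory (dev->tstats), bumped locklessly on the hot path, and folds them into dev->stats only when .ndo_get_stats runs; NETIF_F_LLTX is what permits the lockless transmit path. A rough userspace analogue of that split — per-"CPU" slots written without locks, summed on read; struct and names are illustrative:)

#include <stdio.h>

#define NR_CPUS 4

struct tstats { unsigned long rx_packets, rx_bytes; };

static struct tstats percpu[NR_CPUS];   /* stand-in for alloc_percpu() */

/* hot path: each "CPU" touches only its own slot, no locking */
static void rx_hit(int cpu, unsigned long len)
{
        percpu[cpu].rx_packets++;
        percpu[cpu].rx_bytes += len;
}

/* slow path: fold the per-CPU counters, as ipip6_get_stats() does */
static struct tstats fold(void)
{
        struct tstats sum = { 0, 0 };
        for (int i = 0; i < NR_CPUS; i++) {
                sum.rx_packets += percpu[i].rx_packets;
                sum.rx_bytes += percpu[i].rx_bytes;
        }
        return sum;
}

int main(void)
{
        rx_hit(0, 100);
        rx_hit(3, 60);
        struct tstats s = fold();
        printf("%lu packets, %lu bytes\n", s.rx_packets, s.rx_bytes);
        return 0;
}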
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index fe6d40418c0b..8d93f6d81979 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -139,7 +139,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
return -EINVAL;
if (usin->sin6_family != AF_INET6)
- return(-EAFNOSUPPORT);
+ return -EAFNOSUPPORT;
memset(&fl, 0, sizeof(fl));
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index fc3c86a47452..d9864725d0c6 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -30,8 +30,8 @@
#include <net/protocol.h>
#include <net/xfrm.h>
-static struct xfrm6_tunnel *tunnel6_handlers;
-static struct xfrm6_tunnel *tunnel46_handlers;
+static struct xfrm6_tunnel *tunnel6_handlers __read_mostly;
+static struct xfrm6_tunnel *tunnel46_handlers __read_mostly;
static DEFINE_MUTEX(tunnel6_mutex);
int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family)
@@ -51,7 +51,7 @@ int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family)
}
handler->next = *pprev;
- *pprev = handler;
+ rcu_assign_pointer(*pprev, handler);
ret = 0;
@@ -88,6 +88,11 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
EXPORT_SYMBOL(xfrm6_tunnel_deregister);
+#define for_each_tunnel_rcu(head, handler) \
+ for (handler = rcu_dereference(head); \
+ handler != NULL; \
+ handler = rcu_dereference(handler->next)) \
+
static int tunnel6_rcv(struct sk_buff *skb)
{
struct xfrm6_tunnel *handler;
@@ -95,7 +100,7 @@ static int tunnel6_rcv(struct sk_buff *skb)
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto drop;
- for (handler = tunnel6_handlers; handler; handler = handler->next)
+ for_each_tunnel_rcu(tunnel6_handlers, handler)
if (!handler->handler(skb))
return 0;
@@ -113,7 +118,7 @@ static int tunnel46_rcv(struct sk_buff *skb)
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto drop;
- for (handler = tunnel46_handlers; handler; handler = handler->next)
+ for_each_tunnel_rcu(tunnel46_handlers, handler)
if (!handler->handler(skb))
return 0;
@@ -129,7 +134,7 @@ static void tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
{
struct xfrm6_tunnel *handler;
- for (handler = tunnel6_handlers; handler; handler = handler->next)
+ for_each_tunnel_rcu(tunnel6_handlers, handler)
if (!handler->err_handler(skb, opt, type, code, offset, info))
break;
}
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 6baeabbbca82..39676eac3a37 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -199,7 +199,7 @@ static inline int xfrm6_garbage_collect(struct dst_ops *ops)
struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
xfrm6_policy_afinfo.garbage_collect(net);
- return (atomic_read(&ops->entries) > ops->gc_thresh * 2);
+ return atomic_read(&ops->entries) > ops->gc_thresh * 2;
}
static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu)
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 2ce3a8278f26..ac7584b946a5 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -317,13 +317,13 @@ static const struct xfrm_type xfrm6_tunnel_type = {
.output = xfrm6_tunnel_output,
};
-static struct xfrm6_tunnel xfrm6_tunnel_handler = {
+static struct xfrm6_tunnel xfrm6_tunnel_handler __read_mostly = {
.handler = xfrm6_tunnel_rcv,
.err_handler = xfrm6_tunnel_err,
.priority = 2,
};
-static struct xfrm6_tunnel xfrm46_tunnel_handler = {
+static struct xfrm6_tunnel xfrm46_tunnel_handler __read_mostly = {
.handler = xfrm6_tunnel_rcv,
.err_handler = xfrm6_tunnel_err,
.priority = 2,
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index fd55b5135de5..bf3635129b17 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -573,9 +573,9 @@ static int irda_find_lsap_sel(struct irda_sock *self, char *name)
/* Requested object/attribute doesn't exist */
if((self->errno == IAS_CLASS_UNKNOWN) ||
(self->errno == IAS_ATTRIB_UNKNOWN))
- return (-EADDRNOTAVAIL);
+ return -EADDRNOTAVAIL;
else
- return (-EHOSTUNREACH);
+ return -EHOSTUNREACH;
}
/* Get the remote TSAP selector */
@@ -663,7 +663,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
__func__, name);
self->daddr = DEV_ADDR_ANY;
kfree(discoveries);
- return(-ENOTUNIQ);
+ return -ENOTUNIQ;
}
/* First time we found that one, save it ! */
daddr = self->daddr;
@@ -677,7 +677,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
IRDA_DEBUG(0, "%s(), unexpected IAS query failure\n", __func__);
self->daddr = DEV_ADDR_ANY;
kfree(discoveries);
- return(-EHOSTUNREACH);
+ return -EHOSTUNREACH;
break;
}
}
@@ -689,7 +689,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
IRDA_DEBUG(1, "%s(), cannot discover service ''%s'' in any device !!!\n",
__func__, name);
self->daddr = DEV_ADDR_ANY;
- return(-EADDRNOTAVAIL);
+ return -EADDRNOTAVAIL;
}
/* Revert back to discovered device & service */
@@ -2465,9 +2465,9 @@ bed:
/* Requested object/attribute doesn't exist */
if((self->errno == IAS_CLASS_UNKNOWN) ||
(self->errno == IAS_ATTRIB_UNKNOWN))
- return (-EADDRNOTAVAIL);
+ return -EADDRNOTAVAIL;
else
- return (-EHOSTUNREACH);
+ return -EHOSTUNREACH;
}
/* Translate from internal to user structure */
diff --git a/net/irda/discovery.c b/net/irda/discovery.c
index c1c8ae939126..36c3f037f172 100644
--- a/net/irda/discovery.c
+++ b/net/irda/discovery.c
@@ -315,7 +315,7 @@ struct irda_device_info *irlmp_copy_discoveries(hashbin_t *log, int *pn,
/* Get the actual number of device in the buffer and return */
*pn = i;
- return(buffer);
+ return buffer;
}
#ifdef CONFIG_PROC_FS
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index faa82ca2dfdc..a39cca8331df 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -449,8 +449,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
}
#ifdef SERIAL_DO_RESTART
- return ((self->flags & ASYNC_HUP_NOTIFY) ?
- -EAGAIN : -ERESTARTSYS);
+ return (self->flags & ASYNC_HUP_NOTIFY) ?
+ -EAGAIN : -ERESTARTSYS;
#else
return -EAGAIN;
#endif
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 5bb8353105cc..8ee1ff6c742f 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -45,13 +45,11 @@ static int irlan_eth_close(struct net_device *dev);
static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
struct net_device *dev);
static void irlan_eth_set_multicast_list( struct net_device *dev);
-static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev);
static const struct net_device_ops irlan_eth_netdev_ops = {
.ndo_open = irlan_eth_open,
.ndo_stop = irlan_eth_close,
.ndo_start_xmit = irlan_eth_xmit,
- .ndo_get_stats = irlan_eth_get_stats,
.ndo_set_multicast_list = irlan_eth_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
@@ -208,10 +206,10 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
* tried :-) DB
*/
/* irttp_data_request already free the packet */
- self->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
} else {
- self->stats.tx_packets++;
- self->stats.tx_bytes += len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += len;
}
return NETDEV_TX_OK;
@@ -226,15 +224,16 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
{
struct irlan_cb *self = instance;
+ struct net_device *dev = self->dev;
if (skb == NULL) {
- ++self->stats.rx_dropped;
+ dev->stats.rx_dropped++;
return 0;
}
if (skb->len < ETH_HLEN) {
IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n",
__func__, skb->len);
- ++self->stats.rx_dropped;
+ dev->stats.rx_dropped++;
dev_kfree_skb(skb);
return 0;
}
@@ -244,10 +243,10 @@ int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
* might have been previously set by the low level IrDA network
* device driver
*/
- skb->protocol = eth_type_trans(skb, self->dev); /* Remove eth header */
+ skb->protocol = eth_type_trans(skb, dev); /* Remove eth header */
- self->stats.rx_packets++;
- self->stats.rx_bytes += skb->len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
netif_rx(skb); /* Eat it! */
@@ -348,16 +347,3 @@ static void irlan_eth_set_multicast_list(struct net_device *dev)
else
irlan_set_broadcast_filter(self, FALSE);
}
-
-/*
- * Function irlan_get_stats (dev)
- *
- * Get the current statistics for this device
- *
- */
-static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev)
-{
- struct irlan_cb *self = netdev_priv(dev);
-
- return &self->stats;
-}
diff --git a/net/irda/irlan/irlan_event.c b/net/irda/irlan/irlan_event.c
index cbcb4eb54037..43f16040a6fe 100644
--- a/net/irda/irlan/irlan_event.c
+++ b/net/irda/irlan/irlan_event.c
@@ -24,7 +24,7 @@
#include <net/irda/irlan_event.h>
-char *irlan_state[] = {
+const char * const irlan_state[] = {
"IRLAN_IDLE",
"IRLAN_QUERY",
"IRLAN_CONN",
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 0e7d8bde145d..6115a44c0a24 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -939,7 +939,7 @@ struct irda_device_info *irlmp_get_discoveries(int *pn, __u16 mask, int nslots)
}
/* Return current cached discovery log */
- return(irlmp_copy_discoveries(irlmp->cachelog, pn, mask, TRUE));
+ return irlmp_copy_discoveries(irlmp->cachelog, pn, mask, TRUE);
}
EXPORT_SYMBOL(irlmp_get_discoveries);
diff --git a/net/irda/irlmp_frame.c b/net/irda/irlmp_frame.c
index 3750884094da..062e63b1c5c4 100644
--- a/net/irda/irlmp_frame.c
+++ b/net/irda/irlmp_frame.c
@@ -448,7 +448,7 @@ static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap_sel,
(self->cache.slsap_sel == slsap_sel) &&
(self->cache.dlsap_sel == dlsap_sel))
{
- return (self->cache.lsap);
+ return self->cache.lsap;
}
#endif
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c
index e98e40d76f4f..7f17a8020e8a 100644
--- a/net/irda/irnet/irnet_irda.c
+++ b/net/irda/irnet/irnet_irda.c
@@ -238,7 +238,7 @@ irnet_ias_to_tsap(irnet_socket * self,
DEXIT(IRDA_SR_TRACE, "\n");
/* Return the TSAP */
- return(dtsap_sel);
+ return dtsap_sel;
}
/*------------------------------------------------------------------*/
@@ -301,7 +301,7 @@ irnet_connect_tsap(irnet_socket * self)
{
clear_bit(0, &self->ttp_connect);
DERROR(IRDA_SR_ERROR, "connect aborted!\n");
- return(err);
+ return err;
}
/* Connect to remote device */
@@ -312,7 +312,7 @@ irnet_connect_tsap(irnet_socket * self)
{
clear_bit(0, &self->ttp_connect);
DERROR(IRDA_SR_ERROR, "connect aborted!\n");
- return(err);
+ return err;
}
/* The above call is non-blocking.
@@ -321,7 +321,7 @@ irnet_connect_tsap(irnet_socket * self)
* See you there ;-) */
DEXIT(IRDA_SR_TRACE, "\n");
- return(err);
+ return err;
}
/*------------------------------------------------------------------*/
@@ -362,10 +362,10 @@ irnet_discover_next_daddr(irnet_socket * self)
/* The above request is non-blocking.
* After a while, IrDA will call us back in irnet_discovervalue_confirm()
* We will then call irnet_ias_to_tsap() and come back here again... */
- return(0);
+ return 0;
}
else
- return(1);
+ return 1;
}
/*------------------------------------------------------------------*/
@@ -436,7 +436,7 @@ irnet_discover_daddr_and_lsap_sel(irnet_socket * self)
/* Follow me in irnet_discovervalue_confirm() */
DEXIT(IRDA_SR_TRACE, "\n");
- return(0);
+ return 0;
}
/*------------------------------------------------------------------*/
@@ -485,7 +485,7 @@ irnet_dname_to_daddr(irnet_socket * self)
/* No luck ! */
DEBUG(IRDA_SR_INFO, "cannot discover device ``%s'' !!!\n", self->rname);
kfree(discoveries);
- return(-EADDRNOTAVAIL);
+ return -EADDRNOTAVAIL;
}
@@ -527,7 +527,7 @@ irda_irnet_create(irnet_socket * self)
INIT_WORK(&self->disconnect_work, irnet_ppp_disconnect);
DEXIT(IRDA_SOCK_TRACE, "\n");
- return(0);
+ return 0;
}
/*------------------------------------------------------------------*/
@@ -601,7 +601,7 @@ irda_irnet_connect(irnet_socket * self)
* We will finish the connection procedure in irnet_connect_tsap().
*/
DEXIT(IRDA_SOCK_TRACE, "\n");
- return(0);
+ return 0;
}
/*------------------------------------------------------------------*/
@@ -733,7 +733,7 @@ irnet_daddr_to_dname(irnet_socket * self)
/* No luck ! */
DEXIT(IRDA_SERV_INFO, ": cannot discover device 0x%08x !!!\n", self->daddr);
kfree(discoveries);
- return(-EADDRNOTAVAIL);
+ return -EADDRNOTAVAIL;
}
/*------------------------------------------------------------------*/
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index dfe7b38dd4af..69f1fa64994e 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -166,7 +166,7 @@ irnet_ctrl_write(irnet_socket * ap,
}
/* Success : we have parsed all commands successfully */
- return(count);
+ return count;
}
#ifdef INITIAL_DISCOVERY
@@ -300,7 +300,7 @@ irnet_ctrl_read(irnet_socket * ap,
}
DEXIT(CTRL_TRACE, "\n");
- return(strlen(event));
+ return strlen(event);
}
#endif /* INITIAL_DISCOVERY */
@@ -409,7 +409,7 @@ irnet_ctrl_read(irnet_socket * ap,
}
DEXIT(CTRL_TRACE, "\n");
- return(strlen(event));
+ return strlen(event);
}
/*------------------------------------------------------------------*/
@@ -623,7 +623,7 @@ dev_irnet_poll(struct file * file,
mask |= irnet_ctrl_poll(ap, file, wait);
DEXIT(FS_TRACE, " - mask=0x%X\n", mask);
- return(mask);
+ return mask;
}
/*------------------------------------------------------------------*/
diff --git a/net/irda/irnet/irnet_ppp.h b/net/irda/irnet/irnet_ppp.h
index b5df2418f90c..940225866da0 100644
--- a/net/irda/irnet/irnet_ppp.h
+++ b/net/irda/irnet/irnet_ppp.h
@@ -103,7 +103,8 @@ static const struct file_operations irnet_device_fops =
.poll = dev_irnet_poll,
.unlocked_ioctl = dev_irnet_ioctl,
.open = dev_irnet_open,
- .release = dev_irnet_close
+ .release = dev_irnet_close,
+ .llseek = noop_llseek,
/* Also : llseek, readdir, mmap, flush, fsync, fasync, lock, readv, writev */
};
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 43040e97c474..d87c22df6f1e 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -565,12 +565,12 @@ pfkey_proto2satype(uint16_t proto)
static uint8_t pfkey_proto_to_xfrm(uint8_t proto)
{
- return (proto == IPSEC_PROTO_ANY ? 0 : proto);
+ return proto == IPSEC_PROTO_ANY ? 0 : proto;
}
static uint8_t pfkey_proto_from_xfrm(uint8_t proto)
{
- return (proto ? proto : IPSEC_PROTO_ANY);
+ return proto ? proto : IPSEC_PROTO_ANY;
}
static inline int pfkey_sockaddr_len(sa_family_t family)
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 1ae697681bc7..8d9ce0accc98 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -144,7 +144,6 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
nf_reset(skb);
if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
- dev->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += data_len;
} else
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index ff954b3e94b6..39a21d0c61c4 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1768,7 +1768,7 @@ static const struct proto_ops pppol2tp_ops = {
.ioctl = pppox_ioctl,
};
-static struct pppox_proto pppol2tp_proto = {
+static const struct pppox_proto pppol2tp_proto = {
.create = pppol2tp_create,
.ioctl = pppol2tp_ioctl
};
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index a87cb3ba2df6..d2b03e0851ef 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -138,10 +138,8 @@ struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[])
struct crypto_cipher *tfm;
tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- return NULL;
-
- crypto_cipher_setkey(tfm, key, ALG_CCMP_KEY_LEN);
+ if (!IS_ERR(tfm))
+ crypto_cipher_setkey(tfm, key, ALG_CCMP_KEY_LEN);
return tfm;
}
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index 3d097b3d7b62..b4d66cca76d6 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -119,10 +119,8 @@ struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[])
struct crypto_cipher *tfm;
tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- return NULL;
-
- crypto_cipher_setkey(tfm, key, AES_CMAC_KEY_LEN);
+ if (!IS_ERR(tfm))
+ crypto_cipher_setkey(tfm, key, AES_CMAC_KEY_LEN);
return tfm;
}
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 965b272499fd..58eab9e8e4ee 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -86,6 +86,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
tid, 0, reason);
del_timer_sync(&tid_rx->session_timer);
+ del_timer_sync(&tid_rx->reorder_timer);
call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
}
@@ -120,6 +121,20 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
}
+static void sta_rx_agg_reorder_timer_expired(unsigned long data)
+{
+ u8 *ptid = (u8 *)data;
+ u8 *timer_to_id = ptid - *ptid;
+ struct sta_info *sta = container_of(timer_to_id, struct sta_info,
+ timer_to_tid[0]);
+
+ rcu_read_lock();
+ spin_lock(&sta->lock);
+ ieee80211_release_reorder_timeout(sta, *ptid);
+ spin_unlock(&sta->lock);
+ rcu_read_unlock();
+}
+
static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
u8 dialog_token, u16 status, u16 policy,
u16 buf_size, u16 timeout)
@@ -251,11 +266,18 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
goto end;
}
+ spin_lock_init(&tid_agg_rx->reorder_lock);
+
/* rx timer */
tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired;
tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
init_timer(&tid_agg_rx->session_timer);
+ /* rx reorder timer */
+ tid_agg_rx->reorder_timer.function = sta_rx_agg_reorder_timer_expired;
+ tid_agg_rx->reorder_timer.data = (unsigned long)&sta->timer_to_tid[tid];
+ init_timer(&tid_agg_rx->reorder_timer);
+
/* prepare reordering buffer */
tid_agg_rx->reorder_buf =
kcalloc(buf_size, sizeof(struct sk_buff *), GFP_ATOMIC);
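(Editor's sketch, not part of the patch: the new reorder-timer callback recovers its sta_info the same way the session timer does — each entry of sta->timer_to_tid[] holds its own index, so "ptid - *ptid" points back at element 0 and container_of() then yields the station. A small userspace illustration of that pointer trick; struct and field names are illustrative:)

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct sta { const char *name; unsigned char timer_to_tid[8]; };

static void timer_cb(unsigned char *ptid)
{
        /* ptid points at timer_to_tid[tid], whose value is tid itself */
        unsigned char *base = ptid - *ptid;             /* &timer_to_tid[0] */
        struct sta *sta = container_of(base, struct sta, timer_to_tid[0]);

        printf("fired for %s, tid %d\n", sta->name, (int)*ptid);
}

int main(void)
{
        struct sta s = { .name = "sta0" };
        for (unsigned char i = 0; i < 8; i++)
                s.timer_to_tid[i] = i;          /* each slot stores its index */
        timer_cb(&s.timer_to_tid[5]);
        return 0;
}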
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 29ac8e1a509e..c981604b71e6 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -19,33 +19,6 @@
#include "rate.h"
#include "mesh.h"
-static bool nl80211_type_check(enum nl80211_iftype type)
-{
- switch (type) {
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_MONITOR:
-#ifdef CONFIG_MAC80211_MESH
- case NL80211_IFTYPE_MESH_POINT:
-#endif
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_AP_VLAN:
- case NL80211_IFTYPE_WDS:
- return true;
- default:
- return false;
- }
-}
-
-static bool nl80211_params_check(enum nl80211_iftype type,
- struct vif_params *params)
-{
- if (!nl80211_type_check(type))
- return false;
-
- return true;
-}
-
static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
@@ -55,9 +28,6 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
struct ieee80211_sub_if_data *sdata;
int err;
- if (!nl80211_params_check(type, params))
- return -EINVAL;
-
err = ieee80211_if_add(local, name, &dev, type, params);
if (err || type != NL80211_IFTYPE_MONITOR || !flags)
return err;
@@ -82,12 +52,6 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
int ret;
- if (ieee80211_sdata_running(sdata))
- return -EBUSY;
-
- if (!nl80211_params_check(type, params))
- return -EINVAL;
-
ret = ieee80211_if_change_type(sdata, type);
if (ret)
return ret;
@@ -114,44 +78,30 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, const u8 *mac_addr,
struct key_params *params)
{
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct sta_info *sta = NULL;
- enum ieee80211_key_alg alg;
struct ieee80211_key *key;
int err;
- if (!netif_running(dev))
+ if (!ieee80211_sdata_running(sdata))
return -ENETDOWN;
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
+ /* reject WEP and TKIP keys if WEP failed to initialize */
switch (params->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- alg = ALG_WEP;
- break;
case WLAN_CIPHER_SUITE_TKIP:
- alg = ALG_TKIP;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- alg = ALG_CCMP;
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- alg = ALG_AES_CMAC;
+ case WLAN_CIPHER_SUITE_WEP104:
+ if (IS_ERR(sdata->local->wep_tx_tfm))
+ return -EINVAL;
break;
default:
- return -EINVAL;
+ break;
}
- /* reject WEP and TKIP keys if WEP failed to initialize */
- if ((alg == ALG_WEP || alg == ALG_TKIP) &&
- IS_ERR(sdata->local->wep_tx_tfm))
- return -EINVAL;
-
- key = ieee80211_key_alloc(alg, key_idx, params->key_len, params->key,
- params->seq_len, params->seq);
- if (!key)
- return -ENOMEM;
+ key = ieee80211_key_alloc(params->cipher, key_idx, params->key_len,
+ params->key, params->seq_len, params->seq);
+ if (IS_ERR(key))
+ return PTR_ERR(key);
mutex_lock(&sdata->local->sta_mtx);
@@ -164,9 +114,10 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
}
}
- ieee80211_key_link(key, sdata, sta);
+ err = ieee80211_key_link(key, sdata, sta);
+ if (err)
+ ieee80211_key_free(sdata->local, key);
- err = 0;
out_unlock:
mutex_unlock(&sdata->local->sta_mtx);
@@ -247,10 +198,10 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
memset(&params, 0, sizeof(params));
- switch (key->conf.alg) {
- case ALG_TKIP:
- params.cipher = WLAN_CIPHER_SUITE_TKIP;
+ params.cipher = key->conf.cipher;
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
iv32 = key->u.tkip.tx.iv32;
iv16 = key->u.tkip.tx.iv16;
@@ -268,8 +219,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
params.seq = seq;
params.seq_len = 6;
break;
- case ALG_CCMP:
- params.cipher = WLAN_CIPHER_SUITE_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP:
seq[0] = key->u.ccmp.tx_pn[5];
seq[1] = key->u.ccmp.tx_pn[4];
seq[2] = key->u.ccmp.tx_pn[3];
@@ -279,14 +229,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
params.seq = seq;
params.seq_len = 6;
break;
- case ALG_WEP:
- if (key->conf.keylen == 5)
- params.cipher = WLAN_CIPHER_SUITE_WEP40;
- else
- params.cipher = WLAN_CIPHER_SUITE_WEP104;
- break;
- case ALG_AES_CMAC:
- params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
seq[0] = key->u.aes_cmac.tx_pn[5];
seq[1] = key->u.aes_cmac.tx_pn[4];
seq[2] = key->u.aes_cmac.tx_pn[3];
@@ -634,6 +577,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
struct sta_info *sta,
struct station_parameters *params)
{
+ unsigned long flags;
u32 rates;
int i, j;
struct ieee80211_supported_band *sband;
@@ -642,7 +586,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
sband = local->hw.wiphy->bands[local->oper_channel->band];
- spin_lock_bh(&sta->lock);
+ spin_lock_irqsave(&sta->flaglock, flags);
mask = params->sta_flags_mask;
set = params->sta_flags_set;
@@ -669,7 +613,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
if (set & BIT(NL80211_STA_FLAG_MFP))
sta->flags |= WLAN_STA_MFP;
}
- spin_unlock_bh(&sta->lock);
+ spin_unlock_irqrestore(&sta->flaglock, flags);
/*
* cfg80211 validates this (1-2007) and allows setting the AID
@@ -1143,9 +1087,9 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
p.uapsd = false;
if (drv_conf_tx(local, params->queue, &p)) {
- printk(KERN_DEBUG "%s: failed to set TX queue "
- "parameters for queue %d\n",
- wiphy_name(local->hw.wiphy), params->queue);
+ wiphy_debug(local->hw.wiphy,
+ "failed to set TX queue parameters for queue %d\n",
+ params->queue);
return -EINVAL;
}
@@ -1207,15 +1151,26 @@ static int ieee80211_scan(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_scan_request *req)
{
- struct ieee80211_sub_if_data *sdata;
-
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (sdata->vif.type != NL80211_IFTYPE_STATION &&
- sdata->vif.type != NL80211_IFTYPE_ADHOC &&
- sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
- (sdata->vif.type != NL80211_IFTYPE_AP || sdata->u.ap.beacon))
+ switch (ieee80211_vif_type_p2p(&sdata->vif)) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ if (sdata->local->ops->hw_scan)
+ break;
+ /* FIXME: implement NoA while scanning in software */
+ return -EOPNOTSUPP;
+ case NL80211_IFTYPE_AP:
+ if (sdata->u.ap.beacon)
+ return -EOPNOTSUPP;
+ break;
+ default:
return -EOPNOTSUPP;
+ }
return ieee80211_request_scan(sdata, req);
}
@@ -1541,11 +1496,11 @@ static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
return ieee80211_wk_cancel_remain_on_channel(sdata, cookie);
}
-static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- bool channel_type_valid,
- const u8 *buf, size_t len, u64 *cookie)
+static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ bool channel_type_valid,
+ const u8 *buf, size_t len, u64 *cookie)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
@@ -1575,8 +1530,6 @@ static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev,
return -ENOLINK;
break;
case NL80211_IFTYPE_STATION:
- if (!(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED))
- flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
break;
default:
return -EOPNOTSUPP;
@@ -1647,6 +1600,6 @@ struct cfg80211_ops mac80211_config_ops = {
.set_bitrate_mask = ieee80211_set_bitrate_mask,
.remain_on_channel = ieee80211_remain_on_channel,
.cancel_remain_on_channel = ieee80211_cancel_remain_on_channel,
- .action = ieee80211_action,
+ .mgmt_tx = ieee80211_mgmt_tx,
.set_cqm_rssi_config = ieee80211_set_cqm_rssi_config,
};
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 32be11e4c4d9..5b24740fc0b0 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -11,7 +11,7 @@ __ieee80211_get_channel_mode(struct ieee80211_local *local,
{
struct ieee80211_sub_if_data *sdata;
- WARN_ON(!mutex_is_locked(&local->iflist_mtx));
+ lockdep_assert_held(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
if (sdata == ignore)
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index a694c593ff6a..ebd5b69f562e 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -85,13 +85,15 @@ static ssize_t tsf_write(struct file *file,
if (strncmp(buf, "reset", 5) == 0) {
if (local->ops->reset_tsf) {
drv_reset_tsf(local);
- printk(KERN_INFO "%s: debugfs reset TSF\n", wiphy_name(local->hw.wiphy));
+ wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
}
} else {
tsf = simple_strtoul(buf, NULL, 0);
if (local->ops->set_tsf) {
drv_set_tsf(local, tsf);
- printk(KERN_INFO "%s: debugfs set TSF to %#018llx\n", wiphy_name(local->hw.wiphy), tsf);
+ wiphy_info(local->hw.wiphy,
+ "debugfs set TSF to %#018llx\n", tsf);
+
}
}
@@ -366,7 +368,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
if (!phyd)
return;
- local->debugfs.stations = debugfs_create_dir("stations", phyd);
local->debugfs.keys = debugfs_create_dir("keys", phyd);
DEBUGFS_ADD(frequency);
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index fa5e76e658ef..1647f8dc5cda 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -64,26 +64,13 @@ static ssize_t key_algorithm_read(struct file *file,
char __user *userbuf,
size_t count, loff_t *ppos)
{
- char *alg;
+ char buf[15];
struct ieee80211_key *key = file->private_data;
+ u32 c = key->conf.cipher;
- switch (key->conf.alg) {
- case ALG_WEP:
- alg = "WEP\n";
- break;
- case ALG_TKIP:
- alg = "TKIP\n";
- break;
- case ALG_CCMP:
- alg = "CCMP\n";
- break;
- case ALG_AES_CMAC:
- alg = "AES-128-CMAC\n";
- break;
- default:
- return 0;
- }
- return simple_read_from_buffer(userbuf, count, ppos, alg, strlen(alg));
+ sprintf(buf, "%.2x-%.2x-%.2x:%d\n",
+ c >> 24, (c >> 16) & 0xff, (c >> 8) & 0xff, c & 0xff);
+ return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
}
KEY_OPS(algorithm);
@@ -95,21 +82,22 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
int len;
struct ieee80211_key *key = file->private_data;
- switch (key->conf.alg) {
- case ALG_WEP:
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
len = scnprintf(buf, sizeof(buf), "\n");
break;
- case ALG_TKIP:
+ case WLAN_CIPHER_SUITE_TKIP:
len = scnprintf(buf, sizeof(buf), "%08x %04x\n",
key->u.tkip.tx.iv32,
key->u.tkip.tx.iv16);
break;
- case ALG_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP:
tpn = key->u.ccmp.tx_pn;
len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
tpn[0], tpn[1], tpn[2], tpn[3], tpn[4], tpn[5]);
break;
- case ALG_AES_CMAC:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
tpn = key->u.aes_cmac.tx_pn;
len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
tpn[0], tpn[1], tpn[2], tpn[3], tpn[4],
@@ -130,11 +118,12 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
int i, len;
const u8 *rpn;
- switch (key->conf.alg) {
- case ALG_WEP:
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
len = scnprintf(buf, sizeof(buf), "\n");
break;
- case ALG_TKIP:
+ case WLAN_CIPHER_SUITE_TKIP:
for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
p += scnprintf(p, sizeof(buf)+buf-p,
"%08x %04x\n",
@@ -142,7 +131,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
key->u.tkip.rx[i].iv16);
len = p - buf;
break;
- case ALG_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP:
for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) {
rpn = key->u.ccmp.rx_pn[i];
p += scnprintf(p, sizeof(buf)+buf-p,
@@ -152,7 +141,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
}
len = p - buf;
break;
- case ALG_AES_CMAC:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
rpn = key->u.aes_cmac.rx_pn;
p += scnprintf(p, sizeof(buf)+buf-p,
"%02x%02x%02x%02x%02x%02x\n",
@@ -174,11 +163,11 @@ static ssize_t key_replays_read(struct file *file, char __user *userbuf,
char buf[20];
int len;
- switch (key->conf.alg) {
- case ALG_CCMP:
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
len = scnprintf(buf, sizeof(buf), "%u\n", key->u.ccmp.replays);
break;
- case ALG_AES_CMAC:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
len = scnprintf(buf, sizeof(buf), "%u\n",
key->u.aes_cmac.replays);
break;
@@ -196,8 +185,8 @@ static ssize_t key_icverrors_read(struct file *file, char __user *userbuf,
char buf[20];
int len;
- switch (key->conf.alg) {
- case ALG_AES_CMAC:
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
len = scnprintf(buf, sizeof(buf), "%u\n",
key->u.aes_cmac.icverrors);
break;
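(Editor's sketch, not part of the patch: with the switch from ALG_* to cipher suite selectors, the debugfs "algorithm" file now prints the raw selector as OUI bytes plus suite type instead of a name. Assuming the usual 802.11 suite values, CCMP (0x000fac04) prints as "00-0f-ac:4". A standalone reproduction of the same format string:)

#include <stdio.h>
#include <stdint.h>

static void print_cipher(uint32_t c)
{
        /* same format as the new key_algorithm_read(): OUI bytes, then suite type */
        printf("%.2x-%.2x-%.2x:%d\n",
               (unsigned)(c >> 24), (unsigned)((c >> 16) & 0xff),
               (unsigned)((c >> 8) & 0xff), (int)(c & 0xff));
}

int main(void)
{
        print_cipher(0x000fac04);       /* WLAN_CIPHER_SUITE_CCMP -> 00-0f-ac:4 */
        print_cipher(0x000fac02);       /* WLAN_CIPHER_SUITE_TKIP -> 00-0f-ac:2 */
        return 0;
}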
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 20b2998fa0ed..3e12430591b7 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -409,6 +409,9 @@ void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata)
sprintf(buf, "netdev:%s", sdata->name);
sdata->debugfs.dir = debugfs_create_dir(buf,
sdata->local->hw.wiphy->debugfsdir);
+ if (sdata->debugfs.dir)
+ sdata->debugfs.subdir_stations = debugfs_create_dir("stations",
+ sdata->debugfs.dir);
add_files(sdata);
}
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 76839d4dfaac..6b7ff9fb4604 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -300,7 +300,7 @@ STA_OPS(ht_capa);
void ieee80211_sta_debugfs_add(struct sta_info *sta)
{
- struct dentry *stations_dir = sta->local->debugfs.stations;
+ struct dentry *stations_dir = sta->sdata->debugfs.subdir_stations;
u8 mac[3*ETH_ALEN];
sta->debugfs.add_has_run = true;
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 14123dce544b..16983825f8e8 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -54,6 +54,20 @@ static inline int drv_add_interface(struct ieee80211_local *local,
return ret;
}
+static inline int drv_change_interface(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum nl80211_iftype type, bool p2p)
+{
+ int ret;
+
+ might_sleep();
+
+ trace_drv_change_interface(local, sdata, type, p2p);
+ ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p);
+ trace_drv_return_int(local, ret);
+ return ret;
+}
+
static inline void drv_remove_interface(struct ieee80211_local *local,
struct ieee80211_vif *vif)
{
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 5d5d2a974668..6831fb1641c8 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -25,12 +25,14 @@ static inline void trace_ ## name(proto) {}
#define STA_PR_FMT " sta:%pM"
#define STA_PR_ARG __entry->sta_addr
-#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \
+#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \
+ __field(bool, p2p) \
__string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
-#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \
+#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \
+ __entry->p2p = sdata->vif.p2p; \
__assign_str(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
-#define VIF_PR_FMT " vif:%s(%d)"
-#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type
+#define VIF_PR_FMT " vif:%s(%d%s)"
+#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
/*
* Tracing for driver callbacks.
@@ -136,6 +138,34 @@ TRACE_EVENT(drv_add_interface,
)
);
+TRACE_EVENT(drv_change_interface,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum nl80211_iftype type, bool p2p),
+
+ TP_ARGS(local, sdata, type, p2p),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __field(u32, new_type)
+ __field(bool, new_p2p)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->new_type = type;
+ __entry->new_p2p = p2p;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT " new type:%d%s",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->new_type,
+ __entry->new_p2p ? "/p2p" : ""
+ )
+);
+
TRACE_EVENT(drv_remove_interface,
TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata),
@@ -336,7 +366,7 @@ TRACE_EVENT(drv_set_key,
LOCAL_ENTRY
VIF_ENTRY
STA_ENTRY
- __field(enum ieee80211_key_alg, alg)
+ __field(u32, cipher)
__field(u8, hw_key_idx)
__field(u8, flags)
__field(s8, keyidx)
@@ -346,7 +376,7 @@ TRACE_EVENT(drv_set_key,
LOCAL_ASSIGN;
VIF_ASSIGN;
STA_ASSIGN;
- __entry->alg = key->alg;
+ __entry->cipher = key->cipher;
__entry->flags = key->flags;
__entry->keyidx = key->keyidx;
__entry->hw_key_idx = key->hw_key_idx;
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 9d101fb33861..11f74f5f7b2f 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -265,3 +265,31 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
return 0;
}
+
+void ieee80211_request_smps_work(struct work_struct *work)
+{
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data,
+ u.mgd.request_smps_work);
+
+ mutex_lock(&sdata->u.mgd.mtx);
+ __ieee80211_request_smps(sdata, sdata->u.mgd.driver_smps_mode);
+ mutex_unlock(&sdata->u.mgd.mtx);
+}
+
+void ieee80211_request_smps(struct ieee80211_vif *vif,
+ enum ieee80211_smps_mode smps_mode)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
+ return;
+
+ if (WARN_ON(smps_mode == IEEE80211_SMPS_OFF))
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
+ ieee80211_queue_work(&sdata->local->hw,
+ &sdata->u.mgd.request_smps_work);
+}
+/* this might change ... don't want non-open drivers using it */
+EXPORT_SYMBOL_GPL(ieee80211_request_smps);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index c691780725a7..1a3aae54f0cf 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -427,8 +427,8 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
return NULL;
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: Adding new IBSS station %pM (dev=%s)\n",
- wiphy_name(local->hw.wiphy), addr, sdata->name);
+ wiphy_debug(local->hw.wiphy, "Adding new IBSS station %pM (dev=%s)\n",
+ addr, sdata->name);
#endif
sta = sta_info_alloc(sdata, addr, gfp);
@@ -920,12 +920,14 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
memcpy(sdata->u.ibss.ssid, params->ssid, IEEE80211_MAX_SSID_LEN);
sdata->u.ibss.ssid_len = params->ssid_len;
+ mutex_unlock(&sdata->u.ibss.mtx);
+
+ mutex_lock(&sdata->local->mtx);
ieee80211_recalc_idle(sdata->local);
+ mutex_unlock(&sdata->local->mtx);
ieee80211_queue_work(&sdata->local->hw, &sdata->work);
- mutex_unlock(&sdata->u.ibss.mtx);
-
return 0;
}
@@ -980,7 +982,9 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
mutex_unlock(&sdata->u.ibss.mtx);
+ mutex_lock(&local->mtx);
ieee80211_recalc_idle(sdata->local);
+ mutex_unlock(&local->mtx);
return 0;
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 65e0ed6c2975..945fbf29719d 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -50,12 +50,6 @@ struct ieee80211_local;
* increased memory use (about 2 kB of RAM per entry). */
#define IEEE80211_FRAGMENT_MAX 4
-/*
- * Time after which we ignore scan results and no longer report/use
- * them in any way.
- */
-#define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ)
-
#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024))
#define IEEE80211_DEFAULT_UAPSD_QUEUES \
@@ -165,12 +159,37 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
#define RX_DROP_MONITOR ((__force ieee80211_rx_result) 2u)
#define RX_QUEUED ((__force ieee80211_rx_result) 3u)
-#define IEEE80211_RX_IN_SCAN BIT(0)
-/* frame is destined to interface currently processed (incl. multicast frames) */
-#define IEEE80211_RX_RA_MATCH BIT(1)
-#define IEEE80211_RX_AMSDU BIT(2)
-#define IEEE80211_RX_FRAGMENTED BIT(3)
-/* only add flags here that do not change with subframes of an aMPDU */
+/**
+ * enum ieee80211_packet_rx_flags - packet RX flags
+ * @IEEE80211_RX_RA_MATCH: frame is destined to interface currently processed
+ * (incl. multicast frames)
+ * @IEEE80211_RX_IN_SCAN: received while scanning
+ * @IEEE80211_RX_FRAGMENTED: fragmented frame
+ * @IEEE80211_RX_AMSDU: a-MSDU packet
+ * @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed
+ *
+ * These are per-frame flags that are attached to a frame in the
+ * @rx_flags field of &struct ieee80211_rx_status.
+ */
+enum ieee80211_packet_rx_flags {
+ IEEE80211_RX_IN_SCAN = BIT(0),
+ IEEE80211_RX_RA_MATCH = BIT(1),
+ IEEE80211_RX_FRAGMENTED = BIT(2),
+ IEEE80211_RX_AMSDU = BIT(3),
+ IEEE80211_RX_MALFORMED_ACTION_FRM = BIT(4),
+};
+
+/**
+ * enum ieee80211_rx_flags - RX data flags
+ *
+ * @IEEE80211_RX_CMNTR: received on cooked monitor already
+ *
+ * These flags are used across handling multiple interfaces
+ * for a single frame.
+ */
+enum ieee80211_rx_flags {
+ IEEE80211_RX_CMNTR = BIT(0),
+};
struct ieee80211_rx_data {
struct sk_buff *skb;
@@ -343,7 +362,10 @@ struct ieee80211_if_managed {
unsigned long timers_running; /* used for quiesce/restart */
bool powersave; /* powersave requested for this iface */
enum ieee80211_smps_mode req_smps, /* requested smps mode */
- ap_smps; /* smps mode AP thinks we're in */
+ ap_smps, /* smps mode AP thinks we're in */
+ driver_smps_mode; /* smps mode request */
+
+ struct work_struct request_smps_work;
unsigned int flags;
@@ -371,6 +393,13 @@ struct ieee80211_if_managed {
int ave_beacon_signal;
/*
+ * Number of Beacon frames used in ave_beacon_signal. This can be used
+ * to avoid generating less reliable cqm events that would be based
+ * only on a couple of received frames.
+ */
+ unsigned int count_beacon_signal;
+
+ /*
* Last Beacon frame signal strength average (ave_beacon_signal / 16)
* that triggered a cqm event. 0 indicates that no event has been
* generated for the current association.
@@ -474,6 +503,19 @@ enum ieee80211_sub_if_data_flags {
IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3),
};
+/**
+ * enum ieee80211_sdata_state_bits - virtual interface state bits
+ * @SDATA_STATE_RUNNING: virtual interface is up & running; this
+ * mirrors netif_running() but is separate for interface type
+ * change handling while the interface is up
+ * @SDATA_STATE_OFFCHANNEL: This interface is currently in offchannel
+ * mode, so queues are stopped
+ */
+enum ieee80211_sdata_state_bits {
+ SDATA_STATE_RUNNING,
+ SDATA_STATE_OFFCHANNEL,
+};
+
struct ieee80211_sub_if_data {
struct list_head list;
@@ -487,6 +529,8 @@ struct ieee80211_sub_if_data {
unsigned int flags;
+ unsigned long state;
+
int drop_unencrypted;
char name[IFNAMSIZ];
@@ -497,6 +541,9 @@ struct ieee80211_sub_if_data {
*/
bool ht_opmode_valid;
+ /* to detect idle changes */
+ bool old_idle;
+
/* Fragment table for host-based reassembly */
struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
unsigned int fragment_next;
@@ -508,6 +555,8 @@ struct ieee80211_sub_if_data {
struct ieee80211_key *default_mgmt_key;
u16 sequence_number;
+ __be16 control_port_protocol;
+ bool control_port_no_encrypt;
struct work_struct work;
struct sk_buff_head skb_queue;
@@ -539,6 +588,7 @@ struct ieee80211_sub_if_data {
#ifdef CONFIG_MAC80211_DEBUGFS
struct {
struct dentry *dir;
+ struct dentry *subdir_stations;
struct dentry *default_key;
struct dentry *default_mgmt_key;
} debugfs;
@@ -595,11 +645,17 @@ enum queue_stop_reason {
* determine if we are on the operating channel or not
* @SCAN_OFF_CHANNEL: We're off our operating channel for scanning,
* gets only set in conjunction with SCAN_SW_SCANNING
+ * @SCAN_COMPLETED: Set for our scan work function when the driver reported
+ * that the scan completed.
+ * @SCAN_ABORTED: Set for our scan work function when the driver reported
+ * a scan complete for an aborted scan.
*/
enum {
SCAN_SW_SCANNING,
SCAN_HW_SCANNING,
SCAN_OFF_CHANNEL,
+ SCAN_COMPLETED,
+ SCAN_ABORTED,
};
/**
@@ -634,7 +690,6 @@ struct ieee80211_local {
/*
* work stuff, potentially off-channel (in the future)
*/
- struct mutex work_mtx;
struct list_head work_list;
struct timer_list work_timer;
struct work_struct work_work;
@@ -656,6 +711,8 @@ struct ieee80211_local {
int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
unsigned int filter_flags; /* FIF_* */
+ bool wiphy_ciphers_allocated;
+
/* protects the aggregated multicast list and filter calls */
spinlock_t filter_lock;
@@ -746,9 +803,10 @@ struct ieee80211_local {
*/
struct mutex key_mtx;
+ /* mutex for scan and work locking */
+ struct mutex mtx;
/* Scanning and BSS list */
- struct mutex scan_mtx;
unsigned long scanning;
struct cfg80211_ssid scan_ssid;
struct cfg80211_scan_request *int_scan_req;
@@ -866,10 +924,14 @@ struct ieee80211_local {
#ifdef CONFIG_MAC80211_DEBUGFS
struct local_debugfsdentries {
struct dentry *rcdir;
- struct dentry *stations;
struct dentry *keys;
} debugfs;
#endif
+
+ /* dummy netdev for use w/ NAPI */
+ struct net_device napi_dev;
+
+ struct napi_struct napi;
};
static inline struct ieee80211_sub_if_data *
@@ -1003,6 +1065,8 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
+void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
+void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
/* IBSS code */
void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
@@ -1071,7 +1135,7 @@ void ieee80211_recalc_idle(struct ieee80211_local *local);
static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
{
- return netif_running(sdata->dev);
+ return test_bit(SDATA_STATE_RUNNING, &sdata->state);
}
/* tx handling */
@@ -1105,6 +1169,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
enum ieee80211_smps_mode smps, const u8 *da,
const u8 *bssid);
+void ieee80211_request_smps_work(struct work_struct *work);
void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
u16 initiator, u16 reason);
@@ -1131,6 +1196,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
void ieee80211_ba_session_work(struct work_struct *work);
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
+void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
/* Spectrum management */
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
@@ -1146,6 +1212,12 @@ int __ieee80211_suspend(struct ieee80211_hw *hw);
static inline int __ieee80211_resume(struct ieee80211_hw *hw)
{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
+ "%s: resume with hardware scan still in progress\n",
+ wiphy_name(hw->wiphy));
+
return ieee80211_reconfig(hw_to_local(hw));
}
#else
@@ -1208,7 +1280,8 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
const u8 *key, u8 key_len, u8 key_idx);
int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
const u8 *ie, size_t ie_len,
- enum ieee80211_band band);
+ enum ieee80211_band band, u32 rate_mask,
+ u8 channel);
void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index ebbe264e2b0b..66785739dad3 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -94,21 +94,14 @@ static inline int identical_mac_addr_allowed(int type1, int type2)
type2 == NL80211_IFTYPE_AP_VLAN));
}
-static int ieee80211_open(struct net_device *dev)
+static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
+ enum nl80211_iftype iftype)
{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- struct ieee80211_sub_if_data *nsdata;
struct ieee80211_local *local = sdata->local;
- struct sta_info *sta;
- u32 changed = 0;
- int res;
- u32 hw_reconf_flags = 0;
- u8 null_addr[ETH_ALEN] = {0};
+ struct ieee80211_sub_if_data *nsdata;
+ struct net_device *dev = sdata->dev;
- /* fail early if user set an invalid address */
- if (compare_ether_addr(dev->dev_addr, null_addr) &&
- !is_valid_ether_addr(dev->dev_addr))
- return -EADDRNOTAVAIL;
+ ASSERT_RTNL();
/* we hold the RTNL here so can safely walk the list */
list_for_each_entry(nsdata, &local->interfaces, list) {
@@ -125,7 +118,7 @@ static int ieee80211_open(struct net_device *dev)
* belonging to the same hardware. Then, however, we're
* faced with having to adopt two different TSF timers...
*/
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
+ if (iftype == NL80211_IFTYPE_ADHOC &&
nsdata->vif.type == NL80211_IFTYPE_ADHOC)
return -EBUSY;
@@ -139,19 +132,36 @@ static int ieee80211_open(struct net_device *dev)
/*
* check whether it may have the same address
*/
- if (!identical_mac_addr_allowed(sdata->vif.type,
+ if (!identical_mac_addr_allowed(iftype,
nsdata->vif.type))
return -ENOTUNIQ;
/*
* can only add VLANs to enabled APs
*/
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+ if (iftype == NL80211_IFTYPE_AP_VLAN &&
nsdata->vif.type == NL80211_IFTYPE_AP)
sdata->bss = &nsdata->u.ap;
}
}
+ return 0;
+}
+
+/*
+ * NOTE: Be very careful when changing this function, it must NOT return
+ * an error on interface type changes that have been pre-checked, so most
+ * checks should be in ieee80211_check_concurrent_iface.
+ */
+static int ieee80211_do_open(struct net_device *dev, bool coming_up)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+ u32 changed = 0;
+ int res;
+ u32 hw_reconf_flags = 0;
+
switch (sdata->vif.type) {
case NL80211_IFTYPE_WDS:
if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
@@ -177,7 +187,9 @@ static int ieee80211_open(struct net_device *dev)
/* no special treatment */
break;
case NL80211_IFTYPE_UNSPECIFIED:
- case __NL80211_IFTYPE_AFTER_LAST:
+ case NUM_NL80211_IFTYPES:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
/* cannot happen */
WARN_ON(1);
break;
@@ -187,39 +199,30 @@ static int ieee80211_open(struct net_device *dev)
res = drv_start(local);
if (res)
goto err_del_bss;
+ if (local->ops->napi_poll)
+ napi_enable(&local->napi);
/* we're brought up, everything changes */
hw_reconf_flags = ~0;
ieee80211_led_radio(local, true);
}
/*
- * Check all interfaces and copy the hopefully now-present
- * MAC address to those that have the special null one.
+ * Copy the hopefully now-present MAC address to
+ * this interface, if it has the special null one.
*/
- list_for_each_entry(nsdata, &local->interfaces, list) {
- struct net_device *ndev = nsdata->dev;
-
- /*
- * No need to check running since we do not allow
- * it to start up with this invalid address.
- */
- if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) {
- memcpy(ndev->dev_addr,
- local->hw.wiphy->perm_addr,
- ETH_ALEN);
- memcpy(ndev->perm_addr, ndev->dev_addr, ETH_ALEN);
+ if (is_zero_ether_addr(dev->dev_addr)) {
+ memcpy(dev->dev_addr,
+ local->hw.wiphy->perm_addr,
+ ETH_ALEN);
+ memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ if (!local->open_count)
+ drv_stop(local);
+ return -EADDRNOTAVAIL;
}
}
- /*
- * Validate the MAC address for this device.
- */
- if (!is_valid_ether_addr(dev->dev_addr)) {
- if (!local->open_count)
- drv_stop(local);
- return -EADDRNOTAVAIL;
- }
-
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
/* no need to tell driver */
@@ -253,9 +256,11 @@ static int ieee80211_open(struct net_device *dev)
netif_carrier_on(dev);
break;
default:
- res = drv_add_interface(local, &sdata->vif);
- if (res)
- goto err_stop;
+ if (coming_up) {
+ res = drv_add_interface(local, &sdata->vif);
+ if (res)
+ goto err_stop;
+ }
if (ieee80211_vif_is_mesh(&sdata->vif)) {
local->fif_other_bss++;
@@ -277,6 +282,8 @@ static int ieee80211_open(struct net_device *dev)
netif_carrier_on(dev);
}
+ set_bit(SDATA_STATE_RUNNING, &sdata->state);
+
if (sdata->vif.type == NL80211_IFTYPE_WDS) {
/* Create STA entry for the WDS peer */
sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
@@ -307,9 +314,13 @@ static int ieee80211_open(struct net_device *dev)
if (sdata->flags & IEEE80211_SDATA_PROMISC)
atomic_inc(&local->iff_promiscs);
+ mutex_lock(&local->mtx);
hw_reconf_flags |= __ieee80211_recalc_idle(local);
+ mutex_unlock(&local->mtx);
+
+ if (coming_up)
+ local->open_count++;
- local->open_count++;
if (hw_reconf_flags) {
ieee80211_hw_config(local, hw_reconf_flags);
/*
@@ -334,22 +345,42 @@ static int ieee80211_open(struct net_device *dev)
sdata->bss = NULL;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
list_del(&sdata->u.vlan.list);
+ clear_bit(SDATA_STATE_RUNNING, &sdata->state);
return res;
}
-static int ieee80211_stop(struct net_device *dev)
+static int ieee80211_open(struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ int err;
+
+ /* fail early if user set an invalid address */
+ if (!is_zero_ether_addr(dev->dev_addr) &&
+ !is_valid_ether_addr(dev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ err = ieee80211_check_concurrent_iface(sdata, sdata->vif.type);
+ if (err)
+ return err;
+
+ return ieee80211_do_open(dev, true);
+}
+
+static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+ bool going_down)
+{
struct ieee80211_local *local = sdata->local;
unsigned long flags;
struct sk_buff *skb, *tmp;
u32 hw_reconf_flags = 0;
int i;
+ clear_bit(SDATA_STATE_RUNNING, &sdata->state);
+
/*
* Stop TX on this interface first.
*/
- netif_tx_stop_all_queues(dev);
+ netif_tx_stop_all_queues(sdata->dev);
/*
* Purge work for this interface.
@@ -366,12 +397,9 @@ static int ieee80211_stop(struct net_device *dev)
* (because if we remove a STA after ops->remove_interface()
* the driver will have removed the vif info already!)
*
- * We could relax this and only unlink the stations from the
- * hash table and list but keep them on a per-sdata list that
- * will be inserted back again when the interface is brought
- * up again, but I don't currently see a use case for that,
- * except with WDS which gets a STA entry created when it is
- * brought up.
+ * This is relevant only in AP, WDS and mesh modes, since in
+ * all other modes we've already removed all stations when
+ * disconnecting etc.
*/
sta_info_flush(local, sdata);
@@ -390,11 +418,12 @@ static int ieee80211_stop(struct net_device *dev)
if (sdata->vif.type == NL80211_IFTYPE_AP)
local->fif_pspoll--;
- netif_addr_lock_bh(dev);
+ netif_addr_lock_bh(sdata->dev);
spin_lock_bh(&local->filter_lock);
- __hw_addr_unsync(&local->mc_list, &dev->mc, dev->addr_len);
+ __hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
+ sdata->dev->addr_len);
spin_unlock_bh(&local->filter_lock);
- netif_addr_unlock_bh(dev);
+ netif_addr_unlock_bh(sdata->dev);
ieee80211_configure_filter(local);
@@ -406,11 +435,21 @@ static int ieee80211_stop(struct net_device *dev)
struct ieee80211_sub_if_data *vlan, *tmpsdata;
struct beacon_data *old_beacon = sdata->u.ap.beacon;
+ /* sdata_running will return false, so this will disable */
+ ieee80211_bss_info_change_notify(sdata,
+ BSS_CHANGED_BEACON_ENABLED);
+
/* remove beacon */
rcu_assign_pointer(sdata->u.ap.beacon, NULL);
synchronize_rcu();
kfree(old_beacon);
+ /* free all potentially still buffered bcast frames */
+ while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) {
+ local->total_ps_buffered--;
+ dev_kfree_skb(skb);
+ }
+
/* down all dependent devices, that is VLANs */
list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
u.vlan.list)
@@ -418,7 +457,8 @@ static int ieee80211_stop(struct net_device *dev)
WARN_ON(!list_empty(&sdata->u.ap.vlans));
}
- local->open_count--;
+ if (going_down)
+ local->open_count--;
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
@@ -450,27 +490,6 @@ static int ieee80211_stop(struct net_device *dev)
ieee80211_configure_filter(local);
break;
- case NL80211_IFTYPE_STATION:
- del_timer_sync(&sdata->u.mgd.chswitch_timer);
- del_timer_sync(&sdata->u.mgd.timer);
- del_timer_sync(&sdata->u.mgd.conn_mon_timer);
- del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
- /*
- * If any of the timers fired while we waited for it, it will
- * have queued its work. Now the work will be running again
- * but will not rearm the timer again because it checks
- * whether the interface is running, which, at this point,
- * it no longer is.
- */
- cancel_work_sync(&sdata->u.mgd.chswitch_work);
- cancel_work_sync(&sdata->u.mgd.monitor_work);
- cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work);
-
- /* fall through */
- case NL80211_IFTYPE_ADHOC:
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
- del_timer_sync(&sdata->u.ibss.timer);
- /* fall through */
case NL80211_IFTYPE_MESH_POINT:
if (ieee80211_vif_is_mesh(&sdata->vif)) {
/* other_bss and allmulti are always set on mesh
@@ -498,27 +517,34 @@ static int ieee80211_stop(struct net_device *dev)
ieee80211_scan_cancel(local);
/*
- * Disable beaconing for AP and mesh, IBSS can't
- * still be joined to a network at this point.
+ * Disable beaconing here for mesh only, AP and IBSS
+ * are already taken care of.
*/
- if (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_MESH_POINT) {
+ if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
ieee80211_bss_info_change_notify(sdata,
BSS_CHANGED_BEACON_ENABLED);
- }
- /* free all remaining keys, there shouldn't be any */
+ /*
+ * Free all remaining keys, there shouldn't be any,
+ * except maybe group keys in AP mode or WDS?
+ */
ieee80211_free_keys(sdata);
- drv_remove_interface(local, &sdata->vif);
+
+ if (going_down)
+ drv_remove_interface(local, &sdata->vif);
}
sdata->bss = NULL;
+ mutex_lock(&local->mtx);
hw_reconf_flags |= __ieee80211_recalc_idle(local);
+ mutex_unlock(&local->mtx);
ieee80211_recalc_ps(local, -1);
if (local->open_count == 0) {
+ if (local->ops->napi_poll)
+ napi_disable(&local->napi);
ieee80211_clear_tx_pending(local);
ieee80211_stop_device(local);
@@ -541,6 +567,13 @@ static int ieee80211_stop(struct net_device *dev)
}
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+}
+
+static int ieee80211_stop(struct net_device *dev)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ ieee80211_do_stop(sdata, true);
return 0;
}
@@ -585,8 +618,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
- struct beacon_data *beacon;
- struct sk_buff *skb;
int flushed;
int i;
@@ -599,37 +630,8 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
__skb_queue_purge(&sdata->fragments[i].skb_list);
sdata->fragment_next = 0;
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_AP:
- beacon = sdata->u.ap.beacon;
- rcu_assign_pointer(sdata->u.ap.beacon, NULL);
- synchronize_rcu();
- kfree(beacon);
-
- while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) {
- local->total_ps_buffered--;
- dev_kfree_skb(skb);
- }
-
- break;
- case NL80211_IFTYPE_MESH_POINT:
- if (ieee80211_vif_is_mesh(&sdata->vif))
- mesh_rmc_free(sdata);
- break;
- case NL80211_IFTYPE_ADHOC:
- if (WARN_ON(sdata->u.ibss.presp))
- kfree_skb(sdata->u.ibss.presp);
- break;
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_WDS:
- case NL80211_IFTYPE_AP_VLAN:
- case NL80211_IFTYPE_MONITOR:
- break;
- case NL80211_IFTYPE_UNSPECIFIED:
- case __NL80211_IFTYPE_AFTER_LAST:
- BUG();
- break;
- }
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ mesh_rmc_free(sdata);
flushed = sta_info_flush(local, sdata);
WARN_ON(flushed);
@@ -844,9 +846,13 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
/* and set some type-dependent values */
sdata->vif.type = type;
+ sdata->vif.p2p = false;
sdata->dev->netdev_ops = &ieee80211_dataif_ops;
sdata->wdev.iftype = type;
+ sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
+ sdata->control_port_no_encrypt = false;
+
/* only monitor differs */
sdata->dev->type = ARPHRD_ETHER;
@@ -854,10 +860,20 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
INIT_WORK(&sdata->work, ieee80211_iface_work);
switch (type) {
+ case NL80211_IFTYPE_P2P_GO:
+ type = NL80211_IFTYPE_AP;
+ sdata->vif.type = type;
+ sdata->vif.p2p = true;
+ /* fall through */
case NL80211_IFTYPE_AP:
skb_queue_head_init(&sdata->u.ap.ps_bc_buf);
INIT_LIST_HEAD(&sdata->u.ap.vlans);
break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ type = NL80211_IFTYPE_STATION;
+ sdata->vif.type = type;
+ sdata->vif.p2p = true;
+ /* fall through */
case NL80211_IFTYPE_STATION:
ieee80211_sta_setup_sdata(sdata);
break;
@@ -878,7 +894,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_AP_VLAN:
break;
case NL80211_IFTYPE_UNSPECIFIED:
- case __NL80211_IFTYPE_AFTER_LAST:
+ case NUM_NL80211_IFTYPES:
BUG();
break;
}
@@ -886,12 +902,85 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
ieee80211_debugfs_add_netdev(sdata);
}
+static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
+ enum nl80211_iftype type)
+{
+ struct ieee80211_local *local = sdata->local;
+ int ret, err;
+ enum nl80211_iftype internal_type = type;
+ bool p2p = false;
+
+ ASSERT_RTNL();
+
+ if (!local->ops->change_interface)
+ return -EBUSY;
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ /*
+ * Could maybe also all others here?
+ * Just not sure how that interacts
+ * with the RX/config path e.g. for
+ * mesh.
+ */
+ break;
+ default:
+ return -EBUSY;
+ }
+
+ switch (type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ /*
+ * Could probably support everything
+ * but WDS here (WDS do_open can fail
+ * under memory pressure, which this
+ * code isn't prepared to handle).
+ */
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ p2p = true;
+ internal_type = NL80211_IFTYPE_STATION;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ p2p = true;
+ internal_type = NL80211_IFTYPE_AP;
+ break;
+ default:
+ return -EBUSY;
+ }
+
+ ret = ieee80211_check_concurrent_iface(sdata, internal_type);
+ if (ret)
+ return ret;
+
+ ieee80211_do_stop(sdata, false);
+
+ ieee80211_teardown_sdata(sdata->dev);
+
+ ret = drv_change_interface(local, sdata, internal_type, p2p);
+ if (ret)
+ type = sdata->vif.type;
+
+ ieee80211_setup_sdata(sdata, type);
+
+ err = ieee80211_do_open(sdata->dev, false);
+ WARN(err, "type change: do_open returned %d", err);
+
+ return ret;
+}
+
int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
enum nl80211_iftype type)
{
+ int ret;
+
ASSERT_RTNL();
- if (type == sdata->vif.type)
+ if (type == ieee80211_vif_type_p2p(&sdata->vif))
return 0;
/* Setting ad-hoc mode on non-IBSS channel is not supported. */
@@ -899,18 +988,15 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
type == NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
- /*
- * We could, here, on changes between IBSS/STA/MESH modes,
- * invoke an MLME function instead that disassociates etc.
- * and goes into the requested mode.
- */
-
- if (ieee80211_sdata_running(sdata))
- return -EBUSY;
-
- /* Purge and reset type-dependent state. */
- ieee80211_teardown_sdata(sdata->dev);
- ieee80211_setup_sdata(sdata, type);
+ if (ieee80211_sdata_running(sdata)) {
+ ret = ieee80211_runtime_change_iftype(sdata, type);
+ if (ret)
+ return ret;
+ } else {
+ /* Purge and reset type-dependent state. */
+ ieee80211_teardown_sdata(sdata->dev);
+ ieee80211_setup_sdata(sdata, type);
+ }
/* reset some values that shouldn't be kept across type changes */
sdata->vif.bss_conf.basic_rates =
@@ -1167,8 +1253,7 @@ static u32 ieee80211_idle_off(struct ieee80211_local *local,
return 0;
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: device no longer idle - %s\n",
- wiphy_name(local->hw.wiphy), reason);
+ wiphy_debug(local->hw.wiphy, "device no longer idle - %s\n", reason);
#endif
local->hw.conf.flags &= ~IEEE80211_CONF_IDLE;
@@ -1181,8 +1266,7 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)
return 0;
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: device now idle\n",
- wiphy_name(local->hw.wiphy));
+ wiphy_debug(local->hw.wiphy, "device now idle\n");
#endif
drv_flush(local, false);
@@ -1195,28 +1279,61 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
int count = 0;
+ bool working = false, scanning = false;
+ struct ieee80211_work *wk;
- if (!list_empty(&local->work_list))
- return ieee80211_idle_off(local, "working");
-
- if (local->scanning)
- return ieee80211_idle_off(local, "scanning");
+#ifdef CONFIG_PROVE_LOCKING
+ WARN_ON(debug_locks && !lockdep_rtnl_is_held() &&
+ !lockdep_is_held(&local->iflist_mtx));
+#endif
+ lockdep_assert_held(&local->mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
- if (!ieee80211_sdata_running(sdata))
+ if (!ieee80211_sdata_running(sdata)) {
+ sdata->vif.bss_conf.idle = true;
continue;
+ }
+
+ sdata->old_idle = sdata->vif.bss_conf.idle;
+
/* do not count disabled managed interfaces */
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- !sdata->u.mgd.associated)
+ !sdata->u.mgd.associated) {
+ sdata->vif.bss_conf.idle = true;
continue;
+ }
/* do not count unused IBSS interfaces */
if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
- !sdata->u.ibss.ssid_len)
+ !sdata->u.ibss.ssid_len) {
+ sdata->vif.bss_conf.idle = true;
continue;
+ }
/* count everything else */
count++;
}
+ list_for_each_entry(wk, &local->work_list, list) {
+ working = true;
+ wk->sdata->vif.bss_conf.idle = false;
+ }
+
+ if (local->scan_sdata) {
+ scanning = true;
+ local->scan_sdata->vif.bss_conf.idle = false;
+ }
+
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->old_idle == sdata->vif.bss_conf.idle)
+ continue;
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
+ }
+
+ if (working)
+ return ieee80211_idle_off(local, "working");
+ if (scanning)
+ return ieee80211_idle_off(local, "scanning");
if (!count)
return ieee80211_idle_on(local);
else
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 1b9d87ed143a..6a63d1abd14d 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -49,7 +49,7 @@ static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
static void assert_key_lock(struct ieee80211_local *local)
{
- WARN_ON(!mutex_is_locked(&local->key_mtx));
+ lockdep_assert_held(&local->key_mtx);
}
static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key)
@@ -60,7 +60,7 @@ static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key)
return NULL;
}
-static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
+static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
{
struct ieee80211_sub_if_data *sdata;
struct ieee80211_sta *sta;
@@ -68,8 +68,10 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
might_sleep();
- if (!key->local->ops->set_key)
- return;
+ if (!key->local->ops->set_key) {
+ ret = -EOPNOTSUPP;
+ goto out_unsupported;
+ }
assert_key_lock(key->local);
@@ -87,10 +89,27 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP)
- printk(KERN_ERR "mac80211-%s: failed to set key "
- "(%d, %pM) to hardware (%d)\n",
- wiphy_name(key->local->hw.wiphy),
- key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
+ wiphy_err(key->local->hw.wiphy,
+ "failed to set key (%d, %pM) to hardware (%d)\n",
+ key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
+
+out_unsupported:
+ if (ret) {
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ /* all of these we can do in software */
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
}
static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
@@ -121,10 +140,9 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
sta, &key->conf);
if (ret)
- printk(KERN_ERR "mac80211-%s: failed to remove key "
- "(%d, %pM) from hardware (%d)\n",
- wiphy_name(key->local->hw.wiphy),
- key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
+ wiphy_err(key->local->hw.wiphy,
+ "failed to remove key (%d, %pM) from hardware (%d)\n",
+ key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
}
@@ -227,20 +245,18 @@ static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
}
}
-struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
- int idx,
- size_t key_len,
+struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
const u8 *key_data,
size_t seq_len, const u8 *seq)
{
struct ieee80211_key *key;
- int i, j;
+ int i, j, err;
BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS);
key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL);
if (!key)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/*
* Default to software encryption; we'll later upload the
@@ -249,15 +265,16 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
key->conf.flags = 0;
key->flags = 0;
- key->conf.alg = alg;
+ key->conf.cipher = cipher;
key->conf.keyidx = idx;
key->conf.keylen = key_len;
- switch (alg) {
- case ALG_WEP:
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
key->conf.iv_len = WEP_IV_LEN;
key->conf.icv_len = WEP_ICV_LEN;
break;
- case ALG_TKIP:
+ case WLAN_CIPHER_SUITE_TKIP:
key->conf.iv_len = TKIP_IV_LEN;
key->conf.icv_len = TKIP_ICV_LEN;
if (seq) {
@@ -269,7 +286,7 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
}
}
break;
- case ALG_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP:
key->conf.iv_len = CCMP_HDR_LEN;
key->conf.icv_len = CCMP_MIC_LEN;
if (seq) {
@@ -278,42 +295,38 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
key->u.ccmp.rx_pn[i][j] =
seq[CCMP_PN_LEN - j - 1];
}
- break;
- case ALG_AES_CMAC:
- key->conf.iv_len = 0;
- key->conf.icv_len = sizeof(struct ieee80211_mmie);
- if (seq)
- for (j = 0; j < 6; j++)
- key->u.aes_cmac.rx_pn[j] = seq[6 - j - 1];
- break;
- }
- memcpy(key->conf.key, key_data, key_len);
- INIT_LIST_HEAD(&key->list);
-
- if (alg == ALG_CCMP) {
/*
* Initialize AES key state here as an optimization so that
* it does not need to be initialized for every packet.
*/
key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(key_data);
- if (!key->u.ccmp.tfm) {
+ if (IS_ERR(key->u.ccmp.tfm)) {
+ err = PTR_ERR(key->u.ccmp.tfm);
kfree(key);
- return NULL;
+ key = ERR_PTR(err);
}
- }
-
- if (alg == ALG_AES_CMAC) {
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key->conf.iv_len = 0;
+ key->conf.icv_len = sizeof(struct ieee80211_mmie);
+ if (seq)
+ for (j = 0; j < 6; j++)
+ key->u.aes_cmac.rx_pn[j] = seq[6 - j - 1];
/*
* Initialize AES key state here as an optimization so that
* it does not need to be initialized for every packet.
*/
key->u.aes_cmac.tfm =
ieee80211_aes_cmac_key_setup(key_data);
- if (!key->u.aes_cmac.tfm) {
+ if (IS_ERR(key->u.aes_cmac.tfm)) {
+ err = PTR_ERR(key->u.aes_cmac.tfm);
kfree(key);
- return NULL;
+ key = ERR_PTR(err);
}
+ break;
}
+ memcpy(key->conf.key, key_data, key_len);
+ INIT_LIST_HEAD(&key->list);
return key;
}
@@ -326,9 +339,9 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
if (key->local)
ieee80211_key_disable_hw_accel(key);
- if (key->conf.alg == ALG_CCMP)
+ if (key->conf.cipher == WLAN_CIPHER_SUITE_CCMP)
ieee80211_aes_key_free(key->u.ccmp.tfm);
- if (key->conf.alg == ALG_AES_CMAC)
+ if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
if (key->local)
ieee80211_debugfs_key_remove(key);
@@ -336,12 +349,12 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
kfree(key);
}
-void ieee80211_key_link(struct ieee80211_key *key,
- struct ieee80211_sub_if_data *sdata,
- struct sta_info *sta)
+int ieee80211_key_link(struct ieee80211_key *key,
+ struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta)
{
struct ieee80211_key *old_key;
- int idx;
+ int idx, ret;
BUG_ON(!sdata);
BUG_ON(!key);
@@ -396,9 +409,11 @@ void ieee80211_key_link(struct ieee80211_key *key,
ieee80211_debugfs_key_add(key);
- ieee80211_key_enable_hw_accel(key);
+ ret = ieee80211_key_enable_hw_accel(key);
mutex_unlock(&sdata->local->key_mtx);
+
+ return ret;
}
static void __ieee80211_key_free(struct ieee80211_key *key)
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index b665bbb7a471..cb9a4a65cc68 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -123,18 +123,16 @@ struct ieee80211_key {
struct ieee80211_key_conf conf;
};
-struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
- int idx,
- size_t key_len,
+struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
const u8 *key_data,
size_t seq_len, const u8 *seq);
/*
* Insert a key into data structures (sdata, sta if necessary)
* to make it used, free old key.
*/
-void ieee80211_key_link(struct ieee80211_key *key,
- struct ieee80211_sub_if_data *sdata,
- struct sta_info *sta);
+int __must_check ieee80211_key_link(struct ieee80211_key *key,
+ struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta);
void ieee80211_key_free(struct ieee80211_local *local,
struct ieee80211_key *key);
void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index ded5c3843e06..db341a99c7c7 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -99,16 +99,19 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
int ret = 0;
int power;
enum nl80211_channel_type channel_type;
+ u32 offchannel_flag;
might_sleep();
scan_chan = local->scan_channel;
+ offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
if (scan_chan) {
chan = scan_chan;
channel_type = NL80211_CHAN_NO_HT;
local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
- } else if (local->tmp_channel) {
+ } else if (local->tmp_channel &&
+ local->oper_channel != local->tmp_channel) {
chan = scan_chan = local->tmp_channel;
channel_type = local->tmp_channel_type;
local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
@@ -117,8 +120,9 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
channel_type = local->_oper_channel_type;
local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
}
+ offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
- if (chan != local->hw.conf.channel ||
+ if (offchannel_flag || chan != local->hw.conf.channel ||
channel_type != local->hw.conf.channel_type) {
local->hw.conf.channel = chan;
local->hw.conf.channel_type = channel_type;
@@ -302,7 +306,16 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
trace_api_restart_hw(local);
- /* use this reason, __ieee80211_resume will unblock it */
+ /* wait for scan work complete */
+ flush_workqueue(local->workqueue);
+
+ WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
+ "%s called with hardware scan in progress\n", __func__);
+
+ if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)))
+ ieee80211_scan_cancel(local);
+
+ /* use this reason, ieee80211_reconfig will unblock it */
ieee80211_stop_queues_by_reason(hw,
IEEE80211_QUEUE_STOP_REASON_SUSPEND);
@@ -336,9 +349,6 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
struct ieee80211_if_managed *ifmgd;
int c = 0;
- if (!netif_running(ndev))
- return NOTIFY_DONE;
-
/* Make sure it's our interface that got changed */
if (!wdev)
return NOTIFY_DONE;
@@ -349,11 +359,14 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
bss_conf = &sdata->vif.bss_conf;
+ if (!ieee80211_sdata_running(sdata))
+ return NOTIFY_DONE;
+
/* ARP filtering is only supported in managed mode */
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return NOTIFY_DONE;
- idev = sdata->dev->ip_ptr;
+ idev = __in_dev_get_rtnl(sdata->dev);
if (!idev)
return NOTIFY_DONE;
@@ -390,6 +403,80 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
}
#endif
+static int ieee80211_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ieee80211_local *local =
+ container_of(napi, struct ieee80211_local, napi);
+
+ return local->ops->napi_poll(&local->hw, budget);
+}
+
+void ieee80211_napi_schedule(struct ieee80211_hw *hw)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ napi_schedule(&local->napi);
+}
+EXPORT_SYMBOL(ieee80211_napi_schedule);
+
+void ieee80211_napi_complete(struct ieee80211_hw *hw)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ napi_complete(&local->napi);
+}
+EXPORT_SYMBOL(ieee80211_napi_complete);
+
+/* There isn't a lot of sense in it, but you can transmit anything you like */
+static const struct ieee80211_txrx_stypes
+ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_ADHOC] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4),
+ },
+ [NL80211_IFTYPE_STATION] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+ },
+ [NL80211_IFTYPE_AP] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4),
+ },
+ [NL80211_IFTYPE_AP_VLAN] = {
+ /* copy AP */
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4),
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4),
+ },
+};
+
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
const struct ieee80211_ops *ops)
{
@@ -419,6 +506,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
if (!wiphy)
return NULL;
+ wiphy->mgmt_stypes = ieee80211_default_mgmt_stypes;
+
wiphy->flags |= WIPHY_FLAG_NETNS_OK |
WIPHY_FLAG_4ADDR_AP |
WIPHY_FLAG_4ADDR_STATION;
@@ -455,7 +544,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
__hw_addr_init(&local->mc_list);
mutex_init(&local->iflist_mtx);
- mutex_init(&local->scan_mtx);
+ mutex_init(&local->mtx);
mutex_init(&local->key_mtx);
spin_lock_init(&local->filter_lock);
@@ -494,6 +583,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
skb_queue_head_init(&local->skb_queue);
skb_queue_head_init(&local->skb_queue_unreliable);
+ /* init dummy netdev for use w/ NAPI */
+ init_dummy_netdev(&local->napi_dev);
+
return local_to_hw(local);
}
EXPORT_SYMBOL(ieee80211_alloc_hw);
@@ -506,6 +598,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
int channels, max_bitrates;
bool supp_ht;
static const u32 cipher_suites[] = {
+ /* keep WEP first, it may be removed below */
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
WLAN_CIPHER_SUITE_TKIP,
@@ -554,6 +647,14 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
/* mac80211 always supports monitor */
local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
+#ifndef CONFIG_MAC80211_MESH
+ /* mesh depends on Kconfig, but drivers should set it if they want */
+ local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT);
+#endif
+
+ /* mac80211 supports control port protocol changing */
+ local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL;
+
if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
@@ -589,10 +690,41 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (local->hw.wiphy->max_scan_ie_len)
local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len;
- local->hw.wiphy->cipher_suites = cipher_suites;
- local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
- if (!(local->hw.flags & IEEE80211_HW_MFP_CAPABLE))
- local->hw.wiphy->n_cipher_suites--;
+ /* Set up cipher suites unless driver already did */
+ if (!local->hw.wiphy->cipher_suites) {
+ local->hw.wiphy->cipher_suites = cipher_suites;
+ local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+ if (!(local->hw.flags & IEEE80211_HW_MFP_CAPABLE))
+ local->hw.wiphy->n_cipher_suites--;
+ }
+ if (IS_ERR(local->wep_tx_tfm) || IS_ERR(local->wep_rx_tfm)) {
+ if (local->hw.wiphy->cipher_suites == cipher_suites) {
+ local->hw.wiphy->cipher_suites += 2;
+ local->hw.wiphy->n_cipher_suites -= 2;
+ } else {
+ u32 *suites;
+ int r, w = 0;
+
+ /* Filter out WEP */
+
+ suites = kmemdup(
+ local->hw.wiphy->cipher_suites,
+ sizeof(u32) * local->hw.wiphy->n_cipher_suites,
+ GFP_KERNEL);
+ if (!suites)
+ return -ENOMEM;
+ for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
+ u32 suite = local->hw.wiphy->cipher_suites[r];
+ if (suite == WLAN_CIPHER_SUITE_WEP40 ||
+ suite == WLAN_CIPHER_SUITE_WEP104)
+ continue;
+ suites[w++] = suite;
+ }
+ local->hw.wiphy->cipher_suites = suites;
+ local->hw.wiphy->n_cipher_suites = w;
+ local->wiphy_ciphers_allocated = true;
+ }
+ }
result = wiphy_register(local->hw.wiphy);
if (result < 0)
@@ -641,16 +773,16 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
result = ieee80211_wep_init(local);
if (result < 0)
- printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n",
- wiphy_name(local->hw.wiphy), result);
+ wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
+ result);
rtnl_lock();
result = ieee80211_init_rate_ctrl_alg(local,
hw->rate_control_algorithm);
if (result < 0) {
- printk(KERN_DEBUG "%s: Failed to initialize rate control "
- "algorithm\n", wiphy_name(local->hw.wiphy));
+ wiphy_debug(local->hw.wiphy,
+ "Failed to initialize rate control algorithm\n");
goto fail_rate;
}
@@ -659,8 +791,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
result = ieee80211_if_add(local, "wlan%d", NULL,
NL80211_IFTYPE_STATION, NULL);
if (result)
- printk(KERN_WARNING "%s: Failed to add default virtual iface\n",
- wiphy_name(local->hw.wiphy));
+ wiphy_warn(local->hw.wiphy,
+ "Failed to add default virtual iface\n");
}
rtnl_unlock();
@@ -683,6 +815,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
goto fail_ifa;
#endif
+ netif_napi_add(&local->napi_dev, &local->napi, ieee80211_napi_poll,
+ local->hw.napi_weight);
+
return 0;
#ifdef CONFIG_INET
@@ -703,6 +838,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
fail_workqueue:
wiphy_unregister(local->hw.wiphy);
fail_wiphy_register:
+ if (local->wiphy_ciphers_allocated)
+ kfree(local->hw.wiphy->cipher_suites);
kfree(local->int_scan_req);
return result;
}
@@ -738,6 +875,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
*/
del_timer_sync(&local->work_timer);
+ cancel_work_sync(&local->restart_work);
cancel_work_sync(&local->reconfig_filter);
ieee80211_clear_tx_pending(local);
@@ -746,8 +884,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
if (skb_queue_len(&local->skb_queue) ||
skb_queue_len(&local->skb_queue_unreliable))
- printk(KERN_WARNING "%s: skb_queue not empty\n",
- wiphy_name(local->hw.wiphy));
+ wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
skb_queue_purge(&local->skb_queue);
skb_queue_purge(&local->skb_queue_unreliable);
@@ -764,7 +901,10 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
struct ieee80211_local *local = hw_to_local(hw);
mutex_destroy(&local->iflist_mtx);
- mutex_destroy(&local->scan_mtx);
+ mutex_destroy(&local->mtx);
+
+ if (local->wiphy_ciphers_allocated)
+ kfree(local->hw.wiphy->cipher_suites);
wiphy_free(local->hw.wiphy);
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b6c163ac22da..77913a15f537 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -54,6 +54,12 @@
*/
#define IEEE80211_SIGNAL_AVE_WEIGHT 3
+/*
+ * How many Beacon frames need to have been used in average signal strength
+ * before starting to indicate signal change events.
+ */
+#define IEEE80211_SIGNAL_AVE_MIN_COUNT 4
+
#define TMR_RUNNING_TIMER 0
#define TMR_RUNNING_CHANSW 1
@@ -86,7 +92,7 @@ enum rx_mgmt_action {
/* utils */
static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd)
{
- WARN_ON(!mutex_is_locked(&ifmgd->mtx));
+ lockdep_assert_held(&ifmgd->mtx);
}
/*
@@ -109,7 +115,7 @@ static void run_again(struct ieee80211_if_managed *ifmgd,
mod_timer(&ifmgd->timer, timeout);
}
-static void mod_beacon_timer(struct ieee80211_sub_if_data *sdata)
+void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
{
if (sdata->local->hw.flags & IEEE80211_HW_BEACON_FILTER)
return;
@@ -118,6 +124,19 @@ static void mod_beacon_timer(struct ieee80211_sub_if_data *sdata)
round_jiffies_up(jiffies + IEEE80211_BEACON_LOSS_TIME));
}
+void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
+ if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+ return;
+
+ mod_timer(&sdata->u.mgd.conn_mon_timer,
+ round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
+
+ ifmgd->probe_send_count = 0;
+}
+
static int ecw2cw(int ecw)
{
return (1 << ecw) - 1;
@@ -778,16 +797,17 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
params.uapsd = uapsd;
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
- "cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
- wiphy_name(local->hw.wiphy), queue, aci, acm,
- params.aifs, params.cw_min, params.cw_max, params.txop,
- params.uapsd);
+ wiphy_debug(local->hw.wiphy,
+ "WMM queue=%d aci=%d acm=%d aifs=%d "
+ "cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
+ queue, aci, acm,
+ params.aifs, params.cw_min, params.cw_max,
+ params.txop, params.uapsd);
#endif
if (drv_conf_tx(local, queue, &params))
- printk(KERN_DEBUG "%s: failed to set TX queue "
- "parameters for queue %d\n",
- wiphy_name(local->hw.wiphy), queue);
+ wiphy_debug(local->hw.wiphy,
+ "failed to set TX queue parameters for queue %d\n",
+ queue);
}
/* enable WMM or activate new settings */
@@ -860,14 +880,6 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
IEEE80211_STA_BEACON_POLL);
- /*
- * Always handle WMM once after association regardless
- * of the first value the AP uses. Setting -1 here has
- * that effect because the AP values is an unsigned
- * 4-bit value.
- */
- sdata->u.mgd.wmm_last_param_set = -1;
-
ieee80211_led_assoc(local, 1);
if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD)
@@ -990,6 +1002,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
if (remove_sta)
sta_info_destroy_addr(sdata, bssid);
+
+ del_timer_sync(&sdata->u.mgd.conn_mon_timer);
+ del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
+ del_timer_sync(&sdata->u.mgd.timer);
+ del_timer_sync(&sdata->u.mgd.chswitch_timer);
}
void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
@@ -1006,21 +1023,26 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
if (is_multicast_ether_addr(hdr->addr1))
return;
- if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
- return;
-
- mod_timer(&sdata->u.mgd.conn_mon_timer,
- round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
+ ieee80211_sta_reset_conn_monitor(sdata);
}
static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
const u8 *ssid;
+ u8 *dst = ifmgd->associated->bssid;
+ u8 unicast_limit = max(1, IEEE80211_MAX_PROBE_TRIES - 3);
+
+ /*
+ * Try sending broadcast probe requests for the last three
+ * probe requests after the first ones failed since some
+ * buggy APs only support broadcast probe requests.
+ */
+ if (ifmgd->probe_send_count >= unicast_limit)
+ dst = NULL;
ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
- ieee80211_send_probe_req(sdata, ifmgd->associated->bssid,
- ssid + 2, ssid[1], NULL, 0);
+ ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0);
ifmgd->probe_send_count++;
ifmgd->probe_timeout = jiffies + IEEE80211_PROBE_WAIT;
@@ -1103,8 +1125,11 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
printk(KERN_DEBUG "Connection to AP %pM lost.\n", bssid);
ieee80211_set_disassoc(sdata, true);
- ieee80211_recalc_idle(local);
mutex_unlock(&ifmgd->mtx);
+
+ mutex_lock(&local->mtx);
+ ieee80211_recalc_idle(local);
+ mutex_unlock(&local->mtx);
/*
* must be outside lock due to cfg80211,
* but that's not a problem.
@@ -1173,7 +1198,9 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
sdata->name, bssid, reason_code);
ieee80211_set_disassoc(sdata, true);
+ mutex_lock(&sdata->local->mtx);
ieee80211_recalc_idle(sdata->local);
+ mutex_unlock(&sdata->local->mtx);
return RX_MGMT_CFG80211_DEAUTH;
}
@@ -1203,7 +1230,9 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->name, mgmt->sa, reason_code);
ieee80211_set_disassoc(sdata, true);
+ mutex_lock(&sdata->local->mtx);
ieee80211_recalc_idle(sdata->local);
+ mutex_unlock(&sdata->local->mtx);
return RX_MGMT_CFG80211_DISASSOC;
}
@@ -1330,6 +1359,14 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
return false;
}
+ /*
+ * Always handle WMM once after association regardless
+ * of the first value the AP uses. Setting -1 here has
+ * that effect because the AP values is an unsigned
+ * 4-bit value.
+ */
+ ifmgd->wmm_last_param_set = -1;
+
if (elems.wmm_param)
ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
elems.wmm_param_len);
@@ -1362,7 +1399,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
* Also start the timer that will detect beacon loss.
*/
ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
- mod_beacon_timer(sdata);
+ ieee80211_sta_reset_beacon_monitor(sdata);
return true;
}
@@ -1465,7 +1502,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
* we have or will be receiving any beacons or data, so let's
* schedule the timers again, just in case.
*/
- mod_beacon_timer(sdata);
+ ieee80211_sta_reset_beacon_monitor(sdata);
mod_timer(&ifmgd->conn_mon_timer,
round_jiffies_up(jiffies +
@@ -1540,15 +1577,18 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ifmgd->last_beacon_signal = rx_status->signal;
if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) {
ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE;
- ifmgd->ave_beacon_signal = rx_status->signal;
+ ifmgd->ave_beacon_signal = rx_status->signal * 16;
ifmgd->last_cqm_event_signal = 0;
+ ifmgd->count_beacon_signal = 1;
} else {
ifmgd->ave_beacon_signal =
(IEEE80211_SIGNAL_AVE_WEIGHT * rx_status->signal * 16 +
(16 - IEEE80211_SIGNAL_AVE_WEIGHT) *
ifmgd->ave_beacon_signal) / 16;
+ ifmgd->count_beacon_signal++;
}
if (bss_conf->cqm_rssi_thold &&
+ ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT &&
!(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
int sig = ifmgd->ave_beacon_signal / 16;
int last_event = ifmgd->last_cqm_event_signal;
@@ -1588,7 +1628,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
* Push the beacon loss detection into the future since
* we are processing a beacon from the AP just now.
*/
- mod_beacon_timer(sdata);
+ ieee80211_sta_reset_beacon_monitor(sdata);
ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable,
@@ -1751,7 +1791,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_work *wk;
- mutex_lock(&local->work_mtx);
+ mutex_lock(&local->mtx);
list_for_each_entry(wk, &local->work_list, list) {
if (wk->sdata != sdata)
continue;
@@ -1783,7 +1823,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
free_work(wk);
break;
}
- mutex_unlock(&local->work_mtx);
+ mutex_unlock(&local->mtx);
cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
}
@@ -1840,8 +1880,10 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
" after %dms, disconnecting.\n",
bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
ieee80211_set_disassoc(sdata, true);
- ieee80211_recalc_idle(local);
mutex_unlock(&ifmgd->mtx);
+ mutex_lock(&local->mtx);
+ ieee80211_recalc_idle(local);
+ mutex_unlock(&local->mtx);
/*
* must be outside lock due to cfg80211,
* but that's not a problem.
@@ -1917,6 +1959,8 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
* time -- the code here is properly synchronised.
*/
+ cancel_work_sync(&ifmgd->request_smps_work);
+
cancel_work_sync(&ifmgd->beacon_connection_loss_work);
if (del_timer_sync(&ifmgd->timer))
set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
@@ -1952,6 +1996,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
INIT_WORK(&ifmgd->beacon_connection_loss_work,
ieee80211_beacon_connection_loss_work);
+ INIT_WORK(&ifmgd->request_smps_work, ieee80211_request_smps_work);
setup_timer(&ifmgd->timer, ieee80211_sta_timer,
(unsigned long) sdata);
setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer,
@@ -2249,6 +2294,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
else
ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT;
+ sdata->control_port_protocol = req->crypto.control_port_ethertype;
+ sdata->control_port_no_encrypt = req->crypto.control_port_no_encrypt;
+
ieee80211_add_work(wk);
return 0;
}
@@ -2275,7 +2323,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
mutex_unlock(&ifmgd->mtx);
- mutex_lock(&local->work_mtx);
+ mutex_lock(&local->mtx);
list_for_each_entry(wk, &local->work_list, list) {
if (wk->sdata != sdata)
continue;
@@ -2294,7 +2342,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
free_work(wk);
break;
}
- mutex_unlock(&local->work_mtx);
+ mutex_unlock(&local->mtx);
/*
* If somebody requests authentication and we haven't
@@ -2319,7 +2367,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
if (assoc_bss)
sta_info_destroy_addr(sdata, bssid);
+ mutex_lock(&sdata->local->mtx);
ieee80211_recalc_idle(sdata->local);
+ mutex_unlock(&sdata->local->mtx);
return 0;
}
@@ -2357,7 +2407,9 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
cookie, !req->local_state_change);
sta_info_destroy_addr(sdata, bssid);
+ mutex_lock(&sdata->local->mtx);
ieee80211_recalc_idle(sdata->local);
+ mutex_unlock(&sdata->local->mtx);
return 0;
}
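
The mlme.c changes above fold the old work_mtx into the single local->mtx and, where the per-interface ifmgd->mtx was held, drop it before taking the device-wide mutex for ieee80211_recalc_idle(). A rough user-space analogy of that locking pattern (pthreads, invented names) might look like this:

/* --- illustrative sketch, not part of the patch ---
 * Finish the work that needs the per-interface lock, drop it, and only
 * then take the device-wide lock for the idle recalculation.
 * iface_mtx, dev_mtx and recalc_idle are invented names.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t iface_mtx = PTHREAD_MUTEX_INITIALIZER; /* ~ ifmgd->mtx */
static pthread_mutex_t dev_mtx   = PTHREAD_MUTEX_INITIALIZER; /* ~ local->mtx */
static int associated, idle;

static void recalc_idle(void)
{
	/* caller must hold dev_mtx */
	idle = !associated;
}

static void disassociate(void)
{
	pthread_mutex_lock(&iface_mtx);
	associated = 0;                 /* per-interface state change */
	pthread_mutex_unlock(&iface_mtx);

	/* device-wide lock is taken only after the narrower lock is dropped */
	pthread_mutex_lock(&dev_mtx);
	recalc_idle();
	pthread_mutex_unlock(&dev_mtx);
}

int main(void)
{
	associated = 1;
	disassociate();
	printf("idle=%d\n", idle);
	return 0;
}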
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index c36b1911987a..4b564091e51d 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -22,12 +22,16 @@
static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
local->offchannel_ps_enabled = false;
/* FIXME: what to do when local->pspolling is true? */
del_timer_sync(&local->dynamic_ps_timer);
+ del_timer_sync(&ifmgd->bcn_mon_timer);
+ del_timer_sync(&ifmgd->conn_mon_timer);
+
cancel_work_sync(&local->dynamic_ps_enable_work);
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
@@ -85,6 +89,9 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
mod_timer(&local->dynamic_ps_timer, jiffies +
msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
}
+
+ ieee80211_sta_reset_beacon_monitor(sdata);
+ ieee80211_sta_reset_conn_monitor(sdata);
}
void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
@@ -112,8 +119,10 @@ void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
* used from user space controlled off-channel operations.
*/
if (sdata->vif.type != NL80211_IFTYPE_STATION &&
- sdata->vif.type != NL80211_IFTYPE_MONITOR)
+ sdata->vif.type != NL80211_IFTYPE_MONITOR) {
+ set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
netif_tx_stop_all_queues(sdata->dev);
+ }
}
mutex_unlock(&local->iflist_mtx);
}
@@ -131,6 +140,7 @@ void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
continue;
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
netif_tx_stop_all_queues(sdata->dev);
if (sdata->u.mgd.associated)
ieee80211_offchannel_ps_enable(sdata);
@@ -155,8 +165,20 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
ieee80211_offchannel_ps_disable(sdata);
}
- if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
+ if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
+ clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
+ /*
+ * This may wake up queues even though the driver
+ * currently has them stopped. This is not very
+ * likely, since the driver won't have gotten any
+ * (or hardly any) new packets while we weren't
+ * on the right channel, and even if it happens
+ * it will at most lead to queueing up one more
+ * packet per queue in mac80211 rather than on
+ * the interface qdisc.
+ */
netif_tx_wake_all_queues(sdata->dev);
+ }
/* re-enable beaconing */
if (enable_beaconing &&
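
The offchannel.c hunks above tag interfaces with SDATA_STATE_OFFCHANNEL while their queues are stopped and clear the bit before waking them again, so other wake paths (see the util.c hunk at the end of this diff) can skip interfaces that are still off-channel. A minimal user-space model of that flag, with invented names:

/* --- illustrative sketch, not part of the patch --- */
#include <stdio.h>

#define OFFCHANNEL 0x1UL

struct iface {
	const char *name;
	unsigned long state;
	int queues_running;
};

static void go_offchannel(struct iface *i)
{
	i->state |= OFFCHANNEL;      /* ~ set_bit(SDATA_STATE_OFFCHANNEL, ...) */
	i->queues_running = 0;       /* ~ netif_tx_stop_all_queues()           */
}

static void return_onchannel(struct iface *i)
{
	i->state &= ~OFFCHANNEL;     /* ~ clear_bit(...)                       */
	i->queues_running = 1;       /* ~ netif_tx_wake_all_queues()           */
}

static void wake_queues(struct iface *ifaces, int n)
{
	/* generic wake path: leave off-channel interfaces alone */
	for (int k = 0; k < n; k++)
		if (!(ifaces[k].state & OFFCHANNEL))
			ifaces[k].queues_running = 1;
}

int main(void)
{
	struct iface v[2] = { { "wlan0", 0, 1 }, { "wlan1", 0, 1 } };

	go_offchannel(&v[0]);
	wake_queues(v, 2);           /* must not wake wlan0 */
	printf("%s=%d %s=%d\n", v[0].name, v[0].queues_running,
	       v[1].name, v[1].queues_running);
	return_onchannel(&v[0]);
	return 0;
}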
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index d287fde0431d..ce671dfd238c 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -12,7 +12,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
struct ieee80211_sub_if_data *sdata;
struct sta_info *sta;
- ieee80211_scan_cancel(local);
+ if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)))
+ ieee80211_scan_cancel(local);
ieee80211_stop_queues_by_reason(hw,
IEEE80211_QUEUE_STOP_REASON_SUSPEND);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index be04d46110fe..b0cc385bf989 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -207,7 +207,7 @@ static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
fc = hdr->frame_control;
- return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc));
+ return (info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc);
}
static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, u8 max_rate_idx)
@@ -368,8 +368,8 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
ref = rate_control_alloc(name, local);
if (!ref) {
- printk(KERN_WARNING "%s: Failed to select rate control "
- "algorithm\n", wiphy_name(local->hw.wiphy));
+ wiphy_warn(local->hw.wiphy,
+ "Failed to select rate control algorithm\n");
return -ENOENT;
}
@@ -380,9 +380,8 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
sta_info_flush(local, NULL);
}
- printk(KERN_DEBUG "%s: Selected rate control "
- "algorithm '%s'\n", wiphy_name(local->hw.wiphy),
- ref->ops->name);
+ wiphy_debug(local->hw.wiphy, "Selected rate control algorithm '%s'\n",
+ ref->ops->name);
return 0;
}
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index 47438b4a9af5..135f36fd4d5d 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -162,7 +162,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
file_info->next_entry = (file_info->next_entry + 1) %
RC_PID_EVENT_RING_SIZE;
- /* Print information about the event. Note that userpace needs to
+ /* Print information about the event. Note that userspace needs to
* provide large enough buffers. */
length = length < RC_PID_PRINT_BUF_SIZE ?
length : RC_PID_PRINT_BUF_SIZE;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 28624282c5f3..0b0e83ebe3d5 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -315,6 +315,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
int tid;
/* does the frame have a qos control field? */
@@ -323,9 +324,7 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
/* frame has qos control */
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
- rx->flags |= IEEE80211_RX_AMSDU;
- else
- rx->flags &= ~IEEE80211_RX_AMSDU;
+ status->rx_flags |= IEEE80211_RX_AMSDU;
} else {
/*
* IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
@@ -387,26 +386,25 @@ static ieee80211_rx_result debug_noinline
ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
{
struct ieee80211_local *local = rx->local;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
struct sk_buff *skb = rx->skb;
- if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning)))
+ if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN)))
+ return RX_CONTINUE;
+
+ if (test_bit(SCAN_HW_SCANNING, &local->scanning))
return ieee80211_scan_rx(rx->sdata, skb);
- if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning) &&
- (rx->flags & IEEE80211_RX_IN_SCAN))) {
+ if (test_bit(SCAN_SW_SCANNING, &local->scanning)) {
/* drop all the other packets during a software scan anyway */
if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
dev_kfree_skb(skb);
return RX_QUEUED;
}
- if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) {
- /* scanning finished during invoking of handlers */
- I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
- return RX_DROP_UNUSABLE;
- }
-
- return RX_CONTINUE;
+ /* scanning finished during invoking of handlers */
+ I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
+ return RX_DROP_UNUSABLE;
}
@@ -538,20 +536,12 @@ static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
int index,
struct sk_buff_head *frames)
{
- struct ieee80211_supported_band *sband;
- struct ieee80211_rate *rate = NULL;
struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
- struct ieee80211_rx_status *status;
if (!skb)
goto no_frame;
- status = IEEE80211_SKB_RXCB(skb);
-
- /* release the reordered frames to stack */
- sband = hw->wiphy->bands[status->band];
- if (!(status->flag & RX_FLAG_HT))
- rate = &sband->bitrates[status->rate_idx];
+ /* release the frame from the reorder ring buffer */
tid_agg_rx->stored_mpdu_num--;
tid_agg_rx->reorder_buf[index] = NULL;
__skb_queue_tail(frames, skb);
@@ -580,9 +570,78 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
* frames that have not yet been received are assumed to be lost and the skb
* can be released for processing. This may also release other skb's from the
* reorder buffer if there are no additional gaps between the frames.
+ *
+ * Callers must hold tid_agg_rx->reorder_lock.
*/
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
+static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
+ struct tid_ampdu_rx *tid_agg_rx,
+ struct sk_buff_head *frames)
+{
+ int index, j;
+
+ /* release the buffer until next missing frame */
+ index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+ tid_agg_rx->buf_size;
+ if (!tid_agg_rx->reorder_buf[index] &&
+ tid_agg_rx->stored_mpdu_num > 1) {
+ /*
+ * No buffers ready to be released, but check whether any
+ * frames in the reorder buffer have timed out.
+ */
+ int skipped = 1;
+ for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
+ j = (j + 1) % tid_agg_rx->buf_size) {
+ if (!tid_agg_rx->reorder_buf[j]) {
+ skipped++;
+ continue;
+ }
+ if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
+ HT_RX_REORDER_BUF_TIMEOUT))
+ goto set_release_timer;
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ if (net_ratelimit())
+ wiphy_debug(hw->wiphy,
+ "release an RX reorder frame due to timeout on earlier frames\n");
+#endif
+ ieee80211_release_reorder_frame(hw, tid_agg_rx,
+ j, frames);
+
+ /*
+ * Increment the head seq# also for the skipped slots.
+ */
+ tid_agg_rx->head_seq_num =
+ (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
+ skipped = 0;
+ }
+ } else while (tid_agg_rx->reorder_buf[index]) {
+ ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
+ index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+ tid_agg_rx->buf_size;
+ }
+
+ if (tid_agg_rx->stored_mpdu_num) {
+ j = index = seq_sub(tid_agg_rx->head_seq_num,
+ tid_agg_rx->ssn) % tid_agg_rx->buf_size;
+
+ for (; j != (index - 1) % tid_agg_rx->buf_size;
+ j = (j + 1) % tid_agg_rx->buf_size) {
+ if (tid_agg_rx->reorder_buf[j])
+ break;
+ }
+
+ set_release_timer:
+
+ mod_timer(&tid_agg_rx->reorder_timer,
+ tid_agg_rx->reorder_time[j] +
+ HT_RX_REORDER_BUF_TIMEOUT);
+ } else {
+ del_timer(&tid_agg_rx->reorder_timer);
+ }
+}
+
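
The new ieee80211_sta_reorder_release() above walks the A-MPDU reorder ring: frames are stored at (seq - ssn) mod buf_size, contiguous frames at the head are released immediately, and frames stuck behind a gap are released once they exceed HT_RX_REORDER_BUF_TIMEOUT. A self-contained user-space sketch of the in-order part of that algorithm (invented names, timeout handling omitted):

/* --- illustrative sketch, not part of the patch --- */
#include <stdio.h>
#include <stdlib.h>

#define SEQ_MODULUS 4096                   /* ~ SEQ_MODULO in rx.c: 12-bit sequence space */

struct reorder {
	int ssn;                           /* starting sequence number */
	int head_seq;                      /* next sequence expected   */
	int buf_size;                      /* ring size                */
	int *slot;                         /* 1 = frame buffered       */
};

static int seq_sub(int a, int b) { return (a - b) & (SEQ_MODULUS - 1); }
static int seq_inc(int a)        { return (a + 1) & (SEQ_MODULUS - 1); }

/* hand frames to the stack while no gap is present at the head */
static void reorder_release(struct reorder *r)
{
	int index = seq_sub(r->head_seq, r->ssn) % r->buf_size;

	while (r->slot[index]) {
		printf("release seq %d\n", r->head_seq);
		r->slot[index] = 0;
		r->head_seq = seq_inc(r->head_seq);
		index = seq_sub(r->head_seq, r->ssn) % r->buf_size;
	}
}

int main(void)
{
	struct reorder r = { .ssn = 4090, .head_seq = 4090, .buf_size = 8 };
	r.slot = calloc(r.buf_size, sizeof(*r.slot));

	/* frames 4090 and 4092 arrive; 4091 is still missing */
	r.slot[seq_sub(4090, r.ssn) % r.buf_size] = 1;
	r.slot[seq_sub(4092, r.ssn) % r.buf_size] = 1;
	reorder_release(&r);               /* releases only 4090 */

	r.slot[seq_sub(4091, r.ssn) % r.buf_size] = 1;
	reorder_release(&r);               /* releases 4091 and 4092 */
	free(r.slot);
	return 0;
}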
/*
* As this function belongs to the RX path it must be under
* rcu_read_lock protection. It returns false if the frame
@@ -598,14 +657,16 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
u16 head_seq_num, buf_size;
int index;
+ bool ret = true;
buf_size = tid_agg_rx->buf_size;
head_seq_num = tid_agg_rx->head_seq_num;
+ spin_lock(&tid_agg_rx->reorder_lock);
/* frame with out of date sequence number */
if (seq_less(mpdu_seq_num, head_seq_num)) {
dev_kfree_skb(skb);
- return true;
+ goto out;
}
/*
@@ -626,7 +687,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
/* check if we already stored this frame */
if (tid_agg_rx->reorder_buf[index]) {
dev_kfree_skb(skb);
- return true;
+ goto out;
}
/*
@@ -636,58 +697,19 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
tid_agg_rx->stored_mpdu_num == 0) {
tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
- return false;
+ ret = false;
+ goto out;
}
/* put the frame in the reordering buffer */
tid_agg_rx->reorder_buf[index] = skb;
tid_agg_rx->reorder_time[index] = jiffies;
tid_agg_rx->stored_mpdu_num++;
- /* release the buffer until next missing frame */
- index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
- tid_agg_rx->buf_size;
- if (!tid_agg_rx->reorder_buf[index] &&
- tid_agg_rx->stored_mpdu_num > 1) {
- /*
- * No buffers ready to be released, but check whether any
- * frames in the reorder buffer have timed out.
- */
- int j;
- int skipped = 1;
- for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
- j = (j + 1) % tid_agg_rx->buf_size) {
- if (!tid_agg_rx->reorder_buf[j]) {
- skipped++;
- continue;
- }
- if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
- HT_RX_REORDER_BUF_TIMEOUT))
- break;
-
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: release an RX reorder "
- "frame due to timeout on earlier "
- "frames\n",
- wiphy_name(hw->wiphy));
-#endif
- ieee80211_release_reorder_frame(hw, tid_agg_rx,
- j, frames);
+ ieee80211_sta_reorder_release(hw, tid_agg_rx, frames);
- /*
- * Increment the head seq# also for the skipped slots.
- */
- tid_agg_rx->head_seq_num =
- (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
- skipped = 0;
- }
- } else while (tid_agg_rx->reorder_buf[index]) {
- ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
- index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
- tid_agg_rx->buf_size;
- }
-
- return true;
+ out:
+ spin_unlock(&tid_agg_rx->reorder_lock);
+ return ret;
}
/*
@@ -761,13 +783,14 @@ static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
/* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
rx->sta->last_seq_ctrl[rx->queue] ==
hdr->seq_ctrl)) {
- if (rx->flags & IEEE80211_RX_RA_MATCH) {
+ if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
rx->local->dot11FrameDuplicateCount++;
rx->sta->num_duplicates++;
}
@@ -800,7 +823,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
if ((!ieee80211_has_fromds(hdr->frame_control) &&
!ieee80211_has_tods(hdr->frame_control) &&
ieee80211_is_data(hdr->frame_control)) ||
- !(rx->flags & IEEE80211_RX_RA_MATCH)) {
+ !(status->rx_flags & IEEE80211_RX_RA_MATCH)) {
/* Drop IBSS frames and frames for other hosts
* silently. */
return RX_DROP_MONITOR;
@@ -857,7 +880,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
* No point in finding a key and decrypting if the frame is neither
* addressed to us nor a multicast frame.
*/
- if (!(rx->flags & IEEE80211_RX_RA_MATCH))
+ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
return RX_CONTINUE;
/* start without a key */
@@ -873,6 +896,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
rx->key = stakey;
+ if ((status->flag & RX_FLAG_DECRYPTED) &&
+ (status->flag & RX_FLAG_IV_STRIPPED))
+ return RX_CONTINUE;
/* Skip decryption if the frame is not protected. */
if (!ieee80211_has_protected(fc))
return RX_CONTINUE;
@@ -935,7 +961,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
* pairwise or station-to-station keys, but for WEP we allow
* using a key index as well.
*/
- if (rx->key && rx->key->conf.alg != ALG_WEP &&
+ if (rx->key && rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
+ rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
!is_multicast_ether_addr(hdr->addr1))
rx->key = NULL;
}
@@ -951,8 +978,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
return RX_DROP_UNUSABLE;
/* the hdr variable is invalid now! */
- switch (rx->key->conf.alg) {
- case ALG_WEP:
+ switch (rx->key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
/* Check for weak IVs if possible */
if (rx->sta && ieee80211_is_data(fc) &&
(!(status->flag & RX_FLAG_IV_STRIPPED) ||
@@ -962,15 +990,21 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
result = ieee80211_crypto_wep_decrypt(rx);
break;
- case ALG_TKIP:
+ case WLAN_CIPHER_SUITE_TKIP:
result = ieee80211_crypto_tkip_decrypt(rx);
break;
- case ALG_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP:
result = ieee80211_crypto_ccmp_decrypt(rx);
break;
- case ALG_AES_CMAC:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
result = ieee80211_crypto_aes_cmac_decrypt(rx);
break;
+ default:
+ /*
+ * We can reach here only with HW-only algorithms
+ * but why didn't it decrypt the frame?!
+ */
+ return RX_DROP_UNUSABLE;
}
/* either the frame has been decrypted or will be dropped */
@@ -1079,7 +1113,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
sta->last_rx = jiffies;
}
- if (!(rx->flags & IEEE80211_RX_RA_MATCH))
+ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
return RX_CONTINUE;
if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
@@ -1236,6 +1270,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
unsigned int frag, seq;
struct ieee80211_fragment_entry *entry;
struct sk_buff *skb;
+ struct ieee80211_rx_status *status;
hdr = (struct ieee80211_hdr *)rx->skb->data;
fc = hdr->frame_control;
@@ -1265,7 +1300,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
/* This is the first fragment of a new frame. */
entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
rx->queue, &(rx->skb));
- if (rx->key && rx->key->conf.alg == ALG_CCMP &&
+ if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
ieee80211_has_protected(fc)) {
int queue = ieee80211_is_mgmt(fc) ?
NUM_RX_DATA_QUEUES : rx->queue;
@@ -1294,7 +1329,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
int i;
u8 pn[CCMP_PN_LEN], *rpn;
int queue;
- if (!rx->key || rx->key->conf.alg != ALG_CCMP)
+ if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
return RX_DROP_UNUSABLE;
memcpy(pn, entry->last_pn, CCMP_PN_LEN);
for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
@@ -1335,7 +1370,8 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
}
/* Complete frame has been reassembled - process it now */
- rx->flags |= IEEE80211_RX_FRAGMENTED;
+ status = IEEE80211_SKB_RXCB(rx->skb);
+ status->rx_flags |= IEEE80211_RX_FRAGMENTED;
out:
if (rx->sta)
@@ -1352,9 +1388,10 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
{
struct ieee80211_sub_if_data *sdata = rx->sdata;
__le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
- !(rx->flags & IEEE80211_RX_RA_MATCH)))
+ !(status->rx_flags & IEEE80211_RX_RA_MATCH)))
return RX_CONTINUE;
if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
@@ -1492,7 +1529,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
* Allow EAPOL frames to us/the PAE group address regardless
* of whether the frame was encrypted or not.
*/
- if (ehdr->h_proto == htons(ETH_P_PAE) &&
+ if (ehdr->h_proto == rx->sdata->control_port_protocol &&
(compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
return true;
@@ -1515,6 +1552,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
struct sk_buff *skb, *xmit_skb;
struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
struct sta_info *dsta;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
skb = rx->skb;
xmit_skb = NULL;
@@ -1522,7 +1560,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
if ((sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
!(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
- (rx->flags & IEEE80211_RX_RA_MATCH) &&
+ (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
if (is_multicast_ether_addr(ehdr->h_dest)) {
/*
@@ -1599,6 +1637,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
__le16 fc = hdr->frame_control;
struct sk_buff_head frame_list;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
if (unlikely(!ieee80211_is_data(fc)))
return RX_CONTINUE;
@@ -1606,7 +1645,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
if (unlikely(!ieee80211_is_data_present(fc)))
return RX_DROP_MONITOR;
- if (!(rx->flags & IEEE80211_RX_AMSDU))
+ if (!(status->rx_flags & IEEE80211_RX_AMSDU))
return RX_CONTINUE;
if (ieee80211_has_a4(hdr->frame_control) &&
@@ -1657,6 +1696,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
struct sk_buff *skb = rx->skb, *fwd_skb;
struct ieee80211_local *local = rx->local;
struct ieee80211_sub_if_data *sdata = rx->sdata;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -1702,7 +1742,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
mesh_hdr->ttl--;
- if (rx->flags & IEEE80211_RX_RA_MATCH) {
+ if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
if (!mesh_hdr->ttl)
IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
dropped_frames_ttl);
@@ -1909,13 +1949,38 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
}
static ieee80211_rx_result debug_noinline
+ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+
+ /*
+ * From here on, look only at management frames.
+ * Data and control frames are already handled,
+ * and unknown (reserved) frames are useless.
+ */
+ if (rx->skb->len < 24)
+ return RX_DROP_MONITOR;
+
+ if (!ieee80211_is_mgmt(mgmt->frame_control))
+ return RX_DROP_MONITOR;
+
+ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
+ return RX_DROP_MONITOR;
+
+ if (ieee80211_drop_unencrypted_mgmt(rx))
+ return RX_DROP_UNUSABLE;
+
+ return RX_CONTINUE;
+}
+
+static ieee80211_rx_result debug_noinline
ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
{
struct ieee80211_local *local = rx->local;
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
- struct sk_buff *nskb;
- struct ieee80211_rx_status *status;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
int len = rx->skb->len;
if (!ieee80211_is_action(mgmt->frame_control))
@@ -1928,10 +1993,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
return RX_DROP_UNUSABLE;
- if (!(rx->flags & IEEE80211_RX_RA_MATCH))
- return RX_DROP_UNUSABLE;
-
- if (ieee80211_drop_unencrypted_mgmt(rx))
+ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
return RX_DROP_UNUSABLE;
switch (mgmt->u.action.category) {
@@ -2024,17 +2086,36 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
goto queue;
}
+ return RX_CONTINUE;
+
invalid:
- /*
- * For AP mode, hostapd is responsible for handling any action
- * frames that we didn't handle, including returning unknown
- * ones. For all other modes we will return them to the sender,
- * setting the 0x80 bit in the action category, as required by
- * 802.11-2007 7.3.1.11.
- */
- if (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- return RX_DROP_MONITOR;
+ status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
+ /* will return in the next handlers */
+ return RX_CONTINUE;
+
+ handled:
+ if (rx->sta)
+ rx->sta->rx_packets++;
+ dev_kfree_skb(rx->skb);
+ return RX_QUEUED;
+
+ queue:
+ rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+ skb_queue_tail(&sdata->skb_queue, rx->skb);
+ ieee80211_queue_work(&local->hw, &sdata->work);
+ if (rx->sta)
+ rx->sta->rx_packets++;
+ return RX_QUEUED;
+}
+
+static ieee80211_rx_result debug_noinline
+ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
+{
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+
+ /* skip known-bad action frames and return them in the next handler */
+ if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
+ return RX_CONTINUE;
/*
* Getting here means the kernel doesn't know how to handle
@@ -2042,12 +2123,46 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
* so userspace can register for those to know whether ones
* it transmitted were processed or returned.
*/
- status = IEEE80211_SKB_RXCB(rx->skb);
- if (cfg80211_rx_action(rx->sdata->dev, status->freq,
- rx->skb->data, rx->skb->len,
- GFP_ATOMIC))
- goto handled;
+ if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq,
+ rx->skb->data, rx->skb->len,
+ GFP_ATOMIC)) {
+ if (rx->sta)
+ rx->sta->rx_packets++;
+ dev_kfree_skb(rx->skb);
+ return RX_QUEUED;
+ }
+
+
+ return RX_CONTINUE;
+}
+
+static ieee80211_rx_result debug_noinline
+ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
+{
+ struct ieee80211_local *local = rx->local;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
+ struct sk_buff *nskb;
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+
+ if (!ieee80211_is_action(mgmt->frame_control))
+ return RX_CONTINUE;
+
+ /*
+ * For AP mode, hostapd is responsible for handling any action
+ * frames that we didn't handle, including returning unknown
+ * ones. For all other modes we will return them to the sender,
+ * setting the 0x80 bit in the action category, as required by
+ * 802.11-2007 7.3.1.11.
+ * Newer versions of hostapd shall also use the management frame
+ * registration mechanisms, but older ones still use cooked
+ * monitor interfaces so push all frames there.
+ */
+ if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
+ (sdata->vif.type == NL80211_IFTYPE_AP ||
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
+ return RX_DROP_MONITOR;
/* do not return rejected action frames */
if (mgmt->u.action.category & 0x80)
@@ -2066,20 +2181,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
ieee80211_tx_skb(rx->sdata, nskb);
}
-
- handled:
- if (rx->sta)
- rx->sta->rx_packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
-
- queue:
- rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
- skb_queue_tail(&sdata->skb_queue, rx->skb);
- ieee80211_queue_work(&local->hw, &sdata->work);
- if (rx->sta)
- rx->sta->rx_packets++;
- return RX_QUEUED;
}
static ieee80211_rx_result debug_noinline
@@ -2090,15 +2193,6 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
__le16 stype;
- if (!(rx->flags & IEEE80211_RX_RA_MATCH))
- return RX_DROP_MONITOR;
-
- if (rx->skb->len < 24)
- return RX_DROP_MONITOR;
-
- if (ieee80211_drop_unencrypted_mgmt(rx))
- return RX_DROP_UNUSABLE;
-
rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
if (rxs != RX_CONTINUE)
return rxs;
@@ -2199,6 +2293,14 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
struct net_device *prev_dev = NULL;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ /*
+ * If cooked monitor has been processed already, then
+ * don't do it again. If not, set the flag.
+ */
+ if (rx->flags & IEEE80211_RX_CMNTR)
+ goto out_free_skb;
+ rx->flags |= IEEE80211_RX_CMNTR;
+
if (skb_headroom(skb) < sizeof(*rthdr) &&
pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
goto out_free_skb;
@@ -2253,29 +2355,53 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
if (prev_dev) {
skb->dev = prev_dev;
netif_receive_skb(skb);
- skb = NULL;
- } else
- goto out_free_skb;
-
- return;
+ return;
+ }
out_free_skb:
dev_kfree_skb(skb);
}
+static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
+ ieee80211_rx_result res)
+{
+ switch (res) {
+ case RX_DROP_MONITOR:
+ I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
+ if (rx->sta)
+ rx->sta->rx_dropped++;
+ /* fall through */
+ case RX_CONTINUE: {
+ struct ieee80211_rate *rate = NULL;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_rx_status *status;
+
+ status = IEEE80211_SKB_RXCB((rx->skb));
+
+ sband = rx->local->hw.wiphy->bands[status->band];
+ if (!(status->flag & RX_FLAG_HT))
+ rate = &sband->bitrates[status->rate_idx];
+
+ ieee80211_rx_cooked_monitor(rx, rate);
+ break;
+ }
+ case RX_DROP_UNUSABLE:
+ I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
+ if (rx->sta)
+ rx->sta->rx_dropped++;
+ dev_kfree_skb(rx->skb);
+ break;
+ case RX_QUEUED:
+ I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
+ break;
+ }
+}
-static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_rx_data *rx,
- struct sk_buff *skb,
- struct ieee80211_rate *rate)
+static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
+ struct sk_buff_head *frames)
{
- struct sk_buff_head reorder_release;
ieee80211_rx_result res = RX_DROP_MONITOR;
-
- __skb_queue_head_init(&reorder_release);
-
- rx->skb = skb;
- rx->sdata = sdata;
+ struct sk_buff *skb;
#define CALL_RXH(rxh) \
do { \
@@ -2284,23 +2410,14 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
goto rxh_next; \
} while (0);
- /*
- * NB: the rxh_next label works even if we jump
- * to it from here because then the list will
- * be empty, which is a trivial check
- */
- CALL_RXH(ieee80211_rx_h_passive_scan)
- CALL_RXH(ieee80211_rx_h_check)
-
- ieee80211_rx_reorder_ampdu(rx, &reorder_release);
-
- while ((skb = __skb_dequeue(&reorder_release))) {
+ while ((skb = __skb_dequeue(frames))) {
/*
* all the other fields are valid across frames
* that belong to an aMPDU since they are on the
* same TID from the same station
*/
rx->skb = skb;
+ rx->flags = 0;
CALL_RXH(ieee80211_rx_h_decrypt)
CALL_RXH(ieee80211_rx_h_check_more_data)
@@ -2312,50 +2429,92 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
CALL_RXH(ieee80211_rx_h_remove_qos_control)
CALL_RXH(ieee80211_rx_h_amsdu)
#ifdef CONFIG_MAC80211_MESH
- if (ieee80211_vif_is_mesh(&sdata->vif))
+ if (ieee80211_vif_is_mesh(&rx->sdata->vif))
CALL_RXH(ieee80211_rx_h_mesh_fwding);
#endif
CALL_RXH(ieee80211_rx_h_data)
/* special treatment -- needs the queue */
- res = ieee80211_rx_h_ctrl(rx, &reorder_release);
+ res = ieee80211_rx_h_ctrl(rx, frames);
if (res != RX_CONTINUE)
goto rxh_next;
+ CALL_RXH(ieee80211_rx_h_mgmt_check)
CALL_RXH(ieee80211_rx_h_action)
+ CALL_RXH(ieee80211_rx_h_userspace_mgmt)
+ CALL_RXH(ieee80211_rx_h_action_return)
CALL_RXH(ieee80211_rx_h_mgmt)
+ rxh_next:
+ ieee80211_rx_handlers_result(rx, res);
+
#undef CALL_RXH
+ }
+}
+
+static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
+{
+ struct sk_buff_head reorder_release;
+ ieee80211_rx_result res = RX_DROP_MONITOR;
+
+ __skb_queue_head_init(&reorder_release);
+
+#define CALL_RXH(rxh) \
+ do { \
+ res = rxh(rx); \
+ if (res != RX_CONTINUE) \
+ goto rxh_next; \
+ } while (0);
+
+ CALL_RXH(ieee80211_rx_h_passive_scan)
+ CALL_RXH(ieee80211_rx_h_check)
+
+ ieee80211_rx_reorder_ampdu(rx, &reorder_release);
+
+ ieee80211_rx_handlers(rx, &reorder_release);
+ return;
rxh_next:
- switch (res) {
- case RX_DROP_MONITOR:
- I802_DEBUG_INC(sdata->local->rx_handlers_drop);
- if (rx->sta)
- rx->sta->rx_dropped++;
- /* fall through */
- case RX_CONTINUE:
- ieee80211_rx_cooked_monitor(rx, rate);
- break;
- case RX_DROP_UNUSABLE:
- I802_DEBUG_INC(sdata->local->rx_handlers_drop);
- if (rx->sta)
- rx->sta->rx_dropped++;
- dev_kfree_skb(rx->skb);
- break;
- case RX_QUEUED:
- I802_DEBUG_INC(sdata->local->rx_handlers_queued);
- break;
- }
- }
+ ieee80211_rx_handlers_result(rx, res);
+
+#undef CALL_RXH
+}
+
+/*
+ * This function makes calls into the RX path. Therefore the
+ * caller must hold the sta_info->lock and everything has to
+ * be under rcu_read_lock protection as well.
+ */
+void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
+{
+ struct sk_buff_head frames;
+ struct ieee80211_rx_data rx = {
+ .sta = sta,
+ .sdata = sta->sdata,
+ .local = sta->local,
+ .queue = tid,
+ };
+ struct tid_ampdu_rx *tid_agg_rx;
+
+ tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
+ if (!tid_agg_rx)
+ return;
+
+ __skb_queue_head_init(&frames);
+
+ spin_lock(&tid_agg_rx->reorder_lock);
+ ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx, &frames);
+ spin_unlock(&tid_agg_rx->reorder_lock);
+
+ ieee80211_rx_handlers(&rx, &frames);
}
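
ieee80211_release_reorder_timeout() above runs from the new reorder_timer, drains whatever can be released under reorder_lock, and feeds the freed frames through the RX handlers. The timer itself is armed for the oldest still-buffered frame; a user-space model of how such a deadline could be picked (invented names, plain integers standing in for jiffies):

/* --- illustrative sketch, not part of the patch --- */
#include <stdio.h>

#define TIMEOUT 10                       /* ~ HT_RX_REORDER_BUF_TIMEOUT */

struct ring {
	int buf_size;
	int head_index;                  /* slot of head_seq_num             */
	int stored;                      /* buffered frame count             */
	int time[8];                     /* arrival time per slot, 0 = empty */
};

/* returns the absolute time to fire at, or -1 to cancel the timer */
static int next_release_deadline(const struct ring *r)
{
	if (!r->stored)
		return -1;               /* ~ del_timer(&reorder_timer) */

	for (int j = r->head_index, n = 0; n < r->buf_size;
	     j = (j + 1) % r->buf_size, n++)
		if (r->time[j])
			return r->time[j] + TIMEOUT;  /* ~ mod_timer(...) */

	return -1;                       /* unreachable while stored > 0 */
}

int main(void)
{
	struct ring r = { .buf_size = 8, .head_index = 0, .stored = 1 };

	r.time[3] = 100;                 /* one frame parked behind a gap */
	printf("fire at %d\n", next_release_deadline(&r));   /* prints 110 */
	return 0;
}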
/* main receive path */
-static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_rx_data *rx,
+static int prepare_for_handlers(struct ieee80211_rx_data *rx,
struct ieee80211_hdr *hdr)
{
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
@@ -2369,7 +2528,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
if (!(sdata->dev->flags & IFF_PROMISC))
return 0;
- rx->flags &= ~IEEE80211_RX_RA_MATCH;
+ status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
}
break;
case NL80211_IFTYPE_ADHOC:
@@ -2379,15 +2538,15 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
return 1;
}
else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
- if (!(rx->flags & IEEE80211_RX_IN_SCAN))
+ if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
return 0;
- rx->flags &= ~IEEE80211_RX_RA_MATCH;
+ status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
} else if (!multicast &&
compare_ether_addr(sdata->vif.addr,
hdr->addr1) != 0) {
if (!(sdata->dev->flags & IFF_PROMISC))
return 0;
- rx->flags &= ~IEEE80211_RX_RA_MATCH;
+ status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
} else if (!rx->sta) {
int rate_idx;
if (status->flag & RX_FLAG_HT)
@@ -2405,7 +2564,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
if (!(sdata->dev->flags & IFF_PROMISC))
return 0;
- rx->flags &= ~IEEE80211_RX_RA_MATCH;
+ status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
}
break;
case NL80211_IFTYPE_AP_VLAN:
@@ -2416,9 +2575,9 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
return 0;
} else if (!ieee80211_bssid_match(bssid,
sdata->vif.addr)) {
- if (!(rx->flags & IEEE80211_RX_IN_SCAN))
+ if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
return 0;
- rx->flags &= ~IEEE80211_RX_RA_MATCH;
+ status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
}
break;
case NL80211_IFTYPE_WDS:
@@ -2427,9 +2586,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
return 0;
break;
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_UNSPECIFIED:
- case __NL80211_IFTYPE_AFTER_LAST:
+ default:
/* should never get here */
WARN_ON(1);
break;
@@ -2439,12 +2596,56 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
}
/*
+ * This function returns whether or not the SKB
+ * was destined for RX processing or not, which,
+ * if consume is true, is equivalent to whether
+ * or not the skb was consumed.
+ */
+static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
+ struct sk_buff *skb, bool consume)
+{
+ struct ieee80211_local *local = rx->local;
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ int prepares;
+
+ rx->skb = skb;
+ status->rx_flags |= IEEE80211_RX_RA_MATCH;
+ prepares = prepare_for_handlers(rx, hdr);
+
+ if (!prepares)
+ return false;
+
+ if (status->flag & RX_FLAG_MMIC_ERROR) {
+ if (status->rx_flags & IEEE80211_RX_RA_MATCH)
+ ieee80211_rx_michael_mic_report(hdr, rx);
+ return false;
+ }
+
+ if (!consume) {
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb) {
+ if (net_ratelimit())
+ wiphy_debug(local->hw.wiphy,
+ "failed to copy multicast frame for %s\n",
+ sdata->name);
+ return true;
+ }
+
+ rx->skb = skb;
+ }
+
+ ieee80211_invoke_rx_handlers(rx);
+ return true;
+}
+
+/*
 * This is the actual Rx frames handler. As it belongs to the Rx path it must
* be called with rcu_read_lock protection.
*/
static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
- struct sk_buff *skb,
- struct ieee80211_rate *rate)
+ struct sk_buff *skb)
{
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_local *local = hw_to_local(hw);
@@ -2452,11 +2653,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr;
__le16 fc;
struct ieee80211_rx_data rx;
- int prepares;
- struct ieee80211_sub_if_data *prev = NULL;
- struct sk_buff *skb_new;
- struct sta_info *sta, *tmp;
- bool found_sta = false;
+ struct ieee80211_sub_if_data *prev;
+ struct sta_info *sta, *tmp, *prev_sta;
int err = 0;
fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
@@ -2469,7 +2667,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
- rx.flags |= IEEE80211_RX_IN_SCAN;
+ status->rx_flags |= IEEE80211_RX_IN_SCAN;
if (ieee80211_is_mgmt(fc))
err = skb_linearize(skb);
@@ -2486,91 +2684,67 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
ieee80211_verify_alignment(&rx);
if (ieee80211_is_data(fc)) {
+ prev_sta = NULL;
+
for_each_sta_info(local, hdr->addr2, sta, tmp) {
- rx.sta = sta;
- found_sta = true;
- rx.sdata = sta->sdata;
-
- rx.flags |= IEEE80211_RX_RA_MATCH;
- prepares = prepare_for_handlers(rx.sdata, &rx, hdr);
- if (prepares) {
- if (status->flag & RX_FLAG_MMIC_ERROR) {
- if (rx.flags & IEEE80211_RX_RA_MATCH)
- ieee80211_rx_michael_mic_report(hdr, &rx);
- } else
- prev = rx.sdata;
- }
- }
- }
- if (!found_sta) {
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- if (!ieee80211_sdata_running(sdata))
+ if (!prev_sta) {
+ prev_sta = sta;
continue;
+ }
- if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- continue;
+ rx.sta = prev_sta;
+ rx.sdata = prev_sta->sdata;
+ ieee80211_prepare_and_rx_handle(&rx, skb, false);
- /*
- * frame is destined for this interface, but if it's
- * not also for the previous one we handle that after
- * the loop to avoid copying the SKB once too much
- */
+ prev_sta = sta;
+ }
- if (!prev) {
- prev = sdata;
- continue;
- }
+ if (prev_sta) {
+ rx.sta = prev_sta;
+ rx.sdata = prev_sta->sdata;
- rx.sta = sta_info_get_bss(prev, hdr->addr2);
+ if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
+ return;
+ }
+ }
- rx.flags |= IEEE80211_RX_RA_MATCH;
- prepares = prepare_for_handlers(prev, &rx, hdr);
+ prev = NULL;
- if (!prepares)
- goto next;
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata))
+ continue;
- if (status->flag & RX_FLAG_MMIC_ERROR) {
- rx.sdata = prev;
- if (rx.flags & IEEE80211_RX_RA_MATCH)
- ieee80211_rx_michael_mic_report(hdr,
- &rx);
- goto next;
- }
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ continue;
- /*
- * frame was destined for the previous interface
- * so invoke RX handlers for it
- */
+ /*
+ * frame is destined for this interface, but if it's
+ * not also for the previous one we handle that after
+ * the loop to avoid copying the SKB once too much
+ */
- skb_new = skb_copy(skb, GFP_ATOMIC);
- if (!skb_new) {
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: failed to copy "
- "multicast frame for %s\n",
- wiphy_name(local->hw.wiphy),
- prev->name);
- goto next;
- }
- ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
-next:
+ if (!prev) {
prev = sdata;
+ continue;
}
- if (prev) {
- rx.sta = sta_info_get_bss(prev, hdr->addr2);
+ rx.sta = sta_info_get_bss(prev, hdr->addr2);
+ rx.sdata = prev;
+ ieee80211_prepare_and_rx_handle(&rx, skb, false);
- rx.flags |= IEEE80211_RX_RA_MATCH;
- prepares = prepare_for_handlers(prev, &rx, hdr);
+ prev = sdata;
+ }
- if (!prepares)
- prev = NULL;
- }
+ if (prev) {
+ rx.sta = sta_info_get_bss(prev, hdr->addr2);
+ rx.sdata = prev;
+
+ if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
+ return;
}
- if (prev)
- ieee80211_invoke_rx_handlers(prev, &rx, skb, rate);
- else
- dev_kfree_skb(skb);
+
+ dev_kfree_skb(skb);
}
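
The rewritten __ieee80211_rx_handle_packet() above delivers one frame to several candidate stations or interfaces by always handling the previous match with an skb copy and letting the final match consume the original, so only (matches - 1) copies are made. A user-space model of that deferred-delivery loop, with invented names:

/* --- illustrative sketch, not part of the patch --- */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void handle(const char *who, char *buf, int is_copy)
{
	printf("%s handles %s (%s)\n", who, buf, is_copy ? "copy" : "original");
	free(buf);                             /* the handler consumes the buffer */
}

static void deliver(const char *cands[], int n, char *buf)
{
	const char *prev = NULL;

	for (int i = 0; i < n; i++) {
		if (prev)
			handle(prev, strdup(buf), 1);  /* copy for all but the last */
		prev = cands[i];
	}

	if (prev)
		handle(prev, buf, 0);          /* last match consumes the original */
	else
		free(buf);                     /* nobody wanted it */
}

int main(void)
{
	const char *cands[] = { "wlan0", "wlan1", "wlan2" };
	deliver(cands, 3, strdup("frame"));
	return 0;
}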
/*
@@ -2611,30 +2785,41 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (WARN_ON(!local->started))
goto drop;
- if (status->flag & RX_FLAG_HT) {
+ if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
/*
- * rate_idx is MCS index, which can be [0-76] as documented on:
- *
- * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
- *
- * Anything else would be some sort of driver or hardware error.
- * The driver should catch hardware errors.
+ * Validate the rate, unless a PLCP error means that
+ * we probably can't have a valid rate here anyway.
*/
- if (WARN((status->rate_idx < 0 ||
- status->rate_idx > 76),
- "Rate marked as an HT rate but passed "
- "status->rate_idx is not "
- "an MCS index [0-76]: %d (0x%02x)\n",
- status->rate_idx,
- status->rate_idx))
- goto drop;
- } else {
- if (WARN_ON(status->rate_idx < 0 ||
- status->rate_idx >= sband->n_bitrates))
- goto drop;
- rate = &sband->bitrates[status->rate_idx];
+
+ if (status->flag & RX_FLAG_HT) {
+ /*
+ * rate_idx is MCS index, which can be [0-76]
+ * as documented on:
+ *
+ * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
+ *
+ * Anything else would be some sort of driver or
+ * hardware error. The driver should catch hardware
+ * errors.
+ */
+ if (WARN((status->rate_idx < 0 ||
+ status->rate_idx > 76),
+ "Rate marked as an HT rate but passed "
+ "status->rate_idx is not "
+ "an MCS index [0-76]: %d (0x%02x)\n",
+ status->rate_idx,
+ status->rate_idx))
+ goto drop;
+ } else {
+ if (WARN_ON(status->rate_idx < 0 ||
+ status->rate_idx >= sband->n_bitrates))
+ goto drop;
+ rate = &sband->bitrates[status->rate_idx];
+ }
}
+ status->rx_flags = 0;
+
/*
* key references and virtual interfaces are protected using RCU
* and this requires that we are in a read-side RCU section during
@@ -2654,7 +2839,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
}
- __ieee80211_rx_handle_packet(hw, skb, rate);
+ __ieee80211_rx_handle_packet(hw, skb);
rcu_read_unlock();
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 872d7b6ef6b3..5171a9581631 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -242,20 +242,19 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
local->hw_scan_req->n_channels = n_chans;
ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->ie,
- req->ie, req->ie_len, band);
+ req->ie, req->ie_len, band, (u32) -1,
+ 0);
local->hw_scan_req->ie_len = ielen;
return true;
}
-void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
+static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
{
struct ieee80211_local *local = hw_to_local(hw);
bool was_hw_scan;
- trace_api_scan_completed(local, aborted);
-
- mutex_lock(&local->scan_mtx);
+ mutex_lock(&local->mtx);
/*
* It's ok to abort a not-yet-running scan (that
@@ -267,7 +266,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
aborted = true;
if (WARN_ON(!local->scan_req)) {
- mutex_unlock(&local->scan_mtx);
+ mutex_unlock(&local->mtx);
return;
}
@@ -275,7 +274,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
if (was_hw_scan && !aborted && ieee80211_prep_hw_scan(local)) {
ieee80211_queue_delayed_work(&local->hw,
&local->scan_work, 0);
- mutex_unlock(&local->scan_mtx);
+ mutex_unlock(&local->mtx);
return;
}
@@ -291,7 +290,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
local->scan_channel = NULL;
/* we only have to protect scan_req and hw/sw scan */
- mutex_unlock(&local->scan_mtx);
+ mutex_unlock(&local->mtx);
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
if (was_hw_scan)
@@ -304,12 +303,26 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
ieee80211_offchannel_return(local, true);
done:
+ mutex_lock(&local->mtx);
ieee80211_recalc_idle(local);
+ mutex_unlock(&local->mtx);
ieee80211_mlme_notify_scan_completed(local);
ieee80211_ibss_notify_scan_completed(local);
ieee80211_mesh_notify_scan_completed(local);
ieee80211_queue_work(&local->hw, &local->work_work);
}
+
+void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ trace_api_scan_completed(local, aborted);
+
+ set_bit(SCAN_COMPLETED, &local->scanning);
+ if (aborted)
+ set_bit(SCAN_ABORTED, &local->scanning);
+ ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
+}
EXPORT_SYMBOL(ieee80211_scan_completed);
static int ieee80211_start_sw_scan(struct ieee80211_local *local)
@@ -447,7 +460,7 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
/* if no more bands/channels left, complete scan and advance to the idle state */
if (local->scan_channel_idx >= local->scan_req->n_channels) {
- ieee80211_scan_completed(&local->hw, false);
+ __ieee80211_scan_completed(&local->hw, false);
return 1;
}
@@ -639,17 +652,25 @@ void ieee80211_scan_work(struct work_struct *work)
struct ieee80211_sub_if_data *sdata = local->scan_sdata;
unsigned long next_delay = 0;
- mutex_lock(&local->scan_mtx);
+ if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) {
+ bool aborted;
+
+ aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning);
+ __ieee80211_scan_completed(&local->hw, aborted);
+ return;
+ }
+
+ mutex_lock(&local->mtx);
if (!sdata || !local->scan_req) {
- mutex_unlock(&local->scan_mtx);
+ mutex_unlock(&local->mtx);
return;
}
if (local->hw_scan_req) {
int rc = drv_hw_scan(local, sdata, local->hw_scan_req);
- mutex_unlock(&local->scan_mtx);
+ mutex_unlock(&local->mtx);
if (rc)
- ieee80211_scan_completed(&local->hw, true);
+ __ieee80211_scan_completed(&local->hw, true);
return;
}
@@ -661,20 +682,20 @@ void ieee80211_scan_work(struct work_struct *work)
local->scan_sdata = NULL;
rc = __ieee80211_start_scan(sdata, req);
- mutex_unlock(&local->scan_mtx);
+ mutex_unlock(&local->mtx);
if (rc)
- ieee80211_scan_completed(&local->hw, true);
+ __ieee80211_scan_completed(&local->hw, true);
return;
}
- mutex_unlock(&local->scan_mtx);
+ mutex_unlock(&local->mtx);
/*
* Avoid re-scheduling when the sdata is going away.
*/
if (!ieee80211_sdata_running(sdata)) {
- ieee80211_scan_completed(&local->hw, true);
+ __ieee80211_scan_completed(&local->hw, true);
return;
}
@@ -711,9 +732,9 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
{
int res;
- mutex_lock(&sdata->local->scan_mtx);
+ mutex_lock(&sdata->local->mtx);
res = __ieee80211_start_scan(sdata, req);
- mutex_unlock(&sdata->local->scan_mtx);
+ mutex_unlock(&sdata->local->mtx);
return res;
}
@@ -726,7 +747,7 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
int ret = -EBUSY;
enum ieee80211_band band;
- mutex_lock(&local->scan_mtx);
+ mutex_lock(&local->mtx);
/* busy scanning */
if (local->scan_req)
@@ -761,7 +782,7 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
ret = __ieee80211_start_scan(sdata, sdata->local->int_scan_req);
unlock:
- mutex_unlock(&local->scan_mtx);
+ mutex_unlock(&local->mtx);
return ret;
}
@@ -775,11 +796,11 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
* Only call this function when a scan can't be
* queued -- mostly at suspend under RTNL.
*/
- mutex_lock(&local->scan_mtx);
+ mutex_lock(&local->mtx);
abortscan = test_bit(SCAN_SW_SCANNING, &local->scanning) ||
(!local->scanning && local->scan_req);
- mutex_unlock(&local->scan_mtx);
+ mutex_unlock(&local->mtx);
if (abortscan)
- ieee80211_scan_completed(&local->hw, true);
+ __ieee80211_scan_completed(&local->hw, true);
}
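
In scan.c, ieee80211_scan_completed() no longer finishes the scan directly: it records SCAN_COMPLETED (and SCAN_ABORTED) in local->scanning and queues the scan work, and __ieee80211_scan_completed() then runs from that work, where it may sleep and take local->mtx. A small user-space model of this split, with invented names:

/* --- illustrative sketch, not part of the patch --- */
#include <stdio.h>

#define SCAN_COMPLETED 0x1u
#define SCAN_ABORTED   0x2u

static unsigned int scanning;
static int work_queued;

static void finish_scan(int aborted)     /* ~ __ieee80211_scan_completed() */
{
	printf("scan finished%s\n", aborted ? " (aborted)" : "");
}

static void scan_completed(int aborted)  /* ~ ieee80211_scan_completed()   */
{
	scanning |= SCAN_COMPLETED;
	if (aborted)
		scanning |= SCAN_ABORTED;
	work_queued = 1;                 /* ~ ieee80211_queue_delayed_work() */
}

static void scan_work_run(void)          /* ~ ieee80211_scan_work()        */
{
	if (scanning & SCAN_COMPLETED) {
		int aborted = !!(scanning & SCAN_ABORTED);

		scanning &= ~(SCAN_COMPLETED | SCAN_ABORTED);
		finish_scan(aborted);
		return;
	}
	/* otherwise: keep stepping through the scan state machine */
}

int main(void)
{
	scan_completed(0);
	if (work_queued)
		scan_work_run();
	return 0;
}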
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 6d86f0c1ad04..ca2cba9cea87 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -125,7 +125,7 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
lockdep_is_held(&local->sta_mtx));
while (sta) {
if ((sta->sdata == sdata ||
- sta->sdata->bss == sdata->bss) &&
+ (sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
sta = rcu_dereference_check(sta->hnext,
@@ -174,8 +174,7 @@ static void __sta_info_free(struct ieee80211_local *local,
}
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: Destroyed STA %pM\n",
- wiphy_name(local->hw.wiphy), sta->sta.addr);
+ wiphy_debug(local->hw.wiphy, "Destroyed STA %pM\n", sta->sta.addr);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
kfree(sta);
@@ -262,8 +261,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: Allocated STA %pM\n",
- wiphy_name(local->hw.wiphy), sta->sta.addr);
+ wiphy_debug(local->hw.wiphy, "Allocated STA %pM\n", sta->sta.addr);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
#ifdef CONFIG_MAC80211_MESH
@@ -282,7 +280,7 @@ static int sta_info_finish_insert(struct sta_info *sta, bool async)
unsigned long flags;
int err = 0;
- WARN_ON(!mutex_is_locked(&local->sta_mtx));
+ lockdep_assert_held(&local->sta_mtx);
/* notify driver */
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -300,8 +298,9 @@ static int sta_info_finish_insert(struct sta_info *sta, bool async)
sta->uploaded = true;
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (async)
- printk(KERN_DEBUG "%s: Finished adding IBSS STA %pM\n",
- wiphy_name(local->hw.wiphy), sta->sta.addr);
+ wiphy_debug(local->hw.wiphy,
+ "Finished adding IBSS STA %pM\n",
+ sta->sta.addr);
#endif
}
@@ -411,8 +410,8 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
spin_unlock_irqrestore(&local->sta_lock, flags);
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: Added IBSS STA %pM\n",
- wiphy_name(local->hw.wiphy), sta->sta.addr);
+ wiphy_debug(local->hw.wiphy, "Added IBSS STA %pM\n",
+ sta->sta.addr);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
ieee80211_queue_work(&local->hw, &local->sta_finish_work);
@@ -459,8 +458,7 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
}
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: Inserted STA %pM\n",
- wiphy_name(local->hw.wiphy), sta->sta.addr);
+ wiphy_debug(local->hw.wiphy, "Inserted STA %pM\n", sta->sta.addr);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
/* move reference to rcu-protected */
@@ -690,8 +688,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
#endif
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: Removed STA %pM\n",
- wiphy_name(local->hw.wiphy), sta->sta.addr);
+ wiphy_debug(local->hw.wiphy, "Removed STA %pM\n", sta->sta.addr);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
cancel_work_sync(&sta->drv_unblock_wk);
@@ -841,13 +838,20 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
mutex_unlock(&local->sta_mtx);
}
-struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw,
- const u8 *addr)
+struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
+ const u8 *addr,
+ const u8 *localaddr)
{
struct sta_info *sta, *nxt;
- /* Just return a random station ... first in list ... */
+ /*
+ * Just return a random station if localaddr is NULL
+ * ... first in list.
+ */
for_each_sta_info(hw_to_local(hw), addr, sta, nxt) {
+ if (localaddr &&
+ compare_ether_addr(sta->sdata->vif.addr, localaddr) != 0)
+ continue;
if (!sta->uploaded)
return NULL;
return &sta->sta;
@@ -855,7 +859,7 @@ struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw,
return NULL;
}
-EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw);
+EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_ifaddr);
struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
const u8 *addr)
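
ieee80211_find_sta_by_hw() above becomes ieee80211_find_sta_by_ifaddr(): the peer-address lookup is unchanged, but callers may now pass a local interface address to narrow the match instead of getting whichever entry comes first. A user-space model of that optional filter, with invented types:

/* --- illustrative sketch, not part of the patch --- */
#include <stdio.h>
#include <string.h>

struct entry {
	const char *peer;   /* station MAC */
	const char *local;  /* vif.addr    */
};

static const struct entry *find_sta(const struct entry *tbl, int n,
				    const char *peer, const char *localaddr)
{
	for (int i = 0; i < n; i++) {
		if (strcmp(tbl[i].peer, peer))
			continue;
		if (localaddr && strcmp(tbl[i].local, localaddr))
			continue;      /* peer matches, wrong interface */
		return &tbl[i];
	}
	return NULL;
}

int main(void)
{
	const struct entry tbl[] = {
		{ "aa:aa", "00:01" },
		{ "aa:aa", "00:02" },
	};
	const struct entry *e = find_sta(tbl, 2, "aa:aa", "00:02");

	printf("%s\n", e ? e->local : "not found");
	return 0;
}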
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 54262e72376d..810c5ce98316 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -103,6 +103,7 @@ struct tid_ampdu_tx {
* @reorder_buf: buffer to reorder incoming aggregated MPDUs
* @reorder_time: jiffies when skb was added
* @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
+ * @reorder_timer: releases expired frames from the reorder buffer.
* @head_seq_num: head sequence number in reordering buffer.
* @stored_mpdu_num: number of MPDUs in reordering buffer
* @ssn: Starting Sequence Number expected to be aggregated.
@@ -110,20 +111,25 @@ struct tid_ampdu_tx {
* @timeout: reset timer value (in TUs).
* @dialog_token: dialog token for aggregation session
* @rcu_head: RCU head used for freeing this struct
+ * @reorder_lock: serializes access to reorder buffer, see below.
*
* This structure is protected by RCU and the per-station
* spinlock. Assignments to the array holding it must hold
- * the spinlock, only the RX path can access it under RCU
- * lock-free. The RX path, since it is single-threaded,
- * can even modify the structure without locking since the
- * only other modifications to it are done when the struct
- * can not yet or no longer be found by the RX path.
+ * the spinlock.
+ *
+ * The @reorder_lock is used to protect the variables and
+ * arrays such as @reorder_buf, @reorder_time, @head_seq_num,
+ * @stored_mpdu_num and @reorder_time from being corrupted by
+ * concurrent access of the RX path and the expired frame
+ * release timer.
*/
struct tid_ampdu_rx {
struct rcu_head rcu_head;
+ spinlock_t reorder_lock;
struct sk_buff **reorder_buf;
unsigned long *reorder_time;
struct timer_list session_timer;
+ struct timer_list reorder_timer;
u16 head_seq_num;
u16 stored_mpdu_num;
u16 ssn;
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 10caec5ea8fa..dd85006c4fe8 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -58,6 +58,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
info->control.vif = &sta->sdata->vif;
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING |
IEEE80211_TX_INTFL_RETRANSMISSION;
+ info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
sta->tx_filtered_count++;
@@ -114,11 +115,10 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (net_ratelimit())
- printk(KERN_DEBUG "%s: dropped TX filtered frame, "
- "queue_len=%d PS=%d @%lu\n",
- wiphy_name(local->hw.wiphy),
- skb_queue_len(&sta->tx_filtered),
- !!test_sta_flags(sta, WLAN_STA_PS_STA), jiffies);
+ wiphy_debug(local->hw.wiphy,
+ "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
+ skb_queue_len(&sta->tx_filtered),
+ !!test_sta_flags(sta, WLAN_STA_PS_STA), jiffies);
#endif
dev_kfree_skb(skb);
}
@@ -296,7 +296,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
}
if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX)
- cfg80211_action_tx_status(
+ cfg80211_mgmt_tx_status(
skb->dev, (unsigned long) skb, skb->data, skb->len,
!!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index c54db966926b..e1733dcb58a7 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -351,8 +351,8 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
local->total_ps_buffered = total;
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n",
- wiphy_name(local->hw.wiphy), purged);
+ wiphy_debug(local->hw.wiphy, "PS buffers full - purged %d frames\n",
+ purged);
#endif
}
@@ -509,6 +509,18 @@ ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
}
static ieee80211_tx_result debug_noinline
+ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+
+ if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol &&
+ tx->sdata->control_port_no_encrypt))
+ info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+
+ return TX_CONTINUE;
+}
+
+static ieee80211_tx_result debug_noinline
ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
{
struct ieee80211_key *key = NULL;
@@ -527,7 +539,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
else if ((key = rcu_dereference(tx->sdata->default_key)))
tx->key = key;
else if (tx->sdata->drop_unencrypted &&
- (tx->skb->protocol != cpu_to_be16(ETH_P_PAE)) &&
+ (tx->skb->protocol != tx->sdata->control_port_protocol) &&
!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
(!ieee80211_is_robust_mgmt_frame(hdr) ||
(ieee80211_is_action(hdr->frame_control) &&
@@ -543,15 +555,16 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
tx->key->tx_rx_count++;
/* TODO: add threshold stuff again */
- switch (tx->key->conf.alg) {
- case ALG_WEP:
+ switch (tx->key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
if (ieee80211_is_auth(hdr->frame_control))
break;
- case ALG_TKIP:
+ case WLAN_CIPHER_SUITE_TKIP:
if (!ieee80211_is_data_present(hdr->frame_control))
tx->key = NULL;
break;
- case ALG_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP:
if (!ieee80211_is_data_present(hdr->frame_control) &&
!ieee80211_use_mfp(hdr->frame_control, tx->sta,
tx->skb))
@@ -561,7 +574,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
IEEE80211_KEY_FLAG_SW_MGMT) &&
ieee80211_is_mgmt(hdr->frame_control);
break;
- case ALG_AES_CMAC:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
if (!ieee80211_is_mgmt(hdr->frame_control))
tx->key = NULL;
break;
@@ -946,22 +959,31 @@ ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+
if (!tx->key)
return TX_CONTINUE;
- switch (tx->key->conf.alg) {
- case ALG_WEP:
+ switch (tx->key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
return ieee80211_crypto_wep_encrypt(tx);
- case ALG_TKIP:
+ case WLAN_CIPHER_SUITE_TKIP:
return ieee80211_crypto_tkip_encrypt(tx);
- case ALG_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP:
return ieee80211_crypto_ccmp_encrypt(tx);
- case ALG_AES_CMAC:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
return ieee80211_crypto_aes_cmac_encrypt(tx);
+ default:
+ /* handle hw-only algorithm */
+ if (info->control.hw_key) {
+ ieee80211_tx_set_protected(tx);
+ return TX_CONTINUE;
+ }
+ break;
+
}
- /* not reached */
- WARN_ON(1);
return TX_DROP;
}
@@ -1339,6 +1361,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
CALL_TXH(ieee80211_tx_h_dynamic_ps);
CALL_TXH(ieee80211_tx_h_check_assoc);
CALL_TXH(ieee80211_tx_h_ps_buf);
+ CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
CALL_TXH(ieee80211_tx_h_select_key);
if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
CALL_TXH(ieee80211_tx_h_rate_ctrl);
@@ -1511,8 +1534,8 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
I802_DEBUG_INC(local->tx_expand_skb_head);
if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
- printk(KERN_DEBUG "%s: failed to reallocate TX buffer\n",
- wiphy_name(local->hw.wiphy));
+ wiphy_debug(local->hw.wiphy,
+ "failed to reallocate TX buffer\n");
return -ENOMEM;
}
@@ -1586,6 +1609,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
return;
}
+ hdr = (struct ieee80211_hdr *) skb->data;
info->control.vif = &sdata->vif;
if (ieee80211_vif_is_mesh(&sdata->vif) &&
@@ -1699,7 +1723,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
u16 ethertype, hdrlen, meshhdrlen = 0;
__le16 fc;
struct ieee80211_hdr hdr;
- struct ieee80211s_hdr mesh_hdr;
+ struct ieee80211s_hdr mesh_hdr __maybe_unused;
const u8 *encaps_data;
int encaps_len, skip_header_bytes;
int nh_pos, h_pos;
@@ -1816,7 +1840,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
#endif
case NL80211_IFTYPE_STATION:
memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
- if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) {
+ if (sdata->u.mgd.use_4addr &&
+ cpu_to_be16(ethertype) != sdata->control_port_protocol) {
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
/* RA TA DA SA */
memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
@@ -1869,7 +1894,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
if (!ieee80211_vif_is_mesh(&sdata->vif) &&
unlikely(!is_multicast_ether_addr(hdr.addr1) &&
!(sta_flags & WLAN_STA_AUTHORIZED) &&
- !(ethertype == ETH_P_PAE &&
+ !(cpu_to_be16(ethertype) == sdata->control_port_protocol &&
compare_ether_addr(sdata->vif.addr,
skb->data + ETH_ALEN) == 0))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -2068,8 +2093,7 @@ void ieee80211_tx_pending(unsigned long data)
if (skb_queue_empty(&local->pending[i]))
list_for_each_entry_rcu(sdata, &local->interfaces, list)
- netif_tx_wake_queue(
- netdev_get_tx_queue(sdata->dev, i));
+ netif_wake_subqueue(sdata->dev, i);
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 748387d45bc0..aba025d748e9 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -283,8 +283,11 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
if (skb_queue_empty(&local->pending[queue])) {
rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list)
- netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
+ continue;
+ netif_wake_subqueue(sdata->dev, queue);
+ }
rcu_read_unlock();
} else
tasklet_schedule(&local->tx_pending_tasklet);
@@ -323,7 +326,7 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list)
- netif_tx_stop_queue(netdev_get_tx_queue(sdata->dev, queue));
+ netif_stop_subqueue(sdata->dev, queue);
rcu_read_unlock();
}
@@ -471,16 +474,10 @@ void ieee80211_iterate_active_interfaces(
list_for_each_entry(sdata, &local->interfaces, list) {
switch (sdata->vif.type) {
- case __NL80211_IFTYPE_AFTER_LAST:
- case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP_VLAN:
continue;
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_WDS:
- case NL80211_IFTYPE_MESH_POINT:
+ default:
break;
}
if (ieee80211_sdata_running(sdata))
@@ -505,16 +502,10 @@ void ieee80211_iterate_active_interfaces_atomic(
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
switch (sdata->vif.type) {
- case __NL80211_IFTYPE_AFTER_LAST:
- case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP_VLAN:
continue;
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_WDS:
- case NL80211_IFTYPE_MESH_POINT:
+ default:
break;
}
if (ieee80211_sdata_running(sdata))
@@ -904,26 +895,34 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
const u8 *ie, size_t ie_len,
- enum ieee80211_band band)
+ enum ieee80211_band band, u32 rate_mask,
+ u8 channel)
{
struct ieee80211_supported_band *sband;
u8 *pos;
size_t offset = 0, noffset;
int supp_rates_len, i;
+ u8 rates[32];
+ int num_rates;
+ int ext_rates_len;
sband = local->hw.wiphy->bands[band];
pos = buffer;
- supp_rates_len = min_t(int, sband->n_bitrates, 8);
+ num_rates = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((BIT(i) & rate_mask) == 0)
+ continue; /* skip rate */
+ rates[num_rates++] = (u8) (sband->bitrates[i].bitrate / 5);
+ }
+
+ supp_rates_len = min_t(int, num_rates, 8);
*pos++ = WLAN_EID_SUPP_RATES;
*pos++ = supp_rates_len;
-
- for (i = 0; i < supp_rates_len; i++) {
- int rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
- }
+ memcpy(pos, rates, supp_rates_len);
+ pos += supp_rates_len;
/* insert "request information" if in custom IEs */
if (ie && ie_len) {
@@ -941,14 +940,18 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
offset = noffset;
}
- if (sband->n_bitrates > i) {
+ ext_rates_len = num_rates - supp_rates_len;
+ if (ext_rates_len > 0) {
*pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = sband->n_bitrates - i;
+ *pos++ = ext_rates_len;
+ memcpy(pos, rates + supp_rates_len, ext_rates_len);
+ pos += ext_rates_len;
+ }
- for (; i < sband->n_bitrates; i++) {
- int rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
- }
+ if (channel && sband->band == IEEE80211_BAND_2GHZ) {
+ *pos++ = WLAN_EID_DS_PARAMS;
+ *pos++ = 1;
+ *pos++ = channel;
}
/* insert custom IEs that go before HT */
@@ -1017,6 +1020,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
struct ieee80211_mgmt *mgmt;
size_t buf_len;
u8 *buf;
+ u8 chan;
/* FIXME: come up with a proper value */
buf = kmalloc(200 + ie_len, GFP_KERNEL);
@@ -1026,8 +1030,14 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
return;
}
+ chan = ieee80211_frequency_to_channel(
+ local->hw.conf.channel->center_freq);
+
buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len,
- local->hw.conf.channel->band);
+ local->hw.conf.channel->band,
+ sdata->rc_rateidx_mask
+ [local->hw.conf.channel->band],
+ chan);
skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
ssid, ssid_len,
@@ -1189,7 +1199,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* ignore virtual */
break;
case NL80211_IFTYPE_UNSPECIFIED:
- case __NL80211_IFTYPE_AFTER_LAST:
+ case NUM_NL80211_IFTYPES:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
WARN_ON(1);
break;
}
@@ -1293,9 +1305,9 @@ void ieee80211_recalc_smps(struct ieee80211_local *local,
int count = 0;
if (forsdata)
- WARN_ON(!mutex_is_locked(&forsdata->u.mgd.mtx));
+ lockdep_assert_held(&forsdata->u.mgd.mtx);
- WARN_ON(!mutex_is_locked(&local->iflist_mtx));
+ lockdep_assert_held(&local->iflist_mtx);
/*
* This function could be improved to handle multiple
@@ -1308,7 +1320,7 @@ void ieee80211_recalc_smps(struct ieee80211_local *local,
*/
list_for_each_entry(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
continue;
if (sdata->vif.type != NL80211_IFTYPE_STATION)
goto set;
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 9ebc8d8a1f5b..f27484c22b9f 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -240,7 +240,7 @@ static int ieee80211_wep_decrypt(struct ieee80211_local *local,
keyidx = skb->data[hdrlen + 3] >> 6;
- if (!key || keyidx != key->conf.keyidx || key->conf.alg != ALG_WEP)
+ if (!key || keyidx != key->conf.keyidx)
return -1;
klen = 3 + key->conf.keylen;
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 81d4ad64184a..ae344d1ba056 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -43,7 +43,7 @@ enum work_action {
/* utils */
static inline void ASSERT_WORK_MTX(struct ieee80211_local *local)
{
- WARN_ON(!mutex_is_locked(&local->work_mtx));
+ lockdep_assert_held(&local->mtx);
}
/*
@@ -757,7 +757,7 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
mgmt = (struct ieee80211_mgmt *) skb->data;
fc = le16_to_cpu(mgmt->frame_control);
- mutex_lock(&local->work_mtx);
+ mutex_lock(&local->mtx);
list_for_each_entry(wk, &local->work_list, list) {
const u8 *bssid = NULL;
@@ -833,7 +833,7 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
WARN(1, "unexpected: %d", rma);
}
- mutex_unlock(&local->work_mtx);
+ mutex_unlock(&local->mtx);
if (rma != WORK_ACT_DONE)
goto out;
@@ -845,9 +845,9 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
case WORK_DONE_REQUEUE:
synchronize_rcu();
wk->started = false; /* restart */
- mutex_lock(&local->work_mtx);
+ mutex_lock(&local->mtx);
list_add_tail(&wk->list, &local->work_list);
- mutex_unlock(&local->work_mtx);
+ mutex_unlock(&local->mtx);
}
out:
@@ -888,9 +888,9 @@ static void ieee80211_work_work(struct work_struct *work)
while ((skb = skb_dequeue(&local->work_skb_queue)))
ieee80211_work_rx_queued_mgmt(local, skb);
- ieee80211_recalc_idle(local);
+ mutex_lock(&local->mtx);
- mutex_lock(&local->work_mtx);
+ ieee80211_recalc_idle(local);
list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
bool started = wk->started;
@@ -995,20 +995,16 @@ static void ieee80211_work_work(struct work_struct *work)
run_again(local, jiffies + HZ/2);
}
- mutex_lock(&local->scan_mtx);
-
if (list_empty(&local->work_list) && local->scan_req &&
!local->scanning)
ieee80211_queue_delayed_work(&local->hw,
&local->scan_work,
round_jiffies_relative(0));
- mutex_unlock(&local->scan_mtx);
-
- mutex_unlock(&local->work_mtx);
-
ieee80211_recalc_idle(local);
+ mutex_unlock(&local->mtx);
+
list_for_each_entry_safe(wk, tmp, &free_work, list) {
wk->done(wk, NULL);
list_del(&wk->list);
@@ -1035,16 +1031,15 @@ void ieee80211_add_work(struct ieee80211_work *wk)
wk->started = false;
local = wk->sdata->local;
- mutex_lock(&local->work_mtx);
+ mutex_lock(&local->mtx);
list_add_tail(&wk->list, &local->work_list);
- mutex_unlock(&local->work_mtx);
+ mutex_unlock(&local->mtx);
ieee80211_queue_work(&local->hw, &local->work_work);
}
void ieee80211_work_init(struct ieee80211_local *local)
{
- mutex_init(&local->work_mtx);
INIT_LIST_HEAD(&local->work_list);
setup_timer(&local->work_timer, ieee80211_work_timer,
(unsigned long)local);
@@ -1057,7 +1052,7 @@ void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
struct ieee80211_local *local = sdata->local;
struct ieee80211_work *wk;
- mutex_lock(&local->work_mtx);
+ mutex_lock(&local->mtx);
list_for_each_entry(wk, &local->work_list, list) {
if (wk->sdata != sdata)
continue;
@@ -1065,19 +1060,19 @@ void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
wk->started = true;
wk->timeout = jiffies;
}
- mutex_unlock(&local->work_mtx);
+ mutex_unlock(&local->mtx);
/* run cleanups etc. */
ieee80211_work_work(&local->work_work);
- mutex_lock(&local->work_mtx);
+ mutex_lock(&local->mtx);
list_for_each_entry(wk, &local->work_list, list) {
if (wk->sdata != sdata)
continue;
WARN_ON(1);
break;
}
- mutex_unlock(&local->work_mtx);
+ mutex_unlock(&local->mtx);
}
ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
@@ -1163,7 +1158,7 @@ int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata,
struct ieee80211_work *wk, *tmp;
bool found = false;
- mutex_lock(&local->work_mtx);
+ mutex_lock(&local->mtx);
list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
if ((unsigned long) wk == cookie) {
wk->timeout = jiffies;
@@ -1171,7 +1166,7 @@ int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata,
break;
}
}
- mutex_unlock(&local->work_mtx);
+ mutex_unlock(&local->mtx);
if (!found)
return -ENOENT;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 8d59d27d887e..bee230d8fd11 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -36,8 +36,8 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
int tail;
hdr = (struct ieee80211_hdr *)skb->data;
- if (!tx->key || tx->key->conf.alg != ALG_TKIP || skb->len < 24 ||
- !ieee80211_is_data_present(hdr->frame_control))
+ if (!tx->key || tx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP ||
+ skb->len < 24 || !ieee80211_is_data_present(hdr->frame_control))
return TX_CONTINUE;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -94,7 +94,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
if (status->flag & RX_FLAG_MMIC_STRIPPED)
return RX_CONTINUE;
- if (!rx->key || rx->key->conf.alg != ALG_TKIP ||
+ if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP ||
!ieee80211_has_protected(hdr->frame_control) ||
!ieee80211_is_data_present(hdr->frame_control))
return RX_CONTINUE;
@@ -117,7 +117,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
key = &rx->key->conf.key[key_offset];
michael_mic(key, hdr, data, data_len, mic);
if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) {
- if (!(rx->flags & IEEE80211_RX_RA_MATCH))
+ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
return RX_DROP_UNUSABLE;
mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
@@ -221,19 +221,13 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
if (!rx->sta || skb->len - hdrlen < 12)
return RX_DROP_UNUSABLE;
- if (status->flag & RX_FLAG_DECRYPTED) {
- if (status->flag & RX_FLAG_IV_STRIPPED) {
- /*
- * Hardware took care of all processing, including
- * replay protection, and stripped the ICV/IV so
- * we cannot do any checks here.
- */
- return RX_CONTINUE;
- }
-
- /* let TKIP code verify IV, but skip decryption */
+ /*
+ * Let TKIP code verify IV, but skip decryption.
+ * In the case where hardware checks the IV as well,
+ * we don't even get here, see ieee80211_rx_h_decrypt()
+ */
+ if (status->flag & RX_FLAG_DECRYPTED)
hwaccel = 1;
- }
res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
key, skb->data + hdrlen,
@@ -447,10 +441,6 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
if (!rx->sta || data_len < 0)
return RX_DROP_UNUSABLE;
- if ((status->flag & RX_FLAG_DECRYPTED) &&
- (status->flag & RX_FLAG_IV_STRIPPED))
- return RX_CONTINUE;
-
ccmp_hdr2pn(pn, skb->data + hdrlen);
queue = ieee80211_is_mgmt(hdr->frame_control) ?
@@ -564,10 +554,6 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
if (!ieee80211_is_mgmt(hdr->frame_control))
return RX_CONTINUE;
- if ((status->flag & RX_FLAG_DECRYPTED) &&
- (status->flag & RX_FLAG_IV_STRIPPED))
- return RX_CONTINUE;
-
if (skb->len < 24 + sizeof(*mmie))
return RX_DROP_UNUSABLE;
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4c2f89df5cce..0c043b6ce65e 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -40,6 +40,7 @@
#include <net/udp.h>
#include <net/icmp.h> /* for icmp_send */
#include <net/route.h>
+#include <net/ip6_checksum.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
@@ -637,10 +638,12 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
}
/* And finally the ICMP checksum */
- icmph->icmp6_cksum = 0;
- /* TODO IPv6: is this correct for ICMPv6? */
- ip_vs_checksum_complete(skb, icmp_offset);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ icmph->icmp6_cksum = ~csum_ipv6_magic(&iph->saddr, &iph->daddr,
+ skb->len - icmp_offset,
+ IPPROTO_ICMPV6, 0);
+ skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
+ skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
+ skb->ip_summed = CHECKSUM_PARTIAL;
if (inout)
IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph,
@@ -1381,8 +1384,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
cp->protocol == IPPROTO_SCTP) {
if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
- (atomic_read(&cp->in_pkts) %
- sysctl_ip_vs_sync_threshold[1]
+ (pkts % sysctl_ip_vs_sync_threshold[1]
== sysctl_ip_vs_sync_threshold[0])) ||
(cp->old_state != cp->state &&
((cp->state == IP_VS_SCTP_S_CLOSED) ||
@@ -1393,7 +1395,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
}
}
- if (af == AF_INET &&
+ /* Keep this block last: TCP and others with pp->num_states <= 1 */
+ else if (af == AF_INET &&
(ip_vs_sync_state & IP_VS_STATE_MASTER) &&
(((cp->protocol != IPPROTO_TCP ||
cp->state == IP_VS_TCP_S_ESTABLISHED) &&
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 0f0c079c422a..ca8ec8c4f311 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -61,7 +61,7 @@ static DEFINE_RWLOCK(__ip_vs_svc_lock);
static DEFINE_RWLOCK(__ip_vs_rs_lock);
/* lock for state and timeout tables */
-static DEFINE_RWLOCK(__ip_vs_securetcp_lock);
+static DEFINE_SPINLOCK(ip_vs_securetcp_lock);
/* lock for drop entry handling */
static DEFINE_SPINLOCK(__ip_vs_dropentry_lock);
@@ -204,7 +204,7 @@ static void update_defense_level(void)
spin_unlock(&__ip_vs_droppacket_lock);
/* secure_tcp */
- write_lock(&__ip_vs_securetcp_lock);
+ spin_lock(&ip_vs_securetcp_lock);
switch (sysctl_ip_vs_secure_tcp) {
case 0:
if (old_secure_tcp >= 2)
@@ -238,7 +238,7 @@ static void update_defense_level(void)
old_secure_tcp = sysctl_ip_vs_secure_tcp;
if (to_change >= 0)
ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1);
- write_unlock(&__ip_vs_securetcp_lock);
+ spin_unlock(&ip_vs_securetcp_lock);
local_bh_enable();
}
@@ -843,7 +843,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
return -EINVAL;
}
- dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
+ dest = kzalloc(sizeof(struct ip_vs_dest), GFP_KERNEL);
if (dest == NULL) {
pr_err("%s(): no memory.\n", __func__);
return -ENOMEM;
@@ -1177,7 +1177,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
}
#endif
- svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
+ svc = kzalloc(sizeof(struct ip_vs_service), GFP_KERNEL);
if (svc == NULL) {
IP_VS_DBG(1, "%s(): no memory\n", __func__);
ret = -ENOMEM;
@@ -2155,7 +2155,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
if (cmd != IP_VS_SO_SET_ADD
&& (svc == NULL || svc->protocol != usvc.protocol)) {
ret = -ESRCH;
- goto out_unlock;
+ goto out_drop_service;
}
switch (cmd) {
@@ -2189,6 +2189,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
ret = -EINVAL;
}
+out_drop_service:
if (svc)
ip_vs_service_put(svc);
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index bbc1ac795952..727e45b66953 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -35,7 +35,7 @@
static LIST_HEAD(ip_vs_schedulers);
/* lock for service table */
-static DEFINE_RWLOCK(__ip_vs_sched_lock);
+static DEFINE_SPINLOCK(ip_vs_sched_lock);
/*
@@ -108,7 +108,7 @@ static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name)
IP_VS_DBG(2, "%s(): sched_name \"%s\"\n", __func__, sched_name);
- read_lock_bh(&__ip_vs_sched_lock);
+ spin_lock_bh(&ip_vs_sched_lock);
list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
/*
@@ -122,14 +122,14 @@ static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name)
}
if (strcmp(sched_name, sched->name)==0) {
/* HIT */
- read_unlock_bh(&__ip_vs_sched_lock);
+ spin_unlock_bh(&ip_vs_sched_lock);
return sched;
}
if (sched->module)
module_put(sched->module);
}
- read_unlock_bh(&__ip_vs_sched_lock);
+ spin_unlock_bh(&ip_vs_sched_lock);
return NULL;
}
@@ -184,10 +184,10 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
/* increase the module use count */
ip_vs_use_count_inc();
- write_lock_bh(&__ip_vs_sched_lock);
+ spin_lock_bh(&ip_vs_sched_lock);
if (!list_empty(&scheduler->n_list)) {
- write_unlock_bh(&__ip_vs_sched_lock);
+ spin_unlock_bh(&ip_vs_sched_lock);
ip_vs_use_count_dec();
pr_err("%s(): [%s] scheduler already linked\n",
__func__, scheduler->name);
@@ -200,7 +200,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
*/
list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
if (strcmp(scheduler->name, sched->name) == 0) {
- write_unlock_bh(&__ip_vs_sched_lock);
+ spin_unlock_bh(&ip_vs_sched_lock);
ip_vs_use_count_dec();
pr_err("%s(): [%s] scheduler already existed "
"in the system\n", __func__, scheduler->name);
@@ -211,7 +211,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
* Add it into the d-linked scheduler list
*/
list_add(&scheduler->n_list, &ip_vs_schedulers);
- write_unlock_bh(&__ip_vs_sched_lock);
+ spin_unlock_bh(&ip_vs_sched_lock);
pr_info("[%s] scheduler registered.\n", scheduler->name);
@@ -229,9 +229,9 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
return -EINVAL;
}
- write_lock_bh(&__ip_vs_sched_lock);
+ spin_lock_bh(&ip_vs_sched_lock);
if (list_empty(&scheduler->n_list)) {
- write_unlock_bh(&__ip_vs_sched_lock);
+ spin_unlock_bh(&ip_vs_sched_lock);
pr_err("%s(): [%s] scheduler is not in the list. failed\n",
__func__, scheduler->name);
return -EINVAL;
@@ -241,7 +241,7 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
* Remove it from the d-linked scheduler list
*/
list_del(&scheduler->n_list);
- write_unlock_bh(&__ip_vs_sched_lock);
+ spin_unlock_bh(&ip_vs_sched_lock);
/* decrease the module use count */
ip_vs_use_count_dec();
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index b46a8390896d..9228ee0dc11a 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -448,6 +448,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
{
__be16 _ports[2], *ports;
u8 nexthdr;
+ int poff;
memset(dst, 0, sizeof(*dst));
@@ -492,19 +493,13 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
return 0;
}
- switch (nexthdr) {
- case IPPROTO_TCP:
- case IPPROTO_UDP:
- case IPPROTO_UDPLITE:
- case IPPROTO_SCTP:
- case IPPROTO_DCCP:
- ports = skb_header_pointer(skb, protoff, sizeof(_ports),
+ poff = proto_ports_offset(nexthdr);
+ if (poff >= 0) {
+ ports = skb_header_pointer(skb, protoff + poff, sizeof(_ports),
&_ports);
- break;
- default:
+ } else {
_ports[0] = _ports[1] = 0;
ports = _ports;
- break;
}
if (!ports)
return -1;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 9a17f28b1253..3616f27b9d46 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -488,7 +488,7 @@ retry:
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
- err = sock_tx_timestamp(msg, sk, skb_tx(skb));
+ err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
if (err < 0)
goto out_unlock;
@@ -1209,7 +1209,7 @@ static int packet_snd(struct socket *sock,
err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
if (err)
goto out_free;
- err = sock_tx_timestamp(msg, sk, skb_tx(skb));
+ err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
if (err < 0)
goto out_free;
diff --git a/net/phonet/Kconfig b/net/phonet/Kconfig
index 6ec7d55b1769..901956ada9c8 100644
--- a/net/phonet/Kconfig
+++ b/net/phonet/Kconfig
@@ -14,3 +14,14 @@ config PHONET
To compile this driver as a module, choose M here: the module
will be called phonet. If unsure, say N.
+
+config PHONET_PIPECTRLR
+ bool "Phonet Pipe Controller"
+ depends on PHONET
+ default n
+ help
+ The Pipe Controller implementation in the Phonet stack, to support
+ Pipe data with Nokia Slim modems such as the WG2.5 used on the
+ ST-Ericsson U8500 platform.
+
+ If unsure, say N.
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 73aee7f2fcdc..fd95beb72f5d 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -251,6 +251,16 @@ int pn_skb_send(struct sock *sk, struct sk_buff *skb,
else if (phonet_address_lookup(net, daddr) == 0) {
dev = phonet_device_get(net);
skb->pkt_type = PACKET_LOOPBACK;
+ } else if (pn_sockaddr_get_object(target) == 0) {
+ /* Resource routing (small race until phonet_rcv()) */
+ struct sock *sk = pn_find_sock_by_res(net,
+ target->spn_resource);
+ if (sk) {
+ sock_put(sk);
+ dev = phonet_device_get(net);
+ skb->pkt_type = PACKET_LOOPBACK;
+ } else
+ dev = phonet_route_output(net, daddr);
} else
dev = phonet_route_output(net, daddr);
@@ -383,6 +393,13 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
goto out;
}
+ /* resource routing */
+ if (pn_sockaddr_get_object(&sa) == 0) {
+ struct sock *sk = pn_find_sock_by_res(net, sa.spn_resource);
+ if (sk)
+ return sk_receive_skb(sk, skb, 0);
+ }
+
/* check if we are the destination */
if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) {
/* Phonet packet input */
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 1bd38db4fe1e..2f032381bd45 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -52,6 +52,19 @@ static int pn_ioctl(struct sock *sk, int cmd, unsigned long arg)
answ = skb ? skb->len : 0;
release_sock(sk);
return put_user(answ, (int __user *)arg);
+
+ case SIOCPNADDRESOURCE:
+ case SIOCPNDELRESOURCE: {
+ u32 res;
+ if (get_user(res, (u32 __user *)arg))
+ return -EFAULT;
+ if (res >= 256)
+ return -EINVAL;
+ if (cmd == SIOCPNADDRESOURCE)
+ return pn_sock_bind_res(sk, res);
+ else
+ return pn_sock_unbind_res(sk, res);
+ }
}
return -ENOIOCTLCMD;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 15003021f4f0..aa3d8700d213 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -88,6 +88,15 @@ static int pep_reply(struct sock *sk, struct sk_buff *oskb,
const struct pnpipehdr *oph = pnp_hdr(oskb);
struct pnpipehdr *ph;
struct sk_buff *skb;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ const struct phonethdr *hdr = pn_hdr(oskb);
+ struct sockaddr_pn spn = {
+ .spn_family = AF_PHONET,
+ .spn_resource = 0xD9,
+ .spn_dev = hdr->pn_sdev,
+ .spn_obj = hdr->pn_sobj,
+ };
+#endif
skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
if (!skb)
@@ -105,10 +114,271 @@ static int pep_reply(struct sock *sk, struct sk_buff *oskb,
ph->pipe_handle = oph->pipe_handle;
ph->error_code = code;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ return pn_skb_send(sk, skb, &spn);
+#else
return pn_skb_send(sk, skb, &pipe_srv);
+#endif
}
#define PAD 0x00
+
+#ifdef CONFIG_PHONET_PIPECTRLR
+static u8 pipe_negotiate_fc(u8 *host_fc, u8 *remote_fc, int len)
+{
+ int i, j;
+ u8 base_fc, final_fc;
+
+ for (i = 0; i < len; i++) {
+ base_fc = host_fc[i];
+ for (j = 0; j < len; j++) {
+ if (remote_fc[j] == base_fc) {
+ final_fc = base_fc;
+ goto done;
+ }
+ }
+ }
+ return -EINVAL;
+
+done:
+ return final_fc;
+
+}
+
+static int pipe_get_flow_info(struct sock *sk, struct sk_buff *skb,
+ u8 *pref_rx_fc, u8 *req_tx_fc)
+{
+ struct pnpipehdr *hdr;
+ u8 n_sb;
+
+ if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
+ return -EINVAL;
+
+ hdr = pnp_hdr(skb);
+ n_sb = hdr->data[4];
+
+ __skb_pull(skb, sizeof(*hdr) + 4);
+ while (n_sb > 0) {
+ u8 type, buf[3], len = sizeof(buf);
+ u8 *data = pep_get_sb(skb, &type, &len, buf);
+
+ if (data == NULL)
+ return -EINVAL;
+
+ switch (type) {
+ case PN_PIPE_SB_REQUIRED_FC_TX:
+ if (len < 3 || (data[2] | data[3] | data[4]) > 3)
+ break;
+ req_tx_fc[0] = data[2];
+ req_tx_fc[1] = data[3];
+ req_tx_fc[2] = data[4];
+ break;
+
+ case PN_PIPE_SB_PREFERRED_FC_RX:
+ if (len < 3 || (data[2] | data[3] | data[4]) > 3)
+ break;
+ pref_rx_fc[0] = data[2];
+ pref_rx_fc[1] = data[3];
+ pref_rx_fc[2] = data[4];
+ break;
+
+ }
+ n_sb--;
+ }
+ return 0;
+}
+
+static int pipe_handler_send_req(struct sock *sk, u16 dobj, u8 utid,
+ u8 msg_id, u8 p_handle, gfp_t priority)
+{
+ int len;
+ struct pnpipehdr *ph;
+ struct sk_buff *skb;
+ struct sockaddr_pn spn = {
+ .spn_family = AF_PHONET,
+ .spn_resource = 0xD9,
+ .spn_dev = pn_dev(dobj),
+ .spn_obj = pn_obj(dobj),
+ };
+
+ static const u8 data[4] = {
+ PAD, PAD, PAD, PAD,
+ };
+
+ switch (msg_id) {
+ case PNS_PEP_CONNECT_REQ:
+ len = sizeof(data);
+ break;
+
+ case PNS_PEP_DISCONNECT_REQ:
+ case PNS_PEP_ENABLE_REQ:
+ case PNS_PEP_DISABLE_REQ:
+ len = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
+ if (!skb)
+ return -ENOMEM;
+ skb_set_owner_w(skb, sk);
+
+ skb_reserve(skb, MAX_PNPIPE_HEADER);
+ if (len) {
+ __skb_put(skb, len);
+ skb_copy_to_linear_data(skb, data, len);
+ }
+ __skb_push(skb, sizeof(*ph));
+ skb_reset_transport_header(skb);
+ ph = pnp_hdr(skb);
+ ph->utid = utid;
+ ph->message_id = msg_id;
+ ph->pipe_handle = p_handle;
+ ph->error_code = PN_PIPE_NO_ERROR;
+
+ return pn_skb_send(sk, skb, &spn);
+}
+
+static int pipe_handler_send_created_ind(struct sock *sk, u16 dobj,
+ u8 utid, u8 p_handle, u8 msg_id, u8 tx_fc, u8 rx_fc)
+{
+ int err_code;
+ struct pnpipehdr *ph;
+ struct sk_buff *skb;
+ struct sockaddr_pn spn = {
+ .spn_family = AF_PHONET,
+ .spn_resource = 0xD9,
+ .spn_dev = pn_dev(dobj),
+ .spn_obj = pn_obj(dobj),
+ };
+
+ static u8 data[4] = {
+ 0x03, 0x04,
+ };
+ data[2] = tx_fc;
+ data[3] = rx_fc;
+
+ /*
+ * actually, below is the number of sub-blocks and not an error code.
+ * Pipe_created_ind message format does not have any
+ * error code field. However, the Phonet stack will always send
+ * an error code as part of pnpipehdr. So, use that err_code to
+ * specify the number of sub-blocks.
+ */
+ err_code = 0x01;
+
+ skb = alloc_skb(MAX_PNPIPE_HEADER + sizeof(data), GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+ skb_set_owner_w(skb, sk);
+
+ skb_reserve(skb, MAX_PNPIPE_HEADER);
+ __skb_put(skb, sizeof(data));
+ skb_copy_to_linear_data(skb, data, sizeof(data));
+ __skb_push(skb, sizeof(*ph));
+ skb_reset_transport_header(skb);
+ ph = pnp_hdr(skb);
+ ph->utid = utid;
+ ph->message_id = msg_id;
+ ph->pipe_handle = p_handle;
+ ph->error_code = err_code;
+
+ return pn_skb_send(sk, skb, &spn);
+}
+
+static int pipe_handler_send_ind(struct sock *sk, u16 dobj, u8 utid,
+ u8 p_handle, u8 msg_id)
+{
+ int err_code;
+ struct pnpipehdr *ph;
+ struct sk_buff *skb;
+ struct sockaddr_pn spn = {
+ .spn_family = AF_PHONET,
+ .spn_resource = 0xD9,
+ .spn_dev = pn_dev(dobj),
+ .spn_obj = pn_obj(dobj),
+ };
+
+ /*
+ * actually, below is a filler.
+ * Pipe_enabled/disabled_ind message format does not have any
+ * error code field. However, the Phonet stack will always send
+ * an error code as part of pnpipehdr. So, use that err_code to
+ * specify the filler value.
+ */
+ err_code = 0x0;
+
+ skb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+ skb_set_owner_w(skb, sk);
+
+ skb_reserve(skb, MAX_PNPIPE_HEADER);
+ __skb_push(skb, sizeof(*ph));
+ skb_reset_transport_header(skb);
+ ph = pnp_hdr(skb);
+ ph->utid = utid;
+ ph->message_id = msg_id;
+ ph->pipe_handle = p_handle;
+ ph->error_code = err_code;
+
+ return pn_skb_send(sk, skb, &spn);
+}
+
+static int pipe_handler_enable_pipe(struct sock *sk, int cmd)
+{
+ int ret;
+ struct pep_sock *pn = pep_sk(sk);
+
+ switch (cmd) {
+ case PNPIPE_ENABLE:
+ ret = pipe_handler_send_req(sk, pn->pn_sk.sobject,
+ PNS_PIPE_ENABLE_UTID, PNS_PEP_ENABLE_REQ,
+ pn->pipe_handle, GFP_ATOMIC);
+ break;
+
+ case PNPIPE_DISABLE:
+ ret = pipe_handler_send_req(sk, pn->pn_sk.sobject,
+ PNS_PIPE_DISABLE_UTID, PNS_PEP_DISABLE_REQ,
+ pn->pipe_handle, GFP_ATOMIC);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int pipe_handler_create_pipe(struct sock *sk, int pipe_handle, int cmd)
+{
+ int ret;
+ struct pep_sock *pn = pep_sk(sk);
+
+ switch (cmd) {
+ case PNPIPE_CREATE:
+ ret = pipe_handler_send_req(sk, pn->pn_sk.sobject,
+ PNS_PEP_CONNECT_UTID, PNS_PEP_CONNECT_REQ,
+ pipe_handle, GFP_ATOMIC);
+ break;
+
+ case PNPIPE_DESTROY:
+ ret = pipe_handler_send_req(sk, pn->remote_pep,
+ PNS_PEP_DISCONNECT_UTID,
+ PNS_PEP_DISCONNECT_REQ,
+ pn->pipe_handle, GFP_ATOMIC);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+#endif
+
static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
{
static const u8 data[20] = {
@@ -173,6 +443,14 @@ static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
struct pep_sock *pn = pep_sk(sk);
struct pnpipehdr *ph;
struct sk_buff *skb;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ struct sockaddr_pn spn = {
+ .spn_family = AF_PHONET,
+ .spn_resource = 0xD9,
+ .spn_dev = pn_dev(pn->remote_pep),
+ .spn_obj = pn_obj(pn->remote_pep),
+ };
+#endif
skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority);
if (!skb)
@@ -192,7 +470,11 @@ static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
ph->data[3] = PAD;
ph->data[4] = status;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ return pn_skb_send(sk, skb, &spn);
+#else
return pn_skb_send(sk, skb, &pipe_srv);
+#endif
}
/* Send our RX flow control information to the sender.
@@ -309,6 +591,12 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
struct pnpipehdr *hdr = pnp_hdr(skb);
struct sk_buff_head *queue;
int err = 0;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ struct phonethdr *ph = pn_hdr(skb);
+ static u8 host_pref_rx_fc[3], host_req_tx_fc[3];
+ u8 remote_pref_rx_fc[3], remote_req_tx_fc[3];
+ u8 negotiated_rx_fc, negotiated_tx_fc;
+#endif
BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);
@@ -317,6 +605,40 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
break;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ case PNS_PEP_CONNECT_RESP:
+ if ((ph->pn_sdev == pn_dev(pn->remote_pep)) &&
+ (ph->pn_sobj == pn_obj(pn->remote_pep))) {
+ pipe_get_flow_info(sk, skb, remote_pref_rx_fc,
+ remote_req_tx_fc);
+
+ negotiated_tx_fc = pipe_negotiate_fc(remote_req_tx_fc,
+ host_pref_rx_fc,
+ sizeof(host_pref_rx_fc));
+ negotiated_rx_fc = pipe_negotiate_fc(host_req_tx_fc,
+ remote_pref_rx_fc,
+ sizeof(host_pref_rx_fc));
+
+ pn->pipe_state = PIPE_DISABLED;
+ pipe_handler_send_created_ind(sk, pn->remote_pep,
+ PNS_PIPE_CREATED_IND_UTID,
+ pn->pipe_handle, PNS_PIPE_CREATED_IND,
+ negotiated_tx_fc, negotiated_rx_fc);
+ pipe_handler_send_created_ind(sk, pn->pn_sk.sobject,
+ PNS_PIPE_CREATED_IND_UTID,
+ pn->pipe_handle, PNS_PIPE_CREATED_IND,
+ negotiated_tx_fc, negotiated_rx_fc);
+ } else {
+ pipe_handler_send_req(sk, pn->remote_pep,
+ PNS_PEP_CONNECT_UTID,
+ PNS_PEP_CONNECT_REQ, pn->pipe_handle,
+ GFP_ATOMIC);
+ pipe_get_flow_info(sk, skb, host_pref_rx_fc,
+ host_req_tx_fc);
+ }
+ break;
+#endif
+
case PNS_PEP_DISCONNECT_REQ:
pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
sk->sk_state = TCP_CLOSE_WAIT;
@@ -324,11 +646,41 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
sk->sk_state_change(sk);
break;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ case PNS_PEP_DISCONNECT_RESP:
+ pn->pipe_state = PIPE_IDLE;
+ pipe_handler_send_req(sk, pn->pn_sk.sobject,
+ PNS_PEP_DISCONNECT_UTID,
+ PNS_PEP_DISCONNECT_REQ, pn->pipe_handle,
+ GFP_KERNEL);
+ break;
+#endif
+
case PNS_PEP_ENABLE_REQ:
/* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
break;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ case PNS_PEP_ENABLE_RESP:
+ if ((ph->pn_sdev == pn_dev(pn->remote_pep)) &&
+ (ph->pn_sobj == pn_obj(pn->remote_pep))) {
+ pn->pipe_state = PIPE_ENABLED;
+ pipe_handler_send_ind(sk, pn->remote_pep,
+ PNS_PIPE_ENABLED_IND_UTID,
+ pn->pipe_handle, PNS_PIPE_ENABLED_IND);
+ pipe_handler_send_ind(sk, pn->pn_sk.sobject,
+ PNS_PIPE_ENABLED_IND_UTID,
+ pn->pipe_handle, PNS_PIPE_ENABLED_IND);
+ } else
+ pipe_handler_send_req(sk, pn->remote_pep,
+ PNS_PIPE_ENABLE_UTID,
+ PNS_PEP_ENABLE_REQ, pn->pipe_handle,
+ GFP_KERNEL);
+
+ break;
+#endif
+
case PNS_PEP_RESET_REQ:
switch (hdr->state_after_reset) {
case PN_PIPE_DISABLE:
@@ -347,6 +699,27 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
break;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ case PNS_PEP_DISABLE_RESP:
+ if ((ph->pn_sdev == pn_dev(pn->remote_pep)) &&
+ (ph->pn_sobj == pn_obj(pn->remote_pep))) {
+ pn->pipe_state = PIPE_DISABLED;
+ pipe_handler_send_ind(sk, pn->remote_pep,
+ PNS_PIPE_DISABLED_IND_UTID,
+ pn->pipe_handle,
+ PNS_PIPE_DISABLED_IND);
+ pipe_handler_send_ind(sk, pn->pn_sk.sobject,
+ PNS_PIPE_DISABLED_IND_UTID,
+ pn->pipe_handle,
+ PNS_PIPE_DISABLED_IND);
+ } else
+ pipe_handler_send_req(sk, pn->remote_pep,
+ PNS_PIPE_DISABLE_UTID,
+ PNS_PEP_DISABLE_REQ, pn->pipe_handle,
+ GFP_KERNEL);
+ break;
+#endif
+
case PNS_PEP_CTRL_REQ:
if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
atomic_inc(&sk->sk_drops);
@@ -520,6 +893,9 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
newpn->init_enable = enabled;
newpn->aligned = aligned;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ newpn->remote_pep = pn->remote_pep;
+#endif
BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
skb_queue_head(&newsk->sk_receive_queue, skb);
@@ -621,6 +997,28 @@ drop:
return err;
}
+static int pipe_do_remove(struct sock *sk)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ struct pnpipehdr *ph;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(MAX_PNPIPE_HEADER, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, MAX_PNPIPE_HEADER);
+ __skb_push(skb, sizeof(*ph));
+ skb_reset_transport_header(skb);
+ ph = pnp_hdr(skb);
+ ph->utid = 0;
+ ph->message_id = PNS_PIPE_REMOVE_REQ;
+ ph->pipe_handle = pn->pipe_handle;
+ ph->data[0] = PAD;
+
+ return pn_skb_send(sk, skb, &pipe_srv);
+}
+
/* associated socket ceases to exist */
static void pep_sock_close(struct sock *sk, long timeout)
{
@@ -639,7 +1037,10 @@ static void pep_sock_close(struct sock *sk, long timeout)
sk_for_each_safe(sknode, p, n, &pn->ackq)
sk_del_node_init(sknode);
sk->sk_state = TCP_CLOSE;
- }
+ } else if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
+ /* Forcefully remove dangling Phonet pipe */
+ pipe_do_remove(sk);
+
ifindex = pn->ifindex;
pn->ifindex = 0;
release_sock(sk);
@@ -757,6 +1158,10 @@ static int pep_setsockopt(struct sock *sk, int level, int optname,
{
struct pep_sock *pn = pep_sk(sk);
int val = 0, err = 0;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ int remote_pep;
+ int pipe_handle;
+#endif
if (level != SOL_PNPIPE)
return -ENOPROTOOPT;
@@ -767,6 +1172,48 @@ static int pep_setsockopt(struct sock *sk, int level, int optname,
lock_sock(sk);
switch (optname) {
+#ifdef CONFIG_PHONET_PIPECTRLR
+ case PNPIPE_CREATE:
+ if (val) {
+ if (pn->pipe_state > PIPE_IDLE) {
+ err = -EFAULT;
+ break;
+ }
+ remote_pep = val & 0xFFFF;
+ pipe_handle = (val >> 16) & 0xFF;
+ pn->remote_pep = remote_pep;
+ err = pipe_handler_create_pipe(sk, pipe_handle,
+ PNPIPE_CREATE);
+ break;
+ }
+
+ case PNPIPE_ENABLE:
+ if (pn->pipe_state != PIPE_DISABLED) {
+ err = -EFAULT;
+ break;
+ }
+ err = pipe_handler_enable_pipe(sk, PNPIPE_ENABLE);
+ break;
+
+ case PNPIPE_DISABLE:
+ if (pn->pipe_state != PIPE_ENABLED) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = pipe_handler_enable_pipe(sk, PNPIPE_DISABLE);
+ break;
+
+ case PNPIPE_DESTROY:
+ if (pn->pipe_state < PIPE_DISABLED) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = pipe_handler_create_pipe(sk, 0x0, PNPIPE_DESTROY);
+ break;
+#endif
+
case PNPIPE_ENCAP:
if (val && val != PNPIPE_ENCAP_IP) {
err = -EINVAL;
@@ -816,6 +1263,13 @@ static int pep_getsockopt(struct sock *sk, int level, int optname,
case PNPIPE_ENCAP:
val = pn->ifindex ? PNPIPE_ENCAP_IP : PNPIPE_ENCAP_NONE;
break;
+
+#ifdef CONFIG_PHONET_PIPECTRLR
+ case PNPIPE_INQ:
+ val = pn->pipe_state;
+ break;
+#endif
+
case PNPIPE_IFINDEX:
val = pn->ifindex;
break;
@@ -835,6 +1289,15 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
{
struct pep_sock *pn = pep_sk(sk);
struct pnpipehdr *ph;
+ int err;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ struct sockaddr_pn spn = {
+ .spn_family = AF_PHONET,
+ .spn_resource = 0xD9,
+ .spn_dev = pn_dev(pn->remote_pep),
+ .spn_obj = pn_obj(pn->remote_pep),
+ };
+#endif
if (pn_flow_safe(pn->tx_fc) &&
!atomic_add_unless(&pn->tx_credits, -1, 0)) {
@@ -852,8 +1315,16 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
} else
ph->message_id = PNS_PIPE_DATA;
ph->pipe_handle = pn->pipe_handle;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ err = pn_skb_send(sk, skb, &spn);
+#else
+ err = pn_skb_send(sk, skb, &pipe_srv);
+#endif
+
+ if (err && pn_flow_safe(pn->tx_fc))
+ atomic_inc(&pn->tx_credits);
+ return err;
- return pn_skb_send(sk, skb, &pipe_srv);
}
static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
@@ -873,7 +1344,7 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
flags & MSG_DONTWAIT, &err);
if (!skb)
- return -ENOBUFS;
+ return err;
skb_reserve(skb, MAX_PHONET_HEADER + 3);
err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index b18e48fae975..947038ddd04c 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -292,8 +292,7 @@ static void phonet_route_autodel(struct net_device *dev)
if (bitmap_empty(deleted, 64))
return; /* short-circuit RCU */
synchronize_rcu();
- for (i = find_first_bit(deleted, 64); i < 64;
- i = find_next_bit(deleted, 64, i + 1)) {
+ for_each_set_bit(i, deleted, 64) {
rtm_phonet_notify(RTM_DELROUTE, dev, i);
dev_put(dev);
}
@@ -374,6 +373,7 @@ int __init phonet_device_init(void)
if (err)
return err;
+ proc_net_fops_create(&init_net, "pnresource", 0, &pn_res_seq_fops);
register_netdevice_notifier(&phonet_device_notifier);
err = phonet_netlink_register();
if (err)
@@ -386,6 +386,7 @@ void phonet_device_exit(void)
rtnl_unregister_all(PF_PHONET);
unregister_netdevice_notifier(&phonet_device_notifier);
unregister_pernet_device(&phonet_net_ops);
+ proc_net_remove(&init_net, "pnresource");
}
int phonet_route_add(struct net_device *dev, u8 daddr)
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 6e9848bf0370..aca8fba099e9 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -158,6 +158,7 @@ void pn_sock_unhash(struct sock *sk)
spin_lock_bh(&pnsocks.lock);
sk_del_node_init(sk);
spin_unlock_bh(&pnsocks.lock);
+ pn_sock_unbind_all_res(sk);
}
EXPORT_SYMBOL(pn_sock_unhash);
@@ -281,7 +282,9 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
return POLLHUP;
- if (sk->sk_state == TCP_ESTABLISHED && atomic_read(&pn->tx_credits))
+ if (sk->sk_state == TCP_ESTABLISHED &&
+ atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
+ atomic_read(&pn->tx_credits))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
return mask;
@@ -563,3 +566,188 @@ const struct file_operations pn_sock_seq_fops = {
.release = seq_release_net,
};
#endif
+
+static struct {
+ struct sock *sk[256];
+} pnres;
+
+/*
+ * Find and hold socket based on resource.
+ */
+struct sock *pn_find_sock_by_res(struct net *net, u8 res)
+{
+ struct sock *sk;
+
+ if (!net_eq(net, &init_net))
+ return NULL;
+
+ rcu_read_lock();
+ sk = rcu_dereference(pnres.sk[res]);
+ if (sk)
+ sock_hold(sk);
+ rcu_read_unlock();
+ return sk;
+}
+
+static DEFINE_MUTEX(resource_mutex);
+
+int pn_sock_bind_res(struct sock *sk, u8 res)
+{
+ int ret = -EADDRINUSE;
+
+ if (!net_eq(sock_net(sk), &init_net))
+ return -ENOIOCTLCMD;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (pn_socket_autobind(sk->sk_socket))
+ return -EAGAIN;
+
+ mutex_lock(&resource_mutex);
+ if (pnres.sk[res] == NULL) {
+ sock_hold(sk);
+ rcu_assign_pointer(pnres.sk[res], sk);
+ ret = 0;
+ }
+ mutex_unlock(&resource_mutex);
+ return ret;
+}
+
+int pn_sock_unbind_res(struct sock *sk, u8 res)
+{
+ int ret = -ENOENT;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ mutex_lock(&resource_mutex);
+ if (pnres.sk[res] == sk) {
+ rcu_assign_pointer(pnres.sk[res], NULL);
+ ret = 0;
+ }
+ mutex_unlock(&resource_mutex);
+
+ if (ret == 0) {
+ synchronize_rcu();
+ sock_put(sk);
+ }
+ return ret;
+}
+
+void pn_sock_unbind_all_res(struct sock *sk)
+{
+ unsigned res, match = 0;
+
+ mutex_lock(&resource_mutex);
+ for (res = 0; res < 256; res++) {
+ if (pnres.sk[res] == sk) {
+ rcu_assign_pointer(pnres.sk[res], NULL);
+ match++;
+ }
+ }
+ mutex_unlock(&resource_mutex);
+
+ if (match == 0)
+ return;
+ synchronize_rcu();
+ while (match > 0) {
+ sock_put(sk);
+ match--;
+ }
+}
+
+#ifdef CONFIG_PROC_FS
+static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
+{
+ struct net *net = seq_file_net(seq);
+ unsigned i;
+
+ if (!net_eq(net, &init_net))
+ return NULL;
+
+ for (i = 0; i < 256; i++) {
+ if (pnres.sk[i] == NULL)
+ continue;
+ if (!pos)
+ return pnres.sk + i;
+ pos--;
+ }
+ return NULL;
+}
+
+static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
+{
+ struct net *net = seq_file_net(seq);
+ unsigned i;
+
+ BUG_ON(!net_eq(net, &init_net));
+
+ for (i = (sk - pnres.sk) + 1; i < 256; i++)
+ if (pnres.sk[i])
+ return pnres.sk + i;
+ return NULL;
+}
+
+static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(resource_mutex)
+{
+ mutex_lock(&resource_mutex);
+ return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct sock **sk;
+
+ if (v == SEQ_START_TOKEN)
+ sk = pn_res_get_idx(seq, 0);
+ else
+ sk = pn_res_get_next(seq, v);
+ (*pos)++;
+ return sk;
+}
+
+static void pn_res_seq_stop(struct seq_file *seq, void *v)
+ __releases(resource_mutex)
+{
+ mutex_unlock(&resource_mutex);
+}
+
+static int pn_res_seq_show(struct seq_file *seq, void *v)
+{
+ int len;
+
+ if (v == SEQ_START_TOKEN)
+ seq_printf(seq, "%s%n", "rs uid inode", &len);
+ else {
+ struct sock **psk = v;
+ struct sock *sk = *psk;
+
+ seq_printf(seq, "%02X %5d %lu%n",
+ (int) (psk - pnres.sk), sock_i_uid(sk),
+ sock_i_ino(sk), &len);
+ }
+ seq_printf(seq, "%*s\n", 63 - len, "");
+ return 0;
+}
+
+static const struct seq_operations pn_res_seq_ops = {
+ .start = pn_res_seq_start,
+ .next = pn_res_seq_next,
+ .stop = pn_res_seq_stop,
+ .show = pn_res_seq_show,
+};
+
+static int pn_res_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &pn_res_seq_ops,
+ sizeof(struct seq_net_private));
+}
+
+const struct file_operations pn_res_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = pn_res_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+#endif
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index aebfecbdb841..bb6ad81b671d 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -39,7 +39,15 @@
#include <net/sock.h>
#include "rds.h"
-#include "rdma.h"
+
+char *rds_str_array(char **array, size_t elements, size_t index)
+{
+ if ((index < elements) && array[index])
+ return array[index];
+ else
+ return "unknown";
+}
+EXPORT_SYMBOL(rds_str_array);
/* this is just used for stats gathering :/ */
static DEFINE_SPINLOCK(rds_sock_lock);
@@ -62,7 +70,7 @@ static int rds_release(struct socket *sock)
struct rds_sock *rs;
unsigned long flags;
- if (sk == NULL)
+ if (!sk)
goto out;
rs = rds_sk_to_rs(sk);
@@ -73,7 +81,15 @@ static int rds_release(struct socket *sock)
* with the socket. */
rds_clear_recv_queue(rs);
rds_cong_remove_socket(rs);
+
+ /*
+ * the binding lookup hash uses rcu, we need to
+ * make sure we synchronize_rcu before we free our
+ * entry
+ */
rds_remove_bound(rs);
+ synchronize_rcu();
+
rds_send_drop_to(rs, NULL);
rds_rdma_drop_keys(rs);
rds_notify_queue_get(rs, NULL);
@@ -83,6 +99,8 @@ static int rds_release(struct socket *sock)
rds_sock_count--;
spin_unlock_irqrestore(&rds_sock_lock, flags);
+ rds_trans_put(rs->rs_transport);
+
sock->sk = NULL;
sock_put(sk);
out:
@@ -514,7 +532,7 @@ out:
spin_unlock_irqrestore(&rds_sock_lock, flags);
}
-static void __exit rds_exit(void)
+static void rds_exit(void)
{
sock_unregister(rds_family_ops.family);
proto_unregister(&rds_proto);
@@ -529,7 +547,7 @@ static void __exit rds_exit(void)
}
module_exit(rds_exit);
-static int __init rds_init(void)
+static int rds_init(void)
{
int ret;
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 5d95fc007f1a..2f6b3fcc79f8 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -34,45 +34,52 @@
#include <net/sock.h>
#include <linux/in.h>
#include <linux/if_arp.h>
+#include <linux/jhash.h>
#include "rds.h"
-/*
- * XXX this probably still needs more work.. no INADDR_ANY, and rbtrees aren't
- * particularly zippy.
- *
- * This is now called for every incoming frame so we arguably care much more
- * about it than we used to.
- */
+#define BIND_HASH_SIZE 1024
+static struct hlist_head bind_hash_table[BIND_HASH_SIZE];
static DEFINE_SPINLOCK(rds_bind_lock);
-static struct rb_root rds_bind_tree = RB_ROOT;
-static struct rds_sock *rds_bind_tree_walk(__be32 addr, __be16 port,
- struct rds_sock *insert)
+static struct hlist_head *hash_to_bucket(__be32 addr, __be16 port)
+{
+ return bind_hash_table + (jhash_2words((u32)addr, (u32)port, 0) &
+ (BIND_HASH_SIZE - 1));
+}
+
+static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port,
+ struct rds_sock *insert)
{
- struct rb_node **p = &rds_bind_tree.rb_node;
- struct rb_node *parent = NULL;
struct rds_sock *rs;
+ struct hlist_node *node;
+ struct hlist_head *head = hash_to_bucket(addr, port);
u64 cmp;
u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port);
- while (*p) {
- parent = *p;
- rs = rb_entry(parent, struct rds_sock, rs_bound_node);
-
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(rs, node, head, rs_bound_node) {
cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) |
be16_to_cpu(rs->rs_bound_port);
- if (needle < cmp)
- p = &(*p)->rb_left;
- else if (needle > cmp)
- p = &(*p)->rb_right;
- else
+ if (cmp == needle) {
+ rcu_read_unlock();
return rs;
+ }
}
+ rcu_read_unlock();
if (insert) {
- rb_link_node(&insert->rs_bound_node, parent, p);
- rb_insert_color(&insert->rs_bound_node, &rds_bind_tree);
+ /*
+ * make sure our addr and port are set before
+ * we are added to the list; other RCU readers
+ * will find us as soon as the
+ * hlist_add_head_rcu is done
+ */
+ insert->rs_bound_addr = addr;
+ insert->rs_bound_port = port;
+ rds_sock_addref(insert);
+
+ hlist_add_head_rcu(&insert->rs_bound_node, head);
}
return NULL;
}
@@ -86,15 +93,13 @@ static struct rds_sock *rds_bind_tree_walk(__be32 addr, __be16 port,
struct rds_sock *rds_find_bound(__be32 addr, __be16 port)
{
struct rds_sock *rs;
- unsigned long flags;
- spin_lock_irqsave(&rds_bind_lock, flags);
- rs = rds_bind_tree_walk(addr, port, NULL);
+ rs = rds_bind_lookup(addr, port, NULL);
+
if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
rds_sock_addref(rs);
else
rs = NULL;
- spin_unlock_irqrestore(&rds_bind_lock, flags);
rdsdebug("returning rs %p for %pI4:%u\n", rs, &addr,
ntohs(port));
@@ -121,22 +126,15 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
do {
if (rover == 0)
rover++;
- if (rds_bind_tree_walk(addr, cpu_to_be16(rover), rs) == NULL) {
- *port = cpu_to_be16(rover);
+ if (!rds_bind_lookup(addr, cpu_to_be16(rover), rs)) {
+ *port = rs->rs_bound_port;
ret = 0;
+ rdsdebug("rs %p binding to %pI4:%d\n",
+ rs, &addr, (int)ntohs(*port));
break;
}
} while (rover++ != last);
- if (ret == 0) {
- rs->rs_bound_addr = addr;
- rs->rs_bound_port = *port;
- rds_sock_addref(rs);
-
- rdsdebug("rs %p binding to %pI4:%d\n",
- rs, &addr, (int)ntohs(*port));
- }
-
spin_unlock_irqrestore(&rds_bind_lock, flags);
return ret;
@@ -153,7 +151,7 @@ void rds_remove_bound(struct rds_sock *rs)
rs, &rs->rs_bound_addr,
ntohs(rs->rs_bound_port));
- rb_erase(&rs->rs_bound_node, &rds_bind_tree);
+ hlist_del_init_rcu(&rs->rs_bound_node);
rds_sock_put(rs);
rs->rs_bound_addr = 0;
}
@@ -184,7 +182,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
trans = rds_trans_get_preferred(sin->sin_addr.s_addr);
- if (trans == NULL) {
+ if (!trans) {
ret = -EADDRNOTAVAIL;
rds_remove_bound(rs);
if (printk_ratelimit())
@@ -198,5 +196,9 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
out:
release_sock(sk);
+
+ /* we might have called rds_remove_bound on error */
+ if (ret)
+ synchronize_rcu();
return ret;
}
diff --git a/net/rds/cong.c b/net/rds/cong.c
index 0871a29f0780..75ea686f27d5 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -141,7 +141,7 @@ static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
unsigned long flags;
map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
- if (map == NULL)
+ if (!map)
return NULL;
map->m_addr = addr;
@@ -159,7 +159,7 @@ static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
ret = rds_cong_tree_walk(addr, map);
spin_unlock_irqrestore(&rds_cong_lock, flags);
- if (ret == NULL) {
+ if (!ret) {
ret = map;
map = NULL;
}
@@ -205,7 +205,7 @@ int rds_cong_get_maps(struct rds_connection *conn)
conn->c_lcong = rds_cong_from_addr(conn->c_laddr);
conn->c_fcong = rds_cong_from_addr(conn->c_faddr);
- if (conn->c_lcong == NULL || conn->c_fcong == NULL)
+ if (!(conn->c_lcong && conn->c_fcong))
return -ENOMEM;
return 0;
@@ -221,7 +221,7 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
if (!test_and_set_bit(0, &conn->c_map_queued)) {
rds_stats_inc(s_cong_update_queued);
- queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+ rds_send_xmit(conn);
}
}
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 7619b671ca28..870992e08cae 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -37,7 +37,6 @@
#include "rds.h"
#include "loop.h"
-#include "rdma.h"
#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
@@ -63,18 +62,7 @@ static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
var |= RDS_INFO_CONNECTION_FLAG_##suffix; \
} while (0)
-static inline int rds_conn_is_sending(struct rds_connection *conn)
-{
- int ret = 0;
-
- if (!mutex_trylock(&conn->c_send_lock))
- ret = 1;
- else
- mutex_unlock(&conn->c_send_lock);
-
- return ret;
-}
-
+/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
__be32 laddr, __be32 faddr,
struct rds_transport *trans)
@@ -82,7 +70,7 @@ static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
struct rds_connection *conn, *ret = NULL;
struct hlist_node *pos;
- hlist_for_each_entry(conn, pos, head, c_hash_node) {
+ hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
conn->c_trans == trans) {
ret = conn;
@@ -129,10 +117,11 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
{
struct rds_connection *conn, *parent = NULL;
struct hlist_head *head = rds_conn_bucket(laddr, faddr);
+ struct rds_transport *loop_trans;
unsigned long flags;
int ret;
- spin_lock_irqsave(&rds_conn_lock, flags);
+ rcu_read_lock();
conn = rds_conn_lookup(head, laddr, faddr, trans);
if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
!is_outgoing) {
@@ -143,12 +132,12 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
parent = conn;
conn = parent->c_passive;
}
- spin_unlock_irqrestore(&rds_conn_lock, flags);
+ rcu_read_unlock();
if (conn)
goto out;
conn = kmem_cache_zalloc(rds_conn_slab, gfp);
- if (conn == NULL) {
+ if (!conn) {
conn = ERR_PTR(-ENOMEM);
goto out;
}
@@ -159,7 +148,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
spin_lock_init(&conn->c_lock);
conn->c_next_tx_seq = 1;
- mutex_init(&conn->c_send_lock);
+ init_waitqueue_head(&conn->c_waitq);
INIT_LIST_HEAD(&conn->c_send_queue);
INIT_LIST_HEAD(&conn->c_retrans);
@@ -175,7 +164,9 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
* can bind to the destination address then we'd rather the messages
* flow through loopback rather than either transport.
*/
- if (rds_trans_get_preferred(faddr)) {
+ loop_trans = rds_trans_get_preferred(faddr);
+ if (loop_trans) {
+ rds_trans_put(loop_trans);
conn->c_loopback = 1;
if (is_outgoing && trans->t_prefer_loopback) {
/* "outgoing" connection - and the transport
@@ -238,7 +229,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
kmem_cache_free(rds_conn_slab, conn);
conn = found;
} else {
- hlist_add_head(&conn->c_hash_node, head);
+ hlist_add_head_rcu(&conn->c_hash_node, head);
rds_cong_add_conn(conn);
rds_conn_count++;
}
@@ -263,21 +254,91 @@ struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
+void rds_conn_shutdown(struct rds_connection *conn)
+{
+ /* shut it down unless it's down already */
+ if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
+ /*
+ * Quiesce the connection mgmt handlers before we start tearing
+ * things down. We don't hold the mutex for the entire
+ * duration of the shutdown operation, else we may
+ * deadlock with the CM handler. Instead, the CM event
+ * handler is supposed to check for state DISCONNECTING.
+ */
+ mutex_lock(&conn->c_cm_lock);
+ if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING)
+ && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
+ rds_conn_error(conn, "shutdown called in state %d\n",
+ atomic_read(&conn->c_state));
+ mutex_unlock(&conn->c_cm_lock);
+ return;
+ }
+ mutex_unlock(&conn->c_cm_lock);
+
+ wait_event(conn->c_waitq,
+ !test_bit(RDS_IN_XMIT, &conn->c_flags));
+
+ conn->c_trans->conn_shutdown(conn);
+ rds_conn_reset(conn);
+
+ if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
+ /* This can happen - e.g. when we're in the middle of tearing
+ * down the connection, and someone unloads the rds module.
+ * Quite reproducible with loopback connections.
+ * Mostly harmless.
+ */
+ rds_conn_error(conn,
+ "%s: failed to transition to state DOWN, "
+ "current state is %d\n",
+ __func__,
+ atomic_read(&conn->c_state));
+ return;
+ }
+ }
+
+ /* Then reconnect if it's still live.
+ * The passive side of an IB loopback connection is never added
+ * to the conn hash, so we never trigger a reconnect on this
+ * conn - the reconnect is always triggered by the active peer. */
+ cancel_delayed_work_sync(&conn->c_conn_w);
+ rcu_read_lock();
+ if (!hlist_unhashed(&conn->c_hash_node)) {
+ rcu_read_unlock();
+ rds_queue_reconnect(conn);
+ } else {
+ rcu_read_unlock();
+ }
+}
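
For context: the wait_event() above only completes once the transmit path releases RDS_IN_XMIT and wakes c_waitq. A minimal sketch of that counterpart, assuming illustrative helper names acquire_in_xmit()/release_in_xmit() (the actual send-path change lives in a separate hunk of this series):

/* Sketch only, not part of this patch: the send path is assumed to claim
 * RDS_IN_XMIT before transmitting and to wake c_waitq when it is done,
 * which is what rds_conn_shutdown()'s wait_event() above relies on. */
static int acquire_in_xmit(struct rds_connection *conn)
{
	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
}

static void release_in_xmit(struct rds_connection *conn)
{
	clear_bit(RDS_IN_XMIT, &conn->c_flags);
	smp_mb__after_clear_bit();	/* order the clear before checking waiters */
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);
}
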
+
+/*
+ * Stop and free a connection.
+ *
+ * This can only be used in very limited circumstances. It assumes that once
+ * the conn has been shut down, no one else is referencing the connection.
+ * We can only ensure this in the rmmod path in the current code.
+ */
void rds_conn_destroy(struct rds_connection *conn)
{
struct rds_message *rm, *rtmp;
+ unsigned long flags;
rdsdebug("freeing conn %p for %pI4 -> "
"%pI4\n", conn, &conn->c_laddr,
&conn->c_faddr);
- hlist_del_init(&conn->c_hash_node);
+ /* Ensure conn will not be scheduled for reconnect */
+ spin_lock_irq(&rds_conn_lock);
+ hlist_del_init_rcu(&conn->c_hash_node);
+ spin_unlock_irq(&rds_conn_lock);
+ synchronize_rcu();
- /* wait for the rds thread to shut it down */
- atomic_set(&conn->c_state, RDS_CONN_ERROR);
- cancel_delayed_work(&conn->c_conn_w);
- queue_work(rds_wq, &conn->c_down_w);
- flush_workqueue(rds_wq);
+ /* shut the connection down */
+ rds_conn_drop(conn);
+ flush_work(&conn->c_down_w);
+
+ /* make sure lingering queued work won't try to ref the conn */
+ cancel_delayed_work_sync(&conn->c_send_w);
+ cancel_delayed_work_sync(&conn->c_recv_w);
/* tear down queued messages */
list_for_each_entry_safe(rm, rtmp,
@@ -302,7 +363,9 @@ void rds_conn_destroy(struct rds_connection *conn)
BUG_ON(!list_empty(&conn->c_retrans));
kmem_cache_free(rds_conn_slab, conn);
+ spin_lock_irqsave(&rds_conn_lock, flags);
rds_conn_count--;
+ spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);
@@ -316,23 +379,23 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
struct list_head *list;
struct rds_connection *conn;
struct rds_message *rm;
- unsigned long flags;
unsigned int total = 0;
+ unsigned long flags;
size_t i;
len /= sizeof(struct rds_info_message);
- spin_lock_irqsave(&rds_conn_lock, flags);
+ rcu_read_lock();
for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
i++, head++) {
- hlist_for_each_entry(conn, pos, head, c_hash_node) {
+ hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
if (want_send)
list = &conn->c_send_queue;
else
list = &conn->c_retrans;
- spin_lock(&conn->c_lock);
+ spin_lock_irqsave(&conn->c_lock, flags);
/* XXX too lazy to maintain counts.. */
list_for_each_entry(rm, list, m_conn_item) {
@@ -343,11 +406,10 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
conn->c_faddr, 0);
}
- spin_unlock(&conn->c_lock);
+ spin_unlock_irqrestore(&conn->c_lock, flags);
}
}
-
- spin_unlock_irqrestore(&rds_conn_lock, flags);
+ rcu_read_unlock();
lens->nr = total;
lens->each = sizeof(struct rds_info_message);
@@ -377,19 +439,17 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
uint64_t buffer[(item_len + 7) / 8];
struct hlist_head *head;
struct hlist_node *pos;
- struct hlist_node *tmp;
struct rds_connection *conn;
- unsigned long flags;
size_t i;
- spin_lock_irqsave(&rds_conn_lock, flags);
+ rcu_read_lock();
lens->nr = 0;
lens->each = item_len;
for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
i++, head++) {
- hlist_for_each_entry_safe(conn, pos, tmp, head, c_hash_node) {
+ hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
/* XXX no c_lock usage.. */
if (!visitor(conn, buffer))
@@ -405,8 +465,7 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
lens->nr++;
}
}
-
- spin_unlock_irqrestore(&rds_conn_lock, flags);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);
@@ -423,8 +482,8 @@ static int rds_conn_info_visitor(struct rds_connection *conn,
sizeof(cinfo->transport));
cinfo->flags = 0;
- rds_conn_info_set(cinfo->flags,
- rds_conn_is_sending(conn), SENDING);
+ rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &conn->c_flags),
+ SENDING);
/* XXX Future: return the state rather than these funky bits */
rds_conn_info_set(cinfo->flags,
atomic_read(&conn->c_state) == RDS_CONN_CONNECTING,
@@ -444,12 +503,12 @@ static void rds_conn_info(struct socket *sock, unsigned int len,
sizeof(struct rds_info_connection));
}
-int __init rds_conn_init(void)
+int rds_conn_init(void)
{
rds_conn_slab = kmem_cache_create("rds_connection",
sizeof(struct rds_connection),
0, 0, NULL);
- if (rds_conn_slab == NULL)
+ if (!rds_conn_slab)
return -ENOMEM;
rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
@@ -487,6 +546,18 @@ void rds_conn_drop(struct rds_connection *conn)
EXPORT_SYMBOL_GPL(rds_conn_drop);
/*
+ * If the connection is down, trigger a connect. We may have scheduled a
+ * delayed reconnect however - in this case we should not interfere.
+ */
+void rds_conn_connect_if_down(struct rds_connection *conn)
+{
+ if (rds_conn_state(conn) == RDS_CONN_DOWN &&
+ !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
+ queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
+}
+EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);
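
A hedged sketch of the intended caller pattern (the real call sites live in the send and IB paths elsewhere in this series; the function below is hypothetical):

/* Hypothetical caller: kick a down connection before queueing new work on it. */
static void example_kick_conn(struct rds_connection *conn)
{
	rds_conn_connect_if_down(conn);

	/* ... queue the message on conn->c_send_queue and start transmission ... */
}
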
+
+/*
* An error occurred on the connection
*/
void
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 8f2d6dd7700a..b12a3951167d 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -53,12 +53,71 @@ MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer");
module_param(rds_ib_retry_count, int, 0444);
MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");
+/*
+ * we have a clumsy combination of RCU and a rwsem protecting this list
+ * because it is used both in the get_mr fast path and while blocking in
+ * the FMR flushing path.
+ */
+DECLARE_RWSEM(rds_ib_devices_lock);
struct list_head rds_ib_devices;
/* NOTE: if also grabbing ibdev lock, grab this first */
DEFINE_SPINLOCK(ib_nodev_conns_lock);
LIST_HEAD(ib_nodev_conns);
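
The locking comment above distinguishes three kinds of users. A rough sketch of how each is expected to use rds_ib_devices_lock and RCU (illustrative only; the real walkers are rds_ib_get_device() and rds_ib_flush_mrs() later in this diff, and the writer is rds_ib_add_one()/rds_ib_remove_one() below):

/* fast path: may run in atomic context, so it reads the list under RCU only */
static void example_walk_devs_fast(void)
{
	struct rds_ib_device *rds_ibdev;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		/* may not sleep here; take a refcount before leaving the RCU section */
	}
	rcu_read_unlock();
}

/* blocking path: holds the rwsem for read so it can sleep while flushing FMRs */
static void example_walk_devs_blocking(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		/* may sleep here, e.g. while flushing this device's MR pool */
	}
	up_read(&rds_ib_devices_lock);
}

/* writers take the rwsem for write and use the RCU list primitives */
static void example_add_dev(struct rds_ib_device *rds_ibdev)
{
	down_write(&rds_ib_devices_lock);
	list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
	up_write(&rds_ib_devices_lock);
}
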
+void rds_ib_nodev_connect(void)
+{
+ struct rds_ib_connection *ic;
+
+ spin_lock(&ib_nodev_conns_lock);
+ list_for_each_entry(ic, &ib_nodev_conns, ib_node)
+ rds_conn_connect_if_down(ic->conn);
+ spin_unlock(&ib_nodev_conns_lock);
+}
+
+void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
+{
+ struct rds_ib_connection *ic;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rds_ibdev->spinlock, flags);
+ list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
+ rds_conn_drop(ic->conn);
+ spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
+}
+
+/*
+ * rds_ib_destroy_mr_pool() blocks on a few things and MRs drop references
+ * from interrupt context, so we push freeing off into a work struct in krdsd.
+ */
+static void rds_ib_dev_free(struct work_struct *work)
+{
+ struct rds_ib_ipaddr *i_ipaddr, *i_next;
+ struct rds_ib_device *rds_ibdev = container_of(work,
+ struct rds_ib_device, free_work);
+
+ if (rds_ibdev->mr_pool)
+ rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
+ if (rds_ibdev->mr)
+ ib_dereg_mr(rds_ibdev->mr);
+ if (rds_ibdev->pd)
+ ib_dealloc_pd(rds_ibdev->pd);
+
+ list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
+ list_del(&i_ipaddr->list);
+ kfree(i_ipaddr);
+ }
+
+ kfree(rds_ibdev);
+}
+
+void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
+{
+ BUG_ON(atomic_read(&rds_ibdev->refcount) <= 0);
+ if (atomic_dec_and_test(&rds_ibdev->refcount))
+ queue_work(rds_wq, &rds_ibdev->free_work);
+}
+
void rds_ib_add_one(struct ib_device *device)
{
struct rds_ib_device *rds_ibdev;
@@ -77,11 +136,14 @@ void rds_ib_add_one(struct ib_device *device)
goto free_attr;
}
- rds_ibdev = kmalloc(sizeof *rds_ibdev, GFP_KERNEL);
+ rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL,
+ ibdev_to_node(device));
if (!rds_ibdev)
goto free_attr;
spin_lock_init(&rds_ibdev->spinlock);
+ atomic_set(&rds_ibdev->refcount, 1);
+ INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
rds_ibdev->max_wrs = dev_attr->max_qp_wr;
rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE);
@@ -91,68 +153,107 @@ void rds_ib_add_one(struct ib_device *device)
min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) :
fmr_pool_size;
+ rds_ibdev->max_initiator_depth = dev_attr->max_qp_init_rd_atom;
+ rds_ibdev->max_responder_resources = dev_attr->max_qp_rd_atom;
+
rds_ibdev->dev = device;
rds_ibdev->pd = ib_alloc_pd(device);
- if (IS_ERR(rds_ibdev->pd))
- goto free_dev;
+ if (IS_ERR(rds_ibdev->pd)) {
+ rds_ibdev->pd = NULL;
+ goto put_dev;
+ }
- rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(rds_ibdev->mr))
- goto err_pd;
+ rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(rds_ibdev->mr)) {
+ rds_ibdev->mr = NULL;
+ goto put_dev;
+ }
rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev);
if (IS_ERR(rds_ibdev->mr_pool)) {
rds_ibdev->mr_pool = NULL;
- goto err_mr;
+ goto put_dev;
}
INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
INIT_LIST_HEAD(&rds_ibdev->conn_list);
- list_add_tail(&rds_ibdev->list, &rds_ib_devices);
+
+ down_write(&rds_ib_devices_lock);
+ list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
+ up_write(&rds_ib_devices_lock);
+ atomic_inc(&rds_ibdev->refcount);
ib_set_client_data(device, &rds_ib_client, rds_ibdev);
+ atomic_inc(&rds_ibdev->refcount);
- goto free_attr;
+ rds_ib_nodev_connect();
-err_mr:
- ib_dereg_mr(rds_ibdev->mr);
-err_pd:
- ib_dealloc_pd(rds_ibdev->pd);
-free_dev:
- kfree(rds_ibdev);
+put_dev:
+ rds_ib_dev_put(rds_ibdev);
free_attr:
kfree(dev_attr);
}
+/*
+ * New connections use this to find the device to associate with the
+ * connection. It's not in the fast path so we're not concerned about the
+ * performance of the IB call. (As of this writing, it uses an interrupt
+ * blocking spinlock to serialize walking a per-device list of all registered
+ * clients.)
+ *
+ * RCU is used to handle incoming connections racing with device teardown.
+ * Rather than use a lock to serialize removal from the client_data and
+ * getting a new reference, we use an RCU grace period. The destruction
+ * path removes the device from client_data and then waits for all RCU
+ * readers to finish.
+ *
+ * A new connection can get NULL from this if it's arriving on a
+ * device that is in the process of being removed.
+ */
+struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
+{
+ struct rds_ib_device *rds_ibdev;
+
+ rcu_read_lock();
+ rds_ibdev = ib_get_client_data(device, &rds_ib_client);
+ if (rds_ibdev)
+ atomic_inc(&rds_ibdev->refcount);
+ rcu_read_unlock();
+ return rds_ibdev;
+}
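
A hedged usage sketch of the get/put pairing this introduces (the caller name is hypothetical; rds_ib_setup_qp() further down follows the same pattern and drops its reference with rds_ib_dev_put() on exit):

/* Hypothetical caller: look the device up under RCU, use it, drop the reference. */
static int example_use_device(struct ib_device *dev)
{
	struct rds_ib_device *rds_ibdev;

	rds_ibdev = rds_ib_get_client_data(dev);
	if (!rds_ibdev)
		return -EOPNOTSUPP;	/* device is in the middle of being removed */

	/* ... use rds_ibdev->pd, rds_ibdev->mr, rds_ibdev->mr_pool ... */

	rds_ib_dev_put(rds_ibdev);	/* the final put queues rds_ib_dev_free() */
	return 0;
}
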
+
+/*
+ * The IB stack is letting us know that a device is going away. This can
+ * happen if the underlying HCA driver is removed or if PCI hotplug is removing
+ * the pci function, for example.
+ *
+ * This can be called at any time and can be racing with any other RDS path.
+ */
void rds_ib_remove_one(struct ib_device *device)
{
struct rds_ib_device *rds_ibdev;
- struct rds_ib_ipaddr *i_ipaddr, *i_next;
rds_ibdev = ib_get_client_data(device, &rds_ib_client);
if (!rds_ibdev)
return;
- list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
- list_del(&i_ipaddr->list);
- kfree(i_ipaddr);
- }
+ rds_ib_dev_shutdown(rds_ibdev);
- rds_ib_destroy_conns(rds_ibdev);
+ /* stop connection attempts from getting a reference to this device. */
+ ib_set_client_data(device, &rds_ib_client, NULL);
- if (rds_ibdev->mr_pool)
- rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
-
- ib_dereg_mr(rds_ibdev->mr);
-
- while (ib_dealloc_pd(rds_ibdev->pd)) {
- rdsdebug("Failed to dealloc pd %p\n", rds_ibdev->pd);
- msleep(1);
- }
+ down_write(&rds_ib_devices_lock);
+ list_del_rcu(&rds_ibdev->list);
+ up_write(&rds_ib_devices_lock);
- list_del(&rds_ibdev->list);
- kfree(rds_ibdev);
+ /*
+ * This synchronize_rcu() is waiting for readers of both the ib
+ * client data and the devices list to finish before we drop
+ * both of those references.
+ */
+ synchronize_rcu();
+ rds_ib_dev_put(rds_ibdev);
+ rds_ib_dev_put(rds_ibdev);
}
struct ib_client rds_ib_client = {
@@ -186,7 +287,7 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
- rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
+ rds_ibdev = ic->rds_ibdev;
iinfo->max_send_wr = ic->i_send_ring.w_nr;
iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
iinfo->max_send_sge = rds_ibdev->max_sge;
@@ -248,29 +349,36 @@ static int rds_ib_laddr_check(__be32 addr)
return ret;
}
+static void rds_ib_unregister_client(void)
+{
+ ib_unregister_client(&rds_ib_client);
+ /* wait for rds_ib_dev_free() to complete */
+ flush_workqueue(rds_wq);
+}
+
void rds_ib_exit(void)
{
rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
+ rds_ib_unregister_client();
rds_ib_destroy_nodev_conns();
- ib_unregister_client(&rds_ib_client);
rds_ib_sysctl_exit();
rds_ib_recv_exit();
rds_trans_unregister(&rds_ib_transport);
+ rds_ib_fmr_exit();
}
struct rds_transport rds_ib_transport = {
.laddr_check = rds_ib_laddr_check,
.xmit_complete = rds_ib_xmit_complete,
.xmit = rds_ib_xmit,
- .xmit_cong_map = NULL,
.xmit_rdma = rds_ib_xmit_rdma,
+ .xmit_atomic = rds_ib_xmit_atomic,
.recv = rds_ib_recv,
.conn_alloc = rds_ib_conn_alloc,
.conn_free = rds_ib_conn_free,
.conn_connect = rds_ib_conn_connect,
.conn_shutdown = rds_ib_conn_shutdown,
.inc_copy_to_user = rds_ib_inc_copy_to_user,
- .inc_purge = rds_ib_inc_purge,
.inc_free = rds_ib_inc_free,
.cm_initiate_connect = rds_ib_cm_initiate_connect,
.cm_handle_connect = rds_ib_cm_handle_connect,
@@ -286,16 +394,20 @@ struct rds_transport rds_ib_transport = {
.t_type = RDS_TRANS_IB
};
-int __init rds_ib_init(void)
+int rds_ib_init(void)
{
int ret;
INIT_LIST_HEAD(&rds_ib_devices);
- ret = ib_register_client(&rds_ib_client);
+ ret = rds_ib_fmr_init();
if (ret)
goto out;
+ ret = ib_register_client(&rds_ib_client);
+ if (ret)
+ goto out_fmr_exit;
+
ret = rds_ib_sysctl_init();
if (ret)
goto out_ibreg;
@@ -317,7 +429,9 @@ out_recv:
out_sysctl:
rds_ib_sysctl_exit();
out_ibreg:
- ib_unregister_client(&rds_ib_client);
+ rds_ib_unregister_client();
+out_fmr_exit:
+ rds_ib_fmr_exit();
out:
return ret;
}
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 64df4e79b29f..7ad3d57e06a5 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -3,11 +3,13 @@
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"
#define RDS_FMR_SIZE 256
-#define RDS_FMR_POOL_SIZE 4096
+#define RDS_FMR_POOL_SIZE 8192
#define RDS_IB_MAX_SGE 8
#define RDS_IB_RECV_SGE 2
@@ -19,6 +21,9 @@
#define RDS_IB_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */
+#define RDS_IB_RECYCLE_BATCH_COUNT 32
+
+extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;
/*
@@ -26,20 +31,29 @@ extern struct list_head rds_ib_devices;
* try and minimize the amount of memory tied up both the device and
* socket receive queues.
*/
-/* page offset of the final full frag that fits in the page */
-#define RDS_PAGE_LAST_OFF (((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
struct rds_page_frag {
struct list_head f_item;
- struct page *f_page;
- unsigned long f_offset;
- dma_addr_t f_mapped;
+ struct list_head f_cache_entry;
+ struct scatterlist f_sg;
};
struct rds_ib_incoming {
struct list_head ii_frags;
+ struct list_head ii_cache_entry;
struct rds_incoming ii_inc;
};
+struct rds_ib_cache_head {
+ struct list_head *first;
+ unsigned long count;
+};
+
+struct rds_ib_refill_cache {
+ struct rds_ib_cache_head *percpu;
+ struct list_head *xfer;
+ struct list_head *ready;
+};
+
struct rds_ib_connect_private {
/* Add new fields at the end, and don't permute existing fields. */
__be32 dp_saddr;
@@ -53,8 +67,7 @@ struct rds_ib_connect_private {
};
struct rds_ib_send_work {
- struct rds_message *s_rm;
- struct rds_rdma_op *s_op;
+ void *s_op;
struct ib_send_wr s_wr;
struct ib_sge s_sge[RDS_IB_MAX_SGE];
unsigned long s_queued;
@@ -92,10 +105,11 @@ struct rds_ib_connection {
/* tx */
struct rds_ib_work_ring i_send_ring;
- struct rds_message *i_rm;
+ struct rm_data_op *i_data_op;
struct rds_header *i_send_hdrs;
u64 i_send_hdrs_dma;
struct rds_ib_send_work *i_sends;
+ atomic_t i_signaled_sends;
/* rx */
struct tasklet_struct i_recv_tasklet;
@@ -106,8 +120,9 @@ struct rds_ib_connection {
struct rds_header *i_recv_hdrs;
u64 i_recv_hdrs_dma;
struct rds_ib_recv_work *i_recvs;
- struct rds_page_frag i_frag;
u64 i_ack_recv; /* last ACK received */
+ struct rds_ib_refill_cache i_cache_incs;
+ struct rds_ib_refill_cache i_cache_frags;
/* sending acks */
unsigned long i_ack_flags;
@@ -138,7 +153,6 @@ struct rds_ib_connection {
/* Batched completions */
unsigned int i_unsignaled_wrs;
- long i_unsignaled_bytes;
};
/* This assumes that atomic_t is at least 32 bits */
@@ -164,9 +178,17 @@ struct rds_ib_device {
unsigned int max_fmrs;
int max_sge;
unsigned int max_wrs;
+ unsigned int max_initiator_depth;
+ unsigned int max_responder_resources;
spinlock_t spinlock; /* protect the above */
+ atomic_t refcount;
+ struct work_struct free_work;
};
+#define pcidev_to_node(pcidev) pcibus_to_node(pcidev->bus)
+#define ibdev_to_node(ibdev) pcidev_to_node(to_pci_dev(ibdev->dma_device))
+#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
+
/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT 0
#define IB_ACK_REQUESTED 1
@@ -202,6 +224,8 @@ struct rds_ib_statistics {
uint64_t s_ib_rdma_mr_pool_flush;
uint64_t s_ib_rdma_mr_pool_wait;
uint64_t s_ib_rdma_mr_pool_depleted;
+ uint64_t s_ib_atomic_cswp;
+ uint64_t s_ib_atomic_fadd;
};
extern struct workqueue_struct *rds_ib_wq;
@@ -243,6 +267,8 @@ static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
extern struct rds_transport rds_ib_transport;
extern void rds_ib_add_one(struct ib_device *device);
extern void rds_ib_remove_one(struct ib_device *device);
+struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
+void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;
extern unsigned int fmr_pool_size;
@@ -258,7 +284,7 @@ void rds_ib_conn_free(void *arg);
int rds_ib_conn_connect(struct rds_connection *conn);
void rds_ib_conn_shutdown(struct rds_connection *conn);
void rds_ib_state_change(struct sock *sk);
-int __init rds_ib_listen_init(void);
+int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
@@ -275,15 +301,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn,
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
-void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock);
-static inline void rds_ib_destroy_nodev_conns(void)
-{
- __rds_ib_destroy_conns(&ib_nodev_conns, &ib_nodev_conns_lock);
-}
-static inline void rds_ib_destroy_conns(struct rds_ib_device *rds_ibdev)
-{
- __rds_ib_destroy_conns(&rds_ibdev->conn_list, &rds_ibdev->spinlock);
-}
+void rds_ib_destroy_nodev_conns(void);
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *);
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
@@ -292,14 +310,16 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);
+int rds_ib_fmr_init(void);
+void rds_ib_fmr_exit(void);
/* ib_recv.c */
-int __init rds_ib_recv_init(void);
+int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv(struct rds_connection *conn);
-int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
- gfp_t page_gfp, int prefill);
-void rds_ib_inc_purge(struct rds_incoming *inc);
+int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
+void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
+void rds_ib_recv_refill(struct rds_connection *conn, int prefill);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
size_t size);
@@ -325,17 +345,19 @@ u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;
/* ib_send.c */
+char *rds_ib_wc_status_str(enum ib_wc_status status);
void rds_ib_xmit_complete(struct rds_connection *conn);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
-int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
+int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
u32 *adv_credits, int need_posted, int max_posted);
+int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);
/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
@@ -344,7 +366,7 @@ unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
unsigned int avail);
/* ib_sysctl.c */
-int __init rds_ib_sysctl_init(void);
+int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
@@ -354,28 +376,4 @@ extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;
extern ctl_table rds_ib_sysctl_table[];
-/*
- * Helper functions for getting/setting the header and data SGEs in
- * RDS packets (not RDMA)
- *
- * From version 3.1 onwards, header is in front of data in the sge.
- */
-static inline struct ib_sge *
-rds_ib_header_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
-{
- if (ic->conn->c_version > RDS_PROTOCOL_3_0)
- return &sge[0];
- else
- return &sge[1];
-}
-
-static inline struct ib_sge *
-rds_ib_data_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
-{
- if (ic->conn->c_version > RDS_PROTOCOL_3_0)
- return &sge[1];
- else
- return &sge[0];
-}
-
#endif
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index f68832798db2..ee369d201a65 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -38,6 +38,36 @@
#include "rds.h"
#include "ib.h"
+static char *rds_ib_event_type_strings[] = {
+#define RDS_IB_EVENT_STRING(foo) \
+ [IB_EVENT_##foo] = __stringify(IB_EVENT_##foo)
+ RDS_IB_EVENT_STRING(CQ_ERR),
+ RDS_IB_EVENT_STRING(QP_FATAL),
+ RDS_IB_EVENT_STRING(QP_REQ_ERR),
+ RDS_IB_EVENT_STRING(QP_ACCESS_ERR),
+ RDS_IB_EVENT_STRING(COMM_EST),
+ RDS_IB_EVENT_STRING(SQ_DRAINED),
+ RDS_IB_EVENT_STRING(PATH_MIG),
+ RDS_IB_EVENT_STRING(PATH_MIG_ERR),
+ RDS_IB_EVENT_STRING(DEVICE_FATAL),
+ RDS_IB_EVENT_STRING(PORT_ACTIVE),
+ RDS_IB_EVENT_STRING(PORT_ERR),
+ RDS_IB_EVENT_STRING(LID_CHANGE),
+ RDS_IB_EVENT_STRING(PKEY_CHANGE),
+ RDS_IB_EVENT_STRING(SM_CHANGE),
+ RDS_IB_EVENT_STRING(SRQ_ERR),
+ RDS_IB_EVENT_STRING(SRQ_LIMIT_REACHED),
+ RDS_IB_EVENT_STRING(QP_LAST_WQE_REACHED),
+ RDS_IB_EVENT_STRING(CLIENT_REREGISTER),
+#undef RDS_IB_EVENT_STRING
+};
+
+static char *rds_ib_event_str(enum ib_event_type type)
+{
+ return rds_str_array(rds_ib_event_type_strings,
+ ARRAY_SIZE(rds_ib_event_type_strings), type);
+};
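
rds_ib_event_str() relies on an rds_str_array() helper that is added in another part of this series; a plausible minimal implementation (an assumption, not taken from this patch) looks like:

/* Assumed helper: map an index to its string, falling back for unknown values. */
char *rds_str_array(char **array, size_t elements, size_t index)
{
	if (index < elements && array[index])
		return array[index];
	else
		return "unknown";
}
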
+
/*
* Set the selected protocol version
*/
@@ -95,7 +125,6 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
{
const struct rds_ib_connect_private *dp = NULL;
struct rds_ib_connection *ic = conn->c_transport_data;
- struct rds_ib_device *rds_ibdev;
struct ib_qp_attr qp_attr;
int err;
@@ -111,11 +140,21 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
}
}
- printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
- &conn->c_faddr,
- RDS_PROTOCOL_MAJOR(conn->c_version),
- RDS_PROTOCOL_MINOR(conn->c_version),
- ic->i_flowctl ? ", flow control" : "");
+ if (conn->c_version < RDS_PROTOCOL(3,1)) {
+ printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
+ " no longer supported\n",
+ &conn->c_faddr,
+ RDS_PROTOCOL_MAJOR(conn->c_version),
+ RDS_PROTOCOL_MINOR(conn->c_version));
+ rds_conn_destroy(conn);
+ return;
+ } else {
+ printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
+ &conn->c_faddr,
+ RDS_PROTOCOL_MAJOR(conn->c_version),
+ RDS_PROTOCOL_MINOR(conn->c_version),
+ ic->i_flowctl ? ", flow control" : "");
+ }
/*
* Init rings and fill recv. this needs to wait until protocol negotiation
@@ -125,7 +164,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
rds_ib_recv_init_ring(ic);
/* Post receive buffers - as a side effect, this will update
* the posted credit count. */
- rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1);
+ rds_ib_recv_refill(conn, 1);
/* Tune RNR behavior */
rds_ib_tune_rnr(ic, &qp_attr);
@@ -135,12 +174,11 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
if (err)
printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);
- /* update ib_device with this local ipaddr & conn */
- rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
- err = rds_ib_update_ipaddr(rds_ibdev, conn->c_laddr);
+ /* update ib_device with this local ipaddr */
+ err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
if (err)
- printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n", err);
- rds_ib_add_conn(rds_ibdev, conn);
+ printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
+ err);
/* If the peer gave us the last packet it saw, process this as if
* we had received a regular ACK. */
@@ -153,18 +191,23 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
struct rdma_conn_param *conn_param,
struct rds_ib_connect_private *dp,
- u32 protocol_version)
+ u32 protocol_version,
+ u32 max_responder_resources,
+ u32 max_initiator_depth)
{
+ struct rds_ib_connection *ic = conn->c_transport_data;
+ struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
+
memset(conn_param, 0, sizeof(struct rdma_conn_param));
- /* XXX tune these? */
- conn_param->responder_resources = 1;
- conn_param->initiator_depth = 1;
+
+ conn_param->responder_resources =
+ min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
+ conn_param->initiator_depth =
+ min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
conn_param->rnr_retry_count = 7;
if (dp) {
- struct rds_ib_connection *ic = conn->c_transport_data;
-
memset(dp, 0, sizeof(*dp));
dp->dp_saddr = conn->c_laddr;
dp->dp_daddr = conn->c_faddr;
@@ -189,7 +232,8 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
- rdsdebug("event %u data %p\n", event->event, data);
+ rdsdebug("event %u (%s) data %p\n",
+ event->event, rds_ib_event_str(event->event), data);
}
static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
@@ -197,16 +241,18 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
struct rds_connection *conn = data;
struct rds_ib_connection *ic = conn->c_transport_data;
- rdsdebug("conn %p ic %p event %u\n", conn, ic, event->event);
+ rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
+ rds_ib_event_str(event->event));
switch (event->event) {
case IB_EVENT_COMM_EST:
rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
break;
default:
- rdsdebug("Fatal QP Event %u "
+ rdsdebug("Fatal QP Event %u (%s) "
"- connection %pI4->%pI4, reconnecting\n",
- event->event, &conn->c_laddr, &conn->c_faddr);
+ event->event, rds_ib_event_str(event->event),
+ &conn->c_laddr, &conn->c_faddr);
rds_conn_drop(conn);
break;
}
@@ -224,18 +270,16 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
struct rds_ib_device *rds_ibdev;
int ret;
- /* rds_ib_add_one creates a rds_ib_device object per IB device,
- * and allocates a protection domain, memory range and FMR pool
- * for each. If that fails for any reason, it will not register
- * the rds_ibdev at all.
+ /*
+ * It's normal to see a null device if an incoming connection races
+ * with device removal, so we don't print a warning.
*/
- rds_ibdev = ib_get_client_data(dev, &rds_ib_client);
- if (rds_ibdev == NULL) {
- if (printk_ratelimit())
- printk(KERN_NOTICE "RDS/IB: No client_data for device %s\n",
- dev->name);
+ rds_ibdev = rds_ib_get_client_data(dev);
+ if (!rds_ibdev)
return -EOPNOTSUPP;
- }
+
+ /* add the conn now so that connection establishment has the dev */
+ rds_ib_add_conn(rds_ibdev, conn);
if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
@@ -306,7 +350,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ic->i_send_ring.w_nr *
sizeof(struct rds_header),
&ic->i_send_hdrs_dma, GFP_KERNEL);
- if (ic->i_send_hdrs == NULL) {
+ if (!ic->i_send_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent send failed\n");
goto out;
@@ -316,7 +360,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ic->i_recv_ring.w_nr *
sizeof(struct rds_header),
&ic->i_recv_hdrs_dma, GFP_KERNEL);
- if (ic->i_recv_hdrs == NULL) {
+ if (!ic->i_recv_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent recv failed\n");
goto out;
@@ -324,22 +368,24 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
&ic->i_ack_dma, GFP_KERNEL);
- if (ic->i_ack == NULL) {
+ if (!ic->i_ack) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent ack failed\n");
goto out;
}
- ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));
- if (ic->i_sends == NULL) {
+ ic->i_sends = vmalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
+ ibdev_to_node(dev));
+ if (!ic->i_sends) {
ret = -ENOMEM;
rdsdebug("send allocation failed\n");
goto out;
}
memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));
- ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));
- if (ic->i_recvs == NULL) {
+ ic->i_recvs = vmalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
+ ibdev_to_node(dev));
+ if (!ic->i_recvs) {
ret = -ENOMEM;
rdsdebug("recv allocation failed\n");
goto out;
@@ -352,6 +398,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ic->i_send_cq, ic->i_recv_cq);
out:
+ rds_ib_dev_put(rds_ibdev);
return ret;
}
@@ -409,7 +456,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
struct rds_ib_connection *ic = NULL;
struct rdma_conn_param conn_param;
u32 version;
- int err, destroy = 1;
+ int err = 1, destroy = 1;
/* Check whether the remote protocol version matches ours. */
version = rds_ib_protocol_compatible(event);
@@ -448,7 +495,6 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
/* Wait and see - our connect may still be succeeding */
rds_ib_stats_inc(s_ib_connect_raced);
}
- mutex_unlock(&conn->c_cm_lock);
goto out;
}
@@ -475,24 +521,23 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
err = rds_ib_setup_qp(conn);
if (err) {
rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
- mutex_unlock(&conn->c_cm_lock);
goto out;
}
- rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version);
+ rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
+ event->param.conn.responder_resources,
+ event->param.conn.initiator_depth);
/* rdma_accept() calls rdma_reject() internally if it fails */
err = rdma_accept(cm_id, &conn_param);
- mutex_unlock(&conn->c_cm_lock);
- if (err) {
+ if (err)
rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);
- goto out;
- }
-
- return 0;
out:
- rdma_reject(cm_id, NULL, 0);
+ if (conn)
+ mutex_unlock(&conn->c_cm_lock);
+ if (err)
+ rdma_reject(cm_id, NULL, 0);
return destroy;
}
@@ -516,8 +561,8 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
goto out;
}
- rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION);
-
+ rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
+ UINT_MAX, UINT_MAX);
ret = rdma_connect(cm_id, &conn_param);
if (ret)
rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);
@@ -601,9 +646,19 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
ic->i_cm_id, err);
}
+ /*
+ * We want to wait for tx and rx completion to finish
+ * before we tear down the connection, but we have to be
+ * careful not to get stuck waiting on a send ring that
+ * only has unsignaled sends in it. We've shut down new
+ * sends before getting here, so by waiting for signaled
+ * sends to complete we're assured that there will be no
+ * more tx processing.
+ */
wait_event(rds_ib_ring_empty_wait,
- rds_ib_ring_empty(&ic->i_send_ring) &&
- rds_ib_ring_empty(&ic->i_recv_ring));
+ rds_ib_ring_empty(&ic->i_recv_ring) &&
+ (atomic_read(&ic->i_signaled_sends) == 0));
+ tasklet_kill(&ic->i_recv_tasklet);
if (ic->i_send_hdrs)
ib_dma_free_coherent(dev,
@@ -654,9 +709,12 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
BUG_ON(ic->rds_ibdev);
/* Clear pending transmit */
- if (ic->i_rm) {
- rds_message_put(ic->i_rm);
- ic->i_rm = NULL;
+ if (ic->i_data_op) {
+ struct rds_message *rm;
+
+ rm = container_of(ic->i_data_op, struct rds_message, data);
+ rds_message_put(rm);
+ ic->i_data_op = NULL;
}
/* Clear the ACK state */
@@ -690,12 +748,19 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
struct rds_ib_connection *ic;
unsigned long flags;
+ int ret;
/* XXX too lazy? */
ic = kzalloc(sizeof(struct rds_ib_connection), GFP_KERNEL);
- if (ic == NULL)
+ if (!ic)
return -ENOMEM;
+ ret = rds_ib_recv_alloc_caches(ic);
+ if (ret) {
+ kfree(ic);
+ return ret;
+ }
+
INIT_LIST_HEAD(&ic->ib_node);
tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn,
(unsigned long) ic);
@@ -703,6 +768,7 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
#ifndef KERNEL_HAS_ATOMIC64
spin_lock_init(&ic->i_ack_lock);
#endif
+ atomic_set(&ic->i_signaled_sends, 0);
/*
* rds_ib_conn_shutdown() waits for these to be emptied so they
@@ -744,6 +810,8 @@ void rds_ib_conn_free(void *arg)
list_del(&ic->ib_node);
spin_unlock_irq(lock_ptr);
+ rds_ib_recv_free_caches(ic);
+
kfree(ic);
}
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index a54cd63f9e35..b5a88415a18e 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -32,11 +32,16 @@
*/
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/rculist.h>
#include "rds.h"
-#include "rdma.h"
#include "ib.h"
+#include "xlist.h"
+struct workqueue_struct *rds_ib_fmr_wq;
+
+static DEFINE_PER_CPU(unsigned long, clean_list_grace);
+#define CLEAN_LIST_BUSY_BIT 0
/*
* This is stored as mr->r_trans_private.
@@ -45,7 +50,11 @@ struct rds_ib_mr {
struct rds_ib_device *device;
struct rds_ib_mr_pool *pool;
struct ib_fmr *fmr;
- struct list_head list;
+
+ struct xlist_head xlist;
+
+ /* unmap_list is for freeing */
+ struct list_head unmap_list;
unsigned int remap_count;
struct scatterlist *sg;
@@ -59,14 +68,16 @@ struct rds_ib_mr {
*/
struct rds_ib_mr_pool {
struct mutex flush_lock; /* serialize fmr invalidate */
- struct work_struct flush_worker; /* flush worker */
+ struct delayed_work flush_worker; /* flush worker */
- spinlock_t list_lock; /* protect variables below */
atomic_t item_count; /* total # of MRs */
atomic_t dirty_count; /* # dirty of MRs */
- struct list_head drop_list; /* MRs that have reached their max_maps limit */
- struct list_head free_list; /* unused MRs */
- struct list_head clean_list; /* unused & unamapped MRs */
+
+ struct xlist_head drop_list; /* MRs that have reached their max_maps limit */
+ struct xlist_head free_list; /* unused MRs */
+ struct xlist_head clean_list; /* global unused & unmapped MRs */
+ wait_queue_head_t flush_wait;
+
atomic_t free_pinned; /* memory pinned by free MRs */
unsigned long max_items;
unsigned long max_items_soft;
@@ -74,7 +85,7 @@ struct rds_ib_mr_pool {
struct ib_fmr_attr fmr_attr;
};
-static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all);
+static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);
@@ -83,16 +94,17 @@ static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
struct rds_ib_device *rds_ibdev;
struct rds_ib_ipaddr *i_ipaddr;
- list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
- spin_lock_irq(&rds_ibdev->spinlock);
- list_for_each_entry(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
+ list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
if (i_ipaddr->ipaddr == ipaddr) {
- spin_unlock_irq(&rds_ibdev->spinlock);
+ atomic_inc(&rds_ibdev->refcount);
+ rcu_read_unlock();
return rds_ibdev;
}
}
- spin_unlock_irq(&rds_ibdev->spinlock);
}
+ rcu_read_unlock();
return NULL;
}
@@ -108,7 +120,7 @@ static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
i_ipaddr->ipaddr = ipaddr;
spin_lock_irq(&rds_ibdev->spinlock);
- list_add_tail(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
+ list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
spin_unlock_irq(&rds_ibdev->spinlock);
return 0;
@@ -116,17 +128,24 @@ static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
- struct rds_ib_ipaddr *i_ipaddr, *next;
+ struct rds_ib_ipaddr *i_ipaddr;
+ struct rds_ib_ipaddr *to_free = NULL;
+
spin_lock_irq(&rds_ibdev->spinlock);
- list_for_each_entry_safe(i_ipaddr, next, &rds_ibdev->ipaddr_list, list) {
+ list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
if (i_ipaddr->ipaddr == ipaddr) {
- list_del(&i_ipaddr->list);
- kfree(i_ipaddr);
+ list_del_rcu(&i_ipaddr->list);
+ to_free = i_ipaddr;
break;
}
}
spin_unlock_irq(&rds_ibdev->spinlock);
+
+ if (to_free) {
+ synchronize_rcu();
+ kfree(to_free);
+ }
}
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
@@ -134,8 +153,10 @@ int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
struct rds_ib_device *rds_ibdev_old;
rds_ibdev_old = rds_ib_get_device(ipaddr);
- if (rds_ibdev_old)
+ if (rds_ibdev_old) {
rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
+ rds_ib_dev_put(rds_ibdev_old);
+ }
return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}
@@ -150,12 +171,13 @@ void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *con
BUG_ON(list_empty(&ic->ib_node));
list_del(&ic->ib_node);
- spin_lock_irq(&rds_ibdev->spinlock);
+ spin_lock(&rds_ibdev->spinlock);
list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
- spin_unlock_irq(&rds_ibdev->spinlock);
+ spin_unlock(&rds_ibdev->spinlock);
spin_unlock_irq(&ib_nodev_conns_lock);
ic->rds_ibdev = rds_ibdev;
+ atomic_inc(&rds_ibdev->refcount);
}
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
@@ -175,18 +197,18 @@ void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *
spin_unlock(&ib_nodev_conns_lock);
ic->rds_ibdev = NULL;
+ rds_ib_dev_put(rds_ibdev);
}
-void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
+void rds_ib_destroy_nodev_conns(void)
{
struct rds_ib_connection *ic, *_ic;
LIST_HEAD(tmp_list);
/* avoid calling conn_destroy with irqs off */
- spin_lock_irq(list_lock);
- list_splice(list, &tmp_list);
- INIT_LIST_HEAD(list);
- spin_unlock_irq(list_lock);
+ spin_lock_irq(&ib_nodev_conns_lock);
+ list_splice(&ib_nodev_conns, &tmp_list);
+ spin_unlock_irq(&ib_nodev_conns_lock);
list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
rds_conn_destroy(ic->conn);
@@ -200,12 +222,12 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
if (!pool)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&pool->free_list);
- INIT_LIST_HEAD(&pool->drop_list);
- INIT_LIST_HEAD(&pool->clean_list);
+ INIT_XLIST_HEAD(&pool->free_list);
+ INIT_XLIST_HEAD(&pool->drop_list);
+ INIT_XLIST_HEAD(&pool->clean_list);
mutex_init(&pool->flush_lock);
- spin_lock_init(&pool->list_lock);
- INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
+ init_waitqueue_head(&pool->flush_wait);
+ INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
pool->fmr_attr.max_pages = fmr_message_size;
pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
@@ -233,34 +255,60 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_co
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
- flush_workqueue(rds_wq);
- rds_ib_flush_mr_pool(pool, 1);
+ cancel_delayed_work_sync(&pool->flush_worker);
+ rds_ib_flush_mr_pool(pool, 1, NULL);
WARN_ON(atomic_read(&pool->item_count));
WARN_ON(atomic_read(&pool->free_pinned));
kfree(pool);
}
+static void refill_local(struct rds_ib_mr_pool *pool, struct xlist_head *xl,
+ struct rds_ib_mr **ibmr_ret)
+{
+ struct xlist_head *ibmr_xl;
+ ibmr_xl = xlist_del_head_fast(xl);
+ *ibmr_ret = list_entry(ibmr_xl, struct rds_ib_mr, xlist);
+}
+
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
struct rds_ib_mr *ibmr = NULL;
- unsigned long flags;
+ struct xlist_head *ret;
+ unsigned long *flag;
- spin_lock_irqsave(&pool->list_lock, flags);
- if (!list_empty(&pool->clean_list)) {
- ibmr = list_entry(pool->clean_list.next, struct rds_ib_mr, list);
- list_del_init(&ibmr->list);
- }
- spin_unlock_irqrestore(&pool->list_lock, flags);
+ preempt_disable();
+ flag = &__get_cpu_var(clean_list_grace);
+ set_bit(CLEAN_LIST_BUSY_BIT, flag);
+ ret = xlist_del_head(&pool->clean_list);
+ if (ret)
+ ibmr = list_entry(ret, struct rds_ib_mr, xlist);
+ clear_bit(CLEAN_LIST_BUSY_BIT, flag);
+ preempt_enable();
return ibmr;
}
+static inline void wait_clean_list_grace(void)
+{
+ int cpu;
+ unsigned long *flag;
+
+ for_each_online_cpu(cpu) {
+ flag = &per_cpu(clean_list_grace, cpu);
+ while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
+ cpu_relax();
+ }
+}
+
static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
struct rds_ib_mr *ibmr = NULL;
int err = 0, iter = 0;
+ if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+ queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+
while (1) {
ibmr = rds_ib_reuse_fmr(pool);
if (ibmr)
@@ -287,19 +335,24 @@ static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
/* We do have some empty MRs. Flush them out. */
rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
- rds_ib_flush_mr_pool(pool, 0);
+ rds_ib_flush_mr_pool(pool, 0, &ibmr);
+ if (ibmr)
+ return ibmr;
}
- ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
+ ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
if (!ibmr) {
err = -ENOMEM;
goto out_no_cigar;
}
+ memset(ibmr, 0, sizeof(*ibmr));
+
ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
(IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE),
+ IB_ACCESS_REMOTE_WRITE|
+ IB_ACCESS_REMOTE_ATOMIC),
&pool->fmr_attr);
if (IS_ERR(ibmr->fmr)) {
err = PTR_ERR(ibmr->fmr);
@@ -367,7 +420,8 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
if (page_cnt > fmr_message_size)
return -EINVAL;
- dma_pages = kmalloc(sizeof(u64) * page_cnt, GFP_ATOMIC);
+ dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
+ rdsibdev_to_node(rds_ibdev));
if (!dma_pages)
return -ENOMEM;
@@ -441,7 +495,7 @@ static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
/* FIXME we need a way to tell a r/w MR
* from a r/o MR */
- BUG_ON(in_interrupt());
+ BUG_ON(irqs_disabled());
set_page_dirty(page);
put_page(page);
}
@@ -477,33 +531,109 @@ static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int fr
}
/*
+ * Given an xlist of MRs, put them all into the list_head for more processing.
+ */
+static void xlist_append_to_list(struct xlist_head *xlist, struct list_head *list)
+{
+ struct rds_ib_mr *ibmr;
+ struct xlist_head splice;
+ struct xlist_head *cur;
+ struct xlist_head *next;
+
+ splice.next = NULL;
+ xlist_splice(xlist, &splice);
+ cur = splice.next;
+ while (cur) {
+ next = cur->next;
+ ibmr = list_entry(cur, struct rds_ib_mr, xlist);
+ list_add_tail(&ibmr->unmap_list, list);
+ cur = next;
+ }
+}
+
+/*
+ * This takes a list head of MRs and turns it into an xlist of clusters.
+ * Each cluster has an xlist of MR_CLUSTER_SIZE MRs that are ready for
+ * reuse.
+ */
+static void list_append_to_xlist(struct rds_ib_mr_pool *pool,
+ struct list_head *list, struct xlist_head *xlist,
+ struct xlist_head **tail_ret)
+{
+ struct rds_ib_mr *ibmr;
+ struct xlist_head *cur_mr = xlist;
+ struct xlist_head *tail_mr = NULL;
+
+ list_for_each_entry(ibmr, list, unmap_list) {
+ tail_mr = &ibmr->xlist;
+ tail_mr->next = NULL;
+ cur_mr->next = tail_mr;
+ cur_mr = tail_mr;
+ }
+ *tail_ret = tail_mr;
+}
+
+/*
* Flush our pool of MRs.
* At a minimum, all currently unused MRs are unmapped.
* If the number of MRs allocated exceeds the limit, we also try
* to free as many MRs as needed to get back to this limit.
*/
-static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
+static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
+ int free_all, struct rds_ib_mr **ibmr_ret)
{
struct rds_ib_mr *ibmr, *next;
+ struct xlist_head clean_xlist;
+ struct xlist_head *clean_tail;
LIST_HEAD(unmap_list);
LIST_HEAD(fmr_list);
unsigned long unpinned = 0;
- unsigned long flags;
unsigned int nfreed = 0, ncleaned = 0, free_goal;
int ret = 0;
rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);
- mutex_lock(&pool->flush_lock);
+ if (ibmr_ret) {
+ DEFINE_WAIT(wait);
+ while(!mutex_trylock(&pool->flush_lock)) {
+ ibmr = rds_ib_reuse_fmr(pool);
+ if (ibmr) {
+ *ibmr_ret = ibmr;
+ finish_wait(&pool->flush_wait, &wait);
+ goto out_nolock;
+ }
+
+ prepare_to_wait(&pool->flush_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (xlist_empty(&pool->clean_list))
+ schedule();
+
+ ibmr = rds_ib_reuse_fmr(pool);
+ if (ibmr) {
+ *ibmr_ret = ibmr;
+ finish_wait(&pool->flush_wait, &wait);
+ goto out_nolock;
+ }
+ }
+ finish_wait(&pool->flush_wait, &wait);
+ } else
+ mutex_lock(&pool->flush_lock);
+
+ if (ibmr_ret) {
+ ibmr = rds_ib_reuse_fmr(pool);
+ if (ibmr) {
+ *ibmr_ret = ibmr;
+ goto out;
+ }
+ }
- spin_lock_irqsave(&pool->list_lock, flags);
/* Get the list of all MRs to be dropped. Ordering matters -
- * we want to put drop_list ahead of free_list. */
- list_splice_init(&pool->free_list, &unmap_list);
- list_splice_init(&pool->drop_list, &unmap_list);
+ * we want to put drop_list ahead of free_list.
+ */
+ xlist_append_to_list(&pool->drop_list, &unmap_list);
+ xlist_append_to_list(&pool->free_list, &unmap_list);
if (free_all)
- list_splice_init(&pool->clean_list, &unmap_list);
- spin_unlock_irqrestore(&pool->list_lock, flags);
+ xlist_append_to_list(&pool->clean_list, &unmap_list);
free_goal = rds_ib_flush_goal(pool, free_all);
@@ -511,19 +641,20 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
goto out;
/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
- list_for_each_entry(ibmr, &unmap_list, list)
+ list_for_each_entry(ibmr, &unmap_list, unmap_list)
list_add(&ibmr->fmr->list, &fmr_list);
+
ret = ib_unmap_fmr(&fmr_list);
if (ret)
printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);
/* Now we can destroy the DMA mapping and unpin any pages */
- list_for_each_entry_safe(ibmr, next, &unmap_list, list) {
+ list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
unpinned += ibmr->sg_len;
__rds_ib_teardown_mr(ibmr);
if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
rds_ib_stats_inc(s_ib_rdma_mr_free);
- list_del(&ibmr->list);
+ list_del(&ibmr->unmap_list);
ib_dealloc_fmr(ibmr->fmr);
kfree(ibmr);
nfreed++;
@@ -531,9 +662,27 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
ncleaned++;
}
- spin_lock_irqsave(&pool->list_lock, flags);
- list_splice(&unmap_list, &pool->clean_list);
- spin_unlock_irqrestore(&pool->list_lock, flags);
+ if (!list_empty(&unmap_list)) {
+ /* we have to make sure that none of the things we're about
+ * to put on the clean list would race with other cpus trying
+ * to pull items off. The xlist would explode if we managed to
+ * remove something from the clean list and then add it back again
+ * while another CPU was spinning on that same item in xlist_del_head.
+ *
+ * This is pretty unlikely, but just in case, wait for an xlist grace period
+ * here before adding anything back into the clean list.
+ */
+ wait_clean_list_grace();
+
+ list_append_to_xlist(pool, &unmap_list, &clean_xlist, &clean_tail);
+ if (ibmr_ret)
+ refill_local(pool, &clean_xlist, ibmr_ret);
+
+ /* refill_local may have emptied our list */
+ if (!xlist_empty(&clean_xlist))
+ xlist_add(clean_xlist.next, clean_tail, &pool->clean_list);
+
+ }
atomic_sub(unpinned, &pool->free_pinned);
atomic_sub(ncleaned, &pool->dirty_count);
@@ -541,14 +690,35 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
out:
mutex_unlock(&pool->flush_lock);
+ if (waitqueue_active(&pool->flush_wait))
+ wake_up(&pool->flush_wait);
+out_nolock:
return ret;
}
+int rds_ib_fmr_init(void)
+{
+ rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
+ if (!rds_ib_fmr_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+/*
+ * By the time this is called, all the IB devices should have been torn down and
+ * had their pools freed. As each pool is freed, its work struct is waited on,
+ * so the pool flushing work queue should be idle by the time we get here.
+ */
+void rds_ib_fmr_exit(void)
+{
+ destroy_workqueue(rds_ib_fmr_wq);
+}
+
static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
- struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker);
+ struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
- rds_ib_flush_mr_pool(pool, 0);
+ rds_ib_flush_mr_pool(pool, 0, NULL);
}
void rds_ib_free_mr(void *trans_private, int invalidate)
@@ -556,47 +726,49 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
struct rds_ib_mr *ibmr = trans_private;
struct rds_ib_device *rds_ibdev = ibmr->device;
struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
- unsigned long flags;
rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);
/* Return it to the pool's free list */
- spin_lock_irqsave(&pool->list_lock, flags);
if (ibmr->remap_count >= pool->fmr_attr.max_maps)
- list_add(&ibmr->list, &pool->drop_list);
+ xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->drop_list);
else
- list_add(&ibmr->list, &pool->free_list);
+ xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->free_list);
atomic_add(ibmr->sg_len, &pool->free_pinned);
atomic_inc(&pool->dirty_count);
- spin_unlock_irqrestore(&pool->list_lock, flags);
/* If we've pinned too many pages, request a flush */
if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
atomic_read(&pool->dirty_count) >= pool->max_items / 10)
- queue_work(rds_wq, &pool->flush_worker);
+ queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
if (invalidate) {
if (likely(!in_interrupt())) {
- rds_ib_flush_mr_pool(pool, 0);
+ rds_ib_flush_mr_pool(pool, 0, NULL);
} else {
/* We get here if the user created a MR marked
* as use_once and invalidate at the same time. */
- queue_work(rds_wq, &pool->flush_worker);
+ queue_delayed_work(rds_ib_fmr_wq,
+ &pool->flush_worker, 10);
}
}
+
+ rds_ib_dev_put(rds_ibdev);
}
void rds_ib_flush_mrs(void)
{
struct rds_ib_device *rds_ibdev;
+ down_read(&rds_ib_devices_lock);
list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
if (pool)
- rds_ib_flush_mr_pool(pool, 0);
+ rds_ib_flush_mr_pool(pool, 0, NULL);
}
+ up_read(&rds_ib_devices_lock);
}
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
@@ -628,6 +800,7 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);
ibmr->device = rds_ibdev;
+ rds_ibdev = NULL;
out:
if (ret) {
@@ -635,5 +808,8 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
rds_ib_free_mr(ibmr, 0);
ibmr = ERR_PTR(ret);
}
+ if (rds_ibdev)
+ rds_ib_dev_put(rds_ibdev);
return ibmr;
}
+
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index c74e9904a6b2..e29e0ca32f74 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -43,42 +43,6 @@ static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);
-static void rds_ib_frag_drop_page(struct rds_page_frag *frag)
-{
- rdsdebug("frag %p page %p\n", frag, frag->f_page);
- __free_page(frag->f_page);
- frag->f_page = NULL;
-}
-
-static void rds_ib_frag_free(struct rds_page_frag *frag)
-{
- rdsdebug("frag %p page %p\n", frag, frag->f_page);
- BUG_ON(frag->f_page != NULL);
- kmem_cache_free(rds_ib_frag_slab, frag);
-}
-
-/*
- * We map a page at a time. Its fragments are posted in order. This
- * is called in fragment order as the fragments get send completion events.
- * Only the last frag in the page performs the unmapping.
- *
- * It's OK for ring cleanup to call this in whatever order it likes because
- * DMA is not in flight and so we can unmap while other ring entries still
- * hold page references in their frags.
- */
-static void rds_ib_recv_unmap_page(struct rds_ib_connection *ic,
- struct rds_ib_recv_work *recv)
-{
- struct rds_page_frag *frag = recv->r_frag;
-
- rdsdebug("recv %p frag %p page %p\n", recv, frag, frag->f_page);
- if (frag->f_mapped)
- ib_dma_unmap_page(ic->i_cm_id->device,
- frag->f_mapped,
- RDS_FRAG_SIZE, DMA_FROM_DEVICE);
- frag->f_mapped = 0;
-}
-
void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
struct rds_ib_recv_work *recv;
@@ -95,16 +59,161 @@ void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
recv->r_wr.sg_list = recv->r_sge;
recv->r_wr.num_sge = RDS_IB_RECV_SGE;
- sge = rds_ib_data_sge(ic, recv->r_sge);
+ sge = &recv->r_sge[0];
+ sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
+ sge->length = sizeof(struct rds_header);
+ sge->lkey = ic->i_mr->lkey;
+
+ sge = &recv->r_sge[1];
sge->addr = 0;
sge->length = RDS_FRAG_SIZE;
sge->lkey = ic->i_mr->lkey;
+ }
+}
- sge = rds_ib_header_sge(ic, recv->r_sge);
- sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
- sge->length = sizeof(struct rds_header);
- sge->lkey = ic->i_mr->lkey;
+/*
+ * The entire 'from' list, including the from element itself, is put on
+ * to the tail of the 'to' list.
+ */
+static void list_splice_entire_tail(struct list_head *from,
+ struct list_head *to)
+{
+ struct list_head *from_last = from->prev;
+
+ list_splice_tail(from_last, to);
+ list_add_tail(from_last, to);
+}
+
+static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
+{
+ struct list_head *tmp;
+
+ tmp = xchg(&cache->xfer, NULL);
+ if (tmp) {
+ if (cache->ready)
+ list_splice_entire_tail(tmp, cache->ready);
+ else
+ cache->ready = tmp;
+ }
+}
+
+static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
+{
+ struct rds_ib_cache_head *head;
+ int cpu;
+
+ cache->percpu = alloc_percpu(struct rds_ib_cache_head);
+ if (!cache->percpu)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ head = per_cpu_ptr(cache->percpu, cpu);
+ head->first = NULL;
+ head->count = 0;
+ }
+ cache->xfer = NULL;
+ cache->ready = NULL;
+
+ return 0;
+}
+
+int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
+{
+ int ret;
+
+ ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
+ if (!ret) {
+ ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
+ if (ret)
+ free_percpu(ic->i_cache_incs.percpu);
}
+
+ return ret;
+}
+
+static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
+ struct list_head *caller_list)
+{
+ struct rds_ib_cache_head *head;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ head = per_cpu_ptr(cache->percpu, cpu);
+ if (head->first) {
+ list_splice_entire_tail(head->first, caller_list);
+ head->first = NULL;
+ }
+ }
+
+ if (cache->ready) {
+ list_splice_entire_tail(cache->ready, caller_list);
+ cache->ready = NULL;
+ }
+}
+
+void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
+{
+ struct rds_ib_incoming *inc;
+ struct rds_ib_incoming *inc_tmp;
+ struct rds_page_frag *frag;
+ struct rds_page_frag *frag_tmp;
+ LIST_HEAD(list);
+
+ rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
+ rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
+ free_percpu(ic->i_cache_incs.percpu);
+
+ list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
+ list_del(&inc->ii_cache_entry);
+ WARN_ON(!list_empty(&inc->ii_frags));
+ kmem_cache_free(rds_ib_incoming_slab, inc);
+ }
+
+ rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
+ rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
+ free_percpu(ic->i_cache_frags.percpu);
+
+ list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
+ list_del(&frag->f_cache_entry);
+ WARN_ON(!list_empty(&frag->f_item));
+ kmem_cache_free(rds_ib_frag_slab, frag);
+ }
+}
+
+/* fwd decl */
+static void rds_ib_recv_cache_put(struct list_head *new_item,
+ struct rds_ib_refill_cache *cache);
+static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);
+
+
+/* Recycle frag and attached recv buffer f_sg */
+static void rds_ib_frag_free(struct rds_ib_connection *ic,
+ struct rds_page_frag *frag)
+{
+ rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));
+
+ rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
+}
+
+/* Recycle inc after freeing attached frags */
+void rds_ib_inc_free(struct rds_incoming *inc)
+{
+ struct rds_ib_incoming *ibinc;
+ struct rds_page_frag *frag;
+ struct rds_page_frag *pos;
+ struct rds_ib_connection *ic = inc->i_conn->c_transport_data;
+
+ ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
+
+ /* Free attached frags */
+ list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
+ list_del_init(&frag->f_item);
+ rds_ib_frag_free(ic, frag);
+ }
+ BUG_ON(!list_empty(&ibinc->ii_frags));
+
+ rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
+ rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
}
static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
@@ -115,10 +224,8 @@ static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
recv->r_ibinc = NULL;
}
if (recv->r_frag) {
- rds_ib_recv_unmap_page(ic, recv);
- if (recv->r_frag->f_page)
- rds_ib_frag_drop_page(recv->r_frag);
- rds_ib_frag_free(recv->r_frag);
+ ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
+ rds_ib_frag_free(ic, recv->r_frag);
recv->r_frag = NULL;
}
}
@@ -129,84 +236,111 @@ void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
for (i = 0; i < ic->i_recv_ring.w_nr; i++)
rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
-
- if (ic->i_frag.f_page)
- rds_ib_frag_drop_page(&ic->i_frag);
}
-static int rds_ib_recv_refill_one(struct rds_connection *conn,
- struct rds_ib_recv_work *recv,
- gfp_t kptr_gfp, gfp_t page_gfp)
+static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
+ gfp_t slab_mask)
{
- struct rds_ib_connection *ic = conn->c_transport_data;
- dma_addr_t dma_addr;
- struct ib_sge *sge;
- int ret = -ENOMEM;
+ struct rds_ib_incoming *ibinc;
+ struct list_head *cache_item;
+ int avail_allocs;
- if (recv->r_ibinc == NULL) {
- if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) {
+ cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
+ if (cache_item) {
+ ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
+ } else {
+ avail_allocs = atomic_add_unless(&rds_ib_allocation,
+ 1, rds_ib_sysctl_max_recv_allocation);
+ if (!avail_allocs) {
rds_ib_stats_inc(s_ib_rx_alloc_limit);
- goto out;
+ return NULL;
}
- recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab,
- kptr_gfp);
- if (recv->r_ibinc == NULL) {
+ ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
+ if (!ibinc) {
atomic_dec(&rds_ib_allocation);
- goto out;
+ return NULL;
}
- INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
- rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
}
+ INIT_LIST_HEAD(&ibinc->ii_frags);
+ rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);
- if (recv->r_frag == NULL) {
- recv->r_frag = kmem_cache_alloc(rds_ib_frag_slab, kptr_gfp);
- if (recv->r_frag == NULL)
- goto out;
- INIT_LIST_HEAD(&recv->r_frag->f_item);
- recv->r_frag->f_page = NULL;
+ return ibinc;
+}
+
+static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
+ gfp_t slab_mask, gfp_t page_mask)
+{
+ struct rds_page_frag *frag;
+ struct list_head *cache_item;
+ int ret;
+
+ cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
+ if (cache_item) {
+ frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
+ } else {
+ frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
+ if (!frag)
+ return NULL;
+
+ sg_init_table(&frag->f_sg, 1);
+ ret = rds_page_remainder_alloc(&frag->f_sg,
+ RDS_FRAG_SIZE, page_mask);
+ if (ret) {
+ kmem_cache_free(rds_ib_frag_slab, frag);
+ return NULL;
+ }
}
- if (ic->i_frag.f_page == NULL) {
- ic->i_frag.f_page = alloc_page(page_gfp);
- if (ic->i_frag.f_page == NULL)
- goto out;
- ic->i_frag.f_offset = 0;
+ INIT_LIST_HEAD(&frag->f_item);
+
+ return frag;
+}
+
+static int rds_ib_recv_refill_one(struct rds_connection *conn,
+ struct rds_ib_recv_work *recv, int prefill)
+{
+ struct rds_ib_connection *ic = conn->c_transport_data;
+ struct ib_sge *sge;
+ int ret = -ENOMEM;
+ gfp_t slab_mask = GFP_NOWAIT;
+ gfp_t page_mask = GFP_NOWAIT;
+
+ if (prefill) {
+ slab_mask = GFP_KERNEL;
+ page_mask = GFP_HIGHUSER;
}
- dma_addr = ib_dma_map_page(ic->i_cm_id->device,
- ic->i_frag.f_page,
- ic->i_frag.f_offset,
- RDS_FRAG_SIZE,
- DMA_FROM_DEVICE);
- if (ib_dma_mapping_error(ic->i_cm_id->device, dma_addr))
- goto out;
+ if (!ic->i_cache_incs.ready)
+ rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
+ if (!ic->i_cache_frags.ready)
+ rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
/*
- * Once we get the RDS_PAGE_LAST_OFF frag then rds_ib_frag_unmap()
- * must be called on this recv. This happens as completions hit
- * in order or on connection shutdown.
+ * ibinc was taken from recv if recv contained the start of a message.
+ * recvs that were continuations will still have this allocated.
*/
- recv->r_frag->f_page = ic->i_frag.f_page;
- recv->r_frag->f_offset = ic->i_frag.f_offset;
- recv->r_frag->f_mapped = dma_addr;
+ if (!recv->r_ibinc) {
+ recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
+ if (!recv->r_ibinc)
+ goto out;
+ }
- sge = rds_ib_data_sge(ic, recv->r_sge);
- sge->addr = dma_addr;
- sge->length = RDS_FRAG_SIZE;
+ WARN_ON(recv->r_frag); /* leak! */
+ recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
+ if (!recv->r_frag)
+ goto out;
+
+ ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
+ 1, DMA_FROM_DEVICE);
+ WARN_ON(ret != 1);
- sge = rds_ib_header_sge(ic, recv->r_sge);
+ sge = &recv->r_sge[0];
sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
sge->length = sizeof(struct rds_header);
- get_page(recv->r_frag->f_page);
-
- if (ic->i_frag.f_offset < RDS_PAGE_LAST_OFF) {
- ic->i_frag.f_offset += RDS_FRAG_SIZE;
- } else {
- put_page(ic->i_frag.f_page);
- ic->i_frag.f_page = NULL;
- ic->i_frag.f_offset = 0;
- }
+ sge = &recv->r_sge[1];
+ sge->addr = sg_dma_address(&recv->r_frag->f_sg);
+ sge->length = sg_dma_len(&recv->r_frag->f_sg);
ret = 0;
out:
@@ -216,13 +350,11 @@ out:
/*
* This tries to allocate and post unused work requests after making sure that
* they have all the allocations they need to queue received fragments into
- * sockets. The i_recv_mutex is held here so that ring_alloc and _unalloc
- * pairs don't go unmatched.
+ * sockets.
- *
- * -1 is returned if posting fails due to temporary resource exhaustion.
*/
-int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
- gfp_t page_gfp, int prefill)
+void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct rds_ib_recv_work *recv;
@@ -236,28 +368,25 @@ int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
if (pos >= ic->i_recv_ring.w_nr) {
printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
pos);
- ret = -EINVAL;
break;
}
recv = &ic->i_recvs[pos];
- ret = rds_ib_recv_refill_one(conn, recv, kptr_gfp, page_gfp);
+ ret = rds_ib_recv_refill_one(conn, recv, prefill);
if (ret) {
- ret = -1;
break;
}
/* XXX when can this fail? */
ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
- recv->r_ibinc, recv->r_frag->f_page,
- (long) recv->r_frag->f_mapped, ret);
+ recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
+ (long) sg_dma_address(&recv->r_frag->f_sg), ret);
if (ret) {
rds_ib_conn_error(conn, "recv post on "
"%pI4 returned %d, disconnecting and "
"reconnecting\n", &conn->c_faddr,
ret);
- ret = -1;
break;
}
@@ -270,37 +399,73 @@ int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
if (ret)
rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
- return ret;
}
-void rds_ib_inc_purge(struct rds_incoming *inc)
+/*
+ * We want to recycle several types of recv allocations, like incs and frags.
+ * To use this, the *_free() function passes in the ptr to a list_head within
+ * the recyclee, as well as the cache to put it on.
+ *
+ * First, we put the memory on a percpu list. When this reaches a certain size,
+ * we move it to an intermediate non-percpu list in a lockless manner, with some
+ * xchg/cmpxchg wizardry.
+ *
+ * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
+ * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
+ * list_empty() will return true even when one element is actually present.
+ */
+static void rds_ib_recv_cache_put(struct list_head *new_item,
+ struct rds_ib_refill_cache *cache)
{
- struct rds_ib_incoming *ibinc;
- struct rds_page_frag *frag;
- struct rds_page_frag *pos;
+ unsigned long flags;
+ struct rds_ib_cache_head *chp;
+ struct list_head *old;
- ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
- rdsdebug("purging ibinc %p inc %p\n", ibinc, inc);
+ local_irq_save(flags);
- list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
- list_del_init(&frag->f_item);
- rds_ib_frag_drop_page(frag);
- rds_ib_frag_free(frag);
- }
+ chp = per_cpu_ptr(cache->percpu, smp_processor_id());
+ if (!chp->first)
+ INIT_LIST_HEAD(new_item);
+ else /* put on front */
+ list_add_tail(new_item, chp->first);
+ chp->first = new_item;
+ chp->count++;
+
+ if (chp->count < RDS_IB_RECYCLE_BATCH_COUNT)
+ goto end;
+
+ /*
+ * Return our per-cpu first list to the cache's xfer by atomically
+ * grabbing the current xfer list, appending it to our per-cpu list,
+ * and then atomically returning that entire list back to the
+ * cache's xfer list as long as it's still empty.
+ */
+ do {
+ old = xchg(&cache->xfer, NULL);
+ if (old)
+ list_splice_entire_tail(old, chp->first);
+ old = cmpxchg(&cache->xfer, NULL, chp->first);
+ } while (old);
+
+ chp->first = NULL;
+ chp->count = 0;
+end:
+ local_irq_restore(flags);
}
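The xchg/cmpxchg loop above can be exercised outside the kernel with GCC's __atomic builtins; the sketch below uses singly linked nodes and hypothetical names rather than list_head, but follows the same steps: steal whatever is already on the shared pointer, splice it onto the private chain, then install the combined chain only if the shared pointer is still empty, retrying otherwise.

#include <stddef.h>

struct node {
	struct node *next;		/* NULL-terminated chain */
};

static struct node *xfer;		/* shared; NULL means empty */

static void publish(struct node *mine)
{
	struct node *old, *expected;

	do {
		/* steal anything another producer already queued */
		old = __atomic_exchange_n(&xfer, NULL, __ATOMIC_ACQ_REL);
		if (old) {
			struct node *tail = mine;

			while (tail->next)
				tail = tail->next;
			tail->next = old;	/* splice onto our tail */
		}
		expected = NULL;
		/* install only if 'xfer' is still empty; otherwise retry */
	} while (!__atomic_compare_exchange_n(&xfer, &expected, mine, 0,
					      __ATOMIC_ACQ_REL,
					      __ATOMIC_ACQUIRE));
}

As in the kernel version, the consumer (rds_ib_recv_cache_get() below) manipulates the ready list without atomics, so it is assumed to run in a single context at a time.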
-void rds_ib_inc_free(struct rds_incoming *inc)
+static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
{
- struct rds_ib_incoming *ibinc;
-
- ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
+ struct list_head *head = cache->ready;
+
+ if (head) {
+ if (!list_empty(head)) {
+ cache->ready = head->next;
+ list_del_init(head);
+ } else
+ cache->ready = NULL;
+ }
- rds_ib_inc_purge(inc);
- rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
- BUG_ON(!list_empty(&ibinc->ii_frags));
- kmem_cache_free(rds_ib_incoming_slab, ibinc);
- atomic_dec(&rds_ib_allocation);
- BUG_ON(atomic_read(&rds_ib_allocation) < 0);
+ return head;
}
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
@@ -336,13 +501,13 @@ int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
to_copy = min_t(unsigned long, to_copy, len - copied);
rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
- "[%p, %lu] + %lu\n",
+ "[%p, %u] + %lu\n",
to_copy, iov->iov_base, iov->iov_len, iov_off,
- frag->f_page, frag->f_offset, frag_off);
+ sg_page(&frag->f_sg), frag->f_sg.offset, frag_off);
/* XXX needs + offset for multiple recvs per page */
- ret = rds_page_copy_to_user(frag->f_page,
- frag->f_offset + frag_off,
+ ret = rds_page_copy_to_user(sg_page(&frag->f_sg),
+ frag->f_sg.offset + frag_off,
iov->iov_base + iov_off,
to_copy);
if (ret) {
@@ -557,47 +722,6 @@ u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
return rds_ib_get_ack(ic);
}
-static struct rds_header *rds_ib_get_header(struct rds_connection *conn,
- struct rds_ib_recv_work *recv,
- u32 data_len)
-{
- struct rds_ib_connection *ic = conn->c_transport_data;
- void *hdr_buff = &ic->i_recv_hdrs[recv - ic->i_recvs];
- void *addr;
- u32 misplaced_hdr_bytes;
-
- /*
- * Support header at the front (RDS 3.1+) as well as header-at-end.
- *
- * Cases:
- * 1) header all in header buff (great!)
- * 2) header all in data page (copy all to header buff)
- * 3) header split across hdr buf + data page
- * (move bit in hdr buff to end before copying other bit from data page)
- */
- if (conn->c_version > RDS_PROTOCOL_3_0 || data_len == RDS_FRAG_SIZE)
- return hdr_buff;
-
- if (data_len <= (RDS_FRAG_SIZE - sizeof(struct rds_header))) {
- addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
- memcpy(hdr_buff,
- addr + recv->r_frag->f_offset + data_len,
- sizeof(struct rds_header));
- kunmap_atomic(addr, KM_SOFTIRQ0);
- return hdr_buff;
- }
-
- misplaced_hdr_bytes = (sizeof(struct rds_header) - (RDS_FRAG_SIZE - data_len));
-
- memmove(hdr_buff + misplaced_hdr_bytes, hdr_buff, misplaced_hdr_bytes);
-
- addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
- memcpy(hdr_buff, addr + recv->r_frag->f_offset + data_len,
- sizeof(struct rds_header) - misplaced_hdr_bytes);
- kunmap_atomic(addr, KM_SOFTIRQ0);
- return hdr_buff;
-}
-
/*
* It's kind of lame that we're copying from the posted receive pages into
* long-lived bitmaps. We could have posted the bitmaps and rdma written into
@@ -639,7 +763,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
- addr = kmap_atomic(frag->f_page, KM_SOFTIRQ0);
+ addr = kmap_atomic(sg_page(&frag->f_sg), KM_SOFTIRQ0);
src = addr + frag_off;
dst = (void *)map->m_page_addrs[map_page] + map_off;
@@ -710,7 +834,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
}
data_len -= sizeof(struct rds_header);
- ihdr = rds_ib_get_header(conn, recv, data_len);
+ ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
/* Validate the checksum. */
if (!rds_message_verify_checksum(ihdr)) {
@@ -742,12 +866,12 @@ static void rds_ib_process_recv(struct rds_connection *conn,
* the inc is freed. We don't go that route, so we have to drop the
* page ref ourselves. We can't just leave the page on the recv
* because that confuses the dma mapping of pages and each recv's use
- * of a partial page. We can leave the frag, though, it will be
- * reused.
+ * of a partial page.
*
* FIXME: Fold this into the code path below.
*/
- rds_ib_frag_drop_page(recv->r_frag);
+ rds_ib_frag_free(ic, recv->r_frag);
+ recv->r_frag = NULL;
return;
}
@@ -757,7 +881,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
* into the inc and save the inc so we can hang upcoming fragments
* off its list.
*/
- if (ibinc == NULL) {
+ if (!ibinc) {
ibinc = recv->r_ibinc;
recv->r_ibinc = NULL;
ic->i_ibinc = ibinc;
@@ -842,32 +966,38 @@ static inline void rds_poll_cq(struct rds_ib_connection *ic,
struct rds_ib_recv_work *recv;
while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
- rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
- (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
+ rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
+ (unsigned long long)wc.wr_id, wc.status,
+ rds_ib_wc_status_str(wc.status), wc.byte_len,
be32_to_cpu(wc.ex.imm_data));
rds_ib_stats_inc(s_ib_rx_cq_event);
recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
- rds_ib_recv_unmap_page(ic, recv);
+ ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
/*
* Also process recvs in connecting state because it is possible
* to get a recv completion _before_ the rdmacm ESTABLISHED
* event is processed.
*/
- if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
+ if (wc.status == IB_WC_SUCCESS) {
+ rds_ib_process_recv(conn, recv, wc.byte_len, state);
+ } else {
/* We expect errors as the qp is drained during shutdown */
- if (wc.status == IB_WC_SUCCESS) {
- rds_ib_process_recv(conn, recv, wc.byte_len, state);
- } else {
- rds_ib_conn_error(conn, "recv completion on "
- "%pI4 had status %u, disconnecting and "
- "reconnecting\n", &conn->c_faddr,
- wc.status);
- }
+ if (rds_conn_up(conn) || rds_conn_connecting(conn))
+ rds_ib_conn_error(conn, "recv completion on %pI4 had "
+ "status %u (%s), disconnecting and "
+ "reconnecting\n", &conn->c_faddr,
+ wc.status,
+ rds_ib_wc_status_str(wc.status));
}
+ /*
+ * It's very important that we only free this ring entry if we've truly
+ * freed the resources allocated to the entry. The refilling path can
+ * leak if we don't.
+ */
rds_ib_ring_free(&ic->i_recv_ring, 1);
}
}
@@ -897,11 +1027,8 @@ void rds_ib_recv_tasklet_fn(unsigned long data)
if (rds_ib_ring_empty(&ic->i_recv_ring))
rds_ib_stats_inc(s_ib_rx_ring_empty);
- /*
- * If the ring is running low, then schedule the thread to refill.
- */
if (rds_ib_ring_low(&ic->i_recv_ring))
- queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
+ rds_ib_recv_refill(conn, 0);
}
int rds_ib_recv(struct rds_connection *conn)
@@ -910,25 +1037,13 @@ int rds_ib_recv(struct rds_connection *conn)
int ret = 0;
rdsdebug("conn %p\n", conn);
-
- /*
- * If we get a temporary posting failure in this context then
- * we're really low and we want the caller to back off for a bit.
- */
- mutex_lock(&ic->i_recv_mutex);
- if (rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 0))
- ret = -ENOMEM;
- else
- rds_ib_stats_inc(s_ib_rx_refill_from_thread);
- mutex_unlock(&ic->i_recv_mutex);
-
if (rds_conn_up(conn))
rds_ib_attempt_ack(ic);
return ret;
}
-int __init rds_ib_recv_init(void)
+int rds_ib_recv_init(void)
{
struct sysinfo si;
int ret = -ENOMEM;
@@ -939,14 +1054,14 @@ int __init rds_ib_recv_init(void)
rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
sizeof(struct rds_ib_incoming),
- 0, 0, NULL);
- if (rds_ib_incoming_slab == NULL)
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!rds_ib_incoming_slab)
goto out;
rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
sizeof(struct rds_page_frag),
- 0, 0, NULL);
- if (rds_ib_frag_slab == NULL)
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!rds_ib_frag_slab)
kmem_cache_destroy(rds_ib_incoming_slab);
else
ret = 0;
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 17fa80803ab0..71f373c421bc 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -36,11 +36,49 @@
#include <linux/dmapool.h>
#include "rds.h"
-#include "rdma.h"
#include "ib.h"
-static void rds_ib_send_rdma_complete(struct rds_message *rm,
- int wc_status)
+static char *rds_ib_wc_status_strings[] = {
+#define RDS_IB_WC_STATUS_STR(foo) \
+ [IB_WC_##foo] = __stringify(IB_WC_##foo)
+ RDS_IB_WC_STATUS_STR(SUCCESS),
+ RDS_IB_WC_STATUS_STR(LOC_LEN_ERR),
+ RDS_IB_WC_STATUS_STR(LOC_QP_OP_ERR),
+ RDS_IB_WC_STATUS_STR(LOC_EEC_OP_ERR),
+ RDS_IB_WC_STATUS_STR(LOC_PROT_ERR),
+ RDS_IB_WC_STATUS_STR(WR_FLUSH_ERR),
+ RDS_IB_WC_STATUS_STR(MW_BIND_ERR),
+ RDS_IB_WC_STATUS_STR(BAD_RESP_ERR),
+ RDS_IB_WC_STATUS_STR(LOC_ACCESS_ERR),
+ RDS_IB_WC_STATUS_STR(REM_INV_REQ_ERR),
+ RDS_IB_WC_STATUS_STR(REM_ACCESS_ERR),
+ RDS_IB_WC_STATUS_STR(REM_OP_ERR),
+ RDS_IB_WC_STATUS_STR(RETRY_EXC_ERR),
+ RDS_IB_WC_STATUS_STR(RNR_RETRY_EXC_ERR),
+ RDS_IB_WC_STATUS_STR(LOC_RDD_VIOL_ERR),
+ RDS_IB_WC_STATUS_STR(REM_INV_RD_REQ_ERR),
+ RDS_IB_WC_STATUS_STR(REM_ABORT_ERR),
+ RDS_IB_WC_STATUS_STR(INV_EECN_ERR),
+ RDS_IB_WC_STATUS_STR(INV_EEC_STATE_ERR),
+ RDS_IB_WC_STATUS_STR(FATAL_ERR),
+ RDS_IB_WC_STATUS_STR(RESP_TIMEOUT_ERR),
+ RDS_IB_WC_STATUS_STR(GENERAL_ERR),
+#undef RDS_IB_WC_STATUS_STR
+};
+
+char *rds_ib_wc_status_str(enum ib_wc_status status)
+{
+ return rds_str_array(rds_ib_wc_status_strings,
+ ARRAY_SIZE(rds_ib_wc_status_strings), status);
+}
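rds_str_array() itself is not part of this hunk; the behaviour relied on here is assumed to be a bounds-checked index into the table above, falling back to a fixed string for out-of-range or unnamed statuses, roughly:

#include <stddef.h>

/* Sketch of the lookup this depends on (an assumption, not the real helper). */
static char *str_array_lookup(char **array, size_t elements, size_t index)
{
	if (index < elements && array[index])
		return array[index];
	return "unknown";
}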
+
+/*
+ * Convert IB-specific error message to RDS error message and call core
+ * completion handler.
+ */
+static void rds_ib_send_complete(struct rds_message *rm,
+ int wc_status,
+ void (*complete)(struct rds_message *rm, int status))
{
int notify_status;
@@ -60,69 +98,125 @@ static void rds_ib_send_rdma_complete(struct rds_message *rm,
notify_status = RDS_RDMA_OTHER_ERROR;
break;
}
- rds_rdma_send_complete(rm, notify_status);
+ complete(rm, notify_status);
+}
+
+static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
+ struct rm_data_op *op,
+ int wc_status)
+{
+ if (op->op_nents)
+ ib_dma_unmap_sg(ic->i_cm_id->device,
+ op->op_sg, op->op_nents,
+ DMA_TO_DEVICE);
}
static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
- struct rds_rdma_op *op)
+ struct rm_rdma_op *op,
+ int wc_status)
{
- if (op->r_mapped) {
+ if (op->op_mapped) {
ib_dma_unmap_sg(ic->i_cm_id->device,
- op->r_sg, op->r_nents,
- op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- op->r_mapped = 0;
+ op->op_sg, op->op_nents,
+ op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ op->op_mapped = 0;
}
+
+ /* If the user asked for a completion notification on this
+ * message, we can implement three different semantics:
+ * 1. Notify when we received the ACK on the RDS message
+ * that was queued with the RDMA. This provides reliable
+ * notification of RDMA status at the expense of a one-way
+ * packet delay.
+ * 2. Notify when the IB stack gives us the completion event for
+ * the RDMA operation.
+ * 3. Notify when the IB stack gives us the completion event for
+ * the accompanying RDS messages.
+ * Here, we implement approach #3. To implement approach #2,
+ * we would need to take an event for the rdma WR. To implement #1,
+ * don't call rds_rdma_send_complete at all, and fall back to the notify
+ * handling in the ACK processing code.
+ *
+ * Note: There's no need to explicitly sync any RDMA buffers using
+ * ib_dma_sync_sg_for_cpu - the completion for the RDMA
+ * operation itself unmapped the RDMA buffers, which takes care
+ * of synching.
+ */
+ rds_ib_send_complete(container_of(op, struct rds_message, rdma),
+ wc_status, rds_rdma_send_complete);
+
+ if (op->op_write)
+ rds_stats_add(s_send_rdma_bytes, op->op_bytes);
+ else
+ rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
}
-static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
- struct rds_ib_send_work *send,
- int wc_status)
+static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
+ struct rm_atomic_op *op,
+ int wc_status)
{
- struct rds_message *rm = send->s_rm;
-
- rdsdebug("ic %p send %p rm %p\n", ic, send, rm);
-
- ib_dma_unmap_sg(ic->i_cm_id->device,
- rm->m_sg, rm->m_nents,
- DMA_TO_DEVICE);
-
- if (rm->m_rdma_op != NULL) {
- rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
-
- /* If the user asked for a completion notification on this
- * message, we can implement three different semantics:
- * 1. Notify when we received the ACK on the RDS message
- * that was queued with the RDMA. This provides reliable
- * notification of RDMA status at the expense of a one-way
- * packet delay.
- * 2. Notify when the IB stack gives us the completion event for
- * the RDMA operation.
- * 3. Notify when the IB stack gives us the completion event for
- * the accompanying RDS messages.
- * Here, we implement approach #3. To implement approach #2,
- * call rds_rdma_send_complete from the cq_handler. To implement #1,
- * don't call rds_rdma_send_complete at all, and fall back to the notify
- * handling in the ACK processing code.
- *
- * Note: There's no need to explicitly sync any RDMA buffers using
- * ib_dma_sync_sg_for_cpu - the completion for the RDMA
- * operation itself unmapped the RDMA buffers, which takes care
- * of synching.
- */
- rds_ib_send_rdma_complete(rm, wc_status);
+ /* unmap atomic recvbuf */
+ if (op->op_mapped) {
+ ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
+ DMA_FROM_DEVICE);
+ op->op_mapped = 0;
+ }
- if (rm->m_rdma_op->r_write)
- rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes);
- else
- rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes);
+ rds_ib_send_complete(container_of(op, struct rds_message, atomic),
+ wc_status, rds_atomic_send_complete);
+
+ if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
+ rds_ib_stats_inc(s_ib_atomic_cswp);
+ else
+ rds_ib_stats_inc(s_ib_atomic_fadd);
+}
+
+/*
+ * Unmap the resources associated with a struct send_work.
+ *
+ * Returns the rm, simply because the caller (the completion handler) needs
+ * it and, currently, switching on wr.opcode here is the only way to recover
+ * it.
+ */
+static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
+ struct rds_ib_send_work *send,
+ int wc_status)
+{
+ struct rds_message *rm = NULL;
+
+ /* In the error case, wc.opcode sometimes contains garbage */
+ switch (send->s_wr.opcode) {
+ case IB_WR_SEND:
+ if (send->s_op) {
+ rm = container_of(send->s_op, struct rds_message, data);
+ rds_ib_send_unmap_data(ic, send->s_op, wc_status);
+ }
+ break;
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_READ:
+ if (send->s_op) {
+ rm = container_of(send->s_op, struct rds_message, rdma);
+ rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
+ }
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ if (send->s_op) {
+ rm = container_of(send->s_op, struct rds_message, atomic);
+ rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
+ }
+ break;
+ default:
+ if (printk_ratelimit())
+ printk(KERN_NOTICE
+ "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
+ __func__, send->s_wr.opcode);
+ break;
}
- /* If anyone waited for this message to get flushed out, wake
- * them up now */
- rds_message_unmapped(rm);
+ send->s_wr.opcode = 0xdead;
- rds_message_put(rm);
- send->s_rm = NULL;
+ return rm;
}
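Each branch above recovers the enclosing struct rds_message from the embedded op with container_of(), picking the member to walk back from according to the opcode. A userspace equivalent of the pointer arithmetic, with stand-in types rather than the RDS structures:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct msg {
	int hdr;
	struct { int nents; } data;	/* stand-in for struct rm_data_op */
	struct { int nents; } rdma;	/* stand-in for struct rm_rdma_op */
};

static struct msg *msg_from_rdma_op(void *op)
{
	return container_of(op, struct msg, rdma);
}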
void rds_ib_send_init_ring(struct rds_ib_connection *ic)
@@ -133,23 +227,18 @@ void rds_ib_send_init_ring(struct rds_ib_connection *ic)
for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
struct ib_sge *sge;
- send->s_rm = NULL;
send->s_op = NULL;
send->s_wr.wr_id = i;
send->s_wr.sg_list = send->s_sge;
- send->s_wr.num_sge = 1;
- send->s_wr.opcode = IB_WR_SEND;
- send->s_wr.send_flags = 0;
send->s_wr.ex.imm_data = 0;
- sge = rds_ib_data_sge(ic, send->s_sge);
- sge->lkey = ic->i_mr->lkey;
-
- sge = rds_ib_header_sge(ic, send->s_sge);
+ sge = &send->s_sge[0];
sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
sge->length = sizeof(struct rds_header);
sge->lkey = ic->i_mr->lkey;
+
+ send->s_sge[1].lkey = ic->i_mr->lkey;
}
}
@@ -159,16 +248,24 @@ void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
u32 i;
for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
- if (send->s_wr.opcode == 0xdead)
- continue;
- if (send->s_rm)
- rds_ib_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
- if (send->s_op)
- rds_ib_send_unmap_rdma(ic, send->s_op);
+ if (send->s_op && send->s_wr.opcode != 0xdead)
+ rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
}
}
/*
+ * The only fast path caller always has a non-zero nr, so we don't
+ * bother testing nr before performing the atomic sub.
+ */
+static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
+{
+ if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
+ waitqueue_active(&rds_ib_ring_empty_wait))
+ wake_up(&rds_ib_ring_empty_wait);
+ BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
+}
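The matching increments happen in the transmit paths below, which atomic_add() the number of signaled work requests they post. The point of the counter is that teardown must not proceed while signaled sends are still outstanding; the wait side is not in this hunk, but is assumed to look roughly like:

	wait_event(rds_ib_ring_empty_wait,
		   atomic_read(&ic->i_signaled_sends) == 0);

(The real condition in the shutdown path may also check that the rings have drained.)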
+
+/*
* The _oldest/_free ring operations here race cleanly with the alloc/unalloc
* operations performed in the send path. As the sender allocs and potentially
* unallocs the next free entry in the ring it doesn't alter which is
@@ -178,12 +275,14 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
struct rds_connection *conn = context;
struct rds_ib_connection *ic = conn->c_transport_data;
+ struct rds_message *rm = NULL;
struct ib_wc wc;
struct rds_ib_send_work *send;
u32 completed;
u32 oldest;
u32 i = 0;
int ret;
+ int nr_sig = 0;
rdsdebug("cq %p conn %p\n", cq, conn);
rds_ib_stats_inc(s_ib_tx_cq_call);
@@ -192,8 +291,9 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
while (ib_poll_cq(cq, 1, &wc) > 0) {
- rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
- (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
+ rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
+ (unsigned long long)wc.wr_id, wc.status,
+ rds_ib_wc_status_str(wc.status), wc.byte_len,
be32_to_cpu(wc.ex.imm_data));
rds_ib_stats_inc(s_ib_tx_cq_event);
@@ -210,51 +310,30 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
for (i = 0; i < completed; i++) {
send = &ic->i_sends[oldest];
+ if (send->s_wr.send_flags & IB_SEND_SIGNALED)
+ nr_sig++;
- /* In the error case, wc.opcode sometimes contains garbage */
- switch (send->s_wr.opcode) {
- case IB_WR_SEND:
- if (send->s_rm)
- rds_ib_send_unmap_rm(ic, send, wc.status);
- break;
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_READ:
- /* Nothing to be done - the SG list will be unmapped
- * when the SEND completes. */
- break;
- default:
- if (printk_ratelimit())
- printk(KERN_NOTICE
- "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
- __func__, send->s_wr.opcode);
- break;
- }
+ rm = rds_ib_send_unmap_op(ic, send, wc.status);
- send->s_wr.opcode = 0xdead;
- send->s_wr.num_sge = 1;
if (send->s_queued + HZ/2 < jiffies)
rds_ib_stats_inc(s_ib_tx_stalled);
- /* If a RDMA operation produced an error, signal this right
- * away. If we don't, the subsequent SEND that goes with this
- * RDMA will be canceled with ERR_WFLUSH, and the application
- * never learn that the RDMA failed. */
- if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
- struct rds_message *rm;
-
- rm = rds_send_get_message(conn, send->s_op);
- if (rm) {
- if (rm->m_rdma_op)
- rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
- rds_ib_send_rdma_complete(rm, wc.status);
- rds_message_put(rm);
+ if (send->s_op) {
+ if (send->s_op == rm->m_final_op) {
+ /* If anyone waited for this message to get flushed out, wake
+ * them up now */
+ rds_message_unmapped(rm);
}
+ rds_message_put(rm);
+ send->s_op = NULL;
}
oldest = (oldest + 1) % ic->i_send_ring.w_nr;
}
rds_ib_ring_free(&ic->i_send_ring, completed);
+ rds_ib_sub_signaled(ic, nr_sig);
+ nr_sig = 0;
if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
test_bit(0, &conn->c_map_queued))
@@ -262,10 +341,10 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
/* We expect errors as the qp is drained during shutdown */
if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
- rds_ib_conn_error(conn,
- "send completion on %pI4 "
- "had status %u, disconnecting and reconnecting\n",
- &conn->c_faddr, wc.status);
+ rds_ib_conn_error(conn, "send completion on %pI4 had status "
+ "%u (%s), disconnecting and reconnecting\n",
+ &conn->c_faddr, wc.status,
+ rds_ib_wc_status_str(wc.status));
}
}
}
@@ -294,7 +373,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
* credits (see rds_ib_send_add_credits below).
*
* The RDS send code is essentially single-threaded; rds_send_xmit
- * grabs c_send_lock to ensure exclusive access to the send ring.
+ * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
* However, the ACK sending code is independent and can race with
* message SENDs.
*
@@ -413,40 +492,21 @@ void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}
-static inline void
-rds_ib_xmit_populate_wr(struct rds_ib_connection *ic,
- struct rds_ib_send_work *send, unsigned int pos,
- unsigned long buffer, unsigned int length,
- int send_flags)
+static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
+ struct rds_ib_send_work *send,
+ bool notify)
{
- struct ib_sge *sge;
-
- WARN_ON(pos != send - ic->i_sends);
-
- send->s_wr.send_flags = send_flags;
- send->s_wr.opcode = IB_WR_SEND;
- send->s_wr.num_sge = 2;
- send->s_wr.next = NULL;
- send->s_queued = jiffies;
- send->s_op = NULL;
-
- if (length != 0) {
- sge = rds_ib_data_sge(ic, send->s_sge);
- sge->addr = buffer;
- sge->length = length;
- sge->lkey = ic->i_mr->lkey;
-
- sge = rds_ib_header_sge(ic, send->s_sge);
- } else {
- /* We're sending a packet with no payload. There is only
- * one SGE */
- send->s_wr.num_sge = 1;
- sge = &send->s_sge[0];
+ /*
+ * We want to delay signaling completions just enough to get
+ * the batching benefits but not so much that we create dead time
+ * on the wire.
+ */
+ if (ic->i_unsignaled_wrs-- == 0 || notify) {
+ ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
+ send->s_wr.send_flags |= IB_SEND_SIGNALED;
+ return 1;
}
-
- sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
- sge->length = sizeof(struct rds_header);
- sge->lkey = ic->i_mr->lkey;
+ return 0;
}
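With the default rds_ib_sysctl_max_unsig_wrs of 16 (see net/rds/ib_sysctl.c below), roughly one post in seventeen is signaled unless the caller asks for notification. A standalone illustration of the counter (hypothetical, userspace):

#include <stdio.h>

static int should_signal(unsigned long *unsignaled, unsigned long batch,
			 int notify)
{
	if ((*unsignaled)-- == 0 || notify) {
		*unsignaled = batch;
		return 1;
	}
	return 0;
}

int main(void)
{
	unsigned long unsignaled = 16;
	int posts, signaled = 0;

	for (posts = 0; posts < 64; posts++)
		signaled += should_signal(&unsignaled, 16, 0);

	printf("%d of %d posts signaled\n", signaled, posts);	/* 3 of 64 */
	return 0;
}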
/*
@@ -475,13 +535,14 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
u32 pos;
u32 i;
u32 work_alloc;
- u32 credit_alloc;
+ u32 credit_alloc = 0;
u32 posted;
u32 adv_credits = 0;
int send_flags = 0;
- int sent;
+ int bytes_sent = 0;
int ret;
int flow_controlled = 0;
+ int nr_sig = 0;
BUG_ON(off % RDS_FRAG_SIZE);
BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
@@ -507,14 +568,13 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
goto out;
}
- credit_alloc = work_alloc;
if (ic->i_flowctl) {
credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
adv_credits += posted;
if (credit_alloc < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
work_alloc = credit_alloc;
- flow_controlled++;
+ flow_controlled = 1;
}
if (work_alloc == 0) {
set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
@@ -525,31 +585,25 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
}
/* map the message the first time we see it */
- if (ic->i_rm == NULL) {
- /*
- printk(KERN_NOTICE "rds_ib_xmit prep msg dport=%u flags=0x%x len=%d\n",
- be16_to_cpu(rm->m_inc.i_hdr.h_dport),
- rm->m_inc.i_hdr.h_flags,
- be32_to_cpu(rm->m_inc.i_hdr.h_len));
- */
- if (rm->m_nents) {
- rm->m_count = ib_dma_map_sg(dev,
- rm->m_sg, rm->m_nents, DMA_TO_DEVICE);
- rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->m_count);
- if (rm->m_count == 0) {
+ if (!ic->i_data_op) {
+ if (rm->data.op_nents) {
+ rm->data.op_count = ib_dma_map_sg(dev,
+ rm->data.op_sg,
+ rm->data.op_nents,
+ DMA_TO_DEVICE);
+ rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
+ if (rm->data.op_count == 0) {
rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
ret = -ENOMEM; /* XXX ? */
goto out;
}
} else {
- rm->m_count = 0;
+ rm->data.op_count = 0;
}
- ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
- ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
rds_message_addref(rm);
- ic->i_rm = rm;
+ ic->i_data_op = &rm->data;
/* Finalize the header */
if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
@@ -559,10 +613,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
/* If it has a RDMA op, tell the peer we did it. This is
* used by the peer to release use-once RDMA MRs. */
- if (rm->m_rdma_op) {
+ if (rm->rdma.op_active) {
struct rds_ext_header_rdma ext_hdr;
- ext_hdr.h_rdma_rkey = cpu_to_be32(rm->m_rdma_op->r_key);
+ ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
rds_message_add_extension(&rm->m_inc.i_hdr,
RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
}
@@ -582,99 +636,77 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
/*
* Update adv_credits since we reset the ACK_REQUIRED bit.
*/
- rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
- adv_credits += posted;
- BUG_ON(adv_credits > 255);
+ if (ic->i_flowctl) {
+ rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
+ adv_credits += posted;
+ BUG_ON(adv_credits > 255);
+ }
}
- send = &ic->i_sends[pos];
- first = send;
- prev = NULL;
- scat = &rm->m_sg[sg];
- sent = 0;
- i = 0;
-
/* Sometimes you want to put a fence between an RDMA
* READ and the following SEND.
* We could either do this all the time
* or when requested by the user. Right now, we let
* the application choose.
*/
- if (rm->m_rdma_op && rm->m_rdma_op->r_fence)
+ if (rm->rdma.op_active && rm->rdma.op_fence)
send_flags = IB_SEND_FENCE;
- /*
- * We could be copying the header into the unused tail of the page.
- * That would need to be changed in the future when those pages might
- * be mapped userspace pages or page cache pages. So instead we always
- * use a second sge and our long-lived ring of mapped headers. We send
- * the header after the data so that the data payload can be aligned on
- * the receiver.
- */
+ /* Each frag gets a header. Msgs may be 0 bytes */
+ send = &ic->i_sends[pos];
+ first = send;
+ prev = NULL;
+ scat = &ic->i_data_op->op_sg[sg];
+ i = 0;
+ do {
+ unsigned int len = 0;
- /* handle a 0-len message */
- if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) {
- rds_ib_xmit_populate_wr(ic, send, pos, 0, 0, send_flags);
- goto add_header;
- }
+ /* Set up the header */
+ send->s_wr.send_flags = send_flags;
+ send->s_wr.opcode = IB_WR_SEND;
+ send->s_wr.num_sge = 1;
+ send->s_wr.next = NULL;
+ send->s_queued = jiffies;
+ send->s_op = NULL;
- /* if there's data reference it with a chain of work reqs */
- for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) {
- unsigned int len;
+ send->s_sge[0].addr = ic->i_send_hdrs_dma
+ + (pos * sizeof(struct rds_header));
+ send->s_sge[0].length = sizeof(struct rds_header);
- send = &ic->i_sends[pos];
+ memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
- len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
- rds_ib_xmit_populate_wr(ic, send, pos,
- ib_sg_dma_address(dev, scat) + off, len,
- send_flags);
+ /* Set up the data, if present */
+ if (i < work_alloc
+ && scat != &rm->data.op_sg[rm->data.op_count]) {
+ len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
+ send->s_wr.num_sge = 2;
- /*
- * We want to delay signaling completions just enough to get
- * the batching benefits but not so much that we create dead time
- * on the wire.
- */
- if (ic->i_unsignaled_wrs-- == 0) {
- ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
- send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
- }
+ send->s_sge[1].addr = ib_sg_dma_address(dev, scat) + off;
+ send->s_sge[1].length = len;
- ic->i_unsignaled_bytes -= len;
- if (ic->i_unsignaled_bytes <= 0) {
- ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
- send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+ bytes_sent += len;
+ off += len;
+ if (off == ib_sg_dma_len(dev, scat)) {
+ scat++;
+ off = 0;
+ }
}
+ rds_ib_set_wr_signal_state(ic, send, 0);
+
/*
* Always signal the last one if we're stopping due to flow control.
*/
- if (flow_controlled && i == (work_alloc-1))
+ if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+ if (send->s_wr.send_flags & IB_SEND_SIGNALED)
+ nr_sig++;
+
rdsdebug("send %p wr %p num_sge %u next %p\n", send,
&send->s_wr, send->s_wr.num_sge, send->s_wr.next);
- sent += len;
- off += len;
- if (off == ib_sg_dma_len(dev, scat)) {
- scat++;
- off = 0;
- }
-
-add_header:
- /* Tack on the header after the data. The header SGE should already
- * have been set up to point to the right header buffer. */
- memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
-
- if (0) {
- struct rds_header *hdr = &ic->i_send_hdrs[pos];
-
- printk(KERN_NOTICE "send WR dport=%u flags=0x%x len=%d\n",
- be16_to_cpu(hdr->h_dport),
- hdr->h_flags,
- be32_to_cpu(hdr->h_len));
- }
- if (adv_credits) {
+ if (ic->i_flowctl && adv_credits) {
struct rds_header *hdr = &ic->i_send_hdrs[pos];
/* add credit and redo the header checksum */
@@ -689,20 +721,25 @@ add_header:
prev = send;
pos = (pos + 1) % ic->i_send_ring.w_nr;
- }
+ send = &ic->i_sends[pos];
+ i++;
+
+ } while (i < work_alloc
+ && scat != &rm->data.op_sg[rm->data.op_count]);
/* Account the RDS header in the number of bytes we sent, but just once.
* The caller has no concept of fragmentation. */
if (hdr_off == 0)
- sent += sizeof(struct rds_header);
+ bytes_sent += sizeof(struct rds_header);
/* if we finished the message then send completion owns it */
- if (scat == &rm->m_sg[rm->m_count]) {
- prev->s_rm = ic->i_rm;
- prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
- ic->i_rm = NULL;
+ if (scat == &rm->data.op_sg[rm->data.op_count]) {
+ prev->s_op = ic->i_data_op;
+ prev->s_wr.send_flags |= IB_SEND_SOLICITED;
+ ic->i_data_op = NULL;
}
+ /* Put back wrs & credits we didn't use */
if (i < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
work_alloc = i;
@@ -710,6 +747,9 @@ add_header:
if (ic->i_flowctl && i < credit_alloc)
rds_ib_send_add_credits(conn, credit_alloc - i);
+ if (nr_sig)
+ atomic_add(nr_sig, &ic->i_signaled_sends);
+
/* XXX need to worry about failed_wr and partial sends. */
failed_wr = &first->s_wr;
ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
@@ -720,32 +760,127 @@ add_header:
printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
"returned %d\n", &conn->c_faddr, ret);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
- if (prev->s_rm) {
- ic->i_rm = prev->s_rm;
- prev->s_rm = NULL;
+ rds_ib_sub_signaled(ic, nr_sig);
+ if (prev->s_op) {
+ ic->i_data_op = prev->s_op;
+ prev->s_op = NULL;
}
rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
goto out;
}
- ret = sent;
+ ret = bytes_sent;
out:
BUG_ON(adv_credits);
return ret;
}
-int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
+/*
+ * Issue atomic operation.
+ * A simplified version of the rdma case, we always map 1 SG, and
+ * only 8 bytes, for the return value from the atomic operation.
+ */
+int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
+{
+ struct rds_ib_connection *ic = conn->c_transport_data;
+ struct rds_ib_send_work *send = NULL;
+ struct ib_send_wr *failed_wr;
+ struct rds_ib_device *rds_ibdev;
+ u32 pos;
+ u32 work_alloc;
+ int ret;
+ int nr_sig = 0;
+
+ rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
+
+ work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
+ if (work_alloc != 1) {
+ rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
+ rds_ib_stats_inc(s_ib_tx_ring_full);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* address of send request in ring */
+ send = &ic->i_sends[pos];
+ send->s_queued = jiffies;
+
+ if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
+ send->s_wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
+ send->s_wr.wr.atomic.compare_add = op->op_m_cswp.compare;
+ send->s_wr.wr.atomic.swap = op->op_m_cswp.swap;
+ send->s_wr.wr.atomic.compare_add_mask = op->op_m_cswp.compare_mask;
+ send->s_wr.wr.atomic.swap_mask = op->op_m_cswp.swap_mask;
+ } else { /* FADD */
+ send->s_wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
+ send->s_wr.wr.atomic.compare_add = op->op_m_fadd.add;
+ send->s_wr.wr.atomic.swap = 0;
+ send->s_wr.wr.atomic.compare_add_mask = op->op_m_fadd.nocarry_mask;
+ send->s_wr.wr.atomic.swap_mask = 0;
+ }
+ nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
+ send->s_wr.num_sge = 1;
+ send->s_wr.next = NULL;
+ send->s_wr.wr.atomic.remote_addr = op->op_remote_addr;
+ send->s_wr.wr.atomic.rkey = op->op_rkey;
+ send->s_op = op;
+ rds_message_addref(container_of(send->s_op, struct rds_message, atomic));
+
+ /* map 8 byte retval buffer to the device */
+ ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
+ rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
+ if (ret != 1) {
+ rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
+ rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
+ ret = -ENOMEM; /* XXX ? */
+ goto out;
+ }
+
+ /* Convert our struct scatterlist to struct ib_sge */
+ send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
+ send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
+ send->s_sge[0].lkey = ic->i_mr->lkey;
+
+ rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
+ send->s_sge[0].addr, send->s_sge[0].length);
+
+ if (nr_sig)
+ atomic_add(nr_sig, &ic->i_signaled_sends);
+
+ failed_wr = &send->s_wr;
+ ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr);
+ rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
+ send, &send->s_wr, ret, failed_wr);
+ BUG_ON(failed_wr != &send->s_wr);
+ if (ret) {
+ printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
+ "returned %d\n", &conn->c_faddr, ret);
+ rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
+ rds_ib_sub_signaled(ic, nr_sig);
+ goto out;
+ }
+
+ if (unlikely(failed_wr != &send->s_wr)) {
+ printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
+ BUG_ON(failed_wr != &send->s_wr);
+ }
+
+out:
+ return ret;
+}
+
+int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct rds_ib_send_work *send = NULL;
struct rds_ib_send_work *first;
struct rds_ib_send_work *prev;
struct ib_send_wr *failed_wr;
- struct rds_ib_device *rds_ibdev;
struct scatterlist *scat;
unsigned long len;
- u64 remote_addr = op->r_remote_addr;
+ u64 remote_addr = op->op_remote_addr;
+ u32 max_sge = ic->rds_ibdev->max_sge;
u32 pos;
u32 work_alloc;
u32 i;
@@ -753,29 +888,28 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
int sent;
int ret;
int num_sge;
-
- rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
-
- /* map the message the first time we see it */
- if (!op->r_mapped) {
- op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
- op->r_sg, op->r_nents, (op->r_write) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
- if (op->r_count == 0) {
+ int nr_sig = 0;
+
+ /* map the op the first time we see it */
+ if (!op->op_mapped) {
+ op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
+ op->op_sg, op->op_nents, (op->op_write) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
+ if (op->op_count == 0) {
rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
ret = -ENOMEM; /* XXX ? */
goto out;
}
- op->r_mapped = 1;
+ op->op_mapped = 1;
}
/*
* Instead of knowing how to return a partial rdma read/write we insist that there
* be enough work requests to send the entire message.
*/
- i = ceil(op->r_count, rds_ibdev->max_sge);
+ i = ceil(op->op_count, max_sge);
work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
if (work_alloc != i) {
@@ -788,30 +922,24 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
send = &ic->i_sends[pos];
first = send;
prev = NULL;
- scat = &op->r_sg[0];
+ scat = &op->op_sg[0];
sent = 0;
- num_sge = op->r_count;
+ num_sge = op->op_count;
- for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
+ for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
send->s_wr.send_flags = 0;
send->s_queued = jiffies;
- /*
- * We want to delay signaling completions just enough to get
- * the batching benefits but not so much that we create dead time on the wire.
- */
- if (ic->i_unsignaled_wrs-- == 0) {
- ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
- send->s_wr.send_flags = IB_SEND_SIGNALED;
- }
+ send->s_op = NULL;
+
+ nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);
- send->s_wr.opcode = op->r_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
+ send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
send->s_wr.wr.rdma.remote_addr = remote_addr;
- send->s_wr.wr.rdma.rkey = op->r_key;
- send->s_op = op;
+ send->s_wr.wr.rdma.rkey = op->op_rkey;
- if (num_sge > rds_ibdev->max_sge) {
- send->s_wr.num_sge = rds_ibdev->max_sge;
- num_sge -= rds_ibdev->max_sge;
+ if (num_sge > max_sge) {
+ send->s_wr.num_sge = max_sge;
+ num_sge -= max_sge;
} else {
send->s_wr.num_sge = num_sge;
}
@@ -821,7 +949,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
if (prev)
prev->s_wr.next = &send->s_wr;
- for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
+ for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
len = ib_sg_dma_len(ic->i_cm_id->device, scat);
send->s_sge[j].addr =
ib_sg_dma_address(ic->i_cm_id->device, scat);
@@ -843,15 +971,20 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
send = ic->i_sends;
}
- /* if we finished the message then send completion owns it */
- if (scat == &op->r_sg[op->r_count])
- prev->s_wr.send_flags = IB_SEND_SIGNALED;
+ /* give a reference to the last op */
+ if (scat == &op->op_sg[op->op_count]) {
+ prev->s_op = op;
+ rds_message_addref(container_of(op, struct rds_message, rdma));
+ }
if (i < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
work_alloc = i;
}
+ if (nr_sig)
+ atomic_add(nr_sig, &ic->i_signaled_sends);
+
failed_wr = &first->s_wr;
ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
@@ -861,6 +994,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
"returned %d\n", &conn->c_faddr, ret);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
+ rds_ib_sub_signaled(ic, nr_sig);
goto out;
}
diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c
index d2c904dd6fbc..2d5965d6e97c 100644
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -67,6 +67,8 @@ static const char *const rds_ib_stat_names[] = {
"ib_rdma_mr_pool_flush",
"ib_rdma_mr_pool_wait",
"ib_rdma_mr_pool_depleted",
+ "ib_atomic_cswp",
+ "ib_atomic_fadd",
};
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
diff --git a/net/rds/ib_sysctl.c b/net/rds/ib_sysctl.c
index 03f01cb4e0fe..fc3da37220fd 100644
--- a/net/rds/ib_sysctl.c
+++ b/net/rds/ib_sysctl.c
@@ -49,10 +49,6 @@ unsigned long rds_ib_sysctl_max_unsig_wrs = 16;
static unsigned long rds_ib_sysctl_max_unsig_wr_min = 1;
static unsigned long rds_ib_sysctl_max_unsig_wr_max = 64;
-unsigned long rds_ib_sysctl_max_unsig_bytes = (16 << 20);
-static unsigned long rds_ib_sysctl_max_unsig_bytes_min = 1;
-static unsigned long rds_ib_sysctl_max_unsig_bytes_max = ~0UL;
-
/*
* This sysctl does nothing.
*
@@ -94,15 +90,6 @@ ctl_table rds_ib_sysctl_table[] = {
.extra2 = &rds_ib_sysctl_max_unsig_wr_max,
},
{
- .procname = "max_unsignaled_bytes",
- .data = &rds_ib_sysctl_max_unsig_bytes,
- .maxlen = sizeof(unsigned long),
- .mode = 0644,
- .proc_handler = proc_doulongvec_minmax,
- .extra1 = &rds_ib_sysctl_max_unsig_bytes_min,
- .extra2 = &rds_ib_sysctl_max_unsig_bytes_max,
- },
- {
.procname = "max_recv_allocation",
.data = &rds_ib_sysctl_max_recv_allocation,
.maxlen = sizeof(unsigned long),
@@ -132,10 +119,10 @@ void rds_ib_sysctl_exit(void)
unregister_sysctl_table(rds_ib_sysctl_hdr);
}
-int __init rds_ib_sysctl_init(void)
+int rds_ib_sysctl_init(void)
{
rds_ib_sysctl_hdr = register_sysctl_paths(rds_ib_sysctl_path, rds_ib_sysctl_table);
- if (rds_ib_sysctl_hdr == NULL)
+ if (!rds_ib_sysctl_hdr)
return -ENOMEM;
return 0;
}
diff --git a/net/rds/info.c b/net/rds/info.c
index c45c4173a44d..4fdf1b6e84ff 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -76,7 +76,7 @@ void rds_info_register_func(int optname, rds_info_func func)
BUG_ON(optname < RDS_INFO_FIRST || optname > RDS_INFO_LAST);
spin_lock(&rds_info_lock);
- BUG_ON(rds_info_funcs[offset] != NULL);
+ BUG_ON(rds_info_funcs[offset]);
rds_info_funcs[offset] = func;
spin_unlock(&rds_info_lock);
}
@@ -102,7 +102,7 @@ EXPORT_SYMBOL_GPL(rds_info_deregister_func);
*/
void rds_info_iter_unmap(struct rds_info_iterator *iter)
{
- if (iter->addr != NULL) {
+ if (iter->addr) {
kunmap_atomic(iter->addr, KM_USER0);
iter->addr = NULL;
}
@@ -117,7 +117,7 @@ void rds_info_copy(struct rds_info_iterator *iter, void *data,
unsigned long this;
while (bytes) {
- if (iter->addr == NULL)
+ if (!iter->addr)
iter->addr = kmap_atomic(*iter->pages, KM_USER0);
this = min(bytes, PAGE_SIZE - iter->offset);
@@ -188,7 +188,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
>> PAGE_SHIFT;
pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
- if (pages == NULL) {
+ if (!pages) {
ret = -ENOMEM;
goto out;
}
@@ -206,7 +206,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
call_func:
func = rds_info_funcs[optname - RDS_INFO_FIRST];
- if (func == NULL) {
+ if (!func) {
ret = -ENOPROTOOPT;
goto out;
}
@@ -234,7 +234,7 @@ call_func:
ret = -EFAULT;
out:
- for (i = 0; pages != NULL && i < nr_pages; i++)
+ for (i = 0; pages && i < nr_pages; i++)
put_page(pages[i]);
kfree(pages);
diff --git a/net/rds/iw.c b/net/rds/iw.c
index c8f3d3525cb9..56808cac0fc7 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -264,7 +264,6 @@ struct rds_transport rds_iw_transport = {
.laddr_check = rds_iw_laddr_check,
.xmit_complete = rds_iw_xmit_complete,
.xmit = rds_iw_xmit,
- .xmit_cong_map = NULL,
.xmit_rdma = rds_iw_xmit_rdma,
.recv = rds_iw_recv,
.conn_alloc = rds_iw_conn_alloc,
@@ -272,7 +271,6 @@ struct rds_transport rds_iw_transport = {
.conn_connect = rds_iw_conn_connect,
.conn_shutdown = rds_iw_conn_shutdown,
.inc_copy_to_user = rds_iw_inc_copy_to_user,
- .inc_purge = rds_iw_inc_purge,
.inc_free = rds_iw_inc_free,
.cm_initiate_connect = rds_iw_cm_initiate_connect,
.cm_handle_connect = rds_iw_cm_handle_connect,
@@ -289,7 +287,7 @@ struct rds_transport rds_iw_transport = {
.t_prefer_loopback = 1,
};
-int __init rds_iw_init(void)
+int rds_iw_init(void)
{
int ret;
diff --git a/net/rds/iw.h b/net/rds/iw.h
index eef2f0c28476..543e665fafe3 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -70,7 +70,7 @@ struct rds_iw_send_work {
struct rds_message *s_rm;
/* We should really put these into a union: */
- struct rds_rdma_op *s_op;
+ struct rm_rdma_op *s_op;
struct rds_iw_mapping *s_mapping;
struct ib_mr *s_mr;
struct ib_fast_reg_page_list *s_page_list;
@@ -284,7 +284,7 @@ void rds_iw_conn_free(void *arg);
int rds_iw_conn_connect(struct rds_connection *conn);
void rds_iw_conn_shutdown(struct rds_connection *conn);
void rds_iw_state_change(struct sock *sk);
-int __init rds_iw_listen_init(void);
+int rds_iw_listen_init(void);
void rds_iw_listen_stop(void);
void __rds_iw_conn_error(struct rds_connection *conn, const char *, ...);
int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
@@ -321,12 +321,11 @@ void rds_iw_flush_mrs(void);
void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);
/* ib_recv.c */
-int __init rds_iw_recv_init(void);
+int rds_iw_recv_init(void);
void rds_iw_recv_exit(void);
int rds_iw_recv(struct rds_connection *conn);
int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
gfp_t page_gfp, int prefill);
-void rds_iw_inc_purge(struct rds_incoming *inc);
void rds_iw_inc_free(struct rds_incoming *inc);
int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
size_t size);
@@ -358,7 +357,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_iw_send_init_ring(struct rds_iw_connection *ic);
void rds_iw_send_clear_ring(struct rds_iw_connection *ic);
-int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
+int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted,
@@ -371,7 +370,7 @@ unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter,
unsigned int avail);
/* ib_sysctl.c */
-int __init rds_iw_sysctl_init(void);
+int rds_iw_sysctl_init(void);
void rds_iw_sysctl_exit(void);
extern unsigned long rds_iw_sysctl_max_send_wr;
extern unsigned long rds_iw_sysctl_max_recv_wr;
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index b5dd6ac39be8..712cf2d1f28e 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -257,7 +257,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
* the rds_iwdev at all.
*/
rds_iwdev = ib_get_client_data(dev, &rds_iw_client);
- if (rds_iwdev == NULL) {
+ if (!rds_iwdev) {
if (printk_ratelimit())
printk(KERN_NOTICE "RDS/IW: No client_data for device %s\n",
dev->name);
@@ -292,7 +292,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
ic->i_send_ring.w_nr *
sizeof(struct rds_header),
&ic->i_send_hdrs_dma, GFP_KERNEL);
- if (ic->i_send_hdrs == NULL) {
+ if (!ic->i_send_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent send failed\n");
goto out;
@@ -302,7 +302,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
ic->i_recv_ring.w_nr *
sizeof(struct rds_header),
&ic->i_recv_hdrs_dma, GFP_KERNEL);
- if (ic->i_recv_hdrs == NULL) {
+ if (!ic->i_recv_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent recv failed\n");
goto out;
@@ -310,14 +310,14 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
&ic->i_ack_dma, GFP_KERNEL);
- if (ic->i_ack == NULL) {
+ if (!ic->i_ack) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent ack failed\n");
goto out;
}
ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_iw_send_work));
- if (ic->i_sends == NULL) {
+ if (!ic->i_sends) {
ret = -ENOMEM;
rdsdebug("send allocation failed\n");
goto out;
@@ -325,7 +325,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
rds_iw_send_init_ring(ic);
ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_iw_recv_work));
- if (ic->i_recvs == NULL) {
+ if (!ic->i_recvs) {
ret = -ENOMEM;
rdsdebug("recv allocation failed\n");
goto out;
@@ -696,7 +696,7 @@ int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp)
/* XXX too lazy? */
ic = kzalloc(sizeof(struct rds_iw_connection), GFP_KERNEL);
- if (ic == NULL)
+ if (!ic)
return -ENOMEM;
INIT_LIST_HEAD(&ic->iw_node);
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index 13dc1862d862..0e7accc23ee2 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -34,7 +34,6 @@
#include <linux/slab.h>
#include "rds.h"
-#include "rdma.h"
#include "iw.h"
@@ -207,9 +206,9 @@ void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *con
BUG_ON(list_empty(&ic->iw_node));
list_del(&ic->iw_node);
- spin_lock_irq(&rds_iwdev->spinlock);
+ spin_lock(&rds_iwdev->spinlock);
list_add_tail(&ic->iw_node, &rds_iwdev->conn_list);
- spin_unlock_irq(&rds_iwdev->spinlock);
+ spin_unlock(&rds_iwdev->spinlock);
spin_unlock_irq(&iw_nodev_conns_lock);
ic->rds_iwdev = rds_iwdev;
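
The iw_rdma.c hunk above drops the _irq suffix on the inner per-device lock because the outer iw_nodev_conns_lock is already held with interrupts disabled; an inner spin_unlock_irq() would have re-enabled interrupts while the outer lock was still held. A minimal sketch of that nested-lock pattern, with hypothetical lock and list names (not the RDS ones):

#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(outer_lock);	/* taken with interrupts disabled */
static DEFINE_SPINLOCK(inner_lock);	/* only ever nested inside outer_lock */

static void move_entry(struct list_head *entry, struct list_head *dst)
{
	spin_lock_irq(&outer_lock);	/* interrupts stay off for the whole section */
	list_del(entry);

	spin_lock(&inner_lock);		/* plain lock: irqs are already disabled */
	list_add_tail(entry, dst);
	spin_unlock(&inner_lock);	/* must not re-enable irqs mid-section */

	spin_unlock_irq(&outer_lock);
}
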
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index 3d479067d54d..5e57347f49ff 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -53,7 +53,7 @@ static void rds_iw_frag_drop_page(struct rds_page_frag *frag)
static void rds_iw_frag_free(struct rds_page_frag *frag)
{
rdsdebug("frag %p page %p\n", frag, frag->f_page);
- BUG_ON(frag->f_page != NULL);
+ BUG_ON(frag->f_page);
kmem_cache_free(rds_iw_frag_slab, frag);
}
@@ -143,14 +143,14 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn,
struct ib_sge *sge;
int ret = -ENOMEM;
- if (recv->r_iwinc == NULL) {
+ if (!recv->r_iwinc) {
if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) {
rds_iw_stats_inc(s_iw_rx_alloc_limit);
goto out;
}
recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab,
kptr_gfp);
- if (recv->r_iwinc == NULL) {
+ if (!recv->r_iwinc) {
atomic_dec(&rds_iw_allocation);
goto out;
}
@@ -158,17 +158,17 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn,
rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
}
- if (recv->r_frag == NULL) {
+ if (!recv->r_frag) {
recv->r_frag = kmem_cache_alloc(rds_iw_frag_slab, kptr_gfp);
- if (recv->r_frag == NULL)
+ if (!recv->r_frag)
goto out;
INIT_LIST_HEAD(&recv->r_frag->f_item);
recv->r_frag->f_page = NULL;
}
- if (ic->i_frag.f_page == NULL) {
+ if (!ic->i_frag.f_page) {
ic->i_frag.f_page = alloc_page(page_gfp);
- if (ic->i_frag.f_page == NULL)
+ if (!ic->i_frag.f_page)
goto out;
ic->i_frag.f_offset = 0;
}
@@ -273,7 +273,7 @@ int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
return ret;
}
-void rds_iw_inc_purge(struct rds_incoming *inc)
+static void rds_iw_inc_purge(struct rds_incoming *inc)
{
struct rds_iw_incoming *iwinc;
struct rds_page_frag *frag;
@@ -716,7 +716,7 @@ static void rds_iw_process_recv(struct rds_connection *conn,
* into the inc and save the inc so we can hang upcoming fragments
* off its list.
*/
- if (iwinc == NULL) {
+ if (!iwinc) {
iwinc = recv->r_iwinc;
recv->r_iwinc = NULL;
ic->i_iwinc = iwinc;
@@ -887,7 +887,7 @@ int rds_iw_recv(struct rds_connection *conn)
return ret;
}
-int __init rds_iw_recv_init(void)
+int rds_iw_recv_init(void)
{
struct sysinfo si;
int ret = -ENOMEM;
@@ -899,13 +899,13 @@ int __init rds_iw_recv_init(void)
rds_iw_incoming_slab = kmem_cache_create("rds_iw_incoming",
sizeof(struct rds_iw_incoming),
0, 0, NULL);
- if (rds_iw_incoming_slab == NULL)
+ if (!rds_iw_incoming_slab)
goto out;
rds_iw_frag_slab = kmem_cache_create("rds_iw_frag",
sizeof(struct rds_page_frag),
0, 0, NULL);
- if (rds_iw_frag_slab == NULL)
+ if (!rds_iw_frag_slab)
kmem_cache_destroy(rds_iw_incoming_slab);
else
ret = 0;
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 52182ff7519e..6280ea020d4e 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -36,7 +36,6 @@
#include <linux/dmapool.h>
#include "rds.h"
-#include "rdma.h"
#include "iw.h"
static void rds_iw_send_rdma_complete(struct rds_message *rm,
@@ -64,13 +63,13 @@ static void rds_iw_send_rdma_complete(struct rds_message *rm,
}
static void rds_iw_send_unmap_rdma(struct rds_iw_connection *ic,
- struct rds_rdma_op *op)
+ struct rm_rdma_op *op)
{
- if (op->r_mapped) {
+ if (op->op_mapped) {
ib_dma_unmap_sg(ic->i_cm_id->device,
- op->r_sg, op->r_nents,
- op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- op->r_mapped = 0;
+ op->op_sg, op->op_nents,
+ op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ op->op_mapped = 0;
}
}
@@ -83,11 +82,11 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
rdsdebug("ic %p send %p rm %p\n", ic, send, rm);
ib_dma_unmap_sg(ic->i_cm_id->device,
- rm->m_sg, rm->m_nents,
+ rm->data.op_sg, rm->data.op_nents,
DMA_TO_DEVICE);
- if (rm->m_rdma_op != NULL) {
- rds_iw_send_unmap_rdma(ic, rm->m_rdma_op);
+ if (rm->rdma.op_active) {
+ rds_iw_send_unmap_rdma(ic, &rm->rdma);
/* If the user asked for a completion notification on this
* message, we can implement three different semantics:
@@ -111,10 +110,10 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
*/
rds_iw_send_rdma_complete(rm, wc_status);
- if (rm->m_rdma_op->r_write)
- rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes);
+ if (rm->rdma.op_write)
+ rds_stats_add(s_send_rdma_bytes, rm->rdma.op_bytes);
else
- rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes);
+ rds_stats_add(s_recv_rdma_bytes, rm->rdma.op_bytes);
}
/* If anyone waited for this message to get flushed out, wake
@@ -556,25 +555,27 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
}
/* map the message the first time we see it */
- if (ic->i_rm == NULL) {
+ if (!ic->i_rm) {
/*
printk(KERN_NOTICE "rds_iw_xmit prep msg dport=%u flags=0x%x len=%d\n",
be16_to_cpu(rm->m_inc.i_hdr.h_dport),
rm->m_inc.i_hdr.h_flags,
be32_to_cpu(rm->m_inc.i_hdr.h_len));
*/
- if (rm->m_nents) {
- rm->m_count = ib_dma_map_sg(dev,
- rm->m_sg, rm->m_nents, DMA_TO_DEVICE);
- rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->m_count);
- if (rm->m_count == 0) {
+ if (rm->data.op_nents) {
+ rm->data.op_count = ib_dma_map_sg(dev,
+ rm->data.op_sg,
+ rm->data.op_nents,
+ DMA_TO_DEVICE);
+ rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
+ if (rm->data.op_count == 0) {
rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
ret = -ENOMEM; /* XXX ? */
goto out;
}
} else {
- rm->m_count = 0;
+ rm->data.op_count = 0;
}
ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
@@ -590,10 +591,10 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
/* If it has a RDMA op, tell the peer we did it. This is
* used by the peer to release use-once RDMA MRs. */
- if (rm->m_rdma_op) {
+ if (rm->rdma.op_active) {
struct rds_ext_header_rdma ext_hdr;
- ext_hdr.h_rdma_rkey = cpu_to_be32(rm->m_rdma_op->r_key);
+ ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
rds_message_add_extension(&rm->m_inc.i_hdr,
RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
}
@@ -621,7 +622,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
send = &ic->i_sends[pos];
first = send;
prev = NULL;
- scat = &rm->m_sg[sg];
+ scat = &rm->data.op_sg[sg];
sent = 0;
i = 0;
@@ -631,7 +632,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
* or when requested by the user. Right now, we let
* the application choose.
*/
- if (rm->m_rdma_op && rm->m_rdma_op->r_fence)
+ if (rm->rdma.op_active && rm->rdma.op_fence)
send_flags = IB_SEND_FENCE;
/*
@@ -650,7 +651,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
}
/* if there's data reference it with a chain of work reqs */
- for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) {
+ for (; i < work_alloc && scat != &rm->data.op_sg[rm->data.op_count]; i++) {
unsigned int len;
send = &ic->i_sends[pos];
@@ -728,7 +729,7 @@ add_header:
sent += sizeof(struct rds_header);
/* if we finished the message then send completion owns it */
- if (scat == &rm->m_sg[rm->m_count]) {
+ if (scat == &rm->data.op_sg[rm->data.op_count]) {
prev->s_rm = ic->i_rm;
prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
ic->i_rm = NULL;
@@ -784,7 +785,7 @@ static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rd
ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);
}
-int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
+int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
struct rds_iw_connection *ic = conn->c_transport_data;
struct rds_iw_send_work *send = NULL;
@@ -794,7 +795,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
struct rds_iw_device *rds_iwdev;
struct scatterlist *scat;
unsigned long len;
- u64 remote_addr = op->r_remote_addr;
+ u64 remote_addr = op->op_remote_addr;
u32 pos, fr_pos;
u32 work_alloc;
u32 i;
@@ -806,21 +807,21 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
/* map the message the first time we see it */
- if (!op->r_mapped) {
- op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
- op->r_sg, op->r_nents, (op->r_write) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
- if (op->r_count == 0) {
+ if (!op->op_mapped) {
+ op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
+ op->op_sg, op->op_nents, (op->op_write) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
+ if (op->op_count == 0) {
rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
ret = -ENOMEM; /* XXX ? */
goto out;
}
- op->r_mapped = 1;
+ op->op_mapped = 1;
}
- if (!op->r_write) {
+ if (!op->op_write) {
/* Alloc space on the send queue for the fastreg */
work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, 1, &fr_pos);
if (work_alloc != 1) {
@@ -835,7 +836,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
* Instead of knowing how to return a partial rdma read/write we insist that there
* be enough work requests to send the entire message.
*/
- i = ceil(op->r_count, rds_iwdev->max_sge);
+ i = ceil(op->op_count, rds_iwdev->max_sge);
work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
if (work_alloc != i) {
@@ -846,17 +847,17 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
}
send = &ic->i_sends[pos];
- if (!op->r_write) {
+ if (!op->op_write) {
first = prev = &ic->i_sends[fr_pos];
} else {
first = send;
prev = NULL;
}
- scat = &op->r_sg[0];
+ scat = &op->op_sg[0];
sent = 0;
- num_sge = op->r_count;
+ num_sge = op->op_count;
- for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
+ for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
send->s_wr.send_flags = 0;
send->s_queued = jiffies;
@@ -873,13 +874,13 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
* for local access after RDS is finished with it, using
* IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed.
*/
- if (op->r_write)
+ if (op->op_write)
send->s_wr.opcode = IB_WR_RDMA_WRITE;
else
send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
send->s_wr.wr.rdma.remote_addr = remote_addr;
- send->s_wr.wr.rdma.rkey = op->r_key;
+ send->s_wr.wr.rdma.rkey = op->op_rkey;
send->s_op = op;
if (num_sge > rds_iwdev->max_sge) {
@@ -893,7 +894,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
if (prev)
prev->s_wr.next = &send->s_wr;
- for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
+ for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
len = ib_sg_dma_len(ic->i_cm_id->device, scat);
if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV)
@@ -927,7 +928,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
}
/* if we finished the message then send completion owns it */
- if (scat == &op->r_sg[op->r_count])
+ if (scat == &op->op_sg[op->op_count])
first->s_wr.send_flags = IB_SEND_SIGNALED;
if (i < work_alloc) {
@@ -941,9 +942,9 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
* adapters do not allow using the lkey for this at all. To bypass this use a
* fastreg_mr (or possibly a dma_mr)
*/
- if (!op->r_write) {
+ if (!op->op_write) {
rds_iw_build_send_fastreg(rds_iwdev, ic, &ic->i_sends[fr_pos],
- op->r_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
+ op->op_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
work_alloc++;
}
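
iw_send.c maps a message's scatterlist only the first time rds_iw_xmit()/rds_iw_xmit_rdma() sees it and records that in op_mapped, so re-entering the send path for a partially sent message never double-maps, and the completion path unmaps exactly once. A rough sketch of the map-once idea, with simplified helper names (the real code also bumps a stats counter on mapping failure):

/* Map once, on the first transmit attempt; op_mapped guards re-entry. */
static int map_rdma_op_once(struct ib_device *dev, struct rm_rdma_op *op)
{
	if (op->op_mapped)
		return 0;

	op->op_count = ib_dma_map_sg(dev, op->op_sg, op->op_nents,
				     op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (op->op_count == 0)
		return -ENOMEM;

	op->op_mapped = 1;
	return 0;
}

/* Undo it exactly once, from the send-completion path. */
static void unmap_rdma_op(struct ib_device *dev, struct rm_rdma_op *op)
{
	if (!op->op_mapped)
		return;

	ib_dma_unmap_sg(dev, op->op_sg, op->op_nents,
			op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	op->op_mapped = 0;
}
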
diff --git a/net/rds/iw_sysctl.c b/net/rds/iw_sysctl.c
index 1c4428a61a02..23e3a9a26aaf 100644
--- a/net/rds/iw_sysctl.c
+++ b/net/rds/iw_sysctl.c
@@ -122,10 +122,10 @@ void rds_iw_sysctl_exit(void)
unregister_sysctl_table(rds_iw_sysctl_hdr);
}
-int __init rds_iw_sysctl_init(void)
+int rds_iw_sysctl_init(void)
{
rds_iw_sysctl_hdr = register_sysctl_paths(rds_iw_sysctl_path, rds_iw_sysctl_table);
- if (rds_iw_sysctl_hdr == NULL)
+ if (!rds_iw_sysctl_hdr)
return -ENOMEM;
return 0;
}
diff --git a/net/rds/loop.c b/net/rds/loop.c
index dd9879379457..c390156b426f 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -61,10 +61,17 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg,
unsigned int off)
{
+ /* Do not send cong updates to loopback */
+ if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
+ rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
+ return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+ }
+
BUG_ON(hdr_off || sg || off);
rds_inc_init(&rm->m_inc, conn, conn->c_laddr);
- rds_message_addref(rm); /* for the inc */
+ /* For the embedded inc. Matching put is in loop_inc_free() */
+ rds_message_addref(rm);
rds_recv_incoming(conn, conn->c_laddr, conn->c_faddr, &rm->m_inc,
GFP_KERNEL, KM_USER0);
@@ -77,16 +84,14 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len);
}
-static int rds_loop_xmit_cong_map(struct rds_connection *conn,
- struct rds_cong_map *map,
- unsigned long offset)
+/*
+ * See rds_loop_xmit(). Since our inc is embedded in the rm, we
+ * make sure the rm lives at least until the inc is done.
+ */
+static void rds_loop_inc_free(struct rds_incoming *inc)
{
- BUG_ON(offset);
- BUG_ON(map != conn->c_lcong);
-
- rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
-
- return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+ struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
+ rds_message_put(rm);
}
/* we need to at least give the thread something to succeed */
@@ -112,7 +117,7 @@ static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp)
unsigned long flags;
lc = kzalloc(sizeof(struct rds_loop_connection), GFP_KERNEL);
- if (lc == NULL)
+ if (!lc)
return -ENOMEM;
INIT_LIST_HEAD(&lc->loop_node);
@@ -169,14 +174,12 @@ void rds_loop_exit(void)
*/
struct rds_transport rds_loop_transport = {
.xmit = rds_loop_xmit,
- .xmit_cong_map = rds_loop_xmit_cong_map,
.recv = rds_loop_recv,
.conn_alloc = rds_loop_conn_alloc,
.conn_free = rds_loop_conn_free,
.conn_connect = rds_loop_conn_connect,
.conn_shutdown = rds_loop_conn_shutdown,
.inc_copy_to_user = rds_message_inc_copy_to_user,
- .inc_purge = rds_message_inc_purge,
- .inc_free = rds_message_inc_free,
+ .inc_free = rds_loop_inc_free,
.t_name = "loopback",
};
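
With xmit_cong_map gone, the loopback transport ties the lifetime of the embedded rds_incoming to its rds_message: rds_loop_xmit() takes a reference before handing m_inc to rds_recv_incoming(), and rds_loop_inc_free() recovers the message with container_of() and drops it. A stripped-down sketch of that embedded-object refcount pattern, using generic kref-based names rather than the RDS structures:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>

struct incoming {			/* embedded in its owning message */
	struct list_head item;
};

struct message {
	struct kref	ref;
	struct incoming	inc;
};

static void message_release(struct kref *ref)
{
	kfree(container_of(ref, struct message, ref));
}

/* Sender: the embedded inc borrows a reference on the whole message. */
static void deliver_loopback(struct message *msg)
{
	kref_get(&msg->ref);		/* matched by the put in inc_free() */
	/* ... queue &msg->inc to the local receive path ... */
}

/* Receiver: freeing the inc releases the message reference. */
static void inc_free(struct incoming *inc)
{
	struct message *msg = container_of(inc, struct message, inc);

	kref_put(&msg->ref, message_release);
}
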
diff --git a/net/rds/message.c b/net/rds/message.c
index 9a1d67e001ba..84f937f11d47 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -34,9 +34,6 @@
#include <linux/slab.h>
#include "rds.h"
-#include "rdma.h"
-
-static DECLARE_WAIT_QUEUE_HEAD(rds_message_flush_waitq);
static unsigned int rds_exthdr_size[__RDS_EXTHDR_MAX] = {
[RDS_EXTHDR_NONE] = 0,
@@ -63,29 +60,31 @@ static void rds_message_purge(struct rds_message *rm)
if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
return;
- for (i = 0; i < rm->m_nents; i++) {
- rdsdebug("putting data page %p\n", (void *)sg_page(&rm->m_sg[i]));
+ for (i = 0; i < rm->data.op_nents; i++) {
+ rdsdebug("putting data page %p\n", (void *)sg_page(&rm->data.op_sg[i]));
/* XXX will have to put_page for page refs */
- __free_page(sg_page(&rm->m_sg[i]));
+ __free_page(sg_page(&rm->data.op_sg[i]));
}
- rm->m_nents = 0;
+ rm->data.op_nents = 0;
- if (rm->m_rdma_op)
- rds_rdma_free_op(rm->m_rdma_op);
- if (rm->m_rdma_mr)
- rds_mr_put(rm->m_rdma_mr);
-}
+ if (rm->rdma.op_active)
+ rds_rdma_free_op(&rm->rdma);
+ if (rm->rdma.op_rdma_mr)
+ rds_mr_put(rm->rdma.op_rdma_mr);
-void rds_message_inc_purge(struct rds_incoming *inc)
-{
- struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
- rds_message_purge(rm);
+ if (rm->atomic.op_active)
+ rds_atomic_free_op(&rm->atomic);
+ if (rm->atomic.op_rdma_mr)
+ rds_mr_put(rm->atomic.op_rdma_mr);
}
void rds_message_put(struct rds_message *rm)
{
rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
-
+ if (atomic_read(&rm->m_refcount) == 0) {
+ printk(KERN_CRIT "danger refcount zero on %p\n", rm);
+ WARN_ON(1);
+ }
if (atomic_dec_and_test(&rm->m_refcount)) {
BUG_ON(!list_empty(&rm->m_sock_item));
BUG_ON(!list_empty(&rm->m_conn_item));
@@ -96,12 +95,6 @@ void rds_message_put(struct rds_message *rm)
}
EXPORT_SYMBOL_GPL(rds_message_put);
-void rds_message_inc_free(struct rds_incoming *inc)
-{
- struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
- rds_message_put(rm);
-}
-
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
__be16 dport, u64 seq)
{
@@ -214,41 +207,68 @@ int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 o
}
EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension);
-struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp)
+/*
+ * Each rds_message is allocated with extra space for the scatterlist entries
+ * rds ops will need. This is to minimize memory allocation count. Then, each rds op
+ * can grab SGs when initializing its part of the rds_message.
+ */
+struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
{
struct rds_message *rm;
- rm = kzalloc(sizeof(struct rds_message) +
- (nents * sizeof(struct scatterlist)), gfp);
+ rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
if (!rm)
goto out;
- if (nents)
- sg_init_table(rm->m_sg, nents);
+ rm->m_used_sgs = 0;
+ rm->m_total_sgs = extra_len / sizeof(struct scatterlist);
+
atomic_set(&rm->m_refcount, 1);
INIT_LIST_HEAD(&rm->m_sock_item);
INIT_LIST_HEAD(&rm->m_conn_item);
spin_lock_init(&rm->m_rs_lock);
+ init_waitqueue_head(&rm->m_flush_wait);
out:
return rm;
}
+/*
+ * RDS ops use this to grab SG entries from the rm's sg pool.
+ */
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
+{
+ struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
+ struct scatterlist *sg_ret;
+
+ WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
+ WARN_ON(!nents);
+
+ sg_ret = &sg_first[rm->m_used_sgs];
+ sg_init_table(sg_ret, nents);
+ rm->m_used_sgs += nents;
+
+ return sg_ret;
+}
+
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len)
{
struct rds_message *rm;
unsigned int i;
+ int num_sgs = ceil(total_len, PAGE_SIZE);
+ int extra_bytes = num_sgs * sizeof(struct scatterlist);
- rm = rds_message_alloc(ceil(total_len, PAGE_SIZE), GFP_KERNEL);
- if (rm == NULL)
+ rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
+ if (!rm)
return ERR_PTR(-ENOMEM);
set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
- rm->m_nents = ceil(total_len, PAGE_SIZE);
+ rm->data.op_nents = ceil(total_len, PAGE_SIZE);
+ rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
- for (i = 0; i < rm->m_nents; ++i) {
- sg_set_page(&rm->m_sg[i],
+ for (i = 0; i < rm->data.op_nents; ++i) {
+ sg_set_page(&rm->data.op_sg[i],
virt_to_page(page_addrs[i]),
PAGE_SIZE, 0);
}
@@ -256,40 +276,33 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
return rm;
}
-struct rds_message *rds_message_copy_from_user(struct iovec *first_iov,
+int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
size_t total_len)
{
unsigned long to_copy;
unsigned long iov_off;
unsigned long sg_off;
- struct rds_message *rm;
struct iovec *iov;
struct scatterlist *sg;
- int ret;
-
- rm = rds_message_alloc(ceil(total_len, PAGE_SIZE), GFP_KERNEL);
- if (rm == NULL) {
- ret = -ENOMEM;
- goto out;
- }
+ int ret = 0;
rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
/*
* now allocate and copy in the data payload.
*/
- sg = rm->m_sg;
+ sg = rm->data.op_sg;
iov = first_iov;
iov_off = 0;
sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */
while (total_len) {
- if (sg_page(sg) == NULL) {
+ if (!sg_page(sg)) {
ret = rds_page_remainder_alloc(sg, total_len,
GFP_HIGHUSER);
if (ret)
goto out;
- rm->m_nents++;
+ rm->data.op_nents++;
sg_off = 0;
}
@@ -320,14 +333,8 @@ struct rds_message *rds_message_copy_from_user(struct iovec *first_iov,
sg++;
}
- ret = 0;
out:
- if (ret) {
- if (rm)
- rds_message_put(rm);
- rm = ERR_PTR(ret);
- }
- return rm;
+ return ret;
}
int rds_message_inc_copy_to_user(struct rds_incoming *inc,
@@ -348,7 +355,7 @@ int rds_message_inc_copy_to_user(struct rds_incoming *inc,
iov = first_iov;
iov_off = 0;
- sg = rm->m_sg;
+ sg = rm->data.op_sg;
vec_off = 0;
copied = 0;
@@ -394,15 +401,14 @@ int rds_message_inc_copy_to_user(struct rds_incoming *inc,
*/
void rds_message_wait(struct rds_message *rm)
{
- wait_event(rds_message_flush_waitq,
+ wait_event_interruptible(rm->m_flush_wait,
!test_bit(RDS_MSG_MAPPED, &rm->m_flags));
}
void rds_message_unmapped(struct rds_message *rm)
{
clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
- if (waitqueue_active(&rds_message_flush_waitq))
- wake_up(&rds_message_flush_waitq);
+ wake_up_interruptible(&rm->m_flush_wait);
}
EXPORT_SYMBOL_GPL(rds_message_unmapped);
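
message.c now over-allocates each rds_message with room for every scatterlist entry its ops will need, and rds_message_alloc_sgs() hands out slices of that trailing area instead of doing a separate allocation per op. A small sketch of the single-allocation SG pool idea, using placeholder struct and function names:

#include <linux/scatterlist.h>
#include <linux/slab.h>

struct sg_pool_msg {
	unsigned int	used_sgs;
	unsigned int	total_sgs;
	/* scatterlist entries live immediately after the struct */
};

static struct sg_pool_msg *msg_alloc(unsigned int nr_sgs, gfp_t gfp)
{
	struct sg_pool_msg *m;

	m = kzalloc(sizeof(*m) + nr_sgs * sizeof(struct scatterlist), gfp);
	if (!m)
		return NULL;
	m->total_sgs = nr_sgs;
	return m;
}

/* Each op carves its entries out of the pool that follows the struct. */
static struct scatterlist *msg_grab_sgs(struct sg_pool_msg *m, int nents)
{
	struct scatterlist *pool = (struct scatterlist *)&m[1];
	struct scatterlist *sg;

	if (WARN_ON(m->used_sgs + nents > m->total_sgs))
		return NULL;

	sg = &pool[m->used_sgs];
	sg_init_table(sg, nents);
	m->used_sgs += nents;
	return sg;
}
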
diff --git a/net/rds/page.c b/net/rds/page.c
index 595a952d4b17..5e44f5ae7898 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -116,7 +116,7 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
/* jump straight to allocation if we're trying for a huge page */
if (bytes >= PAGE_SIZE) {
page = alloc_page(gfp);
- if (page == NULL) {
+ if (!page) {
ret = -ENOMEM;
} else {
sg_set_page(scat, page, PAGE_SIZE, 0);
@@ -162,7 +162,7 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
rem = &per_cpu(rds_page_remainders, get_cpu());
local_irq_save(flags);
- if (page == NULL) {
+ if (!page) {
ret = -ENOMEM;
break;
}
@@ -186,6 +186,7 @@ out:
ret ? 0 : scat->length);
return ret;
}
+EXPORT_SYMBOL_GPL(rds_page_remainder_alloc);
static int rds_page_remainder_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 75fd13bb631b..1a41debca1ce 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -35,7 +35,7 @@
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */
-#include "rdma.h"
+#include "rds.h"
/*
* XXX
@@ -130,14 +130,22 @@ void rds_rdma_drop_keys(struct rds_sock *rs)
{
struct rds_mr *mr;
struct rb_node *node;
+ unsigned long flags;
/* Release any MRs associated with this socket */
+ spin_lock_irqsave(&rs->rs_rdma_lock, flags);
while ((node = rb_first(&rs->rs_rdma_keys))) {
mr = container_of(node, struct rds_mr, r_rb_node);
if (mr->r_trans == rs->rs_transport)
mr->r_invalidate = 0;
+ rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
+ RB_CLEAR_NODE(&mr->r_rb_node);
+ spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
+ rds_destroy_mr(mr);
rds_mr_put(mr);
+ spin_lock_irqsave(&rs->rs_rdma_lock, flags);
}
+ spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (rs->rs_transport && rs->rs_transport->flush_mrs)
rs->rs_transport->flush_mrs();
@@ -181,7 +189,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
goto out;
}
- if (rs->rs_transport->get_mr == NULL) {
+ if (!rs->rs_transport->get_mr) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -197,13 +205,13 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
/* XXX clamp nr_pages to limit the size of this alloc? */
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
- if (pages == NULL) {
+ if (!pages) {
ret = -ENOMEM;
goto out;
}
mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
- if (mr == NULL) {
+ if (!mr) {
ret = -ENOMEM;
goto out;
}
@@ -230,13 +238,13 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
* r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
* the zero page.
*/
- ret = rds_pin_pages(args->vec.addr & PAGE_MASK, nr_pages, pages, 1);
+ ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
if (ret < 0)
goto out;
nents = ret;
sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
- if (sg == NULL) {
+ if (!sg) {
ret = -ENOMEM;
goto out;
}
@@ -406,68 +414,127 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
- if (mr && (mr->r_use_once || force)) {
+ if (!mr) {
+ printk(KERN_ERR "rds: trying to unuse MR with unknown r_key %u!\n", r_key);
+ spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
+ return;
+ }
+
+ if (mr->r_use_once || force) {
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
RB_CLEAR_NODE(&mr->r_rb_node);
zot_me = 1;
- } else if (mr)
- atomic_inc(&mr->r_refcount);
+ }
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
/* May have to issue a dma_sync on this memory region.
* Note we could avoid this if the operation was a RDMA READ,
* but at this point we can't tell. */
- if (mr != NULL) {
- if (mr->r_trans->sync_mr)
- mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
-
- /* If the MR was marked as invalidate, this will
- * trigger an async flush. */
- if (zot_me)
- rds_destroy_mr(mr);
- rds_mr_put(mr);
- }
+ if (mr->r_trans->sync_mr)
+ mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
+
+ /* If the MR was marked as invalidate, this will
+ * trigger an async flush. */
+ if (zot_me)
+ rds_destroy_mr(mr);
+ rds_mr_put(mr);
}
-void rds_rdma_free_op(struct rds_rdma_op *ro)
+void rds_rdma_free_op(struct rm_rdma_op *ro)
{
unsigned int i;
- for (i = 0; i < ro->r_nents; i++) {
- struct page *page = sg_page(&ro->r_sg[i]);
+ for (i = 0; i < ro->op_nents; i++) {
+ struct page *page = sg_page(&ro->op_sg[i]);
/* Mark page dirty if it was possibly modified, which
* is the case for a RDMA_READ which copies from remote
* to local memory */
- if (!ro->r_write) {
- BUG_ON(in_interrupt());
+ if (!ro->op_write) {
+ BUG_ON(irqs_disabled());
set_page_dirty(page);
}
put_page(page);
}
- kfree(ro->r_notifier);
- kfree(ro);
+ kfree(ro->op_notifier);
+ ro->op_notifier = NULL;
+ ro->op_active = 0;
+}
+
+void rds_atomic_free_op(struct rm_atomic_op *ao)
+{
+ struct page *page = sg_page(ao->op_sg);
+
+ /* Mark page dirty if it was possibly modified, which
+ * is the case for a RDMA_READ which copies from remote
+ * to local memory */
+ set_page_dirty(page);
+ put_page(page);
+
+ kfree(ao->op_notifier);
+ ao->op_notifier = NULL;
+ ao->op_active = 0;
}
+
/*
- * args is a pointer to an in-kernel copy in the sendmsg cmsg.
+ * Count the number of pages needed to describe an incoming iovec.
*/
-static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
- struct rds_rdma_args *args)
+static int rds_rdma_pages(struct rds_rdma_args *args)
{
struct rds_iovec vec;
- struct rds_rdma_op *op = NULL;
+ struct rds_iovec __user *local_vec;
+ unsigned int tot_pages = 0;
unsigned int nr_pages;
- unsigned int max_pages;
+ unsigned int i;
+
+ local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
+
+ /* figure out the number of pages in the vector */
+ for (i = 0; i < args->nr_local; i++) {
+ if (copy_from_user(&vec, &local_vec[i],
+ sizeof(struct rds_iovec)))
+ return -EFAULT;
+
+ nr_pages = rds_pages_in_vec(&vec);
+ if (nr_pages == 0)
+ return -EINVAL;
+
+ tot_pages += nr_pages;
+ }
+
+ return tot_pages;
+}
+
+int rds_rdma_extra_size(struct rds_rdma_args *args)
+{
+ return rds_rdma_pages(args) * sizeof(struct scatterlist);
+}
+
+/*
+ * The application asks for a RDMA transfer.
+ * Extract all arguments and set up the rdma_op
+ */
+int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+ struct cmsghdr *cmsg)
+{
+ struct rds_rdma_args *args;
+ struct rds_iovec vec;
+ struct rm_rdma_op *op = &rm->rdma;
+ int nr_pages;
unsigned int nr_bytes;
struct page **pages = NULL;
struct rds_iovec __user *local_vec;
- struct scatterlist *sg;
unsigned int nr;
unsigned int i, j;
- int ret;
+ int ret = 0;
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
+ || rm->rdma.op_active)
+ return -EINVAL;
+
+ args = CMSG_DATA(cmsg);
if (rs->rs_bound_addr == 0) {
ret = -ENOTCONN; /* XXX not a great errno */
@@ -479,61 +546,38 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
goto out;
}
- nr_pages = 0;
- max_pages = 0;
-
- local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
-
- /* figure out the number of pages in the vector */
- for (i = 0; i < args->nr_local; i++) {
- if (copy_from_user(&vec, &local_vec[i],
- sizeof(struct rds_iovec))) {
- ret = -EFAULT;
- goto out;
- }
-
- nr = rds_pages_in_vec(&vec);
- if (nr == 0) {
- ret = -EINVAL;
- goto out;
- }
-
- max_pages = max(nr, max_pages);
- nr_pages += nr;
- }
-
- pages = kcalloc(max_pages, sizeof(struct page *), GFP_KERNEL);
- if (pages == NULL) {
- ret = -ENOMEM;
+ nr_pages = rds_rdma_pages(args);
+ if (nr_pages < 0)
goto out;
- }
- op = kzalloc(offsetof(struct rds_rdma_op, r_sg[nr_pages]), GFP_KERNEL);
- if (op == NULL) {
+ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
+ if (!pages) {
ret = -ENOMEM;
goto out;
}
- op->r_write = !!(args->flags & RDS_RDMA_READWRITE);
- op->r_fence = !!(args->flags & RDS_RDMA_FENCE);
- op->r_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
- op->r_recverr = rs->rs_recverr;
+ op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
+ op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
+ op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
+ op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
+ op->op_active = 1;
+ op->op_recverr = rs->rs_recverr;
WARN_ON(!nr_pages);
- sg_init_table(op->r_sg, nr_pages);
+ op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
- if (op->r_notify || op->r_recverr) {
+ if (op->op_notify || op->op_recverr) {
/* We allocate an uninitialized notifier here, because
* we don't want to do that in the completion handler. We
* would have to use GFP_ATOMIC there, and don't want to deal
* with failed allocations.
*/
- op->r_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
- if (!op->r_notifier) {
+ op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
+ if (!op->op_notifier) {
ret = -ENOMEM;
goto out;
}
- op->r_notifier->n_user_token = args->user_token;
- op->r_notifier->n_status = RDS_RDMA_SUCCESS;
+ op->op_notifier->n_user_token = args->user_token;
+ op->op_notifier->n_status = RDS_RDMA_SUCCESS;
}
/* The cookie contains the R_Key of the remote memory region, and
@@ -543,15 +587,17 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
* destination address (which is really an offset into the MR)
* FIXME: We may want to move this into ib_rdma.c
*/
- op->r_key = rds_rdma_cookie_key(args->cookie);
- op->r_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
+ op->op_rkey = rds_rdma_cookie_key(args->cookie);
+ op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
nr_bytes = 0;
rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
(unsigned long long)args->nr_local,
(unsigned long long)args->remote_vec.addr,
- op->r_key);
+ op->op_rkey);
+
+ local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
for (i = 0; i < args->nr_local; i++) {
if (copy_from_user(&vec, &local_vec[i],
@@ -569,15 +615,10 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
rs->rs_user_addr = vec.addr;
rs->rs_user_bytes = vec.bytes;
- /* did the user change the vec under us? */
- if (nr > max_pages || op->r_nents + nr > nr_pages) {
- ret = -EINVAL;
- goto out;
- }
/* If it's a WRITE operation, we want to pin the pages for reading.
* If it's a READ operation, we need to pin the pages for writing.
*/
- ret = rds_pin_pages(vec.addr & PAGE_MASK, nr, pages, !op->r_write);
+ ret = rds_pin_pages(vec.addr, nr, pages, !op->op_write);
if (ret < 0)
goto out;
@@ -588,8 +629,9 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
for (j = 0; j < nr; j++) {
unsigned int offset = vec.addr & ~PAGE_MASK;
+ struct scatterlist *sg;
- sg = &op->r_sg[op->r_nents + j];
+ sg = &op->op_sg[op->op_nents + j];
sg_set_page(sg, pages[j],
min_t(unsigned int, vec.bytes, PAGE_SIZE - offset),
offset);
@@ -601,10 +643,9 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
vec.bytes -= sg->length;
}
- op->r_nents += nr;
+ op->op_nents += nr;
}
-
if (nr_bytes > args->remote_vec.bytes) {
rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
nr_bytes,
@@ -612,38 +653,17 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
ret = -EINVAL;
goto out;
}
- op->r_bytes = nr_bytes;
+ op->op_bytes = nr_bytes;
ret = 0;
out:
kfree(pages);
- if (ret) {
- if (op)
- rds_rdma_free_op(op);
- op = ERR_PTR(ret);
- }
- return op;
-}
-
-/*
- * The application asks for a RDMA transfer.
- * Extract all arguments and set up the rdma_op
- */
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg)
-{
- struct rds_rdma_op *op;
-
- if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) ||
- rm->m_rdma_op != NULL)
- return -EINVAL;
+ if (ret)
+ rds_rdma_free_op(op);
- op = rds_rdma_prepare(rs, CMSG_DATA(cmsg));
- if (IS_ERR(op))
- return PTR_ERR(op);
rds_stats_inc(s_send_rdma);
- rm->m_rdma_op = op;
- return 0;
+
+ return ret;
}
/*
@@ -673,7 +693,7 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
- if (mr == NULL)
+ if (!mr)
err = -EINVAL; /* invalid r_key */
else
atomic_inc(&mr->r_refcount);
@@ -681,7 +701,7 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
if (mr) {
mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
- rm->m_rdma_mr = mr;
+ rm->rdma.op_rdma_mr = mr;
}
return err;
}
@@ -699,5 +719,98 @@ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
rm->m_rdma_cookie != 0)
return -EINVAL;
- return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr);
+ return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
+}
+
+/*
+ * Fill in rds_message for an atomic request.
+ */
+int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
+ struct cmsghdr *cmsg)
+{
+ struct page *page = NULL;
+ struct rds_atomic_args *args;
+ int ret = 0;
+
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
+ || rm->atomic.op_active)
+ return -EINVAL;
+
+ args = CMSG_DATA(cmsg);
+
+ /* Nonmasked & masked cmsg ops converted to masked hw ops */
+ switch (cmsg->cmsg_type) {
+ case RDS_CMSG_ATOMIC_FADD:
+ rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
+ rm->atomic.op_m_fadd.add = args->fadd.add;
+ rm->atomic.op_m_fadd.nocarry_mask = 0;
+ break;
+ case RDS_CMSG_MASKED_ATOMIC_FADD:
+ rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
+ rm->atomic.op_m_fadd.add = args->m_fadd.add;
+ rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
+ break;
+ case RDS_CMSG_ATOMIC_CSWP:
+ rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
+ rm->atomic.op_m_cswp.compare = args->cswp.compare;
+ rm->atomic.op_m_cswp.swap = args->cswp.swap;
+ rm->atomic.op_m_cswp.compare_mask = ~0;
+ rm->atomic.op_m_cswp.swap_mask = ~0;
+ break;
+ case RDS_CMSG_MASKED_ATOMIC_CSWP:
+ rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
+ rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
+ rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
+ rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
+ rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
+ break;
+ default:
+ BUG(); /* should never happen */
+ }
+
+ rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
+ rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
+ rm->atomic.op_active = 1;
+ rm->atomic.op_recverr = rs->rs_recverr;
+ rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
+
+ /* verify 8 byte-aligned */
+ if (args->local_addr & 0x7) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = rds_pin_pages(args->local_addr, 1, &page, 1);
+ if (ret != 1)
+ goto err;
+ ret = 0;
+
+ sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));
+
+ if (rm->atomic.op_notify || rm->atomic.op_recverr) {
+ /* We allocate an uninitialized notifier here, because
+ * we don't want to do that in the completion handler. We
+ * would have to use GFP_ATOMIC there, and don't want to deal
+ * with failed allocations.
+ */
+ rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
+ if (!rm->atomic.op_notifier) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ rm->atomic.op_notifier->n_user_token = args->user_token;
+ rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
+ }
+
+ rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
+ rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);
+
+ return ret;
+err:
+ if (page)
+ put_page(page);
+ kfree(rm->atomic.op_notifier);
+
+ return ret;
}
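
rds_rdma_drop_keys() now erases each MR from the tree under rs_rdma_lock but releases the lock before calling rds_destroy_mr()/rds_mr_put(), which may block, then re-takes it and starts again from rb_first(). A generic sketch of that drop-the-lock-to-destroy loop, with invented item and function names:

#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct keyed_item {
	struct rb_node node;
};

static void destroy_item(struct keyed_item *item)
{
	/* stand-in for teardown work that may sleep */
	kfree(item);
}

static void drop_all_items(struct rb_root *root, spinlock_t *lock)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	while ((node = rb_first(root))) {
		struct keyed_item *item = rb_entry(node, struct keyed_item, node);

		rb_erase(&item->node, root);
		RB_CLEAR_NODE(&item->node);

		/* destruction may sleep, so it cannot run under the spinlock */
		spin_unlock_irqrestore(lock, flags);
		destroy_item(item);
		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}
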
diff --git a/net/rds/rdma.h b/net/rds/rdma.h
deleted file mode 100644
index 909c39835a5d..000000000000
--- a/net/rds/rdma.h
+++ /dev/null
@@ -1,85 +0,0 @@
-#ifndef _RDS_RDMA_H
-#define _RDS_RDMA_H
-
-#include <linux/rbtree.h>
-#include <linux/spinlock.h>
-#include <linux/scatterlist.h>
-
-#include "rds.h"
-
-struct rds_mr {
- struct rb_node r_rb_node;
- atomic_t r_refcount;
- u32 r_key;
-
- /* A copy of the creation flags */
- unsigned int r_use_once:1;
- unsigned int r_invalidate:1;
- unsigned int r_write:1;
-
- /* This is for RDS_MR_DEAD.
- * It would be nice & consistent to make this part of the above
- * bit field here, but we need to use test_and_set_bit.
- */
- unsigned long r_state;
- struct rds_sock *r_sock; /* back pointer to the socket that owns us */
- struct rds_transport *r_trans;
- void *r_trans_private;
-};
-
-/* Flags for mr->r_state */
-#define RDS_MR_DEAD 0
-
-struct rds_rdma_op {
- u32 r_key;
- u64 r_remote_addr;
- unsigned int r_write:1;
- unsigned int r_fence:1;
- unsigned int r_notify:1;
- unsigned int r_recverr:1;
- unsigned int r_mapped:1;
- struct rds_notifier *r_notifier;
- unsigned int r_bytes;
- unsigned int r_nents;
- unsigned int r_count;
- struct scatterlist r_sg[0];
-};
-
-static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
-{
- return r_key | (((u64) offset) << 32);
-}
-
-static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
-{
- return cookie;
-}
-
-static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
-{
- return cookie >> 32;
-}
-
-int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
-int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
-int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
-void rds_rdma_drop_keys(struct rds_sock *rs);
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg);
-int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg);
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg);
-int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg);
-void rds_rdma_free_op(struct rds_rdma_op *ro);
-void rds_rdma_send_complete(struct rds_message *rm, int);
-
-extern void __rds_put_mr_final(struct rds_mr *mr);
-static inline void rds_mr_put(struct rds_mr *mr)
-{
- if (atomic_dec_and_test(&mr->r_refcount))
- __rds_put_mr_final(mr);
-}
-
-#endif
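
The cookie helpers deleted here (and re-added in rds.h below) pack the 32-bit R_Key into the low half of a 64-bit cookie and the byte offset into the high half. A quick round-trip sketch with local names, assuming the cookie is a plain u64 as in the RDS uapi:

#include <linux/types.h>

static inline u64 make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 cookie_key(u64 cookie)	{ return cookie; }
static inline u32 cookie_offset(u64 cookie)	{ return cookie >> 32; }

/*
 * make_cookie(0x1234, 0x10) == 0x0000001000001234ULL;
 * cookie_key() recovers 0x1234, cookie_offset() recovers 0x10.
 */
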
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index e599ba2f950d..e6ed10aee190 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -36,6 +36,34 @@
static struct rdma_cm_id *rds_rdma_listen_id;
+static char *rds_cm_event_strings[] = {
+#define RDS_CM_EVENT_STRING(foo) \
+ [RDMA_CM_EVENT_##foo] = __stringify(RDMA_CM_EVENT_##foo)
+ RDS_CM_EVENT_STRING(ADDR_RESOLVED),
+ RDS_CM_EVENT_STRING(ADDR_ERROR),
+ RDS_CM_EVENT_STRING(ROUTE_RESOLVED),
+ RDS_CM_EVENT_STRING(ROUTE_ERROR),
+ RDS_CM_EVENT_STRING(CONNECT_REQUEST),
+ RDS_CM_EVENT_STRING(CONNECT_RESPONSE),
+ RDS_CM_EVENT_STRING(CONNECT_ERROR),
+ RDS_CM_EVENT_STRING(UNREACHABLE),
+ RDS_CM_EVENT_STRING(REJECTED),
+ RDS_CM_EVENT_STRING(ESTABLISHED),
+ RDS_CM_EVENT_STRING(DISCONNECTED),
+ RDS_CM_EVENT_STRING(DEVICE_REMOVAL),
+ RDS_CM_EVENT_STRING(MULTICAST_JOIN),
+ RDS_CM_EVENT_STRING(MULTICAST_ERROR),
+ RDS_CM_EVENT_STRING(ADDR_CHANGE),
+ RDS_CM_EVENT_STRING(TIMEWAIT_EXIT),
+#undef RDS_CM_EVENT_STRING
+};
+
+static char *rds_cm_event_str(enum rdma_cm_event_type type)
+{
+ return rds_str_array(rds_cm_event_strings,
+ ARRAY_SIZE(rds_cm_event_strings), type);
+};
+
int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
@@ -44,8 +72,8 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
struct rds_transport *trans;
int ret = 0;
- rdsdebug("conn %p id %p handling event %u\n", conn, cm_id,
- event->event);
+ rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id,
+ event->event, rds_cm_event_str(event->event));
if (cm_id->device->node_type == RDMA_NODE_RNIC)
trans = &rds_iw_transport;
@@ -109,7 +137,8 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
default:
/* things like device disconnect? */
- printk(KERN_ERR "RDS: unknown event %u!\n", event->event);
+ printk(KERN_ERR "RDS: unknown event %u (%s)!\n",
+ event->event, rds_cm_event_str(event->event));
break;
}
@@ -117,12 +146,13 @@ out:
if (conn)
mutex_unlock(&conn->c_cm_lock);
- rdsdebug("id %p event %u handling ret %d\n", cm_id, event->event, ret);
+ rdsdebug("id %p event %u (%s) handling ret %d\n", cm_id, event->event,
+ rds_cm_event_str(event->event), ret);
return ret;
}
-static int __init rds_rdma_listen_init(void)
+static int rds_rdma_listen_init(void)
{
struct sockaddr_in sin;
struct rdma_cm_id *cm_id;
@@ -177,7 +207,7 @@ static void rds_rdma_listen_stop(void)
}
}
-int __init rds_rdma_init(void)
+int rds_rdma_init(void)
{
int ret;
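
rdma_transport.c builds its event-name table with a helper macro so the enum value and its printable name cannot drift apart, and looks names up through rds_str_array() (which, from its use here, presumably bounds-checks the index). The same trick in miniature for a hypothetical enum, with the bounds check done inline:

#include <linux/kernel.h>
#include <linux/stringify.h>

enum demo_event { DEMO_EV_OPEN, DEMO_EV_CLOSE, DEMO_EV_ERROR };

static char *demo_event_strings[] = {
#define DEMO_EVENT_STRING(foo) [DEMO_EV_##foo] = __stringify(DEMO_EV_##foo)
	DEMO_EVENT_STRING(OPEN),
	DEMO_EVENT_STRING(CLOSE),
	DEMO_EVENT_STRING(ERROR),
#undef DEMO_EVENT_STRING
};

static char *demo_event_str(enum demo_event ev)
{
	/* out-of-range or unset entries fall back to a placeholder */
	if (ev >= ARRAY_SIZE(demo_event_strings) || !demo_event_strings[ev])
		return "unknown";
	return demo_event_strings[ev];
}
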
diff --git a/net/rds/rds.h b/net/rds/rds.h
index c224b5bb3ba9..8103dcf8b976 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -80,6 +80,7 @@ enum {
/* Bits for c_flags */
#define RDS_LL_SEND_FULL 0
#define RDS_RECONNECT_PENDING 1
+#define RDS_IN_XMIT 2
struct rds_connection {
struct hlist_node c_hash_node;
@@ -91,12 +92,13 @@ struct rds_connection {
struct rds_cong_map *c_lcong;
struct rds_cong_map *c_fcong;
- struct mutex c_send_lock; /* protect send ring */
struct rds_message *c_xmit_rm;
unsigned long c_xmit_sg;
unsigned int c_xmit_hdr_off;
unsigned int c_xmit_data_off;
+ unsigned int c_xmit_atomic_sent;
unsigned int c_xmit_rdma_sent;
+ unsigned int c_xmit_data_sent;
spinlock_t c_lock; /* protect msg queues */
u64 c_next_tx_seq;
@@ -116,11 +118,10 @@ struct rds_connection {
struct delayed_work c_conn_w;
struct work_struct c_down_w;
struct mutex c_cm_lock; /* protect conn state & cm */
+ wait_queue_head_t c_waitq;
struct list_head c_map_item;
unsigned long c_map_queued;
- unsigned long c_map_offset;
- unsigned long c_map_bytes;
unsigned int c_unacked_packets;
unsigned int c_unacked_bytes;
@@ -206,6 +207,48 @@ struct rds_incoming {
rds_rdma_cookie_t i_rdma_cookie;
};
+struct rds_mr {
+ struct rb_node r_rb_node;
+ atomic_t r_refcount;
+ u32 r_key;
+
+ /* A copy of the creation flags */
+ unsigned int r_use_once:1;
+ unsigned int r_invalidate:1;
+ unsigned int r_write:1;
+
+ /* This is for RDS_MR_DEAD.
+ * It would be nice & consistent to make this part of the above
+ * bit field here, but we need to use test_and_set_bit.
+ */
+ unsigned long r_state;
+ struct rds_sock *r_sock; /* back pointer to the socket that owns us */
+ struct rds_transport *r_trans;
+ void *r_trans_private;
+};
+
+/* Flags for mr->r_state */
+#define RDS_MR_DEAD 0
+
+static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
+{
+ return r_key | (((u64) offset) << 32);
+}
+
+static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
+{
+ return cookie;
+}
+
+static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
+{
+ return cookie >> 32;
+}
+
+/* atomic operation types */
+#define RDS_ATOMIC_TYPE_CSWP 0
+#define RDS_ATOMIC_TYPE_FADD 1
+
/*
* m_sock_item and m_conn_item are on lists that are serialized under
* conn->c_lock. m_sock_item has additional meaning in that once it is empty
@@ -258,13 +301,71 @@ struct rds_message {
* -> rs->rs_lock
*/
spinlock_t m_rs_lock;
+ wait_queue_head_t m_flush_wait;
+
struct rds_sock *m_rs;
- struct rds_rdma_op *m_rdma_op;
+
+ /* cookie to send to remote, in rds header */
rds_rdma_cookie_t m_rdma_cookie;
- struct rds_mr *m_rdma_mr;
- unsigned int m_nents;
- unsigned int m_count;
- struct scatterlist m_sg[0];
+
+ unsigned int m_used_sgs;
+ unsigned int m_total_sgs;
+
+ void *m_final_op;
+
+ struct {
+ struct rm_atomic_op {
+ int op_type;
+ union {
+ struct {
+ uint64_t compare;
+ uint64_t swap;
+ uint64_t compare_mask;
+ uint64_t swap_mask;
+ } op_m_cswp;
+ struct {
+ uint64_t add;
+ uint64_t nocarry_mask;
+ } op_m_fadd;
+ };
+
+ u32 op_rkey;
+ u64 op_remote_addr;
+ unsigned int op_notify:1;
+ unsigned int op_recverr:1;
+ unsigned int op_mapped:1;
+ unsigned int op_silent:1;
+ unsigned int op_active:1;
+ struct scatterlist *op_sg;
+ struct rds_notifier *op_notifier;
+
+ struct rds_mr *op_rdma_mr;
+ } atomic;
+ struct rm_rdma_op {
+ u32 op_rkey;
+ u64 op_remote_addr;
+ unsigned int op_write:1;
+ unsigned int op_fence:1;
+ unsigned int op_notify:1;
+ unsigned int op_recverr:1;
+ unsigned int op_mapped:1;
+ unsigned int op_silent:1;
+ unsigned int op_active:1;
+ unsigned int op_bytes;
+ unsigned int op_nents;
+ unsigned int op_count;
+ struct scatterlist *op_sg;
+ struct rds_notifier *op_notifier;
+
+ struct rds_mr *op_rdma_mr;
+ } rdma;
+ struct rm_data_op {
+ unsigned int op_active:1;
+ unsigned int op_nents;
+ unsigned int op_count;
+ struct scatterlist *op_sg;
+ } data;
+ };
};
/*
@@ -305,10 +406,6 @@ struct rds_notifier {
* transport is responsible for other serialization, including
* rds_recv_incoming(). This is called in process context but
* should try hard not to block.
- *
- * @xmit_cong_map: This asks the transport to send the local bitmap down the
- * given connection. XXX get a better story about the bitmap
- * flag and header.
*/
#define RDS_TRANS_IB 0
@@ -332,13 +429,11 @@ struct rds_transport {
void (*xmit_complete)(struct rds_connection *conn);
int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg, unsigned int off);
- int (*xmit_cong_map)(struct rds_connection *conn,
- struct rds_cong_map *map, unsigned long offset);
- int (*xmit_rdma)(struct rds_connection *conn, struct rds_rdma_op *op);
+ int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
+ int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
int (*recv)(struct rds_connection *conn);
int (*inc_copy_to_user)(struct rds_incoming *inc, struct iovec *iov,
size_t size);
- void (*inc_purge)(struct rds_incoming *inc);
void (*inc_free)(struct rds_incoming *inc);
int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
@@ -367,17 +462,11 @@ struct rds_sock {
* bound_addr used for both incoming and outgoing, no INADDR_ANY
* support.
*/
- struct rb_node rs_bound_node;
+ struct hlist_node rs_bound_node;
__be32 rs_bound_addr;
__be32 rs_conn_addr;
__be16 rs_bound_port;
__be16 rs_conn_port;
-
- /*
- * This is only used to communicate the transport between bind and
- * initiating connections. All other trans use is referenced through
- * the connection.
- */
struct rds_transport *rs_transport;
/*
@@ -466,8 +555,8 @@ struct rds_statistics {
uint64_t s_recv_ping;
uint64_t s_send_queue_empty;
uint64_t s_send_queue_full;
- uint64_t s_send_sem_contention;
- uint64_t s_send_sem_queue_raced;
+ uint64_t s_send_lock_contention;
+ uint64_t s_send_lock_queue_raced;
uint64_t s_send_immediate_retry;
uint64_t s_send_delayed_retry;
uint64_t s_send_drop_acked;
@@ -487,6 +576,7 @@ struct rds_statistics {
};
/* af_rds.c */
+char *rds_str_array(char **array, size_t elements, size_t index);
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
@@ -521,15 +611,17 @@ void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);
/* conn.c */
-int __init rds_conn_init(void);
+int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp);
+void rds_conn_shutdown(struct rds_connection *conn);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_reset(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
+void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens,
@@ -566,7 +658,8 @@ rds_conn_connecting(struct rds_connection *conn)
/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
-struct rds_message *rds_message_copy_from_user(struct iovec *first_iov,
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
+int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
size_t total_len);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
@@ -580,7 +673,6 @@ int rds_message_get_version_extension(struct rds_header *hdr, unsigned int *vers
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc,
struct iovec *first_iov, size_t size);
-void rds_message_inc_purge(struct rds_incoming *inc);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
@@ -636,14 +728,39 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
is_acked_func is_acked);
-int rds_send_acked_before(struct rds_connection *conn, u64 seq);
void rds_send_remove_from_sock(struct list_head *messages, int status);
int rds_send_pong(struct rds_connection *conn, __be16 dport);
struct rds_message *rds_send_get_message(struct rds_connection *,
- struct rds_rdma_op *);
+ struct rm_rdma_op *);
/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
+int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
+int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
+int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
+void rds_rdma_drop_keys(struct rds_sock *rs);
+int rds_rdma_extra_size(struct rds_rdma_args *args);
+int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+ struct cmsghdr *cmsg);
+int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
+ struct cmsghdr *cmsg);
+int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+ struct cmsghdr *cmsg);
+int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
+ struct cmsghdr *cmsg);
+void rds_rdma_free_op(struct rm_rdma_op *ro);
+void rds_atomic_free_op(struct rm_atomic_op *ao);
+void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
+void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
+int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
+ struct cmsghdr *cmsg);
+
+extern void __rds_put_mr_final(struct rds_mr *mr);
+static inline void rds_mr_put(struct rds_mr *mr)
+{
+ if (atomic_dec_and_test(&mr->r_refcount))
+ __rds_put_mr_final(mr);
+}
/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
@@ -657,14 +774,14 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
put_cpu(); \
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
-int __init rds_stats_init(void);
+int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
uint64_t *values, const char *const *names,
size_t nr);
/* sysctl.c */
-int __init rds_sysctl_init(void);
+int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
@@ -678,9 +795,10 @@ extern unsigned long rds_sysctl_trace_flags;
extern unsigned int rds_sysctl_trace_level;
/* threads.c */
-int __init rds_threads_init(void);
+int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
+void rds_queue_reconnect(struct rds_connection *conn);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
@@ -691,9 +809,10 @@ void rds_connect_complete(struct rds_connection *conn);
int rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(__be32 addr);
+void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
unsigned int avail);
-int __init rds_trans_init(void);
+int rds_trans_init(void);
void rds_trans_exit(void);
#endif
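
rds.h adds a per-message m_flush_wait, and message.c (above) now waits and wakes on it instead of one global rds_message_flush_waitq, so unmapping a message only disturbs that message's sleepers. A bare-bones version of the flag-plus-private-waitqueue handshake, with invented names:

#include <linux/wait.h>
#include <linux/bitops.h>

#define OBJ_MAPPED 0

struct mapped_obj {
	unsigned long		flags;
	wait_queue_head_t	flush_wait;
};

static void obj_init(struct mapped_obj *obj)
{
	obj->flags = 0;
	init_waitqueue_head(&obj->flush_wait);
}

/* Transmit path: mark the object as held by the hardware. */
static void obj_mark_mapped(struct mapped_obj *obj)
{
	set_bit(OBJ_MAPPED, &obj->flags);
}

/* Completion path: release it and wake anyone flushing this object. */
static void obj_unmapped(struct mapped_obj *obj)
{
	clear_bit(OBJ_MAPPED, &obj->flags);
	smp_mb__after_clear_bit();
	wake_up_interruptible(&obj->flush_wait);
}

/* A flusher sleeps only on its own object's queue. */
static void obj_wait_unmapped(struct mapped_obj *obj)
{
	wait_event_interruptible(obj->flush_wait,
				 !test_bit(OBJ_MAPPED, &obj->flags));
}
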
diff --git a/net/rds/recv.c b/net/rds/recv.c
index c93588c2d553..68800f02aa30 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -36,7 +36,6 @@
#include <linux/in.h>
#include "rds.h"
-#include "rdma.h"
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
__be32 saddr)
@@ -210,7 +209,7 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
}
rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
- if (rs == NULL) {
+ if (!rs) {
rds_stats_inc(s_recv_drop_no_sock);
goto out;
}
@@ -251,7 +250,7 @@ static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
{
unsigned long flags;
- if (*inc == NULL) {
+ if (!*inc) {
read_lock_irqsave(&rs->rs_recv_lock, flags);
if (!list_empty(&rs->rs_recv_queue)) {
*inc = list_entry(rs->rs_recv_queue.next,
@@ -334,10 +333,10 @@ int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
if (msghdr) {
cmsg.user_token = notifier->n_user_token;
- cmsg.status = notifier->n_status;
+ cmsg.status = notifier->n_status;
err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
- sizeof(cmsg), &cmsg);
+ sizeof(cmsg), &cmsg);
if (err)
break;
}
diff --git a/net/rds/send.c b/net/rds/send.c
index 9c1c6bcaa6c9..9b951a0ab6b7 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -37,7 +37,6 @@
#include <linux/list.h>
#include "rds.h"
-#include "rdma.h"
/* When transmitting messages in rds_send_xmit, we need to emerge from
* time to time and briefly release the CPU. Otherwise the softlock watchdog
@@ -54,7 +53,8 @@ module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
/*
- * Reset the send state. Caller must hold c_send_lock when calling here.
+ * Reset the send state. Callers must ensure that this doesn't race with
+ * rds_send_xmit().
*/
void rds_send_reset(struct rds_connection *conn)
{
@@ -62,18 +62,22 @@ void rds_send_reset(struct rds_connection *conn)
unsigned long flags;
if (conn->c_xmit_rm) {
+ rm = conn->c_xmit_rm;
+ conn->c_xmit_rm = NULL;
/* Tell the user the RDMA op is no longer mapped by the
* transport. This isn't entirely true (it's flushed out
* independently) but as the connection is down, there's
* no ongoing RDMA to/from that memory */
- rds_message_unmapped(conn->c_xmit_rm);
- rds_message_put(conn->c_xmit_rm);
- conn->c_xmit_rm = NULL;
+ rds_message_unmapped(rm);
+ rds_message_put(rm);
}
+
conn->c_xmit_sg = 0;
conn->c_xmit_hdr_off = 0;
conn->c_xmit_data_off = 0;
+ conn->c_xmit_atomic_sent = 0;
conn->c_xmit_rdma_sent = 0;
+ conn->c_xmit_data_sent = 0;
conn->c_map_queued = 0;
@@ -90,6 +94,25 @@ void rds_send_reset(struct rds_connection *conn)
spin_unlock_irqrestore(&conn->c_lock, flags);
}
+static int acquire_in_xmit(struct rds_connection *conn)
+{
+ return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
+}
+
+static void release_in_xmit(struct rds_connection *conn)
+{
+ clear_bit(RDS_IN_XMIT, &conn->c_flags);
+ smp_mb__after_clear_bit();
+ /*
+ * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
+ * hot path and finding waiters is very rare. We don't want to walk
+ * the system-wide hashed waitqueue buckets in the fast path only to
+ * almost never find waiters.
+ */
+ if (waitqueue_active(&conn->c_waitq))
+ wake_up_all(&conn->c_waitq);
+}
+
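
acquire_in_xmit()/release_in_xmit() turn RDS_IN_XMIT into a simple bit lock that replaces c_send_lock in the xmit path; anyone who needs to wait for an in-flight rds_send_xmit() sleeps on c_waitq until the bit clears. A minimal sketch of that waiter side, assuming only the bit and waitqueue introduced here (the helper name is illustrative):

	/* Illustrative waiter: pairs with the wake_up_all() in release_in_xmit(). */
	static void rds_wait_xmit_idle(struct rds_connection *conn)
	{
		wait_event(conn->c_waitq,
			   !test_bit(RDS_IN_XMIT, &conn->c_flags));
	}
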
/*
 * We're making the conscious trade-off here to only send one message
* down the connection at a time.
@@ -109,102 +132,69 @@ int rds_send_xmit(struct rds_connection *conn)
struct rds_message *rm;
unsigned long flags;
unsigned int tmp;
- unsigned int send_quota = send_batch_count;
struct scatterlist *sg;
int ret = 0;
- int was_empty = 0;
LIST_HEAD(to_be_dropped);
+restart:
+
/*
* sendmsg calls here after having queued its message on the send
* queue. We only have one task feeding the connection at a time. If
* another thread is already feeding the queue then we back off. This
* avoids blocking the caller and trading per-connection data between
* caches per message.
- *
- * The sem holder will issue a retry if they notice that someone queued
- * a message after they stopped walking the send queue but before they
- * dropped the sem.
*/
- if (!mutex_trylock(&conn->c_send_lock)) {
- rds_stats_inc(s_send_sem_contention);
+ if (!acquire_in_xmit(conn)) {
+ rds_stats_inc(s_send_lock_contention);
ret = -ENOMEM;
goto out;
}
+ /*
+ * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
+ * we do the opposite to avoid races.
+ */
+ if (!rds_conn_up(conn)) {
+ release_in_xmit(conn);
+ ret = 0;
+ goto out;
+ }
+
if (conn->c_trans->xmit_prepare)
conn->c_trans->xmit_prepare(conn);
/*
* spin trying to push headers and data down the connection until
- * the connection doens't make forward progress.
+ * the connection doesn't make forward progress.
*/
- while (--send_quota) {
- /*
- * See if need to send a congestion map update if we're
- * between sending messages. The send_sem protects our sole
- * use of c_map_offset and _bytes.
- * Note this is used only by transports that define a special
- * xmit_cong_map function. For all others, we create allocate
- * a cong_map message and treat it just like any other send.
- */
- if (conn->c_map_bytes) {
- ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
- conn->c_map_offset);
- if (ret <= 0)
- break;
+ while (1) {
- conn->c_map_offset += ret;
- conn->c_map_bytes -= ret;
- if (conn->c_map_bytes)
- continue;
- }
-
- /* If we're done sending the current message, clear the
- * offset and S/G temporaries.
- */
rm = conn->c_xmit_rm;
- if (rm != NULL &&
- conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
- conn->c_xmit_sg == rm->m_nents) {
- conn->c_xmit_rm = NULL;
- conn->c_xmit_sg = 0;
- conn->c_xmit_hdr_off = 0;
- conn->c_xmit_data_off = 0;
- conn->c_xmit_rdma_sent = 0;
- /* Release the reference to the previous message. */
- rds_message_put(rm);
- rm = NULL;
- }
-
- /* If we're asked to send a cong map update, do so.
+ /*
+ * If between sending messages, we can send a pending congestion
+ * map update.
*/
- if (rm == NULL && test_and_clear_bit(0, &conn->c_map_queued)) {
- if (conn->c_trans->xmit_cong_map != NULL) {
- conn->c_map_offset = 0;
- conn->c_map_bytes = sizeof(struct rds_header) +
- RDS_CONG_MAP_BYTES;
- continue;
- }
-
+ if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
rm = rds_cong_update_alloc(conn);
if (IS_ERR(rm)) {
ret = PTR_ERR(rm);
break;
}
+ rm->data.op_active = 1;
conn->c_xmit_rm = rm;
}
/*
- * Grab the next message from the send queue, if there is one.
+ * If not already working on one, grab the next message.
*
* c_xmit_rm holds a ref while we're sending this message down
 * the connection. We can use this ref while holding the
* send_sem.. rds_send_reset() is serialized with it.
*/
- if (rm == NULL) {
+ if (!rm) {
unsigned int len;
spin_lock_irqsave(&conn->c_lock, flags);
@@ -224,10 +214,8 @@ int rds_send_xmit(struct rds_connection *conn)
spin_unlock_irqrestore(&conn->c_lock, flags);
- if (rm == NULL) {
- was_empty = 1;
+ if (!rm)
break;
- }
/* Unfortunately, the way Infiniband deals with
* RDMA to a bad MR key is by moving the entire
@@ -236,13 +224,12 @@ int rds_send_xmit(struct rds_connection *conn)
* connection.
* Therefore, we never retransmit messages with RDMA ops.
*/
- if (rm->m_rdma_op &&
+ if (rm->rdma.op_active &&
test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
spin_lock_irqsave(&conn->c_lock, flags);
if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
list_move(&rm->m_conn_item, &to_be_dropped);
spin_unlock_irqrestore(&conn->c_lock, flags);
- rds_message_put(rm);
continue;
}
@@ -263,23 +250,55 @@ int rds_send_xmit(struct rds_connection *conn)
conn->c_xmit_rm = rm;
}
- /*
- * Try and send an rdma message. Let's see if we can
- * keep this simple and require that the transport either
- * send the whole rdma or none of it.
- */
- if (rm->m_rdma_op && !conn->c_xmit_rdma_sent) {
- ret = conn->c_trans->xmit_rdma(conn, rm->m_rdma_op);
+ /* The transport either sends the whole rdma or none of it */
+ if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
+ rm->m_final_op = &rm->rdma;
+ ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
if (ret)
break;
conn->c_xmit_rdma_sent = 1;
+
/* The transport owns the mapped memory for now.
* You can't unmap it while it's on the send queue */
set_bit(RDS_MSG_MAPPED, &rm->m_flags);
}
- if (conn->c_xmit_hdr_off < sizeof(struct rds_header) ||
- conn->c_xmit_sg < rm->m_nents) {
+ if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
+ rm->m_final_op = &rm->atomic;
+ ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
+ if (ret)
+ break;
+ conn->c_xmit_atomic_sent = 1;
+
+ /* The transport owns the mapped memory for now.
+ * You can't unmap it while it's on the send queue */
+ set_bit(RDS_MSG_MAPPED, &rm->m_flags);
+ }
+
+ /*
+ * A number of cases require an RDS header to be sent
+ * even if there is no data.
+ * We permit 0-byte sends; rds-ping depends on this.
+ * However, if there are exclusively attached silent ops,
+ * we skip the hdr/data send, to enable silent operation.
+ */
+ if (rm->data.op_nents == 0) {
+ int ops_present;
+ int all_ops_are_silent = 1;
+
+ ops_present = (rm->atomic.op_active || rm->rdma.op_active);
+ if (rm->atomic.op_active && !rm->atomic.op_silent)
+ all_ops_are_silent = 0;
+ if (rm->rdma.op_active && !rm->rdma.op_silent)
+ all_ops_are_silent = 0;
+
+ if (ops_present && all_ops_are_silent
+ && !rm->m_rdma_cookie)
+ rm->data.op_active = 0;
+ }
+
+ if (rm->data.op_active && !conn->c_xmit_data_sent) {
+ rm->m_final_op = &rm->data;
ret = conn->c_trans->xmit(conn, rm,
conn->c_xmit_hdr_off,
conn->c_xmit_sg,
@@ -295,7 +314,7 @@ int rds_send_xmit(struct rds_connection *conn)
ret -= tmp;
}
- sg = &rm->m_sg[conn->c_xmit_sg];
+ sg = &rm->data.op_sg[conn->c_xmit_sg];
while (ret) {
tmp = min_t(int, ret, sg->length -
conn->c_xmit_data_off);
@@ -306,49 +325,63 @@ int rds_send_xmit(struct rds_connection *conn)
sg++;
conn->c_xmit_sg++;
BUG_ON(ret != 0 &&
- conn->c_xmit_sg == rm->m_nents);
+ conn->c_xmit_sg == rm->data.op_nents);
}
}
+
+ if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
+ (conn->c_xmit_sg == rm->data.op_nents))
+ conn->c_xmit_data_sent = 1;
}
- }
- /* Nuke any messages we decided not to retransmit. */
- if (!list_empty(&to_be_dropped))
- rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
+ /*
+ * An rm will only take multiple passes through this loop
+ * if there is a data op. Thus, if the data is sent (or there was
+ * none), then we're done with the rm.
+ */
+ if (!rm->data.op_active || conn->c_xmit_data_sent) {
+ conn->c_xmit_rm = NULL;
+ conn->c_xmit_sg = 0;
+ conn->c_xmit_hdr_off = 0;
+ conn->c_xmit_data_off = 0;
+ conn->c_xmit_rdma_sent = 0;
+ conn->c_xmit_atomic_sent = 0;
+ conn->c_xmit_data_sent = 0;
+
+ rds_message_put(rm);
+ }
+ }
if (conn->c_trans->xmit_complete)
conn->c_trans->xmit_complete(conn);
- /*
- * We might be racing with another sender who queued a message but
- * backed off on noticing that we held the c_send_lock. If we check
- * for queued messages after dropping the sem then either we'll
- * see the queued message or the queuer will get the sem. If we
- * notice the queued message then we trigger an immediate retry.
- *
- * We need to be careful only to do this when we stopped processing
- * the send queue because it was empty. It's the only way we
- * stop processing the loop when the transport hasn't taken
- * responsibility for forward progress.
- */
- mutex_unlock(&conn->c_send_lock);
+ release_in_xmit(conn);
- if (conn->c_map_bytes || (send_quota == 0 && !was_empty)) {
- /* We exhausted the send quota, but there's work left to
- * do. Return and (re-)schedule the send worker.
- */
- ret = -EAGAIN;
+ /* Nuke any messages we decided not to retransmit. */
+ if (!list_empty(&to_be_dropped)) {
+ /* irqs on here, so we can put(), unlike above */
+ list_for_each_entry(rm, &to_be_dropped, m_conn_item)
+ rds_message_put(rm);
+ rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
}
- if (ret == 0 && was_empty) {
- /* A simple bit test would be way faster than taking the
- * spin lock */
- spin_lock_irqsave(&conn->c_lock, flags);
+ /*
+ * Other senders can queue a message after we last test the send queue
+ * but before we clear RDS_IN_XMIT. In that case they'd back off and
+ * not try and send their newly queued message. We need to check the
+ * send queue after having cleared RDS_IN_XMIT so that their message
+ * doesn't get stuck on the send queue.
+ *
+ * If the transport cannot continue (i.e. ret != 0), then it must
+ * call us when more room is available, such as from the tx
+ * completion handler.
+ */
+ if (ret == 0) {
+ smp_mb();
if (!list_empty(&conn->c_send_queue)) {
- rds_stats_inc(s_send_sem_queue_raced);
- ret = -EAGAIN;
+ rds_stats_inc(s_send_lock_queue_raced);
+ goto restart;
}
- spin_unlock_irqrestore(&conn->c_lock, flags);
}
out:
return ret;
@@ -376,52 +409,60 @@ static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
}
/*
- * Returns true if there are no messages on the send and retransmit queues
- * which have a sequence number greater than or equal to the given sequence
- * number.
+ * This is pretty similar to what happens below in the ACK
+ * handling code - except that we call here as soon as we get
+ * the IB send completion on the RDMA op and the accompanying
+ * message.
*/
-int rds_send_acked_before(struct rds_connection *conn, u64 seq)
+void rds_rdma_send_complete(struct rds_message *rm, int status)
{
- struct rds_message *rm, *tmp;
- int ret = 1;
+ struct rds_sock *rs = NULL;
+ struct rm_rdma_op *ro;
+ struct rds_notifier *notifier;
+ unsigned long flags;
- spin_lock(&conn->c_lock);
+ spin_lock_irqsave(&rm->m_rs_lock, flags);
- list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
- if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
- ret = 0;
- break;
- }
+ ro = &rm->rdma;
+ if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
+ ro->op_active && ro->op_notify && ro->op_notifier) {
+ notifier = ro->op_notifier;
+ rs = rm->m_rs;
+ sock_hold(rds_rs_to_sk(rs));
- list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
- if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
- ret = 0;
- break;
+ notifier->n_status = status;
+ spin_lock(&rs->rs_lock);
+ list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
+ spin_unlock(&rs->rs_lock);
+
+ ro->op_notifier = NULL;
}
- spin_unlock(&conn->c_lock);
+ spin_unlock_irqrestore(&rm->m_rs_lock, flags);
- return ret;
+ if (rs) {
+ rds_wake_sk_sleep(rs);
+ sock_put(rds_rs_to_sk(rs));
+ }
}
+EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
/*
- * This is pretty similar to what happens below in the ACK
- * handling code - except that we call here as soon as we get
- * the IB send completion on the RDMA op and the accompanying
- * message.
+ * Just like above, except looks at atomic op
*/
-void rds_rdma_send_complete(struct rds_message *rm, int status)
+void rds_atomic_send_complete(struct rds_message *rm, int status)
{
struct rds_sock *rs = NULL;
- struct rds_rdma_op *ro;
+ struct rm_atomic_op *ao;
struct rds_notifier *notifier;
+ unsigned long flags;
- spin_lock(&rm->m_rs_lock);
+ spin_lock_irqsave(&rm->m_rs_lock, flags);
- ro = rm->m_rdma_op;
- if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
- ro && ro->r_notify && ro->r_notifier) {
- notifier = ro->r_notifier;
+ ao = &rm->atomic;
+ if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
+ && ao->op_active && ao->op_notify && ao->op_notifier) {
+ notifier = ao->op_notifier;
rs = rm->m_rs;
sock_hold(rds_rs_to_sk(rs));
@@ -430,17 +471,17 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
spin_unlock(&rs->rs_lock);
- ro->r_notifier = NULL;
+ ao->op_notifier = NULL;
}
- spin_unlock(&rm->m_rs_lock);
+ spin_unlock_irqrestore(&rm->m_rs_lock, flags);
if (rs) {
rds_wake_sk_sleep(rs);
sock_put(rds_rs_to_sk(rs));
}
}
-EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
+EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
/*
* This is the same as rds_rdma_send_complete except we
@@ -448,15 +489,23 @@ EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
* socket, socket lock) and can just move the notifier.
*/
static inline void
-__rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
+__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
- struct rds_rdma_op *ro;
+ struct rm_rdma_op *ro;
+ struct rm_atomic_op *ao;
+
+ ro = &rm->rdma;
+ if (ro->op_active && ro->op_notify && ro->op_notifier) {
+ ro->op_notifier->n_status = status;
+ list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
+ ro->op_notifier = NULL;
+ }
- ro = rm->m_rdma_op;
- if (ro && ro->r_notify && ro->r_notifier) {
- ro->r_notifier->n_status = status;
- list_add_tail(&ro->r_notifier->n_list, &rs->rs_notify_queue);
- ro->r_notifier = NULL;
+ ao = &rm->atomic;
+ if (ao->op_active && ao->op_notify && ao->op_notifier) {
+ ao->op_notifier->n_status = status;
+ list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
+ ao->op_notifier = NULL;
}
/* No need to wake the app - caller does this */
@@ -468,7 +517,7 @@ __rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status
* So speed is not an issue here.
*/
struct rds_message *rds_send_get_message(struct rds_connection *conn,
- struct rds_rdma_op *op)
+ struct rm_rdma_op *op)
{
struct rds_message *rm, *tmp, *found = NULL;
unsigned long flags;
@@ -476,7 +525,7 @@ struct rds_message *rds_send_get_message(struct rds_connection *conn,
spin_lock_irqsave(&conn->c_lock, flags);
list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
- if (rm->m_rdma_op == op) {
+ if (&rm->rdma == op) {
atomic_inc(&rm->m_refcount);
found = rm;
goto out;
@@ -484,7 +533,7 @@ struct rds_message *rds_send_get_message(struct rds_connection *conn,
}
list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
- if (rm->m_rdma_op == op) {
+ if (&rm->rdma == op) {
atomic_inc(&rm->m_refcount);
found = rm;
break;
@@ -544,19 +593,20 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
spin_lock(&rs->rs_lock);
if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
- struct rds_rdma_op *ro = rm->m_rdma_op;
+ struct rm_rdma_op *ro = &rm->rdma;
struct rds_notifier *notifier;
list_del_init(&rm->m_sock_item);
rds_send_sndbuf_remove(rs, rm);
- if (ro && ro->r_notifier && (status || ro->r_notify)) {
- notifier = ro->r_notifier;
+ if (ro->op_active && ro->op_notifier &&
+ (ro->op_notify || (ro->op_recverr && status))) {
+ notifier = ro->op_notifier;
list_add_tail(&notifier->n_list,
&rs->rs_notify_queue);
if (!notifier->n_status)
notifier->n_status = status;
- rm->m_rdma_op->r_notifier = NULL;
+ rm->rdma.op_notifier = NULL;
}
was_on_sock = 1;
rm->m_rs = NULL;
@@ -619,9 +669,8 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
struct rds_message *rm, *tmp;
struct rds_connection *conn;
- unsigned long flags, flags2;
+ unsigned long flags;
LIST_HEAD(list);
- int wake = 0;
/* get all the messages we're dropping under the rs lock */
spin_lock_irqsave(&rs->rs_lock, flags);
@@ -631,59 +680,54 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
dest->sin_port != rm->m_inc.i_hdr.h_dport))
continue;
- wake = 1;
list_move(&rm->m_sock_item, &list);
rds_send_sndbuf_remove(rs, rm);
clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
}
/* order flag updates with the rs lock */
- if (wake)
- smp_mb__after_clear_bit();
+ smp_mb__after_clear_bit();
spin_unlock_irqrestore(&rs->rs_lock, flags);
- conn = NULL;
+ if (list_empty(&list))
+ return;
- /* now remove the messages from the conn list as needed */
+ /* Remove the messages from the conn */
list_for_each_entry(rm, &list, m_sock_item) {
- /* We do this here rather than in the loop above, so that
- * we don't have to nest m_rs_lock under rs->rs_lock */
- spin_lock_irqsave(&rm->m_rs_lock, flags2);
- /* If this is a RDMA operation, notify the app. */
- spin_lock(&rs->rs_lock);
- __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
- spin_unlock(&rs->rs_lock);
- rm->m_rs = NULL;
- spin_unlock_irqrestore(&rm->m_rs_lock, flags2);
+ conn = rm->m_inc.i_conn;
+
+ spin_lock_irqsave(&conn->c_lock, flags);
/*
- * If we see this flag cleared then we're *sure* that someone
- * else beat us to removing it from the conn. If we race
- * with their flag update we'll get the lock and then really
- * see that the flag has been cleared.
+ * Maybe someone else beat us to removing rm from the conn.
+ * If we race with their flag update we'll get the lock and
+ * then really see that the flag has been cleared.
*/
- if (!test_bit(RDS_MSG_ON_CONN, &rm->m_flags))
+ if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
+ spin_unlock_irqrestore(&conn->c_lock, flags);
continue;
-
- if (conn != rm->m_inc.i_conn) {
- if (conn)
- spin_unlock_irqrestore(&conn->c_lock, flags);
- conn = rm->m_inc.i_conn;
- spin_lock_irqsave(&conn->c_lock, flags);
}
+ list_del_init(&rm->m_conn_item);
+ spin_unlock_irqrestore(&conn->c_lock, flags);
- if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
- list_del_init(&rm->m_conn_item);
- rds_message_put(rm);
- }
- }
+ /*
+ * Couldn't grab m_rs_lock in top loop (lock ordering),
+ * but we can now.
+ */
+ spin_lock_irqsave(&rm->m_rs_lock, flags);
- if (conn)
- spin_unlock_irqrestore(&conn->c_lock, flags);
+ spin_lock(&rs->rs_lock);
+ __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
+ spin_unlock(&rs->rs_lock);
- if (wake)
- rds_wake_sk_sleep(rs);
+ rm->m_rs = NULL;
+ spin_unlock_irqrestore(&rm->m_rs_lock, flags);
+
+ rds_message_put(rm);
+ }
+
+ rds_wake_sk_sleep(rs);
while (!list_empty(&list)) {
rm = list_entry(list.next, struct rds_message, m_sock_item);
@@ -763,6 +807,63 @@ out:
return *queued;
}
+/*
+ * rds_message is getting to be quite complicated, and we'd like to allocate
+ * it all in one go. This figures out how big it needs to be up front.
+ */
+static int rds_rm_size(struct msghdr *msg, int data_len)
+{
+ struct cmsghdr *cmsg;
+ int size = 0;
+ int cmsg_groups = 0;
+ int retval;
+
+ for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ if (!CMSG_OK(msg, cmsg))
+ return -EINVAL;
+
+ if (cmsg->cmsg_level != SOL_RDS)
+ continue;
+
+ switch (cmsg->cmsg_type) {
+ case RDS_CMSG_RDMA_ARGS:
+ cmsg_groups |= 1;
+ retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
+ if (retval < 0)
+ return retval;
+ size += retval;
+
+ break;
+
+ case RDS_CMSG_RDMA_DEST:
+ case RDS_CMSG_RDMA_MAP:
+ cmsg_groups |= 2;
+ /* these are valid but do not add any size */
+ break;
+
+ case RDS_CMSG_ATOMIC_CSWP:
+ case RDS_CMSG_ATOMIC_FADD:
+ case RDS_CMSG_MASKED_ATOMIC_CSWP:
+ case RDS_CMSG_MASKED_ATOMIC_FADD:
+ cmsg_groups |= 1;
+ size += sizeof(struct scatterlist);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ }
+
+ size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
+
+ /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
+ if (cmsg_groups == 3)
+ return -EINVAL;
+
+ return size;
+}
+
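
rds_rm_size() pre-computes how much scatterlist space the message will need so rds_message_alloc() can grab everything in one allocation: one sg entry per page of payload plus one per atomic op, while RDMA_ARGS adds whatever rds_rdma_extra_size() reports. The cmsg_groups mask also rejects mixing the DEST/MAP cookie style with ARGS/atomic ops in one call (mask value 3). A worked example of the rounding, assuming 4 KiB pages (rds_data_sg_count() is an illustrative stand-in for the ceil() helper it mirrors):

	/*
	 * e.g. a 9000 byte payload needs ceil(9000, 4096) = 3 data sg entries;
	 * one RDS_CMSG_ATOMIC_CSWP cmsg on the same sendmsg() adds a 4th.
	 */
	static unsigned int rds_data_sg_count(unsigned int data_len)
	{
		return (data_len + PAGE_SIZE - 1) / PAGE_SIZE;
	}
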
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
struct msghdr *msg, int *allocated_mr)
{
@@ -777,7 +878,7 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
continue;
/* As a side effect, RDMA_DEST and RDMA_MAP will set
- * rm->m_rdma_cookie and rm->m_rdma_mr.
+ * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
*/
switch (cmsg->cmsg_type) {
case RDS_CMSG_RDMA_ARGS:
@@ -793,6 +894,12 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
if (!ret)
*allocated_mr = 1;
break;
+ case RDS_CMSG_ATOMIC_CSWP:
+ case RDS_CMSG_ATOMIC_FADD:
+ case RDS_CMSG_MASKED_ATOMIC_CSWP:
+ case RDS_CMSG_MASKED_ATOMIC_FADD:
+ ret = rds_cmsg_atomic(rs, rm, cmsg);
+ break;
default:
return -EINVAL;
@@ -850,13 +957,26 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
goto out;
}
- rm = rds_message_copy_from_user(msg->msg_iov, payload_len);
- if (IS_ERR(rm)) {
- ret = PTR_ERR(rm);
- rm = NULL;
+ /* size of rm including all sgs */
+ ret = rds_rm_size(msg, payload_len);
+ if (ret < 0)
+ goto out;
+
+ rm = rds_message_alloc(ret, GFP_KERNEL);
+ if (!rm) {
+ ret = -ENOMEM;
goto out;
}
+ /* Attach data to the rm */
+ if (payload_len) {
+ rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
+ ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
+ if (ret)
+ goto out;
+ }
+ rm->data.op_active = 1;
+
rm->m_daddr = daddr;
/* rds_conn_create has a spinlock that runs with IRQ off.
@@ -879,22 +999,23 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
if (ret)
goto out;
- if ((rm->m_rdma_cookie || rm->m_rdma_op) &&
- conn->c_trans->xmit_rdma == NULL) {
+ if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
if (printk_ratelimit())
printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
- rm->m_rdma_op, conn->c_trans->xmit_rdma);
+ &rm->rdma, conn->c_trans->xmit_rdma);
ret = -EOPNOTSUPP;
goto out;
}
- /* If the connection is down, trigger a connect. We may
- * have scheduled a delayed reconnect however - in this case
- * we should not interfere.
- */
- if (rds_conn_state(conn) == RDS_CONN_DOWN &&
- !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
- queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
+ if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
+ if (printk_ratelimit())
+ printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
+ &rm->atomic, conn->c_trans->xmit_atomic);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ rds_conn_connect_if_down(conn);
ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
if (ret) {
@@ -938,7 +1059,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
rds_stats_inc(s_send_queued);
if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
- rds_send_worker(&conn->c_send_w.work);
+ rds_send_xmit(conn);
rds_message_put(rm);
return payload_len;
@@ -966,20 +1087,15 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
int ret = 0;
rm = rds_message_alloc(0, GFP_ATOMIC);
- if (rm == NULL) {
+ if (!rm) {
ret = -ENOMEM;
goto out;
}
rm->m_daddr = conn->c_faddr;
+ rm->data.op_active = 1;
- /* If the connection is down, trigger a connect. We may
- * have scheduled a delayed reconnect however - in this case
- * we should not interfere.
- */
- if (rds_conn_state(conn) == RDS_CONN_DOWN &&
- !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
- queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
+ rds_conn_connect_if_down(conn);
ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
if (ret)
@@ -999,7 +1115,9 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
rds_stats_inc(s_send_queued);
rds_stats_inc(s_send_pong);
- queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+ if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
+ rds_send_xmit(conn);
+
rds_message_put(rm);
return 0;
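
Both call sites above now go through rds_conn_connect_if_down(); the helper itself is not in this file, but its body is essentially the open-coded check removed here. A sketch under that assumption:

	void rds_conn_connect_if_down(struct rds_connection *conn)
	{
		if (rds_conn_state(conn) == RDS_CONN_DOWN &&
		    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
			queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
	}
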
diff --git a/net/rds/stats.c b/net/rds/stats.c
index 7598eb07cfb1..10c759ccac0c 100644
--- a/net/rds/stats.c
+++ b/net/rds/stats.c
@@ -57,8 +57,8 @@ static const char *const rds_stat_names[] = {
"recv_ping",
"send_queue_empty",
"send_queue_full",
- "send_sem_contention",
- "send_sem_queue_raced",
+ "send_lock_contention",
+ "send_lock_queue_raced",
"send_immediate_retry",
"send_delayed_retry",
"send_drop_acked",
@@ -143,7 +143,7 @@ void rds_stats_exit(void)
rds_info_deregister_func(RDS_INFO_COUNTERS, rds_stats_info);
}
-int __init rds_stats_init(void)
+int rds_stats_init(void)
{
rds_info_register_func(RDS_INFO_COUNTERS, rds_stats_info);
return 0;
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index 7829a20325d3..25ad0c77a26c 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -105,13 +105,13 @@ void rds_sysctl_exit(void)
unregister_sysctl_table(rds_sysctl_reg_table);
}
-int __init rds_sysctl_init(void)
+int rds_sysctl_init(void)
{
rds_sysctl_reconnect_min = msecs_to_jiffies(1);
rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;
rds_sysctl_reg_table = register_sysctl_paths(rds_sysctl_path, rds_sysctl_rds_table);
- if (rds_sysctl_reg_table == NULL)
+ if (!rds_sysctl_reg_table)
return -ENOMEM;
return 0;
}
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index babf4577ff7d..eeb08e6ab96b 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -200,7 +200,7 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
struct rds_tcp_connection *tc;
tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
- if (tc == NULL)
+ if (!tc)
return -ENOMEM;
tc->t_sock = NULL;
@@ -258,7 +258,6 @@ struct rds_transport rds_tcp_transport = {
.laddr_check = rds_tcp_laddr_check,
.xmit_prepare = rds_tcp_xmit_prepare,
.xmit_complete = rds_tcp_xmit_complete,
- .xmit_cong_map = rds_tcp_xmit_cong_map,
.xmit = rds_tcp_xmit,
.recv = rds_tcp_recv,
.conn_alloc = rds_tcp_conn_alloc,
@@ -266,7 +265,6 @@ struct rds_transport rds_tcp_transport = {
.conn_connect = rds_tcp_conn_connect,
.conn_shutdown = rds_tcp_conn_shutdown,
.inc_copy_to_user = rds_tcp_inc_copy_to_user,
- .inc_purge = rds_tcp_inc_purge,
.inc_free = rds_tcp_inc_free,
.stats_info_copy = rds_tcp_stats_info_copy,
.exit = rds_tcp_exit,
@@ -276,14 +274,14 @@ struct rds_transport rds_tcp_transport = {
.t_prefer_loopback = 1,
};
-int __init rds_tcp_init(void)
+int rds_tcp_init(void)
{
int ret;
rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
sizeof(struct rds_tcp_connection),
0, 0, NULL);
- if (rds_tcp_conn_slab == NULL) {
+ if (!rds_tcp_conn_slab) {
ret = -ENOMEM;
goto out;
}
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 844fa6b9cf5a..f5e6f7bebb50 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -43,7 +43,7 @@ struct rds_tcp_statistics {
};
/* tcp.c */
-int __init rds_tcp_init(void);
+int rds_tcp_init(void);
void rds_tcp_exit(void);
void rds_tcp_tune(struct socket *sock);
void rds_tcp_nonagle(struct socket *sock);
@@ -61,16 +61,15 @@ void rds_tcp_conn_shutdown(struct rds_connection *conn);
void rds_tcp_state_change(struct sock *sk);
/* tcp_listen.c */
-int __init rds_tcp_listen_init(void);
+int rds_tcp_listen_init(void);
void rds_tcp_listen_stop(void);
void rds_tcp_listen_data_ready(struct sock *sk, int bytes);
/* tcp_recv.c */
-int __init rds_tcp_recv_init(void);
+int rds_tcp_recv_init(void);
void rds_tcp_recv_exit(void);
void rds_tcp_data_ready(struct sock *sk, int bytes);
int rds_tcp_recv(struct rds_connection *conn);
-void rds_tcp_inc_purge(struct rds_incoming *inc);
void rds_tcp_inc_free(struct rds_incoming *inc);
int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
size_t size);
@@ -81,8 +80,6 @@ void rds_tcp_xmit_complete(struct rds_connection *conn);
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_tcp_write_space(struct sock *sk);
-int rds_tcp_xmit_cong_map(struct rds_connection *conn,
- struct rds_cong_map *map, unsigned long offset);
/* tcp_stats.c */
DECLARE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats);
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index c519939e8da9..af95c8e058fc 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -45,7 +45,7 @@ void rds_tcp_state_change(struct sock *sk)
read_lock_bh(&sk->sk_callback_lock);
conn = sk->sk_user_data;
- if (conn == NULL) {
+ if (!conn) {
state_change = sk->sk_state_change;
goto out;
}
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 27844f231d10..8b5cc4aa8868 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -116,7 +116,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
read_lock_bh(&sk->sk_callback_lock);
ready = sk->sk_user_data;
- if (ready == NULL) { /* check for teardown race */
+ if (!ready) { /* check for teardown race */
ready = sk->sk_data_ready;
goto out;
}
@@ -135,7 +135,7 @@ out:
ready(sk, bytes);
}
-int __init rds_tcp_listen_init(void)
+int rds_tcp_listen_init(void)
{
struct sockaddr_in sin;
struct socket *sock = NULL;
@@ -178,7 +178,7 @@ void rds_tcp_listen_stop(void)
struct socket *sock = rds_tcp_listen_sock;
struct sock *sk;
- if (sock == NULL)
+ if (!sock)
return;
sk = sock->sk;
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index e43797404102..67263fbee623 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -39,7 +39,7 @@
static struct kmem_cache *rds_tcp_incoming_slab;
-void rds_tcp_inc_purge(struct rds_incoming *inc)
+static void rds_tcp_inc_purge(struct rds_incoming *inc)
{
struct rds_tcp_incoming *tinc;
tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
@@ -190,10 +190,10 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
* processing.
*/
while (left) {
- if (tinc == NULL) {
+ if (!tinc) {
tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
arg->gfp);
- if (tinc == NULL) {
+ if (!tinc) {
desc->error = -ENOMEM;
goto out;
}
@@ -229,7 +229,7 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
if (left && tc->t_tinc_data_rem) {
clone = skb_clone(skb, arg->gfp);
- if (clone == NULL) {
+ if (!clone) {
desc->error = -ENOMEM;
goto out;
}
@@ -326,7 +326,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
read_lock_bh(&sk->sk_callback_lock);
conn = sk->sk_user_data;
- if (conn == NULL) { /* check for teardown race */
+ if (!conn) { /* check for teardown race */
ready = sk->sk_data_ready;
goto out;
}
@@ -342,12 +342,12 @@ out:
ready(sk, bytes);
}
-int __init rds_tcp_recv_init(void)
+int rds_tcp_recv_init(void)
{
rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
sizeof(struct rds_tcp_incoming),
0, 0, NULL);
- if (rds_tcp_incoming_slab == NULL)
+ if (!rds_tcp_incoming_slab)
return -ENOMEM;
return 0;
}
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 2f012a07d94d..aa16841afbdf 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -77,56 +77,6 @@ int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
}
/* the core send_sem serializes this with other xmit and shutdown */
-int rds_tcp_xmit_cong_map(struct rds_connection *conn,
- struct rds_cong_map *map, unsigned long offset)
-{
- static struct rds_header rds_tcp_map_header = {
- .h_flags = RDS_FLAG_CONG_BITMAP,
- };
- struct rds_tcp_connection *tc = conn->c_transport_data;
- unsigned long i;
- int ret;
- int copied = 0;
-
- /* Some problem claims cpu_to_be32(constant) isn't a constant. */
- rds_tcp_map_header.h_len = cpu_to_be32(RDS_CONG_MAP_BYTES);
-
- if (offset < sizeof(struct rds_header)) {
- ret = rds_tcp_sendmsg(tc->t_sock,
- (void *)&rds_tcp_map_header + offset,
- sizeof(struct rds_header) - offset);
- if (ret <= 0)
- return ret;
- offset += ret;
- copied = ret;
- if (offset < sizeof(struct rds_header))
- return ret;
- }
-
- offset -= sizeof(struct rds_header);
- i = offset / PAGE_SIZE;
- offset = offset % PAGE_SIZE;
- BUG_ON(i >= RDS_CONG_MAP_PAGES);
-
- do {
- ret = tc->t_sock->ops->sendpage(tc->t_sock,
- virt_to_page(map->m_page_addrs[i]),
- offset, PAGE_SIZE - offset,
- MSG_DONTWAIT);
- if (ret <= 0)
- break;
- copied += ret;
- offset += ret;
- if (offset == PAGE_SIZE) {
- offset = 0;
- i++;
- }
- } while (i < RDS_CONG_MAP_PAGES);
-
- return copied ? copied : ret;
-}
-
-/* the core send_sem serializes this with other xmit and shutdown */
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg, unsigned int off)
{
@@ -166,21 +116,21 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
goto out;
}
- while (sg < rm->m_nents) {
+ while (sg < rm->data.op_nents) {
ret = tc->t_sock->ops->sendpage(tc->t_sock,
- sg_page(&rm->m_sg[sg]),
- rm->m_sg[sg].offset + off,
- rm->m_sg[sg].length - off,
+ sg_page(&rm->data.op_sg[sg]),
+ rm->data.op_sg[sg].offset + off,
+ rm->data.op_sg[sg].length - off,
MSG_DONTWAIT|MSG_NOSIGNAL);
- rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->m_sg[sg]),
- rm->m_sg[sg].offset + off, rm->m_sg[sg].length - off,
+ rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]),
+ rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off,
ret);
if (ret <= 0)
break;
off += ret;
done += ret;
- if (off == rm->m_sg[sg].length) {
+ if (off == rm->data.op_sg[sg].length) {
off = 0;
sg++;
}
@@ -226,7 +176,7 @@ void rds_tcp_write_space(struct sock *sk)
read_lock_bh(&sk->sk_callback_lock);
conn = sk->sk_user_data;
- if (conn == NULL) {
+ if (!conn) {
write_space = sk->sk_write_space;
goto out;
}
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 786c20eaaf5e..0fd90f8c5f59 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -61,7 +61,7 @@
*
* Transition to state DISCONNECTING/DOWN:
* - Inside the shutdown worker; synchronizes with xmit path
- * through c_send_lock, and with connection management callbacks
+ * through RDS_IN_XMIT, and with connection management callbacks
* via c_cm_lock.
*
* For receive callbacks, we rely on the underlying transport
@@ -110,7 +110,7 @@ EXPORT_SYMBOL_GPL(rds_connect_complete);
* We should *always* start with a random backoff; otherwise a broken connection
* will always take several iterations to be re-established.
*/
-static void rds_queue_reconnect(struct rds_connection *conn)
+void rds_queue_reconnect(struct rds_connection *conn)
{
unsigned long rand;
@@ -156,58 +156,6 @@ void rds_connect_worker(struct work_struct *work)
}
}
-void rds_shutdown_worker(struct work_struct *work)
-{
- struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w);
-
- /* shut it down unless it's down already */
- if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
- /*
- * Quiesce the connection mgmt handlers before we start tearing
- * things down. We don't hold the mutex for the entire
- * duration of the shutdown operation, else we may be
- * deadlocking with the CM handler. Instead, the CM event
- * handler is supposed to check for state DISCONNECTING
- */
- mutex_lock(&conn->c_cm_lock);
- if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING) &&
- !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
- rds_conn_error(conn, "shutdown called in state %d\n",
- atomic_read(&conn->c_state));
- mutex_unlock(&conn->c_cm_lock);
- return;
- }
- mutex_unlock(&conn->c_cm_lock);
-
- mutex_lock(&conn->c_send_lock);
- conn->c_trans->conn_shutdown(conn);
- rds_conn_reset(conn);
- mutex_unlock(&conn->c_send_lock);
-
- if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
- /* This can happen - eg when we're in the middle of tearing
- * down the connection, and someone unloads the rds module.
- * Quite reproduceable with loopback connections.
- * Mostly harmless.
- */
- rds_conn_error(conn,
- "%s: failed to transition to state DOWN, "
- "current state is %d\n",
- __func__,
- atomic_read(&conn->c_state));
- return;
- }
- }
-
- /* Then reconnect if it's still live.
- * The passive side of an IB loopback connection is never added
- * to the conn hash, so we never trigger a reconnect on this
- * conn - the reconnect is always triggered by the active peer. */
- cancel_delayed_work(&conn->c_conn_w);
- if (!hlist_unhashed(&conn->c_hash_node))
- rds_queue_reconnect(conn);
-}
-
void rds_send_worker(struct work_struct *work)
{
struct rds_connection *conn = container_of(work, struct rds_connection, c_send_w.work);
@@ -252,15 +200,22 @@ void rds_recv_worker(struct work_struct *work)
}
}
+void rds_shutdown_worker(struct work_struct *work)
+{
+ struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w);
+
+ rds_conn_shutdown(conn);
+}
+
void rds_threads_exit(void)
{
destroy_workqueue(rds_wq);
}
-int __init rds_threads_init(void)
+int rds_threads_init(void)
{
- rds_wq = create_workqueue("krdsd");
- if (rds_wq == NULL)
+ rds_wq = create_singlethread_workqueue("krdsd");
+ if (!rds_wq)
return -ENOMEM;
return 0;
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 7e1067901353..7f2ac4fec367 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -71,19 +71,28 @@ void rds_trans_unregister(struct rds_transport *trans)
}
EXPORT_SYMBOL_GPL(rds_trans_unregister);
+void rds_trans_put(struct rds_transport *trans)
+{
+ if (trans && trans->t_owner)
+ module_put(trans->t_owner);
+}
+
struct rds_transport *rds_trans_get_preferred(__be32 addr)
{
struct rds_transport *ret = NULL;
- int i;
+ struct rds_transport *trans;
+ unsigned int i;
if (IN_LOOPBACK(ntohl(addr)))
return &rds_loop_transport;
down_read(&rds_trans_sem);
- for (i = 0; i < RDS_TRANS_COUNT; i++)
- {
- if (transports[i] && (transports[i]->laddr_check(addr) == 0)) {
- ret = transports[i];
+ for (i = 0; i < RDS_TRANS_COUNT; i++) {
+ trans = transports[i];
+
+ if (trans && (trans->laddr_check(addr) == 0) &&
+ (!trans->t_owner || try_module_get(trans->t_owner))) {
+ ret = trans;
break;
}
}
diff --git a/net/rds/xlist.h b/net/rds/xlist.h
new file mode 100644
index 000000000000..e6b5190daddd
--- /dev/null
+++ b/net/rds/xlist.h
@@ -0,0 +1,80 @@
+#ifndef _LINUX_XLIST_H
+#define _LINUX_XLIST_H
+
+#include <linux/stddef.h>
+#include <linux/poison.h>
+#include <linux/prefetch.h>
+#include <asm/system.h>
+
+struct xlist_head {
+ struct xlist_head *next;
+};
+
+static inline void INIT_XLIST_HEAD(struct xlist_head *list)
+{
+ list->next = NULL;
+}
+
+static inline int xlist_empty(struct xlist_head *head)
+{
+ return head->next == NULL;
+}
+
+static inline void xlist_add(struct xlist_head *new, struct xlist_head *tail,
+ struct xlist_head *head)
+{
+ struct xlist_head *cur;
+ struct xlist_head *check;
+
+ while (1) {
+ cur = head->next;
+ tail->next = cur;
+ check = cmpxchg(&head->next, cur, new);
+ if (check == cur)
+ break;
+ }
+}
+
+static inline struct xlist_head *xlist_del_head(struct xlist_head *head)
+{
+ struct xlist_head *cur;
+ struct xlist_head *check;
+ struct xlist_head *next;
+
+ while (1) {
+ cur = head->next;
+ if (!cur)
+ goto out;
+
+ next = cur->next;
+ check = cmpxchg(&head->next, cur, next);
+ if (check == cur)
+ goto out;
+ }
+out:
+ return cur;
+}
+
+static inline struct xlist_head *xlist_del_head_fast(struct xlist_head *head)
+{
+ struct xlist_head *cur;
+
+ cur = head->next;
+ if (!cur)
+ return NULL;
+
+ head->next = cur->next;
+ return cur;
+}
+
+static inline void xlist_splice(struct xlist_head *list,
+ struct xlist_head *head)
+{
+ struct xlist_head *cur;
+
+ WARN_ON(head->next);
+ cur = xchg(&list->next, NULL);
+ head->next = cur;
+}
+
+#endif
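
xlist is a minimal lock-free LIFO: xlist_add() and xlist_del_head() retry a cmpxchg() on head->next, so concurrent producers and consumers are safe, while xlist_del_head_fast() and xlist_splice() assume the caller has excluded other writers. A hedged usage sketch (struct frag and frag_pool are made-up names, not part of this header):

	struct frag {
		struct xlist_head f_xlist;
		struct page *f_page;
	};

	static struct xlist_head frag_pool;	/* INIT_XLIST_HEAD() at init time */

	static void frag_free(struct frag *f)
	{
		/* pushing a single element: new and tail are the same node */
		xlist_add(&f->f_xlist, &f->f_xlist, &frag_pool);
	}

	static struct frag *frag_alloc(void)
	{
		struct xlist_head *x = xlist_del_head(&frag_pool);

		return x ? container_of(x, struct frag, f_xlist) : NULL;
	}
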
diff --git a/net/rfkill/input.c b/net/rfkill/input.c
index 3713d7ecab96..1bca6d49ec96 100644
--- a/net/rfkill/input.c
+++ b/net/rfkill/input.c
@@ -142,7 +142,7 @@ static unsigned long rfkill_last_scheduled;
static unsigned long rfkill_ratelimit(const unsigned long last)
{
const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
- return (time_after(jiffies, last + delay)) ? 0 : delay;
+ return time_after(jiffies, last + delay) ? 0 : delay;
}
static void rfkill_schedule_ratelimited(void)
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index a750a28e0221..fa5f5641a2c2 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -114,7 +114,7 @@ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
if (ax25s)
ax25_cb_put(ax25s);
- return (neigh->ax25 != NULL);
+ return neigh->ax25 != NULL;
}
/*
@@ -137,7 +137,7 @@ static int rose_link_up(struct rose_neigh *neigh)
if (ax25s)
ax25_cb_put(ax25s);
- return (neigh->ax25 != NULL);
+ return neigh->ax25 != NULL;
}
/*
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 2f691fb180d1..a36270a994d7 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -518,6 +518,16 @@ config NET_ACT_SKBEDIT
To compile this code as a module, choose M here: the
module will be called act_skbedit.
+config NET_ACT_CSUM
+ tristate "Checksum Updating"
+ depends on NET_CLS_ACT && INET
+ ---help---
+ Say Y here to update common checksums after direct
+ packet alterations.
+
+ To compile this code as a module, choose M here: the
+ module will be called act_csum.
+
config NET_CLS_IND
bool "Incoming device classification"
depends on NET_CLS_U32 || NET_CLS_FW
diff --git a/net/sched/Makefile b/net/sched/Makefile
index f14e71bfa58f..960f5dba6304 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_ACT_NAT) += act_nat.o
obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o
obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o
obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o
+obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o
obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
new file mode 100644
index 000000000000..67dc7ce9b63a
--- /dev/null
+++ b/net/sched/act_csum.c
@@ -0,0 +1,595 @@
+/*
+ * Checksum updating actions
+ *
+ * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+#include <linux/netlink.h>
+#include <net/netlink.h>
+#include <linux/rtnetlink.h>
+
+#include <linux/skbuff.h>
+
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/igmp.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/ip6_checksum.h>
+
+#include <net/act_api.h>
+
+#include <linux/tc_act/tc_csum.h>
+#include <net/tc_act/tc_csum.h>
+
+#define CSUM_TAB_MASK 15
+static struct tcf_common *tcf_csum_ht[CSUM_TAB_MASK + 1];
+static u32 csum_idx_gen;
+static DEFINE_RWLOCK(csum_lock);
+
+static struct tcf_hashinfo csum_hash_info = {
+ .htab = tcf_csum_ht,
+ .hmask = CSUM_TAB_MASK,
+ .lock = &csum_lock,
+};
+
+static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
+ [TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
+};
+
+static int tcf_csum_init(struct nlattr *nla, struct nlattr *est,
+ struct tc_action *a, int ovr, int bind)
+{
+ struct nlattr *tb[TCA_CSUM_MAX + 1];
+ struct tc_csum *parm;
+ struct tcf_common *pc;
+ struct tcf_csum *p;
+ int ret = 0, err;
+
+ if (nla == NULL)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
+ if (err < 0)
+ return err;
+
+ if (tb[TCA_CSUM_PARMS] == NULL)
+ return -EINVAL;
+ parm = nla_data(tb[TCA_CSUM_PARMS]);
+
+ pc = tcf_hash_check(parm->index, a, bind, &csum_hash_info);
+ if (!pc) {
+ pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
+ &csum_idx_gen, &csum_hash_info);
+ if (IS_ERR(pc))
+ return PTR_ERR(pc);
+ p = to_tcf_csum(pc);
+ ret = ACT_P_CREATED;
+ } else {
+ p = to_tcf_csum(pc);
+ if (!ovr) {
+ tcf_hash_release(pc, bind, &csum_hash_info);
+ return -EEXIST;
+ }
+ }
+
+ spin_lock_bh(&p->tcf_lock);
+ p->tcf_action = parm->action;
+ p->update_flags = parm->update_flags;
+ spin_unlock_bh(&p->tcf_lock);
+
+ if (ret == ACT_P_CREATED)
+ tcf_hash_insert(pc, &csum_hash_info);
+
+ return ret;
+}
+
+static int tcf_csum_cleanup(struct tc_action *a, int bind)
+{
+ struct tcf_csum *p = a->priv;
+ return tcf_hash_release(&p->common, bind, &csum_hash_info);
+}
+
+/**
+ * tcf_csum_skb_nextlayer - Get next layer pointer
+ * @skb: sk_buff to use
+ * @ihl: previous summed headers length
+ * @ipl: complete packet length
+ * @jhl: next header length
+ *
+ * Check that the expected next layer is available in the specified sk_buff.
+ * Return a pointer to the next layer if the checks pass, NULL otherwise.
+ */
+static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
+ unsigned int ihl, unsigned int ipl,
+ unsigned int jhl)
+{
+ int ntkoff = skb_network_offset(skb);
+ int hl = ihl + jhl;
+
+ if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
+ (skb_cloned(skb) &&
+ !skb_clone_writable(skb, hl + ntkoff) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+ return NULL;
+ else
+ return (void *)(skb_network_header(skb) + ihl);
+}
+
+static int tcf_csum_ipv4_icmp(struct sk_buff *skb,
+ unsigned int ihl, unsigned int ipl)
+{
+ struct icmphdr *icmph;
+
+ icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
+ if (icmph == NULL)
+ return 0;
+
+ icmph->checksum = 0;
+ skb->csum = csum_partial(icmph, ipl - ihl, 0);
+ icmph->checksum = csum_fold(skb->csum);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return 1;
+}
+
+static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
+ unsigned int ihl, unsigned int ipl)
+{
+ struct igmphdr *igmph;
+
+ igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
+ if (igmph == NULL)
+ return 0;
+
+ igmph->csum = 0;
+ skb->csum = csum_partial(igmph, ipl - ihl, 0);
+ igmph->csum = csum_fold(skb->csum);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return 1;
+}
+
+static int tcf_csum_ipv6_icmp(struct sk_buff *skb, struct ipv6hdr *ip6h,
+ unsigned int ihl, unsigned int ipl)
+{
+ struct icmp6hdr *icmp6h;
+
+ icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
+ if (icmp6h == NULL)
+ return 0;
+
+ icmp6h->icmp6_cksum = 0;
+ skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
+ icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ ipl - ihl, IPPROTO_ICMPV6,
+ skb->csum);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return 1;
+}
+
+static int tcf_csum_ipv4_tcp(struct sk_buff *skb, struct iphdr *iph,
+ unsigned int ihl, unsigned int ipl)
+{
+ struct tcphdr *tcph;
+
+ tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
+ if (tcph == NULL)
+ return 0;
+
+ tcph->check = 0;
+ skb->csum = csum_partial(tcph, ipl - ihl, 0);
+ tcph->check = tcp_v4_check(ipl - ihl,
+ iph->saddr, iph->daddr, skb->csum);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return 1;
+}
+
+static int tcf_csum_ipv6_tcp(struct sk_buff *skb, struct ipv6hdr *ip6h,
+ unsigned int ihl, unsigned int ipl)
+{
+ struct tcphdr *tcph;
+
+ tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
+ if (tcph == NULL)
+ return 0;
+
+ tcph->check = 0;
+ skb->csum = csum_partial(tcph, ipl - ihl, 0);
+ tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ ipl - ihl, IPPROTO_TCP,
+ skb->csum);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return 1;
+}
+
+static int tcf_csum_ipv4_udp(struct sk_buff *skb, struct iphdr *iph,
+ unsigned int ihl, unsigned int ipl, int udplite)
+{
+ struct udphdr *udph;
+ u16 ul;
+
+ /*
+ * Support both UDP and UDP-Lite checksum algorithms. Don't use
+ * udph->len to get the real length without a protocol check;
+ * UDP-Lite uses udph->len for something else.
+ * Use iph->tot_len, or just ipl.
+ */
+
+ udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
+ if (udph == NULL)
+ return 0;
+
+ ul = ntohs(udph->len);
+
+ if (udplite || udph->check) {
+
+ udph->check = 0;
+
+ if (udplite) {
+ if (ul == 0)
+ skb->csum = csum_partial(udph, ipl - ihl, 0);
+ else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
+ skb->csum = csum_partial(udph, ul, 0);
+ else
+ goto ignore_obscure_skb;
+ } else {
+ if (ul != ipl - ihl)
+ goto ignore_obscure_skb;
+
+ skb->csum = csum_partial(udph, ul, 0);
+ }
+
+ udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ ul, iph->protocol,
+ skb->csum);
+
+ if (!udph->check)
+ udph->check = CSUM_MANGLED_0;
+ }
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ignore_obscure_skb:
+ return 1;
+}
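
The "if (!udph->check) udph->check = CSUM_MANGLED_0;" step matters because a transmitted UDP checksum of zero means "checksum disabled" for plain UDP, so a sum that folds to zero must go out as all-ones instead. A minimal sketch of that finalization, factored out for clarity (udp_fold_check() is an illustrative name, not an existing helper):

	static __sum16 udp_fold_check(__be32 saddr, __be32 daddr, u16 len,
				      u8 proto, __wsum payload_sum)
	{
		__sum16 check = csum_tcpudp_magic(saddr, daddr, len, proto,
						  payload_sum);

		/* 0 on the wire means "no checksum", so send all-ones instead */
		return check ? check : CSUM_MANGLED_0;
	}
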
+
+static int tcf_csum_ipv6_udp(struct sk_buff *skb, struct ipv6hdr *ip6h,
+ unsigned int ihl, unsigned int ipl, int udplite)
+{
+ struct udphdr *udph;
+ u16 ul;
+
+ /*
+ * Support both UDP and UDP-Lite checksum algorithms. Don't use
+ * udph->len to get the real length without a protocol check;
+ * UDP-Lite uses udph->len for something else.
+ * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
+ */
+
+ udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
+ if (udph == NULL)
+ return 0;
+
+ ul = ntohs(udph->len);
+
+ udph->check = 0;
+
+ if (udplite) {
+ if (ul == 0)
+ skb->csum = csum_partial(udph, ipl - ihl, 0);
+
+ else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
+ skb->csum = csum_partial(udph, ul, 0);
+
+ else
+ goto ignore_obscure_skb;
+ } else {
+ if (ul != ipl - ihl)
+ goto ignore_obscure_skb;
+
+ skb->csum = csum_partial(udph, ul, 0);
+ }
+
+ udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
+ udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
+ skb->csum);
+
+ if (!udph->check)
+ udph->check = CSUM_MANGLED_0;
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ignore_obscure_skb:
+ return 1;
+}
+
+static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
+{
+ struct iphdr *iph;
+ int ntkoff;
+
+ ntkoff = skb_network_offset(skb);
+
+ if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
+ goto fail;
+
+ iph = ip_hdr(skb);
+
+ switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
+ case IPPROTO_ICMP:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
+ if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
+ ntohs(iph->tot_len)))
+ goto fail;
+ break;
+ case IPPROTO_IGMP:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
+ if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
+ ntohs(iph->tot_len)))
+ goto fail;
+ break;
+ case IPPROTO_TCP:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
+ if (!tcf_csum_ipv4_tcp(skb, iph, iph->ihl * 4,
+ ntohs(iph->tot_len)))
+ goto fail;
+ break;
+ case IPPROTO_UDP:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
+ if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
+ ntohs(iph->tot_len), 0))
+ goto fail;
+ break;
+ case IPPROTO_UDPLITE:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
+ if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
+ ntohs(iph->tot_len), 1))
+ goto fail;
+ break;
+ }
+
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
+ if (skb_cloned(skb) &&
+ !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ goto fail;
+
+ ip_send_check(iph);
+ }
+
+ return 1;
+
+fail:
+ return 0;
+}
+
+static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
+ unsigned int ixhl, unsigned int *pl)
+{
+ int off, len, optlen;
+ unsigned char *xh = (void *)ip6xh;
+
+ off = sizeof(*ip6xh);
+ len = ixhl - off;
+
+ while (len > 1) {
+ switch (xh[off]) {
+ case IPV6_TLV_PAD0:
+ optlen = 1;
+ break;
+ case IPV6_TLV_JUMBO:
+ optlen = xh[off + 1] + 2;
+ if (optlen != 6 || len < 6 || (off & 3) != 2)
+ /* wrong jumbo option length/alignment */
+ return 0;
+ *pl = ntohl(*(__be32 *)(xh + off + 2));
+ goto done;
+ default:
+ optlen = xh[off + 1] + 2;
+ if (optlen > len)
+ /* ignore obscure options */
+ goto done;
+ break;
+ }
+ off += optlen;
+ len -= optlen;
+ }
+
+done:
+ return 1;
+}
+
+static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
+{
+ struct ipv6hdr *ip6h;
+ struct ipv6_opt_hdr *ip6xh;
+ unsigned int hl, ixhl;
+ unsigned int pl;
+ int ntkoff;
+ u8 nexthdr;
+
+ ntkoff = skb_network_offset(skb);
+
+ hl = sizeof(*ip6h);
+
+ if (!pskb_may_pull(skb, hl + ntkoff))
+ goto fail;
+
+ ip6h = ipv6_hdr(skb);
+
+ pl = ntohs(ip6h->payload_len);
+ nexthdr = ip6h->nexthdr;
+
+ do {
+ switch (nexthdr) {
+ case NEXTHDR_FRAGMENT:
+ goto ignore_skb;
+ case NEXTHDR_ROUTING:
+ case NEXTHDR_HOP:
+ case NEXTHDR_DEST:
+ if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
+ goto fail;
+ ip6xh = (void *)(skb_network_header(skb) + hl);
+ ixhl = ipv6_optlen(ip6xh);
+ if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
+ goto fail;
+ if ((nexthdr == NEXTHDR_HOP) &&
+ !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
+ goto fail;
+ nexthdr = ip6xh->nexthdr;
+ hl += ixhl;
+ break;
+ case IPPROTO_ICMPV6:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
+ if (!tcf_csum_ipv6_icmp(skb, ip6h,
+ hl, pl + sizeof(*ip6h)))
+ goto fail;
+ goto done;
+ case IPPROTO_TCP:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
+ if (!tcf_csum_ipv6_tcp(skb, ip6h,
+ hl, pl + sizeof(*ip6h)))
+ goto fail;
+ goto done;
+ case IPPROTO_UDP:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
+ if (!tcf_csum_ipv6_udp(skb, ip6h, hl,
+ pl + sizeof(*ip6h), 0))
+ goto fail;
+ goto done;
+ case IPPROTO_UDPLITE:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
+ if (!tcf_csum_ipv6_udp(skb, ip6h, hl,
+ pl + sizeof(*ip6h), 1))
+ goto fail;
+ goto done;
+ default:
+ goto ignore_skb;
+ }
+ } while (pskb_may_pull(skb, hl + 1 + ntkoff));
+
+done:
+ignore_skb:
+ return 1;
+
+fail:
+ return 0;
+}
+
+static int tcf_csum(struct sk_buff *skb,
+ struct tc_action *a, struct tcf_result *res)
+{
+ struct tcf_csum *p = a->priv;
+ int action;
+ u32 update_flags;
+
+ spin_lock(&p->tcf_lock);
+ p->tcf_tm.lastuse = jiffies;
+ p->tcf_bstats.bytes += qdisc_pkt_len(skb);
+ p->tcf_bstats.packets++;
+ action = p->tcf_action;
+ update_flags = p->update_flags;
+ spin_unlock(&p->tcf_lock);
+
+ if (unlikely(action == TC_ACT_SHOT))
+ goto drop;
+
+ switch (skb->protocol) {
+ case cpu_to_be16(ETH_P_IP):
+ if (!tcf_csum_ipv4(skb, update_flags))
+ goto drop;
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
+ if (!tcf_csum_ipv6(skb, update_flags))
+ goto drop;
+ break;
+ }
+
+ return action;
+
+drop:
+ spin_lock(&p->tcf_lock);
+ p->tcf_qstats.drops++;
+ spin_unlock(&p->tcf_lock);
+ return TC_ACT_SHOT;
+}
+
+static int tcf_csum_dump(struct sk_buff *skb,
+ struct tc_action *a, int bind, int ref)
+{
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tcf_csum *p = a->priv;
+ struct tc_csum opt = {
+ .update_flags = p->update_flags,
+ .index = p->tcf_index,
+ .action = p->tcf_action,
+ .refcnt = p->tcf_refcnt - ref,
+ .bindcnt = p->tcf_bindcnt - bind,
+ };
+ struct tcf_t t;
+
+ NLA_PUT(skb, TCA_CSUM_PARMS, sizeof(opt), &opt);
+ t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
+ t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
+ t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
+ NLA_PUT(skb, TCA_CSUM_TM, sizeof(t), &t);
+
+ return skb->len;
+
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
+static struct tc_action_ops act_csum_ops = {
+ .kind = "csum",
+ .hinfo = &csum_hash_info,
+ .type = TCA_ACT_CSUM,
+ .capab = TCA_CAP_NONE,
+ .owner = THIS_MODULE,
+ .act = tcf_csum,
+ .dump = tcf_csum_dump,
+ .cleanup = tcf_csum_cleanup,
+ .lookup = tcf_hash_search,
+ .init = tcf_csum_init,
+ .walk = tcf_generic_walker
+};
+
+MODULE_DESCRIPTION("Checksum updating actions");
+MODULE_LICENSE("GPL");
+
+static int __init csum_init_module(void)
+{
+ return tcf_register_action(&act_csum_ops);
+}
+
+static void __exit csum_cleanup_module(void)
+{
+ tcf_unregister_action(&act_csum_ops);
+}
+
+module_init(csum_init_module);
+module_exit(csum_cleanup_module);
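For reference, a minimal userspace sketch (assumption: not part of the patch; names invented) of the same next-header walk that tcf_csum_ipv6() performs above. Starting from the fixed 40-byte IPv6 header, each extension header names the one that follows it, and hop-by-hop/routing/destination options carry their own length, so the parser keeps advancing until it reaches a transport protocol or gives up.

/*
 * Illustrative userspace sketch, not kernel code: walk the IPv6
 * extension-header chain in a raw packet buffer and report the transport
 * protocol plus its offset, mirroring the loop in tcf_csum_ipv6() above.
 * Fragments are skipped, as the action does.
 */
#include <stdint.h>
#include <stddef.h>
#include <netinet/in.h>

#define IPV6_HDR_LEN 40

/* Returns the transport protocol, or -1 if the chain cannot be followed;
 * *off receives the offset of the transport header within pkt. */
static int find_ipv6_transport(const uint8_t *pkt, size_t len, size_t *off)
{
	size_t hl = IPV6_HDR_LEN;
	uint8_t nexthdr;

	if (len < IPV6_HDR_LEN)
		return -1;
	nexthdr = pkt[6];			/* IPv6 "next header" field */

	for (;;) {
		switch (nexthdr) {
		case IPPROTO_FRAGMENT:		/* leave fragments alone */
			return -1;
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS: {
			size_t xlen;

			if (len < hl + 8)
				return -1;
			/* same computation as ipv6_optlen(): (len + 1) * 8 */
			xlen = ((size_t)pkt[hl + 1] + 1) * 8;
			if (len < hl + xlen)
				return -1;
			nexthdr = pkt[hl];	/* next header of this extension */
			hl += xlen;
			break;
		}
		default:			/* TCP, UDP, ICMPv6, ... */
			*off = hl;
			return nexthdr;
		}
	}
}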
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index e17096e3913c..5b271a18bc3a 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -111,44 +111,41 @@ static u32 flow_get_proto(struct sk_buff *skb)
}
}
-static int has_ports(u8 protocol)
-{
- switch (protocol) {
- case IPPROTO_TCP:
- case IPPROTO_UDP:
- case IPPROTO_UDPLITE:
- case IPPROTO_SCTP:
- case IPPROTO_DCCP:
- case IPPROTO_ESP:
- return 1;
- default:
- return 0;
- }
-}
-
static u32 flow_get_proto_src(struct sk_buff *skb)
{
switch (skb->protocol) {
case htons(ETH_P_IP): {
struct iphdr *iph;
+ int poff;
if (!pskb_network_may_pull(skb, sizeof(*iph)))
break;
iph = ip_hdr(skb);
- if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
- has_ports(iph->protocol) &&
- pskb_network_may_pull(skb, iph->ihl * 4 + 2))
- return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4));
+ if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+ break;
+ poff = proto_ports_offset(iph->protocol);
+ if (poff >= 0 &&
+ pskb_network_may_pull(skb, iph->ihl * 4 + 2 + poff)) {
+ iph = ip_hdr(skb);
+ return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
+ poff));
+ }
break;
}
case htons(ETH_P_IPV6): {
struct ipv6hdr *iph;
+ int poff;
- if (!pskb_network_may_pull(skb, sizeof(*iph) + 2))
+ if (!pskb_network_may_pull(skb, sizeof(*iph)))
break;
iph = ipv6_hdr(skb);
- if (has_ports(iph->nexthdr))
- return ntohs(*(__be16 *)&iph[1]);
+ poff = proto_ports_offset(iph->nexthdr);
+ if (poff >= 0 &&
+ pskb_network_may_pull(skb, sizeof(*iph) + poff + 2)) {
+ iph = ipv6_hdr(skb);
+ return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
+ poff));
+ }
break;
}
}
@@ -161,24 +158,36 @@ static u32 flow_get_proto_dst(struct sk_buff *skb)
switch (skb->protocol) {
case htons(ETH_P_IP): {
struct iphdr *iph;
+ int poff;
if (!pskb_network_may_pull(skb, sizeof(*iph)))
break;
iph = ip_hdr(skb);
- if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
- has_ports(iph->protocol) &&
- pskb_network_may_pull(skb, iph->ihl * 4 + 4))
- return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2));
+ if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+ break;
+ poff = proto_ports_offset(iph->protocol);
+ if (poff >= 0 &&
+ pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
+ iph = ip_hdr(skb);
+ return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
+ 2 + poff));
+ }
break;
}
case htons(ETH_P_IPV6): {
struct ipv6hdr *iph;
+ int poff;
- if (!pskb_network_may_pull(skb, sizeof(*iph) + 4))
+ if (!pskb_network_may_pull(skb, sizeof(*iph)))
break;
iph = ipv6_hdr(skb);
- if (has_ports(iph->nexthdr))
- return ntohs(*(__be16 *)((void *)&iph[1] + 2));
+ poff = proto_ports_offset(iph->nexthdr);
+ if (poff >= 0 &&
+ pskb_network_may_pull(skb, sizeof(*iph) + poff + 4)) {
+ iph = ipv6_hdr(skb);
+ return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
+ poff + 2));
+ }
break;
}
}
@@ -297,6 +306,11 @@ static u32 flow_get_vlan_tag(const struct sk_buff *skb)
return tag & VLAN_VID_MASK;
}
+static u32 flow_get_rxhash(struct sk_buff *skb)
+{
+ return skb_get_rxhash(skb);
+}
+
static u32 flow_key_get(struct sk_buff *skb, int key)
{
switch (key) {
@@ -334,6 +348,8 @@ static u32 flow_key_get(struct sk_buff *skb, int key)
return flow_get_skgid(skb);
case FLOW_KEY_VLAN_TAG:
return flow_get_vlan_tag(skb);
+ case FLOW_KEY_RXHASH:
+ return flow_get_rxhash(skb);
default:
WARN_ON(1);
return 0;
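The conversion above replaces the open-coded has_ports() list with proto_ports_offset(), which reports where the 16-bit port pair sits inside the transport header so the classifier can read it generically. A simplified userspace analogue (assumption: illustration only, not the kernel helper itself):

/*
 * Simplified analogue of the proto_ports_offset() idea used by cls_flow
 * above, plus a helper that extracts the source port from a raw IPv4
 * packet the way flow_get_proto_src() does.
 */
#include <stdint.h>
#include <stddef.h>
#include <netinet/in.h>

static int ports_offset(int proto)
{
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_SCTP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:	/* SPI sits where the ports would */
		return 0;
	default:
		return -1;	/* no usable port word */
	}
}

/* Returns the source port, or 0 for fragments and portless protocols. */
static uint16_t ipv4_src_port(const uint8_t *pkt, size_t len)
{
	size_t ihl;
	uint16_t frag_off;
	int poff;

	if (len < 20)
		return 0;
	ihl = (size_t)(pkt[0] & 0x0f) * 4;		/* IPv4 header length */
	if (ihl < 20 || len < ihl)
		return 0;
	frag_off = (uint16_t)((pkt[6] << 8) | pkt[7]);
	if (frag_off & 0x3fff)				/* MF set or offset != 0 */
		return 0;
	poff = ports_offset(pkt[9]);			/* pkt[9] = protocol */
	if (poff < 0 || len < ihl + (size_t)poff + 2)
		return 0;
	return (uint16_t)((pkt[ihl + poff] << 8) | pkt[ihl + poff + 1]);
}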
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 3bcac8aa333c..34da5e29ea1a 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -223,6 +223,11 @@ META_COLLECTOR(int_maclen)
dst->value = skb->mac_len;
}
+META_COLLECTOR(int_rxhash)
+{
+ dst->value = skb_get_rxhash(skb);
+}
+
/**************************************************************************
* Netfilter
**************************************************************************/
@@ -541,6 +546,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
[META_ID(SK_SENDMSG_OFF)] = META_FUNC(int_sk_sendmsg_off),
[META_ID(SK_WRITE_PENDING)] = META_FUNC(int_sk_write_pend),
[META_ID(VLAN_TAG)] = META_FUNC(int_vlan_tag),
+ [META_ID(RXHASH)] = META_FUNC(int_rxhash),
}
};
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 408eea7086aa..b8020784d0e9 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -240,7 +240,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
if (q)
goto out;
- q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+ q = qdisc_match_from_root(dev->ingress_queue.qdisc_sleeping, handle);
out:
return q;
}
@@ -360,7 +360,7 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
}
- if (!s || tsize != s->tsize || (!tab && tsize > 0))
+ if (tsize != s->tsize || (!tab && tsize > 0))
return ERR_PTR(-EINVAL);
spin_lock(&qdisc_stab_lock);
@@ -701,7 +701,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
}
for (i = 0; i < num_q; i++) {
- struct netdev_queue *dev_queue = &dev->rx_queue;
+ struct netdev_queue *dev_queue = &dev->ingress_queue;
if (!ingress)
dev_queue = netdev_get_tx_queue(dev, i);
@@ -979,7 +979,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
return -ENOENT;
q = qdisc_leaf(p, clid);
} else { /* ingress */
- q = dev->rx_queue.qdisc_sleeping;
+ q = dev->ingress_queue.qdisc_sleeping;
}
} else {
q = dev->qdisc;
@@ -1044,7 +1044,7 @@ replay:
return -ENOENT;
q = qdisc_leaf(p, clid);
} else { /*ingress */
- q = dev->rx_queue.qdisc_sleeping;
+ q = dev->ingress_queue.qdisc_sleeping;
}
} else {
q = dev->qdisc;
@@ -1124,7 +1124,7 @@ create_n_graft:
if (!(n->nlmsg_flags&NLM_F_CREATE))
return -ENOENT;
if (clid == TC_H_INGRESS)
- q = qdisc_create(dev, &dev->rx_queue, p,
+ q = qdisc_create(dev, &dev->ingress_queue, p,
tcm->tcm_parent, tcm->tcm_parent,
tca, &err);
else {
@@ -1304,7 +1304,7 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
goto done;
- dev_queue = &dev->rx_queue;
+ dev_queue = &dev->ingress_queue;
if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
goto done;
@@ -1595,7 +1595,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
goto done;
- dev_queue = &dev->rx_queue;
+ dev_queue = &dev->ingress_queue;
if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
goto done;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 2aeb3a4386a1..545278a1c478 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -753,7 +753,7 @@ void dev_activate(struct net_device *dev)
need_watchdog = 0;
netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
- transition_one_qdisc(dev, &dev->rx_queue, NULL);
+ transition_one_qdisc(dev, &dev->ingress_queue, NULL);
if (need_watchdog) {
dev->trans_start = jiffies;
@@ -812,7 +812,7 @@ static bool some_qdisc_is_busy(struct net_device *dev)
void dev_deactivate(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
- dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
+ dev_deactivate_queue(dev, &dev->ingress_queue, &noop_qdisc);
dev_watchdog_down(dev);
@@ -838,7 +838,7 @@ void dev_init_scheduler(struct net_device *dev)
{
dev->qdisc = &noop_qdisc;
netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
- dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+ dev_init_scheduler_queue(dev, &dev->ingress_queue, &noop_qdisc);
setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}
@@ -861,7 +861,7 @@ static void shutdown_scheduler_queue(struct net_device *dev,
void dev_shutdown(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
- shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+ shutdown_scheduler_queue(dev, &dev->ingress_queue, &noop_qdisc);
qdisc_destroy(dev->qdisc);
dev->qdisc = &noop_qdisc;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 201cbac2b32c..3cf478d012dd 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -123,40 +123,39 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
case htons(ETH_P_IP):
{
const struct iphdr *iph;
+ int poff;
if (!pskb_network_may_pull(skb, sizeof(*iph)))
goto err;
iph = ip_hdr(skb);
h = (__force u32)iph->daddr;
h2 = (__force u32)iph->saddr ^ iph->protocol;
- if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
- (iph->protocol == IPPROTO_TCP ||
- iph->protocol == IPPROTO_UDP ||
- iph->protocol == IPPROTO_UDPLITE ||
- iph->protocol == IPPROTO_SCTP ||
- iph->protocol == IPPROTO_DCCP ||
- iph->protocol == IPPROTO_ESP) &&
- pskb_network_may_pull(skb, iph->ihl * 4 + 4))
- h2 ^= *(((u32*)iph) + iph->ihl);
+ if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+ break;
+ poff = proto_ports_offset(iph->protocol);
+ if (poff >= 0 &&
+ pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
+ iph = ip_hdr(skb);
+ h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff);
+ }
break;
}
case htons(ETH_P_IPV6):
{
struct ipv6hdr *iph;
+ int poff;
if (!pskb_network_may_pull(skb, sizeof(*iph)))
goto err;
iph = ipv6_hdr(skb);
h = (__force u32)iph->daddr.s6_addr32[3];
h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
- if ((iph->nexthdr == IPPROTO_TCP ||
- iph->nexthdr == IPPROTO_UDP ||
- iph->nexthdr == IPPROTO_UDPLITE ||
- iph->nexthdr == IPPROTO_SCTP ||
- iph->nexthdr == IPPROTO_DCCP ||
- iph->nexthdr == IPPROTO_ESP) &&
- pskb_network_may_pull(skb, sizeof(*iph) + 4))
- h2 ^= *(u32*)&iph[1];
+ poff = proto_ports_offset(iph->nexthdr);
+ if (poff >= 0 &&
+ pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
+ iph = ipv6_hdr(skb);
+ h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff);
+ }
break;
}
default:
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 0b85e5256434..5f1fb8bd862d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -48,6 +48,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
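The pr_fmt define added at the top of this file (and of the SCTP files below) makes pr_err()/pr_warn()/pr_info() prepend the module name automatically, which is why the literal "SCTP: " prefixes disappear from the individual format strings later in the patch. A minimal userspace analogue of the mechanism (assumption: simplified; in the kernel, printk.h performs the expansion and KBUILD_MODNAME supplies the name):

/* Minimal analogue of the kernel's pr_fmt prefixing, for illustration. */
#include <stdio.h>

#define pr_fmt(fmt) "sctp" ": " fmt
#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warn(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Prints "sctp: Failed to create the SCTP control socket" */
	pr_err("Failed to create the SCTP control socket\n");
	return 0;
}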
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 476caaf100ed..6c8556459a75 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -37,6 +37,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/net.h>
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index ccb6dc48d15b..397296fb156f 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -43,6 +43,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <linux/interrupt.h>
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 732689140fb8..95e0c8eda1a0 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -47,6 +47,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
@@ -336,7 +338,7 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk,
memcpy(saddr, baddr, sizeof(union sctp_addr));
SCTP_DEBUG_PRINTK("saddr: %pI6\n", &saddr->v6.sin6_addr);
} else {
- printk(KERN_ERR "%s: asoc:%p Could not find a valid source "
+ pr_err("%s: asoc:%p Could not find a valid source "
"address for the dest:%pI6\n",
__func__, asoc, &daddr->v6.sin6_addr);
}
diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c
index f73ec0ea93ba..8ef8e7d9eb61 100644
--- a/net/sctp/objcnt.c
+++ b/net/sctp/objcnt.c
@@ -38,6 +38,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <net/sctp/sctp.h>
@@ -134,8 +136,7 @@ void sctp_dbg_objcnt_init(void)
ent = proc_create("sctp_dbg_objcnt", 0,
proc_net_sctp, &sctp_objcnt_ops);
if (!ent)
- printk(KERN_WARNING
- "sctp_dbg_objcnt: Unable to create /proc entry.\n");
+ pr_warn("sctp_dbg_objcnt: Unable to create /proc entry.\n");
}
/* Cleanup the objcount entry in the proc filesystem. */
diff --git a/net/sctp/output.c b/net/sctp/output.c
index bcc4590ccaf2..60600d337a3a 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -41,6 +41,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index c04b2eb59186..8c6d379b4bb6 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -46,6 +46,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/list.h> /* For struct list_head */
#include <linux/socket.h>
@@ -1463,23 +1465,23 @@ static void sctp_check_transmitted(struct sctp_outq *q,
/* Display the end of the
* current range.
*/
- SCTP_DEBUG_PRINTK("-%08x",
- dbg_last_ack_tsn);
+ SCTP_DEBUG_PRINTK_CONT("-%08x",
+ dbg_last_ack_tsn);
}
/* Start a new range. */
- SCTP_DEBUG_PRINTK(",%08x", tsn);
+ SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
dbg_ack_tsn = tsn;
break;
case 1: /* The last TSN was NOT ACKed. */
if (dbg_last_kept_tsn != dbg_kept_tsn) {
/* Display the end of current range. */
- SCTP_DEBUG_PRINTK("-%08x",
- dbg_last_kept_tsn);
+ SCTP_DEBUG_PRINTK_CONT("-%08x",
+ dbg_last_kept_tsn);
}
- SCTP_DEBUG_PRINTK("\n");
+ SCTP_DEBUG_PRINTK_CONT("\n");
/* FALL THROUGH... */
default:
@@ -1526,18 +1528,18 @@ static void sctp_check_transmitted(struct sctp_outq *q,
break;
if (dbg_last_kept_tsn != dbg_kept_tsn)
- SCTP_DEBUG_PRINTK("-%08x",
- dbg_last_kept_tsn);
+ SCTP_DEBUG_PRINTK_CONT("-%08x",
+ dbg_last_kept_tsn);
- SCTP_DEBUG_PRINTK(",%08x", tsn);
+ SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
dbg_kept_tsn = tsn;
break;
case 0:
if (dbg_last_ack_tsn != dbg_ack_tsn)
- SCTP_DEBUG_PRINTK("-%08x",
- dbg_last_ack_tsn);
- SCTP_DEBUG_PRINTK("\n");
+ SCTP_DEBUG_PRINTK_CONT("-%08x",
+ dbg_last_ack_tsn);
+ SCTP_DEBUG_PRINTK_CONT("\n");
/* FALL THROUGH... */
default:
@@ -1556,17 +1558,17 @@ static void sctp_check_transmitted(struct sctp_outq *q,
switch (dbg_prt_state) {
case 0:
if (dbg_last_ack_tsn != dbg_ack_tsn) {
- SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_ack_tsn);
+ SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_ack_tsn);
} else {
- SCTP_DEBUG_PRINTK("\n");
+ SCTP_DEBUG_PRINTK_CONT("\n");
}
break;
case 1:
if (dbg_last_kept_tsn != dbg_kept_tsn) {
- SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_kept_tsn);
+ SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_kept_tsn);
} else {
- SCTP_DEBUG_PRINTK("\n");
+ SCTP_DEBUG_PRINTK_CONT("\n");
}
}
#endif /* SCTP_DEBUG */
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index db3a42b8b349..2e63e9dc010e 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -22,6 +22,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/socket.h>
@@ -192,7 +194,7 @@ static __init int sctpprobe_init(void)
if (ret)
goto remove_proc;
- pr_info("SCTP probe registered (port=%d)\n", port);
+ pr_info("probe registered (port=%d)\n", port);
return 0;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 5027b83f1cc0..1ef29c74d85e 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -46,6 +46,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -707,8 +709,7 @@ static int sctp_ctl_sock_init(void)
&init_net);
if (err < 0) {
- printk(KERN_ERR
- "SCTP: Failed to create the SCTP control socket.\n");
+ pr_err("Failed to create the SCTP control socket\n");
return err;
}
return 0;
@@ -798,7 +799,7 @@ static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len)
static int sctp_inet_af_supported(sa_family_t family, struct sctp_sock *sp)
{
/* PF_INET only supports AF_INET addresses. */
- return (AF_INET == family);
+ return AF_INET == family;
}
/* Address matching with wildcards allowed. */
@@ -1206,7 +1207,7 @@ SCTP_STATIC __init int sctp_init(void)
__get_free_pages(GFP_ATOMIC, order);
} while (!sctp_assoc_hashtable && --order > 0);
if (!sctp_assoc_hashtable) {
- printk(KERN_ERR "SCTP: Failed association hash alloc.\n");
+ pr_err("Failed association hash alloc\n");
status = -ENOMEM;
goto err_ahash_alloc;
}
@@ -1220,7 +1221,7 @@ SCTP_STATIC __init int sctp_init(void)
sctp_ep_hashtable = (struct sctp_hashbucket *)
kmalloc(64 * sizeof(struct sctp_hashbucket), GFP_KERNEL);
if (!sctp_ep_hashtable) {
- printk(KERN_ERR "SCTP: Failed endpoint_hash alloc.\n");
+ pr_err("Failed endpoint_hash alloc\n");
status = -ENOMEM;
goto err_ehash_alloc;
}
@@ -1239,7 +1240,7 @@ SCTP_STATIC __init int sctp_init(void)
__get_free_pages(GFP_ATOMIC, order);
} while (!sctp_port_hashtable && --order > 0);
if (!sctp_port_hashtable) {
- printk(KERN_ERR "SCTP: Failed bind hash alloc.");
+ pr_err("Failed bind hash alloc\n");
status = -ENOMEM;
goto err_bhash_alloc;
}
@@ -1248,8 +1249,7 @@ SCTP_STATIC __init int sctp_init(void)
INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
}
- printk(KERN_INFO "SCTP: Hash tables configured "
- "(established %d bind %d)\n",
+ pr_info("Hash tables configured (established %d bind %d)\n",
sctp_assoc_hashsize, sctp_port_hashsize);
/* Disable ADDIP by default. */
@@ -1290,8 +1290,7 @@ SCTP_STATIC __init int sctp_init(void)
/* Initialize the control inode/socket for handling OOTB packets. */
if ((status = sctp_ctl_sock_init())) {
- printk (KERN_ERR
- "SCTP: Failed to initialize the SCTP control sock.\n");
+ pr_err("Failed to initialize the SCTP control sock\n");
goto err_ctl_sock_init;
}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 246f92924658..2cc46f0962ca 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -50,6 +50,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ip.h>
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index f5e5e27cac5e..b21b218d564f 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -47,6 +47,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -1146,26 +1148,23 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_DISPOSITION_VIOLATION:
if (net_ratelimit())
- printk(KERN_ERR "sctp protocol violation state %d "
- "chunkid %d\n", state, subtype.chunk);
+ pr_err("protocol violation state %d chunkid %d\n",
+ state, subtype.chunk);
break;
case SCTP_DISPOSITION_NOT_IMPL:
- printk(KERN_WARNING "sctp unimplemented feature in state %d, "
- "event_type %d, event_id %d\n",
- state, event_type, subtype.chunk);
+ pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
+ state, event_type, subtype.chunk);
break;
case SCTP_DISPOSITION_BUG:
- printk(KERN_ERR "sctp bug in state %d, "
- "event_type %d, event_id %d\n",
+ pr_err("bug in state %d, event_type %d, event_id %d\n",
state, event_type, subtype.chunk);
BUG();
break;
default:
- printk(KERN_ERR "sctp impossible disposition %d "
- "in state %d, event_type %d, event_id %d\n",
+ pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
status, state, event_type, subtype.chunk);
BUG();
break;
@@ -1679,8 +1678,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
sctp_cmd_send_asconf(asoc);
break;
default:
- printk(KERN_WARNING "Impossible command: %u, %p\n",
- cmd->verb, cmd->obj.ptr);
+ pr_warn("Impossible command: %u, %p\n",
+ cmd->verb, cmd->obj.ptr);
break;
}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index d344dc481ccc..4b4eb7c96bbd 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -50,6 +50,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ip.h>
@@ -1138,18 +1140,16 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
if (unlikely(!link)) {
if (from_addr.sa.sa_family == AF_INET6) {
if (net_ratelimit())
- printk(KERN_WARNING
- "%s association %p could not find address %pI6\n",
- __func__,
- asoc,
- &from_addr.v6.sin6_addr);
+ pr_warn("%s association %p could not find address %pI6\n",
+ __func__,
+ asoc,
+ &from_addr.v6.sin6_addr);
} else {
if (net_ratelimit())
- printk(KERN_WARNING
- "%s association %p could not find address %pI4\n",
- __func__,
- asoc,
- &from_addr.v4.sin_addr.s_addr);
+ pr_warn("%s association %p could not find address %pI4\n",
+ __func__,
+ asoc,
+ &from_addr.v4.sin_addr.s_addr);
}
return SCTP_DISPOSITION_DISCARD;
}
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 6d9b3aafcc5d..546d4387fb3c 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -46,6 +46,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/skbuff.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
@@ -66,15 +68,19 @@ static const sctp_sm_table_entry_t bug = {
.name = "sctp_sf_bug"
};
-#define DO_LOOKUP(_max, _type, _table) \
- if ((event_subtype._type > (_max))) { \
- printk(KERN_WARNING \
- "sctp table %p possible attack:" \
- " event %d exceeds max %d\n", \
- _table, event_subtype._type, _max); \
- return &bug; \
- } \
- return &_table[event_subtype._type][(int)state];
+#define DO_LOOKUP(_max, _type, _table) \
+({ \
+ const sctp_sm_table_entry_t *rtn; \
+ \
+ if ((event_subtype._type > (_max))) { \
+ pr_warn("table %p possible attack: event %d exceeds max %d\n", \
+ _table, event_subtype._type, _max); \
+ rtn = &bug; \
+ } else \
+ rtn = &_table[event_subtype._type][(int)state]; \
+ \
+ rtn; \
+})
const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
sctp_state_t state,
@@ -83,21 +89,15 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
switch (event_type) {
case SCTP_EVENT_T_CHUNK:
return sctp_chunk_event_lookup(event_subtype.chunk, state);
- break;
case SCTP_EVENT_T_TIMEOUT:
- DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
- timeout_event_table);
- break;
-
+ return DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
+ timeout_event_table);
case SCTP_EVENT_T_OTHER:
- DO_LOOKUP(SCTP_EVENT_OTHER_MAX, other, other_event_table);
- break;
-
+ return DO_LOOKUP(SCTP_EVENT_OTHER_MAX, other,
+ other_event_table);
case SCTP_EVENT_T_PRIMITIVE:
- DO_LOOKUP(SCTP_EVENT_PRIMITIVE_MAX, primitive,
- primitive_event_table);
- break;
-
+ return DO_LOOKUP(SCTP_EVENT_PRIMITIVE_MAX, primitive,
+ primitive_event_table);
default:
/* Yikes! We got an illegal event type. */
return &bug;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index fbb70770ad05..e34ca9cc1167 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -57,6 +57,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
@@ -2469,9 +2471,8 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
if (params.sack_delay == 0 && params.sack_freq == 0)
return 0;
} else if (optlen == sizeof(struct sctp_assoc_value)) {
- printk(KERN_WARNING "SCTP: Use of struct sctp_assoc_value "
- "in delayed_ack socket option deprecated\n");
- printk(KERN_WARNING "SCTP: Use struct sctp_sack_info instead\n");
+ pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
+ pr_warn("Use struct sctp_sack_info instead\n");
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
@@ -2879,10 +2880,8 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
int val;
if (optlen == sizeof(int)) {
- printk(KERN_WARNING
- "SCTP: Use of int in maxseg socket option deprecated\n");
- printk(KERN_WARNING
- "SCTP: Use struct sctp_assoc_value instead\n");
+ pr_warn("Use of int in maxseg socket option deprecated\n");
+ pr_warn("Use struct sctp_assoc_value instead\n");
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
params.assoc_id = 0;
@@ -3132,10 +3131,8 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
int assoc_id = 0;
if (optlen == sizeof(int)) {
- printk(KERN_WARNING
- "SCTP: Use of int in max_burst socket option deprecated\n");
- printk(KERN_WARNING
- "SCTP: Use struct sctp_assoc_value instead\n");
+ pr_warn("Use of int in max_burst socket option deprecated\n");
+ pr_warn("Use struct sctp_assoc_value instead\n");
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
} else if (optlen == sizeof(struct sctp_assoc_value)) {
@@ -3606,7 +3603,40 @@ out:
/* The SCTP ioctl handler. */
SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
- return -ENOIOCTLCMD;
+ int rc = -ENOTCONN;
+
+ sctp_lock_sock(sk);
+
+ /*
+ * SEQPACKET-style sockets in LISTENING state are valid, for
+ * SCTP, so only discard TCP-style sockets in LISTENING state.
+ */
+ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
+ goto out;
+
+ switch (cmd) {
+ case SIOCINQ: {
+ struct sk_buff *skb;
+ unsigned int amount = 0;
+
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb != NULL) {
+ /*
+ * We will only return the amount of this packet since
+ * that is all that will be read.
+ */
+ amount = skb->len;
+ }
+ rc = put_user(amount, (int __user *)arg);
+ break;
+ }
+ default:
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+out:
+ sctp_release_sock(sk);
+ return rc;
}
/* This is the function which gets called during socket creation to
@@ -3865,7 +3895,7 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
}
out:
- return (retval);
+ return retval;
}
@@ -3921,7 +3951,7 @@ static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
}
out:
- return (retval);
+ return retval;
}
/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
@@ -4292,9 +4322,8 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
if (copy_from_user(&params, optval, len))
return -EFAULT;
} else if (len == sizeof(struct sctp_assoc_value)) {
- printk(KERN_WARNING "SCTP: Use of struct sctp_assoc_value "
- "in delayed_ack socket option deprecated\n");
- printk(KERN_WARNING "SCTP: Use struct sctp_sack_info instead\n");
+ pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
+ pr_warn("Use struct sctp_sack_info instead\n");
if (copy_from_user(&params, optval, len))
return -EFAULT;
} else
@@ -4940,10 +4969,8 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
struct sctp_association *asoc;
if (len == sizeof(int)) {
- printk(KERN_WARNING
- "SCTP: Use of int in maxseg socket option deprecated\n");
- printk(KERN_WARNING
- "SCTP: Use struct sctp_assoc_value instead\n");
+ pr_warn("Use of int in maxseg socket option deprecated\n");
+ pr_warn("Use struct sctp_assoc_value instead\n");
params.assoc_id = 0;
} else if (len >= sizeof(struct sctp_assoc_value)) {
len = sizeof(struct sctp_assoc_value);
@@ -5034,10 +5061,8 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
struct sctp_association *asoc;
if (len == sizeof(int)) {
- printk(KERN_WARNING
- "SCTP: Use of int in max_burst socket option deprecated\n");
- printk(KERN_WARNING
- "SCTP: Use struct sctp_assoc_value instead\n");
+ pr_warn("Use of int in max_burst socket option deprecated\n");
+ pr_warn("Use struct sctp_assoc_value instead\n");
params.assoc_id = 0;
} else if (len >= sizeof(struct sctp_assoc_value)) {
len = sizeof(struct sctp_assoc_value);
@@ -5580,7 +5605,7 @@ static int sctp_get_port(struct sock *sk, unsigned short snum)
/* Note: sk->sk_num gets filled in if ephemeral port request. */
ret = sctp_get_port_local(sk, &addr);
- return (ret ? 1 : 0);
+ return ret ? 1 : 0;
}
/*
@@ -5597,8 +5622,7 @@ SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog)
tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
if (net_ratelimit()) {
- printk(KERN_INFO
- "SCTP: failed to load transform for %s: %ld\n",
+ pr_info("failed to load transform for %s: %ld\n",
sctp_hmac_alg, PTR_ERR(tfm));
}
return -ENOSYS;
@@ -5727,13 +5751,12 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
mask |= POLLERR;
if (sk->sk_shutdown & RCV_SHUTDOWN)
- mask |= POLLRDHUP;
+ mask |= POLLRDHUP | POLLIN | POLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
/* Is it readable? Reconsider this code with TCP-style support. */
- if (!skb_queue_empty(&sk->sk_receive_queue) ||
- (sk->sk_shutdown & RCV_SHUTDOWN))
+ if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
/* The association is either gone or not ready. */
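The new sctp_ioctl() handler above implements SIOCINQ, reporting the length of the next queued message rather than the total bytes queued. A userspace sketch of how an application might query it (assumption: standard socket API usage, not taken from the patch):

/*
 * Userspace sketch: ask how large the next pending message on an SCTP
 * socket is via the SIOCINQ ioctl added above.  Note the handler reports
 * only the first queued packet's length, not the whole backlog.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>	/* SIOCINQ */

static int next_message_size(int fd)
{
	int pending = 0;

	if (ioctl(fd, SIOCINQ, &pending) < 0) {
		perror("ioctl(SIOCINQ)");
		return -1;
	}
	return pending;
}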
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 132046cb82fc..d3ae493d234a 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -48,6 +48,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/random.h>
@@ -244,10 +246,9 @@ void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
struct dst_entry *dst;
if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
- printk(KERN_WARNING "%s: Reported pmtu %d too low, "
- "using default minimum of %d\n",
- __func__, pmtu,
- SCTP_DEFAULT_MINSEGMENT);
+ pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
+ __func__, pmtu,
+ SCTP_DEFAULT_MINSEGMENT);
/* Use default minimum segment size and disable
* pmtu discovery on this transport.
*/
diff --git a/net/socket.c b/net/socket.c
index 2270b941bcc7..717a5f1c8792 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -535,14 +535,13 @@ void sock_release(struct socket *sock)
}
EXPORT_SYMBOL(sock_release);
-int sock_tx_timestamp(struct msghdr *msg, struct sock *sk,
- union skb_shared_tx *shtx)
+int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags)
{
- shtx->flags = 0;
+ *tx_flags = 0;
if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
- shtx->hardware = 1;
+ *tx_flags |= SKBTX_HW_TSTAMP;
if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
- shtx->software = 1;
+ *tx_flags |= SKBTX_SW_TSTAMP;
return 0;
}
EXPORT_SYMBOL(sock_tx_timestamp);
@@ -1919,7 +1918,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
* Afterwards, it will be a kernel pointer. Thus the compiler-assisted
* checking falls down on this.
*/
- if (copy_from_user(ctl_buf, (void __user *)msg_sys.msg_control,
+ if (copy_from_user(ctl_buf,
+ (void __user __force *)msg_sys.msg_control,
ctl_len))
goto out_freectl;
msg_sys.msg_control = ctl_buf;
@@ -3054,14 +3054,19 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
char *optval, int *optlen)
{
mm_segment_t oldfs = get_fs();
+ char __user *uoptval;
+ int __user *uoptlen;
int err;
+ uoptval = (char __user __force *) optval;
+ uoptlen = (int __user __force *) optlen;
+
set_fs(KERNEL_DS);
if (level == SOL_SOCKET)
- err = sock_getsockopt(sock, level, optname, optval, optlen);
+ err = sock_getsockopt(sock, level, optname, uoptval, uoptlen);
else
- err = sock->ops->getsockopt(sock, level, optname, optval,
- optlen);
+ err = sock->ops->getsockopt(sock, level, optname, uoptval,
+ uoptlen);
set_fs(oldfs);
return err;
}
@@ -3071,13 +3076,16 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
char *optval, unsigned int optlen)
{
mm_segment_t oldfs = get_fs();
+ char __user *uoptval;
int err;
+ uoptval = (char __user __force *) optval;
+
set_fs(KERNEL_DS);
if (level == SOL_SOCKET)
- err = sock_setsockopt(sock, level, optname, optval, optlen);
+ err = sock_setsockopt(sock, level, optname, uoptval, optlen);
else
- err = sock->ops->setsockopt(sock, level, optname, optval,
+ err = sock->ops->setsockopt(sock, level, optname, uoptval,
optlen);
set_fs(oldfs);
return err;
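Two independent cleanups sit in the net/socket.c hunks above: sock_tx_timestamp() now ORs its requests into a single u8 flag mask instead of setting bitfields in a union, and kernel_getsockopt()/kernel_setsockopt() gain __force __user casts so sparse accepts the KERNEL_DS trick. A small sketch of the flag-mask pattern (assumption: bit values shown are illustrative, taken to mirror the SKBTX_* names in the patch):

/* Sketch of the bitfield-to-mask conversion: requests are ORed into one
 * byte, which is cheap to copy into the skb's shared info. */
#include <stdint.h>

#define SKBTX_HW_TSTAMP (1 << 0)	/* assumption: illustrative bit positions */
#define SKBTX_SW_TSTAMP (1 << 1)

static uint8_t build_tx_flags(int want_hw, int want_sw)
{
	uint8_t tx_flags = 0;

	if (want_hw)
		tx_flags |= SKBTX_HW_TSTAMP;
	if (want_sw)
		tx_flags |= SKBTX_SW_TSTAMP;
	return tx_flags;
}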
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index dcfc66bab2bb..597c493392ad 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1049,7 +1049,7 @@ gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
out:
if (acred->machine_cred != gss_cred->gc_machine_cred)
return 0;
- return (rc->cr_uid == acred->uid);
+ return rc->cr_uid == acred->uid;
}
/*
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c
index 310b78e99456..c586e92bcf76 100644
--- a/net/sunrpc/auth_gss/gss_generic_token.c
+++ b/net/sunrpc/auth_gss/gss_generic_token.c
@@ -76,19 +76,19 @@ static int
der_length_size( int length)
{
if (length < (1<<7))
- return(1);
+ return 1;
else if (length < (1<<8))
- return(2);
+ return 2;
#if (SIZEOF_INT == 2)
else
- return(3);
+ return 3;
#else
else if (length < (1<<16))
- return(3);
+ return 3;
else if (length < (1<<24))
- return(4);
+ return 4;
else
- return(5);
+ return 5;
#endif
}
@@ -121,14 +121,14 @@ der_read_length(unsigned char **buf, int *bufsize)
int ret;
if (*bufsize < 1)
- return(-1);
+ return -1;
sf = *(*buf)++;
(*bufsize)--;
if (sf & 0x80) {
if ((sf &= 0x7f) > ((*bufsize)-1))
- return(-1);
+ return -1;
if (sf > SIZEOF_INT)
- return (-1);
+ return -1;
ret = 0;
for (; sf; sf--) {
ret = (ret<<8) + (*(*buf)++);
@@ -138,7 +138,7 @@ der_read_length(unsigned char **buf, int *bufsize)
ret = sf;
}
- return(ret);
+ return ret;
}
/* returns the length of a token, given the mech oid and the body size */
@@ -148,7 +148,7 @@ g_token_size(struct xdr_netobj *mech, unsigned int body_size)
{
/* set body_size to sequence contents size */
body_size += 2 + (int) mech->len; /* NEED overflow check */
- return(1 + der_length_size(body_size) + body_size);
+ return 1 + der_length_size(body_size) + body_size;
}
EXPORT_SYMBOL_GPL(g_token_size);
@@ -186,27 +186,27 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
int ret = 0;
if ((toksize-=1) < 0)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
if (*buf++ != 0x60)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
if ((seqsize = der_read_length(&buf, &toksize)) < 0)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
if (seqsize != toksize)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
if ((toksize-=1) < 0)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
if (*buf++ != 0x06)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
if ((toksize-=1) < 0)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
toid.len = *buf++;
if ((toksize-=toid.len) < 0)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
toid.data = buf;
buf+=toid.len;
@@ -217,17 +217,17 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
to return G_BAD_TOK_HEADER if the token header is in fact bad */
if ((toksize-=2) < 0)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
if (ret)
- return(ret);
+ return ret;
if (!ret) {
*buf_in = buf;
*body_size = toksize;
}
- return(ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(g_verify_token_header);
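der_length_size() and der_read_length() in the hunk above handle ASN.1 DER definite-length encoding: lengths below 128 occupy a single byte, longer lengths use a prefix byte 0x80|n followed by n big-endian length octets. A sketch of the matching encoder (assumption: simplified, names invented, not part of the patch):

/* Writes the DER definite-length encoding of `length` into buf and
 * returns the number of bytes written (1 to 5 for a 32-bit length). */
#include <stdint.h>
#include <stddef.h>

static size_t der_write_length(uint8_t *buf, uint32_t length)
{
	size_t n, i;

	if (length < 0x80) {
		buf[0] = (uint8_t)length;	/* short form */
		return 1;
	}
	for (n = 1; (length >> (8 * n)) != 0; n++)
		;				/* count length octets needed */
	buf[0] = 0x80 | (uint8_t)n;		/* long form: 0x80 | count */
	for (i = 0; i < n; i++)
		buf[1 + i] = (uint8_t)(length >> (8 * (n - 1 - i)));
	return 1 + n;
}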
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index 415c013ba382..62ac90c62cb1 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -162,5 +162,5 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
*seqnum = ((plain[0]) |
(plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24));
- return (0);
+ return 0;
}
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 2689de39dc78..8b4061049d76 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -331,7 +331,7 @@ gss_delete_sec_context(struct gss_ctx **context_handle)
*context_handle);
if (!*context_handle)
- return(GSS_S_NO_CONTEXT);
+ return GSS_S_NO_CONTEXT;
if ((*context_handle)->internal_ctx_id)
(*context_handle)->mech_type->gm_ops
->gss_delete_sec_context((*context_handle)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index cace6049e4a5..aa5dbda6608c 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -376,7 +376,7 @@ int rpc_queue_empty(struct rpc_wait_queue *queue)
spin_lock_bh(&queue->lock);
res = queue->qlen;
spin_unlock_bh(&queue->lock);
- return (res == 0);
+ return res == 0;
}
EXPORT_SYMBOL_GPL(rpc_queue_empty);
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index c048543ffbeb..2ddc351b3be9 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -89,7 +89,7 @@ int tipc_addr_domain_valid(u32 addr)
int tipc_addr_node_valid(u32 addr)
{
- return (tipc_addr_domain_valid(addr) && tipc_node(addr));
+ return tipc_addr_domain_valid(addr) && tipc_node(addr);
}
int tipc_in_scope(u32 domain, u32 addr)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index a008c6689305..ecfaac10d0b4 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -143,6 +143,19 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
}
+static void bclink_set_last_sent(void)
+{
+ if (bcl->next_out)
+ bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
+ else
+ bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
+}
+
+u32 tipc_bclink_get_last_sent(void)
+{
+ return bcl->fsm_msg_cnt;
+}
+
/**
* bclink_set_gap - set gap according to contents of current deferred pkt queue
*
@@ -171,7 +184,7 @@ static void bclink_set_gap(struct tipc_node *n_ptr)
static int bclink_ack_allowed(u32 n)
{
- return((n % TIPC_MIN_LINK_WIN) == tipc_own_tag);
+ return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag;
}
@@ -237,8 +250,10 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
/* Try resolving broadcast link congestion, if necessary */
- if (unlikely(bcl->next_out))
+ if (unlikely(bcl->next_out)) {
tipc_link_push_queue(bcl);
+ bclink_set_last_sent();
+ }
if (unlikely(released && !list_empty(&bcl->waiting_ports)))
tipc_link_wakeup_ports(bcl, 0);
spin_unlock_bh(&bc_lock);
@@ -395,7 +410,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
if (unlikely(res == -ELINKCONG))
buf_discard(buf);
else
- bcl->stats.sent_info++;
+ bclink_set_last_sent();
if (bcl->out_queue_size > bcl->stats.max_queue_sz)
bcl->stats.max_queue_sz = bcl->out_queue_size;
@@ -529,15 +544,6 @@ receive:
tipc_node_unlock(node);
}
-u32 tipc_bclink_get_last_sent(void)
-{
- u32 last_sent = mod(bcl->next_out_no - 1);
-
- if (bcl->next_out)
- last_sent = mod(buf_seqno(bcl->next_out) - 1);
- return last_sent;
-}
-
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
return (n_ptr->bclink.supported &&
@@ -570,6 +576,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
msg = buf_msg(buf);
msg_set_non_seq(msg, 1);
msg_set_mc_netid(msg, tipc_net_id);
+ bcl->stats.sent_info++;
}
/* Send buffer over bearers until all targets reached */
@@ -609,11 +616,13 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
bcbearer->remains = bcbearer->remains_new;
}
- /* Unable to reach all targets */
+ /*
+ * Unable to reach all targets (indicate success, since currently
+ * there isn't code in place to properly block & unblock the
+ * pseudo-bearer used by the broadcast link)
+ */
- bcbearer->bearer.publ.blocked = 1;
- bcl->stats.bearer_congs++;
- return 1;
+ return TIPC_OK;
}
/**
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 52ae17b2583e..9c10c6b7c12b 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -63,7 +63,7 @@ static int media_name_valid(const char *name)
len = strlen(name);
if ((len + 1) > TIPC_MAX_MEDIA_NAME)
return 0;
- return (strspn(name, tipc_alphabet) == len);
+ return strspn(name, tipc_alphabet) == len;
}
/**
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 696468117985..466b861dab91 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -169,6 +169,7 @@ void tipc_core_stop(void)
tipc_nametbl_stop();
tipc_ref_table_stop();
tipc_socket_stop();
+ tipc_log_resize(0);
}
/**
@@ -203,7 +204,9 @@ static int __init tipc_init(void)
{
int res;
- tipc_log_resize(CONFIG_TIPC_LOG);
+ if (tipc_log_resize(CONFIG_TIPC_LOG) != 0)
+ warn("Unable to create log buffer\n");
+
info("Activated (version " TIPC_MOD_VER
" compiled " __DATE__ " " __TIME__ ")\n");
@@ -230,7 +233,6 @@ static void __exit tipc_exit(void)
tipc_core_stop_net();
tipc_core_stop();
info("Deactivated\n");
- tipc_log_resize(0);
}
module_init(tipc_init);
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
index 1885a7edb0c8..6569d45bfb9a 100644
--- a/net/tipc/dbg.c
+++ b/net/tipc/dbg.c
@@ -134,7 +134,7 @@ void tipc_printbuf_reset(struct print_buf *pb)
int tipc_printbuf_empty(struct print_buf *pb)
{
- return (!pb->buf || (pb->crs == pb->buf));
+ return !pb->buf || (pb->crs == pb->buf);
}
/**
@@ -169,7 +169,7 @@ int tipc_printbuf_validate(struct print_buf *pb)
tipc_printf(pb, err);
}
}
- return (pb->crs - pb->buf + 1);
+ return pb->crs - pb->buf + 1;
}
/**
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index fc1fcf5e6b53..f28d1ae93125 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -203,6 +203,14 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
return;
}
spin_lock_bh(&n_ptr->lock);
+
+ /* Don't talk to neighbor during cleanup after last session */
+
+ if (n_ptr->cleanup_required) {
+ spin_unlock_bh(&n_ptr->lock);
+ return;
+ }
+
link = n_ptr->links[b_ptr->identity];
if (!link) {
dbg("creating link\n");
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 6230d16020c4..6e988ba485fd 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -72,17 +72,26 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
{
struct sk_buff *clone;
struct net_device *dev;
+ int delta;
clone = skb_clone(buf, GFP_ATOMIC);
- if (clone) {
- skb_reset_network_header(clone);
- dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
- clone->dev = dev;
- dev_hard_header(clone, dev, ETH_P_TIPC,
- &dest->dev_addr.eth_addr,
- dev->dev_addr, clone->len);
- dev_queue_xmit(clone);
+ if (!clone)
+ return 0;
+
+ dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
+ delta = dev->hard_header_len - skb_headroom(buf);
+
+ if ((delta > 0) &&
+ pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+ kfree_skb(clone);
+ return 0;
}
+
+ skb_reset_network_header(clone);
+ clone->dev = dev;
+ dev_hard_header(clone, dev, ETH_P_TIPC, &dest->dev_addr.eth_addr,
+ dev->dev_addr, clone->len);
+ dev_queue_xmit(clone);
return 0;
}
@@ -92,15 +101,12 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
* Accept only packets explicitly sent to this node, or broadcast packets;
* ignores packets sent using Ethernet multicast, and traffic sent to other
* nodes (which can happen if interface is running in promiscuous mode).
- * Routine truncates any Ethernet padding/CRC appended to the message,
- * and ensures message size matches actual length
*/
static int recv_msg(struct sk_buff *buf, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
- u32 size;
if (!net_eq(dev_net(dev), &init_net)) {
kfree_skb(buf);
@@ -109,13 +115,9 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
if (likely(eb_ptr->bearer)) {
if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
- size = msg_size((struct tipc_msg *)buf->data);
- skb_trim(buf, size);
- if (likely(buf->len == size)) {
- buf->next = NULL;
- tipc_recv_msg(buf, eb_ptr->bearer);
- return 0;
- }
+ buf->next = NULL;
+ tipc_recv_msg(buf, eb_ptr->bearer);
+ return 0;
}
}
kfree_skb(buf);
@@ -133,6 +135,16 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
struct eth_bearer *eb_ptr = &eth_bearers[0];
struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
+ int pending_dev = 0;
+
+ /* Find unused Ethernet bearer structure */
+
+ while (eb_ptr->dev) {
+ if (!eb_ptr->bearer)
+ pending_dev++;
+ if (++eb_ptr == stop)
+ return pending_dev ? -EAGAIN : -EDQUOT;
+ }
/* Find device with specified name */
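The send_msg() rework above checks whether the clone has enough headroom for the device's link-layer header and grows it with pskb_expand_head() before calling dev_hard_header(). A userspace analogue of the same precaution (assumption: illustration only; the kernel reallocates the skb head rather than shifting data):

/*
 * Ensure at least hdr_len bytes of headroom exist in front of the payload
 * before a header is prepended; shift the data if the headroom is short.
 */
#include <string.h>
#include <stddef.h>

static int ensure_headroom(char *buf, size_t buf_len, size_t *data_off,
			   size_t data_len, size_t hdr_len)
{
	if (*data_off >= hdr_len)
		return 0;			/* enough headroom already */
	if (data_len + hdr_len > buf_len)
		return -1;			/* cannot fit header + payload */
	memmove(buf + hdr_len, buf + *data_off, data_len);
	*data_off = hdr_len;
	return 0;
}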
diff --git a/net/tipc/link.c b/net/tipc/link.c
index a3616b99529b..b8cf1e9d0b86 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -239,13 +239,13 @@ int tipc_link_is_up(struct link *l_ptr)
{
if (!l_ptr)
return 0;
- return (link_working_working(l_ptr) || link_working_unknown(l_ptr));
+ return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}
int tipc_link_is_active(struct link *l_ptr)
{
- return ((l_ptr->owner->active_links[0] == l_ptr) ||
- (l_ptr->owner->active_links[1] == l_ptr));
+ return (l_ptr->owner->active_links[0] == l_ptr) ||
+ (l_ptr->owner->active_links[1] == l_ptr);
}
/**
@@ -1802,6 +1802,15 @@ static int link_recv_buf_validate(struct sk_buff *buf)
return pskb_may_pull(buf, hdr_size);
}
+/**
+ * tipc_recv_msg - process TIPC messages arriving from off-node
+ * @head: pointer to message buffer chain
+ * @tb_ptr: pointer to bearer message arrived on
+ *
+ * Invoked with no locks held. Bearer pointer must point to a valid bearer
+ * structure (i.e. cannot be NULL), but bearer can be inactive.
+ */
+
void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
{
read_lock_bh(&tipc_net_lock);
@@ -1819,6 +1828,11 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
head = head->next;
+ /* Ensure bearer is still enabled */
+
+ if (unlikely(!b_ptr->active))
+ goto cont;
+
/* Ensure message is well-formed */
if (unlikely(!link_recv_buf_validate(buf)))
@@ -1855,13 +1869,22 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
goto cont;
}
- /* Locate unicast link endpoint that should handle message */
+ /* Locate neighboring node that sent message */
n_ptr = tipc_node_find(msg_prevnode(msg));
if (unlikely(!n_ptr))
goto cont;
tipc_node_lock(n_ptr);
+ /* Don't talk to neighbor during cleanup after last session */
+
+ if (n_ptr->cleanup_required) {
+ tipc_node_unlock(n_ptr);
+ goto cont;
+ }
+
+ /* Locate unicast link endpoint that should handle message */
+
l_ptr = n_ptr->links[b_ptr->identity];
if (unlikely(!l_ptr)) {
tipc_node_unlock(n_ptr);
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 2e5385c47d30..26151d30589d 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -279,12 +279,12 @@ static inline int between(u32 lower, u32 upper, u32 n)
static inline int less_eq(u32 left, u32 right)
{
- return (mod(right - left) < 32768u);
+ return mod(right - left) < 32768u;
}
static inline int less(u32 left, u32 right)
{
- return (less_eq(left, right) && (mod(right) != mod(left)));
+ return less_eq(left, right) && (mod(right) != mod(left));
}
static inline u32 lesser(u32 left, u32 right)
@@ -299,32 +299,32 @@ static inline u32 lesser(u32 left, u32 right)
static inline int link_working_working(struct link *l_ptr)
{
- return (l_ptr->state == WORKING_WORKING);
+ return l_ptr->state == WORKING_WORKING;
}
static inline int link_working_unknown(struct link *l_ptr)
{
- return (l_ptr->state == WORKING_UNKNOWN);
+ return l_ptr->state == WORKING_UNKNOWN;
}
static inline int link_reset_unknown(struct link *l_ptr)
{
- return (l_ptr->state == RESET_UNKNOWN);
+ return l_ptr->state == RESET_UNKNOWN;
}
static inline int link_reset_reset(struct link *l_ptr)
{
- return (l_ptr->state == RESET_RESET);
+ return l_ptr->state == RESET_RESET;
}
static inline int link_blocked(struct link *l_ptr)
{
- return (l_ptr->exp_msg_count || l_ptr->blocked);
+ return l_ptr->exp_msg_count || l_ptr->blocked;
}
static inline int link_congested(struct link *l_ptr)
{
- return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]);
+ return l_ptr->out_queue_size >= l_ptr->queue_limit[0];
}
#endif
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 995d2da35b01..031aad18efce 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -104,7 +104,7 @@ static inline u32 msg_user(struct tipc_msg *m)
static inline u32 msg_isdata(struct tipc_msg *m)
{
- return (msg_user(m) <= TIPC_CRITICAL_IMPORTANCE);
+ return msg_user(m) <= TIPC_CRITICAL_IMPORTANCE;
}
static inline void msg_set_user(struct tipc_msg *m, u32 n)
@@ -289,7 +289,7 @@ static inline void msg_set_destnode(struct tipc_msg *m, u32 a)
static inline int msg_is_dest(struct tipc_msg *m, u32 d)
{
- return(msg_short(m) || (msg_destnode(m) == d));
+ return msg_short(m) || (msg_destnode(m) == d);
}
static inline u32 msg_routed(struct tipc_msg *m)
@@ -632,7 +632,7 @@ static inline void msg_set_bcast_tag(struct tipc_msg *m, u32 n)
static inline u32 msg_max_pkt(struct tipc_msg *m)
{
- return (msg_bits(m, 9, 16, 0xffff) * 4);
+ return msg_bits(m, 9, 16, 0xffff) * 4;
}
static inline void msg_set_max_pkt(struct tipc_msg *m, u32 n)
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 8ba79620db3f..9ca4b0689237 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -116,7 +116,7 @@ DEFINE_RWLOCK(tipc_nametbl_lock);
static int hash(int x)
{
- return(x & (tipc_nametbl_size - 1));
+ return x & (tipc_nametbl_size - 1);
}
/**
@@ -613,8 +613,7 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
}
/*
- * tipc_nametbl_translate(): Translate tipc_name -> tipc_portid.
- * Very time-critical.
+ * tipc_nametbl_translate - translate name to port id
*
* Note: on entry 'destnode' is the search domain used during translation;
* on exit it passes back the node address of the matching port (if any)
@@ -685,7 +684,6 @@ found:
}
spin_unlock_bh(&seq->lock);
not_found:
- *destnode = 0;
read_unlock_bh(&tipc_nametbl_lock);
return 0;
}
@@ -877,7 +875,7 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
u32 index)
{
char portIdStr[27];
- char *scopeStr;
+ const char *scope_str[] = {"", " zone", " cluster", " node"};
struct publication *publ = sseq->zone_list;
tipc_printf(buf, "%-10u %-10u ", sseq->lower, sseq->upper);
@@ -893,15 +891,8 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
tipc_node(publ->node), publ->ref);
tipc_printf(buf, "%-26s ", portIdStr);
if (depth > 3) {
- if (publ->node != tipc_own_addr)
- scopeStr = "";
- else if (publ->scope == TIPC_NODE_SCOPE)
- scopeStr = "node";
- else if (publ->scope == TIPC_CLUSTER_SCOPE)
- scopeStr = "cluster";
- else
- scopeStr = "zone";
- tipc_printf(buf, "%-10u %s", publ->key, scopeStr);
+ tipc_printf(buf, "%-10u %s", publ->key,
+ scope_str[publ->scope]);
}
publ = publ->zone_list_next;
@@ -951,24 +942,19 @@ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
static void nametbl_header(struct print_buf *buf, u32 depth)
{
- tipc_printf(buf, "Type ");
-
- if (depth > 1)
- tipc_printf(buf, "Lower Upper ");
- if (depth > 2)
- tipc_printf(buf, "Port Identity ");
- if (depth > 3)
- tipc_printf(buf, "Publication");
-
- tipc_printf(buf, "\n-----------");
-
- if (depth > 1)
- tipc_printf(buf, "--------------------- ");
- if (depth > 2)
- tipc_printf(buf, "-------------------------- ");
- if (depth > 3)
- tipc_printf(buf, "------------------");
-
+ const char *header[] = {
+ "Type ",
+ "Lower Upper ",
+ "Port Identity ",
+ "Publication Scope"
+ };
+
+ int i;
+
+ if (depth > 4)
+ depth = 4;
+ for (i = 0; i < depth; i++)
+ tipc_printf(buf, header[i]);
tipc_printf(buf, "\n");
}
diff --git a/net/tipc/net.c b/net/tipc/net.c
index f61b7694138b..7e05af47a196 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -248,6 +248,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
/* Handle message for another node */
msg_dbg(msg, "NET>SEND>: ");
+ skb_trim(buf, msg_size(msg));
tipc_link_send(buf, dnode, msg_link_selector(msg));
}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index b634942caba5..7c49cd056df7 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -237,23 +237,22 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr)
int tipc_node_has_active_links(struct tipc_node *n_ptr)
{
- return (n_ptr &&
- ((n_ptr->active_links[0]) || (n_ptr->active_links[1])));
+ return n_ptr->active_links[0] != NULL;
}
int tipc_node_has_redundant_links(struct tipc_node *n_ptr)
{
- return (n_ptr->working_links > 1);
+ return n_ptr->working_links > 1;
}
static int tipc_node_has_active_routes(struct tipc_node *n_ptr)
{
- return (n_ptr && (n_ptr->last_router >= 0));
+ return n_ptr && (n_ptr->last_router >= 0);
}
int tipc_node_is_up(struct tipc_node *n_ptr)
{
- return (tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr));
+ return tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr);
}
struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
@@ -384,6 +383,20 @@ static void node_established_contact(struct tipc_node *n_ptr)
tipc_highest_allowed_slave);
}
+static void node_cleanup_finished(unsigned long node_addr)
+{
+ struct tipc_node *n_ptr;
+
+ read_lock_bh(&tipc_net_lock);
+ n_ptr = tipc_node_find(node_addr);
+ if (n_ptr) {
+ tipc_node_lock(n_ptr);
+ n_ptr->cleanup_required = 0;
+ tipc_node_unlock(n_ptr);
+ }
+ read_unlock_bh(&tipc_net_lock);
+}
+
static void node_lost_contact(struct tipc_node *n_ptr)
{
struct cluster *c_ptr;
@@ -458,6 +471,11 @@ static void node_lost_contact(struct tipc_node *n_ptr)
tipc_k_signal((Handler)ns->handle_node_down,
(unsigned long)ns->usr_handle);
}
+
+ /* Prevent re-contact with node until all cleanup is done */
+
+ n_ptr->cleanup_required = 1;
+ tipc_k_signal((Handler)node_cleanup_finished, n_ptr->addr);
}
/**
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 6f990da5d143..45f3db3a595d 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -52,6 +52,7 @@
* @active_links: pointers to active links to node
* @links: pointers to all links to node
* @working_links: number of working links to node (both active and standby)
+ * @cleanup_required: non-zero if cleaning up after a prior loss of contact
* @link_cnt: number of links to node
* @permit_changeover: non-zero if node has redundant links to this system
* @routers: bitmap (used for multicluster communication)
@@ -78,6 +79,7 @@ struct tipc_node {
struct link *links[MAX_BEARERS];
int link_cnt;
int working_links;
+ int cleanup_required;
int permit_changeover;
u32 routers[512/32];
int last_router;
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 0737680e9266..d760336f2ca8 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -588,19 +588,10 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
if (!p_ptr) {
err = TIPC_ERR_NO_PORT;
} else if (p_ptr->publ.connected) {
- if (port_peernode(p_ptr) != msg_orignode(msg))
+ if ((port_peernode(p_ptr) != msg_orignode(msg)) ||
+ (port_peerport(p_ptr) != msg_origport(msg))) {
err = TIPC_ERR_NO_PORT;
- if (port_peerport(p_ptr) != msg_origport(msg))
- err = TIPC_ERR_NO_PORT;
- if (!err && msg_routed(msg)) {
- u32 seqno = msg_transp_seqno(msg);
- u32 myno = ++p_ptr->last_in_seqno;
- if (seqno != myno) {
- err = TIPC_ERR_NO_PORT;
- abort_buf = port_build_self_abort_msg(p_ptr, err);
- }
- }
- if (msg_type(msg) == CONN_ACK) {
+ } else if (msg_type(msg) == CONN_ACK) {
int wakeup = tipc_port_congested(p_ptr) &&
p_ptr->publ.congested &&
p_ptr->wakeup;
@@ -1473,7 +1464,7 @@ int tipc_forward2name(u32 ref,
msg_set_destnode(msg, destnode);
msg_set_destport(msg, destport);
- if (likely(destport || destnode)) {
+ if (likely(destport)) {
p_ptr->sent++;
if (likely(destnode == tipc_own_addr))
return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
@@ -1551,7 +1542,7 @@ int tipc_forward_buf2name(u32 ref,
skb_push(buf, LONG_H_SIZE);
skb_copy_to_linear_data(buf, msg, LONG_H_SIZE);
msg_dbg(buf_msg(buf),"PREP:");
- if (likely(destport || destnode)) {
+ if (likely(destport)) {
p_ptr->sent++;
if (destnode == tipc_own_addr)
return tipc_port_recv_msg(buf);
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 8d1652aab298..e74bd9563739 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -157,7 +157,7 @@ static inline u32 tipc_peer_node(struct port *p_ptr)
static inline int tipc_port_congested(struct port *p_ptr)
{
- return((p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2));
+ return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
}
/**
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 66e889ba48fd..33217fc3d697 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -64,6 +64,7 @@ struct tipc_sock {
struct sock sk;
struct tipc_port *p;
struct tipc_portid peer_name;
+ long conn_timeout;
};
#define tipc_sk(sk) ((struct tipc_sock *)(sk))
@@ -240,9 +241,9 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
sock->state = state;
sock_init_data(sock, sk);
- sk->sk_rcvtimeo = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
sk->sk_backlog_rcv = backlog_rcv;
tipc_sk(sk)->p = tp_ptr;
+ tipc_sk(sk)->conn_timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
spin_unlock_bh(tp_ptr->lock);
@@ -429,36 +430,55 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
* to handle any preventable race conditions, so TIPC will do the same ...
*
* TIPC sets the returned events as follows:
- * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty
- * or if a connection-oriented socket is does not have an active connection
- * (i.e. a read operation will not block).
- * b) POLLOUT is set except when a socket's connection has been terminated
- * (i.e. a write operation will not block).
- * c) POLLHUP is set when a socket's connection has been terminated.
- *
- * IMPORTANT: The fact that a read or write operation will not block does NOT
- * imply that the operation will succeed!
+ *
+ * socket state flags set
+ * ------------ ---------
+ * unconnected no read flags
+ * no write flags
+ *
+ * connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue
+ * no write flags
+ *
+ * connected POLLIN/POLLRDNORM if data in rx queue
+ * POLLOUT if port is not congested
+ *
+ * disconnecting POLLIN/POLLRDNORM/POLLHUP
+ * no write flags
+ *
+ * listening POLLIN if SYN in rx queue
+ * no write flags
+ *
+ * ready POLLIN/POLLRDNORM if data in rx queue
+ * [connectionless] POLLOUT (since port cannot be congested)
+ *
+ * IMPORTANT: The fact that a read or write operation is indicated does NOT
+ * imply that the operation will succeed, merely that it should be performed
+ * and will not block.
*/
static unsigned int poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
- u32 mask;
+ u32 mask = 0;
poll_wait(file, sk_sleep(sk), wait);
- if (!skb_queue_empty(&sk->sk_receive_queue) ||
- (sock->state == SS_UNCONNECTED) ||
- (sock->state == SS_DISCONNECTING))
- mask = (POLLRDNORM | POLLIN);
- else
- mask = 0;
-
- if (sock->state == SS_DISCONNECTING)
- mask |= POLLHUP;
- else
- mask |= POLLOUT;
+ switch ((int)sock->state) {
+ case SS_READY:
+ case SS_CONNECTED:
+ if (!tipc_sk_port(sk)->congested)
+ mask |= POLLOUT;
+ /* fall thru' */
+ case SS_CONNECTING:
+ case SS_LISTENING:
+ if (!skb_queue_empty(&sk->sk_receive_queue))
+ mask |= (POLLIN | POLLRDNORM);
+ break;
+ case SS_DISCONNECTING:
+ mask = (POLLIN | POLLRDNORM | POLLHUP);
+ break;
+ }
return mask;
}
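
(Illustrative note, not part of the patch: the reworked poll() above derives the event mask from the socket state, so POLLOUT is only reported for SS_READY/SS_CONNECTED sockets whose port is not congested, while SS_DISCONNECTING always reports POLLIN, POLLRDNORM and POLLHUP. A minimal userspace sketch of a caller consuming those semantics follows; it uses only standard <poll.h>, the descriptor is assumed to be an already-connected TIPC stream socket, and wait_for_io() is a made-up helper name.)

#include <poll.h>
#include <stdio.h>

/* Example-only helper: wait up to 5 s for the already-connected TIPC
 * socket to become readable or writable, interpreting the mask the way
 * the reworked poll() generates it. */
static int wait_for_io(int sock_fd)
{
        struct pollfd pfd = { .fd = sock_fd, .events = POLLIN | POLLOUT };
        int n = poll(&pfd, 1, 5000);

        if (n <= 0)
                return n;                       /* timeout (0) or error (-1) */
        if (pfd.revents & POLLHUP)
                return -1;                      /* connection is going away */
        if (pfd.revents & POLLOUT)
                printf("port not congested, send will not block\n");
        if (pfd.revents & POLLIN)
                printf("data (or a connection event) is queued\n");
        return n;
}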
@@ -1026,9 +1046,8 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
struct sk_buff *buf;
struct tipc_msg *msg;
unsigned int sz;
- int sz_to_copy;
+ int sz_to_copy, target, needed;
int sz_copied = 0;
- int needed;
char __user *crs = m->msg_iov->iov_base;
unsigned char *buf_crs;
u32 err;
@@ -1050,6 +1069,8 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
goto exit;
}
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
+
restart:
/* Look for a message in receive queue; wait if necessary */
@@ -1138,7 +1159,7 @@ restart:
if ((sz_copied < buf_len) && /* didn't get all requested data */
(!skb_queue_empty(&sk->sk_receive_queue) ||
- (flags & MSG_WAITALL)) && /* and more is ready or required */
+ (sz_copied < target)) && /* and more is ready or required */
(!(flags & MSG_PEEK)) && /* and aren't just peeking at data */
(!err)) /* and haven't reached a FIN */
goto restart;
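
(Illustrative note, not part of the patch: recv_stream() now keeps looping until sz_copied reaches the sock_rcvlowat() target instead of treating MSG_WAITALL as an unconditional "read everything" flag, which also means the generic SO_RCVLOWAT option now influences how much data a TIPC stream receive waits for. A hedged userspace sketch of setting the low-water mark; set_rcv_low_watermark() is a made-up helper, the option itself is plain SOL_SOCKET.)

#include <sys/socket.h>
#include <errno.h>
#include <stdio.h>

/* Example-only helper: ask the kernel not to wake a blocking recv()
 * until at least 'lowat' bytes are queued; sock_rcvlowat() clamps this
 * against the length actually passed to recvmsg(). */
static int set_rcv_low_watermark(int sock_fd, int lowat)
{
        if (setsockopt(sock_fd, SOL_SOCKET, SO_RCVLOWAT,
                       &lowat, sizeof(lowat)) < 0) {
                perror("SO_RCVLOWAT");
                return -errno;
        }
        return 0;
}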
@@ -1174,7 +1195,7 @@ static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
if (msg_connected(msg))
threshold *= 4;
- return (queue_size >= threshold);
+ return queue_size >= threshold;
}
/**
@@ -1365,6 +1386,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
struct msghdr m = {NULL,};
struct sk_buff *buf;
struct tipc_msg *msg;
+ long timeout;
int res;
lock_sock(sk);
@@ -1379,7 +1401,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
/* For now, TIPC does not support the non-blocking form of connect() */
if (flags & O_NONBLOCK) {
- res = -EWOULDBLOCK;
+ res = -EOPNOTSUPP;
goto exit;
}
@@ -1425,11 +1447,12 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
+ timeout = tipc_sk(sk)->conn_timeout;
release_sock(sk);
res = wait_event_interruptible_timeout(*sk_sleep(sk),
(!skb_queue_empty(&sk->sk_receive_queue) ||
(sock->state != SS_CONNECTING)),
- sk->sk_rcvtimeo);
+ timeout ? timeout : MAX_SCHEDULE_TIMEOUT);
lock_sock(sk);
if (res > 0) {
@@ -1692,7 +1715,7 @@ static int setsockopt(struct socket *sock,
res = tipc_set_portunreturnable(tport->ref, value);
break;
case TIPC_CONN_TIMEOUT:
- sk->sk_rcvtimeo = msecs_to_jiffies(value);
+ tipc_sk(sk)->conn_timeout = msecs_to_jiffies(value);
/* no need to set "res", since already 0 at this point */
break;
default:
@@ -1747,7 +1770,7 @@ static int getsockopt(struct socket *sock,
res = tipc_portunreturnable(tport->ref, &value);
break;
case TIPC_CONN_TIMEOUT:
- value = jiffies_to_msecs(sk->sk_rcvtimeo);
+ value = jiffies_to_msecs(tipc_sk(sk)->conn_timeout);
/* no need to set "res", since already 0 at this point */
break;
case TIPC_NODE_RECVQ_DEPTH:
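
(Illustrative note, not part of the patch: with conn_timeout held in struct tipc_sock, TIPC_CONN_TIMEOUT and SO_RCVTIMEO no longer share sk_rcvtimeo, so setting one cannot clobber the other, and a stored value of 0 now makes connect() wait indefinitely for the ACK/NACK. A hedged userspace sketch of configuring the option; set_conn_timeout() is a made-up helper, and SOL_TIPC / TIPC_CONN_TIMEOUT are assumed to be exported by <linux/socket.h> and <linux/tipc.h> on the build host.)

#include <sys/socket.h>
#include <linux/tipc.h>
#include <stdio.h>

#ifndef SOL_TIPC
#define SOL_TIPC 271                    /* value from <linux/socket.h> */
#endif

/* Example-only helper: set the connection-establishment timeout (in
 * milliseconds) before calling connect(); 0 means wait indefinitely. */
static int set_conn_timeout(int sock_fd, unsigned int msecs)
{
        if (setsockopt(sock_fd, SOL_TIPC, TIPC_CONN_TIMEOUT,
                       &msecs, sizeof(msecs)) < 0) {
                perror("TIPC_CONN_TIMEOUT");
                return -1;
        }
        return 0;
}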
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index ab6eab4c45e2..1a5b9a6bd128 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -604,6 +604,6 @@ int tipc_ispublished(struct tipc_name const *name)
{
u32 domain = 0;
- return(tipc_nametbl_translate(name->type, name->instance,&domain) != 0);
+ return tipc_nametbl_translate(name->type, name->instance, &domain) != 0;
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0b39b2451ea5..c586da3f4f18 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2033,11 +2033,10 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
if (sk->sk_shutdown & RCV_SHUTDOWN)
- mask |= POLLRDHUP;
+ mask |= POLLRDHUP | POLLIN | POLLRDNORM;
/* readable? */
- if (!skb_queue_empty(&sk->sk_receive_queue) ||
- (sk->sk_shutdown & RCV_SHUTDOWN))
+ if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
/* Connection-based need to check for termination and startup */
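
(Illustrative note, not part of the patch: the af_unix hunk folds the RCV_SHUTDOWN readability case into the shutdown checks; the user-visible contract is unchanged, a peer half-close still reports POLLIN and POLLRDNORM alongside POLLRDHUP even when the receive queue is empty. The small standalone sketch below demonstrates that contract with a socketpair.)

#include <sys/socket.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int sv[2];
        struct pollfd pfd;

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
                return 1;

        /* Peer half-closes without ever sending data. */
        shutdown(sv[1], SHUT_WR);

        pfd.fd = sv[0];
        pfd.events = POLLIN;
        poll(&pfd, 1, 1000);

        /* Expect POLLIN to be set: a recv() would return 0 (EOF) at once. */
        printf("revents = 0x%x\n", pfd.revents);

        close(sv[0]);
        close(sv[1]);
        return 0;
}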
diff --git a/net/wireless/core.c b/net/wireless/core.c
index d6d046b9f6f2..9c21ebf9780e 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -253,11 +253,16 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
WARN_ON(err);
wdev->netdev->features |= NETIF_F_NETNS_LOCAL;
}
+
+ return err;
}
wiphy_net_set(&rdev->wiphy, net);
- return err;
+ err = device_rename(&rdev->wiphy.dev, dev_name(&rdev->wiphy.dev));
+ WARN_ON(err);
+
+ return 0;
}
static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
@@ -428,7 +433,7 @@ int wiphy_register(struct wiphy *wiphy)
/* sanity check ifmodes */
WARN_ON(!ifmodes);
- ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1;
+ ifmodes &= ((1 << NUM_NL80211_IFTYPES) - 1) & ~1;
if (WARN_ON(ifmodes != wiphy->interface_modes))
wiphy->interface_modes = ifmodes;
@@ -683,8 +688,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
INIT_WORK(&wdev->cleanup_work, wdev_cleanup_work);
INIT_LIST_HEAD(&wdev->event_list);
spin_lock_init(&wdev->event_lock);
- INIT_LIST_HEAD(&wdev->action_registrations);
- spin_lock_init(&wdev->action_registrations_lock);
+ INIT_LIST_HEAD(&wdev->mgmt_registrations);
+ spin_lock_init(&wdev->mgmt_registrations_lock);
mutex_lock(&rdev->devlist_mtx);
list_add_rcu(&wdev->list, &rdev->netdev_list);
@@ -724,6 +729,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
dev->ethtool_ops = &cfg80211_ethtool_ops;
if ((wdev->iftype == NL80211_IFTYPE_STATION ||
+ wdev->iftype == NL80211_IFTYPE_P2P_CLIENT ||
wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr)
dev->priv_flags |= IFF_DONT_BRIDGE;
break;
@@ -732,6 +738,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
case NL80211_IFTYPE_ADHOC:
cfg80211_leave_ibss(rdev, dev, true);
break;
+ case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_STATION:
wdev_lock(wdev);
#ifdef CONFIG_CFG80211_WEXT
@@ -804,7 +811,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
sysfs_remove_link(&dev->dev.kobj, "phy80211");
list_del_rcu(&wdev->list);
rdev->devlist_generation++;
- cfg80211_mlme_purge_actions(wdev);
+ cfg80211_mlme_purge_registrations(wdev);
#ifdef CONFIG_CFG80211_WEXT
kfree(wdev->wext.keys);
#endif
@@ -910,52 +917,3 @@ static void __exit cfg80211_exit(void)
destroy_workqueue(cfg80211_wq);
}
module_exit(cfg80211_exit);
-
-static int ___wiphy_printk(const char *level, const struct wiphy *wiphy,
- struct va_format *vaf)
-{
- if (!wiphy)
- return printk("%s(NULL wiphy *): %pV", level, vaf);
-
- return printk("%s%s: %pV", level, wiphy_name(wiphy), vaf);
-}
-
-int __wiphy_printk(const char *level, const struct wiphy *wiphy,
- const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
- int r;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- r = ___wiphy_printk(level, wiphy, &vaf);
- va_end(args);
-
- return r;
-}
-EXPORT_SYMBOL(__wiphy_printk);
-
-#define define_wiphy_printk_level(func, kern_level) \
-int func(const struct wiphy *wiphy, const char *fmt, ...) \
-{ \
- struct va_format vaf; \
- va_list args; \
- int r; \
- \
- va_start(args, fmt); \
- \
- vaf.fmt = fmt; \
- vaf.va = &args; \
- \
- r = ___wiphy_printk(kern_level, wiphy, &vaf); \
- va_end(args); \
- \
- return r; \
-} \
-EXPORT_SYMBOL(func);
-
-define_wiphy_printk_level(wiphy_debug, KERN_DEBUG);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 63d57ae399c3..5d89310b3587 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -86,7 +86,7 @@ struct cfg80211_registered_device *wiphy_to_dev(struct wiphy *wiphy)
static inline
bool wiphy_idx_valid(int wiphy_idx)
{
- return (wiphy_idx >= 0);
+ return wiphy_idx >= 0;
}
@@ -95,7 +95,10 @@ extern struct mutex cfg80211_mutex;
extern struct list_head cfg80211_rdev_list;
extern int cfg80211_rdev_list_generation;
-#define assert_cfg80211_lock() WARN_ON(!mutex_is_locked(&cfg80211_mutex))
+static inline void assert_cfg80211_lock(void)
+{
+ lockdep_assert_held(&cfg80211_mutex);
+}
/*
* You can use this to mark a wiphy_idx as not having an associated wiphy.
@@ -202,8 +205,8 @@ static inline void wdev_unlock(struct wireless_dev *wdev)
mutex_unlock(&wdev->mtx);
}
-#define ASSERT_RDEV_LOCK(rdev) WARN_ON(!mutex_is_locked(&(rdev)->mtx));
-#define ASSERT_WDEV_LOCK(wdev) WARN_ON(!mutex_is_locked(&(wdev)->mtx));
+#define ASSERT_RDEV_LOCK(rdev) lockdep_assert_held(&(rdev)->mtx)
+#define ASSERT_WDEV_LOCK(wdev) lockdep_assert_held(&(wdev)->mtx)
enum cfg80211_event_type {
EVENT_CONNECT_RESULT,
@@ -331,16 +334,17 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
const u8 *resp_ie, size_t resp_ie_len,
u16 status, bool wextev,
struct cfg80211_bss *bss);
-int cfg80211_mlme_register_action(struct wireless_dev *wdev, u32 snd_pid,
- const u8 *match_data, int match_len);
-void cfg80211_mlme_unregister_actions(struct wireless_dev *wdev, u32 nlpid);
-void cfg80211_mlme_purge_actions(struct wireless_dev *wdev);
-int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
- struct net_device *dev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- bool channel_type_valid,
- const u8 *buf, size_t len, u64 *cookie);
+int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
+ u16 frame_type, const u8 *match_data,
+ int match_len);
+void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid);
+void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev);
+int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ bool channel_type_valid,
+ const u8 *buf, size_t len, u64 *cookie);
/* SME */
int __cfg80211_connect(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 27a8ce9343c3..8cb6e08373b9 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -88,6 +88,25 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
if (wdev->ssid_len)
return -EALREADY;
+ if (!params->basic_rates) {
+ /*
+ * If no rates were explicitly configured,
+ * use the mandatory rate set for 11b or
+ * 11a for maximum compatibility.
+ */
+ struct ieee80211_supported_band *sband =
+ rdev->wiphy.bands[params->channel->band];
+ int j;
+ u32 flag = params->channel->band == IEEE80211_BAND_5GHZ ?
+ IEEE80211_RATE_MANDATORY_A :
+ IEEE80211_RATE_MANDATORY_B;
+
+ for (j = 0; j < sband->n_bitrates; j++) {
+ if (sband->bitrates[j].flags & flag)
+ params->basic_rates |= BIT(j);
+ }
+ }
+
if (WARN_ON(wdev->connect_keys))
kfree(wdev->connect_keys);
wdev->connect_keys = connkeys;
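
(Illustrative note, not part of the patch: the block moved into __cfg80211_join_ibss() builds a default basic_rates bitmap from the band's mandatory rates, 11b on 2.4 GHz and 11a on 5 GHz, whenever userspace supplied none, so the default now applies no matter which API initiated the join. A standalone sketch of the same bitmap construction; the struct, flag values and mandatory_rate_mask() helper below are stand-ins invented for the example, not the kernel definitions.)

#include <stdio.h>

#define RATE_MANDATORY_B 0x1    /* stand-in for IEEE80211_RATE_MANDATORY_B */
#define RATE_MANDATORY_A 0x2    /* stand-in for IEEE80211_RATE_MANDATORY_A */

struct rate {
        int bitrate;            /* units of 100 kb/s */
        unsigned int flags;
};

/* Example-only helper: pick every rate in a band that carries the
 * requested mandatory-rate flag, accumulating bits the same way the
 * patch does with BIT(j). */
static unsigned int mandatory_rate_mask(const struct rate *rates, int n,
                                        unsigned int flag)
{
        unsigned int mask = 0;
        int j;

        for (j = 0; j < n; j++)
                if (rates[j].flags & flag)
                        mask |= 1u << j;
        return mask;
}

int main(void)
{
        /* 2.4 GHz: 1, 2, 5.5 and 11 Mb/s form the mandatory 11b set */
        const struct rate band_2ghz[] = {
                { 10,  RATE_MANDATORY_B }, { 20,  RATE_MANDATORY_B },
                { 55,  RATE_MANDATORY_B }, { 110, RATE_MANDATORY_B },
                { 60,  0 },                { 90,  0 },
        };

        printf("basic_rates = 0x%x\n",
               mandatory_rate_mask(band_2ghz, 6, RATE_MANDATORY_B));
        return 0;
}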
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index d1a3fb99fdf2..46f371160896 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -149,7 +149,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
const u8 *bssid = mgmt->bssid;
int i;
- bool found = false;
+ bool found = false, was_current = false;
ASSERT_WDEV_LOCK(wdev);
@@ -159,6 +159,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
cfg80211_put_bss(&wdev->current_bss->pub);
wdev->current_bss = NULL;
found = true;
+ was_current = true;
} else for (i = 0; i < MAX_AUTH_BSSES; i++) {
if (wdev->auth_bsses[i] &&
memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) {
@@ -183,7 +184,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
- if (wdev->sme_state == CFG80211_SME_CONNECTED) {
+ if (wdev->sme_state == CFG80211_SME_CONNECTED && was_current) {
u16 reason_code;
bool from_ap;
@@ -747,31 +748,51 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
}
EXPORT_SYMBOL(cfg80211_new_sta);
-struct cfg80211_action_registration {
+struct cfg80211_mgmt_registration {
struct list_head list;
u32 nlpid;
int match_len;
+ __le16 frame_type;
+
u8 match[];
};
-int cfg80211_mlme_register_action(struct wireless_dev *wdev, u32 snd_pid,
- const u8 *match_data, int match_len)
+int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
+ u16 frame_type, const u8 *match_data,
+ int match_len)
{
- struct cfg80211_action_registration *reg, *nreg;
+ struct cfg80211_mgmt_registration *reg, *nreg;
int err = 0;
+ u16 mgmt_type;
+
+ if (!wdev->wiphy->mgmt_stypes)
+ return -EOPNOTSUPP;
+
+ if ((frame_type & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT)
+ return -EINVAL;
+
+ if (frame_type & ~(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE))
+ return -EINVAL;
+
+ mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4;
+ if (!(wdev->wiphy->mgmt_stypes[wdev->iftype].rx & BIT(mgmt_type)))
+ return -EINVAL;
nreg = kzalloc(sizeof(*reg) + match_len, GFP_KERNEL);
if (!nreg)
return -ENOMEM;
- spin_lock_bh(&wdev->action_registrations_lock);
+ spin_lock_bh(&wdev->mgmt_registrations_lock);
- list_for_each_entry(reg, &wdev->action_registrations, list) {
+ list_for_each_entry(reg, &wdev->mgmt_registrations, list) {
int mlen = min(match_len, reg->match_len);
+ if (frame_type != le16_to_cpu(reg->frame_type))
+ continue;
+
if (memcmp(reg->match, match_data, mlen) == 0) {
err = -EALREADY;
break;
@@ -786,69 +807,83 @@ int cfg80211_mlme_register_action(struct wireless_dev *wdev, u32 snd_pid,
memcpy(nreg->match, match_data, match_len);
nreg->match_len = match_len;
nreg->nlpid = snd_pid;
- list_add(&nreg->list, &wdev->action_registrations);
+ nreg->frame_type = cpu_to_le16(frame_type);
+ list_add(&nreg->list, &wdev->mgmt_registrations);
out:
- spin_unlock_bh(&wdev->action_registrations_lock);
+ spin_unlock_bh(&wdev->mgmt_registrations_lock);
return err;
}
-void cfg80211_mlme_unregister_actions(struct wireless_dev *wdev, u32 nlpid)
+void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
{
- struct cfg80211_action_registration *reg, *tmp;
+ struct cfg80211_mgmt_registration *reg, *tmp;
- spin_lock_bh(&wdev->action_registrations_lock);
+ spin_lock_bh(&wdev->mgmt_registrations_lock);
- list_for_each_entry_safe(reg, tmp, &wdev->action_registrations, list) {
+ list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
if (reg->nlpid == nlpid) {
list_del(&reg->list);
kfree(reg);
}
}
- spin_unlock_bh(&wdev->action_registrations_lock);
+ spin_unlock_bh(&wdev->mgmt_registrations_lock);
}
-void cfg80211_mlme_purge_actions(struct wireless_dev *wdev)
+void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
{
- struct cfg80211_action_registration *reg, *tmp;
+ struct cfg80211_mgmt_registration *reg, *tmp;
- spin_lock_bh(&wdev->action_registrations_lock);
+ spin_lock_bh(&wdev->mgmt_registrations_lock);
- list_for_each_entry_safe(reg, tmp, &wdev->action_registrations, list) {
+ list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
list_del(&reg->list);
kfree(reg);
}
- spin_unlock_bh(&wdev->action_registrations_lock);
+ spin_unlock_bh(&wdev->mgmt_registrations_lock);
}
-int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
- struct net_device *dev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- bool channel_type_valid,
- const u8 *buf, size_t len, u64 *cookie)
+int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ bool channel_type_valid,
+ const u8 *buf, size_t len, u64 *cookie)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
const struct ieee80211_mgmt *mgmt;
+ u16 stype;
+
+ if (!wdev->wiphy->mgmt_stypes)
+ return -EOPNOTSUPP;
- if (rdev->ops->action == NULL)
+ if (!rdev->ops->mgmt_tx)
return -EOPNOTSUPP;
+
if (len < 24 + 1)
return -EINVAL;
mgmt = (const struct ieee80211_mgmt *) buf;
- if (!ieee80211_is_action(mgmt->frame_control))
+
+ if (!ieee80211_is_mgmt(mgmt->frame_control))
return -EINVAL;
- if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) {
+
+ stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;
+ if (!(wdev->wiphy->mgmt_stypes[wdev->iftype].tx & BIT(stype >> 4)))
+ return -EINVAL;
+
+ if (ieee80211_is_action(mgmt->frame_control) &&
+ mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) {
/* Verify that we are associated with the destination AP */
wdev_lock(wdev);
if (!wdev->current_bss ||
memcmp(wdev->current_bss->pub.bssid, mgmt->bssid,
ETH_ALEN) != 0 ||
- (wdev->iftype == NL80211_IFTYPE_STATION &&
+ ((wdev->iftype == NL80211_IFTYPE_STATION ||
+ wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) &&
memcmp(wdev->current_bss->pub.bssid, mgmt->da,
ETH_ALEN) != 0)) {
wdev_unlock(wdev);
@@ -862,64 +897,75 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
return -EINVAL;
/* Transmit the Action frame as requested by user space */
- return rdev->ops->action(&rdev->wiphy, dev, chan, channel_type,
- channel_type_valid, buf, len, cookie);
+ return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, channel_type,
+ channel_type_valid, buf, len, cookie);
}
-bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf,
- size_t len, gfp_t gfp)
+bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
+ size_t len, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- struct cfg80211_action_registration *reg;
- const u8 *action_data;
- int action_data_len;
+ struct cfg80211_mgmt_registration *reg;
+ const struct ieee80211_txrx_stypes *stypes =
+ &wiphy->mgmt_stypes[wdev->iftype];
+ struct ieee80211_mgmt *mgmt = (void *)buf;
+ const u8 *data;
+ int data_len;
bool result = false;
+ __le16 ftype = mgmt->frame_control &
+ cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
+ u16 stype;
- /* frame length - min size excluding category */
- action_data_len = len - (IEEE80211_MIN_ACTION_SIZE - 1);
+ stype = (le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE) >> 4;
- /* action data starts with category */
- action_data = buf + IEEE80211_MIN_ACTION_SIZE - 1;
+ if (!(stypes->rx & BIT(stype)))
+ return false;
- spin_lock_bh(&wdev->action_registrations_lock);
+ data = buf + ieee80211_hdrlen(mgmt->frame_control);
+ data_len = len - ieee80211_hdrlen(mgmt->frame_control);
+
+ spin_lock_bh(&wdev->mgmt_registrations_lock);
+
+ list_for_each_entry(reg, &wdev->mgmt_registrations, list) {
+ if (reg->frame_type != ftype)
+ continue;
- list_for_each_entry(reg, &wdev->action_registrations, list) {
- if (reg->match_len > action_data_len)
+ if (reg->match_len > data_len)
continue;
- if (memcmp(reg->match, action_data, reg->match_len))
+ if (memcmp(reg->match, data, reg->match_len))
continue;
/* found match! */
/* Indicate the received Action frame to user space */
- if (nl80211_send_action(rdev, dev, reg->nlpid, freq,
- buf, len, gfp))
+ if (nl80211_send_mgmt(rdev, dev, reg->nlpid, freq,
+ buf, len, gfp))
continue;
result = true;
break;
}
- spin_unlock_bh(&wdev->action_registrations_lock);
+ spin_unlock_bh(&wdev->mgmt_registrations_lock);
return result;
}
-EXPORT_SYMBOL(cfg80211_rx_action);
+EXPORT_SYMBOL(cfg80211_rx_mgmt);
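
(Illustrative note, not part of the patch: registration and RX matching now key on the full management frame type: the subtype index is (frame_control & IEEE80211_FCTL_STYPE) >> 4 and is tested against the per-interface-type rx/tx bitmaps in wiphy->mgmt_stypes. A standalone sketch of that bit arithmetic follows; the mask and subtype constants mirror include/linux/ieee80211.h but are defined locally, and rx_allowed() is a made-up helper.)

#include <stdint.h>
#include <stdio.h>

#define FCTL_FTYPE      0x000c
#define FCTL_STYPE      0x00f0
#define FTYPE_MGMT      0x0000
#define STYPE_ACTION    0x00d0
#define STYPE_PROBE_REQ 0x0040
#define STYPE_BEACON    0x0080

/* Example-only helper: is this management subtype enabled in the
 * per-iftype RX bitmap?  Subtype index = (fc & STYPE mask) >> 4. */
static int rx_allowed(uint16_t frame_type, uint16_t rx_stypes)
{
        if ((frame_type & FCTL_FTYPE) != FTYPE_MGMT)
                return 0;                       /* not a management frame */
        return !!(rx_stypes & (1u << ((frame_type & FCTL_STYPE) >> 4)));
}

int main(void)
{
        /* bitmap with only the Action (0xd) and Probe Request (0x4) bits set */
        uint16_t rx = (1u << 0xd) | (1u << 0x4);

        printf("action    -> %d\n", rx_allowed(FTYPE_MGMT | STYPE_ACTION, rx));
        printf("probe req -> %d\n", rx_allowed(FTYPE_MGMT | STYPE_PROBE_REQ, rx));
        printf("beacon    -> %d\n", rx_allowed(FTYPE_MGMT | STYPE_BEACON, rx));
        return 0;
}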
-void cfg80211_action_tx_status(struct net_device *dev, u64 cookie,
- const u8 *buf, size_t len, bool ack, gfp_t gfp)
+void cfg80211_mgmt_tx_status(struct net_device *dev, u64 cookie,
+ const u8 *buf, size_t len, bool ack, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
/* Indicate TX status of the Action frame to user space */
- nl80211_send_action_tx_status(rdev, dev, cookie, buf, len, ack, gfp);
+ nl80211_send_mgmt_tx_status(rdev, dev, cookie, buf, len, ack, gfp);
}
-EXPORT_SYMBOL(cfg80211_action_tx_status);
+EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
void cfg80211_cqm_rssi_notify(struct net_device *dev,
enum nl80211_cqm_rssi_threshold_event rssi_event,
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 37902a54e9c1..9c84825803ce 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -136,6 +136,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
.len = sizeof(struct nl80211_sta_flag_update),
},
[NL80211_ATTR_CONTROL_PORT] = { .type = NLA_FLAG },
+ [NL80211_ATTR_CONTROL_PORT_ETHERTYPE] = { .type = NLA_U16 },
+ [NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG },
[NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
[NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
@@ -156,6 +158,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_WIPHY_TX_POWER_SETTING] = { .type = NLA_U32 },
[NL80211_ATTR_WIPHY_TX_POWER_LEVEL] = { .type = NLA_U32 },
+ [NL80211_ATTR_FRAME_TYPE] = { .type = NLA_U16 },
};
/* policy for the attributes */
@@ -407,12 +410,14 @@ static int nl80211_key_allowed(struct wireless_dev *wdev)
switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_P2P_GO:
break;
case NL80211_IFTYPE_ADHOC:
if (!wdev->current_bss)
return -ENOLINK;
break;
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
if (wdev->sme_state != CFG80211_SME_CONNECTED)
return -ENOLINK;
break;
@@ -437,6 +442,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
struct ieee80211_rate *rate;
int i;
u16 ifmodes = dev->wiphy.interface_modes;
+ const struct ieee80211_txrx_stypes *mgmt_stypes =
+ dev->wiphy.mgmt_stypes;
hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY);
if (!hdr)
@@ -471,6 +478,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
dev->wiphy.max_num_pmkids);
+ if (dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL)
+ NLA_PUT_FLAG(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE);
+
nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES);
if (!nl_modes)
goto nla_put_failure;
@@ -587,7 +597,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
CMD(flush_pmksa, FLUSH_PMKSA);
CMD(remain_on_channel, REMAIN_ON_CHANNEL);
CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
- CMD(action, ACTION);
+ CMD(mgmt_tx, FRAME);
if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
i++;
NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
@@ -608,6 +618,55 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
nla_nest_end(msg, nl_cmds);
+ if (mgmt_stypes) {
+ u16 stypes;
+ struct nlattr *nl_ftypes, *nl_ifs;
+ enum nl80211_iftype ift;
+
+ nl_ifs = nla_nest_start(msg, NL80211_ATTR_TX_FRAME_TYPES);
+ if (!nl_ifs)
+ goto nla_put_failure;
+
+ for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) {
+ nl_ftypes = nla_nest_start(msg, ift);
+ if (!nl_ftypes)
+ goto nla_put_failure;
+ i = 0;
+ stypes = mgmt_stypes[ift].tx;
+ while (stypes) {
+ if (stypes & 1)
+ NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE,
+ (i << 4) | IEEE80211_FTYPE_MGMT);
+ stypes >>= 1;
+ i++;
+ }
+ nla_nest_end(msg, nl_ftypes);
+ }
+
+ nla_nest_end(msg, nl_ifs);
+
+ nl_ifs = nla_nest_start(msg, NL80211_ATTR_RX_FRAME_TYPES);
+ if (!nl_ifs)
+ goto nla_put_failure;
+
+ for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) {
+ nl_ftypes = nla_nest_start(msg, ift);
+ if (!nl_ftypes)
+ goto nla_put_failure;
+ i = 0;
+ stypes = mgmt_stypes[ift].rx;
+ while (stypes) {
+ if (stypes & 1)
+ NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE,
+ (i << 4) | IEEE80211_FTYPE_MGMT);
+ stypes >>= 1;
+ i++;
+ }
+ nla_nest_end(msg, nl_ftypes);
+ }
+ nla_nest_end(msg, nl_ifs);
+ }
+
return genlmsg_end(msg, hdr);
nla_put_failure:
@@ -709,7 +768,8 @@ static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
wdev->iftype == NL80211_IFTYPE_AP ||
wdev->iftype == NL80211_IFTYPE_WDS ||
wdev->iftype == NL80211_IFTYPE_MESH_POINT ||
- wdev->iftype == NL80211_IFTYPE_MONITOR;
+ wdev->iftype == NL80211_IFTYPE_MONITOR ||
+ wdev->iftype == NL80211_IFTYPE_P2P_GO;
}
static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
@@ -776,7 +836,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
struct cfg80211_registered_device *rdev;
struct net_device *netdev = NULL;
struct wireless_dev *wdev;
- int result, rem_txq_params = 0;
+ int result = 0, rem_txq_params = 0;
struct nlattr *nl_txq_params;
u32 changed;
u8 retry_short = 0, retry_long = 0;
@@ -1636,7 +1696,8 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
if (err)
goto unlock_rtnl;
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) {
err = -EOPNOTSUPP;
goto out;
}
@@ -1728,7 +1789,8 @@ static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) {
err = -EOPNOTSUPP;
goto out;
}
@@ -2071,10 +2133,12 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
switch (dev->ieee80211_ptr->iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_P2P_GO:
/* disallow mesh-specific things */
if (params.plink_action)
err = -EINVAL;
break;
+ case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_STATION:
/* disallow everything but AUTHORIZED flag */
if (params.plink_action)
@@ -2176,7 +2240,8 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
goto out_rtnl;
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) {
err = -EINVAL;
goto out;
}
@@ -2229,7 +2294,8 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) {
err = -EINVAL;
goto out;
}
@@ -2603,7 +2669,8 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) {
err = -EOPNOTSUPP;
goto out;
}
@@ -3306,6 +3373,7 @@ static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
}
switch (wdev->iftype) {
+ case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_STATION:
if (intbss == wdev->current_bss)
NLA_PUT_U32(msg, NL80211_BSS_STATUS,
@@ -3572,12 +3640,28 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
if (err)
goto unlock_rtnl;
+ if (key.idx >= 0) {
+ int i;
+ bool ok = false;
+ for (i = 0; i < rdev->wiphy.n_cipher_suites; i++) {
+ if (key.p.cipher == rdev->wiphy.cipher_suites[i]) {
+ ok = true;
+ break;
+ }
+ }
+ if (!ok) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
if (!rdev->ops->auth) {
err = -EOPNOTSUPP;
goto out;
}
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
err = -EOPNOTSUPP;
goto out;
}
@@ -3624,7 +3708,8 @@ unlock_rtnl:
return err;
}
-static int nl80211_crypto_settings(struct genl_info *info,
+static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
+ struct genl_info *info,
struct cfg80211_crypto_settings *settings,
int cipher_limit)
{
@@ -3632,6 +3717,19 @@ static int nl80211_crypto_settings(struct genl_info *info,
settings->control_port = info->attrs[NL80211_ATTR_CONTROL_PORT];
+ if (info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]) {
+ u16 proto;
+ proto = nla_get_u16(
+ info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]);
+ settings->control_port_ethertype = cpu_to_be16(proto);
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
+ proto != ETH_P_PAE)
+ return -EINVAL;
+ if (info->attrs[NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT])
+ settings->control_port_no_encrypt = true;
+ } else
+ settings->control_port_ethertype = cpu_to_be16(ETH_P_PAE);
+
if (info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]) {
void *data;
int len, i;
@@ -3718,7 +3816,8 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
err = -EOPNOTSUPP;
goto out;
}
@@ -3759,7 +3858,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_PREV_BSSID])
prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
- err = nl80211_crypto_settings(info, &crypto, 1);
+ err = nl80211_crypto_settings(rdev, info, &crypto, 1);
if (!err)
err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
ssid, ssid_len, ie, ie_len, use_mfp,
@@ -3802,7 +3901,8 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
err = -EOPNOTSUPP;
goto out;
}
@@ -3868,7 +3968,8 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
err = -EOPNOTSUPP;
goto out;
}
@@ -4018,23 +4119,6 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
goto out;
}
}
- } else {
- /*
- * If no rates were explicitly configured,
- * use the mandatory rate set for 11b or
- * 11a for maximum compatibility.
- */
- struct ieee80211_supported_band *sband =
- wiphy->bands[ibss.channel->band];
- int j;
- u32 flag = ibss.channel->band == IEEE80211_BAND_5GHZ ?
- IEEE80211_RATE_MANDATORY_A :
- IEEE80211_RATE_MANDATORY_B;
-
- for (j = 0; j < sband->n_bitrates; j++) {
- if (sband->bitrates[j].flags & flag)
- ibss.basic_rates |= BIT(j);
- }
}
err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys);
@@ -4236,7 +4320,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
connect.privacy = info->attrs[NL80211_ATTR_PRIVACY];
- err = nl80211_crypto_settings(info, &connect.crypto,
+ err = nl80211_crypto_settings(rdev, info, &connect.crypto,
NL80211_MAX_NR_CIPHER_SUITES);
if (err)
return err;
@@ -4246,7 +4330,8 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
if (err)
goto unlock_rtnl;
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
err = -EOPNOTSUPP;
goto out;
}
@@ -4322,7 +4407,8 @@ static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info)
if (err)
goto unlock_rtnl;
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
err = -EOPNOTSUPP;
goto out;
}
@@ -4410,7 +4496,8 @@ static int nl80211_setdel_pmksa(struct sk_buff *skb, struct genl_info *info)
pmksa.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]);
pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
err = -EOPNOTSUPP;
goto out;
}
@@ -4455,7 +4542,8 @@ static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info)
if (err)
goto out_rtnl;
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
err = -EOPNOTSUPP;
goto out;
}
@@ -4717,17 +4805,18 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
return err;
}
-static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info)
+static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev;
struct net_device *dev;
+ u16 frame_type = IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION;
int err;
if (!info->attrs[NL80211_ATTR_FRAME_MATCH])
return -EINVAL;
- if (nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]) < 1)
- return -EINVAL;
+ if (info->attrs[NL80211_ATTR_FRAME_TYPE])
+ frame_type = nla_get_u16(info->attrs[NL80211_ATTR_FRAME_TYPE]);
rtnl_lock();
@@ -4736,18 +4825,20 @@ static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info)
goto unlock_rtnl;
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
err = -EOPNOTSUPP;
goto out;
}
/* not much point in registering if we can't reply */
- if (!rdev->ops->action) {
+ if (!rdev->ops->mgmt_tx) {
err = -EOPNOTSUPP;
goto out;
}
- err = cfg80211_mlme_register_action(dev->ieee80211_ptr, info->snd_pid,
+ err = cfg80211_mlme_register_mgmt(dev->ieee80211_ptr, info->snd_pid,
+ frame_type,
nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]),
nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]));
out:
@@ -4758,7 +4849,7 @@ static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info)
return err;
}
-static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
+static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev;
struct net_device *dev;
@@ -4781,13 +4872,14 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
if (err)
goto unlock_rtnl;
- if (!rdev->ops->action) {
+ if (!rdev->ops->mgmt_tx) {
err = -EOPNOTSUPP;
goto out;
}
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
err = -EOPNOTSUPP;
goto out;
}
@@ -4824,17 +4916,17 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
}
hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
- NL80211_CMD_ACTION);
+ NL80211_CMD_FRAME);
if (IS_ERR(hdr)) {
err = PTR_ERR(hdr);
goto free_msg;
}
- err = cfg80211_mlme_action(rdev, dev, chan, channel_type,
- channel_type_valid,
- nla_data(info->attrs[NL80211_ATTR_FRAME]),
- nla_len(info->attrs[NL80211_ATTR_FRAME]),
- &cookie);
+ err = cfg80211_mlme_mgmt_tx(rdev, dev, chan, channel_type,
+ channel_type_valid,
+ nla_data(info->attrs[NL80211_ATTR_FRAME]),
+ nla_len(info->attrs[NL80211_ATTR_FRAME]),
+ &cookie);
if (err)
goto free_msg;
@@ -4881,7 +4973,7 @@ static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info)
err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
- goto unlock_rdev;
+ goto unlock_rtnl;
wdev = dev->ieee80211_ptr;
@@ -4905,6 +4997,7 @@ static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info)
unlock_rdev:
cfg80211_unlock_rdev(rdev);
dev_put(dev);
+unlock_rtnl:
rtnl_unlock();
out:
@@ -5005,7 +5098,8 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
goto unlock_rdev;
}
- if (wdev->iftype != NL80211_IFTYPE_STATION) {
+ if (wdev->iftype != NL80211_IFTYPE_STATION &&
+ wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
err = -EOPNOTSUPP;
goto unlock_rdev;
}
@@ -5333,14 +5427,14 @@ static struct genl_ops nl80211_ops[] = {
.flags = GENL_ADMIN_PERM,
},
{
- .cmd = NL80211_CMD_REGISTER_ACTION,
- .doit = nl80211_register_action,
+ .cmd = NL80211_CMD_REGISTER_FRAME,
+ .doit = nl80211_register_mgmt,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
},
{
- .cmd = NL80211_CMD_ACTION,
- .doit = nl80211_action,
+ .cmd = NL80211_CMD_FRAME,
+ .doit = nl80211_tx_mgmt,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
},
@@ -6040,9 +6134,9 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
nl80211_mlme_mcgrp.id, gfp);
}
-int nl80211_send_action(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u32 nlpid,
- int freq, const u8 *buf, size_t len, gfp_t gfp)
+int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, u32 nlpid,
+ int freq, const u8 *buf, size_t len, gfp_t gfp)
{
struct sk_buff *msg;
void *hdr;
@@ -6052,7 +6146,7 @@ int nl80211_send_action(struct cfg80211_registered_device *rdev,
if (!msg)
return -ENOMEM;
- hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ACTION);
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME);
if (!hdr) {
nlmsg_free(msg);
return -ENOMEM;
@@ -6080,10 +6174,10 @@ int nl80211_send_action(struct cfg80211_registered_device *rdev,
return -ENOBUFS;
}
-void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u64 cookie,
- const u8 *buf, size_t len, bool ack,
- gfp_t gfp)
+void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, u64 cookie,
+ const u8 *buf, size_t len, bool ack,
+ gfp_t gfp)
{
struct sk_buff *msg;
void *hdr;
@@ -6092,7 +6186,7 @@ void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
if (!msg)
return;
- hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ACTION_TX_STATUS);
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME_TX_STATUS);
if (!hdr) {
nlmsg_free(msg);
return;
@@ -6179,7 +6273,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list)
list_for_each_entry_rcu(wdev, &rdev->netdev_list, list)
- cfg80211_mlme_unregister_actions(wdev, notify->pid);
+ cfg80211_mlme_unregister_socket(wdev, notify->pid);
rcu_read_unlock();
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 2ad7fbc7d9f1..30d2f939150d 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -74,13 +74,13 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *mac_addr,
struct station_info *sinfo, gfp_t gfp);
-int nl80211_send_action(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u32 nlpid, int freq,
- const u8 *buf, size_t len, gfp_t gfp);
-void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u64 cookie,
- const u8 *buf, size_t len, bool ack,
- gfp_t gfp);
+int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, u32 nlpid, int freq,
+ const u8 *buf, size_t len, gfp_t gfp);
+void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, u64 cookie,
+ const u8 *buf, size_t len, bool ack,
+ gfp_t gfp);
void
nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index 1332c445d1c7..c774bc0f155e 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -14,6 +14,7 @@
* See COPYING for more details.
*/
+#include <linux/kernel.h>
#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>
@@ -45,7 +46,7 @@ static const struct radiotap_align_size rtap_namespace_sizes[] = {
};
static const struct ieee80211_radiotap_namespace radiotap_ns = {
- .n_bits = sizeof(rtap_namespace_sizes) / sizeof(rtap_namespace_sizes[0]),
+ .n_bits = ARRAY_SIZE(rtap_namespace_sizes),
.align_size = rtap_namespace_sizes,
};
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index f180db0de66c..d14bbf960c18 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -36,6 +36,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/random.h>
+#include <linux/ctype.h>
#include <linux/nl80211.h>
#include <linux/platform_device.h>
#include <net/cfg80211.h>
@@ -73,7 +74,11 @@ const struct ieee80211_regdomain *cfg80211_regdomain;
* - last_request
*/
static DEFINE_MUTEX(reg_mutex);
-#define assert_reg_lock() WARN_ON(!mutex_is_locked(&reg_mutex))
+
+static inline void assert_reg_lock(void)
+{
+ lockdep_assert_held(&reg_mutex);
+}
/* Used to queue up regulatory hints */
static LIST_HEAD(reg_requests_list);
@@ -181,14 +186,6 @@ static bool is_alpha2_set(const char *alpha2)
return false;
}
-static bool is_alpha_upper(char letter)
-{
- /* ASCII A - Z */
- if (letter >= 65 && letter <= 90)
- return true;
- return false;
-}
-
static bool is_unknown_alpha2(const char *alpha2)
{
if (!alpha2)
@@ -220,7 +217,7 @@ static bool is_an_alpha2(const char *alpha2)
{
if (!alpha2)
return false;
- if (is_alpha_upper(alpha2[0]) && is_alpha_upper(alpha2[1]))
+ if (isalpha(alpha2[0]) && isalpha(alpha2[1]))
return true;
return false;
}
@@ -1399,6 +1396,11 @@ static DECLARE_WORK(reg_work, reg_todo);
static void queue_regulatory_request(struct regulatory_request *request)
{
+ if (isalpha(request->alpha2[0]))
+ request->alpha2[0] = toupper(request->alpha2[0]);
+ if (isalpha(request->alpha2[1]))
+ request->alpha2[1] = toupper(request->alpha2[1]);
+
spin_lock(&reg_requests_lock);
list_add_tail(&request->list, &reg_requests_list);
spin_unlock(&reg_requests_lock);
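
(Illustrative note, not part of the patch: queue_regulatory_request() now normalises alpha2 hints to upper case with the standard ctype helpers instead of the removed hand-rolled is_alpha_upper(). A trivial standalone sketch of the same normalisation; normalise_alpha2() is a made-up helper.)

#include <ctype.h>
#include <stdio.h>

/* Example-only helper: upper-case a two-letter ISO 3166 country hint. */
static void normalise_alpha2(char alpha2[2])
{
        if (isalpha((unsigned char)alpha2[0]))
                alpha2[0] = toupper((unsigned char)alpha2[0]);
        if (isalpha((unsigned char)alpha2[1]))
                alpha2[1] = toupper((unsigned char)alpha2[1]);
}

int main(void)
{
        char cc[2] = { 'd', 'e' };

        normalise_alpha2(cc);
        printf("%c%c\n", cc[0], cc[1]);         /* prints DE */
        return 0;
}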
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index a8c2d6b877ae..f161b9844542 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -411,7 +411,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
ASSERT_WDEV_LOCK(wdev);
- if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
+ wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
return;
if (wdev->sme_state != CFG80211_SME_CONNECTING)
@@ -548,7 +549,8 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid,
ASSERT_WDEV_LOCK(wdev);
- if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
+ wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
return;
if (wdev->sme_state != CFG80211_SME_CONNECTED)
@@ -644,7 +646,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
ASSERT_WDEV_LOCK(wdev);
- if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
+ wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
return;
if (wdev->sme_state != CFG80211_SME_CONNECTED)
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 9f2cef3e0ca0..74a9e3cce452 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -110,6 +110,13 @@ static int wiphy_resume(struct device *dev)
return ret;
}
+static const void *wiphy_namespace(struct device *d)
+{
+ struct wiphy *wiphy = container_of(d, struct wiphy, dev);
+
+ return wiphy_net(wiphy);
+}
+
struct class ieee80211_class = {
.name = "ieee80211",
.owner = THIS_MODULE,
@@ -120,6 +127,8 @@ struct class ieee80211_class = {
#endif
.suspend = wiphy_suspend,
.resume = wiphy_resume,
+ .ns_type = &net_ns_type_operations,
+ .namespace = wiphy_namespace,
};
int wiphy_sysfs_init(void)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 0c8a1e8b7690..fb5448f7d55a 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -183,7 +183,14 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
return -EINVAL;
break;
default:
- return -EINVAL;
+ /*
+ * We don't know anything about this algorithm,
+ * allow using it -- but the driver must check
+ * all parameters! We still check below whether
+ * or not the driver supports this algorithm,
+ * of course.
+ */
+ break;
}
if (params->seq) {
@@ -221,7 +228,7 @@ const unsigned char bridge_tunnel_header[] __aligned(2) =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
EXPORT_SYMBOL(bridge_tunnel_header);
-unsigned int ieee80211_hdrlen(__le16 fc)
+unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
{
unsigned int hdrlen = 24;
@@ -319,7 +326,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
case cpu_to_le16(IEEE80211_FCTL_TODS):
if (unlikely(iftype != NL80211_IFTYPE_AP &&
- iftype != NL80211_IFTYPE_AP_VLAN))
+ iftype != NL80211_IFTYPE_AP_VLAN &&
+ iftype != NL80211_IFTYPE_P2P_GO))
return -1;
break;
case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
@@ -347,7 +355,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
break;
case cpu_to_le16(IEEE80211_FCTL_FROMDS):
if ((iftype != NL80211_IFTYPE_STATION &&
- iftype != NL80211_IFTYPE_MESH_POINT) ||
+ iftype != NL80211_IFTYPE_P2P_CLIENT &&
+ iftype != NL80211_IFTYPE_MESH_POINT) ||
(is_multicast_ether_addr(dst) &&
!compare_ether_addr(src, addr)))
return -1;
@@ -424,6 +433,7 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
switch (iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_P2P_GO:
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
/* DA BSSID SA */
memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -432,6 +442,7 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
hdrlen = 24;
break;
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
/* BSSID SA DA */
memcpy(hdr.addr1, bssid, ETH_ALEN);
@@ -771,7 +782,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
/* if it's part of a bridge, reject changing type to station/ibss */
if ((dev->priv_flags & IFF_BRIDGE_PORT) &&
- (ntype == NL80211_IFTYPE_ADHOC || ntype == NL80211_IFTYPE_STATION))
+ (ntype == NL80211_IFTYPE_ADHOC ||
+ ntype == NL80211_IFTYPE_STATION ||
+ ntype == NL80211_IFTYPE_P2P_CLIENT))
return -EBUSY;
if (ntype != otype) {
@@ -782,6 +795,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
cfg80211_leave_ibss(rdev, dev, false);
break;
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
cfg80211_disconnect(rdev, dev,
WLAN_REASON_DEAUTH_LEAVING, true);
break;
@@ -810,9 +824,11 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
if (dev->ieee80211_ptr->use_4addr)
break;
/* fall through */
+ case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_ADHOC:
dev->priv_flags |= IFF_DONT_BRIDGE;
break;
+ case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
@@ -823,7 +839,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
/* monitor can't bridge anyway */
break;
case NL80211_IFTYPE_UNSPECIFIED:
- case __NL80211_IFTYPE_AFTER_LAST:
+ case NUM_NL80211_IFTYPES:
/* not happening */
break;
}
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 8f5116f5af19..dc675a3daa3d 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -611,7 +611,7 @@ struct iw_statistics *get_wireless_stats(struct net_device *dev)
#endif
#ifdef CONFIG_CFG80211_WEXT
- if (dev->ieee80211_ptr && dev->ieee80211_ptr &&
+ if (dev->ieee80211_ptr &&
dev->ieee80211_ptr->wiphy &&
dev->ieee80211_ptr->wiphy->wext &&
dev->ieee80211_ptr->wiphy->wext->get_wireless_stats)
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 9818198add8a..6fffe62d7c25 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -197,6 +197,8 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
wdev->wext.connect.ssid_len = len;
wdev->wext.connect.crypto.control_port = false;
+ wdev->wext.connect.crypto.control_port_ethertype =
+ cpu_to_be16(ETH_P_PAE);
err = cfg80211_mgd_wext_connect(rdev, wdev);
out:
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 5e86d4e97dce..f7af98dff409 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -507,14 +507,14 @@ static int x25_listen(struct socket *sock, int backlog)
struct sock *sk = sock->sk;
int rc = -EOPNOTSUPP;
- lock_kernel();
+ lock_sock(sk);
if (sk->sk_state != TCP_LISTEN) {
memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
rc = 0;
}
- unlock_kernel();
+ release_sock(sk);
return rc;
}
@@ -688,7 +688,6 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
int len, i, rc = 0;
- lock_kernel();
if (!sock_flag(sk, SOCK_ZAPPED) ||
addr_len != sizeof(struct sockaddr_x25) ||
addr->sx25_family != AF_X25) {
@@ -704,12 +703,13 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
}
}
+ lock_sock(sk);
x25_sk(sk)->source_addr = addr->sx25_addr;
x25_insert_socket(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
+ release_sock(sk);
SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
out:
- unlock_kernel();
return rc;
}
@@ -751,7 +751,6 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
struct x25_route *rt;
int rc = 0;
- lock_kernel();
lock_sock(sk);
if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
@@ -829,7 +828,6 @@ out_put_route:
x25_route_put(rt);
out:
release_sock(sk);
- unlock_kernel();
return rc;
}
@@ -869,8 +867,7 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
struct sk_buff *skb;
int rc = -EINVAL;
- lock_kernel();
- if (!sk || sk->sk_state != TCP_LISTEN)
+ if (!sk)
goto out;
rc = -EOPNOTSUPP;
@@ -878,6 +875,10 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
goto out;
lock_sock(sk);
+ rc = -EINVAL;
+ if (sk->sk_state != TCP_LISTEN)
+ goto out2;
+
rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
if (rc)
goto out2;
@@ -897,7 +898,6 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
out2:
release_sock(sk);
out:
- unlock_kernel();
return rc;
}
@@ -909,7 +909,6 @@ static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
struct x25_sock *x25 = x25_sk(sk);
int rc = 0;
- lock_kernel();
if (peer) {
if (sk->sk_state != TCP_ESTABLISHED) {
rc = -ENOTCONN;
@@ -923,19 +922,6 @@ static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
*uaddr_len = sizeof(*sx25);
out:
- unlock_kernel();
- return rc;
-}
-
-static unsigned int x25_datagram_poll(struct file *file, struct socket *sock,
- poll_table *wait)
-{
- int rc;
-
- lock_kernel();
- rc = datagram_poll(file, sock, wait);
- unlock_kernel();
-
return rc;
}
@@ -1746,7 +1732,7 @@ static const struct proto_ops x25_proto_ops = {
.socketpair = sock_no_socketpair,
.accept = x25_accept,
.getname = x25_getname,
- .poll = x25_datagram_poll,
+ .poll = datagram_poll,
.ioctl = x25_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_x25_ioctl,