Diffstat
-rw-r--r--  net/appletalk/ddp.c | 2
-rw-r--r--  net/atm/common.c | 2
-rw-r--r--  net/ax25/af_ax25.c | 6
-rw-r--r--  net/bluetooth/bnep/sock.c | 2
-rw-r--r--  net/bluetooth/cmtp/sock.c | 2
-rw-r--r--  net/bluetooth/hci_sock.c | 2
-rw-r--r--  net/bluetooth/hidp/sock.c | 2
-rw-r--r--  net/bluetooth/l2cap.c | 2
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 2
-rw-r--r--  net/bluetooth/sco.c | 2
-rw-r--r--  net/core/dev.c | 140
-rw-r--r--  net/core/net_namespace.c | 164
-rw-r--r--  net/core/netpoll.c | 37
-rw-r--r--  net/core/skbuff.c | 16
-rw-r--r--  net/core/sock.c | 105
-rw-r--r--  net/decnet/af_decnet.c | 2
-rw-r--r--  net/econet/af_econet.c | 2
-rw-r--r--  net/ipv4/af_inet.c | 2
-rw-r--r--  net/ipv4/esp4.c | 12
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblcr.c | 76
-rw-r--r--  net/ipv4/ipvs/ip_vs_sync.c | 5
-rw-r--r--  net/ipv4/netfilter/Kconfig | 4
-rw-r--r--  net/ipv4/proc.c | 2
-rw-r--r--  net/ipv4/tcp.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 27
-rw-r--r--  net/ipv4/tcp_ipv4.c | 12
-rw-r--r--  net/ipv4/tcp_vegas.c | 37
-rw-r--r--  net/ipv6/addrconf.c | 4
-rw-r--r--  net/ipv6/af_inet6.c | 2
-rw-r--r--  net/ipv6/esp6.c | 13
-rw-r--r--  net/ipv6/ndisc.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 12
-rw-r--r--  net/ipx/af_ipx.c | 2
-rw-r--r--  net/irda/af_irda.c | 2
-rw-r--r--  net/irda/irnet/irnet_ppp.c | 10
-rw-r--r--  net/iucv/af_iucv.c | 2
-rw-r--r--  net/key/af_key.c | 6
-rw-r--r--  net/llc/llc_conn.c | 2
-rw-r--r--  net/netfilter/Kconfig | 2
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 2
-rw-r--r--  net/netlink/af_netlink.c | 2
-rw-r--r--  net/netrom/af_netrom.c | 6
-rw-r--r--  net/packet/af_packet.c | 2
-rw-r--r--  net/rose/af_rose.c | 6
-rw-r--r--  net/rxrpc/af_rxrpc.c | 2
-rw-r--r--  net/rxrpc/rxkad.c | 9
-rw-r--r--  net/sctp/auth.c | 4
-rw-r--r--  net/sctp/ipv6.c | 2
-rw-r--r--  net/sctp/protocol.c | 3
-rw-r--r--  net/socket.c | 5
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c | 6
-rw-r--r--  net/sunrpc/sysctl.c | 3
-rw-r--r--  net/sunrpc/xprtrdma/rpc_rdma.c | 34
-rw-r--r--  net/tipc/socket.c | 2
-rw-r--r--  net/unix/af_unix.c | 2
-rw-r--r--  net/x25/af_x25.c | 2
56 files changed, 441 insertions, 379 deletions
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 7c0b5151d526..e0d37d6dc1f8 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1044,7 +1044,7 @@ static int atalk_create(struct net *net, struct socket *sock, int protocol)
if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
goto out;
rc = -ENOMEM;
- sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, 1);
+ sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto);
if (!sk)
goto out;
rc = 0;
diff --git a/net/atm/common.c b/net/atm/common.c
index e166d9e0ffd9..eba09a04f6bf 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -133,7 +133,7 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
sock->sk = NULL;
if (sock->type == SOCK_STREAM)
return -EINVAL;
- sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto, 1);
+ sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 993e5c75e909..8378afd54b30 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -836,7 +836,8 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol)
return -ESOCKTNOSUPPORT;
}
- if ((sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto, 1)) == NULL)
+ sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto);
+ if (sk == NULL)
return -ENOMEM;
ax25 = sk->sk_protinfo = ax25_create_cb();
@@ -861,7 +862,8 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
struct sock *sk;
ax25_cb *ax25, *oax25;
- if ((sk = sk_alloc(osk->sk_net, PF_AX25, GFP_ATOMIC, osk->sk_prot, 1)) == NULL)
+ sk = sk_alloc(osk->sk_net, PF_AX25, GFP_ATOMIC, osk->sk_prot);
+ if (sk == NULL)
return NULL;
if ((ax25 = ax25_create_cb()) == NULL) {
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index f718965f296c..9ebd3c64474d 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -213,7 +213,7 @@ static int bnep_sock_create(struct net *net, struct socket *sock, int protocol)
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
- sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, 1);
+ sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto);
if (!sk)
return -ENOMEM;
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index cf700c20d11e..783edab12ce8 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -204,7 +204,7 @@ static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol)
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
- sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto, 1);
+ sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto);
if (!sk)
return -ENOMEM;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 8825102c517c..14991323c273 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -645,7 +645,7 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol)
sock->ops = &hci_sock_ops;
- sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, 1);
+ sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
if (!sk)
return -ENOMEM;
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 1de2b6fbcac0..3292b956a7c4 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -255,7 +255,7 @@ static int hidp_sock_create(struct net *net, struct socket *sock, int protocol)
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
- sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, 1);
+ sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto);
if (!sk)
return -ENOMEM;
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 6fbbae78b304..477e052b17b5 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -607,7 +607,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
{
struct sock *sk;
- sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto, 1);
+ sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
if (!sk)
return NULL;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 266b6972667d..c46d51035e77 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -287,7 +287,7 @@ static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int
struct rfcomm_dlc *d;
struct sock *sk;
- sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto, 1);
+ sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto);
if (!sk)
return NULL;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 82d0dfdfa7e2..93ad1aae3f38 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -421,7 +421,7 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int pro
{
struct sock *sk;
- sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto, 1);
+ sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto);
if (!sk)
return NULL;
diff --git a/net/core/dev.c b/net/core/dev.c
index 853c8b575f1d..be6cedab5aa8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1751,9 +1751,6 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
*
* return values:
* NET_RX_SUCCESS (no congestion)
- * NET_RX_CN_LOW (low congestion)
- * NET_RX_CN_MOD (moderate congestion)
- * NET_RX_CN_HIGH (high congestion)
* NET_RX_DROP (packet was dropped)
*
*/
@@ -2001,6 +1998,21 @@ out:
}
#endif
+/**
+ * netif_receive_skb - process receive buffer from network
+ * @skb: buffer to process
+ *
+ * netif_receive_skb() is the main receive data processing function.
+ * It always succeeds. The buffer may be dropped during processing
+ * for congestion control or by the protocol layers.
+ *
+ * This function may only be called from softirq context and interrupts
+ * should be enabled.
+ *
+ * Return values (usually ignored):
+ * NET_RX_SUCCESS: no congestion
+ * NET_RX_DROP: packet was dropped
+ */
int netif_receive_skb(struct sk_buff *skb)
{
struct packet_type *ptype, *pt_prev;
@@ -2172,7 +2184,15 @@ static void net_rx_action(struct softirq_action *h)
weight = n->weight;
- work = n->poll(n, weight);
+ /* This NAPI_STATE_SCHED test is for avoiding a race
+ * with netpoll's poll_napi(). Only the entity which
+ * obtains the lock and sees NAPI_STATE_SCHED set will
+ * actually make the ->poll() call. Therefore we avoid
+ * accidentally calling ->poll() when NAPI is not scheduled.
+ */
+ work = 0;
+ if (test_bit(NAPI_STATE_SCHED, &n->state))
+ work = n->poll(n, weight);
WARN_ON_ONCE(work > weight);
@@ -3488,6 +3508,60 @@ static void net_set_todo(struct net_device *dev)
spin_unlock(&net_todo_list_lock);
}
+static void rollback_registered(struct net_device *dev)
+{
+ BUG_ON(dev_boot_phase);
+ ASSERT_RTNL();
+
+ /* Some devices call without registering for initialization unwind. */
+ if (dev->reg_state == NETREG_UNINITIALIZED) {
+ printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
+ "was registered\n", dev->name, dev);
+
+ WARN_ON(1);
+ return;
+ }
+
+ BUG_ON(dev->reg_state != NETREG_REGISTERED);
+
+ /* If device is running, close it first. */
+ dev_close(dev);
+
+ /* And unlink it from device chain. */
+ unlist_netdevice(dev);
+
+ dev->reg_state = NETREG_UNREGISTERING;
+
+ synchronize_net();
+
+ /* Shutdown queueing discipline. */
+ dev_shutdown(dev);
+
+
+ /* Notify protocols, that we are about to destroy
+ this device. They should clean all the things.
+ */
+ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+
+ /*
+ * Flush the unicast and multicast chains
+ */
+ dev_addr_discard(dev);
+
+ if (dev->uninit)
+ dev->uninit(dev);
+
+ /* Notifier chain MUST detach us from master device. */
+ BUG_TRAP(!dev->master);
+
+ /* Remove entries from kobject tree */
+ netdev_unregister_kobject(dev);
+
+ synchronize_net();
+
+ dev_put(dev);
+}
+
/**
* register_netdevice - register a network device
* @dev: device to register
@@ -3625,8 +3699,10 @@ int register_netdevice(struct net_device *dev)
/* Notify protocols, that a new device appeared. */
ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
ret = notifier_to_errno(ret);
- if (ret)
- unregister_netdevice(dev);
+ if (ret) {
+ rollback_registered(dev);
+ dev->reg_state = NETREG_UNREGISTERED;
+ }
out:
return ret;
@@ -3903,59 +3979,9 @@ void synchronize_net(void)
void unregister_netdevice(struct net_device *dev)
{
- BUG_ON(dev_boot_phase);
- ASSERT_RTNL();
-
- /* Some devices call without registering for initialization unwind. */
- if (dev->reg_state == NETREG_UNINITIALIZED) {
- printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
- "was registered\n", dev->name, dev);
-
- WARN_ON(1);
- return;
- }
-
- BUG_ON(dev->reg_state != NETREG_REGISTERED);
-
- /* If device is running, close it first. */
- dev_close(dev);
-
- /* And unlink it from device chain. */
- unlist_netdevice(dev);
-
- dev->reg_state = NETREG_UNREGISTERING;
-
- synchronize_net();
-
- /* Shutdown queueing discipline. */
- dev_shutdown(dev);
-
-
- /* Notify protocols, that we are about to destroy
- this device. They should clean all the things.
- */
- call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
-
- /*
- * Flush the unicast and multicast chains
- */
- dev_addr_discard(dev);
-
- if (dev->uninit)
- dev->uninit(dev);
-
- /* Notifier chain MUST detach us from master device. */
- BUG_TRAP(!dev->master);
-
- /* Remove entries from kobject tree */
- netdev_unregister_kobject(dev);
-
+ rollback_registered(dev);
/* Finish processing unregister after unlock */
net_set_todo(dev);
-
- synchronize_net();
-
- dev_put(dev);
}
/**
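
For readers tracking the net/core/dev.c changes: register_netdevice() now unwinds a failed registration itself through rollback_registered(), and unregister_netdevice() reuses the same helper. A hypothetical driver-side sketch of what that means for callers (foo_probe is an invented name; kernel-style C, illustrative rather than standalone-buildable):

static int foo_probe(void)
{
        struct net_device *dev;
        int err;

        dev = alloc_etherdev(0);        /* no private data in this sketch */
        if (!dev)
                return -ENOMEM;

        rtnl_lock();
        err = register_netdevice(dev);
        rtnl_unlock();
        if (err) {
                /* If a NETDEV_REGISTER notifier rejected the device, the
                 * registration has already been rolled back inside
                 * register_netdevice(); the caller only releases its own
                 * reference. */
                free_netdev(dev);
                return err;
        }
        return 0;
}

The sketch assumes the usual convention that the driver still owns and frees the device when register_netdevice() fails.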
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 6f71db8c4428..e9f0964ce70b 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -17,74 +17,13 @@ static DEFINE_MUTEX(net_mutex);
LIST_HEAD(net_namespace_list);
-static struct kmem_cache *net_cachep;
-
struct net init_net;
EXPORT_SYMBOL_GPL(init_net);
-static struct net *net_alloc(void)
-{
- return kmem_cache_zalloc(net_cachep, GFP_KERNEL);
-}
-
-static void net_free(struct net *net)
-{
- if (!net)
- return;
-
- if (unlikely(atomic_read(&net->use_count) != 0)) {
- printk(KERN_EMERG "network namespace not free! Usage: %d\n",
- atomic_read(&net->use_count));
- return;
- }
-
- kmem_cache_free(net_cachep, net);
-}
-
-static void cleanup_net(struct work_struct *work)
-{
- struct pernet_operations *ops;
- struct net *net;
-
- net = container_of(work, struct net, work);
-
- mutex_lock(&net_mutex);
-
- /* Don't let anyone else find us. */
- rtnl_lock();
- list_del(&net->list);
- rtnl_unlock();
-
- /* Run all of the network namespace exit methods */
- list_for_each_entry_reverse(ops, &pernet_list, list) {
- if (ops->exit)
- ops->exit(net);
- }
-
- mutex_unlock(&net_mutex);
-
- /* Ensure there are no outstanding rcu callbacks using this
- * network namespace.
- */
- rcu_barrier();
-
- /* Finally it is safe to free my network namespace structure */
- net_free(net);
-}
-
-
-void __put_net(struct net *net)
-{
- /* Cleanup the network namespace in process context */
- INIT_WORK(&net->work, cleanup_net);
- schedule_work(&net->work);
-}
-EXPORT_SYMBOL_GPL(__put_net);
-
/*
* setup_net runs the initializers for the network namespace object.
*/
-static int setup_net(struct net *net)
+static __net_init int setup_net(struct net *net)
{
/* Must be called with net_mutex held */
struct pernet_operations *ops;
@@ -112,9 +51,19 @@ out_undo:
if (ops->exit)
ops->exit(net);
}
+
+ rcu_barrier();
goto out;
}
+#ifdef CONFIG_NET_NS
+static struct kmem_cache *net_cachep;
+
+static struct net *net_alloc(void)
+{
+ return kmem_cache_zalloc(net_cachep, GFP_KERNEL);
+}
+
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
struct net *new_net = NULL;
@@ -125,10 +74,6 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
if (!(flags & CLONE_NEWNET))
return old_net;
-#ifndef CONFIG_NET_NS
- return ERR_PTR(-EINVAL);
-#endif
-
err = -ENOMEM;
new_net = net_alloc();
if (!new_net)
@@ -155,14 +100,78 @@ out:
return new_net;
}
+static void net_free(struct net *net)
+{
+ if (!net)
+ return;
+
+ if (unlikely(atomic_read(&net->use_count) != 0)) {
+ printk(KERN_EMERG "network namespace not free! Usage: %d\n",
+ atomic_read(&net->use_count));
+ return;
+ }
+
+ kmem_cache_free(net_cachep, net);
+}
+
+static void cleanup_net(struct work_struct *work)
+{
+ struct pernet_operations *ops;
+ struct net *net;
+
+ net = container_of(work, struct net, work);
+
+ mutex_lock(&net_mutex);
+
+ /* Don't let anyone else find us. */
+ rtnl_lock();
+ list_del(&net->list);
+ rtnl_unlock();
+
+ /* Run all of the network namespace exit methods */
+ list_for_each_entry_reverse(ops, &pernet_list, list) {
+ if (ops->exit)
+ ops->exit(net);
+ }
+
+ mutex_unlock(&net_mutex);
+
+ /* Ensure there are no outstanding rcu callbacks using this
+ * network namespace.
+ */
+ rcu_barrier();
+
+ /* Finally it is safe to free my network namespace structure */
+ net_free(net);
+}
+
+void __put_net(struct net *net)
+{
+ /* Cleanup the network namespace in process context */
+ INIT_WORK(&net->work, cleanup_net);
+ schedule_work(&net->work);
+}
+EXPORT_SYMBOL_GPL(__put_net);
+
+#else
+struct net *copy_net_ns(unsigned long flags, struct net *old_net)
+{
+ if (flags & CLONE_NEWNET)
+ return ERR_PTR(-EINVAL);
+ return old_net;
+}
+#endif
+
static int __init net_ns_init(void)
{
int err;
printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net));
+#ifdef CONFIG_NET_NS
net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
SMP_CACHE_BYTES,
SLAB_PANIC, NULL);
+#endif
mutex_lock(&net_mutex);
err = setup_net(&init_net);
@@ -185,29 +194,28 @@ static int register_pernet_operations(struct list_head *list,
struct net *net, *undo_net;
int error;
- error = 0;
list_add_tail(&ops->list, list);
- for_each_net(net) {
- if (ops->init) {
+ if (ops->init) {
+ for_each_net(net) {
error = ops->init(net);
if (error)
goto out_undo;
}
}
-out:
- return error;
+ return 0;
out_undo:
/* If I have an error cleanup all namespaces I initialized */
list_del(&ops->list);
- for_each_net(undo_net) {
- if (undo_net == net)
- goto undone;
- if (ops->exit)
+ if (ops->exit) {
+ for_each_net(undo_net) {
+ if (undo_net == net)
+ goto undone;
ops->exit(undo_net);
+ }
}
undone:
- goto out;
+ return error;
}
static void unregister_pernet_operations(struct pernet_operations *ops)
@@ -215,8 +223,8 @@ static void unregister_pernet_operations(struct pernet_operations *ops)
struct net *net;
list_del(&ops->list);
- for_each_net(net)
- if (ops->exit)
+ if (ops->exit)
+ for_each_net(net)
ops->exit(net);
}
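
The net effect of the net_namespace.c reshuffle: the slab cache, the cleanup work and the real copy_net_ns() are only built with CONFIG_NET_NS, and kernels without it get a stub that simply refuses CLONE_NEWNET. A caller-side sketch of that contract (illustrative only, not code from the tree; foo_get_net is an invented name):

static struct net *foo_get_net(unsigned long clone_flags, struct net *old_net)
{
        struct net *net;

        net = copy_net_ns(clone_flags, old_net);
        if (IS_ERR(net))
                /* -EINVAL: CLONE_NEWNET on a kernel built without
                 * CONFIG_NET_NS; otherwise whatever net_alloc() or
                 * setup_net() failed with. */
                return net;

        /* Without CLONE_NEWNET both configurations hand back old_net. */
        return net;
}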
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index bf8d18f1b013..c499b5c69bed 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -116,6 +116,29 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
* network adapter, forcing superfluous retries and possibly timeouts.
* Thus, we set our budget to greater than 1.
*/
+static int poll_one_napi(struct netpoll_info *npinfo,
+ struct napi_struct *napi, int budget)
+{
+ int work;
+
+ /* net_rx_action's ->poll() invocations and ours are
+ * synchronized by this test which is only made while
+ * holding the napi->poll_lock.
+ */
+ if (!test_bit(NAPI_STATE_SCHED, &napi->state))
+ return budget;
+
+ npinfo->rx_flags |= NETPOLL_RX_DROP;
+ atomic_inc(&trapped);
+
+ work = napi->poll(napi, budget);
+
+ atomic_dec(&trapped);
+ npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+
+ return budget - work;
+}
+
static void poll_napi(struct netpoll *np)
{
struct netpoll_info *npinfo = np->dev->npinfo;
@@ -123,17 +146,13 @@ static void poll_napi(struct netpoll *np)
int budget = 16;
list_for_each_entry(napi, &np->dev->napi_list, dev_list) {
- if (test_bit(NAPI_STATE_SCHED, &napi->state) &&
- napi->poll_owner != smp_processor_id() &&
+ if (napi->poll_owner != smp_processor_id() &&
spin_trylock(&napi->poll_lock)) {
- npinfo->rx_flags |= NETPOLL_RX_DROP;
- atomic_inc(&trapped);
-
- napi->poll(napi, budget);
-
- atomic_dec(&trapped);
- npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+ budget = poll_one_napi(npinfo, napi, budget);
spin_unlock(&napi->poll_lock);
+
+ if (!budget)
+ break;
}
}
}
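
Read together with the NAPI_STATE_SCHED test added to net_rx_action() earlier in this patch, the rule both paths now follow can be paraphrased as below (an illustrative restatement, not code from the tree):

/*
 * net_rx_action() and netpoll's poll_one_napi() both take napi->poll_lock
 * (netpoll via spin_trylock()) and call ->poll() only while NAPI_STATE_SCHED
 * is still set.  Whichever side loses the race skips the call, so a given
 * NAPI instance's ->poll() never runs concurrently and never runs while the
 * instance is not actually scheduled.
 */
static int guarded_poll(struct napi_struct *napi, int weight)
{
        int work = 0;

        if (test_bit(NAPI_STATE_SCHED, &napi->state))
                work = napi->poll(napi, weight);

        return work;
}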
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 573e17240197..64b50ff7a413 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2028,8 +2028,8 @@ void __init skb_init(void)
* Fill the specified scatter-gather list with mappings/pointers into a
* region of the buffer space attached to a socket buffer.
*/
-int
-skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+static int
+__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
@@ -2078,7 +2078,8 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
- elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
+ elt += __skb_to_sgvec(list, sg+elt, offset - start,
+ copy);
if ((len -= copy) == 0)
return elt;
offset += copy;
@@ -2090,6 +2091,15 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
return elt;
}
+int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+{
+ int nsg = __skb_to_sgvec(skb, sg, offset, len);
+
+ __sg_mark_end(&sg[nsg - 1]);
+
+ return nsg;
+}
+
/**
* skb_cow_data - Check that a socket buffer's data buffers are writable
* @skb: The socket buffer to check.
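
skb_to_sgvec() now terminates the scatterlist itself (via __sg_mark_end() on the last entry), which is why the esp4, esp6 and rxkad hunks further down stop wrapping it in sg_mark_end(). A hypothetical caller, shown only for the call shape (foo_map_skb is an invented name; nfrags would normally come from skb_cow_data()):

static int foo_map_skb(struct sk_buff *skb, struct scatterlist *sg,
                       int nfrags, int offset, int len)
{
        int nsg;

        sg_init_table(sg, nfrags);
        /* The last entry of the returned vector is already marked as the
         * end, so no separate sg_mark_end() call is needed before handing
         * sg to the crypto layer. */
        nsg = skb_to_sgvec(skb, sg, offset, len);

        return nsg;
}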
diff --git a/net/core/sock.c b/net/core/sock.c
index bba9949681ff..12ad2067a988 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -857,46 +857,43 @@ static inline void sock_lock_init(struct sock *sk)
af_family_keys + sk->sk_family);
}
-/**
- * sk_alloc - All socket objects are allocated here
- * @net: the applicable net namespace
- * @family: protocol family
- * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
- * @prot: struct proto associated with this new sock instance
- * @zero_it: if we should zero the newly allocated sock
- */
-struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
- struct proto *prot, int zero_it)
+static void sock_copy(struct sock *nsk, const struct sock *osk)
+{
+#ifdef CONFIG_SECURITY_NETWORK
+ void *sptr = nsk->sk_security;
+#endif
+
+ memcpy(nsk, osk, osk->sk_prot->obj_size);
+#ifdef CONFIG_SECURITY_NETWORK
+ nsk->sk_security = sptr;
+ security_sk_clone(osk, nsk);
+#endif
+}
+
+static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
+ int family)
{
- struct sock *sk = NULL;
- struct kmem_cache *slab = prot->slab;
+ struct sock *sk;
+ struct kmem_cache *slab;
+ slab = prot->slab;
if (slab != NULL)
sk = kmem_cache_alloc(slab, priority);
else
sk = kmalloc(prot->obj_size, priority);
- if (sk) {
- if (zero_it) {
- memset(sk, 0, prot->obj_size);
- sk->sk_family = family;
- /*
- * See comment in struct sock definition to understand
- * why we need sk_prot_creator -acme
- */
- sk->sk_prot = sk->sk_prot_creator = prot;
- sock_lock_init(sk);
- sk->sk_net = get_net(net);
- }
-
+ if (sk != NULL) {
if (security_sk_alloc(sk, family, priority))
goto out_free;
if (!try_module_get(prot->owner))
- goto out_free;
+ goto out_free_sec;
}
+
return sk;
+out_free_sec:
+ security_sk_free(sk);
out_free:
if (slab != NULL)
kmem_cache_free(slab, sk);
@@ -905,10 +902,53 @@ out_free:
return NULL;
}
+static void sk_prot_free(struct proto *prot, struct sock *sk)
+{
+ struct kmem_cache *slab;
+ struct module *owner;
+
+ owner = prot->owner;
+ slab = prot->slab;
+
+ security_sk_free(sk);
+ if (slab != NULL)
+ kmem_cache_free(slab, sk);
+ else
+ kfree(sk);
+ module_put(owner);
+}
+
+/**
+ * sk_alloc - All socket objects are allocated here
+ * @net: the applicable net namespace
+ * @family: protocol family
+ * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ * @prot: struct proto associated with this new sock instance
+ * @zero_it: if we should zero the newly allocated sock
+ */
+struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
+ struct proto *prot)
+{
+ struct sock *sk;
+
+ sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
+ if (sk) {
+ sk->sk_family = family;
+ /*
+ * See comment in struct sock definition to understand
+ * why we need sk_prot_creator -acme
+ */
+ sk->sk_prot = sk->sk_prot_creator = prot;
+ sock_lock_init(sk);
+ sk->sk_net = get_net(net);
+ }
+
+ return sk;
+}
+
void sk_free(struct sock *sk)
{
struct sk_filter *filter;
- struct module *owner = sk->sk_prot_creator->owner;
if (sk->sk_destruct)
sk->sk_destruct(sk);
@@ -925,25 +965,22 @@ void sk_free(struct sock *sk)
printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
__FUNCTION__, atomic_read(&sk->sk_omem_alloc));
- security_sk_free(sk);
put_net(sk->sk_net);
- if (sk->sk_prot_creator->slab != NULL)
- kmem_cache_free(sk->sk_prot_creator->slab, sk);
- else
- kfree(sk);
- module_put(owner);
+ sk_prot_free(sk->sk_prot_creator, sk);
}
struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
- struct sock *newsk = sk_alloc(sk->sk_net, sk->sk_family, priority, sk->sk_prot, 0);
+ struct sock *newsk;
+ newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
if (newsk != NULL) {
struct sk_filter *filter;
sock_copy(newsk, sk);
/* SANITY */
+ get_net(newsk->sk_net);
sk_node_init(&newsk->sk_node);
sock_lock_init(newsk);
bh_lock_sock(newsk);
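
Most of the per-protocol hunks that follow are mechanical conversions to the new sk_alloc() prototype. A hypothetical create routine showing only the call shape (PF_FOO and foo_proto are invented names; kernel-style C, not meant to build standalone):

static int foo_create(struct net *net, struct socket *sock, int protocol)
{
        struct sock *sk;

        /* The old fifth argument (zero_it) is gone: sk_alloc() now always
         * zeroes the socket by passing __GFP_ZERO down to sk_prot_alloc(). */
        sk = sk_alloc(net, PF_FOO, GFP_KERNEL, &foo_proto);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);
        return 0;
}

sk_clone() is the one caller that must not zero the new object; as the hunk above shows, it now calls sk_prot_alloc() directly and relies on sock_copy() to duplicate the parent socket.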
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index aabe98d9402f..57d574951838 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -474,7 +474,7 @@ static struct proto dn_proto = {
static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
{
struct dn_scp *scp;
- struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto, 1);
+ struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto);
if (!sk)
goto out;
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 9cae16b4e0b7..f70df073c588 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -624,7 +624,7 @@ static int econet_create(struct net *net, struct socket *sock, int protocol)
sock->state = SS_UNCONNECTED;
err = -ENOBUFS;
- sk = sk_alloc(net, PF_ECONET, GFP_KERNEL, &econet_proto, 1);
+ sk = sk_alloc(net, PF_ECONET, GFP_KERNEL, &econet_proto);
if (sk == NULL)
goto out;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 621b128897d7..d2f22e74b267 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -323,7 +323,7 @@ lookup_protocol:
BUG_TRAP(answer_prot->slab != NULL);
err = -ENOBUFS;
- sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, 1);
+ sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
if (sk == NULL)
goto out;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index cad4278025ad..c31bccb9b526 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -111,9 +111,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
goto unlock;
}
sg_init_table(sg, nfrags);
- sg_mark_end(sg, skb_to_sgvec(skb, sg, esph->enc_data +
- esp->conf.ivlen -
- skb->data, clen));
+ skb_to_sgvec(skb, sg,
+ esph->enc_data +
+ esp->conf.ivlen -
+ skb->data, clen);
err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
if (unlikely(sg != &esp->sgbuf[0]))
kfree(sg);
@@ -205,8 +206,9 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
goto out;
}
sg_init_table(sg, nfrags);
- sg_mark_end(sg, skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen,
- elen));
+ skb_to_sgvec(skb, sg,
+ sizeof(*esph) + esp->conf.ivlen,
+ elen);
err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
if (unlikely(sg != &esp->sgbuf[0]))
kfree(sg);
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index 6a1fec416eaf..427b593c1069 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -48,8 +48,6 @@
/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>
-/* for proc_net_create/proc_net_remove */
-#include <linux/proc_fs.h>
#include <net/net_namespace.h>
#include <net/ip_vs.h>
@@ -547,71 +545,6 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}
-
-#ifdef CONFIG_IP_VS_LBLCR_DEBUG
-static struct ip_vs_lblcr_table *lblcr_table_list;
-
-/*
- * /proc/net/ip_vs_lblcr to display the mappings of
- * destination IP address <==> its serverSet
- */
-static int
-ip_vs_lblcr_getinfo(char *buffer, char **start, off_t offset, int length)
-{
- off_t pos=0, begin;
- int len=0, size;
- struct ip_vs_lblcr_table *tbl;
- unsigned long now = jiffies;
- int i;
- struct ip_vs_lblcr_entry *en;
-
- tbl = lblcr_table_list;
-
- size = sprintf(buffer, "LastTime Dest IP address Server set\n");
- pos += size;
- len += size;
-
- for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
- read_lock_bh(&tbl->lock);
- list_for_each_entry(en, &tbl->bucket[i], list) {
- char tbuf[16];
- struct ip_vs_dest_list *d;
-
- sprintf(tbuf, "%u.%u.%u.%u", NIPQUAD(en->addr));
- size = sprintf(buffer+len, "%8lu %-16s ",
- now-en->lastuse, tbuf);
-
- read_lock(&en->set.lock);
- for (d=en->set.list; d!=NULL; d=d->next) {
- size += sprintf(buffer+len+size,
- "%u.%u.%u.%u ",
- NIPQUAD(d->dest->addr));
- }
- read_unlock(&en->set.lock);
- size += sprintf(buffer+len+size, "\n");
- len += size;
- pos += size;
- if (pos <= offset)
- len=0;
- if (pos >= offset+length) {
- read_unlock_bh(&tbl->lock);
- goto done;
- }
- }
- read_unlock_bh(&tbl->lock);
- }
-
- done:
- begin = len - (pos - offset);
- *start = buffer + begin;
- len -= begin;
- if(len>length)
- len = length;
- return len;
-}
-#endif
-
-
static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
{
int i;
@@ -650,9 +583,6 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL;
add_timer(&tbl->periodic_timer);
-#ifdef CONFIG_IP_VS_LBLCR_DEBUG
- lblcr_table_list = tbl;
-#endif
return 0;
}
@@ -843,18 +773,12 @@ static int __init ip_vs_lblcr_init(void)
{
INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
sysctl_header = register_sysctl_table(lblcr_root_table);
-#ifdef CONFIG_IP_VS_LBLCR_DEBUG
- proc_net_create(&init_net, "ip_vs_lblcr", 0, ip_vs_lblcr_getinfo);
-#endif
return register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
}
static void __exit ip_vs_lblcr_cleanup(void)
{
-#ifdef CONFIG_IP_VS_LBLCR_DEBUG
- proc_net_remove(&init_net, "ip_vs_lblcr");
-#endif
unregister_sysctl_table(sysctl_header);
unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
}
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index c99f2a33fb9e..0d4d9721cbd4 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -72,7 +72,6 @@ struct ip_vs_sync_thread_data {
int state;
};
-#define IP_VS_SYNC_CONN_TIMEOUT (3*60*HZ)
#define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn))
#define FULL_CONN_SIZE \
(sizeof(struct ip_vs_sync_conn) + sizeof(struct ip_vs_sync_conn_options))
@@ -284,6 +283,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
struct ip_vs_sync_conn *s;
struct ip_vs_sync_conn_options *opt;
struct ip_vs_conn *cp;
+ struct ip_vs_protocol *pp;
char *p;
int i;
@@ -342,7 +342,8 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
p += SIMPLE_CONN_SIZE;
atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
- cp->timeout = IP_VS_SYNC_CONN_TIMEOUT;
+ pp = ip_vs_proto_get(s->protocol);
+ cp->timeout = pp->timeout_table[cp->state];
ip_vs_conn_put(cp);
if (p > buffer+buflen) {
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index fa97947c6ae1..9aca9c55687c 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -128,7 +128,7 @@ config IP_NF_MATCH_ADDRTYPE
eg. UNICAST, LOCAL, BROADCAST, ...
If you want to compile it as a module, say M here and read
- <file:Documentation/modules.txt>. If unsure, say `N'.
+ <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
# `filter', generic and specific targets
config IP_NF_FILTER
@@ -371,7 +371,7 @@ config IP_NF_RAW
and OUTPUT chains.
If you want to compile it as a module, say M here and read
- <file:Documentation/modules.txt>. If unsure, say `N'.
+ <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
# ARP tables
config IP_NF_ARPTABLES
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 9be0daa9c0ec..ffdccc0972e0 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -304,7 +304,7 @@ static void icmp_put(struct seq_file *seq)
for (i=0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " %lu",
snmp_fold_field((void **) icmpmsg_statistics,
- icmpmibmap[i].index));
+ icmpmibmap[i].index | 0x100));
}
/*
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2e6ad6dbba6c..c64072bb504b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2453,7 +2453,7 @@ void __init tcp_init(void)
0,
&tcp_hashinfo.ehash_size,
NULL,
- 0);
+ thash_entries ? 0 : 512 * 1024);
tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
rwlock_init(&tcp_hashinfo.ehash[i].lock);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 69d8c38ccd39..ca9590f4f520 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1330,12 +1330,15 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
cached_fack_count = 0;
}
- for (i=0; i<num_sacks; i++, sp++) {
+ for (i = 0; i < num_sacks; i++) {
struct sk_buff *skb;
__u32 start_seq = ntohl(sp->start_seq);
__u32 end_seq = ntohl(sp->end_seq);
int fack_count;
int dup_sack = (found_dup_sack && (i == first_sack_index));
+ int next_dup = (found_dup_sack && (i+1 == first_sack_index));
+
+ sp++;
if (!tcp_is_sackblock_valid(tp, dup_sack, start_seq, end_seq)) {
if (dup_sack) {
@@ -1361,7 +1364,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
flag |= FLAG_DATA_LOST;
tcp_for_write_queue_from(skb, sk) {
- int in_sack;
+ int in_sack = 0;
u8 sacked;
if (skb == tcp_send_head(sk))
@@ -1380,7 +1383,23 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
if (!before(TCP_SKB_CB(skb)->seq, end_seq))
break;
- in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq);
+ dup_sack = (found_dup_sack && (i == first_sack_index));
+
+ /* Due to sorting DSACK may reside within this SACK block! */
+ if (next_dup) {
+ u32 dup_start = ntohl(sp->start_seq);
+ u32 dup_end = ntohl(sp->end_seq);
+
+ if (before(TCP_SKB_CB(skb)->seq, dup_end)) {
+ in_sack = tcp_match_skb_to_sack(sk, skb, dup_start, dup_end);
+ if (in_sack > 0)
+ dup_sack = 1;
+ }
+ }
+
+ /* DSACK info lost if out-of-mem, try SACK still */
+ if (in_sack <= 0)
+ in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq);
if (in_sack < 0)
break;
@@ -2059,7 +2078,7 @@ static void tcp_update_scoreboard(struct sock *sk)
if (!tcp_skb_timedout(sk, skb))
break;
- if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
+ if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) {
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
tcp_verify_retransmit_hint(tp, skb);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ad759f1c3777..eec02b29ffcf 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -858,16 +858,16 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
u8 *newkey, u8 newkeylen)
{
/* Add Key to the list */
- struct tcp4_md5sig_key *key;
+ struct tcp_md5sig_key *key;
struct tcp_sock *tp = tcp_sk(sk);
struct tcp4_md5sig_key *keys;
- key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr);
+ key = tcp_v4_md5_do_lookup(sk, addr);
if (key) {
/* Pre-existing entry - just update that one. */
- kfree(key->base.key);
- key->base.key = newkey;
- key->base.keylen = newkeylen;
+ kfree(key->key);
+ key->key = newkey;
+ key->keylen = newkeylen;
} else {
struct tcp_md5sig_info *md5sig;
@@ -1083,7 +1083,7 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
sg_set_buf(&sg[block++], key->key, key->keylen);
nbytes += key->keylen;
- sg_mark_end(sg, block);
+ __sg_mark_end(&sg[block - 1]);
/* Now store the Hash into the packet */
err = crypto_hash_init(desc);
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index b49dedcda52d..007304e99842 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -266,26 +266,25 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
*/
diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
- if (tp->snd_cwnd <= tp->snd_ssthresh) {
- /* Slow start. */
- if (diff > gamma) {
- /* Going too fast. Time to slow down
- * and switch to congestion avoidance.
- */
- tp->snd_ssthresh = 2;
-
- /* Set cwnd to match the actual rate
- * exactly:
- * cwnd = (actual rate) * baseRTT
- * Then we add 1 because the integer
- * truncation robs us of full link
- * utilization.
- */
- tp->snd_cwnd = min(tp->snd_cwnd,
- (target_cwnd >>
- V_PARAM_SHIFT)+1);
+ if (diff > gamma && tp->snd_ssthresh > 2 ) {
+ /* Going too fast. Time to slow down
+ * and switch to congestion avoidance.
+ */
+ tp->snd_ssthresh = 2;
+
+ /* Set cwnd to match the actual rate
+ * exactly:
+ * cwnd = (actual rate) * baseRTT
+ * Then we add 1 because the integer
+ * truncation robs us of full link
+ * utilization.
+ */
+ tp->snd_cwnd = min(tp->snd_cwnd,
+ (target_cwnd >>
+ V_PARAM_SHIFT)+1);
- }
+ } else if (tp->snd_cwnd <= tp->snd_ssthresh) {
+ /* Slow start. */
tcp_slow_start(tp);
} else {
/* Congestion avoidance. */
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 348bd8d06112..1bd8d818f8e9 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4288,8 +4288,4 @@ void __exit addrconf_cleanup(void)
del_timer(&addr_chk_timer);
rtnl_unlock();
-
-#ifdef CONFIG_PROC_FS
- proc_net_remove(&init_net, "if_inet6");
-#endif
}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 1b1caf3aa1c1..ecbd38894fdd 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -162,7 +162,7 @@ lookup_protocol:
BUG_TRAP(answer_prot->slab != NULL);
err = -ENOBUFS;
- sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot, 1);
+ sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot);
if (sk == NULL)
goto out;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index ab17b5e62355..7db66f10e00d 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -110,9 +110,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
goto unlock;
}
sg_init_table(sg, nfrags);
- sg_mark_end(sg, skb_to_sgvec(skb, sg, esph->enc_data +
- esp->conf.ivlen -
- skb->data, clen));
+ skb_to_sgvec(skb, sg,
+ esph->enc_data +
+ esp->conf.ivlen -
+ skb->data, clen);
err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
if (unlikely(sg != &esp->sgbuf[0]))
kfree(sg);
@@ -209,9 +210,9 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
}
}
sg_init_table(sg, nfrags);
- sg_mark_end(sg, skb_to_sgvec(skb, sg,
- sizeof(*esph) + esp->conf.ivlen,
- elen));
+ skb_to_sgvec(skb, sg,
+ sizeof(*esph) + esp->conf.ivlen,
+ elen);
ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
if (unlikely(sg != &esp->sgbuf[0]))
kfree(sg);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 20cfc90d5597..36f7dbfb6dbb 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1670,7 +1670,7 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * f
filp, buffer, lenp, ppos);
else if ((strcmp(ctl->procname, "retrans_time_ms") == 0) ||
- (strcmp(ctl->procname, "base_reacable_time_ms") == 0))
+ (strcmp(ctl->procname, "base_reachable_time_ms") == 0))
ret = proc_dointvec_ms_jiffies(ctl, write,
filp, buffer, lenp, ppos);
else
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 85208026278b..4b9032880959 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -561,16 +561,16 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
char *newkey, u8 newkeylen)
{
/* Add key to the list */
- struct tcp6_md5sig_key *key;
+ struct tcp_md5sig_key *key;
struct tcp_sock *tp = tcp_sk(sk);
struct tcp6_md5sig_key *keys;
- key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
+ key = tcp_v6_md5_do_lookup(sk, peer);
if (key) {
/* modify existing entry - just update that one */
- kfree(key->base.key);
- key->base.key = newkey;
- key->base.keylen = newkeylen;
+ kfree(key->key);
+ key->key = newkey;
+ key->keylen = newkeylen;
} else {
/* reallocate new list if current one is full. */
if (!tp->md5sig_info) {
@@ -781,7 +781,7 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
sg_set_buf(&sg[block++], key->key, key->keylen);
nbytes += key->keylen;
- sg_mark_end(sg, block);
+ __sg_mark_end(&sg[block - 1]);
/* Now store the hash into the packet */
err = crypto_hash_init(desc);
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 29b063d43120..a195a66e0cc7 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1381,7 +1381,7 @@ static int ipx_create(struct net *net, struct socket *sock, int protocol)
goto out;
rc = -ENOMEM;
- sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto, 1);
+ sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto);
if (!sk)
goto out;
#ifdef IPX_REFCNT_DEBUG
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 0328ae2654f4..48ce59a6e026 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1078,7 +1078,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol)
}
/* Allocate networking socket */
- sk = sk_alloc(net, PF_IRDA, GFP_ATOMIC, &irda_proto, 1);
+ sk = sk_alloc(net, PF_IRDA, GFP_ATOMIC, &irda_proto);
if (sk == NULL)
return -ENOMEM;
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 2f9f8dce5a69..e0eab5927c4f 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -731,15 +731,25 @@ dev_irnet_ioctl(struct inode * inode,
/* Get termios */
case TCGETS:
DEBUG(FS_INFO, "Get termios.\n");
+#ifndef TCGETS2
if(kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios))
break;
+#else
+ if(kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios))
+ break;
+#endif
err = 0;
break;
/* Set termios */
case TCSETSF:
DEBUG(FS_INFO, "Set termios.\n");
+#ifndef TCGETS2
if(user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp))
break;
+#else
+ if(user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp))
+ break;
+#endif
err = 0;
break;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 43e01c8d382b..aef664580355 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -216,7 +216,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
struct sock *sk;
- sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, 1);
+ sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
if (!sk)
return NULL;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 7969f8a716df..10c89d47f685 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -152,7 +152,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol)
return -EPROTONOSUPPORT;
err = -ENOMEM;
- sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto, 1);
+ sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto);
if (sk == NULL)
goto out;
@@ -395,9 +395,9 @@ static inline int pfkey_sec_ctx_len(struct sadb_x_sec_ctx *sec_ctx)
static inline int verify_sec_ctx_len(void *p)
{
struct sadb_x_sec_ctx *sec_ctx = (struct sadb_x_sec_ctx *)p;
- int len;
+ int len = sec_ctx->sadb_x_ctx_len;
- if (sec_ctx->sadb_x_ctx_len > PAGE_SIZE)
+ if (len > PAGE_SIZE)
return -EINVAL;
len = pfkey_sec_ctx_len(sec_ctx);
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 8ebc2769dfda..5c0b484237c8 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -869,7 +869,7 @@ static void llc_sk_init(struct sock* sk)
*/
struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot)
{
- struct sock *sk = sk_alloc(net, family, priority, prot, 1);
+ struct sock *sk = sk_alloc(net, family, priority, prot);
if (!sk)
goto out;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index d7a600a5720a..21a9fcc03796 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -363,7 +363,7 @@ config NETFILTER_XT_TARGET_TRACE
the tables, chains, rules.
If you want to compile it as a module, say M here and read
- <file:Documentation/modules.txt>. If unsure, say `N'.
+ <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
config NETFILTER_XT_TARGET_SECMARK
tristate '"SECMARK" target support'
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 4d6171bc0829..000c2fb462d0 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -999,7 +999,7 @@ struct hlist_head *nf_ct_alloc_hashtable(int *sizep, int *vmalloced)
*vmalloced = 0;
size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
- hash = (void*)__get_free_pages(GFP_KERNEL,
+ hash = (void*)__get_free_pages(GFP_KERNEL|__GFP_NOWARN,
get_order(sizeof(struct hlist_head)
* size));
if (!hash) {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 4f994c0fb3f8..260171255576 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -396,7 +396,7 @@ static int __netlink_create(struct net *net, struct socket *sock,
sock->ops = &netlink_ops;
- sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
+ sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
if (!sk)
return -ENOMEM;
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 3a4d479ea64e..972250c974f1 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -423,7 +423,8 @@ static int nr_create(struct net *net, struct socket *sock, int protocol)
if (sock->type != SOCK_SEQPACKET || protocol != 0)
return -ESOCKTNOSUPPORT;
- if ((sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto, 1)) == NULL)
+ sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto);
+ if (sk == NULL)
return -ENOMEM;
nr = nr_sk(sk);
@@ -465,7 +466,8 @@ static struct sock *nr_make_new(struct sock *osk)
if (osk->sk_type != SOCK_SEQPACKET)
return NULL;
- if ((sk = sk_alloc(osk->sk_net, PF_NETROM, GFP_ATOMIC, osk->sk_prot, 1)) == NULL)
+ sk = sk_alloc(osk->sk_net, PF_NETROM, GFP_ATOMIC, osk->sk_prot);
+ if (sk == NULL)
return NULL;
nr = nr_sk(sk);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d0936506b731..4cb2dfba0993 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -995,7 +995,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol)
sock->state = SS_UNCONNECTED;
err = -ENOBUFS;
- sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, 1);
+ sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
if (sk == NULL)
goto out;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 509defe53ee5..ed2d65cd8010 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -513,7 +513,8 @@ static int rose_create(struct net *net, struct socket *sock, int protocol)
if (sock->type != SOCK_SEQPACKET || protocol != 0)
return -ESOCKTNOSUPPORT;
- if ((sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL)
+ sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
+ if (sk == NULL)
return -ENOMEM;
rose = rose_sk(sk);
@@ -551,7 +552,8 @@ static struct sock *rose_make_new(struct sock *osk)
if (osk->sk_type != SOCK_SEQPACKET)
return NULL;
- if ((sk = sk_alloc(osk->sk_net, PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL)
+ sk = sk_alloc(osk->sk_net, PF_ROSE, GFP_ATOMIC, &rose_proto);
+ if (sk == NULL)
return NULL;
rose = rose_sk(sk);
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index c680017f5c8e..d6389450c4bf 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -627,7 +627,7 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol)
sock->ops = &rxrpc_rpc_ops;
sock->state = SS_UNCONNECTED;
- sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, 1);
+ sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto);
if (!sk)
return -ENOMEM;
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index eebefb6ef139..c387cf68a08c 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -237,7 +237,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
len = data_size + call->conn->size_align - 1;
len &= ~(call->conn->size_align - 1);
- sg_init_table(sg, skb_to_sgvec(skb, sg, 0, len));
+ sg_init_table(sg, nsg);
+ skb_to_sgvec(skb, sg, 0, len);
crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
_leave(" = 0");
@@ -344,7 +345,7 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
goto nomem;
sg_init_table(sg, nsg);
- sg_mark_end(sg, skb_to_sgvec(skb, sg, 0, 8));
+ skb_to_sgvec(skb, sg, 0, 8);
/* start the decryption afresh */
memset(&iv, 0, sizeof(iv));
@@ -426,7 +427,7 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
}
sg_init_table(sg, nsg);
- sg_mark_end(sg, skb_to_sgvec(skb, sg, 0, skb->len));
+ skb_to_sgvec(skb, sg, 0, skb->len);
/* decrypt from the session key */
payload = call->conn->key->payload.data;
@@ -701,7 +702,7 @@ static void rxkad_sg_set_buf2(struct scatterlist sg[2],
nsg++;
}
- sg_mark_end(sg, nsg);
+ __sg_mark_end(&sg[nsg - 1]);
ASSERTCMP(sg[0].length + sg[1].length, ==, buflen);
}
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 8af1004abefe..6d5fa6bb371b 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -556,7 +556,7 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc)
return &sctp_hmac_list[id];
}
-static int __sctp_auth_find_hmacid(__u16 *hmacs, int n_elts, __u16 hmac_id)
+static int __sctp_auth_find_hmacid(__be16 *hmacs, int n_elts, __be16 hmac_id)
{
int found = 0;
int i;
@@ -573,7 +573,7 @@ static int __sctp_auth_find_hmacid(__u16 *hmacs, int n_elts, __u16 hmac_id)
/* See if the HMAC_ID is one that we claim as supported */
int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc,
- __u16 hmac_id)
+ __be16 hmac_id)
{
struct sctp_hmac_algo_param *hmacs;
__u16 n_elt;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index eb4deaf58914..7f31ff638bc6 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -631,7 +631,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct sctp6_sock *newsctp6sk;
- newsk = sk_alloc(sk->sk_net, PF_INET6, GFP_KERNEL, sk->sk_prot, 1);
+ newsk = sk_alloc(sk->sk_net, PF_INET6, GFP_KERNEL, sk->sk_prot);
if (!newsk)
goto out;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index f5cd96f5fe74..40c1a47d1b8d 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -552,7 +552,8 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
{
struct inet_sock *inet = inet_sk(sk);
struct inet_sock *newinet;
- struct sock *newsk = sk_alloc(sk->sk_net, PF_INET, GFP_KERNEL, sk->sk_prot, 1);
+ struct sock *newsk = sk_alloc(sk->sk_net, PF_INET, GFP_KERNEL,
+ sk->sk_prot);
if (!newsk)
goto out;
diff --git a/net/socket.c b/net/socket.c
index 540013ea8620..5d879fd3d01d 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1250,11 +1250,14 @@ asmlinkage long sys_socketpair(int family, int type, int protocol,
goto out_release_both;
fd1 = sock_alloc_fd(&newfile1);
- if (unlikely(fd1 < 0))
+ if (unlikely(fd1 < 0)) {
+ err = fd1;
goto out_release_both;
+ }
fd2 = sock_alloc_fd(&newfile2);
if (unlikely(fd2 < 0)) {
+ err = fd2;
put_filp(newfile1);
put_unused_fd(fd1);
goto out_release_both;
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 91cd8f0d1e10..ab7cbd6575c4 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -211,8 +211,8 @@ encryptor(struct scatterlist *sg, void *data)
if (thislen == 0)
return 0;
- sg_mark_end(desc->infrags, desc->fragno);
- sg_mark_end(desc->outfrags, desc->fragno);
+ __sg_mark_end(&desc->infrags[desc->fragno - 1]);
+ __sg_mark_end(&desc->outfrags[desc->fragno - 1]);
ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
desc->infrags, thislen);
@@ -293,7 +293,7 @@ decryptor(struct scatterlist *sg, void *data)
if (thislen == 0)
return 0;
- sg_mark_end(desc->frags, desc->fragno);
+ __sg_mark_end(&desc->frags[desc->fragno - 1]);
ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
desc->frags, thislen);
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
index 864b541bbf51..2be714e9b382 100644
--- a/net/sunrpc/sysctl.c
+++ b/net/sunrpc/sysctl.c
@@ -87,9 +87,8 @@ proc_dodebug(ctl_table *table, int write, struct file *file,
left--, s++;
*(unsigned int *) table->data = value;
/* Display the RPC tasks on writing to rpc_debug */
- if (table->ctl_name == CTL_RPCDEBUG) {
+ if (strcmp(table->procname, "rpc_debug") == 0)
rpc_show_tasks();
- }
} else {
if (!access_ok(VERIFY_WRITE, buffer, left))
return -EFAULT;
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 12db63580427..9e11ce715958 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -181,7 +181,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
struct rpcrdma_read_chunk *cur_rchunk = NULL;
struct rpcrdma_write_array *warray = NULL;
struct rpcrdma_write_chunk *cur_wchunk = NULL;
- u32 *iptr = headerp->rm_body.rm_chunks;
+ __be32 *iptr = headerp->rm_body.rm_chunks;
if (type == rpcrdma_readch || type == rpcrdma_areadch) {
/* a read chunk - server will RDMA Read our memory */
@@ -217,25 +217,25 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
xdr_encode_hyper(
- (u32 *)&cur_rchunk->rc_target.rs_offset,
+ (__be32 *)&cur_rchunk->rc_target.rs_offset,
seg->mr_base);
dprintk("RPC: %s: read chunk "
"elem %d@0x%llx:0x%x pos %d (%s)\n", __func__,
- seg->mr_len, seg->mr_base, seg->mr_rkey, pos,
- n < nsegs ? "more" : "last");
+ seg->mr_len, (unsigned long long)seg->mr_base,
+ seg->mr_rkey, pos, n < nsegs ? "more" : "last");
cur_rchunk++;
r_xprt->rx_stats.read_chunk_count++;
} else { /* write/reply */
cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
xdr_encode_hyper(
- (u32 *)&cur_wchunk->wc_target.rs_offset,
+ (__be32 *)&cur_wchunk->wc_target.rs_offset,
seg->mr_base);
dprintk("RPC: %s: %s chunk "
"elem %d@0x%llx:0x%x (%s)\n", __func__,
(type == rpcrdma_replych) ? "reply" : "write",
- seg->mr_len, seg->mr_base, seg->mr_rkey,
- n < nsegs ? "more" : "last");
+ seg->mr_len, (unsigned long long)seg->mr_base,
+ seg->mr_rkey, n < nsegs ? "more" : "last");
cur_wchunk++;
if (type == rpcrdma_replych)
r_xprt->rx_stats.reply_chunk_count++;
@@ -257,14 +257,14 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
* finish off header. If write, marshal discrim and nchunks.
*/
if (cur_rchunk) {
- iptr = (u32 *) cur_rchunk;
+ iptr = (__be32 *) cur_rchunk;
*iptr++ = xdr_zero; /* finish the read chunk list */
*iptr++ = xdr_zero; /* encode a NULL write chunk list */
*iptr++ = xdr_zero; /* encode a NULL reply chunk */
} else {
warray->wc_discrim = xdr_one;
warray->wc_nchunks = htonl(nchunks);
- iptr = (u32 *) cur_wchunk;
+ iptr = (__be32 *) cur_wchunk;
if (type == rpcrdma_writech) {
*iptr++ = xdr_zero; /* finish the write chunk list */
*iptr++ = xdr_zero; /* encode a NULL reply chunk */
@@ -559,7 +559,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
* RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
*/
static int
-rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp)
+rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, __be32 **iptrp)
{
unsigned int i, total_len;
struct rpcrdma_write_chunk *cur_wchunk;
@@ -573,11 +573,11 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp)
struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
ifdebug(FACILITY) {
u64 off;
- xdr_decode_hyper((u32 *)&seg->rs_offset, &off);
+ xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
__func__,
ntohl(seg->rs_length),
- off,
+ (unsigned long long)off,
ntohl(seg->rs_handle));
}
total_len += ntohl(seg->rs_length);
@@ -585,7 +585,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp)
}
/* check and adjust for properly terminated write chunk */
if (wrchunk) {
- u32 *w = (u32 *) cur_wchunk;
+ __be32 *w = (__be32 *) cur_wchunk;
if (*w++ != xdr_zero)
return -1;
cur_wchunk = (struct rpcrdma_write_chunk *) w;
@@ -593,7 +593,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp)
if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
return -1;
- *iptrp = (u32 *) cur_wchunk;
+ *iptrp = (__be32 *) cur_wchunk;
return total_len;
}
@@ -721,7 +721,7 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
struct rpc_rqst *rqst;
struct rpc_xprt *xprt = rep->rr_xprt;
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
- u32 *iptr;
+ __be32 *iptr;
int i, rdmalen, status;
/* Check status. If bad, signal disconnect and return rep to pool */
@@ -801,7 +801,7 @@ repost:
r_xprt->rx_stats.total_rdma_reply += rdmalen;
} else {
/* else ordinary inline */
- iptr = (u32 *)((unsigned char *)headerp + 28);
+ iptr = (__be32 *)((unsigned char *)headerp + 28);
rep->rr_len -= 28; /*sizeof *headerp;*/
status = rep->rr_len;
}
@@ -816,7 +816,7 @@ repost:
headerp->rm_body.rm_chunks[2] != xdr_one ||
req->rl_nchunks == 0)
goto badheader;
- iptr = (u32 *)((unsigned char *)headerp + 28);
+ iptr = (__be32 *)((unsigned char *)headerp + 28);
rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
if (rdmalen < 0)
goto badheader;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e36b4b5a5222..6b792265dc06 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -201,7 +201,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
return -EPROTOTYPE;
}
- sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, 1);
+ sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
if (!sk) {
tipc_deleteport(ref);
return -ENOMEM;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 9163ec526c2a..515e7a692f9b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -602,7 +602,7 @@ static struct sock * unix_create1(struct net *net, struct socket *sock)
if (atomic_read(&unix_nr_socks) >= 2*get_max_files())
goto out;
- sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, 1);
+ sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
if (!sk)
goto out;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index fc416f9606a9..92cfe8e3e0b8 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -472,7 +472,7 @@ static struct proto x25_proto = {
static struct sock *x25_alloc_socket(struct net *net)
{
struct x25_sock *x25;
- struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto, 1);
+ struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto);
if (!sk)
goto out;