author     Sowmini Varadhan <sowmini.varadhan@oracle.com>  2016-07-14 03:51:01 -0700
committer  David S. Miller <davem@davemloft.net>  2016-07-15 11:36:57 -0700
commit     a93d01f5777e99f24b5b3948e06673ada148337c (patch)
tree       1c8f2320199c9d7e0881fd6020c99d8fd967662d /net/rds
parent     devlink: fix trace format string (diff)
RDS: TCP: avoid bad page reference in rds_tcp_listen_data_ready
As the existing comments in rds_tcp_listen_data_ready() indicate, it is
possible under some race-windows to get to this function with the accept()
socket. If that happens, we could run into a sequence whereby

      thread 1                              thread 2

                                    rds_tcp_accept_one() thread
                                    sets up new_sock via ->accept().
                                    The sk_user_data is now
                                    sock_def_readable

   data comes in for new_sock,
   ->sk_data_ready is called, and
   we land in rds_tcp_listen_data_ready

                                    rds_tcp_set_callbacks()
                                    takes the sk_callback_lock
                                    and sets up sk_user_data to
                                    be the cp

   read_lock sk_callback_lock
   ready = cp
   unlock sk_callback_lock
   page fault on ready

In the above sequence, we end up with a panic on a bad page reference when
trying to execute (*ready)(). Instead we need to call sock_def_readable()
safely, which is what this patch achieves.

Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
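For background on why the listen socket's sk_user_data happens to hold
sock_def_readable at all: when the RDS listen socket is created,
rds_tcp_listen_init() saves the socket's original ->sk_data_ready in
->sk_user_data before installing rds_tcp_listen_data_ready. The sketch below
is paraphrased from the surrounding driver code purely for context and is not
part of this patch; treat the exact lines as an approximation of
net/rds/tcp_listen.c.

	write_lock_bh(&sock->sk->sk_callback_lock);
	/* stash the default ->sk_data_ready (sock_def_readable) so it can
	 * later be recovered via rds_tcp_listen_sock_def_readable()
	 */
	sock->sk->sk_user_data = sock->sk->sk_data_ready;
	sock->sk->sk_data_ready = rds_tcp_listen_data_ready;
	write_unlock_bh(&sock->sk->sk_callback_lock);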
Diffstat (limited to 'net/rds')
-rw-r--r--  net/rds/tcp.c         7
-rw-r--r--  net/rds/tcp.h         1
-rw-r--r--  net/rds/tcp_listen.c  2
3 files changed, 10 insertions, 0 deletions
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index d24f6c142d03..b411bb764f07 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -551,6 +551,13 @@ static void rds_tcp_kill_sock(struct net *net)
}
}
+void *rds_tcp_listen_sock_def_readable(struct net *net)
+{
+ struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+ return rtn->rds_tcp_listen_sock->sk->sk_user_data;
+}
+
static int rds_tcp_dev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 1c3160faa963..9a1cc8906576 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -70,6 +70,7 @@ void rds_tcp_listen_stop(struct socket *);
void rds_tcp_listen_data_ready(struct sock *sk);
int rds_tcp_accept_one(struct socket *sock);
int rds_tcp_keepalive(struct socket *sock);
+void *rds_tcp_listen_sock_def_readable(struct net *net);
/* tcp_recv.c */
int rds_tcp_recv_init(void);
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index ca975a217a49..73040e319e4b 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -183,6 +183,8 @@ void rds_tcp_listen_data_ready(struct sock *sk)
*/
if (sk->sk_state == TCP_LISTEN)
rds_tcp_accept_work(sk);
+ else
+ ready = rds_tcp_listen_sock_def_readable(sock_net(sk));
out:
read_unlock_bh(&sk->sk_callback_lock);
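
With the change applied, rds_tcp_listen_data_ready() reads roughly as below.
This is a reconstruction assembled from the hunk above plus its surrounding
context; the lines outside the hunk (the teardown-race check and the trailing
ready(sk) call) are shown only as an approximation of the upstream file, not
as part of this diff.

	void rds_tcp_listen_data_ready(struct sock *sk)
	{
		void (*ready)(struct sock *sk);

		read_lock_bh(&sk->sk_callback_lock);
		ready = sk->sk_user_data;
		if (!ready) { /* check for teardown race */
			ready = sk->sk_data_ready;
			goto out;
		}

		/* Only the listen socket should kick off accept work.  If we
		 * raced with rds_tcp_accept_one() and were called on the new,
		 * not-yet-initialized accept socket, fall back to the saved
		 * sock_def_readable instead of dereferencing sk_user_data.
		 */
		if (sk->sk_state == TCP_LISTEN)
			rds_tcp_accept_work(sk);
		else
			ready = rds_tcp_listen_sock_def_readable(sock_net(sk));

	out:
		read_unlock_bh(&sk->sk_callback_lock);
		ready(sk);
	}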