author     David Miller <davem@davemloft.net>              2008-11-06 00:37:40 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-11-06 13:51:50 -0800
commit     f8d570a4745835f2238a33b537218a1bb03fc671 (patch)
tree       776c2909523c684f0954949a2947ff0a792ba457
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6 (diff)
net: Fix recursive descent in __scm_destroy().
__scm_destroy() walks the list of file descriptors in the scm_fp_list
pointed to by the scm_cookie argument.

Those, in turn, can close sockets and invoke __scm_destroy() again.
There is nothing which limits how deeply this can occur.

The idea for how to fix this is from Linus.  Basically, we do all of
the fput()s at the top level by collecting all of the scm_fp_list
objects hit by an fput().  Inside of the initial __scm_destroy() we
keep running the list until it is empty.

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/sched.h |  2
-rw-r--r--  include/net/scm.h     |  5
-rw-r--r--  net/core/scm.c        | 24
3 files changed, 26 insertions(+), 5 deletions(-)
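The fix is a standard recursion-to-iteration conversion: only the outermost call to __scm_destroy() owns a work list (published through current->scm_work_list), and any nested call simply appends its scm_fp_list to that list instead of descending further. The stand-alone sketch below illustrates the same pattern outside the kernel; struct node, release_child(), destroy() and the file-scope work_list pointer are all hypothetical stand-ins, and in the real patch the list anchor lives in struct task_struct, so it is per-task rather than a global as it is here.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct scm_fp_list: a node whose teardown
 * may expose another node that also needs tearing down. */
struct node {
	struct node *next;      /* work-list linkage */
	struct node *child;     /* destroying this node may release another */
	const char *name;
};

/* Stand-in for current->scm_work_list: NULL means no destroy is in
 * progress on this call chain.  A real per-task field avoids the
 * thread-safety problem this single-threaded sketch ignores. */
static struct node **work_list;

static void destroy(struct node *n);

/* Called from inside destroy(); in the kernel this is the fput() that
 * may drop the last reference on another AF_UNIX socket. */
static void release_child(struct node *n)
{
	if (n->child)
		destroy(n->child);
}

static void destroy(struct node *n)
{
	if (work_list) {
		/* Nested call: just queue the node, do not recurse. */
		n->next = *work_list;
		*work_list = n;
		return;
	}

	/* Outermost call: own the work list and drain it until empty. */
	struct node *head = n;
	n->next = NULL;
	work_list = &head;

	while (head) {
		struct node *cur = head;
		head = cur->next;

		release_child(cur);     /* may append to the work list */
		printf("freeing %s\n", cur->name);
		free(cur);
	}

	work_list = NULL;
}

int main(void)
{
	/* a -> b -> c: freeing a used to recurse two levels deep;
	 * with the work list it becomes a flat loop. */
	struct node *c = calloc(1, sizeof(*c));
	struct node *b = calloc(1, sizeof(*b));
	struct node *a = calloc(1, sizeof(*a));

	c->name = "c";
	b->name = "b"; b->child = c;
	a->name = "a"; a->child = b;

	destroy(a);
	return 0;
}

However long the chain of children, the kernel stack now only ever holds one destroy() frame plus one release_child() frame, which is exactly the bound the patch needs.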
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b483f39a7112..295b7c756ca6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1349,6 +1349,8 @@ struct task_struct {
*/
unsigned long timer_slack_ns;
unsigned long default_timer_slack_ns;
+
+ struct list_head *scm_work_list;
};
/*
diff --git a/include/net/scm.h b/include/net/scm.h
index 06df126103ca..33e9986beb86 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -14,8 +14,9 @@
struct scm_fp_list
{
- int count;
- struct file *fp[SCM_MAX_FD];
+ struct list_head list;
+ int count;
+ struct file *fp[SCM_MAX_FD];
};
struct scm_cookie
diff --git a/net/core/scm.c b/net/core/scm.c
index 10f5c65f6a47..ab242cc1acca 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -75,6 +75,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
if (!fpl)
return -ENOMEM;
*fplp = fpl;
+ INIT_LIST_HEAD(&fpl->list);
fpl->count = 0;
}
fpp = &fpl->fp[fpl->count];
@@ -106,9 +107,25 @@ void __scm_destroy(struct scm_cookie *scm)
if (fpl) {
scm->fp = NULL;
- for (i=fpl->count-1; i>=0; i--)
- fput(fpl->fp[i]);
- kfree(fpl);
+ if (current->scm_work_list) {
+ list_add_tail(&fpl->list, current->scm_work_list);
+ } else {
+ LIST_HEAD(work_list);
+
+ current->scm_work_list = &work_list;
+
+ list_add(&fpl->list, &work_list);
+ while (!list_empty(&work_list)) {
+ fpl = list_first_entry(&work_list, struct scm_fp_list, list);
+
+ list_del(&fpl->list);
+ for (i=fpl->count-1; i>=0; i--)
+ fput(fpl->fp[i]);
+ kfree(fpl);
+ }
+
+ current->scm_work_list = NULL;
+ }
}
}
@@ -284,6 +301,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
new_fpl = kmalloc(sizeof(*fpl), GFP_KERNEL);
if (new_fpl) {
+ INIT_LIST_HEAD(&new_fpl->list);
for (i=fpl->count-1; i>=0; i--)
get_file(fpl->fp[i]);
memcpy(new_fpl, fpl, sizeof(*fpl));
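For context, the kind of descriptor chain that makes the teardown nest looks roughly like the userspace sketch below: each AF_UNIX socketpair carries one end of the previous pair in-flight via SCM_RIGHTS, so closing the last pair makes the kernel release the queued socket, which purges that socket's own receive queue, which releases the next one, and so on. The DEPTH value, the use of SOCK_DGRAM socketpairs and the send_fd() helper are illustrative assumptions rather than anything taken from the commit; the point is only that userspace can build an arbitrarily long chain of this shape, which is why the unbounded descent had to go.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Send file descriptor fd over the AF_UNIX socket sock using SCM_RIGHTS. */
static int send_fd(int sock, int fd)
{
	char data = 'x';
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	union {
		struct cmsghdr hdr;                  /* for alignment */
		char buf[CMSG_SPACE(sizeof(int))];
	} u;
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;

	memset(&u, 0, sizeof(u));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = u.buf;
	msg.msg_controllen = sizeof(u.buf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

	return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}

int main(void)
{
	enum { DEPTH = 1000 };          /* illustrative chain length */
	int prev[2];
	int i;

	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, prev))
		return 1;

	for (i = 0; i < DEPTH; i++) {
		int next[2];

		if (socketpair(AF_UNIX, SOCK_DGRAM, 0, next))
			return 1;
		/* Park prev[1] in next[1]'s receive queue, then drop our
		 * own descriptors for the previous pair; the in-flight
		 * reference is now the only thing keeping prev[1] alive. */
		if (send_fd(next[0], prev[1]))
			return 1;
		close(prev[0]);
		close(prev[1]);
		prev[0] = next[0];
		prev[1] = next[1];
	}

	/* Closing the outermost pair unravels the whole chain of queued
	 * descriptors inside the kernel, one __scm_destroy() per link. */
	close(prev[0]);
	close(prev[1]);
	return 0;
}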