Diffstat (limited to 'kernel/fork.c')
| -rw-r--r-- | kernel/fork.c | 57 | 
1 file changed, 35 insertions, 22 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 4c14942a0ee3..bf9fef6d1bfe 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -165,6 +165,18 @@ void free_task(struct task_struct *tsk)
 }
 EXPORT_SYMBOL(free_task);
 
+static inline void free_signal_struct(struct signal_struct *sig)
+{
+	taskstats_tgid_free(sig);
+	kmem_cache_free(signal_cachep, sig);
+}
+
+static inline void put_signal_struct(struct signal_struct *sig)
+{
+	if (atomic_dec_and_test(&sig->sigcnt))
+		free_signal_struct(sig);
+}
+
 void __put_task_struct(struct task_struct *tsk)
 {
 	WARN_ON(!tsk->exit_state);
@@ -173,6 +185,7 @@ void __put_task_struct(struct task_struct *tsk)
 
 	exit_creds(tsk);
 	delayacct_tsk_free(tsk);
+	put_signal_struct(tsk->signal);
 
 	if (!profile_handoff_task(tsk))
 		free_task(tsk);
@@ -864,8 +877,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	if (!sig)
 		return -ENOMEM;
 
-	atomic_set(&sig->count, 1);
+	sig->nr_threads = 1;
 	atomic_set(&sig->live, 1);
+	atomic_set(&sig->sigcnt, 1);
 	init_waitqueue_head(&sig->wait_chldexit);
 	if (clone_flags & CLONE_NEWPID)
 		sig->flags |= SIGNAL_UNKILLABLE;
@@ -889,13 +903,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	return 0;
 }
 
-void __cleanup_signal(struct signal_struct *sig)
-{
-	thread_group_cputime_free(sig);
-	tty_kref_put(sig->tty);
-	kmem_cache_free(signal_cachep, sig);
-}
-
 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long new_flags = p->flags;
@@ -1079,6 +1086,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	}
 	mpol_fix_fork_child_flag(p);
 #endif
+#ifdef CONFIG_CPUSETS
+	p->cpuset_mem_spread_rotor = node_random(p->mems_allowed);
+	p->cpuset_slab_spread_rotor = node_random(p->mems_allowed);
+#endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	p->irq_events = 0;
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
@@ -1112,8 +1123,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->memcg_batch.memcg = NULL;
 #endif
 
-	p->bts = NULL;
-
 	/* Perform scheduler related setup. Assign this task to a CPU. */
 	sched_fork(p, clone_flags);
 
@@ -1247,8 +1256,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	}
 
 	if (clone_flags & CLONE_THREAD) {
-		atomic_inc(&current->signal->count);
+		current->signal->nr_threads++;
 		atomic_inc(&current->signal->live);
+		atomic_inc(&current->signal->sigcnt);
 		p->group_leader = current->group_leader;
 		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
 	}
@@ -1261,7 +1271,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 				p->nsproxy->pid_ns->child_reaper = p;
 
 			p->signal->leader_pid = pid;
-			tty_kref_put(p->signal->tty);
 			p->signal->tty = tty_kref_get(current->signal->tty);
 			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
 			attach_pid(p, PIDTYPE_SID, task_session(current));
@@ -1294,7 +1303,7 @@ bad_fork_cleanup_mm:
 		mmput(p->mm);
bad_fork_cleanup_signal:
 	if (!(clone_flags & CLONE_THREAD))
-		__cleanup_signal(p->signal);
+		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
 	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
@@ -1329,6 +1338,16 @@ noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_re
 	return regs;
 }
 
+static inline void init_idle_pids(struct pid_link *links)
+{
+	enum pid_type type;
+
+	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
+		INIT_HLIST_NODE(&links[type].node); /* not really needed */
+		links[type].pid = &init_struct_pid;
+	}
+}
+
 struct task_struct * __cpuinit fork_idle(int cpu)
 {
 	struct task_struct *task;
@@ -1336,8 +1355,10 @@ struct task_struct * __cpuinit fork_idle(int cpu)
 
 	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
 			    &init_struct_pid, 0);
-	if (!IS_ERR(task))
+	if (!IS_ERR(task)) {
+		init_idle_pids(task->pids);
 		init_idle(task, cpu);
+	}
 
 	return task;
 }
@@ -1509,14 +1530,6 @@ static void check_unshare_flags(unsigned long *flags_ptr)
 		*flags_ptr |= CLONE_SIGHAND;
 
 	/*
-	 * If unsharing signal handlers and the task was created
-	 * using CLONE_THREAD, then must unshare the thread
-	 */
-	if ((*flags_ptr & CLONE_SIGHAND) &&
-	    (atomic_read(&current->signal->count) > 1))
-		*flags_ptr |= CLONE_THREAD;
-
-	/*
 	 * If unsharing namespace, must also unshare filesystem information.
 	 */
 	if (*flags_ptr & CLONE_NEWNS)
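For context, the core of the change above is that signal_struct now carries its own reference count, sigcnt: copy_signal() starts it at 1, each CLONE_THREAD fork takes an extra reference, and __put_task_struct() drops one via put_signal_struct(), which frees the structure once atomic_dec_and_test() sees the count hit zero. The snippet below is a minimal userspace C sketch of that same get/put pattern, using C11 atomics in place of the kernel's atomic_t; the sig_like, sig_get and sig_put names are illustrative only, not kernel APIs.

/* Userspace sketch of the refcount-release pattern used by put_signal_struct(). */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct sig_like {
	atomic_int sigcnt;		/* stands in for signal_struct->sigcnt */
};

static struct sig_like *sig_alloc(void)
{
	struct sig_like *sig = malloc(sizeof(*sig));

	if (sig)
		atomic_init(&sig->sigcnt, 1);	/* as in copy_signal(): the first thread holds one ref */
	return sig;
}

static void sig_get(struct sig_like *sig)
{
	atomic_fetch_add(&sig->sigcnt, 1);	/* as in CLONE_THREAD: each new thread takes a ref */
}

static void sig_put(struct sig_like *sig)
{
	/* atomic_dec_and_test() equivalent: only the last put frees the object */
	if (atomic_fetch_sub(&sig->sigcnt, 1) == 1) {
		printf("last reference dropped, freeing\n");
		free(sig);
	}
}

int main(void)
{
	struct sig_like *sig = sig_alloc();

	if (!sig)
		return 1;
	sig_get(sig);	/* a second thread joins the group */
	sig_put(sig);	/* one thread is reaped: the object survives */
	sig_put(sig);	/* the last thread is reaped: the object is freed */
	return 0;
}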
