git.neil.brown.name Git - history.git/commitdiff
[PATCH] clone-detached-2.5.31-B0
author: Ingo Molnar <mingo@elte.hu>
Tue, 13 Aug 2002 07:36:26 +0000 (00:36 -0700)
committer: Linus Torvalds <torvalds@home.transmeta.com>
Tue, 13 Aug 2002 07:36:26 +0000 (00:36 -0700)
The attached patch implements a per-CPU thread-structure cache to support
detached exit, used when the parent does not want to be notified of child
exit via a signal.

include/linux/sched.h
kernel/exit.c
kernel/fork.c
kernel/signal.c

index d44342a05d5bc6779e5d4d12e6a156bee4dc010a..63a6b938b1457b6e4863dba1d3c085c1be475835 100644 (file)
@@ -47,6 +47,7 @@ struct exec_domain;
 #define CLONE_SYSVSEM  0x00040000      /* share system V SEM_UNDO semantics */
 #define CLONE_SETTLS   0x00080000      /* create a new TLS for the child */
 #define CLONE_SETTID   0x00100000      /* write the TID back to userspace */
+#define CLONE_DETACHED 0x00200000      /* parent wants no child-exit signal */
 
 #define CLONE_SIGNAL   (CLONE_SIGHAND | CLONE_THREAD)
 
index 9d35d8ece02f95d97f2e73f22e4665e34553d247..8c51bf9c8aee789f98496ce2e3e1d571c36c4cb6 100644 (file)
@@ -56,10 +56,11 @@ static inline void __unhash_process(struct task_struct *p)
 
 static void release_task(struct task_struct * p)
 {
-       if (p == current)
+       if (p->state != TASK_ZOMBIE)
                BUG();
 #ifdef CONFIG_SMP
-       wait_task_inactive(p);
+       if (p != current)
+               wait_task_inactive(p);
 #endif
        atomic_dec(&p->user->processes);
        security_ops->task_free_security(p);
@@ -67,10 +68,12 @@ static void release_task(struct task_struct * p)
        unhash_process(p);
 
        release_thread(p);
-       current->cmin_flt += p->min_flt + p->cmin_flt;
-       current->cmaj_flt += p->maj_flt + p->cmaj_flt;
-       current->cnswap += p->nswap + p->cnswap;
-       sched_exit(p);
+       if (p != current) {
+               current->cmin_flt += p->min_flt + p->cmin_flt;
+               current->cmaj_flt += p->maj_flt + p->cmaj_flt;
+               current->cnswap += p->nswap + p->cnswap;
+               sched_exit(p);
+       }
        put_task_struct(p);
 }
 
@@ -479,14 +482,15 @@ static void exit_notify(void)
 
        write_lock_irq(&tasklist_lock);
        current->state = TASK_ZOMBIE;
-       do_notify_parent(current, current->exit_signal);
+       if (current->exit_signal != -1)
+               do_notify_parent(current, current->exit_signal);
        while ((p = eldest_child(current))) {
                list_del_init(&p->sibling);
                p->ptrace = 0;
 
                p->parent = p->real_parent;
                list_add_tail(&p->sibling,&p->parent->children);
-               if (p->state == TASK_ZOMBIE)
+               if (p->state == TASK_ZOMBIE && p->exit_signal != -1)
                        do_notify_parent(p, p->exit_signal);
                /*
                 * process group orphan check
@@ -555,6 +559,9 @@ fake_volatile:
 
        tsk->exit_code = code;
        exit_notify();
+       preempt_disable();
+       if (current->exit_signal == -1)
+               release_task(current);
        schedule();
        BUG();
 /*
index 017740dc62c4c485679e5830b5cf39953416c9fe..95baf7236910662ce3c13ee7d2ce5f8abfc2d731 100644 (file)
@@ -50,6 +50,31 @@ struct task_struct *pidhash[PIDHASH_SZ];
 
 rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;  /* outer */
 
+/*
+ * A per-CPU task cache - this relies on the fact that
+ * the very last portion of sys_exit() is executed with
+ * preemption turned off.
+ */
+static task_t *task_cache[NR_CPUS] __cacheline_aligned;
+
+void __put_task_struct(struct task_struct *tsk)
+{
+       if (tsk != current) {
+               free_thread_info(tsk->thread_info);
+               kmem_cache_free(task_struct_cachep,tsk);
+       } else {
+               int cpu = smp_processor_id();
+
+               tsk = task_cache[cpu];
+               if (tsk) {
+                       free_thread_info(tsk->thread_info);
+                       kmem_cache_free(task_struct_cachep,tsk);
+               }
+               task_cache[cpu] = current;
+       }
+}
+
+/* Protects next_safe and last_pid. */
 void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
 {
        unsigned long flags;
@@ -123,13 +148,6 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        return tsk;
 }
 
-void __put_task_struct(struct task_struct *tsk)
-{
-       free_thread_info(tsk->thread_info);
-       kmem_cache_free(task_struct_cachep,tsk);
-}
-
-/* Protects next_safe and last_pid. */
 spinlock_t lastpid_lock = SPIN_LOCK_UNLOCKED;
 
 static int get_pid(unsigned long flags)
@@ -737,7 +755,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
        /* ok, now we should be set up.. */
        p->swappable = 1;
-       p->exit_signal = clone_flags & CSIGNAL;
+       if (clone_flags & CLONE_DETACHED)
+               p->exit_signal = -1;
+       else
+               p->exit_signal = clone_flags & CSIGNAL;
        p->pdeath_signal = 0;
 
        /*
index fd9dd81c2815b1bb9f7fac590c86dbd5d0136b84..b97914bf8a856e952b0acc5f50275439a4447e80 100644 (file)
@@ -768,12 +768,15 @@ static void wake_up_parent(struct task_struct *parent)
 /*
  * Let a parent know about a status change of a child.
  */
-
 void do_notify_parent(struct task_struct *tsk, int sig)
 {
        struct siginfo info;
        int why, status;
 
+       /* is the thread detached? */
+       if (sig == -1 || tsk->exit_signal == -1)
+               BUG();
+
        info.si_signo = sig;
        info.si_errno = 0;
        info.si_pid = tsk->pid;
@@ -823,9 +826,11 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 void
 notify_parent(struct task_struct *tsk, int sig)
 {
-       read_lock(&tasklist_lock);
-       do_notify_parent(tsk, sig);
-       read_unlock(&tasklist_lock);
+       if (sig != -1) {
+               read_lock(&tasklist_lock);
+               do_notify_parent(tsk, sig);
+               read_unlock(&tasklist_lock);
+       }
 }
 
 #ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER