git.neil.brown.name Git - history.git/commitdiff
[PATCH] O(1) count_active_tasks
author Robert Love <rml@tech9.net>
Tue, 28 May 2002 12:03:31 +0000 (05:03 -0700)
committer Linus Torvalds <torvalds@home.transmeta.com>
Tue, 28 May 2002 12:03:31 +0000 (05:03 -0700)
This is William Irwin's algorithmically O(1) version of
count_active_tasks (which is currently O(n) for n total tasks on the
system).

I like it a lot: we become O(1) because now we count uninterruptible
tasks, so we can return (nr_uninterruptible + nr_running).  It does not
introduce any overhead or hurt the case for small n, so I have no
complaints.

This copy has a small optimization over the original posting, but is
otherwise the same thing wli posted earlier.  I have tested to make sure
this returns accurate results and that the kernel profile improves.

include/linux/sched.h
kernel/sched.c
kernel/timer.c

index 743b1aea2acaed45950cbe521f7b91c5aec6c994..b62a724d437cb82767e363f31c886e0a2e999c6a 100644 (file)
@@ -80,6 +80,7 @@ extern unsigned long avenrun[];               /* Load averages */
 extern int nr_threads;
 extern int last_pid;
 extern unsigned long nr_running(void);
+extern unsigned long nr_uninterruptible(void);
 
 #include <linux/time.h>
 #include <linux/param.h>
index a433a34f6470d2bf05a2deb84926651c6f9ff97f..07caec2ae996f8870899da95325619eda817365e 100644 (file)
@@ -137,6 +137,7 @@ struct runqueue {
        spinlock_t lock;
        spinlock_t frozen;
        unsigned long nr_running, nr_switches, expired_timestamp;
+       signed long nr_uninterruptible;
        task_t *curr, *idle;
        prio_array_t *active, *expired, arrays[2];
        int prev_nr_running[NR_CPUS];
@@ -244,6 +245,8 @@ static inline void activate_task(task_t *p, runqueue_t *rq)
 static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
 {
        rq->nr_running--;
+       if (p->state == TASK_UNINTERRUPTIBLE)
+               rq->nr_uninterruptible++;
        dequeue_task(p, p->array);
        p->array = NULL;
 }
@@ -323,11 +326,15 @@ static int try_to_wake_up(task_t * p)
 {
        unsigned long flags;
        int success = 0;
+       long old_state;
        runqueue_t *rq;
 
        rq = task_rq_lock(p, &flags);
+       old_state = p->state;
        p->state = TASK_RUNNING;
        if (!p->array) {
+               if (old_state == TASK_UNINTERRUPTIBLE)
+                       rq->nr_uninterruptible--;
                activate_task(p, rq);
                if (p->prio < rq->curr->prio)
                        resched_task(rq->curr);
@@ -433,6 +440,16 @@ unsigned long nr_running(void)
        return sum;
 }
 
+unsigned long nr_uninterruptible(void)
+{
+       unsigned long i, sum = 0;
+
+       for (i = 0; i < smp_num_cpus; i++)
+               sum += cpu_rq(cpu_logical_map(i))->nr_uninterruptible;
+
+       return sum;
+}
+
 unsigned long nr_context_switches(void)
 {
        unsigned long i, sum = 0;
index 6fc0466711cc0f05966d0437f5e621e4945275af..5175ebf0bf321423279b57bf81153d35cf05f6fc 100644 (file)
@@ -597,17 +597,7 @@ void update_process_times(int user_tick)
  */
 static unsigned long count_active_tasks(void)
 {
-       struct task_struct *p;
-       unsigned long nr = 0;
-
-       read_lock(&tasklist_lock);
-       for_each_task(p) {
-               if ((p->state == TASK_RUNNING ||
-                    (p->state & TASK_UNINTERRUPTIBLE)))
-                       nr += FIXED_1;
-       }
-       read_unlock(&tasklist_lock);
-       return nr;
+       return (nr_running() + nr_uninterruptible()) * FIXED_1;
 }
 
 /*