diff -urN linux/arch/i386/kernel/process.c count_active/arch/i386/kernel/process.c --- linux/arch/i386/kernel/process.c Fri Mar 8 20:47:29 2002 +++ count_active/arch/i386/kernel/process.c Fri Mar 8 20:26:46 2002 @@ -803,7 +803,7 @@ unsigned long ebp, esp, eip; unsigned long stack_page; int count = 0; - if (!p || p == current || p->state == TASK_RUNNING) + if (!p || p == current || p->__state == TASK_RUNNING) return 0; stack_page = (unsigned long)p; esp = p->thread.esp; diff -urN linux/arch/i386/kernel/ptrace.c count_active/arch/i386/kernel/ptrace.c --- linux/arch/i386/kernel/ptrace.c Wed Nov 21 10:42:41 2001 +++ count_active/arch/i386/kernel/ptrace.c Fri Mar 8 20:50:34 2002 @@ -299,7 +299,7 @@ long tmp; ret = 0; - if (child->state == TASK_ZOMBIE) /* already dead */ + if (child->__state == TASK_ZOMBIE) /* already dead */ break; child->exit_code = SIGKILL; /* make sure the single step bit is not set. */ @@ -448,7 +448,7 @@ between a syscall stop and SIGTRAP delivery */ current->exit_code = SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 
0x80 : 0); - current->state = TASK_STOPPED; + __set_current_state(TASK_STOPPED); notify_parent(current, SIGCHLD); schedule(); /* diff -urN linux/arch/i386/kernel/semaphore.c count_active/arch/i386/kernel/semaphore.c --- linux/arch/i386/kernel/semaphore.c Fri Nov 9 13:58:02 2001 +++ count_active/arch/i386/kernel/semaphore.c Fri Mar 8 20:40:08 2002 @@ -58,7 +58,7 @@ { struct task_struct *tsk = current; DECLARE_WAITQUEUE(wait, tsk); - tsk->state = TASK_UNINTERRUPTIBLE; + __set_task_state(tsk, TASK_UNINTERRUPTIBLE); add_wait_queue_exclusive(&sem->wait, &wait); spin_lock_irq(&semaphore_lock); @@ -78,12 +78,12 @@ spin_unlock_irq(&semaphore_lock); schedule(); - tsk->state = TASK_UNINTERRUPTIBLE; + __set_task_state(tsk, TASK_UNINTERRUPTIBLE); spin_lock_irq(&semaphore_lock); } spin_unlock_irq(&semaphore_lock); remove_wait_queue(&sem->wait, &wait); - tsk->state = TASK_RUNNING; + __set_task_state(tsk, TASK_RUNNING); wake_up(&sem->wait); } @@ -92,7 +92,7 @@ int retval = 0; struct task_struct *tsk = current; DECLARE_WAITQUEUE(wait, tsk); - tsk->state = TASK_INTERRUPTIBLE; + __set_task_state(tsk, TASK_INTERRUPTIBLE); add_wait_queue_exclusive(&sem->wait, &wait); spin_lock_irq(&semaphore_lock); @@ -128,11 +128,11 @@ spin_unlock_irq(&semaphore_lock); schedule(); - tsk->state = TASK_INTERRUPTIBLE; + __set_task_state(tsk, TASK_INTERRUPTIBLE); spin_lock_irq(&semaphore_lock); } spin_unlock_irq(&semaphore_lock); - tsk->state = TASK_RUNNING; + __set_task_state(tsk, TASK_RUNNING); remove_wait_queue(&sem->wait, &wait); wake_up(&sem->wait); return retval; diff -urN linux/arch/i386/kernel/signal.c count_active/arch/i386/kernel/signal.c --- linux/arch/i386/kernel/signal.c Mon Feb 25 11:37:53 2002 +++ count_active/arch/i386/kernel/signal.c Fri Mar 8 20:45:46 2002 @@ -83,7 +83,7 @@ regs->eax = -EINTR; while (1) { - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); schedule(); if (do_signal(regs, &saveset)) return -EINTR; @@ -112,7 +112,7 @@ regs->eax = -EINTR; 
while (1) { - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); schedule(); if (do_signal(regs, &saveset)) return -EINTR; @@ -611,7 +611,7 @@ if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) { /* Let the debugger run. */ current->exit_code = signr; - current->state = TASK_STOPPED; + __set_current_state(TASK_STOPPED); notify_parent(current, SIGCHLD); schedule(); @@ -668,7 +668,7 @@ case SIGSTOP: { struct signal_struct *sig; - current->state = TASK_STOPPED; + __set_current_state(TASK_STOPPED); current->exit_code = signr; sig = current->p_pptr->sig; if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) diff -urN linux/arch/i386/kernel/sys_i386.c count_active/arch/i386/kernel/sys_i386.c --- linux/arch/i386/kernel/sys_i386.c Mon Mar 19 12:35:09 2001 +++ count_active/arch/i386/kernel/sys_i386.c Fri Mar 8 20:53:12 2002 @@ -249,7 +249,7 @@ asmlinkage int sys_pause(void) { - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); schedule(); return -ERESTARTNOHAND; } diff -urN linux/drivers/block/ll_rw_blk.c count_active/drivers/block/ll_rw_blk.c --- linux/drivers/block/ll_rw_blk.c Fri Mar 8 20:47:30 2002 +++ count_active/drivers/block/ll_rw_blk.c Fri Mar 8 19:46:30 2002 @@ -525,7 +525,7 @@ spin_unlock_irq(&io_request_lock); } while (rq == NULL); remove_wait_queue(&q->wait_for_requests[rw], &wait); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); return rq; } diff -urN linux/drivers/block/paride/pcd.c count_active/drivers/block/paride/pcd.c --- linux/drivers/block/paride/pcd.c Sat Oct 27 02:03:47 2001 +++ count_active/drivers/block/paride/pcd.c Fri Mar 8 19:42:44 2002 @@ -587,8 +587,8 @@ } static void pcd_sleep( int cs ) - -{ current->state = TASK_INTERRUPTIBLE; +{ + __set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(cs); } diff -urN linux/drivers/block/paride/pf.c count_active/drivers/block/paride/pf.c --- linux/drivers/block/paride/pf.c Fri Dec 21 09:41:53 2001 +++ 
count_active/drivers/block/paride/pf.c Fri Mar 8 19:43:52 2002 @@ -675,8 +675,8 @@ #define PF_RESET_TMO 30 /* in tenths of a second */ static void pf_sleep( int cs ) - -{ current->state = TASK_INTERRUPTIBLE; +{ + __set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(cs); } diff -urN linux/drivers/block/paride/pg.c count_active/drivers/block/paride/pg.c --- linux/drivers/block/paride/pg.c Fri Dec 21 09:41:53 2001 +++ count_active/drivers/block/paride/pg.c Fri Mar 8 19:45:44 2002 @@ -355,8 +355,8 @@ #define DRIVE (0xa0+0x10*PG.drive) static void pg_sleep( int cs ) - -{ current->state = TASK_INTERRUPTIBLE; +{ + __set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(cs); } diff -urN linux/drivers/block/paride/pt.c count_active/drivers/block/paride/pt.c --- linux/drivers/block/paride/pt.c Fri Dec 21 09:41:53 2001 +++ count_active/drivers/block/paride/pt.c Fri Mar 8 19:44:42 2002 @@ -467,8 +467,8 @@ } static void pt_sleep( int cs ) - -{ current->state = TASK_INTERRUPTIBLE; +{ + __set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(cs); } diff -urN linux/drivers/char/n_tty.c count_active/drivers/char/n_tty.c --- linux/drivers/char/n_tty.c Fri Apr 6 10:42:55 2001 +++ count_active/drivers/char/n_tty.c Fri Mar 8 19:49:50 2002 @@ -1044,7 +1044,7 @@ set_bit(TTY_DONT_FLIP, &tty->flags); continue; } - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); /* Deal with packet mode. */ if (tty->packet && b == buf) { @@ -1113,7 +1113,7 @@ if (!waitqueue_active(&tty->read_wait)) tty->minimum_to_wake = minimum; - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); size = b - buf; if (size) { retval = size; @@ -1189,7 +1189,7 @@ schedule(); } break_out: - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(&tty->write_wait, &wait); return (b - buf) ? 
b - buf : retval; } diff -urN linux/drivers/char/pc_keyb.c count_active/drivers/char/pc_keyb.c --- linux/drivers/char/pc_keyb.c Fri Mar 8 20:47:30 2002 +++ count_active/drivers/char/pc_keyb.c Fri Mar 8 19:54:57 2002 @@ -1134,7 +1134,7 @@ schedule(); goto repeat; } - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(&queue->proc_list, &wait); } while (i > 0 && !queue_empty()) { diff -urN linux/drivers/char/qpmouse.c count_active/drivers/char/qpmouse.c --- linux/drivers/char/qpmouse.c Fri Sep 7 09:28:38 2001 +++ count_active/drivers/char/qpmouse.c Fri Mar 8 19:56:13 2002 @@ -243,7 +243,7 @@ if (inb_p(qp_status)&(QP_RX_FULL)) inb_p(qp_data); - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); schedule_timeout((5*HZ + 99) / 100); retries++; } @@ -271,7 +271,7 @@ schedule(); goto repeat; } - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(&queue->proc_list, &wait); } while (i > 0 && !queue_empty()) { diff -urN linux/drivers/char/random.c count_active/drivers/char/random.c --- linux/drivers/char/random.c Fri Mar 8 20:47:30 2002 +++ count_active/drivers/char/random.c Fri Mar 8 19:51:32 2002 @@ -1535,7 +1535,7 @@ break; /* This break makes the device work */ /* like a named pipe */ } - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(&random_read_wait, &wait); /* diff -urN linux/drivers/char/selection.c count_active/drivers/char/selection.c --- linux/drivers/char/selection.c Fri Sep 7 09:28:38 2001 +++ count_active/drivers/char/selection.c Fri Mar 8 19:53:27 2002 @@ -306,7 +306,7 @@ pasted += count; } remove_wait_queue(&vt->paste_wait, &wait); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); return 0; } diff -urN linux/drivers/char/tty_ioctl.c count_active/drivers/char/tty_ioctl.c --- linux/drivers/char/tty_ioctl.c Mon Sep 17 22:52:35 2001 +++ count_active/drivers/char/tty_ioctl.c Fri Mar 8 19:50:40 2002 @@ -65,7 
+65,7 @@ if (tty->driver.wait_until_sent) tty->driver.wait_until_sent(tty, timeout); stop_waiting: - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(&tty->write_wait, &wait); } diff -urN linux/drivers/char/vt.c count_active/drivers/char/vt.c --- linux/drivers/char/vt.c Fri Nov 16 10:08:28 2001 +++ count_active/drivers/char/vt.c Fri Mar 8 19:52:20 2002 @@ -1144,7 +1144,7 @@ schedule(); } remove_wait_queue(&vt_activate_queue, &wait); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); return retval; } diff -urN linux/drivers/pci/pci.c count_active/drivers/pci/pci.c --- linux/drivers/pci/pci.c Fri Mar 8 20:43:44 2002 +++ count_active/drivers/pci/pci.c Fri Mar 8 19:57:46 2002 @@ -1822,13 +1822,13 @@ if (mem_flags == SLAB_KERNEL) { DECLARE_WAITQUEUE (wait, current); - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); add_wait_queue (&pool->waitq, &wait); spin_unlock_irqrestore (&pool->lock, flags); schedule_timeout (POOL_TIMEOUT_JIFFIES); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue (&pool->waitq, &wait); goto restart; } diff -urN linux/fs/binfmt_elf.c count_active/fs/binfmt_elf.c --- linux/fs/binfmt_elf.c Fri Mar 8 20:47:35 2002 +++ count_active/fs/binfmt_elf.c Fri Mar 8 20:25:16 2002 @@ -1134,7 +1134,7 @@ notes[1].type = NT_PRPSINFO; notes[1].datasz = sizeof(psinfo); notes[1].data = &psinfo; - i = current->state ? ffz(~current->state) + 1 : 0; + i = current->__state ? ffz(~current->__state) + 1 : 0; psinfo.pr_state = i; psinfo.pr_sname = (i < 0 || i > 5) ? '.' 
: "RSDZTD"[i]; psinfo.pr_zomb = psinfo.pr_sname == 'Z'; diff -urN linux/fs/buffer.c count_active/fs/buffer.c --- linux/fs/buffer.c Fri Mar 8 20:47:35 2002 +++ count_active/fs/buffer.c Fri Mar 8 20:20:59 2002 @@ -153,7 +153,7 @@ break; schedule(); } while (buffer_locked(bh)); - tsk->state = TASK_RUNNING; + __set_task_state(tsk, TASK_RUNNING); remove_wait_queue(&bh->b_wait, &wait); put_bh(bh); } @@ -2980,11 +2980,11 @@ /* update interval */ interval = bdf_prm.b_un.interval; if (interval) { - tsk->state = TASK_INTERRUPTIBLE; + __set_task_state(tsk, TASK_INTERRUPTIBLE); schedule_timeout(interval); } else { stop_kupdate: - tsk->state = TASK_STOPPED; + __set_task_state(tsk, TASK_STOPPED); schedule(); /* wait for SIGCONT */ } /* check for sigstop */ diff -urN linux/fs/dquot.c count_active/fs/dquot.c --- linux/fs/dquot.c Fri Mar 8 20:47:35 2002 +++ count_active/fs/dquot.c Fri Mar 8 20:23:43 2002 @@ -251,7 +251,7 @@ goto repeat; } remove_wait_queue(&dquot->dq_wait_lock, &wait); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); } static inline void wait_on_dquot(struct dquot *dquot) @@ -285,7 +285,7 @@ goto repeat; } remove_wait_queue(&dquot->dq_wait_free, &wait); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); } /* Wait for all duplicated dquot references to be dropped */ @@ -301,7 +301,7 @@ goto repeat; } remove_wait_queue(&dquot->dq_wait_free, &wait); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); } /* diff -urN linux/fs/inode.c count_active/fs/inode.c --- linux/fs/inode.c Fri Mar 8 20:47:35 2002 +++ count_active/fs/inode.c Fri Mar 8 20:21:15 2002 @@ -175,7 +175,7 @@ goto repeat; } remove_wait_queue(&inode->i_wait, &wait); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); } static inline void wait_on_inode(struct inode *inode) diff -urN linux/fs/iobuf.c count_active/fs/iobuf.c --- linux/fs/iobuf.c Fri Apr 27 14:23:25 2001 +++ count_active/fs/iobuf.c Fri Mar 8 20:23:10 2002 @@ -140,7 
+140,7 @@ if (atomic_read(&kiobuf->io_count) != 0) goto repeat; } - tsk->state = TASK_RUNNING; + __set_task_state(tsk, TASK_RUNNING); remove_wait_queue(&kiobuf->wait_queue, &wait); } diff -urN linux/fs/lockd/clntproc.c count_active/fs/lockd/clntproc.c --- linux/fs/lockd/clntproc.c Thu Oct 11 07:52:18 2001 +++ count_active/fs/lockd/clntproc.c Fri Mar 8 20:02:33 2002 @@ -224,7 +224,7 @@ return call; } printk("nlmclnt_alloc_call: failed, waiting for memory\n"); - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(5*HZ); } return NULL; diff -urN linux/fs/locks.c count_active/fs/locks.c --- linux/fs/locks.c Fri Mar 8 20:47:36 2002 +++ count_active/fs/locks.c Fri Mar 8 20:21:41 2002 @@ -597,7 +597,7 @@ int result = 0; DECLARE_WAITQUEUE(wait, current); - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(fl_wait, &wait); if (timeout == 0) schedule(); @@ -606,7 +606,7 @@ if (signal_pending(current)) result = -ERESTARTSYS; remove_wait_queue(fl_wait, &wait); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); return result; } diff -urN linux/fs/namei.c count_active/fs/namei.c --- linux/fs/namei.c Fri Mar 8 20:43:48 2002 +++ count_active/fs/namei.c Fri Mar 8 20:19:02 2002 @@ -340,7 +340,7 @@ if (current->total_link_count >= 40) goto loop; if (current->need_resched) { - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); schedule(); } current->link_count++; diff -urN linux/fs/nfsd/vfs.c count_active/fs/nfsd/vfs.c --- linux/fs/nfsd/vfs.c Fri Dec 21 09:41:55 2001 +++ count_active/fs/nfsd/vfs.c Fri Mar 8 20:08:19 2002 @@ -738,7 +738,7 @@ dprintk("nfsd: write defer %d\n", current->pid); set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout((HZ+99)/100); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); dprintk("nfsd: write resume %d\n", current->pid); } diff -urN linux/fs/pipe.c count_active/fs/pipe.c --- linux/fs/pipe.c Fri Mar 8 
20:47:36 2002 +++ count_active/fs/pipe.c Fri Mar 8 20:18:48 2002 @@ -27,12 +27,12 @@ void pipe_wait(struct inode * inode) { DECLARE_WAITQUEUE(wait, current); - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(PIPE_WAIT(*inode), &wait); up(PIPE_SEM(*inode)); schedule(); remove_wait_queue(PIPE_WAIT(*inode), &wait); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); down(PIPE_SEM(*inode)); } diff -urN linux/fs/proc/array.c count_active/fs/proc/array.c --- linux/fs/proc/array.c Fri Mar 8 20:47:36 2002 +++ count_active/fs/proc/array.c Fri Mar 8 20:13:51 2002 @@ -130,7 +130,7 @@ static inline const char * get_task_state(struct task_struct *tsk) { - unsigned int state = tsk->state & (TASK_RUNNING | + unsigned int state = tsk->__state & (TASK_RUNNING | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE | TASK_ZOMBIE | diff -urN linux/fs/proc/base.c count_active/fs/proc/base.c --- linux/fs/proc/base.c Mon Feb 25 11:38:09 2002 +++ count_active/fs/proc/base.c Fri Mar 8 20:12:55 2002 @@ -286,9 +286,17 @@ read: proc_info_read, }; -#define MAY_PTRACE(p) \ -(p==current||(p->p_pptr==current&&(p->ptrace & PT_PTRACED)&&p->state==TASK_STOPPED)) +static inline int may_ptrace(task_t *p) +{ + if (p == current) + return 1; + + if (p->p_pptr == current) + if (p->ptrace & PT_PTRACED && p->__state == TASK_STOPPED) + return 1; + return 0; +} static int mem_open(struct inode* inode, struct file* file) { @@ -306,7 +314,7 @@ struct mm_struct *mm; - if (!MAY_PTRACE(task)) + if (!may_ptrace(task)) return -ESRCH; page = (char *)__get_free_page(GFP_USER); @@ -364,7 +372,7 @@ struct task_struct *task = file->f_dentry->d_inode->u.proc_i.task; unsigned long dst = *ppos; - if (!MAY_PTRACE(task)) + if (!may_ptrace(task)) return -ESRCH; page = (char *)__get_free_page(GFP_USER); diff -urN linux/fs/select.c count_active/fs/select.c --- linux/fs/select.c Mon Sep 10 13:04:33 2001 +++ count_active/fs/select.c Fri Mar 8 20:19:27 2002 @@ -223,7 +223,7 @@ } 
__timeout = schedule_timeout(__timeout); } - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); poll_freewait(&table); @@ -405,7 +405,7 @@ break; timeout = schedule_timeout(timeout); } - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); return count; } diff -urN linux/include/linux/sched.h count_active/include/linux/sched.h --- linux/include/linux/sched.h Fri Mar 8 20:47:44 2002 +++ count_active/include/linux/sched.h Fri Mar 8 20:58:09 2002 @@ -92,21 +92,44 @@ #define TASK_ZOMBIE 4 #define TASK_STOPPED 8 -#define __set_task_state(tsk, state_value) \ - do { (tsk)->state = (state_value); } while (0) +extern int nr_uninterruptible_counts[NR_CPUS]; + +static inline void update_nr_uninterruptible(long before, long after) +{ + int cpu = smp_processor_id(); + + if (!(before & TASK_UNINTERRUPTIBLE) && (after & TASK_UNINTERRUPTIBLE)) + nr_uninterruptible_counts[cpu]++; + + else if ((before & TASK_UNINTERRUPTIBLE) && + !(after & TASK_UNINTERRUPTIBLE)) + nr_uninterruptible_counts[cpu]--; +} + +#define __set_task_state(tsk, state_value) \ + do { \ + update_nr_uninterruptible((tsk)->__state, state_value); \ + (tsk)->__state = (state_value); \ + } while (0) + #ifdef CONFIG_SMP #define set_task_state(tsk, state_value) \ - set_mb((tsk)->state, (state_value)) + do { update_nr_uninterruptible((tsk)->__state, (state_value)); set_mb((tsk)->__state, (state_value)); } while (0) #else #define set_task_state(tsk, state_value) \ __set_task_state((tsk), (state_value)) #endif -#define __set_current_state(state_value) \ - do { current->state = (state_value); } while (0) +#define __set_current_state(state_value) \ + do { \ + task_t *task = current; \ + update_nr_uninterruptible(task->__state, state_value); \ + task->__state = (state_value); \ + } while (0) + #ifdef CONFIG_SMP #define set_current_state(state_value) \ - set_mb(current->state, (state_value)) + do { update_nr_uninterruptible(current->__state, (state_value)); set_mb(current->__state, (state_value)); } while (0) #else #define set_current_state(state_value) \ __set_current_state(state_value) @@ -291,7 +314,7 @@ /* * offsets of these are hardcoded
elsewhere - touch with care */ - volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ + volatile long __state; /* -1 unrunnable, 0 runnable, >0 stopped */ unsigned long flags; /* per process flags, defined below */ int sigpending; mm_segment_t addr_limit; /* thread address space: @@ -465,7 +488,7 @@ */ #define INIT_TASK(tsk) \ { \ - state: 0, \ + __state: 0, \ flags: 0, \ sigpending: 0, \ addr_limit: KERNEL_DS, \ @@ -801,7 +824,7 @@ break; \ schedule(); \ } \ - current->state = TASK_RUNNING; \ + __set_current_state(TASK_RUNNING); \ remove_wait_queue(&wq, &__wait); \ } while (0) @@ -829,7 +852,7 @@ ret = -ERESTARTSYS; \ break; \ } \ - current->state = TASK_RUNNING; \ + __set_current_state(TASK_RUNNING); \ remove_wait_queue(&wq, &__wait); \ } while (0) diff -urN linux/ipc/msg.c count_active/ipc/msg.c --- linux/ipc/msg.c Fri Sep 14 14:17:00 2001 +++ count_active/ipc/msg.c Fri Mar 8 20:41:31 2002 @@ -237,7 +237,7 @@ static inline void ss_add(struct msg_queue* msq, struct msg_sender* mss) { mss->tsk=current; - current->state=TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); list_add_tail(&mss->list,&msq->q_senders); } @@ -668,7 +668,7 @@ ss_add(msq, &s); msg_unlock(msqid); schedule(); - current->state= TASK_RUNNING; + __set_current_state(TASK_RUNNING); msq = msg_lock(msqid); err = -EIDRM; @@ -809,11 +809,11 @@ else msr_d.r_maxsize = msgsz; msr_d.r_msg = ERR_PTR(-EAGAIN); - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); msg_unlock(msqid); schedule(); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); msg = (struct msg_msg*) msr_d.r_msg; if(!IS_ERR(msg)) diff -urN linux/ipc/sem.c count_active/ipc/sem.c --- linux/ipc/sem.c Sun Sep 30 12:26:42 2001 +++ count_active/ipc/sem.c Fri Mar 8 20:46:21 2002 @@ -928,7 +928,7 @@ struct sem_array* tmp; queue.status = -EINTR; queue.sleeper = current; - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); sem_unlock(semid); schedule(); 
diff -urN linux/kernel/exit.c count_active/kernel/exit.c --- linux/kernel/exit.c Fri Mar 8 20:47:44 2002 +++ count_active/kernel/exit.c Fri Mar 8 19:35:48 2002 @@ -86,7 +86,7 @@ read_lock(&tasklist_lock); for_each_task(p) { if ((p == ignored_task) || (p->pgrp != pgrp) || - (p->state == TASK_ZOMBIE) || + (p->__state == TASK_ZOMBIE) || (p->p_pptr->pid == 1)) continue; if ((p->p_pptr->pgrp != pgrp) && @@ -113,7 +113,7 @@ for_each_task(p) { if (p->pgrp != pgrp) continue; - if (p->state != TASK_STOPPED) + if (p->__state != TASK_STOPPED) continue; retval = 1; break; @@ -439,7 +439,7 @@ */ write_lock_irq(&tasklist_lock); - current->state = TASK_ZOMBIE; + __set_current_state(TASK_ZOMBIE); do_notify_parent(current, current->exit_signal); while (current->p_cptr != NULL) { p = current->p_cptr; @@ -452,7 +452,7 @@ if (p->p_osptr) p->p_osptr->p_ysptr = p; p->p_pptr->p_cptr = p; - if (p->state == TASK_ZOMBIE) + if (p->__state == TASK_ZOMBIE) do_notify_parent(p, p->exit_signal); /* * process group orphan check @@ -553,7 +553,7 @@ add_wait_queue(¤t->wait_chldexit,&wait); repeat: flag = 0; - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); read_lock(&tasklist_lock); tsk = current; do { @@ -578,7 +578,7 @@ && !(options & __WALL)) continue; flag = 1; - switch (p->state) { + switch (p->__state) { case TASK_STOPPED: if (!p->exit_code) continue; @@ -634,7 +634,7 @@ } retval = -ECHILD; end_wait4: - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(¤t->wait_chldexit,&wait); return retval; } diff -urN linux/kernel/fork.c count_active/kernel/fork.c --- linux/kernel/fork.c Fri Mar 8 20:47:45 2002 +++ count_active/kernel/fork.c Fri Mar 8 19:34:28 2002 @@ -627,7 +627,7 @@ p->did_exec = 0; p->swappable = 0; - p->state = TASK_UNINTERRUPTIBLE; + __set_task_state(p, TASK_UNINTERRUPTIBLE); copy_flags(clone_flags, p); p->pid = get_pid(clone_flags); diff -urN linux/kernel/ksyms.c count_active/kernel/ksyms.c --- linux/kernel/ksyms.c 
Fri Mar 8 20:47:45 2002 +++ count_active/kernel/ksyms.c Fri Mar 8 19:21:04 2002 @@ -565,3 +565,4 @@ EXPORT_SYMBOL(tasklist_lock); EXPORT_SYMBOL(pidhash); +EXPORT_SYMBOL(nr_uninterruptible_counts); diff -urN linux/kernel/ptrace.c count_active/kernel/ptrace.c --- linux/kernel/ptrace.c Fri Mar 8 20:47:45 2002 +++ count_active/kernel/ptrace.c Fri Mar 8 19:37:57 2002 @@ -28,7 +28,7 @@ return -ESRCH; if (!kill) { - if (child->state != TASK_STOPPED) + if (child->__state != TASK_STOPPED) return -ESRCH; #ifdef CONFIG_SMP wait_task_inactive(child); diff -urN linux/kernel/sched.c count_active/kernel/sched.c --- linux/kernel/sched.c Fri Mar 8 20:47:45 2002 +++ count_active/kernel/sched.c Fri Mar 8 19:32:50 2002 @@ -148,6 +148,8 @@ static struct runqueue runqueues[NR_CPUS] __cacheline_aligned; +int nr_uninterruptible_counts[NR_CPUS]; + #define cpu_rq(cpu) (runqueues + (cpu)) #define this_rq() cpu_rq(smp_processor_id()) #define task_rq(p) cpu_rq((p)->cpu) @@ -328,7 +330,7 @@ runqueue_t *rq; rq = lock_task_rq(p, &flags); - p->state = TASK_RUNNING; + __set_task_state(p, TASK_RUNNING); if (!p->array) { activate_task(p, rq); if ((rq->curr == rq->idle) || (p->prio < rq->curr->prio)) @@ -348,7 +350,7 @@ { runqueue_t *rq = this_rq(); - p->state = TASK_RUNNING; + __set_task_state(p, TASK_RUNNING); if (!rt_task(p)) { /* * We decrease the sleep average of forking parents @@ -755,10 +757,10 @@ prev->sleep_timestamp = jiffies; spin_lock_irq(&rq->lock); - switch (prev->state) { + switch (prev->__state) { case TASK_INTERRUPTIBLE: if (unlikely(signal_pending(prev))) { - prev->state = TASK_RUNNING; + __set_task_state(prev, TASK_RUNNING); break; } default: @@ -837,7 +839,7 @@ wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list); p = curr->task; - state = p->state; + state = p->__state; if ((state & mode) && try_to_wake_up(p, sync) && ((curr->flags & WQ_FLAG_EXCLUSIVE) && @@ -915,7 +917,7 @@ { SLEEP_ON_VAR - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); 
SLEEP_ON_HEAD schedule(); @@ -926,7 +928,7 @@ { SLEEP_ON_VAR - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); SLEEP_ON_HEAD timeout = schedule_timeout(timeout); @@ -939,7 +941,7 @@ { SLEEP_ON_VAR - current->state = TASK_UNINTERRUPTIBLE; + __set_current_state(TASK_UNINTERRUPTIBLE); SLEEP_ON_HEAD schedule(); @@ -950,7 +952,7 @@ { SLEEP_ON_VAR - current->state = TASK_UNINTERRUPTIBLE; + __set_current_state(TASK_UNINTERRUPTIBLE); SLEEP_ON_HEAD timeout = schedule_timeout(timeout); @@ -980,7 +982,7 @@ if (new_mask & (1UL << smp_processor_id())) return; #if CONFIG_SMP - current->state = TASK_UNINTERRUPTIBLE; + __set_current_state(TASK_UNINTERRUPTIBLE); smp_migrate_task(__ffs(new_mask), current); schedule(); @@ -1234,7 +1236,7 @@ prio_array_t *array; list_t *queue; - if (unlikely(prev->state != TASK_RUNNING)) { + if (unlikely(prev->__state != TASK_RUNNING)) { schedule(); return 0; } @@ -1350,7 +1352,7 @@ static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" }; printk("%-13.13s ", p->comm); - state = p->state ? __ffs(p->state) + 1 : 0; + state = p->__state ? __ffs(p->__state) + 1 : 0; if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *)) printk(stat_nam[state]); else @@ -1471,7 +1473,7 @@ deactivate_task(idle, rq); idle->array = NULL; idle->prio = MAX_PRIO; - idle->state = TASK_RUNNING; + idle->__state = TASK_RUNNING; idle->cpu = cpu; double_rq_unlock(idle_rq, rq); idle->need_resched = 1; diff -urN linux/kernel/signal.c count_active/kernel/signal.c --- linux/kernel/signal.c Fri Mar 8 20:47:45 2002 +++ count_active/kernel/signal.c Fri Mar 8 20:44:49 2002 @@ -382,7 +382,7 @@ switch (sig) { case SIGKILL: case SIGCONT: /* Wake up the process if stopped. */ - if (t->state == TASK_STOPPED) + if (t->__state == TASK_STOPPED) wake_up_process(t); t->exit_code = 0; rm_sig_from_queue(SIGSTOP, t); @@ -478,10 +478,10 @@ * process of changing - but no harm is done by that * other than doing an extra (lightweight) IPI interrupt. 
*/ - if ((t->state == TASK_RUNNING) && (t->cpu != cpu())) + if ((t->__state == TASK_RUNNING) && (t->cpu != cpu())) kick_if_running(t); #endif - if (t->state & TASK_INTERRUPTIBLE) { + if (t->__state & TASK_INTERRUPTIBLE) { wake_up_process(t); return; } @@ -744,7 +744,7 @@ status = tsk->exit_code & 0x7f; why = SI_KERNEL; /* shouldn't happen */ - switch (tsk->state) { + switch (tsk->__state) { case TASK_STOPPED: /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */ if (tsk->ptrace & PT_PTRACED) @@ -945,7 +945,7 @@ recalc_sigpending(current); spin_unlock_irq(¤t->sigmask_lock); - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); timeout = schedule_timeout(timeout); spin_lock_irq(¤t->sigmask_lock); diff -urN linux/kernel/softirq.c count_active/kernel/softirq.c --- linux/kernel/softirq.c Fri Mar 8 20:47:45 2002 +++ count_active/kernel/softirq.c Fri Mar 8 19:37:25 2002 @@ -54,7 +54,7 @@ { struct task_struct * tsk = ksoftirqd_task(cpu); - if (tsk && tsk->state != TASK_RUNNING) + if (tsk && tsk->__state != TASK_RUNNING) wake_up_process(tsk); } @@ -258,7 +258,7 @@ printk("Attempt to kill tasklet from interrupt\n"); while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); do sys_sched_yield(); while (test_bit(TASKLET_STATE_SCHED, &t->state)); diff -urN linux/kernel/timer.c count_active/kernel/timer.c --- linux/kernel/timer.c Fri Mar 8 20:47:45 2002 +++ count_active/kernel/timer.c Fri Mar 8 19:39:18 2002 @@ -592,17 +592,12 @@ */ static unsigned long count_active_tasks(void) { - struct task_struct *p; - unsigned long nr = 0; + unsigned long k, nr = 0; - read_lock(&tasklist_lock); - for_each_task(p) { - if ((p->state == TASK_RUNNING || - (p->state & TASK_UNINTERRUPTIBLE))) - nr += FIXED_1; - } - read_unlock(&tasklist_lock); - return nr; + for (k = 0; k < NR_CPUS; ++k) + nr += nr_uninterruptible_counts[k] * FIXED_1; + + return nr + FIXED_1 * nr_running(); } /* @@ -846,7 
+841,7 @@ printk(KERN_ERR "schedule_timeout: wrong timeout " "value %lx from %p\n", timeout, __builtin_return_address(0)); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); goto out; } } @@ -901,7 +896,7 @@ expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec); - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); expire = schedule_timeout(expire); if (expire) { diff -urN linux/lib/rwsem-spinlock.c count_active/lib/rwsem-spinlock.c --- linux/lib/rwsem-spinlock.c Wed Apr 25 13:31:03 2001 +++ count_active/lib/rwsem-spinlock.c Fri Mar 8 20:40:32 2002 @@ -142,7 +142,7 @@ set_task_state(tsk, TASK_UNINTERRUPTIBLE); } - tsk->state = TASK_RUNNING; + __set_task_state(tsk, TASK_RUNNING); out: rwsemtrace(sem,"Leaving __down_read"); @@ -188,7 +188,7 @@ set_task_state(tsk, TASK_UNINTERRUPTIBLE); } - tsk->state = TASK_RUNNING; + __set_task_state(tsk, TASK_RUNNING); out: rwsemtrace(sem,"Leaving __down_write"); diff -urN linux/mm/memory.c count_active/mm/memory.c --- linux/mm/memory.c Fri Mar 8 20:47:47 2002 +++ count_active/mm/memory.c Fri Mar 8 19:59:30 2002 @@ -1396,7 +1396,7 @@ pgd_t *pgd; pmd_t *pmd; - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); pgd = pgd_offset(mm, address); /* diff -urN linux/net/core/datagram.c count_active/net/core/datagram.c --- linux/net/core/datagram.c Fri Dec 21 09:42:05 2001 +++ count_active/net/core/datagram.c Fri Mar 8 20:34:31 2002 @@ -91,7 +91,7 @@ *timeo_p = schedule_timeout(*timeo_p); ready: - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(sk->sleep, &wait); return 0; @@ -100,7 +100,7 @@ out_err: *err = error; out: - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(sk->sleep, &wait); return error; out_noerr: diff -urN linux/net/core/dev.c count_active/net/core/dev.c --- linux/net/core/dev.c Fri Mar 8 20:43:57 2002 +++ count_active/net/core/dev.c Fri Mar 8 20:37:16 2002 @@ -2713,9 +2713,9 @@ /* 
Rebroadcast unregister notification */ notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev); } - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(HZ/4); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); if ((jiffies - warning_time) > 10*HZ) { printk(KERN_EMERG "unregister_netdevice: waiting for %s to " "become free. Usage count = %d\n", diff -urN linux/net/core/sock.c count_active/net/core/sock.c --- linux/net/core/sock.c Fri Dec 21 09:42:05 2001 +++ count_active/net/core/sock.c Fri Mar 8 20:35:10 2002 @@ -843,14 +843,14 @@ add_wait_queue_exclusive(&sk->lock.wq, &wait); for(;;) { - current->state = TASK_UNINTERRUPTIBLE; + __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_bh(&sk->lock.slock); schedule(); spin_lock_bh(&sk->lock.slock); if(!sk->lock.users) break; } - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(&sk->lock.wq, &wait); } diff -urN linux/net/ipv4/tcp.c count_active/net/ipv4/tcp.c --- linux/net/ipv4/tcp.c Fri Dec 21 09:42:05 2001 +++ count_active/net/ipv4/tcp.c Fri Mar 8 20:31:45 2002 @@ -729,7 +729,7 @@ *timeo = current_timeo; } out: - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(sk->sleep, &wait); return err; @@ -1902,7 +1902,7 @@ lock_sock(sk); } while (!signal_pending(tsk) && timeout); - tsk->state = TASK_RUNNING; + __set_task_state(tsk, TASK_RUNNING); remove_wait_queue(sk->sleep, &wait); } @@ -2076,7 +2076,7 @@ */ add_wait_queue_exclusive(sk->sleep, &wait); for (;;) { - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); release_sock(sk); if (sk->tp_pinfo.af_tcp.accept_queue == NULL) timeo = schedule_timeout(timeo); @@ -2094,7 +2094,7 @@ if (!timeo) break; } - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(sk->sleep, &wait); return err; }