From: George Anzinger The MAJOR problem was a hang in the kernel if a user tried to delete a repeating timer that had a signal delivery pending. I was putting the task in a loop waiting for that same task to pick up the signal. OUCH! A minor issue relates to the need by the glibc folks to specify a particular thread to get the signal. I had this code in all along, but somewhere in 2.5 the signal code was made POSIX compliant, i.e. deliver to the first thread that doesn't have it masked out. This now uses the code from the above-mentioned cleanup. Most signals go to the group delivery signal code; however, those specifying THREAD_ID (an extension to the POSIX standard) are sent to the specified thread. That thread MUST be in the same thread group as the thread that creates the timer. 25-akpm/kernel/posix-timers.c | 68 +++++++++++++++++++++++++----------------- 1 files changed, 42 insertions(+), 26 deletions(-) diff -puN kernel/posix-timers.c~posix-timer-hang-fix-2 kernel/posix-timers.c --- 25/kernel/posix-timers.c~posix-timer-hang-fix-2 Thu Apr 10 14:56:29 2003 +++ 25-akpm/kernel/posix-timers.c Thu Apr 10 14:56:29 2003 @@ -77,6 +77,20 @@ static spinlock_t idr_lock = SPIN_LOCK_U # define timer_active(tmr) BARFY // error to use outside of SMP # define set_timer_inactive(tmr) do { } while (0) #endif + +/* + * For some reason mips/mips64 define the SIGEV constants plus 128. + * Here we define a mask to get rid of the common bits. The + * optimizer should make this costless to all but mips. + */ +#if (ARCH == mips) || (ARCH == mips64) +#define MIPS_SIGEV ~(SIGEV_NONE & \ + SIGEV_SIGNAL & \ + SIGEV_THREAD & \ + SIGEV_THREAD_ID) +#else +#define MIPS_SIGEV (int)-1 +#endif /* * The timer ID is turned into a timer address by idr_find(). 
* Verifying a valid ID consists of: @@ -225,10 +239,9 @@ static void schedule_next_timer(struct k struct now_struct now; /* Set up the timer for the next interval (if there is one) */ - if (!timr->it_incr) { - set_timer_inactive(timr); + if (!timr->it_incr) return; - } + posix_get_now(&now); do { posix_bump_timer(timr); @@ -258,7 +271,7 @@ void do_schedule_next_timer(struct sigin timr = lock_timer(info->si_tid, &flags); - if (!timr || !timr->it_requeue_pending) + if (!timr || timr->it_requeue_pending != info->si_sys_private) goto exit; schedule_next_timer(timr); @@ -276,7 +289,17 @@ exit: * indicating that the signal was either not queued or was queued * without an info block. In this case, we will not get a call back to * do_schedule_next_timer() so we do it here. This should be rare... + + * An interesting problem can occur if, while a signal, and thus a call + * back is pending, the timer is rearmed, i.e. stopped and restarted. + * We then need to sort out the call back and do the right thing. What + * we do is to put a counter in the info block and match it with the + * timer's copy on the call back. If they don't match, we just ignore + * the call back. Note that we do allow the timer to be deleted while + * a signal is pending. The standard says we can allow that signal to + * be delivered, and we do. */ +static int pendcount = 1; static void timer_notify_task(struct k_itimer *timr) { @@ -291,12 +314,19 @@ static void timer_notify_task(struct k_i info.si_code = SI_TIMER; info.si_tid = timr->it_id; info.si_value = timr->it_sigev_value; - if (!timr->it_incr) - set_timer_inactive(timr); - else - timr->it_requeue_pending = info.si_sys_private = 1; - - ret = send_sig_info(info.si_signo, &info, timr->it_process); + if (timr->it_incr){ + /* + * Don't allow a call back counter of zero... + * and avoid the test by using 2. 
+ */ + pendcount += 2; + timr->it_requeue_pending = info.si_sys_private = pendcount; + } + if( timr->it_sigev_notify & SIGEV_THREAD_ID & MIPS_SIGEV){ + ret = send_sig_info(info.si_signo, &info, timr->it_process); + }else + ret = send_group_sig_info(info.si_signo, &info, + timr->it_process); switch (ret) { default: @@ -332,24 +362,11 @@ static void posix_timer_fn(unsigned long unsigned long flags; spin_lock_irqsave(&timr->it_lock, flags); + set_timer_inactive(timr); timer_notify_task(timr); unlock_timer(timr, flags); } -/* - * For some reason mips/mips64 define the SIGEV constants plus 128. - * Here we define a mask to get rid of the common bits. The - * optimizer should make this costless to all but mips. - */ -#if defined(ARCH) && ((ARCH == mips) || (ARCH == mips64)) -#define MIPS_SIGEV ~(SIGEV_NONE & \ - SIGEV_SIGNAL & \ - SIGEV_THREAD & \ - SIGEV_THREAD_ID) -#else -#define MIPS_SIGEV (int)-1 -#endif - static inline struct task_struct * good_sigevent(sigevent_t * event) { struct task_struct *rtn = current; @@ -848,8 +865,7 @@ static inline int do_timer_delete(struct { timer->it_incr = 0; #ifdef CONFIG_SMP - if (timer_active(timer) && - !del_timer(&timer->it_timer) && !timer->it_requeue_pending) + if (timer_active(timer) && !del_timer(&timer->it_timer)) /* * It can only be active if on an other cpu. Since * we have cleared the interval stuff above, it should _