path: root/kernel
author		Chris Wright <chrisw@osdl.org>		2004-06-17 18:25:30 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2004-06-17 18:25:30 -0700
commit		684c531cc11dfcd7df4f950ebcb0012f79788ca9 (patch)
tree		a0a47ab2bc8d499660a9e7b24f0c288712248470 /kernel
parent		cefb76b6bb7ca143dc5a84ccde9e84f0fd0fb272 (diff)
download	history-684c531cc11dfcd7df4f950ebcb0012f79788ca9.tar.gz
[PATCH] RLIM: enforce rlimits on queued signals
Add a user_struct pointer to the sigqueue structure. Charge sigqueue allocation and destruction to the user_struct rather than a global pool. This per-user rlimit accounting obsoletes the global queued_signals accounting.

The patch as it stands charges the sigqueue struct allocation to the queue that it is pending on (the receiver of the signal), so the owner of the queue is charged for whoever writes to it (much like quota for a 777 file). The patch started out charging the task which allocated the sigqueue struct. In most cases these are the same user (permission is required to send a signal), so those cases are moot. In the cases where it isn't the same user, it's a privileged user sending a signal to another user. It seems wrong to charge the allocation to the privileged user, when the other user could block receipt for as long as it likes. The flip side is that someone else can fill your queue (the expectation being that the someone else is privileged). I think it's right the way it is; the change to revert is very small.

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
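For context on the user-visible behaviour this accounting introduces, the following is a minimal userspace sketch (not part of the commit, and the limit value and signal choice are arbitrary): it lowers RLIMIT_SIGPENDING with setrlimit(), blocks SIGRTMIN so queued signals stay pending and thus stay charged to the user, then calls sigqueue() until the per-user limit is reached, at which point sigqueue() is expected to fail with EAGAIN.

/*
 * Illustrative sketch only: exercise the per-user pending-signal limit
 * from userspace. Assumes a kernel that enforces RLIMIT_SIGPENDING.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 8, .rlim_max = 8 };	/* arbitrary small limit */
	union sigval val = { .sival_int = 0 };
	sigset_t set;
	int i;

	/* Lower this process's per-user queued-signal limit. */
	if (setrlimit(RLIMIT_SIGPENDING, &rl) != 0) {
		perror("setrlimit");
		return 1;
	}

	/* Block SIGRTMIN so queued signals remain pending (and charged). */
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	/* Queue real-time signals to ourselves until the limit rejects one. */
	for (i = 0; i < 32; i++) {
		if (sigqueue(getpid(), SIGRTMIN, val) != 0) {
			printf("sigqueue failed after %d signals: %s\n",
			       i, strerror(errno));
			return 0;
		}
	}
	printf("queued %d signals without hitting the limit\n", i);
	return 0;
}

Note that the limit counts all signals pending for the user, not just those queued by this process, so the exact count at which sigqueue() fails may be lower than the rlimit set above.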
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/signal.c		17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index dcc6477f77ff04..25b4147e86f0b2 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -265,17 +265,19 @@ next_signal(struct sigpending *pending, sigset_t *mask)
return sig;
}
-struct sigqueue *__sigqueue_alloc(void)
+static struct sigqueue *__sigqueue_alloc(void)
{
struct sigqueue *q = 0;
- if (atomic_read(&nr_queued_signals) < max_queued_signals)
+ if (atomic_read(&current->user->sigpending) <
+ current->rlim[RLIMIT_SIGPENDING].rlim_cur)
q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
if (q) {
- atomic_inc(&nr_queued_signals);
INIT_LIST_HEAD(&q->list);
q->flags = 0;
q->lock = 0;
+ q->user = get_uid(current->user);
+ atomic_inc(&q->user->sigpending);
}
return(q);
}
@@ -284,8 +286,9 @@ static inline void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
return;
+ atomic_dec(&q->user->sigpending);
+ free_uid(q->user);
kmem_cache_free(sigqueue_cachep, q);
- atomic_dec(&nr_queued_signals);
}
static void flush_sigqueue(struct sigpending *queue)
@@ -720,12 +723,14 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
make sure at least one signal gets delivered and don't
pass on the info struct. */
- if (atomic_read(&nr_queued_signals) < max_queued_signals)
+ if (atomic_read(&t->user->sigpending) <
+ t->rlim[RLIMIT_SIGPENDING].rlim_cur)
q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
if (q) {
- atomic_inc(&nr_queued_signals);
q->flags = 0;
+ q->user = get_uid(t->user);
+ atomic_inc(&q->user->sigpending);
list_add_tail(&q->list, &signals->list);
switch ((unsigned long) info) {
case 0: