aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorDean Nelson <dcn@sgi.com>2005-01-07 21:47:53 -0800
committerLinus Torvalds <torvalds@evo.osdl.org>2005-01-07 21:47:53 -0800
commit0cc0f9fcae0fc4deda39d8a47cd24962d1d64a08 (patch)
treeb890034a4d3594b7d7e1048c6bf526fc45c5fe1e /kernel
parent38ff2da8b3b71f5bf6d7adf5281ce8e1df2dc0e9 (diff)
downloadhistory-0cc0f9fcae0fc4deda39d8a47cd24962d1d64a08.tar.gz
[PATCH] export sched_setscheduler() for kernel module use
This patch exports sched_setscheduler() so that it can be used by a kernel module to set a kthread's scheduling policy and associated parameters.

Signed-off-by: Dean Nelson <dcn@sgi.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched.c91
1 files changed, 45 insertions, 46 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index ddc0534eed8a5d..cd4776363889fe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2940,7 +2940,7 @@ void set_user_nice(task_t *p, long nice)
*/
rq = task_rq_lock(p, &flags);
/*
- * The RT priorities are set via setscheduler(), but we still
+ * The RT priorities are set via sched_setscheduler(), but we still
* allow the 'normal' nice value to be set - but as expected
* it wont have any effect on scheduling until the task is
* not SCHED_NORMAL:
@@ -3072,67 +3072,48 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
p->prio = p->static_prio;
}
-/*
- * setscheduler - change the scheduling policy and/or RT priority of a thread.
+/**
+ * sched_setscheduler - change the scheduling policy and/or RT priority of
+ * a thread.
+ * @p: the task in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
*/
-static int setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+int sched_setscheduler(struct task_struct *p, int policy, struct sched_param *param)
{
- struct sched_param lp;
- int retval = -EINVAL;
+ int retval;
int oldprio, oldpolicy = -1;
prio_array_t *array;
unsigned long flags;
runqueue_t *rq;
- task_t *p;
-
- if (!param || pid < 0)
- goto out_nounlock;
-
- retval = -EFAULT;
- if (copy_from_user(&lp, param, sizeof(struct sched_param)))
- goto out_nounlock;
- /*
- * We play safe to avoid deadlocks.
- */
- read_lock_irq(&tasklist_lock);
-
- p = find_process_by_pid(pid);
-
- retval = -ESRCH;
- if (!p)
- goto out_unlock;
recheck:
/* double check policy once rq lock held */
if (policy < 0)
policy = oldpolicy = p->policy;
- else {
- retval = -EINVAL;
- if (policy != SCHED_FIFO && policy != SCHED_RR &&
+ else if (policy != SCHED_FIFO && policy != SCHED_RR &&
policy != SCHED_NORMAL)
- goto out_unlock;
- }
+ return -EINVAL;
/*
* Valid priorities for SCHED_FIFO and SCHED_RR are
* 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0.
*/
- retval = -EINVAL;
- if (lp.sched_priority < 0 || lp.sched_priority > MAX_USER_RT_PRIO-1)
- goto out_unlock;
- if ((policy == SCHED_NORMAL) != (lp.sched_priority == 0))
- goto out_unlock;
+ if (param->sched_priority < 0 ||
+ param->sched_priority > MAX_USER_RT_PRIO-1)
+ return -EINVAL;
+ if ((policy == SCHED_NORMAL) != (param->sched_priority == 0))
+ return -EINVAL;
- retval = -EPERM;
if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
!capable(CAP_SYS_NICE))
- goto out_unlock;
+ return -EPERM;
if ((current->euid != p->euid) && (current->euid != p->uid) &&
!capable(CAP_SYS_NICE))
- goto out_unlock;
+ return -EPERM;
- retval = security_task_setscheduler(p, policy, &lp);
+ retval = security_task_setscheduler(p, policy, param);
if (retval)
- goto out_unlock;
+ return retval;
/*
* To be able to change p->policy safely, the apropriate
* runqueue lock must be held.
@@ -3147,9 +3128,8 @@ recheck:
array = p->array;
if (array)
deactivate_task(p, rq);
- retval = 0;
oldprio = p->prio;
- __setscheduler(p, policy, lp.sched_priority);
+ __setscheduler(p, policy, param->sched_priority);
if (array) {
__activate_task(p, rq);
/*
@@ -3164,22 +3144,41 @@ recheck:
resched_task(rq->curr);
}
task_rq_unlock(rq, &flags);
-out_unlock:
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sched_setscheduler);
+
+static int do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+{
+ int retval;
+ struct sched_param lparam;
+ struct task_struct *p;
+
+ if (!param || pid < 0)
+ return -EINVAL;
+ if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
+ return -EFAULT;
+ read_lock_irq(&tasklist_lock);
+ p = find_process_by_pid(pid);
+ if (!p) {
+ read_unlock_irq(&tasklist_lock);
+ return -ESRCH;
+ }
+ retval = sched_setscheduler(p, policy, &lparam);
read_unlock_irq(&tasklist_lock);
-out_nounlock:
return retval;
}
/**
* sys_sched_setscheduler - set/change the scheduler policy and RT priority
* @pid: the pid in question.
- * @policy: new policy
+ * @policy: new policy.
* @param: structure containing the new RT priority.
*/
asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
struct sched_param __user *param)
{
- return setscheduler(pid, policy, param);
+ return do_sched_setscheduler(pid, policy, param);
}
/**
@@ -3189,7 +3188,7 @@ asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
*/
asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
{
- return setscheduler(pid, -1, param);
+ return do_sched_setscheduler(pid, -1, param);
}
/**