From 057de3e45b5e87c9f4bf0d75b055999a65a85ea9 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Fri, 3 Jul 2009 08:30:37 -0500
Subject: [PATCH] mm: Prepare decoupling the page fault disabling logic

commit 5e918aee03250bd6416175794d4673ff4b9ed583 in tip.

Add a pagefault_disabled variable to task_struct to allow decoupling
the pagefault-disabled logic from the preempt count.

Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
Signed-off-by: Paul Gortmaker
---
 include/linux/sched.h   |    1 +
 include/linux/uaccess.h |   33 +++------------------------------
 kernel/fork.c           |    1 +
 mm/memory.c             |   29 +++++++++++++++++++++++++++++
 4 files changed, 34 insertions(+), 30 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 53e6ffe..bdcdabd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1224,6 +1224,7 @@ struct task_struct {
 	struct plist_node pushable_tasks;
 
 	struct mm_struct *mm, *active_mm;
+	int pagefault_disabled;
 #if defined(SPLIT_RSS_COUNTING)
 	struct task_rss_stat	rss_stat;
 #endif
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index d512d98..9c7e38a 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -6,37 +6,10 @@
 
 /*
  * These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
- *
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
+ * it will not take any MM locks and go straight to the fixup table.
 */
-static inline void pagefault_disable(void)
-{
-	inc_preempt_count();
-	/*
-	 * make sure to have issued the store before a pagefault
-	 * can hit.
-	 */
-	barrier();
-}
-
-static inline void pagefault_enable(void)
-{
-	/*
-	 * make sure to issue those last loads/stores before enabling
-	 * the pagefault handler again.
-	 */
-	barrier();
-	dec_preempt_count();
-	/*
-	 * make sure we do..
-	 */
-	barrier();
-	preempt_check_resched();
-}
+extern void pagefault_disable(void);
+extern void pagefault_enable(void);
 
 #ifndef ARCH_HAS_NOCACHE_UACCESS
diff --git a/kernel/fork.c b/kernel/fork.c
index 4c14942..0a1b8d1 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1098,6 +1098,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->hardirq_context = 0;
 	p->softirq_context = 0;
 #endif
+	p->pagefault_disabled = 0;
 #ifdef CONFIG_LOCKDEP
 	p->lockdep_depth = 0; /* no locks held yet */
 	p->curr_chain_key = 0;
diff --git a/mm/memory.c b/mm/memory.c
index 90f674e..17056a3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3107,6 +3107,35 @@ unlock:
 	return 0;
 }
 
+void pagefault_disable(void)
+{
+	inc_preempt_count();
+	current->pagefault_disabled++;
+	/*
+	 * make sure to have issued the store before a pagefault
+	 * can hit.
+	 */
+	barrier();
+}
+EXPORT_SYMBOL(pagefault_disable);
+
+void pagefault_enable(void)
+{
+	/*
+	 * make sure to issue those last loads/stores before enabling
+	 * the pagefault handler again.
+	 */
+	barrier();
+	current->pagefault_disabled--;
+	dec_preempt_count();
+	/*
+	 * make sure we do..
+	 */
+	barrier();
+	preempt_check_resched();
+}
+EXPORT_SYMBOL(pagefault_enable);
+
 /*
  * By the time we get here, we already hold the mm semaphore
  */
-- 
1.7.0.4
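
For illustration, a minimal sketch of what a caller of the now out-of-line
pair looks like: inside the pagefault_disable()/pagefault_enable() region,
faults go straight to the fixup table instead of sleeping on mmap_sem, so
the *_inatomic copy variants must be used. probe_user_value() is a
hypothetical helper for this sketch, not part of the patch; the semantics
of the pair are unchanged, only the pagefault_disabled bookkeeping is new.

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	/* Hypothetical caller, for illustration only. */
	static int probe_user_value(int __user *uaddr, int *val)
	{
		unsigned long ret;

		pagefault_disable();	/* faults now take the fixup path */
		ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
		pagefault_enable();	/* may resched via preempt_check_resched() */

		return ret ? -EFAULT : 0;
	}

Keeping the preempt count manipulation inside pagefault_disable()/enable()
means callers like the above need no changes when the fault-disable state
later moves off the preempt count and onto task_struct entirely.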