diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2005-01-07 22:09:06 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@evo.osdl.org> | 2005-01-07 22:09:06 -0800 |
commit | 44b7f61e2868c2d4895c93b0d3382389a7a66f6b (patch) | |
tree | 1c05de8fbd29e7fa658d24af65842b029e6e860c /mm | |
parent | c254df93a2064d49effb25dce3a9dc37982f8b53 (diff) | |
download | history-44b7f61e2868c2d4895c93b0d3382389a7a66f6b.tar.gz |
[PATCH] Lock initializer cleanup (Core)
Kernel core files converted to use the new lock initializers.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/highmem.c | 2 | ||||
-rw-r--r-- | mm/hugetlb.c | 2 | ||||
-rw-r--r-- | mm/mlock.c | 2 | ||||
-rw-r--r-- | mm/nommu.c | 2 | ||||
-rw-r--r-- | mm/oom_kill.c | 2 | ||||
-rw-r--r-- | mm/pdflush.c | 2 | ||||
-rw-r--r-- | mm/shmem.c | 2 | ||||
-rw-r--r-- | mm/swapfile.c | 2 | ||||
-rw-r--r-- | mm/thrash.c | 2 | ||||
-rw-r--r-- | mm/vmalloc.c | 2 |
10 files changed, 10 insertions, 10 deletions
diff --git a/mm/highmem.c b/mm/highmem.c index 21d2daa37eb087..c37d66cb1af127 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -53,7 +53,7 @@ static void page_pool_free(void *page, void *data) #ifdef CONFIG_HIGHMEM static int pkmap_count[LAST_PKMAP]; static unsigned int last_pkmap_nr; -static spinlock_t kmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; +static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock); pte_t * pkmap_page_table; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 54bbfbfc24b905..4eb5ae3fbe10d3 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -18,7 +18,7 @@ unsigned long max_huge_pages; static struct list_head hugepage_freelists[MAX_NUMNODES]; static unsigned int nr_huge_pages_node[MAX_NUMNODES]; static unsigned int free_huge_pages_node[MAX_NUMNODES]; -static spinlock_t hugetlb_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(hugetlb_lock); static void enqueue_huge_page(struct page *page) { diff --git a/mm/mlock.c b/mm/mlock.c index a75902f0485efc..0c41018fb37b99 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -204,7 +204,7 @@ asmlinkage long sys_munlockall(void) * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB * shm segments) get accounted against the user_struct instead. */ -static spinlock_t shmlock_user_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(shmlock_user_lock); int user_shm_lock(size_t size, struct user_struct *user) { diff --git a/mm/nommu.c b/mm/nommu.c index 7ee66abb00bc1b..9439b1cd441568 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -136,7 +136,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, return(i); } -rwlock_t vmlist_lock = RW_LOCK_UNLOCKED; +DEFINE_RWLOCK(vmlist_lock); struct vm_struct *vmlist; void vfree(void *addr) diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 3868e29e85bef9..967958c7fae138 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -233,7 +233,7 @@ void out_of_memory(int gfp_mask) * oom_lock protects out_of_memory()'s static variables. 
* It's a global lock; this is not performance-critical. */ - static spinlock_t oom_lock = SPIN_LOCK_UNLOCKED; + static DEFINE_SPINLOCK(oom_lock); static unsigned long first, last, count, lastkill; unsigned long now, since; diff --git a/mm/pdflush.c b/mm/pdflush.c index 1e682bed9a5eee..21b7f127b7d303 100644 --- a/mm/pdflush.c +++ b/mm/pdflush.c @@ -45,7 +45,7 @@ static void start_one_pdflush_thread(void); * All the pdflush threads. Protected by pdflush_lock */ static LIST_HEAD(pdflush_list); -static spinlock_t pdflush_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(pdflush_lock); /* * The count of currently-running pdflush threads. Protected diff --git a/mm/shmem.c b/mm/shmem.c index 2a97375d5eceab..c34e131967a917 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -189,7 +189,7 @@ static struct backing_dev_info shmem_backing_dev_info = { }; static LIST_HEAD(shmem_swaplist); -static spinlock_t shmem_swaplist_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(shmem_swaplist_lock); static void shmem_free_blocks(struct inode *inode, long pages) { diff --git a/mm/swapfile.c b/mm/swapfile.c index 479015ed0b2f65..5a00ea36120088 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -32,7 +32,7 @@ #include <asm/tlbflush.h> #include <linux/swapops.h> -spinlock_t swaplock = SPIN_LOCK_UNLOCKED; +DEFINE_SPINLOCK(swaplock); unsigned int nr_swapfiles; long total_swap_pages; static int swap_overflow; diff --git a/mm/thrash.c b/mm/thrash.c index 985b6bbe964dec..735a91baa6ff37 100644 --- a/mm/thrash.c +++ b/mm/thrash.c @@ -13,7 +13,7 @@ #include <linux/sched.h> #include <linux/swap.h> -static spinlock_t swap_token_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(swap_token_lock); static unsigned long swap_token_timeout; unsigned long swap_token_check; struct mm_struct * swap_token_mm = &init_mm; diff --git a/mm/vmalloc.c b/mm/vmalloc.c index f8c46383a91009..e0fa3b39f2d12a 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -20,7 +20,7 @@ #include <asm/tlbflush.h> -rwlock_t vmlist_lock = RW_LOCK_UNLOCKED; +DEFINE_RWLOCK(vmlist_lock); struct vm_struct *vmlist; static void unmap_area_pte(pmd_t *pmd, unsigned long address,