From: Thomas Gleixner

Kernel core files converted to use the new lock initializers.

Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Signed-off-by: Andrew Morton
---

 25-akpm/kernel/audit.c        |    8 ++++----
 25-akpm/kernel/capability.c   |    2 +-
 25-akpm/kernel/dma.c          |    2 +-
 25-akpm/kernel/exec_domain.c  |    2 +-
 25-akpm/kernel/fork.c         |    4 ++--
 25-akpm/kernel/intermodule.c  |    2 +-
 25-akpm/kernel/kprobes.c      |    2 +-
 25-akpm/kernel/module.c       |    2 +-
 25-akpm/kernel/pid.c          |    2 +-
 25-akpm/kernel/posix-timers.c |    2 +-
 25-akpm/kernel/printk.c       |    4 ++--
 25-akpm/kernel/profile.c      |    2 +-
 25-akpm/kernel/resource.c     |    2 +-
 25-akpm/kernel/sys.c          |    2 +-
 25-akpm/kernel/timer.c        |    2 +-
 25-akpm/kernel/user.c         |    2 +-
 25-akpm/kernel/workqueue.c    |    2 +-
 25-akpm/lib/kobject_uevent.c  |    2 +-
 25-akpm/mm/highmem.c          |    2 +-
 25-akpm/mm/hugetlb.c          |    2 +-
 25-akpm/mm/mlock.c            |    2 +-
 25-akpm/mm/nommu.c            |    2 +-
 25-akpm/mm/oom_kill.c         |    2 +-
 25-akpm/mm/pdflush.c          |    2 +-
 25-akpm/mm/shmem.c            |    2 +-
 25-akpm/mm/swapfile.c         |    2 +-
 25-akpm/mm/thrash.c           |    2 +-
 25-akpm/mm/vmalloc.c          |    2 +-
 28 files changed, 33 insertions(+), 33 deletions(-)

diff -puN kernel/audit.c~lock-initializer-cleanup-core kernel/audit.c
--- 25/kernel/audit.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.936216128 -0800
+++ 25-akpm/kernel/audit.c	2004-12-03 20:57:15.977209896 -0800
@@ -98,8 +98,8 @@ static struct sock *audit_sock;
  * The second list is a list of pre-allocated audit buffers (if more
  * than AUDIT_MAXFREE are in use, the audit buffer is freed instead of
  * being placed on the freelist). */
-static spinlock_t audit_txlist_lock = SPIN_LOCK_UNLOCKED;
-static spinlock_t audit_freelist_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(audit_txlist_lock);
+static DEFINE_SPINLOCK(audit_freelist_lock);
 static int audit_freelist_count = 0;
 static LIST_HEAD(audit_txlist);
 static LIST_HEAD(audit_freelist);
@@ -169,7 +169,7 @@ static inline int audit_rate_check(void)
 {
 	static unsigned long last_check = 0;
 	static int messages = 0;
-	static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(lock);
 	unsigned long flags;
 	unsigned long now;
 	unsigned long elapsed;
@@ -199,7 +199,7 @@ static inline int audit_rate_check(void)
 void audit_log_lost(const char *message)
 {
 	static unsigned long last_msg = 0;
-	static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(lock);
 	unsigned long flags;
 	unsigned long now;
 	int print;

diff -puN kernel/capability.c~lock-initializer-cleanup-core kernel/capability.c
--- 25/kernel/capability.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.937215976 -0800
+++ 25-akpm/kernel/capability.c	2004-12-03 20:57:15.977209896 -0800
@@ -23,7 +23,7 @@ EXPORT_SYMBOL(cap_bset);
  * This global lock protects task->cap_* for all tasks including current.
  * Locking rule: acquire this prior to tasklist_lock.
  */
-spinlock_t task_capability_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(task_capability_lock);
 
 /*
  * For sys_getproccap() and sys_setproccap(), any of the three

diff -puN kernel/dma.c~lock-initializer-cleanup-core kernel/dma.c
--- 25/kernel/dma.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.939215672 -0800
+++ 25-akpm/kernel/dma.c	2004-12-03 20:57:15.978209744 -0800
@@ -38,7 +38,7 @@
  */
 
 
-spinlock_t dma_spin_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(dma_spin_lock);
 
 /*
  * If our port doesn't define this it has no PC like DMA

diff -puN kernel/exec_domain.c~lock-initializer-cleanup-core kernel/exec_domain.c
--- 25/kernel/exec_domain.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.940215520 -0800
+++ 25-akpm/kernel/exec_domain.c	2004-12-03 20:57:15.978209744 -0800
@@ -22,7 +22,7 @@
 static void default_handler(int, struct pt_regs *);
 
 static struct exec_domain *exec_domains = &default_exec_domain;
-static rwlock_t exec_domains_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(exec_domains_lock);
 
 
 static u_long ident_map[32] = {

diff -puN kernel/fork.c~lock-initializer-cleanup-core kernel/fork.c
--- 25/kernel/fork.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.941215368 -0800
+++ 25-akpm/kernel/fork.c	2004-12-03 20:57:15.979209592 -0800
@@ -59,7 +59,7 @@ int max_threads;	/* tunable limit on nr
 
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
-rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;  /* outer */
+ __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
 
 EXPORT_SYMBOL(tasklist_lock);
 
@@ -286,7 +286,7 @@ static inline void mm_free_pgd(struct mm
 #define mm_free_pgd(mm)
 #endif /* CONFIG_MMU */
 
-spinlock_t mmlist_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+ __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
 
 #define allocate_mm()	(kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
 #define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

diff -puN kernel/intermodule.c~lock-initializer-cleanup-core kernel/intermodule.c
--- 25/kernel/intermodule.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.943215064 -0800
+++ 25-akpm/kernel/intermodule.c	2004-12-03 20:57:15.979209592 -0800
@@ -14,7 +14,7 @@
  */
 
 static struct list_head ime_list = LIST_HEAD_INIT(ime_list);
-static spinlock_t ime_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(ime_lock);
 static int kmalloc_failed;
 
 struct inter_module_entry {

diff -puN kernel/kprobes.c~lock-initializer-cleanup-core kernel/kprobes.c
--- 25/kernel/kprobes.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.944214912 -0800
+++ 25-akpm/kernel/kprobes.c	2004-12-03 20:57:15.980209440 -0800
@@ -43,7 +43,7 @@
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 
 unsigned int kprobe_cpu = NR_CPUS;
-static spinlock_t kprobe_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(kprobe_lock);
 
 /* Locks kprobe: irqs must be disabled */
 void lock_kprobes(void)

diff -puN kernel/module.c~lock-initializer-cleanup-core kernel/module.c
--- 25/kernel/module.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.946214608 -0800
+++ 25-akpm/kernel/module.c	2004-12-03 20:57:15.981209288 -0800
@@ -53,7 +53,7 @@
 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
 
 /* Protects module list */
-static spinlock_t modlist_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(modlist_lock);
 
 /* List of modules, protected by module_mutex AND modlist_lock */
 static DECLARE_MUTEX(module_mutex);

diff -puN kernel/pid.c~lock-initializer-cleanup-core kernel/pid.c
--- 25/kernel/pid.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.947214456 -0800
+++ 25-akpm/kernel/pid.c	2004-12-03 20:57:15.982209136 -0800
@@ -60,7 +60,7 @@ typedef struct pidmap {
 
 static pidmap_t pidmap_array[PIDMAP_ENTRIES] =
	 { [ 0 ... PIDMAP_ENTRIES-1 ] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } };
 
-static spinlock_t pidmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
 
 fastcall void free_pidmap(int pid)
 {

diff -puN kernel/posix-timers.c~lock-initializer-cleanup-core kernel/posix-timers.c
--- 25/kernel/posix-timers.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.948214304 -0800
+++ 25-akpm/kernel/posix-timers.c	2004-12-03 20:57:15.983208984 -0800
@@ -89,7 +89,7 @@ static inline u64 mpy_l_X_l_ll(unsigned
  */
 static kmem_cache_t *posix_timers_cache;
 static struct idr posix_timers_id;
-static spinlock_t idr_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(idr_lock);
 
 /*
  * Just because the timer is not in the timer list does NOT mean it is

diff -puN kernel/printk.c~lock-initializer-cleanup-core kernel/printk.c
--- 25/kernel/printk.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.950214000 -0800
+++ 25-akpm/kernel/printk.c	2004-12-03 20:57:15.984208832 -0800
@@ -78,7 +78,7 @@ static int console_locked;
  * It is also used in interesting ways to provide interlocking in
  * release_console_sem().
  */
-static spinlock_t logbuf_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(logbuf_lock);
 
 static char __log_buf[__LOG_BUF_LEN];
 static char *log_buf = __log_buf;
@@ -875,7 +875,7 @@ void tty_write_message(struct tty_struct
  */
 int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst)
 {
-	static spinlock_t ratelimit_lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(ratelimit_lock);
 	static unsigned long toks = 10*5*HZ;
 	static unsigned long last_msg;
 	static int missed;

diff -puN kernel/profile.c~lock-initializer-cleanup-core kernel/profile.c
--- 25/kernel/profile.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.951213848 -0800
+++ 25-akpm/kernel/profile.c	2004-12-03 20:57:15.985208680 -0800
@@ -83,7 +83,7 @@ void __init profile_init(void)
 #ifdef CONFIG_PROFILING
 
 static DECLARE_RWSEM(profile_rwsem);
-static rwlock_t handoff_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(handoff_lock);
 static struct notifier_block * task_exit_notifier;
 static struct notifier_block * task_free_notifier;
 static struct notifier_block * munmap_notifier;

diff -puN kernel/resource.c~lock-initializer-cleanup-core kernel/resource.c
--- 25/kernel/resource.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.953213544 -0800
+++ 25-akpm/kernel/resource.c	2004-12-03 20:57:15.985208680 -0800
@@ -39,7 +39,7 @@ struct resource iomem_resource = {
 
 EXPORT_SYMBOL(iomem_resource);
 
-static rwlock_t resource_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(resource_lock);
 
 #ifdef CONFIG_PROC_FS
 

diff -puN kernel/sys.c~lock-initializer-cleanup-core kernel/sys.c
--- 25/kernel/sys.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.954213392 -0800
+++ 25-akpm/kernel/sys.c	2004-12-03 20:57:15.986208528 -0800
@@ -90,7 +90,7 @@ int cad_pid = 1;
  */
 
 static struct notifier_block *reboot_notifier_list;
-rwlock_t notifier_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(notifier_lock);
 
 /**
  *	notifier_chain_register	- Add notifier to a notifier chain

diff -puN kernel/timer.c~lock-initializer-cleanup-core kernel/timer.c
--- 25/kernel/timer.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.956213088 -0800
+++ 25-akpm/kernel/timer.c	2004-12-03 20:57:15.988208224 -0800
@@ -1400,7 +1400,7 @@ void __init init_timers(void)
 
 struct time_interpolator *time_interpolator;
 static struct time_interpolator *time_interpolator_list;
-static spinlock_t time_interpolator_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(time_interpolator_lock);
 
 static inline u64 time_interpolator_get_cycles(unsigned int src)
 {

diff -puN kernel/user.c~lock-initializer-cleanup-core kernel/user.c
--- 25/kernel/user.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.957212936 -0800
+++ 25-akpm/kernel/user.c	2004-12-03 20:57:15.988208224 -0800
@@ -26,7 +26,7 @@
 static kmem_cache_t *uid_cachep;
 
 static struct list_head uidhash_table[UIDHASH_SZ];
-static spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(uidhash_lock);
 
 struct user_struct root_user = {
 	.__count	= ATOMIC_INIT(1),

diff -puN kernel/workqueue.c~lock-initializer-cleanup-core kernel/workqueue.c
--- 25/kernel/workqueue.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.958212784 -0800
+++ 25-akpm/kernel/workqueue.c	2004-12-03 20:57:15.989208072 -0800
@@ -64,7 +64,7 @@ struct workqueue_struct {
 
 /* All the per-cpu workqueues on the system, for hotplug cpu to
    add/remove threads to each one as cpus come/go. */
-static spinlock_t workqueue_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 
 /* If it's single threaded, it isn't in the list of workqueues. */

diff -puN lib/kobject_uevent.c~lock-initializer-cleanup-core lib/kobject_uevent.c
--- 25/lib/kobject_uevent.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.960212480 -0800
+++ 25-akpm/lib/kobject_uevent.c	2004-12-03 20:57:15.989208072 -0800
@@ -179,7 +179,7 @@ static inline int send_uevent(const char
 #ifdef CONFIG_HOTPLUG
 char hotplug_path[HOTPLUG_PATH_LEN] = "/sbin/hotplug";
 u64 hotplug_seqnum;
-static spinlock_t sequence_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(sequence_lock);
 
 /**
  * kobject_hotplug - notify userspace by executing /sbin/hotplug

diff -puN mm/highmem.c~lock-initializer-cleanup-core mm/highmem.c
--- 25/mm/highmem.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.961212328 -0800
+++ 25-akpm/mm/highmem.c	2004-12-03 20:57:15.990207920 -0800
@@ -53,7 +53,7 @@ static void page_pool_free(void *page, v
 #ifdef CONFIG_HIGHMEM
 static int pkmap_count[LAST_PKMAP];
 static unsigned int last_pkmap_nr;
-static spinlock_t kmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
 
 pte_t * pkmap_page_table;
 

diff -puN mm/hugetlb.c~lock-initializer-cleanup-core mm/hugetlb.c
--- 25/mm/hugetlb.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.962212176 -0800
+++ 25-akpm/mm/hugetlb.c	2004-12-03 20:57:15.990207920 -0800
@@ -17,7 +17,7 @@ unsigned long max_huge_pages;
 static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
-static spinlock_t hugetlb_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(hugetlb_lock);
 
 static void enqueue_huge_page(struct page *page)
 {

diff -puN mm/mlock.c~lock-initializer-cleanup-core mm/mlock.c
--- 25/mm/mlock.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.963212024 -0800
+++ 25-akpm/mm/mlock.c	2004-12-03 20:57:15.991207768 -0800
@@ -204,7 +204,7 @@ asmlinkage long sys_munlockall(void)
  * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
  * shm segments) get accounted against the user_struct instead.
  */
-static spinlock_t shmlock_user_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(shmlock_user_lock);
 
 int user_shm_lock(size_t size, struct user_struct *user)
 {

diff -puN mm/nommu.c~lock-initializer-cleanup-core mm/nommu.c
--- 25/mm/nommu.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.965211720 -0800
+++ 25-akpm/mm/nommu.c	2004-12-03 20:57:15.991207768 -0800
@@ -137,7 +137,7 @@ int get_user_pages(struct task_struct *t
 	return(i);
 }
 
-rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
 void vfree(void *addr)

diff -puN mm/oom_kill.c~lock-initializer-cleanup-core mm/oom_kill.c
--- 25/mm/oom_kill.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.966211568 -0800
+++ 25-akpm/mm/oom_kill.c	2004-12-03 20:57:15.992207616 -0800
@@ -227,7 +227,7 @@ void out_of_memory(int gfp_mask)
 	 * oom_lock protects out_of_memory()'s static variables.
 	 * It's a global lock; this is not performance-critical.
 	 */
-	static spinlock_t oom_lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(oom_lock);
 	static unsigned long first, last, count, lastkill;
 	unsigned long now, since;
 

diff -puN mm/pdflush.c~lock-initializer-cleanup-core mm/pdflush.c
--- 25/mm/pdflush.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.967211416 -0800
+++ 25-akpm/mm/pdflush.c	2004-12-03 20:57:15.992207616 -0800
@@ -45,7 +45,7 @@ static void start_one_pdflush_thread(voi
 * All the pdflush threads.  Protected by pdflush_lock
 */
 static LIST_HEAD(pdflush_list);
-static spinlock_t pdflush_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(pdflush_lock);
 
 /*
 * The count of currently-running pdflush threads.  Protected

diff -puN mm/shmem.c~lock-initializer-cleanup-core mm/shmem.c
--- 25/mm/shmem.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.969211112 -0800
+++ 25-akpm/mm/shmem.c	2004-12-03 20:57:15.994207312 -0800
@@ -189,7 +189,7 @@ static struct backing_dev_info shmem_bac
 };
 
 static LIST_HEAD(shmem_swaplist);
-static spinlock_t shmem_swaplist_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(shmem_swaplist_lock);
 
 static void shmem_free_blocks(struct inode *inode, long pages)
 {

diff -puN mm/swapfile.c~lock-initializer-cleanup-core mm/swapfile.c
--- 25/mm/swapfile.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.970210960 -0800
+++ 25-akpm/mm/swapfile.c	2004-12-03 20:57:15.995207160 -0800
@@ -32,7 +32,7 @@
 #include
 #include
 
-spinlock_t swaplock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(swaplock);
 unsigned int nr_swapfiles;
 long total_swap_pages;
 static int swap_overflow;

diff -puN mm/thrash.c~lock-initializer-cleanup-core mm/thrash.c
--- 25/mm/thrash.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.972210656 -0800
+++ 25-akpm/mm/thrash.c	2004-12-03 20:57:15.995207160 -0800
@@ -13,7 +13,7 @@
 #include
 #include
 
-static spinlock_t swap_token_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(swap_token_lock);
 static unsigned long swap_token_timeout;
 unsigned long swap_token_check;
 struct mm_struct * swap_token_mm = &init_mm;

diff -puN mm/vmalloc.c~lock-initializer-cleanup-core mm/vmalloc.c
--- 25/mm/vmalloc.c~lock-initializer-cleanup-core	2004-12-03 20:57:15.973210504 -0800
+++ 25-akpm/mm/vmalloc.c	2004-12-03 20:57:15.996207008 -0800
@@ -20,7 +20,7 @@
 #include
 
 
-rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
 static void unmap_area_pte(pmd_t *pmd, unsigned long address,
_
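
The conversion applied throughout is mechanical: DEFINE_SPINLOCK() and
DEFINE_RWLOCK() declare the lock and give it its static initializer in a
single statement, and where an alignment attribute was attached to the old
declaration it now goes in front of the macro (see the tasklist_lock,
mmlist_lock, pidmap_lock and kmap_lock hunks above). Run-time locking calls
are untouched. A minimal before/after sketch of the pattern, assuming the
2.6-era <linux/spinlock.h> interfaces; the lock and function names below are
placeholders, not identifiers from this patch:

	#include <linux/spinlock.h>
	#include <linux/cache.h>

	/*
	 * Old style, removed by this series:
	 *
	 *	static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;
	 *	static rwlock_t demo_rwlock = RW_LOCK_UNLOCKED;
	 */

	/* New style: one macro declares and statically initializes the lock. */
	static DEFINE_SPINLOCK(demo_lock);
	static DEFINE_RWLOCK(demo_rwlock);

	/* Attributes go before the macro instead of after the variable name. */
	static __cacheline_aligned_in_smp DEFINE_SPINLOCK(demo_aligned_lock);

	/* Callers are unchanged by the conversion. */
	static void demo_critical_section(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);
		/* ... data protected by demo_lock ... */
		spin_unlock_irqrestore(&demo_lock, flags);
	}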