From: Thomas Gleixner

To make spinlock/rwlock initialization consistent all over the kernel, this
patch converts explicit lock-initializers into spin_lock_init() and
rwlock_init() calls.

Currently, spinlocks and rwlocks are initialized in two different ways:

	lock = SPIN_LOCK_UNLOCKED
	spin_lock_init(&lock)

	rwlock = RW_LOCK_UNLOCKED
	rwlock_init(&rwlock)

This patch converts all explicit lock initializations to spin_lock_init() or
rwlock_init().  (Besides consistency, this also helps automatic lock
validators and debugging code.)

The conversion was done with a script; it was verified manually, and it was
reviewed, compiled and tested as far as possible on x86, ARM and PPC.

There is no runtime overhead or actual code change resulting from this patch,
because spin_lock_init() and rwlock_init() are macros and are thus equivalent
to the explicit initialization method.

Signed-off-by: Thomas Gleixner
Acked-by: Ingo Molnar
Signed-off-by: Andrew Morton
---

 25-akpm/ipc/util.c     |    2 +-
 25-akpm/kernel/fork.c  |    8 ++++----
 25-akpm/kernel/futex.c |    2 +-
 25-akpm/mm/swapfile.c  |    2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)

diff -puN ipc/util.c~lock-initializer-unifying-core ipc/util.c
--- 25/ipc/util.c~lock-initializer-unifying-core	2004-10-26 20:06:05.560223192 -0700
+++ 25-akpm/ipc/util.c	2004-10-26 20:06:05.567222128 -0700
@@ -193,7 +193,7 @@ found:
 	if(ids->seq > ids->seq_max)
 		ids->seq = 0;
 
-	new->lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&new->lock);
 	new->deleted = 0;
 	rcu_read_lock();
 	spin_lock(&new->lock);
diff -puN kernel/fork.c~lock-initializer-unifying-core kernel/fork.c
--- 25/kernel/fork.c~lock-initializer-unifying-core	2004-10-26 20:06:05.561223040 -0700
+++ 25-akpm/kernel/fork.c	2004-10-26 20:06:05.569221824 -0700
@@ -299,8 +299,8 @@ static struct mm_struct * mm_init(struct
 	INIT_LIST_HEAD(&mm->mmlist);
 	mm->core_waiters = 0;
 	mm->nr_ptes = 0;
-	mm->page_table_lock = SPIN_LOCK_UNLOCKED;
-	mm->ioctx_list_lock = RW_LOCK_UNLOCKED;
+	spin_lock_init(&mm->page_table_lock);
+	rwlock_init(&mm->ioctx_list_lock);
 	mm->ioctx_list = NULL;
 	mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
 	mm->free_area_cache = TASK_UNMAPPED_BASE;
@@ -500,7 +500,7 @@ static inline struct fs_struct *__copy_f
 	/* We don't need to lock fs - think why ;-) */
 	if (fs) {
 		atomic_set(&fs->count, 1);
-		fs->lock = RW_LOCK_UNLOCKED;
+		rwlock_init(&fs->lock);
 		fs->umask = old->umask;
 		read_lock(&old->lock);
 		fs->rootmnt = mntget(old->rootmnt);
@@ -582,7 +582,7 @@ static int copy_files(unsigned long clon
 
 	atomic_set(&newf->count, 1);
 
-	newf->file_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&newf->file_lock);
 	newf->next_fd = 0;
 	newf->max_fds = NR_OPEN_DEFAULT;
 	newf->max_fdset = __FD_SETSIZE;
diff -puN kernel/futex.c~lock-initializer-unifying-core kernel/futex.c
--- 25/kernel/futex.c~lock-initializer-unifying-core	2004-10-26 20:06:05.563222736 -0700
+++ 25-akpm/kernel/futex.c	2004-10-26 20:06:05.569221824 -0700
@@ -732,7 +732,7 @@ static int __init init(void)
 
 	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
 		INIT_LIST_HEAD(&futex_queues[i].chain);
-		futex_queues[i].lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&futex_queues[i].lock);
 	}
 	return 0;
 }
diff -puN mm/swapfile.c~lock-initializer-unifying-core mm/swapfile.c
--- 25/mm/swapfile.c~lock-initializer-unifying-core	2004-10-26 20:06:05.564222584 -0700
+++ 25-akpm/mm/swapfile.c	2004-10-26 20:06:05.571221520 -0700
@@ -1368,7 +1368,7 @@ asmlinkage long sys_swapon(const char __
 	p->highest_bit = 0;
 	p->cluster_nr = 0;
 	p->inuse_pages = 0;
-	p->sdev_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&p->sdev_lock);
 	p->next = -1;
 	if (swap_flags & SWAP_FLAG_PREFER) {
 		p->prio = _
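
To illustrate the "no runtime overhead" point in the changelog: below is a
stand-alone sketch (not kernel code -- the real definitions live in
<linux/spinlock.h> and vary by architecture and config) of how a non-debug
spin_lock_init() is assumed to boil down to the same store as the old
explicit initializer:

	/*
	 * Illustrative user-space model only.  spinlock_t, SPIN_LOCK_UNLOCKED
	 * and spin_lock_init() here are simplified stand-ins for the kernel's
	 * definitions, used just to show that both styles leave the lock in
	 * the same state.
	 */
	#include <stdio.h>

	typedef struct { volatile unsigned int slock; } spinlock_t;

	#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 1 }
	#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)

	int main(void)
	{
		spinlock_t a = SPIN_LOCK_UNLOCKED;	/* old style */
		spinlock_t b;

		spin_lock_init(&b);			/* converted style */

		/* both locks end up in the same (unlocked) state */
		printf("a.slock=%u b.slock=%u\n", a.slock, b.slock);
		return 0;
	}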