From: "Jun'ichi Nomura" Fix another bug in dm-raid1.c that the dirty region may stay in or be moved to clean list and freed while in use. It happens as follows: CPU0 CPU1 ------------------------------------------------------------------------------ rh_dec() if (atomic_dec_and_test(pending)) rh_inc() if the region is clean mark the region dirty and remove from clean list mark the region clean and move to clean list atomic_inc(pending) At this stage, the region is in clean list and will be mistakenly reclaimed by rh_update_states() later. Signed-off-by: Jun'ichi Nomura Signed-off-by: Andrew Morton --- drivers/md/dm-raid1.c | 12 +++++++++--- 1 files changed, 9 insertions(+), 3 deletions(-) diff -puN drivers/md/dm-raid1.c~md-fix-rh_dec-rh_inc-race-in-dm-raid1c drivers/md/dm-raid1.c --- 25/drivers/md/dm-raid1.c~md-fix-rh_dec-rh_inc-race-in-dm-raid1c 2005-06-16 16:49:46.000000000 -0700 +++ 25-akpm/drivers/md/dm-raid1.c 2005-06-16 16:49:46.000000000 -0700 @@ -378,16 +378,18 @@ static void rh_inc(struct region_hash *r read_lock(&rh->hash_lock); reg = __rh_find(rh, region); + + atomic_inc(®->pending); + + spin_lock_irq(&rh->region_lock); if (reg->state == RH_CLEAN) { rh->log->type->mark_region(rh->log, reg->key); - spin_lock_irq(&rh->region_lock); reg->state = RH_DIRTY; list_del_init(®->list); /* take off the clean list */ - spin_unlock_irq(&rh->region_lock); } + spin_unlock_irq(&rh->region_lock); - atomic_inc(®->pending); read_unlock(&rh->hash_lock); } @@ -411,6 +413,10 @@ static void rh_dec(struct region_hash *r if (atomic_dec_and_test(®->pending)) { spin_lock_irqsave(&rh->region_lock, flags); + if (atomic_read(®->pending)) { /* check race */ + spin_unlock_irqrestore(&rh->region_lock, flags); + return; + } if (reg->state == RH_RECOVERING) { list_add_tail(®->list, &rh->quiesced_regions); } else { _