path: root/drivers/md/raid5.c
author:    Logan Gunthorpe <logang@deltatee.com>  2022-04-07 10:57:07 -0600
committer: Song Liu <song@kernel.org>  2022-04-25 14:00:36 -0700
commit:    8fbcba6b999beb9fd0b95cd2efe00a1215e36406 (patch)
tree:      1407cad0fb3f92970af5cfb04a89e66fc65c542e /drivers/md/raid5.c
parent:    92d9aac92b7cc92c770e736c70c3acae7b803278 (diff)
download:  linux-8fbcba6b999beb9fd0b95cd2efe00a1215e36406.tar.gz
md/raid5: Cleanup setup_conf() error returns
Be more careful about the error returns. Most errors in this function are
actually ENOMEM, but it forcibly returns EIO if conf has been allocated.

Instead return ret and ensure it is set appropriately before each goto abort.

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Song Liu <song@kernel.org>
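As a rough illustration of the pattern described above (a minimal sketch with invented foo_* names and a stubbed helper, not the actual setup_conf() code), the idea is to default ret to the most common error and override it only where a different code applies, so a single abort label can return ERR_PTR(ret):

/* Hypothetical sketch of the single-exit error pattern; all foo_* names are
 * invented for illustration and foo_alloc_percpu() is stubbed out.
 */
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/errno.h>

struct foo_conf {
	int nr_items;
};

static int foo_alloc_percpu(struct foo_conf *conf)
{
	return 0;			/* stub: pretend per-cpu setup succeeded */
}

static struct foo_conf *setup_foo(int nr_items)
{
	struct foo_conf *conf = NULL;
	int ret = -ENOMEM;		/* most failure paths below are allocation failures */

	if (nr_items <= 0) {
		ret = -EINVAL;		/* override the default where -ENOMEM is wrong */
		goto abort;
	}

	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	if (!conf)
		goto abort;		/* ret is already -ENOMEM */
	conf->nr_items = nr_items;

	ret = foo_alloc_percpu(conf);	/* propagate whatever the callee returned */
	if (ret)
		goto abort;

	return conf;

abort:
	kfree(conf);			/* kfree(NULL) is a no-op */
	return ERR_PTR(ret);
}

Note that the actual patch below keeps the if (conf) check before free_conf(), whereas kfree() in this sketch accepts NULL directly.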
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--  drivers/md/raid5.c  18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f22e0da01f137..79b03c79c66fd 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7163,7 +7163,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
int i;
int group_cnt;
struct r5worker_group *new_group;
- int ret;
+ int ret = -ENOMEM;
if (mddev->new_level != 5
&& mddev->new_level != 4
@@ -7222,6 +7222,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
spin_lock_init(&conf->device_lock);
seqcount_spinlock_init(&conf->gen_lock, &conf->device_lock);
mutex_init(&conf->cache_size_mutex);
+
init_waitqueue_head(&conf->wait_for_quiescent);
init_waitqueue_head(&conf->wait_for_stripe);
init_waitqueue_head(&conf->wait_for_overlap);
@@ -7299,11 +7300,13 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->level = mddev->new_level;
conf->chunk_sectors = mddev->new_chunk_sectors;
- if (raid5_alloc_percpu(conf) != 0)
+ ret = raid5_alloc_percpu(conf);
+ if (ret)
goto abort;
pr_debug("raid456: run(%s) called.\n", mdname(mddev));
+ ret = -EIO;
rdev_for_each(rdev, mddev) {
raid_disk = rdev->raid_disk;
if (raid_disk >= max_disks
@@ -7367,6 +7370,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (grow_stripes(conf, conf->min_nr_stripes)) {
pr_warn("md/raid:%s: couldn't allocate %dkB for buffers\n",
mdname(mddev), memory);
+ ret = -ENOMEM;
goto abort;
} else
pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory);
@@ -7380,7 +7384,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->shrinker.count_objects = raid5_cache_count;
conf->shrinker.batch = 128;
conf->shrinker.flags = 0;
- if (register_shrinker(&conf->shrinker)) {
+ ret = register_shrinker(&conf->shrinker);
+ if (ret) {
pr_warn("md/raid:%s: couldn't register shrinker.\n",
mdname(mddev));
goto abort;
@@ -7391,17 +7396,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (!conf->thread) {
pr_warn("md/raid:%s: couldn't allocate thread.\n",
mdname(mddev));
+ ret = -ENOMEM;
goto abort;
}
return conf;
abort:
- if (conf) {
+ if (conf)
free_conf(conf);
- return ERR_PTR(-EIO);
- } else
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
}
static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)