author    Jan Kara <jack@suse.cz>  2021-07-30 16:09:08 +0100
committer Mark Brown <broonie@kernel.org>  2021-07-30 16:09:08 +0100
commit    d666b327d149f2f5c2287fd0e84c6a31cf7cb28e (patch)
tree      18a779ad45e7e8fc2e233caf7506039c6e21e273
parent    e09a1a407d22cace3c781640b5fe154f398ae98b (diff)
writeback: use READ_ONCE for unlocked reads of writeback stats
We do some unlocked reads of writeback statistics like avg_write_bandwidth,
dirty_ratelimit, or bw_time_stamp. Generally we are fine with getting somewhat
out-of-date values, but actually getting different values in various parts of
these functions, because the compiler decided to reload the value from the
original memory location, could confuse the calculations. Use READ_ONCE for
these unlocked accesses and WRITE_ONCE for the updates to be on the safe side.

Link: https://lkml.kernel.org/r/20210713104716.22868-5-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Cc: Michael Stapelberg <stapelberg+linux@google.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Mark Brown <broonie@kernel.org>
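For illustration only (not part of the patch), here is a minimal sketch of the
pattern the patch applies. The wb_stats structure and the update_bw() /
max_pause_pages() helpers are hypothetical stand-ins for the real fields in
struct bdi_writeback and the functions touched in mm/page-writeback.c:

#include <linux/compiler.h>	/* READ_ONCE() / WRITE_ONCE() */
#include <linux/spinlock.h>

/* Hypothetical stand-in for the writeback stats read without a lock. */
struct wb_stats {
	spinlock_t lock;			/* held by the writer */
	unsigned long avg_write_bandwidth;	/* read locklessly elsewhere */
};

/* Writer side: publish the new estimate with a single, non-torn store. */
static void update_bw(struct wb_stats *s, unsigned long avg)
{
	spin_lock(&s->lock);
	WRITE_ONCE(s->avg_write_bandwidth, avg);
	spin_unlock(&s->lock);
}

/*
 * Lockless reader: without READ_ONCE() the compiler may reload
 * s->avg_write_bandwidth for every use, so the two uses below could see
 * different values in the middle of one calculation.  READ_ONCE() forces
 * a single load and the rest of the function works on that snapshot.
 */
static unsigned long max_pause_pages(struct wb_stats *s)
{
	unsigned long bw = READ_ONCE(s->avg_write_bandwidth);

	return bw / 2 + bw / 8;
}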
 mm/page-writeback.c | 28 +++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ae5ace94e86eea..57b98ea365e27b 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -183,7 +183,7 @@ static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
static void wb_min_max_ratio(struct bdi_writeback *wb,
unsigned long *minp, unsigned long *maxp)
{
- unsigned long this_bw = wb->avg_write_bandwidth;
+ unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth);
unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
unsigned long long min = wb->bdi->min_ratio;
unsigned long long max = wb->bdi->max_ratio;
@@ -892,7 +892,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
static void wb_position_ratio(struct dirty_throttle_control *dtc)
{
struct bdi_writeback *wb = dtc->wb;
- unsigned long write_bw = wb->avg_write_bandwidth;
+ unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth);
unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
unsigned long wb_thresh = dtc->wb_thresh;
@@ -1115,7 +1115,7 @@ out:
&wb->bdi->tot_write_bandwidth) <= 0);
}
wb->write_bandwidth = bw;
- wb->avg_write_bandwidth = avg;
+ WRITE_ONCE(wb->avg_write_bandwidth, avg);
}
static void update_dirty_limit(struct dirty_throttle_control *dtc)
@@ -1324,7 +1324,7 @@ static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
else
dirty_ratelimit -= step;
- wb->dirty_ratelimit = max(dirty_ratelimit, 1UL);
+ WRITE_ONCE(wb->dirty_ratelimit, max(dirty_ratelimit, 1UL));
wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
@@ -1336,11 +1336,12 @@ static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
{
struct bdi_writeback *wb = gdtc->wb;
unsigned long now = jiffies;
- unsigned long elapsed = now - wb->bw_time_stamp;
+ unsigned long elapsed;
unsigned long dirtied;
unsigned long written;
spin_lock(&wb->list_lock);
+ elapsed = now - wb->bw_time_stamp;
dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
@@ -1361,7 +1362,7 @@ static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
wb->dirtied_stamp = dirtied;
wb->written_stamp = written;
- wb->bw_time_stamp = now;
+ WRITE_ONCE(wb->bw_time_stamp, now);
spin_unlock(&wb->list_lock);
}
@@ -1385,7 +1386,7 @@ static void wb_bandwidth_estimate_start(struct bdi_writeback *wb)
spin_lock(&wb->list_lock);
wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED);
wb->written_stamp = wb_stat(wb, WB_WRITTEN);
- wb->bw_time_stamp = now;
+ WRITE_ONCE(wb->bw_time_stamp, now);
spin_unlock(&wb->list_lock);
}
}
@@ -1410,7 +1411,7 @@ static unsigned long dirty_poll_interval(unsigned long dirty,
static unsigned long wb_max_pause(struct bdi_writeback *wb,
unsigned long wb_dirty)
{
- unsigned long bw = wb->avg_write_bandwidth;
+ unsigned long bw = READ_ONCE(wb->avg_write_bandwidth);
unsigned long t;
/*
@@ -1432,8 +1433,8 @@ static long wb_min_pause(struct bdi_writeback *wb,
unsigned long dirty_ratelimit,
int *nr_dirtied_pause)
{
- long hi = ilog2(wb->avg_write_bandwidth);
- long lo = ilog2(wb->dirty_ratelimit);
+ long hi = ilog2(READ_ONCE(wb->avg_write_bandwidth));
+ long lo = ilog2(READ_ONCE(wb->dirty_ratelimit));
long t; /* target pause */
long pause; /* estimated next pause */
int pages; /* target nr_dirtied_pause */
@@ -1713,12 +1714,12 @@ free_running:
if (dirty_exceeded && !wb->dirty_exceeded)
wb->dirty_exceeded = 1;
- if (time_is_before_jiffies(wb->bw_time_stamp +
+ if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
BANDWIDTH_INTERVAL))
__wb_update_bandwidth(gdtc, mdtc, true);
/* throttle according to the chosen dtc */
- dirty_ratelimit = wb->dirty_ratelimit;
+ dirty_ratelimit = READ_ONCE(wb->dirty_ratelimit);
task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
RATELIMIT_CALC_SHIFT;
max_pause = wb_max_pause(wb, sdtc->wb_dirty);
@@ -2368,7 +2369,8 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
* but if there's constant writeback being submitted, this makes sure
* writeback bandwidth is updated once in a while.
*/
- if (time_is_before_jiffies(wb->bw_time_stamp + BANDWIDTH_INTERVAL))
+ if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
+ BANDWIDTH_INTERVAL))
wb_update_bandwidth(wb);
return ret;
}