author    Alan Huang <mmpgouride@gmail.com>    2023-03-28 10:58:24 -0400
committer Paul E. McKenney <paulmck@kernel.org>    2023-03-29 16:21:02 -0700
commit    c72371753921a6f986f6f3385fbf814e1276be3d (patch)
tree      92dacdb92f489e08b1544b96e7bfcfbf994b3cdb
parent    a7dcda34850ac783b20927699e886f207676ea7a (diff)
download  perfbook-paulmck.2023.03.29a.tar.gz

CodeSamples/count: Add necessary partial memory barriers (paulmck.2023.03.29a)
This patch adds several necessary partial memory barriers.

The first change replaces READ_ONCE with smp_load_acquire to ensure that
the read from theftp[t] happens before the read from counterp[t]. The
litmus test below represents the original code pattern; the result is
"Sometimes":

C counter_sig

{}

P0(int *theft, int *counter)
{
	int r0;
	int r1;

	r0 = READ_ONCE(*theft);
	r1 = READ_ONCE(*counter);
}

P1(int *theft, int *counter)
{
	WRITE_ONCE(*counter, 1);
	smp_mb();
	WRITE_ONCE(*theft, 1);
}

exists (0:r0=1 /\ 0:r1=0)

The second change replaces WRITE_ONCE with smp_store_release to ensure
that setting counterp[t] happens before setting theftp[t] to THEFT_IDLE.
Here is the litmus test; the result is again "Sometimes":

C counter_sig_2

{
	int theft = 1;
	int counter = 1;
}

P0(int *theft, int *counter)
{
	WRITE_ONCE(*counter, 0);
	WRITE_ONCE(*theft, 0);
}

P1(int *theft, int *counter)
{
	if (READ_ONCE(*theft) == 0) {
		WRITE_ONCE(*counter, READ_ONCE(*counter)+1);
	}
}

P2(int *counter)
{
	int r0;

	r0 = READ_ONCE(*counter);
}

exists (2:r0=2)

Note that the read of the theft variable in add_count()'s and
sub_count()'s fastpaths is also changed to smp_load_acquire, to ensure
that the read of theft happens before the read of counter.

Signed-off-by: Alan Huang <mmpgouride@gmail.com>
Akira Yokosawa <akiyks@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
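[Editor's note: for reference, here is a sketch of the first test with
the fix applied; the name counter_sig_fixed is ours, not part of the
patch. With the reader's load of theft upgraded to smp_load_acquire,
pairing with the writer's smp_mb(), the Linux-kernel memory model
forbids the cycle, so herd7's result should become "Never":

C counter_sig_fixed

{}

P0(int *theft, int *counter)
{
	int r0;
	int r1;

	r0 = smp_load_acquire(theft);
	r1 = READ_ONCE(*counter);
}

P1(int *theft, int *counter)
{
	WRITE_ONCE(*counter, 1);
	smp_mb();
	WRITE_ONCE(*theft, 1);
}

exists (0:r0=1 /\ 0:r1=0)

The second test behaves analogously once both sides of the pairing are
in place: with P0's store to theft made an smp_store_release and P1's
read of theft made an smp_load_acquire, a reader seeing theft==0 must
also see counter==0, so the exists clause 2:r0=2 should likewise no
longer be satisfiable.]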
-rw-r--r--    CodeSamples/count/count_lim_sig.c    8
1 file changed, 4 insertions, 4 deletions
diff --git a/CodeSamples/count/count_lim_sig.c b/CodeSamples/count/count_lim_sig.c
index 023d6215..59da8077 100644
--- a/CodeSamples/count/count_lim_sig.c
+++ b/CodeSamples/count/count_lim_sig.c
@@ -81,7 +81,7 @@ static void flush_local_count(void) //\lnlbl{flush:b}
for_each_tid(t, tid) { //\lnlbl{flush:loop2:b}
if (theftp[t] == NULL) //\lnlbl{flush:skip:nonexist}
continue; //\lnlbl{flush:next2}
- while (READ_ONCE(*theftp[t]) != THEFT_READY) {//\lnlbl{flush:loop3:b}
+ while (smp_load_acquire(theftp[t]) != THEFT_READY) {//\lnlbl{flush:loop3:b}
poll(NULL, 0, 1); //\lnlbl{flush:block}
if (READ_ONCE(*theftp[t]) == THEFT_REQ)//\lnlbl{flush:check:REQ}
pthread_kill(tid, SIGUSR1);//\lnlbl{flush:signal2}
@@ -90,7 +90,7 @@ static void flush_local_count(void) //\lnlbl{flush:b}
*counterp[t] = 0;
globalreserve -= *countermaxp[t];
*countermaxp[t] = 0; //\lnlbl{flush:thiev:e}
- WRITE_ONCE(*theftp[t], THEFT_IDLE); //\lnlbl{flush:IDLE}
+ smp_store_release(theftp[t], THEFT_IDLE); //\lnlbl{flush:IDLE}
} //\lnlbl{flush:loop2:e}
} //\lnlbl{flush:e}
@@ -116,7 +116,7 @@ int add_count(unsigned long delta) //\lnlbl{b}
WRITE_ONCE(counting, 1); //\lnlbl{fast:b}
barrier(); //\lnlbl{barrier:1}
- if (READ_ONCE(theft) <= THEFT_REQ && //\lnlbl{check:b}
+ if (smp_load_acquire(&theft) <= THEFT_REQ && //\lnlbl{check:b}
countermax - counter >= delta) { //\lnlbl{check:e}
WRITE_ONCE(counter, counter + delta); //\lnlbl{add:f}
fastpath = 1; //\lnlbl{fasttaken}
@@ -155,7 +155,7 @@ int sub_count(unsigned long delta)
WRITE_ONCE(counting, 1);
barrier();
- if (READ_ONCE(theft) <= THEFT_REQ &&
+ if (smp_load_acquire(&theft) <= THEFT_REQ &&
counter >= delta) {
WRITE_ONCE(counter, counter - delta);
fastpath = 1;
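
[Editor's note: as a rough user-space analogue of the pattern above —
not from the perfbook sources; the function names are illustrative, and
the signal handling and per-thread indirection of count_lim_sig.c are
omitted — the acquire/release pairing maps onto C11 atomics like this:

#include <stdatomic.h>

enum { THEFT_IDLE, THEFT_REQ, THEFT_ACK, THEFT_READY };

static _Atomic int theft = THEFT_IDLE;
static _Atomic int counter;

/* Thief side: wait for READY, steal the count, release back to IDLE. */
void flush_side(void)
{
	/* Acquire: the reads of counter below cannot be reordered
	 * before this load of theft. */
	while (atomic_load_explicit(&theft, memory_order_acquire) != THEFT_READY)
		;	/* the real code sleeps and may re-signal here */
	int stolen = atomic_load_explicit(&counter, memory_order_relaxed);
	atomic_store_explicit(&counter, 0, memory_order_relaxed);
	(void)stolen;	/* would be folded into the global count */
	/* Release: the zeroing of counter above is guaranteed visible
	 * to any thread that acquire-loads THEFT_IDLE from theft. */
	atomic_store_explicit(&theft, THEFT_IDLE, memory_order_release);
}

/* Fastpath side: acquire-load theft before touching counter. */
int fastpath_side(int delta)
{
	/* Acquire: the accesses to counter below cannot be hoisted
	 * above this check of theft. */
	if (atomic_load_explicit(&theft, memory_order_acquire) <= THEFT_REQ) {
		int c = atomic_load_explicit(&counter, memory_order_relaxed);
		atomic_store_explicit(&counter, c + delta, memory_order_relaxed);
		return 1;	/* fastpath taken */
	}
	return 0;	/* caller falls back to the slowpath */
}

The design point is the same as in the patch: the cheap READ_ONCE/
WRITE_ONCE (relaxed) accesses stay on the hot path, and ordering is
paid for only at the two handoff points of the theft state machine.]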