aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorHauke Mehrtens <hauke@hauke-m.de>2021-10-09 18:31:28 +0200
committerHauke Mehrtens <hauke@hauke-m.de>2021-10-18 23:01:19 +0200
commitbb2e9faedc8957dea5fa1c50a959ca5d71141b33 (patch)
tree7fe8b50d3217e6686fa45fb7193a3adec1215b4d
parent8695d06becac6e01e0908ba7b945ab2141a3aa0b (diff)
downloadbackports-bb2e9faedc8957dea5fa1c50a959ca5d71141b33.tar.gz
headers: Add tasklet_disable_in_atomic()
The tasklet_disable_in_atomic() function was added in kernel 5.13 and is used by ath9k now. The code was copied from the upstream kernel. Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
-rw-r--r--backport/backport-include/linux/interrupt.h23
-rw-r--r--backport/compat/Makefile1
-rw-r--r--backport/compat/backport-5.13.c30
3 files changed, 54 insertions(+), 0 deletions(-)
diff --git a/backport/backport-include/linux/interrupt.h b/backport/backport-include/linux/interrupt.h
index f334a963..41d50d7c 100644
--- a/backport/backport-include/linux/interrupt.h
+++ b/backport/backport-include/linux/interrupt.h
@@ -50,4 +50,27 @@ tasklet_setup(struct tasklet_struct *t,
#endif
+#if LINUX_VERSION_IS_LESS(5,13,0)
+
+#define tasklet_unlock_spin_wait LINUX_BACKPORT(tasklet_unlock_spin_wait)
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+void tasklet_unlock_spin_wait(struct tasklet_struct *t);
+
+#else
+static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
+#endif
+
+/*
+ * Do not use in new code. Disabling tasklets from atomic contexts is
+ * error prone and should be avoided.
+ */
+#define tasklet_disable_in_atomic LINUX_BACKPORT(tasklet_disable_in_atomic)
+static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
+{
+ tasklet_disable_nosync(t);
+ tasklet_unlock_spin_wait(t);
+ smp_mb();
+}
+#endif
+
#endif /* _BP_LINUX_INTERRUPT_H */
diff --git a/backport/compat/Makefile b/backport/compat/Makefile
index e927a0c8..2761e5f5 100644
--- a/backport/compat/Makefile
+++ b/backport/compat/Makefile
@@ -19,6 +19,7 @@ compat-$(CPTCFG_KERNEL_5_3) += backport-5.3.o
compat-$(CPTCFG_KERNEL_5_5) += backport-5.5.o
compat-$(CPTCFG_KERNEL_5_10) += backport-5.10.o
compat-$(CPTCFG_KERNEL_5_11) += backport-5.11.o
+compat-$(CPTCFG_KERNEL_5_13) += backport-5.13.o
compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/verify.o
compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/pkcs7.asn1.o
diff --git a/backport/compat/backport-5.13.c b/backport/compat/backport-5.13.c
new file mode 100644
index 00000000..c10b3321
--- /dev/null
+++ b/backport/compat/backport-5.13.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/interrupt.h>
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+/*
+ * Do not use in new code. Waiting for tasklets from atomic contexts is
+ * error prone and should be avoided.
+ */
+void tasklet_unlock_spin_wait(struct tasklet_struct *t)
+{
+ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ /*
+ * Prevent a live lock when current preempted soft
+ * interrupt processing or prevents ksoftirqd from
+ * running. If the tasklet runs on a different CPU
+ * then this has no effect other than doing the BH
+ * disable/enable dance for nothing.
+ */
+ local_bh_disable();
+ local_bh_enable();
+ } else {
+ cpu_relax();
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(tasklet_unlock_spin_wait);
+#endif