From f8ae916b4a177f6852fd37fea0ad9e0a09a866c5 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 13 Jul 2010 15:41:55 +0200
Subject: [PATCH] net: iptables: Fix xt_info locking

commit 5bbbedcfeec4fb13f514cabf4383b62c2e141f76 in tip.

xt_info locking is an open coded rw_lock which works fine in mainline,
but on RT it's racy. Replace it with a real rwlock.

Signed-off-by: Thomas Gleixner
Signed-off-by: Paul Gortmaker
---
 include/linux/netfilter/x_tables.h |   34 +++++++++++++++++++++++++---------
 net/netfilter/x_tables.c           |    5 +++++
 2 files changed, 30 insertions(+), 9 deletions(-)

diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index dcc03d7..ae2ef0f 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -464,7 +464,11 @@ extern void xt_free_table_info(struct xt_table_info *info);
  * necessary for reading the counters.
  */
 struct xt_info_lock {
+#ifndef CONFIG_PREEMPT_RT
 	spinlock_t lock;
+#else
+	rwlock_t lock;
+#endif
 	unsigned char readers;
 };
 DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
@@ -491,11 +495,14 @@ static inline int xt_info_rdlock_bh(void)
 	preempt_disable_rt();
 	cpu = smp_processor_id();
 	lock = &per_cpu(xt_info_locks, cpu);
-	if (likely(!lock->readers++)) {
-		preempt_enable_rt();
-		spin_lock(&lock->lock);
-	} else
-		preempt_enable_rt();
+
+#ifndef CONFIG_PREEMPT_RT
+	if (likely(!lock->readers++))
+		spin_lock(&lock->lock);
+#else
+	preempt_enable_rt();
+	read_lock(&lock->lock);
+#endif
 	return cpu;
 }
 
@@ -503,13 +510,14 @@ static inline void xt_info_rdunlock_bh(int cpu)
 {
 	struct xt_info_lock *lock = &per_cpu(xt_info_locks, cpu);
 
-	preempt_disable_rt();
-
+#ifndef CONFIG_PREEMPT_RT
 	if (likely(!--lock->readers)) {
 		preempt_enable_rt();
 		spin_unlock(&lock->lock);
-	} else
-		preempt_enable_rt();
+	}
+#else
+	read_unlock(&lock->lock);
+#endif
 	local_bh_enable();
 }
 
@@ -521,12 +529,20 @@ static inline void xt_info_rdunlock_bh(int cpu)
  */
 static inline void xt_info_wrlock(unsigned int cpu)
 {
+#ifndef CONFIG_PREEMPT_RT
 	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+#else
+	write_lock(&per_cpu(xt_info_locks, cpu).lock);
+#endif
 }
 
 static inline void xt_info_wrunlock(unsigned int cpu)
 {
+#ifndef CONFIG_PREEMPT_RT
 	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+#else
+	write_unlock(&per_cpu(xt_info_locks, cpu).lock);
+#endif
 }
 
 /*
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 665f5be..da3955e 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1244,7 +1244,12 @@ static int __init xt_init(void)
 
 	for_each_possible_cpu(i) {
 		struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
+
+#ifndef CONFIG_PREEMPT_RT
 		spin_lock_init(&lock->lock);
+#else
+		rwlock_init(&lock->lock);
+#endif
 		lock->readers = 0;
 	}
-- 
1.7.0.4
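
For readers following the patch outside a kernel tree, here is a rough userspace
sketch of the two schemes the commit message contrasts. It is an illustration
only, not kernel code: the names fake_xt_lock, open_coded_read_lock() and
open_coded_read_unlock() are made up for this example, and pthread primitives
stand in for the kernel's per-CPU xt_info_lock, spinlock and rwlock. The point
is that the mainline fast path open-codes a recursive read side with a plain
counter plus a lock, which is only safe while the section cannot be preempted
or migrated; PREEMPT_RT makes such sections preemptible, so the patch switches
to a real rwlock whose read side is safe on its own.

/* Illustrative userspace sketch only -- not kernel code.  Names are
 * hypothetical stand-ins for the kernel's per-CPU xt_info_lock. */
#include <pthread.h>
#include <stdio.h>

/* Open-coded scheme used in mainline: a plain counter plus a lock.
 * The first reader takes the lock, nested readers only bump the count.
 * This is only safe if the whole section cannot be preempted, which is
 * what disabling bottom halves guarantees on non-RT kernels. */
struct fake_xt_lock {
	pthread_mutex_t lock;
	unsigned int readers;
};

static struct fake_xt_lock open_coded = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.readers = 0,
};

static void open_coded_read_lock(void)
{
	/* Unprotected read-modify-write: racy once the section is preemptible. */
	if (open_coded.readers++ == 0)
		pthread_mutex_lock(&open_coded.lock);
}

static void open_coded_read_unlock(void)
{
	if (--open_coded.readers == 0)
		pthread_mutex_unlock(&open_coded.lock);
}

/* Scheme the patch switches to under CONFIG_PREEMPT_RT: a real
 * reader/writer lock, which needs no manual reader counting. */
static pthread_rwlock_t real_rwlock = PTHREAD_RWLOCK_INITIALIZER;

int main(void)
{
	open_coded_read_lock();
	printf("open-coded read side held, readers=%u\n", open_coded.readers);
	open_coded_read_unlock();

	pthread_rwlock_rdlock(&real_rwlock);
	printf("real rwlock read side held\n");
	pthread_rwlock_unlock(&real_rwlock);
	return 0;
}

Build with "gcc sketch.c -lpthread". In the single-threaded demo both variants
behave identically; the difference only matters once a reader can be preempted
between the counter update and the lock operation, which is exactly the RT case
the patch addresses.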