author     Florian Westphal <fw@strlen.de>           2015-06-11 01:34:55 +0200
committer  Pablo Neira Ayuso <pablo@netfilter.org>   2015-06-12 14:27:10 +0200
commit     482cfc318559e2527dfd8513582d2fdb276e47c2 (patch)
tree       66160977937cbc47d024b93806b85475dab1c026 /net/netfilter/x_tables.c
parent     71ae0dff02d756e4d2ca710b79f2ff5390029a5f (diff)
netfilter: xtables: avoid percpu ruleset duplication
We store the rule blob once per (possible) cpu. Unfortunately this means we
can waste a lot of memory on big SMP machines. The ipt_entry structure
('rule head') is 112 bytes, so e.g. with maxcpu=64 a single rule eats close
to 8k of RAM.

Since the previous patch made the counters percpu, it appears there is
nothing left in the rule blob that needs to be percpu.

On my test system (144 possible cpus, 400k dummy rules) this change saves
close to 9 gigabytes of RAM.

Reported-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
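For a rough sense of the numbers quoted above, here is a minimal userspace
sketch of the arithmetic (plain C, not kernel code and not part of the patch);
it only accounts for the 112-byte rule heads quoted in the message, so the
real saving is larger because each rule also carries match and target data:

/*
 * Back-of-the-envelope estimate of the per-cpu duplication cost described
 * in the commit message. Assumed figures are taken from the message itself:
 * 112-byte ipt_entry rule heads, 144 possible CPUs, 400k dummy rules.
 * Matches/targets attached to each rule are not counted here.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long long rule_head = 112;    /* sizeof(struct ipt_entry) */
	const unsigned long long cpus      = 144;    /* possible CPUs on the test box */
	const unsigned long long rules     = 400000; /* dummy rules in the test setup */

	unsigned long long shared_blob = rule_head * rules;  /* after: one shared copy */
	unsigned long long percpu_blob = shared_blob * cpus;  /* before: one copy per possible CPU */

	printf("rule heads, single copy:    %llu MiB\n", shared_blob >> 20);
	printf("rule heads, per-cpu copies: %llu MiB\n", percpu_blob >> 20);
	printf("difference:                 %llu MiB\n", (percpu_blob - shared_blob) >> 20);
	return 0;
}

The rule heads alone account for roughly 6 GiB of the ~9 GB reported above;
the remainder presumably comes from the match and target blobs that follow
each head.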
Diffstat (limited to 'net/netfilter/x_tables.c')
 net/netfilter/x_tables.c | 23 ++++++++---------------
 1 file changed, 8 insertions(+), 15 deletions(-)
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 83032464a4bdd..6062ce3e862ce 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -659,7 +659,6 @@ EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
 struct xt_table_info *xt_alloc_table_info(unsigned int size)
 {
 	struct xt_table_info *newinfo;
-	int cpu;
 	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
 	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
@@ -671,19 +670,14 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
 	newinfo->size = size;
-	for_each_possible_cpu(cpu) {
-		if (size <= PAGE_SIZE)
-			newinfo->entries[cpu] = kmalloc_node(size,
-							GFP_KERNEL,
-							cpu_to_node(cpu));
-		else
-			newinfo->entries[cpu] = vmalloc_node(size,
-							cpu_to_node(cpu));
+	if (size <= PAGE_SIZE)
+		newinfo->entries = kmalloc(size, GFP_KERNEL);
+	else
+		newinfo->entries = vmalloc(size);
-		if (newinfo->entries[cpu] == NULL) {
-			xt_free_table_info(newinfo);
-			return NULL;
-		}
+	if (newinfo->entries == NULL) {
+		xt_free_table_info(newinfo);
+		return NULL;
 	}
 	return newinfo;
@@ -694,8 +688,7 @@ void xt_free_table_info(struct xt_table_info *info)
 {
 	int cpu;
-	for_each_possible_cpu(cpu)
-		kvfree(info->entries[cpu]);
+	kvfree(info->entries);
 	if (info->jumpstack != NULL) {
 		for_each_possible_cpu(cpu)