author	Rusty Russell <rusty@rustcorp.com.au>	2005-01-04 04:14:34 -0800
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-01-04 04:14:34 -0800
commit	cec8e41165b7016d80e031920159fbc5eb3f879f (patch)
tree	68b7e36925348ee27055f2b0b7a21f92340929ee /net
parent	e653f2ec2e7e940c5c4f1181fb350efd3251f4cc (diff)
[PATCH] Conntrack Hash Allocation using __get_free_pages
Here is a patch that just makes it use __get_free_pages to test the TLB theory. Another obvious improvement would be to not use list_heads for the hash table buckets - a single pointer would likely suffice and it would cut the hash table in half, saving cache, TLB and memory.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
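The single-pointer bucket idea raised above is the part this patch does not implement. A minimal sketch of what it could look like, using the kernel's struct hlist_head (a single pointer per bucket, so the table is half the size of a list_head array), is shown below. The names conntrack_hash and alloc_hash_hlist are hypothetical stand-ins, not anything from this patch.

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <asm/page.h>

/* Illustrative only: hlist_head buckets are one pointer each (8 bytes on a
 * 64-bit machine) instead of list_head's two (16 bytes), halving the table. */
static struct hlist_head *conntrack_hash;	/* hypothetical bucket array */

static int alloc_hash_hlist(unsigned int htable_size)
{
	unsigned int i;

	conntrack_hash = (void *)__get_free_pages(GFP_KERNEL,
				get_order(sizeof(struct hlist_head)
					  * htable_size));
	if (!conntrack_hash)
		return -ENOMEM;
	for (i = 0; i < htable_size; i++)
		INIT_HLIST_HEAD(&conntrack_hash[i]);
	return 0;
}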
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/netfilter/ip_conntrack_core.c	33
1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 0fc1a677a2b297..24730193877864 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -75,6 +75,7 @@ static kmem_cache_t *ip_conntrack_expect_cachep;
struct ip_conntrack ip_conntrack_untracked;
unsigned int ip_ct_log_invalid;
static LIST_HEAD(unconfirmed);
+static int ip_conntrack_vmalloc;
DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
@@ -1309,6 +1310,16 @@ static int kill_all(struct ip_conntrack *i, void *data)
return 1;
}
+static void free_conntrack_hash(void)
+{
+ if (ip_conntrack_vmalloc)
+ vfree(ip_conntrack_hash);
+ else
+ free_pages((unsigned long)ip_conntrack_hash,
+ get_order(sizeof(struct list_head)
+ * ip_conntrack_htable_size));
+}
+
/* Mishearing the voices in his head, our hero wonders how he's
supposed to kill the mall. */
void ip_conntrack_cleanup(void)
@@ -1328,7 +1339,7 @@ void ip_conntrack_cleanup(void)
kmem_cache_destroy(ip_conntrack_cachep);
kmem_cache_destroy(ip_conntrack_expect_cachep);
- vfree(ip_conntrack_hash);
+ free_conntrack_hash();
nf_unregister_sockopt(&so_getorigdst);
}
@@ -1366,8 +1377,20 @@ int __init ip_conntrack_init(void)
return ret;
}
- ip_conntrack_hash = vmalloc(sizeof(struct list_head)
- * ip_conntrack_htable_size);
+ /* AK: the hash table is twice as big as needed because it
+ uses list_head. It would be much nicer to the caches to use a
+ single-pointer list head here. */
+ ip_conntrack_vmalloc = 0;
+ ip_conntrack_hash
+ = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(sizeof(struct list_head)
+ * ip_conntrack_htable_size));
+ if (!ip_conntrack_hash) {
+ ip_conntrack_vmalloc = 1;
+ printk(KERN_WARNING "ip_conntrack: falling back to vmalloc.\n");
+ ip_conntrack_hash = vmalloc(sizeof(struct list_head)
+ * ip_conntrack_htable_size);
+ }
if (!ip_conntrack_hash) {
printk(KERN_ERR "Unable to create ip_conntrack_hash\n");
goto err_unreg_sockopt;
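The allocation added in this hunk and the free_conntrack_hash() helper added earlier form a matched pair: whichever allocator actually provided the memory must also be the one used to release it. The sketch below condenses that pairing into one place for readability; it paraphrases the hunks above, with hash, use_vmalloc and nbuckets as stand-in names rather than the literal kernel identifiers.

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/page.h>

static struct list_head *hash;	/* stand-in for ip_conntrack_hash */
static int use_vmalloc;		/* stand-in for ip_conntrack_vmalloc */

static struct list_head *alloc_hash(unsigned int nbuckets)
{
	size_t size = sizeof(struct list_head) * nbuckets;

	/* Prefer physically contiguous pages from the direct mapping,
	 * which needs fewer TLB entries than vmalloc's page-sized mappings. */
	use_vmalloc = 0;
	hash = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!hash) {
		/* Fall back to vmalloc when contiguous pages are scarce. */
		use_vmalloc = 1;
		hash = vmalloc(size);
	}
	return hash;
}

static void free_hash(unsigned int nbuckets)
{
	/* Release with whichever allocator succeeded at init time. */
	if (use_vmalloc)
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct list_head) * nbuckets));
}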
@@ -1375,7 +1398,7 @@ int __init ip_conntrack_init(void)
ip_conntrack_cachep = kmem_cache_create("ip_conntrack",
sizeof(struct ip_conntrack), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL, NULL);
if (!ip_conntrack_cachep) {
printk(KERN_ERR "Unable to create ip_conntrack slab cache\n");
goto err_free_hash;
@@ -1416,7 +1439,7 @@ int __init ip_conntrack_init(void)
err_free_conntrack_slab:
kmem_cache_destroy(ip_conntrack_cachep);
err_free_hash:
- vfree(ip_conntrack_hash);
+ free_conntrack_hash();
err_unreg_sockopt:
nf_unregister_sockopt(&so_getorigdst);