author		Claudio Imbrenda <imbrenda@linux.ibm.com>	2020-06-22 18:21:41 +0200
committer	Paolo Bonzini <pbonzini@redhat.com>	2020-06-22 13:49:56 -0400
commit		17b9f93e15e2a77cc868288072ac4d39c33fff3b (patch)
tree		4ecad51438796aaf976ceb583d28cef3972de36f
parent		4a4f8af26f3c1491843ca700d05a534c0e767980 (diff)
download	kvm-unit-tests-17b9f93e15e2a77cc868288072ac4d39c33fff3b.tar.gz
lib/vmalloc: add locking and a check for initialization
Make sure init_alloc_vpage is never called when vmalloc is in use.

Get both init_alloc_vpage and setup_vm to use the lock. For setup_vm we
only check at the end, because at least on some architectures setup_mmu
can call init_alloc_vpage, which would cause a deadlock.

Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Message-Id: <20200622162141.279716-9-imbrenda@linux.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--	lib/vmalloc.c | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
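As context for the patch below, here is a minimal standalone sketch of the pattern the commit message describes: init_alloc_vpage asserts under a lock that vmalloc is not yet in use, while setup_vm takes the same lock only at the end, after any work that may re-enter init_alloc_vpage. This is not the kvm-unit-tests code; it substitutes a pthread mutex for the library's spinlock, and early_ops, dummy_top and main are hypothetical names added for the demo.

#include <assert.h>
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

struct alloc_ops { const char *name; };
static const struct alloc_ops early_ops = { "early" };
static const struct alloc_ops vmalloc_ops = { "vmalloc" };
static const struct alloc_ops *alloc_ops = &early_ops;

static void *vfree_top;
static char dummy_top;	/* stand-in for the real arena top */

static void init_alloc_vpage(void *top)
{
	pthread_mutex_lock(&lock);
	/* Moving the arena once vmalloc is live would corrupt it. */
	assert(alloc_ops != &vmalloc_ops);
	vfree_top = top;
	pthread_mutex_unlock(&lock);
}

static void setup_mmu(void)
{
	/* On some architectures this re-enters init_alloc_vpage,
	 * which is why setup_vm must not hold the lock here. */
	init_alloc_vpage(&dummy_top);
}

static void setup_vm(void)
{
	setup_mmu();	/* lock not held: avoids self-deadlock */

	pthread_mutex_lock(&lock);
	assert(alloc_ops != &vmalloc_ops);	/* catches double setup_vm */
	alloc_ops = &vmalloc_ops;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	setup_vm();
	printf("alloc_ops = %s, vfree_top = %p\n", alloc_ops->name, vfree_top);
	/* Calling init_alloc_vpage(&dummy_top) here would trip the assert. */
	return 0;
}

Compile with -pthread; it prints the vmalloc state once, and a second setup_vm call would abort on the assertion, mirroring the check the patch adds.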
diff --git a/lib/vmalloc.c b/lib/vmalloc.c
index 83e34aa..10f15af 100644
--- a/lib/vmalloc.c
+++ b/lib/vmalloc.c
@@ -37,11 +37,6 @@ void *alloc_vpage(void)
 	return alloc_vpages(1);
 }
 
-void init_alloc_vpage(void *top)
-{
-	vfree_top = top;
-}
-
 void *vmap(phys_addr_t phys, size_t size)
 {
 	void *mem, *p;
@@ -96,6 +91,14 @@ void __attribute__((__weak__)) find_highmem(void)
 {
 }
 
+void init_alloc_vpage(void *top)
+{
+	spin_lock(&lock);
+	assert(alloc_ops != &vmalloc_ops);
+	vfree_top = top;
+	spin_unlock(&lock);
+}
+
 void setup_vm()
 {
 	phys_addr_t base, top;
@@ -124,5 +127,8 @@ void setup_vm()
 		free_pages(phys_to_virt(base), top - base);
 	}
 
+	spin_lock(&lock);
+	assert(alloc_ops != &vmalloc_ops);
 	alloc_ops = &vmalloc_ops;
+	spin_unlock(&lock);
 }
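A closing note on the ordering the commit message explains: if setup_vm took the spinlock before its MMU work, an architecture whose setup_mmu calls init_alloc_vpage would acquire the same non-recursive lock twice. A hypothetical trace of that failure (not output from any tool):

    setup_vm()
        spin_lock(&lock)            <- if taken at the top instead
        setup_mmu()
            init_alloc_vpage()
                spin_lock(&lock)    <- same lock, never released: deadlock

Deferring both the lock and the assertion to the end of setup_vm, as the patch does, sidesteps this while still catching a vmalloc arena that is already live.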