author    Andrii Nakryiko <andriin@fb.com>    2020-04-10 13:26:12 -0700
committer Daniel Borkmann <daniel@iogearbox.net>    2020-04-14 21:28:57 +0200
commit    1f6cb19be2e231fe092f40decb71f066eba090d7
tree      f403308df2df8c60baedaa5eed2444e617e320f0
parent    4178417cc5359c329790a4a8f4a6604612338cca
bpf: Prevent re-mmap()'ing BPF map as writable for initially r/o mapping
The VM_MAYWRITE flag set during the initial memory mapping determines whether already mmap()'ed pages can later be remapped as writable through an mprotect() call. To prevent a user application from rewriting the contents of a BPF map that was memory-mapped as read-only and subsequently frozen, remove the VM_MAYWRITE flag completely on an initially read-only mapping.

Alternatively, we could treat any memory mapping of an unfrozen map as writable and bump writecnt instead. But there is little legitimate reason to map a BPF map as read-only and then re-mmap() it as writable through mprotect(), instead of just mmap()'ing it as read/write from the very beginning.

Also, at the suggestion of Jann Horn, drop the unnecessary refcounting in the mmap operations. We can simply rely on the VMA holding a reference to the BPF map's file.

Fixes: fc9702273e2e ("bpf: Add mmap() support for BPF_MAP_TYPE_ARRAY")
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Jann Horn <jannh@google.com>
Link: https://lore.kernel.org/bpf/20200410202613.3679837-1-andriin@fb.com
 kernel/bpf/syscall.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)
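For illustration only (not part of the patch): a minimal user-space sketch of the behaviour this change enforces. It assumes a BPF_MAP_TYPE_ARRAY created with BPF_F_MMAPABLE (one 4096-byte value), a kernel with BPF mmap support, sufficient privileges, and elides most error handling. With VM_MAYWRITE cleared on an initially read-only mapping, a later mprotect() upgrade to PROT_WRITE is expected to fail.

/*
 * Illustrative sketch only, not part of the patch; the helper name is
 * made up for this example.
 */
#include <linux/bpf.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static int create_mmapable_array(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_ARRAY;
	attr.key_size    = 4;
	attr.value_size  = 4096;
	attr.max_entries = 1;
	attr.map_flags   = BPF_F_MMAPABLE;

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

int main(void)
{
	int fd = create_mmapable_array();
	void *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);

	/*
	 * The initial mapping is read-only, so the kernel now clears
	 * VM_MAYWRITE; upgrading the protection afterwards should fail.
	 */
	if (mprotect(p, 4096, PROT_READ | PROT_WRITE))
		perror("mprotect to PROT_WRITE (expected to fail)");

	munmap(p, 4096);
	close(fd);
	return 0;
}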
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 64783da3420202..d85f372395407d 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -586,9 +586,7 @@ static void bpf_map_mmap_open(struct vm_area_struct *vma)
 {
 	struct bpf_map *map = vma->vm_file->private_data;
 
-	bpf_map_inc_with_uref(map);
-
-	if (vma->vm_flags & VM_WRITE) {
+	if (vma->vm_flags & VM_MAYWRITE) {
 		mutex_lock(&map->freeze_mutex);
 		map->writecnt++;
 		mutex_unlock(&map->freeze_mutex);
@@ -600,13 +598,11 @@ static void bpf_map_mmap_close(struct vm_area_struct *vma)
 {
 	struct bpf_map *map = vma->vm_file->private_data;
 
-	if (vma->vm_flags & VM_WRITE) {
+	if (vma->vm_flags & VM_MAYWRITE) {
 		mutex_lock(&map->freeze_mutex);
 		map->writecnt--;
 		mutex_unlock(&map->freeze_mutex);
 	}
-
-	bpf_map_put_with_uref(map);
 }
 
 static const struct vm_operations_struct bpf_map_default_vmops = {
@@ -635,14 +631,16 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
 	/* set default open/close callbacks */
 	vma->vm_ops = &bpf_map_default_vmops;
 	vma->vm_private_data = map;
+	vma->vm_flags &= ~VM_MAYEXEC;
+	if (!(vma->vm_flags & VM_WRITE))
+		/* disallow re-mapping with PROT_WRITE */
+		vma->vm_flags &= ~VM_MAYWRITE;
 
 	err = map->ops->map_mmap(map, vma);
 	if (err)
 		goto out;
 
-	bpf_map_inc_with_uref(map);
-
-	if (vma->vm_flags & VM_WRITE)
+	if (vma->vm_flags & VM_MAYWRITE)
 		map->writecnt++;
 out:
 	mutex_unlock(&map->freeze_mutex);
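Again for illustration only (not part of the patch): the writecnt accounting that the hunks above switch from VM_WRITE to VM_MAYWRITE exists so that BPF_MAP_FREEZE can refuse to freeze a map while a writable mapping is still live. A hedged sketch, reusing the headers and the hypothetical create_mmapable_array() helper from the previous example:

/*
 * Illustrative sketch only, not part of the patch; "fd" is the mmapable
 * array's file descriptor and error handling is elided.
 */
static void freeze_vs_writable_mapping(int fd)
{
	union bpf_attr attr;
	void *wp = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;

	/* A live writable mapping keeps writecnt > 0, so freezing is refused. */
	if (syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr)))
		perror("BPF_MAP_FREEZE with writable mapping (expected to fail)");

	munmap(wp, 4096);

	/*
	 * With writecnt back at zero the freeze succeeds; any later attempt
	 * to mmap() the frozen map writable is rejected by the kernel.
	 */
	syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr));
}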