author    Linus Torvalds <torvalds@linux-foundation.org>  2019-03-01 09:04:59 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-03-01 09:04:59 -0800
commit    2d28e01dca1a233468319517b4834783e1aaf517 (patch)
tree      d057513afe10e56091011848759becd127f2efad
parent    6357c8127bea35c52085a0ae0f97e73de2419825 (diff)
parent    cb6acd01e2e43fd8bad11155752b7699c3d0fb76 (diff)
download  linux-oxnas-2d28e01dca1a233468319517b4834783e1aaf517.tar.gz
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "2 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  hugetlbfs: fix races and page leaks during migration
  kasan: turn off asan-stack for clang-8 and earlier
-rw-r--r--  fs/hugetlbfs/inode.c    12
-rw-r--r--  lib/Kconfig.kasan       22
-rw-r--r--  mm/hugetlb.c            16
-rw-r--r--  mm/migrate.c            11
-rw-r--r--  scripts/Makefile.kasan   2
5 files changed, 59 insertions, 4 deletions
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 32920a10100e23..a7fa037b876b7f 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -859,6 +859,18 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
rc = migrate_huge_page_move_mapping(mapping, newpage, page);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
+
+ /*
+ * page_private is subpool pointer in hugetlb pages. Transfer to
+ * new page. PagePrivate is not associated with page_private for
+ * hugetlb pages and can not be set here as only page_huge_active
+ * pages can be migrated.
+ */
+ if (page_private(page)) {
+ set_page_private(newpage, page_private(page));
+ set_page_private(page, 0);
+ }
+
if (mode != MIGRATE_SYNC_NO_COPY)
migrate_page_copy(newpage, page);
else
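
The new comment above is the heart of the hugetlbfs fix: hugetlb pages keep their subpool pointer in page_private, and PagePrivate plays a separate role for them, so migration has to hand the value to the new page explicitly. A stand-alone sketch of the move semantics (the struct here is a toy stand-in, not the kernel's struct page):

	/* Toy model of the transfer above. The subpool reference must end
	 * up on exactly one page: copy it to the new page, then clear the
	 * old one so the reference cannot be released twice. */
	struct toy_page { unsigned long private; };

	static void transfer_private(struct toy_page *newpage, struct toy_page *page)
	{
		if (page->private) {
			newpage->private = page->private;
			page->private = 0;
		}
	}

Without the clearing step, both the old and new page would appear to own the subpool reference, the same double-accounting that the mm/migrate.c hunk below guards against from the other direction.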
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index d8c474b6691e92..9737059ec58bb2 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -113,6 +113,28 @@ config KASAN_INLINE
endchoice
+config KASAN_STACK_ENABLE
+ bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
+ default !(CLANG_VERSION < 90000)
+ depends on KASAN
+ help
+ The LLVM stack address sanitizer has a known problem that
+ causes excessive stack usage in a lot of functions, see
+ https://bugs.llvm.org/show_bug.cgi?id=38809
+ Disabling asan-stack makes it safe to run kernels built
+ with clang-8 with KASAN enabled, though it loses some of
+ the functionality.
+ This feature is always disabled when compile-testing with clang-8
+ or earlier to avoid cluttering the output with stack overflow
+ warnings, but clang-8 users can still enable it for builds without
+ CONFIG_COMPILE_TEST. On gcc and later clang versions it is
+ assumed to always be safe to use and enabled by default.
+
+config KASAN_STACK
+ int
+ default 1 if KASAN_STACK_ENABLE || CC_IS_GCC
+ default 0
+
config KASAN_S390_4_LEVEL_PAGING
bool "KASan: use 4-level paging"
depends on KASAN && S390
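
For context on why KASAN_STACK_ENABLE exists: asan-stack instrumentation wraps every addressable local variable in redzones, and the clang bug cited in the help text makes those frames grow far beyond the variables themselves. An illustrative example (not from the patch) of the kind of function affected:

	#include <string.h>

	char fill(char c)
	{
		char buf[64];	/* gets redzones when asan-stack=1 */

		memset(buf, c, sizeof(buf));
		return buf[0];
	}

With asan-stack=1 under an affected clang, a frame like this can need several times sizeof(buf) of stack; with asan-stack=0 it stays near 64 bytes, at the cost of not catching out-of-bounds accesses to buf.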
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index afef61656c1e1a..8dfdffc34a99bc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3624,7 +3624,6 @@ retry_avoidcopy:
copy_user_huge_page(new_page, old_page, address, vma,
pages_per_huge_page(h));
__SetPageUptodate(new_page);
- set_page_huge_active(new_page);
mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h));
mmu_notifier_invalidate_range_start(&range);
@@ -3645,6 +3644,7 @@ retry_avoidcopy:
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page, true);
hugepage_add_new_anon_rmap(new_page, vma, haddr);
+ set_page_huge_active(new_page);
/* Make the old page be freed below */
new_page = old_page;
}
@@ -3729,6 +3729,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
pte_t new_pte;
spinlock_t *ptl;
unsigned long haddr = address & huge_page_mask(h);
+ bool new_page = false;
/*
* Currently, we are forced to kill the process in the event the
@@ -3790,7 +3791,7 @@ retry:
}
clear_huge_page(page, address, pages_per_huge_page(h));
__SetPageUptodate(page);
- set_page_huge_active(page);
+ new_page = true;
if (vma->vm_flags & VM_MAYSHARE) {
int err = huge_add_to_page_cache(page, mapping, idx);
@@ -3861,6 +3862,15 @@ retry:
}
spin_unlock(ptl);
+
+ /*
+ * Only make newly allocated pages active. Existing pages found
+ * in the pagecache could be !page_huge_active() if they have been
+ * isolated for migration.
+ */
+ if (new_page)
+ set_page_huge_active(page);
+
unlock_page(page);
out:
return ret;
@@ -4095,7 +4105,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
* the set_pte_at() write.
*/
__SetPageUptodate(page);
- set_page_huge_active(page);
mapping = dst_vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, dst_vma, dst_addr);
@@ -4163,6 +4172,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
update_mmu_cache(dst_vma, dst_addr, dst_pte);
spin_unlock(ptl);
+ set_page_huge_active(page);
if (vm_shared)
unlock_page(page);
ret = 0;
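
All three mm/hugetlb.c hunks enforce the same ordering rule: set_page_huge_active() is what makes a page visible to isolate_huge_page(), so calling it right after __SetPageUptodate(), before the page is wired into a page table, lets migration isolate a half-initialized page. A condensed, compilable model of the corrected order (every helper here is a stand-in, not the kernel function named in its comment):

	struct page;

	static void prepare_contents(struct page *p)  { /* __SetPageUptodate() */ }
	static void install_pte(struct page *p)       { /* make_huge_pte() + page-table insert */ }
	static void mark_huge_active(struct page *p)  { /* set_page_huge_active() */ }

	static void fault_in_huge_page(struct page *p)
	{
		prepare_contents(p);
		install_pte(p);
		mark_huge_active(p);	/* last step: only now may migration isolate p */
	}

The hugetlb_no_page() hunk adds the new_page flag for the same reason: a page found in the page cache may already be isolated for migration, so only freshly allocated pages may be marked active.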
diff --git a/mm/migrate.c b/mm/migrate.c
index d4fd680be3b0fa..181f5d2718a9b1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1315,6 +1315,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
lock_page(hpage);
}
+ /*
+ * Check for pages which are in the process of being freed. Without
+ * page_mapping() set, hugetlbfs specific move page routine will not
+ * be called and we could leak usage counts for subpools.
+ */
+ if (page_private(hpage) && !page_mapping(hpage)) {
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+
if (PageAnon(hpage))
anon_vma = page_get_anon_vma(hpage);
@@ -1345,6 +1355,7 @@ put_anon:
put_new_page = NULL;
}
+out_unlock:
unlock_page(hpage);
out:
if (rc != -EAGAIN)
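
The migrate.c check pairs with the fs/hugetlbfs/inode.c change above: a hugetlb page that still carries a subpool pointer in page_private but has already lost its mapping is in the middle of being freed, and since the hugetlbfs migrate_page routine is only reached through the mapping, migrating such a page would leak the subpool usage count. The predicate in isolation (toy struct again, not the kernel's):

	#include <stdbool.h>

	struct toy_page { unsigned long private; void *mapping; };

	/* True for a huge page caught mid-free: it still owns a subpool
	 * reference but no longer has a mapping to migrate through. */
	static bool huge_page_mid_free(const struct toy_page *p)
	{
		return p->private && !p->mapping;
	}

Returning -EBUSY for such a page makes the caller back off instead of silently dropping the subpool transfer.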
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index 25c259df8ffab1..6deabedc67fc89 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -26,7 +26,7 @@ else
CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
$(call cc-param,asan-globals=1) \
$(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
- $(call cc-param,asan-stack=1) \
+ $(call cc-param,asan-stack=$(CONFIG_KASAN_STACK)) \
$(call cc-param,asan-use-after-scope=1) \
$(call cc-param,asan-instrument-allocas=1)
endif
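
Because CONFIG_KASAN_STACK is always defined to 0 or 1 by the two default lines in the Kconfig hunk, the substitution above always yields a concrete flag: assuming cc-param keeps its usual -mllvm/--param split in this Makefile, a clang-8 build ends up passing -mllvm -asan-stack=0, while gcc builds keep --param asan-stack=1.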