Commit 2d28e01d authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "2 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  hugetlbfs: fix races and page leaks during migration
  kasan: turn off asan-stack for clang-8 and earlier
parents 6357c812 cb6acd01
@@ -859,6 +859,18 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
 	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
+
+	/*
+	 * page_private is subpool pointer in hugetlb pages.  Transfer to
+	 * new page.  PagePrivate is not associated with page_private for
+	 * hugetlb pages and can not be set here as only page_huge_active
+	 * pages can be migrated.
+	 */
+	if (page_private(page)) {
+		set_page_private(newpage, page_private(page));
+		set_page_private(page, 0);
+	}
+
 	if (mode != MIGRATE_SYNC_NO_COPY)
 		migrate_page_copy(newpage, page);
 	else
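For readers outside mm/: hugetlb pages stash their subpool pointer (per-mount usage accounting) in page_private, and migration must hand that pointer to the new page, or the count is released twice or not at all when the pages are freed. A minimal userspace sketch of the hand-off; the struct and helpers are stand-ins, not the kernel API:

#include <assert.h>

/* Toy stand-ins for struct page and the page_private accessors. */
struct subpool { long used; };
struct page { unsigned long private; };

static unsigned long page_private(struct page *p) { return p->private; }
static void set_page_private(struct page *p, unsigned long v) { p->private = v; }

/* Mirrors the hunk above: move the subpool reference, never copy it. */
static void transfer_subpool(struct page *oldp, struct page *newp)
{
	if (page_private(oldp)) {
		set_page_private(newp, page_private(oldp));
		set_page_private(oldp, 0);	/* old page no longer owns it */
	}
}

int main(void)
{
	struct subpool sp = { .used = 1 };
	struct page oldp = { .private = (unsigned long)&sp };
	struct page newp = { 0 };

	transfer_subpool(&oldp, &newp);
	assert(page_private(&newp) == (unsigned long)&sp);
	assert(page_private(&oldp) == 0);	/* freeing oldp can't double-release */
	return 0;
}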
@@ -113,6 +113,28 @@ config KASAN_INLINE
 endchoice
 
+config KASAN_STACK_ENABLE
+	bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
+	default !(CLANG_VERSION < 90000)
+	depends on KASAN
+	help
+	  The LLVM stack address sanitizer has a known problem that
+	  causes excessive stack usage in a lot of functions, see
+	  https://bugs.llvm.org/show_bug.cgi?id=38809
+	  Disabling asan-stack makes it safe to run kernels built
+	  with clang-8 with KASAN enabled, though it loses some of
+	  the functionality.
+	  This feature is always disabled when compile-testing with clang-8
+	  or earlier to avoid cluttering the output with stack overflow
+	  warnings, but clang-8 users can still enable it for builds without
+	  CONFIG_COMPILE_TEST.  On gcc and later clang versions it is
+	  assumed to always be safe to use and enabled by default.
+
+config KASAN_STACK
+	int
+	default 1 if KASAN_STACK_ENABLE || CC_IS_GCC
+	default 0
+
 config KASAN_S390_4_LEVEL_PAGING
 	bool "KASan: use 4-level paging"
 	depends on KASAN && S390
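The stack bloat behind the llvm.org bug is easy to picture: with asan-stack enabled, every addressable local gets its own redzone-padded slot, so a frame that would be a few hundred bytes can blow past the kernel's frame-size warning limit. A rough userspace illustration (the threshold of 512 is illustrative, not measured; compile with and without -fsanitize=address and compare the warnings):

#include <stdio.h>

/*
 * Each buffer below gets redzones under AddressSanitizer's stack
 * instrumentation, multiplying the frame size.  Built with
 *   clang -fsanitize=address -Wframe-larger-than=512 demo.c
 * this function may trip the frame-size warning; without ASan
 * the frame is only 8 * 32 = 256 bytes of buffers.
 */
static void many_locals(const char *src)
{
	char a[32], b[32], c[32], d[32];
	char e[32], f[32], g[32], h[32];

	/* Touch every buffer so the compiler cannot elide the slots. */
	snprintf(a, sizeof(a), "%s", src);
	snprintf(b, sizeof(b), "%s", a);
	snprintf(c, sizeof(c), "%s", b);
	snprintf(d, sizeof(d), "%s", c);
	snprintf(e, sizeof(e), "%s", d);
	snprintf(f, sizeof(f), "%s", e);
	snprintf(g, sizeof(g), "%s", f);
	snprintf(h, sizeof(h), "%s", g);
	puts(h);
}

int main(void)
{
	many_locals("hello");
	return 0;
}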
@@ -3624,7 +3624,6 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	copy_user_huge_page(new_page, old_page, address, vma,
 			    pages_per_huge_page(h));
 	__SetPageUptodate(new_page);
-	set_page_huge_active(new_page);
 
 	mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h));
 	mmu_notifier_invalidate_range_start(&range);
@@ -3645,6 +3644,7 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 				make_huge_pte(vma, new_page, 1));
 		page_remove_rmap(old_page, true);
 		hugepage_add_new_anon_rmap(new_page, vma, haddr);
+		set_page_huge_active(new_page);
 		/* Make the old page be freed below */
 		new_page = old_page;
 	}
@@ -3729,6 +3729,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	pte_t new_pte;
 	spinlock_t *ptl;
 	unsigned long haddr = address & huge_page_mask(h);
+	bool new_page = false;
 
 	/*
 	 * Currently, we are forced to kill the process in the event the
@@ -3790,7 +3791,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 		}
 		clear_huge_page(page, address, pages_per_huge_page(h));
 		__SetPageUptodate(page);
-		set_page_huge_active(page);
+		new_page = true;
 
 		if (vma->vm_flags & VM_MAYSHARE) {
 			int err = huge_add_to_page_cache(page, mapping, idx);
@@ -3861,6 +3862,15 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	}
 
 	spin_unlock(ptl);
+
+	/*
+	 * Only make newly allocated pages active.  Existing pages found
+	 * in the pagecache could be !page_huge_active() if they have been
+	 * isolated for migration.
+	 */
+	if (new_page)
+		set_page_huge_active(page);
+
 	unlock_page(page);
 out:
 	return ret;
@@ -4095,7 +4105,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	 * the set_pte_at() write.
 	 */
 	__SetPageUptodate(page);
-	set_page_huge_active(page);
 
 	mapping = dst_vma->vm_file->f_mapping;
 	idx = vma_hugecache_offset(h, dst_vma, dst_addr);
@@ -4163,6 +4172,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
 
 	spin_unlock(ptl);
+	set_page_huge_active(page);
 	if (vm_shared)
 		unlock_page(page);
 	ret = 0;
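All three fault paths above move set_page_huge_active() from right after __SetPageUptodate() to after the page table entry is in place. The flag marks a page as eligible for isolation and migration, so setting it early let a concurrent migration grab a page the fault handler was still initializing. A toy C11 model of the fixed ordering; the names mirror the kernel concepts but none of this is the kernel API:

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

/* Toy model of a hugetlb page under construction. */
struct hpage {
	atomic_bool mapped;	/* "rmap and PTE are in place" */
	atomic_bool active;	/* models page_huge_active() */
};

static struct hpage page;

/* Fault path after the fix: publish the mapping first, activate last. */
static void *fault_path(void *arg)
{
	(void)arg;
	atomic_store(&page.mapped, true);	/* rmap + set_huge_pte_at() */
	atomic_store(&page.active, true);	/* set_page_huge_active() last */
	return NULL;
}

/* Migration path: may only isolate active pages, and the store order
 * above guarantees that active implies fully mapped. */
static void *migrate_path(void *arg)
{
	(void)arg;
	if (atomic_load(&page.active))
		assert(atomic_load(&page.mapped));	/* never half-initialized */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, fault_path, NULL);
	pthread_create(&b, NULL, migrate_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

With the old order (activate before publish), the assert in migrate_path() could fire. hugetlb_no_page() additionally gates activation on the new bool because the page reaching that point may be an existing pagecache page that a racing isolation has deliberately made !page_huge_active(), and re-activating it would corrupt the migration state.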
@@ -1315,6 +1315,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		lock_page(hpage);
 	}
 
+	/*
+	 * Check for pages which are in the process of being freed.  Without
+	 * page_mapping() set, hugetlbfs specific move page routine will not
+	 * be called and we could leak usage counts for subpools.
+	 */
+	if (page_private(hpage) && !page_mapping(hpage)) {
+		rc = -EBUSY;
+		goto out_unlock;
+	}
+
 	if (PageAnon(hpage))
 		anon_vma = page_get_anon_vma(hpage);
 
@@ -1345,6 +1355,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		put_new_page = NULL;
 	}
 
+out_unlock:
 	unlock_page(hpage);
 out:
 	if (rc != -EAGAIN)
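The -EBUSY check closes a narrow window during hugetlb page freeing: the page's mapping is cleared before the subpool reference held in page_private is released. If migration ran in that window, the hugetlbfs-specific move routine (which does the page_private transfer shown earlier) would never be called, leaking the subpool usage count. A toy predicate capturing the test, with stand-in types rather than the kernel API:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Stand-in: a hugetlb page being freed has its mapping cleared first,
 * while page_private (the subpool reference) is released later. */
struct page {
	void *mapping;
	unsigned long private;
};

/* Mirrors the hunk above: a subpool reference with no mapping means
 * the page is mid-free; migrating it now would leak the reference. */
static bool hugepage_mid_free(const struct page *p)
{
	return p->private && !p->mapping;
}

int main(void)
{
	struct page freeing = { .mapping = NULL,        .private = 0xdead };
	struct page live    = { .mapping = (void *)0x1, .private = 0xdead };

	assert(hugepage_mid_free(&freeing));	/* migration returns -EBUSY */
	assert(!hugepage_mid_free(&live));	/* safe to migrate */
	return 0;
}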
@@ -26,7 +26,7 @@ else
 CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
 	$(call cc-param,asan-globals=1) \
 	$(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
-	$(call cc-param,asan-stack=1) \
+	$(call cc-param,asan-stack=$(CONFIG_KASAN_STACK)) \
 	$(call cc-param,asan-use-after-scope=1) \
 	$(call cc-param,asan-instrument-allocas=1)
 endif
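This is presumably why the new CONFIG_KASAN_STACK symbol is an int rather than a bool: an int with a default always has a value, so the substitution here yields asan-stack=1 or asan-stack=0, whereas a disabled bool symbol would be undefined in the Makefile environment and the parameter would expand to an empty, invalid value.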