author    Alistair Popple <apopple@nvidia.com>    2021-11-05 13:45:00 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2021-11-06 13:30:43 -0700
commit    3d88705c10677d1fc3f14786361aee649839aa7e (patch)
tree      7a77dd189e581520d222b90f1a77cef3986554d5 /mm/rmap.c
parent    32befe9e27859b87e70e0aba9b60bfb8000d9a66 (diff)
mm/rmap.c: avoid double faults migrating device private pages
During migration special page table entries are installed for each page
being migrated. These entries store the pfn and associated permissions
of ptes mapping the page being migrated.

Device-private pages use special swap pte entries to distinguish
read-only vs. writeable pages which the migration code checks when
creating migration entries. Normally this follows a fast path in
migrate_vma_collect_pmd() which correctly copies the permissions of
device-private pages over to migration entries when migrating pages
back to the CPU.

However the slow path falls back to using try_to_migrate() which
unconditionally creates read-only migration entries for device-private
pages. This leads to unnecessary double faults on the CPU as the new
pages are always mapped read-only even when they could be mapped
writeable. Fix this by correctly copying device-private permissions in
try_to_migrate_one().

Link: https://lkml.kernel.org/r/20211018045247.3128058-1-apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Reported-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
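For reference, the fast path mentioned above already carries the write
permission of the device-private swap pte into the migrate pfn flags.
Below is a minimal sketch of that logic, modelled on the v5.15-era
migrate_vma_collect_pmd() in mm/migrate.c; the wrapper name
collect_device_private() is hypothetical and locking/error handling is
omitted:

/*
 * Minimal sketch (not verbatim kernel code): propagate the write
 * permission recorded in a device-private swap pte into the migrate
 * pfn flags, so the destination page can later be mapped writeable
 * without taking a second fault.
 */
static unsigned long collect_device_private(pte_t pte)
{
	swp_entry_t entry = pte_to_swp_entry(pte);
	struct page *page = pfn_swap_entry_to_page(entry);
	unsigned long mpfn;

	/* Mark this pfn as a candidate for migration. */
	mpfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;

	/* The permission check the slow path was missing. */
	if (is_writable_device_private_entry(entry))
		mpfn |= MIGRATE_PFN_WRITE;

	return mpfn;
}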
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 6aebd17472512..d65a74e140f95 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1807,6 +1807,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
 		update_hiwater_rss(mm);
 
 		if (is_zone_device_page(page)) {
+			unsigned long pfn = page_to_pfn(page);
 			swp_entry_t entry;
 			pte_t swp_pte;
 
@@ -1815,8 +1816,11 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
 			 * pte. do_swap_page() will wait until the migration
 			 * pte is removed and then restart fault handling.
 			 */
-			entry = make_readable_migration_entry(
-							page_to_pfn(page));
+			entry = pte_to_swp_entry(pteval);
+			if (is_writable_device_private_entry(entry))
+				entry = make_writable_migration_entry(pfn);
+			else
+				entry = make_readable_migration_entry(pfn);
 			swp_pte = swp_entry_to_pte(entry);
 
 			/*