aboutsummaryrefslogtreecommitdiffstats
path: root/mm/hmm.c
diff options
context:
space:
mode:
authorPhilip Yang <Philip.Yang@amd.com>2019-05-23 16:32:31 -0400
committerJason Gunthorpe <jgg@mellanox.com>2019-06-06 16:31:41 -0300
commit789c2af88f24d1db983aae49b5c4561e6e02ff5b (patch)
treef48479dc50289b0380b239bcc60e0fcd216bc559 /mm/hmm.c
parent085ea25064a9169eba5f2ed6484c111ab0f3ee79 (diff)
downloadlinux-789c2af88f24d1db983aae49b5c4561e6e02ff5b.tar.gz
mm/hmm: support automatic NUMA balancing
While a page is being migrated by NUMA balancing, HMM fails to detect this condition and still returns the old page. The application will use the newly migrated page, but the driver passes the old page's physical address to the GPU; this crashes the application later. Use pte_protnone(pte) to detect this condition, so that hmm_vma_do_fault will allocate a new page. Signed-off-by: Philip Yang <Philip.Yang@amd.com> Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com> Reviewed-by: Jérôme Glisse <jglisse@redhat.com> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Diffstat (limited to 'mm/hmm.c')
-rw-r--r--mm/hmm.c2
1 files changed, 1 insertions, 1 deletions
diff --git a/mm/hmm.c b/mm/hmm.c
index 4db5dcf110ba6..dce4e70e648a9 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -548,7 +548,7 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
- if (pte_none(pte) || !pte_present(pte))
+ if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
return 0;
return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
range->flags[HMM_PFN_WRITE] :