From: Roland McGrath

Under some circumstances, ptrace PEEK/POKE_TEXT can cause page permissions
to be permanently changed.  This causes changes in application behaviour
when run under gdb.  Fix that by only marking the pte as writeable if the
vma is marked for writing.  A write fault thus unshares the page but
doesn't necessarily make it writeable.  (An illustrative userspace sketch
of the visible behaviour follows the patch.)

---

 25-akpm/mm/memory.c |   38 ++++++++++++++++++++++++++++++++------
 1 files changed, 32 insertions(+), 6 deletions(-)

diff -puN mm/memory.c~ptrace-page-permission-fix mm/memory.c
--- 25/mm/memory.c~ptrace-page-permission-fix	Fri Feb  6 11:11:45 2004
+++ 25-akpm/mm/memory.c	Fri Feb  6 11:11:45 2004
@@ -752,7 +752,8 @@ int get_user_pages(struct task_struct *t
 	spin_lock(&mm->page_table_lock);
 	do {
 		struct page *map;
-		while (!(map = follow_page(mm, start, write))) {
+		int lookup_write = write;
+		while (!(map = follow_page(mm, start, lookup_write))) {
 			spin_unlock(&mm->page_table_lock);
 			switch (handle_mm_fault(mm,vma,start,write)) {
 			case VM_FAULT_MINOR:
@@ -768,6 +769,14 @@ int get_user_pages(struct task_struct *t
 			default:
 				BUG();
 			}
+			/*
+			 * Now that we have performed a write fault
+			 * and surely no longer have a shared page we
+			 * shouldn't write, we shouldn't ignore an
+			 * unwritable page in the page table if
+			 * we are forcing write access.
+			 */
+			lookup_write = write && !force;
 			spin_lock(&mm->page_table_lock);
 		}
 		if (pages) {
@@ -957,6 +966,19 @@ int remap_page_range(struct vm_area_stru
 EXPORT_SYMBOL(remap_page_range);
 
 /*
+ * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
+ * servicing faults for write access.  In the normal case, do always want
+ * pte_mkwrite.  But get_user_pages can cause write faults for mappings
+ * that do not have writing enabled, when used by access_process_vm.
+ */
+static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+	if (likely(vma->vm_flags & VM_WRITE))
+		pte = pte_mkwrite(pte);
+	return pte;
+}
+
+/*
  * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
  */
 static inline void break_cow(struct vm_area_struct * vma, struct page * new_page, unsigned long address,
@@ -965,7 +987,8 @@ static inline void break_cow(struct vm_a
 	pte_t entry;
 
 	flush_cache_page(vma, address);
-	entry = pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)));
+	entry = maybe_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)),
+			      vma);
 	ptep_establish(vma, address, page_table, entry);
 	update_mmu_cache(vma, address, entry);
 }
@@ -1017,7 +1040,8 @@ static int do_wp_page(struct mm_struct *
 		unlock_page(old_page);
 		if (reuse) {
 			flush_cache_page(vma, address);
-			entry = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
+			entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(pte)),
+					      vma);
 			ptep_establish(vma, address, page_table, entry);
 			update_mmu_cache(vma, address, entry);
 			pte_unmap(page_table);
@@ -1285,7 +1309,7 @@ static int do_swap_page(struct mm_struct
 	mm->rss++;
 	pte = mk_pte(page, vma->vm_page_prot);
 	if (write_access && can_share_swap_page(page))
-		pte = pte_mkdirty(pte_mkwrite(pte));
+		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 	unlock_page(page);
 
 	flush_icache_page(vma, page);
@@ -1352,7 +1376,9 @@ do_anonymous_page(struct mm_struct *mm,
 			goto out;
 		}
 		mm->rss++;
-		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
+		entry = maybe_mkwrite(pte_mkdirty(mk_pte(page,
+							 vma->vm_page_prot)),
+				      vma);
 		lru_cache_add_active(page);
 		mark_page_accessed(page);
 	}
@@ -1468,7 +1494,7 @@ retry:
 		flush_icache_page(vma, new_page);
 		entry = mk_pte(new_page, vma->vm_page_prot);
 		if (write_access)
-			entry = pte_mkwrite(pte_mkdirty(entry));
+			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		set_pte(page_table, entry);
 		pte_chain = page_add_rmap(new_page, page_table, pte_chain);
 		pte_unmap(page_table);
_
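
The sketch below is not part of the patch; it is a minimal userspace test,
written for this write-up, that shows the behaviour the changelog describes.
The file name and all identifiers in it are illustrative.  The tracer uses
PTRACE_POKEDATA (equivalent to POKETEXT on Linux) to force a word into a
read-only private mapping of the tracee, which goes through
access_process_vm -> get_user_pages with force = 1 and so takes a write
fault on a vma without VM_WRITE.  The tracee then stores to the same page
itself: on a fixed kernel that store still raises SIGSEGV, whereas with the
bug the COW'd pte could be left writable and the store would silently
succeed.

/* test-ptrace-cow.c -- illustrative only, not part of the patch. */
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

int main(void)
{
	/* One anonymous page, readable but not writable, private. */
	long *page = mmap(NULL, getpagesize(), PROT_READ,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	pid_t pid;
	int status;

	if (page == MAP_FAILED)
		return 1;

	pid = fork();
	if (pid == 0) {
		/* Tracee: stop, let the tracer poke, then store directly. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		*page = 2;	/* must fault: the vma has no VM_WRITE */
		printf("BUG: direct store succeeded, pte was left writable\n");
		_exit(0);
	}

	/* Tracer: wait for the stop, then force a write like gdb does
	 * when planting a breakpoint in read-only text. */
	waitpid(pid, &status, 0);
	ptrace(PTRACE_POKEDATA, pid, page, (void *) 1L);
	ptrace(PTRACE_CONT, pid, NULL, NULL);

	waitpid(pid, &status, 0);
	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGSEGV)
		printf("OK: tracee's own store still faults\n");
	kill(pid, SIGKILL);
	return 0;
}

The point of the test is that the tracer's forced write must still land
(the page is unshared by the COW break), but the unshared page must not
become writable to the tracee, since its vma was never mapped for writing.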