From: "Martin J. Bligh"

This was to fix the issue you had with boehm-gc & 4/4 split.

 mm/usercopy.c |   40 +++++++++++++++++++++++++++-------------
 1 files changed, 27 insertions(+), 13 deletions(-)

diff -puN mm/usercopy.c~4g4g-locked-userspace-copy mm/usercopy.c
--- 25/mm/usercopy.c~4g4g-locked-userspace-copy	2003-12-25 00:36:06.000000000 -0800
+++ 25-akpm/mm/usercopy.c	2003-12-25 00:36:06.000000000 -0800
@@ -30,21 +30,22 @@ static inline struct page *pin_page(unsi
 	struct page *page = NULL;
 	int ret;
 
-	spin_lock(&mm->page_table_lock);
 	/*
 	 * Do a quick atomic lookup first - this is the fastpath.
 	 */
+retry:
 	page = follow_page(mm, addr, write);
 	if (likely(page != NULL)) {
 		if (!PageReserved(page))
 			get_page(page);
-		spin_unlock(&mm->page_table_lock);
 		return page;
 	}
 
 	/*
 	 * No luck - bad address or need to fault in the page:
 	 */
+
+	/* Release the lock so get_user_pages can sleep */
 	spin_unlock(&mm->page_table_lock);
 
 	/*
@@ -53,15 +54,23 @@ static inline struct page *pin_page(unsi
 	 * filemap_copy_from_user() to recover: drop its atomic kmap and use
 	 * a sleeping kmap instead.
 	 */
-	if (in_atomic())
+	if (in_atomic()) {
+		spin_lock(&mm->page_table_lock);
 		return NULL;
+	}
 
 	down_read(&mm->mmap_sem);
-	ret = get_user_pages(current, mm, addr, 1, write, 0, &page, NULL);
+	ret = get_user_pages(current, mm, addr, 1, write, 0, NULL, NULL);
 	up_read(&mm->mmap_sem);
+	spin_lock(&mm->page_table_lock);
+
 	if (ret <= 0)
 		return NULL;
-	return page;
+
+	/*
+	 * Go try the follow_page again.
+	 */
+	goto retry;
 }
 
 static inline void unpin_page(struct page *page)
@@ -76,9 +85,13 @@ static inline void unpin_page(struct pag
  */
 static int rw_vm(unsigned long addr, void *buf, int len, int write)
 {
+	struct mm_struct *mm = current->mm ? : &init_mm;
+
 	if (!len)
 		return 0;
 
+	spin_lock(&mm->page_table_lock);
+
 	/* ignore errors, just check how much was sucessfully transfered */
 	while (len) {
 		struct page *page = NULL;
@@ -126,6 +139,7 @@ static int rw_vm(unsigned long addr, voi
 		buf += bytes;
 		addr += bytes;
 	}
+	spin_unlock(&mm->page_table_lock);
 
 	return len;
 }
@@ -139,18 +153,18 @@ static int str_vm(unsigned long addr, vo
 	if (!len)
 		return len;
 
-	down_read(&mm->mmap_sem);
+	spin_lock(&mm->page_table_lock);
+
 	/* ignore errors, just check how much was sucessfully transfered */
 	while (len) {
-		int bytes, ret, offset, left, copied;
+		int bytes, offset, left, copied;
 		char *maddr;
 
-		ret = get_user_pages(current, mm, addr, 1, copy == 2, 0, &page, NULL);
-		if (ret <= 0) {
-			up_read(&mm->mmap_sem);
+		page = pin_page(addr, copy == 2);
+		if (!page) {
+			spin_unlock(&mm->page_table_lock);
 			return -EFAULT;
 		}
-
 		bytes = len;
 		offset = addr & (PAGE_SIZE-1);
 		if (bytes > PAGE_SIZE-offset)
@@ -170,14 +184,14 @@ static int str_vm(unsigned long addr, vo
 		}
 		BUG_ON(bytes < 0 || copied < 0);
 		kunmap_atomic(maddr, KM_USER_COPY);
-		page_cache_release(page);
+		unpin_page(page);
 		len -= copied;
 		buf += copied;
 		addr += copied;
 		if (left)
 			break;
 	}
-	up_read(&mm->mmap_sem);
+	spin_unlock(&mm->page_table_lock);
 
 	return len;
 }
_