From: David Howells <dhowells@redhat.com>

The attached patch fixes a number of problems in the VM routines:

 (1) Some inline functions (in linux/mm.h) don't compile if CONFIG_MMU is not
     set.

 (2) swapper_pml4 needn't exist if CONFIG_MMU is not set.

 (3) __free_pages_ok() doesn't compensate for set_page_refs()'s different
     behaviour when CONFIG_MMU is not set (see the sketch below).

 (4) swsusp.c invokes TLB flushing functions without including the header file
     that declares them.
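
For context on item (3), here is a minimal sketch of the reference-counting
pairing involved when CONFIG_MMU is not set. The helper names below are
invented purely for illustration; the real code lives in set_page_refs() and
__free_pages_ok() as changed by this patch:

	/* Allocation path: with !CONFIG_MMU, every page of the order gets its
	 * own reference, so a get/put on any page in the block (eg from
	 * access_process_vm()) cannot free it prematurely.
	 */
	static void set_page_refs_nommu(struct page *page, int order)
	{
		int i;

		for (i = 0; i < (1 << order); i++)
			set_page_count(page + i, 1);
	}

	/* Free path: the extra references taken on the tail pages must be
	 * dropped again before the block is handed back to the buddy
	 * allocator, otherwise the free-time count checks trip.
	 */
	static void drop_tail_refs_nommu(struct page *page, int order)
	{
		int i;

		if (order > 0)
			for (i = 1; i < (1 << order); ++i)
				__put_page(page + i);
	}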

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/include/linux/mm.h    |   10 ++++++++++
 25-akpm/init/Kconfig          |    5 +++--
 25-akpm/kernel/power/swsusp.c |    1 +
 25-akpm/kernel/sysctl.c       |    4 ++++
 25-akpm/mm/Makefile           |    4 ++--
 25-akpm/mm/bootmem.c          |   10 ++++++----
 25-akpm/mm/internal.h         |   13 +++++++++++++
 25-akpm/mm/page_alloc.c       |   13 +++++++++++--
 25-akpm/mm/tiny-shmem.c       |    2 ++
 9 files changed, 52 insertions(+), 10 deletions(-)

diff -puN include/linux/mm.h~vm-routine-fixes include/linux/mm.h
--- 25/include/linux/mm.h~vm-routine-fixes	2004-12-03 20:53:57.586369888 -0800
+++ 25-akpm/include/linux/mm.h	2004-12-03 20:53:57.602367456 -0800
@@ -37,6 +37,10 @@ extern int sysctl_legacy_va_layout;
 #include <asm/processor.h>
 #include <asm/atomic.h>
 
+#ifndef CONFIG_MMU
+#define swapper_pml4 NULL
+#endif
+
 #ifndef MM_VM_SIZE
 #define MM_VM_SIZE(mm)	TASK_SIZE
 #endif
@@ -641,6 +645,7 @@ extern void remove_shrinker(struct shrin
  * inlining and the symmetry break with pte_alloc_map() that does all
  * of this out-of-line.
  */
+#ifdef CONFIG_MMU
 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 {
 	if (pgd_none(*pgd))
@@ -663,6 +668,7 @@ static inline pgd_t *pgd_alloc_k(struct 
 		return __pgd_alloc(mm, pml4, address);
 	return pml4_pgd_offset_k(pml4, address);
 }
+#endif
 
 extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, pg_data_t *pgdat,
@@ -685,12 +691,14 @@ struct vm_area_struct *vma_prio_tree_nex
 	for (prio_tree_iter_init(iter, root, begin, end), vma = NULL;	\
 		(vma = vma_prio_tree_next(vma, iter)); )
 
+#ifdef CONFIG_MMU
 static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
 					struct list_head *list)
 {
 	vma->shared.vm_set.parent = NULL;
 	list_add_tail(&vma->shared.vm_set.list, list);
 }
+#endif
 
 /* mmap.c */
 extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
@@ -810,6 +818,7 @@ static inline void __vm_stat_account(str
 }
 #endif /* CONFIG_PROC_FS */
 
+#ifdef CONFIG_MMU
 static inline void vm_stat_account(struct vm_area_struct *vma)
 {
 	__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
@@ -821,6 +830,7 @@ static inline void vm_stat_unaccount(str
 	__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
 							-vma_pages(vma));
 }
+#endif
 
 #ifndef CONFIG_DEBUG_PAGEALLOC
 static inline void
diff -puN init/Kconfig~vm-routine-fixes init/Kconfig
--- 25/init/Kconfig~vm-routine-fixes	2004-12-03 20:53:57.587369736 -0800
+++ 25-akpm/init/Kconfig	2004-12-03 20:53:57.603367304 -0800
@@ -316,8 +316,9 @@ config CC_OPTIMIZE_FOR_SIZE
 	  If unsure, say N.
 
 config SHMEM
-	default y
-	bool "Use full shmem filesystem" if EMBEDDED && MMU
+	bool "Use full shmem filesystem"
+	default y if EMBEDDED
+	depends on MMU
 	help
 	  The shmem is an internal filesystem used to manage shared memory.
 	  It is backed by swap and manages resource limits. It is also exported
diff -puN kernel/power/swsusp.c~vm-routine-fixes kernel/power/swsusp.c
--- 25/kernel/power/swsusp.c~vm-routine-fixes	2004-12-03 20:53:57.589369432 -0800
+++ 25-akpm/kernel/power/swsusp.c	2004-12-03 20:53:57.604367152 -0800
@@ -67,6 +67,7 @@
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 #include <asm/io.h>
 
 #include "power.h"
diff -puN kernel/sysctl.c~vm-routine-fixes kernel/sysctl.c
--- 25/kernel/sysctl.c~vm-routine-fixes	2004-12-03 20:53:57.591369128 -0800
+++ 25-akpm/kernel/sysctl.c	2004-12-03 20:53:57.605367000 -0800
@@ -755,6 +755,7 @@ static ctl_table vm_table[] = {
 		.strategy	= &sysctl_intvec,
 		.extra1		= &zero,
 	},
+#ifdef CONFIG_MMU
 	{
 		.ctl_name	= VM_MAX_MAP_COUNT,
 		.procname	= "max_map_count",
@@ -763,6 +764,7 @@ static ctl_table vm_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec
 	},
+#endif
 	{
 		.ctl_name	= VM_LAPTOP_MODE,
 		.procname	= "laptop_mode",
@@ -904,6 +906,7 @@ static ctl_table fs_table[] = {
 		.proc_handler	= &proc_dointvec,
 	},
 #endif
+#ifdef CONFIG_MMU
 	{
 		.ctl_name	= FS_LEASE_TIME,
 		.procname	= "lease-break-time",
@@ -928,6 +931,7 @@ static ctl_table fs_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 	},
+#endif
 	{ .ctl_name = 0 }
 };
 
diff -puN mm/bootmem.c~vm-routine-fixes mm/bootmem.c
--- 25/mm/bootmem.c~vm-routine-fixes	2004-12-03 20:53:57.592368976 -0800
+++ 25-akpm/mm/bootmem.c	2004-12-03 20:53:57.606366848 -0800
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <asm/dma.h>
 #include <asm/io.h>
+#include "internal.h"
 
 /*
  * Access to this subsystem has to be serialized externally. (this is
@@ -275,17 +276,18 @@ static unsigned long __init free_all_boo
 	for (i = 0; i < idx; ) {
 		unsigned long v = ~map[i / BITS_PER_LONG];
 		if (gofast && v == ~0UL) {
-			int j;
+			int j, order;
 
 			count += BITS_PER_LONG;
 			__ClearPageReserved(page);
-			set_page_count(page, 1);
+			order = ffs(BITS_PER_LONG) - 1;
+			set_page_refs(page, order);
 			for (j = 1; j < BITS_PER_LONG; j++) {
 				if (j + 16 < BITS_PER_LONG)
 					prefetchw(page + j + 16);
 				__ClearPageReserved(page + j);
 			}
-			__free_pages(page, ffs(BITS_PER_LONG)-1);
+			__free_pages(page, order);
 			i += BITS_PER_LONG;
 			page += BITS_PER_LONG;
 		} else if (v) {
@@ -294,7 +296,7 @@ static unsigned long __init free_all_boo
 				if (v & m) {
 					count++;
 					__ClearPageReserved(page);
-					set_page_count(page, 1);
+					set_page_refs(page, 0);
 					__free_page(page);
 				}
 			}
diff -puN /dev/null mm/internal.h
--- /dev/null	2003-09-15 06:40:47.000000000 -0700
+++ 25-akpm/mm/internal.h	2004-12-03 20:53:57.606366848 -0800
@@ -0,0 +1,13 @@
+/* internal.h: mm/ internal definitions
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* page_alloc.c */
+extern void set_page_refs(struct page *page, int order);
diff -puN mm/Makefile~vm-routine-fixes mm/Makefile
--- 25/mm/Makefile~vm-routine-fixes	2004-12-03 20:53:57.594368672 -0800
+++ 25-akpm/mm/Makefile	2004-12-03 20:53:57.607366696 -0800
@@ -5,10 +5,10 @@
 mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= fremap.o highmem.o madvise.o memory.o mincore.o \
 			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
-			   vmalloc.o
+			   vmalloc.o prio_tree.o
 
 obj-y			:= bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
-			   page_alloc.o page-writeback.o pdflush.o prio_tree.o \
+			   page_alloc.o page-writeback.o pdflush.o \
 			   readahead.o slab.o swap.o truncate.o vmscan.o \
 			   $(mmu-y)
 
diff -puN mm/page_alloc.c~vm-routine-fixes mm/page_alloc.c
--- 25/mm/page_alloc.c~vm-routine-fixes	2004-12-03 20:53:57.595368520 -0800
+++ 25-akpm/mm/page_alloc.c	2004-12-03 20:53:57.608366544 -0800
@@ -34,6 +34,7 @@
 #include <linux/nodemask.h>
 
 #include <asm/tlbflush.h>
+#include "internal.h"
 
 nodemask_t node_online_map = NODE_MASK_NONE;
 nodemask_t node_possible_map = NODE_MASK_ALL;
@@ -283,6 +284,13 @@ void __free_pages_ok(struct page *page, 
 	arch_free_page(page, order);
 
 	mod_page_state(pgfree, 1 << order);
+
+#ifndef CONFIG_MMU
+	if (order > 0)
+		for (i = 1 ; i < (1 << order) ; ++i)
+			__put_page(page + i);
+#endif
+
 	for (i = 0 ; i < (1 << order) ; ++i)
 		free_pages_check(__FUNCTION__, page + i);
 	list_add(&page->lru, &list);
@@ -325,7 +333,7 @@ expand(struct zone *zone, struct page *p
 	return page;
 }
 
-static inline void set_page_refs(struct page *page, int order)
+void set_page_refs(struct page *page, int order)
 {
 #ifdef CONFIG_MMU
 	set_page_count(page, 1);
@@ -335,9 +343,10 @@ static inline void set_page_refs(struct 
 	/*
 	 * We need to reference all the pages for this order, otherwise if
 	 * anyone accesses one of the pages with (get/put) it will be freed.
+	 * - eg: access_process_vm()
 	 */
 	for (i = 0; i < (1 << order); i++)
-		set_page_count(page+i, 1);
+		set_page_count(page + i, 1);
 #endif /* CONFIG_MMU */
 }
 
diff -puN mm/tiny-shmem.c~vm-routine-fixes mm/tiny-shmem.c
--- 25/mm/tiny-shmem.c~vm-routine-fixes	2004-12-03 20:53:57.597368216 -0800
+++ 25-akpm/mm/tiny-shmem.c	2004-12-03 20:53:57.609366392 -0800
@@ -112,7 +112,9 @@ int shmem_zero_setup(struct vm_area_stru
 	if (vma->vm_file)
 		fput(vma->vm_file);
 	vma->vm_file = file;
+#ifdef CONFIG_MMU
 	vma->vm_ops = &generic_file_vm_ops;
+#endif
 	return 0;
 }
 
_