--- 2.2.18pre15/include/linux/mm.h.~1~ Fri Oct 20 01:35:46 2000 +++ 2.2.18pre15/include/linux/mm.h Fri Oct 20 20:55:37 2000 @@ -81,6 +81,8 @@ #define VM_LOCKED 0x2000 #define VM_IO 0x4000 /* Memory mapped I/O or similar */ +#define VM_RESERVED 0x8000 /* Don't unmap it from swap_out */ + #define VM_STACK_FLAGS 0x0177 /* --- 2.2.18pre15/include/linux/wrapper.h.~1~ Tue Feb 1 18:24:19 2000 +++ 2.2.18pre15/include/linux/wrapper.h Fri Oct 20 20:56:52 2000 @@ -33,6 +33,14 @@ #define vma_get_end(v) v->vm_end #define vma_get_page_prot(v) v->vm_page_prot +/* + * mem_map_reserve()/unreserve() are going to be obsoleted by + * setting the VM_RESERVED in vma->vm_flags. + * + * Instead of marking the pages as reserved, just mark the vma as reserved + * this will improve performance (it's zero cost unlike the PG_reserved check) + * and it will be trivial for not physically contiguous mappings too. + */ #define mem_map_reserve(p) set_bit(PG_reserved, &mem_map[p].flags) #define mem_map_unreserve(p) clear_bit(PG_reserved, &mem_map[p].flags) #define mem_map_inc_count(p) atomic_inc(&(mem_map[p].count)) --- 2.2.18pre15/mm/vmscan.c.~1~ Tue Sep 5 02:28:50 2000 +++ 2.2.18pre15/mm/vmscan.c Fri Oct 20 20:57:19 2000 @@ -251,7 +251,7 @@ unsigned long end; /* Don't swap out areas which are locked down */ - if (vma->vm_flags & VM_LOCKED) + if (vma->vm_flags & (VM_LOCKED|VM_RESERVED)) return 0; pgdir = pgd_offset(tsk->mm, address);