--- 2.4.0-test10-pre4/include/linux/mm.h.~1~	Fri Oct 20 17:56:09 2000
+++ 2.4.0-test10-pre4/include/linux/mm.h	Fri Oct 20 18:23:47 2000
@@ -95,6 +95,7 @@
 
 #define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
 #define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
+#define VM_RESERVED	0x00080000	/* Don't unmap it from swap_out */
 
 #define VM_STACK_FLAGS	0x00000177
 
--- 2.4.0-test10-pre4/include/linux/wrapper.h.~1~	Tue Aug  8 06:01:36 2000
+++ 2.4.0-test10-pre4/include/linux/wrapper.h	Fri Oct 20 18:26:02 2000
@@ -29,8 +29,17 @@
 #define vma_get_end(v) v->vm_end
 #define vma_get_page_prot(v) v->vm_page_prot
 
+/*
+ * mem_map_reserve()/unreserve() are going to be obsoleted by
+ * vma_reserve(). (unreserve shouldn't be necessary)
+ *
+ * Instead of marking the pages as reserved, just mark the vma as reserved;
+ * this will improve performance (it's zero cost, unlike the PG_reserved check)
+ * and it will be trivial for not physically contiguous mappings too.
+ */
 #define mem_map_reserve(p) set_bit(PG_reserved, &p->flags)
 #define mem_map_unreserve(p) clear_bit(PG_reserved, &p->flags)
 #define mem_map_inc_count(p) atomic_inc(&(p->count))
 #define mem_map_dec_count(p) atomic_dec(&(p->count))
+#define vma_reserve(vma) ((vma)->vm_flags |= VM_RESERVED)
 #endif
--- 2.4.0-test10-pre4/mm/vmscan.c.~1~	Fri Oct 20 17:56:10 2000
+++ 2.4.0-test10-pre4/mm/vmscan.c	Fri Oct 20 18:11:09 2000
@@ -318,7 +318,7 @@
 	unsigned long end;
 
 	/* Don't swap out areas which are locked down */
-	if (vma->vm_flags & VM_LOCKED)
+	if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
 		return 0;
 
 	pgdir = pgd_offset(mm, address);