author     Linus Torvalds <torvalds@cc.helsinki.fi>   1993-11-29 14:21:10 +0200
committer  Nicolas Pitre <nico@cam.org>               2007-08-19 14:19:21 -0400
commit     75bb5836a8a8c0ee44ffd60a51f357b9568f1381 (patch)
tree       806df8b9b14f217f49d026d76bd26af4cd05cdfb /mm
parent     04cb41e1214fd968f1f50d1ecbf30839c41e3efa (diff)
download   archive-75bb5836a8a8c0ee44ffd60a51f357b9568f1381.tar.gz

Linux 0.99 patchlevel 14 (tag: v0.99-pl14)
Linux 0.99 patchlevel 14 is available on nic.funet.fi in the usual place (pub/OS/Linux/PEOPLE/Linus). There are no diffs relative to pl13, as too much has changed (the directory structure changed and the sound driver was added). Diffs relative to the last ALPHA version (13t) are in the "pl13-ALPHA's" subdirectory along with the actual ALPHA versions.

The changes relative to pl13t are rather minor: most of them are just more printf format fixes to make gcc-2.5.x happy (Chip Salzenberg), plus one very minor bugfix for a problem that made pl13t not notice the WP bit on a 486. It would seem to be a good idea to use gcc-2.5.x to compile the kernel, as that seems to fix at least one known bug in earlier gcc versions.

I hope that pl14 will be even more stable than pl13 has turned out to be; especially the networking code seems to have become much more dependable. Thanks Alan & co.

Changes relative to the last official release (pl13) are too numerous to mention (or even to remember), but they include NTP support, updated SCSI and networking drivers, >16MB swap area handling, added sound support, a read-only HPFS filesystem, and memory management cleanups (especially further cleanup of mmap()). Also, pl14 contains updated ext2fs code, along with minor fixes (especially concerning the time values) in other filesystems, and fixed unnamed/named pipe select() semantics.

The reorganizations include moving all device drivers to a subdirectory of their own (linux/drivers) and centralizing the major number handling (<linux/major.h>), which is possibly cleaner and/or easier for keeping track of the different drivers.

Finally, the first 4kB of physical memory is no longer cleared on bootup: tytso reports that this now enables some portables to use their power-saving features under Linux. It could also be useful for the DOS emulator, to check where the interrupt pointers pointed at startup.

Linus
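To make the <linux/major.h> reorganization above more concrete: the point of the header is that drivers compare against named major-number macros instead of bare integers. The snippet below is only an illustration; the specific macro names are assumptions, not text copied from the pl14 tree.

/* Illustration only: assumes <linux/major.h> provides named constants
 * such as FLOPPY_MAJOR, and that the MAJOR() macro from <linux/fs.h>
 * extracts the major number from a device number. */
#include <linux/types.h>
#include <linux/fs.h>       /* MAJOR() */
#include <linux/major.h>    /* FLOPPY_MAJOR, MEM_MAJOR, TTY_MAJOR, ... */

static int is_floppy(dev_t dev)
{
	/* compare against the named constant instead of a hard-coded 2 */
	return MAJOR(dev) == FLOPPY_MAJOR;
}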
Diffstat (limited to 'mm')
-rw-r--r--   mm/Makefile     5
-rw-r--r--   mm/kmalloc.c  338
-rw-r--r--   mm/memory.c    76
-rw-r--r--   mm/mmap.c      12
-rw-r--r--   mm/swap.c      18
-rw-r--r--   mm/vmalloc.c   28
6 files changed, 419 insertions, 58 deletions
diff --git a/mm/Makefile b/mm/Makefile
index 510861c..37bad38 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -14,14 +14,11 @@
.c.s:
$(CC) $(CFLAGS) -S $<
-OBJS = memory.o swap.o mmap.o vmalloc.o
+OBJS = memory.o swap.o mmap.o kmalloc.o vmalloc.o
mm.o: $(OBJS)
$(LD) -r -o mm.o $(OBJS)
-clean:
- rm -f core *.o *.a *.s
-
dep:
$(CPP) -M *.c > .depend
diff --git a/mm/kmalloc.c b/mm/kmalloc.c
new file mode 100644
index 0000000..e75fa20
--- /dev/null
+++ b/mm/kmalloc.c
@@ -0,0 +1,338 @@
+/*
+ * linux/mm/kmalloc.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds & Roger Wolff.
+ *
+ * Written by R.E. Wolff Sept/Oct '93.
+ *
+ */
+
+#include <linux/mm.h>
+#include <asm/system.h>
+#include <linux/delay.h>
+
+#define GFP_LEVEL_MASK 0xf
+
+/* I want this low enough for a while to catch errors.
+ I want this number to be increased in the near future:
+ loadable device drivers should use this function to get memory */
+
+#define MAX_KMALLOC_K 4
+
+
+/* This defines how many times we should try to allocate a free page before
+ giving up. Normally this shouldn't happen at all. */
+#define MAX_GET_FREE_PAGE_TRIES 4
+
+
+/* Private flags. */
+
+#define MF_USED 0xffaa0055
+#define MF_FREE 0x0055ffaa
+
+
+/*
+ * Much care has gone into making these routines in this file reentrant.
+ *
+ * The fancy bookkeeping of nbytesmalloced and the like are only used to
+ * report them to the user (oooohhhhh, aaaaahhhhh....) are not
+ * protected by cli(). (If that goes wrong. So what?)
+ *
+ * These routines restore the interrupt status to allow calling with ints
+ * off.
+ */
+
+/*
+ * A block header. This is in front of every malloc-block, whether free or not.
+ */
+struct block_header {
+ unsigned long bh_flags;
+ union {
+ unsigned long ubh_length;
+ struct block_header *fbh_next;
+ } vp;
+};
+
+
+#define bh_length vp.ubh_length
+#define bh_next vp.fbh_next
+#define BH(p) ((struct block_header *)(p))
+
+
+/*
+ * The page descriptor is at the front of every page that malloc has in use.
+ */
+struct page_descriptor {
+ struct page_descriptor *next;
+ struct block_header *firstfree;
+ int order;
+ int nfree;
+};
+
+
+#define PAGE_DESC(p) ((struct page_descriptor *)(((unsigned long)(p)) & PAGE_MASK))
+
+
+/*
+ * A size descriptor describes a specific class of malloc sizes.
+ * Each class of sizes has its own freelist.
+ */
+struct size_descriptor {
+ struct page_descriptor *firstfree;
+ int size;
+ int nblocks;
+
+ int nmallocs;
+ int nfrees;
+ int nbytesmalloced;
+ int npages;
+};
+
+
+struct size_descriptor sizes[] = {
+ { NULL, 32,127, 0,0,0,0 },
+ { NULL, 64, 63, 0,0,0,0 },
+ { NULL, 128, 31, 0,0,0,0 },
+ { NULL, 252, 16, 0,0,0,0 },
+ { NULL, 508, 8, 0,0,0,0 },
+ { NULL,1020, 4, 0,0,0,0 },
+ { NULL,2040, 2, 0,0,0,0 },
+ { NULL,4080, 1, 0,0,0,0 },
+ { NULL, 0, 0, 0,0,0,0 }
+};
+
+
+#define NBLOCKS(order) (sizes[order].nblocks)
+#define BLOCKSIZE(order) (sizes[order].size)
+
+
+
+long kmalloc_init (long start_mem,long end_mem)
+{
+ int order;
+
+/*
+ * Check the static info array. Things will blow up terribly if it's
+ * incorrect. This is a late "compile time" check.....
+ */
+for (order = 0;BLOCKSIZE(order);order++)
+ {
+ if ((NBLOCKS (order)*BLOCKSIZE(order) + sizeof (struct page_descriptor)) >
+ PAGE_SIZE)
+ {
+ printk ("Cannot use %d bytes out of %d in order = %d block mallocs\n",
+ NBLOCKS (order) * BLOCKSIZE(order) +
+ sizeof (struct page_descriptor),
+ (int) PAGE_SIZE,
+ BLOCKSIZE (order));
+ panic ("This only happens if someone messes with kmalloc");
+ }
+ }
+return start_mem;
+}
+
+
+
+int get_order (int size)
+{
+ int order;
+
+ /* Add the size of the header */
+ size += sizeof (struct block_header);
+ for (order = 0;BLOCKSIZE(order);order++)
+ if (size <= BLOCKSIZE (order))
+ return order;
+ return -1;
+}
+
+void * kmalloc (size_t size, int priority)
+{
+ unsigned long flags;
+ int order,tries,i,sz;
+ struct block_header *p;
+ struct page_descriptor *page;
+
+/* Sanity check... */
+if (size > MAX_KMALLOC_K * 1024)
+ {
+ printk ("kmalloc: I refuse to allocate %d bytes (for now max = %d).\n",
+ size,MAX_KMALLOC_K*1024);
+ return (NULL);
+ }
+
+order = get_order (size);
+if (order < 0)
+ {
+ printk ("kmalloc of too large a block (%d bytes).\n",size);
+ return (NULL);
+ }
+
+save_flags(flags);
+
+/* It seems VERY unlikely to me that it would be possible that this
+ loop will get executed more than once. */
+tries = MAX_GET_FREE_PAGE_TRIES;
+while (tries --)
+ {
+ /* Try to allocate a "recently" freed memory block */
+ cli ();
+ if ((page = sizes[order].firstfree) &&
+ (p = page->firstfree))
+ {
+ if (p->bh_flags == MF_FREE)
+ {
+ page->firstfree = p->bh_next;
+ page->nfree--;
+ if (!page->nfree)
+ {
+ sizes[order].firstfree = page->next;
+ page->next = NULL;
+ }
+ restore_flags(flags);
+
+ sizes [order].nmallocs++;
+ sizes [order].nbytesmalloced += size;
+ p->bh_flags = MF_USED; /* As of now this block is officially in use */
+ p->bh_length = size;
+ return p+1; /* Pointer arithmetic: increments past header */
+ }
+ printk ("Problem: block on freelist at %08lx isn't free.\n",(long)p);
+ return (NULL);
+ }
+ restore_flags(flags);
+
+
+ /* Now we're in trouble: We need to get a new free page..... */
+
+ sz = BLOCKSIZE(order); /* sz is the size of the blocks we're dealing with */
+
+ /* This can be done with ints on: This is private to this invocation */
+ page = (struct page_descriptor *) __get_free_page (priority & GFP_LEVEL_MASK);
+ if (!page)
+ {
+ printk ("Couldn't get a free page.....\n");
+ return NULL;
+ }
+#if 0
+ printk ("Got page %08x to use for %d byte mallocs....",(long)page,sz);
+#endif
+ sizes[order].npages++;
+
+ /* Loop for all but last block: */
+ for (i=NBLOCKS(order),p=BH (page+1);i > 1;i--,p=p->bh_next)
+ {
+ p->bh_flags = MF_FREE;
+ p->bh_next = BH ( ((long)p)+sz);
+ }
+ /* Last block: */
+ p->bh_flags = MF_FREE;
+ p->bh_next = NULL;
+
+ page->order = order;
+ page->nfree = NBLOCKS(order);
+ page->firstfree = BH(page+1);
+#if 0
+ printk ("%d blocks per page\n",page->nfree);
+#endif
+ /* Now we're going to muck with the "global" freelist for this size:
+ this should be uniterruptible */
+ cli ();
+ /*
+ * sizes[order].firstfree used to be NULL, otherwise we wouldn't be
+ * here, but you never know....
+ */
+ page->next = sizes[order].firstfree;
+ sizes[order].firstfree = page;
+ restore_flags(flags);
+ }
+
+/* Pray that printk won't cause this to happen again :-) */
+
+printk ("Hey. This is very funny. I tried %d times to allocate a whole\n"
+ "new page for an object only %d bytes long, but some other process\n"
+ "beat me to actually allocating it. Also note that this 'error'\n"
+ "message is soooo very long to catch your attention. I'd appreciate\n"
+ "it if you'd be so kind as to report what conditions caused this to\n"
+ "the author of this kmalloc: wolff@dutecai.et.tudelft.nl.\n"
+ "(Executive summary: This can't happen)\n",
+ MAX_GET_FREE_PAGE_TRIES,
+ size);
+return NULL;
+}
+
+
+void kfree_s (void *ptr,int size)
+{
+unsigned long flags;
+int order;
+register struct block_header *p=((struct block_header *)ptr) -1;
+struct page_descriptor *page,*pg2;
+
+page = PAGE_DESC (p);
+order = page->order;
+if ((order < 0) ||
+ (order > sizeof (sizes)/sizeof (sizes[0])) ||
+ (((long)(page->next)) & ~PAGE_MASK) ||
+ (p->bh_flags != MF_USED))
+ {
+ printk ("kfree of non-kmalloced memory: %p, next= %p, order=%d\n",
+ p, page->next, page->order);
+ return;
+ }
+if (size &&
+ size != p->bh_length)
+ {
+ printk ("Trying to free pointer at %p with wrong size: %d instead of %lu.\n",
+ p,size,p->bh_length);
+ return;
+ }
+size = p->bh_length;
+p->bh_flags = MF_FREE; /* As of now this block is officially free */
+
+save_flags(flags);
+cli ();
+p->bh_next = page->firstfree;
+page->firstfree = p;
+page->nfree ++;
+
+if (page->nfree == 1)
+ { /* Page went from full to one free block: put it on the freelist */
+ if (page->next)
+ {
+ printk ("Page %p already on freelist dazed and confused....\n", page);
+ }
+ else
+ {
+ page->next = sizes[order].firstfree;
+ sizes[order].firstfree = page;
+ }
+ }
+
+/* If page is completely free, free it */
+if (page->nfree == NBLOCKS (page->order))
+ {
+#if 0
+ printk ("Freeing page %08x.\n", (long)page);
+#endif
+ if (sizes[order].firstfree == page)
+ {
+ sizes[order].firstfree = page->next;
+ }
+ else
+ {
+ for (pg2=sizes[order].firstfree;
+ (pg2 != NULL) && (pg2->next != page);
+ pg2=pg2->next)
+ /* Nothing */;
+ if (pg2 != NULL)
+ pg2->next = page->next;
+ else
+ printk ("Ooops. page %p doesn't show on freelist.\n", page);
+ }
+ free_page ((long)page);
+ }
+restore_flags(flags);
+
+sizes[order].nfrees++; /* Noncritical (monitoring) admin stuff */
+sizes[order].nbytesmalloced -= size;
+}
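The file above is the whole allocator; for orientation, here is a minimal sketch of how a driver of this era might use it. Everything except kmalloc(), kfree_s(), and GFP_KERNEL is made up for the example, and the prototypes are assumed to come from <linux/malloc.h>, which other files in this patch include. Note that kmalloc() refuses anything above MAX_KMALLOC_K kilobytes (currently 4).

/* Hypothetical driver snippet, not part of this patch.  It allocates a
 * small per-device structure with the new allocator and frees it with
 * kfree_s(), which takes the size so the allocator can cross-check the
 * length it stored in the block header. */
#include <linux/errno.h>
#include <linux/mm.h>        /* GFP_KERNEL */
#include <linux/malloc.h>    /* assumed home of the kmalloc()/kfree_s() prototypes */

struct my_dev_state {        /* made-up example structure */
	int irq;
	char buf[128];
};

static struct my_dev_state *my_state;

static int my_dev_init(void)
{
	my_state = (struct my_dev_state *) kmalloc(sizeof(*my_state), GFP_KERNEL);
	if (!my_state)
		return -ENOMEM;  /* allocation can fail; the caller must check */
	my_state->irq = -1;
	return 0;
}

static void my_dev_cleanup(void)
{
	kfree_s(my_state, sizeof(*my_state));
	my_state = NULL;
}

Passing a size of 0 to kfree_s() skips the length cross-check, since the check only fires when a non-zero size disagrees with the stored bh_length.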
diff --git a/mm/memory.c b/mm/memory.c
index 1ea52f1..98ef0d8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -89,7 +89,7 @@ static void free_one_table(unsigned long * page_dir)
return;
*page_dir = 0;
if (pg_table >= high_memory || !(pg_table & PAGE_PRESENT)) {
- printk("Bad page table: [%p]=%08x\n",page_dir,pg_table);
+ printk("Bad page table: [%p]=%08lx\n",page_dir,pg_table);
return;
}
if (mem_map[MAP_NR(pg_table)] & MAP_PAGE_RESERVED)
@@ -335,7 +335,7 @@ int zeromap_page_range(unsigned long from, unsigned long size, int mask)
mask |= ZERO_PAGE;
}
if (from & ~PAGE_MASK) {
- printk("zeromap_page_range: from = %08x\n",from);
+ printk("zeromap_page_range: from = %08lx\n",from);
return -EINVAL;
}
dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
@@ -397,7 +397,7 @@ int remap_page_range(unsigned long from, unsigned long to, unsigned long size, i
}
}
if ((from & ~PAGE_MASK) || (to & ~PAGE_MASK)) {
- printk("remap_page_range: from = %08x, to=%08x\n",from,to);
+ printk("remap_page_range: from = %08lx, to=%08lx\n",from,to);
return -EINVAL;
}
dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
@@ -475,7 +475,7 @@ unsigned long put_page(struct task_struct * tsk,unsigned long page,
if ((prot & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT)
printk("put_page: prot = %08x\n",prot);
if (page >= high_memory) {
- printk("put_page: trying to put page %08x at %08x\n",page,address);
+ printk("put_page: trying to put page %08lx at %08lx\n",page,address);
return 0;
}
page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
@@ -509,9 +509,9 @@ unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsig
unsigned long tmp, *page_table;
if (page >= high_memory)
- printk("put_dirty_page: trying to put page %08x at %08x\n",page,address);
+ printk("put_dirty_page: trying to put page %08lx at %08lx\n",page,address);
if (mem_map[MAP_NR(page)] != 1)
- printk("mem_map disagrees with %08x at %08x\n",page,address);
+ printk("mem_map disagrees with %08lx at %08lx\n",page,address);
page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
if (PAGE_PRESENT & *page_table)
page_table = (unsigned long *) (PAGE_MASK & *page_table);
@@ -595,12 +595,12 @@ static void __do_wp_page(unsigned long error_code, unsigned long address,
free_page(new_page);
return;
bad_wp_page:
- printk("do_wp_page: bogus page at address %08x (%08x)\n",address,old_page);
+ printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
*(unsigned long *) pte = BAD_PAGE | PAGE_SHARED;
send_sig(SIGKILL, tsk, 1);
goto end_wp_page;
bad_wp_pagetable:
- printk("do_wp_page: bogus page-table at address %08x (%08x)\n",address,pte);
+ printk("do_wp_page: bogus page-table at address %08lx (%08lx)\n",address,pte);
*pde = BAD_PAGETABLE | PAGE_TABLE;
send_sig(SIGKILL, tsk, 1);
end_wp_page:
@@ -645,7 +645,7 @@ void do_wp_page(unsigned long error_code, unsigned long address,
__do_wp_page(error_code, address, tsk, user_esp);
return;
}
- printk("bad page directory entry %08x\n",page);
+ printk("bad page directory entry %08lx\n",page);
*pg_table = 0;
}
@@ -850,13 +850,19 @@ void do_no_page(unsigned long error_code, unsigned long address,
return;
}
address &= 0xfffff000;
+ tmp = 0;
for (mpnt = tsk->mmap; mpnt != NULL; mpnt = mpnt->vm_next) {
if (address < mpnt->vm_start)
- continue;
- if (address >= mpnt->vm_end)
- continue;
- if (!mpnt->vm_ops || !mpnt->vm_ops->nopage)
break;
+ if (address >= mpnt->vm_end) {
+ tmp = mpnt->vm_end;
+ continue;
+ }
+ if (!mpnt->vm_ops || !mpnt->vm_ops->nopage) {
+ ++tsk->min_flt;
+ get_empty_page(tsk,address);
+ return;
+ }
mpnt->vm_ops->nopage(error_code, mpnt, address);
return;
}
@@ -866,10 +872,13 @@ void do_no_page(unsigned long error_code, unsigned long address,
return;
if (address >= tsk->end_data && address < tsk->brk)
return;
- if (address+8192 >= (user_esp & 0xfffff000) &&
- address <= tsk->start_stack)
+ if (mpnt && mpnt == tsk->stk_vma &&
+ address - tmp > mpnt->vm_start - address &&
+ tsk->rlim[RLIMIT_STACK].rlim_cur > mpnt->vm_end - address) {
+ mpnt->vm_start = address;
return;
- current->tss.cr2 = address;
+ }
+ tsk->tss.cr2 = address;
send_sig(SIGSEGV,tsk,1);
return;
}
@@ -883,7 +892,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
unsigned long address;
unsigned long user_esp = 0;
- unsigned long stack_limit;
unsigned int bit;
/* get the address */
@@ -901,23 +909,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
do_wp_page(error_code, address, current, user_esp);
else
do_no_page(error_code, address, current, user_esp);
- if (!user_esp)
- return;
- stack_limit = current->rlim[RLIMIT_STACK].rlim_cur;
- if (stack_limit >= RLIM_INFINITY ||
- stack_limit >= current->start_stack ||
- user_esp >= (current->start_stack - stack_limit)) {
-#if 0
- if (current->stk_vma != NULL) {
- if (current->stk_vma->vm_start > user_esp)
- current->stk_vma->vm_start = user_esp & PAGE_MASK;
- } else
- printk("do_no_page: no stack segment\n");
-#endif
- } else {
- current->tss.cr2 = address;
- send_sig(SIGSEGV, current, 1);
- }
return;
}
if (error_code & PAGE_RW) {
@@ -933,7 +924,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
pg0[0] = PAGE_SHARED;
} else
printk("Unable to handle kernel paging request");
- printk(" at address %08x\n",address);
+ printk(" at address %08lx\n",address);
die_if_kernel("Oops", regs, error_code);
do_exit(SIGKILL);
}
@@ -1030,11 +1021,14 @@ unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
unsigned long address;
/*
- * Physical page 0 is special: it's a "zero-page", and is guaranteed to
- * stay that way - it's write-protected and when there is a c-o-w, the
- * mm handler treats it specially.
+ * Physical page 0 is special; it's not touched by Linux since BIOS
+ * and SMM (for laptops with [34]86/SL chips) may need it. It is read
+ * and write protected to detect null pointer references in the
+ * kernel.
*/
+#if 0
memset((void *) 0, 0, PAGE_SIZE);
+#endif
start_mem = PAGE_ALIGN(start_mem);
address = 0;
pg_dir = swapper_pg_dir;
@@ -1111,14 +1105,16 @@ void mem_init(unsigned long start_low_mem,
nr_free_pages++;
}
tmp = nr_free_pages << PAGE_SHIFT;
- printk("Memory: %dk/%dk available (%dk kernel code, %dk reserved, %dk data)\n",
+ printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
tmp >> 10,
end_mem >> 10,
codepages << (PAGE_SHIFT-10),
reservedpages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10));
+/* test if the WP bit is honoured in supervisor mode */
pg0[0] = PAGE_READONLY;
- *((char *) 0) = 0; /* test if the WP bit is honoured in supervisor mode */
+ invalidate();
+ __asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
pg0[0] = 0;
invalidate();
return;
diff --git a/mm/mmap.c b/mm/mmap.c
index 11ed1a9..10e24df 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -182,14 +182,13 @@ void unmap_fixup(struct vm_area_struct *area,
end <= area->vm_start || end > area->vm_end ||
end < addr)
{
- printk("unmap_fixup: area=%x-%x, unmap %x-%x!!\n",
+ printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
area->vm_start, area->vm_end, addr, end);
return;
}
/* Unmapping the whole area */
- if (addr == area->vm_start && end == area->vm_end)
- {
+ if (addr == area->vm_start && end == area->vm_end) {
if (area->vm_ops && area->vm_ops->close)
area->vm_ops->close(area);
return;
@@ -198,8 +197,10 @@ void unmap_fixup(struct vm_area_struct *area,
/* Work out to one of the ends */
if (addr >= area->vm_start && end == area->vm_end)
area->vm_end = addr;
- if (addr == area->vm_start && end <= area->vm_end)
+ if (addr == area->vm_start && end <= area->vm_end) {
+ area->vm_offset += (end - area->vm_start);
area->vm_start = end;
+ }
/* Unmapping a hole */
if (addr > area->vm_start && end < area->vm_end)
@@ -208,6 +209,7 @@ void unmap_fixup(struct vm_area_struct *area,
mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
*mpnt = *area;
+ mpnt->vm_offset += (end - area->vm_start);
mpnt->vm_start = end;
if (mpnt->vm_inode)
mpnt->vm_inode->i_count++;
@@ -368,7 +370,7 @@ void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
vmp->vm_start < mpnt->vm_end) ||
(vmp->vm_end >= mpnt->vm_start &&
vmp->vm_end < mpnt->vm_end))
- printk("insert_vm_struct: ins area %x-%x in area %x-%x\n",
+ printk("insert_vm_struct: ins area %lx-%lx in area %lx-%lx\n",
vmp->vm_start, vmp->vm_end,
mpnt->vm_start, vmp->vm_end);
}
diff --git a/mm/swap.c b/mm/swap.c
index c885362..8adc9eb 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -138,7 +138,7 @@ unsigned long swap_duplicate(unsigned long entry)
}
p = type + swap_info;
if (offset >= p->max) {
- printk("swap_free: weirness\n");
+ printk("swap_free: weirdness\n");
return 0;
}
if (!p->swap_map[offset]) {
@@ -166,7 +166,7 @@ void swap_free(unsigned long entry)
p = & swap_info[type];
offset = SWP_OFFSET(entry);
if (offset >= p->max) {
- printk("swap_free: weirness\n");
+ printk("swap_free: weirdness\n");
return;
}
if (!(p->flags & SWP_USED)) {
@@ -180,7 +180,7 @@ void swap_free(unsigned long entry)
if (offset > p->highest_bit)
p->highest_bit = offset;
if (!p->swap_map[offset])
- printk("swap_free: swap-space map bad (entry %08x)\n",entry);
+ printk("swap_free: swap-space map bad (entry %08lx)\n",entry);
else
if (!--p->swap_map[offset])
nr_swap_pages++;
@@ -356,7 +356,7 @@ static int swap_out(unsigned int priority)
if(mem_map[MAP_NR(pg_table)] & MAP_PAGE_RESERVED)
continue;
if(!(PAGE_PRESENT & pg_table)) {
- printk("swap_out: bad page-table at pg_dir[%d]: %08x\n",
+ printk("swap_out: bad page-table at pg_dir[%d]: %08lx\n",
table, pg_table);
((unsigned long *) p->tss.cr3)[table] = 0;
continue;
@@ -529,8 +529,8 @@ void free_page(unsigned long addr)
}
return;
}
- printk("Trying to free free memory (%08x): memory probabably corrupted\n",addr);
- printk("PC = %08x\n",*(((unsigned long *)&addr)-1));
+ printk("Trying to free free memory (%08lx): memory probabably corrupted\n",addr);
+ printk("PC = %08lx\n",*(((unsigned long *)&addr)-1));
return;
}
}
@@ -557,10 +557,10 @@ last_free_pages[index = (index + 1) & (NR_LAST_FREE_PAGES - 1)] = result; \
restore_flags(flag); \
return result; \
} \
- printk("Free page %08x has mem_map = %d\n", \
+ printk("Free page %08lx has mem_map = %d\n", \
result,mem_map[MAP_NR(result)]); \
} else \
- printk("Result = 0x%08x - memory map destroyed\n", result); \
+ printk("Result = 0x%08lx - memory map destroyed\n", result); \
queue = 0; \
nr = 0; \
} else if (nr) { \
@@ -792,7 +792,7 @@ asmlinkage int sys_swapon(const char * specialfile)
error = -EINVAL;
goto bad_swap;
}
- p->swap_map = vmalloc(p->max);
+ p->swap_map = (unsigned char *) vmalloc(p->max);
if (!p->swap_map) {
error = -ENOMEM;
goto bad_swap;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c33ad8d..ad022e1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/malloc.h>
+#include <asm/segment.h>
struct vm_struct {
unsigned long flags;
@@ -109,6 +110,7 @@ static int do_area(void * addr, unsigned long size,
return -1;
nr -= i;
index = 0;
+ dindex++;
}
return 0;
}
@@ -162,3 +164,29 @@ void * vmalloc(unsigned long size)
}
return addr;
}
+
+int vread(char *buf, char *addr, int count)
+{
+ struct vm_struct **p, *tmp;
+ char *vaddr, *buf_start = buf;
+ int n;
+
+ for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
+ vaddr = (char *) tmp->addr;
+ while (addr < vaddr) {
+ if (count == 0)
+ goto finished;
+ put_fs_byte('\0', buf++), addr++, count--;
+ }
+ n = tmp->size - PAGE_SIZE;
+ if (addr > vaddr)
+ n -= addr - vaddr;
+ while (--n >= 0) {
+ if (count == 0)
+ goto finished;
+ put_fs_byte(*addr++, buf++), count--;
+ }
+ }
+finished:
+ return buf - buf_start;
+}
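vread() above zero-fills the gaps between vmalloc'd areas and copies the rest out byte by byte with put_fs_byte(), so its buf argument must be a user-space pointer. A hedged sketch of a caller follows: a read() handler that exposes vmalloc space through a file, assuming the 0.99-era file_operations read prototype. Everything except vread() itself is hypothetical.

/* Hypothetical read() handler, not part of this patch.  It treats the
 * file offset as an address in the vmalloc range and lets vread() do
 * the user-space copy (and the zero-filling of unmapped gaps). */
#include <linux/fs.h>        /* struct inode, struct file */

extern int vread(char *buf, char *addr, int count);

static int vmem_read(struct inode *inode, struct file *file, char *buf, int count)
{
	int copied;

	/* interpret f_pos as an address inside the vmalloc'd region */
	copied = vread(buf, (char *) (unsigned long) file->f_pos, count);
	file->f_pos += copied;
	return copied;
}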