From: "Eric W. Biederman" kernel/crash.c has been renamed kernel/crash_dump.c to clarify it's purpose. From: Hariprasad Nellitheertha This patch provides the interfaces necessary to read the dump contents, treating it as a high memory device. Signed off by Hariprasad Nellitheertha Signed-off-by: Eric Biederman Signed-off-by: Andrew Morton --- 25-ppc-akpm/arch/i386/mm/highmem.c | 18 ++++++++++++++++++ 25-ppc-akpm/include/asm-i386/highmem.h | 1 + 25-ppc-akpm/include/linux/highmem.h | 1 + 25-ppc-akpm/kernel/crash_dump.c | 33 +++++++++++++++++++++++++++++++++ 4 files changed, 53 insertions(+) diff -puN arch/i386/mm/highmem.c~crashdump-routines-for-copying-dump-pages arch/i386/mm/highmem.c --- 25-ppc/arch/i386/mm/highmem.c~crashdump-routines-for-copying-dump-pages 2005-01-28 23:00:26.747236016 -0800 +++ 25-ppc-akpm/arch/i386/mm/highmem.c 2005-01-28 23:01:06.272227304 -0800 @@ -74,6 +74,24 @@ void kunmap_atomic(void *kvaddr, enum km preempt_check_resched(); } +/* This is the same as kmap_atomic() but can map memory that doesn't + * have a struct page associated with it. + */ +void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) +{ + enum fixed_addresses idx; + unsigned long vaddr; + + inc_preempt_count(); + + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot)); + __flush_tlb_one(vaddr); + + return (void*) vaddr; +} + struct page *kmap_atomic_to_page(void *ptr) { unsigned long idx, vaddr = (unsigned long)ptr; diff -puN include/asm-i386/highmem.h~crashdump-routines-for-copying-dump-pages include/asm-i386/highmem.h --- 25-ppc/include/asm-i386/highmem.h~crashdump-routines-for-copying-dump-pages 2005-01-28 23:00:26.748235864 -0800 +++ 25-ppc-akpm/include/asm-i386/highmem.h 2005-01-28 23:01:28.081911728 -0800 @@ -72,6 +72,7 @@ void *kmap(struct page *page); void kunmap(struct page *page); void *kmap_atomic(struct page *page, enum km_type type); void kunmap_atomic(void *kvaddr, enum km_type type); +void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); struct page *kmap_atomic_to_page(void *ptr); #define flush_cache_kmaps() do { } while (0) diff -puN include/linux/highmem.h~crashdump-routines-for-copying-dump-pages include/linux/highmem.h --- 25-ppc/include/linux/highmem.h~crashdump-routines-for-copying-dump-pages 2005-01-28 23:00:26.750235560 -0800 +++ 25-ppc-akpm/include/linux/highmem.h 2005-01-28 23:01:51.314379856 -0800 @@ -28,6 +28,7 @@ static inline void *kmap(struct page *pa #define kmap_atomic(page, idx) page_address(page) #define kunmap_atomic(addr, idx) do { } while (0) +#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn)) #define kmap_atomic_to_page(ptr) virt_to_page(ptr) #endif /* CONFIG_HIGHMEM */ diff -puN kernel/crash_dump.c~crashdump-routines-for-copying-dump-pages kernel/crash_dump.c --- 25-ppc/kernel/crash_dump.c~crashdump-routines-for-copying-dump-pages 2005-01-28 23:00:26.751235408 -0800 +++ 25-ppc-akpm/kernel/crash_dump.c 2005-01-28 23:00:26.757234496 -0800 @@ -8,6 +8,39 @@ #include #include #include +#include +#include +#include + #include #include +/* + * Copy a page from "oldmem". For this page, there is no pte mapped + * in the current kernel. We stitch up a pte, similar to kmap_atomic. 
+ */ +ssize_t copy_oldmem_page(unsigned long pfn, char *buf, + size_t csize, int userbuf) +{ + void *page, *vaddr; + + if (!csize) + return 0; + + page = kmalloc(PAGE_SIZE, GFP_KERNEL); + + vaddr = kmap_atomic_pfn(pfn, KM_PTE0); + copy_page(page, vaddr); + kunmap_atomic(vaddr, KM_PTE0); + + if (userbuf) { + if (copy_to_user(buf, page, csize)) { + kfree(page); + return -EFAULT; + } + } else + memcpy(buf, page, csize); + kfree(page); + + return 0; +} _
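
To show how the new interface is meant to be driven, here is a minimal
user-space sketch of the read side.  It is not kernel code and not part of
the patch: the fake old_mem[] backing store, the stub copy_oldmem_page()
and the read_oldmem() helper are invented for illustration, and only the
shape of copy_oldmem_page() mirrors the function added above.  The point
is the page-splitting arithmetic: each call copies at most one page,
starting at a page frame boundary, so a longer request has to be broken
up by the caller.

/*
 * Illustrative user-space mock only.  Compile with any C compiler; no
 * kernel headers are required.
 */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define NR_FAKE_PAGES	4

/* Stand-in for the crashed kernel's memory. */
static char old_mem[NR_FAKE_PAGES][PAGE_SIZE];

/*
 * Stub with the same shape as the patched function.  userbuf is ignored
 * because there is no user/kernel split in a user-space mock.
 */
static ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
				size_t csize, int userbuf)
{
	(void)userbuf;
	if (!csize)
		return 0;
	if (pfn >= NR_FAKE_PAGES || csize > PAGE_SIZE)
		return -1;
	memcpy(buf, old_mem[pfn], csize);
	return 0;
}

/*
 * Copy "count" bytes of old memory into "buf", starting at page frame
 * "pfn".  The request is split so each copy_oldmem_page() call stays
 * within a single page, matching the interface above.
 */
static ssize_t read_oldmem(char *buf, size_t count, unsigned long pfn)
{
	size_t done = 0;

	while (done < count) {
		size_t csize = count - done;

		if (csize > PAGE_SIZE)
			csize = PAGE_SIZE;
		if (copy_oldmem_page(pfn, buf + done, csize, 0) < 0)
			return -1;
		done += csize;
		pfn++;
	}
	return done;
}

int main(void)
{
	char buf[2 * PAGE_SIZE];

	memset(old_mem, 0xab, sizeof(old_mem));
	if (read_oldmem(buf, sizeof(buf), 1) < 0)
		return 1;
	printf("copied %zu bytes, first byte 0x%02x\n",
	       sizeof(buf), (unsigned char)buf[0]);
	return 0;
}

In the kernel the userbuf flag selects copy_to_user() versus memcpy(), and
the kmalloc()'d bounce page keeps the fixmap window set up by
kmap_atomic_pfn() mapped only for the duration of the copy_page().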