aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAndi Kleen <andi@firstfloor.org>2010-11-19 13:30:25 +0100
committerAndi Kleen <ak@linux.intel.com>2010-11-19 13:31:39 +0100
commitcb58f049ae6709ddbab71be199390dc6852018cd (patch)
treec298fa1bf42fb1d6b69d50e39e55716e334b1a1b
parentfe61906edce9e70d02481a77a617ba1397573dce (diff)
downloadlinux-mce-2.6-mce/crashdump.tar.gz
MCE: Disable MCEs during crash dumpmce/crashdump
When a crash kernel is doing a crash dump, the previous kernel's memory might be poisoned. Avoid machine checking in this case by disabling machine checks during the access.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
-rw-r--r--arch/x86/kernel/crash_dump_32.c5
-rw-r--r--arch/x86/kernel/crash_dump_64.c36
2 files changed, 36 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index d5cd13945d5a48..04df58e9763051 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -11,6 +11,7 @@
#include <linux/crash_dump.h>
#include <asm/uaccess.h>
+#include <asm/mce.h>
static void *kdump_buf_page;
@@ -64,7 +65,9 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
vaddr = kmap_atomic_pfn(pfn);
if (!userbuf) {
+ mce_disable_error_reporting();
memcpy(buf, (vaddr + offset), csize);
+ mce_reenable_error_reporting();
kunmap_atomic(vaddr, KM_PTE0);
} else {
if (!kdump_buf_page) {
@@ -73,7 +76,9 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
kunmap_atomic(vaddr, KM_PTE0);
return -EFAULT;
}
+ mce_disable_error_reporting();
copy_page(kdump_buf_page, vaddr);
+ mce_reenable_error_reporting();
kunmap_atomic(vaddr, KM_PTE0);
if (copy_to_user(buf, (kdump_buf_page + offset), csize))
return -EFAULT;
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index 994828899e0983..9cba196221c780 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -9,6 +9,8 @@
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/mm.h>
+#include <asm/mce.h>
/* Stores the physical address of elf header of crash image. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
@@ -30,22 +32,46 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
{
void *vaddr;
+ struct page *p[2] = { NULL, NULL };
+ unsigned long poff = (unsigned long)buf & ~PAGE_MASK;
if (!csize)
return 0;
+ if (userbuf) {
+ if (get_user_pages_fast((unsigned long)buf,
+ ((csize + poff) >> PAGE_SHIFT) + 1,
+ 1, p) < 0)
+ return -EFAULT;
+ }
vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
+ csize = -ENOMEM;
if (!vaddr)
- return -ENOMEM;
+ goto out;
+
+ /* Disable MCEs temporarily so that we don't fault on memory errors */
+ get_cpu();
+ mce_disable_error_reporting();
if (userbuf) {
- if (copy_to_user(buf, vaddr + offset, csize)) {
- iounmap(vaddr);
- return -EFAULT;
- }
+ unsigned len = min(csize, PAGE_SIZE - poff);
+
+ memcpy(page_address(p[0]) + poff, vaddr + offset, len);
+ if (p[1])
+ memcpy(page_address(p[1]), vaddr + offset + len,
+ csize - len);
} else
memcpy(buf, vaddr + offset, csize);
+ mce_reenable_error_reporting();
+ put_cpu();
+
+out:
+ if (p[0])
+ put_page(p[0]);
+ if (p[1])
+ put_page(p[1]);
+
set_iounmap_nonlazy();
iounmap(vaddr);
return csize;