author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-12-02 10:48:19 -0800
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-12-02 10:48:19 -0800
commit     22b63709cf61a3698353adf80f5c4568ebf38dcc (patch)
tree       2701bf180544e93ab7521d8cbf3cff39ca99156f
parent     6bbbee425761adff761d0f1a1d723d8f5eae93b8 (diff)
download   patches-22b63709cf61a3698353adf80f5c4568ebf38dcc.tar.gz
kexec stuff
-rw-r--r--  k1      |  32
-rw-r--r--  k2      | 309
-rw-r--r--  k3      | 188
-rw-r--r--  k4      | 787
-rw-r--r--  k5      | 622
-rw-r--r--  k6      | 944
-rw-r--r--  series  |   6
7 files changed, 2888 insertions, 0 deletions
diff --git a/k1 b/k1
new file mode 100644
index 00000000000000..7bc70d357c1e22
--- /dev/null
+++ b/k1
@@ -0,0 +1,32 @@
+From vgoyal@redhat.com Wed Nov 20 09:51:42 2013
+From: Vivek Goyal <vgoyal@redhat.com>
+Date: Wed, 20 Nov 2013 12:50:46 -0500
+Subject: [PATCH 1/6] kexec: Export vmcoreinfo note size properly
+To: linux-kernel@vger.kernel.org, kexec@lists.infradead.org
+Cc: ebiederm@xmission.com, hpa@zytor.com, mjg59@srcf.ucam.org, greg@kroah.com, Vivek Goyal <vgoyal@redhat.com>
+Message-ID: <1384969851-7251-2-git-send-email-vgoyal@redhat.com>
+
+
+Right now we seem to be exporting the max data size contained inside the
+vmcoreinfo note, but this does not include the size of the metadata around
+the vmcoreinfo data, such as the name of the note and the starting and
+ending elf_note structures.
+
+I think user space expects the total size, and that total size is what
+should be put in the PT_NOTE elf header.
+
+Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
+---
+ kernel/ksysfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/ksysfs.c
++++ b/kernel/ksysfs.c
+@@ -126,7 +126,7 @@ static ssize_t vmcoreinfo_show(struct ko
+ {
+ return sprintf(buf, "%lx %x\n",
+ paddr_vmcoreinfo_note(),
+- (unsigned int)vmcoreinfo_max_size);
++ (unsigned int)sizeof(vmcoreinfo_note));
+ }
+ KERNEL_ATTR_RO(vmcoreinfo);
+
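For reference, the arithmetic behind "total size": an ELF note consists of a
header (namesz, descsz, type), the 4-byte-aligned name, and the descriptor
data; the vmcoreinfo note also reserves a second header for a terminating
empty note. A minimal user-space sketch of that computation, with constants
mirrored from include/linux/kexec.h (the macro names here are ours, not the
kernel's):

    #include <stdio.h>

    #define ALIGN4(x)        (((x) + 3UL) & ~3UL)
    #define NOTE_HEAD_BYTES  (3 * 4)    /* ELF note header: namesz, descsz, type */
    #define DATA_BYTES       4096UL     /* mirrors VMCOREINFO_BYTES */
    #define NOTE_NAME        "VMCOREINFO"

    int main(void)
    {
        /* one header for the note itself, one for the empty terminator */
        unsigned long total = NOTE_HEAD_BYTES * 2
                            + ALIGN4(sizeof(NOTE_NAME))
                            + DATA_BYTES;

        /* this total, not DATA_BYTES alone, belongs in the PT_NOTE header */
        printf("vmcoreinfo note size = %lu\n", total);
        return 0;
    }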
diff --git a/k2 b/k2
new file mode 100644
index 00000000000000..40d7cc0a87e495
--- /dev/null
+++ b/k2
@@ -0,0 +1,309 @@
+From vgoyal@redhat.com Wed Nov 20 09:51:43 2013
+From: Vivek Goyal <vgoyal@redhat.com>
+Date: Wed, 20 Nov 2013 12:50:47 -0500
+Subject: [PATCH 2/6] kexec: Move segment verification code in a separate function
+To: linux-kernel@vger.kernel.org, kexec@lists.infradead.org
+Cc: ebiederm@xmission.com, hpa@zytor.com, mjg59@srcf.ucam.org, greg@kroah.com, Vivek Goyal <vgoyal@redhat.com>
+Message-ID: <1384969851-7251-3-git-send-email-vgoyal@redhat.com>
+
+
+Previously, do_kimage_alloc() would allocate a kimage structure, copy the
+segment list from user space, and then do the segment list sanity
+verification.
+
+Break this function down into three parts: do_kimage_alloc_init() to do the
+actual allocation and basic initialization of the kimage structure,
+copy_user_segment_list() to copy the segment list from user space, and
+sanity_check_segment_list() to verify the sanity of the segment list as
+passed by user space.
+
+In later patches, I need to only allocate a kimage and not copy a segment
+list from user space. Breaking the code down into smaller functions enables
+reusing it at other places.
+
+Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
+---
+ kernel/kexec.c | 182 +++++++++++++++++++++++++++++++--------------------------
+ 1 file changed, 101 insertions(+), 81 deletions(-)
+
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -120,45 +120,27 @@ static struct page *kimage_alloc_page(st
+ gfp_t gfp_mask,
+ unsigned long dest);
+
+-static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
+- unsigned long nr_segments,
+- struct kexec_segment __user *segments)
++static int copy_user_segment_list(struct kimage *image,
++ unsigned long nr_segments,
++ struct kexec_segment __user *segments)
+ {
++ int ret;
+ size_t segment_bytes;
+- struct kimage *image;
+- unsigned long i;
+- int result;
+-
+- /* Allocate a controlling structure */
+- result = -ENOMEM;
+- image = kzalloc(sizeof(*image), GFP_KERNEL);
+- if (!image)
+- goto out;
+-
+- image->head = 0;
+- image->entry = &image->head;
+- image->last_entry = &image->head;
+- image->control_page = ~0; /* By default this does not apply */
+- image->start = entry;
+- image->type = KEXEC_TYPE_DEFAULT;
+-
+- /* Initialize the list of control pages */
+- INIT_LIST_HEAD(&image->control_pages);
+-
+- /* Initialize the list of destination pages */
+- INIT_LIST_HEAD(&image->dest_pages);
+-
+- /* Initialize the list of unusable pages */
+- INIT_LIST_HEAD(&image->unuseable_pages);
+
+ /* Read in the segments */
+ image->nr_segments = nr_segments;
+ segment_bytes = nr_segments * sizeof(*segments);
+- result = copy_from_user(image->segment, segments, segment_bytes);
+- if (result) {
+- result = -EFAULT;
+- goto out;
+- }
++ ret = copy_from_user(image->segment, segments, segment_bytes);
++ if (ret)
++ ret = -EFAULT;
++
++ return ret;
++}
++
++static int sanity_check_segment_list(struct kimage *image)
++{
++ int result, i;
++ unsigned long nr_segments = image->nr_segments;
+
+ /*
+ * Verify we have good destination addresses. The caller is
+@@ -180,9 +162,9 @@ static int do_kimage_alloc(struct kimage
+ mstart = image->segment[i].mem;
+ mend = mstart + image->segment[i].memsz;
+ if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
+- goto out;
++ return result;
+ if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
+- goto out;
++ return result;
+ }
+
+ /* Verify our destination addresses do not overlap.
+@@ -203,7 +185,7 @@ static int do_kimage_alloc(struct kimage
+ pend = pstart + image->segment[j].memsz;
+ /* Do the segments overlap ? */
+ if ((mend > pstart) && (mstart < pend))
+- goto out;
++ return result;
+ }
+ }
+
+@@ -215,18 +197,61 @@ static int do_kimage_alloc(struct kimage
+ result = -EINVAL;
+ for (i = 0; i < nr_segments; i++) {
+ if (image->segment[i].bufsz > image->segment[i].memsz)
+- goto out;
++ return result;
++ }
++
++ /*
++ * Verify we have good destination addresses. Normally
++ * the caller is responsible for making certain we don't
++ * attempt to load the new image into invalid or reserved
++ * areas of RAM. But crash kernels are preloaded into a
++ * reserved area of ram. We must ensure the addresses
++ * are in the reserved area otherwise preloading the
++ * kernel could corrupt things.
++ */
++
++ if (image->type == KEXEC_TYPE_CRASH) {
++ result = -EADDRNOTAVAIL;
++ for (i = 0; i < nr_segments; i++) {
++ unsigned long mstart, mend;
++
++ mstart = image->segment[i].mem;
++ mend = mstart + image->segment[i].memsz - 1;
++ /* Ensure we are within the crash kernel limits */
++ if ((mstart < crashk_res.start) ||
++ (mend > crashk_res.end))
++ return result;
++ }
+ }
+
+- result = 0;
+-out:
+- if (result == 0)
+- *rimage = image;
+- else
+- kfree(image);
++ return 0;
++}
+
+- return result;
++static struct kimage *do_kimage_alloc_init(void)
++{
++ struct kimage *image;
+
++ /* Allocate a controlling structure */
++ image = kzalloc(sizeof(*image), GFP_KERNEL);
++ if (!image)
++ return NULL;
++
++ image->head = 0;
++ image->entry = &image->head;
++ image->last_entry = &image->head;
++ image->control_page = ~0; /* By default this does not apply */
++ image->type = KEXEC_TYPE_DEFAULT;
++
++ /* Initialize the list of control pages */
++ INIT_LIST_HEAD(&image->control_pages);
++
++ /* Initialize the list of destination pages */
++ INIT_LIST_HEAD(&image->dest_pages);
++
++ /* Initialize the list of unusable pages */
++ INIT_LIST_HEAD(&image->unuseable_pages);
++
++ return image;
+ }
+
+ static void kimage_free_page_list(struct list_head *list);
+@@ -239,10 +264,19 @@ static int kimage_normal_alloc(struct ki
+ struct kimage *image;
+
+ /* Allocate and initialize a controlling structure */
+- image = NULL;
+- result = do_kimage_alloc(&image, entry, nr_segments, segments);
++ image = do_kimage_alloc_init();
++ if (!image)
++ return -ENOMEM;
++
++ image->start = entry;
++
++ result = copy_user_segment_list(image, nr_segments, segments);
+ if (result)
+- goto out;
++ goto out_free_image;
++
++ result = sanity_check_segment_list(image);
++ if (result)
++ goto out_free_image;
+
+ /*
+ * Find a location for the control code buffer, and add it
+@@ -254,22 +288,23 @@ static int kimage_normal_alloc(struct ki
+ get_order(KEXEC_CONTROL_PAGE_SIZE));
+ if (!image->control_code_page) {
+ printk(KERN_ERR "Could not allocate control_code_buffer\n");
+- goto out_free;
++ goto out_free_image;
+ }
+
+ image->swap_page = kimage_alloc_control_pages(image, 0);
+ if (!image->swap_page) {
+ printk(KERN_ERR "Could not allocate swap buffer\n");
+- goto out_free;
++ goto out_free_control_pages;
+ }
+
+ *rimage = image;
+ return 0;
+
+-out_free:
++
++out_free_control_pages:
+ kimage_free_page_list(&image->control_pages);
++out_free_image:
+ kfree(image);
+-out:
+ return result;
+ }
+
+@@ -279,19 +314,17 @@ static int kimage_crash_alloc(struct kim
+ {
+ int result;
+ struct kimage *image;
+- unsigned long i;
+
+- image = NULL;
+ /* Verify we have a valid entry point */
+- if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
+- result = -EADDRNOTAVAIL;
+- goto out;
+- }
++ if ((entry < crashk_res.start) || (entry > crashk_res.end))
++ return -EADDRNOTAVAIL;
+
+ /* Allocate and initialize a controlling structure */
+- result = do_kimage_alloc(&image, entry, nr_segments, segments);
+- if (result)
+- goto out;
++ image = do_kimage_alloc_init();
++ if (!image)
++ return -ENOMEM;
++
++ image->start = entry;
+
+ /* Enable the special crash kernel control page
+ * allocation policy.
+@@ -299,25 +332,13 @@ static int kimage_crash_alloc(struct kim
+ image->control_page = crashk_res.start;
+ image->type = KEXEC_TYPE_CRASH;
+
+- /*
+- * Verify we have good destination addresses. Normally
+- * the caller is responsible for making certain we don't
+- * attempt to load the new image into invalid or reserved
+- * areas of RAM. But crash kernels are preloaded into a
+- * reserved area of ram. We must ensure the addresses
+- * are in the reserved area otherwise preloading the
+- * kernel could corrupt things.
+- */
+- result = -EADDRNOTAVAIL;
+- for (i = 0; i < nr_segments; i++) {
+- unsigned long mstart, mend;
++ result = copy_user_segment_list(image, nr_segments, segments);
++ if (result)
++ goto out_free_image;
+
+- mstart = image->segment[i].mem;
+- mend = mstart + image->segment[i].memsz - 1;
+- /* Ensure we are within the crash kernel limits */
+- if ((mstart < crashk_res.start) || (mend > crashk_res.end))
+- goto out_free;
+- }
++ result = sanity_check_segment_list(image);
++ if (result)
++ goto out_free_image;
+
+ /*
+ * Find a location for the control code buffer, and add
+@@ -329,15 +350,14 @@ static int kimage_crash_alloc(struct kim
+ get_order(KEXEC_CONTROL_PAGE_SIZE));
+ if (!image->control_code_page) {
+ printk(KERN_ERR "Could not allocate control_code_buffer\n");
+- goto out_free;
++ goto out_free_image;
+ }
+
+ *rimage = image;
+ return 0;
+
+-out_free:
++out_free_image:
+ kfree(image);
+-out:
+ return result;
+ }
+
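Condensed from the diff above, the resulting allocation flow reads as
follows. This is a kernel-context sketch only, not new code; the _sketch
suffix on the function name is ours:

    static int kimage_normal_alloc_sketch(struct kimage **rimage,
                unsigned long entry, unsigned long nr_segments,
                struct kexec_segment __user *segments)
    {
        struct kimage *image;
        int result;

        image = do_kimage_alloc_init();    /* allocation + basic init only */
        if (!image)
            return -ENOMEM;

        image->start = entry;

        result = copy_user_segment_list(image, nr_segments, segments);
        if (result)
            goto out_free_image;

        result = sanity_check_segment_list(image);
        if (result)
            goto out_free_image;

        *rimage = image;
        return 0;

    out_free_image:    /* unwind in reverse allocation order */
        kfree(image);
        return result;
    }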
diff --git a/k3 b/k3
new file mode 100644
index 00000000000000..5b4da393810aaf
--- /dev/null
+++ b/k3
@@ -0,0 +1,188 @@
+From vgoyal@redhat.com Wed Nov 20 09:51:43 2013
+From: Vivek Goyal <vgoyal@redhat.com>
+Date: Wed, 20 Nov 2013 12:50:48 -0500
+Subject: [PATCH 3/6] resource: Provide new functions to walk through resources
+To: linux-kernel@vger.kernel.org, kexec@lists.infradead.org
+Cc: ebiederm@xmission.com, hpa@zytor.com, mjg59@srcf.ucam.org, greg@kroah.com, Vivek Goyal <vgoyal@redhat.com>, Yinghai Lu <yinghai@kernel.org>
+Message-ID: <1384969851-7251-4-git-send-email-vgoyal@redhat.com>
+
+
+I have added two more functions to walk through resources.
+
+The current walk_system_ram_range() deals in pfns, and /proc/iomem can
+contain partial pages. By dealing in pfns, the callback function loses the
+information that the last page of a memory range is a partial page and not
+a full page. So I implemented walk_system_ram_res(), which passes u64
+values to the callback functions and now properly returns the start and
+end addresses.
+
+walk_system_ram_range() uses find_next_system_ram() to find the next
+RAM resource. This in turn only travels through the siblings of the
+top-level children and does not traverse all the nodes of the resource
+tree. I also need another function where I can walk through all the
+resources, for example to figure out where the "GART" aperture is, or
+where ACPI memory is.
+
+So I wrote another function, walk_ram_res(), which walks through all
+/proc/iomem resources and returns matches as asked by the caller. The
+caller can specify the "name" of a resource, and its start and end.
+
+Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
+Cc: Yinghai Lu <yinghai@kernel.org>
+---
+ include/linux/ioport.h | 6 ++
+ kernel/resource.c | 108 +++++++++++++++++++++++++++++++++++++++++++++++--
+ 2 files changed, 110 insertions(+), 4 deletions(-)
+
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -227,6 +227,12 @@ extern int iomem_is_exclusive(u64 addr);
+ extern int
+ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
+ void *arg, int (*func)(unsigned long, unsigned long, void *));
++extern int
++walk_system_ram_res(u64 start, u64 end, void *arg,
++ int (*func)(u64, u64, void *));
++extern int
++walk_ram_res(char *name, unsigned long flags, u64 start, u64 end, void *arg,
++ int (*func)(u64, u64, void *));
+
+ /* True if any part of r1 overlaps r2 */
+ static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -59,10 +59,8 @@ static DEFINE_RWLOCK(resource_lock);
+ static struct resource *bootmem_resource_free;
+ static DEFINE_SPINLOCK(bootmem_resource_lock);
+
+-static void *r_next(struct seq_file *m, void *v, loff_t *pos)
++static struct resource *next_resource(struct resource *p)
+ {
+- struct resource *p = v;
+- (*pos)++;
+ if (p->child)
+ return p->child;
+ while (!p->sibling && p->parent)
+@@ -70,6 +68,13 @@ static void *r_next(struct seq_file *m,
+ return p->sibling;
+ }
+
++static void *r_next(struct seq_file *m, void *v, loff_t *pos)
++{
++ struct resource *p = v;
++ (*pos)++;
++ return (void *)next_resource(p);
++}
++
+ #ifdef CONFIG_PROC_FS
+
+ enum { MAX_IORES_LEVEL = 5 };
+@@ -322,7 +327,71 @@ int release_resource(struct resource *ol
+
+ EXPORT_SYMBOL(release_resource);
+
+-#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
++/*
++ * Finds the lowest iomem resource that exists within [res->start, res->end).
++ * The caller must specify res->start, res->end, res->flags and "name".
++ * If found, returns 0 and res is overwritten; if not found, returns -1.
++ * This walks through the whole tree and not just the first-level children.
++ */
++static int find_next_iomem_res(struct resource *res, char *name)
++{
++ resource_size_t start, end;
++ struct resource *p;
++
++ BUG_ON(!res);
++
++ start = res->start;
++ end = res->end;
++ BUG_ON(start >= end);
++
++ read_lock(&resource_lock);
++ p = &iomem_resource;
++ while ((p = next_resource(p))) {
++ if (p->flags != res->flags)
++ continue;
++ if (name && strcmp(p->name, name))
++ continue;
++ if (p->start > end) {
++ p = NULL;
++ break;
++ }
++ if ((p->end >= start) && (p->start < end))
++ break;
++ }
++
++ read_unlock(&resource_lock);
++ if (!p)
++ return -1;
++ /* copy data */
++ if (res->start < p->start)
++ res->start = p->start;
++ if (res->end > p->end)
++ res->end = p->end;
++ return 0;
++}
++
++int walk_ram_res(char *name, unsigned long flags, u64 start, u64 end,
++ void *arg, int (*func)(u64, u64, void *))
++{
++ struct resource res;
++ u64 orig_end;
++ int ret = -1;
++
++ res.start = start;
++ res.end = end;
++ res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++ orig_end = res.end;
++ while ((res.start < res.end) &&
++ (find_next_iomem_res(&res, name) >= 0)) {
++ ret = (*func)(res.start, res.end, arg);
++ if (ret)
++ break;
++ res.start = res.end + 1;
++ res.end = orig_end;
++ }
++ return ret;
++}
++
+ /*
+ * Finds the lowest memory reosurce exists within [res->start.res->end)
+ * the caller must specify res->start, res->end, res->flags and "name".
+@@ -366,6 +435,37 @@ static int find_next_system_ram(struct r
+
+ /*
+ * This function calls callback against all memory range of "System RAM"
++ * which are marked as IORESOURCE_MEM and IORESOURCE_BUSY.
++ * Now, this function is only for "System RAM". It deals with full
++ * ranges and not pfns. If resources are not pfn-aligned, dealing in
++ * pfns can truncate ranges.
++ */
++int walk_system_ram_res(u64 start, u64 end, void *arg,
++ int (*func)(u64, u64, void *))
++{
++ struct resource res;
++ u64 orig_end;
++ int ret = -1;
++
++ res.start = start;
++ res.end = end;
++ res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++ orig_end = res.end;
++ while ((res.start < res.end) &&
++ (find_next_system_ram(&res, "System RAM") >= 0)) {
++ ret = (*func)(res.start, res.end, arg);
++ if (ret)
++ break;
++ res.start = res.end + 1;
++ res.end = orig_end;
++ }
++ return ret;
++}
++
++#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
++
++/*
++ * This function calls callback against all memory range of "System RAM"
+ * which are marked as IORESOURCE_MEM and IORESOUCE_BUSY.
+ * Now, this function is only for "System RAM".
+ */
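A usage sketch for the new iterator, assuming only the prototypes added to
include/linux/ioport.h above; this is kernel-context code, and the callback
and its accounting are ours:

    /* Sum the bytes of every "System RAM" range with byte precision. */
    static int count_ram_bytes(u64 start, u64 end, void *arg)
    {
        u64 *total = arg;

        *total += end - start + 1;    /* end is inclusive */
        return 0;                     /* nonzero would stop the walk */
    }

    static u64 total_system_ram(void)
    {
        u64 total = 0;

        /* [0, -1] spans the whole physical address space */
        walk_system_ram_res(0, -1, &total, count_ram_bytes);
        return total;
    }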
diff --git a/k4 b/k4
new file mode 100644
index 00000000000000..073a2daf72e200
--- /dev/null
+++ b/k4
@@ -0,0 +1,787 @@
+From vgoyal@redhat.com Wed Nov 20 09:51:43 2013
+From: Vivek Goyal <vgoyal@redhat.com>
+Date: Wed, 20 Nov 2013 12:50:49 -0500
+Subject: [PATCH 4/6] kexec: A new system call, kexec_file_load, for in kernel kexec
+To: linux-kernel@vger.kernel.org, kexec@lists.infradead.org
+Cc: ebiederm@xmission.com, hpa@zytor.com, mjg59@srcf.ucam.org, greg@kroah.com, Vivek Goyal <vgoyal@redhat.com>
+Message-ID: <1384969851-7251-5-git-send-email-vgoyal@redhat.com>
+
+
+This patch implements the in-kernel kexec functionality. It implements a
+new system call, kexec_file_load. I think the parameter list of this system
+call will change, as I have not done the kernel image signature handling
+yet. I have been told that I might have to pass the detached signature
+and its size as part of the system call.
+
+Previously the segment list was prepared in user space. Now user space just
+passes a kernel fd, an initrd fd and a command line, and the kernel will
+create a segment list internally.
+
+This patch contains the generic part of the code. The actual segment
+preparation and loading is done by the arch- and image-specific loader,
+which comes in the next patch.
+
+Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
+---
+ arch/x86/kernel/machine_kexec_64.c | 57 ++++
+ arch/x86/syscalls/syscall_64.tbl | 1
+ include/linux/kexec.h | 57 ++++
+ include/linux/syscalls.h | 3
+ include/uapi/linux/kexec.h | 4
+ kernel/kexec.c | 486 ++++++++++++++++++++++++++++++++++++-
+ kernel/sys_ni.c | 1
+ 7 files changed, 607 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/machine_kexec_64.c
++++ b/arch/x86/kernel/machine_kexec_64.c
+@@ -22,6 +22,13 @@
+ #include <asm/mmu_context.h>
+ #include <asm/debugreg.h>
+
++/* arch dependent functionality related to kexec file based syscall */
++static struct kexec_file_type kexec_file_type[]={
++ {"", NULL, NULL, NULL, NULL},
++};
++
++static int nr_file_types = sizeof(kexec_file_type)/sizeof(kexec_file_type[0]);
++
+ static void free_transition_pgtable(struct kimage *image)
+ {
+ free_page((unsigned long)image->arch.pud);
+@@ -200,7 +207,7 @@ void machine_kexec(struct kimage *image)
+ {
+ unsigned long page_list[PAGES_NR];
+ void *control_page;
+- int save_ftrace_enabled;
++ int save_ftrace_enabled, idx;
+
+ #ifdef CONFIG_KEXEC_JUMP
+ if (image->preserve_context)
+@@ -226,6 +233,11 @@ void machine_kexec(struct kimage *image)
+ #endif
+ }
+
++ /* Call image loader to prepare for entry */
++ idx = image->file_handler_idx;
++ if (kexec_file_type[idx].prep_entry)
++ kexec_file_type[idx].prep_entry(image);
++
+ control_page = page_address(image->control_code_page) + PAGE_SIZE;
+ memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
+
+@@ -281,3 +293,46 @@ void arch_crash_save_vmcoreinfo(void)
+ #endif
+ }
+
++/* arch dependent functionality related to kexec file based syscall */
++
++int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
++ unsigned long buf_len)
++{
++ int i, ret = -ENOEXEC;
++
++ for (i = 0; i < nr_file_types; i++) {
++ if (!kexec_file_type[i].probe)
++ continue;
++
++ ret = kexec_file_type[i].probe(buf, buf_len);
++ if (!ret) {
++ image->file_handler_idx = i;
++ return ret;
++ }
++ }
++
++ return ret;
++}
++
++void *arch_kexec_kernel_image_load(struct kimage *image, char *kernel,
++ unsigned long kernel_len, char *initrd,
++ unsigned long initrd_len, char *cmdline,
++ unsigned long cmdline_len)
++{
++ int idx = image->file_handler_idx;
++
++ if (idx < 0)
++ return ERR_PTR(-ENOEXEC);
++
++ return kexec_file_type[idx].load(image, kernel, kernel_len, initrd,
++ initrd_len, cmdline, cmdline_len);
++}
++
++void arch_kimage_file_post_load_cleanup(struct kimage *image)
++{
++	int idx = image->file_handler_idx;
++
++	if (kexec_file_type[idx].cleanup)
++		kexec_file_type[idx].cleanup(image);
++}
+--- a/arch/x86/syscalls/syscall_64.tbl
++++ b/arch/x86/syscalls/syscall_64.tbl
+@@ -320,6 +320,7 @@
+ 311 64 process_vm_writev sys_process_vm_writev
+ 312 common kcmp sys_kcmp
+ 313 common finit_module sys_finit_module
++314 common kexec_file_load sys_kexec_file_load
+
+ #
+ # x32-specific system call numbers start at 512 to avoid cache impact
+--- a/include/linux/kexec.h
++++ b/include/linux/kexec.h
+@@ -110,13 +110,60 @@ struct kimage {
+ #define KEXEC_TYPE_DEFAULT 0
+ #define KEXEC_TYPE_CRASH 1
+ unsigned int preserve_context : 1;
++ /* If set, we are using file mode kexec syscall */
++ unsigned int file_mode : 1;
+
+ #ifdef ARCH_HAS_KIMAGE_ARCH
+ struct kimage_arch arch;
+ #endif
++
++ /* Additional Fields for file based kexec syscall */
++ void *kernel_buf;
++ unsigned long kernel_buf_len;
++
++ void *initrd_buf;
++ unsigned long initrd_buf_len;
++
++ char *cmdline_buf;
++ unsigned long cmdline_buf_len;
++
++ /* index of file handler in array */
++ int file_handler_idx;
++
++ /* Image loader handling the kernel can store a pointer here */
++ void * image_loader_data;
+ };
+
++/*
++ * Keeps a track of buffer parameters as provided by caller for requesting
++ * memory placement of buffer.
++ */
++struct kexec_buf {
++ struct kimage *image;
++ char *buffer;
++ unsigned long bufsz;
++ unsigned long memsz;
++ unsigned long buf_align;
++ unsigned long buf_min;
++ unsigned long buf_max;
++ int top_down; /* allocate from top of memory hole */
++};
+
++typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size);
++typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf,
++ unsigned long kernel_len, char *initrd,
++ unsigned long initrd_len, char *cmdline,
++ unsigned long cmdline_len);
++typedef int (kexec_prep_entry_t)(struct kimage *image);
++typedef int (kexec_cleanup_t)(struct kimage *image);
++
++struct kexec_file_type {
++ const char *name;
++ kexec_probe_t *probe;
++ kexec_load_t *load;
++ kexec_prep_entry_t *prep_entry;
++ kexec_cleanup_t *cleanup;
++};
+
+ /* kexec interface functions */
+ extern void machine_kexec(struct kimage *image);
+@@ -127,6 +174,11 @@ extern asmlinkage long sys_kexec_load(un
+ struct kexec_segment __user *segments,
+ unsigned long flags);
+ extern int kernel_kexec(void);
++extern int kexec_add_buffer(struct kimage *image, char *buffer,
++ unsigned long bufsz, unsigned long memsz,
++ unsigned long buf_align, unsigned long buf_min,
++				unsigned long buf_max, int top_down,
++ unsigned long *load_addr);
+ #ifdef CONFIG_COMPAT
+ extern asmlinkage long compat_sys_kexec_load(unsigned long entry,
+ unsigned long nr_segments,
+@@ -135,6 +187,8 @@ extern asmlinkage long compat_sys_kexec_
+ #endif
+ extern struct page *kimage_alloc_control_pages(struct kimage *image,
+ unsigned int order);
++extern void kimage_set_start_addr(struct kimage *image, unsigned long start);
++
+ extern void crash_kexec(struct pt_regs *);
+ int kexec_should_crash(struct task_struct *);
+ void crash_save_cpu(struct pt_regs *regs, int cpu);
+@@ -182,6 +236,9 @@ extern struct kimage *kexec_crash_image;
+ #define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT)
+ #endif
+
++/* List of defined/legal kexec file flags */
++#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH)
++
+ #define VMCOREINFO_BYTES (4096)
+ #define VMCOREINFO_NOTE_NAME "VMCOREINFO"
+ #define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -301,6 +301,9 @@ asmlinkage long sys_restart_syscall(void
+ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
+ struct kexec_segment __user *segments,
+ unsigned long flags);
++asmlinkage long sys_kexec_file_load(int kernel_fd, int initrd_fd,
++ const char __user * cmdline_ptr,
++ unsigned long cmdline_len, unsigned long flags);
+
+ asmlinkage long sys_exit(int error_code);
+ asmlinkage long sys_exit_group(int error_code);
+--- a/include/uapi/linux/kexec.h
++++ b/include/uapi/linux/kexec.h
+@@ -13,6 +13,10 @@
+ #define KEXEC_PRESERVE_CONTEXT 0x00000002
+ #define KEXEC_ARCH_MASK 0xffff0000
+
++/* Kexec file load interface flags */
++#define KEXEC_FILE_UNLOAD 0x00000001
++#define KEXEC_FILE_ON_CRASH 0x00000002
++
+ /* These values match the ELF architecture values.
+ * Unless there is a good reason that should continue to be the case.
+ */
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -120,6 +120,11 @@ static struct page *kimage_alloc_page(st
+ gfp_t gfp_mask,
+ unsigned long dest);
+
++void kimage_set_start_addr(struct kimage *image, unsigned long start)
++{
++ image->start = start;
++}
++
+ static int copy_user_segment_list(struct kimage *image,
+ unsigned long nr_segments,
+ struct kexec_segment __user *segments)
+@@ -256,6 +261,225 @@ static struct kimage *do_kimage_alloc_in
+
+ static void kimage_free_page_list(struct list_head *list);
+
++static int copy_file_from_fd(int fd, void **buf, unsigned long *buf_len)
++{
++ struct fd f = fdget(fd);
++ int ret = 0;
++ struct kstat stat;
++ loff_t pos;
++ ssize_t bytes = 0;
++
++ if (!f.file)
++ return -EBADF;
++
++ ret = vfs_getattr(&f.file->f_path, &stat);
++ if (ret)
++ goto out;
++
++ if (stat.size > INT_MAX) {
++ ret = -EFBIG;
++ goto out;
++ }
++
++ /* Don't hand 0 to vmalloc, it whines. */
++ if (stat.size == 0) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ *buf = vmalloc(stat.size);
++ if (!*buf) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ pos = 0;
++ while (pos < stat.size) {
++ bytes = kernel_read(f.file, pos, (char *)(*buf) + pos,
++ stat.size - pos);
++ if (bytes < 0) {
++ vfree(*buf);
++ ret = bytes;
++ goto out;
++ }
++
++ if (bytes == 0)
++ break;
++ pos += bytes;
++ }
++
++ *buf_len = pos;
++
++out:
++ fdput(f);
++ return ret;
++}
++
++/* Architectures can provide this probe function */
++int __attribute__ ((weak))
++arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
++ unsigned long buf_len)
++{
++ return -ENOEXEC;
++}
++
++void * __attribute__ ((weak))
++arch_kexec_kernel_image_load(struct kimage *image, char *kernel,
++ unsigned long kernel_len, char *initrd,
++ unsigned long initrd_len, char *cmdline,
++ unsigned long cmdline_len)
++{
++ return ERR_PTR(-ENOEXEC);
++}
++
++void __attribute__ ((weak))
++arch_kimage_file_post_load_cleanup(struct kimage *image)
++{
++ return;
++}
++
++/*
++ * Free up temporary buffers allocated which are not needed after the image
++ * has been loaded.
++ *
++ * Free up memory used by the kernel, initrd and command line. These are
++ * temporary allocations which are not needed any more once the buffers
++ * have been loaded into separate segments and copied elsewhere.
++ */
++static void kimage_file_post_load_cleanup(struct kimage *image)
++{
++ if (image->kernel_buf) {
++ vfree(image->kernel_buf);
++ image->kernel_buf = NULL;
++ }
++
++ if (image->initrd_buf) {
++ vfree(image->initrd_buf);
++ image->initrd_buf = NULL;
++ }
++
++ if (image->cmdline_buf) {
++ vfree(image->cmdline_buf);
++ image->cmdline_buf = NULL;
++ }
++
++	/* See if the architecture has anything to clean up post load */
++ arch_kimage_file_post_load_cleanup(image);
++}
++
++/*
++ * In file mode, the list of segments is prepared by the kernel. Copy the
++ * relevant data from user space, do error checking and prepare the
++ * segment list.
++ */
++static int kimage_file_prepare_segments(struct kimage *image, int kernel_fd,
++ int initrd_fd, const char __user *cmdline_ptr,
++ unsigned long cmdline_len)
++{
++ int ret = 0;
++ void *ldata;
++
++ ret = copy_file_from_fd(kernel_fd, &image->kernel_buf,
++ &image->kernel_buf_len);
++ if (ret)
++ goto out;
++
++ /* Call arch image probe handlers */
++ ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
++ image->kernel_buf_len);
++
++ if (ret)
++ goto out;
++
++ ret = copy_file_from_fd(initrd_fd, &image->initrd_buf,
++ &image->initrd_buf_len);
++ if (ret)
++ goto out;
++
++	image->cmdline_buf = vzalloc(cmdline_len);
++	if (!image->cmdline_buf) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++ ret = copy_from_user(image->cmdline_buf, cmdline_ptr, cmdline_len);
++ if (ret) {
++ ret = -EFAULT;
++ goto out;
++ }
++
++ image->cmdline_buf_len = cmdline_len;
++
++ /* command line should be a string with last byte null */
++ if (image->cmdline_buf[cmdline_len - 1] != '\0') {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /* Call arch image load handlers */
++ ldata = arch_kexec_kernel_image_load(image,
++ image->kernel_buf, image->kernel_buf_len,
++ image->initrd_buf, image->initrd_buf_len,
++ image->cmdline_buf, image->cmdline_buf_len);
++
++ if (IS_ERR(ldata)) {
++ ret = PTR_ERR(ldata);
++ goto out;
++ }
++
++ image->image_loader_data = ldata;
++out:
++ return ret;
++}
++
++static int kimage_file_normal_alloc(struct kimage **rimage, int kernel_fd,
++ int initrd_fd, const char __user *cmdline_ptr,
++ unsigned long cmdline_len)
++{
++ int result;
++ struct kimage *image;
++
++ /* Allocate and initialize a controlling structure */
++ image = do_kimage_alloc_init();
++ if (!image)
++ return -ENOMEM;
++
++ image->file_mode = 1;
++ image->file_handler_idx = -1;
++
++ result = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
++ cmdline_ptr, cmdline_len);
++ if (result)
++ goto out_free_image;
++
++ result = sanity_check_segment_list(image);
++ if (result)
++ goto out_free_post_load_bufs;
++
++ result = -ENOMEM;
++ image->control_code_page = kimage_alloc_control_pages(image,
++ get_order(KEXEC_CONTROL_PAGE_SIZE));
++ if (!image->control_code_page) {
++ printk(KERN_ERR "Could not allocate control_code_buffer\n");
++ goto out_free_post_load_bufs;
++ }
++
++ image->swap_page = kimage_alloc_control_pages(image, 0);
++ if (!image->swap_page) {
++ printk(KERN_ERR "Could not allocate swap buffer\n");
++ goto out_free_control_pages;
++ }
++
++ *rimage = image;
++ return 0;
++
++out_free_control_pages:
++ kimage_free_page_list(&image->control_pages);
++out_free_post_load_bufs:
++ kimage_file_post_load_cleanup(image);
++ kfree(image->image_loader_data);
++out_free_image:
++ kfree(image);
++ return result;
++}
++
+ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
+ unsigned long nr_segments,
+ struct kexec_segment __user *segments)
+@@ -679,6 +903,14 @@ static void kimage_free(struct kimage *i
+
+ /* Free the kexec control pages... */
+ kimage_free_page_list(&image->control_pages);
++
++ kfree(image->image_loader_data);
++
++ /*
++	 * Free up any temporary buffers allocated. This path might be hit
++	 * if an error occurred long after the buffer allocation.
++ */
++ kimage_file_post_load_cleanup(image);
+ kfree(image);
+ }
+
+@@ -843,7 +1075,11 @@ static int kimage_load_normal_segment(st
+ PAGE_SIZE - (maddr & ~PAGE_MASK));
+ uchunk = min(ubytes, mchunk);
+
+- result = copy_from_user(ptr, buf, uchunk);
++ /* For file based kexec, source pages are in kernel memory */
++ if (image->file_mode)
++ memcpy(ptr, buf, uchunk);
++ else
++ result = copy_from_user(ptr, buf, uchunk);
+ kunmap(page);
+ if (result) {
+ result = -EFAULT;
+@@ -1093,6 +1329,72 @@ asmlinkage long compat_sys_kexec_load(un
+ }
+ #endif
+
++SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd, const char __user *, cmdline_ptr, unsigned long, cmdline_len, unsigned long, flags)
++{
++ int ret = 0, i;
++ struct kimage **dest_image, *image;
++
++ /* We only trust the superuser with rebooting the system. */
++ if (!capable(CAP_SYS_BOOT))
++ return -EPERM;
++
++ pr_debug("kexec_file_load: kernel_fd=%d initrd_fd=%d cmdline=0x%p"
++ " cmdline_len=%lu flags=0x%lx\n", kernel_fd, initrd_fd,
++ cmdline_ptr, cmdline_len, flags);
++
++ /* Make sure we have a legal set of flags */
++ if (flags != (flags & KEXEC_FILE_FLAGS))
++ return -EINVAL;
++
++ image = NULL;
++
++ if (!mutex_trylock(&kexec_mutex))
++ return -EBUSY;
++
++ dest_image = &kexec_image;
++ if (flags & KEXEC_FILE_ON_CRASH)
++ dest_image = &kexec_crash_image;
++
++ if (flags & KEXEC_FILE_UNLOAD)
++ goto exchange;
++
++ ret = kimage_file_normal_alloc(&image, kernel_fd, initrd_fd,
++ cmdline_ptr, cmdline_len);
++ if (ret)
++ goto out;
++
++ ret = machine_kexec_prepare(image);
++ if (ret)
++ goto out;
++
++ for (i = 0; i < image->nr_segments; i++) {
++ struct kexec_segment *ksegment;
++
++ ksegment = &image->segment[i];
++ pr_debug("Loading segment %d: buf=0x%p bufsz=0x%lx mem=0x%lx"
++ " memsz=0x%lx\n", i, ksegment->buf, ksegment->bufsz,
++ ksegment->mem, ksegment->memsz);
++ ret = kimage_load_segment(image, &image->segment[i]);
++ if (ret)
++ goto out;
++ pr_debug("Done loading segment %d\n", i);
++ }
++
++ kimage_terminate(image);
++
++ /*
++ * Free up any temporary buffers allocated which are not needed
++ * after image has been loaded
++ */
++ kimage_file_post_load_cleanup(image);
++exchange:
++ image = xchg(dest_image, image);
++out:
++ mutex_unlock(&kexec_mutex);
++ kimage_free(image);
++ return ret;
++}
++
+ void crash_kexec(struct pt_regs *regs)
+ {
+ /* Take the kexec_mutex here to prevent sys_kexec_load
+@@ -1647,6 +1949,188 @@ static int __init crash_save_vmcoreinfo_
+
+ module_init(crash_save_vmcoreinfo_init)
+
++static int kexec_add_segment(struct kimage *image, char *buf,
++ unsigned long bufsz, unsigned long mem, unsigned long memsz)
++{
++ struct kexec_segment *ksegment;
++
++ ksegment = &image->segment[image->nr_segments];
++ ksegment->buf = buf;
++ ksegment->bufsz = bufsz;
++ ksegment->mem = mem;
++ ksegment->memsz = memsz;
++ image->nr_segments++;
++
++ return 0;
++}
++
++static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
++ struct kexec_buf *kbuf)
++{
++ struct kimage *image = kbuf->image;
++ unsigned long temp_start, temp_end;
++
++ temp_end = min(end, kbuf->buf_max);
++ temp_start = temp_end - kbuf->memsz;
++
++ do {
++ /* align down start */
++ temp_start = temp_start & (~ (kbuf->buf_align - 1));
++
++ if (temp_start < start || temp_start < kbuf->buf_min)
++ return 0;
++
++ temp_end = temp_start + kbuf->memsz - 1;
++
++ /*
++ * Make sure this does not conflict with any of existing
++ * segments
++ */
++ if (kimage_is_destination_range(image, temp_start, temp_end)) {
++ temp_start = temp_start - PAGE_SIZE;
++ continue;
++ }
++
++ /* We found a suitable memory range */
++ break;
++ } while(1);
++
++ /* If we are here, we found a suitable memory range */
++ kexec_add_segment(image, kbuf->buffer, kbuf->bufsz, temp_start,
++ kbuf->memsz);
++
++ /* Stop navigating through remaining System RAM ranges */
++ return 1;
++}
++
++static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
++ struct kexec_buf *kbuf)
++{
++ struct kimage *image = kbuf->image;
++ unsigned long temp_start, temp_end;
++
++ temp_start = max(start, kbuf->buf_min);
++
++ do {
++ temp_start = ALIGN(temp_start, kbuf->buf_align);
++ temp_end = temp_start + kbuf->memsz - 1;
++
++ if (temp_end > end || temp_end > kbuf->buf_max)
++ return 0;
++ /*
++ * Make sure this does not conflict with any of existing
++ * segments
++ */
++ if (kimage_is_destination_range(image, temp_start, temp_end)) {
++ temp_start = temp_start + PAGE_SIZE;
++ continue;
++ }
++
++ /* We found a suitable memory range */
++ break;
++ } while(1);
++
++ /* If we are here, we found a suitable memory range */
++ kexec_add_segment(image, kbuf->buffer, kbuf->bufsz, temp_start,
++ kbuf->memsz);
++
++ /* Stop navigating through remaining System RAM ranges */
++ return 1;
++}
++
++static int walk_ram_range_callback(u64 start, u64 end, void *arg)
++{
++ struct kexec_buf *kbuf = (struct kexec_buf *)arg;
++ unsigned long sz = end - start + 1;
++
++ /* Returning 0 will take to next memory range */
++ if (sz < kbuf->memsz)
++ return 0;
++
++ if (end < kbuf->buf_min || start > kbuf->buf_max)
++ return 0;
++
++ /*
++ * Allocate memory top down with-in ram range. Otherwise bottom up
++ * allocation.
++ */
++ if (kbuf->top_down)
++ return locate_mem_hole_top_down(start, end, kbuf);
++ else
++ return locate_mem_hole_bottom_up(start, end, kbuf);
++}
++
++/*
++ * Helper functions for placing a buffer in a kexec segment. This assumes
++ * that kexec_mutex is held.
++ */
++int kexec_add_buffer(struct kimage *image, char *buffer,
++ unsigned long bufsz, unsigned long memsz,
++ unsigned long buf_align, unsigned long buf_min,
++ unsigned long buf_max, int top_down, unsigned long *load_addr)
++{
++
++ unsigned long nr_segments = image->nr_segments, new_nr_segments;
++ struct kexec_segment *ksegment;
++ struct kexec_buf *kbuf;
++
++ /* Currently adding segment this way is allowed only in file mode */
++ if (!image->file_mode)
++ return -EINVAL;
++
++ if (nr_segments >= KEXEC_SEGMENT_MAX)
++ return -EINVAL;
++
++ /*
++	 * Make sure we are not trying to add a buffer after allocating
++	 * control pages. All segments need to be placed before any
++	 * control pages are allocated, since the control page allocation
++	 * logic goes through the list of segments to make sure there are
++	 * no destination overlaps.
++ */
++ WARN_ONCE(!list_empty(&image->control_pages), "Adding kexec buffer"
++ " after allocating control pages\n");
++
++ kbuf = kzalloc(sizeof(struct kexec_buf), GFP_KERNEL);
++ if (!kbuf)
++ return -ENOMEM;
++
++ kbuf->image = image;
++ kbuf->buffer = buffer;
++ kbuf->bufsz = bufsz;
++ /* Align memsz to next page boundary */
++ kbuf->memsz = ALIGN(memsz, PAGE_SIZE);
++
++	/* Align to at least a page size boundary */
++ kbuf->buf_align = max(buf_align, PAGE_SIZE);
++ kbuf->buf_min = buf_min;
++ kbuf->buf_max = buf_max;
++ kbuf->top_down = top_down;
++
++ /* Walk the RAM ranges and allocate a suitable range for the buffer */
++ walk_system_ram_res(0, -1, kbuf, walk_ram_range_callback);
++
++ kbuf->image = NULL;
++ kfree(kbuf);
++
++ /*
++ * If range could be found successfully, it would have incremented
++ * the nr_segments value.
++ */
++ new_nr_segments = image->nr_segments;
++
++ /* A suitable memory range could not be found for buffer */
++ if (new_nr_segments == nr_segments)
++ return -EADDRNOTAVAIL;
++
++ /* Found a suitable memory range */
++
++ ksegment = &image->segment[new_nr_segments - 1];
++ *load_addr = ksegment->mem;
++ return 0;
++}
++
++
+ /*
+ * Move into place and start executing a preloaded standalone
+ * executable. If nothing was preloaded return an error.
+--- a/kernel/sys_ni.c
++++ b/kernel/sys_ni.c
+@@ -25,6 +25,7 @@ cond_syscall(sys_swapon);
+ cond_syscall(sys_swapoff);
+ cond_syscall(sys_kexec_load);
+ cond_syscall(compat_sys_kexec_load);
++cond_syscall(sys_kexec_file_load);
+ cond_syscall(sys_init_module);
+ cond_syscall(sys_finit_module);
+ cond_syscall(sys_delete_module);
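From user space, the new call would be exercised roughly as below. A hedged
sketch using syscall number 314 from the syscall_64.tbl hunk above; the file
paths and command line are illustrative only:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #define __NR_kexec_file_load 314    /* x86_64, per the table above */

    int main(void)
    {
        int kernel_fd = open("/boot/vmlinuz", O_RDONLY);
        int initrd_fd = open("/boot/initrd.img", O_RDONLY);
        const char *cmdline = "root=/dev/sda1 ro";
        long ret;

        if (kernel_fd < 0 || initrd_fd < 0)
            return 1;

        /* cmdline_len must count the trailing NUL; the kernel checks for it */
        ret = syscall(__NR_kexec_file_load, kernel_fd, initrd_fd,
                      cmdline, strlen(cmdline) + 1, 0UL);
        if (ret)
            perror("kexec_file_load");
        return ret ? 1 : 0;
    }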
diff --git a/k5 b/k5
new file mode 100644
index 00000000000000..ad225f9863a0aa
--- /dev/null
+++ b/k5
@@ -0,0 +1,622 @@
+From vgoyal@redhat.com Wed Nov 20 09:51:43 2013
+From: Vivek Goyal <vgoyal@redhat.com>
+Date: Wed, 20 Nov 2013 12:50:50 -0500
+Subject: [PATCH 5/6] kexec-bzImage: Support for loading bzImage using 64bit entry
+To: linux-kernel@vger.kernel.org, kexec@lists.infradead.org
+Cc: ebiederm@xmission.com, hpa@zytor.com, mjg59@srcf.ucam.org, greg@kroah.com, Vivek Goyal <vgoyal@redhat.com>
+Message-ID: <1384969851-7251-6-git-send-email-vgoyal@redhat.com>
+
+
+This is loader-specific code which can load a bzImage and set it up for
+64-bit entry. It does not take care of 32-bit entry or real-mode entry
+yet.
+
+Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
+---
+ arch/x86/include/asm/kexec-bzimage.h | 12 +
+ arch/x86/include/asm/kexec.h | 26 ++
+ arch/x86/kernel/Makefile | 2
+ arch/x86/kernel/kexec-bzimage.c | 375 +++++++++++++++++++++++++++++++++++
+ arch/x86/kernel/machine_kexec_64.c | 4
+ arch/x86/kernel/purgatory_entry_64.S | 119 +++++++++++
+ 6 files changed, 537 insertions(+), 1 deletion(-)
+ create mode 100644 arch/x86/include/asm/kexec-bzimage.h
+ create mode 100644 arch/x86/kernel/kexec-bzimage.c
+ create mode 100644 arch/x86/kernel/purgatory_entry_64.S
+
+--- /dev/null
++++ b/arch/x86/include/asm/kexec-bzimage.h
+@@ -0,0 +1,12 @@
++#ifndef _ASM_BZIMAGE_H
++#define _ASM_BZIMAGE_H
++
++extern int bzImage64_probe(const char *buf, unsigned long len);
++extern void *bzImage64_load(struct kimage *image, char *kernel,
++ unsigned long kernel_len, char *initrd,
++ unsigned long initrd_len, char *cmdline,
++ unsigned long cmdline_len);
++extern int bzImage64_prep_entry(struct kimage *image);
++extern int bzImage64_cleanup(struct kimage *image);
++
++#endif /* _ASM_BZIMAGE_H */
+--- a/arch/x86/include/asm/kexec.h
++++ b/arch/x86/include/asm/kexec.h
+@@ -15,6 +15,9 @@
+ # define PAGES_NR 4
+ #endif
+
++#define KEXEC_PURGATORY_PAGE_SIZE 4096
++#define KEXEC_PURGATORY_CODE_MAX_SIZE 2048
++
+ # define KEXEC_CONTROL_CODE_MAX_SIZE 2048
+
+ #ifndef __ASSEMBLY__
+@@ -141,6 +144,9 @@ relocate_kernel(unsigned long indirectio
+ unsigned long page_list,
+ unsigned long start_address,
+ unsigned int preserve_context);
++void purgatory_entry64(void);
++extern unsigned long purgatory_entry64_regs;
++extern struct desc_struct entry64_gdt;
+ #endif
+
+ #define ARCH_HAS_KIMAGE_ARCH
+@@ -161,6 +167,26 @@ struct kimage_arch {
+ pmd_t *pmd;
+ pte_t *pte;
+ };
++
++struct kexec_entry64_regs {
++ uint64_t rax;
++ uint64_t rbx;
++ uint64_t rcx;
++ uint64_t rdx;
++ uint64_t rsi;
++ uint64_t rdi;
++ uint64_t rsp;
++ uint64_t rbp;
++ uint64_t r8;
++ uint64_t r9;
++ uint64_t r10;
++ uint64_t r11;
++ uint64_t r12;
++ uint64_t r13;
++ uint64_t r14;
++ uint64_t r15;
++ uint64_t rip;
++};
+ #endif
+
+ typedef void crash_vmclear_fn(void);
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -68,6 +68,7 @@ obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.
+ obj-$(CONFIG_X86_TSC) += trace_clock.o
+ obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
+ obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
++obj-$(CONFIG_KEXEC) += kexec-bzimage.o
+ obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
+ obj-y += kprobes/
+ obj-$(CONFIG_MODULES) += module.o
+@@ -122,4 +123,5 @@ ifeq ($(CONFIG_X86_64),y)
+
+ obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
+ obj-y += vsmp_64.o
++ obj-$(CONFIG_KEXEC) += purgatory_entry_64.o
+ endif
+--- /dev/null
++++ b/arch/x86/kernel/kexec-bzimage.c
+@@ -0,0 +1,375 @@
++#include <linux/string.h>
++#include <linux/printk.h>
++#include <linux/errno.h>
++#include <linux/slab.h>
++#include <linux/kexec.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++
++#include <asm/bootparam.h>
++#include <asm/setup.h>
++
++#ifdef CONFIG_X86_64
++
++struct bzimage64_data {
++ unsigned long kernel_load_addr;
++ unsigned long bootparams_load_addr;
++
++ /*
++ * Temporary buffer to hold bootparams buffer. This should be
++ * freed once the bootparam segment has been loaded.
++ */
++ void *bootparams_buf;
++ struct page *purgatory_page;
++};
++
++int bzImage64_probe(const char *buf, unsigned long len)
++{
++ int ret = -ENOEXEC;
++ struct setup_header *header;
++
++ if (len < 2 * 512) {
++ pr_debug("File is too short to be a bzImage\n");
++ return ret;
++ }
++
++ header = (struct setup_header *)(buf + 0x1F1);
++ if (memcmp((char *)&header->header, "HdrS", 4) != 0) {
++ pr_debug("Not a bzImage\n");
++ return ret;
++ }
++
++ if (header->boot_flag != 0xAA55) {
++ /* No x86 boot sector present */
++ pr_debug("No x86 boot sector present\n");
++ return ret;
++ }
++
++ if (header->version < 0x020C) {
++ /* Must be at least protocol version 2.12 */
++ pr_debug("Must be at least protocol version 2.12\n");
++ return ret;
++ }
++
++ if ((header->loadflags & 1) == 0) {
++ /* Not a bzImage */
++ pr_debug("zImage not a bzImage\n");
++ return ret;
++ }
++
++ if ((header->xloadflags & 3) != 3) {
++ /* XLF_KERNEL_64 and XLF_CAN_BE_LOADED_ABOVE_4G should be set */
++ pr_debug("Not a relocatable bzImage64\n");
++ return ret;
++ }
++
++ /* I've got a bzImage */
++ pr_debug("It's a relocatable bzImage64\n");
++ ret = 0;
++
++ return ret;
++}
++
++static int setup_memory_map_entries(struct boot_params *params)
++{
++ unsigned int nr_e820_entries;
++
++ /* TODO: What about EFI */
++ nr_e820_entries = e820_saved.nr_map;
++ if (nr_e820_entries > E820MAX)
++ nr_e820_entries = E820MAX;
++
++ params->e820_entries = nr_e820_entries;
++ memcpy(&params->e820_map, &e820_saved.map,
++ nr_e820_entries * sizeof(struct e820entry));
++
++ return 0;
++}
++
++static void setup_linux_system_parameters(struct boot_params *params)
++{
++ unsigned int nr_e820_entries;
++ unsigned long long mem_k, start, end;
++ int i;
++
++ /* Get subarch from existing bootparams */
++ params->hdr.hardware_subarch = boot_params.hdr.hardware_subarch;
++
++ /* Copying screen_info will do? */
++ memcpy(&params->screen_info, &boot_params.screen_info,
++ sizeof(struct screen_info));
++
++ /* Fill in memsize later */
++ params->screen_info.ext_mem_k = 0;
++ params->alt_mem_k = 0;
++
++ /* Default APM info */
++ memset(&params->apm_bios_info, 0, sizeof(params->apm_bios_info));
++
++ /* Default drive info */
++ memset(&params->hd0_info, 0, sizeof(params->hd0_info));
++ memset(&params->hd1_info, 0, sizeof(params->hd1_info));
++
++ /* Default sysdesc table */
++ params->sys_desc_table.length = 0;
++
++ setup_memory_map_entries(params);
++ nr_e820_entries = params->e820_entries;
++
++ for(i = 0; i < nr_e820_entries; i++) {
++ if (params->e820_map[i].type != E820_RAM)
++ continue;
++ start = params->e820_map[i].addr;
++ end = params->e820_map[i].addr + params->e820_map[i].size - 1;
++
++ if ((start <= 0x100000) && end > 0x100000) {
++ mem_k = (end >> 10) - (0x100000 >> 10);
++ params->screen_info.ext_mem_k = mem_k;
++ params->alt_mem_k = mem_k;
++ if (mem_k > 0xfc00)
++ params->screen_info.ext_mem_k = 0xfc00; /* 64M*/
++ if (mem_k > 0xffffffff)
++ params->alt_mem_k = 0xffffffff;
++ }
++ }
++
++ /* Setup EDD info */
++ memcpy(params->eddbuf, boot_params.eddbuf,
++ EDDMAXNR * sizeof(struct edd_info));
++ params->eddbuf_entries = boot_params.eddbuf_entries;
++
++ memcpy(params->edd_mbr_sig_buffer, boot_params.edd_mbr_sig_buffer,
++ EDD_MBR_SIG_MAX * sizeof(unsigned int));
++}
++
++static void setup_initrd(struct boot_params *boot_params, unsigned long initrd_load_addr, unsigned long initrd_len)
++{
++ boot_params->hdr.ramdisk_image = initrd_load_addr & 0xffffffffUL;
++ boot_params->hdr.ramdisk_size = initrd_len & 0xffffffffUL;
++
++ boot_params->ext_ramdisk_image = initrd_load_addr >> 32;
++ boot_params->ext_ramdisk_size = initrd_len >> 32;
++}
++
++static void setup_cmdline(struct boot_params *boot_params,
++ unsigned long bootparams_load_addr,
++ unsigned long cmdline_offset, char *cmdline,
++ unsigned long cmdline_len)
++{
++ char *cmdline_ptr = ((char *)boot_params) + cmdline_offset;
++ unsigned long cmdline_ptr_phys;
++ uint32_t cmdline_low_32, cmdline_ext_32;
++
++ memcpy(cmdline_ptr, cmdline, cmdline_len);
++ cmdline_ptr[cmdline_len - 1] = '\0';
++
++ cmdline_ptr_phys = bootparams_load_addr + cmdline_offset;
++ cmdline_low_32 = cmdline_ptr_phys & 0xffffffffUL;
++ cmdline_ext_32 = cmdline_ptr_phys >> 32;
++
++ boot_params->hdr.cmd_line_ptr = cmdline_low_32;
++ if (cmdline_ext_32)
++ boot_params->ext_cmd_line_ptr = cmdline_ext_32;
++}
++
++void *bzImage64_load(struct kimage *image, char *kernel,
++ unsigned long kernel_len,
++ char *initrd, unsigned long initrd_len,
++ char *cmdline, unsigned long cmdline_len)
++{
++
++ struct setup_header *header;
++ int setup_sects, kern16_size_needed, kern16_size, ret = 0;
++ unsigned long setup_size, setup_header_size;
++ struct boot_params *params;
++ unsigned long bootparam_load_addr, kernel_load_addr, initrd_load_addr;
++ unsigned long kernel_bufsz, kernel_memsz, kernel_align;
++ char *kernel_buf;
++ struct bzimage64_data *ldata;
++
++ header = (struct setup_header *)(kernel + 0x1F1);
++ setup_sects = header->setup_sects;
++ if (setup_sects == 0)
++ setup_sects = 4;
++
++ kern16_size = (setup_sects + 1) * 512;
++ if (kernel_len < kern16_size) {
++ pr_debug("bzImage truncated\n");
++ return ERR_PTR(-ENOEXEC);
++ }
++
++ if (cmdline_len > header->cmdline_size) {
++ pr_debug("Kernel command line too long\n");
++ return ERR_PTR(-EINVAL);
++ }
++
++ /* Allocate loader specific data */
++ ldata = kzalloc(sizeof(struct bzimage64_data), GFP_KERNEL);
++ if (!ldata)
++ return ERR_PTR(-ENOMEM);
++
++ /* Argument/parameter segment */
++ kern16_size_needed = kern16_size;
++ if (kern16_size_needed < 4096)
++ kern16_size_needed = 4096;
++
++ setup_size = kern16_size_needed + cmdline_len;
++ params = kzalloc(setup_size, GFP_KERNEL);
++ if (!params) {
++ ret = -ENOMEM;
++ goto out_free_loader_data;
++ }
++
++ /* Copy setup header onto bootparams. */
++ setup_header_size = 0x0202 + kernel[0x0201] - 0x1F1;
++
++ /* Is there a limit on setup header size? */
++ memcpy(&params->hdr, (kernel + 0x1F1), setup_header_size);
++ ret = kexec_add_buffer(image, (char *)params, setup_size,
++ setup_size, 16, 0x3000, -1, 1, &bootparam_load_addr);
++ if (ret)
++ goto out_free_params;
++ pr_debug("Loaded boot_param and command line at 0x%lx\n",
++ bootparam_load_addr);
++
++ /* Load kernel */
++ kernel_buf = kernel + kern16_size;
++ kernel_bufsz = kernel_len - kern16_size;
++ kernel_memsz = ALIGN(header->init_size, 4096);
++ kernel_align = header->kernel_alignment;
++
++ ret = kexec_add_buffer(image, kernel_buf,
++ kernel_bufsz, kernel_memsz, kernel_align, 0x100000,
++ -1, 1, &kernel_load_addr);
++ if (ret)
++ goto out_free_params;
++
++ pr_debug("Loaded 64bit kernel at 0x%lx sz = 0x%lx\n", kernel_load_addr,
++ kernel_memsz);
++
++ /* Load initrd high */
++ if (initrd) {
++ ret = kexec_add_buffer(image, initrd, initrd_len, initrd_len,
++ 4096, 0x10000000, ULONG_MAX, 1, &initrd_load_addr);
++ if (ret)
++ goto out_free_params;
++
++ pr_debug("Loaded initrd at 0x%lx sz = 0x%lx\n",
++ initrd_load_addr, initrd_len);
++ setup_initrd(params, initrd_load_addr, initrd_len);
++ }
++
++ setup_cmdline(params, bootparam_load_addr, kern16_size_needed,
++ cmdline, cmdline_len);
++
++ /* bootloader info. Do we need a separate ID for kexec kernel loader? */
++ params->hdr.type_of_loader = 0x0D << 4;
++ params->hdr.loadflags = 0;
++
++ setup_linux_system_parameters(params);
++
++ /*
++ * Allocate a purgatory page. For 64bit entry point, purgatory
++ * code can be anywhere.
++ *
++	 * The control page allocation logic goes through the segment list
++	 * to make sure the allocated page is not a destination page. So
++	 * allocate the control page after all required segments have been
++	 * prepared.
++ */
++ ldata->purgatory_page = kimage_alloc_control_pages(image,
++ get_order(KEXEC_PURGATORY_PAGE_SIZE));
++
++ if (!ldata->purgatory_page) {
++ printk(KERN_ERR "Could not allocate purgatory page\n");
++ ret = -ENOMEM;
++ goto out_free_params;
++ }
++
++ /*
++ * Store pointer to params so that it could be freed after loading
++ * params segment has been loaded and contents have been copied
++ * somewhere else.
++ */
++ ldata->bootparams_buf = params;
++ ldata->kernel_load_addr = kernel_load_addr;
++ ldata->bootparams_load_addr = bootparam_load_addr;
++ return ldata;
++
++out_free_params:
++ kfree(params);
++out_free_loader_data:
++ kfree(ldata);
++ return ERR_PTR(ret);
++}
++
++int bzImage64_prep_entry(struct kimage *image)
++{
++ struct bzimage64_data *ldata;
++ char *purgatory_page;
++ unsigned long regs_offset, gdt_offset, purgatory_page_phys;
++ struct kexec_entry64_regs *regs;
++ char *gdt_ptr;
++ unsigned long long *gdt_addr;
++
++ if (!image->file_mode)
++ return 0;
++
++ ldata = image->image_loader_data;
++ if (!ldata)
++ return -EINVAL;
++
++ /* Copy purgatory code to its control page */
++ purgatory_page = page_address(ldata->purgatory_page);
++
++ /* Physical address of purgatory page */
++ purgatory_page_phys = PFN_PHYS(page_to_pfn(ldata->purgatory_page));
++
++ memcpy(purgatory_page, purgatory_entry64,
++ KEXEC_PURGATORY_CODE_MAX_SIZE);
++
++ /* Set registers appropriately */
++ regs_offset = (unsigned long)&purgatory_entry64_regs -
++ (unsigned long)purgatory_entry64;
++ regs = (struct kexec_entry64_regs *) (purgatory_page + regs_offset);
++
++ regs->rbx = 0; /* Bootstrap Processor */
++ regs->rsi = ldata->bootparams_load_addr;
++ regs->rip = ldata->kernel_load_addr + 0x200;
++
++ /* Fix up gdt */
++ gdt_offset = (unsigned long)&entry64_gdt -
++ (unsigned long)purgatory_entry64;
++
++ gdt_ptr = purgatory_page + gdt_offset;
++
++ /* Skip a word which contains size of gdt table */
++ gdt_addr = (unsigned long long *)(gdt_ptr + 2);
++
++ *gdt_addr = (unsigned long long)gdt_ptr;
++
++ /*
++ * Update the relocated address of gdt. By the time we load gdt
++ * in purgatory, we are running using identity mapped tables.
++ * Load identity mapped address here.
++ */
++ *gdt_addr = (unsigned long long)(purgatory_page_phys + gdt_offset);
++
++ /*
++	 * Jump to the purgatory control page. By the time we jump to
++	 * purgatory, we are using identity-mapped page tables
++ */
++ kimage_set_start_addr(image, purgatory_page_phys);
++ return 0;
++}
++
++/* This cleanup function is called after various segments have been loaded */
++int bzImage64_cleanup(struct kimage *image)
++{
++ struct bzimage64_data *ldata = image->image_loader_data;
++
++ kfree(ldata->bootparams_buf);
++ ldata->bootparams_buf = NULL;
++ return 0;
++}
++
++#endif /* CONFIG_X86_64 */
+--- a/arch/x86/kernel/machine_kexec_64.c
++++ b/arch/x86/kernel/machine_kexec_64.c
+@@ -21,10 +21,12 @@
+ #include <asm/tlbflush.h>
+ #include <asm/mmu_context.h>
+ #include <asm/debugreg.h>
++#include <asm/kexec-bzimage.h>
+
+ /* arch dependent functionality related to kexec file based syscall */
+ static struct kexec_file_type kexec_file_type[]={
+- {"", NULL, NULL, NULL, NULL},
++ {"bzImage64", bzImage64_probe, bzImage64_load, bzImage64_prep_entry,
++ bzImage64_cleanup},
+ };
+
+ static int nr_file_types = sizeof(kexec_file_type)/sizeof(kexec_file_type[0]);
+--- /dev/null
++++ b/arch/x86/kernel/purgatory_entry_64.S
+@@ -0,0 +1,119 @@
++/*
++ * Copyright (C) 2013 Red Hat Inc.
++ *
++ * Author(s): Vivek Goyal <vgoyal@redhat.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation (version 2 of the License).
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++
++/*
++ * One page for purgatory. Code occupies first KEXEC_PURGATORY_CODE_MAX_SIZE
++ * bytes. Rest is for data/stack etc.
++ */
++#include <asm/page.h>
++
++ .text
++ .align PAGE_SIZE
++ .code64
++ .globl purgatory_entry64, purgatory_entry64_regs, entry64_gdt
++
++
++purgatory_entry64:
++ /* Setup a gdt that should be preserved */
++ lgdt entry64_gdt(%rip)
++
++ /* load the data segments */
++ movl $0x18, %eax /* data segment */
++ movl %eax, %ds
++ movl %eax, %es
++ movl %eax, %ss
++ movl %eax, %fs
++ movl %eax, %gs
++
++ /* Setup new stack */
++ leaq stack_init(%rip), %rsp
++ pushq $0x10 /* CS */
++ leaq new_cs_exit(%rip), %rax
++ pushq %rax
++ lretq
++new_cs_exit:
++
++ /*
++ * Load the registers except rsp. rsp is already loaded with stack
++ * at the end of this page
++ */
++ movq rax(%rip), %rax
++ movq rbx(%rip), %rbx
++ movq rcx(%rip), %rcx
++ movq rdx(%rip), %rdx
++ movq rsi(%rip), %rsi
++ movq rdi(%rip), %rdi
++ movq rbp(%rip), %rbp
++ movq r8(%rip), %r8
++ movq r9(%rip), %r9
++ movq r10(%rip), %r10
++ movq r11(%rip), %r11
++ movq r12(%rip), %r12
++ movq r13(%rip), %r13
++ movq r14(%rip), %r14
++ movq r15(%rip), %r15
++
++ /* Jump to the new code... */
++ jmpq *rip(%rip)
++
++ .balign 16
++purgatory_entry64_regs:
++rax: .quad 0x00000000
++rbx: .quad 0x00000000
++rcx: .quad 0x00000000
++rdx: .quad 0x00000000
++rsi: .quad 0x00000000
++rdi: .quad 0x00000000
++rsp: .quad 0x00000000
++rbp: .quad 0x00000000
++r8: .quad 0x00000000
++r9: .quad 0x00000000
++r10: .quad 0x00000000
++r11: .quad 0x00000000
++r12: .quad 0x00000000
++r13: .quad 0x00000000
++r14: .quad 0x00000000
++r15: .quad 0x00000000
++rip: .quad 0x00000000
++
++ /* GDT */
++ .balign 16
++entry64_gdt:
++ /* 0x00 unusable segment
++ * 0x08 unused
++ * so use them as gdt ptr
++ */
++ .word gdt_end - entry64_gdt - 1
++ .quad entry64_gdt
++ .word 0, 0, 0
++
++ /* 0x10 4GB flat code segment */
++ .word 0xFFFF, 0x0000, 0x9A00, 0x00AF
++
++ /* 0x18 4GB flat data segment */
++ .word 0xFFFF, 0x0000, 0x9200, 0x00CF
++gdt_end:
++
++ .globl kexec_purgatory_code_size
++.set kexec_purgatory_code_size, . - purgatory_entry64
++
++/* Fill rest of the page with zeros to be used as stack */
++stack: .fill purgatory_entry64 + PAGE_SIZE - ., 1, 0
++stack_init:
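The register block at purgatory_entry64_regs above matches, quad for quad, the
struct kexec_entry64_regs the later patches fill in. A minimal sketch of the
loader side (not part of the series; set_purgatory_entry() and regs_offset are
hypothetical, with regs_offset assumed to be purgatory_entry64_regs -
purgatory_entry64, known when the purgatory blob is built):

#include <stdint.h>
#include <string.h>

/* Mirrors the .quad layout at purgatory_entry64_regs (17 slots). */
struct entry64_regs {
	uint64_t rax, rbx, rcx, rdx, rsi, rdi, rsp, rbp;
	uint64_t r8, r9, r10, r11, r12, r13, r14, r15;
	uint64_t rip;	/* target of the final "jmpq *rip(%rip)" */
};

/* Hypothetical helper: patch a writable copy of the purgatory page so
 * the stub lands on the second kernel's 64bit entry point. The rsp
 * slot is deliberately left zero -- the stub loads its own stack from
 * the end of the page before the final jump. */
static void set_purgatory_entry(void *page_copy, unsigned long regs_offset,
				uint64_t kernel_entry)
{
	struct entry64_regs *r =
		(struct entry64_regs *)((char *)page_copy + regs_offset);

	memset(r, 0, sizeof(*r));
	r->rip = kernel_entry;
}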
diff --git a/k6 b/k6
new file mode 100644
index 00000000000000..e3a5e3ee201377
--- /dev/null
+++ b/k6
@@ -0,0 +1,944 @@
+From vgoyal@redhat.com Wed Nov 20 09:51:43 2013
+From: Vivek Goyal <vgoyal@redhat.com>
+Date: Wed, 20 Nov 2013 12:50:51 -0500
+Subject: [PATCH 6/6] kexec: Support for Kexec on panic using new system call
+To: linux-kernel@vger.kernel.org, kexec@lists.infradead.org
+Cc: ebiederm@xmission.com, hpa@zytor.com, mjg59@srcf.ucam.org, greg@kroah.com, Vivek Goyal <vgoyal@redhat.com>
+Message-ID: <1384969851-7251-7-git-send-email-vgoyal@redhat.com>
+
+
+This patch adds support for loading a kexec on panic (kdump) kernel using
+the new system call.
+
+Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
+---
+ arch/x86/include/asm/crash.h | 9
+ arch/x86/include/asm/kexec.h | 17 +
+ arch/x86/kernel/crash.c | 585 +++++++++++++++++++++++++++++++++++++
+ arch/x86/kernel/kexec-bzimage.c | 63 +++
+ arch/x86/kernel/machine_kexec_64.c | 1
+ kernel/kexec.c | 69 ++++
+ 6 files changed, 731 insertions(+), 13 deletions(-)
+ create mode 100644 arch/x86/include/asm/crash.h
+
+--- /dev/null
++++ b/arch/x86/include/asm/crash.h
+@@ -0,0 +1,9 @@
++#ifndef _ASM_X86_CRASH_H
++#define _ASM_X86_CRASH_H
++
++int load_crashdump_segments(struct kimage *image);
++int crash_copy_backup_region(struct kimage *image);
++int crash_setup_memmap_entries(struct kimage *image,
++ struct boot_params *params);
++
++#endif /* _ASM_X86_CRASH_H */
+--- a/arch/x86/include/asm/kexec.h
++++ b/arch/x86/include/asm/kexec.h
+@@ -64,6 +64,10 @@
+ # define KEXEC_ARCH KEXEC_ARCH_X86_64
+ #endif
+
++/* Memory to back up during crash kdump */
++#define KEXEC_BACKUP_SRC_START (0UL)
++#define KEXEC_BACKUP_SRC_END (655360UL) /* 640K */
++
+ /*
+ * CPU does not save ss and sp on stack if execution is already
+ * running in kernel mode at the time of NMI occurrence. This code
+@@ -166,8 +170,21 @@ struct kimage_arch {
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
++ /* Details of backup region */
++ unsigned long backup_src_start;
++ unsigned long backup_src_sz;
++
++ /* Physical address of backup segment */
++ unsigned long backup_load_addr;
++
++ /* Core ELF header buffer */
++ unsigned long elf_headers;
++ unsigned long elf_headers_sz;
++ unsigned long elf_load_addr;
+ };
++#endif /* CONFIG_X86_32 */
+
++#ifdef CONFIG_X86_64
+ struct kexec_entry64_regs {
+ uint64_t rax;
+ uint64_t rbx;
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -4,6 +4,9 @@
+ * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
+ *
+ * Copyright (C) IBM Corporation, 2004. All rights reserved.
++ * Copyright (C) Red Hat Inc., 2013. All rights reserved.
++ * Authors:
++ * Vivek Goyal <vgoyal@redhat.com>
+ *
+ */
+
+@@ -17,6 +20,7 @@
+ #include <linux/elf.h>
+ #include <linux/elfcore.h>
+ #include <linux/module.h>
++#include <linux/slab.h>
+
+ #include <asm/processor.h>
+ #include <asm/hardirq.h>
+@@ -29,6 +33,45 @@
+ #include <asm/reboot.h>
+ #include <asm/virtext.h>
+
++/* Alignment required for elf header segment */
++#define ELF_CORE_HEADER_ALIGN 4096
++
++/* This primarily represents the number of split ranges due to exclusion */
++#define CRASH_MAX_RANGES 16
++
++struct crash_mem_range {
++ unsigned long long start, end;
++};
++
++struct crash_mem {
++ unsigned int nr_ranges;
++ struct crash_mem_range ranges[CRASH_MAX_RANGES];
++};
++
++/* Misc data about ram ranges needed to prepare elf headers */
++struct crash_elf_data {
++ struct kimage *image;
++ /*
++ * Total number of ram ranges we have after various adjustments for
++ * GART, crash reserved region etc.
++ */
++ unsigned int max_nr_ranges;
++ unsigned long gart_start, gart_end;
++
++ /* Pointer to elf header */
++ void *ehdr;
++ /* Pointer to next phdr */
++ void *bufp;
++ struct crash_mem mem;
++};
++
++/* Used while preparing memory map entries for the second kernel */
++struct crash_memmap_data {
++ struct boot_params *params;
++ /* Type of memory */
++ unsigned int type;
++};
++
+ int in_crash_kexec;
+
+ /*
+@@ -138,3 +181,545 @@ void native_machine_crash_shutdown(struc
+ #endif
+ crash_save_cpu(regs, safe_smp_processor_id());
+ }
++
++#ifdef CONFIG_X86_64
++static int get_nr_ram_ranges_callback(unsigned long start_pfn,
++ unsigned long nr_pfn, void *arg)
++{
++ int *nr_ranges = arg;
++
++ (*nr_ranges)++;
++ return 0;
++}
++
++static int get_gart_ranges_callback(u64 start, u64 end, void *arg)
++{
++ struct crash_elf_data *ced = arg;
++
++ ced->gart_start = start;
++ ced->gart_end = end;
++
++ /* Not expecting more than 1 gart aperture */
++ return 1;
++}
++
++
++/* Gather all the required information to prepare elf headers for ram regions */
++static int fill_up_ced(struct crash_elf_data *ced, struct kimage *image)
++{
++ unsigned int nr_ranges = 0;
++
++ ced->image = image;
++
++ walk_system_ram_range(0, -1, &nr_ranges,
++ get_nr_ram_ranges_callback);
++
++ ced->max_nr_ranges = nr_ranges;
++
++ /*
++ * We don't create ELF headers for the GART aperture, as an attempt
++ * to dump this memory in the second kernel leads to a hang/crash.
++ * If a GART aperture is present, one needs to exclude that region,
++ * and that exclusion can require an extra phdr.
++ */
++
++ walk_ram_res("GART", IORESOURCE_MEM, 0, -1,
++ ced, get_gart_ranges_callback);
++
++ /*
++ * If we have a GART region, excluding it could potentially split
++ * a memory range, resulting in an extra header. Account for that.
++ */
++ if (ced->gart_end)
++ ced->max_nr_ranges++;
++
++ /* Exclusion of crash region could split memory ranges */
++ ced->max_nr_ranges++;
++
++ /* If crashk_low_res is there, another range split possible */
++ if (crashk_low_res.end != 0)
++ ced->max_nr_ranges++;
++
++ return 0;
++}
++
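++/*
++ * Carve [mstart, mend] out of mem->ranges[]. For example, excluding
++ * [10, 20] from a lone range [0, 100] truncates that entry to [0, 9]
++ * and inserts [21, 100] after it as a new range.
++ */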
++static int exclude_mem_range(struct crash_mem *mem,
++ unsigned long long mstart, unsigned long long mend)
++{
++ int i, j;
++ unsigned long long start, end;
++ struct crash_mem_range temp_range = {0, 0};
++
++ for (i = 0; i < mem->nr_ranges; i++) {
++ start = mem->ranges[i].start;
++ end = mem->ranges[i].end;
++
++ if (mstart > end || mend < start)
++ continue;
++
++ /* Truncate any area outside of range */
++ if (mstart < start)
++ mstart = start;
++ if (mend > end)
++ mend = end;
++
++ /* Found completely overlapping range */
++ if (mstart == start && mend == end) {
++ mem->ranges[i].start = 0;
++ mem->ranges[i].end = 0;
++ if (i < mem->nr_ranges - 1) {
++ /* Shift rest of the ranges to left */
++ for (j = i; j < mem->nr_ranges - 1; j++) {
++ mem->ranges[j].start =
++ mem->ranges[j+1].start;
++ mem->ranges[j].end =
++ mem->ranges[j+1].end;
++ }
++ }
++ mem->nr_ranges--;
++ return 0;
++ }
++
++ if (mstart > start && mend < end) {
++ /* Split original range */
++ mem->ranges[i].end = mstart - 1;
++ temp_range.start = mend + 1;
++ temp_range.end = end;
++ } else if (mstart != start)
++ mem->ranges[i].end = mstart - 1;
++ else
++ mem->ranges[i].start = mend + 1;
++ break;
++ }
++
++ /* If a split happened, add the new range to the array */
++ if (!temp_range.end)
++ return 0;
++
++ /* Split happened */
++ if (i == CRASH_MAX_RANGES - 1) {
++ pr_err("Too many crash ranges after split\n");
++ return -ENOMEM;
++ }
++
++ /* Location where new range should go */
++ j = i + 1;
++ if (j < mem->nr_ranges) {
++ /* Move over all ranges one place */
++ for (i = mem->nr_ranges - 1; i >= j; i--)
++ mem->ranges[i + 1] = mem->ranges[i];
++ }
++
++ mem->ranges[j].start = temp_range.start;
++ mem->ranges[j].end = temp_range.end;
++ mem->nr_ranges++;
++ return 0;
++}
++
++/*
++ * Look for any unwanted ranges between mstart and mend and remove them.
++ * This might split ranges; the resulting pieces land in ced->mem.ranges[].
++ */
++static int elf_header_exclude_ranges(struct crash_elf_data *ced,
++ unsigned long long mstart, unsigned long long mend)
++{
++ struct crash_mem *cmem = &ced->mem;
++ int ret = 0;
++
++ memset(cmem->ranges, 0, sizeof(cmem->ranges));
++
++ cmem->ranges[0].start = mstart;
++ cmem->ranges[0].end = mend;
++ cmem->nr_ranges = 1;
++
++ /* Exclude crashkernel region */
++ ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
++ if (ret)
++ return ret;
++
++ ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
++ if (ret)
++ return ret;
++
++ /* Exclude GART region */
++ if (ced->gart_end) {
++ ret = exclude_mem_range(cmem, ced->gart_start, ced->gart_end);
++ if (ret)
++ return ret;
++ }
++
++ return ret;
++}
++
++static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
++{
++ struct crash_elf_data *ced = arg;
++ Elf64_Ehdr *ehdr;
++ Elf64_Phdr *phdr;
++ unsigned long mstart, mend;
++ struct kimage *image = ced->image;
++ struct crash_mem *cmem;
++ int ret, i;
++
++ ehdr = ced->ehdr;
++
++ /* Exclude unwanted mem ranges */
++ ret = elf_header_exclude_ranges(ced, start, end);
++ if (ret)
++ return ret;
++
++ /* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
++ cmem = &ced->mem;
++
++ for (i = 0; i < cmem->nr_ranges; i++) {
++ mstart = cmem->ranges[i].start;
++ mend = cmem->ranges[i].end;
++
++ phdr = ced->bufp;
++ ced->bufp += sizeof(Elf64_Phdr);
++
++ phdr->p_type = PT_LOAD;
++ phdr->p_flags = PF_R|PF_W|PF_X;
++ phdr->p_offset = mstart;
++
++ /*
++ * If a range matches backup region, adjust offset to backup
++ * segment.
++ */
++ if (mstart == image->arch.backup_src_start &&
++ (mend - mstart + 1) == image->arch.backup_src_sz)
++ phdr->p_offset = image->arch.backup_load_addr;
++
++ phdr->p_paddr = mstart;
++ phdr->p_vaddr = (unsigned long long) __va(mstart);
++ phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
++ phdr->p_align = 0;
++ ehdr->e_phnum++;
++ pr_debug("Crash PT_LOAD elf header. phdr=%p"
++ " vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d"
++ " p_offset=0x%llx\n", phdr, phdr->p_vaddr,
++ phdr->p_paddr, phdr->p_filesz, ehdr->e_phnum,
++ phdr->p_offset);
++ }
++
++ return ret;
++}
++
++static int prepare_elf64_headers(struct crash_elf_data *ced,
++ unsigned long *addr, unsigned long *sz)
++{
++ Elf64_Ehdr *ehdr;
++ Elf64_Phdr *phdr;
++ unsigned long nr_cpus = NR_CPUS, nr_phdr, elf_sz;
++ unsigned char *buf, *bufp;
++ unsigned int cpu;
++ unsigned long long notes_addr;
++ int ret;
++
++ /* extra phdr for vmcoreinfo elf note */
++ nr_phdr = nr_cpus + 1;
++ nr_phdr += ced->max_nr_ranges;
++
++ /*
++ * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
++ * area on x86_64 (ffffffff80000000 - ffffffffa0000000).
++ * I think this is required by tools like gdb. So same physical
++ * memory will be mapped in two elf headers. One will contain kernel
++ * text virtual addresses and the other will have __va(physical) addresses.
++ */
++
++ nr_phdr++;
++ elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
++ elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);
++
++ buf = vzalloc(elf_sz);
++ if (!buf)
++ return -ENOMEM;
++
++ bufp = buf;
++ ehdr = (Elf64_Ehdr *)bufp;
++ bufp += sizeof(Elf64_Ehdr);
++ memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
++ ehdr->e_ident[EI_CLASS] = ELFCLASS64;
++ ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
++ ehdr->e_ident[EI_VERSION] = EV_CURRENT;
++ ehdr->e_ident[EI_OSABI] = ELF_OSABI;
++ memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
++ ehdr->e_type = ET_CORE;
++ ehdr->e_machine = ELF_ARCH;
++ ehdr->e_version = EV_CURRENT;
++ ehdr->e_entry = 0;
++ ehdr->e_phoff = sizeof(Elf64_Ehdr);
++ ehdr->e_shoff = 0;
++ ehdr->e_flags = 0;
++ ehdr->e_ehsize = sizeof(Elf64_Ehdr);
++ ehdr->e_phentsize = sizeof(Elf64_Phdr);
++ ehdr->e_phnum = 0;
++ ehdr->e_shentsize = 0;
++ ehdr->e_shnum = 0;
++ ehdr->e_shstrndx = 0;
++
++ /* Prepare one phdr of type PT_NOTE for each present cpu */
++ for_each_present_cpu(cpu) {
++ phdr = (Elf64_Phdr *)bufp;
++ bufp += sizeof(Elf64_Phdr);
++ phdr->p_type = PT_NOTE;
++ phdr->p_flags = 0;
++ notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
++ phdr->p_offset = phdr->p_paddr = notes_addr;
++ phdr->p_vaddr = 0;
++ phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
++ phdr->p_align = 0;
++ (ehdr->e_phnum)++;
++ }
++
++ /* Prepare one PT_NOTE header for vmcoreinfo */
++ phdr = (Elf64_Phdr *)bufp;
++ bufp += sizeof(Elf64_Phdr);
++ phdr->p_type = PT_NOTE;
++ phdr->p_flags = 0;
++ phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
++ phdr->p_vaddr = 0;
++ phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
++ phdr->p_align = 0;
++ (ehdr->e_phnum)++;
++
++#ifdef CONFIG_X86_64
++ /* Prepare PT_LOAD type program header for kernel text region */
++ phdr = (Elf64_Phdr *)bufp;
++ bufp += sizeof(Elf64_Phdr);
++ phdr->p_type = PT_LOAD;
++ phdr->p_flags = PF_R|PF_W|PF_X;
++ phdr->p_vaddr = (Elf64_Addr)_text;
++ phdr->p_filesz = phdr->p_memsz = _end - _text;
++ phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
++ phdr->p_align = 0;
++ (ehdr->e_phnum)++;
++#endif
++
++ /* Prepare PT_LOAD headers for system ram chunks. */
++ ced->ehdr = ehdr;
++ ced->bufp = bufp;
++ ret = walk_system_ram_res(0, -1, ced,
++ prepare_elf64_ram_headers_callback);
++ if (ret < 0)
++ return ret;
++
++ *addr = (unsigned long)buf;
++ *sz = elf_sz;
++ return 0;
++}
++
++/* Prepare elf headers. Return addr and size */
++static int prepare_elf_headers(struct kimage *image, unsigned long *addr,
++ unsigned long *sz)
++{
++ struct crash_elf_data *ced;
++ int ret;
++
++ ced = kzalloc(sizeof(*ced), GFP_KERNEL);
++ if (!ced)
++ return -ENOMEM;
++
++ ret = fill_up_ced(ced, image);
++ if (ret)
++ goto out;
++
++ /* By default prepare 64bit headers */
++ ret = prepare_elf64_headers(ced, addr, sz);
++out:
++ kfree(ced);
++ return ret;
++}
++
++static int add_e820_entry(struct boot_params *params, struct e820entry *entry)
++{
++ unsigned int nr_e820_entries;
++
++ nr_e820_entries = params->e820_entries;
++ if (nr_e820_entries >= E820MAX)
++ return 1;
++
++ memcpy(&params->e820_map[nr_e820_entries], entry,
++ sizeof(struct e820entry));
++ params->e820_entries++;
++
++ pr_debug("Add e820 entry to bootparams. addr=0x%llx size=0x%llx"
++ " type=%d\n", entry->addr, entry->size, entry->type);
++ return 0;
++}
++
++static int memmap_entry_callback(u64 start, u64 end, void *arg)
++{
++ struct crash_memmap_data *cmd = arg;
++ struct boot_params *params = cmd->params;
++ struct e820entry ei;
++
++ ei.addr = start;
++ ei.size = end - start + 1;
++ ei.type = cmd->type;
++ add_e820_entry(params, &ei);
++
++ return 0;
++}
++
++static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
++ unsigned long long mstart, unsigned long long mend)
++{
++ unsigned long start, end;
++ int ret = 0;
++
++ memset(cmem->ranges, 0, sizeof(cmem->ranges));
++
++ cmem->ranges[0].start = mstart;
++ cmem->ranges[0].end = mend;
++ cmem->nr_ranges = 1;
++
++ /* Exclude Backup region */
++ start = image->arch.backup_load_addr;
++ end = start + image->arch.backup_src_sz - 1;
++ ret = exclude_mem_range(cmem, start, end);
++ if (ret)
++ return ret;
++
++ /* Exclude elf header region */
++ start = image->arch.elf_load_addr;
++ end = start + image->arch.elf_headers_sz - 1;
++ ret = exclude_mem_range(cmem, start, end);
++ return ret;
++}
++
++/* Prepare memory map for crash dump kernel */
++int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
++{
++ int i, ret = 0;
++ unsigned long flags;
++ struct e820entry ei;
++ struct crash_memmap_data cmd;
++ struct crash_mem *cmem;
++
++ cmem = vzalloc(sizeof(struct crash_mem));
++ if (!cmem)
++ return -ENOMEM;
++
++ memset(&cmd, 0, sizeof(struct crash_memmap_data));
++ cmd.params = params;
++
++ /* Add first 640K segment */
++ ei.addr = image->arch.backup_src_start;
++ ei.size = image->arch.backup_src_sz;
++ ei.type = E820_RAM;
++ add_e820_entry(params, &ei);
++
++ /* Add ACPI tables */
++ cmd.type = E820_ACPI;
++ flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++ walk_ram_res("ACPI Tables", flags, 0, -1, &cmd, memmap_entry_callback);
++
++ /* Add ACPI Non-volatile Storage */
++ cmd.type = E820_NVS;
++ walk_ram_res("ACPI Non-volatile Storage", flags, 0, -1, &cmd,
++ memmap_entry_callback);
++
++ /* Add crashk_low_res region */
++ if (crashk_low_res.end) {
++ ei.addr = crashk_low_res.start;
++ ei.size = crashk_low_res.end - crashk_low_res.start + 1;
++ ei.type = E820_RAM;
++ add_e820_entry(params, &ei);
++ }
++
++ /* Exclude some ranges from crashk_res and add rest to memmap */
++ ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
++ crashk_res.end);
++ if (ret)
++ goto out;
++
++ for (i = 0; i < cmem->nr_ranges; i++) {
++ ei.addr = cmem->ranges[i].start;
++ ei.size = cmem->ranges[i].end - ei.addr + 1;
++ ei.type = E820_RAM;
++
++ /* If entry is less than a page, skip it */
++ if (ei.size < PAGE_SIZE)
++ continue;
++ add_e820_entry(params, &ei);
++ }
++
++out:
++ vfree(cmem);
++ return ret;
++}
++
++static int determine_backup_region(u64 start, u64 end, void *arg)
++{
++ struct kimage *image = arg;
++
++ image->arch.backup_src_start = start;
++ image->arch.backup_src_sz = end - start + 1;
++
++ /* Expecting only one range for backup region */
++ return 1;
++}
++
++int load_crashdump_segments(struct kimage *image)
++{
++ unsigned long src_start, src_sz;
++ unsigned long elf_addr, elf_sz;
++ int ret;
++
++ /*
++ * Determine and load a segment for the backup area. The first 640K
++ * RAM region is the backup source.
++ */
++
++ ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
++ image, determine_backup_region);
++
++ /* Zero or positive return values are ok */
++ if (ret < 0)
++ return ret;
++
++ src_start = image->arch.backup_src_start;
++ src_sz = image->arch.backup_src_sz;
++
++ /* Add backup segment. */
++ if (src_sz) {
++ ret = kexec_add_buffer(image, __va(src_start), src_sz, src_sz,
++ PAGE_SIZE, 0, -1, 0,
++ &image->arch.backup_load_addr);
++ if (ret)
++ return ret;
++ }
++
++ /* Prepare elf headers and add a segment */
++ ret = prepare_elf_headers(image, &elf_addr, &elf_sz);
++ if (ret)
++ return ret;
++
++ image->arch.elf_headers = elf_addr;
++ image->arch.elf_headers_sz = elf_sz;
++
++ ret = kexec_add_buffer(image, (char *)elf_addr, elf_sz, elf_sz,
++ ELF_CORE_HEADER_ALIGN, 0, -1, 0,
++ &image->arch.elf_load_addr);
++ if (ret) {
++ vfree((void *)image->arch.elf_headers);
++ image->arch.elf_headers = 0;
++ }
++
++ return ret;
++}
++
++int crash_copy_backup_region(struct kimage *image)
++{
++ unsigned long dest_start, src_start, src_sz;
++
++ dest_start = image->arch.backup_load_addr;
++ src_start = image->arch.backup_src_start;
++ src_sz = image->arch.backup_src_sz;
++
++ memcpy(__va(dest_start), __va(src_start), src_sz);
++
++ return 0;
++}
++#endif /* CONFIG_X86_64 */
+--- a/arch/x86/kernel/kexec-bzimage.c
++++ b/arch/x86/kernel/kexec-bzimage.c
+@@ -8,6 +8,9 @@
+
+ #include <asm/bootparam.h>
+ #include <asm/setup.h>
++#include <asm/crash.h>
++
++#define MAX_ELFCOREHDR_STR_LEN 30 /* elfcorehdr=0x<64bit-value> */
+
+ #ifdef CONFIG_X86_64
+
+@@ -86,7 +89,8 @@ static int setup_memory_map_entries(stru
+ return 0;
+ }
+
+-static void setup_linux_system_parameters(struct boot_params *params)
++static void setup_linux_system_parameters(struct kimage *image,
++ struct boot_params *params)
+ {
+ unsigned int nr_e820_entries;
+ unsigned long long mem_k, start, end;
+@@ -113,7 +117,10 @@ static void setup_linux_system_parameter
+ /* Default sysdesc table */
+ params->sys_desc_table.length = 0;
+
+- setup_memory_map_entries(params);
++ if (image->type == KEXEC_TYPE_CRASH)
++ crash_setup_memmap_entries(image, params);
++ else
++ setup_memory_map_entries(params);
+ nr_e820_entries = params->e820_entries;
+
+ for(i = 0; i < nr_e820_entries; i++) {
+@@ -151,18 +158,23 @@ static void setup_initrd(struct boot_par
+ boot_params->ext_ramdisk_size = initrd_len >> 32;
+ }
+
+-static void setup_cmdline(struct boot_params *boot_params,
++static void setup_cmdline(struct kimage *image, struct boot_params *boot_params,
+ unsigned long bootparams_load_addr,
+ unsigned long cmdline_offset, char *cmdline,
+ unsigned long cmdline_len)
+ {
+ char *cmdline_ptr = ((char *)boot_params) + cmdline_offset;
+- unsigned long cmdline_ptr_phys;
++ unsigned long cmdline_ptr_phys, len;
+ uint32_t cmdline_low_32, cmdline_ext_32;
+
+ memcpy(cmdline_ptr, cmdline, cmdline_len);
++ if (image->type == KEXEC_TYPE_CRASH) {
++ len = sprintf(cmdline_ptr + cmdline_len - 1,
++ " elfcorehdr=0x%lx", image->arch.elf_load_addr);
++ cmdline_len += len;
++ }
+ cmdline_ptr[cmdline_len - 1] = '\0';
+-
++ pr_debug("Final command line is:%s\n", cmdline_ptr);
+ cmdline_ptr_phys = bootparams_load_addr + cmdline_offset;
+ cmdline_low_32 = cmdline_ptr_phys & 0xffffffffUL;
+ cmdline_ext_32 = cmdline_ptr_phys >> 32;
+@@ -203,17 +215,34 @@ void *bzImage64_load(struct kimage *imag
+ return ERR_PTR(-EINVAL);
+ }
+
++ /*
++ * In case of a crash dump, we will append elfcorehdr=<addr> to the
++ * command line. Make sure it does not overflow.
++ */
++ if (cmdline_len + MAX_ELFCOREHDR_STR_LEN > header->cmdline_size) {
++ pr_debug("Kernel command line too long\n");
++ return ERR_PTR(-EINVAL);
++ }
++
+ /* Allocate loader specific data */
+ ldata = kzalloc(sizeof(struct bzimage64_data), GFP_KERNEL);
+ if (!ldata)
+ return ERR_PTR(-ENOMEM);
+
++ /* Allocate and load backup region */
++ if (image->type == KEXEC_TYPE_CRASH) {
++ ret = load_crashdump_segments(image);
++ if (ret)
++ goto out_free_loader_data;
++ }
++
+ /* Argument/parameter segment */
+ kern16_size_needed = kern16_size;
+ if (kern16_size_needed < 4096)
+ kern16_size_needed = 4096;
+
+- setup_size = kern16_size_needed + cmdline_len;
++ setup_size = kern16_size_needed + cmdline_len + MAX_ELFCOREHDR_STR_LEN;
+ params = kzalloc(setup_size, GFP_KERNEL);
+ if (!params) {
+ ret = -ENOMEM;
+@@ -259,14 +288,14 @@ void *bzImage64_load(struct kimage *imag
+ setup_initrd(params, initrd_load_addr, initrd_len);
+ }
+
+- setup_cmdline(params, bootparam_load_addr, kern16_size_needed,
++ setup_cmdline(image, params, bootparam_load_addr, kern16_size_needed,
+ cmdline, cmdline_len);
+
+ /* bootloader info. Do we need a separate ID for kexec kernel loader? */
+ params->hdr.type_of_loader = 0x0D << 4;
+ params->hdr.loadflags = 0;
+
+- setup_linux_system_parameters(params);
++ setup_linux_system_parameters(image, params);
+
+ /*
+ * Allocate a purgatory page. For 64bit entry point, purgatory
+@@ -302,7 +331,7 @@ out_free_loader_data:
+ return ERR_PTR(ret);
+ }
+
+-int bzImage64_prep_entry(struct kimage *image)
++static int prepare_purgatory(struct kimage *image)
+ {
+ struct bzimage64_data *ldata;
+ char *purgatory_page;
+@@ -362,6 +391,22 @@ int bzImage64_prep_entry(struct kimage *
+ return 0;
+ }
+
++int bzImage64_prep_entry(struct kimage *image)
++{
++ int ret;
++
++ if (!image->file_mode)
++ return 0;
++
++ if (!image->image_loader_data)
++ return -EINVAL;
++
++ ret = prepare_purgatory(image);
++ if (ret)
++ return ret;
++
++ if (image->type == KEXEC_TYPE_CRASH)
++ crash_copy_backup_region(image);
++
++ return 0;
++}
++
+ /* This cleanup function is called after various segments have been loaded */
+ int bzImage64_cleanup(struct kimage *image)
+ {
+--- a/arch/x86/kernel/machine_kexec_64.c
++++ b/arch/x86/kernel/machine_kexec_64.c
+@@ -334,6 +334,7 @@ int arch_image_file_post_load_cleanup(st
+ {
+ int idx = image->file_handler_idx;
+
++ vfree((void *)image->arch.elf_headers);
+ if (kexec_file_type[idx].cleanup)
+ return kexec_file_type[idx].cleanup(image);
+ return 0;
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -524,7 +524,6 @@ static int kimage_normal_alloc(struct ki
+ *rimage = image;
+ return 0;
+
+-
+ out_free_control_pages:
+ kimage_free_page_list(&image->control_pages);
+ out_free_image:
+@@ -532,6 +531,54 @@ out_free_image:
+ return result;
+ }
+
++static int kimage_file_crash_alloc(struct kimage **rimage, int kernel_fd,
++ int initrd_fd, const char __user *cmdline_ptr,
++ unsigned long cmdline_len)
++{
++ int result;
++ struct kimage *image;
++
++ /* Allocate and initialize a controlling structure */
++ image = do_kimage_alloc_init();
++ if (!image)
++ return -ENOMEM;
++
++ image->file_mode = 1;
++ image->file_handler_idx = -1;
++
++ /* Enable the special crash kernel control page allocation policy. */
++ image->control_page = crashk_res.start;
++ image->type = KEXEC_TYPE_CRASH;
++
++ result = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
++ cmdline_ptr, cmdline_len);
++ if (result)
++ goto out_free_image;
++
++ result = sanity_check_segment_list(image);
++ if (result)
++ goto out_free_post_load_bufs;
++
++ result = -ENOMEM;
++ image->control_code_page = kimage_alloc_control_pages(image,
++ get_order(KEXEC_CONTROL_PAGE_SIZE));
++ if (!image->control_code_page) {
++ printk(KERN_ERR "Could not allocate control_code_buffer\n");
++ goto out_free_post_load_bufs;
++ }
++
++ *rimage = image;
++ return 0;
++
++out_free_post_load_bufs:
++ kimage_file_post_load_cleanup(image);
++ kfree(image->image_loader_data);
++out_free_image:
++ kfree(image);
++ return result;
++}
++
+ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
+ unsigned long nr_segments,
+ struct kexec_segment __user *segments)
+@@ -1130,7 +1177,12 @@ static int kimage_load_crash_segment(str
+ /* Zero the trailing part of the page */
+ memset(ptr + uchunk, 0, mchunk - uchunk);
+ }
+- result = copy_from_user(ptr, buf, uchunk);
++
++ /* For file based kexec, source pages are in kernel memory */
++ if (image->file_mode)
++ memcpy(ptr, buf, uchunk);
++ else
++ result = copy_from_user(ptr, buf, uchunk);
+ kexec_flush_icache_page(page);
+ kunmap(page);
+ if (result) {
+@@ -1358,7 +1410,11 @@ SYSCALL_DEFINE5(kexec_file_load, int, ke
+ if (flags & KEXEC_FILE_UNLOAD)
+ goto exchange;
+
+- ret = kimage_file_normal_alloc(&image, kernel_fd, initrd_fd,
++ if (flags & KEXEC_FILE_ON_CRASH)
++ ret = kimage_file_crash_alloc(&image, kernel_fd, initrd_fd,
++ cmdline_ptr, cmdline_len);
++ else
++ ret = kimage_file_normal_alloc(&image, kernel_fd, initrd_fd,
+ cmdline_ptr, cmdline_len);
+ if (ret)
+ goto out;
+@@ -2108,7 +2164,12 @@ int kexec_add_buffer(struct kimage *imag
+ kbuf->top_down = top_down;
+
+ /* Walk the RAM ranges and allocate a suitable range for the buffer */
+- walk_system_ram_res(0, -1, kbuf, walk_ram_range_callback);
++ if (image->type == KEXEC_TYPE_CRASH)
++ walk_ram_res("Crash kernel", IORESOURCE_MEM | IORESOURCE_BUSY,
++ crashk_res.start, crashk_res.end, kbuf,
++ walk_ram_range_callback);
++ else
++ walk_system_ram_res(0, -1, kbuf, walk_ram_range_callback);
+
+ kbuf->image = NULL;
+ kfree(kbuf);
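Taken together, the series lets user space load a crash kernel with a single
call. A minimal test sketch (not part of the series; the syscall number and
the KEXEC_FILE_ON_CRASH value below are placeholders for whatever the earlier
patches define, the file paths are examples, and the argument order follows
the kimage_file_crash_alloc() call above):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_kexec_file_load
#define __NR_kexec_file_load	317		/* placeholder number */
#endif
#define KEXEC_FILE_ON_CRASH	0x00000002	/* placeholder flag value */

int main(void)
{
	const char *cmdline = "root=/dev/sda1 maxcpus=1 reset_devices";
	int kernel_fd = open("/boot/vmlinuz", O_RDONLY);
	int initrd_fd = open("/boot/initrd.img", O_RDONLY);

	if (kernel_fd < 0 || initrd_fd < 0) {
		perror("open");
		return 1;
	}
	/* cmdline_len counts the trailing NUL; for the crash case the
	 * loader appends " elfcorehdr=..." to the command line itself. */
	if (syscall(__NR_kexec_file_load, kernel_fd, initrd_fd, cmdline,
		    strlen(cmdline) + 1, KEXEC_FILE_ON_CRASH)) {
		perror("kexec_file_load");
		return 1;
	}
	return 0;
}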
diff --git a/series b/series
index 563e5de733e576..3341f6df699225 100644
--- a/series
+++ b/series
@@ -8,6 +8,12 @@ staging-exfat-readdir-to-iterate-change.patch
pci-msi-fix.patch
+k1
+k2
+k3
+k4
+k5
+k6
xen-disable-clock-timer-when-shutting-down.patch
 # patches already in my git trees, but still here so I don't lose them.