From 78cb0945f7141961781f815168f6873ad2b7ed29 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Sat, 16 Mar 2024 12:18:21 +0100 Subject: powerpc: Handle error in mark_rodata_ro() and mark_initmem_nx() mark_rodata_ro() and mark_initmem_nx() use functions that can fail like set_memory_nx() and set_memory_ro(), leading to a not protected kernel. In case of failure, panic. Link: https://github.com/KSPP/linux/issues/7 Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://msgid.link/b16329611deb89e1af505d43f0e2a91310584d26.1710587887.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/book3s32/mmu.c | 7 +++++-- arch/powerpc/mm/mmu_decl.h | 8 ++++---- arch/powerpc/mm/nohash/8xx.c | 33 +++++++++++++++++++++------------ arch/powerpc/mm/nohash/e500.c | 10 +++++++--- arch/powerpc/mm/pgtable_32.c | 38 ++++++++++++++++++++++++++++---------- 5 files changed, 65 insertions(+), 31 deletions(-) diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index 5445587bfe8418..100f999871bc3b 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -193,7 +193,7 @@ static bool is_module_segment(unsigned long addr) return true; } -void mmu_mark_initmem_nx(void) +int mmu_mark_initmem_nx(void) { int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; int i; @@ -230,9 +230,10 @@ void mmu_mark_initmem_nx(void) mtsr(mfsr(i << 28) | 0x10000000, i << 28); } + return 0; } -void mmu_mark_rodata_ro(void) +int mmu_mark_rodata_ro(void) { int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; int i; @@ -245,6 +246,8 @@ void mmu_mark_rodata_ro(void) } update_bats(); + + return 0; } /* diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 8e84bc214d133c..6949c2c937e72f 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -160,11 +160,11 @@ static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; } #endif #if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_E500) -void mmu_mark_initmem_nx(void); -void mmu_mark_rodata_ro(void); +int mmu_mark_initmem_nx(void); +int mmu_mark_rodata_ro(void); #else -static inline void mmu_mark_initmem_nx(void) { } -static inline void mmu_mark_rodata_ro(void) { } +static inline int mmu_mark_initmem_nx(void) { return 0; } +static inline int mmu_mark_rodata_ro(void) { return 0; } #endif #ifdef CONFIG_PPC_8xx diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 6be6421086ed99..43d4842bb1c7a7 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -119,23 +119,26 @@ void __init mmu_mapin_immr(void) PAGE_KERNEL_NCG, MMU_PAGE_512K, true); } -static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, - pgprot_t prot, bool new) +static int mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, + pgprot_t prot, bool new) { unsigned long v = PAGE_OFFSET + offset; unsigned long p = offset; + int err = 0; WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K)); - for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K) - __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new); - for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M) - __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new); - for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K) - __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new); + for (; p < ALIGN(p, SZ_8M) && p < top && !err; p += SZ_512K, v += SZ_512K) + err = 
__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new); + for (; p < ALIGN_DOWN(top, SZ_8M) && p < top && !err; p += SZ_8M, v += SZ_8M) + err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new); + for (; p < ALIGN_DOWN(top, SZ_512K) && p < top && !err; p += SZ_512K, v += SZ_512K) + err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new); if (!new) flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top); + + return err; } unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) @@ -166,27 +169,33 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) return top; } -void mmu_mark_initmem_nx(void) +int mmu_mark_initmem_nx(void) { unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M); unsigned long sinittext = __pa(_sinittext); unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8; unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M); + int err = 0; if (!debug_pagealloc_enabled_or_kfence()) - mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false); + err = mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false); mmu_pin_tlb(block_mapped_ram, false); + + return err; } #ifdef CONFIG_STRICT_KERNEL_RWX -void mmu_mark_rodata_ro(void) +int mmu_mark_rodata_ro(void) { unsigned long sinittext = __pa(_sinittext); + int err; - mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false); + err = mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false); if (IS_ENABLED(CONFIG_PIN_TLB_DATA)) mmu_pin_tlb(block_mapped_ram, true); + + return err; } #endif diff --git a/arch/powerpc/mm/nohash/e500.c b/arch/powerpc/mm/nohash/e500.c index 921c3521ec113e..266fb22131fc1e 100644 --- a/arch/powerpc/mm/nohash/e500.c +++ b/arch/powerpc/mm/nohash/e500.c @@ -285,19 +285,23 @@ void __init adjust_total_lowmem(void) } #ifdef CONFIG_STRICT_KERNEL_RWX -void mmu_mark_rodata_ro(void) +int mmu_mark_rodata_ro(void) { unsigned long remapped; remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false); - WARN_ON(__max_low_memory != remapped); + if (WARN_ON(__max_low_memory != remapped)) + return -EINVAL; + + return 0; } #endif -void mmu_mark_initmem_nx(void) +int mmu_mark_initmem_nx(void) { /* Everything is done in mmu_mark_rodata_ro() */ + return 0; } void setup_initial_memory_limit(phys_addr_t first_memblock_base, diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index face94977cb2fb..cfd622ebf774b0 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -130,31 +130,41 @@ void __init mapin_ram(void) } } -void mark_initmem_nx(void) +static int __mark_initmem_nx(void) { unsigned long numpages = PFN_UP((unsigned long)_einittext) - PFN_DOWN((unsigned long)_sinittext); + int err; - mmu_mark_initmem_nx(); + err = mmu_mark_initmem_nx(); if (!v_block_mapped((unsigned long)_sinittext)) { - set_memory_nx((unsigned long)_sinittext, numpages); - set_memory_rw((unsigned long)_sinittext, numpages); + err = set_memory_nx((unsigned long)_sinittext, numpages); + if (err) + return err; + err = set_memory_rw((unsigned long)_sinittext, numpages); } + return err; +} + +void mark_initmem_nx(void) +{ + int err = __mark_initmem_nx(); + + if (err) + panic("%s() failed, err = %d\n", __func__, err); } #ifdef CONFIG_STRICT_KERNEL_RWX -void mark_rodata_ro(void) +static int __mark_rodata_ro(void) { unsigned long numpages; if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && mmu_has_feature(MMU_FTR_HPTE_TABLE)) pr_warn("This platform has HASH MMU, STRICT_MODULE_RWX won't work\n"); - if 
(v_block_mapped((unsigned long)_stext + 1)) { - mmu_mark_rodata_ro(); - return; - } + if (v_block_mapped((unsigned long)_stext + 1)) + return mmu_mark_rodata_ro(); /* * mark text and rodata as read only. __end_rodata is set by @@ -164,6 +174,14 @@ void mark_rodata_ro(void) numpages = PFN_UP((unsigned long)__end_rodata) - PFN_DOWN((unsigned long)_stext); - set_memory_ro((unsigned long)_stext, numpages); + return set_memory_ro((unsigned long)_stext, numpages); +} + +void mark_rodata_ro(void) +{ + int err = __mark_rodata_ro(); + + if (err) + panic("%s() failed, err = %d\n", __func__, err); } #endif -- cgit 1.2.3-korg From 56a34d799bfa53064e7b8bd354aacd176aeaecc8 Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Mon, 26 Feb 2024 16:00:08 +0530 Subject: kexec/kdump: make struct crash_mem available without CONFIG_CRASH_DUMP struct crash_mem defined under include/linux/crash_core.h represents a list of memory ranges. While it is used to represent memory ranges for kdump kernel, it can also be used for other kind of memory ranges. In fact, KEXEC_FILE_LOAD syscall in powerpc uses this structure to represent reserved memory ranges and exclude memory ranges needed to find the right memory regions to load kexec kernel. So, make the definition of crash_mem structure available for !CONFIG_CRASH_DUMP case too. Signed-off-by: Hari Bathini Acked-by: Baoquan He Signed-off-by: Michael Ellerman Link: https://msgid.link/20240226103010.589537-2-hbathini@linux.ibm.com --- include/linux/crash_core.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h index 23270b16e1dbf3..d33352c2e3865f 100644 --- a/include/linux/crash_core.h +++ b/include/linux/crash_core.h @@ -8,6 +8,12 @@ struct kimage; +struct crash_mem { + unsigned int max_nr_ranges; + unsigned int nr_ranges; + struct range ranges[] __counted_by(max_nr_ranges); +}; + #ifdef CONFIG_CRASH_DUMP int crash_shrink_memory(unsigned long new_size); @@ -51,12 +57,6 @@ static inline unsigned int crash_get_elfcorehdr_size(void) { return 0; } /* Alignment required for elf header segment */ #define ELF_CORE_HEADER_ALIGN 4096 -struct crash_mem { - unsigned int max_nr_ranges; - unsigned int nr_ranges; - struct range ranges[] __counted_by(max_nr_ranges); -}; - extern int crash_exclude_mem_range(struct crash_mem *mem, unsigned long long mstart, unsigned long long mend); -- cgit 1.2.3-korg From 33f2cc0a2e90f7177c49559b434191b02efd0cd5 Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Mon, 26 Feb 2024 16:00:09 +0530 Subject: powerpc/kexec: split CONFIG_KEXEC_FILE and CONFIG_CRASH_DUMP CONFIG_KEXEC_FILE does not have to select CONFIG_CRASH_DUMP. Move some code under CONFIG_CRASH_DUMP to support CONFIG_KEXEC_FILE and !CONFIG_CRASH_DUMP case. 
Signed-off-by: Hari Bathini Signed-off-by: Michael Ellerman Link: https://msgid.link/20240226103010.589537-3-hbathini@linux.ibm.com --- arch/powerpc/kexec/elf_64.c | 4 +- arch/powerpc/kexec/file_load_64.c | 269 ++++++++++++++++++++------------------ 2 files changed, 142 insertions(+), 131 deletions(-) diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c index 904016cf89ea46..6d8951e8e96663 100644 --- a/arch/powerpc/kexec/elf_64.c +++ b/arch/powerpc/kexec/elf_64.c @@ -47,7 +47,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, if (ret) return ERR_PTR(ret); - if (image->type == KEXEC_TYPE_CRASH) { + if (IS_ENABLED(CONFIG_CRASH_DUMP) && image->type == KEXEC_TYPE_CRASH) { /* min & max buffer values for kdump case */ kbuf.buf_min = pbuf.buf_min = crashk_res.start; kbuf.buf_max = pbuf.buf_max = @@ -70,7 +70,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, kexec_dprintk("Loaded purgatory at 0x%lx\n", pbuf.mem); /* Load additional segments needed for panic kernel */ - if (image->type == KEXEC_TYPE_CRASH) { + if (IS_ENABLED(CONFIG_CRASH_DUMP) && image->type == KEXEC_TYPE_CRASH) { ret = load_crashdump_segments_ppc64(image, &kbuf); if (ret) { pr_err("Failed to load kdump kernel segments\n"); diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c index 5b4c5cb2335485..1bc65de6174f3e 100644 --- a/arch/powerpc/kexec/file_load_64.c +++ b/arch/powerpc/kexec/file_load_64.c @@ -96,119 +96,6 @@ out: return ret; } -/** - * get_usable_memory_ranges - Get usable memory ranges. This list includes - * regions like crashkernel, opal/rtas & tce-table, - * that kdump kernel could use. - * @mem_ranges: Range list to add the memory ranges to. - * - * Returns 0 on success, negative errno on error. - */ -static int get_usable_memory_ranges(struct crash_mem **mem_ranges) -{ - int ret; - - /* - * Early boot failure observed on guests when low memory (first memory - * block?) is not added to usable memory. So, add [0, crashk_res.end] - * instead of [crashk_res.start, crashk_res.end] to workaround it. - * Also, crashed kernel's memory must be added to reserve map to - * avoid kdump kernel from using it. - */ - ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1); - if (ret) - goto out; - - ret = add_rtas_mem_range(mem_ranges); - if (ret) - goto out; - - ret = add_opal_mem_range(mem_ranges); - if (ret) - goto out; - - ret = add_tce_mem_ranges(mem_ranges); -out: - if (ret) - pr_err("Failed to setup usable memory ranges\n"); - return ret; -} - -/** - * get_crash_memory_ranges - Get crash memory ranges. This list includes - * first/crashing kernel's memory regions that - * would be exported via an elfcore. - * @mem_ranges: Range list to add the memory ranges to. - * - * Returns 0 on success, negative errno on error. 
- */ -static int get_crash_memory_ranges(struct crash_mem **mem_ranges) -{ - phys_addr_t base, end; - struct crash_mem *tmem; - u64 i; - int ret; - - for_each_mem_range(i, &base, &end) { - u64 size = end - base; - - /* Skip backup memory region, which needs a separate entry */ - if (base == BACKUP_SRC_START) { - if (size > BACKUP_SRC_SIZE) { - base = BACKUP_SRC_END + 1; - size -= BACKUP_SRC_SIZE; - } else - continue; - } - - ret = add_mem_range(mem_ranges, base, size); - if (ret) - goto out; - - /* Try merging adjacent ranges before reallocation attempt */ - if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges) - sort_memory_ranges(*mem_ranges, true); - } - - /* Reallocate memory ranges if there is no space to split ranges */ - tmem = *mem_ranges; - if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) { - tmem = realloc_mem_ranges(mem_ranges); - if (!tmem) - goto out; - } - - /* Exclude crashkernel region */ - ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end); - if (ret) - goto out; - - /* - * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL - * regions are exported to save their context at the time of - * crash, they should actually be backed up just like the - * first 64K bytes of memory. - */ - ret = add_rtas_mem_range(mem_ranges); - if (ret) - goto out; - - ret = add_opal_mem_range(mem_ranges); - if (ret) - goto out; - - /* create a separate program header for the backup region */ - ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE); - if (ret) - goto out; - - sort_memory_ranges(*mem_ranges, false); -out: - if (ret) - pr_err("Failed to setup crash memory ranges\n"); - return ret; -} - /** * get_reserved_memory_ranges - Get reserve memory ranges. This list includes * memory regions that should be added to the @@ -434,6 +321,120 @@ static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf, return ret; } +#ifdef CONFIG_CRASH_DUMP +/** + * get_usable_memory_ranges - Get usable memory ranges. This list includes + * regions like crashkernel, opal/rtas & tce-table, + * that kdump kernel could use. + * @mem_ranges: Range list to add the memory ranges to. + * + * Returns 0 on success, negative errno on error. + */ +static int get_usable_memory_ranges(struct crash_mem **mem_ranges) +{ + int ret; + + /* + * Early boot failure observed on guests when low memory (first memory + * block?) is not added to usable memory. So, add [0, crashk_res.end] + * instead of [crashk_res.start, crashk_res.end] to workaround it. + * Also, crashed kernel's memory must be added to reserve map to + * avoid kdump kernel from using it. + */ + ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1); + if (ret) + goto out; + + ret = add_rtas_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_opal_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_tce_mem_ranges(mem_ranges); +out: + if (ret) + pr_err("Failed to setup usable memory ranges\n"); + return ret; +} + +/** + * get_crash_memory_ranges - Get crash memory ranges. This list includes + * first/crashing kernel's memory regions that + * would be exported via an elfcore. + * @mem_ranges: Range list to add the memory ranges to. + * + * Returns 0 on success, negative errno on error. 
+ */ +static int get_crash_memory_ranges(struct crash_mem **mem_ranges) +{ + phys_addr_t base, end; + struct crash_mem *tmem; + u64 i; + int ret; + + for_each_mem_range(i, &base, &end) { + u64 size = end - base; + + /* Skip backup memory region, which needs a separate entry */ + if (base == BACKUP_SRC_START) { + if (size > BACKUP_SRC_SIZE) { + base = BACKUP_SRC_END + 1; + size -= BACKUP_SRC_SIZE; + } else + continue; + } + + ret = add_mem_range(mem_ranges, base, size); + if (ret) + goto out; + + /* Try merging adjacent ranges before reallocation attempt */ + if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges) + sort_memory_ranges(*mem_ranges, true); + } + + /* Reallocate memory ranges if there is no space to split ranges */ + tmem = *mem_ranges; + if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) { + tmem = realloc_mem_ranges(mem_ranges); + if (!tmem) + goto out; + } + + /* Exclude crashkernel region */ + ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end); + if (ret) + goto out; + + /* + * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL + * regions are exported to save their context at the time of + * crash, they should actually be backed up just like the + * first 64K bytes of memory. + */ + ret = add_rtas_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_opal_mem_range(mem_ranges); + if (ret) + goto out; + + /* create a separate program header for the backup region */ + ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE); + if (ret) + goto out; + + sort_memory_ranges(*mem_ranges, false); +out: + if (ret) + pr_err("Failed to setup crash memory ranges\n"); + return ret; +} + /** * check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries * @um_info: Usable memory buffer and ranges info. @@ -863,6 +864,7 @@ int load_crashdump_segments_ppc64(struct kimage *image, return 0; } +#endif /** * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global @@ -972,26 +974,14 @@ static unsigned int cpu_node_size(void) return size; } -/** - * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to - * setup FDT for kexec/kdump kernel. - * @image: kexec image being loaded. - * - * Returns the estimated extra size needed for kexec/kdump kernel FDT. - */ -unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image) +static unsigned int kdump_extra_fdt_size_ppc64(struct kimage *image) { unsigned int cpu_nodes, extra_size = 0; struct device_node *dn; u64 usm_entries; - // Budget some space for the password blob. There's already extra space - // for the key name - if (plpks_is_available()) - extra_size += (unsigned int)plpks_get_passwordlen(); - - if (image->type != KEXEC_TYPE_CRASH) - return extra_size; + if (!IS_ENABLED(CONFIG_CRASH_DUMP) || image->type != KEXEC_TYPE_CRASH) + return 0; /* * For kdump kernel, account for linux,usable-memory and @@ -1019,6 +1009,25 @@ unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image) return extra_size; } +/** + * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to + * setup FDT for kexec/kdump kernel. + * @image: kexec image being loaded. + * + * Returns the estimated extra size needed for kexec/kdump kernel FDT. + */ +unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image) +{ + unsigned int extra_size = 0; + + // Budget some space for the password blob. 
There's already extra space + // for the key name + if (plpks_is_available()) + extra_size += (unsigned int)plpks_get_passwordlen(); + + return extra_size + kdump_extra_fdt_size_ppc64(image); +} + /** * add_node_props - Reads node properties from device node structure and add * them to fdt. @@ -1171,6 +1180,7 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, struct crash_mem *umem = NULL, *rmem = NULL; int i, nr_ranges, ret; +#ifdef CONFIG_CRASH_DUMP /* * Restrict memory usage for kdump kernel by setting up * usable memory ranges and memory reserve map. @@ -1207,6 +1217,7 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, goto out; } } +#endif /* Update cpus nodes information to account hotplug CPUs. */ ret = update_cpus_node(fdt); @@ -1278,7 +1289,7 @@ int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf) buf_min = kbuf->buf_min; buf_max = kbuf->buf_max; /* Segments for kdump kernel should be within crashkernel region */ - if (kbuf->image->type == KEXEC_TYPE_CRASH) { + if (IS_ENABLED(CONFIG_CRASH_DUMP) && kbuf->image->type == KEXEC_TYPE_CRASH) { buf_min = (buf_min < crashk_res.start ? crashk_res.start : buf_min); buf_max = (buf_max > crashk_res.end ? -- cgit 1.2.3-korg From 5c4233cc0920cc90787aafe950b90f6c57a35b88 Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Mon, 26 Feb 2024 16:00:10 +0530 Subject: powerpc/kdump: Split KEXEC_CORE and CRASH_DUMP dependency Remove CONFIG_CRASH_DUMP dependency on CONFIG_KEXEC. CONFIG_KEXEC_CORE was used at places where CONFIG_CRASH_DUMP or CONFIG_CRASH_RESERVE was appropriate. Replace with appropriate #ifdefs to support CONFIG_KEXEC and !CONFIG_CRASH_DUMP configuration option. Also, make CONFIG_FA_DUMP dependent on CONFIG_CRASH_DUMP to avoid unmet dependencies for FA_DUMP with !CONFIG_KEXEC_CORE configuration option. Signed-off-by: Hari Bathini Signed-off-by: Michael Ellerman Link: https://msgid.link/20240226103010.589537-4-hbathini@linux.ibm.com --- arch/powerpc/Kconfig | 9 +--- arch/powerpc/include/asm/kexec.h | 98 ++++++++++++++++++------------------ arch/powerpc/kernel/prom.c | 2 +- arch/powerpc/kernel/setup-common.c | 2 +- arch/powerpc/kernel/smp.c | 4 +- arch/powerpc/kexec/Makefile | 3 +- arch/powerpc/kexec/core.c | 4 ++ arch/powerpc/platforms/powernv/smp.c | 2 +- 8 files changed, 61 insertions(+), 63 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a68b9e637eda04..1c4be337368604 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -607,11 +607,6 @@ config PPC64_SUPPORTS_MEMORY_FAILURE config ARCH_SUPPORTS_KEXEC def_bool PPC_BOOK3S || PPC_E500 || (44x && !SMP) -config ARCH_SELECTS_KEXEC - def_bool y - depends on KEXEC - select CRASH_DUMP - config ARCH_SUPPORTS_KEXEC_FILE def_bool PPC64 @@ -622,7 +617,6 @@ config ARCH_SELECTS_KEXEC_FILE def_bool y depends on KEXEC_FILE select KEXEC_ELF - select CRASH_DUMP select HAVE_IMA_KEXEC if IMA config PPC64_BIG_ENDIAN_ELF_ABI_V2 @@ -694,8 +688,7 @@ config ARCH_SELECTS_CRASH_DUMP config FA_DUMP bool "Firmware-assisted dump" - depends on PPC64 && (PPC_RTAS || PPC_POWERNV) - select CRASH_DUMP + depends on CRASH_DUMP && PPC64 && (PPC_RTAS || PPC_POWERNV) help A robust mechanism to get reliable kernel crash dump with assistance from firmware. 
This approach does not use kexec, diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index e1b43aa1217535..fdb90e24dc74d4 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -55,59 +55,18 @@ typedef void (*crash_shutdown_t)(void); #ifdef CONFIG_KEXEC_CORE - -/* - * This function is responsible for capturing register states if coming - * via panic or invoking dump using sysrq-trigger. - */ -static inline void crash_setup_regs(struct pt_regs *newregs, - struct pt_regs *oldregs) -{ - if (oldregs) - memcpy(newregs, oldregs, sizeof(*newregs)); - else - ppc_save_regs(newregs); -} +struct kimage; +struct pt_regs; extern void kexec_smp_wait(void); /* get and clear naca physid, wait for master to copy new code to 0 */ -extern int crashing_cpu; -extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)); -extern void crash_ipi_callback(struct pt_regs *); -extern int crash_wake_offline; - -struct kimage; -struct pt_regs; extern void default_machine_kexec(struct kimage *image); -extern void default_machine_crash_shutdown(struct pt_regs *regs); -extern int crash_shutdown_register(crash_shutdown_t handler); -extern int crash_shutdown_unregister(crash_shutdown_t handler); - -extern void crash_kexec_prepare(void); -extern void crash_kexec_secondary(struct pt_regs *regs); -int __init overlaps_crashkernel(unsigned long start, unsigned long size); -extern void reserve_crashkernel(void); extern void machine_kexec_mask_interrupts(void); -static inline bool kdump_in_progress(void) -{ - return crashing_cpu >= 0; -} - void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer, unsigned long start_address) __noreturn; - void kexec_copy_flush(struct kimage *image); -#if defined(CONFIG_CRASH_DUMP) -bool is_kdump_kernel(void); -#define is_kdump_kernel is_kdump_kernel -#if defined(CONFIG_PPC_RTAS) -void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); -#define crash_free_reserved_phys_range crash_free_reserved_phys_range -#endif /* CONFIG_PPC_RTAS */ -#endif /* CONFIG_CRASH_DUMP */ - #ifdef CONFIG_KEXEC_FILE extern const struct kexec_file_ops kexec_elf64_ops; @@ -152,15 +111,56 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, #endif /* CONFIG_KEXEC_FILE */ -#else /* !CONFIG_KEXEC_CORE */ -static inline void crash_kexec_secondary(struct pt_regs *regs) { } +#endif /* CONFIG_KEXEC_CORE */ + +#ifdef CONFIG_CRASH_RESERVE +int __init overlaps_crashkernel(unsigned long start, unsigned long size); +extern void reserve_crashkernel(void); +#else +static inline void reserve_crashkernel(void) {} +static inline int overlaps_crashkernel(unsigned long start, unsigned long size) { return 0; } +#endif -static inline int overlaps_crashkernel(unsigned long start, unsigned long size) +#if defined(CONFIG_CRASH_DUMP) +/* + * This function is responsible for capturing register states if coming + * via panic or invoking dump using sysrq-trigger. 
+ */ +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) { - return 0; + if (oldregs) + memcpy(newregs, oldregs, sizeof(*newregs)); + else + ppc_save_regs(newregs); +} + +extern int crashing_cpu; +extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)); +extern void crash_ipi_callback(struct pt_regs *regs); +extern int crash_wake_offline; + +extern int crash_shutdown_register(crash_shutdown_t handler); +extern int crash_shutdown_unregister(crash_shutdown_t handler); +extern void default_machine_crash_shutdown(struct pt_regs *regs); + +extern void crash_kexec_prepare(void); +extern void crash_kexec_secondary(struct pt_regs *regs); + +static inline bool kdump_in_progress(void) +{ + return crashing_cpu >= 0; } -static inline void reserve_crashkernel(void) { ; } +bool is_kdump_kernel(void); +#define is_kdump_kernel is_kdump_kernel +#if defined(CONFIG_PPC_RTAS) +void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); +#define crash_free_reserved_phys_range crash_free_reserved_phys_range +#endif /* CONFIG_PPC_RTAS */ + +#else /* !CONFIG_CRASH_DUMP */ +static inline void crash_kexec_secondary(struct pt_regs *regs) { } static inline int crash_shutdown_register(crash_shutdown_t handler) { @@ -183,7 +183,7 @@ static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) { } -#endif /* CONFIG_KEXEC_CORE */ +#endif /* CONFIG_CRASH_DUMP */ #ifdef CONFIG_PPC_BOOK3S_64 #include diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 1dc32a05815612..cd8d8883de9040 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -475,7 +475,7 @@ static int __init early_init_dt_scan_chosen_ppc(unsigned long node, tce_alloc_end = *lprop; #endif -#ifdef CONFIG_KEXEC_CORE +#ifdef CONFIG_CRASH_RESERVE lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL); if (lprop) crashk_res.start = *lprop; diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 2add292da49432..01ed1263e1a989 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -110,7 +110,7 @@ int ppc_do_canonicalize_irqs; EXPORT_SYMBOL(ppc_do_canonicalize_irqs); #endif -#ifdef CONFIG_VMCORE_INFO +#ifdef CONFIG_CRASH_DUMP /* This keeps a track of which one is the crashing cpu. */ int crashing_cpu = -1; #endif diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index a60e4139214be5..12e53b3d792379 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -588,7 +588,7 @@ void smp_send_debugger_break(void) } #endif -#ifdef CONFIG_KEXEC_CORE +#ifdef CONFIG_CRASH_DUMP void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) { int cpu; @@ -631,7 +631,7 @@ void crash_smp_send_stop(void) stopped = true; -#ifdef CONFIG_KEXEC_CORE +#ifdef CONFIG_CRASH_DUMP if (kexec_crash_image) { crash_kexec_prepare(); return; diff --git a/arch/powerpc/kexec/Makefile b/arch/powerpc/kexec/Makefile index 91e96f5168b753..8e469c4da3f882 100644 --- a/arch/powerpc/kexec/Makefile +++ b/arch/powerpc/kexec/Makefile @@ -3,12 +3,13 @@ # Makefile for the linux kernel. 
# -obj-y += core.o crash.o core_$(BITS).o +obj-y += core.o core_$(BITS).o obj-$(CONFIG_PPC32) += relocate_32.o obj-$(CONFIG_KEXEC_FILE) += file_load.o ranges.o file_load_$(BITS).o elf_$(BITS).o obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o +obj-$(CONFIG_CRASH_DUMP) += crash.o # Disable GCOV, KCOV & sanitizers in odd or sensitive code GCOV_PROFILE_core_$(BITS).o := n diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c index 3ff4411ed49671..b8333a49ea5daa 100644 --- a/arch/powerpc/kexec/core.c +++ b/arch/powerpc/kexec/core.c @@ -44,10 +44,12 @@ void machine_kexec_mask_interrupts(void) { } } +#ifdef CONFIG_CRASH_DUMP void machine_crash_shutdown(struct pt_regs *regs) { default_machine_crash_shutdown(regs); } +#endif void machine_kexec_cleanup(struct kimage *image) { @@ -77,6 +79,7 @@ void machine_kexec(struct kimage *image) for(;;); } +#ifdef CONFIG_CRASH_RESERVE void __init reserve_crashkernel(void) { unsigned long long crash_size, crash_base, total_mem_sz; @@ -251,3 +254,4 @@ static int __init kexec_setup(void) return 0; } late_initcall(kexec_setup); +#endif /* CONFIG_CRASH_RESERVE */ diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 9e1a25398f98cb..8f14f0581a21b1 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -434,7 +434,7 @@ void __init pnv_smp_init(void) smp_ops = &pnv_smp_ops; #ifdef CONFIG_HOTPLUG_CPU -#ifdef CONFIG_KEXEC_CORE +#ifdef CONFIG_CRASH_DUMP crash_wake_offline = 1; #endif #endif -- cgit 1.2.3-korg
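
The first patch above turns void protection helpers into int-returning ones, stops at the first failure, and panics at the top level when read-only or no-execute protection cannot be applied. Below is a minimal, self-contained sketch of that error-propagation pattern, not kernel code: protect_text(), protect_rodata() and apply_protections() are hypothetical stand-ins for the kernel's mmu_mark_*()/set_memory_*() helpers, and the user-space abort() stands in for panic().

/*
 * Sketch of the error-propagation pattern from the mark_rodata_ro()/
 * mark_initmem_nx() change: helpers return an int instead of void, the
 * first failure short-circuits further work, and the top-level caller
 * treats any error as fatal.  All names here are illustrative stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

static int protect_text(void)
{
	/* Pretend the underlying set_memory_*()-style call succeeded. */
	return 0;
}

static int protect_rodata(void)
{
	/* Simulate a failure so the fatal path below is exercised. */
	return -22; /* -EINVAL */
}

static int apply_protections(void)
{
	int err;

	err = protect_text();
	if (err)
		return err;	/* propagate instead of silently continuing */

	return protect_rodata();
}

int main(void)
{
	int err = apply_protections();

	if (err) {
		/* The kernel wrappers panic() here; a user-space sketch aborts. */
		fprintf(stderr, "apply_protections() failed, err = %d\n", err);
		abort();
	}
	return 0;
}

Propagating the error upward rather than warning inside each helper keeps the fatal-versus-recoverable decision in one place, which is the role the new __mark_rodata_ro()/__mark_initmem_nx() wrappers in pgtable_32.c play.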
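
The kexec/kdump split patches replace several "#ifdef CONFIG_CRASH_DUMP"-style guards with IS_ENABLED(CONFIG_CRASH_DUMP) tests inside ordinary if conditions, so crash-only code is still compiled and type-checked in every configuration but generates nothing when the option is off. The sketch below illustrates that idiom only; CONFIG_CRASH_DUMP_ENABLED and load_crash_segments() are hypothetical stand-ins, not the kernel's IS_ENABLED() machinery or load_crashdump_segments_ppc64().

/*
 * Sketch of the "constant condition instead of #ifdef" idiom: the branch
 * is always parsed, so it cannot bit-rot, yet the compiler drops it
 * entirely when the compile-time constant is 0.
 */
#include <stdio.h>

#define CONFIG_CRASH_DUMP_ENABLED 0	/* flip to 1 to keep the crash path */

enum image_type { IMAGE_DEFAULT, IMAGE_CRASH };

static int load_crash_segments(void)
{
	puts("loading crash dump segments");
	return 0;
}

static int load_image(enum image_type type)
{
	/* Always compiled, but dead-code-eliminated when the option is off. */
	if (CONFIG_CRASH_DUMP_ENABLED && type == IMAGE_CRASH)
		return load_crash_segments();

	puts("loading regular kexec image");
	return 0;
}

int main(void)
{
	return load_image(IMAGE_CRASH);
}

Because the disabled branch is still seen by the compiler, a CONFIG_KEXEC_FILE && !CONFIG_CRASH_DUMP build keeps building the shared loader paths, which is what lets the series drop the forced select of CRASH_DUMP without risking silent breakage.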