/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_KEXEC_H
#define _ASM_POWERPC_KEXEC_H
#ifdef __KERNEL__

#if defined(CONFIG_PPC_85xx) || defined(CONFIG_44x)

/*
 * On FSL-BookE we setup a 1:1 mapping which covers the first 2GiB of memory
 * and therefore we can only deal with memory within this range
 */
#define KEXEC_SOURCE_MEMORY_LIMIT	(2 * 1024 * 1024 * 1024UL - 1)
#define KEXEC_DESTINATION_MEMORY_LIMIT	(2 * 1024 * 1024 * 1024UL - 1)
#define KEXEC_CONTROL_MEMORY_LIMIT	(2 * 1024 * 1024 * 1024UL - 1)

#else

/*
 * Maximum page that is mapped directly into kernel memory.
 * XXX: Since we copy virt we can use any page we allocate
 */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)

/*
 * Maximum address we can reach in physical address mode.
 * XXX: I want to allow initrd in highmem. Otherwise set to rmo on LPAR.
 */
#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)

/* Maximum address we can use for the control code buffer */
#ifdef __powerpc64__
#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
#else
/* TASK_SIZE, probably left over from use_mm ?? */
#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
#endif
#endif

#define KEXEC_CONTROL_PAGE_SIZE 4096

/* The native architecture */
#ifdef __powerpc64__
#define KEXEC_ARCH KEXEC_ARCH_PPC64
#else
#define KEXEC_ARCH KEXEC_ARCH_PPC
#endif

#define KEXEC_STATE_NONE 0
#define KEXEC_STATE_IRQS_OFF 1
#define KEXEC_STATE_REAL_MODE 2

#ifndef __ASSEMBLY__
#include <asm/reg.h>

typedef void (*crash_shutdown_t)(void);

#ifdef CONFIG_KEXEC_CORE

/*
 * This function is responsible for capturing register states if coming
 * via panic or invoking dump using sysrq-trigger.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
					struct pt_regs *oldregs)
{
	if (oldregs)
		memcpy(newregs, oldregs, sizeof(*newregs));
	else
		ppc_save_regs(newregs);
}
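/*
 * Illustrative sketch only (not part of this header's API): a typical crash
 * path snapshots register state with crash_setup_regs() before handing
 * control to the kdump machinery. crash_kexec() is declared in
 * <linux/kexec.h>; the function name example_crash_path() is hypothetical.
 *
 *	static void example_crash_path(struct pt_regs *oldregs)
 *	{
 *		struct pt_regs regs;
 *
 *		crash_setup_regs(&regs, oldregs);	// copy oldregs, or save live registers
 *		crash_kexec(&regs);			// jump into the crash kernel
 *	}
 */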
extern void kexec_smp_wait(void);	/* get and clear naca physid, wait for
					 * master to copy new code to 0 */
extern int crashing_cpu;
extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
extern void crash_ipi_callback(struct pt_regs *);
extern int crash_wake_offline;

struct kimage;
struct pt_regs;
extern void default_machine_kexec(struct kimage *image);
extern void default_machine_crash_shutdown(struct pt_regs *regs);
extern int crash_shutdown_register(crash_shutdown_t handler);
extern int crash_shutdown_unregister(crash_shutdown_t handler);

extern void crash_kexec_prepare(void);
extern void crash_kexec_secondary(struct pt_regs *regs);
int __init overlaps_crashkernel(unsigned long start, unsigned long size);
extern void reserve_crashkernel(void);

extern void machine_kexec_mask_interrupts(void);

static inline bool kdump_in_progress(void)
{
	return crashing_cpu >= 0;
}

void relocate_new_kernel(unsigned long indirection_page,
			 unsigned long reboot_code_buffer,
			 unsigned long start_address) __noreturn;

void kexec_copy_flush(struct kimage *image);

#if defined(CONFIG_CRASH_DUMP) && defined(CONFIG_PPC_RTAS)
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
#define crash_free_reserved_phys_range crash_free_reserved_phys_range
#endif

#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops kexec_elf64_ops;

#define ARCH_HAS_KIMAGE_ARCH

struct kimage_arch {
	struct crash_mem *exclude_ranges;

	unsigned long backup_start;
	void *backup_buf;

	void *fdt;
};

char *setup_kdump_cmdline(struct kimage *image, char *cmdline,
			  unsigned long cmdline_len);
int setup_purgatory(struct kimage *image, const void *slave_code,
		    const void *fdt, unsigned long kernel_load_addr,
		    unsigned long fdt_load_addr);

#ifdef CONFIG_PPC64
struct kexec_buf;

int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len);
#define arch_kexec_kernel_image_probe arch_kexec_kernel_image_probe

int arch_kimage_file_post_load_cleanup(struct kimage *image);
#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup

int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf);
#define arch_kexec_locate_mem_hole arch_kexec_locate_mem_hole

int load_crashdump_segments_ppc64(struct kimage *image, struct kexec_buf *kbuf);
int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
			  const void *fdt, unsigned long kernel_load_addr,
			  unsigned long fdt_load_addr);
unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image);
int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
			unsigned long initrd_load_addr,
			unsigned long initrd_len, const char *cmdline);
#endif /* CONFIG_PPC64 */

#endif /* CONFIG_KEXEC_FILE */

#else /* !CONFIG_KEXEC_CORE */
static inline void crash_kexec_secondary(struct pt_regs *regs) { }

static inline int overlaps_crashkernel(unsigned long start, unsigned long size)
{
	return 0;
}

static inline void reserve_crashkernel(void) { ; }

static inline int crash_shutdown_register(crash_shutdown_t handler)
{
	return 0;
}

static inline int crash_shutdown_unregister(crash_shutdown_t handler)
{
	return 0;
}

static inline bool kdump_in_progress(void)
{
	return false;
}

static inline void crash_ipi_callback(struct pt_regs *regs) { }

static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
}

#endif /* CONFIG_KEXEC_CORE */
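/*
 * Illustrative sketch only (not part of this header's API): a subsystem that
 * must quiesce hardware before a crash dump can register a crash_shutdown_t
 * hook via crash_shutdown_register(), declared above under CONFIG_KEXEC_CORE.
 * The handler name example_crash_shutdown() is hypothetical.
 *
 *	static void example_crash_shutdown(void)
 *	{
 *		// minimal, crash-safe teardown only
 *	}
 *
 *	// registration:	crash_shutdown_register(example_crash_shutdown);
 *	// removal:		crash_shutdown_unregister(example_crash_shutdown);
 */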
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/kexec.h>
#endif

#ifndef reset_sprs
#define reset_sprs reset_sprs
static inline void reset_sprs(void)
{
}
#endif

#endif /* ! __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KEXEC_H */