/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *	Vivek Goyal <vgoyal@redhat.com>
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>

/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN	4096

struct crash_mem_range {
	u64 start, end;
};

struct crash_mem {
	unsigned int max_nr_ranges;
	unsigned int nr_ranges;
	struct crash_mem_range ranges[0];
};

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * This is used to VMCLEAR all VMCSs loaded on the processor. The callback
 * function pointer is assigned when the kvm_intel module is loaded.
 *
 * Protected by RCU.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;
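
/*
 * A minimal sketch of how a VMX module would register the callback above
 * (illustrative only; my_vmclear_cb is a hypothetical name, the actual
 * registration is done by kvm_intel):
 *
 *	static void my_vmclear_cb(void)
 *	{
 *		... VMCLEAR each VMCS loaded on this CPU ...
 *	}
 *
 *	On module load:
 *		rcu_assign_pointer(crash_vmclear_loaded_vmcss, my_vmclear_cb);
 *	On module unload:
 *		RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
 *		synchronize_rcu();
 */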

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	/* The NMI callback ran on every other CPU; quiesce this one too. */
	disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
	/* There are no cpus to shootdown */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	clear_IO_APIC();
#endif
	lapic_shutdown();
	restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE
static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static struct crash_mem *fill_up_crash_elf_data(void)
{
	unsigned int nr_ranges = 0;
	struct crash_mem *cmem;

	walk_system_ram_res(0, -1, &nr_ranges,
			    get_nr_ram_ranges_callback);
	if (!nr_ranges)
		return NULL;

	/*
	 * Exclusion of crash region and/or crashk_low_res may cause
	 * another range split. So add extra two slots here.
	 */
	nr_ranges += 2;
	cmem = vzalloc(sizeof(struct crash_mem) +
		       sizeof(struct crash_mem_range) * nr_ranges);
	if (!cmem)
		return NULL;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;

	return cmem;
}
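
/*
 * Why two extra slots suffice: each exclude_mem_range() call splits at
 * most one existing range into two, and elf_header_exclude_ranges()
 * excludes at most two regions (crashk_res and crashk_low_res), so the
 * worst case adds two ranges beyond the walk_system_ram_res() count.
 */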

static int exclude_mem_range(struct crash_mem *mem,
		unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift rest of the ranges to left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If a split happened, add the split to array */
	if (!temp_range.end)
		return 0;

	/* Split happened */
	if (i == mem->max_nr_ranges - 1)
		return -ENOMEM;

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}
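
/*
 * A worked example of the split case (illustrative values): with
 * ranges[] = { [0x0 - 0x7fffffff] }, excluding [0x1000000 - 0x1ffffff]
 * leaves ranges[] = { [0x0 - 0xffffff], [0x2000000 - 0x7fffffff] } and
 * bumps nr_ranges from 1 to 2.
 */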

/*
 * Look for any unwanted ranges (the crashkernel regions) and remove them
 * from cmem. An exclusion may split a range; the resulting pieces stay in
 * the cmem->ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
	int ret = 0;

	/* Exclude crashkernel region */
	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = exclude_mem_range(cmem, crashk_low_res.start,
					crashk_low_res.end);
		if (ret)
			return ret;
	}

	return ret;
}

static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
	struct crash_mem *cmem = arg;

	cmem->ranges[cmem->nr_ranges].start = res->start;
	cmem->ranges[cmem->nr_ranges].end = res->end;
	cmem->nr_ranges++;

	return 0;
}

static int prepare_elf64_headers(struct crash_mem *cmem, bool kernel_map,
				 void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf, *bufp;
	unsigned int cpu, i;
	unsigned long long notes_addr;
	unsigned long mstart, mend;

	/* extra phdr for vmcoreinfo elf note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += cmem->nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for the kernel text
	 * mapping area on x86_64 (ffffffff80000000 - ffffffffa0000000),
	 * which tools like gdb rely on. The same physical memory is thus
	 * described twice: once with kernel text virtual addresses and
	 * once with __va(physical) addresses.
	 */
	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);
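
	/*
	 * Worked size example (ELF64 sizes are fixed by the spec): with 8
	 * possible CPUs and 4 RAM ranges, nr_phdr = 8 + 1 + 4 + 1 = 14, so
	 * elf_sz = 64 + 14 * 56 = 848 bytes, which ALIGN() rounds up to one
	 * 4096-byte ELF_CORE_HEADER_ALIGN unit.
	 */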

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	bufp = buf;
	ehdr = (Elf64_Ehdr *)bufp;
	bufp += sizeof(Elf64_Ehdr);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present cpu */
	for_each_present_cpu(cpu) {
		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		ehdr->e_phnum++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
	ehdr->e_phnum++;

	/* Prepare PT_LOAD type program header for kernel text region */
	if (kernel_map) {
		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_vaddr = (Elf64_Addr)_text;
		phdr->p_filesz = phdr->p_memsz = _end - _text;
		phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
		ehdr->e_phnum++;
	}

	/* Go through all the ranges in cmem->ranges[] and prepare phdr */
	for (i = 0; i < cmem->nr_ranges; i++) {
		mstart = cmem->ranges[i].start;
		mend = cmem->ranges[i].end;

		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			 ehdr->e_phnum, phdr->p_offset);
	}

	*addr = buf;
	*sz = elf_sz;
	return 0;
}
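
/*
 * For orientation, the header buffer built above is laid out as follows
 * (a sketch of this function's output, not a general vmcore format):
 *
 *	Elf64_Ehdr				ET_CORE header
 *	Elf64_Phdr x nr_present_cpus		PT_NOTE, per-cpu crash notes
 *	Elf64_Phdr				PT_NOTE, vmcoreinfo
 *	Elf64_Phdr				PT_LOAD, kernel text (if kernel_map)
 *	Elf64_Phdr x cmem->nr_ranges		PT_LOAD, one per RAM range
 */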

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
{
	struct crash_mem *cmem;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	int ret, i;

	cmem = fill_up_crash_elf_data();
	if (!cmem)
		return -ENOMEM;

	ret = walk_system_ram_res(0, -1, cmem,
				  prepare_elf64_ram_headers_callback);
	if (ret)
		goto out;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(cmem);
	if (ret)
		goto out;

	/* By default prepare 64bit headers */
	ret = prepare_elf64_headers(cmem, IS_ENABLED(CONFIG_X86_64), addr, sz);
	if (ret)
		goto out;

	/*
	 * If a range matches backup region, adjust offset to backup
	 * segment.
	 */
	ehdr = (Elf64_Ehdr *)*addr;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; phdr++, i++)
		if (phdr->p_type == PT_LOAD &&
		    phdr->p_paddr == image->arch.backup_src_start &&
		    phdr->p_memsz == image->arch.backup_src_sz) {
			phdr->p_offset = image->arch.backup_load_addr;
			break;
		}

out:
	vfree(cmem);
	return ret;
}

static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry,
	       sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}

static int memmap_entry_callback(struct resource *res, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = res->start;
	ei.size = resource_size(res);
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_TYPE_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
				    crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}
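
/*
 * The net effect is that the kdump kernel's e820 advertises only memory it
 * may safely use: the backup source region, the ACPI ranges, the optional
 * low crashkernel range, and whatever parts of crashk_res are not occupied
 * by the backup segment or the ELF header segment.
 */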

static int determine_backup_region(struct resource *res, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = res->start;
	image->arch.backup_src_sz = resource_size(res);

	/* Expecting only one range for backup region */
	return 1;
}
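
/*
 * Returning a nonzero value from a walk_system_ram_res() callback stops
 * the walk and is propagated to the caller, so the positive return above
 * both limits the backup region to the first matching range and explains
 * why crash_load_segments() below accepts positive return values.
 */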

int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/*
	 * Determine and load a segment for backup area. First 640K RAM
	 * region is backup source.
	 */
	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				  image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	/* Add backup segment. */
	if (image->arch.backup_src_sz) {
		kbuf.buffer = &crash_zero_bytes;
		kbuf.bufsz = sizeof(crash_zero_bytes);
		kbuf.memsz = image->arch.backup_src_sz;
		kbuf.buf_align = PAGE_SIZE;
		/*
		 * Ideally there is no source for backup segment. This is
		 * copied in purgatory after crash. Just add a zero filled
		 * segment for now to make sure checksum logic works fine.
		 */
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			return ret;
		image->arch.backup_load_addr = kbuf.mem;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr,
			 image->arch.backup_src_start, kbuf.memsz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->arch.elf_headers = kbuf.buffer;
	image->arch.elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	image->arch.elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.memsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */