From 008dcd5f5a6342d64d8f9354006d2b60ba8ecf3a Mon Sep 17 00:00:00 2001 From: Packit Date: Sep 09 2020 11:59:12 +0000 Subject: Apply patch github_9596b4388ea5.patch patch_name: github_9596b4388ea5.patch present_in_specfile: true --- diff --git a/arm64.c b/arm64.c index 49e2573..dfcde6e 100644 --- a/arm64.c +++ b/arm64.c @@ -27,12 +27,9 @@ static struct machine_specific arm64_machine_specific = { 0 }; static int arm64_verify_symbol(const char *, ulong, char); static void arm64_parse_cmdline_args(void); -static int arm64_search_for_kimage_voffset(ulong); -static int verify_kimage_voffset(void); static void arm64_calc_kimage_voffset(void); static void arm64_calc_phys_offset(void); static void arm64_calc_virtual_memory_ranges(void); -static void arm64_get_section_size_bits(void); static int arm64_kdump_phys_base(ulong *); static ulong arm64_processor_speed(void); static void arm64_init_kernel_pgd(void); @@ -81,7 +78,7 @@ static int arm64_on_irq_stack(int, ulong); static void arm64_set_irq_stack(struct bt_info *); static void arm64_set_process_stack(struct bt_info *); static int arm64_get_kvaddr_ranges(struct vaddr_range *); -static void arm64_get_crash_notes(void); +static int arm64_get_crash_notes(void); static void arm64_calc_VA_BITS(void); static int arm64_is_uvaddr(ulong, struct task_context *); @@ -147,12 +144,6 @@ arm64_init(int when) if (kernel_symbol_exists("kimage_voffset")) machdep->flags |= NEW_VMEMMAP; - if (!machdep->pagesize && - (string = pc->read_vmcoreinfo("PAGESIZE"))) { - machdep->pagesize = atoi(string); - free(string); - } - if (!machdep->pagesize) { /* * Kerneldoc Documentation/arm64/booting.txt describes @@ -179,16 +170,17 @@ arm64_init(int when) } - /* - * This code section will only be executed if the kernel is - * earlier than Linux 4.4 (if there is no vmcoreinfo) - */ if (!machdep->pagesize && kernel_symbol_exists("swapper_pg_dir") && kernel_symbol_exists("idmap_pg_dir")) { - value = symbol_value("swapper_pg_dir") - - 
symbol_value("idmap_pg_dir"); + if (kernel_symbol_exists("tramp_pg_dir")) + value = symbol_value("tramp_pg_dir"); + else if (kernel_symbol_exists("reserved_ttbr0")) + value = symbol_value("reserved_ttbr0"); + else + value = symbol_value("swapper_pg_dir"); + value -= symbol_value("idmap_pg_dir"); /* * idmap_pg_dir is 2 pages prior to 4.1, * and 3 pages thereafter. Only 4K and 64K @@ -214,19 +206,12 @@ arm64_init(int when) arm64_calc_VA_BITS(); ms = machdep->machspec; - if (ms->VA_BITS_ACTUAL) { - ms->page_offset = ARM64_PAGE_OFFSET_ACTUAL; - machdep->identity_map_base = ARM64_PAGE_OFFSET_ACTUAL; - machdep->kvbase = ARM64_PAGE_OFFSET_ACTUAL; - ms->userspace_top = ARM64_USERSPACE_TOP_ACTUAL; - } else { - ms->page_offset = ARM64_PAGE_OFFSET; - machdep->identity_map_base = ARM64_PAGE_OFFSET; - machdep->kvbase = ARM64_VA_START; - ms->userspace_top = ARM64_USERSPACE_TOP; - } + ms->page_offset = ARM64_PAGE_OFFSET; + machdep->identity_map_base = ARM64_PAGE_OFFSET; + machdep->kvbase = ARM64_VA_START; machdep->is_kvaddr = generic_is_kvaddr; machdep->kvtop = arm64_kvtop; + ms->userspace_top = ARM64_USERSPACE_TOP; if (machdep->flags & NEW_VMEMMAP) { struct syment *sp; @@ -235,15 +220,11 @@ arm64_init(int when) sp = kernel_symbol_search("_end"); ms->kimage_end = (sp ? 
sp->value : 0); - if (ms->VA_BITS_ACTUAL) { - ms->modules_vaddr = (st->_stext_vmlinux & TEXT_OFFSET_MASK) - ARM64_MODULES_VSIZE; - ms->modules_end = ms->modules_vaddr + ARM64_MODULES_VSIZE -1; - } else { - ms->modules_vaddr = ARM64_VA_START; - if (kernel_symbol_exists("kasan_init")) - ms->modules_vaddr += ARM64_KASAN_SHADOW_SIZE; - ms->modules_end = ms->modules_vaddr + ARM64_MODULES_VSIZE -1; - } + ms->modules_vaddr = ARM64_VA_START; + if (kernel_symbol_exists("kasan_init")) + ms->modules_vaddr += ARM64_KASAN_SHADOW_SIZE; + ms->modules_end = ms->modules_vaddr + + ARM64_MODULES_VSIZE -1; ms->vmalloc_start_addr = ms->modules_end + 1; @@ -285,7 +266,7 @@ arm64_init(int when) case 65536: if (kernel_symbol_exists("idmap_ptrs_per_pgd") && readmem(symbol_value("idmap_ptrs_per_pgd"), KVADDR, - &value, sizeof(ulong), "idmap_ptrs_per_pgd", QUIET|RETURN_ON_ERROR)) + &value, sizeof(ulong), "idmap_ptrs_per_pgd", RETURN_ON_ERROR)) machdep->ptrs_per_pgd = value; if (machdep->machspec->VA_BITS > PGDIR_SHIFT_L3_64K) { @@ -335,6 +316,10 @@ arm64_init(int when) machdep->uvtop = arm64_uvtop; machdep->is_uvaddr = arm64_is_uvaddr; + if (kernel_symbol_exists("vabits_user") && + readmem(symbol_value("vabits_user"), KVADDR, + &value, sizeof(ulong), "vabits_user", RETURN_ON_ERROR)) + machdep->machspec->vabits_user = value; machdep->eframe_search = arm64_eframe_search; machdep->back_trace = arm64_back_trace_cmd; machdep->in_alternate_stack = arm64_in_alternate_stack; @@ -375,8 +360,7 @@ arm64_init(int when) case POST_GDB: arm64_calc_virtual_memory_ranges(); - arm64_get_section_size_bits(); - + machdep->section_size_bits = _SECTION_SIZE_BITS; if (!machdep->max_physmem_bits) { if ((string = pc->read_vmcoreinfo("NUMBER(MAX_PHYSMEM_BITS)"))) { machdep->max_physmem_bits = atol(string); @@ -388,24 +372,8 @@ arm64_init(int when) else machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; } - ms = machdep->machspec; - if (CRASHDEBUG(1)) { - if (ms->VA_BITS_ACTUAL) { - fprintf(fp, "CONFIG_ARM64_VA_BITS: 
%ld\n", ms->CONFIG_ARM64_VA_BITS); - fprintf(fp, " VA_BITS_ACTUAL: %ld\n", ms->VA_BITS_ACTUAL); - fprintf(fp, "(calculated) VA_BITS: %ld\n", ms->VA_BITS); - fprintf(fp, " PAGE_OFFSET: %lx\n", ARM64_PAGE_OFFSET_ACTUAL); - fprintf(fp, " VA_START: %lx\n", ms->VA_START); - fprintf(fp, " modules: %lx - %lx\n", ms->modules_vaddr, ms->modules_end); - fprintf(fp, " vmalloc: %lx - %lx\n", ms->vmalloc_start_addr, ms->vmalloc_end); - fprintf(fp, "kernel image: %lx - %lx\n", ms->kimage_text, ms->kimage_end); - fprintf(fp, " vmemmap: %lx - %lx\n\n", ms->vmemmap_vaddr, ms->vmemmap_end); - } - } - - if (THIS_KERNEL_VERSION >= LINUX(4,0,0)) { ms->__SWP_TYPE_BITS = 6; ms->__SWP_TYPE_SHIFT = 2; @@ -465,8 +433,11 @@ arm64_init(int when) * of the crash. We need this information to extract correct * backtraces from the panic task. */ - if (!LIVE()) - arm64_get_crash_notes(); + if (!LIVE() && !arm64_get_crash_notes()) + error(WARNING, + "cannot retrieve registers for active task%s\n\n", + kt->cpus > 1 ? "s" : ""); + break; case LOG_ONLY: @@ -648,15 +619,9 @@ arm64_dump_machdep_table(ulong arg) fprintf(fp, " machspec: %lx\n", (ulong)ms); fprintf(fp, " VA_BITS: %ld\n", ms->VA_BITS); - fprintf(fp, " CONFIG_ARM64_VA_BITS: %ld\n", ms->CONFIG_ARM64_VA_BITS); - fprintf(fp, " VA_START: "); - if (ms->VA_START) - fprintf(fp, "%lx\n", ms->VA_START); - else - fprintf(fp, "(unused)\n"); - fprintf(fp, " VA_BITS_ACTUAL: "); - if (ms->VA_BITS_ACTUAL) - fprintf(fp, "%ld\n", ms->VA_BITS_ACTUAL); + fprintf(fp, " vabits_user: "); + if (ms->vabits_user) + fprintf(fp, "%ld\n", ms->vabits_user); else fprintf(fp, "(unused)\n"); fprintf(fp, " userspace_top: %016lx\n", ms->userspace_top); @@ -745,12 +710,12 @@ arm64_parse_machdep_arg_l(char *argstring, char *param, ulong *value) int flags = RETURN_ON_ERROR | QUIET; int err = 0; - if (STRNEQ(argstring, "max_physmem_bits")) { - *value = dtol(p, flags, &err); - } else if (megabytes) { + if (megabytes) { *value = dtol(p, flags, &err); if (!err) *value = 
MEGABYTES(*value); + } else if (STRNEQ(argstring, "max_physmem_bits")) { + *value = dtol(p, flags, &err); } else { *value = htol(p, flags, &err); } @@ -824,60 +789,11 @@ arm64_parse_cmdline_args(void) } } -#define MIN_KIMG_ALIGN (0x00200000) /* kimage load address must be aligned 2M */ -/* - * Traverse the entire dumpfile to find/verify kimage_voffset. - */ -static int -arm64_search_for_kimage_voffset(ulong phys_base) -{ - ulong kimage_load_addr; - ulong phys_end; - struct machine_specific *ms = machdep->machspec; - - if (!arm_kdump_phys_end(&phys_end)) - return FALSE; - - for (kimage_load_addr = phys_base; - kimage_load_addr <= phys_end; kimage_load_addr += MIN_KIMG_ALIGN) { - ms->kimage_voffset = ms->vmalloc_start_addr - kimage_load_addr; - - if ((kt->flags2 & KASLR) && (kt->flags & RELOC_SET)) - ms->kimage_voffset += (kt->relocate * - 1); - - if (verify_kimage_voffset()) { - if (CRASHDEBUG(1)) - error(INFO, - "dumpfile searched for kimage_voffset: %lx\n\n", - ms->kimage_voffset); - break; - } - } - - if (kimage_load_addr > phys_end) - return FALSE; - - return TRUE; -} - -static int -verify_kimage_voffset(void) -{ - ulong kimage_voffset; - - if (!readmem(symbol_value("kimage_voffset"), KVADDR, &kimage_voffset, - sizeof(kimage_voffset), "verify kimage_voffset", QUIET|RETURN_ON_ERROR)) - return FALSE; - - return (machdep->machspec->kimage_voffset == kimage_voffset); -} - static void arm64_calc_kimage_voffset(void) { struct machine_specific *ms = machdep->machspec; - ulong phys_addr = 0; - int errflag; + ulong phys_addr; if (ms->kimage_voffset) /* vmcoreinfo, ioctl, or --machdep override */ return; @@ -885,6 +801,7 @@ arm64_calc_kimage_voffset(void) if (ACTIVE()) { char buf[BUFSIZE]; char *p1; + int errflag; FILE *iomem; ulong kimage_voffset, vaddr; @@ -925,24 +842,9 @@ arm64_calc_kimage_voffset(void) if (errflag) return; - } else if (KDUMP_DUMPFILE()) { - errflag = 1; - if (arm_kdump_phys_base(&phys_addr)) { /* Get start address of first memory block */ - 
ms->kimage_voffset = ms->vmalloc_start_addr - phys_addr; - if ((kt->flags2 & KASLR) && (kt->flags & RELOC_SET)) - ms->kimage_voffset += (kt->relocate * -1); - if (verify_kimage_voffset() || arm64_search_for_kimage_voffset(phys_addr)) - errflag = 0; - } - - if (errflag) { - error(WARNING, - "kimage_voffset cannot be determined from the dumpfile.\n"); - error(CONT, - "Try using the command line option: --machdep kimage_voffset=\n"); - } - return; - } else { + } else if (KDUMP_DUMPFILE()) + arm_kdump_phys_base(&phys_addr); /* Get start address of first memory block */ + else { error(WARNING, "kimage_voffset cannot be determined from the dumpfile.\n"); error(CONT, @@ -981,16 +883,10 @@ arm64_calc_phys_offset(void) physaddr_t paddr; ulong vaddr; struct syment *sp; - char *string; if ((machdep->flags & NEW_VMEMMAP) && ms->kimage_voffset && (sp = kernel_symbol_search("memstart_addr"))) { if (pc->flags & PROC_KCORE) { - if ((string = pc->read_vmcoreinfo("NUMBER(PHYS_OFFSET)"))) { - ms->phys_offset = htol(string, QUIET, NULL); - free(string); - return; - } vaddr = symbol_value_from_proc_kallsyms("memstart_addr"); if (vaddr == BADVAL) vaddr = sp->value; @@ -1053,31 +949,6 @@ arm64_calc_phys_offset(void) fprintf(fp, "using %lx as phys_offset\n", ms->phys_offset); } -/* - * Determine SECTION_SIZE_BITS either by reading VMCOREINFO or the kernel - * config, otherwise use the 64-bit ARM default definiton. 
- */ -static void -arm64_get_section_size_bits(void) -{ - int ret; - char *string; - - machdep->section_size_bits = _SECTION_SIZE_BITS; - - if ((string = pc->read_vmcoreinfo("NUMBER(SECTION_SIZE_BITS)"))) { - machdep->section_size_bits = atol(string); - free(string); - } else if (kt->ikconfig_flags & IKCONFIG_AVAIL) { - if ((ret = get_kernel_config("CONFIG_MEMORY_HOTPLUG", NULL)) == IKCONFIG_Y) { - if ((ret = get_kernel_config("CONFIG_HOTPLUG_SIZE_BITS", &string)) == IKCONFIG_STR) - machdep->section_size_bits = atol(string); - } - } - - if (CRASHDEBUG(1)) - fprintf(fp, "SECTION_SIZE_BITS: %ld\n", machdep->section_size_bits); -} /* * Determine PHYS_OFFSET either by reading VMCOREINFO or the kernel @@ -1133,12 +1004,6 @@ ulong arm64_VTOP(ulong addr) { if (machdep->flags & NEW_VMEMMAP) { - if (machdep->machspec->VA_START && - (addr >= machdep->machspec->kimage_text) && - (addr <= machdep->machspec->kimage_end)) { - return addr - machdep->machspec->kimage_voffset; - } - if (addr >= machdep->machspec->page_offset) return machdep->machspec->phys_offset + (addr - machdep->machspec->page_offset); @@ -1215,11 +1080,6 @@ arm64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbos } } -#define PTE_ADDR_LOW ((((1UL) << (48 - machdep->pageshift)) - 1) << machdep->pageshift) -#define PTE_ADDR_HIGH ((0xfUL) << 12) -#define PTE_TO_PHYS(pteval) (machdep->max_physmem_bits == 52 ? 
\ - (((pteval & PTE_ADDR_LOW) | ((pteval & PTE_ADDR_HIGH) << 36))) : (pteval & PTE_ADDR_LOW)) - #define PMD_TYPE_MASK 3 #define PMD_TYPE_SECT 1 #define PMD_TYPE_TABLE 2 @@ -1312,7 +1172,7 @@ arm64_vtop_3level_64k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose) * #define __PAGETABLE_PUD_FOLDED */ - pmd_base = (ulong *)PTOV(PTE_TO_PHYS(pgd_val)); + pmd_base = (ulong *)PTOV(pgd_val & PHYS_MASK & (s32)machdep->pagemask); FILL_PMD(pmd_base, KVADDR, PTRS_PER_PMD_L3_64K * sizeof(ulong)); pmd_ptr = pmd_base + (((vaddr) >> PMD_SHIFT_L3_64K) & (PTRS_PER_PMD_L3_64K - 1)); pmd_val = ULONG(machdep->pmd + PAGEOFFSET(pmd_ptr)); @@ -1322,7 +1182,7 @@ arm64_vtop_3level_64k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose) goto no_page; if ((pmd_val & PMD_TYPE_MASK) == PMD_TYPE_SECT) { - ulong sectionbase = PTE_TO_PHYS(pmd_val) & SECTION_PAGE_MASK_512MB; + ulong sectionbase = (pmd_val & SECTION_PAGE_MASK_512MB) & PHYS_MASK; if (verbose) { fprintf(fp, " PAGE: %lx (512MB)\n\n", sectionbase); arm64_translate_pte(pmd_val, 0, 0); @@ -1331,7 +1191,7 @@ arm64_vtop_3level_64k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose) return TRUE; } - pte_base = (ulong *)PTOV(PTE_TO_PHYS(pmd_val)); + pte_base = (ulong *)PTOV(pmd_val & PHYS_MASK & (s32)machdep->pagemask); FILL_PTBL(pte_base, KVADDR, PTRS_PER_PTE_L3_64K * sizeof(ulong)); pte_ptr = pte_base + (((vaddr) >> machdep->pageshift) & (PTRS_PER_PTE_L3_64K - 1)); pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_ptr)); @@ -1341,7 +1201,7 @@ arm64_vtop_3level_64k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose) goto no_page; if (pte_val & PTE_VALID) { - *paddr = PTE_TO_PHYS(pte_val) + PAGEOFFSET(vaddr); + *paddr = (PAGEBASE(pte_val) & PHYS_MASK) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); arm64_translate_pte(pte_val, 0, 0); @@ -1644,11 +1504,10 @@ arm64_stackframe_init(void) machdep->machspec->kern_eframe_offset = SIZE(pt_regs); } - if ((sp1 = 
kernel_symbol_search("__exception_text_start")) && - (sp2 = kernel_symbol_search("__exception_text_end"))) { - machdep->machspec->__exception_text_start = sp1->value; - machdep->machspec->__exception_text_end = sp2->value; - } + machdep->machspec->__exception_text_start = + symbol_value("__exception_text_start"); + machdep->machspec->__exception_text_end = + symbol_value("__exception_text_end"); if ((sp1 = kernel_symbol_search("__irqentry_text_start")) && (sp2 = kernel_symbol_search("__irqentry_text_end"))) { machdep->machspec->__irqentry_text_start = sp1->value; @@ -1857,38 +1716,20 @@ arm64_eframe_search(struct bt_info *bt) return count; } -static char *arm64_exception_functions[] = { - "do_undefinstr", - "do_sysinstr", - "do_debug_exception", - "do_mem_abort", - "do_el0_irq_bp_hardening", - "do_sp_pc_abort", - NULL -}; - static int arm64_in_exception_text(ulong ptr) { struct machine_specific *ms = machdep->machspec; - char *name, **func; + + if ((ptr >= ms->__exception_text_start) && + (ptr < ms->__exception_text_end)) + return TRUE; if (ms->__irqentry_text_start && ms->__irqentry_text_end && ((ptr >= ms->__irqentry_text_start) && (ptr < ms->__irqentry_text_end))) return TRUE; - if (ms->__exception_text_start && ms->__exception_text_end) { - if ((ptr >= ms->__exception_text_start) && - (ptr < ms->__exception_text_end)) - return TRUE; - } else if ((name = closest_symbol(ptr))) { /* Linux 5.5 and later */ - for (func = &arm64_exception_functions[0]; *func; func++) { - if (STREQ(name, *func)) - return TRUE; - } - } - return FALSE; } @@ -3297,7 +3138,7 @@ arm64_translate_pte(ulong pte, void *physaddr, ulonglong unused) char *arglist[MAXARGS]; int page_present; - paddr = PTE_TO_PHYS(pte); + paddr = pte & PHYS_MASK & (s32)machdep->pagemask; page_present = pte & (PTE_VALID | machdep->machspec->PTE_PROT_NONE); if (physaddr) { @@ -3563,8 +3404,8 @@ arm64_display_machine_stats(void) fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); 
fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->machspec->page_offset); - fprintf(fp, "KERNEL MODULES BASE: %lx\n", machdep->machspec->modules_vaddr); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", machdep->machspec->vmalloc_start_addr); + fprintf(fp, "KERNEL MODULES BASE: %lx\n", machdep->machspec->modules_vaddr); fprintf(fp, "KERNEL VMEMMAP BASE: %lx\n", machdep->machspec->vmemmap_vaddr); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); if (machdep->machspec->irq_stack_size) { @@ -3594,7 +3435,7 @@ arm64_get_smp_cpus(void) /* * Retrieve task registers for the time of the crash. */ -static void +static int arm64_get_crash_notes(void) { struct machine_specific *ms = machdep->machspec; @@ -3603,10 +3444,10 @@ arm64_get_crash_notes(void) ulong offset; char *buf, *p; ulong *notes_ptrs; - ulong i, found; + ulong i; if (!symbol_exists("crash_notes")) - return; + return FALSE; crash_notes = symbol_value("crash_notes"); @@ -3618,9 +3459,9 @@ arm64_get_crash_notes(void) */ if (!readmem(crash_notes, KVADDR, ¬es_ptrs[kt->cpus-1], sizeof(notes_ptrs[kt->cpus-1]), "crash_notes", RETURN_ON_ERROR)) { - error(WARNING, "cannot read \"crash_notes\"\n"); + error(WARNING, "cannot read crash_notes\n"); FREEBUF(notes_ptrs); - return; + return FALSE; } if (symbol_exists("__per_cpu_offset")) { @@ -3636,11 +3477,12 @@ arm64_get_crash_notes(void) if (!(ms->panic_task_regs = calloc((size_t)kt->cpus, sizeof(struct arm64_pt_regs)))) error(FATAL, "cannot calloc panic_task_regs space\n"); - for (i = found = 0; i < kt->cpus; i++) { + for (i = 0; i < kt->cpus; i++) { + if (!readmem(notes_ptrs[i], KVADDR, buf, SIZE(note_buf), "note_buf_t", RETURN_ON_ERROR)) { - error(WARNING, "cpu %d: cannot read NT_PRSTATUS note\n", i); - continue; + error(WARNING, "failed to read note_buf_t\n"); + goto fail; } /* @@ -3670,24 +3512,19 @@ arm64_get_crash_notes(void) note->n_descsz == notesz) BCOPY((char *)note, buf, notesz); } else { - error(WARNING, "cpu %d: cannot find NT_PRSTATUS note\n", i); + 
error(WARNING, + "cannot find NT_PRSTATUS note for cpu: %d\n", i); continue; } } - /* - * Check the sanity of NT_PRSTATUS note only for each online cpu. - * If this cpu has invalid note, continue to find the crash notes - * for other online cpus. - */ if (note->n_type != NT_PRSTATUS) { - error(WARNING, "cpu %d: invalid NT_PRSTATUS note (n_type != NT_PRSTATUS)\n", i); - continue; + error(WARNING, "invalid note (n_type != NT_PRSTATUS)\n"); + goto fail; } - - if (!STRNEQ(p, "CORE")) { - error(WARNING, "cpu %d: invalid NT_PRSTATUS note (name != \"CORE\")\n", i); - continue; + if (p[0] != 'C' || p[1] != 'O' || p[2] != 'R' || p[3] != 'E') { + error(WARNING, "invalid note (name != \"CORE\"\n"); + goto fail; } /* @@ -3700,17 +3537,18 @@ arm64_get_crash_notes(void) BCOPY(p + OFFSET(elf_prstatus_pr_reg), &ms->panic_task_regs[i], sizeof(struct arm64_pt_regs)); - - found++; } FREEBUF(buf); FREEBUF(notes_ptrs); + return TRUE; - if (!found) { - free(ms->panic_task_regs); - ms->panic_task_regs = NULL; - } +fail: + FREEBUF(buf); + FREEBUF(notes_ptrs); + free(ms->panic_task_regs); + ms->panic_task_regs = NULL; + return FALSE; } static void @@ -3834,9 +3672,6 @@ arm64_IS_VMALLOC_ADDR(ulong vaddr) (vaddr <= machdep->machspec->kimage_end)) return FALSE; - if (ms->VA_START && (vaddr >= ms->VA_START)) - return TRUE; - return ((vaddr >= ms->vmalloc_start_addr && vaddr <= ms->vmalloc_end) || ((machdep->flags & VMEMMAP) && (vaddr >= ms->vmemmap_vaddr && vaddr <= ms->vmemmap_end)) || @@ -3848,54 +3683,9 @@ arm64_calc_VA_BITS(void) { int bitval; struct syment *sp; - ulong vabits_actual, value; + ulong value; char *string; - if ((string = pc->read_vmcoreinfo("NUMBER(VA_BITS)"))) { - value = atol(string); - free(string); - machdep->machspec->CONFIG_ARM64_VA_BITS = value; - } - - if (kernel_symbol_exists("vabits_actual")) { - if (pc->flags & PROC_KCORE) { - vabits_actual = symbol_value_from_proc_kallsyms("vabits_actual"); - if ((vabits_actual != BADVAL) && (READMEM(pc->mfd, &value, 
sizeof(ulong), - vabits_actual, KCORE_USE_VADDR) > 0)) { - if (CRASHDEBUG(1)) - fprintf(fp, - "/proc/kcore: vabits_actual: %ld\n", value); - machdep->machspec->VA_BITS_ACTUAL = value; - machdep->machspec->VA_BITS = value; - machdep->machspec->VA_START = _VA_START(machdep->machspec->VA_BITS_ACTUAL); - } else - error(FATAL, "/proc/kcore: cannot read vabits_actual\n"); - } else if (ACTIVE()) - error(FATAL, "cannot determine VA_BITS_ACTUAL: please use /proc/kcore\n"); - else { - if ((string = pc->read_vmcoreinfo("NUMBER(TCR_EL1_T1SZ)"))) { - /* See ARMv8 ARM for the description of - * TCR_EL1.T1SZ and how it can be used - * to calculate the vabits_actual - * supported by underlying kernel. - * - * Basically: - * vabits_actual = 64 - T1SZ; - */ - value = 64 - strtoll(string, NULL, 0); - if (CRASHDEBUG(1)) - fprintf(fp, "vmcoreinfo : vabits_actual: %ld\n", value); - free(string); - machdep->machspec->VA_BITS_ACTUAL = value; - machdep->machspec->VA_BITS = value; - machdep->machspec->VA_START = _VA_START(machdep->machspec->VA_BITS_ACTUAL); - } else - error(FATAL, "cannot determine VA_BITS_ACTUAL\n"); - } - - return; - } - if (!(sp = symbol_search("swapper_pg_dir")) && !(sp = symbol_search("idmap_pg_dir")) && !(sp = symbol_search("_text")) && @@ -3924,12 +3714,14 @@ arm64_calc_VA_BITS(void) /* * Verify against dumpfiles that export VA_BITS in vmcoreinfo */ - if (machdep->machspec->CONFIG_ARM64_VA_BITS && - (machdep->machspec->VA_BITS != machdep->machspec->CONFIG_ARM64_VA_BITS)) { - error(WARNING, "VA_BITS: calculated: %ld vmcoreinfo: %ld\n", - machdep->machspec->VA_BITS, machdep->machspec->CONFIG_ARM64_VA_BITS); - machdep->machspec->VA_BITS = machdep->machspec->CONFIG_ARM64_VA_BITS; - } + if ((string = pc->read_vmcoreinfo("NUMBER(VA_BITS)"))) { + value = atol(string); + free(string); + if (machdep->machspec->VA_BITS != value) + error(WARNING, "VA_BITS: calculated: %ld vmcoreinfo: %ld\n", + machdep->machspec->VA_BITS, value); + } + if (CRASHDEBUG(1)) fprintf(fp, "VA_BITS: 
%ld\n", machdep->machspec->VA_BITS); @@ -3947,15 +3739,6 @@ arm64_calc_VA_BITS(void) * * Since VMEMMAP_SIZE is dependent upon the size of a struct page, * the two ranges cannot be determined until POST_GDB. - * - * Since 52-bit VA was introduced: - * - * #define STRUCT_PAGE_MAX_SHIFT 6 - * #define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)) - * #define VMEMMAP_START (-VMEMMAP_SIZE) - * #define VMALLOC_START (MODULES_END) - * #define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K) - * #define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)) */ #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) @@ -3967,18 +3750,10 @@ static void arm64_calc_virtual_memory_ranges(void) { struct machine_specific *ms = machdep->machspec; - ulong value, vmemmap_start, vmemmap_end, vmemmap_size, vmalloc_end; - char *string; + ulong vmemmap_start, vmemmap_end, vmemmap_size; + ulong vmalloc_end; ulong PUD_SIZE = UNINITIALIZED; - if (!machdep->machspec->CONFIG_ARM64_VA_BITS) { - if ((string = pc->read_vmcoreinfo("NUMBER(VA_BITS)"))) { - value = atol(string); - free(string); - machdep->machspec->CONFIG_ARM64_VA_BITS = value; - } - } - if (THIS_KERNEL_VERSION < LINUX(3,17,0)) /* use original hardwired values */ return; @@ -3997,19 +3772,8 @@ arm64_calc_virtual_memory_ranges(void) break; } -#define STRUCT_PAGE_MAX_SHIFT 6 - - if (ms->VA_BITS_ACTUAL) { - vmemmap_size = (1UL) << (ms->CONFIG_ARM64_VA_BITS - machdep->pageshift - 1 + STRUCT_PAGE_MAX_SHIFT); - vmalloc_end = (- PUD_SIZE - vmemmap_size - KILOBYTES(64)); - vmemmap_start = (-vmemmap_size); - ms->vmalloc_end = vmalloc_end - 1; - ms->vmemmap_vaddr = vmemmap_start; - ms->vmemmap_end = -1; - return; - } - if (machdep->flags & NEW_VMEMMAP) +#define STRUCT_PAGE_MAX_SHIFT 6 vmemmap_size = 1UL << (ms->VA_BITS - machdep->pageshift - 1 + STRUCT_PAGE_MAX_SHIFT); else @@ -4033,7 +3797,7 @@ arm64_calc_virtual_memory_ranges(void) static int arm64_is_uvaddr(ulong addr, struct task_context *tc) { - return 
(addr < machdep->machspec->userspace_top); + return (addr < ARM64_USERSPACE_TOP); } diff --git a/arm64.cgithub_9596b4388ea5.patch b/arm64.cgithub_9596b4388ea5.patch new file mode 100644 index 0000000..49e2573 --- /dev/null +++ b/arm64.cgithub_9596b4388ea5.patch @@ -0,0 +1,4063 @@ +/* + * arm64.c - core analysis suite + * + * Copyright (C) 2012-2020 David Anderson + * Copyright (C) 2012-2020 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifdef ARM64 + +#include "defs.h" +#include +#include +#include + +#define NOT_IMPLEMENTED(X) error((X), "%s: function not implemented\n", __func__) + +static struct machine_specific arm64_machine_specific = { 0 }; +static int arm64_verify_symbol(const char *, ulong, char); +static void arm64_parse_cmdline_args(void); +static int arm64_search_for_kimage_voffset(ulong); +static int verify_kimage_voffset(void); +static void arm64_calc_kimage_voffset(void); +static void arm64_calc_phys_offset(void); +static void arm64_calc_virtual_memory_ranges(void); +static void arm64_get_section_size_bits(void); +static int arm64_kdump_phys_base(ulong *); +static ulong arm64_processor_speed(void); +static void arm64_init_kernel_pgd(void); +static int arm64_kvtop(struct task_context *, ulong, physaddr_t *, int); +static int arm64_uvtop(struct task_context *, ulong, physaddr_t *, int); +static int arm64_vtop_2level_64k(ulong, ulong, physaddr_t *, int); +static int arm64_vtop_3level_64k(ulong, ulong, physaddr_t *, int); +static int 
arm64_vtop_3level_4k(ulong, ulong, physaddr_t *, int); +static int arm64_vtop_4level_4k(ulong, ulong, physaddr_t *, int); +static ulong arm64_get_task_pgd(ulong); +static void arm64_irq_stack_init(void); +static void arm64_stackframe_init(void); +static int arm64_eframe_search(struct bt_info *); +static int arm64_is_kernel_exception_frame(struct bt_info *, ulong); +static int arm64_in_exception_text(ulong); +static int arm64_in_exp_entry(ulong); +static void arm64_back_trace_cmd(struct bt_info *); +static void arm64_back_trace_cmd_v2(struct bt_info *); +static void arm64_print_text_symbols(struct bt_info *, struct arm64_stackframe *, FILE *); +static int arm64_print_stackframe_entry(struct bt_info *, int, struct arm64_stackframe *, FILE *); +static int arm64_print_stackframe_entry_v2(struct bt_info *, int, struct arm64_stackframe *, FILE *); +static void arm64_display_full_frame(struct bt_info *, ulong); +static void arm64_display_full_frame_v2(struct bt_info *, struct arm64_stackframe *, struct arm64_stackframe *); +static int arm64_unwind_frame(struct bt_info *, struct arm64_stackframe *); +static int arm64_unwind_frame_v2(struct bt_info *, struct arm64_stackframe *, FILE *); +static int arm64_get_dumpfile_stackframe(struct bt_info *, struct arm64_stackframe *); +static int arm64_in_kdump_text(struct bt_info *, struct arm64_stackframe *); +static int arm64_in_kdump_text_on_irq_stack(struct bt_info *); +static int arm64_switch_stack(struct bt_info *, struct arm64_stackframe *, FILE *); +static int arm64_get_stackframe(struct bt_info *, struct arm64_stackframe *); +static void arm64_get_stack_frame(struct bt_info *, ulong *, ulong *); +static void arm64_gen_hidden_frame(struct bt_info *bt, ulong, struct arm64_stackframe *); +static void arm64_print_exception_frame(struct bt_info *, ulong, int, FILE *); +static void arm64_do_bt_reference_check(struct bt_info *, ulong, char *); +static int arm64_translate_pte(ulong, void *, ulonglong); +static ulong 
arm64_vmalloc_start(void); +static int arm64_is_task_addr(ulong); +static int arm64_dis_filter(ulong, char *, unsigned int); +static void arm64_cmd_mach(void); +static void arm64_display_machine_stats(void); +static int arm64_get_smp_cpus(void); +static void arm64_clear_machdep_cache(void); +static int arm64_on_process_stack(struct bt_info *, ulong); +static int arm64_in_alternate_stack(int, ulong); +static int arm64_on_irq_stack(int, ulong); +static void arm64_set_irq_stack(struct bt_info *); +static void arm64_set_process_stack(struct bt_info *); +static int arm64_get_kvaddr_ranges(struct vaddr_range *); +static void arm64_get_crash_notes(void); +static void arm64_calc_VA_BITS(void); +static int arm64_is_uvaddr(ulong, struct task_context *); + + +/* + * Do all necessary machine-specific setup here. This is called several times + * during initialization. + */ +void +arm64_init(int when) +{ + ulong value; + char *string; + struct machine_specific *ms; + +#if defined(__x86_64__) + if (ACTIVE()) + error(FATAL, "compiled for the ARM64 architecture\n"); +#endif + + switch (when) { + case SETUP_ENV: + machdep->process_elf_notes = process_elf64_notes; + break; + + case PRE_SYMTAB: + machdep->machspec = &arm64_machine_specific; + machdep->verify_symbol = arm64_verify_symbol; + if (pc->flags & KERNEL_DEBUG_QUERY) + return; + machdep->verify_paddr = generic_verify_paddr; + if (machdep->cmdline_args[0]) + arm64_parse_cmdline_args(); + machdep->flags |= MACHDEP_BT_TEXT; + + ms = machdep->machspec; + + if (!ms->kimage_voffset && STREQ(pc->live_memsrc, "/dev/crash")) + ioctl(pc->mfd, DEV_CRASH_ARCH_DATA, &ms->kimage_voffset); + + if (!ms->kimage_voffset && + (string = pc->read_vmcoreinfo("NUMBER(kimage_voffset)"))) { + ms->kimage_voffset = htol(string, QUIET, NULL); + free(string); + } + + if (ms->kimage_voffset || + (ACTIVE() && (symbol_value_from_proc_kallsyms("kimage_voffset") != BADVAL))) { + machdep->flags |= NEW_VMEMMAP; + + /* + * Even if CONFIG_RANDOMIZE_BASE is not 
configured, + * derive_kaslr_offset() should work and set + * kt->relocate to 0 + */ + if (!kt->relocate && !(kt->flags2 & (RELOC_AUTO|KASLR))) + kt->flags2 |= (RELOC_AUTO|KASLR); + } + + break; + + case PRE_GDB: + if (kernel_symbol_exists("kimage_voffset")) + machdep->flags |= NEW_VMEMMAP; + + if (!machdep->pagesize && + (string = pc->read_vmcoreinfo("PAGESIZE"))) { + machdep->pagesize = atoi(string); + free(string); + } + + if (!machdep->pagesize) { + /* + * Kerneldoc Documentation/arm64/booting.txt describes + * the kernel image header flags field. + */ + value = machdep->machspec->kernel_flags; + value = (value >> 1) & 3; + + switch(value) + { + case 0: + break; + case 1: + machdep->pagesize = 4096; + break; + case 2: + /* TODO: machdep->pagesize = 16384; */ + error(FATAL, "16K pages not supported."); + break; + case 3: + machdep->pagesize = 65536; + break; + } + + } + + /* + * This code section will only be executed if the kernel is + * earlier than Linux 4.4 (if there is no vmcoreinfo) + */ + if (!machdep->pagesize && + kernel_symbol_exists("swapper_pg_dir") && + kernel_symbol_exists("idmap_pg_dir")) { + value = symbol_value("swapper_pg_dir") - + symbol_value("idmap_pg_dir"); + + /* + * idmap_pg_dir is 2 pages prior to 4.1, + * and 3 pages thereafter. Only 4K and 64K + * page sizes are supported. 
+ */ + switch (value) + { + case (4096 * 2): + case (4096 * 3): + machdep->pagesize = 4096; + break; + case (65536 * 2): + case (65536 * 3): + machdep->pagesize = 65536; + break; + } + } else if (ACTIVE()) + machdep->pagesize = memory_page_size(); /* host */ + + machdep->pageshift = ffs(machdep->pagesize) - 1; + machdep->pageoffset = machdep->pagesize - 1; + machdep->pagemask = ~((ulonglong)machdep->pageoffset); + + arm64_calc_VA_BITS(); + ms = machdep->machspec; + if (ms->VA_BITS_ACTUAL) { + ms->page_offset = ARM64_PAGE_OFFSET_ACTUAL; + machdep->identity_map_base = ARM64_PAGE_OFFSET_ACTUAL; + machdep->kvbase = ARM64_PAGE_OFFSET_ACTUAL; + ms->userspace_top = ARM64_USERSPACE_TOP_ACTUAL; + } else { + ms->page_offset = ARM64_PAGE_OFFSET; + machdep->identity_map_base = ARM64_PAGE_OFFSET; + machdep->kvbase = ARM64_VA_START; + ms->userspace_top = ARM64_USERSPACE_TOP; + } + machdep->is_kvaddr = generic_is_kvaddr; + machdep->kvtop = arm64_kvtop; + if (machdep->flags & NEW_VMEMMAP) { + struct syment *sp; + + sp = kernel_symbol_search("_text"); + ms->kimage_text = (sp ? sp->value : 0); + sp = kernel_symbol_search("_end"); + ms->kimage_end = (sp ? 
sp->value : 0); + + if (ms->VA_BITS_ACTUAL) { + ms->modules_vaddr = (st->_stext_vmlinux & TEXT_OFFSET_MASK) - ARM64_MODULES_VSIZE; + ms->modules_end = ms->modules_vaddr + ARM64_MODULES_VSIZE -1; + } else { + ms->modules_vaddr = ARM64_VA_START; + if (kernel_symbol_exists("kasan_init")) + ms->modules_vaddr += ARM64_KASAN_SHADOW_SIZE; + ms->modules_end = ms->modules_vaddr + ARM64_MODULES_VSIZE -1; + } + + ms->vmalloc_start_addr = ms->modules_end + 1; + + arm64_calc_kimage_voffset(); + } else { + ms->modules_vaddr = ARM64_PAGE_OFFSET - MEGABYTES(64); + ms->modules_end = ARM64_PAGE_OFFSET - 1; + ms->vmalloc_start_addr = ARM64_VA_START; + } + ms->vmalloc_end = ARM64_VMALLOC_END; + ms->vmemmap_vaddr = ARM64_VMEMMAP_VADDR; + ms->vmemmap_end = ARM64_VMEMMAP_END; + + switch (machdep->pagesize) + { + case 4096: + machdep->ptrs_per_pgd = PTRS_PER_PGD_L3_4K; + if ((machdep->pgd = + (char *)malloc(PTRS_PER_PGD_L3_4K * 8)) == NULL) + error(FATAL, "cannot malloc pgd space."); + if (machdep->machspec->VA_BITS > PGDIR_SHIFT_L4_4K) { + machdep->flags |= VM_L4_4K; + if ((machdep->pud = + (char *)malloc(PTRS_PER_PUD_L4_4K * 8)) + == NULL) + error(FATAL, "cannot malloc pud space."); + } else { + machdep->flags |= VM_L3_4K; + machdep->pud = NULL; /* not used */ + } + if ((machdep->pmd = + (char *)malloc(PTRS_PER_PMD_L3_4K * 8)) == NULL) + error(FATAL, "cannot malloc pmd space."); + if ((machdep->ptbl = + (char *)malloc(PTRS_PER_PTE_L3_4K * 8)) == NULL) + error(FATAL, "cannot malloc ptbl space."); + break; + + case 65536: + if (kernel_symbol_exists("idmap_ptrs_per_pgd") && + readmem(symbol_value("idmap_ptrs_per_pgd"), KVADDR, + &value, sizeof(ulong), "idmap_ptrs_per_pgd", QUIET|RETURN_ON_ERROR)) + machdep->ptrs_per_pgd = value; + + if (machdep->machspec->VA_BITS > PGDIR_SHIFT_L3_64K) { + machdep->flags |= VM_L3_64K; + if (!machdep->ptrs_per_pgd) + machdep->ptrs_per_pgd = PTRS_PER_PGD_L3_64K; + if ((machdep->pgd = + (char *)malloc(machdep->ptrs_per_pgd * 8)) == NULL) + error(FATAL, "cannot 
malloc pgd space."); + if ((machdep->pmd = + (char *)malloc(PTRS_PER_PMD_L3_64K * 8)) == NULL) + error(FATAL, "cannot malloc pmd space."); + if ((machdep->ptbl = + (char *)malloc(PTRS_PER_PTE_L3_64K * 8)) == NULL) + error(FATAL, "cannot malloc ptbl space."); + } else { + machdep->flags |= VM_L2_64K; + if (!machdep->ptrs_per_pgd) + machdep->ptrs_per_pgd = PTRS_PER_PGD_L2_64K; + if ((machdep->pgd = + (char *)malloc(machdep->ptrs_per_pgd * 8)) == NULL) + error(FATAL, "cannot malloc pgd space."); + if ((machdep->ptbl = + (char *)malloc(PTRS_PER_PTE_L2_64K * 8)) == NULL) + error(FATAL, "cannot malloc ptbl space."); + machdep->pmd = NULL; /* not used */ + } + machdep->pud = NULL; /* not used */ + break; + + default: + if (machdep->pagesize) + error(FATAL, "invalid/unsupported page size: %d\n", + machdep->pagesize); + else + error(FATAL, "cannot determine page size\n"); + } + + machdep->last_pgd_read = 0; + machdep->last_pud_read = 0; + machdep->last_pmd_read = 0; + machdep->last_ptbl_read = 0; + machdep->clear_machdep_cache = arm64_clear_machdep_cache; + + machdep->stacksize = ARM64_STACK_SIZE; + machdep->flags |= VMEMMAP; + + machdep->uvtop = arm64_uvtop; + machdep->is_uvaddr = arm64_is_uvaddr; + machdep->eframe_search = arm64_eframe_search; + machdep->back_trace = arm64_back_trace_cmd; + machdep->in_alternate_stack = arm64_in_alternate_stack; + machdep->processor_speed = arm64_processor_speed; + machdep->get_task_pgd = arm64_get_task_pgd; + machdep->get_stack_frame = arm64_get_stack_frame; + machdep->get_stackbase = generic_get_stackbase; + machdep->get_stacktop = generic_get_stacktop; + machdep->translate_pte = arm64_translate_pte; + machdep->memory_size = generic_memory_size; + machdep->vmalloc_start = arm64_vmalloc_start; + machdep->get_kvaddr_ranges = arm64_get_kvaddr_ranges; + machdep->is_task_addr = arm64_is_task_addr; + machdep->dis_filter = arm64_dis_filter; + machdep->cmd_mach = arm64_cmd_mach; + machdep->get_smp_cpus = arm64_get_smp_cpus; + 
machdep->line_number_hooks = NULL; + machdep->value_to_symbol = generic_machdep_value_to_symbol; + machdep->dump_irq = generic_dump_irq; + machdep->show_interrupts = generic_show_interrupts; + machdep->get_irq_affinity = generic_get_irq_affinity; + machdep->dumpfile_init = NULL; + machdep->verify_line_number = NULL; + machdep->init_kernel_pgd = arm64_init_kernel_pgd; + + /* use machdep parameters */ + arm64_calc_phys_offset(); + + if (CRASHDEBUG(1)) { + if (machdep->flags & NEW_VMEMMAP) + fprintf(fp, "kimage_voffset: %lx\n", + machdep->machspec->kimage_voffset); + fprintf(fp, "phys_offset: %lx\n", + machdep->machspec->phys_offset); + } + + break; + + case POST_GDB: + arm64_calc_virtual_memory_ranges(); + arm64_get_section_size_bits(); + + if (!machdep->max_physmem_bits) { + if ((string = pc->read_vmcoreinfo("NUMBER(MAX_PHYSMEM_BITS)"))) { + machdep->max_physmem_bits = atol(string); + free(string); + } else if (machdep->machspec->VA_BITS == 52) /* guess */ + machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_52; + else if (THIS_KERNEL_VERSION >= LINUX(3,17,0)) + machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_3_17; + else + machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; + } + + ms = machdep->machspec; + + if (CRASHDEBUG(1)) { + if (ms->VA_BITS_ACTUAL) { + fprintf(fp, "CONFIG_ARM64_VA_BITS: %ld\n", ms->CONFIG_ARM64_VA_BITS); + fprintf(fp, " VA_BITS_ACTUAL: %ld\n", ms->VA_BITS_ACTUAL); + fprintf(fp, "(calculated) VA_BITS: %ld\n", ms->VA_BITS); + fprintf(fp, " PAGE_OFFSET: %lx\n", ARM64_PAGE_OFFSET_ACTUAL); + fprintf(fp, " VA_START: %lx\n", ms->VA_START); + fprintf(fp, " modules: %lx - %lx\n", ms->modules_vaddr, ms->modules_end); + fprintf(fp, " vmalloc: %lx - %lx\n", ms->vmalloc_start_addr, ms->vmalloc_end); + fprintf(fp, "kernel image: %lx - %lx\n", ms->kimage_text, ms->kimage_end); + fprintf(fp, " vmemmap: %lx - %lx\n\n", ms->vmemmap_vaddr, ms->vmemmap_end); + } + } + + + if (THIS_KERNEL_VERSION >= LINUX(4,0,0)) { + ms->__SWP_TYPE_BITS = 6; + ms->__SWP_TYPE_SHIFT = 2; + 
ms->__SWP_TYPE_MASK = ((1UL << ms->__SWP_TYPE_BITS) - 1); + ms->__SWP_OFFSET_SHIFT = (ms->__SWP_TYPE_BITS + ms->__SWP_TYPE_SHIFT); + ms->__SWP_OFFSET_BITS = 50; + ms->__SWP_OFFSET_MASK = ((1UL << ms->__SWP_OFFSET_BITS) - 1); + ms->PTE_PROT_NONE = (1UL << 58); + ms->PTE_FILE = 0; /* unused */ + } else if (THIS_KERNEL_VERSION >= LINUX(3,13,0)) { + ms->__SWP_TYPE_BITS = 6; + ms->__SWP_TYPE_SHIFT = 3; + ms->__SWP_TYPE_MASK = ((1UL << ms->__SWP_TYPE_BITS) - 1); + ms->__SWP_OFFSET_SHIFT = (ms->__SWP_TYPE_BITS + ms->__SWP_TYPE_SHIFT); + ms->__SWP_OFFSET_BITS = 49; + ms->__SWP_OFFSET_MASK = ((1UL << ms->__SWP_OFFSET_BITS) - 1); + ms->PTE_PROT_NONE = (1UL << 58); + ms->PTE_FILE = (1UL << 2); + } else if (THIS_KERNEL_VERSION >= LINUX(3,11,0)) { + ms->__SWP_TYPE_BITS = 6; + ms->__SWP_TYPE_SHIFT = 4; + ms->__SWP_TYPE_MASK = ((1UL << ms->__SWP_TYPE_BITS) - 1); + ms->__SWP_OFFSET_SHIFT = (ms->__SWP_TYPE_BITS + ms->__SWP_TYPE_SHIFT); + ms->__SWP_OFFSET_BITS = 0; /* unused */ + ms->__SWP_OFFSET_MASK = 0; /* unused */ + ms->PTE_PROT_NONE = (1UL << 2); + ms->PTE_FILE = (1UL << 3); + } else { + ms->__SWP_TYPE_BITS = 6; + ms->__SWP_TYPE_SHIFT = 3; + ms->__SWP_TYPE_MASK = ((1UL << ms->__SWP_TYPE_BITS) - 1); + ms->__SWP_OFFSET_SHIFT = (ms->__SWP_TYPE_BITS + ms->__SWP_TYPE_SHIFT); + ms->__SWP_OFFSET_BITS = 0; /* unused */ + ms->__SWP_OFFSET_MASK = 0; /* unused */ + ms->PTE_PROT_NONE = (1UL << 1); + ms->PTE_FILE = (1UL << 2); + } + + if (symbol_exists("irq_desc")) + ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, + "irq_desc", NULL, 0); + else if (kernel_symbol_exists("nr_irqs")) + get_symbol_data("nr_irqs", sizeof(unsigned int), + &machdep->nr_irqs); + + if (!machdep->hz) + machdep->hz = 100; + + arm64_irq_stack_init(); + arm64_stackframe_init(); + break; + + case POST_VM: + /* + * crash_notes contains machine specific information about the + * crash. In particular, it contains CPU registers at the time + * of the crash. 
We need this information to extract correct + * backtraces from the panic task. + */ + if (!LIVE()) + arm64_get_crash_notes(); + break; + + case LOG_ONLY: + machdep->machspec = &arm64_machine_specific; + arm64_calc_VA_BITS(); + arm64_calc_phys_offset(); + machdep->machspec->page_offset = ARM64_PAGE_OFFSET; + break; + } +} + +/* + * Accept or reject a symbol from the kernel namelist. + */ +static int +arm64_verify_symbol(const char *name, ulong value, char type) +{ + if (!name || !strlen(name)) + return FALSE; + + if ((type == 'A') && STREQ(name, "_kernel_flags_le")) + machdep->machspec->kernel_flags = le64toh(value); + + if ((type == 'A') && STREQ(name, "_kernel_flags_le_hi32")) + machdep->machspec->kernel_flags |= ((ulong)le32toh(value) << 32); + + if ((type == 'A') && STREQ(name, "_kernel_flags_le_lo32")) + machdep->machspec->kernel_flags |= le32toh(value); + + if (((type == 'A') || (type == 'a')) && (highest_bit_long(value) != 63)) + return FALSE; + + if ((value == 0) && + ((type == 'a') || (type == 'n') || (type == 'N') || (type == 'U'))) + return FALSE; + + if (STREQ(name, "$d") || STREQ(name, "$x")) + return FALSE; + + if ((type == 'A') && STRNEQ(name, "__crc_")) + return FALSE; + + if ((type == 'N') && strstr(name, "$d")) + return FALSE; + + if (!(machdep->flags & KSYMS_START) && STREQ(name, "idmap_pg_dir")) + machdep->flags |= KSYMS_START; + + return TRUE; +} + + +void +arm64_dump_machdep_table(ulong arg) +{ + const struct machine_specific *ms; + int others, i; + + others = 0; + fprintf(fp, " flags: %lx (", machdep->flags); + if (machdep->flags & KSYMS_START) + fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); + if (machdep->flags & PHYS_OFFSET) + fprintf(fp, "%sPHYS_OFFSET", others++ ? "|" : ""); + if (machdep->flags & VM_L2_64K) + fprintf(fp, "%sVM_L2_64K", others++ ? "|" : ""); + if (machdep->flags & VM_L3_64K) + fprintf(fp, "%sVM_L3_64K", others++ ? "|" : ""); + if (machdep->flags & VM_L3_4K) + fprintf(fp, "%sVM_L3_4K", others++ ? 
"|" : ""); + if (machdep->flags & VM_L4_4K) + fprintf(fp, "%sVM_L4_4K", others++ ? "|" : ""); + if (machdep->flags & VMEMMAP) + fprintf(fp, "%sVMEMMAP", others++ ? "|" : ""); + if (machdep->flags & KDUMP_ENABLED) + fprintf(fp, "%sKDUMP_ENABLED", others++ ? "|" : ""); + if (machdep->flags & IRQ_STACKS) + fprintf(fp, "%sIRQ_STACKS", others++ ? "|" : ""); + if (machdep->flags & UNW_4_14) + fprintf(fp, "%sUNW_4_14", others++ ? "|" : ""); + if (machdep->flags & MACHDEP_BT_TEXT) + fprintf(fp, "%sMACHDEP_BT_TEXT", others++ ? "|" : ""); + if (machdep->flags & NEW_VMEMMAP) + fprintf(fp, "%sNEW_VMEMMAP", others++ ? "|" : ""); + fprintf(fp, ")\n"); + + fprintf(fp, " kvbase: %lx\n", machdep->kvbase); + fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); + fprintf(fp, " pagesize: %d\n", machdep->pagesize); + fprintf(fp, " pageshift: %d\n", machdep->pageshift); + fprintf(fp, " pagemask: %lx\n", (ulong)machdep->pagemask); + fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); + fprintf(fp, " stacksize: %ld\n", machdep->stacksize); + fprintf(fp, " hz: %d\n", machdep->hz); + fprintf(fp, " mhz: %ld\n", machdep->mhz); + fprintf(fp, " memsize: %lld (0x%llx)\n", + (ulonglong)machdep->memsize, (ulonglong)machdep->memsize); + fprintf(fp, " bits: %d\n", machdep->bits); + fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); + fprintf(fp, " eframe_search: arm64_eframe_search()\n"); + fprintf(fp, " back_trace: arm64_back_trace_cmd() (default: %s method)\n", + kt->flags & USE_OPT_BT ? "optional" : "original"); + fprintf(fp, " in_alternate_stack: arm64_in_alternate_stack()\n"); + fprintf(fp, " processor_speed: arm64_processor_speed()\n"); + fprintf(fp, " uvtop: arm64_uvtop()->%s()\n", + machdep->flags & VM_L3_4K ? + "arm64_vtop_3level_4k" : + machdep->flags & VM_L4_4K ? + "arm64_vtop_4level_4k" : + machdep->flags & VM_L3_64K ? + "arm64_vtop_3level_64k" : "arm64_vtop_2level_64k"); + fprintf(fp, " kvtop: arm64_kvtop()->%s()\n", + machdep->flags & VM_L3_4K ? 
+ "arm64_vtop_3level_4k" : + machdep->flags & VM_L4_4K ? + "arm64_vtop_4level_4k" : + machdep->flags & VM_L3_64K ? + "arm64_vtop_3level_64k" : "arm64_vtop_2level_64k"); + fprintf(fp, " get_task_pgd: arm64_get_task_pgd()\n"); + fprintf(fp, " dump_irq: generic_dump_irq()\n"); + fprintf(fp, " get_stack_frame: arm64_get_stack_frame()\n"); + fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); + fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); + fprintf(fp, " translate_pte: arm64_translate_pte()\n"); + fprintf(fp, " memory_size: generic_memory_size()\n"); + fprintf(fp, " vmalloc_start: arm64_vmalloc_start()\n"); + fprintf(fp, " get_kvaddr_ranges: arm64_get_kvaddr_ranges()\n"); + fprintf(fp, " is_task_addr: arm64_is_task_addr()\n"); + fprintf(fp, " verify_symbol: arm64_verify_symbol()\n"); + fprintf(fp, " dis_filter: arm64_dis_filter()\n"); + fprintf(fp, " cmd_mach: arm64_cmd_mach()\n"); + fprintf(fp, " get_smp_cpus: arm64_get_smp_cpus()\n"); + fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); + fprintf(fp, " is_uvaddr: arm64_is_uvaddr()\n"); + fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); + fprintf(fp, " init_kernel_pgd: arm64_init_kernel_pgd\n"); + fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); + fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); + fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); + fprintf(fp, " dumpfile_init: (not used)\n"); + fprintf(fp, " process_elf_notes: process_elf64_notes()\n"); + fprintf(fp, " verify_line_number: (not used)\n"); + + fprintf(fp, " xendump_p2m_create: (n/a)\n"); + fprintf(fp, "xen_kdump_p2m_create: (n/a)\n"); + fprintf(fp, " xendump_panic_task: (n/a)\n"); + fprintf(fp, " get_xendump_regs: (n/a)\n"); + fprintf(fp, " line_number_hooks: (not used)\n"); + fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); + fprintf(fp, " last_pud_read: "); + if ((PAGESIZE() == 65536) || + ((PAGESIZE() == 4096) && !(machdep->flags & VM_L4_4K))) + fprintf(fp, "(not 
used)\n"); + else + fprintf(fp, "%lx\n", machdep->last_pud_read); + fprintf(fp, " last_pmd_read: "); + if (PAGESIZE() == 65536) + fprintf(fp, "(not used)\n"); + else + fprintf(fp, "%lx\n", machdep->last_pmd_read); + fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); + fprintf(fp, " clear_machdep_cache: arm64_clear_machdep_cache()\n"); + fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); + fprintf(fp, " pud: %lx\n", (ulong)machdep->pud); + fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); + fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); + fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); + fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); + fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); + fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); + + for (i = 0; i < MAX_MACHDEP_ARGS; i++) { + fprintf(fp, " cmdline_args[%d]: %s\n", + i, machdep->cmdline_args[i] ? + machdep->cmdline_args[i] : "(unused)"); + } + + ms = machdep->machspec; + + fprintf(fp, " machspec: %lx\n", (ulong)ms); + fprintf(fp, " VA_BITS: %ld\n", ms->VA_BITS); + fprintf(fp, " CONFIG_ARM64_VA_BITS: %ld\n", ms->CONFIG_ARM64_VA_BITS); + fprintf(fp, " VA_START: "); + if (ms->VA_START) + fprintf(fp, "%lx\n", ms->VA_START); + else + fprintf(fp, "(unused)\n"); + fprintf(fp, " VA_BITS_ACTUAL: "); + if (ms->VA_BITS_ACTUAL) + fprintf(fp, "%ld\n", ms->VA_BITS_ACTUAL); + else + fprintf(fp, "(unused)\n"); + fprintf(fp, " userspace_top: %016lx\n", ms->userspace_top); + fprintf(fp, " page_offset: %016lx\n", ms->page_offset); + fprintf(fp, " vmalloc_start_addr: %016lx\n", ms->vmalloc_start_addr); + fprintf(fp, " vmalloc_end: %016lx\n", ms->vmalloc_end); + fprintf(fp, " modules_vaddr: %016lx\n", ms->modules_vaddr); + fprintf(fp, " modules_end: %016lx\n", ms->modules_end); + fprintf(fp, " vmemmap_vaddr: %016lx\n", ms->vmemmap_vaddr); + fprintf(fp, " vmemmap_end: %016lx\n", ms->vmemmap_end); + if (machdep->flags & NEW_VMEMMAP) { + fprintf(fp, 
" kimage_text: %016lx\n", ms->kimage_text); + fprintf(fp, " kimage_end: %016lx\n", ms->kimage_end); + fprintf(fp, " kimage_voffset: %016lx\n", ms->kimage_voffset); + } + fprintf(fp, " phys_offset: %lx\n", ms->phys_offset); + fprintf(fp, "__exception_text_start: %lx\n", ms->__exception_text_start); + fprintf(fp, " __exception_text_end: %lx\n", ms->__exception_text_end); + fprintf(fp, " __irqentry_text_start: %lx\n", ms->__irqentry_text_start); + fprintf(fp, " __irqentry_text_end: %lx\n", ms->__irqentry_text_end); + fprintf(fp, " exp_entry1_start: %lx\n", ms->exp_entry1_start); + fprintf(fp, " exp_entry1_end: %lx\n", ms->exp_entry1_end); + fprintf(fp, " exp_entry2_start: %lx\n", ms->exp_entry2_start); + fprintf(fp, " exp_entry2_end: %lx\n", ms->exp_entry2_end); + fprintf(fp, " panic_task_regs: %lx\n", (ulong)ms->panic_task_regs); + fprintf(fp, " user_eframe_offset: %ld\n", ms->user_eframe_offset); + fprintf(fp, " kern_eframe_offset: %ld\n", ms->kern_eframe_offset); + fprintf(fp, " PTE_PROT_NONE: %lx\n", ms->PTE_PROT_NONE); + fprintf(fp, " PTE_FILE: "); + if (ms->PTE_FILE) + fprintf(fp, "%lx\n", ms->PTE_FILE); + else + fprintf(fp, "(unused)\n"); + fprintf(fp, " __SWP_TYPE_BITS: %ld\n", ms->__SWP_TYPE_BITS); + fprintf(fp, " __SWP_TYPE_SHIFT: %ld\n", ms->__SWP_TYPE_SHIFT); + fprintf(fp, " __SWP_TYPE_MASK: %lx\n", ms->__SWP_TYPE_MASK); + fprintf(fp, " __SWP_OFFSET_BITS: "); + if (ms->__SWP_OFFSET_BITS) + fprintf(fp, "%ld\n", ms->__SWP_OFFSET_BITS); + else + fprintf(fp, "(unused)\n"); + fprintf(fp, " __SWP_OFFSET_SHIFT: %ld\n", ms->__SWP_OFFSET_SHIFT); + fprintf(fp, " __SWP_OFFSET_MASK: "); + if (ms->__SWP_OFFSET_MASK) + fprintf(fp, "%lx\n", ms->__SWP_OFFSET_MASK); + else + fprintf(fp, "(unused)\n"); + fprintf(fp, " machine_kexec_start: %lx\n", ms->machine_kexec_start); + fprintf(fp, " machine_kexec_end: %lx\n", ms->machine_kexec_end); + fprintf(fp, " crash_kexec_start: %lx\n", ms->crash_kexec_start); + fprintf(fp, " crash_kexec_end: %lx\n", ms->crash_kexec_end); + 
fprintf(fp, " crash_save_cpu_start: %lx\n", ms->crash_save_cpu_start); + fprintf(fp, " crash_save_cpu_end: %lx\n", ms->crash_save_cpu_end); + fprintf(fp, " kernel_flags: %lx\n", ms->kernel_flags); + fprintf(fp, " irq_stackbuf: %lx\n", (ulong)ms->irq_stackbuf); + if (machdep->flags & IRQ_STACKS) { + fprintf(fp, " irq_stack_size: %ld\n", ms->irq_stack_size); + for (i = 0; i < kt->cpus; i++) + fprintf(fp, " irq_stacks[%d]: %lx\n", + i, ms->irq_stacks[i]); + } else { + fprintf(fp, " irq_stack_size: (unused)\n"); + fprintf(fp, " irq_stacks: (unused)\n"); + } +} + +static int +arm64_parse_machdep_arg_l(char *argstring, char *param, ulong *value) +{ + int len; + int megabytes = FALSE; + char *p; + + len = strlen(param); + if (!STRNEQ(argstring, param) || (argstring[len] != '=')) + return FALSE; + + if ((LASTCHAR(argstring) == 'm') || + (LASTCHAR(argstring) == 'M')) { + LASTCHAR(argstring) = NULLCHAR; + megabytes = TRUE; + } + + p = argstring + len + 1; + if (strlen(p)) { + int flags = RETURN_ON_ERROR | QUIET; + int err = 0; + + if (STRNEQ(argstring, "max_physmem_bits")) { + *value = dtol(p, flags, &err); + } else if (megabytes) { + *value = dtol(p, flags, &err); + if (!err) + *value = MEGABYTES(*value); + } else { + *value = htol(p, flags, &err); + } + + if (!err) + return TRUE; + } + + return FALSE; +} + +/* + * Parse machine dependent command line arguments. + * + * Force the phys_offset address via: + * + * --machdep phys_offset=
+ */ +static void +arm64_parse_cmdline_args(void) +{ + int index, i, c; + char *arglist[MAXARGS]; + char buf[BUFSIZE]; + char *p; + + for (index = 0; index < MAX_MACHDEP_ARGS; index++) { + if (!machdep->cmdline_args[index]) + break; + + if (!strstr(machdep->cmdline_args[index], "=")) { + error(WARNING, "ignoring --machdep option: %x\n", + machdep->cmdline_args[index]); + continue; + } + + strcpy(buf, machdep->cmdline_args[index]); + + for (p = buf; *p; p++) { + if (*p == ',') + *p = ' '; + } + + c = parse_line(buf, arglist); + + for (i = 0; i < c; i++) { + if (arm64_parse_machdep_arg_l(arglist[i], "phys_offset", + &machdep->machspec->phys_offset)) { + error(NOTE, + "setting phys_offset to: 0x%lx\n\n", + machdep->machspec->phys_offset); + machdep->flags |= PHYS_OFFSET; + continue; + } else if (arm64_parse_machdep_arg_l(arglist[i], "kimage_voffset", + &machdep->machspec->kimage_voffset)) { + error(NOTE, + "setting kimage_voffset to: 0x%lx\n\n", + machdep->machspec->kimage_voffset); + continue; + } else if (arm64_parse_machdep_arg_l(arglist[i], "max_physmem_bits", + &machdep->max_physmem_bits)) { + error(NOTE, + "setting max_physmem_bits to: %ld\n\n", + machdep->max_physmem_bits); + continue; + } + + error(WARNING, "ignoring --machdep option: %s\n", + arglist[i]); + } + } +} + +#define MIN_KIMG_ALIGN (0x00200000) /* kimage load address must be aligned 2M */ +/* + * Traverse the entire dumpfile to find/verify kimage_voffset. 
+ */ +static int +arm64_search_for_kimage_voffset(ulong phys_base) +{ + ulong kimage_load_addr; + ulong phys_end; + struct machine_specific *ms = machdep->machspec; + + if (!arm_kdump_phys_end(&phys_end)) + return FALSE; + + for (kimage_load_addr = phys_base; + kimage_load_addr <= phys_end; kimage_load_addr += MIN_KIMG_ALIGN) { + ms->kimage_voffset = ms->vmalloc_start_addr - kimage_load_addr; + + if ((kt->flags2 & KASLR) && (kt->flags & RELOC_SET)) + ms->kimage_voffset += (kt->relocate * - 1); + + if (verify_kimage_voffset()) { + if (CRASHDEBUG(1)) + error(INFO, + "dumpfile searched for kimage_voffset: %lx\n\n", + ms->kimage_voffset); + break; + } + } + + if (kimage_load_addr > phys_end) + return FALSE; + + return TRUE; +} + +static int +verify_kimage_voffset(void) +{ + ulong kimage_voffset; + + if (!readmem(symbol_value("kimage_voffset"), KVADDR, &kimage_voffset, + sizeof(kimage_voffset), "verify kimage_voffset", QUIET|RETURN_ON_ERROR)) + return FALSE; + + return (machdep->machspec->kimage_voffset == kimage_voffset); +} + +static void +arm64_calc_kimage_voffset(void) +{ + struct machine_specific *ms = machdep->machspec; + ulong phys_addr = 0; + int errflag; + + if (ms->kimage_voffset) /* vmcoreinfo, ioctl, or --machdep override */ + return; + + if (ACTIVE()) { + char buf[BUFSIZE]; + char *p1; + FILE *iomem; + ulong kimage_voffset, vaddr; + + if (pc->flags & PROC_KCORE) { + kimage_voffset = symbol_value_from_proc_kallsyms("kimage_voffset"); + if ((kimage_voffset != BADVAL) && + (READMEM(pc->mfd, &vaddr, sizeof(ulong), + kimage_voffset, KCORE_USE_VADDR) > 0)) { + ms->kimage_voffset = vaddr; + return; + } + } + + if ((iomem = fopen("/proc/iomem", "r")) == NULL) + return; + + errflag = 1; + while (fgets(buf, BUFSIZE, iomem)) { + if(strstr(buf, ": Kernel code")) { + errflag = 0; + break; + } + if (strstr(buf, ": System RAM")) { + clean_line(buf); + + if (!(p1 = strstr(buf, "-"))) + continue; + + *p1 = NULLCHAR; + + phys_addr = htol(buf, RETURN_ON_ERROR | QUIET, NULL); 
+ if (phys_addr == BADADDR) + continue; + } + } + fclose(iomem); + + if (errflag) + return; + + } else if (KDUMP_DUMPFILE()) { + errflag = 1; + if (arm_kdump_phys_base(&phys_addr)) { /* Get start address of first memory block */ + ms->kimage_voffset = ms->vmalloc_start_addr - phys_addr; + if ((kt->flags2 & KASLR) && (kt->flags & RELOC_SET)) + ms->kimage_voffset += (kt->relocate * -1); + if (verify_kimage_voffset() || arm64_search_for_kimage_voffset(phys_addr)) + errflag = 0; + } + + if (errflag) { + error(WARNING, + "kimage_voffset cannot be determined from the dumpfile.\n"); + error(CONT, + "Try using the command line option: --machdep kimage_voffset=\n"); + } + return; + } else { + error(WARNING, + "kimage_voffset cannot be determined from the dumpfile.\n"); + error(CONT, + "Using default value of 0. If this is not correct, then try\n"); + error(CONT, + "using the command line option: --machdep kimage_voffset=\n"); + return; + } + + ms->kimage_voffset = ms->vmalloc_start_addr - phys_addr; + + if ((kt->flags2 & KASLR) && (kt->flags & RELOC_SET)) + ms->kimage_voffset += (kt->relocate * -1); +} + +static void +arm64_calc_phys_offset(void) +{ + struct machine_specific *ms = machdep->machspec; + ulong phys_offset; + + if (machdep->flags & PHYS_OFFSET) /* --machdep override */ + return; + + /* + * Next determine suitable value for phys_offset. User can override this + * by passing valid '--machdep phys_offset=' option. 
+ */ + ms->phys_offset = 0; + + if (ACTIVE()) { + char buf[BUFSIZE]; + char *p1; + int errflag; + FILE *iomem; + physaddr_t paddr; + ulong vaddr; + struct syment *sp; + char *string; + + if ((machdep->flags & NEW_VMEMMAP) && + ms->kimage_voffset && (sp = kernel_symbol_search("memstart_addr"))) { + if (pc->flags & PROC_KCORE) { + if ((string = pc->read_vmcoreinfo("NUMBER(PHYS_OFFSET)"))) { + ms->phys_offset = htol(string, QUIET, NULL); + free(string); + return; + } + vaddr = symbol_value_from_proc_kallsyms("memstart_addr"); + if (vaddr == BADVAL) + vaddr = sp->value; + paddr = KCORE_USE_VADDR; + } else { + vaddr = sp->value; + paddr = sp->value - machdep->machspec->kimage_voffset; + } + if (READMEM(pc->mfd, &phys_offset, sizeof(phys_offset), + vaddr, paddr) > 0) { + ms->phys_offset = phys_offset; + return; + } + } + + if ((iomem = fopen("/proc/iomem", "r")) == NULL) + return; + + /* + * Memory regions are sorted in ascending order. We take the + * first region which should be correct for most uses. + */ + errflag = 1; + while (fgets(buf, BUFSIZE, iomem)) { + if (strstr(buf, ": System RAM")) { + clean_line(buf); + errflag = 0; + break; + } + } + fclose(iomem); + + if (errflag) + return; + + if (!(p1 = strstr(buf, "-"))) + return; + + *p1 = NULLCHAR; + + phys_offset = htol(buf, RETURN_ON_ERROR | QUIET, &errflag); + if (errflag) + return; + + ms->phys_offset = phys_offset; + } else if (DISKDUMP_DUMPFILE() && diskdump_phys_base(&phys_offset)) { + ms->phys_offset = phys_offset; + } else if (KDUMP_DUMPFILE() && arm64_kdump_phys_base(&phys_offset)) { + ms->phys_offset = phys_offset; + } else { + error(WARNING, + "phys_offset cannot be determined from the dumpfile.\n"); + error(CONT, + "Using default value of 0. 
If this is not correct, then try\n"); + error(CONT, + "using the command line option: --machdep phys_offset=\n"); + } + + if (CRASHDEBUG(1)) + fprintf(fp, "using %lx as phys_offset\n", ms->phys_offset); +} + +/* + * Determine SECTION_SIZE_BITS either by reading VMCOREINFO or the kernel + * config, otherwise use the 64-bit ARM default definiton. + */ +static void +arm64_get_section_size_bits(void) +{ + int ret; + char *string; + + machdep->section_size_bits = _SECTION_SIZE_BITS; + + if ((string = pc->read_vmcoreinfo("NUMBER(SECTION_SIZE_BITS)"))) { + machdep->section_size_bits = atol(string); + free(string); + } else if (kt->ikconfig_flags & IKCONFIG_AVAIL) { + if ((ret = get_kernel_config("CONFIG_MEMORY_HOTPLUG", NULL)) == IKCONFIG_Y) { + if ((ret = get_kernel_config("CONFIG_HOTPLUG_SIZE_BITS", &string)) == IKCONFIG_STR) + machdep->section_size_bits = atol(string); + } + } + + if (CRASHDEBUG(1)) + fprintf(fp, "SECTION_SIZE_BITS: %ld\n", machdep->section_size_bits); +} + +/* + * Determine PHYS_OFFSET either by reading VMCOREINFO or the kernel + * symbol, otherwise borrow the 32-bit ARM functionality. 
+ */ +static int +arm64_kdump_phys_base(ulong *phys_offset) +{ + char *string; + struct syment *sp; + physaddr_t paddr; + + if ((string = pc->read_vmcoreinfo("NUMBER(PHYS_OFFSET)"))) { + *phys_offset = htol(string, QUIET, NULL); + free(string); + return TRUE; + } + + if ((machdep->flags & NEW_VMEMMAP) && + machdep->machspec->kimage_voffset && + (sp = kernel_symbol_search("memstart_addr"))) { + paddr = sp->value - machdep->machspec->kimage_voffset; + if (READMEM(-1, phys_offset, sizeof(*phys_offset), + sp->value, paddr) > 0) + return TRUE; + } + + return arm_kdump_phys_base(phys_offset); +} + +static void +arm64_init_kernel_pgd(void) +{ + int i; + ulong value; + + if (!kernel_symbol_exists("init_mm") || + !readmem(symbol_value("init_mm") + OFFSET(mm_struct_pgd), KVADDR, + &value, sizeof(void *), "init_mm.pgd", RETURN_ON_ERROR)) { + if (kernel_symbol_exists("swapper_pg_dir")) + value = symbol_value("swapper_pg_dir"); + else { + error(WARNING, "cannot determine kernel pgd location\n"); + return; + } + } + + for (i = 0; i < NR_CPUS; i++) + vt->kernel_pgd[i] = value; +} + +ulong +arm64_VTOP(ulong addr) +{ + if (machdep->flags & NEW_VMEMMAP) { + if (machdep->machspec->VA_START && + (addr >= machdep->machspec->kimage_text) && + (addr <= machdep->machspec->kimage_end)) { + return addr - machdep->machspec->kimage_voffset; + } + + if (addr >= machdep->machspec->page_offset) + return machdep->machspec->phys_offset + + (addr - machdep->machspec->page_offset); + else if (machdep->machspec->kimage_voffset) + return addr - machdep->machspec->kimage_voffset; + else /* no randomness */ + return machdep->machspec->phys_offset + + (addr - machdep->machspec->vmalloc_start_addr); + } else { + return machdep->machspec->phys_offset + + (addr - machdep->machspec->page_offset); + } +} + +static int +arm64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) +{ + ulong kernel_pgd; + + if (!IS_KVADDR(kvaddr)) + return FALSE; + + if (!vt->vmalloc_start) { + *paddr = 
VTOP(kvaddr); + return TRUE; + } + + if (!IS_VMALLOC_ADDR(kvaddr)) { + *paddr = VTOP(kvaddr); + if (!verbose) + return TRUE; + } + + kernel_pgd = vt->kernel_pgd[0]; + *paddr = 0; + + switch (machdep->flags & (VM_L2_64K|VM_L3_64K|VM_L3_4K|VM_L4_4K)) + { + case VM_L2_64K: + return arm64_vtop_2level_64k(kernel_pgd, kvaddr, paddr, verbose); + case VM_L3_64K: + return arm64_vtop_3level_64k(kernel_pgd, kvaddr, paddr, verbose); + case VM_L3_4K: + return arm64_vtop_3level_4k(kernel_pgd, kvaddr, paddr, verbose); + case VM_L4_4K: + return arm64_vtop_4level_4k(kernel_pgd, kvaddr, paddr, verbose); + default: + return FALSE; + } +} + +static int +arm64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) +{ + ulong user_pgd; + + readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, + &user_pgd, sizeof(long), "user pgd", FAULT_ON_ERROR); + + *paddr = 0; + + switch (machdep->flags & (VM_L2_64K|VM_L3_64K|VM_L3_4K|VM_L4_4K)) + { + case VM_L2_64K: + return arm64_vtop_2level_64k(user_pgd, uvaddr, paddr, verbose); + case VM_L3_64K: + return arm64_vtop_3level_64k(user_pgd, uvaddr, paddr, verbose); + case VM_L3_4K: + return arm64_vtop_3level_4k(user_pgd, uvaddr, paddr, verbose); + case VM_L4_4K: + return arm64_vtop_4level_4k(user_pgd, uvaddr, paddr, verbose); + default: + return FALSE; + } +} + +#define PTE_ADDR_LOW ((((1UL) << (48 - machdep->pageshift)) - 1) << machdep->pageshift) +#define PTE_ADDR_HIGH ((0xfUL) << 12) +#define PTE_TO_PHYS(pteval) (machdep->max_physmem_bits == 52 ? 
\ + (((pteval & PTE_ADDR_LOW) | ((pteval & PTE_ADDR_HIGH) << 36))) : (pteval & PTE_ADDR_LOW)) + +#define PMD_TYPE_MASK 3 +#define PMD_TYPE_SECT 1 +#define PMD_TYPE_TABLE 2 +#define SECTION_PAGE_MASK_2MB ((long)(~((MEGABYTES(2))-1))) +#define SECTION_PAGE_MASK_512MB ((long)(~((MEGABYTES(512))-1))) + +static int +arm64_vtop_2level_64k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose) +{ + ulong *pgd_base, *pgd_ptr, pgd_val; + ulong *pte_base, *pte_ptr, pte_val; + + if (verbose) + fprintf(fp, "PAGE DIRECTORY: %lx\n", pgd); + + pgd_base = (ulong *)pgd; + FILL_PGD(pgd_base, KVADDR, machdep->ptrs_per_pgd * sizeof(ulong)); + pgd_ptr = pgd_base + (((vaddr) >> PGDIR_SHIFT_L2_64K) & (machdep->ptrs_per_pgd - 1)); + pgd_val = ULONG(machdep->pgd + PAGEOFFSET(pgd_ptr)); + if (verbose) + fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd_ptr, pgd_val); + if (!pgd_val) + goto no_page; + + /* + * #define __PAGETABLE_PUD_FOLDED + * #define __PAGETABLE_PMD_FOLDED + */ + + if ((pgd_val & PMD_TYPE_MASK) == PMD_TYPE_SECT) { + ulong sectionbase = (pgd_val & SECTION_PAGE_MASK_512MB) & PHYS_MASK; + if (verbose) { + fprintf(fp, " PAGE: %lx (512MB)\n\n", sectionbase); + arm64_translate_pte(pgd_val, 0, 0); + } + *paddr = sectionbase + (vaddr & ~SECTION_PAGE_MASK_512MB); + return TRUE; + } + + pte_base = (ulong *)PTOV(pgd_val & PHYS_MASK & (s32)machdep->pagemask); + FILL_PTBL(pte_base, KVADDR, PTRS_PER_PTE_L2_64K * sizeof(ulong)); + pte_ptr = pte_base + (((vaddr) >> machdep->pageshift) & (PTRS_PER_PTE_L2_64K - 1)); + pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_ptr)); + if (verbose) + fprintf(fp, " PTE: %lx => %lx\n", (ulong)pte_ptr, pte_val); + if (!pte_val) + goto no_page; + + if (pte_val & PTE_VALID) { + *paddr = (PAGEBASE(pte_val) & PHYS_MASK) + PAGEOFFSET(vaddr); + if (verbose) { + fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); + arm64_translate_pte(pte_val, 0, 0); + } + } else { + if (IS_UVADDR(vaddr, NULL)) + *paddr = pte_val; + if (verbose) { + fprintf(fp, "\n"); + 
arm64_translate_pte(pte_val, 0, 0); + } + goto no_page; + } + + return TRUE; +no_page: + return FALSE; +} + +static int +arm64_vtop_3level_64k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose) +{ + ulong *pgd_base, *pgd_ptr, pgd_val; + ulong *pmd_base, *pmd_ptr, pmd_val; + ulong *pte_base, *pte_ptr, pte_val; + + if (verbose) + fprintf(fp, "PAGE DIRECTORY: %lx\n", pgd); + + pgd_base = (ulong *)pgd; + FILL_PGD(pgd_base, KVADDR, machdep->ptrs_per_pgd * sizeof(ulong)); + pgd_ptr = pgd_base + (((vaddr) >> PGDIR_SHIFT_L3_64K) & (machdep->ptrs_per_pgd - 1)); + pgd_val = ULONG(machdep->pgd + PGDIR_OFFSET_L3_64K(pgd_ptr)); + if (verbose) + fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd_ptr, pgd_val); + if (!pgd_val) + goto no_page; + + /* + * #define __PAGETABLE_PUD_FOLDED + */ + + pmd_base = (ulong *)PTOV(PTE_TO_PHYS(pgd_val)); + FILL_PMD(pmd_base, KVADDR, PTRS_PER_PMD_L3_64K * sizeof(ulong)); + pmd_ptr = pmd_base + (((vaddr) >> PMD_SHIFT_L3_64K) & (PTRS_PER_PMD_L3_64K - 1)); + pmd_val = ULONG(machdep->pmd + PAGEOFFSET(pmd_ptr)); + if (verbose) + fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd_ptr, pmd_val); + if (!pmd_val) + goto no_page; + + if ((pmd_val & PMD_TYPE_MASK) == PMD_TYPE_SECT) { + ulong sectionbase = PTE_TO_PHYS(pmd_val) & SECTION_PAGE_MASK_512MB; + if (verbose) { + fprintf(fp, " PAGE: %lx (512MB)\n\n", sectionbase); + arm64_translate_pte(pmd_val, 0, 0); + } + *paddr = sectionbase + (vaddr & ~SECTION_PAGE_MASK_512MB); + return TRUE; + } + + pte_base = (ulong *)PTOV(PTE_TO_PHYS(pmd_val)); + FILL_PTBL(pte_base, KVADDR, PTRS_PER_PTE_L3_64K * sizeof(ulong)); + pte_ptr = pte_base + (((vaddr) >> machdep->pageshift) & (PTRS_PER_PTE_L3_64K - 1)); + pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_ptr)); + if (verbose) + fprintf(fp, " PTE: %lx => %lx\n", (ulong)pte_ptr, pte_val); + if (!pte_val) + goto no_page; + + if (pte_val & PTE_VALID) { + *paddr = PTE_TO_PHYS(pte_val) + PAGEOFFSET(vaddr); + if (verbose) { + fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); + 
arm64_translate_pte(pte_val, 0, 0); + } + } else { + if (IS_UVADDR(vaddr, NULL)) + *paddr = pte_val; + if (verbose) { + fprintf(fp, "\n"); + arm64_translate_pte(pte_val, 0, 0); + } + goto no_page; + } + + return TRUE; +no_page: + return FALSE; +} + +static int +arm64_vtop_3level_4k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose) +{ + ulong *pgd_base, *pgd_ptr, pgd_val; + ulong *pmd_base, *pmd_ptr, pmd_val; + ulong *pte_base, *pte_ptr, pte_val; + + if (verbose) + fprintf(fp, "PAGE DIRECTORY: %lx\n", pgd); + + pgd_base = (ulong *)pgd; + FILL_PGD(pgd_base, KVADDR, PTRS_PER_PGD_L3_4K * sizeof(ulong)); + pgd_ptr = pgd_base + (((vaddr) >> PGDIR_SHIFT_L3_4K) & (PTRS_PER_PGD_L3_4K - 1)); + pgd_val = ULONG(machdep->pgd + PAGEOFFSET(pgd_ptr)); + if (verbose) + fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd_ptr, pgd_val); + if (!pgd_val) + goto no_page; + + /* + * #define __PAGETABLE_PUD_FOLDED + */ + + pmd_base = (ulong *)PTOV(pgd_val & PHYS_MASK & (s32)machdep->pagemask); + FILL_PMD(pmd_base, KVADDR, PTRS_PER_PMD_L3_4K * sizeof(ulong)); + pmd_ptr = pmd_base + (((vaddr) >> PMD_SHIFT_L3_4K) & (PTRS_PER_PMD_L3_4K - 1)); + pmd_val = ULONG(machdep->pmd + PAGEOFFSET(pmd_ptr)); + if (verbose) + fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd_ptr, pmd_val); + if (!pmd_val) + goto no_page; + + if ((pmd_val & PMD_TYPE_MASK) == PMD_TYPE_SECT) { + ulong sectionbase = (pmd_val & SECTION_PAGE_MASK_2MB) & PHYS_MASK; + if (verbose) { + fprintf(fp, " PAGE: %lx (2MB)\n\n", sectionbase); + arm64_translate_pte(pmd_val, 0, 0); + } + *paddr = sectionbase + (vaddr & ~SECTION_PAGE_MASK_2MB); + return TRUE; + } + + pte_base = (ulong *)PTOV(pmd_val & PHYS_MASK & (s32)machdep->pagemask); + FILL_PTBL(pte_base, KVADDR, PTRS_PER_PTE_L3_4K * sizeof(ulong)); + pte_ptr = pte_base + (((vaddr) >> machdep->pageshift) & (PTRS_PER_PTE_L3_4K - 1)); + pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_ptr)); + if (verbose) + fprintf(fp, " PTE: %lx => %lx\n", (ulong)pte_ptr, pte_val); + if (!pte_val) + goto 
no_page; + + if (pte_val & PTE_VALID) { + *paddr = (PAGEBASE(pte_val) & PHYS_MASK) + PAGEOFFSET(vaddr); + if (verbose) { + fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); + arm64_translate_pte(pte_val, 0, 0); + } + } else { + if (IS_UVADDR(vaddr, NULL)) + *paddr = pte_val; + if (verbose) { + fprintf(fp, "\n"); + arm64_translate_pte(pte_val, 0, 0); + } + goto no_page; + } + + return TRUE; +no_page: + return FALSE; +} + +static int +arm64_vtop_4level_4k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose) +{ + ulong *pgd_base, *pgd_ptr, pgd_val; + ulong *pud_base, *pud_ptr, pud_val; + ulong *pmd_base, *pmd_ptr, pmd_val; + ulong *pte_base, *pte_ptr, pte_val; + + if (verbose) + fprintf(fp, "PAGE DIRECTORY: %lx\n", pgd); + + pgd_base = (ulong *)pgd; + FILL_PGD(pgd_base, KVADDR, PTRS_PER_PGD_L4_4K * sizeof(ulong)); + pgd_ptr = pgd_base + (((vaddr) >> PGDIR_SHIFT_L4_4K) & (PTRS_PER_PGD_L4_4K - 1)); + pgd_val = ULONG(machdep->pgd + PGDIR_OFFSET_48VA(pgd_ptr)); + if (verbose) + fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd_ptr, pgd_val); + if (!pgd_val) + goto no_page; + + pud_base = (ulong *)PTOV(pgd_val & PHYS_MASK & PGDIR_MASK_48VA); + + FILL_PUD(pud_base, KVADDR, PTRS_PER_PUD_L4_4K * sizeof(ulong)); + pud_ptr = pud_base + (((vaddr) >> PUD_SHIFT_L4_4K) & (PTRS_PER_PUD_L4_4K - 1)); + pud_val = ULONG(machdep->pud + PAGEOFFSET(pud_ptr)); + if (verbose) + fprintf(fp, " PUD: %lx => %lx\n", (ulong)pud_ptr, pud_val); + if (!pud_val) + goto no_page; + + pmd_base = (ulong *)PTOV(pud_val & PHYS_MASK & (s32)machdep->pagemask); + FILL_PMD(pmd_base, KVADDR, PTRS_PER_PMD_L4_4K * sizeof(ulong)); + pmd_ptr = pmd_base + (((vaddr) >> PMD_SHIFT_L4_4K) & (PTRS_PER_PMD_L4_4K - 1)); + pmd_val = ULONG(machdep->pmd + PAGEOFFSET(pmd_ptr)); + if (verbose) + fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd_ptr, pmd_val); + if (!pmd_val) + goto no_page; + + if ((pmd_val & PMD_TYPE_MASK) == PMD_TYPE_SECT) { + ulong sectionbase = (pmd_val & SECTION_PAGE_MASK_2MB) & PHYS_MASK; + if (verbose) { + 
fprintf(fp, " PAGE: %lx (2MB)\n\n", sectionbase); + arm64_translate_pte(pmd_val, 0, 0); + } + *paddr = sectionbase + (vaddr & ~SECTION_PAGE_MASK_2MB); + return TRUE; + } + + pte_base = (ulong *)PTOV(pmd_val & PHYS_MASK & (s32)machdep->pagemask); + FILL_PTBL(pte_base, KVADDR, PTRS_PER_PTE_L4_4K * sizeof(ulong)); + pte_ptr = pte_base + (((vaddr) >> machdep->pageshift) & (PTRS_PER_PTE_L4_4K - 1)); + pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_ptr)); + if (verbose) + fprintf(fp, " PTE: %lx => %lx\n", (ulong)pte_ptr, pte_val); + if (!pte_val) + goto no_page; + + if (pte_val & PTE_VALID) { + *paddr = (PAGEBASE(pte_val) & PHYS_MASK) + PAGEOFFSET(vaddr); + if (verbose) { + fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); + arm64_translate_pte(pte_val, 0, 0); + } + } else { + if (IS_UVADDR(vaddr, NULL)) + *paddr = pte_val; + if (verbose) { + fprintf(fp, "\n"); + arm64_translate_pte(pte_val, 0, 0); + } + goto no_page; + } + + return TRUE; +no_page: + return FALSE; +} + +static ulong +arm64_get_task_pgd(ulong task) +{ + struct task_context *tc; + ulong pgd; + + if ((tc = task_to_context(task)) && + readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, + &pgd, sizeof(long), "user pgd", RETURN_ON_ERROR)) + return pgd; + else + return NO_TASK; +} + +static ulong +arm64_processor_speed(void) +{ + return 0; +}; + +/* + * Gather IRQ stack values. + */ +static void +arm64_irq_stack_init(void) +{ + int i; + struct syment *sp; + struct gnu_request request, *req; + struct machine_specific *ms = machdep->machspec; + ulong p, sz; + req = &request; + + if (symbol_exists("irq_stack") && + (sp = per_cpu_symbol_search("irq_stack")) && + get_symbol_type("irq_stack", NULL, req)) { + /* before v4.14 or CONFIG_VMAP_STACK disabled */ + if (CRASHDEBUG(1)) { + fprintf(fp, "irq_stack: \n"); + fprintf(fp, " type: %s\n", + (req->typecode == TYPE_CODE_ARRAY) ? + "TYPE_CODE_ARRAY" : "other"); + fprintf(fp, " target_typecode: %s\n", + req->target_typecode == TYPE_CODE_INT ? 
+ "TYPE_CODE_INT" : "other"); + fprintf(fp, " target_length: %ld\n", + req->target_length); + fprintf(fp, " length: %ld\n", req->length); + } + + if (!(ms->irq_stacks = (ulong *)malloc((size_t)(kt->cpus * sizeof(ulong))))) + error(FATAL, "cannot malloc irq_stack addresses\n"); + ms->irq_stack_size = req->length; + machdep->flags |= IRQ_STACKS; + + for (i = 0; i < kt->cpus; i++) + ms->irq_stacks[i] = kt->__per_cpu_offset[i] + sp->value; + } else if (symbol_exists("irq_stack_ptr") && + (sp = per_cpu_symbol_search("irq_stack_ptr")) && + get_symbol_type("irq_stack_ptr", NULL, req)) { + /* v4.14 and later with CONFIG_VMAP_STACK enabled */ + if (CRASHDEBUG(1)) { + fprintf(fp, "irq_stack_ptr: \n"); + fprintf(fp, " type: %x, %s\n", + (int)req->typecode, + (req->typecode == TYPE_CODE_PTR) ? + "TYPE_CODE_PTR" : "other"); + fprintf(fp, " target_typecode: %x, %s\n", + (int)req->target_typecode, + req->target_typecode == TYPE_CODE_INT ? + "TYPE_CODE_INT" : "other"); + fprintf(fp, " target_length: %ld\n", + req->target_length); + fprintf(fp, " length: %ld\n", req->length); + } + + if (!(ms->irq_stacks = (ulong *)malloc((size_t)(kt->cpus * sizeof(ulong))))) + error(FATAL, "cannot malloc irq_stack addresses\n"); + + /* + * Determining the IRQ_STACK_SIZE is tricky, but for now + * 4.14 kernel has: + * + * #define IRQ_STACK_SIZE THREAD_SIZE + * + * and finding a solid usage of THREAD_SIZE is hard, but: + * + * union thread_union { + * ... + * unsigned long stack[THREAD_SIZE/sizeof(long)]; + * }; + */ + if (MEMBER_EXISTS("thread_union", "stack")) { + if ((sz = MEMBER_SIZE("thread_union", "stack")) > 0) + ms->irq_stack_size = sz; + } else + ms->irq_stack_size = ARM64_IRQ_STACK_SIZE; + + machdep->flags |= IRQ_STACKS; + + for (i = 0; i < kt->cpus; i++) { + p = kt->__per_cpu_offset[i] + sp->value; + readmem(p, KVADDR, &(ms->irq_stacks[i]), sizeof(ulong), + "IRQ stack pointer", RETURN_ON_ERROR); + } + } +} + +/* + * Gather and verify all of the backtrace requirements. 
+ */ +static void +arm64_stackframe_init(void) +{ + long task_struct_thread; + long thread_struct_cpu_context; + long context_sp, context_pc, context_fp; + struct syment *sp1, *sp1n, *sp2, *sp2n, *sp3, *sp3n; + + STRUCT_SIZE_INIT(note_buf, "note_buf_t"); + STRUCT_SIZE_INIT(elf_prstatus, "elf_prstatus"); + MEMBER_OFFSET_INIT(elf_prstatus_pr_pid, "elf_prstatus", "pr_pid"); + MEMBER_OFFSET_INIT(elf_prstatus_pr_reg, "elf_prstatus", "pr_reg"); + + if (MEMBER_EXISTS("pt_regs", "stackframe")) { + machdep->machspec->user_eframe_offset = SIZE(pt_regs); + machdep->machspec->kern_eframe_offset = SIZE(pt_regs) - 16; + } else { + machdep->machspec->user_eframe_offset = SIZE(pt_regs) + 16; + machdep->machspec->kern_eframe_offset = SIZE(pt_regs); + } + + if ((sp1 = kernel_symbol_search("__exception_text_start")) && + (sp2 = kernel_symbol_search("__exception_text_end"))) { + machdep->machspec->__exception_text_start = sp1->value; + machdep->machspec->__exception_text_end = sp2->value; + } + if ((sp1 = kernel_symbol_search("__irqentry_text_start")) && + (sp2 = kernel_symbol_search("__irqentry_text_end"))) { + machdep->machspec->__irqentry_text_start = sp1->value; + machdep->machspec->__irqentry_text_end = sp2->value; + } + if ((sp1 = kernel_symbol_search("vectors")) && + (sp1n = kernel_symbol_search("cpu_switch_to")) && + (sp2 = kernel_symbol_search("ret_fast_syscall")) && + (sp2n = kernel_symbol_search("sys_rt_sigreturn_wrapper"))) { + machdep->machspec->exp_entry1_start = sp1->value; + machdep->machspec->exp_entry1_end = sp1n->value; + machdep->machspec->exp_entry2_start = sp2->value; + machdep->machspec->exp_entry2_end = sp2n->value; + } + + if ((sp1 = kernel_symbol_search("crash_kexec")) && + (sp1n = next_symbol(NULL, sp1)) && + (sp2 = kernel_symbol_search("crash_save_cpu")) && + (sp2n = next_symbol(NULL, sp2)) && + (sp3 = kernel_symbol_search("machine_kexec")) && + (sp3n = next_symbol(NULL, sp3))) { + machdep->machspec->crash_kexec_start = sp1->value; + 
machdep->machspec->crash_kexec_end = sp1n->value; + machdep->machspec->crash_save_cpu_start = sp2->value; + machdep->machspec->crash_save_cpu_end = sp2n->value; + machdep->machspec->machine_kexec_start = sp3->value; + machdep->machspec->machine_kexec_end = sp3n->value; + machdep->flags |= KDUMP_ENABLED; + } + + task_struct_thread = MEMBER_OFFSET("task_struct", "thread"); + thread_struct_cpu_context = MEMBER_OFFSET("thread_struct", "cpu_context"); + + if ((task_struct_thread == INVALID_OFFSET) || + (thread_struct_cpu_context == INVALID_OFFSET)) { + error(INFO, + "cannot determine task_struct.thread.context offset\n"); + return; + } + + /* + * Pay for the convenience of using a hardcopy of a kernel structure. + */ + if (offsetof(struct arm64_stackframe, sp) != + MEMBER_OFFSET("stackframe", "sp")) { + if (CRASHDEBUG(1)) + error(INFO, "builtin stackframe.sp offset differs from kernel version\n"); + } + if (offsetof(struct arm64_stackframe, fp) != + MEMBER_OFFSET("stackframe", "fp")) { + if (CRASHDEBUG(1)) + error(INFO, "builtin stackframe.fp offset differs from kernel version\n"); + } + if (offsetof(struct arm64_stackframe, pc) != + MEMBER_OFFSET("stackframe", "pc")) { + if (CRASHDEBUG(1)) + error(INFO, "builtin stackframe.pc offset differs from kernel version\n"); + } + if (!MEMBER_EXISTS("stackframe", "sp")) + machdep->flags |= UNW_4_14; + + context_sp = MEMBER_OFFSET("cpu_context", "sp"); + context_fp = MEMBER_OFFSET("cpu_context", "fp"); + context_pc = MEMBER_OFFSET("cpu_context", "pc"); + if (context_sp == INVALID_OFFSET) { + error(INFO, "cannot determine cpu_context.sp offset\n"); + return; + } + if (context_fp == INVALID_OFFSET) { + error(INFO, "cannot determine cpu_context.fp offset\n"); + return; + } + if (context_pc == INVALID_OFFSET) { + error(INFO, "cannot determine cpu_context.pc offset\n"); + return; + } + ASSIGN_OFFSET(task_struct_thread_context_sp) = + task_struct_thread + thread_struct_cpu_context + context_sp; + 
ASSIGN_OFFSET(task_struct_thread_context_fp) = + task_struct_thread + thread_struct_cpu_context + context_fp; + ASSIGN_OFFSET(task_struct_thread_context_pc) = + task_struct_thread + thread_struct_cpu_context + context_pc; +} + +#define KERNEL_MODE (1) +#define USER_MODE (2) + +#define USER_EFRAME_OFFSET (machdep->machspec->user_eframe_offset) +#define KERN_EFRAME_OFFSET (machdep->machspec->kern_eframe_offset) + +/* + * PSR bits + */ +#define PSR_MODE_EL0t 0x00000000 +#define PSR_MODE_EL1t 0x00000004 +#define PSR_MODE_EL1h 0x00000005 +#define PSR_MODE_EL2t 0x00000008 +#define PSR_MODE_EL2h 0x00000009 +#define PSR_MODE_EL3t 0x0000000c +#define PSR_MODE_EL3h 0x0000000d +#define PSR_MODE_MASK 0x0000000f + +/* Architecturally defined mapping between AArch32 and AArch64 registers */ +#define compat_usr(x) regs[(x)] +#define compat_fp regs[11] +#define compat_sp regs[13] +#define compat_lr regs[14] + +#define user_mode(ptregs) \ + (((ptregs)->pstate & PSR_MODE_MASK) == PSR_MODE_EL0t) + +#define compat_user_mode(ptregs) \ + (((ptregs)->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) == \ + (PSR_MODE32_BIT | PSR_MODE_EL0t)) + +#define user_stack_pointer(ptregs) \ + (!compat_user_mode(ptregs) ? (ptregs)->sp : (ptregs)->compat_sp) + +#define user_frame_pointer(ptregs) \ + (!compat_user_mode(ptregs) ? 
(ptregs)->regs[29] : (ptregs)->compat_fp) + +static int +arm64_is_kernel_exception_frame(struct bt_info *bt, ulong stkptr) +{ + struct arm64_pt_regs *regs; + + regs = (struct arm64_pt_regs *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(stkptr))]; + + if (INSTACK(regs->sp, bt) && INSTACK(regs->regs[29], bt) && + !(regs->pstate & (0xffffffff00000000ULL | PSR_MODE32_BIT)) && + is_kernel_text(regs->pc) && + is_kernel_text(regs->regs[30])) { + switch (regs->pstate & PSR_MODE_MASK) + { + case PSR_MODE_EL1t: + case PSR_MODE_EL1h: + case PSR_MODE_EL2t: + case PSR_MODE_EL2h: + return TRUE; + } + } + + return FALSE; +} + +static int +arm64_eframe_search(struct bt_info *bt) +{ + int c; + ulong ptr, count; + struct machine_specific *ms; + + if (bt->flags & BT_EFRAME_SEARCH2) { + if (!(machdep->flags & IRQ_STACKS)) + error(FATAL, "IRQ stacks do not exist in this kernel\n"); + + ms = machdep->machspec; + + for (c = 0; c < kt->cpus; c++) { + if ((bt->flags & BT_CPUMASK) && + !(NUM_IN_BITMAP(bt->cpumask, c))) + continue; + + fprintf(fp, "CPU %d IRQ STACK:", c); + bt->stackbase = ms->irq_stacks[c]; + bt->stacktop = bt->stackbase + ms->irq_stack_size; + alter_stackbuf(bt); + count = 0; + + for (ptr = bt->stackbase; ptr < bt->stacktop - SIZE(pt_regs); ptr++) { + if (arm64_is_kernel_exception_frame(bt, ptr)) { + fprintf(fp, "%s\nKERNEL-MODE EXCEPTION FRAME AT: %lx\n", + count ? 
"" : "\n", ptr); + arm64_print_exception_frame(bt, ptr, KERNEL_MODE, fp); + count++; + } + } + + if (count) + fprintf(fp, "\n"); + else + fprintf(fp, "(none found)\n\n"); + } + + return 0; + } + + + count = 0; + for (ptr = bt->stackbase; ptr < bt->stacktop - SIZE(pt_regs); ptr++) { + if (arm64_is_kernel_exception_frame(bt, ptr)) { + fprintf(fp, "\nKERNEL-MODE EXCEPTION FRAME AT: %lx\n", ptr); + arm64_print_exception_frame(bt, ptr, KERNEL_MODE, fp); + count++; + } + } + + if (is_kernel_thread(bt->tc->task)) + return count; + + ptr = bt->stacktop - USER_EFRAME_OFFSET; + fprintf(fp, "%sUSER-MODE EXCEPTION FRAME AT: %lx\n", + count++ ? "\n" : "", ptr); + arm64_print_exception_frame(bt, ptr, USER_MODE, fp); + + return count; +} + +static char *arm64_exception_functions[] = { + "do_undefinstr", + "do_sysinstr", + "do_debug_exception", + "do_mem_abort", + "do_el0_irq_bp_hardening", + "do_sp_pc_abort", + NULL +}; + +static int +arm64_in_exception_text(ulong ptr) +{ + struct machine_specific *ms = machdep->machspec; + char *name, **func; + + if (ms->__irqentry_text_start && ms->__irqentry_text_end && + ((ptr >= ms->__irqentry_text_start) && + (ptr < ms->__irqentry_text_end))) + return TRUE; + + if (ms->__exception_text_start && ms->__exception_text_end) { + if ((ptr >= ms->__exception_text_start) && + (ptr < ms->__exception_text_end)) + return TRUE; + } else if ((name = closest_symbol(ptr))) { /* Linux 5.5 and later */ + for (func = &arm64_exception_functions[0]; *func; func++) { + if (STREQ(name, *func)) + return TRUE; + } + } + + return FALSE; +} + +static int +arm64_in_exp_entry(ulong addr) +{ + struct machine_specific *ms; + + ms = machdep->machspec; + if ((ms->exp_entry1_start <= addr) && (addr < ms->exp_entry1_end)) + return TRUE; + if ((ms->exp_entry2_start <= addr) && (addr < ms->exp_entry2_end)) + return TRUE; + return FALSE; +} + +#define BACKTRACE_CONTINUE (1) +#define BACKTRACE_COMPLETE_KERNEL (2) +#define BACKTRACE_COMPLETE_USER (3) + +static int 
+arm64_print_stackframe_entry(struct bt_info *bt, int level, struct arm64_stackframe *frame, FILE *ofp) +{ + char *name, *name_plus_offset; + ulong branch_pc, symbol_offset; + struct syment *sp; + struct load_module *lm; + char buf[BUFSIZE]; + + /* + * if pc comes from a saved lr, it actually points to an instruction + * after branch. To avoid any confusion, decrement pc by 4. + * See, for example, "bl schedule" before ret_to_user(). + */ + branch_pc = frame->pc - 4; + name = closest_symbol(branch_pc); + name_plus_offset = NULL; + + if (bt->flags & BT_SYMBOL_OFFSET) { + sp = value_search(branch_pc, &symbol_offset); + if (sp && symbol_offset) + name_plus_offset = + value_to_symstr(branch_pc, buf, bt->radix); + } + + if (!INSTACK(frame->fp, bt) && IN_TASK_VMA(bt->task, frame->fp)) + frame->fp = 0; + + if (bt->flags & BT_FULL) { + if (level) + arm64_display_full_frame(bt, frame->fp); + bt->frameptr = frame->fp; + } + + fprintf(ofp, "%s#%d [%8lx] %s at %lx", level < 10 ? " " : "", level, + frame->fp ? frame->fp : bt->stacktop - USER_EFRAME_OFFSET, + name_plus_offset ? 
name_plus_offset : name, branch_pc); + + if (BT_REFERENCE_CHECK(bt)) { + arm64_do_bt_reference_check(bt, frame->pc, closest_symbol(frame->pc)); + arm64_do_bt_reference_check(bt, branch_pc, name); + } + + if (module_symbol(branch_pc, NULL, &lm, NULL, 0)) + fprintf(ofp, " [%s]", lm->mod_name); + + fprintf(ofp, "\n"); + + if (bt->flags & BT_LINE_NUMBERS) { + get_line_number(branch_pc, buf, FALSE); + if (strlen(buf)) + fprintf(ofp, " %s\n", buf); + } + + if (STREQ(name, "start_kernel") || STREQ(name, "secondary_start_kernel") || + STREQ(name, "kthread") || STREQ(name, "kthreadd")) + return BACKTRACE_COMPLETE_KERNEL; + + return BACKTRACE_CONTINUE; +} + +static int +arm64_print_stackframe_entry_v2(struct bt_info *bt, int level, struct arm64_stackframe *frame, FILE *ofp) +{ + char *name, *name_plus_offset; + ulong pc, symbol_offset; + struct syment *sp; + struct load_module *lm; + char buf[BUFSIZE]; + + /* + * if pc comes from a saved lr, it actually points to an instruction + * after branch. To avoid any confusion, decrement pc by 4. + * See, for example, "bl schedule" before ret_to_user(). + */ + pc = frame->pc - 0x4; + name = closest_symbol(pc); + name_plus_offset = NULL; + + if (bt->flags & BT_SYMBOL_OFFSET) { + sp = value_search(pc, &symbol_offset); + if (sp && symbol_offset) + name_plus_offset = value_to_symstr(pc, buf, bt->radix); + } + + if (bt->flags & BT_USER_EFRAME) + frame->fp = 0; + + fprintf(ofp, "%s#%d [%8lx] %s at %lx", level < 10 ? " " : "", level, + frame->fp ? frame->fp : bt->stacktop - USER_EFRAME_OFFSET, + name_plus_offset ? 
name_plus_offset : name, pc); + + if (BT_REFERENCE_CHECK(bt)) + arm64_do_bt_reference_check(bt, pc, name); + + if (module_symbol(pc, NULL, &lm, NULL, 0)) + fprintf(ofp, " [%s]", lm->mod_name); + + fprintf(ofp, "\n"); + + if (bt->flags & BT_LINE_NUMBERS) { + get_line_number(pc, buf, FALSE); + if (strlen(buf)) + fprintf(ofp, " %s\n", buf); + } + + if (STREQ(name, "start_kernel") || + STREQ(name, "secondary_start_kernel") || + STREQ(name, "kthread") || STREQ(name, "kthreadd")) + return BACKTRACE_COMPLETE_KERNEL; + + return BACKTRACE_CONTINUE; +} + +static void +arm64_display_full_frame(struct bt_info *bt, ulong sp) +{ + int i, u_idx; + ulong *up; + ulong words, addr; + char buf[BUFSIZE]; + + if (bt->frameptr == sp) + return; + + if (INSTACK(bt->frameptr, bt)) { + if (INSTACK(sp, bt)) { + ; /* normal case */ + } else { + if (sp == 0) + /* interrupt in user mode */ + sp = bt->stacktop - USER_EFRAME_OFFSET; + else + /* interrupt in kernel mode */ + sp = bt->stacktop; + } + } else { + /* This is a transition case from irq to process stack. */ + return; + } + + words = (sp - bt->frameptr) / sizeof(ulong); + + addr = bt->frameptr; + u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong); + for (i = 0; i < words; i++, u_idx++) { + if (!(i & 1)) + fprintf(fp, "%s %lx: ", i ? 
"\n" : "", addr); + + up = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]); + fprintf(fp, "%s ", format_stack_entry(bt, buf, *up, 0)); + + addr += sizeof(ulong); + } + fprintf(fp, "\n"); +} + +static void +arm64_display_full_frame_v2(struct bt_info *bt, struct arm64_stackframe *cur, + struct arm64_stackframe *next) +{ + struct machine_specific *ms; + ulong next_fp, stackbase; + char *stackbuf; + int i, u_idx; + ulong *up; + ulong words, addr; + char buf[BUFSIZE]; + + stackbase = bt->stackbase; + stackbuf = bt->stackbuf; + ms = machdep->machspec; + + /* Calc next fp for dump */ + if (next->fp == 0) + /* last stackframe on kernel tack */ + next_fp = bt->stacktop - 0x10; + else if (!INSTACK(cur->sp, bt)) { + /* We have just switched over stacks */ + next_fp = ms->irq_stacks[bt->tc->processor] + + ms->irq_stack_size - 0x10; + + /* + * We are already buffering a process stack. + * So use an old buffer for IRQ stack. + */ + stackbase = ms->irq_stacks[bt->tc->processor]; + stackbuf = ms->irq_stackbuf; + } else + next_fp = next->fp; + + if (CRASHDEBUG(1)) + fprintf(fp, " frame <%016lx:%016lx>\n", cur->fp, next_fp); + + /* Check here because we want to see a debug message above. */ + if (!(bt->flags & BT_FULL)) + return; + if (next_fp <= cur->fp) + return; + + /* Dump */ + words = (next_fp - cur->fp) / sizeof(ulong); + addr = cur->fp; + u_idx = (cur->fp - stackbase)/sizeof(ulong); + for (i = 0; i < words; i++, u_idx++) { + if (!(i & 1)) + fprintf(fp, "%s %lx: ", i ? 
"\n" : "", addr); + + up = (ulong *)(&stackbuf[u_idx*sizeof(ulong)]); + fprintf(fp, "%s ", format_stack_entry(bt, buf, *up, 0)); + + addr += sizeof(ulong); + } + fprintf(fp, "\n"); + + if (stackbuf == ms->irq_stackbuf) + FREEBUF(stackbuf); +} + +static int +arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame) +{ + unsigned long high, low, fp; + unsigned long stack_mask; + unsigned long irq_stack_ptr, orig_sp; + struct arm64_pt_regs *ptregs; + struct machine_specific *ms; + + stack_mask = (unsigned long)(ARM64_STACK_SIZE) - 1; + fp = frame->fp; + + low = frame->sp; + high = (low + stack_mask) & ~(stack_mask); + + if (fp < low || fp > high || fp & 0xf) + return FALSE; + + frame->sp = fp + 0x10; + frame->fp = GET_STACK_ULONG(fp); + frame->pc = GET_STACK_ULONG(fp + 8); + + if ((frame->fp == 0) && (frame->pc == 0)) + return FALSE; + + if (!(machdep->flags & IRQ_STACKS)) + return TRUE; + + if (!(machdep->flags & IRQ_STACKS)) + return TRUE; + + if (machdep->flags & UNW_4_14) { + if ((bt->flags & BT_IRQSTACK) && + !arm64_on_irq_stack(bt->tc->processor, frame->fp)) { + if (arm64_on_process_stack(bt, frame->fp)) { + arm64_set_process_stack(bt); + + frame->sp = frame->fp - KERN_EFRAME_OFFSET; + /* + * for switch_stack + * fp still points to irq stack + */ + bt->bptr = fp; + /* + * for display_full_frame + * sp points to process stack + * + * If we want to see pt_regs, + * comment out the below. 
+ * bt->frameptr = frame->sp; + */ + } else { + /* irq -> user */ + return FALSE; + } + } + + return TRUE; + } + + /* + * The kernel's manner of determining the end of the IRQ stack: + * + * #define THREAD_SIZE 16384 + * #define THREAD_START_SP (THREAD_SIZE - 16) + * #define IRQ_STACK_START_SP THREAD_START_SP + * #define IRQ_STACK_PTR(cpu) ((unsigned long)per_cpu(irq_stack, cpu) + IRQ_STACK_START_SP) + * #define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08))) + * + * irq_stack_ptr = IRQ_STACK_PTR(raw_smp_processor_id()); + * orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr); (pt_regs pointer on process stack) + */ + ms = machdep->machspec; + irq_stack_ptr = ms->irq_stacks[bt->tc->processor] + ms->irq_stack_size - 16; + + if (frame->sp == irq_stack_ptr) { + orig_sp = GET_STACK_ULONG(irq_stack_ptr - 8); + arm64_set_process_stack(bt); + if (INSTACK(orig_sp, bt) && (INSTACK(frame->fp, bt) || (frame->fp == 0))) { + ptregs = (struct arm64_pt_regs *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(orig_sp))]; + frame->sp = orig_sp; + frame->pc = ptregs->pc; + bt->bptr = fp; + if (CRASHDEBUG(1)) + error(INFO, + "arm64_unwind_frame: switch stacks: fp: %lx sp: %lx pc: %lx\n", + frame->fp, frame->sp, frame->pc); + } else { + error(WARNING, + "arm64_unwind_frame: on IRQ stack: oriq_sp: %lx%s fp: %lx%s\n", + orig_sp, INSTACK(orig_sp, bt) ? "" : " (?)", + frame->fp, INSTACK(frame->fp, bt) ? "" : " (?)"); + return FALSE; + } + } + + return TRUE; +} + +/* + * The following figure shows how unwinding can be done. + * Here we assume that the callstack order is: + * #(X-1) ppc (previous PC) + * #X cpc (current PC) + * < #(X+ 1) epc (Exception entry) > + * #(X+1/2) npc (Next PC) + * #(X+2/3) Npc (One before Next) + * #(X+3/4) NNpc (One before 'Npc') + * and unwind frames from #X to #(X+1). + * When we add a faked frame for exception entry (exception frame) + * as #(X+1), the next frame for npc will be recognized as #(x+2). 
+ * + * (1)Normal stackframe: + * +------+ + * | pfp | + * | cpc | + * psp + + + * | | + * | | + * pfp +------+ <--- :prev stackframe = + * | cfp | + * | npc | + * csp + + + * | | + * | | + * cfp +------+ <--- :curr stackframe = + * | nfp | cfp = *pfp + * | Npc | csp = pfp + 0x10 + * nsp + + + * | | + * | | + * nfp +------+ <--- :next stackframe = + * | | + * + * (2)Exception on the same (IRQ or process) stack: + * +------+ + * | pfp | + * | cpc | + * psp + + + * | | + * | | + * pfp +------+ <--- :prev stackframe = + * | cfp | + * | npc | + * csp + + + * | | + * | | + * cfp +------+ <--- :curr stackframe = + * | nfp | + * | epc | + * + + + * | | + * | | faked(*) + * esp +------+ <--- :excp stackframe = <---, esp, epc + * | | esp = nsp - sizeof(pt_regs) + * | | + * | Npc | (*) If we didn't add this frame, the next frame + * | nfp | would be + * | nsp | + * | npc | and the frame below for npc would be lost. + * nsp + + + * | | + * nfp +------+ <--- :task stackframe = + * | Nfp | + * | NNpc | + * Nsp + + + * | | + * Nfp +------+ <--- :task stackframe = + * | NNfp | + * + * (3)Interrupt: + * +------+ + * | cfp | + * | ipc | + * csp + + + * | | + * | | + * cfp +------+ <--- :curr stackframe = + * | ifp | + * | epc | + * isp + + + * | | + * | | (*) + * ifp +------+ <--- :irq stackframe = + * | nfp | ifp == IRQ_STACK_PTR + * | esp | (*) Before the kernel enters an irq handler, frame + * top +------+ pointer moves to the top of IRQ stack. + * IRQ stack So we have to skip this frame in unwinding. 
+ * + * faked + * esp +------+ <--- :excp stackframe = <---, esp, epc> + * | | esp = nsp - sizeof(pt_regs) + * | | + * | Npc | + * | nfp | + * | nsp | + * | npc | + * nsp + + + * | | + * nfp +------+ <--- :task stackframe = + * | Nfp | + * | NNpc | + * Nsp + + + * | | + * Nfp +------+ <--- :task stackframe = + * | NNfp | + */ + +static struct arm64_stackframe ext_frame; + +static int +arm64_unwind_frame_v2(struct bt_info *bt, struct arm64_stackframe *frame, + FILE *ofp) +{ + unsigned long high, low, fp; + unsigned long stack_mask; + unsigned long irq_stack_ptr; + struct machine_specific *ms; + + stack_mask = (unsigned long)(ARM64_STACK_SIZE) - 1; + fp = frame->fp; + + low = frame->sp; + high = (low + stack_mask) & ~(stack_mask); + + if (fp < low || fp > high || fp & 0xf) + return FALSE; + + if (CRASHDEBUG(1)) + fprintf(ofp, " cur fp:%016lx sp:%016lx pc:%016lx\n", + frame->fp, frame->sp, frame->pc); + + if (ext_frame.pc) { + /* + * The previous frame was a dummy for exception entry. + * So complement a missing (task) stackframe now. + */ + frame->fp = ext_frame.fp; + frame->sp = ext_frame.sp; + frame->pc = ext_frame.pc; + + ext_frame.pc = 0; /* back to normal unwinding */ + + goto unwind_done; + } + + frame->pc = GET_STACK_ULONG(fp + 8); + if (!arm64_in_exp_entry(frame->pc)) { + /* (1) Normal stack frame */ + + frame->sp = fp + 0x10; + frame->fp = GET_STACK_ULONG(fp); + } else { + /* + * We are in exception entry code, and so + * - add a faked frame for exception entry, and + * - prepare for a stackframe hidden by exception + */ + + ext_frame.fp = GET_STACK_ULONG(fp); + /* + * Note: + * In the following code, we determine a stack pointer for + * exception entry based on ext_frame.fp because we have + * no way to know a ext_frame.sp. + * Fortunately, this will work fine for most functions + * in the kernel. + */ + if (ext_frame.fp == 0) { + /* + * (2) + * Either on process stack or on IRQ stack, + * the next frame is the last one on process stack. 
+ */ + + frame->sp = bt->stacktop + - sizeof(struct arm64_pt_regs) - 0x10; + frame->fp = frame->sp; + } else if (!arm64_on_irq_stack(bt->tc->processor, frame->sp)) { + /* + * (2) + * We are on process stack. Just add a faked frame + */ + + if (!arm64_on_irq_stack(bt->tc->processor, ext_frame.fp)) + frame->sp = ext_frame.fp + - sizeof(struct arm64_pt_regs); + else { + /* + * FIXME: very exceptional case + * We are already back on process stack, but + * a saved frame pointer indicates that we are + * on IRQ stack. Unfortunately this can happen + * when some functions are called after + * an irq handler is done because irq_exit() + * doesn't restore a frame pointer (x29). + * Those functions include + * - do_notify_resume() + * - trace_hardirqs_off() + * - schedule() + * + * We have no perfect way to determine a true + * stack pointer value here. + * 0x20 is a stackframe size of schedule(). + * Really ugly + */ + frame->sp = frame->fp + 0x20; + fprintf(ofp, " (Next exception frame might be wrong)\n"); + } + + frame->fp = frame->sp; + } else { + /* We are on IRQ stack */ + + ms = machdep->machspec; + irq_stack_ptr = ms->irq_stacks[bt->tc->processor] + + ms->irq_stack_size - 0x20; + if (ext_frame.fp != irq_stack_ptr) { + /* (2) Just add a faked frame */ + + frame->sp = ext_frame.fp + - sizeof(struct arm64_pt_regs); + frame->fp = frame->sp; + } else { + /* + * (3) + * Switch from IRQ stack to process stack + */ + + frame->sp = GET_STACK_ULONG(irq_stack_ptr + 8); + frame->fp = frame->sp; + + /* + * Keep a buffer for a while until + * displaying the last frame on IRQ stack + * at next arm64_print_stackframe_entry_v2() + */ + if (bt->flags & BT_FULL) + ms->irq_stackbuf = bt->stackbuf; + + arm64_set_process_stack(bt); + } + } + + /* prepare for a stackframe hidden by exception */ + arm64_gen_hidden_frame(bt, frame->sp, &ext_frame); + } + +unwind_done: + if (CRASHDEBUG(1)) + fprintf(ofp, " nxt fp:%016lx sp:%016lx pc:%016lx\n", + frame->fp, frame->sp, frame->pc); + + return 
TRUE; +} + +/* + * A layout of a stack frame in a function looks like: + * + * stack grows to lower addresses. + * /|\ + * | + * | | + * new sp +------+ <--- + * |dyn | | + * | vars | | + * new fp +- - - + | + * |old fp| | a function's stack frame + * |old lr| | + * |static| | + * | vars| | + * old sp +------+ <--- + * |dyn | + * | vars | + * old fp +------+ + * | | + * + * - On function entry, sp is decremented down to new fp. + * + * - and old fp and sp are saved into this stack frame. + * "Static" local variables are allocated at the same time. + * + * - Later on, "dynamic" local variables may be allocated on a stack. + * But those dynamic variables are rarely used in the kernel image, + * and, as a matter of fact, sp is equal to fp in almost all functions. + * (not 100% though.) + * + * - Currently, sp is determined in arm64_unwind_frame() by + * sp = a callee's fp + 0x10 + * where 0x10 stands for a saved area for fp and sp + * + * - As you can see, however, this calculated sp still points to the top of + * callee's static local variables and doesn't match with a *real* sp. + * + * - So, generally, dumping a stack from this calculated sp to the next frame's + * sp shows "callee's static local variables", old fp and sp. + * + * Diagram and explanation courtesy of Takahiro Akashi + */ + +static void +arm64_back_trace_cmd(struct bt_info *bt) +{ + struct arm64_stackframe stackframe; + int level; + ulong exception_frame; + FILE *ofp; + + if (bt->flags & BT_OPT_BACK_TRACE) { + if (machdep->flags & UNW_4_14) { + option_not_supported('o'); + return; + } + + arm64_back_trace_cmd_v2(bt); + return; + } + + ofp = BT_REFERENCE_CHECK(bt) ? 
pc->nullfp : fp; + + /* + * stackframes are created from 3 contiguous stack addresses: + * + * x: contains stackframe.fp -- points to next triplet + * x+8: contains stackframe.pc -- text return address + * x+16: is the stackframe.sp address + */ + + if (bt->flags & BT_KDUMP_ADJUST) { + if (arm64_on_irq_stack(bt->tc->processor, bt->bptr)) { + arm64_set_irq_stack(bt); + bt->flags |= BT_IRQSTACK; + } + stackframe.fp = GET_STACK_ULONG(bt->bptr - 8); + stackframe.pc = GET_STACK_ULONG(bt->bptr); + stackframe.sp = bt->bptr + 8; + bt->frameptr = stackframe.sp; + } else if (bt->hp && bt->hp->esp) { + if (arm64_on_irq_stack(bt->tc->processor, bt->hp->esp)) { + arm64_set_irq_stack(bt); + bt->flags |= BT_IRQSTACK; + } + stackframe.fp = GET_STACK_ULONG(bt->hp->esp - 8); + stackframe.pc = bt->hp->eip ? + bt->hp->eip : GET_STACK_ULONG(bt->hp->esp); + stackframe.sp = bt->hp->esp + 8; + bt->flags &= ~BT_REGS_NOT_FOUND; + } else { + if (arm64_on_irq_stack(bt->tc->processor, bt->frameptr)) { + arm64_set_irq_stack(bt); + bt->flags |= BT_IRQSTACK; + } + stackframe.sp = bt->stkptr; + stackframe.pc = bt->instptr; + stackframe.fp = bt->frameptr; + } + + if (bt->flags & BT_TEXT_SYMBOLS) { + arm64_print_text_symbols(bt, &stackframe, ofp); + if (BT_REFERENCE_FOUND(bt)) { + print_task_header(fp, task_to_context(bt->task), 0); + arm64_print_text_symbols(bt, &stackframe, fp); + fprintf(fp, "\n"); + } + return; + } + + if (bt->flags & BT_REGS_NOT_FOUND) + return; + + if (!(bt->flags & BT_KDUMP_ADJUST)) { + if (bt->flags & BT_USER_SPACE) + goto complete_user; + + if (DUMPFILE() && is_task_active(bt->task)) { + exception_frame = stackframe.fp - KERN_EFRAME_OFFSET; + if (arm64_is_kernel_exception_frame(bt, exception_frame)) + arm64_print_exception_frame(bt, exception_frame, + KERNEL_MODE, ofp); + } + } + + level = exception_frame = 0; + while (1) { + bt->instptr = stackframe.pc; + + switch (arm64_print_stackframe_entry(bt, level, &stackframe, ofp)) + { + case BACKTRACE_COMPLETE_KERNEL: + return; + 
case BACKTRACE_COMPLETE_USER: + goto complete_user; + case BACKTRACE_CONTINUE: + break; + } + + if (exception_frame) { + arm64_print_exception_frame(bt, exception_frame, KERNEL_MODE, ofp); + exception_frame = 0; + } + + if (!arm64_unwind_frame(bt, &stackframe)) + break; + + if (arm64_in_exception_text(bt->instptr) && INSTACK(stackframe.fp, bt)) { + if (!(bt->flags & BT_IRQSTACK) || + ((stackframe.sp + SIZE(pt_regs)) < bt->stacktop)) { + if (arm64_is_kernel_exception_frame(bt, stackframe.fp - KERN_EFRAME_OFFSET)) + exception_frame = stackframe.fp - KERN_EFRAME_OFFSET; + } + } + + if ((bt->flags & BT_IRQSTACK) && + !arm64_on_irq_stack(bt->tc->processor, stackframe.fp)) { + bt->flags &= ~BT_IRQSTACK; + if (arm64_switch_stack(bt, &stackframe, ofp) == USER_MODE) + break; + } + + + level++; + } + + if (is_kernel_thread(bt->tc->task)) + return; + +complete_user: + exception_frame = bt->stacktop - USER_EFRAME_OFFSET; + arm64_print_exception_frame(bt, exception_frame, USER_MODE, ofp); + if ((bt->flags & (BT_USER_SPACE|BT_KDUMP_ADJUST)) == BT_USER_SPACE) + fprintf(ofp, " #0 [user space]\n"); +} + +static void +arm64_back_trace_cmd_v2(struct bt_info *bt) +{ + struct arm64_stackframe stackframe, cur_frame; + int level, mode; + ulong exception_frame; + FILE *ofp; + + ofp = BT_REFERENCE_CHECK(bt) ? 
pc->nullfp : fp; + + /* + * stackframes are created from 3 contiguous stack addresses: + * + * x: contains stackframe.fp -- points to next triplet + * x+8: contains stackframe.pc -- text return address + * x+16: is the stackframe.sp address + */ + + if (bt->flags & BT_KDUMP_ADJUST) { + if (arm64_on_irq_stack(bt->tc->processor, bt->bptr)) { + arm64_set_irq_stack(bt); + bt->flags |= BT_IRQSTACK; + } + stackframe.fp = GET_STACK_ULONG(bt->bptr); + stackframe.pc = GET_STACK_ULONG(bt->bptr + 8); + stackframe.sp = bt->bptr + 16; + bt->frameptr = stackframe.fp; + } else { + if (arm64_on_irq_stack(bt->tc->processor, bt->frameptr)) { + arm64_set_irq_stack(bt); + bt->flags |= BT_IRQSTACK; + } + stackframe.sp = bt->stkptr; + stackframe.pc = bt->instptr; + stackframe.fp = bt->frameptr; + } + + if (bt->flags & BT_TEXT_SYMBOLS) { + arm64_print_text_symbols(bt, &stackframe, ofp); + if (BT_REFERENCE_FOUND(bt)) { + print_task_header(fp, task_to_context(bt->task), 0); + arm64_print_text_symbols(bt, &stackframe, fp); + fprintf(fp, "\n"); + } + return; + } + + if (bt->flags & BT_REGS_NOT_FOUND) + return; + + if (!(bt->flags & BT_KDUMP_ADJUST)) { + if (bt->flags & BT_USER_SPACE) { +user_space: + exception_frame = bt->stacktop - USER_EFRAME_OFFSET; + arm64_print_exception_frame(bt, exception_frame, + USER_MODE, ofp); +// fprintf(ofp, " #0 [user space]\n"); + + return; + } + + if (DUMPFILE() && is_task_active(bt->task)) { + exception_frame = stackframe.fp - SIZE(pt_regs); + if (arm64_is_kernel_exception_frame(bt, exception_frame)) + arm64_print_exception_frame(bt, exception_frame, + KERNEL_MODE, ofp); + } + } + + for (level = 0;; level++) { + bt->instptr = stackframe.pc; + + /* + * Show one-line stackframe info + */ + if (arm64_print_stackframe_entry_v2(bt, level, &stackframe, ofp) + == BACKTRACE_COMPLETE_KERNEL) + break; + + cur_frame = stackframe; + if (!arm64_unwind_frame_v2(bt, &stackframe, ofp)) + break; + + /* + * Dump the contents of the current stackframe. 
+ * We need to know the next stackframe to determine + * the dump range: + * + */ + arm64_display_full_frame_v2(bt, &cur_frame, &stackframe); + + /* + * If we are in a normal stackframe, just continue, + * otherwise show an exception frame. + * Since exception entry code doesn't have a real + * stackframe, we fake a dummy frame here. + */ + if (!arm64_in_exp_entry(stackframe.pc)) + continue; + + if (!INSTACK(cur_frame.sp, bt)) + fprintf(ofp, "--- ---\n"); + + arm64_print_stackframe_entry_v2(bt, ++level, &stackframe, ofp); + if (bt->flags & BT_USER_EFRAME) + goto user_space; + cur_frame = stackframe; + arm64_unwind_frame_v2(bt, &stackframe, ofp); + + /* + * and don't show the contenxts. Instead, + * show an exception frame below + */ + + if (!INSTACK(cur_frame.sp, bt)) { + /* This check is a safeguard. See unwind_frame(). */ + error(WARNING, + "stack pointer for exception frame is wrong\n"); + return; + } + mode = (stackframe.pc < machdep->machspec->userspace_top) ? + USER_MODE : KERNEL_MODE; +// fprintf(ofp, "--- ---\n", +// mode == KERNEL_MODE ? "kernel" : "user"); + arm64_print_exception_frame(bt, cur_frame.sp, mode, ofp); + + if (mode == USER_MODE) + break; + } +} + +static void +arm64_print_text_symbols(struct bt_info *bt, struct arm64_stackframe *frame, FILE *ofp) +{ + int i; + ulong *up; + struct load_module *lm; + char buf1[BUFSIZE]; + char buf2[BUFSIZE]; + char *name; + ulong start; + + if (bt->flags & BT_TEXT_SYMBOLS_ALL) + start = bt->stackbase; + else { + start = frame->sp - 8; + fprintf(ofp, "%sSTART: %s at %lx\n", + space(VADDR_PRLEN > 8 ? 14 : 6), + bt->flags & BT_SYMBOL_OFFSET ? + value_to_symstr(frame->pc, buf2, bt->radix) : + closest_symbol(frame->pc), frame->pc); + } + + for (i = (start - bt->stackbase)/sizeof(ulong); i < LONGS_PER_STACK; i++) { + up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); + if (is_kernel_text(*up)) { + name = closest_symbol(*up); + fprintf(ofp, " %s[%s] %s at %lx", + bt->flags & BT_ERROR_MASK ? 
+ " " : "", + mkstring(buf1, VADDR_PRLEN, + RJUST|LONG_HEX, + MKSTR(bt->stackbase + + (i * sizeof(long)))), + bt->flags & BT_SYMBOL_OFFSET ? + value_to_symstr(*up, buf2, bt->radix) : + name, *up); + if (module_symbol(*up, NULL, &lm, NULL, 0)) + fprintf(ofp, " [%s]", lm->mod_name); + fprintf(ofp, "\n"); + if (BT_REFERENCE_CHECK(bt)) + arm64_do_bt_reference_check(bt, *up, name); + } + } +} + +static int +arm64_in_kdump_text(struct bt_info *bt, struct arm64_stackframe *frame) +{ + ulong *ptr, *start, *base; + struct machine_specific *ms; + ulong crash_kexec_frame; + + if (!(machdep->flags & KDUMP_ENABLED)) + return FALSE; + + base = (ulong *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(bt->stackbase))]; + if (bt->flags & BT_USER_SPACE) + start = (ulong *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(bt->stacktop))]; + else { + if (INSTACK(frame->fp, bt)) + start = (ulong *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(frame->fp))]; + else + start = (ulong *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(bt->stacktop))]; + } + + crash_kexec_frame = 0; + ms = machdep->machspec; + for (ptr = start - 8; ptr >= base; ptr--) { + if (bt->flags & BT_OPT_BACK_TRACE) { + if ((*ptr >= ms->crash_kexec_start) && + (*ptr < ms->crash_kexec_end) && + INSTACK(*(ptr - 1), bt)) { + bt->bptr = ((ulong)(ptr - 1) - (ulong)base) + + task_to_stackbase(bt->tc->task); + if (CRASHDEBUG(1)) + fprintf(fp, "%lx: %lx (crash_kexec)\n", bt->bptr, *ptr); + return TRUE; + } + if ((*ptr >= ms->crash_save_cpu_start) && + (*ptr < ms->crash_save_cpu_end) && + INSTACK(*(ptr - 1), bt)) { + bt->bptr = ((ulong)(ptr - 1) - (ulong)base) + + task_to_stackbase(bt->tc->task); + if (CRASHDEBUG(1)) + fprintf(fp, "%lx: %lx (crash_save_cpu)\n", bt->bptr, *ptr); + return TRUE; + } + } else { + if ((*ptr >= ms->machine_kexec_start) && (*ptr < ms->machine_kexec_end)) { + bt->bptr = ((ulong)ptr - (ulong)base) + + task_to_stackbase(bt->tc->task); + if (CRASHDEBUG(1)) + fprintf(fp, "%lx: %lx (machine_kexec)\n", bt->bptr, *ptr); + return TRUE; + } + 
if ((*ptr >= ms->crash_kexec_start) && (*ptr < ms->crash_kexec_end)) { + /* + * Stash the first crash_kexec frame in case the machine_kexec + * frame is not found. + */ + if (!crash_kexec_frame) { + crash_kexec_frame = ((ulong)ptr - (ulong)base) + + task_to_stackbase(bt->tc->task); + if (CRASHDEBUG(1)) + fprintf(fp, "%lx: %lx (crash_kexec)\n", + bt->bptr, *ptr); + } + continue; + } + if ((*ptr >= ms->crash_save_cpu_start) && (*ptr < ms->crash_save_cpu_end)) { + bt->bptr = ((ulong)ptr - (ulong)base) + + task_to_stackbase(bt->tc->task); + if (CRASHDEBUG(1)) + fprintf(fp, "%lx: %lx (crash_save_cpu)\n", bt->bptr, *ptr); + return TRUE; + } + } + } + + if (crash_kexec_frame) { + bt->bptr = crash_kexec_frame; + return TRUE; + } + + return FALSE; +} + +static int +arm64_in_kdump_text_on_irq_stack(struct bt_info *bt) +{ + int cpu; + ulong stackbase; + char *stackbuf; + ulong *ptr, *start, *base; + struct machine_specific *ms; + + if ((machdep->flags & (IRQ_STACKS|KDUMP_ENABLED)) != (IRQ_STACKS|KDUMP_ENABLED)) + return FALSE; + + ms = machdep->machspec; + cpu = bt->tc->processor; + stackbase = ms->irq_stacks[cpu]; + stackbuf = GETBUF(ms->irq_stack_size); + + if (!readmem(stackbase, KVADDR, stackbuf, + ms->irq_stack_size, "IRQ stack contents", RETURN_ON_ERROR)) { + error(INFO, "read of IRQ stack at %lx failed\n", stackbase); + FREEBUF(stackbuf); + return FALSE; + } + + base = (ulong *)stackbuf; + start = (ulong *)(stackbuf + ms->irq_stack_size); + + for (ptr = start - 8; ptr >= base; ptr--) { + if (bt->flags & BT_OPT_BACK_TRACE) { + if ((*ptr >= ms->crash_kexec_start) && + (*ptr < ms->crash_kexec_end) && + INSTACK(*(ptr - 1), bt)) { + bt->bptr = ((ulong)(ptr - 1) - (ulong)base) + stackbase; + if (CRASHDEBUG(1)) + fprintf(fp, "%lx: %lx (crash_kexec on IRQ stack)\n", + bt->bptr, *ptr); + FREEBUF(stackbuf); + return TRUE; + } + if ((*ptr >= ms->crash_save_cpu_start) && + (*ptr < ms->crash_save_cpu_end) && + INSTACK(*(ptr - 1), bt)) { + bt->bptr = ((ulong)(ptr - 1) - (ulong)base) 
+ stackbase; + if (CRASHDEBUG(1)) + fprintf(fp, "%lx: %lx (crash_save_cpu on IRQ stack)\n", + bt->bptr, *ptr); + FREEBUF(stackbuf); + return TRUE; + } + } else { + if ((*ptr >= ms->crash_kexec_start) && (*ptr < ms->crash_kexec_end)) { + bt->bptr = ((ulong)ptr - (ulong)base) + stackbase; + if (CRASHDEBUG(1)) + fprintf(fp, "%lx: %lx (crash_kexec on IRQ stack)\n", + bt->bptr, *ptr); + FREEBUF(stackbuf); + return TRUE; + } + if ((*ptr >= ms->crash_save_cpu_start) && (*ptr < ms->crash_save_cpu_end)) { + bt->bptr = ((ulong)ptr - (ulong)base) + stackbase; + if (CRASHDEBUG(1)) + fprintf(fp, "%lx: %lx (crash_save_cpu on IRQ stack)\n", + bt->bptr, *ptr); + FREEBUF(stackbuf); + return TRUE; + } + } + } + + FREEBUF(stackbuf); + return FALSE; +} + +static int +arm64_switch_stack(struct bt_info *bt, struct arm64_stackframe *frame, FILE *ofp) +{ + int i; + ulong stacktop, words, addr; + ulong *stackbuf; + char buf[BUFSIZE]; + struct machine_specific *ms = machdep->machspec; + + if (bt->flags & BT_FULL) { + stacktop = ms->irq_stacks[bt->tc->processor] + ms->irq_stack_size; + words = (stacktop - bt->bptr) / sizeof(ulong); + stackbuf = (ulong *)GETBUF(words * sizeof(ulong)); + readmem(bt->bptr, KVADDR, stackbuf, words * sizeof(long), + "top of IRQ stack", FAULT_ON_ERROR); + + addr = bt->bptr; + for (i = 0; i < words; i++) { + if (!(i & 1)) + fprintf(ofp, "%s %lx: ", i ? 
"\n" : "", addr); + fprintf(ofp, "%s ", format_stack_entry(bt, buf, stackbuf[i], 0)); + addr += sizeof(ulong); + } + fprintf(ofp, "\n"); + FREEBUF(stackbuf); + } + fprintf(ofp, "--- ---\n"); + + if (frame->fp == 0) + return USER_MODE; + + if (!(machdep->flags & UNW_4_14)) + arm64_print_exception_frame(bt, frame->sp, KERNEL_MODE, ofp); + + return KERNEL_MODE; +} + +static int +arm64_get_dumpfile_stackframe(struct bt_info *bt, struct arm64_stackframe *frame) +{ + struct machine_specific *ms = machdep->machspec; + struct arm64_pt_regs *ptregs; + + if (!ms->panic_task_regs || + (!ms->panic_task_regs[bt->tc->processor].sp && + !ms->panic_task_regs[bt->tc->processor].pc)) { + bt->flags |= BT_REGS_NOT_FOUND; + return FALSE; + } + + ptregs = &ms->panic_task_regs[bt->tc->processor]; + frame->pc = ptregs->pc; + if (user_mode(ptregs)) { + frame->sp = user_stack_pointer(ptregs); + frame->fp = user_frame_pointer(ptregs); + if (is_kernel_text(frame->pc) || + !in_user_stack(bt->tc->task, frame->sp)) { + error(WARNING, + "corrupt NT_PRSTATUS? 
pstate: 0x%lx, but no user frame found\n", + ptregs->pstate); + if (is_kernel_text(frame->pc) && + INSTACK(frame->sp, bt) && INSTACK(frame->fp, bt)) + goto try_kernel; + bt->flags |= BT_REGS_NOT_FOUND; + return FALSE; + } + bt->flags |= BT_USER_SPACE; + } else { +try_kernel: + frame->sp = ptregs->sp; + frame->fp = ptregs->regs[29]; + } + + if (arm64_in_kdump_text(bt, frame) || + arm64_in_kdump_text_on_irq_stack(bt)) + bt->flags |= BT_KDUMP_ADJUST; + + return TRUE; +} + +static int +arm64_get_stackframe(struct bt_info *bt, struct arm64_stackframe *frame) +{ + if (!fill_task_struct(bt->task)) + return FALSE; + + frame->sp = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_sp)); + frame->pc = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_pc)); + frame->fp = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_fp)); + + return TRUE; +} + +static void +arm64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) +{ + int ret; + struct arm64_stackframe stackframe = { 0 }; + + if (DUMPFILE() && is_task_active(bt->task)) + ret = arm64_get_dumpfile_stackframe(bt, &stackframe); + else + ret = arm64_get_stackframe(bt, &stackframe); + + if (!ret) + error(WARNING, + "cannot determine starting stack frame for task %lx\n", + bt->task); + + bt->frameptr = stackframe.fp; + if (pcp) + *pcp = stackframe.pc; + if (spp) + *spp = stackframe.sp; +} + +static void +arm64_gen_hidden_frame(struct bt_info *bt, ulong sp, + struct arm64_stackframe *frame) +{ + struct arm64_pt_regs *ptregs; + + if (IN_TASK_VMA(bt->task, sp)) { + bt->flags |= BT_USER_EFRAME; + return; + } + + ptregs = (struct arm64_pt_regs *) + &bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(sp))]; + + frame->pc = ptregs->pc; + frame->fp = ptregs->regs[29]; + frame->sp = ptregs->sp; +} + +static void +arm64_print_exception_frame(struct bt_info *bt, ulong pt_regs, int mode, FILE *ofp) +{ + int i, r, rows, top_reg, is_64_bit; + struct arm64_pt_regs *regs; + struct syment *sp; + ulong LR, SP, offset; + char 
buf[BUFSIZE]; + + if (CRASHDEBUG(1)) + fprintf(ofp, "pt_regs: %lx\n", pt_regs); + + regs = (struct arm64_pt_regs *) + &bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(pt_regs))]; + + if ((mode == USER_MODE) && (regs->pstate & PSR_MODE32_BIT)) { + LR = regs->regs[14]; + SP = regs->regs[13]; + top_reg = 12; + is_64_bit = FALSE; + rows = 4; + } else { + LR = regs->regs[30]; + SP = regs->sp; + top_reg = 29; + is_64_bit = TRUE; + rows = 3; + } + + switch (mode) { + case USER_MODE: + if (is_64_bit) + fprintf(ofp, + " PC: %016lx LR: %016lx SP: %016lx\n ", + (ulong)regs->pc, LR, SP); + else + fprintf(ofp, + " PC: %08lx LR: %08lx SP: %08lx PSTATE: %08lx\n ", + (ulong)regs->pc, LR, SP, (ulong)regs->pstate); + break; + + case KERNEL_MODE: + fprintf(ofp, " PC: %016lx ", (ulong)regs->pc); + if (is_kernel_text(regs->pc) && + (sp = value_search(regs->pc, &offset))) { + fprintf(ofp, "[%s", sp->name); + if (offset) + fprintf(ofp, (*gdb_output_radix == 16) ? + "+0x%lx" : "+%ld", + offset); + fprintf(ofp, "]\n"); + } else + fprintf(ofp, "[unknown or invalid address]\n"); + + fprintf(ofp, " LR: %016lx ", LR); + if (is_kernel_text(LR) && + (sp = value_search(LR, &offset))) { + fprintf(ofp, "[%s", sp->name); + if (offset) + fprintf(ofp, (*gdb_output_radix == 16) ? + "+0x%lx" : "+%ld", + offset); + fprintf(ofp, "]\n"); + } else + fprintf(ofp, "[unknown or invalid address]\n"); + + fprintf(ofp, " SP: %016lx PSTATE: %08lx\n ", + SP, (ulong)regs->pstate); + break; + } + + for (i = top_reg, r = 1; i >= 0; r++, i--) { + fprintf(ofp, "%sX%d: ", + i < 10 ? " " : "", i); + fprintf(ofp, is_64_bit ? "%016lx" : "%08lx", + (ulong)regs->regs[i]); + if ((i == 0) && !is_64_bit) + fprintf(ofp, "\n"); + else if ((i == 0) || ((r % rows) == 0)) + fprintf(ofp, "\n%s", + (i == 0) && (mode == KERNEL_MODE) ? "" : " "); + else + fprintf(ofp, "%s", is_64_bit ? 
" " : " "); + } + + if (is_64_bit) { + if (mode == USER_MODE) { + fprintf(ofp, "ORIG_X0: %016lx SYSCALLNO: %lx", + (ulong)regs->orig_x0, (ulong)regs->syscallno); + fprintf(ofp, " PSTATE: %08lx\n", (ulong)regs->pstate); + } + } + + if (is_kernel_text(regs->pc) && (bt->flags & BT_LINE_NUMBERS)) { + get_line_number(regs->pc, buf, FALSE); + if (strlen(buf)) + fprintf(ofp, " %s\n", buf); + } + + if (BT_REFERENCE_CHECK(bt)) { + arm64_do_bt_reference_check(bt, regs->pc, NULL); + if ((sp = value_search(regs->pc, &offset))) + arm64_do_bt_reference_check(bt, 0, sp->name); + arm64_do_bt_reference_check(bt, LR, NULL); + arm64_do_bt_reference_check(bt, SP, NULL); + arm64_do_bt_reference_check(bt, regs->pstate, NULL); + for (i = 0; i <= top_reg; i++) + arm64_do_bt_reference_check(bt, regs->regs[i], NULL); + if (is_64_bit) { + arm64_do_bt_reference_check(bt, regs->orig_x0, NULL); + arm64_do_bt_reference_check(bt, regs->syscallno, NULL); + } + } +} + +/* + * Check a frame for a requested reference. + */ +static void +arm64_do_bt_reference_check(struct bt_info *bt, ulong text, char *name) +{ + ulong offset; + struct syment *sp = NULL; + + if (!name) + sp = value_search(text, &offset); + else if (!text) + sp = symbol_search(name); + + switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) + { + case BT_REF_SYMBOL: + if (name) { + if (STREQ(name, bt->ref->str)) + bt->ref->cmdflags |= BT_REF_FOUND; + } else { + if (sp && !offset && STREQ(sp->name, bt->ref->str)) + bt->ref->cmdflags |= BT_REF_FOUND; + } + break; + + case BT_REF_HEXVAL: + if (text) { + if (bt->ref->hexval == text) + bt->ref->cmdflags |= BT_REF_FOUND; + } else if (sp && (bt->ref->hexval == sp->value)) + bt->ref->cmdflags |= BT_REF_FOUND; + else if (!name && !text && (bt->ref->hexval == 0)) + bt->ref->cmdflags |= BT_REF_FOUND; + break; + } +} + +/* + * Translate a PTE, returning TRUE if the page is present. + * If a physaddr pointer is passed in, don't print anything. 
+ */ +static int +arm64_translate_pte(ulong pte, void *physaddr, ulonglong unused) +{ + int c, others, len1, len2, len3; + ulong paddr; + char buf1[BUFSIZE]; + char buf2[BUFSIZE]; + char buf3[BUFSIZE]; + char ptebuf[BUFSIZE]; + char physbuf[BUFSIZE]; + char *arglist[MAXARGS]; + int page_present; + + paddr = PTE_TO_PHYS(pte); + page_present = pte & (PTE_VALID | machdep->machspec->PTE_PROT_NONE); + + if (physaddr) { + *((ulong *)physaddr) = paddr; + return page_present; + } + + sprintf(ptebuf, "%lx", pte); + len1 = MAX(strlen(ptebuf), strlen("PTE")); + fprintf(fp, "%s ", mkstring(buf1, len1, CENTER|LJUST, "PTE")); + + if (!page_present) { + swap_location(pte, buf1); + if ((c = parse_line(buf1, arglist)) != 3) + error(FATAL, "cannot determine swap location\n"); + + len2 = MAX(strlen(arglist[0]), strlen("SWAP")); + len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); + + fprintf(fp, "%s %s\n", + mkstring(buf2, len2, CENTER|LJUST, "SWAP"), + mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); + + strcpy(buf2, arglist[0]); + strcpy(buf3, arglist[2]); + fprintf(fp, "%s %s %s\n", + mkstring(ptebuf, len1, CENTER|RJUST, NULL), + mkstring(buf2, len2, CENTER|RJUST, NULL), + mkstring(buf3, len3, CENTER|RJUST, NULL)); + return page_present; + } + + sprintf(physbuf, "%lx", paddr); + len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); + fprintf(fp, "%s ", mkstring(buf1, len2, CENTER|LJUST, "PHYSICAL")); + + fprintf(fp, "FLAGS\n"); + + fprintf(fp, "%s %s ", + mkstring(ptebuf, len1, CENTER|RJUST, NULL), + mkstring(physbuf, len2, CENTER|RJUST, NULL)); + fprintf(fp, "("); + others = 0; + + if (pte) { + if (pte & PTE_VALID) + fprintf(fp, "%sVALID", others++ ? "|" : ""); + if (pte & machdep->machspec->PTE_FILE) + fprintf(fp, "%sFILE", others++ ? "|" : ""); + if (pte & machdep->machspec->PTE_PROT_NONE) + fprintf(fp, "%sPROT_NONE", others++ ? "|" : ""); + if (pte & PTE_USER) + fprintf(fp, "%sUSER", others++ ? "|" : ""); + if (pte & PTE_RDONLY) + fprintf(fp, "%sRDONLY", others++ ? 
"|" : ""); + if (pte & PTE_SHARED) + fprintf(fp, "%sSHARED", others++ ? "|" : ""); + if (pte & PTE_AF) + fprintf(fp, "%sAF", others++ ? "|" : ""); + if (pte & PTE_NG) + fprintf(fp, "%sNG", others++ ? "|" : ""); + if (pte & PTE_PXN) + fprintf(fp, "%sPXN", others++ ? "|" : ""); + if (pte & PTE_UXN) + fprintf(fp, "%sUXN", others++ ? "|" : ""); + if (pte & PTE_DIRTY) + fprintf(fp, "%sDIRTY", others++ ? "|" : ""); + if (pte & PTE_SPECIAL) + fprintf(fp, "%sSPECIAL", others++ ? "|" : ""); + } else { + fprintf(fp, "no mapping"); + } + + fprintf(fp, ")\n"); + + return (page_present); +} + +static ulong +arm64_vmalloc_start(void) +{ + return machdep->machspec->vmalloc_start_addr; +} + +/* + * Not so accurate since thread_info introduction. + */ +static int +arm64_is_task_addr(ulong task) +{ + if (tt->flags & THREAD_INFO) + return IS_KVADDR(task); + else + return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)); +} + +static ulong +PLT_veneer_to_kvaddr(ulong value) +{ + uint32_t insn; + ulong addr = 0; + int i; + + /* + * PLT veneer always looks: + * movn x16, #0x.... 
+ * movk x16, #0x...., lsl #16 + * movk x16, #0x...., lsl #32 + * br x16 + */ + for (i = 0; i < 4; i++) { + if (!readmem(value + i * sizeof(insn), KVADDR, &insn, + sizeof(insn), "PLT veneer", RETURN_ON_ERROR)) { + error(WARNING, "cannot read PLT veneer instruction at %lx\n", + value + i * sizeof(insn)); + return value; + } + switch (i) { + case 0: + if ((insn & 0xffe0001f) != 0x92800010) + goto not_plt; + addr = ~((ulong)(insn & 0x1fffe0) >> 5); + break; + case 1: + if ((insn & 0xffe0001f) != 0xf2a00010) + goto not_plt; + addr &= 0xffffffff0000ffff; + addr |= (ulong)(insn & 0x1fffe0) << (16 - 5); + break; + case 2: + if ((insn & 0xffe0001f) != 0xf2c00010) + goto not_plt; + addr &= 0xffff0000ffffffff; + addr |= (ulong)(insn & 0x1fffe0) << (32 - 5); + break; + case 3: + if (insn != 0xd61f0200) + goto not_plt; + break; + default: + return value; /* to avoid any warnings */ + } + } + + return addr; + +not_plt: + return value; +} + +/* + * Filter dissassembly output if the output radix is not gdb's default 10 + */ +static int +arm64_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix) +{ + char buf1[BUFSIZE]; + char buf2[BUFSIZE]; + char *colon, *p1; + int argc; + char *argv[MAXARGS]; + ulong value; + + if (!inbuf) + return TRUE; + + console("IN: %s", inbuf); + + colon = strstr(inbuf, ":"); + + if (colon) { + sprintf(buf1, "0x%lx <%s>", vaddr, + value_to_symstr(vaddr, buf2, output_radix)); + sprintf(buf2, "%s%s", buf1, colon); + strcpy(inbuf, buf2); + } + + strcpy(buf1, inbuf); + argc = parse_line(buf1, argv); + + if ((FIRSTCHAR(argv[argc-1]) == '<') && + (LASTCHAR(argv[argc-1]) == '>')) { + p1 = rindex(inbuf, '<'); + while ((p1 > inbuf) && !(STRNEQ(p1, " 0x") || STRNEQ(p1, "\t0x"))) + p1--; + + if (!(STRNEQ(p1, " 0x") || STRNEQ(p1, "\t0x"))) + return FALSE; + p1++; + + if (!extract_hex(p1, &value, NULLCHAR, TRUE)) + return FALSE; + + sprintf(buf1, "0x%lx <%s>\n", value, + value_to_symstr(value, buf2, output_radix)); + + sprintf(p1, "%s", buf1); + } + + if 
(IS_MODULE_VADDR(vaddr)) { + ulong orig_value; + + p1 = &inbuf[strlen(inbuf)-1]; + strcpy(buf1, inbuf); + argc = parse_line(buf1, argv); + + if ((STREQ(argv[argc-2], "b") || STREQ(argv[argc-2], "bl")) && + extract_hex(argv[argc-1], &orig_value, NULLCHAR, TRUE)) { + value = PLT_veneer_to_kvaddr(orig_value); + sprintf(p1, " <%s%s>\n", + value == orig_value ? "" : "plt:", + value_to_symstr(value, buf2, output_radix)); + } + } + + console(" %s", inbuf); + + return TRUE; +} + +/* + * Machine dependent command. + */ +static void +arm64_cmd_mach(void) +{ + int c; + + while ((c = getopt(argcnt, args, "cm")) != -1) { + switch (c) { + case 'c': + case 'm': + option_not_supported(c); + break; + + default: + argerrs++; + break; + } + } + + if (argerrs) + cmd_usage(pc->curcmd, SYNOPSIS); + + arm64_display_machine_stats(); +} + +static void +arm64_display_machine_stats(void) +{ + int i, pad; + struct new_utsname *uts; + char buf[BUFSIZE]; + ulong mhz; + + uts = &kt->utsname; + + fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); + fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); + fprintf(fp, " CPUS: %d\n", get_cpus_to_display()); + if ((mhz = machdep->processor_speed())) + fprintf(fp, " PROCESSOR SPEED: %ld Mhz\n", mhz); + fprintf(fp, " HZ: %d\n", machdep->hz); + fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); + fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->machspec->page_offset); + fprintf(fp, "KERNEL MODULES BASE: %lx\n", machdep->machspec->modules_vaddr); + fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", machdep->machspec->vmalloc_start_addr); + fprintf(fp, "KERNEL VMEMMAP BASE: %lx\n", machdep->machspec->vmemmap_vaddr); + fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); + if (machdep->machspec->irq_stack_size) { + fprintf(fp, " IRQ STACK SIZE: %ld\n", + machdep->machspec->irq_stack_size); + fprintf(fp, " IRQ STACKS:\n"); + for (i = 0; i < kt->cpus; i++) { + pad = (i < 10) ? 3 : (i < 100) ? 2 : (i < 1000) ? 
 1 : 0; + fprintf(fp, "%s CPU %d: %lx\n", space(pad), i, + machdep->machspec->irq_stacks[i]); + } + } +} + +static int +arm64_get_smp_cpus(void) +{ + int cpus; + + if ((cpus = get_cpus_present())) + return cpus; + else + return MAX(get_cpus_online(), get_highest_cpu_online()+1); +} + + +/* + * Retrieve task registers for the time of the crash. + */ +static void +arm64_get_crash_notes(void) +{ + struct machine_specific *ms = machdep->machspec; + ulong crash_notes; + Elf64_Nhdr *note; + ulong offset; + char *buf, *p; + ulong *notes_ptrs; + ulong i, found; + + if (!symbol_exists("crash_notes")) + return; + + crash_notes = symbol_value("crash_notes"); + + notes_ptrs = (ulong *)GETBUF(kt->cpus*sizeof(notes_ptrs[0])); + + /* + * Read crash_notes for the first CPU. crash_notes are in standard ELF + * note format. + */ + if (!readmem(crash_notes, KVADDR, &notes_ptrs[kt->cpus-1], + sizeof(notes_ptrs[kt->cpus-1]), "crash_notes", RETURN_ON_ERROR)) { + error(WARNING, "cannot read \"crash_notes\"\n"); + FREEBUF(notes_ptrs); + return; + } + + if (symbol_exists("__per_cpu_offset")) { + /* + * Add __per_cpu_offset for each cpu to form the notes pointer. + */ + for (i = 0; i < kt->cpus; i++) + notes_ptrs[i] = notes_ptrs[kt->cpus-1] + kt->__per_cpu_offset[i]; + } + + buf = GETBUF(SIZE(note_buf)); + + if (!(ms->panic_task_regs = calloc((size_t)kt->cpus, sizeof(struct arm64_pt_regs)))) + error(FATAL, "cannot calloc panic_task_regs space\n"); + + for (i = found = 0; i < kt->cpus; i++) { + if (!readmem(notes_ptrs[i], KVADDR, buf, SIZE(note_buf), + "note_buf_t", RETURN_ON_ERROR)) { + error(WARNING, "cpu %d: cannot read NT_PRSTATUS note\n", i); + continue; + } + + /* + * Do some sanity checks for this note before reading registers from it. + */ + note = (Elf64_Nhdr *)buf; + p = buf + sizeof(Elf64_Nhdr); + + /* + * dumpfiles created with qemu won't have crash_notes, but there will + * be elf notes; dumpfiles created by kdump do not create notes for + * offline cpus. 
+ */ + if (note->n_namesz == 0 && (DISKDUMP_DUMPFILE() || KDUMP_DUMPFILE())) { + if (DISKDUMP_DUMPFILE()) + note = diskdump_get_prstatus_percpu(i); + else if (KDUMP_DUMPFILE()) + note = netdump_get_prstatus_percpu(i); + if (note) { + /* + * SIZE(note_buf) accounts for a "final note", which is a + * trailing empty elf note header. + */ + long notesz = SIZE(note_buf) - sizeof(Elf64_Nhdr); + + if (sizeof(Elf64_Nhdr) + roundup(note->n_namesz, 4) + + note->n_descsz == notesz) + BCOPY((char *)note, buf, notesz); + } else { + error(WARNING, "cpu %d: cannot find NT_PRSTATUS note\n", i); + continue; + } + } + + /* + * Check the sanity of NT_PRSTATUS note only for each online cpu. + * If this cpu has invalid note, continue to find the crash notes + * for other online cpus. + */ + if (note->n_type != NT_PRSTATUS) { + error(WARNING, "cpu %d: invalid NT_PRSTATUS note (n_type != NT_PRSTATUS)\n", i); + continue; + } + + if (!STRNEQ(p, "CORE")) { + error(WARNING, "cpu %d: invalid NT_PRSTATUS note (name != \"CORE\")\n", i); + continue; + } + + /* + * Find correct location of note data. This contains elf_prstatus + * structure which has registers etc. for the crashed task. + */ + offset = sizeof(Elf64_Nhdr); + offset = roundup(offset + note->n_namesz, 4); + p = buf + offset; /* start of elf_prstatus */ + + BCOPY(p + OFFSET(elf_prstatus_pr_reg), &ms->panic_task_regs[i], + sizeof(struct arm64_pt_regs)); + + found++; + } + + FREEBUF(buf); + FREEBUF(notes_ptrs); + + if (!found) { + free(ms->panic_task_regs); + ms->panic_task_regs = NULL; + } +} + +static void +arm64_clear_machdep_cache(void) { + /* + * TBD: probably not necessary... 
+ */ + return; +} + +static int +arm64_on_process_stack(struct bt_info *bt, ulong stkptr) +{ + ulong stackbase, stacktop; + + stackbase = GET_STACKBASE(bt->task); + stacktop = GET_STACKTOP(bt->task); + + if ((stkptr >= stackbase) && (stkptr < stacktop)) + return TRUE; + + return FALSE; +} + +static int +arm64_on_irq_stack(int cpu, ulong stkptr) +{ + return arm64_in_alternate_stack(cpu, stkptr); +} + +static int +arm64_in_alternate_stack(int cpu, ulong stkptr) +{ + struct machine_specific *ms = machdep->machspec; + + if (!ms->irq_stack_size || (cpu >= kt->cpus)) + return FALSE; + + if ((stkptr >= ms->irq_stacks[cpu]) && + (stkptr < (ms->irq_stacks[cpu] + ms->irq_stack_size))) + return TRUE; + + return FALSE; +} + +static void +arm64_set_irq_stack(struct bt_info *bt) +{ + struct machine_specific *ms = machdep->machspec; + + bt->stackbase = ms->irq_stacks[bt->tc->processor]; + bt->stacktop = bt->stackbase + ms->irq_stack_size; + alter_stackbuf(bt); +} + +static void +arm64_set_process_stack(struct bt_info *bt) +{ + bt->stackbase = GET_STACKBASE(bt->task); + bt->stacktop = GET_STACKTOP(bt->task); + alter_stackbuf(bt); +} + + +static int +compare_kvaddr(const void *v1, const void *v2) +{ + struct vaddr_range *r1, *r2; + + r1 = (struct vaddr_range *)v1; + r2 = (struct vaddr_range *)v2; + + return (r1->start < r2->start ? -1 : + r1->start == r2->start ? 
0 : 1); +} + +static int +arm64_get_kvaddr_ranges(struct vaddr_range *vrp) +{ + int cnt; + + cnt = 0; + + vrp[cnt].type = KVADDR_UNITY_MAP; + vrp[cnt].start = machdep->machspec->page_offset; + vrp[cnt++].end = vt->high_memory; + + vrp[cnt].type = KVADDR_VMALLOC; + vrp[cnt].start = machdep->machspec->vmalloc_start_addr; + vrp[cnt++].end = last_vmalloc_address(); + + if (st->mods_installed) { + vrp[cnt].type = KVADDR_MODULES; + vrp[cnt].start = lowest_module_address(); + vrp[cnt++].end = roundup(highest_module_address(), + PAGESIZE()); + } + + if (machdep->flags & VMEMMAP) { + vrp[cnt].type = KVADDR_VMEMMAP; + vrp[cnt].start = machdep->machspec->vmemmap_vaddr; + vrp[cnt++].end = vt->node_table[vt->numnodes-1].mem_map + + (vt->node_table[vt->numnodes-1].size * SIZE(page)); + } + + qsort(vrp, cnt, sizeof(struct vaddr_range), compare_kvaddr); + + return cnt; +} + +/* + * Include both vmalloc'd, module and vmemmap address space as VMALLOC space. + */ +int +arm64_IS_VMALLOC_ADDR(ulong vaddr) +{ + struct machine_specific *ms = machdep->machspec; + + if ((machdep->flags & NEW_VMEMMAP) && + (vaddr >= machdep->machspec->kimage_text) && + (vaddr <= machdep->machspec->kimage_end)) + return FALSE; + + if (ms->VA_START && (vaddr >= ms->VA_START)) + return TRUE; + + return ((vaddr >= ms->vmalloc_start_addr && vaddr <= ms->vmalloc_end) || + ((machdep->flags & VMEMMAP) && + (vaddr >= ms->vmemmap_vaddr && vaddr <= ms->vmemmap_end)) || + (vaddr >= ms->modules_vaddr && vaddr <= ms->modules_end)); +} + +static void +arm64_calc_VA_BITS(void) +{ + int bitval; + struct syment *sp; + ulong vabits_actual, value; + char *string; + + if ((string = pc->read_vmcoreinfo("NUMBER(VA_BITS)"))) { + value = atol(string); + free(string); + machdep->machspec->CONFIG_ARM64_VA_BITS = value; + } + + if (kernel_symbol_exists("vabits_actual")) { + if (pc->flags & PROC_KCORE) { + vabits_actual = symbol_value_from_proc_kallsyms("vabits_actual"); + if ((vabits_actual != BADVAL) && (READMEM(pc->mfd, &value, 
sizeof(ulong), + vabits_actual, KCORE_USE_VADDR) > 0)) { + if (CRASHDEBUG(1)) + fprintf(fp, + "/proc/kcore: vabits_actual: %ld\n", value); + machdep->machspec->VA_BITS_ACTUAL = value; + machdep->machspec->VA_BITS = value; + machdep->machspec->VA_START = _VA_START(machdep->machspec->VA_BITS_ACTUAL); + } else + error(FATAL, "/proc/kcore: cannot read vabits_actual\n"); + } else if (ACTIVE()) + error(FATAL, "cannot determine VA_BITS_ACTUAL: please use /proc/kcore\n"); + else { + if ((string = pc->read_vmcoreinfo("NUMBER(TCR_EL1_T1SZ)"))) { + /* See ARMv8 ARM for the description of + * TCR_EL1.T1SZ and how it can be used + * to calculate the vabits_actual + * supported by underlying kernel. + * + * Basically: + * vabits_actual = 64 - T1SZ; + */ + value = 64 - strtoll(string, NULL, 0); + if (CRASHDEBUG(1)) + fprintf(fp, "vmcoreinfo : vabits_actual: %ld\n", value); + free(string); + machdep->machspec->VA_BITS_ACTUAL = value; + machdep->machspec->VA_BITS = value; + machdep->machspec->VA_START = _VA_START(machdep->machspec->VA_BITS_ACTUAL); + } else + error(FATAL, "cannot determine VA_BITS_ACTUAL\n"); + } + + return; + } + + if (!(sp = symbol_search("swapper_pg_dir")) && + !(sp = symbol_search("idmap_pg_dir")) && + !(sp = symbol_search("_text")) && + !(sp = symbol_search("stext"))) { + for (sp = st->symtable; sp < st->symend; sp++) { + if (highest_bit_long(sp->value) == 63) + break; + } + } + + if (sp) + value = sp->value; + else + value = kt->vmcoreinfo.log_buf_SYMBOL; /* crash --log */ + + for (bitval = highest_bit_long(value); bitval; bitval--) { + if ((value & (1UL << bitval)) == 0) { + if (machdep->flags & NEW_VMEMMAP) + machdep->machspec->VA_BITS = bitval + 1; + else + machdep->machspec->VA_BITS = bitval + 2; + break; + } + } + + /* + * Verify against dumpfiles that export VA_BITS in vmcoreinfo + */ + if (machdep->machspec->CONFIG_ARM64_VA_BITS && + (machdep->machspec->VA_BITS != machdep->machspec->CONFIG_ARM64_VA_BITS)) { + error(WARNING, "VA_BITS: calculated: %ld 
vmcoreinfo: %ld\n", + machdep->machspec->VA_BITS, machdep->machspec->CONFIG_ARM64_VA_BITS); + machdep->machspec->VA_BITS = machdep->machspec->CONFIG_ARM64_VA_BITS; + } + + if (CRASHDEBUG(1)) + fprintf(fp, "VA_BITS: %ld\n", machdep->machspec->VA_BITS); + +} + +/* + * The size and end of the vmalloc range is dependent upon the kernel's + * VMEMMAP_SIZE value, and the vmemmap range is dependent upon the end + * of the vmalloc range as well as the VMEMMAP_SIZE: + * + * #define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE) + * #define VMALLOC_START (UL(0xffffffffffffffff) << VA_BITS) + * #define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K) + * + * Since VMEMMAP_SIZE is dependent upon the size of a struct page, + * the two ranges cannot be determined until POST_GDB. + * + * Since 52-bit VA was introduced: + * + * #define STRUCT_PAGE_MAX_SHIFT 6 + * #define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)) + * #define VMEMMAP_START (-VMEMMAP_SIZE) + * #define VMALLOC_START (MODULES_END) + * #define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K) + * #define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)) + */ + +#define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) +#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) +#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#define SZ_64K 0x00010000 + +static void +arm64_calc_virtual_memory_ranges(void) +{ + struct machine_specific *ms = machdep->machspec; + ulong value, vmemmap_start, vmemmap_end, vmemmap_size, vmalloc_end; + char *string; + ulong PUD_SIZE = UNINITIALIZED; + + if (!machdep->machspec->CONFIG_ARM64_VA_BITS) { + if ((string = pc->read_vmcoreinfo("NUMBER(VA_BITS)"))) { + value = atol(string); + free(string); + machdep->machspec->CONFIG_ARM64_VA_BITS = value; + } + } + + if (THIS_KERNEL_VERSION < LINUX(3,17,0)) /* use original hardwired values */ + return; + + STRUCT_SIZE_INIT(page, 
"page"); + + switch (machdep->flags & (VM_L2_64K|VM_L3_64K|VM_L3_4K|VM_L4_4K)) + { + case VM_L2_64K: + case VM_L3_64K: + PUD_SIZE = PGDIR_SIZE_L2_64K; + break; + case VM_L3_4K: + PUD_SIZE = PGDIR_SIZE_L3_4K; + case VM_L4_4K: + PUD_SIZE = PUD_SIZE_L4_4K; + break; + } + +#define STRUCT_PAGE_MAX_SHIFT 6 + + if (ms->VA_BITS_ACTUAL) { + vmemmap_size = (1UL) << (ms->CONFIG_ARM64_VA_BITS - machdep->pageshift - 1 + STRUCT_PAGE_MAX_SHIFT); + vmalloc_end = (- PUD_SIZE - vmemmap_size - KILOBYTES(64)); + vmemmap_start = (-vmemmap_size); + ms->vmalloc_end = vmalloc_end - 1; + ms->vmemmap_vaddr = vmemmap_start; + ms->vmemmap_end = -1; + return; + } + + if (machdep->flags & NEW_VMEMMAP) + vmemmap_size = 1UL << (ms->VA_BITS - machdep->pageshift - 1 + + STRUCT_PAGE_MAX_SHIFT); + else + vmemmap_size = ALIGN((1UL << (ms->VA_BITS - machdep->pageshift)) * SIZE(page), PUD_SIZE); + + vmalloc_end = (ms->page_offset - PUD_SIZE - vmemmap_size - SZ_64K); + + if (machdep->flags & NEW_VMEMMAP) { + vmemmap_start = ms->page_offset - vmemmap_size; + vmemmap_end = ms->page_offset; + } else { + vmemmap_start = vmalloc_end + SZ_64K; + vmemmap_end = vmemmap_start + vmemmap_size; + } + + ms->vmalloc_end = vmalloc_end - 1; + ms->vmemmap_vaddr = vmemmap_start; + ms->vmemmap_end = vmemmap_end - 1; +} + +static int +arm64_is_uvaddr(ulong addr, struct task_context *tc) +{ + return (addr < machdep->machspec->userspace_top); +} + + +ulong +arm64_swp_type(ulong pte) +{ + struct machine_specific *ms = machdep->machspec; + + pte >>= ms->__SWP_TYPE_SHIFT; + pte &= ms->__SWP_TYPE_MASK; + return pte; +} + +ulong +arm64_swp_offset(ulong pte) +{ + struct machine_specific *ms = machdep->machspec; + + pte >>= ms->__SWP_OFFSET_SHIFT; + if (ms->__SWP_OFFSET_MASK) + pte &= ms->__SWP_OFFSET_MASK; + return pte; +} + +#endif /* ARM64 */ + + diff --git a/defs.h b/defs.h index ac24a5d..d0b021f 100644 --- a/defs.h +++ b/defs.h @@ -3148,17 +3148,9 @@ typedef signed int s32; */ #define ARM64_VA_START ((0xffffffffffffffffUL) \ << 
machdep->machspec->VA_BITS) -#define _VA_START(va) ((0xffffffffffffffffUL) - \ - ((1UL) << ((va) - 1)) + 1) -#define TEXT_OFFSET_MASK (~((MEGABYTES(2UL))-1)) - #define ARM64_PAGE_OFFSET ((0xffffffffffffffffUL) \ << (machdep->machspec->VA_BITS - 1)) -#define ARM64_PAGE_OFFSET_ACTUAL ((0xffffffffffffffffUL) \ - - ((1UL) << machdep->machspec->VA_BITS_ACTUAL) + 1) - #define ARM64_USERSPACE_TOP ((1UL) << machdep->machspec->VA_BITS) -#define ARM64_USERSPACE_TOP_ACTUAL ((1UL) << machdep->machspec->VA_BITS_ACTUAL) /* only used for v4.6 or later */ #define ARM64_MODULES_VSIZE MEGABYTES(128) @@ -3261,9 +3253,7 @@ struct machine_specific { ulong kern_eframe_offset; ulong machine_kexec_start; ulong machine_kexec_end; - ulong VA_BITS_ACTUAL; - ulong CONFIG_ARM64_VA_BITS; - ulong VA_START; + ulong vabits_user; }; struct arm64_stackframe { diff --git a/defs.hgithub_9596b4388ea5.patch b/defs.hgithub_9596b4388ea5.patch new file mode 100644 index 0000000..ac24a5d --- /dev/null +++ b/defs.hgithub_9596b4388ea5.patch @@ -0,0 +1,7020 @@ +/* defs.h - core analysis suite + * + * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. + * Copyright (C) 2002-2020 David Anderson + * Copyright (C) 2002-2020 Red Hat, Inc. All rights reserved. + * Copyright (C) 2002 Silicon Graphics, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef GDB_COMMON + +#include +#include +#include +#include +#include +#include +#include +#undef basename +#if !defined(__USE_GNU) +#define __USE_GNU +#include +#undef __USE_GNU +#else +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* backtrace() */ +#include +#ifdef LZO +#include +#endif +#ifdef SNAPPY +#include +#endif + +#ifndef ATTRIBUTE_UNUSED +#define ATTRIBUTE_UNUSED __attribute__ ((__unused__)) +#endif + +#undef TRUE +#undef FALSE + +#define TRUE (1) +#define FALSE (0) +#define STR(x) #x +#ifndef offsetof +# define offsetof(TYPE, MEMBER) ((ulong)&((TYPE *)0)->MEMBER) +#endif + +#if !defined(X86) && !defined(X86_64) && !defined(ALPHA) && !defined(PPC) && \ + !defined(IA64) && !defined(PPC64) && !defined(S390) && !defined(S390X) && \ + !defined(ARM) && !defined(ARM64) && !defined(MIPS) && !defined(SPARC64) +#ifdef __alpha__ +#define ALPHA +#endif +#ifdef __i386__ +#define X86 +#endif +#ifdef __powerpc64__ +#define PPC64 +#else +#ifdef __powerpc__ +#define PPC +#endif +#endif +#ifdef __ia64__ +#define IA64 +#endif +#ifdef __s390__ +#define S390 +#endif +#ifdef __s390x__ +#define S390X +#endif +#ifdef __x86_64__ +#define X86_64 +#endif +#ifdef __arm__ +#define ARM +#endif +#ifdef __aarch64__ +#define ARM64 +#endif +#ifdef __mipsel__ +#define MIPS +#endif +#ifdef __sparc_v9__ +#define SPARC64 +#endif +#endif + +#ifdef X86 +#define NR_CPUS (256) +#endif +#ifdef X86_64 +#define NR_CPUS (8192) +#endif +#ifdef ALPHA +#define NR_CPUS (64) +#endif +#ifdef PPC +#define NR_CPUS (32) +#endif +#ifdef IA64 +#define NR_CPUS (4096) +#endif +#ifdef PPC64 +#define NR_CPUS (2048) +#endif +#ifdef S390 +#define NR_CPUS (512) +#endif +#ifdef S390X +#define NR_CPUS (512) +#endif +#ifdef ARM +#define NR_CPUS (32) +#endif +#ifdef ARM64 +#define NR_CPUS (4096) /* TBD */ +#endif +#ifdef MIPS +#define NR_CPUS (32) +#endif +#ifdef SPARC64 +#define NR_CPUS (4096) 
+#endif + +#define NR_DEVICE_DUMPS (64) + +/* Some architectures require memory accesses to be aligned. */ +#if defined(SPARC64) +#define NEED_ALIGNED_MEM_ACCESS +#endif + +#define BUFSIZE (1500) +#define NULLCHAR ('\0') + +#define MAXARGS (100) /* max number of arguments to one function */ +#define MAXARGLEN (40) /* max length of argument */ + +#define HIST_BLKSIZE (4096) + +static inline int string_exists(char *s) { return (s ? TRUE : FALSE); } +#define STREQ(A, B) (string_exists((char *)A) && string_exists((char *)B) && \ + (strcmp((char *)(A), (char *)(B)) == 0)) +#define STRNEQ(A, B) (string_exists((char *)A) && string_exists((char *)B) && \ + (strncmp((char *)(A), (char *)(B), strlen((char *)(B))) == 0)) +#define BZERO(S, N) (memset(S, NULLCHAR, N)) +#define BCOPY(S, D, C) (memcpy(D, S, C)) +#define BNEG(S, N) (memset(S, 0xff, N)) +#define BEEP() fprintf(stderr, "%c", 0x7) +#define LASTCHAR(s) (s[strlen(s)-1]) +#define FIRSTCHAR(s) (s[0]) +#define QUOTED_STRING(s) ((FIRSTCHAR(s) == '"') && (LASTCHAR(s) == '"')) +#define SINGLE_QUOTED_STRING(s) ((FIRSTCHAR(s) == '\'') && (LASTCHAR(s) == '\'')) +#define PATHEQ(A, B) ((A) && (B) && (pathcmp((char *)(A), (char *)(B)) == 0)) + +#ifdef roundup +#undef roundup +#endif +#define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) + +typedef uint64_t physaddr_t; + +#define PADDR_NOT_AVAILABLE (0x1ULL) +#define KCORE_USE_VADDR (-1ULL) + +typedef unsigned long long int ulonglong; +struct number_option { + ulong num; + ulonglong ll_num; + ulong retflags; +}; + +/* + * program_context flags + */ +#define LIVE_SYSTEM (0x1ULL) +#define TTY (0x2ULL) +#define RUNTIME (0x4ULL) +#define IN_FOREACH (0x8ULL) +#define MCLXCD (0x10ULL) +#define CMDLINE_IFILE (0x20ULL) +#define MFD_RDWR (0x40ULL) +#define KVMDUMP (0x80ULL) +#define SILENT (0x100ULL) +#define SADUMP (0x200ULL) +#define HASH (0x400ULL) +#define SCROLL (0x800ULL) +#define NO_CONSOLE (0x1000ULL) +#define RUNTIME_IFILE (0x2000ULL) +#define DROP_CORE (0x4000ULL) +#define LKCD 
(0x8000ULL) +#define GDB_INIT (0x10000ULL) +#define IN_GDB (0x20000ULL) +#define RCLOCAL_IFILE (0x40000ULL) +#define RCHOME_IFILE (0x80000ULL) +#define VMWARE_VMSS (0x100000ULL) +#define READLINE (0x200000ULL) +#define _SIGINT_ (0x400000ULL) +#define IN_RESTART (0x800000ULL) +#define KERNEL_DEBUG_QUERY (0x1000000ULL) +#define DEVMEM (0x2000000ULL) +#define REM_LIVE_SYSTEM (0x4000000ULL) +#define NAMELIST_LOCAL (0x8000000ULL) +#define LIVE_RAMDUMP (0x10000000ULL) +#define NAMELIST_SAVED (0x20000000ULL) +#define DUMPFILE_SAVED (0x40000000ULL) +#define UNLINK_NAMELIST (0x80000000ULL) +#define NAMELIST_UNLINKED (0x100000000ULL) +#define REM_MCLXCD (0x200000000ULL) +#define REM_LKCD (0x400000000ULL) +#define NAMELIST_NO_GZIP (0x800000000ULL) +#define UNLINK_MODULES (0x1000000000ULL) +#define S390D (0x2000000000ULL) +#define REM_S390D (0x4000000000ULL) +#define SYSRQ (0x8000000000ULL) +#define KDUMP (0x10000000000ULL) +#define NETDUMP (0x20000000000ULL) +#define REM_NETDUMP (0x40000000000ULL) +#define SYSMAP (0x80000000000ULL) +#define SYSMAP_ARG (0x100000000000ULL) +#define MEMMOD (0x200000000000ULL) +#define MODPRELOAD (0x400000000000ULL) +#define DISKDUMP (0x800000000000ULL) +#define DATADEBUG (0x1000000000000ULL) +#define FINDKERNEL (0x2000000000000ULL) +#define VERSION_QUERY (0x4000000000000ULL) +#define READNOW (0x8000000000000ULL) +#define NOCRASHRC (0x10000000000000ULL) +#define INIT_IFILE (0x20000000000000ULL) +#define XENDUMP (0x40000000000000ULL) +#define XEN_HYPER (0x80000000000000ULL) +#define XEN_CORE (0x100000000000000ULL) +#define PLEASE_WAIT (0x200000000000000ULL) +#define IFILE_ERROR (0x400000000000000ULL) +#define KERNTYPES (0x800000000000000ULL) +#define MINIMAL_MODE (0x1000000000000000ULL) +#define CRASHBUILTIN (0x2000000000000000ULL) +#define PRELOAD_EXTENSIONS \ + (0x4000000000000000ULL) +#define PROC_KCORE (0x8000000000000000ULL) + +#define ACTIVE() (pc->flags & LIVE_SYSTEM) +#define LOCAL_ACTIVE() ((pc->flags & (LIVE_SYSTEM|LIVE_RAMDUMP)) == 
LIVE_SYSTEM) +#define DUMPFILE() (!(pc->flags & LIVE_SYSTEM)) +#define LIVE() (pc->flags2 & LIVE_DUMP || pc->flags & LIVE_SYSTEM) +#define MEMORY_SOURCES (NETDUMP|KDUMP|MCLXCD|LKCD|DEVMEM|S390D|MEMMOD|DISKDUMP|XENDUMP|CRASHBUILTIN|KVMDUMP|PROC_KCORE|SADUMP|VMWARE_VMSS|LIVE_RAMDUMP) +#define DUMPFILE_TYPES (DISKDUMP|NETDUMP|KDUMP|MCLXCD|LKCD|S390D|XENDUMP|KVMDUMP|SADUMP|VMWARE_VMSS|LIVE_RAMDUMP) +#define REMOTE() (pc->flags2 & REMOTE_DAEMON) +#define REMOTE_ACTIVE() (pc->flags & REM_LIVE_SYSTEM) +#define REMOTE_DUMPFILE() \ + (pc->flags & (REM_NETDUMP|REM_MCLXCD|REM_LKCD|REM_S390D)) +#define REMOTE_MEMSRC() (REMOTE_ACTIVE() || REMOTE_PAUSED() || REMOTE_DUMPFILE()) +#define LKCD_DUMPFILE() (pc->flags & (LKCD|REM_LKCD)) +#define NETDUMP_DUMPFILE() (pc->flags & (NETDUMP|REM_NETDUMP)) +#define DISKDUMP_DUMPFILE() (pc->flags & DISKDUMP) +#define KDUMP_DUMPFILE() (pc->flags & KDUMP) +#define XENDUMP_DUMPFILE() (pc->flags & XENDUMP) +#define XEN_HYPER_MODE() (pc->flags & XEN_HYPER) +#define SYSRQ_TASK(X) ((pc->flags & SYSRQ) && is_task_active(X)) +#define XEN_CORE_DUMPFILE() (pc->flags & XEN_CORE) +#define LKCD_KERNTYPES() (pc->flags & KERNTYPES) +#define KVMDUMP_DUMPFILE() (pc->flags & KVMDUMP) +#define SADUMP_DUMPFILE() (pc->flags & SADUMP) +#define VMSS_DUMPFILE() (pc->flags & VMWARE_VMSS) +#define QEMU_MEM_DUMP_NO_VMCOREINFO() \ + ((pc->flags2 & (QEMU_MEM_DUMP_ELF|QEMU_MEM_DUMP_COMPRESSED)) && !(pc->flags2 & VMCOREINFO)) + + +#define NETDUMP_LOCAL (0x1) /* netdump_data flags */ +#define NETDUMP_REMOTE (0x2) +#define VMCORE_VALID() (nd->flags & (NETDUMP_LOCAL|NETDUMP_REMOTE|KDUMP_LOCAL)) +#define NETDUMP_ELF32 (0x4) +#define NETDUMP_ELF64 (0x8) +#define PARTIAL_DUMP (0x10) /* netdump or diskdump */ +#define KDUMP_ELF32 (0x20) +#define KDUMP_ELF64 (0x40) +#define KDUMP_LOCAL (0x80) +#define KCORE_LOCAL (0x100) +#define KCORE_ELF32 (0x200) +#define KCORE_ELF64 (0x400) +#define QEMU_MEM_DUMP_KDUMP_BACKUP \ + (0x800) +#define KVMDUMP_LOCAL (0x1) +#define KVMDUMP_VALID() 
(kvm->flags & (KVMDUMP_LOCAL)) + +#define DUMPFILE_FORMAT(flags) ((flags) & \ + (NETDUMP_ELF32|NETDUMP_ELF64|KDUMP_ELF32|KDUMP_ELF64)) + +#define DISKDUMP_LOCAL (0x1) +#define KDUMP_CMPRS_LOCAL (0x2) +#define ERROR_EXCLUDED (0x4) +#define ZERO_EXCLUDED (0x8) +#define DUMPFILE_SPLIT (0x10) +#define NO_ELF_NOTES (0x20) +#define LZO_SUPPORTED (0x40) +#define SNAPPY_SUPPORTED (0x80) +#define DISKDUMP_VALID() (dd->flags & DISKDUMP_LOCAL) +#define KDUMP_CMPRS_VALID() (dd->flags & KDUMP_CMPRS_LOCAL) +#define KDUMP_SPLIT() (dd->flags & DUMPFILE_SPLIT) + +#define XENDUMP_LOCAL (0x1) +#define XENDUMP_VALID() (xd->flags & XENDUMP_LOCAL) + +#define SADUMP_LOCAL (0x1) +#define SADUMP_DISKSET (0x2) +#define SADUMP_MEDIA (0x4) +#define SADUMP_ZERO_EXCLUDED (0x8) +#define SADUMP_KDUMP_BACKUP (0x10) +#define SADUMP_VALID() (sd->flags & SADUMP_LOCAL) + +#define CRASHDEBUG(x) (pc->debug >= (x)) + +#define CRASHDEBUG_SUSPEND(X) { pc->debug_save = pc->debug; pc->debug = X; } +#define CRASHDEBUG_RESTORE() { pc->debug = pc->debug_save; } + +#define VERBOSE (0x1) +#define ADDRESS_SPECIFIED (0x2) + +#define FAULT_ON_ERROR (0x1) +#define RETURN_ON_ERROR (0x2) +#define QUIET (0x4) +#define HEX_BIAS (0x8) +#define LONG_LONG (0x10) +#define RETURN_PARTIAL (0x20) +#define NO_DEVMEM_SWITCH (0x40) + +#define SEEK_ERROR (-1) +#define READ_ERROR (-2) +#define WRITE_ERROR (-3) +#define PAGE_EXCLUDED (-4) + +#define RESTART() (longjmp(pc->main_loop_env, 1)) +#define RESUME_FOREACH() (longjmp(pc->foreach_loop_env, 1)) + +#define INFO (1) +#define FATAL (2) +#define FATAL_RESTART (3) +#define WARNING (4) +#define NOTE (5) +#define CONT (6) +#define FATAL_ERROR(x) (((x) == FATAL) || ((x) == FATAL_RESTART)) + +#define CONSOLE_OFF(x) ((x) = console_off()) +#define CONSOLE_ON(x) (console_on(x)) + +#define RADIX(X) (X) + +#define NUM_HEX (0x1) +#define NUM_DEC (0x2) +#define NUM_EXPR (0x4) +#define NUM_ANY (NUM_HEX|NUM_DEC|NUM_EXPR) + +/* + * program context redirect flags + */ +#define FROM_COMMAND_LINE 
(0x1) +#define FROM_INPUT_FILE (0x2) +#define REDIRECT_NOT_DONE (0x4) +#define REDIRECT_TO_PIPE (0x8) +#define REDIRECT_TO_STDPIPE (0x10) +#define REDIRECT_TO_FILE (0x20) +#define REDIRECT_FAILURE (0x40) +#define REDIRECT_SHELL_ESCAPE (0x80) +#define REDIRECT_SHELL_COMMAND (0x100) +#define REDIRECT_PID_KNOWN (0x200) +#define REDIRECT_MULTI_PIPE (0x400) + +#define PIPE_OPTIONS (FROM_COMMAND_LINE | FROM_INPUT_FILE | REDIRECT_TO_PIPE | \ + REDIRECT_TO_STDPIPE | REDIRECT_TO_FILE) + +#define DEFAULT_REDHAT_DEBUG_LOCATION "/usr/lib/debug/lib/modules" + +#define MEMORY_DRIVER_MODULE "crash" +#define MEMORY_DRIVER_DEVICE "/dev/crash" +#define MEMORY_DRIVER_DEVICE_MODE (S_IFCHR|S_IRUSR) + +/* + * structure definitions + */ +struct program_context { + char *program_name; /* this program's name */ + char *program_path; /* unadulterated argv[0] */ + char *program_version; /* this program's version */ + char *gdb_version; /* embedded gdb version */ + char *prompt; /* this program's prompt */ + unsigned long long flags; /* flags from above */ + char *namelist; /* linux namelist */ + char *dumpfile; /* dumpfile or /dev/kmem */ + char *live_memsrc; /* live memory driver */ + char *system_map; /* get symbol values from System.map */ + char *namelist_debug; /* namelist containing debug data */ + char *debuginfo_file; /* separate debuginfo file */ + char *memory_module; /* alternative to mem.c driver */ + char *memory_device; /* alternative to /dev/[k]mem device */ + char *machine_type; /* machine's processor type */ + char *editing_mode; /* readline vi or emacs */ + char *server; /* network daemon */ + char *server_memsrc; /* memory source on server */ + char *server_namelist; /* kernel namelist on server */ + int nfd; /* linux namelist fd */ + int mfd; /* /dev/mem fd */ + int kfd; /* /dev/kmem fd */ + int dfd; /* dumpfile fd */ + int confd; /* console fd */ + int sockfd; /* network daemon socket */ + ushort port; /* network daemon port */ + int rmfd; /* remote server memory source 
fd */ + int rkfd; /* remote server /dev/kmem fd */ + ulong program_pid; /* program pid */ + ulong server_pid; /* server pid */ + ulong rcvbufsize; /* client-side receive buffer size */ + char *home; /* user's home directory */ + char command_line[BUFSIZE]; /* possibly parsed input command line */ + char orig_line[BUFSIZE]; /* original input line */ + char *readline; /* pointer to last readline() return */ + char my_tty[10]; /* real tty name (shown by ps -ef) */ + ulong debug; /* level of debug */ + ulong debug_save; /* saved level for debug-suspend */ + char *console; /* current debug console device */ + char *redhat_debug_loc; /* location of matching debug objects */ + int pipefd[2]; /* output pipe file descriptors */ + FILE *nullfp; /* bitbucket */ + FILE *stdpipe; /* standard pipe for output */ + FILE *pipe; /* command line specified pipe */ + FILE *ofile; /* command line specified output file */ + FILE *ifile; /* command line specified input file */ + FILE *ifile_pipe; /* output pipe specified from file */ + FILE *ifile_ofile; /* output file specified from file */ + FILE *symfile; /* symbol table data file */ + FILE *symfile2; /* alternate access to above */ + FILE *tmpfile; /* tmpfile for selective data output */ + FILE *saved_fp; /* for printing while parsing tmpfile */ + FILE *tmp_fp; /* stored tmpfile pointer */ + char *input_file; /* input file specified at invocation */ + FILE *tmpfile2; /* tmpfile2 does not use save_fp! 
*/ + int eoc_index; /* end of redirected command index */ + int scroll_command; /* default scroll command for output */ +#define SCROLL_NONE 0 +#define SCROLL_LESS 1 +#define SCROLL_MORE 2 +#define SCROLL_CRASHPAGER 3 + ulong redirect; /* per-cmd origin and output flags */ + pid_t stdpipe_pid; /* per-cmd standard output pipe's pid */ + pid_t pipe_pid; /* per-cmd output pipe's pid */ + pid_t pipe_shell_pid; /* per-cmd output pipe's shell pid */ + char pipe_command[BUFSIZE]; /* pipe command line */ + struct command_table_entry *cmd_table; /* linux/xen command table */ + char *curcmd; /* currently-executing command */ + char *lastcmd; /* previously-executed command */ + ulong cmdgencur; /* current command generation number */ + ulong curcmd_flags; /* general purpose per-command flag */ +#define XEN_MACHINE_ADDR (0x1) +#define REPEAT (0x2) +#define IDLE_TASK_SHOWN (0x4) +#define TASK_SPECIFIED (0x8) +#define MEMTYPE_UVADDR (0x10) +#define MEMTYPE_FILEADDR (0x20) +#define HEADER_PRINTED (0x40) +#define BAD_INSTRUCTION (0x80) +#define UD2A_INSTRUCTION (0x100) +#define IRQ_IN_USE (0x200) +#define NO_MODIFY (0x400) +#define IGNORE_ERRORS (0x800) +#define FROM_RCFILE (0x1000) +#define MEMTYPE_KVADDR (0x2000) +#define MOD_SECTIONS (0x4000) +#define MOD_READNOW (0x8000) +#define MM_STRUCT_FORCE (0x10000) +#define CPUMASK (0x20000) +#define PARTIAL_READ_OK (0x40000) + ulonglong curcmd_private; /* general purpose per-command info */ + int cur_gdb_cmd; /* current gdb command */ + int last_gdb_cmd; /* previously-executed gdb command */ + int sigint_cnt; /* number of ignored SIGINTs */ + struct gnu_request *cur_req; /* current gdb gnu_request */ + struct sigaction sigaction; /* general usage sigaction. */ + struct sigaction gdb_sigaction; /* gdb's SIGINT sigaction. 
*/ + jmp_buf main_loop_env; /* longjmp target default */ + jmp_buf foreach_loop_env; /* longjmp target within foreach */ + jmp_buf gdb_interface_env; /* longjmp target for gdb error catch */ + struct termios termios_orig; /* non-raw settings */ + struct termios termios_raw; /* while gathering command input */ + int ncmds; /* number of commands in menu */ + char **cmdlist; /* current list of available commands */ + int cmdlistsz; /* space available in cmdlist */ + unsigned output_radix; /* current gdb output_radix */ + void *sbrk; /* current sbrk value */ + struct extension_table *curext; /* extension being loaded */ + int (*readmem)(int, void *, int, ulong, physaddr_t); /* memory access */ + int (*writemem)(int, void *, int, ulong, physaddr_t);/* memory access */ + ulong ifile_in_progress; /* original xxx_IFILE flags */ + off_t ifile_offset; /* current offset into input file */ + char *runtime_ifile_cmd; /* runtime command using input file */ + char *kvmdump_mapfile; /* storage of physical to file offsets */ + ulonglong flags2; /* flags overrun */ +#define FLAT (0x01ULL) +#define ELF_NOTES (0x02ULL) +#define GET_OSRELEASE (0x04ULL) +#define REMOTE_DAEMON (0x08ULL) +#define ERASEINFO_DATA (0x10ULL) +#define GDB_CMD_MODE (0x20ULL) +#define LIVE_DUMP (0x40ULL) +#define FLAT_FORMAT() (pc->flags2 & FLAT) +#define ELF_NOTES_VALID() (pc->flags2 & ELF_NOTES) +#define RADIX_OVERRIDE (0x80ULL) +#define QEMU_MEM_DUMP_ELF (0x100ULL) +#define GET_LOG (0x200ULL) +#define VMCOREINFO (0x400ULL) +#define ALLOW_FP (0x800ULL) +#define REM_PAUSED_F (0x1000ULL) +#define RAMDUMP (0x2000ULL) +#define REMOTE_PAUSED() (pc->flags2 & REM_PAUSED_F) +#define OFFLINE_HIDE (0x4000ULL) +#define INCOMPLETE_DUMP (0x8000ULL) +#define is_incomplete_dump() (pc->flags2 & INCOMPLETE_DUMP) +#define QEMU_MEM_DUMP_COMPRESSED (0x10000ULL) +#define SNAP (0x20000ULL) +#define EXCLUDED_VMEMMAP (0x40000ULL) +#define is_excluded_vmemmap() (pc->flags2 & EXCLUDED_VMEMMAP) +#define MEMSRC_LOCAL (0x80000ULL) 
+#define REDZONE (0x100000ULL) + char *cleanup; + char *namelist_orig; + char *namelist_debug_orig; + FILE *args_ifile; /* per-command args input file */ + void (*cmd_cleanup)(void *); /* per-command cleanup function */ + void *cmd_cleanup_arg; /* optional cleanup function argument */ + ulong scope; /* optional text context address */ + ulong nr_hash_queues; /* hash queue head count */ + char *(*read_vmcoreinfo)(const char *); + FILE *error_fp; /* error() message direction */ + char *error_path; /* stderr path information */ +}; + +#define READMEM pc->readmem + +typedef void (*cmd_func_t)(void); + +struct command_table_entry { /* one for each command in menu */ + char *name; + cmd_func_t func; + char **help_data; + ulong flags; +}; + +struct args_input_file { + int index; + int args_used; + int is_gdb_cmd; + int in_expression; + int start; + int resume; + char *fileptr; +}; + +#define REFRESH_TASK_TABLE (0x1) /* command_table_entry flags */ +#define HIDDEN_COMMAND (0x2) +#define CLEANUP (0x4) /* for extensions only */ +#define MINIMAL (0x8) + +/* + * A linked list of extension table structures keeps track of the current + * set of shared library extensions. 
+ */ +struct extension_table { + void *handle; /* handle from dlopen() */ + char *filename; /* name of shared library */ + struct command_table_entry *command_table; /* list of commands */ + ulong flags; /* registration flags */ + struct extension_table *next, *prev; /* bookkeeping */ +}; + +#define REGISTERED (0x1) /* extension_table flags */ +#define DUPLICATE_COMMAND_NAME (0x2) +#define NO_MINIMAL_COMMANDS (0x4) + +struct new_utsname { + char sysname[65]; + char nodename[65]; + char release[65]; + char version[65]; + char machine[65]; + char domainname[65]; +}; + +#define NO_MODULE_ACCESS (0x1) +#define TVEC_BASES_V1 (0x2) +#define GCC_3_2 (0x4) +#define GCC_3_2_3 (0x8) +#define GCC_2_96 (0x10) +#define RA_SEEK (0x20) +#define NO_RA_SEEK (0x40) +#define KALLSYMS_V1 (0x80) +#define NO_KALLSYMS (0x100) +#define PER_CPU_OFF (0x200) +#define SMP (0x400) +#define GCC_3_3_2 (0x800) +#define KMOD_V1 (0x1000) +#define KMOD_V2 (0x2000) +#define KALLSYMS_V2 (0x2000) +#define TVEC_BASES_V2 (0x4000) +#define GCC_3_3_3 (0x8000) +#define USE_OLD_BT (0x10000) +#define USE_OPT_BT (0x10000) +#define ARCH_XEN (0x20000) +#define NO_IKCONFIG (0x40000) +#define DWARF_UNWIND (0x80000) +#define NO_DWARF_UNWIND (0x100000) +#define DWARF_UNWIND_MEMORY (0x200000) +#define DWARF_UNWIND_EH_FRAME (0x400000) +#define DWARF_UNWIND_CAPABLE (DWARF_UNWIND_MEMORY|DWARF_UNWIND_EH_FRAME) +#define DWARF_UNWIND_MODULES (0x800000) +#define BUGVERBOSE_OFF (0x1000000) +#define RELOC_SET (0x2000000) +#define RELOC_FORCE (0x4000000) +#define ARCH_OPENVZ (0x8000000) +#define ARCH_PVOPS (0x10000000) +#define PRE_KERNEL_INIT (0x20000000) +#define ARCH_PVOPS_XEN (0x40000000) + +#define GCC_VERSION_DEPRECATED (GCC_3_2|GCC_3_2_3|GCC_2_96|GCC_3_3_2|GCC_3_3_3) + +/* flags2 */ +#define RELOC_AUTO (0x1ULL) +#define KASLR (0x2ULL) +#define KASLR_CHECK (0x4ULL) +#define GET_TIMESTAMP (0x8ULL) +#define TVEC_BASES_V3 (0x10ULL) +#define TIMER_BASES (0x20ULL) +#define IRQ_DESC_TREE_RADIX (0x40ULL) +#define 
IRQ_DESC_TREE_XARRAY (0x80ULL) + +#define XEN() (kt->flags & ARCH_XEN) +#define OPENVZ() (kt->flags & ARCH_OPENVZ) +#define PVOPS() (kt->flags & ARCH_PVOPS) +#define PVOPS_XEN() (kt->flags & ARCH_PVOPS_XEN) + +#define XEN_MACHINE_TO_MFN(m) ((ulonglong)(m) >> PAGESHIFT()) +#define XEN_PFN_TO_PSEUDO(p) ((ulonglong)(p) << PAGESHIFT()) + +#define XEN_MFN_NOT_FOUND (~0UL) +#define XEN_PFNS_PER_PAGE (PAGESIZE()/sizeof(ulong)) +#define XEN_FOREIGN_FRAME (1UL << (BITS()-1)) + +#define XEN_MACHADDR_NOT_FOUND (~0ULL) + +#define XEN_P2M_PER_PAGE (PAGESIZE() / sizeof(unsigned long)) +#define XEN_P2M_MID_PER_PAGE (PAGESIZE() / sizeof(unsigned long *)) +#define XEN_P2M_TOP_PER_PAGE (PAGESIZE() / sizeof(unsigned long **)) + +struct kernel_table { /* kernel data */ + ulong flags; + ulong stext; + ulong etext; + ulong stext_init; + ulong etext_init; + ulong init_begin; + ulong init_end; + ulong end; + int cpus; + char *cpus_override; + void (*display_bh)(void); + ulong module_list; + ulong kernel_module; + int mods_installed; + struct timespec date; + char proc_version[BUFSIZE]; + struct new_utsname utsname; + uint kernel_version[3]; + uint gcc_version[3]; + int runq_siblings; + int kernel_NR_CPUS; + long __per_cpu_offset[NR_CPUS]; + long *__rq_idx; + long *__cpu_idx; + ulong *cpu_flags; +#define POSSIBLE (0x1) +#define PRESENT (0x2) +#define ONLINE (0x4) +#define NMI (0x8) +#define POSSIBLE_MAP (POSSIBLE) +#define PRESENT_MAP (PRESENT) +#define ONLINE_MAP (ONLINE) +#define ACTIVE_MAP (0x10) + int BUG_bytes; + ulong xen_flags; +#define WRITABLE_PAGE_TABLES (0x1) +#define SHADOW_PAGE_TABLES (0x2) +#define CANONICAL_PAGE_TABLES (0x4) +#define XEN_SUSPEND (0x8) + char *m2p_page; + ulong phys_to_machine_mapping; + ulong p2m_table_size; +#define P2M_MAPPING_CACHE (512) + struct p2m_mapping_cache { + ulong mapping; + ulong pfn; + ulong start; + ulong end; + } p2m_mapping_cache[P2M_MAPPING_CACHE]; +#define P2M_MAPPING_PAGE_PFN(c) \ + (PVOPS_XEN() ? 
kt->p2m_mapping_cache[c].pfn : \ + (((kt->p2m_mapping_cache[c].mapping - kt->phys_to_machine_mapping)/PAGESIZE()) \ + * XEN_PFNS_PER_PAGE)) + ulong last_mapping_read; + ulong p2m_cache_index; + ulong p2m_pages_searched; + ulong p2m_mfn_cache_hits; + ulong p2m_page_cache_hits; + ulong relocate; + char *module_tree; + struct pvops_xen_info { + int p2m_top_entries; + ulong p2m_top; + ulong p2m_mid_missing; + ulong p2m_missing; + } pvops_xen; + int highest_irq; +#define IKCONFIG_AVAIL 0x1 /* kernel contains ikconfig data */ +#define IKCONFIG_LOADED 0x2 /* ikconfig data is currently loaded */ + int ikconfig_flags; + int ikconfig_ents; + char *hypervisor; + struct vmcoreinfo_data { + ulong log_buf_SYMBOL; + ulong log_end_SYMBOL; + ulong log_buf_len_SYMBOL; + ulong logged_chars_SYMBOL; + ulong log_first_idx_SYMBOL; + ulong log_next_idx_SYMBOL; + long log_SIZE; + long log_ts_nsec_OFFSET; + long log_len_OFFSET; + long log_text_len_OFFSET; + long log_dict_len_OFFSET; + ulong phys_base_SYMBOL; + ulong _stext_SYMBOL; + } vmcoreinfo; + ulonglong flags2; + char *source_tree; +}; + +/* + * Aid for the two versions of the kernel's module list linkage. 
+ */ +#define NEXT_MODULE(next_module, modbuf) \ +{ \ + switch (kt->flags & (KMOD_V1|KMOD_V2)) \ + { \ + case KMOD_V1: \ + next_module = ULONG(modbuf + OFFSET(module_next)); \ + break; \ + case KMOD_V2: \ + next_module = ULONG(modbuf + OFFSET(module_list)); \ + if (next_module != kt->kernel_module) \ + next_module -= OFFSET(module_list); \ + break; \ + } \ +} + +#define THIS_KERNEL_VERSION ((kt->kernel_version[0] << 16) + \ + (kt->kernel_version[1] << 8) + \ + (kt->kernel_version[2])) +#define LINUX(x,y,z) (((uint)(x) << 16) + ((uint)(y) << 8) + (uint)(z)) + +#define THIS_GCC_VERSION ((kt->gcc_version[0] << 16) + \ + (kt->gcc_version[1] << 8) + \ + (kt->gcc_version[2])) +#define GCC(x,y,z) (((uint)(x) << 16) + ((uint)(y) << 8) + (uint)(z)) + +#define IS_KERNEL_STATIC_TEXT(x) (((ulong)(x) >= kt->stext) && \ + ((ulong)(x) < kt->etext)) + +#define TASK_COMM_LEN 16 /* task command name length including NULL */ + +struct task_context { /* context stored for each task */ + ulong task; + ulong thread_info; + ulong pid; + char comm[TASK_COMM_LEN+1]; + int processor; + ulong ptask; + ulong mm_struct; + struct task_context *tc_next; +}; + +struct tgid_context { /* tgid and task stored for each task */ + ulong tgid; + ulong task; +}; + +struct task_table { /* kernel/local task table data */ + struct task_context *current; + struct task_context *context_array; + void (*refresh_task_table)(void); + ulong flags; + ulong task_start; + ulong task_end; + void *task_local; + int max_tasks; + int nr_threads; + ulong running_tasks; + ulong retries; + ulong panicmsg; + int panic_processor; + ulong *idle_threads; + ulong *panic_threads; + ulong *active_set; + ulong *panic_ksp; + ulong *hardirq_ctx; + ulong *hardirq_tasks; + ulong *softirq_ctx; + ulong *softirq_tasks; + ulong panic_task; + ulong this_task; + int pidhash_len; + ulong pidhash_addr; + ulong last_task_read; + ulong last_thread_info_read; + ulong last_mm_read; + char *task_struct; + char *thread_info; + char *mm_struct; + 
ulong init_pid_ns; + struct tgid_context *tgid_array; + struct tgid_context *last_tgid; + ulong tgid_searches; + ulong tgid_cache_hits; + long filepages; + long anonpages; + ulong stack_end_magic; + ulong pf_kthread; + ulong pid_radix_tree; + int callbacks; + struct task_context **context_by_task; /* task_context sorted by task addr */ + ulong pid_xarray; +}; + +#define TASK_INIT_DONE (0x1) +#define TASK_ARRAY_EXISTS (0x2) +#define PANIC_TASK_NOT_FOUND (0x4) +#define TASK_REFRESH (0x8) +#define TASK_REFRESH_OFF (0x10) +#define PANIC_KSP (0x20) +#define ACTIVE_SET (0x40) +#define POPULATE_PANIC (0x80) +#define PIDHASH (0x100) +#define PID_HASH (0x200) +#define THREAD_INFO (0x400) +#define IRQSTACKS (0x800) +#define TIMESPEC (0x1000) +#define NO_TIMESPEC (0x2000) +#define ACTIVE_ONLY (0x4000) +#define START_TIME_NSECS (0x8000) +#define THREAD_INFO_IN_TASK (0x10000) +#define PID_RADIX_TREE (0x20000) +#define INDEXED_CONTEXTS (0x40000) +#define PID_XARRAY (0x80000) + +#define TASK_SLUSH (20) + +#define NO_PROC_ID 0xFF /* No processor magic marker (from kernel) */ + +/* + * Global "tt" points to task_table + */ +#define CURRENT_CONTEXT() (tt->current) +#define CURRENT_TASK() (tt->current->task) +#define CURRENT_PID() (tt->current->pid) +#define CURRENT_COMM() (tt->current->comm) +#define RUNNING_TASKS() (tt->running_tasks) +#define FIRST_CONTEXT() (tt->context_array) + +#define NO_PID ((ulong)-1) +#define NO_TASK (0) + +#define IS_TASK_ADDR(X) (machdep->is_task_addr(X)) +#define GET_STACKBASE(X) (machdep->get_stackbase(X)) +#define GET_STACKTOP(X) (machdep->get_stacktop(X)) +#define STACKSIZE() (machdep->stacksize) +#define LONGS_PER_STACK (machdep->stacksize/sizeof(ulong)) + +#define INSTACK(X,BT) \ + (((ulong)(X) >= (BT)->stackbase) && ((ulong)(X) < (BT)->stacktop)) + +#define ALIGNED_STACK_OFFSET(task) ((ulong)(task) & (STACKSIZE()-1)) + +#define BITS() (machdep->bits) +#define BITS32() (machdep->bits == 32) +#define BITS64() (machdep->bits == 64) +#define 
IS_KVADDR(X) (machdep->is_kvaddr(X)) +#define IS_UVADDR(X,C) (machdep->is_uvaddr(X,C)) + +#define PID_ALIVE(x) (kill(x, 0) == 0) + +struct kernel_list_head { + struct kernel_list_head *next, *prev; +}; + +struct stack_hook { + ulong esp; + ulong eip; +}; + +struct bt_info { + ulong task; + ulonglong flags; + ulong instptr; + ulong stkptr; + ulong bptr; + ulong stackbase; + ulong stacktop; + char *stackbuf; + struct task_context *tc; + struct stack_hook *hp; + struct stack_hook *textlist; + struct reference *ref; + ulong frameptr; + char *call_target; + void *machdep; + ulong debug; + ulong eframe_ip; + ulong radix; + ulong *cpumask; +}; + +#define STACK_OFFSET_TYPE(OFF) \ + (((ulong)(OFF) > STACKSIZE()) ? \ + (ulong)((ulong)(OFF) - (ulong)(bt->stackbase)) : (ulong)(OFF)) + +#define GET_STACK_ULONG(OFF) \ + *((ulong *)((char *)(&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(OFF))]))) + +#define GET_STACK_DATA(OFF, LOC, SZ) memcpy((void *)(LOC), \ + (void *)(&bt->stackbuf[(ulong)STACK_OFFSET_TYPE(OFF)]), (size_t)(SZ)) + +struct machine_specific; /* uniquely defined below each machine's area */ +struct xendump_data; +struct xen_kdump_data; + +struct vaddr_range { + ulong start; + ulong end; + ulong type; +#define KVADDR_UNITY_MAP (1) +#define KVADDR_VMALLOC (2) +#define KVADDR_VMEMMAP (3) +#define KVADDR_START_MAP (4) +#define KVADDR_MODULES (5) +#define MAX_KVADDR_RANGES KVADDR_MODULES +}; + +#define MAX_MACHDEP_ARGS 5 /* for --machdep/-m machine-specific args */ + +struct machdep_table { + ulong flags; + ulong kvbase; + ulong identity_map_base; + uint pagesize; + uint pageshift; + ulonglong pagemask; + ulong pageoffset; + ulong stacksize; + uint hz; + ulong mhz; + int bits; + int nr_irqs; + uint64_t memsize; + int (*eframe_search)(struct bt_info *); + void (*back_trace)(struct bt_info *); + ulong (*processor_speed)(void); + int (*uvtop)(struct task_context *, ulong, physaddr_t *, int); + int (*kvtop)(struct task_context *, ulong, physaddr_t *, int); + ulong 
(*get_task_pgd)(ulong); + void (*dump_irq)(int); + void (*get_stack_frame)(struct bt_info *, ulong *, ulong *); + ulong (*get_stackbase)(ulong); + ulong (*get_stacktop)(ulong); + int (*translate_pte)(ulong, void *, ulonglong); + uint64_t (*memory_size)(void); + ulong (*vmalloc_start)(void); + int (*is_task_addr)(ulong); + int (*verify_symbol)(const char *, ulong, char); + int (*dis_filter)(ulong, char *, unsigned int); + int (*get_smp_cpus)(void); + int (*is_kvaddr)(ulong); + int (*is_uvaddr)(ulong, struct task_context *); + int (*verify_paddr)(uint64_t); + void (*cmd_mach)(void); + void (*init_kernel_pgd)(void); + struct syment *(*value_to_symbol)(ulong, ulong *); + struct line_number_hook { + char *func; + char **file; + } *line_number_hooks; + ulong last_pgd_read; + ulong last_pud_read; + ulong last_pmd_read; + ulong last_ptbl_read; + char *pgd; + char *pud; + char *pmd; + char *ptbl; + int ptrs_per_pgd; + char *cmdline_args[MAX_MACHDEP_ARGS]; + struct machine_specific *machspec; + ulong section_size_bits; + ulong max_physmem_bits; + ulong sections_per_root; + int (*xendump_p2m_create)(struct xendump_data *); + ulong (*xendump_panic_task)(struct xendump_data *); + void (*get_xendump_regs)(struct xendump_data *, struct bt_info *, ulong *, ulong *); + void (*clear_machdep_cache)(void); + int (*xen_kdump_p2m_create)(struct xen_kdump_data *); + int (*in_alternate_stack)(int, ulong); + void (*dumpfile_init)(int, void *); + void (*process_elf_notes)(void *, unsigned long); + int (*get_kvaddr_ranges)(struct vaddr_range *); + int (*verify_line_number)(ulong, ulong, ulong); + void (*get_irq_affinity)(int); + void (*show_interrupts)(int, ulong *); + int (*is_page_ptr)(ulong, physaddr_t *); +}; + +/* + * Processor-common flags; processor-specific flags use the lower bits + * as defined in their processor-specific files below. (see KSYMS_START defs). 
+ */ +#define HWRESET (0x80000000) +#define OMIT_FRAME_PTR (0x40000000) +#define FRAMESIZE_DEBUG (0x20000000) +#define MACHDEP_BT_TEXT (0x10000000) +#define DEVMEMRD (0x8000000) +#define INIT (0x4000000) +#define VM_4_LEVEL (0x2000000) +#define MCA (0x1000000) +#define PAE (0x800000) +#define VMEMMAP (0x400000) + +extern struct machdep_table *machdep; + +#ifndef HZ +#define HZ sysconf(_SC_CLK_TCK) +#endif + +#define IS_LAST_PGD_READ(pgd) ((ulong)(pgd) == machdep->last_pgd_read) +#define IS_LAST_PMD_READ(pmd) ((ulong)(pmd) == machdep->last_pmd_read) +#define IS_LAST_PTBL_READ(ptbl) ((ulong)(ptbl) == machdep->last_ptbl_read) +#define IS_LAST_PUD_READ(pud) ((ulong)(pud) == machdep->last_pud_read) + +#define FILL_PGD(PGD, TYPE, SIZE) \ + if (!IS_LAST_PGD_READ(PGD)) { \ + readmem((ulonglong)((ulong)(PGD)), TYPE, machdep->pgd, \ + SIZE, "pgd page", FAULT_ON_ERROR); \ + machdep->last_pgd_read = (ulong)(PGD); \ + } + +#define FILL_PUD(PUD, TYPE, SIZE) \ + if (!IS_LAST_PUD_READ(PUD)) { \ + readmem((ulonglong)((ulong)(PUD)), TYPE, machdep->pud, \ + SIZE, "pud page", FAULT_ON_ERROR); \ + machdep->last_pud_read = (ulong)(PUD); \ + } + +#define FILL_PMD(PMD, TYPE, SIZE) \ + if (!IS_LAST_PMD_READ(PMD)) { \ + readmem((ulonglong)(PMD), TYPE, machdep->pmd, \ + SIZE, "pmd page", FAULT_ON_ERROR); \ + machdep->last_pmd_read = (ulong)(PMD); \ + } + +#define FILL_PTBL(PTBL, TYPE, SIZE) \ + if (!IS_LAST_PTBL_READ(PTBL)) { \ + readmem((ulonglong)(PTBL), TYPE, machdep->ptbl, \ + SIZE, "page table", FAULT_ON_ERROR); \ + machdep->last_ptbl_read = (ulong)(PTBL); \ + } + +#define SETUP_ENV (0) +#define PRE_SYMTAB (1) +#define PRE_GDB (2) +#define POST_GDB (3) +#define POST_INIT (4) +#define POST_VM (5) +#define LOG_ONLY (6) +#define POST_RELOC (7) + +#define FOREACH_BT (1) +#define FOREACH_VM (2) +#define FOREACH_TASK (3) +#define FOREACH_SET (4) +#define FOREACH_FILES (5) +#define FOREACH_NET (6) +#define FOREACH_TEST (7) +#define FOREACH_VTOP (8) +#define FOREACH_SIG (9) +#define FOREACH_PS 
(10) + +#define MAX_FOREACH_KEYWORDS (10) +#define MAX_FOREACH_TASKS (50) +#define MAX_FOREACH_PIDS (50) +#define MAX_FOREACH_COMMS (50) +#define MAX_FOREACH_ARGS (50) +#define MAX_REGEX_ARGS (10) + +#define FOREACH_CMD (0x1) +#define FOREACH_r_FLAG (0x2) +#define FOREACH_s_FLAG (0x4) +#define FOREACH_S_FLAG (0x8) +#define FOREACH_i_FLAG (0x10) +#define FOREACH_e_FLAG (0x20) +#define FOREACH_g_FLAG (0x40) +#define FOREACH_l_FLAG (0x80) +#define FOREACH_p_FLAG (0x100) +#define FOREACH_t_FLAG (0x200) +#define FOREACH_u_FLAG (0x400) +#define FOREACH_m_FLAG (0x800) +#define FOREACH_v_FLAG (0x1000) +#define FOREACH_KERNEL (0x2000) +#define FOREACH_USER (0x4000) +#define FOREACH_SPECIFIED (0x8000) +#define FOREACH_ACTIVE (0x10000) +#define FOREACH_k_FLAG (0x20000) +#define FOREACH_c_FLAG (0x40000) +#define FOREACH_f_FLAG (0x80000) +#define FOREACH_o_FLAG (0x100000) +#define FOREACH_T_FLAG (0x200000) +#define FOREACH_F_FLAG (0x400000) +#define FOREACH_x_FLAG (0x800000) +#define FOREACH_d_FLAG (0x1000000) +#define FOREACH_STATE (0x2000000) +#define FOREACH_a_FLAG (0x4000000) +#define FOREACH_G_FLAG (0x8000000) +#define FOREACH_F_FLAG2 (0x10000000) +#define FOREACH_y_FLAG (0x20000000) +#define FOREACH_GLEADER (0x40000000) + +#define FOREACH_PS_EXCLUSIVE \ + (FOREACH_g_FLAG|FOREACH_a_FLAG|FOREACH_t_FLAG|FOREACH_c_FLAG|FOREACH_p_FLAG|FOREACH_l_FLAG|FOREACH_r_FLAG|FOREACH_m_FLAG) + +struct foreach_data { + ulong flags; + int keyword_array[MAX_FOREACH_KEYWORDS]; + ulong task_array[MAX_FOREACH_TASKS]; + char *comm_array[MAX_FOREACH_COMMS]; + ulong pid_array[MAX_FOREACH_PIDS]; + ulong arg_array[MAX_FOREACH_ARGS]; + struct regex_info { + char *pattern; + regex_t regex; + } regex_info[MAX_REGEX_ARGS]; + ulong state; + char *reference; + int keys; + int pids; + int tasks; + int comms; + int args; + int regexs; + int policy; +}; + +struct reference { + char *str; + ulong cmdflags; + ulong hexval; + ulong decval; + ulong ref1; + ulong ref2; + void *refp; +}; + +struct offset_table { 
/* stash of commonly-used offsets */ + long list_head_next; /* add new entries to end of table */ + long list_head_prev; + long task_struct_pid; + long task_struct_state; + long task_struct_comm; + long task_struct_mm; + long task_struct_tss; + long task_struct_thread; + long task_struct_active_mm; + long task_struct_tss_eip; + long task_struct_tss_esp; + long task_struct_tss_ksp; + long task_struct_processor; + long task_struct_p_pptr; + long task_struct_parent; + long task_struct_has_cpu; + long task_struct_cpus_runnable; + long task_struct_thread_eip; + long task_struct_thread_esp; + long task_struct_thread_ksp; + long task_struct_next_task; + long task_struct_files; + long task_struct_fs; + long task_struct_pidhash_next; + long task_struct_next_run; + long task_struct_flags; + long task_struct_sig; + long task_struct_signal; + long task_struct_blocked; + long task_struct_sigpending; + long task_struct_pending; + long task_struct_sigqueue; + long task_struct_sighand; + long task_struct_start_time; + long task_struct_times; + long task_struct_utime; + long task_struct_stime; + long task_struct_cpu; + long task_struct_run_list; + long task_struct_pgrp; + long task_struct_tgid; + long task_struct_namespace; + long task_struct_pids; + long task_struct_last_run; + long task_struct_timestamp; + long task_struct_thread_info; + long task_struct_nsproxy; + long task_struct_rlim; + long thread_info_task; + long thread_info_cpu; + long thread_info_previous_esp; + long thread_info_flags; + long nsproxy_mnt_ns; + long mnt_namespace_root; + long mnt_namespace_list; + long pid_link_pid; + long pid_hash_chain; + long hlist_node_next; + long hlist_node_pprev; + long pid_pid_chain; + long thread_struct_eip; + long thread_struct_esp; + long thread_struct_ksp; + long thread_struct_fph; + long thread_struct_rip; + long thread_struct_rsp; + long thread_struct_rsp0; + long tms_tms_utime; + long tms_tms_stime; + long signal_struct_count; + long signal_struct_action; + long 
signal_struct_shared_pending; + long signal_struct_rlim; + long k_sigaction_sa; + long sigaction_sa_handler; + long sigaction_sa_flags; + long sigaction_sa_mask; + long sigpending_head; + long sigpending_list; + long sigpending_signal; + long signal_queue_next; + long signal_queue_info; + long sigqueue_next; + long sigqueue_list; + long sigqueue_info; + long sighand_struct_action; + long siginfo_si_signo; + long thread_struct_cr3; + long thread_struct_ptbr; + long thread_struct_pg_tables; + long switch_stack_r26; + long switch_stack_b0; + long switch_stack_ar_bspstore; + long switch_stack_ar_pfs; + long switch_stack_ar_rnat; + long switch_stack_pr; + long cpuinfo_ia64_proc_freq; + long cpuinfo_ia64_unimpl_va_mask; + long cpuinfo_ia64_unimpl_pa_mask; + long device_node_type; + long device_node_allnext; + long device_node_properties; + long property_name; + long property_value; + long property_next; + long machdep_calls_setup_residual; + long RESIDUAL_VitalProductData; + long VPD_ProcessorHz; + long bd_info_bi_intfreq; + long hwrpb_struct_cycle_freq; + long hwrpb_struct_processor_offset; + long hwrpb_struct_processor_size; + long percpu_struct_halt_PC; + long percpu_struct_halt_ra; + long percpu_struct_halt_pv; + long mm_struct_mmap; + long mm_struct_pgd; + long mm_struct_rss; + long mm_struct_anon_rss; + long mm_struct_file_rss; + long mm_struct_total_vm; + long mm_struct_start_code; + long mm_struct_arg_start; + long mm_struct_arg_end; + long mm_struct_env_start; + long mm_struct_env_end; + long vm_area_struct_vm_mm; + long vm_area_struct_vm_next; + long vm_area_struct_vm_end; + long vm_area_struct_vm_start; + long vm_area_struct_vm_flags; + long vm_area_struct_vm_file; + long vm_area_struct_vm_offset; + long vm_area_struct_vm_pgoff; + long vm_struct_addr; + long vm_struct_size; + long vm_struct_next; + long module_size_of_struct; + long module_next; + long module_size; + long module_name; + long module_nsyms; + long module_syms; + long module_flags; + long 
module_num_syms; + long module_list; + long module_gpl_syms; + long module_num_gpl_syms; + long module_module_core; + long module_core_size; + long module_core_text_size; + long module_num_symtab; + long module_symtab; + long module_strtab; + + long module_kallsyms_start; + long kallsyms_header_sections; + long kallsyms_header_section_off; + long kallsyms_header_symbols; + long kallsyms_header_symbol_off; + long kallsyms_header_string_off; + long kallsyms_symbol_section_off; + long kallsyms_symbol_symbol_addr; + long kallsyms_symbol_name_off; + long kallsyms_section_start; + long kallsyms_section_size; + long kallsyms_section_name_off; + + long page_next; + long page_prev; + long page_next_hash; + long page_list; + long page_list_next; + long page_list_prev; + long page_inode; + long page_offset; + long page_count; + long page_flags; + long page_mapping; + long page_index; + long page_buffers; + long page_lru; + long page_pte; + long swap_info_struct_swap_file; + long swap_info_struct_swap_vfsmnt; + long swap_info_struct_flags; + long swap_info_struct_swap_map; + long swap_info_struct_swap_device; + long swap_info_struct_prio; + long swap_info_struct_max; + long swap_info_struct_pages; + long swap_info_struct_old_block_size; + long block_device_bd_inode; + long block_device_bd_list; + long block_device_bd_disk; + long irq_desc_t_status; + long irq_desc_t_handler; + long irq_desc_t_chip; + long irq_desc_t_action; + long irq_desc_t_depth; + long irqdesc_action; + long irqdesc_ctl; + long irqdesc_level; + long irqaction_handler; + long irqaction_flags; + long irqaction_mask; + long irqaction_name; + long irqaction_dev_id; + long irqaction_next; + long hw_interrupt_type_typename; + long hw_interrupt_type_startup; + long hw_interrupt_type_shutdown; + long hw_interrupt_type_handle; + long hw_interrupt_type_enable; + long hw_interrupt_type_disable; + long hw_interrupt_type_ack; + long hw_interrupt_type_end; + long hw_interrupt_type_set_affinity; + long irq_chip_typename; 
+ long irq_chip_startup; + long irq_chip_shutdown; + long irq_chip_enable; + long irq_chip_disable; + long irq_chip_ack; + long irq_chip_end; + long irq_chip_set_affinity; + long irq_chip_mask; + long irq_chip_mask_ack; + long irq_chip_unmask; + long irq_chip_eoi; + long irq_chip_retrigger; + long irq_chip_set_type; + long irq_chip_set_wake; + long irq_cpustat_t___softirq_active; + long irq_cpustat_t___softirq_mask; + long fdtable_max_fds; + long fdtable_max_fdset; + long fdtable_open_fds; + long fdtable_fd; + long files_struct_fdt; + long files_struct_max_fds; + long files_struct_max_fdset; + long files_struct_open_fds; + long files_struct_fd; + long files_struct_open_fds_init; + long file_f_dentry; + long file_f_vfsmnt; + long file_f_count; + long file_f_path; + long path_mnt; + long path_dentry; + long fs_struct_root; + long fs_struct_pwd; + long fs_struct_rootmnt; + long fs_struct_pwdmnt; + long dentry_d_inode; + long dentry_d_parent; + long dentry_d_name; + long dentry_d_covers; + long dentry_d_iname; + long qstr_len; + long qstr_name; + long inode_i_mode; + long inode_i_op; + long inode_i_sb; + long inode_u; + long inode_i_flock; + long inode_i_fop; + long inode_i_mapping; + long address_space_nrpages; + long vfsmount_mnt_next; + long vfsmount_mnt_devname; + long vfsmount_mnt_dirname; + long vfsmount_mnt_sb; + long vfsmount_mnt_list; + long vfsmount_mnt_mountpoint; + long vfsmount_mnt_parent; + long namespace_root; + long namespace_list; + long super_block_s_dirty; + long super_block_s_type; + long super_block_s_files; + long file_system_type_name; + long nlm_file_f_file; + long file_lock_fl_owner; + long nlm_host_h_exportent; + long svc_client_cl_ident; + long kmem_cache_s_c_nextp; + long kmem_cache_s_c_name; + long kmem_cache_s_c_num; + long kmem_cache_s_c_org_size; + long kmem_cache_s_c_flags; + long kmem_cache_s_c_offset; + long kmem_cache_s_c_firstp; + long kmem_cache_s_c_gfporder; + long kmem_cache_s_c_magic; + long kmem_cache_s_num; + long 
kmem_cache_s_next; + long kmem_cache_s_name; + long kmem_cache_s_objsize; + long kmem_cache_s_flags; + long kmem_cache_s_gfporder; + long kmem_cache_s_slabs; + long kmem_cache_s_slabs_full; + long kmem_cache_s_slabs_partial; + long kmem_cache_s_slabs_free; + long kmem_cache_s_cpudata; + long kmem_cache_s_c_align; + long kmem_cache_s_colour_off; + long cpucache_s_avail; + long cpucache_s_limit; + long kmem_cache_s_array; + long array_cache_avail; + long array_cache_limit; + long kmem_cache_s_lists; + long kmem_list3_slabs_partial; + long kmem_list3_slabs_full; + long kmem_list3_slabs_free; + long kmem_list3_free_objects; + long kmem_list3_shared; + long kmem_slab_s_s_nextp; + long kmem_slab_s_s_freep; + long kmem_slab_s_s_inuse; + long kmem_slab_s_s_mem; + long kmem_slab_s_s_index; + long kmem_slab_s_s_offset; + long kmem_slab_s_s_magic; + long slab_s_list; + long slab_s_s_mem; + long slab_s_inuse; + long slab_s_free; + long slab_list; + long slab_s_mem; + long slab_inuse; + long slab_free; + long net_device_next; + long net_device_name; + long net_device_type; + long net_device_addr_len; + long net_device_ip_ptr; + long net_device_dev_list; + long net_dev_base_head; + long device_next; + long device_name; + long device_type; + long device_ip_ptr; + long device_addr_len; + long socket_sk; + long sock_daddr; + long sock_rcv_saddr; + long sock_dport; + long sock_sport; + long sock_num; + long sock_type; + long sock_family; + long sock_common_skc_family; + long sock_sk_type; + long inet_sock_inet; + long inet_opt_daddr; + long inet_opt_rcv_saddr; + long inet_opt_dport; + long inet_opt_sport; + long inet_opt_num; + long ipv6_pinfo_rcv_saddr; + long ipv6_pinfo_daddr; + long timer_list_list; + long timer_list_next; + long timer_list_entry; + long timer_list_expires; + long timer_list_function; + long timer_vec_root_vec; + long timer_vec_vec; + long tvec_root_s_vec; + long tvec_s_vec; + long tvec_t_base_s_tv1; + long wait_queue_task; + long wait_queue_next; + long 
__wait_queue_task; + long __wait_queue_head_task_list; + long __wait_queue_task_list; + long pglist_data_node_zones; + long pglist_data_node_mem_map; + long pglist_data_node_start_paddr; + long pglist_data_node_start_mapnr; + long pglist_data_node_size; + long pglist_data_node_id; + long pglist_data_node_next; + long pglist_data_nr_zones; + long pglist_data_node_start_pfn; + long pglist_data_pgdat_next; + long pglist_data_node_present_pages; + long pglist_data_node_spanned_pages; + long pglist_data_bdata; + long page_cache_bucket_chain; + long zone_struct_free_pages; + long zone_struct_free_area; + long zone_struct_zone_pgdat; + long zone_struct_name; + long zone_struct_size; + long zone_struct_memsize; + long zone_struct_zone_start_pfn; + long zone_struct_zone_start_paddr; + long zone_struct_zone_start_mapnr; + long zone_struct_zone_mem_map; + long zone_struct_inactive_clean_pages; + long zone_struct_inactive_clean_list; + long zone_struct_inactive_dirty_pages; + long zone_struct_active_pages; + long zone_struct_pages_min; + long zone_struct_pages_low; + long zone_struct_pages_high; + long zone_free_pages; + long zone_free_area; + long zone_zone_pgdat; + long zone_zone_mem_map; + long zone_name; + long zone_spanned_pages; + long zone_zone_start_pfn; + long zone_pages_min; + long zone_pages_low; + long zone_pages_high; + long zone_vm_stat; + long neighbour_next; + long neighbour_primary_key; + long neighbour_ha; + long neighbour_dev; + long neighbour_nud_state; + long neigh_table_hash_buckets; + long neigh_table_key_len; + long in_device_ifa_list; + long in_ifaddr_ifa_next; + long in_ifaddr_ifa_address; + long pci_dev_global_list; + long pci_dev_next; + long pci_dev_bus; + long pci_dev_devfn; + long pci_dev_class; + long pci_dev_device; + long pci_dev_vendor; + long pci_bus_number; + long resource_entry_t_from; + long resource_entry_t_num; + long resource_entry_t_name; + long resource_entry_t_next; + long resource_name; + long resource_start; + long resource_end; + 
long resource_sibling; + long resource_child; + long runqueue_curr; + long runqueue_idle; + long runqueue_active; + long runqueue_expired; + long runqueue_arrays; + long runqueue_cpu; + long cpu_s_idle; + long cpu_s_curr; + long prio_array_nr_active; + long prio_array_queue; + long user_regs_struct_ebp; + long user_regs_struct_esp; + long user_regs_struct_rip; + long user_regs_struct_cs; + long user_regs_struct_eflags; + long user_regs_struct_rsp; + long user_regs_struct_ss; + long e820map_nr_map; + long e820entry_addr; + long e820entry_size; + long e820entry_type; + long char_device_struct_next; + long char_device_struct_name; + long char_device_struct_fops; + long char_device_struct_major; + long gendisk_major; + long gendisk_disk_name; + long gendisk_fops; + long blk_major_name_next; + long blk_major_name_major; + long blk_major_name_name; + long radix_tree_root_height; + long radix_tree_root_rnode; + long x8664_pda_pcurrent; + long x8664_pda_data_offset; + long x8664_pda_kernelstack; + long x8664_pda_irqrsp; + long x8664_pda_irqstackptr; + long x8664_pda_level4_pgt; + long x8664_pda_cpunumber; + long x8664_pda_me; + long tss_struct_ist; + long mem_section_section_mem_map; + long vcpu_guest_context_user_regs; + long cpu_user_regs_eip; + long cpu_user_regs_esp; + long cpu_user_regs_rip; + long cpu_user_regs_rsp; + long unwind_table_core; + long unwind_table_init; + long unwind_table_address; + long unwind_table_size; + long unwind_table_link; + long unwind_table_name; + long rq_cfs; + long rq_rt; + long rq_nr_running; + long cfs_rq_rb_leftmost; + long cfs_rq_nr_running; + long cfs_rq_tasks_timeline; + long task_struct_se; + long sched_entity_run_node; + long rt_rq_active; + long kmem_cache_size; + long kmem_cache_objsize; + long kmem_cache_offset; + long kmem_cache_order; + long kmem_cache_local_node; + long kmem_cache_objects; + long kmem_cache_inuse; + long kmem_cache_align; + long kmem_cache_name; + long kmem_cache_list; + long kmem_cache_node; + long 
kmem_cache_cpu_slab; + long page_inuse; +/* long page_offset; use "old" page->offset */ + long page_slab; + long page_first_page; + long page_freelist; + long kmem_cache_node_nr_partial; + long kmem_cache_node_nr_slabs; + long kmem_cache_node_partial; + long kmem_cache_node_full; + long pid_numbers; + long upid_nr; + long upid_ns; + long upid_pid_chain; + long pid_tasks; + long kmem_cache_cpu_freelist; + long kmem_cache_cpu_page; + long kmem_cache_cpu_node; + long kmem_cache_flags; + long zone_nr_active; + long zone_nr_inactive; + long zone_all_unreclaimable; + long zone_present_pages; + long zone_flags; + long zone_pages_scanned; + long pcpu_info_vcpu; + long pcpu_info_idle; + long vcpu_struct_rq; + long task_struct_sched_info; + long sched_info_last_arrival; + long page_objects; + long kmem_cache_oo; + long char_device_struct_cdev; + long char_device_struct_baseminor; + long cdev_ops; + long probe_next; + long probe_dev; + long probe_data; + long kobj_map_probes; + long task_struct_prio; + long zone_watermark; + long module_sect_attrs; + long module_sect_attrs_attrs; + long module_sect_attrs_nsections; + long module_sect_attr_mattr; + long module_sect_attr_name; + long module_sect_attr_address; + long module_attribute_attr; + long attribute_owner; + long module_sect_attr_attr; + long module_sections_attrs; + long swap_info_struct_inuse_pages; + long s390_lowcore_psw_save_area; + long mm_struct_rss_stat; + long mm_rss_stat_count; + long module_module_init; + long module_init_text_size; + long cpu_context_save_fp; + long cpu_context_save_sp; + long cpu_context_save_pc; + long elf_prstatus_pr_pid; + long elf_prstatus_pr_reg; + long irq_desc_t_name; + long thread_info_cpu_context; + long unwind_table_list; + long unwind_table_start; + long unwind_table_stop; + long unwind_table_begin_addr; + long unwind_table_end_addr; + long unwind_idx_addr; + long unwind_idx_insn; + long signal_struct_nr_threads; + long module_init_size; + long module_percpu; + long 
radix_tree_node_slots; + long s390_stack_frame_back_chain; + long s390_stack_frame_r14; + long user_regs_struct_eip; + long user_regs_struct_rax; + long user_regs_struct_eax; + long user_regs_struct_rbx; + long user_regs_struct_ebx; + long user_regs_struct_rcx; + long user_regs_struct_ecx; + long user_regs_struct_rdx; + long user_regs_struct_edx; + long user_regs_struct_rsi; + long user_regs_struct_esi; + long user_regs_struct_rdi; + long user_regs_struct_edi; + long user_regs_struct_ds; + long user_regs_struct_es; + long user_regs_struct_fs; + long user_regs_struct_gs; + long user_regs_struct_rbp; + long user_regs_struct_r8; + long user_regs_struct_r9; + long user_regs_struct_r10; + long user_regs_struct_r11; + long user_regs_struct_r12; + long user_regs_struct_r13; + long user_regs_struct_r14; + long user_regs_struct_r15; + long sched_entity_cfs_rq; + long sched_entity_my_q; + long sched_entity_on_rq; + long task_struct_on_rq; + long cfs_rq_curr; + long irq_desc_t_irq_data; + long irq_desc_t_kstat_irqs; + long irq_desc_t_affinity; + long irq_data_chip; + long irq_data_affinity; + long kernel_stat_irqs; + long socket_alloc_vfs_inode; + long class_devices; + long class_p; + long class_private_devices; + long device_knode_class; + long device_node; + long gendisk_dev; + long gendisk_kobj; + long gendisk_part0; + long gendisk_queue; + long hd_struct_dev; + long klist_k_list; + long klist_node_n_klist; + long klist_node_n_node; + long kobject_entry; + long kset_list; + long request_list_count; + long request_queue_in_flight; + long request_queue_rq; + long subsys_private_klist_devices; + long subsystem_kset; + long mount_mnt_parent; + long mount_mnt_mountpoint; + long mount_mnt_list; + long mount_mnt_devname; + long mount_mnt; + long task_struct_exit_state; + long timekeeper_xtime; + long file_f_op; + long file_private_data; + long hstate_order; + long hugetlbfs_sb_info_hstate; + long idr_layer_ary; + long idr_layer_layer; + long idr_layers; + long idr_top; + long 
ipc_id_ary_p; + long ipc_ids_entries; + long ipc_ids_max_id; + long ipc_ids_ipcs_idr; + long ipc_ids_in_use; + long ipc_namespace_ids; + long kern_ipc_perm_deleted; + long kern_ipc_perm_key; + long kern_ipc_perm_mode; + long kern_ipc_perm_uid; + long kern_ipc_perm_id; + long kern_ipc_perm_seq; + long nsproxy_ipc_ns; + long shmem_inode_info_swapped; + long shmem_inode_info_vfs_inode; + long shm_file_data_file; + long shmid_kernel_shm_file; + long shmid_kernel_shm_nattch; + long shmid_kernel_shm_perm; + long shmid_kernel_shm_segsz; + long shmid_kernel_id; + long sem_array_sem_perm; + long sem_array_sem_id; + long sem_array_sem_nsems; + long msg_queue_q_perm; + long msg_queue_q_id; + long msg_queue_q_cbytes; + long msg_queue_q_qnum; + long super_block_s_fs_info; + long rq_timestamp; + long radix_tree_node_height; + long rb_root_rb_node; + long rb_node_rb_left; + long rb_node_rb_right; + long rt_prio_array_queue; + long task_struct_rt; + long sched_rt_entity_run_list; + long log_ts_nsec; + long log_len; + long log_text_len; + long log_dict_len; + long log_level; + long log_flags_level; + long timekeeper_xtime_sec; + long neigh_table_hash_mask; + long sched_rt_entity_my_q; + long neigh_table_hash_shift; + long neigh_table_nht_ptr; + long task_group_parent; + long task_group_css; + long cgroup_subsys_state_cgroup; + long cgroup_dentry; + long task_group_rt_rq; + long rt_rq_tg; + long task_group_cfs_rq; + long cfs_rq_tg; + long task_group_siblings; + long task_group_children; + long task_group_cfs_bandwidth; + long cfs_rq_throttled; + long task_group_rt_bandwidth; + long rt_rq_rt_throttled; + long rt_rq_highest_prio; + long rt_rq_rt_nr_running; + long vmap_area_va_start; + long vmap_area_va_end; + long vmap_area_list; + long vmap_area_flags; + long vmap_area_vm; + long hrtimer_cpu_base_clock_base; + long hrtimer_clock_base_offset; + long hrtimer_clock_base_active; + long hrtimer_clock_base_first; + long hrtimer_clock_base_get_time; + long hrtimer_base_first; + long 
hrtimer_base_pending; + long hrtimer_base_get_time; + long hrtimer_node; + long hrtimer_list; + long hrtimer_softexpires; + long hrtimer_expires; + long hrtimer_function; + long timerqueue_head_next; + long timerqueue_node_expires; + long timerqueue_node_node; + long ktime_t_tv64; + long ktime_t_sec; + long ktime_t_nsec; + long module_taints; + long module_gpgsig_ok; + long module_license_gplok; + long tnt_bit; + long tnt_true; + long tnt_false; + long task_struct_thread_context_fp; + long task_struct_thread_context_sp; + long task_struct_thread_context_pc; + long page_slab_page; + long trace_print_flags_mask; + long trace_print_flags_name; + long task_struct_rss_stat; + long task_rss_stat_count; + long page_s_mem; + long page_active; + long hstate_nr_huge_pages; + long hstate_free_huge_pages; + long hstate_name; + long cgroup_kn; + long kernfs_node_name; + long kernfs_node_parent; + long kmem_cache_cpu_partial; + long kmem_cache_cpu_cache; + long nsproxy_net_ns; + long atomic_t_counter; + long percpu_counter_count; + long mm_struct_mm_count; + long task_struct_thread_reg29; + long task_struct_thread_reg31; + long pt_regs_regs; + long pt_regs_cp0_badvaddr; + long address_space_page_tree; + long page_compound_head; + long irq_desc_irq_data; + long kmem_cache_node_total_objects; + long timer_base_vectors; + long request_queue_mq_ops; + long request_queue_queue_ctx; + long blk_mq_ctx_rq_dispatched; + long blk_mq_ctx_rq_completed; + long task_struct_stack; + long tnt_mod; + long radix_tree_node_shift; + long kmem_cache_red_left_pad; + long inactive_task_frame_ret_addr; + long sk_buff_head_next; + long sk_buff_head_qlen; + long sk_buff_next; + long sk_buff_len; + long sk_buff_data; + long nlmsghdr_nlmsg_type; + long module_arch; + long mod_arch_specific_num_orcs; + long mod_arch_specific_orc_unwind_ip; + long mod_arch_specific_orc_unwind; + long task_struct_policy; + long kmem_cache_random; + long pid_namespace_idr; + long idr_idr_rt; + long bpf_prog_aux; + long 
bpf_prog_type; + long bpf_prog_tag; + long bpf_prog_jited_len; + long bpf_prog_bpf_func; + long bpf_prog_len; + long bpf_prog_insnsi; + long bpf_prog_pages; + long bpf_map_map_type; + long bpf_map_map_flags; + long bpf_map_pages; + long bpf_map_key_size; + long bpf_map_value_size; + long bpf_map_max_entries; + long bpf_map_user; + long bpf_map_name; + long bpf_prog_aux_used_map_cnt; + long bpf_prog_aux_used_maps; + long bpf_prog_aux_load_time; + long bpf_prog_aux_user; + long user_struct_uid; + long idr_cur; + long kmem_cache_memcg_params; + long memcg_cache_params___root_caches_node; + long memcg_cache_params_children; + long memcg_cache_params_children_node; + long task_struct_pid_links; + long kernel_symbol_value; + long pci_dev_dev; + long pci_dev_hdr_type; + long pci_dev_pcie_flags_reg; + long pci_bus_node; + long pci_bus_devices; + long pci_bus_dev; + long pci_bus_children; + long pci_bus_parent; + long pci_bus_self; + long device_kobj; + long kobject_name; + long memory_block_dev; + long memory_block_start_section_nr; + long memory_block_end_section_nr; + long memory_block_state; + long memory_block_nid; + long mem_section_pageblock_flags; + long bus_type_p; + long device_private_device; + long device_private_knode_bus; + long xarray_xa_head; + long xa_node_slots; + long xa_node_shift; + long hd_struct_dkstats; + long disk_stats_in_flight; + long cpu_context_save_r7; + long dentry_d_sb; + long device_private_knode_class; + long timerqueue_head_rb_root; + long rb_root_cached_rb_leftmost; +}; + +struct size_table { /* stash of commonly-used sizes */ + long page; + long free_area_struct; + long zone_struct; + long free_area; + long zone; + long kmem_slab_s; + long kmem_cache_s; + long kmem_bufctl_t; + long slab_s; + long slab; + long cpucache_s; + long array_cache; + long swap_info_struct; + long mm_struct; + long vm_area_struct; + long pglist_data; + long page_cache_bucket; + long pt_regs; + long task_struct; + long thread_info; + long softirq_state; + long 
desc_struct; + long umode_t; + long dentry; + long files_struct; + long fdtable; + long fs_struct; + long file; + long inode; + long vfsmount; + long super_block; + long irqdesc; + long module; + long list_head; + long hlist_node; + long hlist_head; + long irq_cpustat_t; + long cpuinfo_x86; + long cpuinfo_ia64; + long timer_list; + long timer_vec_root; + long timer_vec; + long tvec_root_s; + long tvec_s; + long tvec_t_base_s; + long wait_queue; + long __wait_queue; + long device; + long net_device; + long sock; + long signal_struct; + long sigpending_signal; + long signal_queue; + long sighand_struct; + long sigqueue; + long k_sigaction; + long resource_entry_t; + long resource; + long runqueue; + long irq_desc_t; + long task_union; + long thread_union; + long prio_array; + long user_regs_struct; + long switch_stack; + long vm_area_struct_vm_flags; + long e820map; + long e820entry; + long cpu_s; + long pgd_t; + long kallsyms_header; + long kallsyms_symbol; + long kallsyms_section; + long irq_ctx; + long block_device; + long blk_major_name; + long gendisk; + long address_space; + long char_device_struct; + long inet_sock; + long in6_addr; + long socket; + long spinlock_t; + long radix_tree_root; + long radix_tree_node; + long x8664_pda; + long ppc64_paca; + long gate_struct; + long tss_struct; + long task_struct_start_time; + long cputime_t; + long mem_section; + long pid_link; + long unwind_table; + long rlimit; + long kmem_cache; + long kmem_cache_node; + long upid; + long kmem_cache_cpu; + long cfs_rq; + long pcpu_info; + long vcpu_struct; + long cdev; + long probe; + long kobj_map; + long page_flags; + long module_sect_attr; + long task_struct_utime; + long task_struct_stime; + long cpu_context_save; + long elf_prstatus; + long note_buf; + long unwind_idx; + long softirq_action; + long irq_data; + long s390_stack_frame; + long percpu_data; + long sched_entity; + long kernel_stat; + long subsystem; + long class_private; + long rq_in_flight; + long 
class_private_devices; + long mount; + long hstate; + long ipc_ids; + long shmid_kernel; + long sem_array; + long msg_queue; + long log; + long log_level; + long rt_rq; + long task_group; + long vmap_area; + long hrtimer_clock_base; + long hrtimer_base; + long tnt; + long trace_print_flags; + long task_struct_flags; + long timer_base; + long taint_flag; + long nlmsghdr; + long nlmsghdr_nlmsg_type; + long sk_buff_head_qlen; + long sk_buff_len; + long orc_entry; + long task_struct_policy; + long pid; + long bpf_prog; + long bpf_prog_aux; + long bpf_map; + long bpf_insn; + long xarray; + long xa_node; +}; + +struct array_table { + int kmem_cache_s_name; + int kmem_cache_s_c_name; + int kmem_cache_s_array; + int kmem_cache_s_cpudata; + int irq_desc; + int irq_action; + int log_buf; + int timer_vec_vec; + int timer_vec_root_vec; + int tvec_s_vec; + int tvec_root_s_vec; + int page_hash_table; + int net_device_name; + int neigh_table_hash_buckets; + int neighbour_ha; + int swap_info; + int pglist_data_node_zones; + int zone_struct_free_area; + int zone_free_area; + int free_area; + int free_area_DIMENSION; + int prio_array_queue; + int height_to_maxindex; + int pid_hash; + int kmem_cache_node; + int kmem_cache_cpu_slab; + int rt_prio_array_queue; + int height_to_maxnodes; + int task_struct_rlim; + int signal_struct_rlim; + int vm_numa_stat; +}; + +/* + * The following set of macros use gdb to determine structure, union, + * or member sizes/offsets. They should be used only during initialization + * of the offset_table or size_table, or with data structures whose names + * or members are only known/specified during runtime. 
+ */ +#define MEMBER_SIZE_REQUEST ((struct datatype_member *)(-1)) +#define ANON_MEMBER_OFFSET_REQUEST ((struct datatype_member *)(-2)) +#define MEMBER_TYPE_REQUEST ((struct datatype_member *)(-3)) +#define STRUCT_SIZE_REQUEST ((struct datatype_member *)(-4)) +#define MEMBER_TYPE_NAME_REQUEST ((struct datatype_member *)(-5)) + +#define STRUCT_SIZE(X) datatype_info((X), NULL, STRUCT_SIZE_REQUEST) +#define UNION_SIZE(X) datatype_info((X), NULL, STRUCT_SIZE_REQUEST) +#define STRUCT_EXISTS(X) (datatype_info((X), NULL, STRUCT_SIZE_REQUEST) >= 0) +#define DATATYPE_SIZE(X) datatype_info((X)->name, NULL, (X)) +#define MEMBER_OFFSET(X,Y) datatype_info((X), (Y), NULL) +#define MEMBER_EXISTS(X,Y) (datatype_info((X), (Y), NULL) >= 0) +#define MEMBER_SIZE(X,Y) datatype_info((X), (Y), MEMBER_SIZE_REQUEST) +#define MEMBER_TYPE(X,Y) datatype_info((X), (Y), MEMBER_TYPE_REQUEST) +#define MEMBER_TYPE_NAME(X,Y) ((char *)datatype_info((X), (Y), MEMBER_TYPE_NAME_REQUEST)) +#define ANON_MEMBER_OFFSET(X,Y) datatype_info((X), (Y), ANON_MEMBER_OFFSET_REQUEST) + +/* + * The following set of macros can only be used with pre-intialized fields + * in the offset table, size table or array_table. 
+ */ +#define OFFSET(X) (OFFSET_verify(offset_table.X, (char *)__FUNCTION__, __FILE__, __LINE__, #X)) +#define SIZE(X) (SIZE_verify(size_table.X, (char *)__FUNCTION__, __FILE__, __LINE__, #X)) +#define INVALID_OFFSET (-1) +#define INVALID_MEMBER(X) (offset_table.X == INVALID_OFFSET) +#define INVALID_SIZE(X) (size_table.X == -1) +#define VALID_SIZE(X) (size_table.X >= 0) +#define VALID_STRUCT(X) (size_table.X >= 0) +#define VALID_MEMBER(X) (offset_table.X >= 0) +#define ARRAY_LENGTH(X) (array_table.X) +#define ASSIGN_OFFSET(X) (offset_table.X) +#define ASSIGN_SIZE(X) (size_table.X) +#define OFFSET_OPTION(X,Y) (OFFSET_option(offset_table.X, offset_table.Y, (char *)__FUNCTION__, __FILE__, __LINE__, #X, #Y)) +#define SIZE_OPTION(X,Y) (SIZE_option(size_table.X, size_table.Y, (char *)__FUNCTION__, __FILE__, __LINE__, #X, #Y)) + +#define MEMBER_OFFSET_INIT(X, Y, Z) (ASSIGN_OFFSET(X) = MEMBER_OFFSET(Y, Z)) +#define STRUCT_SIZE_INIT(X, Y) (ASSIGN_SIZE(X) = STRUCT_SIZE(Y)) +#define ARRAY_LENGTH_INIT(A, B, C, D, E) ((A) = get_array_length(C, D, E)) +#define ARRAY_LENGTH_INIT_ALT(A, B, C, D, E) ((A) = get_array_length_alt(B, C, D, E)) +#define MEMBER_SIZE_INIT(X, Y, Z) (ASSIGN_SIZE(X) = MEMBER_SIZE(Y, Z)) +#define ANON_MEMBER_OFFSET_INIT(X, Y, Z) (ASSIGN_OFFSET(X) = ANON_MEMBER_OFFSET(Y, Z)) + +/* + * For use with non-debug kernels. + */ +struct builtin_debug_table { + char *release; + char *machine_type; + struct offset_table *offset_table; + struct size_table *size_table; + struct array_table *array_table; +}; + +/* + * Facilitators for pulling correctly-sized data out of a buffer at a + * known address. 
+ */ + +#ifdef NEED_ALIGNED_MEM_ACCESS + +#define DEF_LOADER(TYPE) \ +static inline TYPE \ +load_##TYPE (char *addr) \ +{ \ + TYPE ret; \ + size_t i = sizeof(TYPE); \ + while (i--) \ + ((char *)&ret)[i] = addr[i]; \ + return ret; \ +} + +DEF_LOADER(int); +DEF_LOADER(uint); +DEF_LOADER(long); +DEF_LOADER(ulong); +DEF_LOADER(ulonglong); +DEF_LOADER(ushort); +DEF_LOADER(short); +typedef void *pointer_t; +DEF_LOADER(pointer_t); + +#define LOADER(TYPE) load_##TYPE + +#define INT(ADDR) LOADER(int) ((char *)(ADDR)) +#define UINT(ADDR) LOADER(uint) ((char *)(ADDR)) +#define LONG(ADDR) LOADER(long) ((char *)(ADDR)) +#define ULONG(ADDR) LOADER(ulong) ((char *)(ADDR)) +#define ULONGLONG(ADDR) LOADER(ulonglong) ((char *)(ADDR)) +#define ULONG_PTR(ADDR) ((ulong *) (LOADER(pointer_t) ((char *)(ADDR)))) +#define USHORT(ADDR) LOADER(ushort) ((char *)(ADDR)) +#define SHORT(ADDR) LOADER(short) ((char *)(ADDR)) +#define UCHAR(ADDR) *((unsigned char *)((char *)(ADDR))) +#define VOID_PTR(ADDR) ((void *) (LOADER(pointer_t) ((char *)(ADDR)))) + +#else + +#define INT(ADDR) *((int *)((char *)(ADDR))) +#define UINT(ADDR) *((uint *)((char *)(ADDR))) +#define LONG(ADDR) *((long *)((char *)(ADDR))) +#define ULONG(ADDR) *((ulong *)((char *)(ADDR))) +#define ULONGLONG(ADDR) *((ulonglong *)((char *)(ADDR))) +#define ULONG_PTR(ADDR) *((ulong **)((char *)(ADDR))) +#define USHORT(ADDR) *((ushort *)((char *)(ADDR))) +#define SHORT(ADDR) *((short *)((char *)(ADDR))) +#define UCHAR(ADDR) *((unsigned char *)((char *)(ADDR))) +#define VOID_PTR(ADDR) *((void **)((char *)(ADDR))) + +#endif /* NEED_ALIGNED_MEM_ACCESS */ + +struct node_table { + int node_id; + ulong pgdat; + ulong mem_map; + ulong size; + ulong present; + ulonglong start_paddr; + ulong start_mapnr; +}; + +struct meminfo; +struct slab_data; + +#define VMA_CACHE (20) + +struct vm_table { /* kernel VM-related data */ + ulong flags; + ulong kernel_pgd[NR_CPUS]; + ulong high_memory; + ulong vmalloc_start; + ulong mem_map; + long total_pages; + 
ulong totalram_pages; + ulong totalhigh_pages; + ulong num_physpages; + ulong max_mapnr; + ulong kmem_max_c_num; + ulong kmem_max_limit; + ulong kmem_max_cpus; + ulong kmem_cache_count; + ulong kmem_cache_len_nodes; + ulong PG_reserved; + ulong PG_slab; + ulong PG_head_tail_mask; + int kmem_cache_namelen; + ulong page_hash_table; + int page_hash_table_len; + int paddr_prlen; + int numnodes; + int nr_zones; + int nr_free_areas; + struct node_table *node_table; + void (*dump_free_pages)(struct meminfo *); + void (*dump_kmem_cache)(struct meminfo *); + struct slab_data *slab_data; + uint nr_swapfiles; + ulong last_swap_read; + char *swap_info_struct; + char *vma_cache; + ulong cached_vma[VMA_CACHE]; + ulong cached_vma_hits[VMA_CACHE]; + int vma_cache_index; + ulong vma_cache_fills; + void *mem_sec; + char *mem_section; + int ZONE_HIGHMEM; + ulong *node_online_map; + int node_online_map_len; + int nr_vm_stat_items; + char **vm_stat_items; + int cpu_slab_type; + int nr_vm_event_items; + char **vm_event_items; + int nr_bad_slab_caches; + ulong *bad_slab_caches; + int nr_pageflags; + struct pageflags_data { + ulong mask; + char *name; + } *pageflags_data; + ulong max_mem_section_nr; +}; + +#define NODES (0x1) +#define ZONES (0x2) +#define PERCPU_KMALLOC_V1 (0x4) +#define COMMON_VADDR (0x8) +#define KMEM_CACHE_INIT (0x10) +#define V_MEM_MAP (0x20) +#define PERCPU_KMALLOC_V2 (0x40) +#define KMEM_CACHE_UNAVAIL (0x80) +#define FLATMEM (0x100) +#define DISCONTIGMEM (0x200) +#define SPARSEMEM (0x400) +#define SPARSEMEM_EX (0x800) +#define PERCPU_KMALLOC_V2_NODES (0x1000) +#define KMEM_CACHE_DELAY (0x2000) +#define NODES_ONLINE (0x4000) +#define VM_STAT (0x8000) +#define KMALLOC_SLUB (0x10000) +#define CONFIG_NUMA (0x20000) +#define VM_EVENT (0x40000) +#define PGCNT_ADJ (0x80000) +#define VM_INIT (0x100000) +#define SWAPINFO_V1 (0x200000) +#define SWAPINFO_V2 (0x400000) +#define NODELISTS_IS_PTR (0x800000) +#define KMALLOC_COMMON (0x1000000) +#define USE_VMAP_AREA (0x2000000) 
+#define PAGEFLAGS (0x4000000) +#define SLAB_OVERLOAD_PAGE (0x8000000) +#define SLAB_CPU_CACHE (0x10000000) +#define SLAB_ROOT_CACHES (0x20000000) + +#define IS_FLATMEM() (vt->flags & FLATMEM) +#define IS_DISCONTIGMEM() (vt->flags & DISCONTIGMEM) +#define IS_SPARSEMEM() (vt->flags & SPARSEMEM) +#define IS_SPARSEMEM_EX() (vt->flags & SPARSEMEM_EX) + +#define COMMON_VADDR_SPACE() (vt->flags & COMMON_VADDR) +#define PADDR_PRLEN (vt->paddr_prlen) + +struct datatype_member { /* minimal definition of a structure/union */ + char *name; /* and possibly a member within it */ + char *member; + ulong type; + long size; + long member_offset; + long member_size; + int member_typecode; + ulong flags; + char *tagname; /* tagname and value for enums */ + long value; + ulong vaddr; +}; + +#define union_name struct_name + +struct list_data { /* generic structure used by do_list() to walk */ + ulong flags; /* through linked lists in the kernel */ + ulong start; + long member_offset; + long list_head_offset; + ulong end; + ulong searchfor; + char **structname; + int structname_args; + char *header; + ulong *list_ptr; + int (*callback_func)(void *, void *); + void *callback_data; + long struct_list_offset; +}; +#define LIST_OFFSET_ENTERED (VERBOSE << 1) +#define LIST_START_ENTERED (VERBOSE << 2) +#define LIST_HEAD_FORMAT (VERBOSE << 3) +#define LIST_HEAD_POINTER (VERBOSE << 4) +#define RETURN_ON_DUPLICATE (VERBOSE << 5) +#define RETURN_ON_LIST_ERROR (VERBOSE << 6) +#define LIST_STRUCT_RADIX_10 (VERBOSE << 7) +#define LIST_STRUCT_RADIX_16 (VERBOSE << 8) +#define LIST_HEAD_REVERSE (VERBOSE << 9) +#define LIST_ALLOCATE (VERBOSE << 10) +#define LIST_CALLBACK (VERBOSE << 11) +#define CALLBACK_RETURN (VERBOSE << 12) +#define LIST_PARSE_MEMBER (VERBOSE << 13) +#define LIST_READ_MEMBER (VERBOSE << 14) +#define LIST_BRENT_ALGO (VERBOSE << 15) + +struct tree_data { + ulong flags; + ulong start; + long node_member_offset; + char **structname; + int structname_args; + int count; +}; + +#define 
TREE_ROOT_OFFSET_ENTERED (VERBOSE << 1) +#define TREE_NODE_OFFSET_ENTERED (VERBOSE << 2) +#define TREE_NODE_POINTER (VERBOSE << 3) +#define TREE_POSITION_DISPLAY (VERBOSE << 4) +#define TREE_STRUCT_RADIX_10 (VERBOSE << 5) +#define TREE_STRUCT_RADIX_16 (VERBOSE << 6) +#define TREE_PARSE_MEMBER (VERBOSE << 7) +#define TREE_READ_MEMBER (VERBOSE << 8) +#define TREE_LINEAR_ORDER (VERBOSE << 9) + +#define ALIAS_RUNTIME (1) +#define ALIAS_RCLOCAL (2) +#define ALIAS_RCHOME (3) +#define ALIAS_BUILTIN (4) + +struct alias_data { /* command alias storage */ + struct alias_data *next; + char *alias; + int argcnt; + int size; + int origin; + char *args[MAXARGS]; + char argbuf[1]; +}; + +struct rb_node +{ + unsigned long rb_parent_color; +#define RB_RED 0 +#define RB_BLACK 1 + struct rb_node *rb_right; + struct rb_node *rb_left; +}; + +struct rb_root +{ + struct rb_node *rb_node; +}; + +#define NUMBER_STACKFRAMES 4 + +#define SAVE_RETURN_ADDRESS(retaddr) \ +{ \ + int i; \ + int saved_stacks; \ + \ + saved_stacks = backtrace((void **)retaddr, NUMBER_STACKFRAMES); \ + \ + /* explicitely zero out the invalid addresses */ \ + for (i = saved_stacks; i < NUMBER_STACKFRAMES; i++) \ + retaddr[i] = 0; \ +} + +#endif /* !GDB_COMMON */ + + +#define SYMBOL_NAME_USED (0x1) +#define MODULE_SYMBOL (0x2) +#define IS_MODULE_SYMBOL(SYM) ((SYM)->flags & MODULE_SYMBOL) + +struct syment { + ulong value; + char *name; + struct syment *val_hash_next; + struct syment *name_hash_next; + char type; + unsigned char cnt; + unsigned char flags; + unsigned char pad2; +}; + +#define NAMESPACE_INIT (1) +#define NAMESPACE_REUSE (2) +#define NAMESPACE_FREE (3) +#define NAMESPACE_INSTALL (4) +#define NAMESPACE_COMPLETE (5) + +struct symbol_namespace { + char *address; + size_t size; + long index; + long cnt; +}; + +struct downsized { + char *name; + struct downsized *next; +}; + +#define SYMVAL_HASH (512) +#define SYMVAL_HASH_INDEX(vaddr) \ + (((vaddr) >> machdep->pageshift) % SYMVAL_HASH) + +#define SYMNAME_HASH 
(512) +#define SYMNAME_HASH_INDEX(name) \ + ((name[0] ^ (name[strlen(name)-1] * name[strlen(name)/2])) % SYMNAME_HASH) + +#define PATCH_KERNEL_SYMBOLS_START ((char *)(1)) +#define PATCH_KERNEL_SYMBOLS_STOP ((char *)(2)) + +#ifndef GDB_COMMON + +struct symbol_table_data { + ulong flags; +#ifdef GDB_5_3 + struct _bfd *bfd; +#else + struct bfd *bfd; +#endif + struct sec *sections; + struct syment *symtable; + struct syment *symend; + long symcnt; + ulong syment_size; + struct symval_hash_chain { + struct syment *val_hash_head; + struct syment *val_hash_last; + } symval_hash[SYMVAL_HASH]; + double val_hash_searches; + double val_hash_iterations; + struct syment *symname_hash[SYMNAME_HASH]; + struct symbol_namespace kernel_namespace; + struct syment *ext_module_symtable; + struct syment *ext_module_symend; + long ext_module_symcnt; + struct symbol_namespace ext_module_namespace; + int mods_installed; + struct load_module *current; + struct load_module *load_modules; + off_t dwarf_eh_frame_file_offset; + ulong dwarf_eh_frame_size; + ulong first_ksymbol; + ulong __per_cpu_start; + ulong __per_cpu_end; + off_t dwarf_debug_frame_file_offset; + ulong dwarf_debug_frame_size; + ulong first_section_start; + ulong last_section_end; + ulong _stext_vmlinux; + struct downsized downsized; + ulong divide_error_vmlinux; + ulong idt_table_vmlinux; + ulong saved_command_line_vmlinux; + ulong pti_init_vmlinux; + ulong kaiser_init_vmlinux; + int kernel_symbol_type; +}; + +/* flags for st */ +#define KERNEL_SYMS (0x1) +#define MODULE_SYMS (0x2) +#define LOAD_MODULE_SYMS (0x4) +#define INSMOD_BUILTIN (0x8) +#define GDB_SYMS_PATCHED (0x10) +#define GDB_PATCHED() (st->flags & GDB_SYMS_PATCHED) +#define NO_SEC_LOAD (0x20) +#define NO_SEC_CONTENTS (0x40) +#define FORCE_DEBUGINFO (0x80) +#define CRC_MATCHES (0x100) +#define ADD_SYMBOL_FILE (0x200) +#define USE_OLD_ADD_SYM (0x400) +#define PERCPU_SYMS (0x800) +#define MODSECT_UNKNOWN (0x1000) +#define MODSECT_V1 (0x2000) +#define MODSECT_V2 
(0x4000) +#define MODSECT_V3 (0x8000) +#define MODSECT_VMASK (MODSECT_V1|MODSECT_V2|MODSECT_V3) +#define NO_STRIP (0x10000) + +#define NO_LINE_NUMBERS() ((st->flags & GDB_SYMS_PATCHED) && !(kt->flags2 & KASLR)) + +#endif /* !GDB_COMMON */ + +#define ALL_MODULES (0) + +#define MAX_MOD_NAMELIST (256) +#define MAX_MOD_NAME (64) +#define MAX_MOD_SEC_NAME (64) + +#define MOD_EXT_SYMS (0x1) +#define MOD_LOAD_SYMS (0x2) +#define MOD_REMOTE (0x4) +#define MOD_KALLSYMS (0x8) +#define MOD_INITRD (0x10) +#define MOD_NOPATCH (0x20) +#define MOD_INIT (0x40) +#define MOD_DO_READNOW (0x80) + +#define SEC_FOUND (0x10000) + +struct mod_section_data { +#if defined(GDB_5_3) || defined(GDB_6_0) + struct sec *section; +#else + struct bfd_section *section; +#endif + char name[MAX_MOD_SEC_NAME]; + ulong offset; + ulong size; + int priority; + int flags; +}; + +struct load_module { + ulong mod_base; + ulong module_struct; + long mod_size; + char mod_namelist[MAX_MOD_NAMELIST]; + char mod_name[MAX_MOD_NAME]; + ulong mod_flags; + struct syment *mod_symtable; + struct syment *mod_symend; + long mod_ext_symcnt; + struct syment *mod_ext_symtable; + struct syment *mod_ext_symend; + long mod_load_symcnt; + struct syment *mod_load_symtable; + struct syment *mod_load_symend; + long mod_symalloc; + struct symbol_namespace mod_load_namespace; + ulong mod_size_of_struct; + ulong mod_text_start; + ulong mod_etext_guess; + ulong mod_rodata_start; + ulong mod_data_start; + ulong mod_bss_start; + int mod_sections; + struct mod_section_data *mod_section_data; + ulong mod_init_text_size; + ulong mod_init_module_ptr; + ulong mod_init_size; + struct syment *mod_init_symtable; + struct syment *mod_init_symend; + ulong mod_percpu; + ulong mod_percpu_size; + struct objfile *loaded_objfile; +}; + +#define IN_MODULE(A,L) \ + (((ulong)(A) >= (L)->mod_base) && ((ulong)(A) < ((L)->mod_base+(L)->mod_size))) + +#define IN_MODULE_INIT(A,L) \ + (((ulong)(A) >= (L)->mod_init_module_ptr) && ((ulong)(A) < 
((L)->mod_init_module_ptr+(L)->mod_init_size))) + +#define IN_MODULE_PERCPU(A,L) \ + (((ulong)(A) >= (L)->mod_percpu) && ((ulong)(A) < ((L)->mod_percpu+(L)->mod_percpu_size))) + +#define MODULE_PERCPU_SYMS_LOADED(L) ((L)->mod_percpu && (L)->mod_percpu_size) + +#ifndef GDB_COMMON + +#define KVADDR (0x1) +#define UVADDR (0x2) +#define PHYSADDR (0x4) +#define XENMACHADDR (0x8) +#define FILEADDR (0x10) +#define AMBIGUOUS (~0) + +#define USE_USER_PGD (UVADDR << 2) + +#define VERIFY_ADDR (0x8) /* vm_area_dump() flags -- must follow */ +#define PRINT_INODES (0x10) /* KVADDR, UVADDR, and PHYSADDR */ +#define PRINT_MM_STRUCT (0x20) +#define PRINT_VMA_STRUCTS (0x40) +#define PRINT_SINGLE_VMA (0x80) +#define PRINT_RADIX_10 (0x100) +#define PRINT_RADIX_16 (0x200) +#define PRINT_NRPAGES (0x400) + +#define MIN_PAGE_SIZE (4096) + +#define PTOB(X) ((ulonglong)(X) << machdep->pageshift) +#define BTOP(X) ((ulonglong)(X) >> machdep->pageshift) + +#define PAGESIZE() (machdep->pagesize) +#define PAGESHIFT() (machdep->pageshift) + +#define PAGEOFFSET(X) (((ulong)(X)) & machdep->pageoffset) +#define VIRTPAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) +#define PHYSPAGEBASE(X) (((physaddr_t)(X)) & (physaddr_t)machdep->pagemask) + +/* + * Sparse memory stuff + * These must follow the definitions in the kernel mmzone.h + */ +#define SECTION_SIZE_BITS() (machdep->section_size_bits) +#define MAX_PHYSMEM_BITS() (machdep->max_physmem_bits) +#define SECTIONS_SHIFT() (MAX_PHYSMEM_BITS() - SECTION_SIZE_BITS()) +#define PA_SECTION_SHIFT() (SECTION_SIZE_BITS()) +#define PFN_SECTION_SHIFT() (SECTION_SIZE_BITS() - PAGESHIFT()) +#define NR_MEM_SECTIONS() (1UL << SECTIONS_SHIFT()) +#define PAGES_PER_SECTION() (1UL << PFN_SECTION_SHIFT()) +#define PAGE_SECTION_MASK() (~(PAGES_PER_SECTION()-1)) + +#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT()) +#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT()) + +#define SECTIONS_PER_ROOT() (machdep->sections_per_root) + +/* 
CONFIG_SPARSEMEM_EXTREME */ +#define _SECTIONS_PER_ROOT_EXTREME() (PAGESIZE() / SIZE(mem_section)) +/* !CONFIG_SPARSEMEM_EXTREME */ +#define _SECTIONS_PER_ROOT() (1) + +#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT()) +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#define NR_SECTION_ROOTS() (DIV_ROUND_UP(NR_MEM_SECTIONS(), SECTIONS_PER_ROOT())) +#define SECTION_ROOT_MASK() (SECTIONS_PER_ROOT() - 1) + +struct QEMUCPUSegment { + uint32_t selector; + uint32_t limit; + uint32_t flags; + uint32_t pad; + uint64_t base; +}; + +typedef struct QEMUCPUSegment QEMUCPUSegment; + +struct QEMUCPUState { + uint32_t version; + uint32_t size; + uint64_t rax, rbx, rcx, rdx, rsi, rdi, rsp, rbp; + uint64_t r8, r9, r10, r11, r12, r13, r14, r15; + uint64_t rip, rflags; + QEMUCPUSegment cs, ds, es, fs, gs, ss; + QEMUCPUSegment ldt, tr, gdt, idt; + uint64_t cr[5]; +}; + +typedef struct QEMUCPUState QEMUCPUState; + +/* + * Machine specific stuff + */ + +#ifdef ARM +#define _32BIT_ +#define MACHINE_TYPE "ARM" + +#define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) + +#define PTOV(X) \ + ((unsigned long)(X)-(machdep->machspec->phys_base)+(machdep->kvbase)) +#define VTOP(X) \ + ((unsigned long)(X)-(machdep->kvbase)+(machdep->machspec->phys_base)) + +#define IS_VMALLOC_ADDR(X) arm_is_vmalloc_addr((ulong)(X)) + +#define DEFAULT_MODULES_VADDR (machdep->kvbase - 16 * 1024 * 1024) +#define MODULES_VADDR (machdep->machspec->modules_vaddr) +#define MODULES_END (machdep->machspec->modules_end) +#define VMALLOC_START (machdep->machspec->vmalloc_start_addr) +#define VMALLOC_END (machdep->machspec->vmalloc_end) + +#define PGDIR_SHIFT (21) +#define PTRS_PER_PTE (512) +#define PTRS_PER_PGD (2048) + +#define PGD_OFFSET(vaddr) ((vaddr) >> PGDIR_SHIFT) +#define PTE_OFFSET(vaddr) (((vaddr) >> PAGESHIFT()) & (PTRS_PER_PTE - 1)) + +#define __SWP_TYPE_SHIFT 3 +#define __SWP_TYPE_BITS 6 +#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) +#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + 
__SWP_TYPE_SHIFT) + +#define SWP_TYPE(entry) (((entry) >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) +#define SWP_OFFSET(entry) ((entry) >> __SWP_OFFSET_SHIFT) + +#define __swp_type(entry) SWP_TYPE(entry) +#define __swp_offset(entry) SWP_OFFSET(entry) + +#define TIF_SIGPENDING (2) + +#define _SECTION_SIZE_BITS 28 +#define _MAX_PHYSMEM_BITS 32 + +/*add for LPAE*/ +typedef unsigned long long u64; +typedef signed int s32; +typedef u64 pgd_t; +typedef u64 pmd_t; +typedef u64 pte_t; + +#define PMDSIZE() (PAGESIZE()) +#define LPAE_PGDIR_SHIFT (30) +#define LPAE_PMDIR_SHIFT (21) + +#define LPAE_PGD_OFFSET(vaddr) ((vaddr) >> LPAE_PGDIR_SHIFT) +#define LPAE_PMD_OFFSET(vaddr) (((vaddr) >> LPAE_PMDIR_SHIFT) & \ + ((1<<(LPAE_PGDIR_SHIFT-LPAE_PMDIR_SHIFT))-1)) + +#define _SECTION_SIZE_BITS_LPAE 28 +#define _MAX_PHYSMEM_BITS_LPAE 36 + +/* + * #define PTRS_PER_PTE 512 + * #define PTRS_PER_PMD 512 + * #define PTRS_PER_PGD 4 + * + */ + +#define LPAE_PGDIR_SIZE() 32 +#define LPAE_PGDIR_OFFSET(X) (((ulong)(X)) & (LPAE_PGDIR_SIZE() - 1)) + +#define LPAE_PMDIR_SIZE() 4096 +#define LPAE_PMDIR_OFFSET(X) (((ulong)(X)) & (LPAE_PMDIR_SIZE() - 1)) + +#define LPAE_PTEDIR_SIZE() 4096 +#define LPAE_PTEDIR_OFFSET(X) (((ulong)(X)) & (LPAE_PTEDIR_SIZE() - 1)) + +/*section size for LPAE is 2MiB*/ +#define LPAE_SECTION_PAGE_MASK (~((MEGABYTES(2))-1)) + +#define _PHYSICAL_MASK_LPAE ((1ULL << _MAX_PHYSMEM_BITS_LPAE) - 1) +#define PAGE_BASE_MASK ((u64)((s32)machdep->pagemask & _PHYSICAL_MASK_LPAE)) +#define LPAE_PAGEBASE(X) (((ulonglong)(X)) & PAGE_BASE_MASK) + +#define LPAE_VTOP(X) \ + ((unsigned long long)(unsigned long)(X) - \ + (machdep->kvbase) + (machdep->machspec->phys_base)) + +#define IS_LAST_PGD_READ_LPAE(pgd) ((pgd) == \ + machdep->machspec->last_pgd_read_lpae) +#define IS_LAST_PMD_READ_LPAE(pmd) ((pmd) == \ + machdep->machspec->last_pmd_read_lpae) +#define IS_LAST_PTBL_READ_LPAE(ptbl) ((ptbl) == \ + machdep->machspec->last_ptbl_read_lpae) + +#define FILL_PGD_LPAE(PGD, TYPE, SIZE) \ + if 
(!IS_LAST_PGD_READ_LPAE(PGD)) { \ + readmem((ulonglong)(PGD), TYPE, machdep->pgd, \ + SIZE, "pmd page", FAULT_ON_ERROR); \ + machdep->machspec->last_pgd_read_lpae \ + = (ulonglong)(PGD); \ + } +#define FILL_PMD_LPAE(PMD, TYPE, SIZE) \ + if (!IS_LAST_PMD_READ_LPAE(PMD)) { \ + readmem((ulonglong)(PMD), TYPE, machdep->pmd, \ + SIZE, "pmd page", FAULT_ON_ERROR); \ + machdep->machspec->last_pmd_read_lpae \ + = (ulonglong)(PMD); \ + } + +#define FILL_PTBL_LPAE(PTBL, TYPE, SIZE) \ + if (!IS_LAST_PTBL_READ_LPAE(PTBL)) { \ + readmem((ulonglong)(PTBL), TYPE, machdep->ptbl, \ + SIZE, "page table", FAULT_ON_ERROR); \ + machdep->machspec->last_ptbl_read_lpae \ + = (ulonglong)(PTBL); \ + } +#endif /* ARM */ + +#ifndef EM_AARCH64 +#define EM_AARCH64 183 +#endif + +#ifdef ARM64 +#define _64BIT_ +#define MACHINE_TYPE "ARM64" + +#define PTOV(X) \ + ((unsigned long)(X)-(machdep->machspec->phys_offset)+(machdep->machspec->page_offset)) + +#define VTOP(X) arm64_VTOP((ulong)(X)) + +#define USERSPACE_TOP (machdep->machspec->userspace_top) +#define PAGE_OFFSET (machdep->machspec->page_offset) +#define VMALLOC_START (machdep->machspec->vmalloc_start_addr) +#define VMALLOC_END (machdep->machspec->vmalloc_end) +#define VMEMMAP_VADDR (machdep->machspec->vmemmap_vaddr) +#define VMEMMAP_END (machdep->machspec->vmemmap_end) +#define MODULES_VADDR (machdep->machspec->modules_vaddr) +#define MODULES_END (machdep->machspec->modules_end) + +#define IS_VMALLOC_ADDR(X) arm64_IS_VMALLOC_ADDR((ulong)(X)) + +#define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) + +/* + * 48-bit physical address supported. 
+ */ +#define PHYS_MASK_SHIFT (48) +#define PHYS_MASK (((1UL) << PHYS_MASK_SHIFT) - 1) + +typedef signed int s32; + +/* + * 3-levels / 4K pages + */ +#define PTRS_PER_PGD_L3_4K (512) +#define PTRS_PER_PMD_L3_4K (512) +#define PTRS_PER_PTE_L3_4K (512) +#define PGDIR_SHIFT_L3_4K (30) +#define PGDIR_SIZE_L3_4K ((1UL) << PGDIR_SHIFT_L3_4K) +#define PGDIR_MASK_L3_4K (~(PGDIR_SIZE_L3_4K-1)) +#define PMD_SHIFT_L3_4K (21) +#define PMD_SIZE_L3_4K (1UL << PMD_SHIFT_L3_4K) +#define PMD_MASK_L3_4K (~(PMD_SIZE_L3_4K-1)) + +/* + * 4-levels / 4K pages + * 48-bit VA + */ +#define PTRS_PER_PGD_L4_4K ((1UL) << (48 - 39)) +#define PTRS_PER_PUD_L4_4K (512) +#define PTRS_PER_PMD_L4_4K (512) +#define PTRS_PER_PTE_L4_4K (512) +#define PGDIR_SHIFT_L4_4K (39) +#define PGDIR_SIZE_L4_4K ((1UL) << PGDIR_SHIFT_L4_4K) +#define PGDIR_MASK_L4_4K (~(PGDIR_SIZE_L4_4K-1)) +#define PUD_SHIFT_L4_4K (30) +#define PUD_SIZE_L4_4K ((1UL) << PUD_SHIFT_L4_4K) +#define PUD_MASK_L4_4K (~(PUD_SIZE_L4_4K-1)) +#define PMD_SHIFT_L4_4K (21) +#define PMD_SIZE_L4_4K (1UL << PMD_SHIFT_L4_4K) +#define PMD_MASK_L4_4K (~(PMD_SIZE_L4_4K-1)) + +#define PGDIR_SIZE_48VA (1UL << ((48 - 39) + 3)) +#define PGDIR_MASK_48VA (~(PGDIR_SIZE_48VA - 1)) +#define PGDIR_OFFSET_48VA(X) (((ulong)(X)) & (PGDIR_SIZE_48VA - 1)) + +/* + * 3-levels / 64K pages + */ +#define PTRS_PER_PGD_L3_64K (64) +#define PTRS_PER_PMD_L3_64K (8192) +#define PTRS_PER_PTE_L3_64K (8192) +#define PGDIR_SHIFT_L3_64K (42) +#define PGDIR_SIZE_L3_64K ((1UL) << PGDIR_SHIFT_L3_64K) +#define PGDIR_MASK_L3_64K (~(PGDIR_SIZE_L3_64K-1)) +#define PMD_SHIFT_L3_64K (29) +#define PMD_SIZE_L3_64K (1UL << PMD_SHIFT_L3_64K) +#define PMD_MASK_L3_64K (~(PMD_SIZE_L3_64K-1)) +#define PGDIR_OFFSET_L3_64K(X) (((ulong)(X)) & ((machdep->ptrs_per_pgd * 8) - 1)) + +/* + * 2-levels / 64K pages + */ +#define PTRS_PER_PGD_L2_64K (8192) +#define PTRS_PER_PTE_L2_64K (8192) +#define PGDIR_SHIFT_L2_64K (29) +#define PGDIR_SIZE_L2_64K ((1UL) << PGDIR_SHIFT_L2_64K) +#define PGDIR_MASK_L2_64K 
(~(PGDIR_SIZE_L2_64K-1)) + +/* + * Software defined PTE bits definition. + * (arch/arm64/include/asm/pgtable.h) + */ +#define PTE_VALID (1UL << 0) +#define PTE_DIRTY (1UL << 55) +#define PTE_SPECIAL (1UL << 56) + +/* + * Level 3 descriptor (PTE). + * (arch/arm64/include/asm/pgtable-hwdef.h) + */ +#define PTE_TYPE_MASK (3UL << 0) +#define PTE_TYPE_FAULT (0UL << 0) +#define PTE_TYPE_PAGE (3UL << 0) +#define PTE_USER (1UL << 6) /* AP[1] */ +#define PTE_RDONLY (1UL << 7) /* AP[2] */ +#define PTE_SHARED (3UL << 8) /* SH[1:0], inner shareable */ +#define PTE_AF (1UL << 10) /* Access Flag */ +#define PTE_NG (1UL << 11) /* nG */ +#define PTE_PXN (1UL << 53) /* Privileged XN */ +#define PTE_UXN (1UL << 54) /* User XN */ + +#define __swp_type(x) arm64_swp_type(x) +#define __swp_offset(x) arm64_swp_offset(x) +#define SWP_TYPE(x) __swp_type(x) +#define SWP_OFFSET(x) __swp_offset(x) + +#define KSYMS_START (0x1) +#define PHYS_OFFSET (0x2) +#define VM_L2_64K (0x4) +#define VM_L3_64K (0x8) +#define VM_L3_4K (0x10) +#define KDUMP_ENABLED (0x20) +#define IRQ_STACKS (0x40) +#define NEW_VMEMMAP (0x80) +#define VM_L4_4K (0x100) +#define UNW_4_14 (0x200) + +/* + * Get kimage_voffset from /dev/crash + */ +#define DEV_CRASH_ARCH_DATA _IOR('c', 1, unsigned long) + +/* + * sources: Documentation/arm64/memory.txt + * arch/arm64/include/asm/memory.h + * arch/arm64/include/asm/pgtable.h + */ +#define ARM64_VA_START ((0xffffffffffffffffUL) \ + << machdep->machspec->VA_BITS) +#define _VA_START(va) ((0xffffffffffffffffUL) - \ + ((1UL) << ((va) - 1)) + 1) +#define TEXT_OFFSET_MASK (~((MEGABYTES(2UL))-1)) + +#define ARM64_PAGE_OFFSET ((0xffffffffffffffffUL) \ + << (machdep->machspec->VA_BITS - 1)) +#define ARM64_PAGE_OFFSET_ACTUAL ((0xffffffffffffffffUL) \ + - ((1UL) << machdep->machspec->VA_BITS_ACTUAL) + 1) + +#define ARM64_USERSPACE_TOP ((1UL) << machdep->machspec->VA_BITS) +#define ARM64_USERSPACE_TOP_ACTUAL ((1UL) << machdep->machspec->VA_BITS_ACTUAL) + +/* only used for v4.6 or later */ 
+#define ARM64_MODULES_VSIZE MEGABYTES(128) +#define ARM64_KASAN_SHADOW_SIZE (1UL << (machdep->machspec->VA_BITS - 3)) + +/* + * The following 3 definitions are the original values, but are obsolete + * for 3.17 and later kernels because they are now build-time calculations. + * They all depend on the kernel's new VMEMMAP_SIZE value, which is dependent + * upon the size of struct page. Accordingly, arm64_calc_virtual_memory_ranges() + * determines their values at POST_GDB time. + */ +#define ARM64_VMALLOC_END (ARM64_PAGE_OFFSET - 0x400000000UL - KILOBYTES(64) - 1) +#define ARM64_VMEMMAP_VADDR ((ARM64_VMALLOC_END+1) + KILOBYTES(64)) +#define ARM64_VMEMMAP_END (ARM64_VMEMMAP_VADDR + GIGABYTES(8UL) - 1) + +#define ARM64_STACK_SIZE (16384) +#define ARM64_IRQ_STACK_SIZE ARM64_STACK_SIZE + +#define _SECTION_SIZE_BITS 30 +#define _MAX_PHYSMEM_BITS 40 +#define _MAX_PHYSMEM_BITS_3_17 48 +#define _MAX_PHYSMEM_BITS_52 52 + +typedef unsigned long long __u64; +typedef unsigned long long u64; + +struct arm64_user_pt_regs { + __u64 regs[31]; + __u64 sp; + __u64 pc; + __u64 pstate; +}; + +struct arm64_pt_regs { + union { + struct arm64_user_pt_regs user_regs; + struct { + u64 regs[31]; + u64 sp; + u64 pc; + u64 pstate; + }; + }; + u64 orig_x0; + u64 syscallno; +}; + +/* AArch32 CPSR bits */ +#define PSR_MODE32_BIT 0x00000010 + +#define TIF_SIGPENDING (0) +#define display_idt_table() \ + error(FATAL, "-d option is not applicable to ARM64 architecture\n") + +struct machine_specific { + ulong flags; + ulong userspace_top; + ulong page_offset; + ulong vmalloc_start_addr; + ulong vmalloc_end; + ulong vmemmap_vaddr; + ulong vmemmap_end; + ulong modules_vaddr; + ulong modules_end; + ulong phys_offset; + ulong __exception_text_start; + ulong __exception_text_end; + struct arm64_pt_regs *panic_task_regs; + ulong PTE_PROT_NONE; + ulong PTE_FILE; + ulong VA_BITS; + ulong __SWP_TYPE_BITS; + ulong __SWP_TYPE_SHIFT; + ulong __SWP_TYPE_MASK; + ulong __SWP_OFFSET_BITS; + ulong __SWP_OFFSET_SHIFT; 
+ ulong __SWP_OFFSET_MASK; + ulong crash_kexec_start; + ulong crash_kexec_end; + ulong crash_save_cpu_start; + ulong crash_save_cpu_end; + ulong kernel_flags; + ulong irq_stack_size; + ulong *irq_stacks; + char *irq_stackbuf; + ulong __irqentry_text_start; + ulong __irqentry_text_end; + /* for exception vector code */ + ulong exp_entry1_start; + ulong exp_entry1_end; + ulong exp_entry2_start; + ulong exp_entry2_end; + /* only needed for v4.6 or later kernel */ + ulong kimage_voffset; + ulong kimage_text; + ulong kimage_end; + ulong user_eframe_offset; + /* for v4.14 or later */ + ulong kern_eframe_offset; + ulong machine_kexec_start; + ulong machine_kexec_end; + ulong VA_BITS_ACTUAL; + ulong CONFIG_ARM64_VA_BITS; + ulong VA_START; +}; + +struct arm64_stackframe { + unsigned long fp; + unsigned long sp; + unsigned long pc; +}; + +#endif /* ARM64 */ + +#ifdef MIPS +#define _32BIT_ +#define MACHINE_TYPE "MIPS" + +#define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) + +#define PTOV(X) ((unsigned long)(X) + 0x80000000lu) +#define VTOP(X) ((unsigned long)(X) & 0x1ffffffflu) + +#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) + +#define DEFAULT_MODULES_VADDR (machdep->kvbase - 16 * 1024 * 1024) +#define MODULES_VADDR (machdep->machspec->modules_vaddr) +#define MODULES_END (machdep->machspec->modules_end) +#define VMALLOC_START (machdep->machspec->vmalloc_start_addr) +#define VMALLOC_END (machdep->machspec->vmalloc_end) + +#define __SWP_TYPE_SHIFT 3 +#define __SWP_TYPE_BITS 6 +#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) +#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) + +#define SWP_TYPE(entry) (((entry) >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) +#define SWP_OFFSET(entry) ((entry) >> __SWP_OFFSET_SHIFT) + +#define __swp_type(entry) SWP_TYPE(entry) +#define __swp_offset(entry) SWP_OFFSET(entry) + +#define TIF_SIGPENDING (2) + +#define _SECTION_SIZE_BITS 26 +#define _MAX_PHYSMEM_BITS 32 +#endif /* MIPS */ + 
+#ifdef X86 +#define _32BIT_ +#define MACHINE_TYPE "X86" +#define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) +#define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) +#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) +#define KVBASE_MASK (0x1ffffff) + +#define PGDIR_SHIFT_2LEVEL (22) +#define PTRS_PER_PTE_2LEVEL (1024) +#define PTRS_PER_PGD_2LEVEL (1024) + +#define PGDIR_SHIFT_3LEVEL (30) +#define PTRS_PER_PTE_3LEVEL (512) +#define PTRS_PER_PGD_3LEVEL (4) +#define PMD_SHIFT (21) /* only used by PAE translators */ +#define PTRS_PER_PMD (512) /* only used by PAE translators */ + +#define _PAGE_PRESENT 0x001 +#define _PAGE_RW 0x002 +#define _PAGE_USER 0x004 +#define _PAGE_PWT 0x008 +#define _PAGE_PCD 0x010 +#define _PAGE_ACCESSED 0x020 +#define _PAGE_DIRTY 0x040 +#define _PAGE_4M 0x080 /* 4 MB page, Pentium+, if present.. */ +#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */ +#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */ +#define _PAGE_PROTNONE (machdep->machspec->page_protnone) +#define _PAGE_NX (0x8000000000000000ULL) + +#define NONPAE_PAGEBASE(X) (((unsigned long)(X)) & (unsigned long)machdep->pagemask) +#define NX_BIT_MASK (0x7fffffffffffffffULL) +#define PAE_PAGEBASE(X) (((unsigned long long)(X)) & ((unsigned long long)machdep->pagemask) & NX_BIT_MASK) + +#define SWP_TYPE(entry) (((entry) >> 1) & 0x3f) +#define SWP_OFFSET(entry) ((entry) >> 8) +#define __swp_type_PAE(entry) (((entry) >> 32) & 0x1f) +#define __swp_type_nonPAE(entry) (((entry) >> 1) & 0x1f) +#define __swp_offset_PAE(entry) (((entry) >> 32) >> 5) +#define __swp_offset_nonPAE(entry) ((entry) >> 8) +#define __swp_type(entry) (machdep->flags & PAE ? \ + __swp_type_PAE(entry) : __swp_type_nonPAE(entry)) +#define __swp_offset(entry) (machdep->flags & PAE ? 
\ + __swp_offset_PAE(entry) : __swp_offset_nonPAE(entry)) + +#define TIF_SIGPENDING (2) + +// CONFIG_X86_PAE +#define _SECTION_SIZE_BITS_PAE_ORIG 30 +#define _SECTION_SIZE_BITS_PAE_2_6_26 29 +#define _MAX_PHYSMEM_BITS_PAE 36 + +// !CONFIG_X86_PAE +#define _SECTION_SIZE_BITS 26 +#define _MAX_PHYSMEM_BITS 32 + +#define IS_LAST_PMD_READ_PAE(pmd) ((ulong)(pmd) == machdep->machspec->last_pmd_read_PAE) +#define IS_LAST_PTBL_READ_PAE(ptbl) ((ulong)(ptbl) == machdep->machspec->last_ptbl_read_PAE) + +#define FILL_PMD_PAE(PMD, TYPE, SIZE) \ + if (!IS_LAST_PMD_READ_PAE(PMD)) { \ + readmem((ulonglong)(PMD), TYPE, machdep->pmd, \ + SIZE, "pmd page", FAULT_ON_ERROR); \ + machdep->machspec->last_pmd_read_PAE = (ulonglong)(PMD); \ + } + +#define FILL_PTBL_PAE(PTBL, TYPE, SIZE) \ + if (!IS_LAST_PTBL_READ_PAE(PTBL)) { \ + readmem((ulonglong)(PTBL), TYPE, machdep->ptbl, \ + SIZE, "page table", FAULT_ON_ERROR); \ + machdep->machspec->last_ptbl_read_PAE = (ulonglong)(PTBL); \ + } + +#endif /* X86 */ + +#ifdef X86_64 +#define _64BIT_ +#define MACHINE_TYPE "X86_64" + +#define USERSPACE_TOP (machdep->machspec->userspace_top) +#define PAGE_OFFSET (machdep->machspec->page_offset) +#define VMALLOC_START (machdep->machspec->vmalloc_start_addr) +#define VMALLOC_END (machdep->machspec->vmalloc_end) +#define VMEMMAP_VADDR (machdep->machspec->vmemmap_vaddr) +#define VMEMMAP_END (machdep->machspec->vmemmap_end) +#define MODULES_VADDR (machdep->machspec->modules_vaddr) +#define MODULES_END (machdep->machspec->modules_end) + +#define __START_KERNEL_map 0xffffffff80000000UL +#define MODULES_LEN (MODULES_END - MODULES_VADDR) + +#define USERSPACE_TOP_ORIG 0x0000008000000000 +#define PAGE_OFFSET_ORIG 0x0000010000000000 +#define VMALLOC_START_ADDR_ORIG 0xffffff0000000000 +#define VMALLOC_END_ORIG 0xffffff7fffffffff +#define MODULES_VADDR_ORIG 0xffffffffa0000000 +#define MODULES_END_ORIG 0xffffffffafffffff + +#define USERSPACE_TOP_2_6_11 0x0000800000000000 +#define PAGE_OFFSET_2_6_11 0xffff810000000000 
+#define VMALLOC_START_ADDR_2_6_11 0xffffc20000000000 +#define VMALLOC_END_2_6_11 0xffffe1ffffffffff +#define MODULES_VADDR_2_6_11 0xffffffff88000000 +#define MODULES_END_2_6_11 0xfffffffffff00000 + +#define VMEMMAP_VADDR_2_6_24 0xffffe20000000000 +#define VMEMMAP_END_2_6_24 0xffffe2ffffffffff + +#define MODULES_VADDR_2_6_26 0xffffffffa0000000 + +#define PAGE_OFFSET_2_6_27 0xffff880000000000 +#define MODULES_END_2_6_27 0xffffffffff000000 + +#define USERSPACE_TOP_XEN 0x0000800000000000 +#define PAGE_OFFSET_XEN 0xffff880000000000 +#define VMALLOC_START_ADDR_XEN 0xffffc20000000000 +#define VMALLOC_END_XEN 0xffffe1ffffffffff +#define MODULES_VADDR_XEN 0xffffffff88000000 +#define MODULES_END_XEN 0xfffffffffff00000 + +#define USERSPACE_TOP_XEN_RHEL4 0x0000008000000000 +#define PAGE_OFFSET_XEN_RHEL4 0xffffff8000000000 +#define VMALLOC_START_ADDR_XEN_RHEL4 0xffffff0000000000 +#define VMALLOC_END_XEN_RHEL4 0xffffff7fffffffff +#define MODULES_VADDR_XEN_RHEL4 0xffffffffa0000000 +#define MODULES_END_XEN_RHEL4 0xffffffffafffffff + +#define VMALLOC_START_ADDR_2_6_31 0xffffc90000000000 +#define VMALLOC_END_2_6_31 0xffffe8ffffffffff +#define VMEMMAP_VADDR_2_6_31 0xffffea0000000000 +#define VMEMMAP_END_2_6_31 0xffffeaffffffffff +#define MODULES_VADDR_2_6_31 0xffffffffa0000000 +#define MODULES_END_2_6_31 0xffffffffff000000 + +#define USERSPACE_TOP_5LEVEL 0x0100000000000000 +#define PAGE_OFFSET_5LEVEL 0xff10000000000000 +#define VMALLOC_START_ADDR_5LEVEL 0xffa0000000000000 +#define VMALLOC_END_5LEVEL 0xffd1ffffffffffff +#define MODULES_VADDR_5LEVEL 0xffffffffa0000000 +#define MODULES_END_5LEVEL 0xffffffffff5fffff +#define VMEMMAP_VADDR_5LEVEL 0xffd4000000000000 +#define VMEMMAP_END_5LEVEL 0xffd5ffffffffffff + +#define PAGE_OFFSET_4LEVEL_4_20 0xffff888000000000 +#define PAGE_OFFSET_5LEVEL_4_20 0xff11000000000000 + +#define VSYSCALL_START 0xffffffffff600000 +#define VSYSCALL_END 0xffffffffff601000 + +#define CPU_ENTRY_AREA_START 0xfffffe0000000000 +#define CPU_ENTRY_AREA_END 
0xfffffe7fffffffff + +#define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) +#define VTOP(X) x86_64_VTOP((ulong)(X)) +#define IS_VMALLOC_ADDR(X) x86_64_IS_VMALLOC_ADDR((ulong)(X)) + +/* + * the default page table level for x86_64: + * 4 level page tables + */ +#define PGDIR_SHIFT 39 +#define PTRS_PER_PGD 512 +#define PUD_SHIFT 30 +#define PTRS_PER_PUD 512 +#define PMD_SHIFT 21 +#define PTRS_PER_PMD 512 +#define PTRS_PER_PTE 512 + +/* 5 level page */ +#define PGDIR_SHIFT_5LEVEL 48 +#define PTRS_PER_PGD_5LEVEL 512 +#define P4D_SHIFT 39 +#define PTRS_PER_P4D 512 + +#define __PGDIR_SHIFT (machdep->machspec->pgdir_shift) +#define __PTRS_PER_PGD (machdep->machspec->ptrs_per_pgd) + +#define pgd_index(address) (((address) >> __PGDIR_SHIFT) & (__PTRS_PER_PGD-1)) +#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D - 1)) +#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) +#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) +#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + +#define FILL_TOP_PGD() \ + if (!(pc->flags & RUNTIME) || ACTIVE()) { \ + FILL_PGD(vt->kernel_pgd[0], KVADDR, PAGESIZE()); \ + } + +#define FILL_TOP_PGD_HYPER() \ + unsigned long idle_pg_table = symbol_exists("idle_pg_table_4") ? \ + symbol_value("idle_pg_table_4") : \ + symbol_value("idle_pg_table"); \ + FILL_PGD(idle_pg_table, KVADDR, PAGESIZE()); + +#define IS_LAST_P4D_READ(p4d) ((ulong)(p4d) == machdep->machspec->last_p4d_read) + +#define FILL_P4D(P4D, TYPE, SIZE) \ + if (!IS_LAST_P4D_READ(P4D)) { \ + readmem((ulonglong)((ulong)(P4D)), TYPE, machdep->machspec->p4d, \ + SIZE, "p4d page", FAULT_ON_ERROR); \ + machdep->machspec->last_p4d_read = (ulong)(P4D); \ + } + +/* + * PHYSICAL_PAGE_MASK changed (enlarged) between 2.4 and 2.6, so + * for safety, use the 2.6 values to generate it. 
+ */ +#define __PHYSICAL_MASK_SHIFT_XEN 40 +#define __PHYSICAL_MASK_SHIFT_2_6 46 +#define __PHYSICAL_MASK_SHIFT_5LEVEL 52 +#define __PHYSICAL_MASK_SHIFT (machdep->machspec->physical_mask_shift) +#define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1) +#define __VIRTUAL_MASK_SHIFT 48 +#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) +#define PAGE_SHIFT 12 +#define PAGE_SIZE (1UL << PAGE_SHIFT) +#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK ) + +#define _PAGE_BIT_NX 63 +#define _PAGE_PRESENT 0x001 +#define _PAGE_RW 0x002 +#define _PAGE_USER 0x004 +#define _PAGE_PWT 0x008 +#define _PAGE_PCD 0x010 +#define _PAGE_ACCESSED 0x020 +#define _PAGE_DIRTY 0x040 +#define _PAGE_PSE 0x080 /* 2MB page */ +#define _PAGE_FILE 0x040 /* set:pagecache, unset:swap */ +#define _PAGE_GLOBAL 0x100 /* Global TLB entry */ +#define _PAGE_PROTNONE (machdep->machspec->page_protnone) +#define _PAGE_NX (1UL<<_PAGE_BIT_NX) + +#define SWP_TYPE(entry) (((entry) >> 1) & 0x3f) +#define SWP_OFFSET(entry) ((entry) >> 8) +#define __swp_type(entry) x86_64_swp_type(entry) +#define __swp_offset(entry) x86_64_swp_offset(entry) + +#define TIF_SIGPENDING (2) + +#define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) + +#define _CPU_PDA_READ2(CPU, BUFFER) \ + ((readmem(symbol_value("_cpu_pda"), \ + KVADDR, &cpu_pda_addr, sizeof(unsigned long), \ + "_cpu_pda addr", RETURN_ON_ERROR)) && \ + (readmem(cpu_pda_addr + ((CPU) * sizeof(void *)), \ + KVADDR, &cpu_pda_addr, sizeof(unsigned long), \ + "_cpu_pda addr", RETURN_ON_ERROR)) && \ + (cpu_pda_addr) && \ + (readmem(cpu_pda_addr, KVADDR, (BUFFER), SIZE(x8664_pda), \ + "cpu_pda entry", RETURN_ON_ERROR))) + +#define _CPU_PDA_READ(CPU, BUFFER) \ + ((STRNEQ("_cpu_pda", closest_symbol((symbol_value("_cpu_pda") + \ + ((CPU) * sizeof(unsigned long)))))) && \ + (readmem(symbol_value("_cpu_pda") + ((CPU) * sizeof(void *)), \ + KVADDR, &cpu_pda_addr, sizeof(unsigned long), \ + "_cpu_pda addr", RETURN_ON_ERROR)) && \ + 
(readmem(cpu_pda_addr, KVADDR, (BUFFER), SIZE(x8664_pda), \ + "cpu_pda entry", RETURN_ON_ERROR))) + +#define CPU_PDA_READ(CPU, BUFFER) \ + (STRNEQ("cpu_pda", closest_symbol((symbol_value("cpu_pda") + \ + ((CPU) * SIZE(x8664_pda))))) && \ + readmem(symbol_value("cpu_pda") + ((CPU) * SIZE(x8664_pda)), \ + KVADDR, (BUFFER), SIZE(x8664_pda), "cpu_pda entry", \ + RETURN_ON_ERROR)) + +#define VALID_LEVEL4_PGT_ADDR(X) \ + (((X) == VIRTPAGEBASE(X)) && IS_KVADDR(X) && !IS_VMALLOC_ADDR(X)) + +#define _SECTION_SIZE_BITS 27 +#define _MAX_PHYSMEM_BITS 40 +#define _MAX_PHYSMEM_BITS_2_6_26 44 +#define _MAX_PHYSMEM_BITS_2_6_31 46 +#define _MAX_PHYSMEM_BITS_5LEVEL 52 + +#endif /* X86_64 */ + +#ifdef ALPHA +#define _64BIT_ +#define MACHINE_TYPE "ALPHA" + +#define PAGEBASE(X) (((unsigned long)(X)) & (unsigned long)machdep->pagemask) + +#define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) +#define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) +#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) +#define KSEG_BASE_48_BIT (0xffff800000000000) +#define KSEG_BASE (0xfffffc0000000000) +#define _PFN_MASK (0xFFFFFFFF00000000) +#define VMALLOC_START (0xFFFFFE0000000000) +#define MIN_SYMBOL_VALUE (KSEG_BASE_48_BIT) + +#define PGDIR_SHIFT (PAGESHIFT() + 2*(PAGESHIFT()-3)) +#define PMD_SHIFT (PAGESHIFT() + (PAGESHIFT()-3)) +#define PTRS_PER_PAGE (1024) + +#define PTRS_PER_PGD (1UL << (PAGESHIFT()-3)) + +/* + * OSF/1 PAL-code-imposed page table bits + */ +#define _PAGE_VALID 0x0001 +#define _PAGE_FOR 0x0002 /* used for page protection (fault on read) */ +#define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */ +#define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */ +#define _PAGE_ASM 0x0010 +#define _PAGE_KRE 0x0100 /* xxx - see below on the "accessed" bit */ +#define _PAGE_URE 0x0200 /* xxx */ +#define _PAGE_KWE 0x1000 /* used to do the dirty bit in software */ +#define _PAGE_UWE 0x2000 /* used to do the dirty bit in software */ + 
+/* .. and these are ours ... */ +#define _PAGE_DIRTY 0x20000 +#define _PAGE_ACCESSED 0x40000 + +#define SWP_TYPE(entry) (((entry) >> 32) & 0xff) +#define SWP_OFFSET(entry) ((entry) >> 40) +#define __swp_type(entry) SWP_TYPE(entry) +#define __swp_offset(entry) SWP_OFFSET(entry) + +#define TIF_SIGPENDING (2) + +#endif /* ALPHA */ + +#ifdef PPC +#define _32BIT_ +#define MACHINE_TYPE "PPC" + +#define PAGEBASE(X) ((X) & machdep->pagemask) + +#define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) +#define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) +#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) + +/* Holds the platform specific info for page translation */ +struct machine_specific { + char *platform; + + /* page address translation bits */ + int pte_size; + int pte_rpn_shift; + + /* page flags */ + ulong _page_present; + ulong _page_user; + ulong _page_rw; + ulong _page_guarded; + ulong _page_coherent; + ulong _page_no_cache; + ulong _page_writethru; + ulong _page_dirty; + ulong _page_accessed; + ulong _page_hwwrite; + ulong _page_shared; + ulong _page_k_rw; + + /* platform special vtop */ + int (*vtop_special)(ulong vaddr, physaddr_t *paddr, int verbose); + void *mmu_special; +}; + +/* machdep flags for ppc32 specific */ +#define IS_PAE() (machdep->flags & PAE) +#define IS_BOOKE() (machdep->flags & CPU_BOOKE) +/* Page translation bits */ +#define PPC_PLATFORM (machdep->machspec->platform) +#define PTE_SIZE (machdep->machspec->pte_size) +#define PTE_RPN_SHIFT (machdep->machspec->pte_rpn_shift) +#define PAGE_SHIFT (12) +#define PTE_T_LOG2 (ffs(PTE_SIZE) - 1) +#define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2) +#define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT) +#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) +#define PTRS_PER_PTE (1 << PTE_SHIFT) +/* special vtop */ +#define VTOP_SPECIAL (machdep->machspec->vtop_special) +#define MMU_SPECIAL (machdep->machspec->mmu_special) + +/* PFN shifts */ +#define BOOKE3E_PTE_RPN_SHIFT (24) + +/* PAGE flags 
*/ +#define _PAGE_PRESENT (machdep->machspec->_page_present) /* software: pte contains a translation */ +#define _PAGE_USER (machdep->machspec->_page_user) /* matches one of the PP bits */ +#define _PAGE_RW (machdep->machspec->_page_rw) /* software: user write access allowed */ +#define _PAGE_GUARDED (machdep->machspec->_page_guarded) +#define _PAGE_COHERENT (machdep->machspec->_page_coherent /* M: enforce memory coherence (SMP systems) */) +#define _PAGE_NO_CACHE (machdep->machspec->_page_no_cache) /* I: cache inhibit */ +#define _PAGE_WRITETHRU (machdep->machspec->_page_writethru) /* W: cache write-through */ +#define _PAGE_DIRTY (machdep->machspec->_page_dirty) /* C: page changed */ +#define _PAGE_ACCESSED (machdep->machspec->_page_accessed) /* R: page referenced */ +#define _PAGE_HWWRITE (machdep->machspec->_page_hwwrite) /* software: _PAGE_RW & _PAGE_DIRTY */ +#define _PAGE_SHARED (machdep->machspec->_page_shared) +#define _PAGE_K_RW (machdep->machspec->_page_k_rw) /* privilege only write access allowed */ + +/* Default values for PAGE flags */ +#define DEFAULT_PAGE_PRESENT 0x001 +#define DEFAULT_PAGE_USER 0x002 +#define DEFAULT_PAGE_RW 0x004 +#define DEFAULT_PAGE_GUARDED 0x008 +#define DEFAULT_PAGE_COHERENT 0x010 +#define DEFAULT_PAGE_NO_CACHE 0x020 +#define DEFAULT_PAGE_WRITETHRU 0x040 +#define DEFAULT_PAGE_DIRTY 0x080 +#define DEFAULT_PAGE_ACCESSED 0x100 +#define DEFAULT_PAGE_HWWRITE 0x200 +#define DEFAULT_PAGE_SHARED 0 + +/* PPC44x PAGE flags: Values from kernel asm/pte-44x.h */ +#define PPC44x_PAGE_PRESENT 0x001 +#define PPC44x_PAGE_RW 0x002 +#define PPC44x_PAGE_ACCESSED 0x008 +#define PPC44x_PAGE_DIRTY 0x010 +#define PPC44x_PAGE_USER 0x040 +#define PPC44x_PAGE_GUARDED 0x100 +#define PPC44x_PAGE_COHERENT 0x200 +#define PPC44x_PAGE_NO_CACHE 0x400 +#define PPC44x_PAGE_WRITETHRU 0x800 +#define PPC44x_PAGE_HWWRITE 0 +#define PPC44x_PAGE_SHARED 0 + +/* BOOK3E */ +#define BOOK3E_PAGE_PRESENT 0x000001 +#define BOOK3E_PAGE_BAP_SR 0x000004 +#define 
BOOK3E_PAGE_BAP_UR 0x000008 /* User Readable */ +#define BOOK3E_PAGE_BAP_SW 0x000010 +#define BOOK3E_PAGE_BAP_UW 0x000020 /* User Writable */ +#define BOOK3E_PAGE_DIRTY 0x001000 +#define BOOK3E_PAGE_ACCESSED 0x040000 +#define BOOK3E_PAGE_GUARDED 0x100000 +#define BOOK3E_PAGE_COHERENT 0x200000 +#define BOOK3E_PAGE_NO_CACHE 0x400000 +#define BOOK3E_PAGE_WRITETHRU 0x800000 +#define BOOK3E_PAGE_HWWRITE 0 +#define BOOK3E_PAGE_SHARED 0 +#define BOOK3E_PAGE_USER (BOOK3E_PAGE_BAP_SR | BOOK3E_PAGE_BAP_UR) +#define BOOK3E_PAGE_RW (BOOK3E_PAGE_BAP_SW | BOOK3E_PAGE_BAP_UW) +#define BOOK3E_PAGE_KERNEL_RW (BOOK3E_PAGE_BAP_SW | BOOK3E_PAGE_BAP_SR | BOOK3E_PAGE_DIRTY) + +/* FSL BOOKE */ +#define FSL_BOOKE_PAGE_PRESENT 0x00001 +#define FSL_BOOKE_PAGE_USER 0x00002 +#define FSL_BOOKE_PAGE_RW 0x00004 +#define FSL_BOOKE_PAGE_DIRTY 0x00008 +#define FSL_BOOKE_PAGE_ACCESSED 0x00020 +#define FSL_BOOKE_PAGE_GUARDED 0x00080 +#define FSL_BOOKE_PAGE_COHERENT 0x00100 +#define FSL_BOOKE_PAGE_NO_CACHE 0x00200 +#define FSL_BOOKE_PAGE_WRITETHRU 0x00400 +#define FSL_BOOKE_PAGE_HWWRITE 0 +#define FSL_BOOKE_PAGE_SHARED 0 + +#define SWP_TYPE(entry) (((entry) >> 1) & 0x7f) +#define SWP_OFFSET(entry) ((entry) >> 8) +#define __swp_type(entry) SWP_TYPE(entry) +#define __swp_offset(entry) SWP_OFFSET(entry) + +#define TIF_SIGPENDING (2) + +#define _SECTION_SIZE_BITS 24 +#define _MAX_PHYSMEM_BITS 44 + +#define STACK_FRAME_OVERHEAD 16 +#define STACK_FRAME_LR_SAVE (sizeof(ulong)) +#define STACK_FRAME_MARKER (2 * sizeof(ulong)) +#define STACK_FRAME_REGS_MARKER 0x72656773 +#define PPC_STACK_SIZE 8192 + +#endif /* PPC */ + +#ifdef IA64 +#define _64BIT_ +#define MACHINE_TYPE "IA64" + +#define PAGEBASE(X) (((unsigned long)(X)) & (unsigned long)machdep->pagemask) + +#define REGION_SHIFT (61) +#define VADDR_REGION(X) ((ulong)(X) >> REGION_SHIFT) + +#define KERNEL_CACHED_REGION (7) +#define KERNEL_UNCACHED_REGION (6) +#define KERNEL_VMALLOC_REGION (5) +#define USER_STACK_REGION (4) +#define USER_DATA_REGION (3) 
+#define USER_TEXT_REGION (2) +#define USER_SHMEM_REGION (1) +#define USER_IA32_EMUL_REGION (0) + +#define KERNEL_VMALLOC_BASE ((ulong)KERNEL_VMALLOC_REGION << REGION_SHIFT) +#define KERNEL_UNCACHED_BASE ((ulong)KERNEL_UNCACHED_REGION << REGION_SHIFT) +#define KERNEL_CACHED_BASE ((ulong)KERNEL_CACHED_REGION << REGION_SHIFT) + +#define _SECTION_SIZE_BITS 30 +#define _MAX_PHYSMEM_BITS 50 + +/* + * As of 2.6, these are no longer straight forward. + */ +#define PTOV(X) ia64_PTOV((ulong)(X)) +#define VTOP(X) ia64_VTOP((ulong)(X)) +#define IS_VMALLOC_ADDR(X) ia64_IS_VMALLOC_ADDR((ulong)(X)) + +#define SWITCH_STACK_ADDR(X) (ia64_get_switch_stack((ulong)(X))) + +#define __IA64_UL(x) ((unsigned long)(x)) +#define IA64_MAX_PHYS_BITS (50) /* max # of phys address bits (architected) */ + +/* + * How many pointers will a page table level hold expressed in shift + */ +#define PTRS_PER_PTD_SHIFT (PAGESHIFT()-3) + +/* + * Definitions for fourth level: + */ +#define PTRS_PER_PTE (__IA64_UL(1) << (PTRS_PER_PTD_SHIFT)) + +/* + * Definitions for third level: + * + * PMD_SHIFT determines the size of the area a third-level page table + * can map. + */ +#define PMD_SHIFT (PAGESHIFT() + (PTRS_PER_PTD_SHIFT)) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) +#define PTRS_PER_PMD (1UL << (PTRS_PER_PTD_SHIFT)) + +/* + * PUD_SHIFT determines the size of the area a second-level page table + * can map + */ +#define PUD_SHIFT (PMD_SHIFT + (PTRS_PER_PTD_SHIFT)) +#define PUD_SIZE (1UL << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE-1)) +#define PTRS_PER_PUD (1UL << (PTRS_PER_PTD_SHIFT)) + +/* + * Definitions for first level: + * + * PGDIR_SHIFT determines what a first-level page table entry can map. 
+ */ + +#define PGDIR_SHIFT_4L (PUD_SHIFT + (PTRS_PER_PTD_SHIFT)) +#define PGDIR_SHIFT_3L (PMD_SHIFT + (PTRS_PER_PTD_SHIFT)) +/* Turns out 4L & 3L PGDIR_SHIFT are the same (for now) */ +#define PGDIR_SHIFT PGDIR_SHIFT_4L +#define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) +#define PTRS_PER_PGD_SHIFT PTRS_PER_PTD_SHIFT +#define PTRS_PER_PGD (1UL << PTRS_PER_PGD_SHIFT) +#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */ +#define FIRST_USER_ADDRESS 0 + +/* + * First, define the various bits in a PTE. Note that the PTE format + * matches the VHPT short format, the firt doubleword of the VHPD long + * format, and the first doubleword of the TLB insertion format. + */ +#define _PAGE_P (1 << 0) /* page present bit */ +#define _PAGE_MA_WB (0x0 << 2) /* write back memory attribute */ +#define _PAGE_MA_UC (0x4 << 2) /* uncacheable memory attribute */ +#define _PAGE_MA_UCE (0x5 << 2) /* UC exported attribute */ +#define _PAGE_MA_WC (0x6 << 2) /* write coalescing memory attribute */ +#define _PAGE_MA_NAT (0x7 << 2) /* not-a-thing attribute */ +#define _PAGE_MA_MASK (0x7 << 2) +#define _PAGE_PL_0 (0 << 7) /* privilege level 0 (kernel) */ +#define _PAGE_PL_1 (1 << 7) /* privilege level 1 (unused) */ +#define _PAGE_PL_2 (2 << 7) /* privilege level 2 (unused) */ +#define _PAGE_PL_3 (3 << 7) /* privilege level 3 (user) */ +#define _PAGE_PL_MASK (3 << 7) +#define _PAGE_AR_R (0 << 9) /* read only */ +#define _PAGE_AR_RX (1 << 9) /* read & execute */ +#define _PAGE_AR_RW (2 << 9) /* read & write */ +#define _PAGE_AR_RWX (3 << 9) /* read, write & execute */ +#define _PAGE_AR_R_RW (4 << 9) /* read / read & write */ +#define _PAGE_AR_RX_RWX (5 << 9) /* read & exec / read, write & exec */ +#define _PAGE_AR_RWX_RW (6 << 9) /* read, write & exec / read & write */ +#define _PAGE_AR_X_RX (7 << 9) /* exec & promote / read & exec */ +#define _PAGE_AR_MASK (7 << 9) +#define _PAGE_AR_SHIFT 9 +#define _PAGE_A (1 << 5) /* page 
accessed bit */ +#define _PAGE_D (1 << 6) /* page dirty bit */ +#define _PAGE_PPN_MASK (((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL) +#define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */ +#define _PAGE_PROTNONE (__IA64_UL(1) << 63) + +#define _PFN_MASK _PAGE_PPN_MASK +#define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_A | _PAGE_D) + +#define _PAGE_SIZE_4K 12 +#define _PAGE_SIZE_8K 13 +#define _PAGE_SIZE_16K 14 +#define _PAGE_SIZE_64K 16 +#define _PAGE_SIZE_256K 18 +#define _PAGE_SIZE_1M 20 +#define _PAGE_SIZE_4M 22 +#define _PAGE_SIZE_16M 24 +#define _PAGE_SIZE_64M 26 +#define _PAGE_SIZE_256M 28 + +#define __ACCESS_BITS _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB +#define __DIRTY_BITS_NO_ED _PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB +#define __DIRTY_BITS _PAGE_ED | __DIRTY_BITS_NO_ED + +#define EFI_PAGE_SHIFT (12) + +/* + * NOTE: #include'ing creates too many compiler problems, so + * this stuff is hardwired here; it's probably etched in stone somewhere. + */ +struct efi_memory_desc_t { + uint32_t type; + uint32_t pad; + uint64_t phys_addr; + uint64_t virt_addr; + uint64_t num_pages; + uint64_t attribute; +} desc; + +/* Memory types: */ +#define EFI_RESERVED_TYPE 0 +#define EFI_LOADER_CODE 1 +#define EFI_LOADER_DATA 2 +#define EFI_BOOT_SERVICES_CODE 3 +#define EFI_BOOT_SERVICES_DATA 4 +#define EFI_RUNTIME_SERVICES_CODE 5 +#define EFI_RUNTIME_SERVICES_DATA 6 +#define EFI_CONVENTIONAL_MEMORY 7 +#define EFI_UNUSABLE_MEMORY 8 +#define EFI_ACPI_RECLAIM_MEMORY 9 +#define EFI_ACPI_MEMORY_NVS 10 +#define EFI_MEMORY_MAPPED_IO 11 +#define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12 +#define EFI_PAL_CODE 13 +#define EFI_MAX_MEMORY_TYPE 14 + +/* Attribute values: */ +#define EFI_MEMORY_UC 0x0000000000000001 /* uncached */ +#define EFI_MEMORY_WC 0x0000000000000002 /* write-coalescing */ +#define EFI_MEMORY_WT 0x0000000000000004 /* write-through */ +#define EFI_MEMORY_WB 0x0000000000000008 /* write-back */ +#define EFI_MEMORY_WP 0x0000000000001000 /* write-protect */ +#define 
EFI_MEMORY_RP 0x0000000000002000 /* read-protect */ +#define EFI_MEMORY_XP 0x0000000000004000 /* execute-protect */ +#define EFI_MEMORY_RUNTIME 0x8000000000000000 /* range requires runtime mapping */ + +#define SWP_TYPE(entry) (((entry) >> 1) & 0xff) +#define SWP_OFFSET(entry) ((entry) >> 9) +#define __swp_type(entry) ((entry >> 2) & 0x7f) +#define __swp_offset(entry) ((entry << 1) >> 10) + +#define TIF_SIGPENDING (1) + +#define KERNEL_TR_PAGE_SIZE (1 << _PAGE_SIZE_64M) +#define KERNEL_TR_PAGE_MASK (~(KERNEL_TR_PAGE_SIZE - 1)) + +#define UNKNOWN_PHYS_START ((ulong)(-1)) +#define DEFAULT_PHYS_START (KERNEL_TR_PAGE_SIZE * 1) + +#define IA64_GET_STACK_ULONG(OFF) \ + ((INSTACK(OFF,bt)) ? (GET_STACK_ULONG(OFF)) : get_init_stack_ulong((unsigned long)OFF)) + +#endif /* IA64 */ + +#ifdef PPC64 +#define _64BIT_ +#define MACHINE_TYPE "PPC64" + +#define PPC64_64K_PAGE_SIZE 65536 +#define PPC64_STACK_SIZE 16384 + +#define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) + +#define PTOV(X) ((unsigned long)(X)+(machdep->identity_map_base)) +#define VTOP(X) ((unsigned long)(X)-(machdep->identity_map_base)) +#define BOOK3E_VMBASE 0x8000000000000000 +#define IS_VMALLOC_ADDR(X) machdep->machspec->is_vmaddr(X) +#define KERNELBASE machdep->pageoffset + +#define PGDIR_SHIFT (machdep->pageshift + (machdep->pageshift -3) + (machdep->pageshift - 2)) +#define PMD_SHIFT (machdep->pageshift + (machdep->pageshift - 3)) + +#define PGD_MASK (~((1UL << PGDIR_SHIFT) - 1)) +#define PMD_MASK (~((1UL << PMD_SHIFT) - 1)) + +/* shift to put page number into pte */ +#define PTE_RPN_SHIFT_DEFAULT 16 +#define PMD_TO_PTEPAGE_SHIFT 2 /* Used for 2.6 or later */ + +#define PTE_INDEX_SIZE 9 +#define PMD_INDEX_SIZE 10 +#define PGD_INDEX_SIZE 10 + +#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) +#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) +#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) + +#define PGD_OFFSET_24(vaddr) ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) +#define PGD_OFFSET(vaddr) ((vaddr >> PGDIR_SHIFT) & 
0x7ff) +#define PMD_OFFSET(vaddr) ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) + +/* 4-level page table support */ + +/* 4K pagesize */ +#define PTE_INDEX_SIZE_L4_4K 9 +#define PMD_INDEX_SIZE_L4_4K 7 +#define PUD_INDEX_SIZE_L4_4K 7 +#define PGD_INDEX_SIZE_L4_4K 9 +#define PUD_INDEX_SIZE_L4_4K_3_7 9 +#define PTE_INDEX_SIZE_RADIX_4K 9 +#define PMD_INDEX_SIZE_RADIX_4K 9 +#define PUD_INDEX_SIZE_RADIX_4K 9 +#define PGD_INDEX_SIZE_RADIX_4K 13 +#define PTE_RPN_SHIFT_L4_4K 17 +#define PTE_RPN_SHIFT_L4_4K_4_5 18 +#define PGD_MASKED_BITS_4K 0 +#define PUD_MASKED_BITS_4K 0 +#define PMD_MASKED_BITS_4K 0 + +/* 64K pagesize */ +#define PTE_INDEX_SIZE_L4_64K 12 +#define PMD_INDEX_SIZE_L4_64K 12 +#define PUD_INDEX_SIZE_L4_64K 0 +#define PGD_INDEX_SIZE_L4_64K 4 +#define PTE_INDEX_SIZE_L4_64K_3_10 8 +#define PMD_INDEX_SIZE_L4_64K_3_10 10 +#define PGD_INDEX_SIZE_L4_64K_3_10 12 +#define PMD_INDEX_SIZE_L4_64K_4_6 5 +#define PUD_INDEX_SIZE_L4_64K_4_6 5 +#define PMD_INDEX_SIZE_L4_64K_4_12 10 +#define PUD_INDEX_SIZE_L4_64K_4_12 7 +#define PGD_INDEX_SIZE_L4_64K_4_12 8 +#define PUD_INDEX_SIZE_L4_64K_4_17 10 +#define PTE_INDEX_SIZE_RADIX_64K 5 +#define PMD_INDEX_SIZE_RADIX_64K 9 +#define PUD_INDEX_SIZE_RADIX_64K 9 +#define PGD_INDEX_SIZE_RADIX_64K 13 +#define PTE_RPN_SHIFT_L4_64K_V1 32 +#define PTE_RPN_SHIFT_L4_64K_V2 30 +#define PTE_RPN_SHIFT_L4_BOOK3E_64K 28 +#define PTE_RPN_SHIFT_L4_BOOK3E_4K 24 +#define PGD_MASKED_BITS_64K 0 +#define PUD_MASKED_BITS_64K 0x1ff +#define PMD_MASKED_BITS_64K 0x1ff +#define PMD_MASKED_BITS_64K_3_11 0xfff +#define PMD_MASKED_BITS_BOOK3E_64K_4_5 0x7ff +#define PGD_MASKED_BITS_64K_4_6 0xc0000000000000ffUL +#define PUD_MASKED_BITS_64K_4_6 0xc0000000000000ffUL +#define PMD_MASKED_BITS_64K_4_6 0xc0000000000000ffUL + +#define PTE_RPN_MASK_DEFAULT 0xffffffffffffffffUL +#define PAGE_PA_MAX_L4_4_6 (THIS_KERNEL_VERSION >= LINUX(4,11,0) ? 
53 : 57) +#define PTE_RPN_MASK_L4_4_6 \ + (((1UL << PAGE_PA_MAX_L4_4_6) - 1) & ~((1UL << PAGESHIFT()) - 1)) +#define PTE_RPN_SHIFT_L4_4_6 PAGESHIFT() + +#define PGD_MASKED_BITS_4_7 0xc0000000000000ffUL +#define PUD_MASKED_BITS_4_7 0xc0000000000000ffUL +#define PMD_MASKED_BITS_4_7 0xc0000000000000ffUL + +#define PD_HUGE 0x8000000000000000 +#define HUGE_PTE_MASK 0x03 +#define HUGEPD_SHIFT_MASK 0x3f +#define HUGEPD_ADDR_MASK (0x0fffffffffffffffUL & ~HUGEPD_SHIFT_MASK) + +#define PGD_MASK_L4 \ + (THIS_KERNEL_VERSION >= LINUX(3,10,0) ? (machdep->ptrs_per_pgd - 1) : 0x1ff) + +#define PGD_OFFSET_L4(vaddr) \ + ((vaddr >> (machdep->machspec->l4_shift)) & PGD_MASK_L4) + +#define PUD_OFFSET_L4(vaddr) \ + ((vaddr >> (machdep->machspec->l3_shift)) & (machdep->machspec->ptrs_per_l3 - 1)) + +#define PMD_OFFSET_L4(vaddr) \ + ((vaddr >> (machdep->machspec->l2_shift)) & (machdep->machspec->ptrs_per_l2 - 1)) + +#define _PAGE_PTE (machdep->machspec->_page_pte) /* distinguishes PTEs from pointers */ +#define _PAGE_PRESENT (machdep->machspec->_page_present) /* software: pte contains a translation */ +#define _PAGE_USER (machdep->machspec->_page_user) /* matches one of the PP bits */ +#define _PAGE_RW (machdep->machspec->_page_rw) /* software: user write access allowed */ +#define _PAGE_GUARDED (machdep->machspec->_page_guarded) +#define _PAGE_COHERENT (machdep->machspec->_page_coherent /* M: enforce memory coherence (SMP systems) */) +#define _PAGE_NO_CACHE (machdep->machspec->_page_no_cache) /* I: cache inhibit */ +#define _PAGE_WRITETHRU (machdep->machspec->_page_writethru) /* W: cache write-through */ +#define _PAGE_DIRTY (machdep->machspec->_page_dirty) /* C: page changed */ +#define _PAGE_ACCESSED (machdep->machspec->_page_accessed) /* R: page referenced */ + +#define PTE_RPN_MASK (machdep->machspec->pte_rpn_mask) +#define PTE_RPN_SHIFT (machdep->machspec->pte_rpn_shift) + +#define TIF_SIGPENDING (2) + +#define SWP_TYPE(entry) (((entry) >> 1) & 0x7f) +#define SWP_OFFSET(entry) 
((entry) >> 8) +#define __swp_type(entry) SWP_TYPE(entry) +#define __swp_offset(entry) SWP_OFFSET(entry) + +#define MSR_PR_LG 14 /* Problem State / Privilege Level */ + /* Used to find the user or kernel-mode frame*/ + +#define STACK_FRAME_OVERHEAD 112 +#define EXCP_FRAME_MARKER 0x7265677368657265 + +#define _SECTION_SIZE_BITS 24 +#define _MAX_PHYSMEM_BITS 44 +#define _MAX_PHYSMEM_BITS_3_7 46 +#define _MAX_PHYSMEM_BITS_4_19 47 +#define _MAX_PHYSMEM_BITS_4_20 51 + +#endif /* PPC64 */ + +#ifdef S390 +#define _32BIT_ +#define MACHINE_TYPE "S390" + +#define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) +#define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) +#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) + +#define PTRS_PER_PTE 1024 +#define PTRS_PER_PMD 1 +#define PTRS_PER_PGD 512 +#define SEGMENT_TABLE_SIZE ((sizeof(ulong)*4) * PTRS_PER_PGD) + +#define SWP_TYPE(entry) (((entry) >> 2) & 0x1f) +#define SWP_OFFSET(entry) ((((entry) >> 11) & 0xfffffffe) | \ + (((entry) >> 7) & 0x1)) +#define __swp_type(entry) SWP_TYPE(entry) +#define __swp_offset(entry) SWP_OFFSET(entry) + +#define TIF_SIGPENDING (2) + +#define _SECTION_SIZE_BITS 25 +#define _MAX_PHYSMEM_BITS 31 + +#endif /* S390 */ + +#ifdef S390X +#define _64BIT_ +#define MACHINE_TYPE "S390X" + +#define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) +#define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) +#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) +#define PTRS_PER_PTE 512 +#define PTRS_PER_PMD 1024 +#define PTRS_PER_PGD 2048 +#define SEGMENT_TABLE_SIZE ((sizeof(ulong)*2) * PTRS_PER_PMD) + +#define SWP_TYPE(entry) (((entry) >> 2) & 0x1f) +#define SWP_OFFSET(entry) ((((entry) >> 11) & 0xfffffffffffffffe) | \ + (((entry) >> 7) & 0x1)) +#define __swp_type(entry) SWP_TYPE(entry) +#define __swp_offset(entry) SWP_OFFSET(entry) + +#define TIF_SIGPENDING (2) + +#define _SECTION_SIZE_BITS 28 +#define _MAX_PHYSMEM_BITS_OLD 42 +#define 
_MAX_PHYSMEM_BITS_NEW 46 + +#endif /* S390X */ + +#ifdef SPARC64 +#define _64BIT_ +#define MACHINE_TYPE "SPARC64" + +#define PTOV(X) \ + ((unsigned long)(X) + machdep->machspec->page_offset) +#define VTOP(X) \ + ((unsigned long)(X) - machdep->machspec->page_offset) + +#define PAGE_OFFSET (machdep->machspec->page_offset) + +extern int sparc64_IS_VMALLOC_ADDR(ulong vaddr); +#define IS_VMALLOC_ADDR(X) sparc64_IS_VMALLOC_ADDR((ulong)(X)) +#define PAGE_SHIFT (13) +#define PAGE_SIZE (1UL << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE - 1)) +#define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) +#define THREAD_SIZE (2 * PAGE_SIZE) + +/* S3 Core + * Core 48-bit physical address supported. + * Bit 47 distinguishes memory or I/O. When set to "1" it is I/O. + */ +#define PHYS_MASK_SHIFT (47) +#define PHYS_MASK (((1UL) << PHYS_MASK_SHIFT) - 1) + +typedef signed int s32; + +/* + * This next two defines are convenience defines for normal page table. + */ +#define PTES_PER_PAGE (1UL << (PAGE_SHIFT - 3)) +#define PTES_PER_PAGE_MASK (PTES_PER_PAGE - 1) + +/* 4-level page table */ +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3)) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE - 1)) +#define PMD_BITS (PAGE_SHIFT - 3) + +#define PUD_SHIFT (PMD_SHIFT + PMD_BITS) +#define PUD_SIZE (1UL << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE - 1)) +#define PUD_BITS (PAGE_SHIFT - 3) + +#define PGDIR_SHIFT (PUD_SHIFT + PUD_BITS) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE - 1)) +#define PGDIR_BITS (PAGE_SHIFT - 3) + +#define PTRS_PER_PTE (1UL << (PAGE_SHIFT - 3)) +#define PTRS_PER_PMD (1UL << PMD_BITS) +#define PTRS_PER_PUD (1UL << PUD_BITS) +#define PTRS_PER_PGD (1UL << PGDIR_BITS) + +#define HPAGE_SHIFT (23) +/* Down one huge page */ +#define SPARC64_USERSPACE_TOP (-(1UL << HPAGE_SHIFT)) +#define PAGE_PMD_HUGE (0x0100000000000000UL) + +/* These are for SUN4V. 
*/ +#define _PAGE_VALID (0x8000000000000000UL) +#define _PAGE_NFO_4V (0x4000000000000000UL) +#define _PAGE_MODIFIED_4V (0x2000000000000000UL) +#define _PAGE_ACCESSED_4V (0x1000000000000000UL) +#define _PAGE_READ_4V (0x0800000000000000UL) +#define _PAGE_WRITE_4V (0x0400000000000000UL) +#define _PAGE_PADDR_4V (0x00FFFFFFFFFFE000UL) +#define _PAGE_PFN_MASK (_PAGE_PADDR_4V) +#define _PAGE_P_4V (0x0000000000000100UL) +#define _PAGE_EXEC_4V (0x0000000000000080UL) +#define _PAGE_W_4V (0x0000000000000040UL) +#define _PAGE_PRESENT_4V (0x0000000000000010UL) +#define _PAGE_SZALL_4V (0x0000000000000007UL) +/* There are other page sizes. Some supported. */ +#define _PAGE_SZ4MB_4V (0x0000000000000003UL) +#define _PAGE_SZ512K_4V (0x0000000000000002UL) +#define _PAGE_SZ64K_4V (0x0000000000000001UL) +#define _PAGE_SZ8K_4V (0x0000000000000000UL) + +#define SPARC64_MODULES_VADDR (0x0000000010000000UL) +#define SPARC64_MODULES_END (0x00000000f0000000UL) +#define SPARC64_VMALLOC_START (0x0000000100000000UL) + +#define SPARC64_STACK_SIZE 0x4000 + +/* sparsemem */ +#define _SECTION_SIZE_BITS 30 +#define _MAX_PHYSMEM_BITS 53 + +#define STACK_BIAS 2047 + +struct machine_specific { + ulong page_offset; + ulong vmalloc_end; +}; + +#define TIF_SIGPENDING (2) +#define SWP_OFFSET(E) ((E) >> (PAGE_SHIFT + 8UL)) +#define SWP_TYPE(E) (((E) >> PAGE_SHIFT) & 0xffUL) +#define __swp_type(E) SWP_TYPE(E) +#define __swp_offset(E) SWP_OFFSET(E) +#endif /* SPARC64 */ + +#ifdef PLATFORM + +#define SWP_TYPE(entry) (error("PLATFORM_SWP_TYPE: TBD\n")) +#define SWP_OFFSET(entry) (error("PLATFORM_SWP_OFFSET: TBD\n")) +#define __swp_type(entry) SWP_TYPE(entry) +#define __swp_offset(entry) SWP_OFFSET(entry) + +#endif /* PLATFORM */ + +#define KILOBYTES(x) ((x) * (1024)) +#define MEGABYTES(x) ((x) * (1048576)) +#define GIGABYTES(x) ((x) * (1073741824)) +#define TB_SHIFT (40) +#define TERABYTES(x) ((x) * (1UL << TB_SHIFT)) + +#define MEGABYTE_MASK (MEGABYTES(1)-1) + +#define SIZEOF_64BIT (8) +#define SIZEOF_32BIT 
(4) +#define SIZEOF_16BIT (2) +#define SIZEOF_8BIT (1) + +#ifdef ARM +#define MAX_HEXADDR_STRLEN (8) +#define UVADDR_PRLEN (8) +#endif +#ifdef X86 +#define MAX_HEXADDR_STRLEN (8) +#define UVADDR_PRLEN (8) +#endif +#ifdef ALPHA +#define MAX_HEXADDR_STRLEN (16) +#define UVADDR_PRLEN (11) +#endif +#ifdef PPC +#define MAX_HEXADDR_STRLEN (8) +#define UVADDR_PRLEN (8) +#endif +#ifdef IA64 +#define MAX_HEXADDR_STRLEN (16) +#define UVADDR_PRLEN (16) +#endif +#ifdef S390 +#define MAX_HEXADDR_STRLEN (8) +#define UVADDR_PRLEN (8) +#endif +#ifdef S390X +#define MAX_HEXADDR_STRLEN (16) +#define UVADDR_PRLEN (16) +#endif +#ifdef X86_64 +#define MAX_HEXADDR_STRLEN (16) +#define UVADDR_PRLEN (10) +#endif +#ifdef PPC64 +#define MAX_HEXADDR_STRLEN (16) +#define UVADDR_PRLEN (16) +#endif +#ifdef ARM64 +#define MAX_HEXADDR_STRLEN (16) +#define UVADDR_PRLEN (10) +#endif +#ifdef MIPS +#define MAX_HEXADDR_STRLEN (8) +#define UVADDR_PRLEN (8) +#endif +#ifdef SPARC64 +#define MAX_HEXADDR_STRLEN (16) +#define UVADDR_PRLEN (16) +#endif + +#define BADADDR ((ulong)(-1)) +#define BADVAL ((ulong)(-1)) +#define UNUSED (-1) + +#define UNINITIALIZED (BADVAL) + +#define BITS_PER_BYTE (8) +#define BITS_PER_LONG (BITS_PER_BYTE * sizeof(long)) +#define NUM_TO_BIT(x) (1UL<<((x)%BITS_PER_LONG)) +#define NUM_IN_BITMAP(bitmap, x) (bitmap[(x)/BITS_PER_LONG] & NUM_TO_BIT(x)) +#define SET_BIT(bitmap, x) (bitmap[(x)/BITS_PER_LONG] |= NUM_TO_BIT(x)) + +/* + * precision lengths for fprintf + */ +#define VADDR_PRLEN (sizeof(char *) == 8 ? 16 : 8) +#define LONG_LONG_PRLEN (16) +#define LONG_PRLEN (sizeof(long) == 8 ? 16 : 8) +#define INT_PRLEN (sizeof(int) == 8 ? 
16 : 8) +#define CHAR_PRLEN (2) +#define SHORT_PRLEN (4) + +#define MINSPACE (-100) + +#define SYNOPSIS (0x1) +#define COMPLETE_HELP (0x2) +#define PIPE_TO_SCROLL (0x4) +#define MUST_HELP (0x8) + +#define LEFT_JUSTIFY (1) +#define RIGHT_JUSTIFY (2) + +#define CENTER (0x1) +#define LJUST (0x2) +#define RJUST (0x4) +#define LONG_DEC (0x8) +#define LONG_HEX (0x10) +#define INT_DEC (0x20) +#define INT_HEX (0x40) +#define LONGLONG_HEX (0x80) +#define ZERO_FILL (0x100) +#define SLONG_DEC (0x200) + +#define INIT_TIME (1) +#define RUN_TIME (2) + +/* + * IRQ line status. + * For kernels up to and including 2.6.17 + */ +#define IRQ_INPROGRESS_2_6_17 1 /* IRQ handler active - do not enter! */ +#define IRQ_DISABLED_2_6_17 2 /* IRQ disabled - do not enter! */ +#define IRQ_PENDING_2_6_17 4 /* IRQ pending - replay on enable */ +#define IRQ_REPLAY_2_6_17 8 /* IRQ has been replayed but not acked yet */ +#define IRQ_AUTODETECT_2_6_17 16 /* IRQ is being autodetected */ +#define IRQ_WAITING_2_6_17 32 /* IRQ not yet seen - for autodetection */ +#define IRQ_LEVEL_2_6_17 64 /* IRQ level triggered */ +#define IRQ_MASKED_2_6_17 128 /* IRQ masked - shouldn't be seen again */ + +/* + * For kernel 2.6.21 and later + */ +#define IRQ_TYPE_NONE_2_6_21 0x00000000 /* Default, unspecified type */ +#define IRQ_TYPE_EDGE_RISING_2_6_21 0x00000001 /* Edge rising type */ +#define IRQ_TYPE_EDGE_FALLING_2_6_21 0x00000002 /* Edge falling type */ +#define IRQ_TYPE_EDGE_BOTH_2_6_21 (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING) +#define IRQ_TYPE_LEVEL_HIGH_2_6_21 0x00000004 /* Level high type */ +#define IRQ_TYPE_LEVEL_LOW_2_6_21 0x00000008 /* Level low type */ +#define IRQ_TYPE_SENSE_MASK_2_6_21 0x0000000f /* Mask of the above */ +#define IRQ_TYPE_PROBE_2_6_21 0x00000010 /* Probing in progress */ + +#define IRQ_INPROGRESS_2_6_21 0x00000100 /* IRQ handler active - do not enter! */ +#define IRQ_DISABLED_2_6_21 0x00000200 /* IRQ disabled - do not enter! 
*/ +#define IRQ_PENDING_2_6_21 0x00000400 /* IRQ pending - replay on enable */ +#define IRQ_REPLAY_2_6_21 0x00000800 /* IRQ has been replayed but not acked yet */ +#define IRQ_AUTODETECT_2_6_21 0x00001000 /* IRQ is being autodetected */ +#define IRQ_WAITING_2_6_21 0x00002000 /* IRQ not yet seen - for autodetection */ +#define IRQ_LEVEL_2_6_21 0x00004000 /* IRQ level triggered */ +#define IRQ_MASKED_2_6_21 0x00008000 /* IRQ masked - shouldn't be seen again */ +#define IRQ_PER_CPU_2_6_21 0x00010000 /* IRQ is per CPU */ +#define IRQ_NOPROBE_2_6_21 0x00020000 /* IRQ is not valid for probing */ +#define IRQ_NOREQUEST_2_6_21 0x00040000 /* IRQ cannot be requested */ +#define IRQ_NOAUTOEN_2_6_21 0x00080000 /* IRQ will not be enabled on request irq */ +#define IRQ_WAKEUP_2_6_21 0x00100000 /* IRQ triggers system wakeup */ +#define IRQ_MOVE_PENDING_2_6_21 0x00200000 /* need to re-target IRQ destination */ +#define IRQ_NO_BALANCING_2_6_21 0x00400000 /* IRQ is excluded from balancing */ +#define IRQ_SPURIOUS_DISABLED_2_6_21 0x00800000 /* IRQ was disabled by the spurious trap */ +#define IRQ_MOVE_PCNTXT_2_6_21 0x01000000 /* IRQ migration from process context */ +#define IRQ_AFFINITY_SET_2_6_21 0x02000000 /* IRQ affinity was set from userspace*/ + +/* + * Select proper IRQ value depending on kernel version + */ +#define IRQ_TYPE_NONE \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_NONE_2_6_21 : 0) +#define IRQ_TYPE_EDGE_RISING \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_EDGE_RISING_2_6_21 : 0) +#define IRQ_TYPE_EDGE_FALLING \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_EDGE_FALLING_2_6_21 : 0) +#define IRQ_TYPE_EDGE_BOTH \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_EDGE_BOTH_2_6_21 : 0) +#define IRQ_TYPE_LEVEL_HIGH \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_LEVEL_HIGH_2_6_21 : 0) +#define IRQ_TYPE_LEVEL_LOW \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? 
IRQ_TYPE_LEVEL_LOW_2_6_21 : 0) +#define IRQ_TYPE_SENSE_MASK \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_SENSE_MASK_2_6_21 : 0) +#define IRQ_TYPE_PROBE \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_PROBE_2_6_21 : 0) + +#define IRQ_INPROGRESS \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_INPROGRESS_2_6_21 : IRQ_INPROGRESS_2_6_17) +#define IRQ_DISABLED \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_DISABLED_2_6_21 : IRQ_DISABLED_2_6_17) +#define IRQ_PENDING \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_PENDING_2_6_21 : IRQ_PENDING_2_6_17) +#define IRQ_REPLAY \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_REPLAY_2_6_21 : IRQ_REPLAY_2_6_17) +#define IRQ_AUTODETECT \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_AUTODETECT_2_6_21 : IRQ_AUTODETECT_2_6_17) +#define IRQ_WAITING \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_WAITING_2_6_21 : IRQ_WAITING_2_6_17) +#define IRQ_LEVEL \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_LEVEL_2_6_21 : IRQ_LEVEL_2_6_17) +#define IRQ_MASKED \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_MASKED_2_6_21 : IRQ_MASKED_2_6_17) +#define IRQ_PER_CPU \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_PER_CPU_2_6_21 : 0) +#define IRQ_NOPROBE \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_NOPROBE_2_6_21 : 0) +#define IRQ_NOREQUEST \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_NOREQUEST_2_6_21 : 0) +#define IRQ_NOAUTOEN \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_NOAUTOEN_2_6_21 : 0) +#define IRQ_WAKEUP \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_WAKEUP_2_6_21 : 0) +#define IRQ_MOVE_PENDING \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_MOVE_PENDING_2_6_21 : 0) +#define IRQ_NO_BALANCING \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_NO_BALANCING_2_6_21 : 0) +#define IRQ_SPURIOUS_DISABLED \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_SPURIOUS_DISABLED_2_6_21 : 0) +#define IRQ_MOVE_PCNTXT \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? 
IRQ_MOVE_PCNTXT_2_6_21 : 0) +#define IRQ_AFFINITY_SET \ + (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_AFFINITY_SET_2_6_21 : 0) + +#ifdef ARM +#define SA_PROBE SA_ONESHOT +#define SA_SAMPLE_RANDOM SA_RESTART +#define SA_SHIRQ 0x04000000 +#define SA_RESTORER 0x04000000 +#endif + +#ifdef X86 +#define SA_PROBE SA_ONESHOT +#define SA_SAMPLE_RANDOM SA_RESTART +#define SA_SHIRQ 0x04000000 +#define SA_RESTORER 0x04000000 +#endif + +#ifdef X86_64 +#define SA_PROBE SA_ONESHOT +#define SA_SAMPLE_RANDOM SA_RESTART +#define SA_SHIRQ 0x04000000 +#define SA_RESTORER 0x04000000 +#endif + +#ifdef ALPHA +#define SA_PROBE SA_ONESHOT +#define SA_SAMPLE_RANDOM SA_RESTART +#define SA_SHIRQ 0x40000000 +#endif + +#ifdef PPC +#define SA_PROBE SA_ONESHOT +#define SA_SAMPLE_RANDOM SA_RESTART +#define SA_SHIRQ 0x04000000 +#define SA_RESTORER 0x04000000 +#endif + +#ifdef PPC64 +#define SA_PROBE SA_ONESHOT +#define SA_SAMPLE_RANDOM SA_RESTART +#define SA_SHIRQ 0x04000000 +#define SA_RESTORER 0x04000000u +#endif + +#ifdef IA64 +#define SA_PROBE SA_ONESHOT +#define SA_SAMPLE_RANDOM SA_RESTART +#define SA_SHIRQ 0x04000000 +#define SA_RESTORER 0x04000000 +#endif + +#ifdef S390 +#define SA_PROBE SA_ONESHOT +#define SA_SAMPLE_RANDOM SA_RESTART +#define SA_SHIRQ 0x04000000 +#define SA_RESTORER 0x04000000 +#endif + +#ifdef S390X +#define SA_PROBE SA_ONESHOT +#define SA_SAMPLE_RANDOM SA_RESTART +#define SA_SHIRQ 0x04000000 +#define SA_RESTORER 0x04000000 +#endif + + +#define ACTION_FLAGS (SA_INTERRUPT|SA_PROBE|SA_SAMPLE_RANDOM|SA_SHIRQ) + + +#endif /* !GDB_COMMON */ + +/* + * Common request structure for BFD or GDB data or commands. 
+ */ +struct gnu_request { + int command; + char *buf; + FILE *fp; + ulong addr; + ulong addr2; + ulong count; + ulong flags; + char *name; + ulong length; + int typecode; +#if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) || defined(GDB_7_0) + char *typename; +#else + char *type_name; +#endif + char *target_typename; + ulong target_length; + int target_typecode; + int is_typedef; + char *member; + long member_offset; + long member_length; + int member_typecode; + long value; + char *tagname; + ulong pc; + ulong sp; + ulong ra; + int curframe; + ulong frame; + ulong prevsp; + ulong prevpc; + ulong lastsp; + ulong task; + ulong debug; + struct stack_hook *hookp; + struct global_iterator { + int finished; + int block_index; + struct symtab *symtab; + struct symbol *sym; + struct objfile *obj; + } global_iterator; + struct load_module *lm; + char *member_main_type_name; + char *member_main_type_tag_name; + char *member_target_type_name; + char *member_target_type_tag_name; + char *type_tag_name; +}; + +/* + * GNU commands + */ +#define GNU_DATATYPE_INIT (1) +#define GNU_DISASSEMBLE (2) +#define GNU_GET_LINE_NUMBER (3) +#define GNU_PASS_THROUGH (4) +#define GNU_GET_DATATYPE (5) +#define GNU_COMMAND_EXISTS (6) +#define GNU_STACK_TRACE (7) +#define GNU_ALPHA_FRAME_OFFSET (8) +#define GNU_FUNCTION_NUMARGS (9) +#define GNU_RESOLVE_TEXT_ADDR (10) +#define GNU_ADD_SYMBOL_FILE (11) +#define GNU_DELETE_SYMBOL_FILE (12) +#define GNU_VERSION (13) +#define GNU_PATCH_SYMBOL_VALUES (14) +#define GNU_GET_SYMBOL_TYPE (15) +#define GNU_USER_PRINT_OPTION (16) +#define GNU_SET_CRASH_BLOCK (17) +#define GNU_GET_FUNCTION_RANGE (18) +#define GNU_GET_NEXT_DATATYPE (19) +#define GNU_LOOKUP_STRUCT_CONTENTS (20) +#define GNU_DEBUG_COMMAND (100) +/* + * GNU flags + */ +#define GNU_PRINT_LINE_NUMBERS (0x1) +#define GNU_FUNCTION_ONLY (0x2) +#define GNU_PRINT_ENUMERATORS (0x4) +#define GNU_RETURN_ON_ERROR (0x8) +#define GNU_COMMAND_FAILED (0x10) +#define GNU_FROM_TTY_OFF (0x20) +#define 
GNU_NO_READMEM (0x40) +#define GNU_VAR_LENGTH_TYPECODE (0x80) + +#undef TRUE +#undef FALSE + +#define TRUE (1) +#define FALSE (0) + +#ifdef GDB_COMMON +/* + * function prototypes required by modified gdb source files. + */ +int console(char *, ...); +int gdb_CRASHDEBUG(ulong); +int gdb_readmem_callback(ulong, void *, int, int); +void patch_load_module(struct objfile *objfile, struct minimal_symbol *msymbol); +int patch_kernel_symbol(struct gnu_request *); +struct syment *symbol_search(char *); +int gdb_line_number_callback(ulong, ulong, ulong); +int gdb_print_callback(ulong); +#endif + +#ifndef GDB_COMMON +/* + * WARNING: the following type codes are type_code enums from gdb/gdbtypes.h + */ +enum type_code { + TYPE_CODE_UNDEF, /* Not used; catches errors */ + TYPE_CODE_PTR, /* Pointer type */ + TYPE_CODE_ARRAY, /* Array type with lower & upper bounds. */ + TYPE_CODE_STRUCT, /* C struct or Pascal record */ + TYPE_CODE_UNION, /* C union or Pascal variant part */ + TYPE_CODE_ENUM, /* Enumeration type */ +#if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) || defined(GDB_7_0) || defined(GDB_7_3_1) || defined(GDB_7_6) +#if defined(GDB_7_0) || defined(GDB_7_3_1) || defined(GDB_7_6) + TYPE_CODE_FLAGS, /* Bit flags type */ +#endif + TYPE_CODE_FUNC, /* Function type */ + TYPE_CODE_INT, /* Integer type */ + + /* Floating type. This is *NOT* a complex type. Beware, there are parts + of GDB which bogusly assume that TYPE_CODE_FLT can mean complex. */ + TYPE_CODE_FLT, + + /* Void type. The length field specifies the length (probably always + one) which is used in pointer arithmetic involving pointers to + this type, but actually dereferencing such a pointer is invalid; + a void type has no length and no actual representation in memory + or registers. A pointer to a void type is a generic pointer. 
*/ + TYPE_CODE_VOID, + + TYPE_CODE_SET, /* Pascal sets */ + TYPE_CODE_RANGE, /* Range (integers within spec'd bounds) */ + + /* + * NOTE: the remainder of the type codes are not list or used here... + */ + TYPE_CODE_BOOL = 20, +#endif +}; + +/* + * include/linux/sched.h + */ +#define PF_EXITING 0x00000004 /* getting shut down */ +#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ +#define SCHED_NORMAL 0 +#define SCHED_FIFO 1 +#define SCHED_RR 2 +#define SCHED_BATCH 3 +#define SCHED_ISO 4 +#define SCHED_IDLE 5 +#define SCHED_DEADLINE 6 + +extern long _ZOMBIE_; +#define IS_ZOMBIE(task) (task_state(task) & _ZOMBIE_) +#define IS_EXITING(task) (task_flags(task) & PF_EXITING) + +/* + * ps command options. + */ +#define PS_BY_PID (0x1) +#define PS_BY_TASK (0x2) +#define PS_BY_CMD (0x4) +#define PS_SHOW_ALL (0x8) +#define PS_PPID_LIST (0x10) +#define PS_CHILD_LIST (0x20) +#define PS_KERNEL (0x40) +#define PS_USER (0x80) +#define PS_TIMES (0x100) +#define PS_KSTACKP (0x200) +#define PS_LAST_RUN (0x400) +#define PS_ARGV_ENVP (0x800) +#define PS_TGID_LIST (0x1000) +#define PS_RLIMIT (0x2000) +#define PS_GROUP (0x4000) +#define PS_BY_REGEX (0x8000) +#define PS_NO_HEADER (0x10000) +#define PS_MSECS (0x20000) +#define PS_SUMMARY (0x40000) +#define PS_POLICY (0x80000) +#define PS_ACTIVE (0x100000) + +#define PS_EXCLUSIVE (PS_TGID_LIST|PS_ARGV_ENVP|PS_TIMES|PS_CHILD_LIST|PS_PPID_LIST|PS_LAST_RUN|PS_RLIMIT|PS_MSECS|PS_SUMMARY|PS_ACTIVE) + +#define MAX_PS_ARGS (100) /* maximum command-line specific requests */ + +struct psinfo { + int argc; + ulong pid[MAX_PS_ARGS]; + int type[MAX_PS_ARGS]; + ulong task[MAX_PS_ARGS]; + char comm[MAX_PS_ARGS][TASK_COMM_LEN+1]; + struct regex_data { + char *pattern; + regex_t regex; + } regex_data[MAX_PS_ARGS]; + int regexs; + ulong *cpus; + int policy; +}; + +#define IS_A_NUMBER(X) (decimal(X, 0) || hexadecimal(X, 0)) +#define AMBIGUOUS_NUMBER(X) (decimal(X, 0) && hexadecimal(X, 0)) + +#define is_mclx_compressed_dump(X) (va_server_init((X), 0, 
0, 0) == 0) + +struct task_mem_usage { + ulong rss; + ulong total_vm; + double pct_physmem; + ulong mm_struct_addr; + ulong pgd_addr; +}; + +/* + * Global data (global_data.c) + */ +extern FILE *fp; +extern struct program_context program_context, *pc; +extern struct task_table task_table, *tt; +extern struct kernel_table kernel_table, *kt; +extern struct command_table_entry linux_command_table[]; +extern char *args[MAXARGS]; +extern int argcnt; +extern int argerrs; +extern struct offset_table offset_table; +extern struct size_table size_table; +extern struct array_table array_table; +extern struct vm_table vm_table, *vt; +extern struct machdep_table *machdep; +extern struct symbol_table_data symbol_table_data, *st; +extern struct extension_table *extension_table; + +/* + * Generated in build_data.c + */ +extern char *build_command; +extern char *build_data; +extern char *build_target; +extern char *build_version; +extern char *compiler_version; + + +/* + * command prototypes + */ +void cmd_quit(void); /* main.c */ +void cmd_mach(void); /* main.c */ +void cmd_help(void); /* help.c */ +void cmd_test(void); /* test.c */ +void cmd_ascii(void); /* tools.c */ +void cmd_bpf(void); /* bfp.c */ +void cmd_set(void); /* tools.c */ +void cmd_eval(void); /* tools.c */ +void cmd_list(void); /* tools.c */ +void cmd_tree(void); /* tools.c */ +void cmd_template(void); /* tools.c */ +void cmd_alias(void); /* cmdline.c */ +void cmd_repeat(void); /* cmdline.c */ +void cmd_rd(void); /* memory.c */ +void cmd_wr(void); /* memory.c */ +void cmd_ptov(void); /* memory.c */ +void cmd_vtop(void); /* memory.c */ +void cmd_vm(void); /* memory.c */ +void cmd_ptob(void); /* memory.c */ +void cmd_btop(void); /* memory.c */ +void cmd_kmem(void); /* memory.c */ +void cmd_search(void); /* memory.c */ +void cmd_swap(void); /* memory.c */ +void cmd_pte(void); /* memory.c */ +void cmd_ps(void); /* task.c */ +void cmd_task(void); /* task.c */ +void cmd_foreach(void); /* task.c */ +void cmd_runq(void); /* 
task.c */ +void cmd_sig(void); /* task.c */ +void cmd_bt(void); /* kernel.c */ +void cmd_dis(void); /* kernel.c */ +void cmd_mod(void); /* kernel.c */ +void cmd_log(void); /* kernel.c */ +void cmd_sys(void); /* kernel.c */ +void cmd_irq(void); /* kernel.c */ +void cmd_timer(void); /* kernel.c */ +void cmd_waitq(void); /* kernel.c */ +void cmd_sym(void); /* symbols.c */ +void cmd_struct(void); /* symbols.c */ +void cmd_union(void); /* symbols.c */ +void cmd_pointer(void); /* symbols.c */ +void cmd_whatis(void); /* symbols.c */ +void cmd_p(void); /* symbols.c */ +void cmd_mount(void); /* filesys.c */ +void cmd_files(void); /* filesys.c */ +void cmd_fuser(void); /* filesys.c */ +void cmd_dev(void); /* dev.c */ +void cmd_gdb(void); /* gdb_interface.c */ +void cmd_net(void); /* net.c */ +void cmd_extend(void); /* extensions.c */ +#if defined(S390) || defined(S390X) +void cmd_s390dbf(void); +#endif +void cmd_map(void); /* kvmdump.c */ +void cmd_ipcs(void); /* ipcs.c */ + +/* + * main.c + */ +void main_loop(void); +void exec_command(void); +struct command_table_entry *get_command_table_entry(char *); +void program_usage(int); +#define LONG_FORM (1) +#define SHORT_FORM (0) +void dump_program_context(void); +void dump_build_data(void); +#ifdef ARM +#define machdep_init(X) arm_init(X) +#endif +#ifdef ARM64 +#define machdep_init(X) arm64_init(X) +#endif +#ifdef X86 +#define machdep_init(X) x86_init(X) +#endif +#ifdef ALPHA +#define machdep_init(X) alpha_init(X) +#endif +#ifdef PPC +#define machdep_init(X) ppc_init(X) +#endif +#ifdef IA64 +#define machdep_init(X) ia64_init(X) +#endif +#ifdef S390 +#define machdep_init(X) s390_init(X) +#endif +#ifdef S390X +#define machdep_init(X) s390x_init(X) +#endif +#ifdef X86_64 +#define machdep_init(X) x86_64_init(X) +#endif +#ifdef PPC64 +#define machdep_init(X) ppc64_init(X) +#endif +#ifdef MIPS +#define machdep_init(X) mips_init(X) +#endif +#ifdef SPARC64 +#define machdep_init(X) sparc64_init(X) +#endif +int clean_exit(int); +int 
untrusted_file(FILE *, char *); +char *readmem_function_name(void); +char *writemem_function_name(void); +char *no_vmcoreinfo(const char *); + +/* + * cmdline.c + */ +void restart(int); +void alias_init(char *); +struct alias_data *is_alias(char *); +void deallocate_alias(char *); +void cmdline_init(void); +void set_command_prompt(char *); +void exec_input_file(void); +void process_command_line(void); +void dump_history(void); +void resolve_rc_cmd(char *, int); +void dump_alias_data(void); +int output_open(void); +#define output_closed() (!output_open()) +void close_output(void); +int interruptible(void); +int received_SIGINT(void); +void debug_redirect(char *); +int CRASHPAGER_valid(void); +char *setup_scroll_command(void); +int minimal_functions(char *); +int is_args_input_file(struct command_table_entry *, struct args_input_file *); +void exec_args_input_file(struct command_table_entry *, struct args_input_file *); + +/* + * tools.c + */ +FILE *set_error(char *); +int __error(int, char *, ...); +#define error __error /* avoid conflict with gdb error() */ +int console(char *, ...); +void create_console_device(char *); +int console_off(void); +int console_on(int); +int console_verbatim(char *); +int whitespace(int); +int ascii(int); +int ascii_string(char *); +int printable_string(char *); +char *clean_line(char *); +char *strip_line_end(char *); +char *strip_linefeeds(char *); +char *strip_beginning_whitespace(char *); +char *strip_ending_whitespace(char *); +char *strip_ending_char(char *, char); +char *strip_beginning_char(char *, char); +char *strip_comma(char *); +char *strip_hex(char *); +char *upper_case(const char *, char *); +char *first_nonspace(char *); +char *first_space(char *); +char *replace_string(char *, char *, char); +void string_insert(char *, char *); +char *strstr_rightmost(char *, char *); +char *null_first_space(char *); +int parse_line(char *, char **); +void print_verbatim(FILE *, char *); +char *fixup_percent(char *); +int can_eval(char 
*); +ulong eval(char *, int, int *); +ulonglong evall(char *, int, int *); +int eval_common(char *, int, int *, struct number_option *); +ulong htol(char *, int, int *); +ulong dtol(char *, int, int *); +unsigned int dtoi(char *, int, int *); +ulong stol(char *, int, int *); +ulonglong stoll(char *, int, int *); +ulonglong htoll(char *, int, int *); +ulonglong dtoll(char *, int, int *); +int decimal(char *, int); +int hexadecimal(char *, int); +int hexadecimal_only(char *, int); +ulong convert(char *, int, int *, ulong); +void pad_line(FILE *, int, char); +#define INDENT(x) pad_line(fp, x, ' ') +char *mkstring(char *, int, ulong, const char *); +#define MKSTR(X) ((const char *)(X)) +int count_leading_spaces(char *); +int count_chars(char *, char); +long count_buffer_chars(char *, char, long); +char *space(int); +char *concat_args(char *, int, int); +char *shift_string_left(char *, int); +char *shift_string_right(char *, int); +int bracketed(char *, char *, int); +void backspace(int); +int do_list(struct list_data *); +int do_list_no_hash(struct list_data *); +struct radix_tree_ops { + void (*entry)(ulong node, ulong slot, const char *path, + ulong index, void *private); + uint radix; + void *private; +}; +int do_radix_tree_traverse(ulong ptr, int is_root, struct radix_tree_ops *ops); +struct xarray_ops { + void (*entry)(ulong node, ulong slot, const char *path, + ulong index, void *private); + uint radix; + void *private; +}; +int do_xarray_traverse(ulong ptr, int is_root, struct xarray_ops *ops); +int do_rdtree(struct tree_data *); +int do_rbtree(struct tree_data *); +int do_xatree(struct tree_data *); +int retrieve_list(ulong *, int); +long power(long, int); +long long ll_power(long long, long long); +void hq_init(void); +int hq_open(void); +int hq_close(void); +int hq_enter(ulong); +int hq_entry_exists(ulong); +int hq_is_open(void); +int hq_is_inuse(void); +long get_embedded(void); +void dump_embedded(char *); +char *ordinal(ulong, char *); +char 
*first_nonspace(char *); +void dump_hash_table(int); +void dump_shared_bufs(void); +void drop_core(char *); +int extract_hex(char *, ulong *, char, ulong); +int count_bits_int(int); +int count_bits_long(ulong); +int highest_bit_long(ulong); +int lowest_bit_long(ulong); +void buf_init(void); +void sym_buf_init(void); +void free_all_bufs(void); +char *getbuf(long); +void freebuf(char *); +char *resizebuf(char *, long, long); +char *strdupbuf(char *); +#define GETBUF(X) getbuf((long)(X)) +#define FREEBUF(X) freebuf((char *)(X)) +#define RESIZEBUF(X,Y,Z) (X) = (typeof(X))resizebuf((char *)(X), (long)(Y), (long)(Z)); +#define STRDUPBUF(X) strdupbuf((char *)(X)) +void sigsetup(int, void *, struct sigaction *, struct sigaction *); +#define SIGACTION(s, h, a, o) sigsetup(s, h, a, o) +char *convert_time(ulonglong, char *); +void stall(ulong); +char *pages_to_size(ulong, char *); +int clean_arg(void); +int empty_list(ulong); +int machine_type(char *); +int machine_type_mismatch(char *, char *, char *, ulong); +void command_not_supported(void); +void option_not_supported(int); +void please_wait(char *); +void please_wait_done(void); +int pathcmp(char *, char *); +int calculate(char *, ulong *, ulonglong *, ulong); +int endian_mismatch(char *, char, ulong); +uint16_t swap16(uint16_t, int); +uint32_t swap32(uint32_t, int); +uint64_t swap64(uint64_t, int); +ulong *get_cpumask_buf(void); +int make_cpumask(char *, ulong *, int, int *); +size_t strlcpy(char *, char *, size_t); +struct rb_node *rb_first(struct rb_root *); +struct rb_node *rb_parent(struct rb_node *, struct rb_node *); +struct rb_node *rb_right(struct rb_node *, struct rb_node *); +struct rb_node *rb_left(struct rb_node *, struct rb_node *); +struct rb_node *rb_next(struct rb_node *); +struct rb_node *rb_last(struct rb_root *); + +/* + * symbols.c + */ +void symtab_init(void); +char *check_specified_kernel_debug_file(void); +void no_debugging_data(int); +void get_text_init_space(void); +int is_kernel_text(ulong); 
+int is_kernel_data(ulong); +int is_init_data(ulong value); +int is_kernel_text_offset(ulong); +int is_symbol_text(struct syment *); +int is_rodata(ulong, struct syment **); +int get_text_function_range(ulong, ulong *, ulong *); +void datatype_init(void); +struct syment *symbol_search(char *); +struct syment *value_search(ulong, ulong *); +struct syment *value_search_base_kernel(ulong, ulong *); +struct syment *value_search_module(ulong, ulong *); +struct syment *symbol_search_next(char *, struct syment *); +ulong highest_bss_symbol(void); +int in_ksymbol_range(ulong); +int module_symbol(ulong, struct syment **, + struct load_module **, char *, ulong); +#define IS_MODULE_VADDR(X) \ + (module_symbol((ulong)(X), NULL, NULL, NULL, *gdb_output_radix)) +char *closest_symbol(ulong); +ulong closest_symbol_value(ulong); +#define SAME_FUNCTION(X,Y) (closest_symbol_value(X) == closest_symbol_value(Y)) +void show_symbol(struct syment *, ulong, ulong); +#define SHOW_LINENUM (0x1) +#define SHOW_SECTION (0x2) +#define SHOW_HEX_OFFS (0x4) +#define SHOW_DEC_OFFS (0x8) +#define SHOW_RADIX() (*gdb_output_radix == 16 ? 
SHOW_HEX_OFFS : SHOW_DEC_OFFS) +#define SHOW_MODULE (0x10) +int symbol_name_count(char *); +int symbol_query(char *, char *, struct syment **); +struct syment *next_symbol(char *, struct syment *); +struct syment *prev_symbol(char *, struct syment *); +void get_symbol_data(char *, long, void *); +int try_get_symbol_data(char *, long, void *); +char *value_to_symstr(ulong, char *, ulong); +char *value_symbol(ulong); +ulong symbol_value(char *); +ulong symbol_value_module(char *, char *); +struct syment *per_cpu_symbol_search(char *); +int symbol_exists(char *s); +int kernel_symbol_exists(char *s); +struct syment *kernel_symbol_search(char *); +ulong symbol_value_from_proc_kallsyms(char *); +int get_syment_array(char *, struct syment **, int); +void set_temporary_radix(unsigned int, unsigned int *); +void restore_current_radix(unsigned int); +void dump_struct(char *, ulong, unsigned); +void dump_struct_member(char *, ulong, unsigned); +void dump_union(char *, ulong, unsigned); +void store_module_symbols_v1(ulong, int); +void store_module_symbols_v2(ulong, int); +int is_datatype_command(void); +int is_typedef(char *); +int arg_to_datatype(char *, struct datatype_member *, ulong); +void dump_symbol_table(void); +void dump_struct_table(ulong); +void dump_offset_table(char *, ulong); +int is_elf_file(char *); +int is_kernel(char *); +int is_shared_object(char *); +int file_elf_version(char *); +int is_system_map(char *); +int is_compressed_kernel(char *, char **); +int select_namelist(char *); +int get_array_length(char *, int *, long); +int get_array_length_alt(char *, char *, int *, long); +int builtin_array_length(char *, int, int *); +char *get_line_number(ulong, char *, int); +char *get_build_directory(char *); +int datatype_exists(char *); +int get_function_numargs(ulong); +int is_module_name(char *, ulong *, struct load_module **); +int is_module_address(ulong, char *); +ulong lowest_module_address(void); +ulong highest_module_address(void); +int 
load_module_symbols(char *, char *, ulong); +void delete_load_module(ulong); +ulong gdb_load_module_callback(ulong, char *); +char *load_module_filter(char *, int); +#define LM_P_FILTER (1) +#define LM_DIS_FILTER (2) +long datatype_info(char *, char *, struct datatype_member *); +int get_symbol_type(char *, char *, struct gnu_request *); +int get_symbol_length(char *); +int text_value_cache(ulong, uint32_t, uint32_t *); +int text_value_cache_byte(ulong, unsigned char *); +void dump_text_value_cache(int); +void clear_text_value_cache(void); +void dump_numargs_cache(void); +int patch_kernel_symbol(struct gnu_request *); +struct syment *generic_machdep_value_to_symbol(ulong, ulong *); +long OFFSET_verify(long, char *, char *, int, char *); +long SIZE_verify(long, char *, char *, int, char *); +long OFFSET_option(long, long, char *, char *, int, char *, char *); +long SIZE_option(long, long, char *, char *, int, char *, char *); +void dump_trace(void **); +int enumerator_value(char *, long *); +int dump_enumerator_list(char *); +struct load_module *init_module_function(ulong); +struct struct_member_data { + char *structure; + char *member; + long type; + long unsigned_type; + long length; + long offset; + long bitpos; + long bitsize; +}; +int fill_struct_member_data(struct struct_member_data *); +void parse_for_member_extended(struct datatype_member *, ulong); +void add_to_downsized(char *); +int is_downsized(char *); +int is_string(char *, char *); +struct syment *symbol_complete_match(const char *, struct syment *); + +/* + * memory.c + */ +void mem_init(void); +void vm_init(void); +int readmem(ulonglong, int, void *, long, char *, ulong); +int writemem(ulonglong, int, void *, long, char *, ulong); +int generic_verify_paddr(uint64_t); +int read_dev_mem(int, void *, int, ulong, physaddr_t); +int read_memory_device(int, void *, int, ulong, physaddr_t); +int read_mclx_dumpfile(int, void *, int, ulong, physaddr_t); +int read_lkcd_dumpfile(int, void *, int, ulong, 
physaddr_t); +int read_daemon(int, void *, int, ulong, physaddr_t); +int write_dev_mem(int, void *, int, ulong, physaddr_t); +int write_memory_device(int, void *, int, ulong, physaddr_t); +int write_mclx_dumpfile(int, void *, int, ulong, physaddr_t); +int write_lkcd_dumpfile(int, void *, int, ulong, physaddr_t); +int write_daemon(int, void *, int, ulong, physaddr_t); +int kvtop(struct task_context *, ulong, physaddr_t *, int); +int uvtop(struct task_context *, ulong, physaddr_t *, int); +void do_vtop(ulong, struct task_context *, ulong); +void raw_stack_dump(ulong, ulong); +void raw_data_dump(ulong, long, int); +int accessible(ulong); +ulong vm_area_dump(ulong, ulong, ulong, struct reference *); +#define IN_TASK_VMA(TASK,VA) (vm_area_dump((TASK), UVADDR|VERIFY_ADDR, (VA), 0)) +char *fill_vma_cache(ulong); +void clear_vma_cache(void); +void dump_vma_cache(ulong); +int generic_is_page_ptr(ulong, physaddr_t *); +int is_page_ptr(ulong, physaddr_t *); +void dump_vm_table(int); +int read_string(ulong, char *, int); +void get_task_mem_usage(ulong, struct task_mem_usage *); +char *get_memory_size(char *); +uint64_t generic_memory_size(void); +char *swap_location(ulonglong, char *); +void clear_swap_info_cache(void); +uint memory_page_size(void); +void force_page_size(char *); +ulong first_vmalloc_address(void); +ulong last_vmalloc_address(void); +int in_vmlist_segment(ulong); +int phys_to_page(physaddr_t, ulong *); +int generic_get_kvaddr_ranges(struct vaddr_range *); +int l1_cache_size(void); +int dumpfile_memory(int); +#define DUMPFILE_MEM_USED (1) +#define DUMPFILE_FREE_MEM (2) +#define DUMPFILE_MEM_DUMP (3) +#define DUMPFILE_ENVIRONMENT (4) +uint64_t total_node_memory(void); +int generic_is_kvaddr(ulong); +int generic_is_uvaddr(ulong, struct task_context *); +void fill_stackbuf(struct bt_info *); +void alter_stackbuf(struct bt_info *); +int vaddr_type(ulong, struct task_context *); +char *format_stack_entry(struct bt_info *bt, char *, ulong, ulong); +int 
in_user_stack(ulong, ulong); +int dump_inode_page(ulong); +ulong valid_section_nr(ulong); +void display_memory_from_file_offset(ulonglong, long, void *); + + +/* + * filesys.c + */ +void fd_init(void); +void vfs_init(void); +int is_a_tty(char *); +int file_exists(char *, struct stat *); +int file_readable(char *); +int is_directory(char *); +char *search_directory_tree(char *, char *, int); +void open_tmpfile(void); +void close_tmpfile(void); +void open_tmpfile2(void); +void set_tmpfile2(FILE *); +void close_tmpfile2(void); +void open_files_dump(ulong, int, struct reference *); +void get_pathname(ulong, char *, int, int, ulong); +ulong *get_mount_list(int *, struct task_context *); +char *vfsmount_devname(ulong, char *, int); +ulong file_to_dentry(ulong); +ulong file_to_vfsmnt(ulong); +int get_proc_version(void); +int file_checksum(char *, long *); +void dump_filesys_table(int); +char *fill_file_cache(ulong); +void clear_file_cache(void); +char *fill_dentry_cache(ulong); +void clear_dentry_cache(void); +char *fill_inode_cache(ulong); +void clear_inode_cache(void); +int monitor_memory(long *, long *, long *, long *); +int is_readable(char *); +struct list_pair { + ulong index; + void *value; +}; +#define radix_tree_pair list_pair +ulong do_radix_tree(ulong, int, struct list_pair *); +#define RADIX_TREE_COUNT (1) +#define RADIX_TREE_SEARCH (2) +#define RADIX_TREE_DUMP (3) +#define RADIX_TREE_GATHER (4) +#define RADIX_TREE_DUMP_CB (5) +/* + * from: "include/linux/radix-tree.h" + */ +#define RADIX_TREE_ENTRY_MASK 3UL +#define RADIX_TREE_EXCEPTIONAL_ENTRY 2 + +ulong do_xarray(ulong, int, struct list_pair *); +#define XARRAY_COUNT (1) +#define XARRAY_SEARCH (2) +#define XARRAY_DUMP (3) +#define XARRAY_GATHER (4) +#define XARRAY_DUMP_CB (5) +#define XARRAY_TAG_MASK (3UL) +#define XARRAY_TAG_INTERNAL (2UL) + +int file_dump(ulong, ulong, ulong, int, int); +#define DUMP_FULL_NAME 0x1 +#define DUMP_INODE_ONLY 0x2 +#define DUMP_DENTRY_ONLY 0x4 +#define DUMP_EMPTY_FILE 0x8 
+#define DUMP_FILE_NRPAGES 0x10 +#endif /* !GDB_COMMON */ +int same_file(char *, char *); +#ifndef GDB_COMMON +int cleanup_memory_driver(void); + + +/* + * help.c + */ +#define HELP_COLUMNS 5 +#define START_OF_HELP_DATA(X) "START_OF_HELP_DATA" X +#define END_OF_HELP_DATA "END_OF_HELP_DATA" +void help_init(void); +void cmd_usage(char *, int); +void display_version(void); +void display_help_screen(char *); +#ifdef ARM +#define dump_machdep_table(X) arm_dump_machdep_table(X) +#endif +#ifdef ARM64 +#define dump_machdep_table(X) arm64_dump_machdep_table(X) +#endif +#ifdef X86 +#define dump_machdep_table(X) x86_dump_machdep_table(X) +#endif +#ifdef ALPHA +#define dump_machdep_table(X) alpha_dump_machdep_table(X) +#endif +#ifdef PPC +#define dump_machdep_table(X) ppc_dump_machdep_table(X) +#endif +#ifdef IA64 +#define dump_machdep_table(X) ia64_dump_machdep_table(X) +#endif +#ifdef S390 +#define dump_machdep_table(X) s390_dump_machdep_table(X) +#endif +#ifdef S390X +#define dump_machdep_table(X) s390x_dump_machdep_table(X) +#endif +#ifdef X86_64 +#define dump_machdep_table(X) x86_64_dump_machdep_table(X) +#endif +#ifdef PPC64 +#define dump_machdep_table(X) ppc64_dump_machdep_table(X) +#endif +#ifdef MIPS +#define dump_machdep_table(X) mips_dump_machdep_table(X) +#endif +#ifdef SPARC64 +#define dump_machdep_table(X) sparc64_dump_machdep_table(X) +#endif +extern char *help_pointer[]; +extern char *help_alias[]; +extern char *help_ascii[]; +extern char *help_bpf[]; +extern char *help_bt[]; +extern char *help_btop[]; +extern char *help_dev[]; +extern char *help_dis[]; +extern char *help_eval[]; +extern char *help_exit[]; +extern char *help_extend[]; +extern char *help_files[]; +extern char *help_foreach[]; +extern char *help_fuser[]; +extern char *help_gdb[]; +extern char *help_help[]; +extern char *help_irq[]; +extern char *help_kmem[]; +extern char *help__list[]; +extern char *help_tree[]; +extern char *help_log[]; +extern char *help_mach[]; +extern char *help_mod[]; 
+extern char *help_mount[]; +extern char *help_net[]; +extern char *help_p[]; +extern char *help_ps[]; +extern char *help_pte[]; +extern char *help_ptob[]; +extern char *help_ptov[]; +extern char *help_quit[]; +extern char *help_rd[]; +extern char *help_repeat[]; +extern char *help_runq[]; +extern char *help_ipcs[]; +extern char *help_search[]; +extern char *help_set[]; +extern char *help_sig[]; +extern char *help_struct[]; +extern char *help_swap[]; +extern char *help_sym[]; +extern char *help_sys[]; +extern char *help_task[]; +extern char *help_timer[]; +extern char *help_union[]; +extern char *help_vm[]; +extern char *help_vtop[]; +extern char *help_waitq[]; +extern char *help_whatis[]; +extern char *help_wr[]; +#if defined(S390) || defined(S390X) +extern char *help_s390dbf[]; +#endif +extern char *help_map[]; + +/* + * task.c + */ +void task_init(void); +int set_context(ulong, ulong); +void show_context(struct task_context *); +ulong pid_to_task(ulong); +ulong task_to_pid(ulong); +int task_exists(ulong); +int is_kernel_thread(ulong); +int is_idle_thread(ulong); +void get_idle_threads(ulong *, int); +char *task_state_string(ulong, char *, int); +ulong task_flags(ulong); +ulong task_state(ulong); +ulong task_mm(ulong, int); +ulong task_tgid(ulong); +ulonglong task_last_run(ulong); +ulong vaddr_in_task_struct(ulong); +int comm_exists(char *); +struct task_context *task_to_context(ulong); +struct task_context *pid_to_context(ulong); +struct task_context *tgid_to_context(ulong); +ulong stkptr_to_task(ulong); +ulong task_to_thread_info(ulong); +ulong task_to_stackbase(ulong); +int str_to_context(char *, ulong *, struct task_context **); +#define STR_PID (0x1) +#define STR_TASK (0x2) +#define STR_INVALID (0x4) +char *get_panicmsg(char *); +char *task_cpu(int, char *, int); +void print_task_header(FILE *, struct task_context *, int); +ulong get_active_task(int); +int is_task_active(ulong); +int is_panic_thread(ulong); +int get_panic_ksp(struct bt_info *, ulong *); 
+void foreach(struct foreach_data *); +int pid_exists(ulong); +#define TASKS_PER_PID(x) pid_exists(x) +char *fill_task_struct(ulong); +#define IS_LAST_TASK_READ(task) ((ulong)(task) == tt->last_task_read) +char *fill_thread_info(ulong); +#define IS_LAST_THREAD_INFO_READ(ti) ((ulong)(ti) == tt->last_thread_info_read) +char *fill_mm_struct(ulong); +#define IS_LAST_MM_READ(mm) ((ulong)(mm) == tt->last_mm_read) +void do_task(ulong, ulong, struct reference *, unsigned int); +void clear_task_cache(void); +int get_active_set(void); +void clear_active_set(void); +void do_sig(ulong, ulong, struct reference *); +void modify_signame(int, char *, char *); +ulong generic_get_stackbase(ulong); +ulong generic_get_stacktop(ulong); +void dump_task_table(int); +void sort_context_array(void); +void sort_tgid_array(void); +int sort_by_tgid(const void *, const void *); +int in_irq_ctx(ulonglong, int, ulong); +void check_stack_overflow(void); + +/* + * extensions.c + */ +void register_extension(struct command_table_entry *); +void dump_extension_table(int); +void load_extension(char *); +void unload_extension(char *); +void preload_extensions(void); +/* Hooks for sial */ +unsigned long get_curtask(void); +char *crash_global_cmd(void); +struct command_table_entry *crash_cmd_table(void); + +/* + * kernel.c + */ +void kernel_init(void); +void module_init(void); +void verify_version(void); +void verify_spinlock(void); +void non_matching_kernel(void); +struct load_module *modref_to_load_module(char *); +int load_module_symbols_helper(char *); +void unlink_module(struct load_module *); +int check_specified_module_tree(char *, char *); +int is_system_call(char *, ulong); +void generic_dump_irq(int); +void generic_get_irq_affinity(int); +void generic_show_interrupts(int, ulong *); +int generic_dis_filter(ulong, char *, unsigned int); +int kernel_BUG_encoding_bytes(void); +void display_sys_stats(void); +char *get_uptime(char *, ulonglong *); +void clone_bt_info(struct bt_info *, struct bt_info 
*, struct task_context *); +void dump_kernel_table(int); +void dump_bt_info(struct bt_info *, char *where); +void dump_log(int); +#define SHOW_LOG_LEVEL (0x1) +#define SHOW_LOG_DICT (0x2) +#define SHOW_LOG_TEXT (0x4) +#define SHOW_LOG_AUDIT (0x8) +void set_cpu(int); +void clear_machdep_cache(void); +struct stack_hook *gather_text_list(struct bt_info *); +int get_cpus_online(void); +int get_cpus_active(void); +int get_cpus_present(void); +int get_cpus_possible(void); +int check_offline_cpu(int); +int hide_offline_cpu(int); +int get_highest_cpu_online(void); +int get_highest_cpu_present(void); +int get_cpus_to_display(void); +void get_log_from_vmcoreinfo(char *file); +int in_cpu_map(int, int); +void paravirt_init(void); +void print_stack_text_syms(struct bt_info *, ulong, ulong); +void back_trace(struct bt_info *); +int in_alternate_stack(int, ulong); +ulong cpu_map_addr(const char *type); +#define BT_RAW (0x1ULL) +#define BT_SYMBOLIC_ARGS (0x2ULL) +#define BT_FULL (0x4ULL) +#define BT_TEXT_SYMBOLS (0x8ULL) +#define BT_TEXT_SYMBOLS_PRINT (0x10ULL) +#define BT_TEXT_SYMBOLS_NOPRINT (0x20ULL) +#define BT_USE_GDB (0x40ULL) +#define BT_EXCEPTION_FRAME (0x80ULL) +#define BT_LINE_NUMBERS (0x100ULL) +#define BT_USER_EFRAME (0x200ULL) +#define BT_INCOMPLETE_USER_EFRAME (BT_USER_EFRAME) +#define BT_SAVE_LASTSP (0x400ULL) +#define BT_FROM_EXCEPTION (0x800ULL) +#define BT_FROM_CALLFRAME (0x1000ULL) +#define BT_EFRAME_SEARCH (0x2000ULL) +#define BT_SPECULATE (0x4000ULL) +#define BT_FRAMESIZE_DISABLE (BT_SPECULATE) +#define BT_RESCHEDULE (0x8000ULL) +#define BT_SCHEDULE (BT_RESCHEDULE) +#define BT_RET_FROM_SMP_FORK (0x10000ULL) +#define BT_STRACE (0x20000ULL) +#define BT_KDUMP_ADJUST (BT_STRACE) +#define BT_KSTACKP (0x40000ULL) +#define BT_LOOP_TRAP (0x80000ULL) +#define BT_BUMP_FRAME_LEVEL (0x100000ULL) +#define BT_EFRAME_COUNT (0x200000ULL) +#define BT_CPU_IDLE (0x400000ULL) +#define BT_WRAP_TRAP (0x800000ULL) +#define BT_KERNEL_THREAD (0x1000000ULL) +#define BT_ERROR_MASK 
(BT_LOOP_TRAP|BT_WRAP_TRAP|BT_KERNEL_THREAD|BT_CPU_IDLE) +#define BT_UNWIND_ERROR (0x2000000ULL) +#define BT_OLD_BACK_TRACE (0x4000000ULL) +#define BT_OPT_BACK_TRACE (0x4000000ULL) +#define BT_FRAMESIZE_DEBUG (0x8000000ULL) +#define BT_CONTEXT_SWITCH (0x10000000ULL) +#define BT_HARDIRQ (0x20000000ULL) +#define BT_SOFTIRQ (0x40000000ULL) +#define BT_CHECK_CALLER (0x80000000ULL) +#define BT_NO_CHECK_CALLER (0x100000000ULL) +#define BT_EXCEPTION_STACK (0x200000000ULL) +#define BT_IRQSTACK (0x400000000ULL) +#define BT_DUMPFILE_SEARCH (0x800000000ULL) +#define BT_EFRAME_SEARCH2 (0x1000000000ULL) +#define BT_START (0x2000000000ULL) +#define BT_TEXT_SYMBOLS_ALL (0x4000000000ULL) +#define BT_XEN_STOP_THIS_CPU (0x8000000000ULL) +#define BT_THREAD_GROUP (0x10000000000ULL) +#define BT_SAVE_EFRAME_IP (0x20000000000ULL) +#define BT_FULL_SYM_SLAB (0x40000000000ULL) +#define BT_KDUMP_ELF_REGS (0x80000000000ULL) +#define BT_USER_SPACE (0x100000000000ULL) +#define BT_KERNEL_SPACE (0x200000000000ULL) +#define BT_FULL_SYM_SLAB2 (0x400000000000ULL) +#define BT_EFRAME_TARGET (0x800000000000ULL) +#define BT_CPUMASK (0x1000000000000ULL) +#define BT_SHOW_ALL_REGS (0x2000000000000ULL) +#define BT_REGS_NOT_FOUND (0x4000000000000ULL) +#define BT_SYMBOL_OFFSET (BT_SYMBOLIC_ARGS) + +#define BT_REF_HEXVAL (0x1) +#define BT_REF_SYMBOL (0x2) +#define BT_REF_FOUND (0x4) +#define BT_REFERENCE_CHECK(X) ((X)->ref) +#define BT_REFERENCE_FOUND(X) ((X)->ref && ((X)->ref->cmdflags & BT_REF_FOUND)) + +#define NO_MODULES() \ + (!kt->module_list || (kt->module_list == kt->kernel_module)) + +#define USER_EFRAME_ADDR(task) \ + ((ulong)task + UNION_SIZE("task_union") - SIZE(pt_regs)) + +struct remote_file { + char *filename; + char *local; + int fd; + int flags; + int type; + long csum; + off_t size; +}; + +#define REMOTE_VERBOSE (O_RDWR << 1) +#define REMOTE_COPY_DONE (REMOTE_VERBOSE << 1) +#define TYPE_ELF (REMOTE_VERBOSE << 2) +#define TYPE_DEVMEM (REMOTE_VERBOSE << 3) +#define TYPE_MCLXCD (REMOTE_VERBOSE 
<< 4) +#define TYPE_LKCD (REMOTE_VERBOSE << 5) +#define TYPE_S390D (REMOTE_VERBOSE << 6) +#define TYPE_NETDUMP (REMOTE_VERBOSE << 7) + +ulonglong xen_m2p(ulonglong); + +void read_in_kernel_config(int); + +#define IKCFG_INIT (0) +#define IKCFG_READ (1) +#define IKCFG_SETUP (2) +#define IKCFG_FREE (3) + +int get_kernel_config(char *, char **); +enum { + IKCONFIG_N, + IKCONFIG_Y, + IKCONFIG_M, + IKCONFIG_STR, +}; + +#define MAGIC_START "IKCFG_ST" +#define MAGIC_END "IKCFG_ED" +#define MAGIC_SIZE (sizeof(MAGIC_START) - 1) + +/* + * dev.c + */ +void dev_init(void); +void dump_dev_table(void); +void devdump_extract(void *, ulonglong, char *, FILE *); +void devdump_info(void *, ulonglong, FILE *); + +/* + * ipcs.c + */ +void ipcs_init(void); +ulong idr_find(ulong, int); + +#ifdef ARM +void arm_init(int); +void arm_dump_machdep_table(ulong); +int arm_is_vmalloc_addr(ulong); +void arm_dump_backtrace_entry(struct bt_info *, int, ulong, ulong); + +#define display_idt_table() \ + error(FATAL, "-d option is not applicable to ARM architecture\n") + +struct arm_pt_regs { + ulong uregs[18]; +}; + +#define ARM_cpsr uregs[16] +#define ARM_pc uregs[15] +#define ARM_lr uregs[14] +#define ARM_sp uregs[13] +#define ARM_ip uregs[12] +#define ARM_fp uregs[11] +#define ARM_r10 uregs[10] +#define ARM_r9 uregs[9] +#define ARM_r8 uregs[8] +#define ARM_r7 uregs[7] +#define ARM_r6 uregs[6] +#define ARM_r5 uregs[5] +#define ARM_r4 uregs[4] +#define ARM_r3 uregs[3] +#define ARM_r2 uregs[2] +#define ARM_r1 uregs[1] +#define ARM_r0 uregs[0] +#define ARM_ORIG_r0 uregs[17] + +#define KSYMS_START (0x1) +#define PHYS_BASE (0x2) +#define PGTABLE_V2 (0x4) +#define IDMAP_PGD (0x8) + +#define KVBASE_MASK (0x1ffffff) + +struct machine_specific { + ulong phys_base; + ulong vmalloc_start_addr; + ulong modules_vaddr; + ulong modules_end; + ulong kernel_text_start; + ulong kernel_text_end; + ulong exception_text_start; + ulong exception_text_end; + ulonglong last_pgd_read_lpae; + ulonglong last_pmd_read_lpae; + 
ulonglong last_ptbl_read_lpae; + struct arm_pt_regs *crash_task_regs; + int unwind_index_prel31; +}; + +int init_unwind_tables(void); +void unwind_backtrace(struct bt_info *); +#endif /* ARM */ + +/* + * arm64.c + */ +#ifdef ARM64 +void arm64_init(int); +void arm64_dump_machdep_table(ulong); +ulong arm64_VTOP(ulong); +int arm64_IS_VMALLOC_ADDR(ulong); +ulong arm64_swp_type(ulong); +ulong arm64_swp_offset(ulong); +#endif + +/* + * alpha.c + */ +#ifdef ALPHA +void alpha_init(int); +void alpha_dump_machdep_table(ulong); +#define display_idt_table() \ + error(FATAL, "-d option is not applicable to alpha architecture\n") + +#define HWRESET_TASK(X) ((machdep->flags & HWRESET) && is_task_active(X) && \ + (task_to_context(X)->processor == 0)) +#endif + +/* + * x86.c + */ +#ifdef X86 +void x86_init(int); +void x86_dump_machdep_table(ulong); +void x86_display_idt_table(void); +#define display_idt_table() x86_display_idt_table() +#define KSYMS_START (0x1) +void x86_dump_eframe_common(struct bt_info *bt, ulong *, int); +char *x86_function_called_by(ulong); +struct syment *x86_jmp_error_code(ulong); +struct syment *x86_text_lock_jmp(ulong, ulong *); + +struct machine_specific { + ulong *idt_table; + ulong entry_tramp_start; + ulong entry_tramp_end; + physaddr_t entry_tramp_start_phys; + ulonglong last_pmd_read_PAE; + ulonglong last_ptbl_read_PAE; + ulong page_protnone; + int max_numnodes; + ulong *remap_start_vaddr; + ulong *remap_end_vaddr; + ulong *remap_start_pfn; +}; + +struct syment *x86_is_entry_tramp_address(ulong, ulong *); +#endif + +/* + * x86_64.c + */ +#ifdef X86_64 +void x86_64_init(int); +void x86_64_dump_machdep_table(ulong); +ulong x86_64_PTOV(ulong); +ulong x86_64_VTOP(ulong); +int x86_64_IS_VMALLOC_ADDR(ulong); +ulong x86_64_swp_type(ulong); +ulong x86_64_swp_offset(ulong); +void x86_64_display_idt_table(void); +#define display_idt_table() x86_64_display_idt_table() +long x86_64_exception_frame(ulong, ulong, char *, struct bt_info *, FILE *); +#define 
EFRAME_INIT (0) + +struct x86_64_pt_regs_offsets { + long r15; + long r14; + long r13; + long r12; + long rbp; + long rbx; +/* arguments: non interrupts/non tracing syscalls only save upto here*/ + long r11; + long r10; + long r9; + long r8; + long rax; + long rcx; + long rdx; + long rsi; + long rdi; + long orig_rax; +/* end of arguments */ +/* cpu exception frame or undefined */ + long rip; + long cs; + long eflags; + long rsp; + long ss; +}; + +#define MAX_EXCEPTION_STACKS 7 +#define NMI_STACK (machdep->machspec->stkinfo.NMI_stack_index) + +struct x86_64_stkinfo { + ulong ebase[NR_CPUS][MAX_EXCEPTION_STACKS]; + int esize[MAX_EXCEPTION_STACKS]; + ulong ibase[NR_CPUS]; + int isize; + int NMI_stack_index; + char *exception_stacks[MAX_EXCEPTION_STACKS]; +}; + +typedef struct __attribute__((__packed__)) { + signed short sp_offset; + signed short bp_offset; + unsigned int sp_reg:4; + unsigned int bp_reg:4; + unsigned int type:2; + unsigned int end:1; +} kernel_orc_entry; + +struct ORC_data { + int module_ORC; + uint lookup_num_blocks; + ulong __start_orc_unwind_ip; + ulong __stop_orc_unwind_ip; + ulong __start_orc_unwind; + ulong __stop_orc_unwind; + ulong orc_lookup; + ulong ip_entry; + ulong orc_entry; + kernel_orc_entry kernel_orc_entry; +}; + +#define ORC_TYPE_CALL 0 +#define ORC_TYPE_REGS 1 +#define ORC_TYPE_REGS_IRET 2 +#define UNWIND_HINT_TYPE_SAVE 3 +#define UNWIND_HINT_TYPE_RESTORE 4 + +#define ORC_REG_UNDEFINED 0 +#define ORC_REG_PREV_SP 1 +#define ORC_REG_DX 2 +#define ORC_REG_DI 3 +#define ORC_REG_BP 4 +#define ORC_REG_SP 5 +#define ORC_REG_R10 6 +#define ORC_REG_R13 7 +#define ORC_REG_BP_INDIRECT 8 +#define ORC_REG_SP_INDIRECT 9 +#define ORC_REG_MAX 15 + +struct machine_specific { + ulong userspace_top; + ulong page_offset; + ulong vmalloc_start_addr; + ulong vmalloc_end; + ulong vmemmap_vaddr; + ulong vmemmap_end; + ulong modules_vaddr; + ulong modules_end; + ulong phys_base; + char *pml4; + char *upml; + ulong last_upml_read; + ulong last_pml4_read; + 
char *irqstack; + ulong irq_eframe_link; + struct x86_64_pt_regs_offsets pto; + struct x86_64_stkinfo stkinfo; + ulong *current; + ulong *crash_nmi_rsp; + ulong vsyscall_page; + ulong thread_return; + ulong page_protnone; + ulong GART_start; + ulong GART_end; + ulong kernel_image_size; + ulong physical_mask_shift; + ulong pgdir_shift; + char *p4d; + ulong last_p4d_read; + struct ORC_data orc; + ulong irq_stack_gap; + ulong kpti_entry_stack; + ulong kpti_entry_stack_size; + ulong ptrs_per_pgd; + ulong cpu_entry_area_start; + ulong cpu_entry_area_end; + ulong page_offset_force; +}; + +#define KSYMS_START (0x1) +#define PT_REGS_INIT (0x2) +#define VM_ORIG (0x4) +#define VM_2_6_11 (0x8) +#define VM_XEN (0x10) +#define NO_TSS (0x20) +#define SCHED_TEXT (0x40) +#define PHYS_BASE (0x80) +#define VM_XEN_RHEL4 (0x100) +#define FRAMEPOINTER (0x200) +#define GART_REGION (0x400) +#define NESTED_NMI (0x800) +#define RANDOMIZED (0x1000) +#define VM_5LEVEL (0x2000) +#define ORC (0x4000) +#define KPTI (0x8000) +#define L1TF (0x10000) + +#define VM_FLAGS (VM_ORIG|VM_2_6_11|VM_XEN|VM_XEN_RHEL4|VM_5LEVEL) + +#define _2MB_PAGE_MASK (~((MEGABYTES(2))-1)) + +#endif + +#if defined(X86) || defined(X86_64) + +/* + * unwind_x86_32_64.c + */ +void init_unwind_table(void); +int dwarf_backtrace(struct bt_info *, int, ulong); +void dwarf_debug(struct bt_info *); +int dwarf_print_stack_entry(struct bt_info *, int); + +#endif + + +/* + * ppc64.c + */ + +/* + * This structure was copied from kernel source + * in include/asm-ppc/ptrace.h + */ +struct ppc64_pt_regs { + long gpr[32]; + long nip; + long msr; + long orig_gpr3; /* Used for restarting system calls */ + long ctr; + long link; + long xer; + long ccr; + long mq; /* 601 only (not used at present) */ + /* Used on APUS to hold IPL value. 
*/ + long trap; /* Reason for being here */ + long dar; /* Fault registers */ + long dsisr; + long result; /* Result of a system call */ +}; + +struct ppc64_elf_siginfo { + int si_signo; + int si_code; + int si_errno; +}; + +struct ppc64_elf_prstatus { + struct ppc64_elf_siginfo pr_info; + short pr_cursig; + unsigned long pr_sigpend; + unsigned long pr_sighold; + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + struct timeval pr_utime; + struct timeval pr_stime; + struct timeval pr_cutime; + struct timeval pr_cstime; + struct ppc64_pt_regs pr_reg; + int pr_fpvalid; +}; + +#ifdef PPC64 + +struct ppc64_opal { + uint64_t base; + uint64_t entry; + uint64_t size; +}; + +struct ppc64_vmemmap { + unsigned long phys; + unsigned long virt; +}; + +/* + * Used to store the HW interrupt stack. It is only for 2.4. + */ +struct machine_specific { + ulong hwintrstack[NR_CPUS]; + char *hwstackbuf; + uint hwstacksize; + + uint l4_index_size; + uint l3_index_size; + uint l2_index_size; + uint l1_index_size; + + uint ptrs_per_l4; + uint ptrs_per_l3; + uint ptrs_per_l2; + uint ptrs_per_l1; + + uint l4_shift; + uint l3_shift; + uint l2_shift; + uint l1_shift; + + uint pte_rpn_shift; + ulong pte_rpn_mask; + ulong pgd_masked_bits; + ulong pud_masked_bits; + ulong pmd_masked_bits; + + int vmemmap_cnt; + int vmemmap_psize; + ulong vmemmap_base; + struct ppc64_vmemmap *vmemmap_list; + ulong _page_pte; + ulong _page_present; + ulong _page_user; + ulong _page_rw; + ulong _page_guarded; + ulong _page_coherent; + ulong _page_no_cache; + ulong _page_writethru; + ulong _page_dirty; + ulong _page_accessed; + int (*is_kvaddr)(ulong); + int (*is_vmaddr)(ulong); + struct ppc64_opal opal; +}; + +void ppc64_init(int); +void ppc64_dump_machdep_table(ulong); +#define display_idt_table() \ + error(FATAL, "-d option is not applicable to PowerPC architecture\n") +#define KSYMS_START (0x1) +#define VM_ORIG (0x2) +#define VMEMMAP_AWARE (0x4) +#define BOOK3E (0x8) +#define PHYS_ENTRY_L4 (0x10) 
+#define SWAP_ENTRY_L4 (0x20) +/* + * The flag bit for radix MMU in cpu_spec.mmu_features + * in the kernel is also 0x40. + */ +#define RADIX_MMU (0x40) +#define OPAL_FW (0x80) + +#define REGION_SHIFT (60UL) +#define REGION_ID(addr) (((unsigned long)(addr)) >> REGION_SHIFT) +#define VMEMMAP_REGION_ID (0xfUL) +#endif + +/* + * ppc.c + */ +#ifdef PPC +void ppc_init(int); +void ppc_dump_machdep_table(ulong); +void ppc_relocate_nt_prstatus_percpu(void **, uint *); +#define display_idt_table() \ + error(FATAL, "-d option is not applicable to PowerPC architecture\n") +#define KSYMS_START (0x1) +/* This should match PPC_FEATURE_BOOKE from include/asm-powerpc/cputable.h */ +#define CPU_BOOKE (0x00008000) +#else +#define ppc_relocate_nt_prstatus_percpu(X,Y) do {} while (0) +#endif + +/* + * lkcd_fix_mem.c + */ + +struct _dump_header_asm_s; +struct _dump_header_s; +ulong get_lkcd_switch_stack(ulong); +int fix_addr_v8(struct _dump_header_asm_s *); +int lkcd_dump_init_v8_arch(struct _dump_header_s *dh); +int fix_addr_v7(int); +int get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp); +int lkcd_get_kernel_start_v8(ulong *addr); + +/* + * lkcd_v8.c + */ +int get_lkcd_regs_for_cpu_v8(struct bt_info *bt, ulong *eip, ulong *esp); + +/* + * ia64.c + */ +#ifdef IA64 +void ia64_init(int); +void ia64_dump_machdep_table(ulong); +void ia64_dump_line_number(ulong); +ulong ia64_get_switch_stack(ulong); +void ia64_exception_frame(ulong, struct bt_info *bt); +ulong ia64_PTOV(ulong); +ulong ia64_VTOP(ulong); +int ia64_IS_VMALLOC_ADDR(ulong); +#define display_idt_table() \ + error(FATAL, "-d option TBD on ia64 architecture\n"); +int ia64_in_init_stack(ulong addr); +int ia64_in_mca_stack_hyper(ulong addr, struct bt_info *bt); +physaddr_t ia64_xen_kdump_p2m(struct xen_kdump_data *xkd, physaddr_t pseudo); + +#define OLD_UNWIND (0x1) /* CONFIG_IA64_NEW_UNWIND not turned on */ +#define NEW_UNWIND (0x2) /* CONFIG_IA64_NEW_UNWIND turned on */ +#define NEW_UNW_V1 (0x4) +#define NEW_UNW_V2 (0x8) 
+#define NEW_UNW_V3 (0x10) +#define UNW_OUT_OF_SYNC (0x20) /* shared data structures out of sync */ +#define UNW_READ (0x40) /* kernel unw has been read successfully */ +#define MEM_LIMIT (0x80) +#define UNW_PTREGS (0x100) +#define UNW_R0 (0x200) + +#undef IA64_RBS_OFFSET +#undef IA64_STK_OFFSET +#define IA64_RBS_OFFSET ((SIZE(task_struct) + 15) & ~15) +#define IA64_STK_OFFSET (STACKSIZE()) + +struct machine_specific { + ulong cpu_data_address; + ulong unimpl_va_mask; + ulong unimpl_pa_mask; + long unw_tables_offset; + long unw_kernel_table_offset; + long unw_pt_regs_offsets; + int script_index; + struct unw_script *script_cache; + ulong script_cache_fills; + ulong script_cache_hits; + void *unw; + ulong mem_limit; + ulong kernel_region; + ulong kernel_start; + ulong phys_start; + ulong vmalloc_start; + char *ia64_memmap; + uint64_t efi_memmap_size; + uint64_t efi_memdesc_size; + void (*unwind_init)(void); + void (*unwind)(struct bt_info *); + void (*dump_unwind_stats)(void); + int (*unwind_debug)(ulong); + int ia64_init_stack_size; +}; + + +/* + * unwind.c + */ +void unwind_init_v1(void); +void unwind_v1(struct bt_info *); +void dump_unwind_stats_v1(void); +int unwind_debug_v1(ulong); + +void unwind_init_v2(void); +void unwind_v2(struct bt_info *); +void dump_unwind_stats_v2(void); +int unwind_debug_v2(ulong); + +void unwind_init_v3(void); +void unwind_v3(struct bt_info *); +void dump_unwind_stats_v3(void); +int unwind_debug_v3(ulong); + +#endif /* IA64 */ + +/* + * s390.c + */ +#ifdef S390 +void s390_init(int); +void s390_dump_machdep_table(ulong); +#define display_idt_table() \ + error(FATAL, "-d option is not applicable to S390 architecture\n") +#define KSYMS_START (0x1) +#endif + +/* + * s390_dump.c + */ +int is_s390_dump(char *); +FILE* s390_dump_init(char *); +int read_s390_dumpfile(int, void *, int, ulong, physaddr_t); +int write_s390_dumpfile(int, void *, int, ulong, physaddr_t); +uint s390_page_size(void); +int s390_memory_used(void); +int 
s390_free_memory(void); +int s390_memory_dump(FILE *); +ulong get_s390_panic_task(void); +void get_s390_panicmsg(char *); + +/* + * s390x.c + */ +#ifdef S390X +void s390x_init(int); +void s390x_dump_machdep_table(ulong); +#define display_idt_table() \ + error(FATAL, "-d option is not applicable to S390X architecture\n") +#define KSYMS_START (0x1) +#endif + +/* + * mips.c + */ +void mips_display_regs_from_elf_notes(int, FILE *); + +#ifdef MIPS +void mips_init(int); +void mips_dump_machdep_table(ulong); + +#define display_idt_table() \ + error(FATAL, "-d option is not applicable to MIPS architecture\n") + +struct mips_regset { + ulong regs[45]; +}; + +struct mips_pt_regs_main { + ulong regs[32]; + ulong cp0_status; + ulong hi; + ulong lo; +}; + +struct mips_pt_regs_cp0 { + ulong cp0_badvaddr; + ulong cp0_cause; + ulong cp0_epc; +}; + +#define KSYMS_START (0x1) +#define PHYS_BASE (0x2) + +#define KVBASE_MASK (0x1ffffff) + +struct machine_specific { + ulong phys_base; + ulong vmalloc_start_addr; + ulong modules_vaddr; + ulong modules_end; + + ulong _page_present; + ulong _page_read; + ulong _page_write; + ulong _page_accessed; + ulong _page_modified; + ulong _page_global; + ulong _page_valid; + ulong _page_no_read; + ulong _page_no_exec; + ulong _page_dirty; + + ulong _pfn_shift; + +#define _PAGE_PRESENT (machdep->machspec->_page_present) +#define _PAGE_READ (machdep->machspec->_page_read) +#define _PAGE_WRITE (machdep->machspec->_page_write) +#define _PAGE_ACCESSED (machdep->machspec->_page_accessed) +#define _PAGE_MODIFIED (machdep->machspec->_page_modified) +#define _PAGE_GLOBAL (machdep->machspec->_page_global) +#define _PAGE_VALID (machdep->machspec->_page_valid) +#define _PAGE_NO_READ (machdep->machspec->_page_no_read) +#define _PAGE_NO_EXEC (machdep->machspec->_page_no_exec) +#define _PAGE_DIRTY (machdep->machspec->_page_dirty) +#define _PFN_SHIFT (machdep->machspec->_pfn_shift) + + struct mips_regset *crash_task_regs; +}; +#endif /* MIPS */ + +/* + * sparc64.c 
+ */ +#ifdef SPARC64 +void sparc64_init(int); +void sparc64_dump_machdep_table(ulong); +int sparc64_vmalloc_addr(ulong); +#define display_idt_table() \ + error(FATAL, "The -d option is not applicable to sparc64.\n") +#endif + +/* + * netdump.c + */ +int is_netdump(char *, ulong); +uint netdump_page_size(void); +int read_netdump(int, void *, int, ulong, physaddr_t); +int write_netdump(int, void *, int, ulong, physaddr_t); +int netdump_free_memory(void); +int netdump_memory_used(void); +int netdump_init(char *, FILE *); +ulong get_netdump_panic_task(void); +ulong get_netdump_switch_stack(ulong); +FILE *set_netdump_fp(FILE *); +int netdump_memory_dump(FILE *); +void get_netdump_regs(struct bt_info *, ulong *, ulong *); +int is_partial_netdump(void); +void get_netdump_regs_x86(struct bt_info *, ulong *, ulong *); +void get_netdump_regs_x86_64(struct bt_info *, ulong *, ulong *); +void dump_registers_for_elf_dumpfiles(void); +struct vmcore_data; +struct vmcore_data *get_kdump_vmcore_data(void); +int read_kdump(int, void *, int, ulong, physaddr_t); +int write_kdump(int, void *, int, ulong, physaddr_t); +int is_kdump(char *, ulong); +int kdump_init(char *, FILE *); +ulong get_kdump_panic_task(void); +uint kdump_page_size(void); +int kdump_free_memory(void); +int kdump_memory_used(void); +int kdump_memory_dump(FILE *); +void get_kdump_regs(struct bt_info *, ulong *, ulong *); +void xen_kdump_p2m_mfn(char *); +int is_sadump_xen(void); +void set_xen_phys_start(char *); +ulong xen_phys_start(void); +int xen_major_version(void); +int xen_minor_version(void); +int get_netdump_arch(void); +int exist_regs_in_elf_notes(struct task_context *); +void *get_regs_from_elf_notes(struct task_context *); +void map_cpus_to_prstatus(void); +int kdump_phys_base(ulong *); +int kdump_set_phys_base(ulong); +int arm_kdump_phys_base(ulong *); +int arm_kdump_phys_end(ulong *); +int is_proc_kcore(char *, ulong); +int proc_kcore_init(FILE *, int); +int read_proc_kcore(int, void *, int, ulong, 
physaddr_t); +int write_proc_kcore(int, void *, int, ulong, physaddr_t); +int kcore_memory_dump(FILE *); +void dump_registers_for_qemu_mem_dump(void); +void kdump_backup_region_init(void); +void display_regs_from_elf_notes(int, FILE *); +void display_ELF_note(int, int, void *, FILE *); +void *netdump_get_prstatus_percpu(int); +int kdump_kaslr_check(void); +void display_vmcoredd_note(void *ptr, FILE *ofp); +QEMUCPUState *kdump_get_qemucpustate(int); +void kdump_device_dump_info(FILE *); +void kdump_device_dump_extract(int, char *, FILE *); +#define PRSTATUS_NOTE (1) +#define QEMU_NOTE (2) + +/* + * ramdump.c + */ +int is_ramdump(char *pattern); +char *ramdump_to_elf(void); +void ramdump_elf_output_file(char *opt); +void ramdump_cleanup(void); +int read_ramdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr); +void show_ramdump_files(void); +void dump_ramdump_data(void); +int is_ramdump_image(void); + +/* + * diskdump.c + */ +int is_diskdump(char *); +uint diskdump_page_size(void); +int read_diskdump(int, void *, int, ulong, physaddr_t); +int write_diskdump(int, void *, int, ulong, physaddr_t); +int diskdump_free_memory(void); +int diskdump_memory_used(void); +int diskdump_init(char *, FILE *); +ulong get_diskdump_panic_task(void); +ulong get_diskdump_switch_stack(ulong); +int diskdump_memory_dump(FILE *); +FILE *set_diskdump_fp(FILE *); +void get_diskdump_regs(struct bt_info *, ulong *, ulong *); +int diskdump_phys_base(unsigned long *); +int diskdump_set_phys_base(unsigned long); +extern ulong *diskdump_flags; +int is_partial_diskdump(void); +int get_dump_level(void); +int dumpfile_is_split(void); +void show_split_dumpfiles(void); +void x86_process_elf_notes(void *, unsigned long); +void *diskdump_get_prstatus_percpu(int); +void map_cpus_to_prstatus_kdump_cmprs(void); +void diskdump_display_regs(int, FILE *); +void process_elf32_notes(void *, ulong); +void process_elf64_notes(void *, ulong); +void dump_registers_for_compressed_kdump(void); +int 
diskdump_kaslr_check(void); +QEMUCPUState *diskdump_get_qemucpustate(int); +void diskdump_device_dump_info(FILE *); +void diskdump_device_dump_extract(int, char *, FILE *); + +/* + * makedumpfile.c + */ +void check_flattened_format(char *file); +int is_flattened_format(char *file); +int read_flattened_format(int fd, off_t offset, void *buf, size_t size); +void dump_flat_header(FILE *); + +/* + * xendump.c + */ +int is_xendump(char *); +int read_xendump(int, void *, int, ulong, physaddr_t); +int write_xendump(int, void *, int, ulong, physaddr_t); +uint xendump_page_size(void); +int xendump_free_memory(void); +int xendump_memory_used(void); +int xendump_init(char *, FILE *); +int xendump_memory_dump(FILE *); +ulong get_xendump_panic_task(void); +void get_xendump_regs(struct bt_info *, ulong *, ulong *); +char *xc_core_mfn_to_page(ulong, char *); +int xc_core_mfn_to_page_index(ulong); +void xendump_panic_hook(char *); +int read_xendump_hyper(int, void *, int, ulong, physaddr_t); +struct xendump_data *get_xendump_data(void); + +/* + * kvmdump.c + */ +int is_kvmdump(char *); +int is_kvmdump_mapfile(char *); +int kvmdump_init(char *, FILE *); +int read_kvmdump(int, void *, int, ulong, physaddr_t); +int write_kvmdump(int, void *, int, ulong, physaddr_t); +int kvmdump_free_memory(void); +int kvmdump_memory_used(void); +int kvmdump_memory_dump(FILE *); +void get_kvmdump_regs(struct bt_info *, ulong *, ulong *); +ulong get_kvmdump_panic_task(void); +int kvmdump_phys_base(unsigned long *); +void kvmdump_display_regs(int, FILE *); +void set_kvmhost_type(char *); +void set_kvm_iohole(char *); +struct kvm_register_set { + union { + uint32_t cs; + uint32_t ss; + uint32_t ds; + uint32_t es; + uint32_t fs; + uint32_t gs; + uint64_t ip; + uint64_t flags; + uint64_t regs[16]; + } x86; +}; +int get_kvm_register_set(int, struct kvm_register_set *); + +/* + * sadump.c + */ +int is_sadump(char *); +uint sadump_page_size(void); +int read_sadump(int, void *, int, ulong, physaddr_t); +int 
write_sadump(int, void *, int, ulong, physaddr_t); +int sadump_init(char *, FILE *); +int sadump_is_diskset(void); +ulong get_sadump_panic_task(void); +ulong get_sadump_switch_stack(ulong); +int sadump_memory_used(void); +int sadump_free_memory(void); +int sadump_memory_dump(FILE *); +FILE *set_sadump_fp(FILE *); +void get_sadump_regs(struct bt_info *bt, ulong *ipp, ulong *spp); +void sadump_display_regs(int, FILE *); +int sadump_phys_base(ulong *); +int sadump_set_phys_base(ulong); +void sadump_show_diskset(void); +int sadump_is_zero_excluded(void); +void sadump_set_zero_excluded(void); +void sadump_unset_zero_excluded(void); +struct sadump_data; +struct sadump_data *get_sadump_data(void); +int sadump_calc_kaslr_offset(ulong *); +int sadump_get_cr3_idtr(ulong *, ulong *); + +/* + * qemu.c + */ +int qemu_init(char *); + +/* + * qemu-load.c + */ +int is_qemu_vm_file(char *); +void dump_qemu_header(FILE *); + +/* + * net.c + */ +void net_init(void); +void dump_net_table(void); +void dump_sockets_workhorse(ulong, ulong, struct reference *); + +/* + * remote.c + */ +int is_remote_daemon(char *); +physaddr_t get_remote_phys_base(physaddr_t, physaddr_t); +physaddr_t remote_vtop(int, physaddr_t); +int get_remote_regs(struct bt_info *, ulong *, ulong *); +physaddr_t get_remote_cr3(int); +void remote_fd_init(void); +int get_remote_file(struct remote_file *); +uint remote_page_size(void); +int find_remote_module_objfile(struct load_module *lm, char *, char *); +int remote_free_memory(void); +int remote_memory_dump(int); +int remote_memory_used(void); +void remote_exit(void); +int remote_execute(void); +void remote_clear_pipeline(void); +int remote_memory_read(int, char *, int, physaddr_t, int); + +/* + * vmware_vmss.c + */ +int is_vmware_vmss(char *filename); +int vmware_vmss_init(char *filename, FILE *ofp); +uint vmware_vmss_page_size(void); +int read_vmware_vmss(int, void *, int, ulong, physaddr_t); +int write_vmware_vmss(int, void *, int, ulong, physaddr_t); +void 
vmware_vmss_display_regs(int, FILE *); +void get_vmware_vmss_regs(struct bt_info *, ulong *, ulong *); +int vmware_vmss_memory_dump(FILE *); +void dump_registers_for_vmss_dump(void); +int vmware_vmss_valid_regs(struct bt_info *); +int vmware_vmss_get_cr3_idtr(ulong *, ulong *); +int vmware_vmss_phys_base(ulong *phys_base); +int vmware_vmss_set_phys_base(ulong); + +/* + * kaslr_helper.c + */ +int calc_kaslr_offset(ulong *, ulong *); + +/* + * gnu_binutils.c + */ + +/* NO LONGER IN USE */ + +/* + * test.c + */ +void cmd_template(void); +void foreach_test(ulong, ulong); + +/* + * va_server.c + */ +int mclx_page_size(void); +int vas_memory_used(void); +int vas_memory_dump(FILE *); +int vas_free_memory(char *); +void set_vas_debug(ulong); +size_t vas_write(void *, size_t); +int va_server_init(char *, ulong *, ulong *, ulong *); +size_t vas_read(void *, size_t); +int vas_lseek(ulong, int); + +/* + * lkcd_x86_trace.c + */ +int lkcd_x86_back_trace(struct bt_info *, int, FILE *); + +/* + * lkcd_common.c + */ +int lkcd_dump_init(FILE *, int, char *); +ulong get_lkcd_panic_task(void); +void get_lkcd_panicmsg(char *); +int is_lkcd_compressed_dump(char *); +void dump_lkcd_environment(ulong); +int lkcd_lseek(physaddr_t); +long lkcd_read(void *, long); +void set_lkcd_debug(ulong); +FILE *set_lkcd_fp(FILE *); +uint lkcd_page_size(void); +int lkcd_memory_used(void); +int lkcd_memory_dump(FILE *); +int lkcd_free_memory(void); +void lkcd_print(char *, ...); +void set_remote_lkcd_panic_data(ulong, char *); +void set_lkcd_nohash(void); +int lkcd_load_dump_page_header(void *, ulong); +void lkcd_dumpfile_complaint(uint32_t, uint32_t, int); +int set_mb_benchmark(ulong); +ulonglong fix_lkcd_address(ulonglong); +int lkcd_get_kernel_start(ulong *addr); +int get_lkcd_regs_for_cpu(struct bt_info *bt, ulong *eip, ulong *esp); + +/* + * lkcd_v1.c + */ +int lkcd_dump_init_v1(FILE *, int); +void dump_dump_page_v1(char *, void *); +void dump_lkcd_environment_v1(ulong); +uint32_t 
get_dp_size_v1(void); +uint32_t get_dp_flags_v1(void); +uint64_t get_dp_address_v1(void); + +/* + * lkcd_v2_v3.c + */ +int lkcd_dump_init_v2_v3(FILE *, int); +void dump_dump_page_v2_v3(char *, void *); +void dump_lkcd_environment_v2_v3(ulong); +uint32_t get_dp_size_v2_v3(void); +uint32_t get_dp_flags_v2_v3(void); +uint64_t get_dp_address_v2_v3(void); + +/* + * lkcd_v5.c + */ +int lkcd_dump_init_v5(FILE *, int); +void dump_dump_page_v5(char *, void *); +void dump_lkcd_environment_v5(ulong); +uint32_t get_dp_size_v5(void); +uint32_t get_dp_flags_v5(void); +uint64_t get_dp_address_v5(void); + +/* + * lkcd_v7.c + */ +int lkcd_dump_init_v7(FILE *, int, char *); +void dump_dump_page_v7(char *, void *); +void dump_lkcd_environment_v7(ulong); +uint32_t get_dp_size_v7(void); +uint32_t get_dp_flags_v7(void); +uint64_t get_dp_address_v7(void); + +/* + * lkcd_v8.c + */ +int lkcd_dump_init_v8(FILE *, int, char *); +void dump_dump_page_v8(char *, void *); +void dump_lkcd_environment_v8(ulong); +uint32_t get_dp_size_v8(void); +uint32_t get_dp_flags_v8(void); +uint64_t get_dp_address_v8(void); + +#ifdef LKCD_COMMON +/* + * Until they differ across versions, these remain usable in the common + * routines in lkcd_common.c + */ +#define LKCD_DUMP_MAGIC_NUMBER (0xa8190173618f23edULL) +#define LKCD_DUMP_MAGIC_LIVE (0xa8190173618f23cdULL) + +#define LKCD_DUMP_V1 (0x1) /* DUMP_VERSION_NUMBER */ +#define LKCD_DUMP_V2 (0x2) /* DUMP_VERSION_NUMBER */ +#define LKCD_DUMP_V3 (0x3) /* DUMP_VERSION_NUMBER */ +#define LKCD_DUMP_V5 (0x5) /* DUMP_VERSION_NUMBER */ +#define LKCD_DUMP_V6 (0x6) /* DUMP_VERSION_NUMBER */ +#define LKCD_DUMP_V7 (0x7) /* DUMP_VERSION_NUMBER */ +#define LKCD_DUMP_V8 (0x8) /* DUMP_VERSION_NUMBER */ +#define LKCD_DUMP_V9 (0x9) /* DUMP_VERSION_NUMBER */ +#define LKCD_DUMP_V10 (0xa) /* DUMP_VERSION_NUMBER */ + +#define LKCD_DUMP_VERSION_NUMBER_MASK (0xf) +#define LKCD_DUMP_RAW (0x1) /* DUMP_[DH_]RAW */ +#define LKCD_DUMP_COMPRESSED (0x2) /* DUMP_[DH_]COMPRESSED */ +#define 
LKCD_DUMP_END (0x4) /* DUMP_[DH_]END */ + +#define LKCD_DUMP_COMPRESS_NONE (0x0) /* DUMP_COMPRESS_NONE */ +#define LKCD_DUMP_COMPRESS_RLE (0x1) /* DUMP_COMPRESS_RLE */ +#define LKCD_DUMP_COMPRESS_GZIP (0x2) /* DUMP_COMPRESS_GZIP */ + +#define LKCD_DUMP_MCLX_V0 (0x80000000) /* MCLX mod of LKCD */ +#define LKCD_DUMP_MCLX_V1 (0x40000000) /* Extra page header data */ +#define LKCD_OFFSET_TO_FIRST_PAGE (65536) + +#define MCLX_PAGE_HEADERS (4096) +#define MCLX_V1_PAGE_HEADER_CACHE ((sizeof(uint64_t)) * MCLX_PAGE_HEADERS) + +/* + * lkcd_load_dump_page_header() return values + */ +#define LKCD_DUMPFILE_OK (0) +#define LKCD_DUMPFILE_EOF (1) +#define LKCD_DUMPFILE_END (2) + +/* + * Common handling of LKCD dump environment + */ +#define LKCD_CACHED_PAGES (16) +#define LKCD_PAGE_HASH (32) +#define LKCD_DUMP_HEADER_ONLY (1) /* arguments to lkcd_dump_environment */ +#define LKCD_DUMP_PAGE_ONLY (2) + +#define LKCD_VALID (0x1) /* flags */ +#define LKCD_REMOTE (0x2) +#define LKCD_NOHASH (0x4) +#define LKCD_MCLX (0x8) +#define LKCD_BAD_DUMP (0x10) + +struct page_hash_entry { + uint32_t pg_flags; + uint64_t pg_addr; + off_t pg_hdr_offset; + struct page_hash_entry *next; +}; + +struct page_desc { + off_t offset; /* lseek offset in dump file */ +}; + +struct physmem_zone { + uint64_t start; + struct page_desc *pages; +}; + +struct fix_addrs { + ulong task; + ulong saddr; + ulong sw; +}; + + +struct lkcd_environment { + int fd; /* dumpfile file descriptor */ + ulong flags; /* flags from above */ + ulong debug; /* shadow of pc->debug */ + FILE *fp; /* abstracted fp for fprintf */ + void *dump_header; /* header stash, v1 or v2 */ + void *dump_header_asm; /* architecture specific header for v2 */ + void *dump_header_asm_smp; /* architecture specific header for v7 & v8 */ + void *dump_page; /* current page header holder */ + uint32_t version; /* version number of this dump */ + uint32_t page_size; /* size of a Linux memory page */ + int page_shift; /* byte address to page */ + int bits; /* 
processor bitsize */ + ulong panic_task; /* panic task address */ + char *panic_string; /* pointer to stashed panic string */ + uint32_t compression; /* compression type */ + uint32_t (*get_dp_size)(void); /* returns current page's dp_size */ + uint32_t (*get_dp_flags)(void); /* returns current page's dp_size */ + uint64_t (*get_dp_address)(void); /* returns current page's dp_address*/ + size_t page_header_size; /* size of version's page header */ + unsigned long curpos; /* offset into current page */ + uint64_t curpaddr; /* current page's physical address */ + off_t curhdroffs; /* current page's header offset */ + char *curbufptr; /* pointer to uncompressed page buffer */ + uint64_t kvbase; /* physical-to-LKCD page address format*/ + char *page_cache_buf; /* base of cached buffer pages */ + char *compressed_page; /* copy of compressed page data */ + int evict_index; /* next page to evict */ + ulong evictions; /* total evictions done */ + struct page_cache_hdr { /* header for each cached page */ + uint32_t pg_flags; + uint64_t pg_addr; + char *pg_bufptr; + ulong pg_hit_count; + } page_cache_hdr[LKCD_CACHED_PAGES]; + struct page_hash_entry *page_hash; + ulong total_pages; + ulong benchmark_pages; + ulong benchmarks_done; + off_t *mb_hdr_offsets; + ulong total_reads; + ulong cached_reads; + ulong hashed_reads; + ulong hashed; + ulong compressed; + ulong raw; + + /* lkcd_v7 additions */ + char *dumpfile_index; /* array of offsets for each page */ + int ifd; /* index file for dump (LKCD V7+) */ + long memory_pages; /* Mamimum index of dump pages */ + off_t page_offset_max; /* Offset of page with greatest offset seen so far */ + long page_index_max; /* Index of page with greatest offset seen so far */ + off_t *page_offsets; /* Pointer to huge array with seek offsets */ + /* NB: There are no holes in the array */ + + struct physmem_zone *zones; /* Array of physical memory zones */ + int num_zones; /* Number of zones initialized */ + int max_zones; /* Size of the zones 
array */ + long zoned_offsets; /* Number of stored page offsets */ + uint64_t zone_mask; + int zone_shift; + + int fix_addr_num; /* Number of active stacks to switch to saved values */ + struct fix_addrs *fix_addr; /* Array of active stacks to switch to saved values */ + + +}; + +#define ZONE_ALLOC 128 +#define ZONE_SIZE (MEGABYTES(512)) + +#define MEGABYTE_ALIGNED(vaddr) (!((uint64_t)(vaddr) & MEGABYTE_MASK)) + +#define LKCD_PAGE_HASH_INDEX(paddr) \ + (((paddr) >> lkcd->page_shift) % LKCD_PAGE_HASH) +#define LKCD_PAGES_PER_MEGABYTE() (MEGABYTES(1) / lkcd->page_size) +#define LKCD_PAGE_MEGABYTE(page) ((page) / LKCD_PAGES_PER_MEGABYTE()) +#define LKCD_BENCHMARKS_DONE() (lkcd->benchmarks_done >= lkcd->benchmark_pages) +#define LKCD_VALID_PAGE(flags) ((flags) & LKCD_VALID) + +extern struct lkcd_environment *lkcd; + +#define LKCD_DEBUG(x) (lkcd->debug >= (x)) +#undef BITS +#undef BITS32 +#undef BITS64 +#define BITS() (lkcd->bits) +#define BITS32() (lkcd->bits == 32) +#define BITS64() (lkcd->bits == 64) + +#endif /* LKCD_COMMON */ + +/* + * gdb_interface.c + */ +void gdb_main_loop(int, char **); +void display_gdb_banner(void); +void get_gdb_version(void); +void gdb_session_init(void); +void gdb_interface(struct gnu_request *); +int gdb_pass_through(char *, FILE *, ulong); +int gdb_readmem_callback(ulong, void *, int, int); +int gdb_line_number_callback(ulong, ulong, ulong); +int gdb_print_callback(ulong); +void gdb_error_hook(void); +void restore_gdb_sanity(void); +int is_gdb_command(int, ulong); +char *gdb_command_string(int, char *, int); +void dump_gnu_request(struct gnu_request *, int); +int gdb_CRASHDEBUG(ulong); +void dump_gdb_data(void); +void update_gdb_hooks(void); +void gdb_readnow_warning(void); +int gdb_set_crash_scope(ulong, char *); +extern int *gdb_output_format; +extern unsigned int *gdb_print_max; +extern int *gdb_prettyprint_structs; +extern int *gdb_prettyprint_arrays; +extern int *gdb_repeat_count_threshold; +extern int *gdb_stop_print_at_null; 
+extern unsigned int *gdb_output_radix; + +/* + * gdb/top.c + */ +extern void execute_command (char *, int); +#if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) +extern void (*command_loop_hook)(void); +extern void (*error_hook)(void); +#else +extern void (*deprecated_command_loop_hook)(void); + +/* + * gdb/exceptions.c + */ +extern void (*error_hook)(void); +#endif + +/* + * gdb/symtab.c + */ +extern void gdb_command_funnel(struct gnu_request *); + +/* + * gdb/symfile.c + */ +#if defined(GDB_6_0) || defined(GDB_6_1) +struct objfile; +extern void (*target_new_objfile_hook)(struct objfile *); +#endif + +/* + * gdb/valprint.c + */ +extern unsigned output_radix; +#if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) +extern int output_format; +extern int prettyprint_structs; +extern int prettyprint_arrays; +extern int repeat_count_threshold; +extern unsigned int print_max; +extern int stop_print_at_null; +#endif + +#ifdef GDB_7_6 +/* + * gdb/cleanups.c + */ +struct cleanup; +extern struct cleanup *all_cleanups(void); +extern void do_cleanups(struct cleanup *); +#else +/* + * gdb/utils.c + */ +extern void do_cleanups(void *); +#endif + +/* + * gdb/version.c + */ +extern char *version; + +/* + * gdb/disasm.c + */ +#ifdef GDB_5_3 +extern int gdb_disassemble_from_exec; +#endif + +/* + * readline/readline.c + */ +#ifdef GDB_5_3 +extern char *readline(char *); +#else +extern char *readline(const char *); +#endif +extern int rl_editing_mode; + +/* + * readline/history.c + */ +extern int history_offset; + +/* + * external gdb routines + */ +extern int gdb_main_entry(int, char **); +#ifdef GDB_5_3 +extern unsigned long calc_crc32(unsigned long, unsigned char *, size_t); +#else +extern unsigned long gnu_debuglink_crc32 (unsigned long, unsigned char *, size_t); +#endif +extern int have_partial_symbols(void); +extern int have_full_symbols(void); + +#if defined(X86) || defined(X86_64) || defined(IA64) +#define XEN_HYPERVISOR_ARCH +#endif + +#endif /* !GDB_COMMON 
*/ diff --git a/netdump.c b/netdump.c index 0054d6a..31ab6fa 100644 --- a/netdump.c +++ b/netdump.c @@ -1228,19 +1228,7 @@ netdump_memory_dump(FILE *fp) if (machine_type("X86_64")) netdump_print("%lx (relocate)\n", nd->arch_data1); else if (machine_type("ARM64")) - netdump_print("%lx (kimage_voffset)\n", nd->arch_data1); - } else - netdump_print("(unused)\n"); - netdump_print(" arch_data2: "); - if (nd->arch_data2) { - if (machine_type("ARM64")) - netdump_print("%016lx\n" - " CONFIG_ARM64_VA_BITS: %ld\n" - " VA_BITS_ACTUAL: %lld\n", - nd->arch_data2, nd->arch_data2 & 0xffffffff, - ((ulonglong)nd->arch_data2 >> 32)); - else - netdump_print("%016lx (?)\n", nd->arch_data2); + netdump_print("%lx (kimage_voffset)\n", nd->arch_data); } else netdump_print("(unused)\n"); netdump_print(" switch_stack: %lx\n", nd->switch_stack); @@ -1865,8 +1853,7 @@ vmcoreinfo_read_string(const char *key) int i, j, end; size_t value_length; size_t key_length = strlen(key); - char *vmcoreinfo; - uint size_vmcoreinfo; + char *vmcoreinfo = (char *)nd->vmcoreinfo; char *value = NULL; /* @@ -1875,49 +1862,25 @@ vmcoreinfo_read_string(const char *key) * the NT_TASKSTRUCT note. 
*/ if ((pc->flags2 & SNAP)) { - if (STREQ(key, "NUMBER(kimage_voffset)") && nd->arch_data1) { + if (STREQ(key, "NUMBER(kimage_voffset)") && nd->arch_data) { value = calloc(VADDR_PRLEN+1, sizeof(char)); - sprintf(value, "%lx", nd->arch_data1); - if (nd->arch_data2 == 0) - pc->read_vmcoreinfo = no_vmcoreinfo; - return value; - } - if (STREQ(key, "NUMBER(VA_BITS)") && nd->arch_data2) { - value = calloc(VADDR_PRLEN+1, sizeof(char)); - sprintf(value, "%ld", nd->arch_data2 & 0xffffffff); - return value; - } - if (STREQ(key, "NUMBER(TCR_EL1_T1SZ)") && nd->arch_data2) { - value = calloc(VADDR_PRLEN+1, sizeof(char)); - sprintf(value, "%lld", ((ulonglong)nd->arch_data2 >> 32) & 0xffffffff); + sprintf(value, "%lx", nd->arch_data); pc->read_vmcoreinfo = no_vmcoreinfo; return value; } - if (STREQ(key, "relocate") && nd->arch_data1) { + if (STREQ(key, "relocate") && nd->arch_data) { value = calloc(VADDR_PRLEN+1, sizeof(char)); - sprintf(value, "%lx", nd->arch_data1); + sprintf(value, "%lx", nd->arch_data); pc->read_vmcoreinfo = no_vmcoreinfo; return value; } - return NULL; - } - - if (nd->vmcoreinfo) { - vmcoreinfo = (char *)nd->vmcoreinfo; - size_vmcoreinfo = nd->size_vmcoreinfo; - } else if (ACTIVE() && pkd->vmcoreinfo) { - vmcoreinfo = (char *)pkd->vmcoreinfo; - size_vmcoreinfo = pkd->size_vmcoreinfo; - } else { - vmcoreinfo = NULL; - size_vmcoreinfo = 0; } - if (!vmcoreinfo) + if (!nd->vmcoreinfo) return NULL; /* the '+ 1' is the equal sign */ - for (i = 0; i < (int)(size_vmcoreinfo - key_length + 1); i++) { + for (i = 0; i < (nd->size_vmcoreinfo - key_length + 1); i++) { /* * We must also check if we're at the beginning of VMCOREINFO * or the separating newline is there, and of course if we @@ -1931,7 +1894,7 @@ vmcoreinfo_read_string(const char *key) /* Found -- search for the next newline. 
*/ for (j = i + key_length + 1; - j < size_vmcoreinfo; j++) { + j < nd->size_vmcoreinfo; j++) { if (vmcoreinfo[j] == '\n') { end = j; break; @@ -1944,7 +1907,7 @@ vmcoreinfo_read_string(const char *key) */ if (end == -1) { /* Point after the end. */ - end = size_vmcoreinfo + 1; + end = nd->size_vmcoreinfo + 1; } value_length = end - (1+ i + key_length); diff --git a/netdump.cgithub_9596b4388ea5.patch b/netdump.cgithub_9596b4388ea5.patch new file mode 100644 index 0000000..0054d6a --- /dev/null +++ b/netdump.cgithub_9596b4388ea5.patch @@ -0,0 +1,5250 @@ +/* netdump.c + * + * Copyright (C) 2002-2019 David Anderson + * Copyright (C) 2002-2019 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Author: David Anderson + */ + +#define _LARGEFILE64_SOURCE 1 /* stat64() */ + +#include "defs.h" +#include "netdump.h" +#include "sadump.h" +#include "xen_dom0.h" + +static struct vmcore_data vmcore_data = { 0 }; +static struct vmcore_data *nd = &vmcore_data; +static struct proc_kcore_data proc_kcore_data = { 0 }; +static struct proc_kcore_data *pkd = &proc_kcore_data; +static void netdump_print(char *, ...); +static size_t resize_elf_header(int, char *, char **, char **, ulong); +static void dump_Elf32_Ehdr(Elf32_Ehdr *); +static void dump_Elf32_Phdr(Elf32_Phdr *, int); +static size_t dump_Elf32_Nhdr(Elf32_Off offset, int); +static void dump_Elf64_Ehdr(Elf64_Ehdr *); +static void dump_Elf64_Phdr(Elf64_Phdr *, int); +static void dump_Elf64_Shdr(Elf64_Shdr *shdr); +static size_t dump_Elf64_Nhdr(Elf64_Off offset, int); +static void get_netdump_regs_32(struct bt_info *, ulong *, ulong *); +static void get_netdump_regs_ppc(struct bt_info *, ulong *, ulong *); +static void get_netdump_regs_ppc64(struct bt_info *, ulong *, ulong *); +static void get_netdump_regs_arm(struct bt_info *, ulong *, ulong *); +static void get_netdump_regs_arm64(struct bt_info *, ulong *, ulong *); +static void get_netdump_regs_mips(struct bt_info *, ulong *, ulong *); +static void check_dumpfile_size(char *); +static int proc_kcore_init_32(FILE *, int); +static int proc_kcore_init_64(FILE *, int); +static char *get_regs_from_note(char *, ulong *, ulong *); +static void kdump_get_osrelease(void); +static char *vmcoreinfo_read_string(const char *); + + +#define ELFSTORE 1 +#define ELFREAD 0 + +#define MIN_PAGE_SIZE (4096) + +/* + * Architectures that have configurable page sizes, + * can differ from the host machine's page size. + */ +#define READ_PAGESIZE_FROM_VMCOREINFO() \ + (machine_type("IA64") || machine_type("PPC64") || machine_type("PPC") || machine_type("ARM64")) + +/* + * kdump installs NT_PRSTATUS elf notes only to the cpus + * that were online during dumping. 
Hence we call into + * this function after reading the cpu map from the kernel, + * to remap the NT_PRSTATUS notes only to the online cpus. + */ +void +map_cpus_to_prstatus(void) +{ + void **nt_ptr; + int online, i, j, nrcpus; + size_t size; + + if (pc->flags2 & QEMU_MEM_DUMP_ELF) /* notes exist for all cpus */ + return; + + if (!(online = get_cpus_online()) || (online == kt->cpus)) + return; + + if (CRASHDEBUG(1)) + error(INFO, + "cpus: %d online: %d NT_PRSTATUS notes: %d (remapping)\n", + kt->cpus, online, nd->num_prstatus_notes); + + size = NR_CPUS * sizeof(void *); + + nt_ptr = (void **)GETBUF(size); + BCOPY(nd->nt_prstatus_percpu, nt_ptr, size); + BZERO(nd->nt_prstatus_percpu, size); + + /* + * Re-populate the array with the notes mapping to online cpus + */ + nrcpus = (kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS); + + for (i = 0, j = 0; i < nrcpus; i++) { + if (in_cpu_map(ONLINE_MAP, i)) + nd->nt_prstatus_percpu[i] = nt_ptr[j++]; + } + + FREEBUF(nt_ptr); +} + +/* + * Determine whether a file is a netdump/diskdump/kdump creation, + * and if TRUE, initialize the vmcore_data structure. 
+ */ +int +is_netdump(char *file, ulong source_query) +{ + int i, fd, swap; + Elf32_Ehdr *elf32; + Elf32_Phdr *load32; + Elf64_Ehdr *elf64; + Elf64_Phdr *load64; + char *eheader, *sect0; + char buf[BUFSIZE]; + size_t size, len, tot; + Elf32_Off offset32; + Elf64_Off offset64; + ulong format; + + if ((fd = open(file, O_RDWR)) < 0) { + if ((fd = open(file, O_RDONLY)) < 0) { + sprintf(buf, "%s: open", file); + perror(buf); + return FALSE; + } + } + + size = MIN_NETDUMP_ELF_HEADER_SIZE; + if ((eheader = (char *)malloc(size)) == NULL) { + fprintf(stderr, "cannot malloc minimum ELF header buffer\n"); + clean_exit(1); + } + + if (FLAT_FORMAT()) { + if (!read_flattened_format(fd, 0, eheader, size)) + goto bailout; + } else { + if (read(fd, eheader, size) != size) { + sprintf(buf, "%s: ELF header read", file); + perror(buf); + goto bailout; + } + } + + load32 = NULL; + load64 = NULL; + format = 0; + elf32 = (Elf32_Ehdr *)&eheader[0]; + elf64 = (Elf64_Ehdr *)&eheader[0]; + + /* + * Verify the ELF header, and determine the dumpfile format. + * + * For now, kdump vmcores differ from netdump/diskdump like so: + * + * 1. The first kdump PT_LOAD segment is packed just after + * the ELF header, whereas netdump/diskdump page-align + * the first PT_LOAD segment. + * 2. Each kdump PT_LOAD segment has a p_align field of zero, + * whereas netdump/diskdump have their p_align fields set + * to the system page-size. + * + * If either kdump difference is seen, presume kdump -- this + * is obviously subject to change. 
+ */ + + if (!STRNEQ(eheader, ELFMAG) || eheader[EI_VERSION] != EV_CURRENT) + goto bailout; + + swap = (((eheader[EI_DATA] == ELFDATA2LSB) && + (__BYTE_ORDER == __BIG_ENDIAN)) || + ((eheader[EI_DATA] == ELFDATA2MSB) && + (__BYTE_ORDER == __LITTLE_ENDIAN))); + + if ((elf32->e_ident[EI_CLASS] == ELFCLASS32) && + (swap16(elf32->e_type, swap) == ET_CORE) && + (swap32(elf32->e_version, swap) == EV_CURRENT) && + (swap16(elf32->e_phnum, swap) >= 2)) { + switch (swap16(elf32->e_machine, swap)) + { + case EM_386: + if (machine_type_mismatch(file, "X86", NULL, + source_query)) + goto bailout; + break; + + case EM_ARM: + if (machine_type_mismatch(file, "ARM", NULL, + source_query)) + goto bailout; + break; + + case EM_PPC: + if (machine_type_mismatch(file, "PPC", NULL, + source_query)) + goto bailout; + break; + + case EM_MIPS: + if (machine_type_mismatch(file, "MIPS", NULL, + source_query)) + goto bailout; + break; + + default: + if (machine_type_mismatch(file, "(unknown)", NULL, + source_query)) + goto bailout; + } + + if (endian_mismatch(file, elf32->e_ident[EI_DATA], + source_query)) + goto bailout; + + load32 = (Elf32_Phdr *) + &eheader[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; + + if ((load32->p_offset & (MIN_PAGE_SIZE-1)) || + (load32->p_align == 0)) + format = KDUMP_ELF32; + else + format = NETDUMP_ELF32; + } else if ((elf64->e_ident[EI_CLASS] == ELFCLASS64) && + (swap16(elf64->e_type, swap) == ET_CORE) && + (swap32(elf64->e_version, swap) == EV_CURRENT) && + (swap16(elf64->e_phnum, swap) >= 2)) { + switch (swap16(elf64->e_machine, swap)) + { + case EM_IA_64: + if (machine_type_mismatch(file, "IA64", NULL, + source_query)) + goto bailout; + break; + + case EM_PPC64: + if (machine_type_mismatch(file, "PPC64", NULL, + source_query)) + goto bailout; + break; + + case EM_X86_64: + if (machine_type_mismatch(file, "X86_64", NULL, + source_query)) + goto bailout; + break; + + case EM_S390: + if (machine_type_mismatch(file, "S390X", NULL, + source_query)) + goto bailout; + 
break; + + case EM_386: + if (machine_type_mismatch(file, "X86", NULL, + source_query)) + goto bailout; + break; + + case EM_ARM: + if (machine_type_mismatch(file, "ARM", NULL, + source_query)) + goto bailout; + break; + + case EM_AARCH64: + if (machine_type_mismatch(file, "ARM64", NULL, + source_query)) + goto bailout; + break; + + case EM_MIPS: + if (machine_type_mismatch(file, "MIPS", NULL, + source_query)) + goto bailout; + break; + + default: + if (machine_type_mismatch(file, "(unknown)", NULL, + source_query)) + goto bailout; + } + + if (endian_mismatch(file, elf64->e_ident[EI_DATA], + source_query)) + goto bailout; + + load64 = (Elf64_Phdr *) + &eheader[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; + + if ((load64->p_offset & (MIN_PAGE_SIZE-1)) || + (load64->p_align == 0)) + format = KDUMP_ELF64; + else + format = NETDUMP_ELF64; + } else { + if (CRASHDEBUG(2)) + error(INFO, "%s: not a %s ELF dumpfile\n", + file, source_query == NETDUMP_LOCAL ? + "netdump" : "kdump"); + + + goto bailout; + } + + if (source_query == KCORE_LOCAL) { + close(fd); + return TRUE; + } + + switch (format) + { + case NETDUMP_ELF32: + case NETDUMP_ELF64: + if (source_query & (NETDUMP_LOCAL|NETDUMP_REMOTE)) + break; + else + goto bailout; + + case KDUMP_ELF32: + case KDUMP_ELF64: + if (source_query & KDUMP_LOCAL) + break; + else + goto bailout; + } + + sect0 = NULL; + if (!(size = resize_elf_header(fd, file, &eheader, §0, format))) + goto bailout; + + nd->ndfd = fd; + nd->elf_header = eheader; + nd->flags = format | source_query; + + switch (format) + { + case NETDUMP_ELF32: + case KDUMP_ELF32: + nd->header_size = size; + nd->elf32 = (Elf32_Ehdr *)&nd->elf_header[0]; + nd->num_pt_load_segments = nd->elf32->e_phnum - 1; + if ((nd->pt_load_segments = (struct pt_load_segment *) + malloc(sizeof(struct pt_load_segment) * + nd->num_pt_load_segments)) == NULL) { + fprintf(stderr, "cannot malloc PT_LOAD segment buffers\n"); + clean_exit(1); + } + nd->notes32 = (Elf32_Phdr *) + 
&nd->elf_header[sizeof(Elf32_Ehdr)]; + nd->load32 = (Elf32_Phdr *) + &nd->elf_header[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; + if (format == NETDUMP_ELF32) + nd->page_size = (uint)nd->load32->p_align; + dump_Elf32_Ehdr(nd->elf32); + dump_Elf32_Phdr(nd->notes32, ELFREAD); + for (i = 0; i < nd->num_pt_load_segments; i++) + dump_Elf32_Phdr(nd->load32 + i, ELFSTORE+i); + offset32 = nd->notes32->p_offset; + for (tot = 0; tot < nd->notes32->p_filesz; tot += len) { + if (!(len = dump_Elf32_Nhdr(offset32, ELFSTORE))) + break; + offset32 += len; + } + break; + + case NETDUMP_ELF64: + case KDUMP_ELF64: + nd->header_size = size; + nd->elf64 = (Elf64_Ehdr *)&nd->elf_header[0]; + + /* + * Extended Numbering support + * See include/uapi/linux/elf.h and elf(5) for more information + */ + if (nd->elf64->e_phnum == PN_XNUM) { + nd->sect0_64 = (Elf64_Shdr *)sect0; + nd->num_pt_load_segments = nd->sect0_64->sh_info - 1; + } else + nd->num_pt_load_segments = nd->elf64->e_phnum - 1; + + if ((nd->pt_load_segments = (struct pt_load_segment *) + malloc(sizeof(struct pt_load_segment) * + nd->num_pt_load_segments)) == NULL) { + fprintf(stderr, "cannot malloc PT_LOAD segment buffers\n"); + clean_exit(1); + } + nd->notes64 = (Elf64_Phdr *) + &nd->elf_header[sizeof(Elf64_Ehdr)]; + nd->load64 = (Elf64_Phdr *) + &nd->elf_header[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; + if (format == NETDUMP_ELF64) + nd->page_size = (uint)nd->load64->p_align; + dump_Elf64_Ehdr(nd->elf64); + dump_Elf64_Phdr(nd->notes64, ELFREAD); + for (i = 0; i < nd->num_pt_load_segments; i++) + dump_Elf64_Phdr(nd->load64 + i, ELFSTORE+i); + offset64 = nd->notes64->p_offset; + for (tot = 0; tot < nd->notes64->p_filesz; tot += len) { + if (!(len = dump_Elf64_Nhdr(offset64, ELFSTORE))) + break; + offset64 += len; + } + break; + } + + if (CRASHDEBUG(1)) + netdump_memory_dump(fp); + + pc->read_vmcoreinfo = vmcoreinfo_read_string; + + if ((source_query == KDUMP_LOCAL) && + (pc->flags2 & GET_OSRELEASE)) + kdump_get_osrelease(); + + if 
((source_query == KDUMP_LOCAL) && + (pc->flags2 & GET_LOG)) { + pc->dfd = nd->ndfd; + pc->readmem = read_kdump; + nd->flags |= KDUMP_LOCAL; + pc->flags |= KDUMP; + get_log_from_vmcoreinfo(file); + } + + return nd->header_size; + +bailout: + close(fd); + free(eheader); + return FALSE; +} + +/* + * Search through all PT_LOAD segments to determine the + * file offset where the physical memory segment(s) start + * in the vmcore, and consider everything prior to that as + * header contents. + */ + +static size_t +resize_elf_header(int fd, char *file, char **eheader_ptr, char **sect0_ptr, + ulong format) +{ + int i; + char buf[BUFSIZE]; + char *eheader; + Elf32_Ehdr *elf32; + Elf32_Phdr *load32; + Elf64_Ehdr *elf64; + Elf64_Phdr *load64; + Elf32_Off p_offset32; + Elf64_Off p_offset64; + size_t header_size; + uint num_pt_load_segments; + + eheader = *eheader_ptr; + header_size = num_pt_load_segments = 0; + elf32 = (Elf32_Ehdr *)&eheader[0]; + elf64 = (Elf64_Ehdr *)&eheader[0]; + + switch (format) + { + case NETDUMP_ELF32: + case KDUMP_ELF32: + num_pt_load_segments = elf32->e_phnum - 1; + header_size = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) + + (sizeof(Elf32_Phdr) * num_pt_load_segments); + break; + + case NETDUMP_ELF64: + case KDUMP_ELF64: + /* + * Extended Numbering support + * See include/uapi/linux/elf.h and elf(5) for more information + */ + if (elf64->e_phnum == PN_XNUM) { + Elf64_Shdr *shdr64; + + shdr64 = (Elf64_Shdr *)malloc(sizeof(*shdr64)); + if (!shdr64) { + fprintf(stderr, + "cannot malloc a section header buffer\n"); + return 0; + } + if (FLAT_FORMAT()) { + if (!read_flattened_format(fd, elf64->e_shoff, + shdr64, elf64->e_shentsize)) + return 0; + } else { + if (lseek(fd, elf64->e_shoff, SEEK_SET) != + elf64->e_shoff) { + sprintf(buf, "%s: section header lseek", + file); + perror(buf); + return 0; + } + if (read(fd, shdr64, elf64->e_shentsize) != + elf64->e_shentsize) { + sprintf(buf, "%s: section header read", + file); + perror(buf); + return 0; + } + } + 
num_pt_load_segments = shdr64->sh_info - 1; + *sect0_ptr = (char *)shdr64; + } else + num_pt_load_segments = elf64->e_phnum - 1; + + header_size = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) + + (sizeof(Elf64_Phdr) * num_pt_load_segments); + break; + } + + if ((eheader = (char *)realloc(eheader, header_size)) == NULL) { + fprintf(stderr, "cannot realloc interim ELF header buffer\n"); + clean_exit(1); + } else + *eheader_ptr = eheader; + + if (FLAT_FORMAT()) { + if (!read_flattened_format(fd, 0, eheader, header_size)) + return 0; + } else { + if (lseek(fd, 0, SEEK_SET) != 0) { + sprintf(buf, "%s: lseek", file); + perror(buf); + return 0; + } + if (read(fd, eheader, header_size) != header_size) { + sprintf(buf, "%s: ELF header read", file); + perror(buf); + return 0; + } + } + + switch (format) + { + case NETDUMP_ELF32: + case KDUMP_ELF32: + load32 = (Elf32_Phdr *)&eheader[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; + p_offset32 = load32->p_offset; + for (i = 0; i < num_pt_load_segments; i++, load32 += 1) { + if (load32->p_offset && + (p_offset32 > load32->p_offset)) + p_offset32 = load32->p_offset; + } + header_size = (size_t)p_offset32; + break; + + case NETDUMP_ELF64: + case KDUMP_ELF64: + load64 = (Elf64_Phdr *)&eheader[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; + p_offset64 = load64->p_offset; + for (i = 0; i < num_pt_load_segments; i++, load64 += 1) { + if (load64->p_offset && + (p_offset64 > load64->p_offset)) + p_offset64 = load64->p_offset; + } + header_size = (size_t)p_offset64; + break; + } + + if ((eheader = (char *)realloc(eheader, header_size)) == NULL) { + perror("realloc"); + fprintf(stderr, "cannot realloc resized ELF header buffer\n"); + clean_exit(1); + } else + *eheader_ptr = eheader; + + if (FLAT_FORMAT()) { + if (!read_flattened_format(fd, 0, eheader, header_size)) + return 0; + } else { + if (lseek(fd, 0, SEEK_SET) != 0) { + sprintf(buf, "%s: lseek", file); + perror(buf); + return 0; + } + if (read(fd, eheader, header_size) != header_size) { + 
sprintf(buf, "%s: ELF header read", file); + perror(buf); + return 0; + } + } + + return header_size; +} + +/* + * Return the e_version number of an ELF file + * (or -1 if its not readable ELF file) + */ +int +file_elf_version(char *file) +{ + int fd, size; + Elf32_Ehdr *elf32; + Elf64_Ehdr *elf64; + char header[MIN_NETDUMP_ELF_HEADER_SIZE]; + char buf[BUFSIZE]; + + if ((fd = open(file, O_RDONLY)) < 0) { + sprintf(buf, "%s: open", file); + perror(buf); + return -1; + } + + size = MIN_NETDUMP_ELF_HEADER_SIZE; + if (read(fd, header, size) != size) { + sprintf(buf, "%s: read", file); + perror(buf); + close(fd); + return -1; + } + close(fd); + + elf32 = (Elf32_Ehdr *)&header[0]; + elf64 = (Elf64_Ehdr *)&header[0]; + + if (STRNEQ(elf32->e_ident, ELFMAG) && + (elf32->e_ident[EI_CLASS] == ELFCLASS32) && + (elf32->e_ident[EI_DATA] == ELFDATA2LSB) && + (elf32->e_ident[EI_VERSION] == EV_CURRENT)) { + return (elf32->e_version); + } else if (STRNEQ(elf64->e_ident, ELFMAG) && + (elf64->e_ident[EI_CLASS] == ELFCLASS64) && + (elf64->e_ident[EI_VERSION] == EV_CURRENT)) { + return (elf64->e_version); + } + + return -1; +} + +/* + * Check whether any PT_LOAD segment goes beyond the file size. 
+ */ +static void +check_dumpfile_size(char *file) +{ + int i; + struct stat64 stat; + struct pt_load_segment *pls; + uint64_t segment_end; + + if (is_ramdump_image()) + return; + + if (stat64(file, &stat) < 0) + return; + + if (S_ISBLK(stat.st_mode)) { + error(NOTE, "%s: No dump complete check for block devices\n", + file); + return; + } + for (i = 0; i < nd->num_pt_load_segments; i++) { + pls = &nd->pt_load_segments[i]; + + segment_end = pls->file_offset + + (pls->phys_end - pls->phys_start); + + if (segment_end > stat.st_size) { + error(WARNING, "%s: may be truncated or incomplete\n" + " PT_LOAD p_offset: %lld\n" + " p_filesz: %lld\n" + " bytes required: %lld\n" + " dumpfile size: %lld\n\n", + file, pls->file_offset, + pls->phys_end - pls->phys_start, + segment_end, stat.st_size); + return; + } + } +} + +/* + * Perform any post-dumpfile determination stuff here. + */ +int +netdump_init(char *unused, FILE *fptr) +{ + if (!VMCORE_VALID()) + return FALSE; + + nd->ofp = fptr; + + check_dumpfile_size(pc->dumpfile); + + return TRUE; +} + +/* + * Read from a netdump-created dumpfile. + */ +int +read_netdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) +{ + off_t offset; + ssize_t read_ret; + struct pt_load_segment *pls; + int i; + + offset = 0; + + /* + * The Elf32_Phdr has 32-bit fields for p_paddr, p_filesz and + * p_memsz, so for now, multiple PT_LOAD segment support is + * restricted to 64-bit machines for netdump/diskdump vmcores. + * However, kexec/kdump has introduced the optional use of a + * 64-bit ELF header for 32-bit processors. 
+ */ + switch (DUMPFILE_FORMAT(nd->flags)) + { + case NETDUMP_ELF32: + offset = (off_t)paddr + (off_t)nd->header_size; + break; + + case NETDUMP_ELF64: + case KDUMP_ELF32: + case KDUMP_ELF64: + if (nd->num_pt_load_segments == 1) { + offset = (off_t)paddr + (off_t)nd->header_size - + (off_t)nd->pt_load_segments[0].phys_start; + break; + } + + for (i = offset = 0; i < nd->num_pt_load_segments; i++) { + pls = &nd->pt_load_segments[i]; + if ((paddr >= pls->phys_start) && + (paddr < pls->phys_end)) { + offset = (off_t)(paddr - pls->phys_start) + + pls->file_offset; + break; + } + if (pls->zero_fill && (paddr >= pls->phys_end) && + (paddr < pls->zero_fill)) { + memset(bufptr, 0, cnt); + if (CRASHDEBUG(8)) + fprintf(fp, "read_netdump: zero-fill: " + "addr: %lx paddr: %llx cnt: %d\n", + addr, (ulonglong)paddr, cnt); + return cnt; + } + } + + if (!offset) { + if (CRASHDEBUG(8)) + fprintf(fp, "read_netdump: READ_ERROR: " + "offset not found for paddr: %llx\n", + (ulonglong)paddr); + return READ_ERROR; + } + + break; + } + + if (CRASHDEBUG(8)) + fprintf(fp, "read_netdump: addr: %lx paddr: %llx cnt: %d offset: %llx\n", + addr, (ulonglong)paddr, cnt, (ulonglong)offset); + + if (FLAT_FORMAT()) { + if (!read_flattened_format(nd->ndfd, offset, bufptr, cnt)) { + if (CRASHDEBUG(8)) + fprintf(fp, "read_netdump: READ_ERROR: " + "read_flattened_format failed for offset:" + " %llx\n", + (ulonglong)offset); + return READ_ERROR; + } + } else { + if (lseek(nd->ndfd, offset, SEEK_SET) == -1) { + if (CRASHDEBUG(8)) + fprintf(fp, "read_netdump: SEEK_ERROR: " + "offset: %llx\n", (ulonglong)offset); + return SEEK_ERROR; + } + + read_ret = read(nd->ndfd, bufptr, cnt); + if (read_ret != cnt) { + /* + * If the incomplete flag has been set in the header, + * first check whether zero_excluded has been set. 
+ */ + if (is_incomplete_dump() && (read_ret >= 0) && + (*diskdump_flags & ZERO_EXCLUDED)) { + if (CRASHDEBUG(8)) + fprintf(fp, "read_netdump: zero-fill: " + "addr: %lx paddr: %llx cnt: %d\n", + addr + read_ret, + (ulonglong)paddr + read_ret, + cnt - (int)read_ret); + bufptr += read_ret; + bzero(bufptr, cnt - read_ret); + return cnt; + } + if (CRASHDEBUG(8)) + fprintf(fp, "read_netdump: READ_ERROR: " + "offset: %llx\n", (ulonglong)offset); + return READ_ERROR; + } + } + + return cnt; +} + +/* + * Write to a netdump-created dumpfile. Note that cmd_wr() does not + * allow writes to dumpfiles, so you can't get here from there. + * But, if it would ever be helpful, here it is... + */ +int +write_netdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) +{ + off_t offset; + struct pt_load_segment *pls; + int i; + + offset = 0; + + switch (DUMPFILE_FORMAT(nd->flags)) + { + case NETDUMP_ELF32: + offset = (off_t)paddr + (off_t)nd->header_size; + break; + + case NETDUMP_ELF64: + case KDUMP_ELF32: + case KDUMP_ELF64: + if (nd->num_pt_load_segments == 1) { + offset = (off_t)paddr + (off_t)nd->header_size; + break; + } + + for (i = offset = 0; i < nd->num_pt_load_segments; i++) { + pls = &nd->pt_load_segments[i]; + if ((paddr >= pls->phys_start) && + (paddr < pls->phys_end)) { + offset = (off_t)(paddr - pls->phys_start) + + pls->file_offset; + break; + } + } + + if (!offset) + return READ_ERROR; + + break; + } + + if (lseek(nd->ndfd, offset, SEEK_SET) == -1) + return SEEK_ERROR; + + if (write(nd->ndfd, bufptr, cnt) != cnt) + return READ_ERROR; + + return cnt; +} + +/* + * Set the file pointer for debug output. + */ +FILE * +set_netdump_fp(FILE *fp) +{ + if (!VMCORE_VALID()) + return NULL; + + nd->ofp = fp; + return fp; +} + +/* + * Generic print routine to handle integral and remote daemon output. + */ +static void +netdump_print(char *fmt, ...) 
+{ + char buf[BUFSIZE]; + va_list ap; + + if (!fmt || !strlen(fmt) || !VMCORE_VALID()) + return; + + va_start(ap, fmt); + (void)vsnprintf(buf, BUFSIZE, fmt, ap); + va_end(ap); + + if (nd->ofp) + fprintf(nd->ofp, "%s", buf); + else + console(buf); +} + +uint +netdump_page_size(void) +{ + if (!VMCORE_VALID()) + return 0; + + return nd->page_size; +} + +int +netdump_free_memory(void) +{ + return (VMCORE_VALID() ? 0 : 0); +} + +int netdump_memory_used(void) +{ + return (VMCORE_VALID() ? 0 : 0); +} + +/* + * The netdump server will eventually use the NT_TASKSTRUCT section + * to pass the task address. Until such time, look at the ebp of the + * user_regs_struct, which is located at the end of the NT_PRSTATUS + * elf_prstatus structure, minus one integer: + * + * struct elf_prstatus + * { + * ... + * elf_gregset_t pr_reg; (maps to user_regs_struct) + * int pr_fpvalid; + * }; + * + * If it's a kernel stack address who's adjusted task_struct value is + * equal to one of the active set tasks, we'll presume it's legit. 
+ * + */ +ulong +get_netdump_panic_task(void) +{ +#ifdef DAEMON + return nd->task_struct; +#else + int i, crashing_cpu; + size_t len; + char *user_regs; + ulong ebp, esp, task; + + if (!VMCORE_VALID() || !get_active_set()) + goto panic_task_undetermined; + + if (nd->task_struct) { + if (CRASHDEBUG(1)) + error(INFO, + "get_netdump_panic_task: NT_TASKSTRUCT: %lx\n", + nd->task_struct); + return nd->task_struct; + } + + switch (DUMPFILE_FORMAT(nd->flags)) + { + case NETDUMP_ELF32: + case NETDUMP_ELF64: + crashing_cpu = -1; + break; + + case KDUMP_ELF32: + case KDUMP_ELF64: + crashing_cpu = -1; + if (kernel_symbol_exists("crashing_cpu")) { + get_symbol_data("crashing_cpu", sizeof(int), &i); + if ((i >= 0) && in_cpu_map(ONLINE_MAP, i)) { + crashing_cpu = i; + if (CRASHDEBUG(1)) + error(INFO, + "get_netdump_panic_task: active_set[crashing_cpu: %d]: %lx\n", + crashing_cpu, + tt->active_set[crashing_cpu]); + } + } + + if ((nd->num_prstatus_notes > 1) && (crashing_cpu == -1)) + goto panic_task_undetermined; + break; + + default: + crashing_cpu = -1; + break; + } + + if (nd->elf32 && (nd->elf32->e_machine == EM_386)) { + Elf32_Nhdr *note32 = NULL; + + if (nd->num_prstatus_notes > 1) { + if (crashing_cpu != -1) + note32 = (Elf32_Nhdr *) + nd->nt_prstatus_percpu[crashing_cpu]; + } else + note32 = (Elf32_Nhdr *)nd->nt_prstatus; + + if (!note32) + goto panic_task_undetermined; + + len = sizeof(Elf32_Nhdr); + len = roundup(len + note32->n_namesz, 4); + len = roundup(len + note32->n_descsz, 4); + + user_regs = ((char *)note32 + len) + - SIZE(user_regs_struct) - sizeof(int); + ebp = ULONG(user_regs + OFFSET(user_regs_struct_ebp)); + esp = ULONG(user_regs + OFFSET(user_regs_struct_esp)); +check_ebp_esp: + if (CRASHDEBUG(1)) + error(INFO, + "get_netdump_panic_task: NT_PRSTATUS esp: %lx ebp: %lx\n", + esp, ebp); + if (IS_KVADDR(esp)) { + task = stkptr_to_task(esp); + if (CRASHDEBUG(1)) + error(INFO, + "get_netdump_panic_task: esp: %lx -> task: %lx\n", + esp, task); + for (i = 0; task 
&& (i < NR_CPUS); i++) { + if (task == tt->active_set[i]) + return task; + } + } + if (IS_KVADDR(ebp)) { + task = stkptr_to_task(ebp); + if (CRASHDEBUG(1)) + error(INFO, + "get_netdump_panic_task: ebp: %lx -> task: %lx\n", + ebp, task); + for (i = 0; task && (i < NR_CPUS); i++) { + if (task == tt->active_set[i]) + return task; + } + } + } else if (nd->elf64) { + Elf64_Nhdr *note64 = NULL; + + if (nd->num_prstatus_notes > 1) { + if (crashing_cpu != -1) + note64 = (Elf64_Nhdr *) + nd->nt_prstatus_percpu[crashing_cpu]; + } else + note64 = (Elf64_Nhdr *)nd->nt_prstatus; + + if (!note64) + goto panic_task_undetermined; + + len = sizeof(Elf64_Nhdr); + len = roundup(len + note64->n_namesz, 4); + user_regs = (char *)((char *)note64 + len + + MEMBER_OFFSET("elf_prstatus", "pr_reg")); + + if (nd->elf64->e_machine == EM_386) { + ebp = ULONG(user_regs + OFFSET(user_regs_struct_ebp)); + esp = ULONG(user_regs + OFFSET(user_regs_struct_esp)); + goto check_ebp_esp; + } + + if (nd->elf64->e_machine == EM_PPC64) { + /* + * Get the GPR1 register value. + */ + esp = *(ulong *)((char *)user_regs + 8); + if (CRASHDEBUG(1)) + error(INFO, + "get_netdump_panic_task: NT_PRSTATUS esp: %lx\n", esp); + if (IS_KVADDR(esp)) { + task = stkptr_to_task(esp); + if (CRASHDEBUG(1)) + error(INFO, + "get_netdump_panic_task: esp: %lx -> task: %lx\n", + esp, task); + for (i = 0; task && (i < NR_CPUS); i++) { + if (task == tt->active_set[i]) + return task; + } + } + } + + if (nd->elf64->e_machine == EM_X86_64) { + if ((crashing_cpu != -1) && (crashing_cpu <= kt->cpus)) + return (tt->active_set[crashing_cpu]); + } + } + +panic_task_undetermined: + + if (CRASHDEBUG(1)) + error(INFO, "get_netdump_panic_task: failed\n"); + + return NO_TASK; +#endif +} + +/* + * Get the switch_stack address of the passed-in task. Currently only + * the panicking task reports its switch-stack address. 
+ */ +ulong +get_netdump_switch_stack(ulong task) +{ +#ifdef DAEMON + if (nd->task_struct == task) + return nd->switch_stack; + return 0; +#else + if (!VMCORE_VALID() || !get_active_set()) + return 0; + + if (nd->task_struct == task) + return nd->switch_stack; + + return 0; +#endif +} + +int +netdump_memory_dump(FILE *fp) +{ + int i, others, wrap, flen; + size_t len, tot; + FILE *fpsave; + Elf32_Off offset32; + Elf32_Off offset64; + struct pt_load_segment *pls; + + if (!VMCORE_VALID()) + return FALSE; + + fpsave = nd->ofp; + nd->ofp = fp; + + if (FLAT_FORMAT()) + dump_flat_header(nd->ofp); + + netdump_print("vmcore_data: \n"); + netdump_print(" flags: %lx (", nd->flags); + others = 0; + if (nd->flags & NETDUMP_LOCAL) + netdump_print("%sNETDUMP_LOCAL", others++ ? "|" : ""); + if (nd->flags & KDUMP_LOCAL) + netdump_print("%sKDUMP_LOCAL", others++ ? "|" : ""); + if (nd->flags & NETDUMP_REMOTE) + netdump_print("%sNETDUMP_REMOTE", others++ ? "|" : ""); + if (nd->flags & NETDUMP_ELF32) + netdump_print("%sNETDUMP_ELF32", others++ ? "|" : ""); + if (nd->flags & NETDUMP_ELF64) + netdump_print("%sNETDUMP_ELF64", others++ ? "|" : ""); + if (nd->flags & KDUMP_ELF32) + netdump_print("%sKDUMP_ELF32", others++ ? "|" : ""); + if (nd->flags & KDUMP_ELF64) + netdump_print("%sKDUMP_ELF64", others++ ? "|" : ""); + if (nd->flags & PARTIAL_DUMP) + netdump_print("%sPARTIAL_DUMP", others++ ? "|" : ""); + if (nd->flags & QEMU_MEM_DUMP_KDUMP_BACKUP) + netdump_print("%sQEMU_MEM_DUMP_KDUMP_BACKUP", others++ ? "|" : ""); + netdump_print(") %s\n", FLAT_FORMAT() ? "[FLAT]" : ""); + if ((pc->flags & RUNTIME) && symbol_exists("dump_level")) { + int dump_level; + if (readmem(symbol_value("dump_level"), KVADDR, &dump_level, + sizeof(dump_level), "dump_level", QUIET|RETURN_ON_ERROR)) { + netdump_print(" dump_level: %d (0x%x) %s", + dump_level, dump_level, + dump_level > 0 ? 
"(" : ""); + +#define DUMP_EXCLUDE_CACHE 0x00000001 /* Exclude LRU & SwapCache pages*/ +#define DUMP_EXCLUDE_CLEAN 0x00000002 /* Exclude all-zero pages */ +#define DUMP_EXCLUDE_FREE 0x00000004 /* Exclude free pages */ +#define DUMP_EXCLUDE_ANON 0x00000008 /* Exclude Anon pages */ +#define DUMP_SAVE_PRIVATE 0x00000010 /* Save private pages */ + + others = 0; + if (dump_level & DUMP_EXCLUDE_CACHE) + netdump_print("%sDUMP_EXCLUDE_CACHE", + others++ ? "|" : ""); + if (dump_level & DUMP_EXCLUDE_CLEAN) + netdump_print("%sDUMP_EXCLUDE_CLEAN", + others++ ? "|" : ""); + if (dump_level & DUMP_EXCLUDE_FREE) + netdump_print("%sDUMP_EXCLUDE_FREE", + others++ ? "|" : ""); + if (dump_level & DUMP_EXCLUDE_ANON) + netdump_print("%sDUMP_EXCLUDE_ANON", + others++ ? "|" : ""); + if (dump_level & DUMP_SAVE_PRIVATE) + netdump_print("%sDUMP_SAVE_PRIVATE", + others++ ? "|" : ""); + netdump_print("%s\n", dump_level > 0 ? ")" : ""); + } else + netdump_print(" dump_level: (unknown)\n"); + } else if (!(pc->flags & RUNTIME) && symbol_exists("dump_level")) + netdump_print(" dump_level: (undetermined)\n"); + + netdump_print(" ndfd: %d\n", nd->ndfd); + netdump_print(" ofp: %lx\n", nd->ofp); + netdump_print(" header_size: %d\n", nd->header_size); + netdump_print(" num_pt_load_segments: %d\n", nd->num_pt_load_segments); + for (i = 0; i < nd->num_pt_load_segments; i++) { + pls = &nd->pt_load_segments[i]; + netdump_print(" pt_load_segment[%d]:\n", i); + netdump_print(" file_offset: %lx\n", + pls->file_offset); + netdump_print(" phys_start: %llx\n", + pls->phys_start); + netdump_print(" phys_end: %llx\n", + pls->phys_end); + netdump_print(" zero_fill: %llx\n", + pls->zero_fill); + } + netdump_print(" elf_header: %lx\n", nd->elf_header); + netdump_print(" elf32: %lx\n", nd->elf32); + netdump_print(" notes32: %lx\n", nd->notes32); + netdump_print(" load32: %lx\n", nd->load32); + netdump_print(" elf64: %lx\n", nd->elf64); + netdump_print(" notes64: %lx\n", nd->notes64); + netdump_print(" load64: %lx\n", 
nd->load64); + netdump_print(" sect0_64: %lx\n", nd->sect0_64); + netdump_print(" nt_prstatus: %lx\n", nd->nt_prstatus); + netdump_print(" nt_prpsinfo: %lx\n", nd->nt_prpsinfo); + netdump_print(" nt_taskstruct: %lx\n", nd->nt_taskstruct); + netdump_print(" task_struct: %lx\n", nd->task_struct); + netdump_print(" arch_data1: "); + if (nd->arch_data1) { + if (machine_type("X86_64")) + netdump_print("%lx (relocate)\n", nd->arch_data1); + else if (machine_type("ARM64")) + netdump_print("%lx (kimage_voffset)\n", nd->arch_data1); + } else + netdump_print("(unused)\n"); + netdump_print(" arch_data2: "); + if (nd->arch_data2) { + if (machine_type("ARM64")) + netdump_print("%016lx\n" + " CONFIG_ARM64_VA_BITS: %ld\n" + " VA_BITS_ACTUAL: %lld\n", + nd->arch_data2, nd->arch_data2 & 0xffffffff, + ((ulonglong)nd->arch_data2 >> 32)); + else + netdump_print("%016lx (?)\n", nd->arch_data2); + } else + netdump_print("(unused)\n"); + netdump_print(" switch_stack: %lx\n", nd->switch_stack); + netdump_print(" page_size: %d\n", nd->page_size); + dump_xen_kdump_data(fp); + netdump_print(" num_prstatus_notes: %d\n", nd->num_prstatus_notes); + netdump_print(" num_qemu_notes: %d\n", nd->num_qemu_notes); + netdump_print(" vmcoreinfo: %lx\n", (ulong)nd->vmcoreinfo); + netdump_print(" size_vmcoreinfo: %d\n", nd->size_vmcoreinfo); + netdump_print(" nt_prstatus_percpu: "); + wrap = sizeof(void *) == SIZEOF_32BIT ? 8 : 4; + flen = sizeof(void *) == SIZEOF_32BIT ? 
8 : 16; + if (nd->num_prstatus_notes == 1) + netdump_print("%.*lx\n", flen, nd->nt_prstatus_percpu[0]); + else { + for (i = 0; i < nd->num_prstatus_notes; i++) { + if ((i % wrap) == 0) + netdump_print("\n "); + netdump_print("%.*lx ", flen, + nd->nt_prstatus_percpu[i]); + } + } + netdump_print("\n"); + netdump_print(" nt_qemu_percpu: "); + if (nd->num_qemu_notes == 1) + netdump_print("%.*lx\n", flen, nd->nt_qemu_percpu[0]); + else { + for (i = 0; i < nd->num_qemu_notes; i++) { + if ((i % wrap) == 0) + netdump_print("\n "); + netdump_print("%.*lx ", flen, + nd->nt_qemu_percpu[i]); + } + } + netdump_print("\n"); + netdump_print(" backup_src_start: %llx\n", nd->backup_src_start); + netdump_print(" backup_src_size: %lx\n", nd->backup_src_size); + netdump_print(" backup_offset: %llx\n", nd->backup_offset); + netdump_print("\n"); + + switch (DUMPFILE_FORMAT(nd->flags)) + { + case NETDUMP_ELF32: + case KDUMP_ELF32: + dump_Elf32_Ehdr(nd->elf32); + dump_Elf32_Phdr(nd->notes32, ELFREAD); + for (i = 0; i < nd->num_pt_load_segments; i++) + dump_Elf32_Phdr(nd->load32 + i, ELFREAD); + offset32 = nd->notes32->p_offset; + for (tot = 0; tot < nd->notes32->p_filesz; tot += len) { + if (!(len = dump_Elf32_Nhdr(offset32, ELFREAD))) + break; + offset32 += len; + } + break; + + case NETDUMP_ELF64: + case KDUMP_ELF64: + dump_Elf64_Ehdr(nd->elf64); + dump_Elf64_Phdr(nd->notes64, ELFREAD); + for (i = 0; i < nd->num_pt_load_segments; i++) + dump_Elf64_Phdr(nd->load64 + i, ELFREAD); + if (nd->sect0_64) + dump_Elf64_Shdr(nd->sect0_64); + offset64 = nd->notes64->p_offset; + for (tot = 0; tot < nd->notes64->p_filesz; tot += len) { + if (!(len = dump_Elf64_Nhdr(offset64, ELFREAD))) + break; + offset64 += len; + } + break; + } + + dump_ramdump_data(); + + nd->ofp = fpsave; + return TRUE; +} + +/* + * Dump an ELF file header. 
+ */ +static void +dump_Elf32_Ehdr(Elf32_Ehdr *elf) +{ + char buf[BUFSIZE]; + + BZERO(buf, BUFSIZE); + BCOPY(elf->e_ident, buf, SELFMAG); + netdump_print("Elf32_Ehdr:\n"); + netdump_print(" e_ident: \\%o%s\n", buf[0], + &buf[1]); + netdump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]); + switch (elf->e_ident[EI_CLASS]) + { + case ELFCLASSNONE: + netdump_print("(ELFCLASSNONE)"); + break; + case ELFCLASS32: + netdump_print("(ELFCLASS32)\n"); + break; + case ELFCLASS64: + netdump_print("(ELFCLASS64)\n"); + break; + case ELFCLASSNUM: + netdump_print("(ELFCLASSNUM)\n"); + break; + default: + netdump_print("(?)\n"); + break; + } + netdump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]); + switch (elf->e_ident[EI_DATA]) + { + case ELFDATANONE: + netdump_print("(ELFDATANONE)\n"); + break; + case ELFDATA2LSB: + netdump_print("(ELFDATA2LSB)\n"); + break; + case ELFDATA2MSB: + netdump_print("(ELFDATA2MSB)\n"); + break; + case ELFDATANUM: + netdump_print("(ELFDATANUM)\n"); + break; + default: + netdump_print("(?)\n"); + } + netdump_print(" e_ident[EI_VERSION]: %d ", + elf->e_ident[EI_VERSION]); + if (elf->e_ident[EI_VERSION] == EV_CURRENT) + netdump_print("(EV_CURRENT)\n"); + else + netdump_print("(?)\n"); + netdump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]); + switch (elf->e_ident[EI_OSABI]) + { + case ELFOSABI_SYSV: + netdump_print("(ELFOSABI_SYSV)\n"); + break; + case ELFOSABI_HPUX: + netdump_print("(ELFOSABI_HPUX)\n"); + break; + case ELFOSABI_ARM: + netdump_print("(ELFOSABI_ARM)\n"); + break; + case ELFOSABI_STANDALONE: + netdump_print("(ELFOSABI_STANDALONE)\n"); + break; + case ELFOSABI_LINUX: + netdump_print("(ELFOSABI_LINUX)\n"); + break; + default: + netdump_print("(?)\n"); + } + netdump_print(" e_ident[EI_ABIVERSION]: %d\n", + elf->e_ident[EI_ABIVERSION]); + + netdump_print(" e_type: %d ", elf->e_type); + switch (elf->e_type) + { + case ET_NONE: + netdump_print("(ET_NONE)\n"); + break; + case ET_REL: + netdump_print("(ET_REL)\n"); + 
break; + case ET_EXEC: + netdump_print("(ET_EXEC)\n"); + break; + case ET_DYN: + netdump_print("(ET_DYN)\n"); + break; + case ET_CORE: + netdump_print("(ET_CORE)\n"); + break; + case ET_NUM: + netdump_print("(ET_NUM)\n"); + break; + case ET_LOOS: + netdump_print("(ET_LOOS)\n"); + break; + case ET_HIOS: + netdump_print("(ET_HIOS)\n"); + break; + case ET_LOPROC: + netdump_print("(ET_LOPROC)\n"); + break; + case ET_HIPROC: + netdump_print("(ET_HIPROC)\n"); + break; + default: + netdump_print("(?)\n"); + } + + netdump_print(" e_machine: %d ", elf->e_machine); + switch (elf->e_machine) + { + case EM_ARM: + netdump_print("(EM_ARM)\n"); + break; + case EM_386: + netdump_print("(EM_386)\n"); + break; + case EM_MIPS: + netdump_print("(EM_MIPS)\n"); + break; + default: + netdump_print("(unsupported)\n"); + break; + } + + netdump_print(" e_version: %ld ", elf->e_version); + netdump_print("%s\n", elf->e_version == EV_CURRENT ? + "(EV_CURRENT)" : ""); + + netdump_print(" e_entry: %lx\n", elf->e_entry); + netdump_print(" e_phoff: %lx\n", elf->e_phoff); + netdump_print(" e_shoff: %lx\n", elf->e_shoff); + netdump_print(" e_flags: %lx\n", elf->e_flags); + if ((elf->e_flags & DUMP_ELF_INCOMPLETE) && + (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF32)) + pc->flags2 |= INCOMPLETE_DUMP; + netdump_print(" e_ehsize: %x\n", elf->e_ehsize); + netdump_print(" e_phentsize: %x\n", elf->e_phentsize); + netdump_print(" e_phnum: %x\n", elf->e_phnum); + netdump_print(" e_shentsize: %x\n", elf->e_shentsize); + netdump_print(" e_shnum: %x\n", elf->e_shnum); + netdump_print(" e_shstrndx: %x\n", elf->e_shstrndx); +} + +static void +dump_Elf64_Ehdr(Elf64_Ehdr *elf) +{ + char buf[BUFSIZE]; + + BZERO(buf, BUFSIZE); + BCOPY(elf->e_ident, buf, SELFMAG); + netdump_print("Elf64_Ehdr:\n"); + netdump_print(" e_ident: \\%o%s\n", buf[0], + &buf[1]); + netdump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]); + switch (elf->e_ident[EI_CLASS]) + { + case ELFCLASSNONE: + netdump_print("(ELFCLASSNONE)"); + break; 
+ case ELFCLASS32: + netdump_print("(ELFCLASS32)\n"); + break; + case ELFCLASS64: + netdump_print("(ELFCLASS64)\n"); + break; + case ELFCLASSNUM: + netdump_print("(ELFCLASSNUM)\n"); + break; + default: + netdump_print("(?)\n"); + break; + } + netdump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]); + switch (elf->e_ident[EI_DATA]) + { + case ELFDATANONE: + netdump_print("(ELFDATANONE)\n"); + break; + case ELFDATA2LSB: + netdump_print("(ELFDATA2LSB)\n"); + break; + case ELFDATA2MSB: + netdump_print("(ELFDATA2MSB)\n"); + break; + case ELFDATANUM: + netdump_print("(ELFDATANUM)\n"); + break; + default: + netdump_print("(?)\n"); + } + netdump_print(" e_ident[EI_VERSION]: %d ", + elf->e_ident[EI_VERSION]); + if (elf->e_ident[EI_VERSION] == EV_CURRENT) + netdump_print("(EV_CURRENT)\n"); + else + netdump_print("(?)\n"); + netdump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]); + switch (elf->e_ident[EI_OSABI]) + { + case ELFOSABI_SYSV: + netdump_print("(ELFOSABI_SYSV)\n"); + break; + case ELFOSABI_HPUX: + netdump_print("(ELFOSABI_HPUX)\n"); + break; + case ELFOSABI_ARM: + netdump_print("(ELFOSABI_ARM)\n"); + break; + case ELFOSABI_STANDALONE: + netdump_print("(ELFOSABI_STANDALONE)\n"); + break; + case ELFOSABI_LINUX: + netdump_print("(ELFOSABI_LINUX)\n"); + break; + default: + netdump_print("(?)\n"); + } + netdump_print(" e_ident[EI_ABIVERSION]: %d\n", + elf->e_ident[EI_ABIVERSION]); + + netdump_print(" e_type: %d ", elf->e_type); + switch (elf->e_type) + { + case ET_NONE: + netdump_print("(ET_NONE)\n"); + break; + case ET_REL: + netdump_print("(ET_REL)\n"); + break; + case ET_EXEC: + netdump_print("(ET_EXEC)\n"); + break; + case ET_DYN: + netdump_print("(ET_DYN)\n"); + break; + case ET_CORE: + netdump_print("(ET_CORE)\n"); + break; + case ET_NUM: + netdump_print("(ET_NUM)\n"); + break; + case ET_LOOS: + netdump_print("(ET_LOOS)\n"); + break; + case ET_HIOS: + netdump_print("(ET_HIOS)\n"); + break; + case ET_LOPROC: + netdump_print("(ET_LOPROC)\n"); + 
break; + case ET_HIPROC: + netdump_print("(ET_HIPROC)\n"); + break; + default: + netdump_print("(?)\n"); + } + + netdump_print(" e_machine: %d ", elf->e_machine); + switch (elf->e_machine) + { + case EM_386: + netdump_print("(EM_386)\n"); + break; + case EM_IA_64: + netdump_print("(EM_IA_64)\n"); + break; + case EM_PPC64: + netdump_print("(EM_PPC64)\n"); + break; + case EM_X86_64: + netdump_print("(EM_X86_64)\n"); + break; + case EM_S390: + netdump_print("(EM_S390)\n"); + break; + case EM_ARM: + netdump_print("(EM_ARM)\n"); + break; + case EM_AARCH64: + netdump_print("(EM_AARCH64)\n"); + break; + default: + netdump_print("(unsupported)\n"); + break; + } + + netdump_print(" e_version: %ld ", elf->e_version); + netdump_print("%s\n", elf->e_version == EV_CURRENT ? + "(EV_CURRENT)" : ""); + + netdump_print(" e_entry: %lx\n", elf->e_entry); + netdump_print(" e_phoff: %lx\n", elf->e_phoff); + netdump_print(" e_shoff: %lx\n", elf->e_shoff); + netdump_print(" e_flags: %lx\n", elf->e_flags); + if ((elf->e_flags & DUMP_ELF_INCOMPLETE) && + (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF64)) + pc->flags2 |= INCOMPLETE_DUMP; + netdump_print(" e_ehsize: %x\n", elf->e_ehsize); + netdump_print(" e_phentsize: %x\n", elf->e_phentsize); + netdump_print(" e_phnum: %x\n", elf->e_phnum); + netdump_print(" e_shentsize: %x\n", elf->e_shentsize); + netdump_print(" e_shnum: %x\n", elf->e_shnum); + netdump_print(" e_shstrndx: %x\n", elf->e_shstrndx); +} + +/* + * Dump a program segment header + */ +static void +dump_Elf32_Phdr(Elf32_Phdr *prog, int store_pt_load_data) +{ + int others; + struct pt_load_segment *pls; + + if ((char *)prog > (nd->elf_header + nd->header_size)) + error(FATAL, + "Elf32_Phdr pointer: %lx ELF header end: %lx\n\n", + (char *)prog, nd->elf_header + nd->header_size); + + if (store_pt_load_data) + pls = &nd->pt_load_segments[store_pt_load_data-1]; + else + pls = NULL; + + netdump_print("Elf32_Phdr:\n"); + netdump_print(" p_type: %lx ", prog->p_type); + switch (prog->p_type) + 
{ + case PT_NULL: + netdump_print("(PT_NULL)\n"); + break; + case PT_LOAD: + netdump_print("(PT_LOAD)\n"); + break; + case PT_DYNAMIC: + netdump_print("(PT_DYNAMIC)\n"); + break; + case PT_INTERP: + netdump_print("(PT_INTERP)\n"); + break; + case PT_NOTE: + netdump_print("(PT_NOTE)\n"); + break; + case PT_SHLIB: + netdump_print("(PT_SHLIB)\n"); + break; + case PT_PHDR: + netdump_print("(PT_PHDR)\n"); + break; + case PT_NUM: + netdump_print("(PT_NUM)\n"); + break; + case PT_LOOS: + netdump_print("(PT_LOOS)\n"); + break; + case PT_HIOS: + netdump_print("(PT_HIOS)\n"); + break; + case PT_LOPROC: + netdump_print("(PT_LOPROC)\n"); + break; + case PT_HIPROC: + netdump_print("(PT_HIPROC)\n"); + break; + default: + netdump_print("(?)\n"); + } + + netdump_print(" p_offset: %ld (%lx)\n", prog->p_offset, + prog->p_offset); + if (store_pt_load_data) + pls->file_offset = prog->p_offset; + netdump_print(" p_vaddr: %lx\n", prog->p_vaddr); + netdump_print(" p_paddr: %lx\n", prog->p_paddr); + if (store_pt_load_data) + pls->phys_start = prog->p_paddr; + netdump_print(" p_filesz: %lu (%lx)\n", prog->p_filesz, + prog->p_filesz); + if (store_pt_load_data) { + pls->phys_end = pls->phys_start + prog->p_filesz; + pls->zero_fill = (prog->p_filesz == prog->p_memsz) ? + 0 : pls->phys_start + prog->p_memsz; + } + netdump_print(" p_memsz: %lu (%lx)\n", prog->p_memsz, + prog->p_memsz); + netdump_print(" p_flags: %lx (", prog->p_flags); + others = 0; + if (prog->p_flags & PF_X) + netdump_print("PF_X", others++); + if (prog->p_flags & PF_W) + netdump_print("%sPF_W", others++ ? "|" : ""); + if (prog->p_flags & PF_R) + netdump_print("%sPF_R", others++ ? 
"|" : ""); + netdump_print(")\n"); + netdump_print(" p_align: %ld\n", prog->p_align); +} + +static void +dump_Elf64_Phdr(Elf64_Phdr *prog, int store_pt_load_data) +{ + int others; + struct pt_load_segment *pls; + + if (store_pt_load_data) + pls = &nd->pt_load_segments[store_pt_load_data-1]; + else + pls = NULL; + + if ((char *)prog > (nd->elf_header + nd->header_size)) + error(FATAL, + "Elf64_Phdr pointer: %lx ELF header end: %lx\n\n", + (char *)prog, nd->elf_header + nd->header_size); + + netdump_print("Elf64_Phdr:\n"); + netdump_print(" p_type: %lx ", prog->p_type); + switch (prog->p_type) + { + case PT_NULL: + netdump_print("(PT_NULL)\n"); + break; + case PT_LOAD: + netdump_print("(PT_LOAD)\n"); + break; + case PT_DYNAMIC: + netdump_print("(PT_DYNAMIC)\n"); + break; + case PT_INTERP: + netdump_print("(PT_INTERP)\n"); + break; + case PT_NOTE: + netdump_print("(PT_NOTE)\n"); + break; + case PT_SHLIB: + netdump_print("(PT_SHLIB)\n"); + break; + case PT_PHDR: + netdump_print("(PT_PHDR)\n"); + break; + case PT_NUM: + netdump_print("(PT_NUM)\n"); + break; + case PT_LOOS: + netdump_print("(PT_LOOS)\n"); + break; + case PT_HIOS: + netdump_print("(PT_HIOS)\n"); + break; + case PT_LOPROC: + netdump_print("(PT_LOPROC)\n"); + break; + case PT_HIPROC: + netdump_print("(PT_HIPROC)\n"); + break; + default: + netdump_print("(?)\n"); + } + + netdump_print(" p_offset: %lld (%llx)\n", prog->p_offset, + prog->p_offset); + if (store_pt_load_data) + pls->file_offset = prog->p_offset; + netdump_print(" p_vaddr: %llx\n", prog->p_vaddr); + netdump_print(" p_paddr: %llx\n", prog->p_paddr); + if (store_pt_load_data) + pls->phys_start = prog->p_paddr; + netdump_print(" p_filesz: %llu (%llx)\n", prog->p_filesz, + prog->p_filesz); + if (store_pt_load_data) { + pls->phys_end = pls->phys_start + prog->p_filesz; + pls->zero_fill = (prog->p_filesz == prog->p_memsz) ? 
+ 0 : pls->phys_start + prog->p_memsz; + } + netdump_print(" p_memsz: %llu (%llx)\n", prog->p_memsz, + prog->p_memsz); + netdump_print(" p_flags: %lx (", prog->p_flags); + others = 0; + if (prog->p_flags & PF_X) + netdump_print("PF_X", others++); + if (prog->p_flags & PF_W) + netdump_print("%sPF_W", others++ ? "|" : ""); + if (prog->p_flags & PF_R) + netdump_print("%sPF_R", others++ ? "|" : ""); + netdump_print(")\n"); + netdump_print(" p_align: %lld\n", prog->p_align); +} + +static void +dump_Elf64_Shdr(Elf64_Shdr *shdr) +{ + netdump_print("Elf64_Shdr:\n"); + netdump_print(" sh_name: %x\n", shdr->sh_name); + netdump_print(" sh_type: %x ", shdr->sh_type); + switch (shdr->sh_type) + { + case SHT_NULL: + netdump_print("(SHT_NULL)\n"); + break; + default: + netdump_print("\n"); + break; + } + netdump_print(" sh_flags: %lx\n", shdr->sh_flags); + netdump_print(" sh_addr: %lx\n", shdr->sh_addr); + netdump_print(" sh_offset: %lx\n", shdr->sh_offset); + netdump_print(" sh_size: %lx\n", shdr->sh_size); + netdump_print(" sh_link: %x\n", shdr->sh_link); + netdump_print(" sh_info: %x (%u)\n", shdr->sh_info, + shdr->sh_info); + netdump_print(" sh_addralign: %lx\n", shdr->sh_addralign); + netdump_print(" sh_entsize: %lx\n", shdr->sh_entsize); +} + +/* + * VMCOREINFO + * + * This is a ELF note intented for makedumpfile that is exported by the + * kernel that crashes and presented as ELF note to the /proc/vmcore + * of the panic kernel. + */ + +#define VMCOREINFO_NOTE_NAME "VMCOREINFO" +#define VMCOREINFO_NOTE_NAME_BYTES (sizeof(VMCOREINFO_NOTE_NAME)) + +/* + * Reads a string value from VMCOREINFO. + * + * Returns a string (that has to be freed by the caller) that contains the + * value for key or NULL if the key has not been found. 
+ */ +static char * +vmcoreinfo_read_string(const char *key) +{ + int i, j, end; + size_t value_length; + size_t key_length = strlen(key); + char *vmcoreinfo; + uint size_vmcoreinfo; + char *value = NULL; + + /* + * Borrow this function for ELF vmcores created by the snap.so + * extension module, where arch-specific data may be passed in + * the NT_TASKSTRUCT note. + */ + if ((pc->flags2 & SNAP)) { + if (STREQ(key, "NUMBER(kimage_voffset)") && nd->arch_data1) { + value = calloc(VADDR_PRLEN+1, sizeof(char)); + sprintf(value, "%lx", nd->arch_data1); + if (nd->arch_data2 == 0) + pc->read_vmcoreinfo = no_vmcoreinfo; + return value; + } + if (STREQ(key, "NUMBER(VA_BITS)") && nd->arch_data2) { + value = calloc(VADDR_PRLEN+1, sizeof(char)); + sprintf(value, "%ld", nd->arch_data2 & 0xffffffff); + return value; + } + if (STREQ(key, "NUMBER(TCR_EL1_T1SZ)") && nd->arch_data2) { + value = calloc(VADDR_PRLEN+1, sizeof(char)); + sprintf(value, "%lld", ((ulonglong)nd->arch_data2 >> 32) & 0xffffffff); + pc->read_vmcoreinfo = no_vmcoreinfo; + return value; + } + if (STREQ(key, "relocate") && nd->arch_data1) { + value = calloc(VADDR_PRLEN+1, sizeof(char)); + sprintf(value, "%lx", nd->arch_data1); + pc->read_vmcoreinfo = no_vmcoreinfo; + return value; + } + return NULL; + } + + if (nd->vmcoreinfo) { + vmcoreinfo = (char *)nd->vmcoreinfo; + size_vmcoreinfo = nd->size_vmcoreinfo; + } else if (ACTIVE() && pkd->vmcoreinfo) { + vmcoreinfo = (char *)pkd->vmcoreinfo; + size_vmcoreinfo = pkd->size_vmcoreinfo; + } else { + vmcoreinfo = NULL; + size_vmcoreinfo = 0; + } + + if (!vmcoreinfo) + return NULL; + + /* the '+ 1' is the equal sign */ + for (i = 0; i < (int)(size_vmcoreinfo - key_length + 1); i++) { + /* + * We must also check if we're at the beginning of VMCOREINFO + * or the separating newline is there, and of course if we + * have a equal sign after the key. 
+ */ + if ((strncmp(vmcoreinfo+i, key, key_length) == 0) && + (i == 0 || vmcoreinfo[i-1] == '\n') && + (vmcoreinfo[i+key_length] == '=')) { + + end = -1; + + /* Found -- search for the next newline. */ + for (j = i + key_length + 1; + j < size_vmcoreinfo; j++) { + if (vmcoreinfo[j] == '\n') { + end = j; + break; + } + } + + /* + * If we didn't find an end, we assume it's the end + * of VMCOREINFO data. + */ + if (end == -1) { + /* Point after the end. */ + end = size_vmcoreinfo + 1; + } + + value_length = end - (1+ i + key_length); + value = calloc(value_length+1, sizeof(char)); + if (value) + strncpy(value, vmcoreinfo + i + key_length + 1, + value_length); + break; + } + } + + return value; +} + +/* + * Reads an integer value from VMCOREINFO. + */ +static long +vmcoreinfo_read_integer(const char *key, long default_value) +{ + char *string; + long retval = default_value; + + string = vmcoreinfo_read_string(key); + if (string) { + retval = atol(string); + free(string); + } + + return retval; +} + +void +display_vmcoredd_note(void *ptr, FILE *ofp) +{ + int sp; + unsigned int dump_size; + struct vmcoredd_header *vh; + + sp = VMCORE_VALID() ? 
25 : 22; + vh = (struct vmcoredd_header *)ptr; + + dump_size = vh->n_descsz - VMCOREDD_MAX_NAME_BYTES; + fprintf(ofp, "%sname: \"%s\"\n", space(sp), vh->dump_name); + fprintf(ofp, "%ssize: %u\n", space(sp), dump_size); +} + +/* + * Dump a note section header -- the actual data is defined by netdump + */ + +static size_t +dump_Elf32_Nhdr(Elf32_Off offset, int store) +{ + int i, lf; + Elf32_Nhdr *note; + size_t len; + char buf[BUFSIZE]; + char *ptr; + ulong *uptr; + int xen_core, vmcoreinfo, vmcoreinfo_xen, eraseinfo, qemuinfo; + uint64_t remaining, notesize; + + note = (Elf32_Nhdr *)((char *)nd->elf32 + offset); + + BZERO(buf, BUFSIZE); + xen_core = vmcoreinfo = eraseinfo = qemuinfo = FALSE; + ptr = (char *)note + sizeof(Elf32_Nhdr); + + if (ptr > (nd->elf_header + nd->header_size)) { + error(WARNING, + "Elf32_Nhdr pointer: %lx ELF header end: %lx\n", + (char *)note, nd->elf_header + nd->header_size); + return 0; + } else + remaining = (uint64_t)((nd->elf_header + nd->header_size) - ptr); + + notesize = (uint64_t)note->n_namesz + (uint64_t)note->n_descsz; + + if ((note->n_namesz == 0) || !remaining || (notesize > remaining)) { + error(WARNING, + "possibly corrupt Elf32_Nhdr: " + "n_namesz: %ld n_descsz: %ld n_type: %lx\n%s", + note->n_namesz, note->n_descsz, note->n_type, + note->n_namesz || note->n_descsz || !remaining ? 
+ "\n" : ""); + if (note->n_namesz || note->n_descsz || !remaining) + return 0; + } + + netdump_print("Elf32_Nhdr:\n"); + netdump_print(" n_namesz: %ld ", note->n_namesz); + + BCOPY(ptr, buf, note->n_namesz); + netdump_print("(\"%s\")\n", buf); + + netdump_print(" n_descsz: %ld\n", note->n_descsz); + netdump_print(" n_type: %lx ", note->n_type); + switch (note->n_type) + { + case NT_PRSTATUS: + netdump_print("(NT_PRSTATUS)\n"); + if (store) { + if (!nd->nt_prstatus) + nd->nt_prstatus = (void *)note; + for (i = 0; i < NR_CPUS; i++) { + if (!nd->nt_prstatus_percpu[i]) { + nd->nt_prstatus_percpu[i] = (void *)note; + nd->num_prstatus_notes++; + break; + } + } + } + if (machine_type("PPC") && (nd->num_prstatus_notes > 0)) + pc->flags2 |= ELF_NOTES; + break; + case NT_PRPSINFO: + netdump_print("(NT_PRPSINFO)\n"); + if (store) + nd->nt_prpsinfo = (void *)note; + break; + case NT_TASKSTRUCT: + netdump_print("(NT_TASKSTRUCT)\n"); + if (store) { + nd->nt_taskstruct = (void *)note; + nd->task_struct = *((ulong *)(ptr + note->n_namesz)); + } + break; + case NT_DISKDUMP: + netdump_print("(NT_DISKDUMP)\n"); + uptr = (ulong *)(ptr + note->n_namesz); + if (*uptr && store) + nd->flags |= PARTIAL_DUMP; + break; +#ifdef NOTDEF + /* + * Note: Based upon the original, abandoned, proposal for + * its contents -- keep around for potential future use. + */ + case NT_KDUMPINFO: + netdump_print("(NT_KDUMPINFO)\n"); + if (store) { + uptr = (note->n_namesz == 5) ? 
+ (ulong *)(ptr + ((note->n_namesz + 3) & ~3)) : + (ulong *)(ptr + note->n_namesz); + nd->page_size = (uint)(1 << *uptr); + uptr++; + nd->task_struct = *uptr; + } + break; +#endif + case NT_VMCOREDD: + netdump_print("(NT_VMCOREDD)\n"); + if (store) { + for (i = 0; i < NR_DEVICE_DUMPS; i++) { + if (!nd->nt_vmcoredd_array[i]) { + nd->nt_vmcoredd_array[i] = (void *)note; + nd->num_vmcoredd_notes++; + break; + } + } + } + break; + default: + xen_core = STRNEQ(buf, "XEN CORE") || STRNEQ(buf, "Xen"); + if (STRNEQ(buf, "VMCOREINFO_XEN")) + vmcoreinfo_xen = TRUE; + else + vmcoreinfo = STRNEQ(buf, "VMCOREINFO"); + eraseinfo = STRNEQ(buf, "ERASEINFO"); + qemuinfo = STRNEQ(buf, "QEMU"); + if (xen_core) { + netdump_print("(unknown Xen n_type)\n"); + if (store) + error(WARNING, "unknown Xen n_type: %lx\n\n", + note->n_type); + } else if (vmcoreinfo) { + netdump_print("(unused)\n"); + nd->vmcoreinfo = (char *)(ptr + note->n_namesz + 1); + nd->size_vmcoreinfo = note->n_descsz; + if (READ_PAGESIZE_FROM_VMCOREINFO() && store) + nd->page_size = (uint) + vmcoreinfo_read_integer("PAGESIZE", 0); + pc->flags2 |= VMCOREINFO; + } else if (eraseinfo) { + netdump_print("(unused)\n"); + if (note->n_descsz) + pc->flags2 |= ERASEINFO_DATA; + } else if (qemuinfo) { + pc->flags2 |= QEMU_MEM_DUMP_ELF; + netdump_print("(QEMUCPUState)\n"); + } else if (vmcoreinfo_xen) + netdump_print("(unused)\n"); + else + netdump_print("(?)\n"); + break; + + case NT_XEN_KDUMP_CR3: + netdump_print("(NT_XEN_KDUMP_CR3) [obsolete]\n"); + /* FALL THROUGH */ + + case XEN_ELFNOTE_CRASH_INFO: + /* + * x86 and x86_64: p2m mfn appended to crash_xen_info_t structure + */ + if (note->n_type == XEN_ELFNOTE_CRASH_INFO) + netdump_print("(XEN_ELFNOTE_CRASH_INFO)\n"); + xen_core = TRUE; + if (store) + process_xen_note(note->n_type, + ptr + roundup(note->n_namesz, 4), + note->n_descsz); + break; + + case XEN_ELFNOTE_CRASH_REGS: + /* + * x86 and x86_64: cr0, cr2, cr3, cr4 + */ + xen_core = TRUE; + 
netdump_print("(XEN_ELFNOTE_CRASH_REGS)\n"); + break; + } + + uptr = (ulong *)(ptr + note->n_namesz); + + /* + * kdumps are off-by-1, because their n_namesz is 5 for "CORE". + */ + if ((nd->flags & KDUMP_ELF32) && (note->n_namesz == 5)) + uptr = (ulong *)(ptr + ((note->n_namesz + 3) & ~3)); + + if (xen_core) + uptr = (ulong *)roundup((ulong)uptr, 4); + + if (store && qemuinfo) { + for(i = 0; i < NR_CPUS; i++) { + if (!nd->nt_qemu_percpu[i]) { + nd->nt_qemu_percpu[i] = (void *)uptr; + nd->num_qemu_notes++; + break; + } + } + } + + if (vmcoreinfo || eraseinfo || vmcoreinfo_xen) { + netdump_print(" "); + ptr += note->n_namesz + 1; + for (i = 0; i < note->n_descsz; i++, ptr++) { + netdump_print("%c", *ptr); + if (*ptr == '\n') + netdump_print(" "); + } + lf = 0; + } else if (note->n_type == NT_VMCOREDD) { + if (nd->ofp) + display_vmcoredd_note(note, nd->ofp); + } else { + if (nd->ofp && !XEN_CORE_DUMPFILE() && !(pc->flags2 & LIVE_DUMP)) { + if (machine_type("X86")) { + if (note->n_type == NT_PRSTATUS) + display_ELF_note(EM_386, PRSTATUS_NOTE, note, nd->ofp); + else if (qemuinfo) + display_ELF_note(EM_386, QEMU_NOTE, note, nd->ofp); + } + } + for (i = lf = 0; i < note->n_descsz/sizeof(ulong); i++) { + if (((i%4)==0)) { + netdump_print("%s ", + i ? 
"\n" : ""); + lf++; + } else + lf = 0; + netdump_print("%08lx ", *uptr++); + } + } + if (!lf || (note->n_type == NT_TASKSTRUCT) || + (note->n_type == NT_DISKDUMP) || xen_core) + netdump_print("\n"); + + len = sizeof(Elf32_Nhdr); + len = roundup(len + note->n_namesz, 4); + len = roundup(len + note->n_descsz, 4); + + return len; +} + + +static size_t +dump_Elf64_Nhdr(Elf64_Off offset, int store) +{ + int i = 0, lf = 0; + Elf64_Nhdr *note; + size_t len; + char buf[BUFSIZE]; + char *ptr; + ulonglong *uptr; + int *iptr; + int xen_core, vmcoreinfo, vmcoreinfo_xen, eraseinfo, qemuinfo; + uint64_t remaining, notesize; + + note = (Elf64_Nhdr *)((char *)nd->elf64 + offset); + + BZERO(buf, BUFSIZE); + ptr = (char *)note + sizeof(Elf64_Nhdr); + xen_core = vmcoreinfo = vmcoreinfo_xen = eraseinfo = qemuinfo = FALSE; + + if (ptr > (nd->elf_header + nd->header_size)) { + error(WARNING, + "Elf64_Nhdr pointer: %lx ELF header end: %lx\n\n", + (char *)note, nd->elf_header + nd->header_size); + return 0; + } else + remaining = (uint64_t)((nd->elf_header + nd->header_size) - ptr); + + notesize = (uint64_t)note->n_namesz + (uint64_t)note->n_descsz; + + if ((note->n_namesz == 0) || !remaining || (notesize > remaining)) { + error(WARNING, + "possibly corrupt Elf64_Nhdr: " + "n_namesz: %ld n_descsz: %ld n_type: %lx\n%s", + note->n_namesz, note->n_descsz, note->n_type, + note->n_namesz || note->n_descsz || !remaining ? 
+ "\n" : ""); + if (note->n_namesz || note->n_descsz || !remaining) + return 0; + } + + netdump_print("Elf64_Nhdr:\n"); + netdump_print(" n_namesz: %ld ", note->n_namesz); + + BCOPY(ptr, buf, note->n_namesz); + netdump_print("(\"%s\")\n", buf); + + netdump_print(" n_descsz: %ld\n", note->n_descsz); + netdump_print(" n_type: %lx ", note->n_type); + switch (note->n_type) + { + case NT_PRSTATUS: + netdump_print("(NT_PRSTATUS)\n"); + if (store) { + if (!nd->nt_prstatus) + nd->nt_prstatus = (void *)note; + for (i = 0; i < NR_CPUS; i++) { + if (!nd->nt_prstatus_percpu[i]) { + nd->nt_prstatus_percpu[i] = (void *)note; + nd->num_prstatus_notes++; + break; + } + } + } + break; + case NT_PRPSINFO: + netdump_print("(NT_PRPSINFO)\n"); + if (store) + nd->nt_prpsinfo = (void *)note; + break; + case NT_FPREGSET: + netdump_print("(NT_FPREGSET)\n"); + break; + case NT_S390_TIMER: + netdump_print("(NT_S390_TIMER)\n"); + break; + case NT_S390_TODCMP: + netdump_print("(NT_S390_TODCMP)\n"); + break; + case NT_S390_TODPREG: + netdump_print("(NT_S390_TODPREG)\n"); + break; + case NT_S390_CTRS: + netdump_print("(NT_S390_CTRS)\n"); + break; + case NT_S390_PREFIX: + netdump_print("(NT_S390_PREFIX)\n"); + break; + case NT_S390_VXRS_LOW: + netdump_print("(NT_S390_VXRS_LOW)\n"); + break; + case NT_S390_VXRS_HIGH: + netdump_print("(NT_S390_VXRS_HIGH)\n"); + break; + case NT_TASKSTRUCT: + netdump_print("(NT_TASKSTRUCT)\n"); + if (STRNEQ(buf, "SNAP")) + pc->flags2 |= (LIVE_DUMP|SNAP); + if (store) { + nd->nt_taskstruct = (void *)note; + nd->task_struct = *((ulong *)(ptr + note->n_namesz)); + if (pc->flags2 & SNAP) { + if (note->n_descsz >= 16) + nd->arch_data1 = *((ulong *) + (ptr + note->n_namesz + sizeof(ulong))); + if (note->n_descsz >= 24) + nd->arch_data2 = *((ulong *) + (ptr + note->n_namesz + sizeof(ulong) + sizeof(ulong))); + } else if (machine_type("IA64")) + nd->switch_stack = *((ulong *) + (ptr + note->n_namesz + sizeof(ulong))); + } + break; + case NT_DISKDUMP: + 
netdump_print("(NT_DISKDUMP)\n"); + iptr = (int *)(ptr + note->n_namesz); + if (*iptr && store) + nd->flags |= PARTIAL_DUMP; + if (note->n_descsz < sizeof(ulonglong)) + netdump_print(" %08x", *iptr); + break; +#ifdef NOTDEF + /* + * Note: Based upon the original, abandoned, proposal for + * its contents -- keep around for potential future use. + */ + case NT_KDUMPINFO: + netdump_print("(NT_KDUMPINFO)\n"); + if (store) { + uint32_t *u32ptr; + + if (nd->elf64->e_machine == EM_386) { + u32ptr = (note->n_namesz == 5) ? + (uint *)(ptr + ((note->n_namesz + 3) & ~3)) : + (uint *)(ptr + note->n_namesz); + nd->page_size = 1 << *u32ptr; + u32ptr++; + nd->task_struct = *u32ptr; + } else { + uptr = (note->n_namesz == 5) ? + (ulonglong *)(ptr + ((note->n_namesz + 3) & ~3)) : + (ulonglong *)(ptr + note->n_namesz); + nd->page_size = (uint)(1 << *uptr); + uptr++; + nd->task_struct = *uptr; + } + } + break; +#endif + case NT_VMCOREDD: + netdump_print("(NT_VMCOREDD)\n"); + if (store) { + for (i = 0; i < NR_DEVICE_DUMPS; i++) { + if (!nd->nt_vmcoredd_array[i]) { + nd->nt_vmcoredd_array[i] = (void *)note; + nd->num_vmcoredd_notes++; + break; + } + } + } + break; + default: + xen_core = STRNEQ(buf, "XEN CORE") || STRNEQ(buf, "Xen"); + if (STRNEQ(buf, "VMCOREINFO_XEN")) + vmcoreinfo_xen = TRUE; + else + vmcoreinfo = STRNEQ(buf, "VMCOREINFO"); + eraseinfo = STRNEQ(buf, "ERASEINFO"); + qemuinfo = STRNEQ(buf, "QEMU"); + if (xen_core) { + netdump_print("(unknown Xen n_type)\n"); + if (store) + error(WARNING, + "unknown Xen n_type: %lx\n\n", note->n_type); + } else if (vmcoreinfo) { + netdump_print("(unused)\n"); + + nd->vmcoreinfo = (char *)nd->elf64 + offset + + (sizeof(Elf64_Nhdr) + + ((note->n_namesz + 3) & ~3)); + nd->size_vmcoreinfo = note->n_descsz; + + if (READ_PAGESIZE_FROM_VMCOREINFO() && store) + nd->page_size = (uint) + vmcoreinfo_read_integer("PAGESIZE", 0); + pc->flags2 |= VMCOREINFO; + } else if (eraseinfo) { + netdump_print("(unused)\n"); + if (note->n_descsz) + pc->flags2 |= 
ERASEINFO_DATA; + } else if (qemuinfo) { + pc->flags2 |= QEMU_MEM_DUMP_ELF; + netdump_print("(QEMUCPUState)\n"); + } else if (vmcoreinfo_xen) + netdump_print("(unused)\n"); + else + netdump_print("(?)\n"); + break; + + case NT_XEN_KDUMP_CR3: + netdump_print("(NT_XEN_KDUMP_CR3) [obsolete]\n"); + /* FALL THROUGH */ + + case XEN_ELFNOTE_CRASH_INFO: + /* + * x86 and x86_64: p2m mfn appended to crash_xen_info_t structure + */ + if (note->n_type == XEN_ELFNOTE_CRASH_INFO) + netdump_print("(XEN_ELFNOTE_CRASH_INFO)\n"); + xen_core = TRUE; + if (store) + process_xen_note(note->n_type, + ptr + roundup(note->n_namesz, 4), + note->n_descsz); + break; + + case XEN_ELFNOTE_CRASH_REGS: + /* + * x86 and x86_64: cr0, cr2, cr3, cr4 + */ + xen_core = TRUE; + netdump_print("(XEN_ELFNOTE_CRASH_REGS)\n"); + break; + } + + if (machine_type("S390X")) { + if (store) + machdep->dumpfile_init(nd->num_prstatus_notes, note); + + uptr = (ulonglong *) + ((void *)note + roundup(sizeof(*note) + note->n_namesz, 4)); + } else { + uptr = (ulonglong *)(ptr + note->n_namesz); + + /* + * kdumps are off-by-1, because their n_namesz is 5 for "CORE". 
+ */
+ if ((nd->flags & KDUMP_ELF64) && (note->n_namesz == 5))
+ uptr = (ulonglong *)(ptr + ((note->n_namesz + 3) & ~3));
+
+ if (xen_core)
+ uptr = (ulonglong *)roundup((ulong)uptr, 4);
+ }
+
+ if (store && qemuinfo) {
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!nd->nt_qemu_percpu[i]) {
+ nd->nt_qemu_percpu[i] = (void *)uptr;
+ nd->num_qemu_notes++;
+ break;
+ }
+ }
+ }
+
+ if (note->n_type == NT_VMCOREDD) {
+ if (nd->ofp)
+ display_vmcoredd_note(note, nd->ofp);
+ } else if (BITS32() && (xen_core || (note->n_type == NT_PRSTATUS) || qemuinfo)) {
+ if (nd->ofp && !XEN_CORE_DUMPFILE() && !(pc->flags2 & LIVE_DUMP)) {
+ if (machine_type("X86")) {
+ if (note->n_type == NT_PRSTATUS)
+ display_ELF_note(EM_386, PRSTATUS_NOTE, note, nd->ofp);
+ else if (qemuinfo)
+ display_ELF_note(EM_386, QEMU_NOTE, note, nd->ofp);
+ }
+ }
+
+ iptr = (int *)uptr;
+ for (i = lf = 0; i < note->n_descsz/sizeof(ulong); i++) {
+ if (((i%4)==0)) {
+ netdump_print("%s ",
+ i ? "\n" : "");
+ lf++;
+ } else
+ lf = 0;
+ netdump_print("%08lx ", *iptr++);
+ }
+ } else if (vmcoreinfo || eraseinfo || vmcoreinfo_xen) {
+ netdump_print(" ");
+ ptr += note->n_namesz + 1;
+ for (i = 0; i < note->n_descsz; i++, ptr++) {
+ netdump_print("%c", *ptr);
+ if (*ptr == '\n')
+ netdump_print(" ");
+ }
+ lf = 0;
+ } else if (note->n_descsz == 4) {
+ i = 0; lf = 1;
+ iptr = (int *)uptr;
+ netdump_print(" %08lx\n", *iptr);
+ } else {
+ if (nd->ofp && !XEN_CORE_DUMPFILE() && !(pc->flags2 & LIVE_DUMP)) {
+ if (machine_type("X86_64")) {
+ if (note->n_type == NT_PRSTATUS)
+ display_ELF_note(EM_X86_64, PRSTATUS_NOTE, note, nd->ofp);
+ else if (qemuinfo)
+ display_ELF_note(EM_X86_64, QEMU_NOTE, note, nd->ofp);
+ }
+ if (machine_type("PPC64") && (note->n_type == NT_PRSTATUS))
+ display_ELF_note(EM_PPC64, PRSTATUS_NOTE, note, nd->ofp);
+ if (machine_type("ARM64") && (note->n_type == NT_PRSTATUS))
+ display_ELF_note(EM_AARCH64, PRSTATUS_NOTE, note, nd->ofp);
+ }
+ for (i = lf = 0; i < note->n_descsz/sizeof(ulonglong); i++) {
+ if (((i%2)==0)) {
+ 
netdump_print("%s ", + i ? "\n" : ""); + lf++; + } else + lf = 0; + netdump_print("%016llx ", *uptr++); + } + } + if (!lf) + netdump_print("\n"); + else if (i && (i&1)) + netdump_print("\n"); + + len = sizeof(Elf64_Nhdr); + len = roundup(len + note->n_namesz, 4); + len = roundup(len + note->n_descsz, 4); + + return len; +} + +void * +netdump_get_prstatus_percpu(int cpu) +{ + int online; + + if ((cpu < 0) || (cpu >= nd->num_prstatus_notes)) + return NULL; + + /* + * If no cpu mapping was done, then there must be + * a one-to-one relationship between the number + * of online cpus and the number of notes. + */ + if ((online = get_cpus_online()) && + (online == kt->cpus) && + (online != nd->num_prstatus_notes)) + return NULL; + + return nd->nt_prstatus_percpu[cpu]; +} + +/* + * Send the request to the proper architecture hander. + */ +void +get_netdump_regs(struct bt_info *bt, ulong *eip, ulong *esp) +{ + int e_machine; + + if (nd->elf32) + e_machine = nd->elf32->e_machine; + else if (nd->elf64) + e_machine = nd->elf64->e_machine; + else + e_machine = EM_NONE; + + switch (e_machine) + { + case EM_386: + return get_netdump_regs_x86(bt, eip, esp); + break; + + case EM_IA_64: + /* For normal backtraces, this information will be obtained + * frome the switch_stack structure, which is pointed to by + * the thread.ksp field of the task_struct. But it's still + * needed by the "bt -t" option. 
+ */ + machdep->get_stack_frame(bt, eip, esp); + break; + + case EM_PPC: + return get_netdump_regs_ppc(bt, eip, esp); + break; + + case EM_PPC64: + return get_netdump_regs_ppc64(bt, eip, esp); + break; + + case EM_X86_64: + return get_netdump_regs_x86_64(bt, eip, esp); + break; + + case EM_S390: + machdep->get_stack_frame(bt, eip, esp); + break; + + case EM_ARM: + return get_netdump_regs_arm(bt, eip, esp); + break; + + case EM_AARCH64: + return get_netdump_regs_arm64(bt, eip, esp); + break; + + case EM_MIPS: + return get_netdump_regs_mips(bt, eip, esp); + break; + + default: + error(FATAL, + "support for ELF machine type %d not available\n", + e_machine); + } +} + +/* + * get regs from elf note, and return the address of user_regs. + */ +static char * +get_regs_from_note(char *note, ulong *ip, ulong *sp) +{ + Elf32_Nhdr *note32; + Elf64_Nhdr *note64; + size_t len; + char *user_regs; + long offset_sp, offset_ip; + + if (machine_type("X86_64")) { + note64 = (Elf64_Nhdr *)note; + len = sizeof(Elf64_Nhdr); + len = roundup(len + note64->n_namesz, 4); + len = roundup(len + note64->n_descsz, 4); + offset_sp = OFFSET(user_regs_struct_rsp); + offset_ip = OFFSET(user_regs_struct_rip); + } else if (machine_type("X86")) { + note32 = (Elf32_Nhdr *)note; + len = sizeof(Elf32_Nhdr); + len = roundup(len + note32->n_namesz, 4); + len = roundup(len + note32->n_descsz, 4); + offset_sp = OFFSET(user_regs_struct_esp); + offset_ip = OFFSET(user_regs_struct_eip); + } else + return NULL; + + user_regs = note + len - SIZE(user_regs_struct) - sizeof(long); + *sp = ULONG(user_regs + offset_sp); + *ip = ULONG(user_regs + offset_ip); + + return user_regs; +} + +void +display_regs_from_elf_notes(int cpu, FILE *ofp) +{ + Elf32_Nhdr *note32; + Elf64_Nhdr *note64; + size_t len; + char *user_regs; + int c, skipped_count; + + /* + * Kdump NT_PRSTATUS notes are only related to online cpus, + * so offline cpus should be skipped. 
+ */ + if (pc->flags2 & QEMU_MEM_DUMP_ELF) + skipped_count = 0; + else { + for (c = skipped_count = 0; c < cpu; c++) { + if (check_offline_cpu(c)) + skipped_count++; + } + } + + if ((cpu - skipped_count) >= nd->num_prstatus_notes && + !machine_type("MIPS")) { + error(INFO, "registers not collected for cpu %d\n", cpu); + return; + } + + if (machine_type("X86_64")) { + if (nd->num_prstatus_notes > 1) + note64 = (Elf64_Nhdr *) + nd->nt_prstatus_percpu[cpu]; + else + note64 = (Elf64_Nhdr *)nd->nt_prstatus; + len = sizeof(Elf64_Nhdr); + len = roundup(len + note64->n_namesz, 4); + len = roundup(len + note64->n_descsz, 4); + user_regs = ((char *)note64) + len - SIZE(user_regs_struct) - sizeof(long); + + fprintf(ofp, + " RIP: %016llx RSP: %016llx RFLAGS: %08llx\n" + " RAX: %016llx RBX: %016llx RCX: %016llx\n" + " RDX: %016llx RSI: %016llx RDI: %016llx\n" + " RBP: %016llx R8: %016llx R9: %016llx\n" + " R10: %016llx R11: %016llx R12: %016llx\n" + " R13: %016llx R14: %016llx R15: %016llx\n" + " CS: %04x SS: %04x\n", + ULONGLONG(user_regs + OFFSET(user_regs_struct_rip)), + ULONGLONG(user_regs + OFFSET(user_regs_struct_rsp)), + ULONGLONG(user_regs + OFFSET(user_regs_struct_eflags)), + ULONGLONG(user_regs + OFFSET(user_regs_struct_rax)), + ULONGLONG(user_regs + OFFSET(user_regs_struct_rbx)), + ULONGLONG(user_regs + OFFSET(user_regs_struct_rcx)), + ULONGLONG(user_regs + OFFSET(user_regs_struct_rdx)), + ULONGLONG(user_regs + OFFSET(user_regs_struct_rsi)), + ULONGLONG(user_regs + OFFSET(user_regs_struct_rdi)), + ULONGLONG(user_regs + OFFSET(user_regs_struct_rbp)), + ULONGLONG(user_regs + OFFSET(user_regs_struct_r8)), + ULONGLONG(user_regs + OFFSET(user_regs_struct_r9)), + ULONGLONG(user_regs + OFFSET(user_regs_struct_r10)), + ULONGLONG(user_regs + OFFSET(user_regs_struct_r11)), + ULONGLONG(user_regs + OFFSET(user_regs_struct_r12)), + ULONGLONG(user_regs + OFFSET(user_regs_struct_r13)), + ULONGLONG(user_regs + OFFSET(user_regs_struct_r14)), + ULONGLONG(user_regs + 
OFFSET(user_regs_struct_r15)), + USHORT(user_regs + OFFSET(user_regs_struct_cs)), + USHORT(user_regs + OFFSET(user_regs_struct_ss)) + ); + } else if (machine_type("X86")) { + if (nd->num_prstatus_notes > 1) + note32 = (Elf32_Nhdr *) + nd->nt_prstatus_percpu[cpu]; + else + note32 = (Elf32_Nhdr *)nd->nt_prstatus; + len = sizeof(Elf32_Nhdr); + len = roundup(len + note32->n_namesz, 4); + len = roundup(len + note32->n_descsz, 4); + user_regs = ((char *)note32) + len - SIZE(user_regs_struct) - sizeof(long); + + fprintf(ofp, + " EAX: %08x EBX: %08x ECX: %08x EDX: %08x\n" + " ESP: %08x EIP: %08x ESI: %08x EDI: %08x\n" + " CS: %04x DS: %04x ES: %04x FS: %04x\n" + " GS: %04x SS: %04x\n" + " EBP: %08x EFLAGS: %08x\n", + UINT(user_regs + OFFSET(user_regs_struct_eax)), + UINT(user_regs + OFFSET(user_regs_struct_ebx)), + UINT(user_regs + OFFSET(user_regs_struct_ecx)), + UINT(user_regs + OFFSET(user_regs_struct_edx)), + UINT(user_regs + OFFSET(user_regs_struct_esp)), + UINT(user_regs + OFFSET(user_regs_struct_eip)), + UINT(user_regs + OFFSET(user_regs_struct_esi)), + UINT(user_regs + OFFSET(user_regs_struct_edi)), + USHORT(user_regs + OFFSET(user_regs_struct_cs)), + USHORT(user_regs + OFFSET(user_regs_struct_ds)), + USHORT(user_regs + OFFSET(user_regs_struct_es)), + USHORT(user_regs + OFFSET(user_regs_struct_fs)), + USHORT(user_regs + OFFSET(user_regs_struct_gs)), + USHORT(user_regs + OFFSET(user_regs_struct_ss)), + UINT(user_regs + OFFSET(user_regs_struct_ebp)), + UINT(user_regs + OFFSET(user_regs_struct_eflags)) + ); + } else if (machine_type("PPC64")) { + struct ppc64_elf_prstatus *prs; + struct ppc64_pt_regs *pr; + + if (nd->num_prstatus_notes > 1) + note64 = (Elf64_Nhdr *)nd->nt_prstatus_percpu[cpu]; + else + note64 = (Elf64_Nhdr *)nd->nt_prstatus; + + prs = (struct ppc64_elf_prstatus *) + ((char *)note64 + sizeof(Elf64_Nhdr) + note64->n_namesz); + prs = (struct ppc64_elf_prstatus *)roundup((ulong)prs, 4); + pr = &prs->pr_reg; + + fprintf(ofp, + " R0: %016lx R1: %016lx R2: 
%016lx\n" + " R3: %016lx R4: %016lx R5: %016lx\n" + " R6: %016lx R7: %016lx R8: %016lx\n" + " R9: %016lx R10: %016lx R11: %016lx\n" + " R12: %016lx R13: %016lx R14: %016lx\n" + " R15: %016lx R16: %016lx R17: %016lx\n" + " R18: %016lx R19: %016lx R20: %016lx\n" + " R21: %016lx R22: %016lx R23: %016lx\n" + " R24: %016lx R25: %016lx R26: %016lx\n" + " R27: %016lx R28: %016lx R29: %016lx\n" + " R30: %016lx R31: %016lx\n" + " NIP: %016lx MSR: %016lx\n" + " OGPR3: %016lx CTR: %016lx\n" + " LINK: %016lx XER: %016lx\n" + " CCR: %016lx MQ: %016lx\n" + " TRAP: %016lx DAR: %016lx\n" + " DSISR: %016lx RESULT: %016lx\n", + pr->gpr[0], pr->gpr[1], pr->gpr[2], + pr->gpr[3], pr->gpr[4], pr->gpr[5], + pr->gpr[6], pr->gpr[7], pr->gpr[8], + pr->gpr[9], pr->gpr[10], pr->gpr[11], + pr->gpr[12], pr->gpr[13], pr->gpr[14], + pr->gpr[15], pr->gpr[16], pr->gpr[17], + pr->gpr[18], pr->gpr[19], pr->gpr[20], + pr->gpr[21], pr->gpr[22], pr->gpr[23], + pr->gpr[24], pr->gpr[25], pr->gpr[26], + pr->gpr[27], pr->gpr[28], pr->gpr[29], + pr->gpr[30], pr->gpr[31], + pr->nip, pr->msr, + pr->orig_gpr3, pr->ctr, + pr->link, pr->xer, + pr->ccr, pr->mq, + pr->trap, pr->dar, + pr->dsisr, pr->result); + } else if (machine_type("ARM64")) { + if (nd->num_prstatus_notes > 1) + note64 = (Elf64_Nhdr *) + nd->nt_prstatus_percpu[cpu]; + else + note64 = (Elf64_Nhdr *)nd->nt_prstatus; + len = sizeof(Elf64_Nhdr); + len = roundup(len + note64->n_namesz, 4); + len = roundup(len + note64->n_descsz, 4); + user_regs = (char *)note64 + len - SIZE(elf_prstatus) + OFFSET(elf_prstatus_pr_reg); + fprintf(ofp, + " X0: %016lx X1: %016lx X2: %016lx\n" + " X3: %016lx X4: %016lx X5: %016lx\n" + " X6: %016lx X7: %016lx X8: %016lx\n" + " X9: %016lx X10: %016lx X11: %016lx\n" + " X12: %016lx X13: %016lx X14: %016lx\n" + " X15: %016lx X16: %016lx X17: %016lx\n" + " X18: %016lx X19: %016lx X20: %016lx\n" + " X21: %016lx X22: %016lx X23: %016lx\n" + " X24: %016lx X25: %016lx X26: %016lx\n" + " X27: %016lx X28: %016lx X29: %016lx\n" + " 
LR: %016lx SP: %016lx PC: %016lx\n" + " PSTATE: %08lx FPVALID: %08x\n", + ULONG(user_regs + sizeof(ulong) * 0), + ULONG(user_regs + sizeof(ulong) * 1), + ULONG(user_regs + sizeof(ulong) * 2), + ULONG(user_regs + sizeof(ulong) * 3), + ULONG(user_regs + sizeof(ulong) * 4), + ULONG(user_regs + sizeof(ulong) * 5), + ULONG(user_regs + sizeof(ulong) * 6), + ULONG(user_regs + sizeof(ulong) * 7), + ULONG(user_regs + sizeof(ulong) * 8), + ULONG(user_regs + sizeof(ulong) * 9), + ULONG(user_regs + sizeof(ulong) * 10), + ULONG(user_regs + sizeof(ulong) * 11), + ULONG(user_regs + sizeof(ulong) * 12), + ULONG(user_regs + sizeof(ulong) * 13), + ULONG(user_regs + sizeof(ulong) * 14), + ULONG(user_regs + sizeof(ulong) * 15), + ULONG(user_regs + sizeof(ulong) * 16), + ULONG(user_regs + sizeof(ulong) * 17), + ULONG(user_regs + sizeof(ulong) * 18), + ULONG(user_regs + sizeof(ulong) * 19), + ULONG(user_regs + sizeof(ulong) * 20), + ULONG(user_regs + sizeof(ulong) * 21), + ULONG(user_regs + sizeof(ulong) * 22), + ULONG(user_regs + sizeof(ulong) * 23), + ULONG(user_regs + sizeof(ulong) * 24), + ULONG(user_regs + sizeof(ulong) * 25), + ULONG(user_regs + sizeof(ulong) * 26), + ULONG(user_regs + sizeof(ulong) * 27), + ULONG(user_regs + sizeof(ulong) * 28), + ULONG(user_regs + sizeof(ulong) * 29), + ULONG(user_regs + sizeof(ulong) * 30), + ULONG(user_regs + sizeof(ulong) * 31), + ULONG(user_regs + sizeof(ulong) * 32), + ULONG(user_regs + sizeof(ulong) * 33), + UINT(user_regs + sizeof(ulong) * 34)); + } else if (machine_type("MIPS")) { + mips_display_regs_from_elf_notes(cpu, ofp); + } +} + +void +dump_registers_for_elf_dumpfiles(void) +{ + int c; + + if (!(machine_type("X86") || machine_type("X86_64") || + machine_type("ARM64") || machine_type("PPC64") || + machine_type("MIPS"))) + error(FATAL, "-r option not supported for this dumpfile\n"); + + if (NETDUMP_DUMPFILE()) { + display_regs_from_elf_notes(0, fp); + return; + } + + for (c = 0; c < kt->cpus; c++) { + if (check_offline_cpu(c)) { + 
fprintf(fp, "%sCPU %d: [OFFLINE]\n", c ? "\n" : "", c); + continue; + } + + fprintf(fp, "%sCPU %d:\n", c ? "\n" : "", c); + display_regs_from_elf_notes(c, fp); + } +} + +struct x86_64_user_regs_struct { + unsigned long r15,r14,r13,r12,rbp,rbx,r11,r10; + unsigned long r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax; + unsigned long rip,cs,eflags; + unsigned long rsp,ss; + unsigned long fs_base, gs_base; + unsigned long ds,es,fs,gs; +}; + +struct x86_64_prstatus { + int si_signo; + int si_code; + int si_errno; + short cursig; + unsigned long sigpend; + unsigned long sighold; + int pid; + int ppid; + int pgrp; + int sid; + struct timeval utime; + struct timeval stime; + struct timeval cutime; + struct timeval cstime; + struct x86_64_user_regs_struct regs; + int fpvalid; +}; + +static void +display_prstatus_x86_64(void *note_ptr, FILE *ofp) +{ + struct x86_64_prstatus *pr; + Elf64_Nhdr *note; + int sp; + + note = (Elf64_Nhdr *)note_ptr; + pr = (struct x86_64_prstatus *)( + (char *)note + sizeof(Elf64_Nhdr) + note->n_namesz); + pr = (struct x86_64_prstatus *)roundup((ulong)pr, 4); + sp = nd->num_prstatus_notes ? 
25 : 22; + + fprintf(ofp, + "%ssi.signo: %d si.code: %d si.errno: %d\n" + "%scursig: %d sigpend: %lx sighold: %lx\n" + "%spid: %d ppid: %d pgrp: %d sid:%d\n" + "%sutime: %01lld.%06d stime: %01lld.%06d\n" + "%scutime: %01lld.%06d cstime: %01lld.%06d\n" + "%sORIG_RAX: %lx fpvalid: %d\n" + "%s R15: %016lx R14: %016lx\n" + "%s R13: %016lx R12: %016lx\n" + "%s RBP: %016lx RBX: %016lx\n" + "%s R11: %016lx R10: %016lx\n" + "%s R9: %016lx R8: %016lx\n" + "%s RAX: %016lx RCX: %016lx\n" + "%s RDX: %016lx RSI: %016lx\n" + "%s RDI: %016lx RIP: %016lx\n" + "%s RFLAGS: %016lx RSP: %016lx\n" + "%s FS_BASE: %016lx\n" + "%s GS_BASE: %016lx\n" + "%s CS: %04lx SS: %04lx DS: %04lx\n" + "%s ES: %04lx FS: %04lx GS: %04lx\n", + space(sp), pr->si_signo, pr->si_code, pr->si_errno, + space(sp), pr->cursig, pr->sigpend, pr->sighold, + space(sp), pr->pid, pr->ppid, pr->pgrp, pr->sid, + space(sp), (long long)pr->utime.tv_sec, (int)pr->utime.tv_usec, + (long long)pr->stime.tv_sec, (int)pr->stime.tv_usec, + space(sp), (long long)pr->cutime.tv_sec, (int)pr->cutime.tv_usec, + (long long)pr->cstime.tv_sec, (int)pr->cstime.tv_usec, + space(sp), pr->regs.orig_rax, pr->fpvalid, + space(sp), pr->regs.r15, pr->regs.r14, + space(sp), pr->regs.r13, pr->regs.r12, + space(sp), pr->regs.rbp, pr->regs.rbx, + space(sp), pr->regs.r11, pr->regs.r10, + space(sp), pr->regs.r9, pr->regs.r8, + space(sp), pr->regs.rax, pr->regs.rcx, + space(sp), pr->regs.rdx, pr->regs.rsi, + space(sp), pr->regs.rdi, pr->regs.rip, + space(sp), pr->regs.eflags, pr->regs.rsp, + space(sp), pr->regs.fs_base, + space(sp), pr->regs.gs_base, + space(sp), pr->regs.cs, pr->regs.ss, pr->regs.ds, + space(sp), pr->regs.es, pr->regs.fs, pr->regs.gs); +} + +struct x86_user_regs_struct { + unsigned long ebx,ecx,edx,esi,edi,ebp,eax; + unsigned long ds,es,fs,gs,orig_eax; + unsigned long eip,cs,eflags; + unsigned long esp,ss; +}; + +struct x86_prstatus { + int si_signo; + int si_code; + int si_errno; + short cursig; + unsigned long sigpend; + unsigned 
long sighold; + int pid; + int ppid; + int pgrp; + int sid; + struct timeval utime; + struct timeval stime; + struct timeval cutime; + struct timeval cstime; + struct x86_user_regs_struct regs; + int fpvalid; +}; + +static void +display_prstatus_x86(void *note_ptr, FILE *ofp) +{ + struct x86_prstatus *pr; + Elf32_Nhdr *note; + int sp; + + note = (Elf32_Nhdr *)note_ptr; + pr = (struct x86_prstatus *)( + (char *)note + sizeof(Elf32_Nhdr) + note->n_namesz); + pr = (struct x86_prstatus *)roundup((ulong)pr, 4); + sp = nd->num_prstatus_notes ? 25 : 22; + + fprintf(ofp, + "%ssi.signo: %d si.code: %d si.errno: %d\n" + "%scursig: %d sigpend: %lx sighold : %lx\n" + "%spid: %d ppid: %d pgrp: %d sid: %d\n" + "%sutime: %01lld.%06d stime: %01lld.%06d\n" + "%scutime: %01lld.%06d cstime: %01lld.%06d\n" + "%sORIG_EAX: %lx fpvalid: %d\n" + "%s EBX: %08lx ECX: %08lx\n" + "%s EDX: %08lx ESI: %08lx\n" + "%s EDI: %08lx EBP: %08lx\n" + "%s EAX: %08lx EIP: %08lx\n" + "%s EFLAGS: %08lx ESP: %08lx\n" + "%s DS: %04lx ES: %04lx FS: %04lx\n" + "%s GS: %04lx CS: %04lx SS: %04lx\n", + space(sp), pr->si_signo, pr->si_code, pr->si_errno, + space(sp), pr->cursig, pr->sigpend, pr->sighold, + space(sp), pr->pid, pr->ppid, pr->pgrp, pr->sid, + space(sp), (long long)pr->utime.tv_sec, (int)pr->utime.tv_usec, + (long long)pr->stime.tv_sec, (int)pr->stime.tv_usec, + space(sp), (long long)pr->cutime.tv_sec, (int)pr->cutime.tv_usec, + (long long)pr->cstime.tv_sec, (int)pr->cstime.tv_usec, + space(sp), pr->regs.orig_eax, pr->fpvalid, + space(sp), pr->regs.ebx, pr->regs.ecx, + space(sp), pr->regs.edx, pr->regs.esi, + space(sp), pr->regs.edi, pr->regs.ebp, + space(sp), pr->regs.eax, pr->regs.eip, + space(sp), pr->regs.eflags, pr->regs.esp, + space(sp), pr->regs.ds, pr->regs.es, pr->regs.fs, + space(sp), pr->regs.gs, pr->regs.cs, pr->regs.ss); +} + +static void +display_qemu_x86_64(void *note_ptr, FILE *ofp) +{ + int i, sp; + Elf64_Nhdr *note; + QEMUCPUState *ptr; + QEMUCPUSegment *seg; + char *seg_names[] = 
{"CS", "DS", "ES", "FS", "GS", "SS", "LDT", "TR", + "GDT", "IDT"}; + + note = (Elf64_Nhdr *)note_ptr; + ptr = (QEMUCPUState *)( + (char *)note + sizeof(Elf64_Nhdr) + note->n_namesz); + ptr = (QEMUCPUState *)roundup((ulong)ptr, 4); + seg = &(ptr->cs); + sp = VMCORE_VALID()? 25 : 22; + + fprintf(ofp, + "%sversion: %d size: %d\n" + "%sRAX: %016llx RBX: %016llx\n" + "%sRCX: %016llx RDX: %016llx\n" + "%sRSI: %016llx RDI: %016llx\n" + "%sRSP: %016llx RBP: %016llx\n" + "%sRIP: %016llx RFLAGS: %016llx\n" + "%s R8: %016llx R9: %016llx\n" + "%sR10: %016llx R11: %016llx\n" + "%sR12: %016llx R13: %016llx\n" + "%sR14: %016llx R15: %016llx\n", + space(sp), ptr->version, ptr->size, + space(sp), (ulonglong)ptr->rax, (ulonglong)ptr->rbx, + space(sp), (ulonglong)ptr->rcx, (ulonglong)ptr->rdx, + space(sp), (ulonglong)ptr->rsi, (ulonglong)ptr->rdi, + space(sp), (ulonglong)ptr->rsp, (ulonglong)ptr->rbp, + space(sp), (ulonglong)ptr->rip, (ulonglong)ptr->rflags, + space(sp), (ulonglong)ptr->r8, (ulonglong)ptr->r9, + space(sp), (ulonglong)ptr->r10, (ulonglong)ptr->r11, + space(sp), (ulonglong)ptr->r12, (ulonglong)ptr->r13, + space(sp), (ulonglong)ptr->r14, (ulonglong)ptr->r15); + + for (i = 0; i < sizeof(seg_names)/sizeof(seg_names[0]); i++) { + fprintf(ofp, "%s%s", space(sp), strlen(seg_names[i]) > 2 ? 
"" : " "); + fprintf(ofp, + "%s: " + "selector: %04x limit: %08x flags: %08x\n" + "%spad: %08x base: %016llx\n", + seg_names[i], + seg->selector, seg->limit, seg->flags, + space(sp+5), seg->pad, (ulonglong)seg->base); + seg++; + } + + fprintf(ofp, + "%sCR0: %016llx CR1: %016llx\n" + "%sCR2: %016llx CR3: %016llx\n" + "%sCR4: %016llx\n", + space(sp), (ulonglong)ptr->cr[0], (ulonglong)ptr->cr[1], + space(sp), (ulonglong)ptr->cr[2], (ulonglong)ptr->cr[3], + space(sp), (ulonglong)ptr->cr[4]); +} + +static void +display_qemu_x86(void *note_ptr, FILE *ofp) +{ + int i, sp; + Elf32_Nhdr *note; + QEMUCPUState *ptr; + QEMUCPUSegment *seg; + char *seg_names[] = {"CS", "DS", "ES", "FS", "GS", "SS", "LDT", "TR", + "GDT", "IDT"}; + + note = (Elf32_Nhdr *)note_ptr; + ptr = (QEMUCPUState *)( + (char *)note + sizeof(Elf32_Nhdr) + note->n_namesz); + ptr = (QEMUCPUState *)roundup((ulong)ptr, 4); + seg = &(ptr->cs); + sp = VMCORE_VALID()? 25 : 22; + + fprintf(ofp, + "%sversion: %d size: %d\n" + "%sEAX: %016llx EBX: %016llx\n" + "%sECX: %016llx EDX: %016llx\n" + "%sESI: %016llx EDI: %016llx\n" + "%sESP: %016llx EBP: %016llx\n" + "%sEIP: %016llx EFLAGS: %016llx\n", + space(sp), ptr->version, ptr->size, + space(sp), (ulonglong)ptr->rax, (ulonglong)ptr->rbx, + space(sp), (ulonglong)ptr->rcx, (ulonglong)ptr->rdx, + space(sp), (ulonglong)ptr->rsi, (ulonglong)ptr->rdi, + space(sp), (ulonglong)ptr->rsp, (ulonglong)ptr->rbp, + space(sp), (ulonglong)ptr->rip, (ulonglong)ptr->rflags); + + for(i = 0; i < sizeof(seg_names)/sizeof(seg_names[0]); i++) { + fprintf(ofp, "%s%s", space(sp), strlen(seg_names[i]) > 2 ? 
"" : " "); + fprintf(ofp, + "%s: " + "selector: %04x limit: %08x flags: %08x\n" + "%spad: %08x base: %016llx\n", + seg_names[i], + seg->selector, seg->limit, seg->flags, + space(sp+5), + seg->pad, (ulonglong)seg->base); + seg++; + } + + fprintf(ofp, + "%sCR0: %016llx CR1: %016llx\n" + "%sCR2: %016llx CR3: %016llx\n" + "%sCR4: %016llx\n", + space(sp), (ulonglong)ptr->cr[0], (ulonglong)ptr->cr[1], + space(sp), (ulonglong)ptr->cr[2], (ulonglong)ptr->cr[3], + space(sp), (ulonglong)ptr->cr[4]); +} + +static void +display_prstatus_ppc64(void *note_ptr, FILE *ofp) +{ + struct ppc64_elf_prstatus *pr; + Elf64_Nhdr *note; + int sp; + + note = (Elf64_Nhdr *)note_ptr; + pr = (struct ppc64_elf_prstatus *)( + (char *)note + sizeof(Elf64_Nhdr) + note->n_namesz); + pr = (struct ppc64_elf_prstatus *)roundup((ulong)pr, 4); + sp = nd->num_prstatus_notes ? 25 : 22; + + fprintf(ofp, + "%ssi.signo: %d si.code: %d si.errno: %d\n" + "%scursig: %d sigpend: %lx sighold: %lx\n" + "%spid: %d ppid: %d pgrp: %d sid:%d\n" + "%sutime: %01lld.%06d stime: %01lld.%06d\n" + "%scutime: %01lld.%06d cstime: %01lld.%06d\n" + "%s R0: %016lx R1: %016lx R2: %016lx\n" + "%s R3: %016lx R4: %016lx R5: %016lx\n" + "%s R6: %016lx R7: %016lx R8: %016lx\n" + "%s R9: %016lx R10: %016lx R11: %016lx\n" + "%sR12: %016lx R13: %016lx R14: %016lx\n" + "%sR15: %016lx R16: %016lx R17: %016lx\n" + "%sR18: %016lx R19: %016lx R20: %016lx\n" + "%sR21: %016lx R22: %016lx R23: %016lx\n" + "%sR24: %016lx R25: %016lx R26: %016lx\n" + "%sR27: %016lx R28: %016lx R29: %016lx\n" + "%sR30: %016lx R31: %016lx\n" + "%s NIP: %016lx MSR: %016lx\n" + "%sOGPR3: %016lx CTR: %016lx\n" + "%s LINK: %016lx XER: %016lx\n" + "%s CCR: %016lx MQ: %016lx\n" + "%s TRAP: %016lx DAR: %016lx\n" + "%sDSISR: %016lx RESULT: %016lx\n", + space(sp), pr->pr_info.si_signo, pr->pr_info.si_code, pr->pr_info.si_errno, + space(sp), pr->pr_cursig, pr->pr_sigpend, pr->pr_sighold, + space(sp), pr->pr_pid, pr->pr_ppid, pr->pr_pgrp, pr->pr_sid, + space(sp), (long 
long)pr->pr_utime.tv_sec, (int)pr->pr_utime.tv_usec, + (long long)pr->pr_stime.tv_sec, (int)pr->pr_stime.tv_usec, + space(sp), (long long)pr->pr_cutime.tv_sec, (int)pr->pr_cutime.tv_usec, + (long long)pr->pr_cstime.tv_sec, (int)pr->pr_cstime.tv_usec, + space(sp), pr->pr_reg.gpr[0], pr->pr_reg.gpr[1], pr->pr_reg.gpr[2], + space(sp), pr->pr_reg.gpr[3], pr->pr_reg.gpr[4], pr->pr_reg.gpr[5], + space(sp), pr->pr_reg.gpr[6], pr->pr_reg.gpr[7], pr->pr_reg.gpr[8], + space(sp), pr->pr_reg.gpr[9], pr->pr_reg.gpr[10], pr->pr_reg.gpr[11], + space(sp), pr->pr_reg.gpr[12], pr->pr_reg.gpr[13], pr->pr_reg.gpr[14], + space(sp), pr->pr_reg.gpr[15], pr->pr_reg.gpr[16], pr->pr_reg.gpr[17], + space(sp), pr->pr_reg.gpr[18], pr->pr_reg.gpr[19], pr->pr_reg.gpr[20], + space(sp), pr->pr_reg.gpr[21], pr->pr_reg.gpr[22], pr->pr_reg.gpr[23], + space(sp), pr->pr_reg.gpr[24], pr->pr_reg.gpr[25], pr->pr_reg.gpr[26], + space(sp), pr->pr_reg.gpr[27], pr->pr_reg.gpr[28], pr->pr_reg.gpr[29], + space(sp), pr->pr_reg.gpr[30], pr->pr_reg.gpr[31], + space(sp), pr->pr_reg.nip, pr->pr_reg.msr, + space(sp), pr->pr_reg.orig_gpr3, pr->pr_reg.ctr, + space(sp), pr->pr_reg.link, pr->pr_reg.xer, + space(sp), pr->pr_reg.ccr, pr->pr_reg.mq, + space(sp), pr->pr_reg.trap, pr->pr_reg.dar, + space(sp), pr->pr_reg.dsisr, pr->pr_reg.result); +} + +struct arm64_elf_siginfo { + int si_signo; + int si_code; + int si_errno; +}; + +struct arm64_elf_prstatus { + struct arm64_elf_siginfo pr_info; + short pr_cursig; + unsigned long pr_sigpend; + unsigned long pr_sighold; + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + struct timeval pr_utime; + struct timeval pr_stime; + struct timeval pr_cutime; + struct timeval pr_cstime; +/* arm64_elf_gregset_t pr_reg; -> typedef unsigned long [34] arm64_elf_gregset_t */ + unsigned long pr_reg[34]; + int pr_fpvalid; +}; + +/* + Note that the ARM64 elf_gregset_t includes the 31 numbered registers + plus the sp, pc and pstate: + + typedef unsigned long [34] elf_gregset_t; + 
+ struct pt_regs { + union { + struct user_pt_regs user_regs; + struct { + u64 regs[31]; + u64 sp; + u64 pc; + u64 pstate; + }; + }; + u64 orig_x0; + u64 syscallno; + } +*/ + +static void +display_prstatus_arm64(void *note_ptr, FILE *ofp) +{ + struct arm64_elf_prstatus *pr; + Elf64_Nhdr *note; + int sp; + + note = (Elf64_Nhdr *)note_ptr; + pr = (struct arm64_elf_prstatus *)( + (char *)note + sizeof(Elf64_Nhdr) + note->n_namesz); + pr = (struct arm64_elf_prstatus *)roundup((ulong)pr, 4); + sp = nd->num_prstatus_notes ? 25 : 22; + + fprintf(ofp, + "%ssi.signo: %d si.code: %d si.errno: %d\n" + "%scursig: %d sigpend: %lx sighold: %lx\n" + "%spid: %d ppid: %d pgrp: %d sid:%d\n" + "%sutime: %01lld.%06d stime: %01lld.%06d\n" + "%scutime: %01lld.%06d cstime: %01lld.%06d\n", + space(sp), pr->pr_info.si_signo, pr->pr_info.si_code, pr->pr_info.si_errno, + space(sp), pr->pr_cursig, pr->pr_sigpend, pr->pr_sighold, + space(sp), pr->pr_pid, pr->pr_ppid, pr->pr_pgrp, pr->pr_sid, + space(sp), (long long)pr->pr_utime.tv_sec, (int)pr->pr_utime.tv_usec, + (long long)pr->pr_stime.tv_sec, (int)pr->pr_stime.tv_usec, + space(sp), (long long)pr->pr_cutime.tv_sec, (int)pr->pr_cutime.tv_usec, + (long long)pr->pr_cstime.tv_sec, (int)pr->pr_cstime.tv_usec); + fprintf(ofp, + "%s X0: %016lx X1: %016lx X2: %016lx\n" + "%s X3: %016lx X4: %016lx X5: %016lx\n" + "%s X6: %016lx X7: %016lx X8: %016lx\n" + "%s X9: %016lx X10: %016lx X11: %016lx\n" + "%sX12: %016lx X13: %016lx X14: %016lx\n" + "%sX15: %016lx X16: %016lx X17: %016lx\n" + "%sX18: %016lx X19: %016lx X20: %016lx\n" + "%sX21: %016lx X22: %016lx X23: %016lx\n" + "%sX24: %016lx X25: %016lx X26: %016lx\n" + "%sX27: %016lx X28: %016lx X29: %016lx\n" + "%s LR: %016lx SP: %016lx PC: %016lx\n" + "%sPSTATE: %08lx FPVALID: %08x\n", + space(sp), pr->pr_reg[0], pr->pr_reg[1], pr->pr_reg[2], + space(sp), pr->pr_reg[3], pr->pr_reg[4], pr->pr_reg[5], + space(sp), pr->pr_reg[6], pr->pr_reg[7], pr->pr_reg[8], + space(sp), pr->pr_reg[9], pr->pr_reg[10], 
pr->pr_reg[11], + space(sp), pr->pr_reg[12], pr->pr_reg[13], pr->pr_reg[14], + space(sp), pr->pr_reg[15], pr->pr_reg[16], pr->pr_reg[17], + space(sp), pr->pr_reg[18], pr->pr_reg[19], pr->pr_reg[20], + space(sp), pr->pr_reg[21], pr->pr_reg[22], pr->pr_reg[23], + space(sp), pr->pr_reg[24], pr->pr_reg[25], pr->pr_reg[26], + space(sp), pr->pr_reg[27], pr->pr_reg[28], pr->pr_reg[29], + space(sp), pr->pr_reg[30], pr->pr_reg[31], pr->pr_reg[32], + space(sp), pr->pr_reg[33], pr->pr_fpvalid); +} + + +void +display_ELF_note(int machine, int type, void *note, FILE *ofp) +{ + if (note == NULL) + return; + + switch (machine) + { + case EM_386: + switch (type) + { + case PRSTATUS_NOTE: + display_prstatus_x86(note, ofp); + break; + case QEMU_NOTE: + display_qemu_x86(note, ofp); + break; + } + break; + + case EM_X86_64: + switch (type) + { + case PRSTATUS_NOTE: + display_prstatus_x86_64(note, ofp); + break; + case QEMU_NOTE: + display_qemu_x86_64(note, ofp); + break; + } + break; + + case EM_PPC64: + switch (type) + { + case PRSTATUS_NOTE: + display_prstatus_ppc64(note, ofp); + break; + } + break; + + case EM_AARCH64: + switch (type) + { + case PRSTATUS_NOTE: + display_prstatus_arm64(note, ofp); + break; + } + break; + + default: + return; + } +} + +void +get_netdump_regs_x86_64(struct bt_info *bt, ulong *ripp, ulong *rspp) +{ + Elf64_Nhdr *note; + size_t len; + char *user_regs; + ulong regs_size, rsp_offset, rip_offset; + ulong rip, rsp; + + if (is_task_active(bt->task)) + bt->flags |= BT_DUMPFILE_SEARCH; + + if (((NETDUMP_DUMPFILE() || KDUMP_DUMPFILE()) && + VALID_STRUCT(user_regs_struct) && + ((bt->task == tt->panic_task) || (pc->flags2 & QEMU_MEM_DUMP_ELF))) || + (KDUMP_DUMPFILE() && (kt->flags & DWARF_UNWIND) && + (bt->flags & BT_DUMPFILE_SEARCH))) { + if (nd->num_prstatus_notes > 1) + note = (Elf64_Nhdr *) + nd->nt_prstatus_percpu[bt->tc->processor]; + else + note = (Elf64_Nhdr *)nd->nt_prstatus; + + if (!note) + goto no_nt_prstatus_exists; + + len = sizeof(Elf64_Nhdr); + 
len = roundup(len + note->n_namesz, 4); + len = roundup(len + note->n_descsz, 4); + + regs_size = VALID_STRUCT(user_regs_struct) ? + SIZE(user_regs_struct) : + sizeof(struct x86_64_user_regs_struct); + rsp_offset = VALID_MEMBER(user_regs_struct_rsp) ? + OFFSET(user_regs_struct_rsp) : + offsetof(struct x86_64_user_regs_struct, rsp); + rip_offset = VALID_MEMBER(user_regs_struct_rip) ? + OFFSET(user_regs_struct_rip) : + offsetof(struct x86_64_user_regs_struct, rip); + + user_regs = ((char *)note + len) - regs_size - sizeof(long); + rsp = ULONG(user_regs + rsp_offset); + rip = ULONG(user_regs + rip_offset); + + if (INSTACK(rsp, bt) || + in_alternate_stack(bt->tc->processor, rsp)) { + if (CRASHDEBUG(1)) + netdump_print("ELF prstatus rsp: %lx rip: %lx\n", + rsp, rip); + + if (KDUMP_DUMPFILE()) { + *rspp = rsp; + *ripp = rip; + + if (*ripp && *rspp) + bt->flags |= BT_KDUMP_ELF_REGS; + } + + bt->machdep = (void *)user_regs; + } + } + + if (ELF_NOTES_VALID() && + (bt->flags & BT_DUMPFILE_SEARCH) && DISKDUMP_DUMPFILE() && + (note = (Elf64_Nhdr *) + diskdump_get_prstatus_percpu(bt->tc->processor))) { + + if (!note) + goto no_nt_prstatus_exists; + + user_regs = get_regs_from_note((char *)note, &rip, &rsp); + + if (INSTACK(rsp, bt) || + in_alternate_stack(bt->tc->processor, rsp)) { + if (CRASHDEBUG(1)) + netdump_print("ELF prstatus rsp: %lx rip: %lx\n", + rsp, rip); + + *rspp = rsp; + *ripp = rip; + + if (*ripp && *rspp) + bt->flags |= BT_KDUMP_ELF_REGS; + + bt->machdep = (void *)user_regs; + } + } + +no_nt_prstatus_exists: + machdep->get_stack_frame(bt, ripp, rspp); +} + +/* + * Netdump doesn't save state of the active tasks in the TSS, so poke around + * the raw stack for some reasonable hooks. 
+ */ + +void +get_netdump_regs_x86(struct bt_info *bt, ulong *eip, ulong *esp) +{ + int i, search, panic, panic_task, altered; + char *sym; + ulong *up; + ulong ipintr_eip, ipintr_esp, ipintr_func; + ulong halt_eip, halt_esp, panic_eip, panic_esp; + int check_hardirq, check_softirq; + ulong stackbase, stacktop; + Elf32_Nhdr *note; + char *user_regs ATTRIBUTE_UNUSED; + ulong ip, sp; + + if (!is_task_active(bt->task)) { + machdep->get_stack_frame(bt, eip, esp); + return; + } + + panic_task = tt->panic_task == bt->task ? TRUE : FALSE; + + ipintr_eip = ipintr_esp = ipintr_func = panic = altered = 0; + halt_eip = halt_esp = panic_eip = panic_esp = 0; + check_hardirq = check_softirq = tt->flags & IRQSTACKS ? TRUE : FALSE; + search = ((bt->flags & BT_TEXT_SYMBOLS) && (tt->flags & TASK_INIT_DONE)) + || (machdep->flags & OMIT_FRAME_PTR); + stackbase = bt->stackbase; + stacktop = bt->stacktop; +retry: + for (i = 0, up = (ulong *)bt->stackbuf; i < LONGS_PER_STACK; i++, up++){ + sym = closest_symbol(*up); + + if (XEN_CORE_DUMPFILE()) { + if (STREQ(sym, "xen_machine_kexec")) { + *eip = *up; + *esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); + return; + } + if (STREQ(sym, "crash_kexec")) { + halt_eip = *up; + halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); + } + } else if (STREQ(sym, "netconsole_netdump") || + STREQ(sym, "netpoll_start_netdump") || + STREQ(sym, "start_disk_dump") || + (STREQ(sym, "crash_kexec") && !KVMDUMP_DUMPFILE()) || + STREQ(sym, "disk_dump")) { +crash_kexec: + *eip = *up; + *esp = search ? + bt->stackbase + ((char *)(up+1) - bt->stackbuf) : + *(up-1); + return; + } + + if (STREQ(sym, "panic")) { + *eip = *up; + *esp = search ? + bt->stackbase + ((char *)(up+1) - bt->stackbuf) : + *(up-1); + panic_eip = *eip; + panic_esp = *esp; + panic = TRUE; + continue; /* keep looking for die */ + } + + if (STREQ(sym, "die")) { + *eip = *up; + *esp = search ? 
+ bt->stackbase + ((char *)(up+1) - bt->stackbuf) : + *(up-1); + for (i++, up++; i < LONGS_PER_STACK; i++, up++) { + sym = closest_symbol(*up); + if (STREQ(sym, "sysrq_handle_crash")) + goto next_sysrq; + } + return; + } + + if (STREQ(sym, "sysrq_handle_crash")) { +next_sysrq: + *eip = *up; + *esp = bt->stackbase + ((char *)(up+4) - bt->stackbuf); + pc->flags |= SYSRQ; + for (i++, up++; i < LONGS_PER_STACK; i++, up++) { + sym = closest_symbol(*up); + if (STREQ(sym, "crash_kexec") && !KVMDUMP_DUMPFILE()) + goto crash_kexec; + if (STREQ(sym, "sysrq_handle_crash")) + goto next_sysrq; + } + if (!panic) + return; + } + + /* + * Obsolete -- replaced by sysrq_handle_crash + */ + if (STREQ(sym, "sysrq_handle_netdump")) { + *eip = *up; + *esp = search ? + bt->stackbase + ((char *)(up+1) - bt->stackbuf) : + *(up-1); + pc->flags |= SYSRQ; + return; + } + + if (STREQ(sym, "crash_nmi_callback")) { + *eip = *up; + *esp = search ? + bt->stackbase + ((char *)(up+1) - bt->stackbuf) : + *(up-1); + return; + } + + if (STREQ(sym, "stop_this_cpu")) { + *eip = *up; + *esp = search ? + bt->stackbase + ((char *)(up+1) - bt->stackbuf) : + *(up-1); + return; + } + + if (STREQ(sym, "smp_call_function_interrupt")) { + if (ipintr_eip && IS_VMALLOC_ADDR(ipintr_func) && + IS_KERNEL_STATIC_TEXT(*(up - 2))) + continue; + ipintr_eip = *up; + ipintr_esp = search ? 
+ bt->stackbase + ((char *)(up+1) - bt->stackbuf) : + bt->stackbase + ((char *)(up-1) - bt->stackbuf); + ipintr_func = *(up - 2); + } + + if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && + STREQ(sym, "safe_halt")) { + halt_eip = *up; + halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); + } + + if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && + !halt_eip && STREQ(sym, "xen_idle")) { + halt_eip = *up; + halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); + } + } + + if (panic) { + *eip = panic_eip; + *esp = panic_esp; + return; + } + + if (ipintr_eip) { + *eip = ipintr_eip; + *esp = ipintr_esp; + return; + } + + if (halt_eip && halt_esp) { + *eip = halt_eip; + *esp = halt_esp; + return; + } + + bt->flags &= ~(BT_HARDIRQ|BT_SOFTIRQ); + + if (check_hardirq && + (tt->hardirq_tasks[bt->tc->processor] == bt->tc->task)) { + bt->stackbase = tt->hardirq_ctx[bt->tc->processor]; + bt->stacktop = bt->stackbase + STACKSIZE(); + alter_stackbuf(bt); + bt->flags |= BT_HARDIRQ; + check_hardirq = FALSE; + altered = TRUE; + goto retry; + } + + if (check_softirq && + (tt->softirq_tasks[bt->tc->processor] == bt->tc->task)) { + bt->stackbase = tt->softirq_ctx[bt->tc->processor]; + bt->stacktop = bt->stackbase + STACKSIZE(); + alter_stackbuf(bt); + bt->flags |= BT_SOFTIRQ; + check_softirq = FALSE; + altered = TRUE; + goto retry; + } + + if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE() && + (note = (Elf32_Nhdr *) + diskdump_get_prstatus_percpu(bt->tc->processor))) { + user_regs = get_regs_from_note((char *)note, &ip, &sp); + if (is_kernel_text(ip) && + (((sp >= GET_STACKBASE(bt->task)) && + (sp < GET_STACKTOP(bt->task))) || + in_alternate_stack(bt->tc->processor, sp))) { + bt->flags |= BT_KERNEL_SPACE; + *eip = ip; + *esp = sp; + return; + } + + if (!is_kernel_text(ip) && in_user_stack(bt->tc->task, sp)) { + bt->flags |= BT_USER_SPACE; + *eip = ip; + *esp = sp; + return; + } + } + + if (CRASHDEBUG(1)) + error(INFO, + "get_netdump_regs_x86: cannot 
find anything useful (task: %lx)\n", bt->task); + + if (altered) { + bt->stackbase = stackbase; + bt->stacktop = stacktop; + alter_stackbuf(bt); + } + + if (XEN_CORE_DUMPFILE() && !panic_task && is_task_active(bt->task) && + !(bt->flags & (BT_TEXT_SYMBOLS_ALL|BT_TEXT_SYMBOLS))) + error(FATAL, + "starting backtrace locations of the active (non-crashing) " + "xen tasks\n cannot be determined: try -t or -T options\n"); + + if (KVMDUMP_DUMPFILE() || SADUMP_DUMPFILE()) + bt->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH; + + machdep->get_stack_frame(bt, eip, esp); +} + +static void +get_netdump_regs_32(struct bt_info *bt, ulong *eip, ulong *esp) +{ + Elf32_Nhdr *note; + size_t len; + + if ((bt->task == tt->panic_task) || + (is_task_active(bt->task) && nd->num_prstatus_notes)) { + /* + * Registers are saved during the dump process for the + * panic task. Whereas in kdump, regs are captured for all + * CPUs if they responded to an IPI. + */ + if (nd->num_prstatus_notes > 1) { + if (!nd->nt_prstatus_percpu[bt->tc->processor]) + error(FATAL, + "cannot determine NT_PRSTATUS ELF note " + "for %s task: %lx\n", + (bt->task == tt->panic_task) ? 
+ "panic" : "active", bt->task); + note = (Elf32_Nhdr *) + nd->nt_prstatus_percpu[bt->tc->processor]; + } else + note = (Elf32_Nhdr *)nd->nt_prstatus; + + if (!note) + goto no_nt_prstatus_exists; + + len = sizeof(Elf32_Nhdr); + len = roundup(len + note->n_namesz, 4); + bt->machdep = (void *)((char *)note + len + + MEMBER_OFFSET("elf_prstatus", "pr_reg")); + } + +no_nt_prstatus_exists: + machdep->get_stack_frame(bt, eip, esp); +} + +static void +get_netdump_regs_ppc(struct bt_info *bt, ulong *eip, ulong *esp) +{ + ppc_relocate_nt_prstatus_percpu(nd->nt_prstatus_percpu, + &nd->num_prstatus_notes); + + get_netdump_regs_32(bt, eip, esp); +} + +static void +get_netdump_regs_ppc64(struct bt_info *bt, ulong *eip, ulong *esp) +{ + Elf64_Nhdr *note; + size_t len; + + if ((bt->task == tt->panic_task) || + (is_task_active(bt->task) && nd->num_prstatus_notes > 1)) { + /* + * Registers are saved during the dump process for the + * panic task. Whereas in kdump, regs are captured for all + * CPUs if they responded to an IPI. + */ + if (nd->num_prstatus_notes > 1) { + if (!nd->nt_prstatus_percpu[bt->tc->processor]) + error(FATAL, + "cannot determine NT_PRSTATUS ELF note " + "for %s task: %lx\n", + (bt->task == tt->panic_task) ? 
+ "panic" : "active", bt->task); + note = (Elf64_Nhdr *) + nd->nt_prstatus_percpu[bt->tc->processor]; + } else + note = (Elf64_Nhdr *)nd->nt_prstatus; + + if (!note) + goto no_nt_prstatus_exists; + + len = sizeof(Elf64_Nhdr); + len = roundup(len + note->n_namesz, 4); + bt->machdep = (void *)((char *)note + len + + MEMBER_OFFSET("elf_prstatus", "pr_reg")); + } + +no_nt_prstatus_exists: + machdep->get_stack_frame(bt, eip, esp); +} + +static void +get_netdump_regs_arm(struct bt_info *bt, ulong *eip, ulong *esp) +{ + machdep->get_stack_frame(bt, eip, esp); +} + +static void +get_netdump_regs_arm64(struct bt_info *bt, ulong *eip, ulong *esp) +{ + machdep->get_stack_frame(bt, eip, esp); +} + +static void +get_netdump_regs_mips(struct bt_info *bt, ulong *eip, ulong *esp) +{ + machdep->get_stack_frame(bt, eip, esp); +} + +int +is_partial_netdump(void) +{ + return (nd->flags & PARTIAL_DUMP ? TRUE : FALSE); +} + + +/* + * kexec/kdump generated vmcore files are similar enough in + * nature to netdump/diskdump such that most vmcore access + * functionality may be borrowed from the equivalent netdump + * function. If not, re-work them here. 
+ */ +int +is_kdump(char *file, ulong source_query) +{ + return is_netdump(file, source_query); +} + +int +kdump_init(char *unused, FILE *fptr) +{ + return netdump_init(unused, fptr); +} + +ulong +get_kdump_panic_task(void) +{ + return get_netdump_panic_task(); +} + +int +read_kdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) +{ + physaddr_t paddr_in = paddr; + + if ((nd->flags & QEMU_MEM_DUMP_KDUMP_BACKUP) && + (paddr >= nd->backup_src_start) && + (paddr < nd->backup_src_start + nd->backup_src_size)) { + + paddr += nd->backup_offset - nd->backup_src_start; + + if (CRASHDEBUG(1)) + error(INFO, + "qemu_mem_dump: kdump backup region: %#llx => %#llx\n", + paddr_in, paddr); + } + + if (XEN_CORE_DUMPFILE() && !XEN_HYPER_MODE()) { + if ((paddr = xen_kdump_p2m(paddr)) == P2M_FAILURE) { + if (CRASHDEBUG(8)) + fprintf(fp, "read_kdump: xen_kdump_p2m(%llx): " + "P2M_FAILURE\n", (ulonglong)paddr_in); + return READ_ERROR; + } + if (CRASHDEBUG(8)) + fprintf(fp, "read_kdump: xen_kdump_p2m(%llx): %llx\n", + (ulonglong)paddr_in, (ulonglong)paddr); + } + + return read_netdump(fd, bufptr, cnt, addr, paddr); +} + +int +write_kdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) +{ + return write_netdump(fd, bufptr, cnt, addr, paddr); +} + +void +get_kdump_regs(struct bt_info *bt, ulong *eip, ulong *esp) +{ + get_netdump_regs(bt, eip, esp); +} + +uint +kdump_page_size(void) +{ + uint pagesz; + + if (!VMCORE_VALID()) + return 0; + + if (!(pagesz = nd->page_size)) + pagesz = (uint)getpagesize(); + + return pagesz; +} + +int +kdump_free_memory(void) +{ + return netdump_free_memory(); +} + +int +kdump_memory_used(void) +{ + return netdump_memory_used(); +} + +int +kdump_memory_dump(FILE *fp) +{ + return netdump_memory_dump(fp); +} + +struct vmcore_data * +get_kdump_vmcore_data(void) +{ + if (!VMCORE_VALID() || !KDUMP_DUMPFILE()) + return NULL; + + return &vmcore_data; +} + +/* + * The following set of functions are not used by the crash + * source code, but 
are available to extension modules for + * gathering register sets from ELF NT_PRSTATUS note sections. + * + * Contributed by: Sharyathi Nagesh (sharyath@in.ibm.com) + */ + +static void *get_ppc_regs_from_elf_notes(struct task_context *); +static void *get_ppc64_regs_from_elf_notes(struct task_context *); +static void *get_x86_regs_from_elf_notes(struct task_context *); +static void *get_x86_64_regs_from_elf_notes(struct task_context *); +static void *get_arm_regs_from_elf_notes(struct task_context *); + +int get_netdump_arch(void) +{ + int e_machine; + + if (nd->elf32) + e_machine = nd->elf32->e_machine; + else if (nd->elf64) + e_machine = nd->elf64->e_machine; + else + e_machine = EM_NONE; + + return e_machine; +} + +int +exist_regs_in_elf_notes(struct task_context *tc) +{ + if ((tc->task == tt->panic_task) || + (is_task_active(tc->task) && (nd->num_prstatus_notes > 1) && + (tc->processor < nd->num_prstatus_notes))) + return TRUE; + else + return FALSE; +} + +void * +get_regs_from_elf_notes(struct task_context *tc) +{ + int e_machine = get_netdump_arch(); + + switch (e_machine) + { + case EM_386: + case EM_PPC: + case EM_PPC64: + case EM_X86_64: + case EM_ARM: + break; + case EM_AARCH64: + error(FATAL, + "get_regs_from_elf_notes: ARM64 support TBD\n"); + default: + error(FATAL, + "support for ELF machine type %d not available\n", + e_machine); + } + + if (!exist_regs_in_elf_notes(tc)) + error(FATAL, "cannot determine register set " + "for active task: %lx comm: \"%s\"\n", + tc->task, tc->comm); + + switch(e_machine) + { + case EM_386: + return get_x86_regs_from_elf_notes(tc); + case EM_PPC: + return get_ppc_regs_from_elf_notes(tc); + case EM_PPC64: + return get_ppc64_regs_from_elf_notes(tc); + case EM_X86_64: + return get_x86_64_regs_from_elf_notes(tc); + case EM_ARM: + return get_arm_regs_from_elf_notes(tc); + case EM_AARCH64: + break; /* TBD */ + } + + return NULL; +} + +static void * +get_x86_regs_from_elf_notes(struct task_context *tc) +{ + Elf32_Nhdr 
*note_32; + Elf64_Nhdr *note_64; + void *note; + size_t len; + void *pt_regs; + + len = 0; + pt_regs = NULL; + + if (nd->num_prstatus_notes > 1) + note = (void *)nd->nt_prstatus_percpu[tc->processor]; + else + note = (void *)nd->nt_prstatus; + + if (!note) + goto no_nt_prstatus_exists; + + if (nd->elf32) { + note_32 = (Elf32_Nhdr *)note; + len = sizeof(Elf32_Nhdr); + len = roundup(len + note_32->n_namesz, 4); + } else if (nd->elf64) { + note_64 = (Elf64_Nhdr *)note; + len = sizeof(Elf64_Nhdr); + len = roundup(len + note_64->n_namesz, 4); + } + + pt_regs = (void *)((char *)note + len + + MEMBER_OFFSET("elf_prstatus", "pr_reg")); + /* NEED TO BE FIXED: Hack to get the proper alignment */ + pt_regs +=4; + +no_nt_prstatus_exists: + return pt_regs; + +} + +static void * +get_x86_64_regs_from_elf_notes(struct task_context *tc) +{ + Elf64_Nhdr *note; + size_t len; + void *pt_regs; + + pt_regs = NULL; + + if (nd->num_prstatus_notes > 1) + note = (Elf64_Nhdr *)nd->nt_prstatus_percpu[tc->processor]; + else + note = (Elf64_Nhdr *)nd->nt_prstatus; + + if (!note) + goto no_nt_prstatus_exists; + + len = sizeof(Elf64_Nhdr); + len = roundup(len + note->n_namesz, 4); + pt_regs = (void *)((char *)note + len + + MEMBER_OFFSET("elf_prstatus", "pr_reg")); + +no_nt_prstatus_exists: + return pt_regs; +} + +static void * +get_ppc_regs_from_elf_notes(struct task_context *tc) +{ + Elf32_Nhdr *note; + size_t len; + void *pt_regs; + extern struct vmcore_data *nd; + + pt_regs = NULL; + + /* + * Registers are always saved during the dump process for the + * panic task. Kdump also captures registers for all CPUs if + * they responded to an IPI. 
+ */ + if (nd->num_prstatus_notes > 1) { + note = (Elf32_Nhdr *)nd->nt_prstatus_percpu[tc->processor]; + } else + note = (Elf32_Nhdr *)nd->nt_prstatus; + + if (!note) + goto no_nt_prstatus_exists; + + len = sizeof(Elf32_Nhdr); + len = roundup(len + note->n_namesz, 4); + pt_regs = (void *)((char *)note + len + + MEMBER_OFFSET("elf_prstatus", "pr_reg")); + +no_nt_prstatus_exists: + return pt_regs; +} + +static void * +get_ppc64_regs_from_elf_notes(struct task_context *tc) +{ + Elf64_Nhdr *note; + size_t len; + void *pt_regs; + extern struct vmcore_data *nd; + + pt_regs = NULL; + + /* + * Registers are always saved during the dump process for the + * panic task. Kdump also captures registers for all CPUs if + * they responded to an IPI. + */ + if (nd->num_prstatus_notes > 1) { + note = (Elf64_Nhdr *)nd->nt_prstatus_percpu[tc->processor]; + } else + note = (Elf64_Nhdr *)nd->nt_prstatus; + + if (!note) + goto no_nt_prstatus_exists; + + len = sizeof(Elf64_Nhdr); + len = roundup(len + note->n_namesz, 4); + pt_regs = (void *)((char *)note + len + + MEMBER_OFFSET("elf_prstatus", "pr_reg")); + +no_nt_prstatus_exists: + return pt_regs; +} + +int +kdump_phys_base(ulong *phys_base) +{ + if (!kdump_kaslr_check()) + return FALSE; + + *phys_base = nd->phys_base; + + return TRUE; +} + +int +kdump_set_phys_base(ulong phys_base) +{ + if (!kdump_kaslr_check()) + return FALSE; + + nd->phys_base = phys_base; + + return TRUE; +} + +/* + * In case of ARM we need to determine correct PHYS_OFFSET from the kdump file. + * This is done by taking lowest physical address (LMA) from given load + * segments. Normally this is the right one. + * + * Alternative would be to store phys_base in VMCOREINFO but current kernel + * kdump doesn't do that yet. 
+ */ +int arm_kdump_phys_base(ulong *phys_base) +{ + struct pt_load_segment *pls; + ulong paddr = ULONG_MAX; + int i; + + for (i = 0; i < nd->num_pt_load_segments; i++) { + pls = &nd->pt_load_segments[i]; + if (pls->phys_start < paddr) + paddr = pls->phys_start; + } + + if (paddr != ULONG_MAX) { + *phys_base = paddr; + return TRUE; + } + return FALSE; +} + +/* + * physical memory size, calculated by given load segments + */ +int +arm_kdump_phys_end(ulong *phys_end) +{ + struct pt_load_segment *pls; + ulong paddr = 0; + int i; + + for (i = 0; i < nd->num_pt_load_segments; i++) { + pls = &nd->pt_load_segments[i]; + if (pls->phys_end > paddr) + paddr = pls->phys_end; + } + + if (paddr != 0) { + *phys_end = paddr; + return TRUE; + } + return FALSE; +} + +static void * +get_arm_regs_from_elf_notes(struct task_context *tc) +{ + Elf32_Nhdr *note_32; + Elf64_Nhdr *note_64; + void *note; + size_t len; + void *pt_regs; + + len = 0; + pt_regs = NULL; + + if (nd->num_prstatus_notes > 1) + note = (void *)nd->nt_prstatus_percpu[tc->processor]; + else + note = (void *)nd->nt_prstatus; + + if (!note) + goto no_nt_prstatus_exists; + + if (nd->elf32) { + note_32 = (Elf32_Nhdr *)note; + len = sizeof(Elf32_Nhdr); + len = roundup(len + note_32->n_namesz, 4); + } else if (nd->elf64) { + note_64 = (Elf64_Nhdr *)note; + len = sizeof(Elf64_Nhdr); + len = roundup(len + note_64->n_namesz, 4); + } + + pt_regs = (void *)((char *)note + len + + MEMBER_OFFSET("elf_prstatus", "pr_reg")); + +no_nt_prstatus_exists: + return pt_regs; +} + +/* + * Read from /proc/kcore. 
+ */ +int +read_proc_kcore(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) +{ + int i; + size_t readcnt; + ulong kvaddr; + Elf32_Phdr *lp32; + Elf64_Phdr *lp64; + off_t offset; + + if (paddr != KCORE_USE_VADDR) { + if (!machdep->verify_paddr(paddr)) { + if (CRASHDEBUG(1)) + error(INFO, "verify_paddr(%lx) failed\n", paddr); + return READ_ERROR; + } + } + + /* + * Unless specified otherwise, turn the physical address into + * a unity-mapped kernel virtual address, which should work + * for 64-bit architectures, and for lowmem access for 32-bit + * architectures. + */ + if (paddr == KCORE_USE_VADDR) + kvaddr = addr; + else + kvaddr = PTOV((ulong)paddr); + + offset = UNINITIALIZED; + readcnt = cnt; + + switch (pkd->flags & (KCORE_ELF32|KCORE_ELF64)) + { + case KCORE_ELF32: + for (i = 0; i < pkd->segments; i++) { + lp32 = pkd->load32 + i; + if ((kvaddr >= lp32->p_vaddr) && + (kvaddr < (lp32->p_vaddr + lp32->p_memsz))) { + offset = (off_t)(kvaddr - lp32->p_vaddr) + + (off_t)lp32->p_offset; + break; + } + } + /* + * If it's not accessible via unity-mapping, check whether + * it's a request for a vmalloc address that can be found + * in the header. + */ + if (pc->curcmd_flags & MEMTYPE_KVADDR) + pc->curcmd_flags &= ~MEMTYPE_KVADDR; + else + break; + + for (i = 0; i < pkd->segments; i++) { + lp32 = pkd->load32 + i; + if ((addr >= lp32->p_vaddr) && + (addr < (lp32->p_vaddr + lp32->p_memsz))) { + offset = (off_t)(addr - lp32->p_vaddr) + + (off_t)lp32->p_offset; + break; + } + } + + break; + + case KCORE_ELF64: + /* + * If KASLR, the PAGE_OFFSET may be unknown early on, so try + * the (hopefully) mapped kernel address first. 
+ */ + if (!(pc->flags & RUNTIME) && + (pc->curcmd_flags & MEMTYPE_KVADDR) && (kvaddr != addr)) { + pc->curcmd_flags &= ~MEMTYPE_KVADDR; + for (i = 0; i < pkd->segments; i++) { + lp64 = pkd->load64 + i; + if ((addr >= lp64->p_vaddr) && + (addr < (lp64->p_vaddr + lp64->p_memsz))) { + offset = (off_t)(addr - lp64->p_vaddr) + + (off_t)lp64->p_offset; + break; + } + } + if (offset != UNINITIALIZED) + break; + } + + for (i = 0; i < pkd->segments; i++) { + lp64 = pkd->load64 + i; + if ((kvaddr >= lp64->p_vaddr) && + (kvaddr < (lp64->p_vaddr + lp64->p_memsz))) { + offset = (off_t)(kvaddr - lp64->p_vaddr) + + (off_t)lp64->p_offset; + break; + } + } + + break; + } + + if (offset == UNINITIALIZED) + return SEEK_ERROR; + + if (lseek(fd, offset, SEEK_SET) != offset) + perror("lseek"); + + if (read(fd, bufptr, readcnt) != readcnt) + return READ_ERROR; + + return cnt; +} + +/* + * place holder -- cannot write to /proc/kcore + */ +int +write_proc_kcore(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) +{ + error(FATAL, "cannot write to /proc/kcore\n"); + return FALSE; +} + +int +is_proc_kcore(char *file, ulong source_query) +{ + if (STREQ(file, "/proc/kcore") || same_file(file, "/proc/kcore")) { + if (!is_netdump(file, source_query)) + error(FATAL, + "cannot translate the ELF header of /proc/kcore\n"); + pkd->flags |= KCORE_LOCAL; + return TRUE; + } else + return FALSE; +} + +int +proc_kcore_init(FILE *fp, int kcore_fd) +{ + if (pkd->flags & (KCORE_ELF32|KCORE_ELF64)) + return TRUE; + + if (BITS32()) + return proc_kcore_init_32(fp, kcore_fd); + else + return proc_kcore_init_64(fp, kcore_fd); +} + +static int +proc_kcore_init_32(FILE *fp, int kcore_fd) +{ + int fd; + Elf32_Ehdr *elf32; + Elf32_Phdr *load32; + Elf32_Phdr *notes32; + char eheader[MAX_KCORE_ELF_HEADER_SIZE]; + char buf[BUFSIZE]; + size_t load_size, notes_size; + + if (kcore_fd == UNUSED) { + if ((fd = open("/proc/kcore", O_RDONLY)) < 0) { + error(INFO, "/proc/kcore: %s\n", strerror(errno)); + return FALSE; 
+ } + } else + fd = kcore_fd; + + if (read(fd, eheader, MAX_KCORE_ELF_HEADER_SIZE) != MAX_KCORE_ELF_HEADER_SIZE) { + sprintf(buf, "/proc/kcore: read"); + perror(buf); + goto bailout; + } + + if (lseek(fd, 0, SEEK_SET) != 0) { + sprintf(buf, "/proc/kcore: lseek"); + perror(buf); + goto bailout; + } + + if (fd != kcore_fd) + close(fd); + + elf32 = (Elf32_Ehdr *)&eheader[0]; + notes32 = (Elf32_Phdr *)&eheader[sizeof(Elf32_Ehdr)]; + load32 = (Elf32_Phdr *)&eheader[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; + + pkd->segments = elf32->e_phnum - 1; + + notes_size = load_size = 0; + if (notes32->p_type == PT_NOTE) + notes_size = notes32->p_offset + notes32->p_filesz; + if (notes32->p_type == PT_LOAD) + load_size = (ulong)(load32+(elf32->e_phnum)) - (ulong)elf32; + pkd->header_size = MAX(notes_size, load_size); + if (!pkd->header_size) + pkd->header_size = MAX_KCORE_ELF_HEADER_SIZE; + + if ((pkd->elf_header = (char *)malloc(pkd->header_size)) == NULL) { + error(INFO, "/proc/kcore: cannot malloc ELF header buffer\n"); + clean_exit(1); + } + + BCOPY(&eheader[0], &pkd->elf_header[0], pkd->header_size); + pkd->notes32 = (Elf32_Phdr *)&pkd->elf_header[sizeof(Elf32_Ehdr)]; + pkd->load32 = (Elf32_Phdr *) + &pkd->elf_header[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; + pkd->flags |= KCORE_ELF32; + + kcore_memory_dump(CRASHDEBUG(1) ? 
fp : pc->nullfp); + + return TRUE; + +bailout: + if (fd != kcore_fd) + close(fd); + return FALSE; +} + +static int +proc_kcore_init_64(FILE *fp, int kcore_fd) +{ + int fd; + Elf64_Ehdr *elf64; + Elf64_Phdr *load64; + Elf64_Phdr *notes64; + char eheader[MAX_KCORE_ELF_HEADER_SIZE]; + char buf[BUFSIZE]; + size_t load_size, notes_size; + + if (kcore_fd == UNUSED) { + if ((fd = open("/proc/kcore", O_RDONLY)) < 0) { + error(INFO, "/proc/kcore: %s\n", strerror(errno)); + return FALSE; + } + } else + fd = kcore_fd; + + if (read(fd, eheader, MAX_KCORE_ELF_HEADER_SIZE) != MAX_KCORE_ELF_HEADER_SIZE) { + sprintf(buf, "/proc/kcore: read"); + perror(buf); + goto bailout; + } + + if (lseek(fd, 0, SEEK_SET) != 0) { + sprintf(buf, "/proc/kcore: lseek"); + perror(buf); + goto bailout; + } + + if (fd != kcore_fd) + close(fd); + + elf64 = (Elf64_Ehdr *)&eheader[0]; + notes64 = (Elf64_Phdr *)&eheader[sizeof(Elf64_Ehdr)]; + load64 = (Elf64_Phdr *)&eheader[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; + + pkd->segments = elf64->e_phnum - 1; + + notes_size = load_size = 0; + if (notes64->p_type == PT_NOTE) + notes_size = notes64->p_offset + notes64->p_filesz; + if (notes64->p_type == PT_LOAD) + load_size = (ulong)(load64+(elf64->e_phnum)) - (ulong)elf64; + + pkd->header_size = MAX(notes_size, load_size); + if (!pkd->header_size) + pkd->header_size = MAX_KCORE_ELF_HEADER_SIZE; + + if ((pkd->elf_header = (char *)malloc(pkd->header_size)) == NULL) { + error(INFO, "/proc/kcore: cannot malloc ELF header buffer\n"); + clean_exit(1); + } + + BCOPY(&eheader[0], &pkd->elf_header[0], pkd->header_size); + pkd->notes64 = (Elf64_Phdr *)&pkd->elf_header[sizeof(Elf64_Ehdr)]; + pkd->load64 = (Elf64_Phdr *) + &pkd->elf_header[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; + pkd->flags |= KCORE_ELF64; + + kcore_memory_dump(CRASHDEBUG(1) ? 
fp : pc->nullfp); + + return TRUE; + +bailout: + if (fd != kcore_fd) + close(fd); + return FALSE; +} + +int +kcore_memory_dump(FILE *ofp) +{ + int i, others; + Elf32_Phdr *ph32; + Elf64_Phdr *ph64; + Elf32_Nhdr *note32; + Elf64_Nhdr *note64; + size_t tot, len; + char *name, *ptr, buf[BUFSIZE]; + + fprintf(ofp, "proc_kcore_data:\n"); + fprintf(ofp, " flags: %x (", pkd->flags); + others = 0; + if (pkd->flags & KCORE_LOCAL) + fprintf(ofp, "%sKCORE_LOCAL", others++ ? "|" : ""); + if (pkd->flags & KCORE_ELF32) + fprintf(ofp, "%sKCORE_ELF32", others++ ? "|" : ""); + if (pkd->flags & KCORE_ELF64) + fprintf(ofp, "%sKCORE_ELF64", others++ ? "|" : ""); + fprintf(ofp, ")\n"); + fprintf(ofp, " segments: %d\n", + pkd->segments); + fprintf(ofp, " elf_header: %lx\n", (ulong)pkd->elf_header); + fprintf(ofp, " header_size: %ld\n", (ulong)pkd->header_size); + fprintf(ofp, " notes64: %lx\n", (ulong)pkd->notes64); + fprintf(ofp, " load64: %lx\n", (ulong)pkd->load64); + fprintf(ofp, " notes32: %lx\n", (ulong)pkd->notes32); + fprintf(ofp, " load32: %lx\n", (ulong)pkd->load32); + fprintf(ofp, " vmcoreinfo: %lx\n", (ulong)pkd->vmcoreinfo); + fprintf(ofp, " size_vmcoreinfo: %d\n\n", pkd->size_vmcoreinfo); + + if (pkd->flags & KCORE_ELF32) { + ph32 = pkd->notes32; + + fprintf(ofp, " Elf32_Phdr:\n"); + fprintf(ofp, " p_type: %x ", ph32->p_type); + switch (ph32->p_type) + { + case PT_NOTE: + fprintf(ofp, "(PT_NOTE)\n"); + break; + case PT_LOAD: + fprintf(ofp, "(PT_LOAD)\n"); + break; + default: + fprintf(ofp, "(unknown)\n"); + break; + } + fprintf(ofp, " p_flags: %x\n", ph32->p_flags); + fprintf(ofp, " p_offset: %x\n", ph32->p_offset); + fprintf(ofp, " p_vaddr: %x\n", ph32->p_vaddr); + fprintf(ofp, " p_paddr: %x\n", ph32->p_paddr); + fprintf(ofp, " p_filesz: %d\n", ph32->p_filesz); + fprintf(ofp, " p_memsz: %d\n", ph32->p_memsz); + fprintf(ofp, " p_align: %d\n", ph32->p_align); + fprintf(ofp, "\n"); + + for (i = 0; i < pkd->segments; i++) { + ph32 = pkd->load32 + i; + + fprintf(ofp, " 
Elf32_Phdr:\n"); + fprintf(ofp, " p_type: %x ", ph32->p_type); + switch (ph32->p_type) + { + case PT_NOTE: + fprintf(ofp, "(PT_NOTE)\n"); + break; + case PT_LOAD: + fprintf(ofp, "(PT_LOAD)\n"); + break; + default: + fprintf(ofp, "(unknown)\n"); + break; + } + fprintf(ofp, " p_flags: %x\n", ph32->p_flags); + fprintf(ofp, " p_offset: %x\n", ph32->p_offset); + fprintf(ofp, " p_vaddr: %x\n", ph32->p_vaddr); + fprintf(ofp, " p_paddr: %x\n", ph32->p_paddr); + fprintf(ofp, " p_filesz: %d\n", ph32->p_filesz); + fprintf(ofp, " p_memsz: %d\n", ph32->p_memsz); + fprintf(ofp, " p_align: %d\n", ph32->p_align); + fprintf(ofp, "\n"); + } + + note32 = (Elf32_Nhdr *)(pkd->elf_header + pkd->notes32->p_offset); + + for (tot = 0; tot < pkd->notes32->p_filesz; tot += len) { + name = (char *)((ulong)note32 + sizeof(Elf32_Nhdr)); + snprintf(buf, note32->n_namesz, "%s", name); + + fprintf(ofp, " Elf32_Nhdr:\n"); + fprintf(ofp, " n_namesz: %d (\"%s\")\n", note32->n_namesz, buf); + fprintf(ofp, " n_descsz: %d\n", note32->n_descsz); + fprintf(ofp, " n_type: %d ", note32->n_type); + switch (note32->n_type) + { + case NT_PRSTATUS: + fprintf(ofp, "(NT_PRSTATUS)\n"); + break; + case NT_PRPSINFO: + fprintf(ofp, "(NT_PRPSINFO)\n"); + break; + case NT_TASKSTRUCT: + fprintf(ofp, "(NT_TASKSTRUCT)\n"); + break; + default: + fprintf(ofp, "(unknown)\n"); + if (STRNEQ(name, "VMCOREINFO")) { + ptr = (char *)note32 + + sizeof(Elf32_Nhdr) + + note32->n_namesz + 1; + pkd->vmcoreinfo = (void *)ptr; + pkd->size_vmcoreinfo = note32->n_descsz; + pc->read_vmcoreinfo = vmcoreinfo_read_string; + fprintf(ofp, "\n "); + for (i = 0; i < note32->n_descsz; i++, ptr++) { + fprintf(ofp, "%c%s", *ptr, + *ptr == '\n' ? 
" " : ""); + } + } + break; + } + + fprintf(ofp, "\n"); + + len = sizeof(Elf32_Nhdr); + len = roundup(len + note32->n_namesz, 4); + len = roundup(len + note32->n_descsz, 4); + note32 = (Elf32_Nhdr *)((ulong)note32 + len); + } + } + + if (pkd->flags & KCORE_ELF64) { + ph64 = pkd->notes64; + + fprintf(ofp, " Elf64_Phdr:\n"); + fprintf(ofp, " p_type: %x ", ph64->p_type); + switch (ph64->p_type) + { + case PT_NOTE: + fprintf(ofp, "(PT_NOTE)\n"); + break; + case PT_LOAD: + fprintf(ofp, "(PT_LOAD)\n"); + break; + default: + fprintf(ofp, "(unknown)\n"); + break; + } + fprintf(ofp, " p_flags: %x\n", ph64->p_flags); + fprintf(ofp, " p_offset: %llx\n", (ulonglong)ph64->p_offset); + fprintf(ofp, " p_vaddr: %llx\n", (ulonglong)ph64->p_vaddr); + fprintf(ofp, " p_paddr: %llx\n", (ulonglong)ph64->p_paddr); + fprintf(ofp, " p_filesz: %lld\n", (ulonglong)ph64->p_filesz); + fprintf(ofp, " p_memsz: %lld\n", (ulonglong)ph64->p_memsz); + fprintf(ofp, " p_align: %lld\n", (ulonglong)ph64->p_align); + fprintf(ofp, "\n"); + + for (i = 0; i < pkd->segments; i++) { + ph64 = pkd->load64 + i; + + fprintf(ofp, " Elf64_Phdr:\n"); + fprintf(ofp, " p_type: %x ", ph64->p_type); + switch (ph64->p_type) + { + case PT_NOTE: + fprintf(ofp, "(PT_NOTE)\n"); + break; + case PT_LOAD: + fprintf(ofp, "(PT_LOAD)\n"); + break; + default: + fprintf(ofp, "(unknown)\n"); + break; + } + fprintf(ofp, " p_flags: %x\n", ph64->p_flags); + fprintf(ofp, " p_offset: %llx\n", (ulonglong)ph64->p_offset); + fprintf(ofp, " p_vaddr: %llx\n", (ulonglong)ph64->p_vaddr); + fprintf(ofp, " p_paddr: %llx\n", (ulonglong)ph64->p_paddr); + fprintf(ofp, " p_filesz: %lld\n", (ulonglong)ph64->p_filesz); + fprintf(ofp, " p_memsz: %lld\n", (ulonglong)ph64->p_memsz); + fprintf(ofp, " p_align: %lld\n", (ulonglong)ph64->p_align); + fprintf(ofp, "\n"); + } + + note64 = (Elf64_Nhdr *)(pkd->elf_header + pkd->notes64->p_offset); + + for (tot = 0; tot < pkd->notes64->p_filesz; tot += len) { + name = (char *)((ulong)note64 + sizeof(Elf64_Nhdr)); + 
snprintf(buf, note64->n_namesz, "%s", name); + + fprintf(ofp, " Elf64_Nhdr:\n"); + fprintf(ofp, " n_namesz: %d (\"%s\")\n", note64->n_namesz, buf); + fprintf(ofp, " n_descsz: %d\n", note64->n_descsz); + fprintf(ofp, " n_type: %d ", note64->n_type); + switch (note64->n_type) + { + case NT_PRSTATUS: + fprintf(ofp, "(NT_PRSTATUS)\n"); + break; + case NT_PRPSINFO: + fprintf(ofp, "(NT_PRPSINFO)\n"); + break; + case NT_TASKSTRUCT: + fprintf(ofp, "(NT_TASKSTRUCT)\n"); + break; + default: + fprintf(ofp, "(unknown)\n"); + if (STRNEQ(name, "VMCOREINFO")) { + ptr = (char *)note64 + + sizeof(Elf64_Nhdr) + + note64->n_namesz + 1; + pkd->vmcoreinfo = (void *)ptr; + pkd->size_vmcoreinfo = note64->n_descsz; + pc->read_vmcoreinfo = vmcoreinfo_read_string; + fprintf(ofp, "\n "); + for (i = 0; i < note64->n_descsz; i++, ptr++) { + fprintf(ofp, "%c%s", *ptr, + *ptr == '\n' ? " " : ""); + } + } + break; + } + + fprintf(ofp, "\n"); + + len = sizeof(Elf64_Nhdr); + len = roundup(len + note64->n_namesz, 4); + len = roundup(len + note64->n_descsz, 4); + note64 = (Elf64_Nhdr *)((ulong)note64 + len); + } + } + + return TRUE; +} + +static void +kdump_get_osrelease(void) +{ + char *string; + + if ((string = vmcoreinfo_read_string("OSRELEASE"))) { + fprintf(fp, "%s\n", string); + free(string); + } else + pc->flags2 &= ~GET_OSRELEASE; +} + +void +dump_registers_for_qemu_mem_dump(void) +{ + int i; + QEMUCPUState *ptr; + FILE *fpsave; + + fpsave = nd->ofp; + nd->ofp = fp; + + for (i = 0; i < nd->num_qemu_notes; i++) { + ptr = (QEMUCPUState *)nd->nt_qemu_percpu[i]; + + if (i) + netdump_print("\n"); + + if (hide_offline_cpu(i)) { + netdump_print("CPU %d: [OFFLINE]\n", i); + continue; + } else + netdump_print("CPU %d:\n", i); + + if (CRASHDEBUG(1)) + netdump_print(" version:%d size:%d\n", + ptr->version, ptr->size); + netdump_print(" RAX: %016llx RBX: %016llx RCX: %016llx\n", + ptr->rax, ptr->rbx, ptr->rcx); + netdump_print(" RDX: %016llx RSI: %016llx RDI:%016llx\n", + ptr->rdx, ptr->rsi, ptr->rdi); + 
netdump_print(" RSP: %016llx RBP: %016llx ", + ptr->rsp, ptr->rbp); + + if (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF64) { + netdump_print(" R8: %016llx\n", + ptr->r8); + netdump_print(" R9: %016llx R10: %016llx R11: %016llx\n", + ptr->r9, ptr->r10, ptr->r11); + netdump_print(" R12: %016llx R13: %016llx R14: %016llx\n", + ptr->r12, ptr->r13, ptr->r14); + netdump_print(" R15: %016llx", + ptr->r15); + } else + netdump_print("\n"); + + netdump_print(" RIP: %016llx RFLAGS: %08llx\n", + ptr->rip, ptr->rflags); + netdump_print(" CS: selector: %04lx limit: %08lx flags: %08lx\n\ + pad: %08lx base: %016llx\n", + ptr->cs.selector, ptr->cs.limit, ptr->cs.flags, + ptr->cs.pad, ptr->cs.base); + netdump_print(" DS: selector: %04lx limit: %08lx flags: %08lx\n\ + pad: %08lx base: %016llx\n", + ptr->ds.selector, ptr->ds.limit, ptr->ds.flags, + ptr->ds.pad, ptr->ds.base); + netdump_print(" ES: selector: %04lx limit: %08lx flags: %08lx\n\ + pad: %08lx base: %016llx\n", + ptr->es.selector, ptr->es.limit, ptr->es.flags, + ptr->es.pad, ptr->es.base); + netdump_print(" FS: selector: %04lx limit: %08lx flags: %08lx\n\ + pad: %08lx base: %016llx\n", + ptr->fs.selector, ptr->fs.limit, ptr->fs.flags, + ptr->fs.pad, ptr->fs.base); + netdump_print(" GS: selector: %04lx limit: %08lx flags: %08lx\n\ + pad: %08lx base: %016llx\n", + ptr->gs.selector, ptr->gs.limit, ptr->gs.flags, + ptr->gs.pad, ptr->gs.base); + netdump_print(" SS: selector: %04lx limit: %08lx flags: %08lx\n\ + pad: %08lx base: %016llx\n", + ptr->ss.selector, ptr->ss.limit, ptr->ss.flags, + ptr->ss.pad, ptr->ss.base); + netdump_print(" LDT: selector: %04lx limit: %08lx flags: %08lx\n\ + pad: %08lx base: %016llx\n", + ptr->ldt.selector, ptr->ldt.limit, ptr->ldt.flags, + ptr->ldt.pad, ptr->ldt.base); + netdump_print(" TR: selector: %04lx limit: %08lx flags: %08lx\n\ + pad: %08lx base: %016llx\n", + ptr->tr.selector, ptr->tr.limit, ptr->tr.flags, + ptr->tr.pad, ptr->tr.base); + netdump_print(" GDT: selector: %04lx limit: %08lx flags: 
%08lx\n\ + pad: %08lx base: %016llx\n", + ptr->gdt.selector, ptr->gdt.limit, ptr->gdt.flags, + ptr->gdt.pad, ptr->gdt.base); + netdump_print(" IDT: selector: %04lx limit: %08lx flags: %08lx\n\ + pad: %08lx base: %016llx\n", + ptr->idt.selector, ptr->idt.limit, ptr->idt.flags, + ptr->idt.pad, ptr->idt.base); + netdump_print(" CR0: %016llx CR1: %016llx CR2: %016llx\n", + ptr->cr[0], ptr->cr[1], ptr->cr[2]); + netdump_print(" CR3: %016llx CR4: %016llx\n", + ptr->cr[3], ptr->cr[4]); + } + + nd->ofp = fpsave; +} + +/* + * kdump saves the first 640kB physical memory for BIOS to use the + * range on boot of 2nd kernel. Read request to the 640k should be + * translated to the back up region. This function searches kexec + * resources for the backup region. + */ +void +kdump_backup_region_init(void) +{ + char buf[BUFSIZE]; + ulong i, total, kexec_crash_image_p, elfcorehdr_p; + Elf32_Off e_phoff32; + Elf64_Off e_phoff64; + uint16_t e_phnum, e_phentsize; + ulonglong backup_offset; + ulonglong backup_src_start; + ulong backup_src_size; + int kimage_segment_len; + size_t bufsize; + struct vmcore_data *vd; + struct sadump_data *sd; + int is_32_bit; + char typename[BUFSIZE]; + + e_phoff32 = e_phoff64 = 0; + vd = NULL; + sd = NULL; + + if (SADUMP_DUMPFILE()) { + sd = get_sadump_data(); + is_32_bit = FALSE; + sprintf(typename, "sadump"); + } else if (pc->flags2 & QEMU_MEM_DUMP_ELF) { + vd = get_kdump_vmcore_data(); + if (vd->flags & KDUMP_ELF32) + is_32_bit = TRUE; + else + is_32_bit = FALSE; + sprintf(typename, "qemu mem dump"); + } else + return; + + if (symbol_exists("kexec_crash_image")) { + if (!readmem(symbol_value("kexec_crash_image"), KVADDR, + &kexec_crash_image_p, sizeof(ulong), + "kexec backup region: kexec_crash_image", + QUIET|RETURN_ON_ERROR)) + goto error; + } else + kexec_crash_image_p = 0; + + if (!kexec_crash_image_p) { + if (CRASHDEBUG(1)) + error(INFO, "%s: kexec_crash_image not loaded\n", typename); + return; + } + + kimage_segment_len = 
get_array_length("kimage.segment", NULL, + STRUCT_SIZE("kexec_segment")); + + if (!readmem(kexec_crash_image_p + MEMBER_OFFSET("kimage", "segment"), + KVADDR, buf, MEMBER_SIZE("kimage", "segment"), + "kexec backup region: kexec_crash_image->segment", + QUIET|RETURN_ON_ERROR)) + goto error; + + elfcorehdr_p = 0; + for (i = 0; i < kimage_segment_len; ++i) { + char e_ident[EI_NIDENT]; + ulong mem; + + mem = ULONG(buf + i * STRUCT_SIZE("kexec_segment") + + MEMBER_OFFSET("kexec_segment", "mem")); + if (!mem) + continue; + + if (!readmem(mem, PHYSADDR, e_ident, SELFMAG, + "elfcorehdr: e_ident", + QUIET|RETURN_ON_ERROR)) + goto error; + + if (strncmp(ELFMAG, e_ident, SELFMAG) == 0) { + elfcorehdr_p = mem; + break; + } + } + if (!elfcorehdr_p) { + if (CRASHDEBUG(1)) + error(INFO, + "%s: elfcorehdr not found in segments of kexec_crash_image\n", typename); + goto error; + } + + if (is_32_bit) { + if (!readmem(elfcorehdr_p, PHYSADDR, buf, STRUCT_SIZE("elf32_hdr"), + "elfcorehdr", QUIET|RETURN_ON_ERROR)) + goto error; + + e_phnum = USHORT(buf + MEMBER_OFFSET("elf32_hdr", "e_phnum")); + e_phentsize = USHORT(buf + MEMBER_OFFSET("elf32_hdr", "e_phentsize")); + e_phoff32 = ULONG(buf + MEMBER_OFFSET("elf32_hdr", "e_phoff")); + } else { + if (!readmem(elfcorehdr_p, PHYSADDR, buf, STRUCT_SIZE("elf64_hdr"), + "elfcorehdr", QUIET|RETURN_ON_ERROR)) + goto error; + + e_phnum = USHORT(buf + MEMBER_OFFSET("elf64_hdr", "e_phnum")); + e_phentsize = USHORT(buf + MEMBER_OFFSET("elf64_hdr", "e_phentsize")); + e_phoff64 = ULONG(buf + MEMBER_OFFSET("elf64_hdr", "e_phoff")); + } + + backup_src_start = backup_src_size = backup_offset = 0; + + for (i = 0; i < e_phnum; ++i) { + uint32_t p_type; + Elf32_Off p_offset32; + Elf64_Off p_offset64; + Elf32_Addr p_paddr32; + Elf64_Addr p_paddr64; + uint32_t p_memsz32; + uint64_t p_memsz64; + + if (is_32_bit) { + if (!readmem(elfcorehdr_p + e_phoff32 + i * e_phentsize, + PHYSADDR, buf, e_phentsize, + "elfcorehdr: program header", + QUIET|RETURN_ON_ERROR)) + 
goto error; + + p_type = UINT(buf+MEMBER_OFFSET("elf32_phdr","p_type")); + p_offset32 = ULONG(buf+MEMBER_OFFSET("elf32_phdr","p_offset")); + p_paddr32 = ULONG(buf+MEMBER_OFFSET("elf32_phdr","p_paddr")); + p_memsz32 = ULONG(buf+MEMBER_OFFSET("elf32_phdr","p_memsz")); + } else { + if (!readmem(elfcorehdr_p + e_phoff64 + i * e_phentsize, + PHYSADDR, buf, e_phentsize, + "elfcorehdr: program header", + QUIET|RETURN_ON_ERROR)) + goto error; + + p_type = UINT(buf+MEMBER_OFFSET("elf64_phdr","p_type")); + p_offset64 = ULONG(buf+MEMBER_OFFSET("elf64_phdr","p_offset")); + p_paddr64 = ULONG(buf+MEMBER_OFFSET("elf64_phdr","p_paddr")); + p_memsz64 = ULONG(buf+MEMBER_OFFSET("elf64_phdr","p_memsz")); + } + + /* + * kexec marks backup region PT_LOAD by assigning + * backup region address in p_offset, and p_addr in + * p_offsets for other PT_LOAD entries. + */ + if (is_32_bit) { + if (p_type == PT_LOAD && + p_paddr32 <= KEXEC_BACKUP_SRC_END && + p_paddr32 != p_offset32) { + + backup_src_start = p_paddr32; + backup_src_size = p_memsz32; + backup_offset = p_offset32; + + if (CRASHDEBUG(1)) + error(INFO, + "%s: kexec backup region found: " + "START: %#016llx SIZE: %#016lx OFFSET: %#016llx\n", + typename, backup_src_start, backup_src_size, backup_offset); + + break; + } + } else { + if (p_type == PT_LOAD && + p_paddr64 <= KEXEC_BACKUP_SRC_END && + p_paddr64 != p_offset64) { + + backup_src_start = p_paddr64; + backup_src_size = p_memsz64; + backup_offset = p_offset64; + + if (CRASHDEBUG(1)) + error(INFO, + "%s: kexec backup region found: " + "START: %#016llx SIZE: %#016lx OFFSET: %#016llx\n", + typename, backup_src_start, backup_src_size, backup_offset); + + break; + } + } + } + + if (!backup_offset) { + if (CRASHDEBUG(1)) + error(WARNING, "%s: backup region not found in elfcorehdr\n", typename); + return; + } + + bufsize = BUFSIZE; + for (total = 0; total < backup_src_size; total += bufsize) { + char backup_buf[BUFSIZE]; + int j; + + if (backup_src_size - total < BUFSIZE) + bufsize = 
backup_src_size - total; + + if (!readmem(backup_offset + total, PHYSADDR, backup_buf, + bufsize, "backup source", QUIET|RETURN_ON_ERROR)) + goto error; + + /* + * We're assuming the backup region is initialized + * with 0 filled if kdump has not run. + */ + for (j = 0; j < bufsize; ++j) { + if (backup_buf[j]) { + + if (SADUMP_DUMPFILE()) { + sd->flags |= SADUMP_KDUMP_BACKUP; + sd->backup_src_start = backup_src_start; + sd->backup_src_size = backup_src_size; + sd->backup_offset = backup_offset; + } else if (pc->flags2 & QEMU_MEM_DUMP_ELF) { + vd->flags |= QEMU_MEM_DUMP_KDUMP_BACKUP; + vd->backup_src_start = backup_src_start; + vd->backup_src_size = backup_src_size; + vd->backup_offset = backup_offset; + } + + if (CRASHDEBUG(1)) +error(INFO, "%s: backup region is used: %llx\n", typename, backup_offset + total + j); + + return; + } + } + } + + if (CRASHDEBUG(1)) + error(INFO, "%s: kexec backup region not used\n", typename); + + return; + +error: + error(WARNING, "failed to init kexec backup region\n"); +} + +int +kdump_kaslr_check(void) +{ + if (!QEMU_MEM_DUMP_NO_VMCOREINFO()) + return FALSE; + + /* If vmcore has QEMU note, need to calculate kaslr offset */ + if (nd->num_qemu_notes) + return TRUE; + else + return FALSE; +} + +#ifdef X86_64 +QEMUCPUState * +kdump_get_qemucpustate(int cpu) +{ + if (cpu >= nd->num_qemu_notes) { + if (CRASHDEBUG(1)) + error(INFO, + "Invalid index for QEMU Note: %d (>= %d)\n", + cpu, nd->num_qemu_notes); + return NULL; + } + + if (!nd->elf64 || (nd->elf64->e_machine != EM_X86_64)) { + if (CRASHDEBUG(1)) + error(INFO, "Only x86_64 64bit is supported.\n"); + return NULL; + } + + return (QEMUCPUState *)nd->nt_qemu_percpu[cpu]; +} +#endif + +static void * +get_kdump_device_dump_offset(void) +{ + void *elf_base = NULL; + + if (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF64) + elf_base = (void *)nd->elf64; + else if (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF32) + elf_base = (void *)nd->elf32; + else + error(FATAL, "no device dumps found in this 
dumpfile\n"); + + return elf_base; +} + +/* + * extract hardware specific device dumps from coredump. + */ +void +kdump_device_dump_extract(int index, char *outfile, FILE *ofp) +{ + ulonglong offset; + void *elf_base; + + if (!nd->num_vmcoredd_notes) + error(FATAL, "no device dumps found in this dumpfile\n"); + else if (index >= nd->num_vmcoredd_notes) + error(FATAL, "no device dump found at index: %d", index); + + elf_base = get_kdump_device_dump_offset(); + + offset = nd->nt_vmcoredd_array[index] - elf_base; + + devdump_extract(nd->nt_vmcoredd_array[index], offset, outfile, ofp); +} + +/* + * list all hardware specific device dumps present in coredump. + */ +void kdump_device_dump_info(FILE *ofp) +{ + ulonglong offset; + char buf[BUFSIZE]; + void *elf_base; + ulong i; + + if (!nd->num_vmcoredd_notes) + error(FATAL, "no device dumps found in this dumpfile\n"); + + fprintf(fp, "%s ", mkstring(buf, strlen("INDEX"), LJUST, "INDEX")); + fprintf(fp, " %s ", mkstring(buf, LONG_LONG_PRLEN, LJUST, "OFFSET")); + fprintf(fp, " %s ", mkstring(buf, LONG_PRLEN, LJUST, "SIZE")); + fprintf(fp, "NAME\n"); + + elf_base = get_kdump_device_dump_offset(); + + for (i = 0; i < nd->num_vmcoredd_notes; i++) { + fprintf(fp, "%s ", mkstring(buf, strlen("INDEX"), CENTER | INT_DEC, MKSTR(i))); + offset = nd->nt_vmcoredd_array[i] - elf_base; + devdump_info(nd->nt_vmcoredd_array[i], offset, ofp); + } +}