 * drivers/misc/tegra-profiler/exh_tables.c
 *
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/uaccess.h>
23 #include <linux/err.h>
24 #include <linux/rcupdate.h>
26 #include <linux/tegra_profiler.h>
28 #include "eh_unwind.h"
29 #include "backtrace.h"
31 #include "dwarf_unwind.h"
33 #define QUADD_EXTABS_SIZE 0x100
35 #define GET_NR_PAGES(a, l) \
36 ((PAGE_ALIGN((a) + (l)) - ((a) & PAGE_MASK)) / PAGE_SIZE)
48 struct ex_region_info *entries;
50 unsigned long curr_nr;
56 struct quadd_unwind_ctx {
57 struct regions_data *rd;
60 unsigned long ex_tables_size;
70 unsigned long fp_thumb;
78 struct unwind_ctrl_block {
79 u32 vrs[16]; /* virtual register set */
80 const u32 *insn; /* pointer to the current instr word */
81 int entries; /* number of entries left */
82 int byte; /* current byte in the instr word */
85 struct pin_pages_work {
86 struct work_struct work;
87 unsigned long vm_start;
90 static struct quadd_unwind_ctx ctx;
93 validate_mmap_addr(struct quadd_mmap_area *mmap,
94 unsigned long addr, unsigned long nbytes)
96 struct vm_area_struct *vma = mmap->mmap_vma;
97 unsigned long size = vma->vm_end - vma->vm_start;
98 unsigned long data = (unsigned long)mmap->data;
101 pr_err_once("%s: error: unaligned address: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
102 __func__, addr, data, data + size,
103 vma->vm_start, vma->vm_end);
107 if (addr < data || addr >= data + (size - nbytes)) {
108 pr_err_once("%s: error: addr: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
109 __func__, addr, data, data + size,
110 vma->vm_start, vma->vm_end);
117 #define read_user_data(addr, retval) \
121 pagefault_disable(); \
122 ret = __get_user(retval, addr); \
123 pagefault_enable(); \
126 pr_debug("%s: failed for address: %p\n", \
128 ret = -QUADD_URC_EACCESS; \
135 read_mmap_data(struct quadd_mmap_area *mmap, const u32 *addr, u32 *retval)
137 if (!validate_mmap_addr(mmap, (unsigned long)addr, sizeof(u32))) {
139 return -QUADD_URC_EACCESS;
146 static inline unsigned long
147 ex_addr_to_mmap_addr(unsigned long addr,
148 struct ex_region_info *ri,
151 unsigned long offset;
152 struct extab_info *ti;
154 ti = &ri->ex_sec[sec_type];
155 offset = addr - ti->addr;
157 return ti->mmap_offset + offset + (unsigned long)ri->mmap->data;
160 static inline unsigned long
161 mmap_addr_to_ex_addr(unsigned long addr,
162 struct ex_region_info *ri,
165 unsigned long offset;
166 struct extab_info *ti;
168 ti = &ri->ex_sec[sec_type];
169 offset = addr - ti->mmap_offset - (unsigned long)ri->mmap->data;
171 return ti->addr + offset;
175 prel31_to_addr(const u32 *ptr)
180 if (read_user_data(ptr, value))
183 /* sign-extend to 32 bits */
184 offset = (((s32)value) << 1) >> 1;
185 return (u32)(unsigned long)ptr + offset;
189 mmap_prel31_to_addr(const u32 *ptr, struct ex_region_info *ri,
190 int src_type, int dst_type, int to_mmap)
194 unsigned long addr_res;
197 offset = (((s32)value) << 1) >> 1;
199 addr = mmap_addr_to_ex_addr((unsigned long)ptr, ri, src_type);
204 addr_res = ex_addr_to_mmap_addr(addr_res, ri, dst_type);
210 add_ex_region(struct regions_data *rd,
211 struct ex_region_info *new_entry)
213 unsigned int i_min, i_max, mid;
214 struct ex_region_info *array = rd->entries;
215 unsigned long size = rd->curr_nr;
221 memcpy(&array[0], new_entry, sizeof(*new_entry));
223 } else if (size == 1 && array[0].vm_start == new_entry->vm_start) {
230 if (array[0].vm_start > new_entry->vm_start) {
231 memmove(array + 1, array,
232 size * sizeof(*array));
233 memcpy(&array[0], new_entry, sizeof(*new_entry));
235 } else if (array[size - 1].vm_start < new_entry->vm_start) {
236 memcpy(&array[size], new_entry, sizeof(*new_entry));
240 while (i_min < i_max) {
241 mid = i_min + (i_max - i_min) / 2;
243 if (new_entry->vm_start <= array[mid].vm_start)
249 if (array[i_max].vm_start == new_entry->vm_start) {
252 memmove(array + i_max + 1,
254 (size - i_max) * sizeof(*array));
255 memcpy(&array[i_max], new_entry, sizeof(*new_entry));
261 remove_ex_region(struct regions_data *rd,
262 struct ex_region_info *entry)
264 unsigned int i_min, i_max, mid;
265 struct ex_region_info *array = rd->entries;
266 unsigned long size = rd->curr_nr;
275 if (array[0].vm_start == entry->vm_start)
281 if (array[0].vm_start > entry->vm_start)
283 else if (array[size - 1].vm_start < entry->vm_start)
289 while (i_min < i_max) {
290 mid = i_min + (i_max - i_min) / 2;
292 if (entry->vm_start <= array[mid].vm_start)
298 if (array[i_max].vm_start == entry->vm_start) {
299 memmove(array + i_max,
301 (size - i_max) * sizeof(*array));
308 static struct ex_region_info *
309 __search_ex_region(struct ex_region_info *array,
313 unsigned int i_min, i_max, mid;
321 while (i_min < i_max) {
322 mid = i_min + (i_max - i_min) / 2;
324 if (key <= array[mid].vm_start)
330 if (array[i_max].vm_start == key)
331 return &array[i_max];
337 search_ex_region(unsigned long key, struct ex_region_info *ri)
339 struct regions_data *rd;
340 struct ex_region_info *ri_p = NULL;
344 rd = rcu_dereference(ctx.rd);
348 ri_p = __search_ex_region(rd->entries, rd->curr_nr, key);
350 memcpy(ri, ri_p, sizeof(*ri));
354 return ri_p ? 0 : -ENOENT;
358 get_extabs_ehabi(unsigned long key, struct ex_region_info *ri)
361 struct extab_info *ti_extab, *ti_exidx;
363 err = search_ex_region(key, ri);
367 ti_extab = &ri->ex_sec[QUADD_SEC_TYPE_EXTAB];
368 ti_exidx = &ri->ex_sec[QUADD_SEC_TYPE_EXIDX];
370 return (ti_extab->length && ti_exidx->length) ? 0 : -ENOENT;
374 quadd_get_dw_frames(unsigned long key, struct ex_region_info *ri)
377 struct extab_info *ti, *ti_hdr;
379 err = search_ex_region(key, ri);
383 ti = &ri->ex_sec[QUADD_SEC_TYPE_EH_FRAME];
384 ti_hdr = &ri->ex_sec[QUADD_SEC_TYPE_EH_FRAME_HDR];
386 if (ti->length && ti_hdr->length)
389 ti = &ri->ex_sec[QUADD_SEC_TYPE_DEBUG_FRAME];
390 ti_hdr = &ri->ex_sec[QUADD_SEC_TYPE_DEBUG_FRAME_HDR];
392 return (ti->length && ti_hdr->length) ? 0 : -ENOENT;
395 static struct regions_data *rd_alloc(unsigned long size)
397 struct regions_data *rd;
399 rd = kzalloc(sizeof(*rd), GFP_ATOMIC);
403 rd->entries = kzalloc(size * sizeof(*rd->entries), GFP_ATOMIC);
415 static void rd_free(struct regions_data *rd)
423 static void rd_free_rcu(struct rcu_head *rh)
425 struct regions_data *rd = container_of(rh, struct regions_data, rcu);
429 int quadd_unwind_set_extab(struct quadd_sections *extabs,
430 struct quadd_mmap_area *mmap)
433 unsigned long nr_entries, nr_added, new_size;
434 struct ex_region_info ri_entry;
435 struct extab_info *ti;
436 struct regions_data *rd, *rd_new;
437 struct ex_region_info *ex_entry;
439 if (mmap->type != QUADD_MMAP_TYPE_EXTABS)
442 spin_lock(&ctx.lock);
444 rd = rcu_dereference(ctx.rd);
446 pr_warn("%s: warning: rd\n", __func__);
447 new_size = QUADD_EXTABS_SIZE;
451 nr_entries = rd->curr_nr;
454 if (nr_entries >= new_size)
455 new_size += new_size >> 1;
457 rd_new = rd_alloc(new_size);
458 if (IS_ERR_OR_NULL(rd_new)) {
459 pr_err("%s: error: rd_alloc\n", __func__);
464 if (rd && nr_entries)
465 memcpy(rd_new->entries, rd->entries,
466 nr_entries * sizeof(*rd->entries));
468 rd_new->curr_nr = nr_entries;
470 ri_entry.vm_start = extabs->vm_start;
471 ri_entry.vm_end = extabs->vm_end;
473 ri_entry.mmap = mmap;
475 for (i = 0; i < QUADD_SEC_TYPE_MAX; i++) {
476 struct quadd_sec_info *si = &extabs->sec[i];
478 ti = &ri_entry.ex_sec[i];
492 ti->length = si->length;
493 ti->mmap_offset = si->mmap_offset;
496 nr_added = add_ex_region(rd_new, &ri_entry);
500 rd_new->curr_nr += nr_added;
502 ex_entry = kzalloc(sizeof(*ex_entry), GFP_ATOMIC);
507 memcpy(ex_entry, &ri_entry, sizeof(*ex_entry));
509 INIT_LIST_HEAD(&ex_entry->list);
510 list_add_tail(&ex_entry->list, &mmap->ex_entries);
512 rcu_assign_pointer(ctx.rd, rd_new);
515 call_rcu(&rd->rcu, rd_free_rcu);
517 spin_unlock(&ctx.lock);
524 spin_unlock(&ctx.lock);
529 quadd_unwind_set_tail_info(unsigned long vm_start,
531 unsigned long tf_start,
532 unsigned long tf_end)
534 struct ex_region_info *ri;
535 unsigned long nr_entries, size;
536 struct regions_data *rd, *rd_new;
537 struct extab_info *ti;
539 spin_lock(&ctx.lock);
541 rd = rcu_dereference(ctx.rd);
543 if (!rd || rd->curr_nr == 0)
547 nr_entries = rd->curr_nr;
549 rd_new = rd_alloc(size);
550 if (IS_ERR_OR_NULL(rd_new)) {
551 pr_err_once("%s: error: rd_alloc\n", __func__);
555 memcpy(rd_new->entries, rd->entries,
556 nr_entries * sizeof(*rd->entries));
558 rd_new->curr_nr = nr_entries;
560 ri = __search_ex_region(rd_new->entries, nr_entries, vm_start);
564 ti = &ri->ex_sec[secid];
566 ti->tf_start = tf_start;
569 rcu_assign_pointer(ctx.rd, rd_new);
571 call_rcu(&rd->rcu, rd_free_rcu);
572 spin_unlock(&ctx.lock);
580 spin_unlock(&ctx.lock);
584 clean_mmap(struct regions_data *rd, struct quadd_mmap_area *mmap, int rm_ext)
587 struct ex_region_info *entry, *next;
592 list_for_each_entry_safe(entry, next, &mmap->ex_entries, list) {
594 nr_removed += remove_ex_region(rd, entry);
596 list_del(&entry->list);
603 void quadd_unwind_delete_mmap(struct quadd_mmap_area *mmap)
605 unsigned long nr_entries, nr_removed, new_size;
606 struct regions_data *rd, *rd_new;
611 spin_lock(&ctx.lock);
613 rd = rcu_dereference(ctx.rd);
614 if (!rd || !rd->curr_nr)
617 nr_entries = rd->curr_nr;
618 new_size = min_t(unsigned long, rd->size, nr_entries);
620 rd_new = rd_alloc(new_size);
621 if (IS_ERR_OR_NULL(rd_new)) {
622 pr_err("%s: error: rd_alloc\n", __func__);
625 rd_new->size = new_size;
626 rd_new->curr_nr = nr_entries;
628 memcpy(rd_new->entries, rd->entries,
629 nr_entries * sizeof(*rd->entries));
631 nr_removed = clean_mmap(rd_new, mmap, 1);
632 rd_new->curr_nr -= nr_removed;
634 rcu_assign_pointer(ctx.rd, rd_new);
635 call_rcu(&rd->rcu, rd_free_rcu);
638 spin_unlock(&ctx.lock);
641 static const struct unwind_idx *
642 unwind_find_idx(struct ex_region_info *ri, u32 addr)
645 unsigned long length;
646 struct extab_info *ti;
647 struct unwind_idx *start;
648 struct unwind_idx *stop;
649 struct unwind_idx *mid = NULL;
651 ti = &ri->ex_sec[QUADD_SEC_TYPE_EXIDX];
653 length = ti->length / sizeof(*start);
655 if (unlikely(!length))
658 start = (struct unwind_idx *)((char *)ri->mmap->data + ti->mmap_offset);
659 stop = start + length - 1;
661 value = (u32)mmap_prel31_to_addr(&start->addr_offset, ri,
662 QUADD_SEC_TYPE_EXIDX,
663 QUADD_SEC_TYPE_EXTAB, 0);
667 value = (u32)mmap_prel31_to_addr(&stop->addr_offset, ri,
668 QUADD_SEC_TYPE_EXIDX,
669 QUADD_SEC_TYPE_EXTAB, 0);
673 while (start < stop - 1) {
674 mid = start + ((stop - start) >> 1);
676 value = (u32)mmap_prel31_to_addr(&mid->addr_offset, ri,
677 QUADD_SEC_TYPE_EXIDX,
678 QUADD_SEC_TYPE_EXTAB, 0);
690 unwind_get_byte(struct quadd_mmap_area *mmap,
691 struct unwind_ctrl_block *ctrl, long *err)
698 if (ctrl->entries <= 0) {
699 pr_err_once("%s: error: corrupt unwind table\n", __func__);
700 *err = -QUADD_URC_TBL_IS_CORRUPT;
704 *err = read_mmap_data(mmap, ctrl->insn, &insn_word);
708 ret = (insn_word >> (ctrl->byte * 8)) & 0xff;
710 if (ctrl->byte == 0) {
721 read_uleb128(struct quadd_mmap_area *mmap,
722 struct unwind_ctrl_block *ctrl,
726 unsigned long result;
735 byte = unwind_get_byte(mmap, ctrl, &err);
741 result |= (byte & 0x7f) << shift;
754 * Execute the current unwind instruction.
757 unwind_exec_insn(struct quadd_mmap_area *mmap,
758 struct unwind_ctrl_block *ctrl)
762 unsigned long insn = unwind_get_byte(mmap, ctrl, &err);
767 pr_debug("%s: insn = %08lx\n", __func__, insn);
769 if ((insn & 0xc0) == 0x00) {
770 ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
772 pr_debug("CMD_DATA_POP: vsp = vsp + %lu (new: %#x)\n",
773 ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
774 } else if ((insn & 0xc0) == 0x40) {
775 ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
777 pr_debug("CMD_DATA_PUSH: vsp = vsp – %lu (new: %#x)\n",
778 ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
779 } else if ((insn & 0xf0) == 0x80) {
781 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
782 int load_sp, reg = 4;
784 insn = (insn << 8) | unwind_get_byte(mmap, ctrl, &err);
788 mask = insn & 0x0fff;
790 pr_debug("CMD_REFUSED: unwind: 'Refuse to unwind' instruction %04lx\n",
792 return -QUADD_URC_REFUSE_TO_UNWIND;
795 /* pop R4-R15 according to mask */
796 load_sp = mask & (1 << (13 - 4));
799 err = read_user_data(vsp++, ctrl->vrs[reg]);
803 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
809 ctrl->vrs[SP] = (unsigned long)vsp;
811 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
812 } else if ((insn & 0xf0) == 0x90 &&
813 (insn & 0x0d) != 0x0d) {
814 ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
815 pr_debug("CMD_REG_TO_SP: vsp = {r%lu}\n", insn & 0x0f);
816 } else if ((insn & 0xf0) == 0xa0) {
817 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
820 /* pop R4-R[4+bbb] */
821 for (reg = 4; reg <= 4 + (insn & 7); reg++) {
822 err = read_user_data(vsp++, ctrl->vrs[reg]);
826 pr_debug("CMD_REG_POP: pop {r%u}\n", reg);
830 err = read_user_data(vsp++, ctrl->vrs[14]);
834 pr_debug("CMD_REG_POP: pop {r14}\n");
837 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
838 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
839 } else if (insn == 0xb0) {
840 if (ctrl->vrs[PC] == 0)
841 ctrl->vrs[PC] = ctrl->vrs[LR];
842 /* no further processing */
845 pr_debug("CMD_FINISH\n");
846 } else if (insn == 0xb1) {
847 unsigned long mask = unwind_get_byte(mmap, ctrl, &err);
848 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
854 if (mask == 0 || mask & 0xf0) {
855 pr_debug("unwind: Spare encoding %04lx\n",
857 return -QUADD_URC_SPARE_ENCODING;
860 /* pop R0-R3 according to mask */
863 err = read_user_data(vsp++, ctrl->vrs[reg]);
867 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
873 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
874 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
875 } else if (insn == 0xb2) {
877 unsigned long uleb128 = 0;
879 count = read_uleb128(mmap, ctrl, &uleb128);
884 return -QUADD_URC_TBL_IS_CORRUPT;
886 ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
888 pr_debug("CMD_DATA_POP: vsp = vsp + %lu (%#lx), new vsp: %#x\n",
889 0x204 + (uleb128 << 2), 0x204 + (uleb128 << 2),
891 } else if (insn == 0xb3 || insn == 0xc8 || insn == 0xc9) {
892 unsigned long data, reg_from, reg_to;
893 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
895 data = unwind_get_byte(mmap, ctrl, &err);
899 reg_from = (data & 0xf0) >> 4;
900 reg_to = reg_from + (data & 0x0f);
907 for (i = reg_from; i <= reg_to; i++)
913 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
915 pr_debug("CMD_VFP_POP (%#lx %#lx): pop {D%lu-D%lu}\n",
916 insn, data, reg_from, reg_to);
917 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
918 } else if ((insn & 0xf8) == 0xb8 || (insn & 0xf8) == 0xd0) {
919 unsigned long reg_to;
920 unsigned long data = insn & 0x07;
921 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
925 for (i = 8; i <= reg_to; i++)
928 if ((insn & 0xf8) == 0xb8)
931 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
933 pr_debug("CMD_VFP_POP (%#lx): pop {D8-D%lu}\n",
935 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
937 pr_debug("error: unhandled instruction %02lx\n", insn);
938 return -QUADD_URC_UNHANDLED_INSTRUCTION;
941 pr_debug("%s: fp_arm: %#x, fp_thumb: %#x, sp: %#x, lr = %#x, pc: %#x\n",
943 ctrl->vrs[FP_ARM], ctrl->vrs[FP_THUMB], ctrl->vrs[SP],
944 ctrl->vrs[LR], ctrl->vrs[PC]);
950 * Unwind a single frame starting with *sp for the symbol at *pc. It
951 * updates the *pc and *sp with the new values.
954 unwind_frame(struct ex_region_info *ri,
955 struct stackframe *frame,
956 struct vm_area_struct *vma_sp)
958 unsigned long high, low;
959 const struct unwind_idx *idx;
960 struct unwind_ctrl_block ctrl;
964 if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32)))
965 return -QUADD_URC_SP_INCORRECT;
967 /* only go to a higher address on the stack */
969 high = vma_sp->vm_end;
971 pr_debug("pc: %#lx, lr: %#lx, sp:%#lx, low/high: %#lx/%#lx\n",
972 frame->pc, frame->lr, frame->sp, low, high);
974 idx = unwind_find_idx(ri, frame->pc);
975 if (IS_ERR_OR_NULL(idx))
976 return -QUADD_URC_IDX_NOT_FOUND;
978 pr_debug("index was found by pc (%#lx): %p\n", frame->pc, idx);
980 ctrl.vrs[FP_THUMB] = frame->fp_thumb;
981 ctrl.vrs[FP_ARM] = frame->fp_arm;
983 ctrl.vrs[SP] = frame->sp;
984 ctrl.vrs[LR] = frame->lr;
987 err = read_mmap_data(ri->mmap, &idx->insn, &val);
993 return -QUADD_URC_CANTUNWIND;
994 } else if ((val & 0x80000000) == 0) {
995 /* prel31 to the unwind table */
996 ctrl.insn = (u32 *)(unsigned long)
997 mmap_prel31_to_addr(&idx->insn, ri,
998 QUADD_SEC_TYPE_EXIDX,
999 QUADD_SEC_TYPE_EXTAB, 1);
1001 return -QUADD_URC_EACCESS;
1002 } else if ((val & 0xff000000) == 0x80000000) {
1003 /* only personality routine 0 supported in the index */
1004 ctrl.insn = &idx->insn;
1006 pr_debug("unsupported personality routine %#x in the index at %p\n",
1008 return -QUADD_URC_UNSUPPORTED_PR;
1011 err = read_mmap_data(ri->mmap, ctrl.insn, &val);
1015 /* check the personality routine */
1016 if ((val & 0xff000000) == 0x80000000) {
1019 } else if ((val & 0xff000000) == 0x81000000) {
1021 ctrl.entries = 1 + ((val & 0x00ff0000) >> 16);
1023 pr_debug("unsupported personality routine %#x at %p\n",
1025 return -QUADD_URC_UNSUPPORTED_PR;
1028 while (ctrl.entries > 0) {
1029 err = unwind_exec_insn(ri->mmap, &ctrl);
1033 if (ctrl.vrs[SP] & 0x03 ||
1034 ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
1035 return -QUADD_URC_SP_INCORRECT;
1038 if (ctrl.vrs[PC] == 0)
1039 ctrl.vrs[PC] = ctrl.vrs[LR];
1041 if (!validate_pc_addr(ctrl.vrs[PC], sizeof(u32)))
1042 return -QUADD_URC_PC_INCORRECT;
1044 frame->fp_thumb = ctrl.vrs[FP_THUMB];
1045 frame->fp_arm = ctrl.vrs[FP_ARM];
1047 frame->sp = ctrl.vrs[SP];
1048 frame->lr = ctrl.vrs[LR];
1049 frame->pc = ctrl.vrs[PC];
1055 unwind_backtrace(struct quadd_callchain *cc,
1056 struct ex_region_info *ri,
1057 struct stackframe *frame,
1058 struct vm_area_struct *vma_sp,
1059 struct task_struct *task)
1061 struct ex_region_info ri_new;
1063 cc->urc_ut = QUADD_URC_FAILURE;
1065 pr_debug("fp_arm: %#lx, fp_thumb: %#lx, sp: %#lx, lr: %#lx, pc: %#lx\n",
1066 frame->fp_arm, frame->fp_thumb,
1067 frame->sp, frame->lr, frame->pc);
1068 pr_debug("vma_sp: %#lx - %#lx, length: %#lx\n",
1069 vma_sp->vm_start, vma_sp->vm_end,
1070 vma_sp->vm_end - vma_sp->vm_start);
1075 struct extab_info *ti;
1076 unsigned long where = frame->pc;
1077 struct vm_area_struct *vma_pc;
1078 struct mm_struct *mm = task->mm;
1083 if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32))) {
1084 cc->urc_ut = QUADD_URC_SP_INCORRECT;
1088 vma_pc = find_vma(mm, frame->pc);
1092 ti = &ri->ex_sec[QUADD_SEC_TYPE_EXIDX];
1094 if (!is_vma_addr(ti->addr, vma_pc, sizeof(u32))) {
1095 err = get_extabs_ehabi(vma_pc->vm_start, &ri_new);
1097 cc->urc_ut = QUADD_URC_TBL_NOT_EXIST;
1104 err = unwind_frame(ri, frame, vma_sp);
1106 pr_debug("end unwind, urc: %ld\n", err);
1111 pr_debug("function at [<%08lx>] from [<%08lx>]\n",
1114 cc->curr_sp = frame->sp;
1115 cc->curr_fp = frame->fp_arm;
1116 cc->curr_fp_thumb = frame->fp_thumb;
1117 cc->curr_pc = frame->pc;
1119 nr_added = quadd_callchain_store(cc, frame->pc,
1127 quadd_get_user_cc_arm32_ehabi(struct pt_regs *regs,
1128 struct quadd_callchain *cc,
1129 struct task_struct *task)
1132 int nr_prev = cc->nr;
1133 unsigned long ip, sp, lr;
1134 struct vm_area_struct *vma, *vma_sp;
1135 struct mm_struct *mm = task->mm;
1136 struct ex_region_info ri;
1137 struct stackframe frame;
1143 if (!compat_user_mode(regs))
1147 if (cc->urc_ut == QUADD_URC_LEVEL_TOO_DEEP)
1150 cc->urc_ut = QUADD_URC_FAILURE;
1157 frame.fp_thumb = cc->curr_fp_thumb;
1158 frame.fp_arm = cc->curr_fp;
1160 ip = instruction_pointer(regs);
1161 sp = quadd_user_stack_pointer(regs);
1162 lr = quadd_user_link_register(regs);
1165 frame.fp_thumb = regs->compat_usr(7);
1166 frame.fp_arm = regs->compat_usr(11);
1168 frame.fp_thumb = regs->ARM_r7;
1169 frame.fp_arm = regs->ARM_fp;
1177 pr_debug("pc: %#lx, lr: %#lx\n", ip, lr);
1178 pr_debug("sp: %#lx, fp_arm: %#lx, fp_thumb: %#lx\n",
1179 sp, frame.fp_arm, frame.fp_thumb);
1181 vma = find_vma(mm, ip);
1185 vma_sp = find_vma(mm, sp);
1189 err = get_extabs_ehabi(vma->vm_start, &ri);
1191 cc->urc_ut = QUADD_URC_TBL_NOT_EXIST;
1195 unwind_backtrace(cc, &ri, &frame, vma_sp, task);
1197 pr_debug("%s: exit, cc->nr: %d --> %d\n",
1198 __func__, nr_prev, cc->nr);
1204 quadd_is_ex_entry_exist_arm32_ehabi(struct pt_regs *regs,
1206 struct task_struct *task)
1210 const struct unwind_idx *idx;
1211 struct ex_region_info ri;
1212 struct vm_area_struct *vma;
1213 struct mm_struct *mm = task->mm;
1218 vma = find_vma(mm, addr);
1222 err = get_extabs_ehabi(vma->vm_start, &ri);
1226 idx = unwind_find_idx(&ri, addr);
1227 if (IS_ERR_OR_NULL(idx))
1230 err = read_mmap_data(ri.mmap, &idx->insn, &value);
1234 /* EXIDX_CANTUNWIND */
1241 int quadd_unwind_start(struct task_struct *task)
1244 struct regions_data *rd, *rd_old;
1246 rd = rd_alloc(QUADD_EXTABS_SIZE);
1247 if (IS_ERR_OR_NULL(rd)) {
1248 pr_err("%s: error: rd_alloc\n", __func__);
1252 err = quadd_dwarf_unwind_start();
1258 spin_lock(&ctx.lock);
1260 rd_old = rcu_dereference(ctx.rd);
1262 pr_warn("%s: warning: rd_old\n", __func__);
1264 rcu_assign_pointer(ctx.rd, rd);
1267 call_rcu(&rd_old->rcu, rd_free_rcu);
1269 ctx.pid = task->tgid;
1271 ctx.ex_tables_size = 0;
1273 spin_unlock(&ctx.lock);
1278 void quadd_unwind_stop(void)
1281 unsigned long nr_entries, size;
1282 struct regions_data *rd;
1283 struct ex_region_info *ri;
1285 quadd_dwarf_unwind_stop();
1287 spin_lock(&ctx.lock);
1291 rd = rcu_dereference(ctx.rd);
1295 nr_entries = rd->curr_nr;
1298 for (i = 0; i < nr_entries; i++) {
1299 ri = &rd->entries[i];
1300 clean_mmap(rd, ri->mmap, 0);
1303 rcu_assign_pointer(ctx.rd, NULL);
1304 call_rcu(&rd->rcu, rd_free_rcu);
1307 spin_unlock(&ctx.lock);
1308 pr_info("exception tables size: %lu bytes\n", ctx.ex_tables_size);
1311 int quadd_unwind_init(void)
1315 err = quadd_dwarf_unwind_init();
1319 spin_lock_init(&ctx.lock);
1320 rcu_assign_pointer(ctx.rd, NULL);
1326 void quadd_unwind_deinit(void)
1328 quadd_unwind_stop();