2 * drivers/misc/tegra-profiler/exh_tables.c
4 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/uaccess.h>
23 #include <linux/err.h>
24 #include <linux/rcupdate.h>
26 #include <linux/tegra_profiler.h>
28 #include "eh_unwind.h"
29 #include "backtrace.h"
32 #define QUADD_EXTABS_SIZE 0x100
34 #define GET_NR_PAGES(a, l) \
35 ((PAGE_ALIGN((a) + (l)) - ((a) & PAGE_MASK)) / PAGE_SIZE)
/*
 * Data structures for the per-process ARM exception-table registry.
 * NOTE(review): this extract is garbled — original line numbers are fused
 * into the text and interior lines (struct openers, fields) are missing.
 * Code is kept byte-identical; only comments were added.
 */
50 unsigned long mmap_offset;
/* Pair of EHABI tables per region: .ARM.extab and .ARM.exidx descriptors. */
54 struct extab_info extab;
55 struct extab_info exidx;
/* One executable region with its exception tables and backing mmap. */
58 struct ex_region_info {
59 unsigned long vm_start;
63 struct quadd_extabs_mmap *mmap;
65 struct list_head list;
/* Array of regions kept sorted by vm_start; curr_nr entries in use. */
69 struct ex_region_info *entries;
71 unsigned long curr_nr;
77 struct quadd_unwind_ctx {
78 struct regions_data *rd;
/* running total of registered exception-table bytes (reported at stop) */
81 unsigned long ex_tables_size;
91 unsigned long fp_thumb;
/* Interpreter state for one EHABI unwind-instruction sequence. */
99 struct unwind_ctrl_block {
100 u32 vrs[16]; /* virtual register set */
101 const u32 *insn; /* pointer to the current instr word */
102 int entries; /* number of entries left */
103 int byte; /* current byte in the instr word */
106 struct pin_pages_work {
107 struct work_struct work;
108 unsigned long vm_start;
/* Module-wide unwinder context — presumably static in the full file; verify. */
111 struct quadd_unwind_ctx ctx;
/*
 * Check that [addr, addr + nbytes) lies within the given stack vma.
 * Delegates to is_vma_addr(); return type and any alignment checks are on
 * lines missing from this extract — TODO confirm against the full file.
 */
114 validate_stack_addr(unsigned long addr,
115 struct vm_area_struct *vma,
116 unsigned long nbytes)
121 return is_vma_addr(addr, vma, nbytes);
/*
 * Validate that an address range falls inside the user-space mmap that
 * holds the copied exception tables. Logs (rate-limited to once) and
 * presumably returns 0 on failure — the return statements are on lines
 * missing from this extract.
 */
125 validate_mmap_addr(struct quadd_extabs_mmap *mmap,
126 unsigned long addr, unsigned long nbytes)
128 struct vm_area_struct *vma = mmap->mmap_vma;
129 unsigned long size = vma->vm_end - vma->vm_start;
130 unsigned long data = (unsigned long)mmap->data;
/* The alignment-check condition (orig line ~132) is missing here. */
133 pr_err_once("%s: error: unaligned address: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
134 __func__, addr, data, data + size,
135 vma->vm_start, vma->vm_end);
/* Bounds check: the range must fit fully inside the mmap'ed data. */
139 if (addr < data || addr >= data + (size - nbytes)) {
140 pr_err_once("%s: error: addr: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
141 __func__, addr, data, data + size,
142 vma->vm_start, vma->vm_end);
/*
 * read_user_data(): fetch one value from user memory with page faults
 * disabled, mapping a fault to -QUADD_URC_EACCESS. Several macro lines
 * (do/while wrapper, ret declaration, error branch) are missing from
 * this extract.
 */
150 * TBD: why probe_kernel_address() can lead to random crashes
151 * on 64-bit kernel, and replacing it to __get_user() fixed the issue.
153 #define read_user_data(addr, retval) \
157 pagefault_disable(); \
158 ret = __get_user(retval, addr); \
159 pagefault_enable(); \
162 pr_debug("%s: failed for address: %p\n", \
164 ret = -QUADD_URC_EACCESS; \
/*
 * Read a u32 from the table mmap after validating the address; returns
 * -QUADD_URC_EACCESS when validation fails. The successful-read path is
 * on lines missing from this extract.
 */
171 read_mmap_data(struct quadd_extabs_mmap *mmap, const u32 *addr, u32 *retval)
173 if (!validate_mmap_addr(mmap, (unsigned long)addr, sizeof(u32)))
174 return -QUADD_URC_EACCESS;
/*
 * Translate a target-process exception-table address into the kernel's
 * view inside the mmap'ed copy: offset within the chosen table (exidx or
 * extab) plus that table's offset in the mmap buffer. The 'exidx'
 * parameter declaration is on a missing line.
 */
180 static inline unsigned long
181 ex_addr_to_mmap_addr(unsigned long addr,
182 struct ex_region_info *ri,
185 unsigned long offset;
186 struct extab_info *ei;
188 ei = exidx ? &ri->tabs.exidx : &ri->tabs.extab;
189 offset = addr - ei->addr;
191 return ei->mmap_offset + offset + (unsigned long)ri->mmap->data;
/*
 * Inverse of ex_addr_to_mmap_addr(): map an address inside the mmap'ed
 * table copy back to the corresponding address in the target process.
 */
194 static inline unsigned long
195 mmap_addr_to_ex_addr(unsigned long addr,
196 struct ex_region_info *ri,
199 unsigned long offset;
200 struct extab_info *ei;
202 ei = exidx ? &ri->tabs.exidx : &ri->tabs.extab;
203 offset = addr - ei->mmap_offset - (unsigned long)ri->mmap->data;
205 return ei->addr + offset;
/*
 * Decode an EHABI prel31 entry: read the 31-bit signed self-relative
 * offset from user memory and add it to the entry's own address.
 */
209 prel31_to_addr(const u32 *ptr)
214 if (read_user_data(ptr, value))
217 /* sign-extend to 32 bits */
218 offset = (((s32)value) << 1) >> 1;
219 return (u32)(unsigned long)ptr + offset;
/*
 * Decode a prel31 value located in the mmap'ed table copy. The pointer is
 * first mapped back to its target-process address (so the self-relative
 * arithmetic is done in target address space), then the result may be
 * mapped into the destination table's mmap view when 'to_mmap' is set.
 * The read of 'value' and several branches are on missing lines.
 */
223 mmap_prel31_to_addr(const u32 *ptr, struct ex_region_info *ri,
224 int is_src_exidx, int is_dst_exidx, int to_mmap)
227 unsigned long addr_res;
229 struct extab_info *ei_src, *ei_dst;
231 ei_src = is_src_exidx ? &ri->tabs.exidx : &ri->tabs.extab;
232 ei_dst = is_dst_exidx ? &ri->tabs.exidx : &ri->tabs.extab;
/* sign-extend the 31-bit offset */
235 offset = (((s32)value) << 1) >> 1;
237 addr = mmap_addr_to_ex_addr((unsigned long)ptr, ri, is_src_exidx);
242 addr_res = ex_addr_to_mmap_addr(addr_res, ri, is_dst_exidx);
/*
 * Insert a region entry into the vm_start-sorted array using binary
 * search, shifting later entries right. Duplicate vm_start appears to be
 * rejected (returns without adding) — the return statements are on
 * missing lines, so the exact return values cannot be confirmed here.
 */
248 add_ex_region(struct regions_data *rd,
249 struct ex_region_info *new_entry)
251 unsigned int i_min, i_max, mid;
252 struct ex_region_info *array = rd->entries;
253 unsigned long size = rd->curr_nr;
/* empty array: new entry becomes the first element */
259 memcpy(&array[0], new_entry, sizeof(*new_entry));
261 } else if (size == 1 && array[0].vm_start == new_entry->vm_start) {
/* fast paths: insert at front or append at back */
268 if (array[0].vm_start > new_entry->vm_start) {
269 memmove(array + 1, array,
270 size * sizeof(*array));
271 memcpy(&array[0], new_entry, sizeof(*new_entry));
273 } else if (array[size - 1].vm_start < new_entry->vm_start) {
274 memcpy(&array[size], new_entry, sizeof(*new_entry));
/* binary search for the insertion point */
278 while (i_min < i_max) {
279 mid = i_min + (i_max - i_min) / 2;
281 if (new_entry->vm_start <= array[mid].vm_start)
287 if (array[i_max].vm_start == new_entry->vm_start) {
290 memmove(array + i_max + 1,
292 (size - i_max) * sizeof(*array));
293 memcpy(&array[i_max], new_entry, sizeof(*new_entry));
/*
 * Remove the entry whose vm_start matches 'entry' from the sorted array,
 * shifting later elements left. Mirrors add_ex_region()'s binary search;
 * return values and the decrement of curr_nr are on missing lines.
 */
299 remove_ex_region(struct regions_data *rd,
300 struct ex_region_info *entry)
302 unsigned int i_min, i_max, mid;
303 struct ex_region_info *array = rd->entries;
304 unsigned long size = rd->curr_nr;
/* single-element fast path */
313 if (array[0].vm_start == entry->vm_start)
/* out-of-range fast paths: nothing to remove */
319 if (array[0].vm_start > entry->vm_start)
321 else if (array[size - 1].vm_start < entry->vm_start)
327 while (i_min < i_max) {
328 mid = i_min + (i_max - i_min) / 2;
330 if (entry->vm_start <= array[mid].vm_start)
336 if (array[i_max].vm_start == entry->vm_start) {
337 memmove(array + i_max,
339 (size - i_max) * sizeof(*array));
/*
 * Binary-search the sorted array for vm_start == key. On a hit, copy the
 * entry into *ri and return a pointer to the array slot; presumably NULL
 * on a miss (the miss-path return is on a missing line). The 'size' and
 * 'key' parameter declarations are also missing from this extract.
 */
346 static struct ex_region_info *
347 search_ex_region(struct ex_region_info *array,
350 struct ex_region_info *ri)
352 unsigned int i_min, i_max, mid;
360 while (i_min < i_max) {
361 mid = i_min + (i_max - i_min) / 2;
363 if (key <= array[mid].vm_start)
369 if (array[i_max].vm_start == key) {
370 memcpy(ri, &array[i_max], sizeof(*ri));
371 return &array[i_max];
/*
 * RCU-protected lookup of a region by vm_start. The rcu_read_lock/unlock
 * pair is on missing lines; rcu_dereference() here implies this runs
 * inside an RCU read-side critical section. Returns 0 and fills *ri on
 * success, -ENOENT otherwise.
 */
378 __search_ex_region(unsigned long key, struct ex_region_info *ri)
380 struct regions_data *rd;
381 struct ex_region_info *ri_p = NULL;
385 rd = rcu_dereference(ctx.rd);
389 ri_p = search_ex_region(rd->entries, rd->curr_nr, key, ri);
393 return ri_p ? 0 : -ENOENT;
/*
 * Allocate a regions_data with room for 'size' entries. Error handling
 * (NULL checks, setting rd->size, freeing on partial failure) is on
 * missing lines — callers treat the result with IS_ERR_OR_NULL().
 */
396 static struct regions_data *rd_alloc(unsigned long size)
398 struct regions_data *rd;
400 rd = kzalloc(sizeof(*rd), GFP_KERNEL);
404 rd->entries = kzalloc(size * sizeof(*rd->entries), GFP_KERNEL);
/* Free a regions_data and its entries array (body on missing lines). */
416 static void rd_free(struct regions_data *rd)
/*
 * RCU callback: recover the regions_data from its embedded rcu_head and
 * free it once all readers have finished (body on missing lines).
 */
424 static void rd_free_rcu(struct rcu_head *rh)
426 struct regions_data *rd = container_of(rh, struct regions_data, rcu);
/*
 * Register a new executable region's exception tables (copy-on-update
 * under ctx.lock): build a new regions_data, copy the old entries, insert
 * the new region, publish with rcu_assign_pointer() and retire the old
 * array via call_rcu(). Error-path labels and returns are on lines
 * missing from this extract.
 */
430 int quadd_unwind_set_extab(struct quadd_extables *extabs,
431 struct quadd_extabs_mmap *mmap)
434 unsigned long nr_entries, nr_added, new_size;
435 struct ex_region_info ri_entry;
436 struct extab_info *ti;
437 struct regions_data *rd, *rd_new;
438 struct ex_region_info *ex_entry;
440 spin_lock(&ctx.lock);
442 rd = rcu_dereference(ctx.rd);
444 pr_warn("%s: warning: rd\n", __func__);
445 new_size = QUADD_EXTABS_SIZE;
449 nr_entries = rd->curr_nr;
/* grow by 1.5x when the current array is full */
452 if (nr_entries >= new_size)
453 new_size += new_size >> 1;
455 rd_new = rd_alloc(new_size);
456 if (IS_ERR_OR_NULL(rd_new)) {
457 pr_err("%s: error: rd_alloc\n", __func__);
462 if (rd && nr_entries)
463 memcpy(rd_new->entries, rd->entries,
464 nr_entries * sizeof(*rd->entries));
466 rd_new->curr_nr = nr_entries;
/* fill the new region descriptor from userspace-provided extabs */
468 ri_entry.vm_start = extabs->vm_start;
469 ri_entry.vm_end = extabs->vm_end;
471 ri_entry.mmap = mmap;
473 ti = &ri_entry.tabs.exidx;
474 ti->addr = extabs->exidx.addr;
475 ti->length = extabs->exidx.length;
476 ti->mmap_offset = extabs->reserved[QUADD_EXT_IDX_EXIDX_OFFSET];
477 ctx.ex_tables_size += ti->length;
479 ti = &ri_entry.tabs.extab;
480 ti->addr = extabs->extab.addr;
481 ti->length = extabs->extab.length;
482 ti->mmap_offset = extabs->reserved[QUADD_EXT_IDX_EXTAB_OFFSET];
483 ctx.ex_tables_size += ti->length;
485 nr_added = add_ex_region(rd_new, &ri_entry);
488 rd_new->curr_nr += nr_added;
/* track the entry on the mmap so delete_mmap can remove it later */
490 ex_entry = kzalloc(sizeof(*ex_entry), GFP_KERNEL);
495 memcpy(ex_entry, &ri_entry, sizeof(*ex_entry));
497 INIT_LIST_HEAD(&ex_entry->list);
498 list_add_tail(&ex_entry->list, &mmap->ex_entries);
/* publish the new array; old one is freed after the RCU grace period */
500 rcu_assign_pointer(ctx.rd, rd_new);
503 call_rcu(&rd->rcu, rd_free_rcu);
505 spin_unlock(&ctx.lock);
512 spin_unlock(&ctx.lock);
/*
 * Remove all region entries tracked on an mmap from 'rd', returning the
 * number removed (accumulator declaration and return are on missing
 * lines). The 'rm_ext' flag presumably also frees the list entries —
 * the kfree is not visible in this extract; confirm against full file.
 */
517 clean_mmap(struct regions_data *rd, struct quadd_extabs_mmap *mmap, int rm_ext)
520 struct ex_region_info *entry, *next;
525 list_for_each_entry_safe(entry, next, &mmap->ex_entries, list) {
527 nr_removed += remove_ex_region(rd, entry);
529 list_del(&entry->list);
/*
 * Unregister all regions backed by 'mmap' (mmap being torn down):
 * copy-on-update of the regions array under ctx.lock, same RCU publish/
 * retire pattern as quadd_unwind_set_extab(). Error-path labels are on
 * missing lines.
 */
536 void quadd_unwind_delete_mmap(struct quadd_extabs_mmap *mmap)
538 unsigned long nr_entries, nr_removed, new_size;
539 struct regions_data *rd, *rd_new;
544 spin_lock(&ctx.lock);
546 rd = rcu_dereference(ctx.rd);
547 if (!rd || !rd->curr_nr)
550 nr_entries = rd->curr_nr;
551 new_size = min_t(unsigned long, rd->size, nr_entries);
553 rd_new = rd_alloc(new_size);
554 if (IS_ERR_OR_NULL(rd_new)) {
555 pr_err("%s: error: rd_alloc\n", __func__);
558 rd_new->size = new_size;
559 rd_new->curr_nr = nr_entries;
561 memcpy(rd_new->entries, rd->entries,
562 nr_entries * sizeof(*rd->entries));
/* drop every entry belonging to this mmap from the fresh copy */
564 nr_removed = clean_mmap(rd_new, mmap, 1);
565 rd_new->curr_nr -= nr_removed;
567 rcu_assign_pointer(ctx.rd, rd_new);
568 call_rcu(&rd->rcu, rd_free_rcu);
571 spin_unlock(&ctx.lock);
/*
 * Binary-search the region's .ARM.exidx table (via its mmap'ed copy) for
 * the index entry covering 'addr'. Entry function addresses are prel31
 * and decoded through mmap_prel31_to_addr(). Range checks and the final
 * return are on missing lines.
 */
574 static const struct unwind_idx *
575 unwind_find_idx(struct ex_region_info *ri, u32 addr)
577 unsigned long length;
579 struct unwind_idx *start;
580 struct unwind_idx *stop;
581 struct unwind_idx *mid = NULL;
582 length = ri->tabs.exidx.length / sizeof(*start);
584 if (unlikely(!length))
587 start = (struct unwind_idx *)((char *)ri->mmap->data +
588 ri->tabs.exidx.mmap_offset);
589 stop = start + length - 1;
/* reject addr outside [first entry, last entry] — checks on missing lines */
591 value = (u32)mmap_prel31_to_addr(&start->addr_offset, ri, 1, 0, 0);
595 value = (u32)mmap_prel31_to_addr(&stop->addr_offset, ri, 1, 0, 0);
599 while (start < stop - 1) {
600 mid = start + ((stop - start) >> 1);
602 value = (u32)mmap_prel31_to_addr(&mid->addr_offset,
/*
 * Fetch the next unwind-instruction byte from the table copy, consuming
 * bytes most-significant-first within each 32-bit word; advances
 * ctrl->insn when the word is exhausted (the advance and the byte-counter
 * updates are on missing lines). Sets *err on failure.
 */
615 unwind_get_byte(struct quadd_extabs_mmap *mmap,
616 struct unwind_ctrl_block *ctrl, long *err)
623 if (ctrl->entries <= 0) {
624 pr_err_once("%s: error: corrupt unwind table\n", __func__);
625 *err = -QUADD_URC_TBL_IS_CORRUPT;
629 *err = read_mmap_data(mmap, ctrl->insn, &insn_word);
633 ret = (insn_word >> (ctrl->byte * 8)) & 0xff;
635 if (ctrl->byte == 0) {
/*
 * Execute one ARM EHABI unwind instruction against the virtual register
 * set in ctrl->vrs, popping registers from the user stack via
 * read_user_data(). Many loop bodies, error branches and closing braces
 * are on lines missing from this extract; code kept byte-identical.
 */
646 * Execute the current unwind instruction.
649 unwind_exec_insn(struct quadd_extabs_mmap *mmap,
650 struct unwind_ctrl_block *ctrl)
654 unsigned long insn = unwind_get_byte(mmap, ctrl, &err);
659 pr_debug("%s: insn = %08lx\n", __func__, insn);
/* 00xxxxxx: vsp += (x << 2) + 4 */
661 if ((insn & 0xc0) == 0x00) {
662 ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
664 pr_debug("CMD_DATA_POP: vsp = vsp + %lu (new: %#x)\n",
665 ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
/* 01xxxxxx: vsp -= (x << 2) + 4 */
666 } else if ((insn & 0xc0) == 0x40) {
667 ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
669 pr_debug("CMD_DATA_PUSH: vsp = vsp – %lu (new: %#x)\n",
670 ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
/* 1000xxxx xxxxxxxx: pop registers under a 12-bit mask (r4-r15) */
671 } else if ((insn & 0xf0) == 0x80) {
673 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
674 int load_sp, reg = 4;
676 insn = (insn << 8) | unwind_get_byte(mmap, ctrl, &err);
680 mask = insn & 0x0fff;
/* mask == 0 means "refuse to unwind" (check condition on missing line) */
682 pr_debug("CMD_REFUSED: unwind: 'Refuse to unwind' instruction %04lx\n",
684 return -QUADD_URC_REFUSE_TO_UNWIND;
687 /* pop R4-R15 according to mask */
688 load_sp = mask & (1 << (13 - 4));
691 err = read_user_data(vsp++, ctrl->vrs[reg]);
695 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
/* NOTE(review): no (u32) cast here unlike the other SP updates — verify */
701 ctrl->vrs[SP] = (unsigned long)vsp;
703 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
/* 1001nnnn (nnnn != 13,15): vsp = r[nnnn] */
704 } else if ((insn & 0xf0) == 0x90 &&
705 (insn & 0x0d) != 0x0d) {
706 ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
707 pr_debug("CMD_REG_TO_SP: vsp = {r%lu}\n", insn & 0x0f);
/* 1010xbbb: pop r4-r[4+bbb], optionally r14 */
708 } else if ((insn & 0xf0) == 0xa0) {
709 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
712 /* pop R4-R[4+bbb] */
713 for (reg = 4; reg <= 4 + (insn & 7); reg++) {
714 err = read_user_data(vsp++, ctrl->vrs[reg]);
718 pr_debug("CMD_REG_POP: pop {r%u}\n", reg);
722 err = read_user_data(vsp++, ctrl->vrs[14]);
726 pr_debug("CMD_REG_POP: pop {r14}\n");
729 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
730 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
/* 10110000: finish — PC defaults to LR if unset */
731 } else if (insn == 0xb0) {
732 if (ctrl->vrs[PC] == 0)
733 ctrl->vrs[PC] = ctrl->vrs[LR];
734 /* no further processing */
737 pr_debug("CMD_FINISH\n");
/* 10110001: pop r0-r3 under a 4-bit mask */
738 } else if (insn == 0xb1) {
739 unsigned long mask = unwind_get_byte(mmap, ctrl, &err);
740 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
746 if (mask == 0 || mask & 0xf0) {
747 pr_debug("unwind: Spare encoding %04lx\n",
749 return -QUADD_URC_SPARE_ENCODING;
752 /* pop R0-R3 according to mask */
755 err = read_user_data(vsp++, ctrl->vrs[reg]);
759 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
765 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
766 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
/* 10110010 uleb128: vsp += 0x204 + (uleb128 << 2) */
767 } else if (insn == 0xb2) {
768 unsigned long uleb128 = unwind_get_byte(mmap, ctrl, &err);
772 ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
774 pr_debug("CMD_DATA_POP: vsp = vsp + %lu, new vsp: %#x\n",
775 0x204 + (uleb128 << 2), ctrl->vrs[SP]);
/* 0xb3/0xc8/0xc9: pop VFP double registers D[from]-D[from+count] */
776 } else if (insn == 0xb3 || insn == 0xc8 || insn == 0xc9) {
777 unsigned long data, reg_from, reg_to;
778 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
780 data = unwind_get_byte(mmap, ctrl, &err);
784 reg_from = (data & 0xf0) >> 4;
785 reg_to = reg_from + (data & 0x0f);
792 for (i = reg_from; i <= reg_to; i++)
/* NOTE(review): two consecutive SP assignments — looks redundant; verify
 * against the full file before changing anything. */
798 ctrl->vrs[SP] = (unsigned long)vsp;
799 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
801 pr_debug("CMD_VFP_POP (%#lx %#lx): pop {D%lu-D%lu}\n",
802 insn, data, reg_from, reg_to);
803 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
/* 10111nnn / 11010nnn: pop D8-D[8+nnn] */
804 } else if ((insn & 0xf8) == 0xb8 || (insn & 0xf8) == 0xd0) {
805 unsigned long reg_to;
806 unsigned long data = insn & 0x07;
807 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
811 for (i = 8; i <= reg_to; i++)
814 if ((insn & 0xf8) == 0xb8)
817 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
819 pr_debug("CMD_VFP_POP (%#lx): pop {D8-D%lu}\n",
821 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
823 pr_debug("error: unhandled instruction %02lx\n", insn);
824 return -QUADD_URC_UNHANDLED_INSTRUCTION;
827 pr_debug("%s: fp_arm: %#x, fp_thumb: %#x, sp: %#x, lr = %#x, pc: %#x\n",
829 ctrl->vrs[FP_ARM], ctrl->vrs[FP_THUMB], ctrl->vrs[SP],
830 ctrl->vrs[LR], ctrl->vrs[PC]);
/*
 * Unwind one stack frame: find the exidx entry for frame->pc, set up the
 * virtual register set, run the unwind instructions, validate the
 * resulting SP/PC and write the new frame state back. Numerous interior
 * lines (variable declarations, error branches, braces) are missing from
 * this extract; code kept byte-identical.
 */
836 * Unwind a single frame starting with *sp for the symbol at *pc. It
837 * updates the *pc and *sp with the new values.
840 unwind_frame(struct ex_region_info *ri,
841 struct stackframe *frame,
842 struct vm_area_struct *vma_sp,
843 unsigned int *unw_type)
845 unsigned long high, low;
846 const struct unwind_idx *idx;
847 struct unwind_ctrl_block ctrl;
851 if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32)))
852 return -QUADD_URC_SP_INCORRECT;
854 /* only go to a higher address on the stack */
856 high = vma_sp->vm_end;
858 pr_debug("pc: %#lx, lr: %#lx, sp:%#lx, low/high: %#lx/%#lx\n",
859 frame->pc, frame->lr, frame->sp, low, high);
861 idx = unwind_find_idx(ri, frame->pc);
862 if (IS_ERR_OR_NULL(idx))
863 return -QUADD_URC_IDX_NOT_FOUND;
865 pr_debug("index was found by pc (%#lx): %p\n", frame->pc, idx);
/* seed the virtual register set from the current frame */
867 ctrl.vrs[FP_THUMB] = frame->fp_thumb;
868 ctrl.vrs[FP_ARM] = frame->fp_arm;
870 ctrl.vrs[SP] = frame->sp;
871 ctrl.vrs[LR] = frame->lr;
874 err = read_mmap_data(ri->mmap, &idx->insn, &val);
/* EXIDX_CANTUNWIND (value 1) — check condition is on a missing line */
880 return -QUADD_URC_CANTUNWIND;
881 } else if ((val & 0x80000000) == 0) {
882 /* prel31 to the unwind table */
883 ctrl.insn = (u32 *)(unsigned long)
884 mmap_prel31_to_addr(&idx->insn, ri, 1, 0, 1);
886 return -QUADD_URC_EACCESS;
887 } else if ((val & 0xff000000) == 0x80000000) {
888 /* only personality routine 0 supported in the index */
889 ctrl.insn = &idx->insn;
891 pr_debug("unsupported personality routine %#x in the index at %p\n",
893 return -QUADD_URC_UNSUPPORTED_PR;
896 err = read_mmap_data(ri->mmap, ctrl.insn, &val);
900 /* check the personality routine */
901 if ((val & 0xff000000) == 0x80000000) {
/* long-form PR 1: extra instruction words counted in bits 16-23 */
904 } else if ((val & 0xff000000) == 0x81000000) {
906 ctrl.entries = 1 + ((val & 0x00ff0000) >> 16);
908 pr_debug("unsupported personality routine %#x at %p\n",
910 return -QUADD_URC_UNSUPPORTED_PR;
/* execute instructions until finished or an error occurs */
913 while (ctrl.entries > 0) {
914 err = unwind_exec_insn(ri->mmap, &ctrl);
/* sanity: new SP must be word-aligned and stay within the stack vma */
918 if (ctrl.vrs[SP] & 0x03 ||
919 ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
920 return -QUADD_URC_SP_INCORRECT;
923 if (ctrl.vrs[PC] == 0) {
924 ctrl.vrs[PC] = ctrl.vrs[LR];
925 *unw_type = QUADD_UNW_TYPE_LR_UT;
927 *unw_type = QUADD_UNW_TYPE_UT;
930 if (!validate_pc_addr(ctrl.vrs[PC], sizeof(u32)))
931 return -QUADD_URC_PC_INCORRECT;
/* commit the unwound register state back to the caller's frame */
933 frame->fp_thumb = ctrl.vrs[FP_THUMB];
934 frame->fp_arm = ctrl.vrs[FP_ARM];
936 frame->sp = ctrl.vrs[SP];
937 frame->lr = ctrl.vrs[LR];
938 frame->pc = ctrl.vrs[PC];
/*
 * Walk frames repeatedly (loop header is on a missing line), switching
 * exception-table regions when the PC crosses into a different vma, and
 * store each resolved PC into the callchain via quadd_callchain_store().
 */
944 unwind_backtrace(struct quadd_callchain *cc,
945 struct ex_region_info *ri,
946 struct stackframe *frame,
947 struct vm_area_struct *vma_sp,
948 struct task_struct *task)
950 unsigned int unw_type;
951 struct ex_region_info ri_new;
953 cc->unw_rc = QUADD_URC_FAILURE;
955 pr_debug("fp_arm: %#lx, fp_thumb: %#lx, sp: %#lx, lr: %#lx, pc: %#lx\n",
956 frame->fp_arm, frame->fp_thumb,
957 frame->sp, frame->lr, frame->pc);
958 pr_debug("vma_sp: %#lx - %#lx, length: %#lx\n",
959 vma_sp->vm_start, vma_sp->vm_end,
960 vma_sp->vm_end - vma_sp->vm_start);
965 unsigned long where = frame->pc;
966 struct vm_area_struct *vma_pc;
967 struct mm_struct *mm = task->mm;
972 if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32))) {
/* NOTE(review): negated URC constant, unlike positive codes elsewhere —
 * looks suspicious but kept as-is; verify against the full file. */
973 cc->unw_rc = -QUADD_URC_SP_INCORRECT;
977 vma_pc = find_vma(mm, frame->pc);
/* PC left the region whose tables we hold: look up the new region */
981 if (!is_vma_addr(ri->tabs.exidx.addr, vma_pc, sizeof(u32))) {
982 err = __search_ex_region(vma_pc->vm_start, &ri_new);
984 cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
991 err = unwind_frame(ri, frame, vma_sp, &unw_type);
993 pr_debug("end unwind, urc: %ld\n", err);
998 pr_debug("function at [<%08lx>] from [<%08lx>]\n",
1001 cc->curr_sp = frame->sp;
1002 cc->curr_fp = frame->fp_arm;
1003 cc->curr_pc = frame->pc;
1005 nr_added = quadd_callchain_store(cc, frame->pc, unw_type);
/*
 * Entry point: build a user-space callchain for 'task' using the
 * registered exception tables. Only 32-bit (compat) user mode is
 * supported. Seeds the initial frame from pt_regs (or from the saved
 * cc->curr_* state on continuation — the branch condition is on missing
 * lines), finds the PC/SP vmas and delegates to unwind_backtrace().
 */
1012 quadd_get_user_callchain_ut(struct pt_regs *regs,
1013 struct quadd_callchain *cc,
1014 struct task_struct *task)
1017 int nr_prev = cc->nr;
1018 unsigned long ip, sp, lr;
1019 struct vm_area_struct *vma, *vma_sp;
1020 struct mm_struct *mm = task->mm;
1021 struct ex_region_info ri;
1022 struct stackframe frame;
1028 if (!compat_user_mode(regs)) {
1029 pr_warn_once("user_mode 64: unsupported\n");
1034 if (cc->unw_rc == QUADD_URC_LEVEL_TOO_DEEP)
1037 cc->unw_rc = QUADD_URC_FAILURE;
/* continuation path: resume from state saved by a previous pass */
1045 frame.fp_arm = cc->curr_fp;
1047 ip = instruction_pointer(regs);
1048 sp = quadd_user_stack_pointer(regs);
1049 lr = quadd_user_link_register(regs);
/* compat (AArch32-on-64) vs native ARM register layouts */
1052 frame.fp_thumb = regs->compat_usr(7);
1053 frame.fp_arm = regs->compat_usr(11);
1055 frame.fp_thumb = regs->ARM_r7;
1056 frame.fp_arm = regs->ARM_fp;
1064 vma = find_vma(mm, ip);
1068 vma_sp = find_vma(mm, sp);
1072 err = __search_ex_region(vma->vm_start, &ri);
1074 cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
1078 unwind_backtrace(cc, &ri, &frame, vma_sp, task);
/*
 * Query whether an exidx entry exists (and is usable) for 'addr' in the
 * given task: region lookup, index search, then a read of the entry's
 * instruction word. The value checks and returns are on missing lines.
 */
1084 quadd_is_ex_entry_exist(struct pt_regs *regs,
1086 struct task_struct *task)
1090 const struct unwind_idx *idx;
1091 struct ex_region_info ri;
1092 struct vm_area_struct *vma;
1093 struct mm_struct *mm = task->mm;
1099 if (!compat_user_mode(regs))
1103 vma = find_vma(mm, addr);
1107 err = __search_ex_region(vma->vm_start, &ri);
1111 idx = unwind_find_idx(&ri, addr);
1112 if (IS_ERR_OR_NULL(idx))
1115 err = read_mmap_data(ri.mmap, &idx->insn, &value);
/*
 * Start a profiling session for 'task': install a fresh (empty) regions
 * array under ctx.lock, retiring any previous one via call_rcu, and
 * record the target tgid. Returns 0 on success / -ENOMEM on allocation
 * failure (return statements are on missing lines — verify).
 */
1125 int quadd_unwind_start(struct task_struct *task)
1127 struct regions_data *rd, *rd_old;
/* allocate outside the spinlock (rd_alloc uses GFP_KERNEL) */
1128 rd = rd_alloc(QUADD_EXTABS_SIZE);
1130 spin_lock(&ctx.lock);
1132 rd_old = rcu_dereference(ctx.rd);
1134 pr_warn("%s: warning: rd_old\n", __func__);
1136 if (IS_ERR_OR_NULL(rd)) {
1137 pr_err("%s: error: rd_alloc\n", __func__);
1138 spin_unlock(&ctx.lock);
1142 rcu_assign_pointer(ctx.rd, rd);
1145 call_rcu(&rd_old->rcu, rd_free_rcu);
1147 ctx.pid = task->tgid;
1149 ctx.ex_tables_size = 0;
1151 spin_unlock(&ctx.lock);
/*
 * Stop the profiling session: detach every mmap's entry list from the
 * current regions array, unpublish the array (ctx.rd = NULL) and free it
 * after the RCU grace period. Reports the total exception-table size.
 */
1156 void quadd_unwind_stop(void)
1159 unsigned long nr_entries, size;
1160 struct regions_data *rd;
1161 struct ex_region_info *ri;
1163 spin_lock(&ctx.lock);
1167 rd = rcu_dereference(ctx.rd);
1171 nr_entries = rd->curr_nr;
/* rm_ext == 0: unlink entries but do not remove regions permanently */
1174 for (i = 0; i < nr_entries; i++) {
1175 ri = &rd->entries[i];
1176 clean_mmap(rd, ri->mmap, 0);
1179 rcu_assign_pointer(ctx.rd, NULL);
1180 call_rcu(&rd->rcu, rd_free_rcu);
1183 spin_unlock(&ctx.lock);
1184 pr_info("exception tables size: %lu bytes\n", ctx.ex_tables_size);
/* One-time module init: set up the context lock and a NULL regions array. */
1187 int quadd_unwind_init(void)
1189 spin_lock_init(&ctx.lock);
1190 rcu_assign_pointer(ctx.rd, NULL);
/*
 * Module teardown: stop any active session. An rcu_barrier() to drain
 * pending rd_free_rcu callbacks is presumably on a missing line — verify.
 */
1196 void quadd_unwind_deinit(void)
1198 quadd_unwind_stop();