2 * drivers/misc/tegra-profiler/exh_tables.c
4 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/uaccess.h>
23 #include <linux/err.h>
24 #include <linux/rcupdate.h>
26 #include <linux/tegra_profiler.h>
28 #include "eh_unwind.h"
29 #include "backtrace.h"
/* Initial capacity of the sorted region array (grown by 50% when full). */
32 #define QUADD_EXTABS_SIZE 0x100
/* Number of pages spanned by the byte range [a, a + l). */
34 #define GET_NR_PAGES(a, l) \
35 ((PAGE_ALIGN((a) + (l)) - ((a) & PAGE_MASK)) / PAGE_SIZE)
/* Offset of this table inside the userspace-provided mmap copy. */
50 unsigned long mmap_offset;
/* Per-region ARM EHABI tables: .ARM.extab and .ARM.exidx descriptors. */
54 struct extab_info extab;
55 struct extab_info exidx;
/* One executable region (VMA) with its unwind tables; kept sorted by vm_start. */
58 struct ex_region_info {
59 unsigned long vm_start;
63 struct quadd_extabs_mmap *mmap;
65 struct list_head list;
/* RCU-protected sorted array of regions; curr_nr entries in use. */
69 struct ex_region_info *entries;
71 unsigned long curr_nr;
77 struct quadd_unwind_ctx {
78 struct regions_data *rd;
/* Running total of bytes of exception tables registered (stats only). */
81 unsigned long ex_tables_size;
91 unsigned long fp_thumb;
/* Interpreter state for one unwind-table entry (see ARM EHABI). */
99 struct unwind_ctrl_block {
100 u32 vrs[16]; /* virtual register set */
101 const u32 *insn; /* pointer to the current instr word */
102 int entries; /* number of entries left */
103 int byte; /* current byte in the instr word */
106 struct pin_pages_work {
107 struct work_struct work;
108 unsigned long vm_start;
/* Single module-wide unwinder context (rd is RCU-managed). */
111 struct quadd_unwind_ctx ctx;
/*
 * Return non-zero iff an nbytes-wide access at addr lies inside the
 * stack VMA.  Thin wrapper around is_vma_addr().
 */
114 validate_stack_addr(unsigned long addr,
115 struct vm_area_struct *vma,
116 unsigned long nbytes)
121 return is_vma_addr(addr, vma, nbytes);
/*
 * A candidate PC is valid when it is non-zero and an nbytes read at it
 * stays strictly below TASK_SIZE (i.e. within the user address space).
 */
125 validate_pc_addr(unsigned long addr, unsigned long nbytes)
127 return addr && addr < TASK_SIZE - nbytes;
/*
 * Check that an nbytes access at addr falls inside the kernel copy of the
 * userspace-supplied table mmap (mmap->data, sized by the backing VMA).
 * Logs (rate-limited to once) and fails on misalignment or out-of-range.
 * NOTE(review): alignment check and return statements are outside this view.
 */
131 validate_mmap_addr(struct quadd_extabs_mmap *mmap,
132 unsigned long addr, unsigned long nbytes)
134 struct vm_area_struct *vma = mmap->mmap_vma;
135 unsigned long size = vma->vm_end - vma->vm_start;
136 unsigned long data = (unsigned long)mmap->data;
139 pr_err_once("%s: error: unaligned address: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
140 __func__, addr, data, data + size,
141 vma->vm_start, vma->vm_end);
/* Range check: the full nbytes access must fit inside [data, data+size). */
145 if (addr < data || addr >= data + (size - nbytes)) {
146 pr_err_once("%s: error: addr: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
147 __func__, addr, data, data + size,
148 vma->vm_start, vma->vm_end);
/*
 * Safely read a word from a (possibly user) address via
 * probe_kernel_address(); on fault, map the error to -QUADD_URC_EACCESS.
 */
155 #define read_user_data(addr, retval) \
158 ret = probe_kernel_address(addr, retval); \
160 ret = -QUADD_URC_EACCESS; \
/*
 * Read a u32 from the mmap'ed table copy after bounds-validating the
 * address; returns -QUADD_URC_EACCESS on an out-of-range pointer.
 */
165 read_mmap_data(struct quadd_extabs_mmap *mmap, const u32 *addr, u32 *retval)
167 if (!validate_mmap_addr(mmap, (unsigned long)addr, sizeof(u32)))
168 return -QUADD_URC_EACCESS;
/*
 * Translate an address inside the target's exidx/extab section into the
 * corresponding address inside the kernel's mmap'ed copy of that table.
 * Inverse of mmap_addr_to_ex_addr().
 */
static inline unsigned long
175 ex_addr_to_mmap_addr(unsigned long addr,
176 struct ex_region_info *ri,
179 unsigned long offset;
180 struct extab_info *ei;
/* Pick the table descriptor, then rebase addr into the mmap copy. */
182 ei = exidx ? &ri->tabs.exidx : &ri->tabs.extab;
183 offset = addr - ei->addr;
185 return ei->mmap_offset + offset + (unsigned long)ri->mmap->data;
/*
 * Translate an address inside the kernel's mmap'ed table copy back into
 * the target process's exidx/extab address space.
 * Inverse of ex_addr_to_mmap_addr().
 */
static inline unsigned long
189 mmap_addr_to_ex_addr(unsigned long addr,
190 struct ex_region_info *ri,
193 unsigned long offset;
194 struct extab_info *ei;
196 ei = exidx ? &ri->tabs.exidx : &ri->tabs.extab;
197 offset = addr - ei->mmap_offset - (unsigned long)ri->mmap->data;
199 return ei->addr + offset;
/*
 * Decode an EHABI prel31 (31-bit PC-relative, signed) value at *ptr into
 * an absolute address.  Reads through read_user_data(); error path for a
 * faulting read is outside this view.
 */
203 prel31_to_addr(const u32 *ptr)
208 if (read_user_data(ptr, value))
211 /* sign-extend to 32 bits */
212 offset = (((s32)value) << 1) >> 1;
213 return (u32)(unsigned long)ptr + offset;
/*
 * prel31 decode for pointers that live in the mmap'ed table copy: the
 * relative offset is applied against the ORIGINAL table address
 * (src selected by is_src_exidx), and the result can be mapped back into
 * the mmap copy (dst selected by is_dst_exidx) when to_mmap is set.
 * NOTE(review): the read of *ptr into `value` is outside this view.
 */
217 mmap_prel31_to_addr(const u32 *ptr, struct ex_region_info *ri,
218 int is_src_exidx, int is_dst_exidx, int to_mmap)
221 unsigned long addr_res;
223 struct extab_info *ei_src, *ei_dst;
225 ei_src = is_src_exidx ? &ri->tabs.exidx : &ri->tabs.extab;
226 ei_dst = is_dst_exidx ? &ri->tabs.exidx : &ri->tabs.extab;
/* sign-extend prel31 to 32 bits */
229 offset = (((s32)value) << 1) >> 1;
/* Rebase ptr into the target's address space before applying the offset. */
231 addr = mmap_addr_to_ex_addr((unsigned long)ptr, ri, is_src_exidx);
236 addr_res = ex_addr_to_mmap_addr(addr_res, ri, is_dst_exidx);
/*
 * Insert new_entry into rd->entries keeping the array sorted by vm_start.
 * Fast paths handle empty array, duplicate singleton, new head and new
 * tail; otherwise a binary search finds the insertion point.
 * Presumably returns the number of entries added (0 or 1) — caller adds
 * the result to curr_nr; return statements are outside this view.
 */
242 add_ex_region(struct regions_data *rd,
243 struct ex_region_info *new_entry)
245 unsigned int i_min, i_max, mid;
246 struct ex_region_info *array = rd->entries;
247 unsigned long size = rd->curr_nr;
/* Empty array: new entry becomes the only element. */
253 memcpy(&array[0], new_entry, sizeof(*new_entry));
255 } else if (size == 1 && array[0].vm_start == new_entry->vm_start) {
/* New smallest key: shift everything right and insert at the front. */
262 if (array[0].vm_start > new_entry->vm_start) {
263 memmove(array + 1, array,
264 size * sizeof(*array));
265 memcpy(&array[0], new_entry, sizeof(*new_entry));
/* New largest key: append at the tail. */
267 } else if (array[size - 1].vm_start < new_entry->vm_start) {
268 memcpy(&array[size], new_entry, sizeof(*new_entry));
/* General case: binary search for the insertion point. */
272 while (i_min < i_max) {
273 mid = i_min + (i_max - i_min) / 2;
275 if (new_entry->vm_start <= array[mid].vm_start)
/* Duplicate key found: do not grow the array. */
281 if (array[i_max].vm_start == new_entry->vm_start) {
284 memmove(array + i_max + 1,
286 (size - i_max) * sizeof(*array));
287 memcpy(&array[i_max], new_entry, sizeof(*new_entry));
/*
 * Remove the entry matching entry->vm_start from the sorted array, using
 * the same binary-search structure as add_ex_region().  Presumably
 * returns the number of entries removed (0 or 1) — caller subtracts the
 * result from curr_nr; return statements are outside this view.
 */
293 remove_ex_region(struct regions_data *rd,
294 struct ex_region_info *entry)
296 unsigned int i_min, i_max, mid;
297 struct ex_region_info *array = rd->entries;
298 unsigned long size = rd->curr_nr;
/* Singleton fast path. */
307 if (array[0].vm_start == entry->vm_start)
/* Key outside the array's range: nothing to remove. */
313 if (array[0].vm_start > entry->vm_start)
315 else if (array[size - 1].vm_start < entry->vm_start)
321 while (i_min < i_max) {
322 mid = i_min + (i_max - i_min) / 2;
324 if (entry->vm_start <= array[mid].vm_start)
/* Found: close the gap by shifting the tail left over the hole. */
330 if (array[i_max].vm_start == entry->vm_start) {
331 memmove(array + i_max,
333 (size - i_max) * sizeof(*array));
/*
 * Binary search the sorted array for the region whose vm_start == key.
 * On a hit, copies the entry into *ri (a stable snapshot for the caller)
 * and returns a pointer into the array; otherwise presumably NULL (the
 * miss-return is outside this view).
 */
static struct ex_region_info *
341 search_ex_region(struct ex_region_info *array,
344 struct ex_region_info *ri)
346 unsigned int i_min, i_max, mid;
354 while (i_min < i_max) {
355 mid = i_min + (i_max - i_min) / 2;
357 if (key <= array[mid].vm_start)
363 if (array[i_max].vm_start == key) {
364 memcpy(ri, &array[i_max], sizeof(*ri));
365 return &array[i_max];
/*
 * RCU-side lookup: dereference ctx.rd and search for the region whose
 * vm_start == key, copying the result into *ri.  Returns 0 on success,
 * -ENOENT when absent.  The rcu_read_lock/unlock pair is outside this view.
 */
372 __search_ex_region(unsigned long key, struct ex_region_info *ri)
374 struct regions_data *rd;
375 struct ex_region_info *ri_p = NULL;
379 rd = rcu_dereference(ctx.rd);
383 ri_p = search_ex_region(rd->entries, rd->curr_nr, key, ri);
387 return ri_p ? 0 : -ENOENT;
/*
 * Allocate a regions_data with room for `size` entries.
 * NOTE(review): error handling for the two kzalloc() calls and the
 * rd->size assignment are outside this view.
 */
static struct regions_data *rd_alloc(unsigned long size)
392 struct regions_data *rd;
394 rd = kzalloc(sizeof(*rd), GFP_KERNEL);
398 rd->entries = kzalloc(size * sizeof(*rd->entries), GFP_KERNEL);
/* Free a regions_data and its entries array (body outside this view). */
static void rd_free(struct regions_data *rd)
/* call_rcu() callback: recover the regions_data from its rcu_head and free it. */
static void rd_free_rcu(struct rcu_head *rh)
420 struct regions_data *rd = container_of(rh, struct regions_data, rcu);
/*
 * Register the exception tables for one executable region, described by
 * the userspace-supplied `extabs` and backed by `mmap` (the kernel copy
 * of the tables).  Builds a new, possibly larger regions array as a
 * copy-on-write replacement, publishes it via RCU, and links the entry
 * on mmap->ex_entries so it can be torn down with the mmap.
 * Returns 0 on success / negative errno on the error paths (labels and
 * returns are outside this view).
 */
int quadd_unwind_set_extab(struct quadd_extables *extabs,
425 struct quadd_extabs_mmap *mmap)
428 unsigned long nr_entries, nr_added, new_size;
429 struct ex_region_info ri_entry;
430 struct extab_info *ti;
431 struct regions_data *rd, *rd_new;
432 struct ex_region_info *ex_entry;
/* ctx.lock serializes all writers of ctx.rd; readers use RCU. */
434 spin_lock(&ctx.lock);
436 rd = rcu_dereference(ctx.rd);
438 pr_warn("%s: warning: rd\n", __func__);
439 new_size = QUADD_EXTABS_SIZE;
443 nr_entries = rd->curr_nr;
/* Grow capacity by 50% when the current array is full. */
446 if (nr_entries >= new_size)
447 new_size += new_size >> 1;
449 rd_new = rd_alloc(new_size);
450 if (IS_ERR_OR_NULL(rd_new)) {
451 pr_err("%s: error: rd_alloc\n", __func__);
/* Copy the live array into the replacement before inserting. */
456 if (rd && nr_entries)
457 memcpy(rd_new->entries, rd->entries,
458 nr_entries * sizeof(*rd->entries));
460 rd_new->curr_nr = nr_entries;
462 ri_entry.vm_start = extabs->vm_start;
463 ri_entry.vm_end = extabs->vm_end;
465 ri_entry.mmap = mmap;
/* Fill in the .ARM.exidx descriptor; offsets come via the reserved[] ABI. */
467 ti = &ri_entry.tabs.exidx;
468 ti->addr = extabs->exidx.addr;
469 ti->length = extabs->exidx.length;
470 ti->mmap_offset = extabs->reserved[QUADD_EXT_IDX_EXIDX_OFFSET];
471 ctx.ex_tables_size += ti->length;
/* Likewise for .ARM.extab. */
473 ti = &ri_entry.tabs.extab;
474 ti->addr = extabs->extab.addr;
475 ti->length = extabs->extab.length;
476 ti->mmap_offset = extabs->reserved[QUADD_EXT_IDX_EXTAB_OFFSET];
477 ctx.ex_tables_size += ti->length;
479 nr_added = add_ex_region(rd_new, &ri_entry);
482 rd_new->curr_nr += nr_added;
/* Track this region on the mmap so delete_mmap can remove it later. */
484 ex_entry = kzalloc(sizeof(*ex_entry), GFP_KERNEL);
489 memcpy(ex_entry, &ri_entry, sizeof(*ex_entry));
491 INIT_LIST_HEAD(&ex_entry->list);
492 list_add_tail(&ex_entry->list, &mmap->ex_entries);
/* Publish the new array; old one is freed after a grace period. */
494 rcu_assign_pointer(ctx.rd, rd_new);
497 call_rcu(&rd->rcu, rd_free_rcu);
499 spin_unlock(&ctx.lock);
506 spin_unlock(&ctx.lock);
/*
 * Detach every region entry associated with `mmap` from rd (when rm_ext
 * is set, also removing it from the sorted array) and free the list
 * nodes.  Presumably returns the number of array entries removed — the
 * caller subtracts it from curr_nr; return is outside this view.
 */
clean_mmap(struct regions_data *rd, struct quadd_extabs_mmap *mmap, int rm_ext)
514 struct ex_region_info *entry, *next;
519 list_for_each_entry_safe(entry, next, &mmap->ex_entries, list) {
521 nr_removed += remove_ex_region(rd, entry);
523 list_del(&entry->list);
/*
 * Tear down all region entries backed by `mmap`: build a replacement
 * array (copy-on-write), strip the mmap's entries from it, then publish
 * via RCU and free the old array after a grace period.
 */
void quadd_unwind_delete_mmap(struct quadd_extabs_mmap *mmap)
532 unsigned long nr_entries, nr_removed, new_size;
533 struct regions_data *rd, *rd_new;
538 spin_lock(&ctx.lock);
540 rd = rcu_dereference(ctx.rd);
/* Nothing registered: nothing to do. */
541 if (!rd || !rd->curr_nr)
544 nr_entries = rd->curr_nr;
545 new_size = min_t(unsigned long, rd->size, nr_entries);
547 rd_new = rd_alloc(new_size);
548 if (IS_ERR_OR_NULL(rd_new)) {
549 pr_err("%s: error: rd_alloc\n", __func__);
552 rd_new->size = new_size;
553 rd_new->curr_nr = nr_entries;
555 memcpy(rd_new->entries, rd->entries,
556 nr_entries * sizeof(*rd->entries));
/* rm_ext=1: drop this mmap's regions from the new array as well. */
558 nr_removed = clean_mmap(rd_new, mmap, 1);
559 rd_new->curr_nr -= nr_removed;
561 rcu_assign_pointer(ctx.rd, rd_new);
562 call_rcu(&rd->rcu, rd_free_rcu);
565 spin_unlock(&ctx.lock);
/*
 * Binary-search the region's .ARM.exidx table (via the mmap copy) for
 * the index entry covering `addr`.  Entries are sorted by prel31-encoded
 * function start address; prel31 values are decoded against the ORIGINAL
 * exidx addresses (mmap_prel31_to_addr with to_mmap=0) so they compare
 * correctly with the target PC.  The out-of-range checks and the final
 * return are outside this view.
 */
static const struct unwind_idx *
569 unwind_find_idx(struct ex_region_info *ri, u32 addr)
571 unsigned long length;
573 struct unwind_idx *start;
574 struct unwind_idx *stop;
575 struct unwind_idx *mid = NULL;
576 length = ri->tabs.exidx.length / sizeof(*start);
578 if (unlikely(!length))
/* Table lives in the kernel mmap copy at exidx.mmap_offset. */
581 start = (struct unwind_idx *)((char *)ri->mmap->data +
582 ri->tabs.exidx.mmap_offset);
583 stop = start + length - 1;
585 value = (u32)mmap_prel31_to_addr(&start->addr_offset, ri, 1, 0, 0);
589 value = (u32)mmap_prel31_to_addr(&stop->addr_offset, ri, 1, 0, 0);
593 while (start < stop - 1) {
594 mid = start + ((stop - start) >> 1);
596 value = (u32)mmap_prel31_to_addr(&mid->addr_offset,
/*
 * Fetch the next unwind opcode byte from the instruction stream.
 * Bytes are consumed big-endian-within-word (ctrl->byte counts down);
 * when a word is exhausted (byte reaches 0) the stream advances to the
 * next word and ctrl->entries is decremented.  Errors are reported
 * through *err (-QUADD_URC_*).
 */
unwind_get_byte(struct quadd_extabs_mmap *mmap,
610 struct unwind_ctrl_block *ctrl, long *err)
/* Running past the advertised word count means the table is corrupt. */
617 if (ctrl->entries <= 0) {
618 pr_err_once("%s: error: corrupt unwind table\n", __func__);
619 *err = -QUADD_URC_TBL_IS_CORRUPT;
623 *err = read_mmap_data(mmap, ctrl->insn, &insn_word);
/* Extract byte `ctrl->byte` (3..0) of the current word. */
627 ret = (insn_word >> (ctrl->byte * 8)) & 0xff;
629 if (ctrl->byte == 0) {
/*
640  * Execute the current unwind instruction.
 *
 * Interprets one ARM EHABI personality-routine-0/1/2 opcode, updating the
 * virtual register set in ctrl->vrs (SP/LR/PC/FP_ARM/FP_THUMB).  Stack
 * pops go through read_user_data() so a faulting user stack fails with
 * -QUADD_URC_EACCESS rather than an oops.  Opcode semantics follow the
 * "Exception Handling ABI for the ARM Architecture" (EHABI) table 4.
 */
643 unwind_exec_insn(struct quadd_extabs_mmap *mmap,
644 struct unwind_ctrl_block *ctrl)
648 unsigned long insn = unwind_get_byte(mmap, ctrl, &err);
653 pr_debug("%s: insn = %08lx\n", __func__, insn);
/* 00xxxxxx: vsp += (xxxxxx << 2) + 4 */
655 if ((insn & 0xc0) == 0x00) {
656 ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
658 pr_debug("CMD_DATA_POP: vsp = vsp + %lu (new: %#x)\n",
659 ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
/* 01xxxxxx: vsp -= (xxxxxx << 2) + 4 */
660 } else if ((insn & 0xc0) == 0x40) {
661 ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
/* NOTE(review): mis-encoded en-dash in the format string below; should be '-'. */
663 pr_debug("CMD_DATA_PUSH: vsp = vsp – %lu (new: %#x)\n",
664 ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
/* 1000iiii iiiiiiii: pop {r4-r15} under 12-bit mask (0 = refuse to unwind) */
665 } else if ((insn & 0xf0) == 0x80) {
667 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
668 int load_sp, reg = 4;
670 insn = (insn << 8) | unwind_get_byte(mmap, ctrl, &err);
674 mask = insn & 0x0fff;
676 pr_debug("CMD_REFUSED: unwind: 'Refuse to unwind' instruction %04lx\n",
678 return -QUADD_URC_REFUSE_TO_UNWIND;
681 /* pop R4-R15 according to mask */
/* If r13 (SP) is in the mask, the popped value wins over vsp below. */
682 load_sp = mask & (1 << (13 - 4));
685 err = read_user_data(vsp++, ctrl->vrs[reg]);
689 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
695 ctrl->vrs[SP] = (unsigned long)vsp;
697 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
/* 1001nnnn (nnnn != 13, 15): vsp = r[nnnn] */
698 } else if ((insn & 0xf0) == 0x90 &&
699 (insn & 0x0d) != 0x0d) {
700 ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
701 pr_debug("CMD_REG_TO_SP: vsp = {r%lu}\n", insn & 0x0f);
/* 10100nnn / 10101nnn: pop r4-r[4+nnn] (and r14 for 10101nnn) */
702 } else if ((insn & 0xf0) == 0xa0) {
703 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
706 /* pop R4-R[4+bbb] */
707 for (reg = 4; reg <= 4 + (insn & 7); reg++) {
708 err = read_user_data(vsp++, ctrl->vrs[reg]);
712 pr_debug("CMD_REG_POP: pop {r%u}\n", reg);
716 err = read_user_data(vsp++, ctrl->vrs[14]);
720 pr_debug("CMD_REG_POP: pop {r14}\n");
723 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
724 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
/* 10110000: FINISH — if PC was never set, it comes from LR */
725 } else if (insn == 0xb0) {
726 if (ctrl->vrs[PC] == 0)
727 ctrl->vrs[PC] = ctrl->vrs[LR];
728 /* no further processing */
731 pr_debug("CMD_FINISH\n");
/* 10110001 0000iiii: pop {r0-r3} under 4-bit mask (0 / high bits = spare) */
732 } else if (insn == 0xb1) {
733 unsigned long mask = unwind_get_byte(mmap, ctrl, &err);
734 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
740 if (mask == 0 || mask & 0xf0) {
741 pr_debug("unwind: Spare encoding %04lx\n",
743 return -QUADD_URC_SPARE_ENCODING;
746 /* pop R0-R3 according to mask */
749 err = read_user_data(vsp++, ctrl->vrs[reg]);
753 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
759 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
760 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
/* 10110010 uleb128: vsp += 0x204 + (uleb128 << 2) */
761 } else if (insn == 0xb2) {
762 unsigned long uleb128 = unwind_get_byte(mmap, ctrl, &err);
766 ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
768 pr_debug("CMD_DATA_POP: vsp = vsp + %lu, new vsp: %#x\n",
769 0x204 + (uleb128 << 2), ctrl->vrs[SP]);
/* 0xb3/0xc8/0xc9: pop VFP double registers D[from]-D[from+count] */
770 } else if (insn == 0xb3 || insn == 0xc8 || insn == 0xc9) {
771 unsigned long data, reg_from, reg_to;
772 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
774 data = unwind_get_byte(mmap, ctrl, &err);
778 reg_from = (data & 0xf0) >> 4;
779 reg_to = reg_from + (data & 0x0f);
786 for (i = reg_from; i <= reg_to; i++)
/*
 * NOTE(review): duplicate store below — the second assignment (with the
 * u32 cast) makes the first redundant; the first line should be removed.
 */
792 ctrl->vrs[SP] = (unsigned long)vsp;
793 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
795 pr_debug("CMD_VFP_POP (%#lx %#lx): pop {D%lu-D%lu}\n",
796 insn, data, reg_from, reg_to);
797 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
/* 10111nnn / 11010nnn: pop {D8-D[8+nnn]} (0xb8 form adds FSTMFDX padding) */
798 } else if ((insn & 0xf8) == 0xb8 || (insn & 0xf8) == 0xd0) {
799 unsigned long reg_to;
800 unsigned long data = insn & 0x07;
801 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
805 for (i = 8; i <= reg_to; i++)
808 if ((insn & 0xf8) == 0xb8)
811 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
813 pr_debug("CMD_VFP_POP (%#lx): pop {D8-D%lu}\n",
815 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
817 pr_debug("error: unhandled instruction %02lx\n", insn);
818 return -QUADD_URC_UNHANDLED_INSTRUCTION;
821 pr_debug("%s: fp_arm: %#x, fp_thumb: %#x, sp: %#x, lr = %#x, pc: %#x\n",
823 ctrl->vrs[FP_ARM], ctrl->vrs[FP_THUMB], ctrl->vrs[SP],
824 ctrl->vrs[LR], ctrl->vrs[PC]);
/*
830  * Unwind a single frame starting with *sp for the symbol at *pc. It
831  * updates the *pc and *sp with the new values.
 *
 * Looks up the exidx entry for frame->pc, sets up a virtual register set
 * from the frame, then interprets unwind opcodes until FINISH, validating
 * SP (aligned, monotonically rising within the stack VMA) and PC after
 * each step.  Returns 0 on success or a negative QUADD_URC_* code.
 */
834 unwind_frame(struct ex_region_info *ri,
835 struct stackframe *frame,
836 struct vm_area_struct *vma_sp)
838 unsigned long high, low;
839 const struct unwind_idx *idx;
840 struct unwind_ctrl_block ctrl;
844 if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32)))
845 return -QUADD_URC_SP_INCORRECT;
847 /* only go to a higher address on the stack */
849 high = vma_sp->vm_end;
851 pr_debug("pc: %#lx, lr: %#lx, sp:%#lx, low/high: %#lx/%#lx\n",
852 frame->pc, frame->lr, frame->sp, low, high);
854 idx = unwind_find_idx(ri, frame->pc);
855 if (IS_ERR_OR_NULL(idx))
856 return -QUADD_URC_IDX_NOT_FOUND;
858 pr_debug("index was found by pc (%#lx): %p\n", frame->pc, idx);
/* Seed the virtual register set from the current frame. */
860 ctrl.vrs[FP_THUMB] = frame->fp_thumb;
861 ctrl.vrs[FP_ARM] = frame->fp_arm;
863 ctrl.vrs[SP] = frame->sp;
864 ctrl.vrs[LR] = frame->lr;
867 err = read_mmap_data(ri->mmap, &idx->insn, &val);
/* EXIDX_CANTUNWIND marker: the function told us not to unwind through it. */
873 return -QUADD_URC_CANTUNWIND;
874 } else if ((val & 0x80000000) == 0) {
875 /* prel31 to the unwind table */
876 ctrl.insn = (u32 *)(unsigned long)
877 mmap_prel31_to_addr(&idx->insn, ri, 1, 0, 1);
879 return -QUADD_URC_EACCESS;
880 } else if ((val & 0xff000000) == 0x80000000) {
881 /* only personality routine 0 supported in the index */
882 ctrl.insn = &idx->insn;
884 pr_debug("unsupported personality routine %#x in the index at %p\n",
886 return -QUADD_URC_UNSUPPORTED_PR;
889 err = read_mmap_data(ri->mmap, ctrl.insn, &val);
893 /* check the personality routine */
894 if ((val & 0xff000000) == 0x80000000) {
/* PR 1/2: byte 2 holds the count of additional 4-byte opcode words. */
897 } else if ((val & 0xff000000) == 0x81000000) {
899 ctrl.entries = 1 + ((val & 0x00ff0000) >> 16);
901 pr_debug("unsupported personality routine %#x at %p\n",
903 return -QUADD_URC_UNSUPPORTED_PR;
/* Interpret opcodes until the stream is exhausted or an error occurs. */
906 while (ctrl.entries > 0) {
907 err = unwind_exec_insn(ri->mmap, &ctrl);
/* SP must stay word-aligned and within [low, high) of the stack VMA. */
911 if (ctrl.vrs[SP] & 0x03 ||
912 ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
913 return -QUADD_URC_SP_INCORRECT;
916 if (ctrl.vrs[PC] == 0)
917 ctrl.vrs[PC] = ctrl.vrs[LR];
919 /* check for infinite loop */
920 if (frame->pc == ctrl.vrs[PC])
921 return -QUADD_URC_FAILURE;
923 if (!validate_pc_addr(ctrl.vrs[PC], sizeof(u32)))
924 return -QUADD_URC_PC_INCORRECT;
/* Commit the unwound register set back into the caller's frame. */
926 frame->fp_thumb = ctrl.vrs[FP_THUMB];
927 frame->fp_arm = ctrl.vrs[FP_ARM];
929 frame->sp = ctrl.vrs[SP];
930 frame->lr = ctrl.vrs[LR];
931 frame->pc = ctrl.vrs[PC];
/*
 * Walk the whole user stack: seed a stackframe from pt_regs, then
 * repeatedly unwind one frame and store each PC into the callchain.
 * When the PC crosses into a different VMA, the matching region's
 * exception tables are looked up again via __search_ex_region().
 * Loop structure and termination conditions are partly outside this view.
 */
937 unwind_backtrace(struct quadd_callchain *cc,
938 struct ex_region_info *ri,
939 struct pt_regs *regs,
940 struct vm_area_struct *vma_sp,
941 struct task_struct *task)
943 struct ex_region_info ri_new;
944 struct stackframe frame;
/* compat (AArch32-on-64) vs native ARM register naming. */
947 frame.fp_thumb = regs->compat_usr(7);
948 frame.fp_arm = regs->compat_usr(11);
950 frame.fp_thumb = regs->ARM_r7;
951 frame.fp_arm = regs->ARM_fp;
954 frame.pc = instruction_pointer(regs);
955 frame.sp = quadd_user_stack_pointer(regs);
956 frame.lr = quadd_user_link_register(regs);
958 cc->unw_rc = QUADD_URC_FAILURE;
960 pr_debug("fp_arm: %#lx, fp_thumb: %#lx, sp: %#lx, lr: %#lx, pc: %#lx\n",
961 frame.fp_arm, frame.fp_thumb, frame.sp, frame.lr, frame.pc);
962 pr_debug("vma_sp: %#lx - %#lx, length: %#lx\n",
963 vma_sp->vm_start, vma_sp->vm_end,
964 vma_sp->vm_end - vma_sp->vm_start);
969 unsigned long where = frame.pc;
970 struct vm_area_struct *vma_pc;
971 struct mm_struct *mm = task->mm;
976 if (!validate_stack_addr(frame.sp, vma_sp, sizeof(u32))) {
977 cc->unw_rc = -QUADD_URC_SP_INCORRECT;
981 vma_pc = find_vma(mm, frame.pc);
/* PC left the region whose tables we hold: re-resolve by VMA start. */
985 if (!is_vma_addr(ri->tabs.exidx.addr, vma_pc, sizeof(u32))) {
986 err = __search_ex_region(vma_pc->vm_start, &ri_new);
988 cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
995 err = unwind_frame(ri, &frame, vma_sp);
997 pr_debug("end unwind, urc: %ld\n", err);
1002 pr_debug("function at [<%08lx>] from [<%08lx>]\n",
/* Record progress so a fallback unwinder can resume from here. */
1005 cc->curr_sp = frame.sp;
1006 cc->curr_fp = frame.fp_arm;
1008 nr_added = quadd_callchain_store(cc, frame.pc);
/*
 * Entry point: capture a user-space callchain for `task` using the
 * registered exception tables (unwind-table method).  Resolves the VMAs
 * for the current PC and SP, finds the region's tables, and delegates to
 * unwind_backtrace().  Sets cc->unw_method/unw_rc for the caller; the
 * final return of cc->nr is outside this view.
 */
1015 quadd_get_user_callchain_ut(struct pt_regs *regs,
1016 struct quadd_callchain *cc,
1017 struct task_struct *task)
1020 unsigned long ip, sp;
1021 struct vm_area_struct *vma, *vma_sp;
1022 struct mm_struct *mm = task->mm;
1023 struct ex_region_info ri;
1025 cc->unw_method = QUADD_UNW_METHOD_EHT;
1026 cc->unw_rc = QUADD_URC_FAILURE;
/* Only 32-bit (compat) user mode is supported on 64-bit kernels. */
1029 if (!compat_user_mode(regs)) {
1030 pr_warn_once("user_mode 64: unsupported\n");
1038 ip = instruction_pointer(regs);
1039 sp = quadd_user_stack_pointer(regs);
1041 vma = find_vma(mm, ip);
1045 vma_sp = find_vma(mm, sp);
/* Look up the unwind tables registered for the PC's VMA. */
1049 err = __search_ex_region(vma->vm_start, &ri);
1051 cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
1055 unwind_backtrace(cc, &ri, regs, vma_sp, task);
/*
 * (Re)start profiling for `task`: install a fresh, empty regions array
 * (freeing any stale one after an RCU grace period) and reset the
 * table-size statistics.  Returns 0 on success, negative errno when
 * allocation fails.
 */
int quadd_unwind_start(struct task_struct *task)
1062 struct regions_data *rd, *rd_old;
1064 spin_lock(&ctx.lock);
/* A leftover array from a previous session is unexpected but handled. */
1066 rd_old = rcu_dereference(ctx.rd);
1068 pr_warn("%s: warning: rd_old\n", __func__);
1070 rd = rd_alloc(QUADD_EXTABS_SIZE);
1071 if (IS_ERR_OR_NULL(rd)) {
1072 pr_err("%s: error: rd_alloc\n", __func__);
1073 spin_unlock(&ctx.lock);
1077 rcu_assign_pointer(ctx.rd, rd);
1080 call_rcu(&rd_old->rcu, rd_free_rcu);
1082 ctx.pid = task->tgid;
1084 ctx.ex_tables_size = 0;
1086 spin_unlock(&ctx.lock);
/*
 * Stop profiling: detach every region's mmap bookkeeping (rm_ext=0 keeps
 * the array intact while iterating it), unpublish the regions array, and
 * free it after an RCU grace period.  Logs the total table size seen.
 */
void quadd_unwind_stop(void)
1094 unsigned long nr_entries, size;
1095 struct regions_data *rd;
1096 struct ex_region_info *ri;
1098 spin_lock(&ctx.lock);
1102 rd = rcu_dereference(ctx.rd);
1106 nr_entries = rd->curr_nr;
/* Free the per-mmap list nodes without mutating the array being walked. */
1109 for (i = 0; i < nr_entries; i++) {
1110 ri = &rd->entries[i];
1111 clean_mmap(rd, ri->mmap, 0);
1114 rcu_assign_pointer(ctx.rd, NULL);
1115 call_rcu(&rd->rcu, rd_free_rcu);
1118 spin_unlock(&ctx.lock);
1119 pr_info("exception tables size: %lu bytes\n", ctx.ex_tables_size);
/* Module init: set up the writer lock and start with no regions published. */
int quadd_unwind_init(void)
1124 spin_lock_init(&ctx.lock);
1125 rcu_assign_pointer(ctx.rd, NULL);
/* Module teardown: reuse the stop path to release all state (remainder of body outside this view). */
void quadd_unwind_deinit(void)
1133 quadd_unwind_stop();