2 * drivers/misc/tegra-profiler/exh_tables.c
4 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/uaccess.h>
23 #include <linux/err.h>
24 #include <linux/rcupdate.h>
26 #include <linux/tegra_profiler.h>
28 #include "eh_unwind.h"
29 #include "backtrace.h"
/* Initial capacity (in entries) of the exception-region array. */
31 #define QUADD_EXTABS_SIZE 0x100

/*
 * Number of pages spanned by the byte range [a, a + l):
 * the end is rounded up to a page boundary, the start rounded down,
 * and the difference divided by PAGE_SIZE.
 */
33 #define GET_NR_PAGES(a, l) \
34 ((PAGE_ALIGN((a) + (l)) - ((a) & PAGE_MASK)) / PAGE_SIZE)
/*
 * NOTE(review): this extract is fragmentary — struct openings/closings and
 * several fields are missing between the visible lines.  Comments below
 * describe only what the visible members show.
 */
/* Pair of ARM EHABI tables for one mapped region: .ARM.exidx and .ARM.extab. */
51 struct extab_info exidx;
52 struct extab_info extab;
/* One executable region with its exception tables; keyed by vm_start. */
55 struct ex_region_info {
56 unsigned long vm_start;
/* RCU-managed, sorted array of regions; curr_nr is the used entry count. */
63 struct ex_region_info *entries;
65 unsigned long curr_nr;
/* Global unwinder context: current region array plus pinning statistics. */
71 struct quadd_unwind_ctx {
72 struct regions_data *rd;
76 unsigned long pinned_pages;
77 unsigned long pinned_size;
/* Frame pointers tracked separately for Thumb (r7) and ARM (r11) code. */
88 unsigned long fp_thumb;
/* Per-frame unwind state while interpreting EHABI unwind instructions. */
96 struct unwind_ctrl_block {
97 u32 vrs[16]; /* virtual register set */
98 const u32 *insn; /* pointer to the current instr word */
99 int entries; /* number of entries left */
100 int byte; /* current byte in the instr word */
/* Deferred page-pinning request, executed from a workqueue. */
103 struct pin_pages_work {
104 struct work_struct work;
105 unsigned long vm_start;
/* Single global unwinder context instance. */
108 struct quadd_unwind_ctx ctx;
/*
 * Check that [addr, addr + nbytes) lies inside the stack VMA.
 * Delegates the range test to is_vma_addr().
 */
111 validate_stack_addr(unsigned long addr,
112 struct vm_area_struct *vma,
113 unsigned long nbytes)
118 return is_vma_addr(addr, vma, nbytes);
/*
 * A PC is plausible when it is non-zero and [addr, addr + nbytes)
 * fits below TASK_SIZE (i.e. stays within user address space).
 */
122 validate_pc_addr(unsigned long addr, unsigned long nbytes)
124 return addr && addr < TASK_SIZE - nbytes;
/*
 * Safely read a word from a (pinned) user address via
 * probe_kernel_address(); on fault the result is mapped to the
 * profiler error code -QUADD_URC_EACCESS.
 * (Macro body is incomplete in this extract.)
 */
127 #define read_user_data(addr, retval) \
130 ret = probe_kernel_address(addr, retval); \
132 ret = -QUADD_URC_EACCESS; \
/*
 * Insert new_entry into rd->entries, keeping the array sorted by
 * vm_start.  Fast paths: empty array, duplicate singleton, prepend,
 * append; otherwise binary-search for the slot and shift the tail.
 * NOTE(review): several lines (returns, loop bounds init) are missing
 * from this extract; presumably returns the number of entries added
 * (0 or 1) — confirm against the full source.
 */
137 add_ex_region(struct regions_data *rd,
138 struct ex_region_info *new_entry)
140 unsigned int i_min, i_max, mid;
141 struct ex_region_info *array = rd->entries;
142 unsigned long size = rd->curr_nr;
/* Empty array: place the entry at index 0. */
148 memcpy(&array[0], new_entry, sizeof(*new_entry));
/* Singleton with the same key: duplicate region. */
150 } else if (size == 1 && array[0].vm_start == new_entry->vm_start) {
/* New smallest key: shift everything right and prepend. */
157 if (array[0].vm_start > new_entry->vm_start) {
158 memmove(array + 1, array,
159 size * sizeof(*array));
160 memcpy(&array[0], new_entry, sizeof(*new_entry));
/* New largest key: append at the end. */
162 } else if (array[size - 1].vm_start < new_entry->vm_start) {
163 memcpy(&array[size], new_entry, sizeof(*new_entry));
/* General case: binary search for the insertion point. */
167 while (i_min < i_max) {
168 mid = i_min + (i_max - i_min) / 2;
170 if (new_entry->vm_start <= array[mid].vm_start)
/* Key already present at i_max: no insert. */
176 if (array[i_max].vm_start == new_entry->vm_start) {
/* Shift the tail one slot right and insert at i_max. */
179 memmove(array + i_max + 1,
181 (size - i_max) * sizeof(*array));
182 memcpy(&array[i_max], new_entry, sizeof(*new_entry));
/*
 * Binary-search the sorted region array for vm_start == key.
 * On a hit, copy the region's table descriptors into *tabs and
 * return the entry; the miss path (return NULL) is outside this
 * extract.
 */
187 static struct ex_region_info *
188 search_ex_region(struct ex_region_info *array,
191 struct extables *tabs)
193 unsigned int i_min, i_max, mid;
201 while (i_min < i_max) {
202 mid = i_min + (i_max - i_min) / 2;
204 if (key <= array[mid].vm_start)
210 if (array[i_max].vm_start == key) {
211 memcpy(tabs, &array[i_max].tabs, sizeof(*tabs));
212 return &array[i_max];
/*
 * RCU-protected lookup of the region whose vm_start equals key.
 * Fills *tabs on success.  Returns 0 on success, -ENOENT when no
 * region matches.  (rcu_read_lock/unlock lines are missing from
 * this extract but are implied by rcu_dereference.)
 */
219 __search_ex_region(unsigned long key, struct extables *tabs)
221 struct regions_data *rd;
222 struct ex_region_info *ri = NULL;
226 rd = rcu_dereference(ctx.rd);
230 ri = search_ex_region(rd->entries, rd->curr_nr, key, tabs);
234 return ri ? 0 : -ENOENT;
/*
 * Pin the user pages backing both exception tables (exidx, then
 * extab) of the profiled task so the unwinder can read them from
 * interrupt/atomic context.  Looks the task up by ctx.pid, takes
 * mmap_sem for writing, and accounts pinned pages/bytes in ctx.
 * NOTE(review): this 8-argument get_user_pages(task, mm, ...) form
 * is the pre-4.6 kernel API; error/cleanup lines are missing from
 * this extract.
 */
237 static void pin_user_pages(struct extables *tabs)
240 struct extab_info *ti;
241 unsigned long nr_pages, addr;
243 struct task_struct *task = NULL;
244 struct mm_struct *mm;
/* Resolve the profiled task from the stored pid. */
248 pid_s = find_vpid(ctx.pid);
250 task = pid_task(pid_s, PIDTYPE_PID);
261 down_write(&mm->mmap_sem);
/* --- pin the .ARM.exidx table --- */
264 addr = ti->addr & PAGE_MASK;
265 nr_pages = GET_NR_PAGES(ti->addr, ti->length);
267 ret = get_user_pages(task, mm, addr, nr_pages, 0, 0,
270 pr_debug("%s: warning: addr/nr_pages: %#lx/%lu\n",
271 __func__, ti->addr, nr_pages);
275 ctx.pinned_pages += ret;
276 ctx.pinned_size += ti->length;
278 pr_debug("%s: pin exidx: addr/nr_pages: %#lx/%lu\n",
279 __func__, ti->addr, nr_pages);
/* --- pin the .ARM.extab table --- */
282 addr = ti->addr & PAGE_MASK;
283 nr_pages = GET_NR_PAGES(ti->addr, ti->length);
285 ret = get_user_pages(task, mm, addr, nr_pages, 0, 0,
288 pr_debug("%s: warning: addr/nr_pages: %#lx/%lu\n",
289 __func__, ti->addr, nr_pages);
293 ctx.pinned_pages += ret;
294 ctx.pinned_size += ti->length;
296 pr_debug("%s: pin extab: addr/nr_pages: %#lx/%lu\n",
297 __func__, ti->addr, nr_pages);
300 up_write(&mm->mmap_sem);
/*
 * Workqueue handler: look up the exception tables for the region at
 * work->vm_start and pin their pages.  (The kfree of the work item
 * is outside this extract.)
 */
304 pin_user_pages_work(struct work_struct *w)
307 struct extables tabs;
308 struct pin_pages_work *work;
310 work = container_of(w, struct pin_pages_work, work);
312 err = __search_ex_region(work->vm_start, &tabs);
314 pin_user_pages(&tabs);
/*
 * Schedule asynchronous pinning of the tables for the region at
 * vm_start.  GFP_ATOMIC because this may be called from atomic
 * context; the allocation-failure path is outside this extract.
 */
320 __pin_user_pages(unsigned long vm_start)
322 struct pin_pages_work *work;
324 work = kmalloc(sizeof(*work), GFP_ATOMIC);
328 INIT_WORK(&work->work, pin_user_pages_work);
329 work->vm_start = vm_start;
331 schedule_work(&work->work);
/*
 * Allocate a regions_data container plus a zeroed entries array of
 * `size` elements.  Error paths (NULL checks / ERR_PTR returns) are
 * outside this extract; callers use IS_ERR_OR_NULL on the result.
 */
336 static struct regions_data *rd_alloc(unsigned long size)
338 struct regions_data *rd;
340 rd = kzalloc(sizeof(*rd), GFP_KERNEL);
344 rd->entries = kzalloc(size * sizeof(*rd->entries), GFP_KERNEL);
/* Free a regions_data and its entries array (body not in this extract). */
356 static void rd_free(struct regions_data *rd)
/*
 * RCU callback: recover the regions_data from its rcu_head and free
 * it after the grace period (presumably via rd_free() — the call is
 * outside this extract).
 */
364 static void rd_free_rcu(struct rcu_head *rh)
366 struct regions_data *rd = container_of(rh, struct regions_data, rcu);
/*
 * Register the exception tables of a newly mapped region.
 * Copy-on-update under ctx.lock: allocate a new (possibly grown)
 * array, copy the old entries, insert the new region sorted, publish
 * with rcu_assign_pointer, retire the old array via call_rcu, then
 * schedule page pinning for the new region outside the lock.
 */
370 int quadd_unwind_set_extab(struct quadd_extables *extabs)
373 unsigned long nr_entries, nr_added, new_size;
374 struct ex_region_info ri_entry;
375 struct extab_info *ti;
376 struct regions_data *rd, *rd_new;
378 spin_lock(&ctx.lock);
380 rd = rcu_dereference(ctx.rd);
/* First registration: start with the default capacity. */
382 pr_warn("%s: warning: rd\n", __func__);
383 new_size = QUADD_EXTABS_SIZE;
387 nr_entries = rd->curr_nr;
/* Grow by 50% when the current array is full. */
390 if (nr_entries >= new_size)
391 new_size += new_size >> 1;
393 rd_new = rd_alloc(new_size);
394 if (IS_ERR_OR_NULL(rd_new)) {
395 pr_err("%s: error: rd_alloc\n", __func__);
/* Carry over the existing entries into the new array. */
400 if (rd && nr_entries)
401 memcpy(rd_new->entries, rd->entries,
402 nr_entries * sizeof(*rd->entries));
404 rd_new->curr_nr = nr_entries;
/* Build the new region entry from the userspace-supplied tables. */
406 ri_entry.vm_start = extabs->vm_start;
407 ri_entry.vm_end = extabs->vm_end;
409 ti = &ri_entry.tabs.exidx;
410 ti->addr = extabs->exidx.addr;
411 ti->length = extabs->exidx.length;
413 ti = &ri_entry.tabs.extab;
414 ti->addr = extabs->extab.addr;
415 ti->length = extabs->extab.length;
417 nr_added = add_ex_region(rd_new, &ri_entry);
422 rd_new->curr_nr += nr_added;
/* Publish the new array and retire the old one after a grace period. */
424 rcu_assign_pointer(ctx.rd, rd_new);
427 call_rcu(&rd->rcu, rd_free_rcu);
429 spin_unlock(&ctx.lock);
/* Pin outside the spinlock: __pin_user_pages only schedules work. */
431 __pin_user_pages(ri_entry.vm_start);
/* Error-path unlock (the jump label is outside this extract). */
436 spin_unlock(&ctx.lock);
/*
 * Convert an EHABI prel31 (31-bit PC-relative, sign-extended) value
 * at *ptr into an absolute address: read the word from user memory,
 * sign-extend bit 30 via the shift pair, and add the pointer's own
 * address.
 */
441 prel31_to_addr(const u32 *ptr)
446 if (read_user_data(ptr, value))
449 /* sign-extend to 32 bits */
450 offset = (((s32)value) << 1) >> 1;
451 return (u32)(unsigned long)ptr + offset;
/*
 * Binary-search [start, stop) for the first exidx entry with a
 * positive (PC-relative) offset.  Entries with addr_offset >=
 * 0x40000000 encode negative prel31 offsets.  Reads go through
 * read_user_data and return ERR_PTR(-EFAULT) on fault.
 */
454 static const struct unwind_idx *
455 unwind_find_origin(const struct unwind_idx *start,
456 const struct unwind_idx *stop)
458 while (start < stop) {
460 const struct unwind_idx *mid = start + ((stop - start) >> 1);
462 if (read_user_data(&mid->addr_offset, addr_offset))
463 return ERR_PTR(-EFAULT);
465 if (addr_offset >= 0x40000000)
466 /* negative offset */
469 /* positive offset */
477 * Binary search in the unwind index. The entries are
478 * guaranteed to be sorted in ascending order by the linker.
480 * start = first entry
481 * origin = first entry with positive offset (or stop if there is no such entry)
482 * stop - 1 = last entry
/*
 * Returns the exidx entry covering `addr`, searching only the
 * half-open range whose prel31 offsets share addr's sign so the
 * prel31 values compare correctly as unsigned.
 */
484 static const struct unwind_idx *
485 search_index(u32 addr,
486 const struct unwind_idx *start,
487 const struct unwind_idx *origin,
488 const struct unwind_idx *stop)
492 pr_debug("%#x, %p, %p, %p\n", addr, start, origin, stop);
495 * only search in the section with the matching sign. This way the
496 * prel31 numbers can be compared as unsigned longs.
498 if (addr < (u32)(unsigned long)start)
499 /* negative offsets: [start; origin) */
502 /* positive offsets: [origin; stop) */
/* NOTE(review): "relavive" in the original comment means "relative". */
505 /* prel31 for address relavive to start */
506 addr_prel31 = (addr - (u32)(unsigned long)start) & 0x7fffffff;
508 while (start < stop - 1) {
511 const struct unwind_idx *mid = start + ((stop - start) >> 1);
514 * As addr_prel31 is relative to start an offset is needed to
515 * make it relative to mid.
517 if (read_user_data(&mid->addr_offset, addr_offset))
518 return ERR_PTR(-EFAULT);
520 d = (u32)(unsigned long)mid - (u32)(unsigned long)start;
522 if (addr_prel31 - d < addr_offset) {
525 /* keep addr_prel31 relative to start */
526 addr_prel31 -= ((u32)(unsigned long)mid -
527 (u32)(unsigned long)start);
/* Final candidate must actually cover addr. */
532 if (likely(start->addr_offset <= addr_prel31))
535 pr_debug("Unknown address %#x\n", addr);
/*
 * Locate the exidx entry for `addr` in the region's .ARM.exidx
 * table: compute [start, stop) from the table descriptor, find the
 * positive/negative origin, then binary-search with search_index().
 */
539 static const struct unwind_idx *
540 unwind_find_idx(struct extab_info *exidx, u32 addr)
542 const struct unwind_idx *start;
543 const struct unwind_idx *origin;
544 const struct unwind_idx *stop;
545 const struct unwind_idx *idx = NULL;
547 start = (const struct unwind_idx *)exidx->addr;
548 stop = start + exidx->length / sizeof(*start);
550 origin = unwind_find_origin(start, stop);
554 idx = search_index(addr, start, origin, stop);
556 pr_debug("addr: %#x, start: %p, origin: %p, stop: %p, idx: %p\n",
557 addr, start, origin, stop, idx);
/*
 * Fetch the next unwind-instruction byte from the control block.
 * Bytes are consumed most-significant first within each 32-bit word
 * (ctrl->byte counts down); when the word is exhausted (byte == 0),
 * the extract's missing lines presumably advance ctrl->insn and
 * decrement ctrl->entries.  *err reports table corruption or a
 * faulting user read.
 */
563 unwind_get_byte(struct unwind_ctrl_block *ctrl, long *err)
570 if (ctrl->entries <= 0) {
571 pr_debug("error: corrupt unwind table\n");
572 *err = -QUADD_URC_TBL_IS_CORRUPT;
576 *err = read_user_data(ctrl->insn, insn_word);
580 ret = (insn_word >> (ctrl->byte * 8)) & 0xff;
582 if (ctrl->byte == 0) {
593 * Execute the current unwind instruction.
/*
 * Decode and execute one ARM EHABI unwind instruction, updating the
 * virtual register set in ctrl->vrs.  Opcode groups follow the EHABI
 * personality-routine encoding (vsp add/sub, register pops, VFP pops,
 * finish, refuse-to-unwind).  Returns 0 or a negative QUADD_URC_*
 * error.
 */
595 static long unwind_exec_insn(struct unwind_ctrl_block *ctrl)
599 unsigned long insn = unwind_get_byte(ctrl, &err);
604 pr_debug("%s: insn = %08lx\n", __func__, insn);
/* 00xxxxxx: vsp += (x << 2) + 4 */
606 if ((insn & 0xc0) == 0x00) {
607 ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
609 pr_debug("CMD_DATA_POP: vsp = vsp + %lu (new: %#x)\n",
610 ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
/* 01xxxxxx: vsp -= (x << 2) + 4 */
611 } else if ((insn & 0xc0) == 0x40) {
612 ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
/* NOTE(review): the format string below contains an en-dash (–)
 * instead of a minus sign — likely mojibake; it is a runtime
 * string so it is left untouched here. */
614 pr_debug("CMD_DATA_PUSH: vsp = vsp – %lu (new: %#x)\n",
615 ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
/* 1000xxxx xxxxxxxx: pop {r4-r15} under a 12-bit mask */
616 } else if ((insn & 0xf0) == 0x80) {
618 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
619 int load_sp, reg = 4;
621 insn = (insn << 8) | unwind_get_byte(ctrl, &err);
625 mask = insn & 0x0fff;
/* mask == 0 is the "refuse to unwind" encoding (0x8000). */
627 pr_debug("CMD_REFUSED: unwind: 'Refuse to unwind' instruction %04lx\n",
629 return -QUADD_URC_REFUSE_TO_UNWIND;
632 /* pop R4-R15 according to mask */
/* Remember whether r13 (SP) itself is in the pop mask. */
633 load_sp = mask & (1 << (13 - 4));
636 err = read_user_data(vsp++, ctrl->vrs[reg]);
640 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
/* If SP was not popped explicitly, it becomes the final vsp. */
646 ctrl->vrs[SP] = (unsigned long)vsp;
648 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
/* 1001nnnn (nnnn != 13, 15): vsp = r[nnnn] */
649 } else if ((insn & 0xf0) == 0x90 &&
650 (insn & 0x0d) != 0x0d) {
651 ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
652 pr_debug("CMD_REG_TO_SP: vsp = {r%lu}\n", insn & 0x0f);
/* 1010xnnn: pop r4-r[4+nnn], plus r14 when bit 3 set */
653 } else if ((insn & 0xf0) == 0xa0) {
654 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
657 /* pop R4-R[4+bbb] */
658 for (reg = 4; reg <= 4 + (insn & 7); reg++) {
659 err = read_user_data(vsp++, ctrl->vrs[reg]);
663 pr_debug("CMD_REG_POP: pop {r%u}\n", reg);
/* 10101nnn variant also pops the link register. */
667 err = read_user_data(vsp++, ctrl->vrs[14]);
671 pr_debug("CMD_REG_POP: pop {r14}\n");
674 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
675 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
/* 10110000: finish — if PC not set yet, return address is LR. */
676 } else if (insn == 0xb0) {
677 if (ctrl->vrs[PC] == 0)
678 ctrl->vrs[PC] = ctrl->vrs[LR];
679 /* no further processing */
682 pr_debug("CMD_FINISH\n");
/* 10110001: pop r0-r3 under a 4-bit mask from the next byte */
683 } else if (insn == 0xb1) {
684 unsigned long mask = unwind_get_byte(ctrl, &err);
685 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
/* mask 0 or any high bit set is a spare (invalid) encoding. */
691 if (mask == 0 || mask & 0xf0) {
692 pr_debug("unwind: Spare encoding %04lx\n",
694 return -QUADD_URC_SPARE_ENCODING;
697 /* pop R0-R3 according to mask */
700 err = read_user_data(vsp++, ctrl->vrs[reg]);
704 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
710 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
711 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
/* 10110010: vsp += 0x204 + (uleb128 << 2) */
712 } else if (insn == 0xb2) {
713 unsigned long uleb128 = unwind_get_byte(ctrl, &err);
717 ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
719 pr_debug("CMD_DATA_POP: vsp = vsp + %lu, new vsp: %#x\n",
720 0x204 + (uleb128 << 2), ctrl->vrs[SP]);
/* 0xb3/0xc8/0xc9: pop VFP double registers D[from]-D[to] */
721 } else if (insn == 0xb3 || insn == 0xc8 || insn == 0xc9) {
722 unsigned long data, reg_from, reg_to;
723 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
725 data = unwind_get_byte(ctrl, &err);
729 reg_from = (data & 0xf0) >> 4;
730 reg_to = reg_from + (data & 0x0f);
/* Each D register occupies two 32-bit stack words. */
737 for (i = reg_from; i <= reg_to; i++)
/* NOTE(review): the two assignments below are redundant
 * back-to-back writes of vrs[SP]; possibly an artifact of
 * this extract — confirm against the full source. */
743 ctrl->vrs[SP] = (unsigned long)vsp;
744 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
746 pr_debug("CMD_VFP_POP (%#lx %#lx): pop {D%lu-D%lu}\n",
747 insn, data, reg_from, reg_to);
748 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
/* 10111nnn / 11010nnn: pop D8-D[8+nnn] */
749 } else if ((insn & 0xf8) == 0xb8 || (insn & 0xf8) == 0xd0) {
750 unsigned long reg_to;
751 unsigned long data = insn & 0x07;
752 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
756 for (i = 8; i <= reg_to; i++)
/* 0xb8 form (FSTMFDX) skips an extra format word. */
759 if ((insn & 0xf8) == 0xb8)
762 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
764 pr_debug("CMD_VFP_POP (%#lx): pop {D8-D%lu}\n",
766 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
/* Anything else is an encoding this unwinder does not handle. */
768 pr_debug("error: unhandled instruction %02lx\n", insn);
769 return -QUADD_URC_UNHANDLED_INSTRUCTION;
772 pr_debug("%s: fp_arm: %#x, fp_thumb: %#x, sp: %#x, lr = %#x, pc: %#x\n",
774 ctrl->vrs[FP_ARM], ctrl->vrs[FP_THUMB], ctrl->vrs[SP],
775 ctrl->vrs[LR], ctrl->vrs[PC]);
781 * Unwind a single frame starting with *sp for the symbol at *pc. It
782 * updates the *pc and *sp with the new values.
/*
 * Returns 0 on success or a negative QUADD_URC_* code.  Steps:
 * validate SP, find the exidx entry for PC, seed the virtual
 * registers from the frame, decode the personality-0/1/2 entry,
 * execute unwind instructions, then validate and write back the new
 * SP/LR/PC/frame pointers.
 */
785 unwind_frame(struct extab_info *exidx,
786 struct stackframe *frame,
787 struct vm_area_struct *vma_sp)
789 unsigned long high, low;
790 const struct unwind_idx *idx;
791 struct unwind_ctrl_block ctrl;
795 if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32)))
796 return -QUADD_URC_SP_INCORRECT;
798 /* only go to a higher address on the stack */
800 high = vma_sp->vm_end;
802 pr_debug("pc: %#lx, lr: %#lx, sp:%#lx, low/high: %#lx/%#lx\n",
803 frame->pc, frame->lr, frame->sp, low, high);
805 idx = unwind_find_idx(exidx, frame->pc);
806 if (IS_ERR_OR_NULL(idx))
807 return -QUADD_URC_IDX_NOT_FOUND;
809 pr_debug("index was found by pc (%#lx): %p\n", frame->pc, idx);
/* Seed the virtual register set from the current frame. */
811 ctrl.vrs[FP_THUMB] = frame->fp_thumb;
812 ctrl.vrs[FP_ARM] = frame->fp_arm;
814 ctrl.vrs[SP] = frame->sp;
815 ctrl.vrs[LR] = frame->lr;
/* Read the index word: EXIDX_CANTUNWIND, inline entry, or prel31. */
818 err = read_user_data(&idx->insn, val);
824 return -QUADD_URC_CANTUNWIND;
825 } else if ((val & 0x80000000) == 0) {
826 /* prel31 to the unwind table */
827 ctrl.insn = (u32 *)(unsigned long)prel31_to_addr(&idx->insn);
829 return -QUADD_URC_EACCESS;
830 } else if ((val & 0xff000000) == 0x80000000) {
831 /* only personality routine 0 supported in the index */
832 ctrl.insn = &idx->insn;
834 pr_debug("unsupported personality routine %#x in the index at %p\n",
836 return -QUADD_URC_UNSUPPORTED_PR;
839 err = read_user_data(ctrl.insn, val);
843 /* check the personality routine */
844 if ((val & 0xff000000) == 0x80000000) {
/* PR#1: an extra byte gives the number of additional words. */
847 } else if ((val & 0xff000000) == 0x81000000) {
849 ctrl.entries = 1 + ((val & 0x00ff0000) >> 16);
851 pr_debug("unsupported personality routine %#x at %p\n",
853 return -QUADD_URC_UNSUPPORTED_PR;
/* Execute the unwind instructions, validating SP after each one. */
856 while (ctrl.entries > 0) {
857 err = unwind_exec_insn(&ctrl);
/* New SP must be word-aligned and inside the stack VMA. */
861 if (ctrl.vrs[SP] & 0x03 ||
862 ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
863 return -QUADD_URC_SP_INCORRECT;
866 if (ctrl.vrs[PC] == 0)
867 ctrl.vrs[PC] = ctrl.vrs[LR];
869 /* check for infinite loop */
870 if (frame->pc == ctrl.vrs[PC])
871 return -QUADD_URC_FAILURE;
873 if (!validate_pc_addr(ctrl.vrs[PC], sizeof(u32)))
874 return -QUADD_URC_PC_INCORRECT;
/* Commit the unwound register state back to the frame. */
876 frame->fp_thumb = ctrl.vrs[FP_THUMB];
877 frame->fp_arm = ctrl.vrs[FP_ARM];
879 frame->sp = ctrl.vrs[SP];
880 frame->lr = ctrl.vrs[LR];
881 frame->pc = ctrl.vrs[PC];
/*
 * Walk the user stack frame by frame via unwind_frame(), storing
 * each PC into the callchain.  When the PC leaves the current
 * region, the matching exception tables are looked up again by the
 * new VMA's vm_start.
 */
887 unwind_backtrace(struct quadd_callchain *cc,
888 struct extab_info *exidx,
889 struct pt_regs *regs,
890 struct vm_area_struct *vma_sp,
891 struct task_struct *task)
893 struct extables tabs;
894 struct stackframe frame;
/* compat (AArch32-on-64) vs native register access; the #ifdef
 * lines separating these two pairs are missing from this extract. */
897 frame.fp_thumb = regs->compat_usr(7);
898 frame.fp_arm = regs->compat_usr(11);
900 frame.fp_thumb = regs->ARM_r7;
901 frame.fp_arm = regs->ARM_fp;
904 frame.pc = instruction_pointer(regs);
905 frame.sp = quadd_user_stack_pointer(regs);
906 frame.lr = quadd_user_link_register(regs);
908 cc->unw_rc = QUADD_URC_FAILURE;
910 pr_debug("fp_arm: %#lx, fp_thumb: %#lx, sp: %#lx, lr: %#lx, pc: %#lx\n",
911 frame.fp_arm, frame.fp_thumb, frame.sp, frame.lr, frame.pc);
912 pr_debug("vma_sp: %#lx - %#lx, length: %#lx\n",
913 vma_sp->vm_start, vma_sp->vm_end,
914 vma_sp->vm_end - vma_sp->vm_start);
918 unsigned long where = frame.pc;
919 struct vm_area_struct *vma_pc;
920 struct mm_struct *mm = task->mm;
925 if (!validate_stack_addr(frame.sp, vma_sp, sizeof(u32))) {
/* NOTE(review): this stores a NEGATED code while every other
 * site stores positive QUADD_URC_* values into unw_rc (e.g.
 * QUADD_URC_TBL_NOT_EXIST below) — looks inconsistent;
 * confirm the intended sign convention. */
926 cc->unw_rc = -QUADD_URC_SP_INCORRECT;
/* PC moved to a different VMA: refresh the exception tables. */
930 vma_pc = find_vma(mm, frame.pc);
934 if (!is_vma_addr(exidx->addr, vma_pc, sizeof(u32))) {
935 err = __search_ex_region(vma_pc->vm_start, &tabs);
937 cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
944 err = unwind_frame(exidx, &frame, vma_sp);
946 pr_debug("end unwind, urc: %ld\n", err);
951 pr_debug("function at [<%08lx>] from [<%08lx>]\n",
954 quadd_callchain_store(cc, frame.pc);
956 cc->curr_sp = frame.sp;
957 cc->curr_fp = frame.fp_arm;
/*
 * Entry point: build a user-space callchain with the exception-table
 * unwinder.  Rejects native 64-bit user mode, resolves the VMAs for
 * the current PC and SP, looks up the registered tables for the PC's
 * region, and delegates to unwind_backtrace().
 */
962 quadd_get_user_callchain_ut(struct pt_regs *regs,
963 struct quadd_callchain *cc,
964 struct task_struct *task)
967 unsigned long ip, sp;
968 struct vm_area_struct *vma, *vma_sp;
969 struct mm_struct *mm = task->mm;
970 struct extables tabs;
972 cc->unw_method = QUADD_UNW_METHOD_EHT;
973 cc->unw_rc = QUADD_URC_FAILURE;
/* Only 32-bit (compat) user mode is supported by this unwinder. */
976 if (!compat_user_mode(regs)) {
977 pr_warn_once("user_mode 64: unsupported\n");
985 ip = instruction_pointer(regs);
986 sp = quadd_user_stack_pointer(regs);
988 vma = find_vma(mm, ip);
992 vma_sp = find_vma(mm, sp);
996 err = __search_ex_region(vma->vm_start, &tabs);
998 cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
1002 unwind_backtrace(cc, &tabs.exidx, regs, vma_sp, task);
/*
 * Begin profiling `task`: install a fresh, empty region array
 * (retiring any stale one via RCU), record the target tgid, and
 * reset the pin-accounting counters.  Returns 0 on success; the
 * rd_alloc failure path returns from under the lock.
 */
1007 int quadd_unwind_start(struct task_struct *task)
1009 struct regions_data *rd, *rd_old;
1011 spin_lock(&ctx.lock);
1013 rd_old = rcu_dereference(ctx.rd);
/* A leftover array from a previous session is unexpected. */
1015 pr_warn("%s: warning: rd_old\n", __func__);
1017 rd = rd_alloc(QUADD_EXTABS_SIZE);
1018 if (IS_ERR_OR_NULL(rd)) {
1019 pr_err("%s: error: rd_alloc\n", __func__);
1020 spin_unlock(&ctx.lock);
1024 rcu_assign_pointer(ctx.rd, rd);
1027 call_rcu(&rd_old->rcu, rd_free_rcu);
1029 ctx.pid = task->tgid;
1031 ctx.pinned_pages = 0;
1032 ctx.pinned_size = 0;
1034 spin_unlock(&ctx.lock);
/*
 * End the profiling session: detach the region array under the lock,
 * free it after an RCU grace period, and report the accumulated
 * pinning statistics.
 */
1039 void quadd_unwind_stop(void)
1041 struct regions_data *rd;
1043 spin_lock(&ctx.lock);
1047 rd = rcu_dereference(ctx.rd);
1049 rcu_assign_pointer(ctx.rd, NULL);
1050 call_rcu(&rd->rcu, rd_free_rcu);
1053 spin_unlock(&ctx.lock);
1055 pr_info("exception tables size: %lu bytes\n", ctx.pinned_size);
1056 pr_info("pinned pages: %lu (%lu bytes)\n", ctx.pinned_pages,
1057 ctx.pinned_pages * PAGE_SIZE);
/* Module init: prepare the lock and start with no region array. */
1060 int quadd_unwind_init(void)
1062 spin_lock_init(&ctx.lock);
1063 rcu_assign_pointer(ctx.rd, NULL);
/* Module teardown: stop the session (remaining cleanup, e.g.
 * rcu_barrier, is outside this extract). */
1069 void quadd_unwind_deinit(void)
1071 quadd_unwind_stop();