1 /*
2  * drivers/misc/tegra-profiler/eh_unwind.c
3  *
4  * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/mm.h>
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/uaccess.h>
23 #include <linux/err.h>
24 #include <linux/rcupdate.h>
25
26 #include <linux/tegra_profiler.h>
27
28 #include "eh_unwind.h"
29 #include "backtrace.h"
30
31 #define QUADD_EXTABS_SIZE       0x100
32
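/*
 * Number of pages spanned by the byte range [a, a + l): round the end up
 * and the start down to page boundaries. E.g. with 4 KiB pages,
 * GET_NR_PAGES(0x1ff8, 0x10) = (0x3000 - 0x1000) / 0x1000 = 2.
 */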
33 #define GET_NR_PAGES(a, l) \
34         ((PAGE_ALIGN((a) + (l)) - ((a) & PAGE_MASK)) / PAGE_SIZE)
35
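/*
 * Indices into the virtual register set (vrs[16]) used by the unwinder;
 * they match the ARM core register numbers. The frame pointer differs
 * between ARM (r11) and Thumb (r7) code, so both are tracked.
 */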
36 enum regs {
37         FP_THUMB = 7,
38         FP_ARM = 11,
39
40         SP = 13,
41         LR = 14,
42         PC = 15
43 };
44
45 struct extab_info {
46         unsigned long addr;
47         unsigned long length;
48 };
49
50 struct extables {
51         struct extab_info exidx;
52         struct extab_info extab;
53 };
54
55 struct ex_region_info {
56         unsigned long vm_start;
57         unsigned long vm_end;
58
59         struct extables tabs;
60 };
61
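/*
 * Array of registered regions, kept sorted by vm_start and published to
 * readers via RCU (see ctx.rd); freed with call_rcu() after replacement.
 */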
62 struct regions_data {
63         struct ex_region_info *entries;
64
65         unsigned long curr_nr;
66         unsigned long size;
67
68         struct rcu_head rcu;
69 };
70
71 struct quadd_unwind_ctx {
72         struct regions_data *rd;
73
74         pid_t pid;
75
76         unsigned long pinned_pages;
77         unsigned long pinned_size;
78
79         spinlock_t lock;
80 };
81
82 struct unwind_idx {
83         u32 addr_offset;
84         u32 insn;
85 };
86
87 struct stackframe {
88         unsigned long fp_thumb;
89         unsigned long fp_arm;
90
91         unsigned long sp;
92         unsigned long lr;
93         unsigned long pc;
94 };
95
96 struct unwind_ctrl_block {
97         u32 vrs[16];            /* virtual register set */
98         const u32 *insn;        /* pointer to the current instr word */
99         int entries;            /* number of entries left */
100         int byte;               /* current byte in the instr word */
101 };
102
103 struct pin_pages_work {
104         struct work_struct work;
105         unsigned long vm_start;
106 };
107
108 static struct quadd_unwind_ctx ctx;
109
110 static inline int
111 validate_stack_addr(unsigned long addr,
112                     struct vm_area_struct *vma,
113                     unsigned long nbytes)
114 {
115         if (addr & 0x03)
116                 return 0;
117
118         return is_vma_addr(addr, vma, nbytes);
119 }
120
121 static inline int
122 validate_pc_addr(unsigned long addr, unsigned long nbytes)
123 {
124         return addr && addr < TASK_SIZE - nbytes;
125 }
126
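/*
 * Read one word of a user-space unwind table. The tables are expected to
 * be resident (see pin_user_pages()), so probe_kernel_address() is used as
 * a non-faulting access; any failure is mapped to -QUADD_URC_EACCESS.
 */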
127 #define read_user_data(addr, retval)                    \
128 ({                                                      \
129         long ret;                                       \
130         ret = probe_kernel_address(addr, retval);       \
131         if (ret)                                        \
132                 ret = -QUADD_URC_EACCESS;               \
133         ret;                                            \
134 })
135
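/*
 * Insert a new region into the array, keeping it sorted by vm_start.
 * Returns the number of entries added: 1 on success, 0 if an entry with
 * the same vm_start already exists. The caller guarantees spare capacity.
 */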
136 static int
137 add_ex_region(struct regions_data *rd,
138               struct ex_region_info *new_entry)
139 {
140         unsigned int i_min, i_max, mid;
141         struct ex_region_info *array = rd->entries;
142         unsigned long size = rd->curr_nr;
143
144         if (!array)
145                 return 0;
146
147         if (size == 0) {
148                 memcpy(&array[0], new_entry, sizeof(*new_entry));
149                 return 1;
150         } else if (size == 1 && array[0].vm_start == new_entry->vm_start) {
151                 return 0;
152         }
153
154         i_min = 0;
155         i_max = size;
156
157         if (array[0].vm_start > new_entry->vm_start) {
158                 memmove(array + 1, array,
159                         size * sizeof(*array));
160                 memcpy(&array[0], new_entry, sizeof(*new_entry));
161                 return 1;
162         } else if (array[size - 1].vm_start < new_entry->vm_start) {
163                 memcpy(&array[size], new_entry, sizeof(*new_entry));
164                 return 1;
165         }
166
167         while (i_min < i_max) {
168                 mid = i_min + (i_max - i_min) / 2;
169
170                 if (new_entry->vm_start <= array[mid].vm_start)
171                         i_max = mid;
172                 else
173                         i_min = mid + 1;
174         }
175
176         if (array[i_max].vm_start == new_entry->vm_start) {
177                 return 0;
178         } else {
179                 memmove(array + i_max + 1,
180                         array + i_max,
181                         (size - i_max) * sizeof(*array));
182                 memcpy(&array[i_max], new_entry, sizeof(*new_entry));
183                 return 1;
184         }
185 }
186
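/*
 * Binary search for the region whose vm_start equals @key; on a hit the
 * exception-table info is copied to @tabs. Returns NULL if not found.
 */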
187 static struct ex_region_info *
188 search_ex_region(struct ex_region_info *array,
189                  unsigned long size,
190                  unsigned long key,
191                  struct extables *tabs)
192 {
193         unsigned int i_min, i_max, mid;
194
195         if (size == 0)
196                 return NULL;
197
198         i_min = 0;
199         i_max = size;
200
201         while (i_min < i_max) {
202                 mid = i_min + (i_max - i_min) / 2;
203
204                 if (key <= array[mid].vm_start)
205                         i_max = mid;
206                 else
207                         i_min = mid + 1;
208         }
209
210         if (array[i_max].vm_start == key) {
211                 memcpy(tabs, &array[i_max].tabs, sizeof(*tabs));
212                 return &array[i_max];
213         }
214
215         return NULL;
216 }
217
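/* RCU-protected lookup of a registered region by its vm_start address. */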
218 static long
219 __search_ex_region(unsigned long key, struct extables *tabs)
220 {
221         struct regions_data *rd;
222         struct ex_region_info *ri = NULL;
223
224         rcu_read_lock();
225
226         rd = rcu_dereference(ctx.rd);
227         if (!rd)
228                 goto out;
229
230         ri = search_ex_region(rd->entries, rd->curr_nr, key, tabs);
231
232 out:
233         rcu_read_unlock();
234         return ri ? 0 : -ENOENT;
235 }
236
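/*
 * Fault in the pages backing the exidx/extab ranges of the profiled task
 * so that later table reads are unlikely to fault. Note that
 * get_user_pages() is called with a NULL pages array, so no long-term page
 * references are held: "pinning" here really means pre-faulting.
 */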
237 static void pin_user_pages(struct extables *tabs)
238 {
239         long ret;
240         struct extab_info *ti;
241         unsigned long nr_pages, addr;
242         struct pid *pid_s;
243         struct task_struct *task = NULL;
244         struct mm_struct *mm;
245
246         rcu_read_lock();
247
248         pid_s = find_vpid(ctx.pid);
249         if (pid_s)
250                 task = pid_task(pid_s, PIDTYPE_PID);
251
252         rcu_read_unlock();
253
254         if (!task)
255                 return;
256
257         mm = task->mm;
258         if (!mm)
259                 return;
260
261         down_write(&mm->mmap_sem);
262
263         ti = &tabs->exidx;
264         addr = ti->addr & PAGE_MASK;
265         nr_pages = GET_NR_PAGES(ti->addr, ti->length);
266
267         ret = get_user_pages(task, mm, addr, nr_pages, 0, 0,
268                              NULL, NULL);
269         if (ret < 0) {
270                 pr_debug("%s: warning: addr/nr_pages: %#lx/%lu\n",
271                          __func__, ti->addr, nr_pages);
272                 goto error_out;
273         }
274
275         ctx.pinned_pages += ret;
276         ctx.pinned_size += ti->length;
277
278         pr_debug("%s: pin exidx: addr/nr_pages: %#lx/%lu\n",
279                  __func__, ti->addr, nr_pages);
280
281         ti = &tabs->extab;
282         addr = ti->addr & PAGE_MASK;
283         nr_pages = GET_NR_PAGES(ti->addr, ti->length);
284
285         ret = get_user_pages(task, mm, addr, nr_pages, 0, 0,
286                              NULL, NULL);
287         if (ret < 0) {
288                 pr_debug("%s: warning: addr/nr_pages: %#lx/%lu\n",
289                          __func__, ti->addr, nr_pages);
290                 goto error_out;
291         }
292
293         ctx.pinned_pages += ret;
294         ctx.pinned_size += ti->length;
295
296         pr_debug("%s: pin extab: addr/nr_pages: %#lx/%lu\n",
297                  __func__, ti->addr, nr_pages);
298
299 error_out:
300         up_write(&mm->mmap_sem);
301 }
302
303 static void
304 pin_user_pages_work(struct work_struct *w)
305 {
306         long err;
307         struct extables tabs;
308         struct pin_pages_work *work;
309
310         work = container_of(w, struct pin_pages_work, work);
311
312         err = __search_ex_region(work->vm_start, &tabs);
313         if (!err)
314                 pin_user_pages(&tabs);
315
316         kfree(w);
317 }
318
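/*
 * Faulting the pages in needs mmap_sem and may sleep, so it is deferred to
 * a work item; the GFP_ATOMIC allocation suggests this path can be reached
 * from atomic context.
 */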
319 static int
320 __pin_user_pages(unsigned long vm_start)
321 {
322         struct pin_pages_work *work;
323
324         work = kmalloc(sizeof(*work), GFP_ATOMIC);
325         if (!work)
326                 return -ENOMEM;
327
328         INIT_WORK(&work->work, pin_user_pages_work);
329         work->vm_start = vm_start;
330
331         schedule_work(&work->work);
332
333         return 0;
334 }
335
336 static struct regions_data *rd_alloc(unsigned long size)
337 {
338         struct regions_data *rd;
339
340         rd = kzalloc(sizeof(*rd), GFP_KERNEL);
341         if (!rd)
342                 return NULL;
343
344         rd->entries = kzalloc(size * sizeof(*rd->entries), GFP_KERNEL);
345         if (!rd->entries) {
346                 kfree(rd);
347                 return NULL;
348         }
349
350         rd->size = size;
351         rd->curr_nr = 0;
352
353         return rd;
354 }
355
356 static void rd_free(struct regions_data *rd)
357 {
358         if (rd)
359                 kfree(rd->entries);
360
361         kfree(rd);
362 }
363
364 static void rd_free_rcu(struct rcu_head *rh)
365 {
366         struct regions_data *rd = container_of(rh, struct regions_data, rcu);
367         rd_free(rd);
368 }
369
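/*
 * Register the exception-table ranges (.ARM.exidx/.ARM.extab) for one
 * executable mapping. A grown copy of the region array is populated and
 * published with RCU; the old array is freed after a grace period, and the
 * newly registered tables are scheduled to be faulted in.
 */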
370 int quadd_unwind_set_extab(struct quadd_extables *extabs)
371 {
372         int err = 0;
373         unsigned long nr_entries, nr_added, new_size;
374         struct ex_region_info ri_entry;
375         struct extab_info *ti;
376         struct regions_data *rd, *rd_new;
377
378         spin_lock(&ctx.lock);
379
380         rd = rcu_dereference(ctx.rd);
381         if (!rd) {
382                 pr_warn("%s: warning: regions data is not initialized\n", __func__);
383                 new_size = QUADD_EXTABS_SIZE;
384                 nr_entries = 0;
385         } else {
386                 new_size = rd->size;
387                 nr_entries = rd->curr_nr;
388         }
389
390         if (nr_entries >= new_size)
391                 new_size += new_size >> 1;
392
393         rd_new = rd_alloc(new_size);
394         if (IS_ERR_OR_NULL(rd_new)) {
395                 pr_err("%s: error: rd_alloc\n", __func__);
396                 err = -ENOMEM;
397                 goto error_out;
398         }
399
400         if (rd && nr_entries)
401                 memcpy(rd_new->entries, rd->entries,
402                        nr_entries * sizeof(*rd->entries));
403
404         rd_new->curr_nr = nr_entries;
405
406         ri_entry.vm_start = extabs->vm_start;
407         ri_entry.vm_end = extabs->vm_end;
408
409         ti = &ri_entry.tabs.exidx;
410         ti->addr = extabs->exidx.addr;
411         ti->length = extabs->exidx.length;
412
413         ti = &ri_entry.tabs.extab;
414         ti->addr = extabs->extab.addr;
415         ti->length = extabs->extab.length;
416
417         nr_added = add_ex_region(rd_new, &ri_entry);
418         if (nr_added == 0) {
419                 rd_free(rd_new);
420                 goto error_out;
421         }
422         rd_new->curr_nr += nr_added;
423
424         rcu_assign_pointer(ctx.rd, rd_new);
425
426         if (rd)
427                 call_rcu(&rd->rcu, rd_free_rcu);
428
429         spin_unlock(&ctx.lock);
430
431         __pin_user_pages(ri_entry.vm_start);
432
433         return 0;
434
435 error_out:
436         spin_unlock(&ctx.lock);
437         return err;
438 }
439
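/*
 * Convert a prel31 entry (31-bit signed offset relative to the entry's own
 * address) to an absolute address. E.g. reading 0x7ffffffc at address A
 * yields A - 4. Returns 0 if the word cannot be read.
 */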
440 static u32
441 prel31_to_addr(const u32 *ptr)
442 {
443         u32 value;
444         s32 offset;
445
446         if (read_user_data(ptr, value))
447                 return 0;
448
449         /* sign-extend to 32 bits */
450         offset = (((s32)value) << 1) >> 1;
451         return (u32)(unsigned long)ptr + offset;
452 }
453
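/*
 * Find the "origin" entry: the first index entry whose prel31 word encodes
 * a non-negative offset (bit 30 clear). Splitting the table there lets
 * search_index() compare prel31 values as plain unsigned numbers.
 */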
454 static const struct unwind_idx *
455 unwind_find_origin(const struct unwind_idx *start,
456                    const struct unwind_idx *stop)
457 {
458         while (start < stop) {
459                 u32 addr_offset;
460                 const struct unwind_idx *mid = start + ((stop - start) >> 1);
461
462                 if (read_user_data(&mid->addr_offset, addr_offset))
463                         return ERR_PTR(-EFAULT);
464
465                 if (addr_offset >= 0x40000000)
466                         /* negative offset */
467                         start = mid + 1;
468                 else
469                         /* positive offset */
470                         stop = mid;
471         }
472
473         return stop;
474 }
475
476 /*
477  * Binary search in the unwind index. The entries are
478  * guaranteed to be sorted in ascending order by the linker.
479  *
480  * start = first entry
481  * origin = first entry with positive offset (or stop if there is no such entry)
482  * stop - 1 = last entry
483  */
484 static const struct unwind_idx *
485 search_index(u32 addr,
486              const struct unwind_idx *start,
487              const struct unwind_idx *origin,
488              const struct unwind_idx *stop)
489 {
490         u32 addr_prel31;
491
492         pr_debug("%#x, %p, %p, %p\n", addr, start, origin, stop);
493
494         /*
495          * only search in the section with the matching sign. This way the
496          * prel31 numbers can be compared as unsigned longs.
497          */
498         if (addr < (u32)(unsigned long)start)
499                 /* negative offsets: [start; origin) */
500                 stop = origin;
501         else
502                 /* positive offsets: [origin; stop) */
503                 start = origin;
504
505         /* prel31 for address relative to start */
506         addr_prel31 = (addr - (u32)(unsigned long)start) & 0x7fffffff;
507
508         while (start < stop - 1) {
509                 u32 addr_offset, d;
510
511                 const struct unwind_idx *mid = start + ((stop - start) >> 1);
512
513                 /*
514                  * As addr_prel31 is relative to start an offset is needed to
515                  * make it relative to mid.
516                  */
517                 if (read_user_data(&mid->addr_offset, addr_offset))
518                         return ERR_PTR(-EFAULT);
519
520                 d = (u32)(unsigned long)mid - (u32)(unsigned long)start;
521
522                 if (addr_prel31 - d < addr_offset) {
523                         stop = mid;
524                 } else {
525                         /* keep addr_prel31 relative to start */
526                         addr_prel31 -= ((u32)(unsigned long)mid -
527                                         (u32)(unsigned long)start);
528                         start = mid;
529                 }
530         }
531
532         if (likely(start->addr_offset <= addr_prel31))
533                 return start;
534
535         pr_debug("Unknown address %#x\n", addr);
536         return NULL;
537 }
538
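/* Locate the index entry covering @addr within the given exidx range. */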
539 static const struct unwind_idx *
540 unwind_find_idx(struct extab_info *exidx, u32 addr)
541 {
542         const struct unwind_idx *start;
543         const struct unwind_idx *origin;
544         const struct unwind_idx *stop;
545         const struct unwind_idx *idx = NULL;
546
547         start = (const struct unwind_idx *)exidx->addr;
548         stop = start + exidx->length / sizeof(*start);
549
550         origin = unwind_find_origin(start, stop);
551         if (IS_ERR(origin))
552                 return origin;
553
554         idx = search_index(addr, start, origin, stop);
555
556         pr_debug("addr: %#x, start: %p, origin: %p, stop: %p, idx: %p\n",
557                 addr, start, origin, stop, idx);
558
559         return idx;
560 }
561
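/*
 * Fetch the next unwind opcode byte. Instructions are packed four to a
 * 32-bit word and consumed from the most significant byte downwards:
 * ctrl->byte counts down from 3 and ctrl->entries tracks remaining words.
 */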
562 static unsigned long
563 unwind_get_byte(struct unwind_ctrl_block *ctrl, long *err)
564 {
565         unsigned long ret;
566         u32 insn_word;
567
568         *err = 0;
569
570         if (ctrl->entries <= 0) {
571                 pr_debug("error: corrupt unwind table\n");
572                 *err = -QUADD_URC_TBL_IS_CORRUPT;
573                 return 0;
574         }
575
576         *err = read_user_data(ctrl->insn, insn_word);
577         if (*err < 0)
578                 return 0;
579
580         ret = (insn_word >> (ctrl->byte * 8)) & 0xff;
581
582         if (ctrl->byte == 0) {
583                 ctrl->insn++;
584                 ctrl->entries--;
585                 ctrl->byte = 3;
586         } else
587                 ctrl->byte--;
588
589         return ret;
590 }
591
592 /*
593  * Execute the current unwind instruction.
594  */
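/*
 * The encodings handled below follow the ARM EHABI personality opcodes:
 *   00xxxxxx             vsp += (xxxxxx << 2) + 4  (e.g. 0x01 -> vsp += 8)
 *   01xxxxxx             vsp -= (xxxxxx << 2) + 4
 *   1000xxxx xxxxxxxx    pop {r4-r15} under mask (0x8000 = refuse to unwind)
 *   1001nnnn             vsp = r[nnnn]
 *   10100nnn / 10101nnn  pop r4-r[4+nnn] (and r14 for 10101nnn)
 *   10110000             finish
 *   10110001 0000iiii    pop {r0-r3} under mask
 *   10110010 uleb128     vsp += 0x204 + (uleb128 << 2)
 *   10110011, 11001000, 11001001, 10111nnn, 11010nnn
 *                        pop VFP double registers (only the stack
 *                        adjustment is performed here)
 * Anything else is reported as unhandled.
 */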
595 static long unwind_exec_insn(struct unwind_ctrl_block *ctrl)
596 {
597         long err;
598         unsigned int i;
599         unsigned long insn = unwind_get_byte(ctrl, &err);
600
601         if (err < 0)
602                 return err;
603
604         pr_debug("%s: insn = %08lx\n", __func__, insn);
605
606         if ((insn & 0xc0) == 0x00) {
607                 ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
608
609                 pr_debug("CMD_DATA_POP: vsp = vsp + %lu (new: %#x)\n",
610                         ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
611         } else if ((insn & 0xc0) == 0x40) {
612                 ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
613
614                 pr_debug("CMD_DATA_PUSH: vsp = vsp - %lu (new: %#x)\n",
615                         ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
616         } else if ((insn & 0xf0) == 0x80) {
617                 unsigned long mask;
618                 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
619                 int load_sp, reg = 4;
620
621                 insn = (insn << 8) | unwind_get_byte(ctrl, &err);
622                 if (err < 0)
623                         return err;
624
625                 mask = insn & 0x0fff;
626                 if (mask == 0) {
627                         pr_debug("CMD_REFUSED: unwind: 'Refuse to unwind' instruction %04lx\n",
628                                    insn);
629                         return -QUADD_URC_REFUSE_TO_UNWIND;
630                 }
631
632                 /* pop R4-R15 according to mask */
633                 load_sp = mask & (1 << (13 - 4));
634                 while (mask) {
635                         if (mask & 1) {
636                                 err = read_user_data(vsp++, ctrl->vrs[reg]);
637                                 if (err < 0)
638                                         return err;
639
640                                 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
641                         }
642                         mask >>= 1;
643                         reg++;
644                 }
645                 if (!load_sp)
646                         ctrl->vrs[SP] = (unsigned long)vsp;
647
648                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
649         } else if ((insn & 0xf0) == 0x90 &&
650                    (insn & 0x0d) != 0x0d) {
651                 ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
652                 pr_debug("CMD_REG_TO_SP: vsp = {r%lu}\n", insn & 0x0f);
653         } else if ((insn & 0xf0) == 0xa0) {
654                 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
655                 unsigned int reg;
656
657                 /* pop R4-R[4+bbb] */
658                 for (reg = 4; reg <= 4 + (insn & 7); reg++) {
659                         err = read_user_data(vsp++, ctrl->vrs[reg]);
660                         if (err < 0)
661                                 return err;
662
663                         pr_debug("CMD_REG_POP: pop {r%u}\n", reg);
664                 }
665
666                 if (insn & 0x08) {
667                         err = read_user_data(vsp++, ctrl->vrs[14]);
668                         if (err < 0)
669                                 return err;
670
671                         pr_debug("CMD_REG_POP: pop {r14}\n");
672                 }
673
674                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
675                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
676         } else if (insn == 0xb0) {
677                 if (ctrl->vrs[PC] == 0)
678                         ctrl->vrs[PC] = ctrl->vrs[LR];
679                 /* no further processing */
680                 ctrl->entries = 0;
681
682                 pr_debug("CMD_FINISH\n");
683         } else if (insn == 0xb1) {
684                 unsigned long mask = unwind_get_byte(ctrl, &err);
685                 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
686                 int reg = 0;
687
688                 if (err < 0)
689                         return err;
690
691                 if (mask == 0 || mask & 0xf0) {
692                         pr_debug("unwind: Spare encoding %04lx\n",
693                                (insn << 8) | mask);
694                         return -QUADD_URC_SPARE_ENCODING;
695                 }
696
697                 /* pop R0-R3 according to mask */
698                 while (mask) {
699                         if (mask & 1) {
700                                 err = read_user_data(vsp++, ctrl->vrs[reg]);
701                                 if (err < 0)
702                                         return err;
703
704                                 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
705                         }
706                         mask >>= 1;
707                         reg++;
708                 }
709
710                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
711                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
712         } else if (insn == 0xb2) {
713                 unsigned long uleb128 = unwind_get_byte(ctrl, &err);
714                 if (err < 0)
715                         return err;
716
717                 ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
718
719                 pr_debug("CMD_DATA_POP: vsp = vsp + %lu, new vsp: %#x\n",
720                          0x204 + (uleb128 << 2), ctrl->vrs[SP]);
721         } else if (insn == 0xb3 || insn == 0xc8 || insn == 0xc9) {
722                 unsigned long data, reg_from, reg_to;
723                 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
724
725                 data = unwind_get_byte(ctrl, &err);
726                 if (err < 0)
727                         return err;
728
729                 reg_from = (data & 0xf0) >> 4;
730                 reg_to = reg_from + (data & 0x0f);
731
732                 if (insn == 0xc8) {
733                         reg_from += 16;
734                         reg_to += 16;
735                 }
736
737                 for (i = reg_from; i <= reg_to; i++)
738                         vsp += 2;
739
740                 if (insn == 0xb3)
741                         vsp++;
742
743                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
745
746                 pr_debug("CMD_VFP_POP (%#lx %#lx): pop {D%lu-D%lu}\n",
747                          insn, data, reg_from, reg_to);
748                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
749         } else if ((insn & 0xf8) == 0xb8 || (insn & 0xf8) == 0xd0) {
750                 unsigned long reg_to;
751                 unsigned long data = insn & 0x07;
752                 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
753
754                 reg_to = 8 + data;
755
756                 for (i = 8; i <= reg_to; i++)
757                         vsp += 2;
758
759                 if ((insn & 0xf8) == 0xb8)
760                         vsp++;
761
762                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
763
764                 pr_debug("CMD_VFP_POP (%#lx): pop {D8-D%lu}\n",
765                          insn, reg_to);
766                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
767         } else {
768                 pr_debug("error: unhandled instruction %02lx\n", insn);
769                 return -QUADD_URC_UNHANDLED_INSTRUCTION;
770         }
771
772         pr_debug("%s: fp_arm: %#x, fp_thumb: %#x, sp: %#x, lr = %#x, pc: %#x\n",
773                  __func__,
774                  ctrl->vrs[FP_ARM], ctrl->vrs[FP_THUMB], ctrl->vrs[SP],
775                  ctrl->vrs[LR], ctrl->vrs[PC]);
776
777         return 0;
778 }
779
780 /*
781  * Unwind a single frame: look up the index entry for frame->pc, execute its
782  * unwind instructions and update the register snapshot (fp, sp, lr, pc) in place.
783  */
784 static long
785 unwind_frame(struct extab_info *exidx,
786              struct stackframe *frame,
787              struct vm_area_struct *vma_sp)
788 {
789         unsigned long high, low;
790         const struct unwind_idx *idx;
791         struct unwind_ctrl_block ctrl;
792         long err;
793         u32 val;
794
795         if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32)))
796                 return -QUADD_URC_SP_INCORRECT;
797
798         /* only go to a higher address on the stack */
799         low = frame->sp;
800         high = vma_sp->vm_end;
801
802         pr_debug("pc: %#lx, lr: %#lx, sp:%#lx, low/high: %#lx/%#lx\n",
803                 frame->pc, frame->lr, frame->sp, low, high);
804
805         idx = unwind_find_idx(exidx, frame->pc);
806         if (IS_ERR_OR_NULL(idx))
807                 return -QUADD_URC_IDX_NOT_FOUND;
808
809         pr_debug("index was found by pc (%#lx): %p\n", frame->pc, idx);
810
811         ctrl.vrs[FP_THUMB] = frame->fp_thumb;
812         ctrl.vrs[FP_ARM] = frame->fp_arm;
813
814         ctrl.vrs[SP] = frame->sp;
815         ctrl.vrs[LR] = frame->lr;
816         ctrl.vrs[PC] = 0;
817
818         err = read_user_data(&idx->insn, val);
819         if (err < 0)
820                 return err;
821
822         if (val == 1) {
823                 /* can't unwind */
824                 return -QUADD_URC_CANTUNWIND;
825         } else if ((val & 0x80000000) == 0) {
826                 /* prel31 to the unwind table */
827                 ctrl.insn = (u32 *)(unsigned long)prel31_to_addr(&idx->insn);
828                 if (!ctrl.insn)
829                         return -QUADD_URC_EACCESS;
830         } else if ((val & 0xff000000) == 0x80000000) {
831                 /* only personality routine 0 supported in the index */
832                 ctrl.insn = &idx->insn;
833         } else {
834                 pr_debug("unsupported personality routine %#x in the index at %p\n",
835                          val, idx);
836                 return -QUADD_URC_UNSUPPORTED_PR;
837         }
838
839         err = read_user_data(ctrl.insn, val);
840         if (err < 0)
841                 return err;
842
843         /* check the personality routine */
844         if ((val & 0xff000000) == 0x80000000) {
845                 ctrl.byte = 2;
846                 ctrl.entries = 1;
847         } else if ((val & 0xff000000) == 0x81000000) {
848                 ctrl.byte = 1;
849                 ctrl.entries = 1 + ((val & 0x00ff0000) >> 16);
850         } else {
851                 pr_debug("unsupported personality routine %#x at %p\n",
852                          val, ctrl.insn);
853                 return -QUADD_URC_UNSUPPORTED_PR;
854         }
855
856         while (ctrl.entries > 0) {
857                 err = unwind_exec_insn(&ctrl);
858                 if (err < 0)
859                         return err;
860
861                 if (ctrl.vrs[SP] & 0x03 ||
862                     ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
863                         return -QUADD_URC_SP_INCORRECT;
864         }
865
866         if (ctrl.vrs[PC] == 0)
867                 ctrl.vrs[PC] = ctrl.vrs[LR];
868
869         /* check for infinite loop */
870         if (frame->pc == ctrl.vrs[PC])
871                 return -QUADD_URC_FAILURE;
872
873         if (!validate_pc_addr(ctrl.vrs[PC], sizeof(u32)))
874                 return -QUADD_URC_PC_INCORRECT;
875
876         frame->fp_thumb = ctrl.vrs[FP_THUMB];
877         frame->fp_arm = ctrl.vrs[FP_ARM];
878
879         frame->sp = ctrl.vrs[SP];
880         frame->lr = ctrl.vrs[LR];
881         frame->pc = ctrl.vrs[PC];
882
883         return 0;
884 }
885
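/*
 * Walk the user-space stack starting from the register snapshot in @regs.
 * When the pc moves into a different mapping, the matching registered
 * tables are looked up; each successfully unwound return address is stored
 * in the callchain until unwinding fails or the stack is exhausted.
 */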
886 static void
887 unwind_backtrace(struct quadd_callchain *cc,
888                  struct extab_info *exidx,
889                  struct pt_regs *regs,
890                  struct vm_area_struct *vma_sp,
891                  struct task_struct *task)
892 {
893         struct extables tabs;
894         struct stackframe frame;
895
896 #ifdef CONFIG_ARM64
897         frame.fp_thumb = regs->compat_usr(7);
898         frame.fp_arm = regs->compat_usr(11);
899 #else
900         frame.fp_thumb = regs->ARM_r7;
901         frame.fp_arm = regs->ARM_fp;
902 #endif
903
904         frame.pc = instruction_pointer(regs);
905         frame.sp = quadd_user_stack_pointer(regs);
906         frame.lr = quadd_user_link_register(regs);
907
908         cc->unw_rc = QUADD_URC_FAILURE;
909
910         pr_debug("fp_arm: %#lx, fp_thumb: %#lx, sp: %#lx, lr: %#lx, pc: %#lx\n",
911                  frame.fp_arm, frame.fp_thumb, frame.sp, frame.lr, frame.pc);
912         pr_debug("vma_sp: %#lx - %#lx, length: %#lx\n",
913                  vma_sp->vm_start, vma_sp->vm_end,
914                  vma_sp->vm_end - vma_sp->vm_start);
915
916         while (1) {
917                 long err;
918                 unsigned long where = frame.pc;
919                 struct vm_area_struct *vma_pc;
920                 struct mm_struct *mm = task->mm;
921
922                 if (!mm)
923                         break;
924
925                 if (!validate_stack_addr(frame.sp, vma_sp, sizeof(u32))) {
926                         cc->unw_rc = QUADD_URC_SP_INCORRECT;
927                         break;
928                 }
929
930                 vma_pc = find_vma(mm, frame.pc);
931                 if (!vma_pc)
932                         break;
933
934                 if (!is_vma_addr(exidx->addr, vma_pc, sizeof(u32))) {
935                         err = __search_ex_region(vma_pc->vm_start, &tabs);
936                         if (err) {
937                                 cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
938                                 break;
939                         }
940
941                         exidx = &tabs.exidx;
942                 }
943
944                 err = unwind_frame(exidx, &frame, vma_sp);
945                 if (err < 0) {
946                         pr_debug("end unwind, urc: %ld\n", err);
947                         cc->unw_rc = -err;
948                         break;
949                 }
950
951                 pr_debug("function at [<%08lx>] from [<%08lx>]\n",
952                          where, frame.pc);
953
954                 quadd_callchain_store(cc, frame.pc);
955
956                 cc->curr_sp = frame.sp;
957                 cc->curr_fp = frame.fp_arm;
958         }
959 }
960
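/*
 * Entry point for exception-table based user backtraces: validate the
 * register state and mm, find the VMAs containing pc and sp, look up the
 * tables registered for the pc's mapping and run the unwinder. Returns the
 * number of callchain entries collected (0 on failure).
 */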
961 unsigned int
962 quadd_get_user_callchain_ut(struct pt_regs *regs,
963                             struct quadd_callchain *cc,
964                             struct task_struct *task)
965 {
966         long err;
967         unsigned long ip, sp;
968         struct vm_area_struct *vma, *vma_sp;
969         struct mm_struct *mm = task->mm;
970         struct extables tabs;
971
972         cc->unw_method = QUADD_UNW_METHOD_EHT;
973         cc->unw_rc = QUADD_URC_FAILURE;
974
975 #ifdef CONFIG_ARM64
976         if (!compat_user_mode(regs)) {
977                 pr_warn_once("64-bit user mode is not supported\n");
978                 return 0;
979         }
980 #endif
981
982         if (!regs || !mm)
983                 return 0;
984
985         ip = instruction_pointer(regs);
986         sp = quadd_user_stack_pointer(regs);
987
988         vma = find_vma(mm, ip);
989         if (!vma)
990                 return 0;
991
992         vma_sp = find_vma(mm, sp);
993         if (!vma_sp)
994                 return 0;
995
996         err = __search_ex_region(vma->vm_start, &tabs);
997         if (err) {
998                 cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
999                 return 0;
1000         }
1001
1002         unwind_backtrace(cc, &tabs.exidx, regs, vma_sp, task);
1003
1004         return cc->nr;
1005 }
1006
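/*
 * Prepare for a new profiling session: allocate a fresh region array,
 * publish it via RCU and remember the tgid of the profiled task.
 */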
1007 int quadd_unwind_start(struct task_struct *task)
1008 {
1009         struct regions_data *rd, *rd_old;
1010
1011         spin_lock(&ctx.lock);
1012
1013         rd_old = rcu_dereference(ctx.rd);
1014         if (rd_old)
1015                 pr_warn("%s: warning: regions data is already initialized\n", __func__);
1016
1017         rd = rd_alloc(QUADD_EXTABS_SIZE);
1018         if (IS_ERR_OR_NULL(rd)) {
1019                 pr_err("%s: error: rd_alloc\n", __func__);
1020                 spin_unlock(&ctx.lock);
1021                 return -ENOMEM;
1022         }
1023
1024         rcu_assign_pointer(ctx.rd, rd);
1025
1026         if (rd_old)
1027                 call_rcu(&rd_old->rcu, rd_free_rcu);
1028
1029         ctx.pid = task->tgid;
1030
1031         ctx.pinned_pages = 0;
1032         ctx.pinned_size = 0;
1033
1034         spin_unlock(&ctx.lock);
1035
1036         return 0;
1037 }
1038
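/*
 * Tear down the current session: unpublish the region array (it is freed
 * after an RCU grace period) and report how much table memory was touched.
 */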
1039 void quadd_unwind_stop(void)
1040 {
1041         struct regions_data *rd;
1042
1043         spin_lock(&ctx.lock);
1044
1045         ctx.pid = 0;
1046
1047         rd = rcu_dereference(ctx.rd);
1048         if (rd) {
1049                 rcu_assign_pointer(ctx.rd, NULL);
1050                 call_rcu(&rd->rcu, rd_free_rcu);
1051         }
1052
1053         spin_unlock(&ctx.lock);
1054
1055         pr_info("exception tables size: %lu bytes\n", ctx.pinned_size);
1056         pr_info("pinned pages: %lu (%lu bytes)\n", ctx.pinned_pages,
1057                 ctx.pinned_pages * PAGE_SIZE);
1058 }
1059
1060 int quadd_unwind_init(void)
1061 {
1062         spin_lock_init(&ctx.lock);
1063         rcu_assign_pointer(ctx.rd, NULL);
1064         ctx.pid = 0;
1065
1066         return 0;
1067 }
1068
1069 void quadd_unwind_deinit(void)
1070 {
1071         quadd_unwind_stop();
1072         rcu_barrier();
1073 }