]> rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/blob - drivers/misc/tegra-profiler/eh_unwind.c
misc: tegra-profiler: support too deep stack level
[sojka/nv-tegra/linux-3.10.git] / drivers / misc / tegra-profiler / eh_unwind.c
1 /*
2  * drivers/misc/tegra-profiler/eh_unwind.c
3  *
4  * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/mm.h>
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/uaccess.h>
23 #include <linux/err.h>
24 #include <linux/rcupdate.h>
25
26 #include <linux/tegra_profiler.h>
27
28 #include "eh_unwind.h"
29 #include "backtrace.h"
30 #include "comm.h"
31
/* Initial capacity (in entries) of the RCU-published region table. */
#define QUADD_EXTABS_SIZE       0x100

/* Number of pages spanned by the byte range [a, a + l). */
#define GET_NR_PAGES(a, l) \
        ((PAGE_ALIGN((a) + (l)) - ((a) & PAGE_MASK)) / PAGE_SIZE)
36
/* Indices into unwind_ctrl_block::vrs for the registers of interest. */
enum regs {
        FP_THUMB = 7,   /* r7: frame pointer in Thumb code */
        FP_ARM = 11,    /* r11: frame pointer in ARM code */

        SP = 13,
        LR = 14,
        PC = 15
};
45
/* Location of one exception table (exidx or extab) for a region. */
struct extab_info {
        unsigned long addr;     /* table start in the task's address space */
        unsigned long length;   /* table length in bytes */

        unsigned long mmap_offset;      /* offset of the table copy in mmap->data */
};

/* The pair of ARM EHABI tables describing one executable mapping. */
struct extables {
        struct extab_info extab;        /* unwind instructions (.ARM.extab) */
        struct extab_info exidx;        /* index table (.ARM.exidx) */
};

/* Per-region record: address range plus its exception tables. */
struct ex_region_info {
        unsigned long vm_start;         /* region start; sort key of the table */
        unsigned long vm_end;           /* region end */

        struct extables tabs;
        struct quadd_extabs_mmap *mmap; /* mmapped copy holding the tables */

        struct list_head list;          /* linked on mmap->ex_entries */
};

/* RCU-published array of regions, sorted by vm_start. */
struct regions_data {
        struct ex_region_info *entries; /* sorted array; capacity == size */

        unsigned long curr_nr;          /* number of valid entries */
        unsigned long size;             /* allocated capacity */

        struct rcu_head rcu;            /* deferred free of replaced tables */
};
76
/* Module-wide unwinder state. */
struct quadd_unwind_ctx {
        struct regions_data *rd;        /* RCU-protected region table */

        pid_t pid;
        unsigned long ex_tables_size;   /* total bytes of registered tables */
        spinlock_t lock;                /* serializes table updates */
};

/* One .ARM.exidx entry: prel31 function offset + unwind data word. */
struct unwind_idx {
        u32 addr_offset;
        u32 insn;
};

/* Register snapshot needed to unwind one user-space frame. */
struct stackframe {
        unsigned long fp_thumb;         /* r7 (Thumb frame pointer) */
        unsigned long fp_arm;           /* r11 (ARM frame pointer) */

        unsigned long sp;
        unsigned long lr;
        unsigned long pc;
};

/* State of the EHABI bytecode interpreter for one frame. */
struct unwind_ctrl_block {
        u32 vrs[16];            /* virtual register set */
        const u32 *insn;        /* pointer to the current instr word */
        int entries;            /* number of entries left */
        int byte;               /* current byte in the instr word */
};

struct pin_pages_work {
        struct work_struct work;
        unsigned long vm_start;
};

/* Global unwinder context.  NOTE(review): used only in this file as far
 * as visible here - could likely be static; confirm against the header. */
struct quadd_unwind_ctx ctx;
112
/*
 * A stack slot is usable only when it is word-aligned and the nbytes
 * starting at it fall inside the given stack vma.
 */
static inline int
validate_stack_addr(unsigned long addr,
                    struct vm_area_struct *vma,
                    unsigned long nbytes)
{
        return (addr & 0x03) ? 0 : is_vma_addr(addr, vma, nbytes);
}
123
124 static inline int
125 validate_pc_addr(unsigned long addr, unsigned long nbytes)
126 {
127         return addr && addr < TASK_SIZE - nbytes;
128 }
129
130 static inline int
131 validate_mmap_addr(struct quadd_extabs_mmap *mmap,
132                    unsigned long addr, unsigned long nbytes)
133 {
134         struct vm_area_struct *vma = mmap->mmap_vma;
135         unsigned long size = vma->vm_end - vma->vm_start;
136         unsigned long data = (unsigned long)mmap->data;
137
138         if (addr & 0x03) {
139                 pr_err_once("%s: error: unaligned address: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
140                             __func__, addr, data, data + size,
141                        vma->vm_start, vma->vm_end);
142                 return 0;
143         }
144
145         if (addr < data || addr >= data + (size - nbytes)) {
146                 pr_err_once("%s: error: addr: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
147                             __func__, addr, data, data + size,
148                        vma->vm_start, vma->vm_end);
149                 return 0;
150         }
151
152         return 1;
153 }
154
/*
 * Fault-safe read of one word at @addr into @retval; evaluates to 0 on
 * success or -QUADD_URC_EACCESS when the access faults.
 */
#define read_user_data(addr, retval)                    \
({                                                      \
        long ret;                                       \
        ret = probe_kernel_address(addr, retval);       \
        if (ret)                                        \
                ret = -QUADD_URC_EACCESS;               \
        ret;                                            \
})
163
164 static inline long
165 read_mmap_data(struct quadd_extabs_mmap *mmap, const u32 *addr, u32 *retval)
166 {
167         if (!validate_mmap_addr(mmap, (unsigned long)addr, sizeof(u32)))
168                 return -QUADD_URC_EACCESS;
169
170         *retval = *addr;
171         return 0;
172 }
173
174 static inline unsigned long
175 ex_addr_to_mmap_addr(unsigned long addr,
176                      struct ex_region_info *ri,
177                      int exidx)
178 {
179         unsigned long offset;
180         struct extab_info *ei;
181
182         ei = exidx ? &ri->tabs.exidx : &ri->tabs.extab;
183         offset = addr - ei->addr;
184
185         return ei->mmap_offset + offset + (unsigned long)ri->mmap->data;
186 }
187
188 static inline unsigned long
189 mmap_addr_to_ex_addr(unsigned long addr,
190                      struct ex_region_info *ri,
191                      int exidx)
192 {
193         unsigned long offset;
194         struct extab_info *ei;
195
196         ei = exidx ? &ri->tabs.exidx : &ri->tabs.extab;
197         offset = addr - ei->mmap_offset - (unsigned long)ri->mmap->data;
198
199         return ei->addr + offset;
200 }
201
202 static inline u32
203 prel31_to_addr(const u32 *ptr)
204 {
205         u32 value;
206         s32 offset;
207
208         if (read_user_data(ptr, value))
209                 return 0;
210
211         /* sign-extend to 32 bits */
212         offset = (((s32)value) << 1) >> 1;
213         return (u32)(unsigned long)ptr + offset;
214 }
215
216 static unsigned long
217 mmap_prel31_to_addr(const u32 *ptr, struct ex_region_info *ri,
218                     int is_src_exidx, int is_dst_exidx, int to_mmap)
219 {
220         u32 value, addr;
221         unsigned long addr_res;
222         s32 offset;
223         struct extab_info *ei_src, *ei_dst;
224
225         ei_src = is_src_exidx ? &ri->tabs.exidx : &ri->tabs.extab;
226         ei_dst = is_dst_exidx ? &ri->tabs.exidx : &ri->tabs.extab;
227
228         value = *ptr;
229         offset = (((s32)value) << 1) >> 1;
230
231         addr = mmap_addr_to_ex_addr((unsigned long)ptr, ri, is_src_exidx);
232         addr += offset;
233         addr_res = addr;
234
235         if (to_mmap)
236                 addr_res = ex_addr_to_mmap_addr(addr_res, ri, is_dst_exidx);
237
238         return addr_res;
239 }
240
/*
 * Insert @new_entry into @rd's array, keeping it sorted by vm_start.
 * Returns the number of entries added: 1 on insert, 0 when an entry
 * with the same vm_start already exists or the array is missing.
 *
 * NOTE(review): there is no check that curr_nr < rd->size; callers
 * must guarantee spare capacity before calling - confirm.
 */
static int
add_ex_region(struct regions_data *rd,
              struct ex_region_info *new_entry)
{
        unsigned int i_min, i_max, mid;
        struct ex_region_info *array = rd->entries;
        unsigned long size = rd->curr_nr;

        if (!array)
                return 0;

        /* trivial cases: empty array, or single duplicate entry */
        if (size == 0) {
                memcpy(&array[0], new_entry, sizeof(*new_entry));
                return 1;
        } else if (size == 1 && array[0].vm_start == new_entry->vm_start) {
                return 0;
        }

        i_min = 0;
        i_max = size;

        /* fast paths: insert before the first or after the last entry */
        if (array[0].vm_start > new_entry->vm_start) {
                memmove(array + 1, array,
                        size * sizeof(*array));
                memcpy(&array[0], new_entry, sizeof(*new_entry));
                return 1;
        } else if (array[size - 1].vm_start < new_entry->vm_start) {
                memcpy(&array[size], new_entry, sizeof(*new_entry));
                return 1;
        }

        /* lower-bound binary search for the insertion point */
        while (i_min < i_max) {
                mid = i_min + (i_max - i_min) / 2;

                if (new_entry->vm_start <= array[mid].vm_start)
                        i_max = mid;
                else
                        i_min = mid + 1;
        }

        if (array[i_max].vm_start == new_entry->vm_start) {
                /* region already registered */
                return 0;
        } else {
                /* shift the tail up and insert at i_max */
                memmove(array + i_max + 1,
                        array + i_max,
                        (size - i_max) * sizeof(*array));
                memcpy(&array[i_max], new_entry, sizeof(*new_entry));
                return 1;
        }
}
291
292 static int
293 remove_ex_region(struct regions_data *rd,
294                  struct ex_region_info *entry)
295 {
296         unsigned int i_min, i_max, mid;
297         struct ex_region_info *array = rd->entries;
298         unsigned long size = rd->curr_nr;
299
300         if (!array)
301                 return 0;
302
303         if (size == 0)
304                 return 0;
305
306         if (size == 1) {
307                 if (array[0].vm_start == entry->vm_start)
308                         return 1;
309                 else
310                         return 0;
311         }
312
313         if (array[0].vm_start > entry->vm_start)
314                 return 0;
315         else if (array[size - 1].vm_start < entry->vm_start)
316                 return 0;
317
318         i_min = 0;
319         i_max = size;
320
321         while (i_min < i_max) {
322                 mid = i_min + (i_max - i_min) / 2;
323
324                 if (entry->vm_start <= array[mid].vm_start)
325                         i_max = mid;
326                 else
327                         i_min = mid + 1;
328         }
329
330         if (array[i_max].vm_start == entry->vm_start) {
331                 memmove(array + i_max,
332                         array + i_max + 1,
333                         (size - i_max) * sizeof(*array));
334                 return 1;
335         } else {
336                 return 0;
337         }
338 }
339
340 static struct ex_region_info *
341 search_ex_region(struct ex_region_info *array,
342                  unsigned long size,
343                  unsigned long key,
344                  struct ex_region_info *ri)
345 {
346         unsigned int i_min, i_max, mid;
347
348         if (size == 0)
349                 return NULL;
350
351         i_min = 0;
352         i_max = size;
353
354         while (i_min < i_max) {
355                 mid = i_min + (i_max - i_min) / 2;
356
357                 if (key <= array[mid].vm_start)
358                         i_max = mid;
359                 else
360                         i_min = mid + 1;
361         }
362
363         if (array[i_max].vm_start == key) {
364                 memcpy(ri, &array[i_max], sizeof(*ri));
365                 return &array[i_max];
366         }
367
368         return NULL;
369 }
370
/*
 * RCU-protected lookup of the region whose vm_start equals @key.
 * On success the entry is copied into *@ri and 0 is returned (the
 * copy stays valid after rcu_read_unlock(); the returned internal
 * pointer does not).  Returns -ENOENT when absent.
 */
static long
__search_ex_region(unsigned long key, struct ex_region_info *ri)
{
        struct regions_data *rd;
        struct ex_region_info *ri_p = NULL;

        rcu_read_lock();

        rd = rcu_dereference(ctx.rd);
        if (!rd)
                goto out;

        ri_p = search_ex_region(rd->entries, rd->curr_nr, key, ri);

out:
        rcu_read_unlock();
        return ri_p ? 0 : -ENOENT;
}
389
390 static struct regions_data *rd_alloc(unsigned long size)
391 {
392         struct regions_data *rd;
393
394         rd = kzalloc(sizeof(*rd), GFP_KERNEL);
395         if (!rd)
396                 return NULL;
397
398         rd->entries = kzalloc(size * sizeof(*rd->entries), GFP_KERNEL);
399         if (!rd->entries) {
400                 kfree(rd);
401                 return NULL;
402         }
403
404         rd->size = size;
405         rd->curr_nr = 0;
406
407         return rd;
408 }
409
410 static void rd_free(struct regions_data *rd)
411 {
412         if (rd)
413                 kfree(rd->entries);
414
415         kfree(rd);
416 }
417
418 static void rd_free_rcu(struct rcu_head *rh)
419 {
420         struct regions_data *rd = container_of(rh, struct regions_data, rcu);
421         rd_free(rd);
422 }
423
/*
 * Register the exception tables (exidx/extab) of a newly mmapped
 * executable region.  A grown copy of the current region array is
 * built, the new region inserted, a private copy of the entry linked
 * on mmap->ex_entries (for teardown in quadd_unwind_delete_mmap()),
 * and the new array published via rcu_assign_pointer(); the old array
 * is freed after a grace period.  Returns 0 or a negative errno.
 *
 * NOTE(review): rd_alloc()/kzalloc(GFP_KERNEL) are called while the
 * ctx.lock spinlock is held - may sleep in atomic context; confirm
 * the calling context / lock choice.
 */
int quadd_unwind_set_extab(struct quadd_extables *extabs,
                           struct quadd_extabs_mmap *mmap)
{
        int err = 0;
        unsigned long nr_entries, nr_added, new_size;
        struct ex_region_info ri_entry;
        struct extab_info *ti;
        struct regions_data *rd, *rd_new;
        struct ex_region_info *ex_entry;

        spin_lock(&ctx.lock);

        rd = rcu_dereference(ctx.rd);
        if (!rd) {
                pr_warn("%s: warning: rd\n", __func__);
                new_size = QUADD_EXTABS_SIZE;
                nr_entries = 0;
        } else {
                new_size = rd->size;
                nr_entries = rd->curr_nr;
        }

        /* grow capacity by 50% when the array is full */
        if (nr_entries >= new_size)
                new_size += new_size >> 1;

        rd_new = rd_alloc(new_size);
        if (IS_ERR_OR_NULL(rd_new)) {
                pr_err("%s: error: rd_alloc\n", __func__);
                err = -ENOMEM;
                goto error_out;
        }

        if (rd && nr_entries)
                memcpy(rd_new->entries, rd->entries,
                       nr_entries * sizeof(*rd->entries));

        rd_new->curr_nr = nr_entries;

        /* describe the new region and where its table copies live */
        ri_entry.vm_start = extabs->vm_start;
        ri_entry.vm_end = extabs->vm_end;

        ri_entry.mmap = mmap;

        ti = &ri_entry.tabs.exidx;
        ti->addr = extabs->exidx.addr;
        ti->length = extabs->exidx.length;
        ti->mmap_offset = extabs->reserved[QUADD_EXT_IDX_EXIDX_OFFSET];
        ctx.ex_tables_size += ti->length;

        ti = &ri_entry.tabs.extab;
        ti->addr = extabs->extab.addr;
        ti->length = extabs->extab.length;
        ti->mmap_offset = extabs->reserved[QUADD_EXT_IDX_EXTAB_OFFSET];
        ctx.ex_tables_size += ti->length;

        /*
         * NOTE(review): nr_added == 0 (duplicate region) falls through
         * with err == 0, so a duplicate reports success after freeing
         * rd_new; the ex_tables_size increments above are also kept.
         * Confirm this is intentional.
         */
        nr_added = add_ex_region(rd_new, &ri_entry);
        if (nr_added == 0)
                goto error_free;
        rd_new->curr_nr += nr_added;

        ex_entry = kzalloc(sizeof(*ex_entry), GFP_KERNEL);
        if (!ex_entry) {
                err = -ENOMEM;
                goto error_free;
        }
        memcpy(ex_entry, &ri_entry, sizeof(*ex_entry));

        INIT_LIST_HEAD(&ex_entry->list);
        list_add_tail(&ex_entry->list, &mmap->ex_entries);

        /* publish the new table; readers switch over atomically */
        rcu_assign_pointer(ctx.rd, rd_new);

        if (rd)
                call_rcu(&rd->rcu, rd_free_rcu);

        spin_unlock(&ctx.lock);

        return 0;

error_free:
        rd_free(rd_new);
error_out:
        spin_unlock(&ctx.lock);
        return err;
}
509
510 static int
511 clean_mmap(struct regions_data *rd, struct quadd_extabs_mmap *mmap, int rm_ext)
512 {
513         int nr_removed = 0;
514         struct ex_region_info *entry, *next;
515
516         if (!rd || !mmap)
517                 return 0;
518
519         list_for_each_entry_safe(entry, next, &mmap->ex_entries, list) {
520                 if (rm_ext)
521                         nr_removed += remove_ex_region(rd, entry);
522
523                 list_del(&entry->list);
524                 kfree(entry);
525         }
526
527         return nr_removed;
528 }
529
/*
 * Unregister all exception-table regions that belong to @mmap: build a
 * copy of the current region array, strip the mmap's entries from it
 * and publish the result via RCU.  The old array is freed after a
 * grace period.
 *
 * NOTE(review): as in quadd_unwind_set_extab(), rd_alloc(GFP_KERNEL)
 * runs under the ctx.lock spinlock - confirm the calling context.
 */
void quadd_unwind_delete_mmap(struct quadd_extabs_mmap *mmap)
{
        unsigned long nr_entries, nr_removed, new_size;
        struct regions_data *rd, *rd_new;

        if (!mmap)
                return;

        spin_lock(&ctx.lock);

        rd = rcu_dereference(ctx.rd);
        if (!rd || !rd->curr_nr)
                goto error_out;

        nr_entries = rd->curr_nr;
        new_size = min_t(unsigned long, rd->size, nr_entries);

        rd_new = rd_alloc(new_size);
        if (IS_ERR_OR_NULL(rd_new)) {
                pr_err("%s: error: rd_alloc\n", __func__);
                goto error_out;
        }
        /* redundant: rd_alloc() already set size to new_size */
        rd_new->size = new_size;
        rd_new->curr_nr = nr_entries;

        memcpy(rd_new->entries, rd->entries,
                nr_entries * sizeof(*rd->entries));

        /* drop this mmap's regions from the copy, then publish it */
        nr_removed = clean_mmap(rd_new, mmap, 1);
        rd_new->curr_nr -= nr_removed;

        rcu_assign_pointer(ctx.rd, rd_new);
        call_rcu(&rd->rcu, rd_free_rcu);

error_out:
        spin_unlock(&ctx.lock);
}
567
/*
 * Binary-search region @ri's exidx table (sorted by prel31-encoded
 * function start address) for the index entry covering @addr.
 * Returns a pointer into the mmapped copy of the table, or NULL when
 * the table is empty or addr lies outside its range.
 */
static const struct unwind_idx *
unwind_find_idx(struct ex_region_info *ri, u32 addr)
{
        unsigned long length;
        u32 value;
        struct unwind_idx *start;
        struct unwind_idx *stop;
        struct unwind_idx *mid = NULL;
        length = ri->tabs.exidx.length / sizeof(*start);

        if (unlikely(!length))
                return NULL;

        /* table boundaries inside the mmapped copy */
        start = (struct unwind_idx *)((char *)ri->mmap->data +
                ri->tabs.exidx.mmap_offset);
        stop = start + length - 1;

        /* addr below the first function start: not covered */
        value = (u32)mmap_prel31_to_addr(&start->addr_offset, ri, 1, 0, 0);
        if (addr < value)
                return NULL;

        /* addr at/above the last entry's start: not covered */
        value = (u32)mmap_prel31_to_addr(&stop->addr_offset, ri, 1, 0, 0);
        if (addr >= value)
                return NULL;

        /* narrow [start, stop] to the entry whose start <= addr */
        while (start < stop - 1) {
                mid = start + ((stop - start) >> 1);

                value = (u32)mmap_prel31_to_addr(&mid->addr_offset,
                                                 ri, 1, 0, 0);

                if (addr < value)
                        stop = mid;
                else
                        start = mid;
        }

        return start;
}
607
608 static unsigned long
609 unwind_get_byte(struct quadd_extabs_mmap *mmap,
610                 struct unwind_ctrl_block *ctrl, long *err)
611 {
612         unsigned long ret;
613         u32 insn_word;
614
615         *err = 0;
616
617         if (ctrl->entries <= 0) {
618                 pr_err_once("%s: error: corrupt unwind table\n", __func__);
619                 *err = -QUADD_URC_TBL_IS_CORRUPT;
620                 return 0;
621         }
622
623         *err = read_mmap_data(mmap, ctrl->insn, &insn_word);
624         if (*err < 0)
625                 return 0;
626
627         ret = (insn_word >> (ctrl->byte * 8)) & 0xff;
628
629         if (ctrl->byte == 0) {
630                 ctrl->insn++;
631                 ctrl->entries--;
632                 ctrl->byte = 3;
633         } else
634                 ctrl->byte--;
635
636         return ret;
637 }
638
639 /*
640  * Execute the current unwind instruction.
641  */
642 static long
643 unwind_exec_insn(struct quadd_extabs_mmap *mmap,
644                  struct unwind_ctrl_block *ctrl)
645 {
646         long err;
647         unsigned int i;
648         unsigned long insn = unwind_get_byte(mmap, ctrl, &err);
649
650         if (err < 0)
651                 return err;
652
653         pr_debug("%s: insn = %08lx\n", __func__, insn);
654
655         if ((insn & 0xc0) == 0x00) {
656                 ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
657
658                 pr_debug("CMD_DATA_POP: vsp = vsp + %lu (new: %#x)\n",
659                         ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
660         } else if ((insn & 0xc0) == 0x40) {
661                 ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
662
663                 pr_debug("CMD_DATA_PUSH: vsp = vsp â€“ %lu (new: %#x)\n",
664                         ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
665         } else if ((insn & 0xf0) == 0x80) {
666                 unsigned long mask;
667                 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
668                 int load_sp, reg = 4;
669
670                 insn = (insn << 8) | unwind_get_byte(mmap, ctrl, &err);
671                 if (err < 0)
672                         return err;
673
674                 mask = insn & 0x0fff;
675                 if (mask == 0) {
676                         pr_debug("CMD_REFUSED: unwind: 'Refuse to unwind' instruction %04lx\n",
677                                    insn);
678                         return -QUADD_URC_REFUSE_TO_UNWIND;
679                 }
680
681                 /* pop R4-R15 according to mask */
682                 load_sp = mask & (1 << (13 - 4));
683                 while (mask) {
684                         if (mask & 1) {
685                                 err = read_user_data(vsp++, ctrl->vrs[reg]);
686                                 if (err < 0)
687                                         return err;
688
689                                 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
690                         }
691                         mask >>= 1;
692                         reg++;
693                 }
694                 if (!load_sp)
695                         ctrl->vrs[SP] = (unsigned long)vsp;
696
697                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
698         } else if ((insn & 0xf0) == 0x90 &&
699                    (insn & 0x0d) != 0x0d) {
700                 ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
701                 pr_debug("CMD_REG_TO_SP: vsp = {r%lu}\n", insn & 0x0f);
702         } else if ((insn & 0xf0) == 0xa0) {
703                 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
704                 unsigned int reg;
705
706                 /* pop R4-R[4+bbb] */
707                 for (reg = 4; reg <= 4 + (insn & 7); reg++) {
708                         err = read_user_data(vsp++, ctrl->vrs[reg]);
709                         if (err < 0)
710                                 return err;
711
712                         pr_debug("CMD_REG_POP: pop {r%u}\n", reg);
713                 }
714
715                 if (insn & 0x08) {
716                         err = read_user_data(vsp++, ctrl->vrs[14]);
717                         if (err < 0)
718                                 return err;
719
720                         pr_debug("CMD_REG_POP: pop {r14}\n");
721                 }
722
723                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
724                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
725         } else if (insn == 0xb0) {
726                 if (ctrl->vrs[PC] == 0)
727                         ctrl->vrs[PC] = ctrl->vrs[LR];
728                 /* no further processing */
729                 ctrl->entries = 0;
730
731                 pr_debug("CMD_FINISH\n");
732         } else if (insn == 0xb1) {
733                 unsigned long mask = unwind_get_byte(mmap, ctrl, &err);
734                 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
735                 int reg = 0;
736
737                 if (err < 0)
738                         return err;
739
740                 if (mask == 0 || mask & 0xf0) {
741                         pr_debug("unwind: Spare encoding %04lx\n",
742                                (insn << 8) | mask);
743                         return -QUADD_URC_SPARE_ENCODING;
744                 }
745
746                 /* pop R0-R3 according to mask */
747                 while (mask) {
748                         if (mask & 1) {
749                                 err = read_user_data(vsp++, ctrl->vrs[reg]);
750                                 if (err < 0)
751                                         return err;
752
753                                 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
754                         }
755                         mask >>= 1;
756                         reg++;
757                 }
758
759                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
760                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
761         } else if (insn == 0xb2) {
762                 unsigned long uleb128 = unwind_get_byte(mmap, ctrl, &err);
763                 if (err < 0)
764                         return err;
765
766                 ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
767
768                 pr_debug("CMD_DATA_POP: vsp = vsp + %lu, new vsp: %#x\n",
769                          0x204 + (uleb128 << 2), ctrl->vrs[SP]);
770         } else if (insn == 0xb3 || insn == 0xc8 || insn == 0xc9) {
771                 unsigned long data, reg_from, reg_to;
772                 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
773
774                 data = unwind_get_byte(mmap, ctrl, &err);
775                 if (err < 0)
776                         return err;
777
778                 reg_from = (data & 0xf0) >> 4;
779                 reg_to = reg_from + (data & 0x0f);
780
781                 if (insn == 0xc8) {
782                         reg_from += 16;
783                         reg_to += 16;
784                 }
785
786                 for (i = reg_from; i <= reg_to; i++)
787                         vsp += 2;
788
789                 if (insn == 0xb3)
790                         vsp++;
791
792                 ctrl->vrs[SP] = (unsigned long)vsp;
793                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
794
795                 pr_debug("CMD_VFP_POP (%#lx %#lx): pop {D%lu-D%lu}\n",
796                          insn, data, reg_from, reg_to);
797                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
798         } else if ((insn & 0xf8) == 0xb8 || (insn & 0xf8) == 0xd0) {
799                 unsigned long reg_to;
800                 unsigned long data = insn & 0x07;
801                 u32 *vsp = (u32 *)(unsigned long)ctrl->vrs[SP];
802
803                 reg_to = 8 + data;
804
805                 for (i = 8; i <= reg_to; i++)
806                         vsp += 2;
807
808                 if ((insn & 0xf8) == 0xb8)
809                         vsp++;
810
811                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
812
813                 pr_debug("CMD_VFP_POP (%#lx): pop {D8-D%lu}\n",
814                          insn, reg_to);
815                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
816         } else {
817                 pr_debug("error: unhandled instruction %02lx\n", insn);
818                 return -QUADD_URC_UNHANDLED_INSTRUCTION;
819         }
820
821         pr_debug("%s: fp_arm: %#x, fp_thumb: %#x, sp: %#x, lr = %#x, pc: %#x\n",
822                  __func__,
823                  ctrl->vrs[FP_ARM], ctrl->vrs[FP_THUMB], ctrl->vrs[SP],
824                  ctrl->vrs[LR], ctrl->vrs[PC]);
825
826         return 0;
827 }
828
829 /*
830  * Unwind a single frame starting with *sp for the symbol at *pc. It
831  * updates the *pc and *sp with the new values.
832  */
833 static long
834 unwind_frame(struct ex_region_info *ri,
835              struct stackframe *frame,
836              struct vm_area_struct *vma_sp)
837 {
838         unsigned long high, low;
839         const struct unwind_idx *idx;
840         struct unwind_ctrl_block ctrl;
841         unsigned long err;
842         u32 val;
843
844         if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32)))
845                 return -QUADD_URC_SP_INCORRECT;
846
847         /* only go to a higher address on the stack */
848         low = frame->sp;
849         high = vma_sp->vm_end;
850
851         pr_debug("pc: %#lx, lr: %#lx, sp:%#lx, low/high: %#lx/%#lx\n",
852                 frame->pc, frame->lr, frame->sp, low, high);
853
854         idx = unwind_find_idx(ri, frame->pc);
855         if (IS_ERR_OR_NULL(idx))
856                 return -QUADD_URC_IDX_NOT_FOUND;
857
858         pr_debug("index was found by pc (%#lx): %p\n", frame->pc, idx);
859
860         ctrl.vrs[FP_THUMB] = frame->fp_thumb;
861         ctrl.vrs[FP_ARM] = frame->fp_arm;
862
863         ctrl.vrs[SP] = frame->sp;
864         ctrl.vrs[LR] = frame->lr;
865         ctrl.vrs[PC] = 0;
866
867         err = read_mmap_data(ri->mmap, &idx->insn, &val);
868         if (err < 0)
869                 return err;
870
871         if (val == 1) {
872                 /* can't unwind */
873                 return -QUADD_URC_CANTUNWIND;
874         } else if ((val & 0x80000000) == 0) {
875                 /* prel31 to the unwind table */
876                 ctrl.insn = (u32 *)(unsigned long)
877                                 mmap_prel31_to_addr(&idx->insn, ri, 1, 0, 1);
878                 if (!ctrl.insn)
879                         return -QUADD_URC_EACCESS;
880         } else if ((val & 0xff000000) == 0x80000000) {
881                 /* only personality routine 0 supported in the index */
882                 ctrl.insn = &idx->insn;
883         } else {
884                 pr_debug("unsupported personality routine %#x in the index at %p\n",
885                          val, idx);
886                 return -QUADD_URC_UNSUPPORTED_PR;
887         }
888
889         err = read_mmap_data(ri->mmap, ctrl.insn, &val);
890         if (err < 0)
891                 return err;
892
893         /* check the personality routine */
894         if ((val & 0xff000000) == 0x80000000) {
895                 ctrl.byte = 2;
896                 ctrl.entries = 1;
897         } else if ((val & 0xff000000) == 0x81000000) {
898                 ctrl.byte = 1;
899                 ctrl.entries = 1 + ((val & 0x00ff0000) >> 16);
900         } else {
901                 pr_debug("unsupported personality routine %#x at %p\n",
902                          val, ctrl.insn);
903                 return -QUADD_URC_UNSUPPORTED_PR;
904         }
905
906         while (ctrl.entries > 0) {
907                 err = unwind_exec_insn(ri->mmap, &ctrl);
908                 if (err < 0)
909                         return err;
910
911                 if (ctrl.vrs[SP] & 0x03 ||
912                     ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
913                         return -QUADD_URC_SP_INCORRECT;
914         }
915
916         if (ctrl.vrs[PC] == 0)
917                 ctrl.vrs[PC] = ctrl.vrs[LR];
918
919         /* check for infinite loop */
920         if (frame->pc == ctrl.vrs[PC])
921                 return -QUADD_URC_FAILURE;
922
923         if (!validate_pc_addr(ctrl.vrs[PC], sizeof(u32)))
924                 return -QUADD_URC_PC_INCORRECT;
925
926         frame->fp_thumb = ctrl.vrs[FP_THUMB];
927         frame->fp_arm = ctrl.vrs[FP_ARM];
928
929         frame->sp = ctrl.vrs[SP];
930         frame->lr = ctrl.vrs[LR];
931         frame->pc = ctrl.vrs[PC];
932
933         return 0;
934 }
935
936 static void
937 unwind_backtrace(struct quadd_callchain *cc,
938                  struct ex_region_info *ri,
939                  struct pt_regs *regs,
940                  struct vm_area_struct *vma_sp,
941                  struct task_struct *task)
942 {
943         struct ex_region_info ri_new;
944         struct stackframe frame;
945
946 #ifdef CONFIG_ARM64
947         frame.fp_thumb = regs->compat_usr(7);
948         frame.fp_arm = regs->compat_usr(11);
949 #else
950         frame.fp_thumb = regs->ARM_r7;
951         frame.fp_arm = regs->ARM_fp;
952 #endif
953
954         frame.pc = instruction_pointer(regs);
955         frame.sp = quadd_user_stack_pointer(regs);
956         frame.lr = quadd_user_link_register(regs);
957
958         cc->unw_rc = QUADD_URC_FAILURE;
959
960         pr_debug("fp_arm: %#lx, fp_thumb: %#lx, sp: %#lx, lr: %#lx, pc: %#lx\n",
961                  frame.fp_arm, frame.fp_thumb, frame.sp, frame.lr, frame.pc);
962         pr_debug("vma_sp: %#lx - %#lx, length: %#lx\n",
963                  vma_sp->vm_start, vma_sp->vm_end,
964                  vma_sp->vm_end - vma_sp->vm_start);
965
966         while (1) {
967                 long err;
968                 int nr_added;
969                 unsigned long where = frame.pc;
970                 struct vm_area_struct *vma_pc;
971                 struct mm_struct *mm = task->mm;
972
973                 if (!mm)
974                         break;
975
976                 if (!validate_stack_addr(frame.sp, vma_sp, sizeof(u32))) {
977                         cc->unw_rc = -QUADD_URC_SP_INCORRECT;
978                         break;
979                 }
980
981                 vma_pc = find_vma(mm, frame.pc);
982                 if (!vma_pc)
983                         break;
984
985                 if (!is_vma_addr(ri->tabs.exidx.addr, vma_pc, sizeof(u32))) {
986                         err = __search_ex_region(vma_pc->vm_start, &ri_new);
987                         if (err) {
988                                 cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
989                                 break;
990                         }
991
992                         ri = &ri_new;
993                 }
994
995                 err = unwind_frame(ri, &frame, vma_sp);
996                 if (err < 0) {
997                         pr_debug("end unwind, urc: %ld\n", err);
998                         cc->unw_rc = -err;
999                         break;
1000                 }
1001
1002                 pr_debug("function at [<%08lx>] from [<%08lx>]\n",
1003                          where, frame.pc);
1004
1005                 cc->curr_sp = frame.sp;
1006                 cc->curr_fp = frame.fp_arm;
1007
1008                 nr_added = quadd_callchain_store(cc, frame.pc);
1009                 if (nr_added == 0)
1010                         break;
1011         }
1012 }
1013
1014 unsigned int
1015 quadd_get_user_callchain_ut(struct pt_regs *regs,
1016                             struct quadd_callchain *cc,
1017                             struct task_struct *task)
1018 {
1019         long err;
1020         unsigned long ip, sp;
1021         struct vm_area_struct *vma, *vma_sp;
1022         struct mm_struct *mm = task->mm;
1023         struct ex_region_info ri;
1024
1025         cc->unw_method = QUADD_UNW_METHOD_EHT;
1026         cc->unw_rc = QUADD_URC_FAILURE;
1027
1028 #ifdef CONFIG_ARM64
1029         if (!compat_user_mode(regs)) {
1030                 pr_warn_once("user_mode 64: unsupported\n");
1031                 return 0;
1032         }
1033 #endif
1034
1035         if (!regs || !mm)
1036                 return 0;
1037
1038         ip = instruction_pointer(regs);
1039         sp = quadd_user_stack_pointer(regs);
1040
1041         vma = find_vma(mm, ip);
1042         if (!vma)
1043                 return 0;
1044
1045         vma_sp = find_vma(mm, sp);
1046         if (!vma_sp)
1047                 return 0;
1048
1049         err = __search_ex_region(vma->vm_start, &ri);
1050         if (err) {
1051                 cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
1052                 return 0;
1053         }
1054
1055         unwind_backtrace(cc, &ri, regs, vma_sp, task);
1056
1057         return cc->nr;
1058 }
1059
/*
 * Begin a profiling session for @task: allocate a fresh regions table,
 * publish it via RCU and record the target tgid.  Returns 0 on success
 * or -ENOMEM if the table allocation fails.
 */
int quadd_unwind_start(struct task_struct *task)
{
	struct regions_data *rd, *rd_old;

	spin_lock(&ctx.lock);

	/*
	 * ctx.rd is only modified under ctx.lock, which we hold here.
	 * NOTE(review): rcu_dereference_protected() would document that
	 * more precisely than plain rcu_dereference() — confirm lockdep
	 * expectations for this kernel version.
	 */
	rd_old = rcu_dereference(ctx.rd);
	if (rd_old)
		pr_warn("%s: warning: rd_old\n", __func__);

	rd = rd_alloc(QUADD_EXTABS_SIZE);
	if (IS_ERR_OR_NULL(rd)) {
		pr_err("%s: error: rd_alloc\n", __func__);
		spin_unlock(&ctx.lock);
		return -ENOMEM;
	}

	/* publish the new table; readers see it fully initialized */
	rcu_assign_pointer(ctx.rd, rd);

	/* stale table (if any) is freed after all RCU readers drain */
	if (rd_old)
		call_rcu(&rd_old->rcu, rd_free_rcu);

	ctx.pid = task->tgid;

	ctx.ex_tables_size = 0;

	spin_unlock(&ctx.lock);

	return 0;
}
1090
1091 void quadd_unwind_stop(void)
1092 {
1093         int i;
1094         unsigned long nr_entries, size;
1095         struct regions_data *rd;
1096         struct ex_region_info *ri;
1097
1098         spin_lock(&ctx.lock);
1099
1100         ctx.pid = 0;
1101
1102         rd = rcu_dereference(ctx.rd);
1103         if (!rd)
1104                 goto out;
1105
1106         nr_entries = rd->curr_nr;
1107         size = rd->size;
1108
1109         for (i = 0; i < nr_entries; i++) {
1110                 ri = &rd->entries[i];
1111                 clean_mmap(rd, ri->mmap, 0);
1112         }
1113
1114         rcu_assign_pointer(ctx.rd, NULL);
1115         call_rcu(&rd->rcu, rd_free_rcu);
1116
1117 out:
1118         spin_unlock(&ctx.lock);
1119         pr_info("exception tables size: %lu bytes\n", ctx.ex_tables_size);
1120 }
1121
1122 int quadd_unwind_init(void)
1123 {
1124         spin_lock_init(&ctx.lock);
1125         rcu_assign_pointer(ctx.rd, NULL);
1126         ctx.pid = 0;
1127
1128         return 0;
1129 }
1130
/*
 * Module teardown: stop any active session, then wait for all pending
 * call_rcu() callbacks (rd_free_rcu) to complete so no deferred free
 * runs after the module's memory is gone.
 */
void quadd_unwind_deinit(void)
{
	quadd_unwind_stop();
	rcu_barrier();
}