misc: tegra-profiler: fix broken backtraces
drivers/misc/tegra-profiler/eh_unwind.c
1 /*
2  * drivers/misc/tegra-profiler/eh_unwind.c
3  *
4  * Copyright (c) 2015, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/mm.h>
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/uaccess.h>
23 #include <linux/err.h>
24 #include <linux/rcupdate.h>
25
26 #include <linux/tegra_profiler.h>
27
28 #include "hrt.h"
29 #include "tegra.h"
30 #include "eh_unwind.h"
31 #include "backtrace.h"
32 #include "comm.h"
33 #include "dwarf_unwind.h"
34 #include "disassembler.h"
35
36 #define QUADD_EXTABS_SIZE       0x100
37
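/*
 * GET_NR_PAGES(a, l) evaluates to the number of whole pages touched by the
 * byte range [a, a + l): the end of the range is rounded up to a page
 * boundary, the start is rounded down, and the difference is divided by
 * PAGE_SIZE.
 */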
38 #define GET_NR_PAGES(a, l) \
39         ((PAGE_ALIGN((a) + (l)) - ((a) & PAGE_MASK)) / PAGE_SIZE)
40
41 enum regs {
42         FP_THUMB = 7,
43         FP_ARM = 11,
44
45         SP = 13,
46         LR = 14,
47         PC = 15
48 };
49
50 struct regions_data {
51         struct ex_region_info *entries;
52
53         unsigned long curr_nr;
54         unsigned long size;
55
56         struct rcu_head rcu;
57 };
58
59 struct quadd_unwind_ctx {
60         struct regions_data *rd;
61
62         pid_t pid;
63         unsigned long ex_tables_size;
64         spinlock_t lock;
65 };
66
67 struct unwind_idx {
68         u32 addr_offset;
69         u32 insn;
70 };
71
72 struct stackframe {
73         unsigned long fp_thumb;
74         unsigned long fp_arm;
75
76         unsigned long sp;
77         unsigned long lr;
78         unsigned long pc;
79 };
80
81 struct unwind_ctrl_block {
82         u32 vrs[16];            /* virtual register set */
83         const u32 *insn;        /* pointer to the current instr word */
84         int entries;            /* number of entries left */
85         int byte;               /* current byte in the instr word */
86 };
87
88 struct pin_pages_work {
89         struct work_struct work;
90         unsigned long vm_start;
91 };
92
93 static struct quadd_unwind_ctx ctx;
94
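/*
 * Check that an @nbytes access at @addr is 32-bit aligned and lies inside
 * the mapped exception-table buffer (mmap->data, which is the same size as
 * the backing vma). Returns 1 when the access is safe, 0 otherwise.
 */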
95 static inline int
96 validate_mmap_addr(struct quadd_mmap_area *mmap,
97                    unsigned long addr, unsigned long nbytes)
98 {
99         struct vm_area_struct *vma = mmap->mmap_vma;
100         unsigned long size = vma->vm_end - vma->vm_start;
101         unsigned long data = (unsigned long)mmap->data;
102
103         if (addr & 0x03) {
104                 pr_err_once("%s: error: unaligned address: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
105                             __func__, addr, data, data + size,
106                             vma->vm_start, vma->vm_end);
107                 return 0;
108         }
109
110         if (addr < data || addr >= data + (size - nbytes)) {
111                 pr_err_once("%s: error: addr: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
112                             __func__, addr, data, data + size,
113                             vma->vm_start, vma->vm_end);
114                 return 0;
115         }
116
117         return 1;
118 }
119
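/*
 * read_user_data() wraps __get_user() with page faults disabled, so it
 * never sleeps; if the target page is not resident, the read fails and the
 * macro evaluates to -QUADD_URC_EACCESS instead of faulting the page in.
 */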
120 #define read_user_data(addr, retval)                            \
121 ({                                                              \
122         long ret;                                               \
123                                                                 \
124         pagefault_disable();                                    \
125         ret = __get_user(retval, addr);                         \
126         pagefault_enable();                                     \
127                                                                 \
128         if (ret) {                                              \
129                 pr_debug("%s: failed for address: %p\n",        \
130                          __func__, addr);                       \
131                 ret = -QUADD_URC_EACCESS;                       \
132         }                                                       \
133                                                                 \
134         ret;                                                    \
135 })
136
137 static inline long
138 read_mmap_data(struct quadd_mmap_area *mmap, const u32 *addr, u32 *retval)
139 {
140         if (!validate_mmap_addr(mmap, (unsigned long)addr, sizeof(u32))) {
141                 *retval = 0;
142                 return -QUADD_URC_EACCESS;
143         }
144
145         *retval = *addr;
146         return 0;
147 }
148
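/*
 * Each exception-table section is visible at two addresses: ti->addr in the
 * profiled task's address space, and ri->mmap->data + ti->mmap_offset in the
 * mapped copy the profiler reads from. The two helpers below translate an
 * address between those views; both return 0 when the section is empty.
 */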
149 static inline unsigned long
150 ex_addr_to_mmap_addr(unsigned long addr,
151                      struct ex_region_info *ri,
152                      int sec_type)
153 {
154         unsigned long offset;
155         struct extab_info *ti;
156
157         ti = &ri->ex_sec[sec_type];
158         if (unlikely(!ti->length))
159                 return 0;
160
161         offset = addr - ti->addr;
162         return ti->mmap_offset + offset + (unsigned long)ri->mmap->data;
163 }
164
165 static inline unsigned long
166 mmap_addr_to_ex_addr(unsigned long addr,
167                      struct ex_region_info *ri,
168                      int sec_type)
169 {
170         unsigned long offset;
171         struct extab_info *ti;
172
173         ti = &ri->ex_sec[sec_type];
174         if (unlikely(!ti->length))
175                 return 0;
176
177         offset = addr - ti->mmap_offset - (unsigned long)ri->mmap->data;
178         return ti->addr + offset;
179 }
180
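/*
 * A prel31 field (ARM EHABI) holds a signed 31-bit offset relative to the
 * field's own address. prel31_to_addr() reads such a field from user memory,
 * sign-extends bit 30 to 32 bits and adds the field address.
 * mmap_prel31_to_addr() below does the same for a field in the mapped copy,
 * translating to and from mmap addresses via the section descriptors.
 */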
181 static inline u32
182 prel31_to_addr(const u32 *ptr)
183 {
184         u32 value;
185         s32 offset;
186
187         if (read_user_data(ptr, value))
188                 return 0;
189
190         /* sign-extend to 32 bits */
191         offset = (((s32)value) << 1) >> 1;
192         return (u32)(unsigned long)ptr + offset;
193 }
194
195 static unsigned long
196 mmap_prel31_to_addr(const u32 *ptr, struct ex_region_info *ri,
197                     int src_type, int dst_type, int to_mmap)
198 {
199         s32 offset;
200         u32 value, addr;
201         unsigned long addr_res;
202
203         value = *ptr;
204         offset = (((s32)value) << 1) >> 1;
205
206         addr = mmap_addr_to_ex_addr((unsigned long)ptr, ri, src_type);
207         if (unlikely(!addr))
208                 return 0;
209
210         addr += offset;
211         addr_res = addr;
212
213         if (to_mmap)
214                 addr_res = ex_addr_to_mmap_addr(addr_res, ri, dst_type);
215
216         return addr_res;
217 }
218
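/*
 * Insert @new_entry into @rd->entries, which is kept sorted by vm_start so
 * lookups can binary-search. Returns the number of entries actually added
 * (0 when an entry with the same vm_start already exists). Called under
 * ctx.lock on a private copy of the array that is known to have room.
 */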
219 static int
220 add_ex_region(struct regions_data *rd,
221               struct ex_region_info *new_entry)
222 {
223         unsigned int i_min, i_max, mid;
224         struct ex_region_info *array = rd->entries;
225         unsigned long size = rd->curr_nr;
226
227         if (!array)
228                 return 0;
229
230         if (size == 0) {
231                 memcpy(&array[0], new_entry, sizeof(*new_entry));
232                 return 1;
233         } else if (size == 1 && array[0].vm_start == new_entry->vm_start) {
234                 return 0;
235         }
236
237         i_min = 0;
238         i_max = size;
239
240         if (array[0].vm_start > new_entry->vm_start) {
241                 memmove(array + 1, array,
242                         size * sizeof(*array));
243                 memcpy(&array[0], new_entry, sizeof(*new_entry));
244                 return 1;
245         } else if (array[size - 1].vm_start < new_entry->vm_start) {
246                 memcpy(&array[size], new_entry, sizeof(*new_entry));
247                 return 1;
248         }
249
250         while (i_min < i_max) {
251                 mid = i_min + (i_max - i_min) / 2;
252
253                 if (new_entry->vm_start <= array[mid].vm_start)
254                         i_max = mid;
255                 else
256                         i_min = mid + 1;
257         }
258
259         if (array[i_max].vm_start == new_entry->vm_start) {
260                 return 0;
261         } else {
262                 memmove(array + i_max + 1,
263                         array + i_max,
264                         (size - i_max) * sizeof(*array));
265                 memcpy(&array[i_max], new_entry, sizeof(*new_entry));
266                 return 1;
267         }
268 }
269
270 static int
271 remove_ex_region(struct regions_data *rd,
272                  struct ex_region_info *entry)
273 {
274         unsigned int i_min, i_max, mid;
275         struct ex_region_info *array = rd->entries;
276         unsigned long size = rd->curr_nr;
277
278         if (!array)
279                 return 0;
280
281         if (size == 0)
282                 return 0;
283
284         if (size == 1) {
285                 if (array[0].vm_start == entry->vm_start)
286                         return 1;
287                 else
288                         return 0;
289         }
290
291         if (array[0].vm_start > entry->vm_start)
292                 return 0;
293         else if (array[size - 1].vm_start < entry->vm_start)
294                 return 0;
295
296         i_min = 0;
297         i_max = size;
298
299         while (i_min < i_max) {
300                 mid = i_min + (i_max - i_min) / 2;
301
302                 if (entry->vm_start <= array[mid].vm_start)
303                         i_max = mid;
304                 else
305                         i_min = mid + 1;
306         }
307
308         if (array[i_max].vm_start == entry->vm_start) {
309                 memmove(array + i_max,
310                         array + i_max + 1,
311                         (size - i_max) * sizeof(*array));
312                 return 1;
313         } else {
314                 return 0;
315         }
316 }
317
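/*
 * Binary-search the sorted region array for an exact vm_start match.
 * search_ex_region() below is the RCU-protected wrapper that copies the
 * entry out, since the array may be replaced and freed once the read-side
 * critical section ends.
 */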
318 static struct ex_region_info *
319 __search_ex_region(struct ex_region_info *array,
320                    unsigned long size,
321                    unsigned long key)
322 {
323         unsigned int i_min, i_max, mid;
324
325         if (size == 0)
326                 return NULL;
327
328         i_min = 0;
329         i_max = size;
330
331         while (i_min < i_max) {
332                 mid = i_min + (i_max - i_min) / 2;
333
334                 if (key <= array[mid].vm_start)
335                         i_max = mid;
336                 else
337                         i_min = mid + 1;
338         }
339
340         if (array[i_max].vm_start == key)
341                 return &array[i_max];
342
343         return NULL;
344 }
345
346 static long
347 search_ex_region(unsigned long key, struct ex_region_info *ri)
348 {
349         struct regions_data *rd;
350         struct ex_region_info *ri_p = NULL;
351
352         rcu_read_lock();
353
354         rd = rcu_dereference(ctx.rd);
355         if (!rd)
356                 goto out;
357
358         ri_p = __search_ex_region(rd->entries, rd->curr_nr, key);
359         if (ri_p)
360                 memcpy(ri, ri_p, sizeof(*ri));
361
362 out:
363         rcu_read_unlock();
364         return ri_p ? 0 : -ENOENT;
365 }
366
367 static long
368 get_extabs_ehabi(unsigned long key, struct ex_region_info *ri)
369 {
370         long err;
371         struct extab_info *ti_exidx;
372
373         err = search_ex_region(key, ri);
374         if (err < 0)
375                 return err;
376
377         ti_exidx = &ri->ex_sec[QUADD_SEC_TYPE_EXIDX];
378         return ti_exidx->length ? 0 : -ENOENT;
379 }
380
381 long
382 quadd_get_dw_frames(unsigned long key, struct ex_region_info *ri)
383 {
384         long err;
385         struct extab_info *ti, *ti_hdr;
386
387         err = search_ex_region(key, ri);
388         if (err < 0)
389                 return err;
390
391         ti = &ri->ex_sec[QUADD_SEC_TYPE_EH_FRAME];
392         ti_hdr = &ri->ex_sec[QUADD_SEC_TYPE_EH_FRAME_HDR];
393
394         if (ti->length && ti_hdr->length)
395                 return 0;
396
397         ti = &ri->ex_sec[QUADD_SEC_TYPE_DEBUG_FRAME];
398         ti_hdr = &ri->ex_sec[QUADD_SEC_TYPE_DEBUG_FRAME_HDR];
399
400         return (ti->length && ti_hdr->length) ? 0 : -ENOENT;
401 }
402
403 static struct regions_data *rd_alloc(unsigned long size)
404 {
405         struct regions_data *rd;
406
407         rd = kzalloc(sizeof(*rd), GFP_ATOMIC);
408         if (!rd)
409                 return NULL;
410
411         rd->entries = kzalloc(size * sizeof(*rd->entries), GFP_ATOMIC);
412         if (!rd->entries) {
413                 kfree(rd);
414                 return NULL;
415         }
416
417         rd->size = size;
418         rd->curr_nr = 0;
419
420         return rd;
421 }
422
423 static void rd_free(struct regions_data *rd)
424 {
425         if (rd)
426                 kfree(rd->entries);
427
428         kfree(rd);
429 }
430
431 static void rd_free_rcu(struct rcu_head *rh)
432 {
433         struct regions_data *rd = container_of(rh, struct regions_data, rcu);
434         rd_free(rd);
435 }
436
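/*
 * Register the unwind sections described by @extabs for one mapped region.
 * The region array is updated copy-on-write: a new regions_data is allocated
 * (grown by half when full), the old entries are copied in, the new region
 * is inserted, and the result is published with rcu_assign_pointer(); the
 * old array is freed after a grace period. The region is also linked to
 * mmap->ex_entries so quadd_unwind_delete_mmap() can drop it later.
 */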
437 int quadd_unwind_set_extab(struct quadd_sections *extabs,
438                            struct quadd_mmap_area *mmap)
439 {
440         int i, err = 0;
441         unsigned long nr_entries, nr_added, new_size;
442         struct ex_region_info ri_entry;
443         struct extab_info *ti;
444         struct regions_data *rd, *rd_new;
445         struct ex_region_info *ex_entry;
446
447         if (mmap->type != QUADD_MMAP_TYPE_EXTABS)
448                 return -EIO;
449
450         spin_lock(&ctx.lock);
451
452         rd = rcu_dereference(ctx.rd);
453         if (!rd) {
454                 pr_warn("%s: warning: rd\n", __func__);
455                 new_size = QUADD_EXTABS_SIZE;
456                 nr_entries = 0;
457         } else {
458                 new_size = rd->size;
459                 nr_entries = rd->curr_nr;
460         }
461
462         if (nr_entries >= new_size)
463                 new_size += new_size >> 1;
464
465         rd_new = rd_alloc(new_size);
466         if (IS_ERR_OR_NULL(rd_new)) {
467                 pr_err("%s: error: rd_alloc\n", __func__);
468                 err = -ENOMEM;
469                 goto error_out;
470         }
471
472         if (rd && nr_entries)
473                 memcpy(rd_new->entries, rd->entries,
474                        nr_entries * sizeof(*rd->entries));
475
476         rd_new->curr_nr = nr_entries;
477
478         ri_entry.vm_start = extabs->vm_start;
479         ri_entry.vm_end = extabs->vm_end;
480
481         ri_entry.mmap = mmap;
482
483         for (i = 0; i < QUADD_SEC_TYPE_MAX; i++) {
484                 struct quadd_sec_info *si = &extabs->sec[i];
485
486                 ti = &ri_entry.ex_sec[i];
487
488                 ti->tf_start = 0;
489                 ti->tf_end = 0;
490
491                 if (!si->addr) {
492                         ti->addr = 0;
493                         ti->length = 0;
494                         ti->mmap_offset = 0;
495
496                         continue;
497                 }
498
499                 ti->addr = si->addr;
500                 ti->length = si->length;
501                 ti->mmap_offset = si->mmap_offset;
502         }
503
504         nr_added = add_ex_region(rd_new, &ri_entry);
505         if (nr_added == 0)
506                 goto error_free;
507
508         rd_new->curr_nr += nr_added;
509
510         ex_entry = kzalloc(sizeof(*ex_entry), GFP_ATOMIC);
511         if (!ex_entry) {
512                 err = -ENOMEM;
513                 goto error_free;
514         }
515         memcpy(ex_entry, &ri_entry, sizeof(*ex_entry));
516
517         INIT_LIST_HEAD(&ex_entry->list);
518         list_add_tail(&ex_entry->list, &mmap->ex_entries);
519
520         rcu_assign_pointer(ctx.rd, rd_new);
521
522         if (rd)
523                 call_rcu(&rd->rcu, rd_free_rcu);
524
525         spin_unlock(&ctx.lock);
526
527         return 0;
528
529 error_free:
530         rd_free(rd_new);
531 error_out:
532         spin_unlock(&ctx.lock);
533         return err;
534 }
535
536 void
537 quadd_unwind_set_tail_info(unsigned long vm_start,
538                            int secid,
539                            unsigned long tf_start,
540                            unsigned long tf_end)
541 {
542         struct ex_region_info *ri;
543         unsigned long nr_entries, size;
544         struct regions_data *rd, *rd_new;
545         struct extab_info *ti;
546
547         spin_lock(&ctx.lock);
548
549         rd = rcu_dereference(ctx.rd);
550
551         if (!rd || rd->curr_nr == 0)
552                 goto error_out;
553
554         size = rd->size;
555         nr_entries = rd->curr_nr;
556
557         rd_new = rd_alloc(size);
558         if (IS_ERR_OR_NULL(rd_new)) {
559                 pr_err_once("%s: error: rd_alloc\n", __func__);
560                 goto error_out;
561         }
562
563         memcpy(rd_new->entries, rd->entries,
564                nr_entries * sizeof(*rd->entries));
565
566         rd_new->curr_nr = nr_entries;
567
568         ri = __search_ex_region(rd_new->entries, nr_entries, vm_start);
569         if (!ri)
570                 goto error_free;
571
572         ti = &ri->ex_sec[secid];
573
574         ti->tf_start = tf_start;
575         ti->tf_end = tf_end;
576
577         rcu_assign_pointer(ctx.rd, rd_new);
578
579         call_rcu(&rd->rcu, rd_free_rcu);
580         spin_unlock(&ctx.lock);
581
582         return;
583
584 error_free:
585         rd_free(rd_new);
586
587 error_out:
588         spin_unlock(&ctx.lock);
589 }
590
591 static int
592 clean_mmap(struct regions_data *rd, struct quadd_mmap_area *mmap, int rm_ext)
593 {
594         int nr_removed = 0;
595         struct ex_region_info *entry, *next;
596
597         if (!rd || !mmap)
598                 return 0;
599
600         list_for_each_entry_safe(entry, next, &mmap->ex_entries, list) {
601                 if (rm_ext)
602                         nr_removed += remove_ex_region(rd, entry);
603
604                 list_del(&entry->list);
605                 kfree(entry);
606         }
607
608         return nr_removed;
609 }
610
611 void quadd_unwind_delete_mmap(struct quadd_mmap_area *mmap)
612 {
613         unsigned long nr_entries, nr_removed, new_size;
614         struct regions_data *rd, *rd_new;
615
616         if (!mmap)
617                 return;
618
619         spin_lock(&ctx.lock);
620
621         rd = rcu_dereference(ctx.rd);
622         if (!rd || !rd->curr_nr)
623                 goto error_out;
624
625         nr_entries = rd->curr_nr;
626         new_size = min_t(unsigned long, rd->size, nr_entries);
627
628         rd_new = rd_alloc(new_size);
629         if (IS_ERR_OR_NULL(rd_new)) {
630                 pr_err("%s: error: rd_alloc\n", __func__);
631                 goto error_out;
632         }
633         rd_new->size = new_size;
634         rd_new->curr_nr = nr_entries;
635
636         memcpy(rd_new->entries, rd->entries,
637                 nr_entries * sizeof(*rd->entries));
638
639         nr_removed = clean_mmap(rd_new, mmap, 1);
640         rd_new->curr_nr -= nr_removed;
641
642         rcu_assign_pointer(ctx.rd, rd_new);
643         call_rcu(&rd->rcu, rd_free_rcu);
644
645 error_out:
646         spin_unlock(&ctx.lock);
647 }
648
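/*
 * Find the .ARM.exidx entry covering @addr. The index is an array of
 * (prel31 function start, unwind word) pairs sorted by function address, so
 * a binary search over the decoded start addresses locates the entry. When
 * @lowaddr is non-NULL it receives the decoded start address of the matched
 * function.
 */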
649 static const struct unwind_idx *
650 unwind_find_idx(struct ex_region_info *ri, u32 addr, unsigned long *lowaddr)
651 {
652         u32 value;
653         unsigned long length;
654         struct extab_info *ti;
655         struct unwind_idx *start;
656         struct unwind_idx *stop;
657         struct unwind_idx *mid = NULL;
658
659         ti = &ri->ex_sec[QUADD_SEC_TYPE_EXIDX];
660
661         length = ti->length / sizeof(*start);
662
663         if (unlikely(!length))
664                 return NULL;
665
666         start = (struct unwind_idx *)((char *)ri->mmap->data + ti->mmap_offset);
667         stop = start + length - 1;
668
669         value = (u32)mmap_prel31_to_addr(&start->addr_offset, ri,
670                                          QUADD_SEC_TYPE_EXIDX,
671                                          QUADD_SEC_TYPE_EXTAB, 0);
672         if (!value || addr < value)
673                 return NULL;
674
675         value = (u32)mmap_prel31_to_addr(&stop->addr_offset, ri,
676                                          QUADD_SEC_TYPE_EXIDX,
677                                          QUADD_SEC_TYPE_EXTAB, 0);
678         if (!value || addr >= value)
679                 return NULL;
680
681         while (start < stop - 1) {
682                 mid = start + ((stop - start) >> 1);
683
684                 value = (u32)mmap_prel31_to_addr(&mid->addr_offset, ri,
685                                                  QUADD_SEC_TYPE_EXIDX,
686                                                  QUADD_SEC_TYPE_EXTAB, 0);
687                 if (!value)
688                         return NULL;
689
690                 if (addr < value)
691                         stop = mid;
692                 else
693                         start = mid;
694         }
695
696         if (lowaddr)
697                 *lowaddr = mmap_prel31_to_addr(&start->addr_offset,
698                                                ri, 1, 0, 0);
699         return start;
700 }
701
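/*
 * Unwind instructions are packed as bytes inside the 32-bit words of the
 * extab, most significant byte first. unwind_get_byte() returns the next
 * opcode byte, advancing to the following word (and decrementing the
 * remaining word count) once the current word is exhausted.
 */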
702 static unsigned long
703 unwind_get_byte(struct quadd_mmap_area *mmap,
704                 struct unwind_ctrl_block *ctrl, long *err)
705 {
706         unsigned long ret;
707         u32 insn_word;
708
709         *err = 0;
710
711         if (ctrl->entries <= 0) {
712                 pr_err_once("%s: error: corrupt unwind table\n", __func__);
713                 *err = -QUADD_URC_TBL_IS_CORRUPT;
714                 return 0;
715         }
716
717         *err = read_mmap_data(mmap, ctrl->insn, &insn_word);
718         if (*err < 0)
719                 return 0;
720
721         ret = (insn_word >> (ctrl->byte * 8)) & 0xff;
722
723         if (ctrl->byte == 0) {
724                 ctrl->insn++;
725                 ctrl->entries--;
726                 ctrl->byte = 3;
727         } else
728                 ctrl->byte--;
729
730         return ret;
731 }
732
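/*
 * Decode a ULEB128-encoded value from the unwind byte stream (used by the
 * 0xb2 "vsp = vsp + 0x204 + (uleb128 << 2)" opcode). Returns the number of
 * bytes consumed, or a negative error code.
 */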
733 static long
734 read_uleb128(struct quadd_mmap_area *mmap,
735              struct unwind_ctrl_block *ctrl,
736              unsigned long *ret)
737 {
738         long err = 0;
739         unsigned long result;
740         unsigned char byte;
741         int shift, count;
742
743         result = 0;
744         shift = 0;
745         count = 0;
746
747         while (1) {
748                 byte = unwind_get_byte(mmap, ctrl, &err);
749                 if (err < 0)
750                         return err;
751
752                 count++;
753
754                 result |= (byte & 0x7f) << shift;
755                 shift += 7;
756
757                 if (!(byte & 0x80))
758                         break;
759         }
760
761         *ret = result;
762
763         return count;
764 }
765
766 /*
767  * Execute the current unwind instruction.
768  */
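/*
 * The opcodes handled below are the ARM EHABI frame-unwinding instructions
 * (vsp adjustment, register pops, VFP pops, "finish", "refuse to unwind").
 * Besides the virtual register set in @ctrl, the interpreter updates @qd,
 * which records the stack and register effects that the disassembler-based
 * cross-check (um.ut_ce) later compares against.
 */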
769 static long
770 unwind_exec_insn(struct quadd_mmap_area *mmap,
771                  struct unwind_ctrl_block *ctrl,
772                  struct quadd_disasm_data *qd)
773 {
774         long err;
775         unsigned int i;
776         unsigned long insn = unwind_get_byte(mmap, ctrl, &err);
777
778         if (err < 0)
779                 return err;
780
781         pr_debug("%s: insn = %08lx\n", __func__, insn);
782
783         if ((insn & 0xc0) == 0x00) {
784                 ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
785                 qd->stacksize -= ((insn & 0x3f) << 2) + 4;
786
787                 pr_debug("CMD_DATA_POP: vsp = vsp + %lu (new: %#x)\n",
788                         ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
789         } else if ((insn & 0xc0) == 0x40) {
790                 ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
791                 qd->stackoff -= ((insn & 0x3f) << 2) + 4;
792                 pr_debug("CMD_DATA_PUSH: vsp = vsp - %lu (new: %#x)\n",
793                         ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
794         } else if ((insn & 0xf0) == 0x80) {
795                 unsigned long mask;
796                 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
797                 int load_sp, reg = 4;
798
799                 insn = (insn << 8) | unwind_get_byte(mmap, ctrl, &err);
800                 if (err < 0)
801                         return err;
802
803                 mask = insn & 0x0fff;
804                 if (mask == 0) {
805                         pr_debug("CMD_REFUSED: unwind: 'Refuse to unwind' instruction %04lx\n",
806                                    insn);
807                         return -QUADD_URC_REFUSE_TO_UNWIND;
808                 }
809
810                 /* pop R4-R15 according to mask */
811                 load_sp = mask & (1 << (13 - 4));
812                 while (mask) {
813                         if (mask & 1) {
814                                 err = read_user_data(vsp++, ctrl->vrs[reg]);
815                                 if (err < 0)
816                                         return err;
817
818                                 qd->r_regset &= ~(1 << reg);
819                                 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
820                         }
821                         mask >>= 1;
822                         reg++;
823                 }
824                 if (!load_sp)
825                         ctrl->vrs[SP] = (unsigned long)vsp;
826
827                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
828         } else if ((insn & 0xf0) == 0x90 &&
829                    (insn & 0x0d) != 0x0d) {
830                 ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
831                 qd->ustackreg = (insn & 0xf);
832                 pr_debug("CMD_REG_TO_SP: vsp = {r%lu}\n", insn & 0x0f);
833         } else if ((insn & 0xf0) == 0xa0) {
834                 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
835                 unsigned int reg;
836
837                 /* pop R4-R[4+bbb] */
838                 for (reg = 4; reg <= 4 + (insn & 7); reg++) {
839                         err = read_user_data(vsp++, ctrl->vrs[reg]);
840                         if (err < 0)
841                                 return err;
842
843                         qd->r_regset &= ~(1 << reg);
844                         pr_debug("CMD_REG_POP: pop {r%u}\n", reg);
845                 }
846
847                 if (insn & 0x08) {
848                         err = read_user_data(vsp++, ctrl->vrs[14]);
849                         if (err < 0)
850                                 return err;
851
852                         qd->r_regset &= ~(1 << 14);
853                         pr_debug("CMD_REG_POP: pop {r14}\n");
854                 }
855
856                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
857                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
858         } else if (insn == 0xb0) {
859                 if (ctrl->vrs[PC] == 0)
860                         ctrl->vrs[PC] = ctrl->vrs[LR];
861                 /* no further processing */
862                 ctrl->entries = 0;
863
864                 pr_debug("CMD_FINISH\n");
865         } else if (insn == 0xb1) {
866                 unsigned long mask = unwind_get_byte(mmap, ctrl, &err);
867                 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
868                 int reg = 0;
869
870                 if (err < 0)
871                         return err;
872
873                 if (mask == 0 || mask & 0xf0) {
874                         pr_debug("unwind: Spare encoding %04lx\n",
875                                (insn << 8) | mask);
876                         return -QUADD_URC_SPARE_ENCODING;
877                 }
878
879                 /* pop R0-R3 according to mask */
880                 while (mask) {
881                         if (mask & 1) {
882                                 err = read_user_data(vsp++, ctrl->vrs[reg]);
883                                 if (err < 0)
884                                         return err;
885
886                                 qd->r_regset &= ~(1 << reg);
887                                 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
888                         }
889                         mask >>= 1;
890                         reg++;
891                 }
892
893                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
894                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
895         } else if (insn == 0xb2) {
896                 long count;
897                 unsigned long uleb128 = 0;
898
899                 count = read_uleb128(mmap, ctrl, &uleb128);
900                 if (count < 0)
901                         return count;
902
903                 if (count == 0)
904                         return -QUADD_URC_TBL_IS_CORRUPT;
905
906                 ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
907
908                 qd->stacksize -= 0x204 + (uleb128 << 2);
909                 pr_debug("CMD_DATA_POP: vsp = vsp + %lu (%#lx), new vsp: %#x\n",
910                          0x204 + (uleb128 << 2), 0x204 + (uleb128 << 2),
911                          ctrl->vrs[SP]);
912         } else if (insn == 0xb3 || insn == 0xc8 || insn == 0xc9) {
913                 unsigned long data, reg_from, reg_to;
914                 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
915
916                 data = unwind_get_byte(mmap, ctrl, &err);
917                 if (err < 0)
918                         return err;
919
920                 reg_from = (data & 0xf0) >> 4;
921                 reg_to = reg_from + (data & 0x0f);
922
923                 if (insn == 0xc8) {
924                         reg_from += 16;
925                         reg_to += 16;
926                 }
927
928                 for (i = reg_from; i <= reg_to; i++)
929                         vsp += 2, qd->d_regset &= ~(1 << i);
930
931                 if (insn == 0xb3)
932                         vsp++;
933
934                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
935
936                 pr_debug("CMD_VFP_POP (%#lx %#lx): pop {D%lu-D%lu}\n",
937                          insn, data, reg_from, reg_to);
938                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
939         } else if ((insn & 0xf8) == 0xb8 || (insn & 0xf8) == 0xd0) {
940                 unsigned long reg_to;
941                 unsigned long data = insn & 0x07;
942                 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
943
944                 reg_to = 8 + data;
945
946                 for (i = 8; i <= reg_to; i++)
947                         vsp += 2, qd->d_regset &= ~(1 << i);
948
949                 if ((insn & 0xf8) == 0xb8)
950                         vsp++;
951
952                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
953
954                 pr_debug("CMD_VFP_POP (%#lx): pop {D8-D%lu}\n",
955                          insn, reg_to);
956                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
957         } else {
958                 pr_debug("error: unhandled instruction %02lx\n", insn);
959                 return -QUADD_URC_UNHANDLED_INSTRUCTION;
960         }
961
962         pr_debug("%s: fp_arm: %#x, fp_thumb: %#x, sp: %#x, lr = %#x, pc: %#x\n",
963                  __func__,
964                  ctrl->vrs[FP_ARM], ctrl->vrs[FP_THUMB], ctrl->vrs[SP],
965                  ctrl->vrs[LR], ctrl->vrs[PC]);
966
967         return 0;
968 }
969
970 /*
971  * Unwind a single frame starting with *sp for the symbol at *pc. It
972  * updates *pc and *sp with the new values.
973  */
974 static long
975 unwind_frame(struct quadd_unw_methods um,
976              struct ex_region_info *ri,
977              struct stackframe *frame,
978              struct vm_area_struct *vma_sp,
979              int thumbflag)
980 {
981         unsigned long high, low, min, max;
982         const struct unwind_idx *idx;
983         struct unwind_ctrl_block ctrl;
984         struct quadd_disasm_data qd;
985 #ifdef QM_DEBUG_DISASSEMBLER
986         struct quadd_disasm_data orig;
987 #endif
988         long err = 0;
989         u32 val;
990
991         if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32)))
992                 return -QUADD_URC_SP_INCORRECT;
993
994         /* only go to a higher address on the stack */
995         low = frame->sp;
996         high = vma_sp->vm_end;
997
998         pr_debug("pc: %#lx, lr: %#lx, sp:%#lx, low/high: %#lx/%#lx, thumb: %d\n",
999                  frame->pc, frame->lr, frame->sp, low, high, thumbflag);
1000
1001         idx = unwind_find_idx(ri, frame->pc, &min);
1002         if (IS_ERR_OR_NULL(idx))
1003                 return -QUADD_URC_IDX_NOT_FOUND;
1004
1005         pr_debug("index was found by pc (%#lx): %p\n", frame->pc, idx);
1006
1007         ctrl.vrs[FP_THUMB] = frame->fp_thumb;
1008         ctrl.vrs[FP_ARM] = frame->fp_arm;
1009
1010         ctrl.vrs[SP] = frame->sp;
1011         ctrl.vrs[LR] = frame->lr;
1012         ctrl.vrs[PC] = 0;
1013
1014         err = read_mmap_data(ri->mmap, &idx->insn, &val);
1015         if (err < 0)
1016                 return err;
1017
1018         if (val == 1) {
1019                 /* can't unwind */
1020                 return -QUADD_URC_CANTUNWIND;
1021         } else if ((val & 0x80000000) == 0) {
1022                 /* prel31 to the unwind table */
1023                 ctrl.insn = (u32 *)(unsigned long)
1024                                 mmap_prel31_to_addr(&idx->insn, ri,
1025                                                     QUADD_SEC_TYPE_EXIDX,
1026                                                     QUADD_SEC_TYPE_EXTAB, 1);
1027                 if (!ctrl.insn)
1028                         return -QUADD_URC_TBL_LINK_INCORRECT;
1029         } else if ((val & 0xff000000) == 0x80000000) {
1030                 /* only personality routine 0 supported in the index */
1031                 ctrl.insn = &idx->insn;
1032         } else {
1033                 pr_debug("unsupported personality routine %#x in the index at %p\n",
1034                          val, idx);
1035                 return -QUADD_URC_UNSUPPORTED_PR;
1036         }
1037
1038         err = read_mmap_data(ri->mmap, ctrl.insn, &val);
1039         if (err < 0)
1040                 return err;
1041
1042         /* check the personality routine */
1043         if ((val & 0xff000000) == 0x80000000) {
1044                 ctrl.byte = 2;
1045                 ctrl.entries = 1;
1046         } else if ((val & 0xff000000) == 0x81000000) {
1047                 ctrl.byte = 1;
1048                 ctrl.entries = 1 + ((val & 0x00ff0000) >> 16);
1049         } else {
1050                 pr_debug("unsupported personality routine %#x at %p\n",
1051                          val, ctrl.insn);
1052                 return -QUADD_URC_UNSUPPORTED_PR;
1053         }
1054
1055         if (um.ut_ce) {
1056                 /* guess for the boundaries to disassemble */
1057                 if (frame->pc - min < QUADD_DISASM_MIN)
1058                         max = min + QUADD_DISASM_MIN;
1059                 else
1060                         max = (frame->pc - min < QUADD_DISASM_MAX)
1061                                 ? frame->pc : min + QUADD_DISASM_MAX;
1062                 err = quadd_disassemble(&qd, min, max, thumbflag);
1063                 if (err < 0)
1064                         return err;
1065 #ifdef QM_DEBUG_DISASSEMBLER
1066                 /* saved for verbose unwind mismatch reporting */
1067                 orig = qd;
1068                 qd.orig = &orig;
1069 #endif
1070         }
1071
1072         while (ctrl.entries > 0) {
1073                 err = unwind_exec_insn(ri->mmap, &ctrl, &qd);
1074                 if (err < 0)
1075                         return err;
1076
1077                 if (ctrl.vrs[SP] & 0x03 ||
1078                     ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
1079                         return -QUADD_URC_SP_INCORRECT;
1080         }
1081
1082         if (um.ut_ce && quadd_check_unwind_result(frame->pc, &qd) < 0)
1083                 return -QUADD_URC_UNWIND_MISMATCH;
1084
1085         if (ctrl.vrs[PC] == 0)
1086                 ctrl.vrs[PC] = ctrl.vrs[LR];
1087
1088         if (!validate_pc_addr(ctrl.vrs[PC], sizeof(u32)))
1089                 return -QUADD_URC_PC_INCORRECT;
1090
1091         frame->fp_thumb = ctrl.vrs[FP_THUMB];
1092         frame->fp_arm = ctrl.vrs[FP_ARM];
1093
1094         frame->sp = ctrl.vrs[SP];
1095         frame->lr = ctrl.vrs[LR];
1096         frame->pc = ctrl.vrs[PC];
1097
1098         return 0;
1099 }
1100
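/*
 * Walk the stack by repeatedly unwinding one frame. Whenever the unwound pc
 * leaves the current region, the exception tables for the new vma are looked
 * up; each recovered pc is pushed to the call chain until an error occurs or
 * quadd_callchain_store() stops accepting entries.
 */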
1101 static void
1102 unwind_backtrace(struct quadd_callchain *cc,
1103                  struct ex_region_info *ri,
1104                  struct stackframe *frame,
1105                  struct vm_area_struct *vma_sp,
1106                  struct task_struct *task,
1107                  int thumbflag)
1108 {
1109         struct ex_region_info ri_new;
1110
1111         cc->urc_ut = QUADD_URC_FAILURE;
1112
1113         pr_debug("fp_arm: %#lx, fp_thumb: %#lx, sp: %#lx, lr: %#lx, pc: %#lx\n",
1114                  frame->fp_arm, frame->fp_thumb,
1115                  frame->sp, frame->lr, frame->pc);
1116         pr_debug("vma_sp: %#lx - %#lx, length: %#lx\n",
1117                  vma_sp->vm_start, vma_sp->vm_end,
1118                  vma_sp->vm_end - vma_sp->vm_start);
1119
1120         while (1) {
1121                 long err;
1122                 int nr_added;
1123                 struct extab_info *ti;
1124                 unsigned long where = frame->pc;
1125                 struct vm_area_struct *vma_pc;
1126                 struct mm_struct *mm = task->mm;
1127
1128                 if (!mm)
1129                         break;
1130
1131                 if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32))) {
1132                         cc->urc_ut = QUADD_URC_SP_INCORRECT;
1133                         break;
1134                 }
1135
1136                 vma_pc = find_vma(mm, frame->pc);
1137                 if (!vma_pc)
1138                         break;
1139
1140                 ti = &ri->ex_sec[QUADD_SEC_TYPE_EXIDX];
1141
1142                 if (!is_vma_addr(ti->addr, vma_pc, sizeof(u32))) {
1143                         err = get_extabs_ehabi(vma_pc->vm_start, &ri_new);
1144                         if (err) {
1145                                 cc->urc_ut = QUADD_URC_TBL_NOT_EXIST;
1146                                 break;
1147                         }
1148
1149                         ri = &ri_new;
1150                 }
1151
1152                 err = unwind_frame(cc->um, ri, frame, vma_sp, thumbflag);
1153                 if (err < 0) {
1154                         pr_debug("end unwind, urc: %ld\n", err);
1155                         cc->urc_ut = -err;
1156                         break;
1157                 }
1158
1159                 /* determine whether outer frame is ARM or Thumb */
1160                 thumbflag = (frame->lr & 0x1);
1161
1162                 pr_debug("function at [<%08lx>] from [<%08lx>]\n",
1163                          where, frame->pc);
1164
1165                 cc->curr_sp = frame->sp;
1166                 cc->curr_fp = frame->fp_arm;
1167                 cc->curr_fp_thumb = frame->fp_thumb;
1168                 cc->curr_pc = frame->pc;
1169                 cc->curr_lr = frame->lr;
1170
1171                 nr_added = quadd_callchain_store(cc, frame->pc,
1172                                                  QUADD_UNW_TYPE_UT);
1173                 if (nr_added == 0)
1174                         break;
1175         }
1176 }
1177
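/*
 * Entry point for the ARM EHABI unwinder: build a user-space call chain for
 * @task, starting either from the saved registers in @regs or, when a
 * previous unwinder already made progress (cc->nr > 0), from the state it
 * left in cc->curr_*. Returns the number of entries now in the call chain.
 */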
1178 unsigned int
1179 quadd_get_user_cc_arm32_ehabi(struct pt_regs *regs,
1180                               struct quadd_callchain *cc,
1181                               struct task_struct *task)
1182 {
1183         long err;
1184         int nr_prev = cc->nr, thumbflag;
1185         unsigned long ip, sp, lr;
1186         struct vm_area_struct *vma, *vma_sp;
1187         struct mm_struct *mm = task->mm;
1188         struct ex_region_info ri;
1189         struct stackframe frame;
1190
1191         if (!regs || !mm)
1192                 return 0;
1193
1194 #ifdef CONFIG_ARM64
1195         if (!compat_user_mode(regs))
1196                 return 0;
1197 #endif
1198
1199         if (cc->urc_ut == QUADD_URC_LEVEL_TOO_DEEP)
1200                 return nr_prev;
1201
1202         cc->urc_ut = QUADD_URC_FAILURE;
1203
1204         if (nr_prev > 0) {
1205                 ip = cc->curr_pc;
1206                 sp = cc->curr_sp;
1207                 lr = cc->curr_lr;
1208                 thumbflag = (lr & 1);
1209
1210                 frame.fp_thumb = cc->curr_fp_thumb;
1211                 frame.fp_arm = cc->curr_fp;
1212         } else {
1213                 ip = instruction_pointer(regs);
1214                 sp = quadd_user_stack_pointer(regs);
1215                 lr = quadd_user_link_register(regs);
1216                 thumbflag = is_thumb_mode(regs);
1217
1218 #ifdef CONFIG_ARM64
1219                 frame.fp_thumb = regs->compat_usr(7);
1220                 frame.fp_arm = regs->compat_usr(11);
1221 #else
1222                 frame.fp_thumb = regs->ARM_r7;
1223                 frame.fp_arm = regs->ARM_fp;
1224 #endif
1225         }
1226
1227         frame.pc = ip;
1228         frame.sp = sp;
1229         frame.lr = lr;
1230
1231         pr_debug("pc: %#lx, lr: %#lx\n", ip, lr);
1232         pr_debug("sp: %#lx, fp_arm: %#lx, fp_thumb: %#lx\n",
1233                  sp, frame.fp_arm, frame.fp_thumb);
1234
1235         vma = find_vma(mm, ip);
1236         if (!vma)
1237                 return 0;
1238
1239         vma_sp = find_vma(mm, sp);
1240         if (!vma_sp)
1241                 return 0;
1242
1243         err = get_extabs_ehabi(vma->vm_start, &ri);
1244         if (err) {
1245                 cc->urc_ut = QUADD_URC_TBL_NOT_EXIST;
1246                 return 0;
1247         }
1248
1249         unwind_backtrace(cc, &ri, &frame, vma_sp, task, thumbflag);
1250
1251         pr_debug("%s: exit, cc->nr: %d --> %d\n",
1252                  __func__, nr_prev, cc->nr);
1253
1254         return cc->nr;
1255 }
1256
1257 int
1258 quadd_is_ex_entry_exist_arm32_ehabi(struct pt_regs *regs,
1259                                     unsigned long addr,
1260                                     struct task_struct *task)
1261 {
1262         long err;
1263         u32 value;
1264         const struct unwind_idx *idx;
1265         struct ex_region_info ri;
1266         struct vm_area_struct *vma;
1267         struct mm_struct *mm = task->mm;
1268
1269         if (!regs || !mm)
1270                 return 0;
1271
1272         vma = find_vma(mm, addr);
1273         if (!vma)
1274                 return 0;
1275
1276         err = get_extabs_ehabi(vma->vm_start, &ri);
1277         if (err)
1278                 return 0;
1279
1280         idx = unwind_find_idx(&ri, addr, NULL);
1281         if (IS_ERR_OR_NULL(idx))
1282                 return 0;
1283
1284         err = read_mmap_data(ri.mmap, &idx->insn, &value);
1285         if (err < 0)
1286                 return 0;
1287
1288         /* EXIDX_CANTUNWIND */
1289         if (value == 1)
1290                 return 0;
1291
1292         return 1;
1293 }
1294
1295 int quadd_unwind_start(struct task_struct *task)
1296 {
1297         int err;
1298         struct regions_data *rd, *rd_old;
1299
1300         rd = rd_alloc(QUADD_EXTABS_SIZE);
1301         if (IS_ERR_OR_NULL(rd)) {
1302                 pr_err("%s: error: rd_alloc\n", __func__);
1303                 return -ENOMEM;
1304         }
1305
1306         err = quadd_dwarf_unwind_start();
1307         if (err) {
1308                 rd_free(rd);
1309                 return err;
1310         }
1311
1312         spin_lock(&ctx.lock);
1313
1314         rd_old = rcu_dereference(ctx.rd);
1315         if (rd_old)
1316                 pr_warn("%s: warning: rd_old\n", __func__);
1317
1318         rcu_assign_pointer(ctx.rd, rd);
1319
1320         if (rd_old)
1321                 call_rcu(&rd_old->rcu, rd_free_rcu);
1322
1323         ctx.pid = task->tgid;
1324
1325         ctx.ex_tables_size = 0;
1326
1327         spin_unlock(&ctx.lock);
1328
1329         return 0;
1330 }
1331
1332 void quadd_unwind_stop(void)
1333 {
1334         int i;
1335         unsigned long nr_entries, size;
1336         struct regions_data *rd;
1337         struct ex_region_info *ri;
1338
1339         quadd_dwarf_unwind_stop();
1340
1341         spin_lock(&ctx.lock);
1342
1343         ctx.pid = 0;
1344
1345         rd = rcu_dereference(ctx.rd);
1346         if (!rd)
1347                 goto out;
1348
1349         nr_entries = rd->curr_nr;
1350         size = rd->size;
1351
1352         for (i = 0; i < nr_entries; i++) {
1353                 ri = &rd->entries[i];
1354                 clean_mmap(rd, ri->mmap, 0);
1355         }
1356
1357         rcu_assign_pointer(ctx.rd, NULL);
1358         call_rcu(&rd->rcu, rd_free_rcu);
1359
1360 out:
1361         spin_unlock(&ctx.lock);
1362         pr_info("exception tables size: %lu bytes\n", ctx.ex_tables_size);
1363 }
1364
1365 int quadd_unwind_init(void)
1366 {
1367         int err;
1368
1369         err = quadd_dwarf_unwind_init();
1370         if (err)
1371                 return err;
1372
1373         spin_lock_init(&ctx.lock);
1374         rcu_assign_pointer(ctx.rd, NULL);
1375         ctx.pid = 0;
1376
1377         return 0;
1378 }
1379
1380 void quadd_unwind_deinit(void)
1381 {
1382         quadd_unwind_stop();
1383         rcu_barrier();
1384 }