1 /*
2  * drivers/misc/tegra-profiler/eh_unwind.c
3  *
4  * Copyright (c) 2015, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/mm.h>
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/uaccess.h>
23 #include <linux/err.h>
24 #include <linux/rcupdate.h>
25
26 #include <linux/tegra_profiler.h>
27
28 #include "eh_unwind.h"
29 #include "backtrace.h"
30 #include "comm.h"
31 #include "dwarf_unwind.h"
32
33 #define QUADD_EXTABS_SIZE       0x100
34
35 #define GET_NR_PAGES(a, l) \
36         ((PAGE_ALIGN((a) + (l)) - ((a) & PAGE_MASK)) / PAGE_SIZE)
37
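/*
 * Indices into unwind_ctrl_block.vrs[]: r7 is the Thumb frame pointer,
 * r11 the ARM frame pointer, r13/r14/r15 are SP, LR and PC.
 */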
38 enum regs {
39         FP_THUMB = 7,
40         FP_ARM = 11,
41
42         SP = 13,
43         LR = 14,
44         PC = 15
45 };
46
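/*
 * Sorted (by vm_start) array of regions that carry exception tables.
 * Writers build a new copy under ctx.lock and publish it with
 * rcu_assign_pointer(); the old copy is freed after a grace period, so
 * readers on the sampling path can search it under rcu_read_lock() only.
 */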
47 struct regions_data {
48         struct ex_region_info *entries;
49
50         unsigned long curr_nr;
51         unsigned long size;
52
53         struct rcu_head rcu;
54 };
55
56 struct quadd_unwind_ctx {
57         struct regions_data *rd;
58
59         pid_t pid;
60         unsigned long ex_tables_size;
61         spinlock_t lock;
62 };
63
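/*
 * One entry of the .ARM.exidx table: a prel31 offset to the function it
 * describes plus either an inline (compact) unwind descriptor or a prel31
 * offset into .ARM.extab.  The table is sorted by function address.
 */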
64 struct unwind_idx {
65         u32 addr_offset;
66         u32 insn;
67 };
68
69 struct stackframe {
70         unsigned long fp_thumb;
71         unsigned long fp_arm;
72
73         unsigned long sp;
74         unsigned long lr;
75         unsigned long pc;
76 };
77
78 struct unwind_ctrl_block {
79         u32 vrs[16];            /* virtual register set */
80         const u32 *insn;        /* pointer to the current instr word */
81         int entries;            /* number of entries left */
82         int byte;               /* current byte in the instr word */
83 };
84
85 struct pin_pages_work {
86         struct work_struct work;
87         unsigned long vm_start;
88 };
89
90 static struct quadd_unwind_ctx ctx;
91
92 static inline int
93 validate_mmap_addr(struct quadd_mmap_area *mmap,
94                    unsigned long addr, unsigned long nbytes)
95 {
96         struct vm_area_struct *vma = mmap->mmap_vma;
97         unsigned long size = vma->vm_end - vma->vm_start;
98         unsigned long data = (unsigned long)mmap->data;
99
100         if (addr & 0x03) {
101                 pr_err_once("%s: error: unaligned address: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
102                             __func__, addr, data, data + size,
103                        vma->vm_start, vma->vm_end);
104                 return 0;
105         }
106
107         if (addr < data || addr >= data + (size - nbytes)) {
108                 pr_err_once("%s: error: addr: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
109                             __func__, addr, data, data + size,
110                        vma->vm_start, vma->vm_end);
111                 return 0;
112         }
113
114         return 1;
115 }
116
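/*
 * Read a word from the profiled task's user memory.  Page faults are
 * disabled because the unwinder may run in atomic (sampling) context;
 * a fault is reported as -QUADD_URC_EACCESS instead of being serviced.
 */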
117 #define read_user_data(addr, retval)                            \
118 ({                                                              \
119         long ret;                                               \
120                                                                 \
121         pagefault_disable();                                    \
122         ret = __get_user(retval, addr);                         \
123         pagefault_enable();                                     \
124                                                                 \
125         if (ret) {                                              \
126                 pr_debug("%s: failed for address: %p\n",        \
127                          __func__, addr);                       \
128                 ret = -QUADD_URC_EACCESS;                       \
129         }                                                       \
130                                                                 \
131         ret;                                                    \
132 })
133
134 static inline long
135 read_mmap_data(struct quadd_mmap_area *mmap, const u32 *addr, u32 *retval)
136 {
137         if (!validate_mmap_addr(mmap, (unsigned long)addr, sizeof(u32))) {
138                 *retval = 0;
139                 return -QUADD_URC_EACCESS;
140         }
141
142         *retval = *addr;
143         return 0;
144 }
145
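/*
 * The exception-table sections are made available to the profiler through
 * mmap->data.  For each section, extab_info::addr is its address in the
 * profiled process and extab_info::mmap_offset its offset inside that
 * buffer; the two helpers below convert between the two views.
 */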
146 static inline unsigned long
147 ex_addr_to_mmap_addr(unsigned long addr,
148                      struct ex_region_info *ri,
149                      int sec_type)
150 {
151         unsigned long offset;
152         struct extab_info *ti;
153
154         ti = &ri->ex_sec[sec_type];
155         offset = addr - ti->addr;
156
157         return ti->mmap_offset + offset + (unsigned long)ri->mmap->data;
158 }
159
160 static inline unsigned long
161 mmap_addr_to_ex_addr(unsigned long addr,
162                      struct ex_region_info *ri,
163                      int sec_type)
164 {
165         unsigned long offset;
166         struct extab_info *ti;
167
168         ti = &ri->ex_sec[sec_type];
169         offset = addr - ti->mmap_offset - (unsigned long)ri->mmap->data;
170
171         return ti->addr + offset;
172 }
173
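/*
 * prel31: a place-relative, signed 31-bit offset.  Bit 30 is sign-extended
 * into bit 31 and the result is added to the address of the field itself.
 */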
174 static inline u32
175 prel31_to_addr(const u32 *ptr)
176 {
177         u32 value;
178         s32 offset;
179
180         if (read_user_data(ptr, value))
181                 return 0;
182
183         /* sign-extend to 32 bits */
184         offset = (((s32)value) << 1) >> 1;
185         return (u32)(unsigned long)ptr + offset;
186 }
187
188 static unsigned long
189 mmap_prel31_to_addr(const u32 *ptr, struct ex_region_info *ri,
190                     int src_type, int dst_type, int to_mmap)
191 {
192         s32 offset;
193         u32 value, addr;
194         unsigned long addr_res;
195
196         value = *ptr;
197         offset = (((s32)value) << 1) >> 1;
198
199         addr = mmap_addr_to_ex_addr((unsigned long)ptr, ri, src_type);
200         addr += offset;
201         addr_res = addr;
202
203         if (to_mmap)
204                 addr_res = ex_addr_to_mmap_addr(addr_res, ri, dst_type);
205
206         return addr_res;
207 }
208
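/*
 * Insert a region into the array, keeping it sorted by vm_start so that
 * lookups can binary-search it.  Returns the number of entries added:
 * 1 on success, 0 if an entry with the same vm_start already exists.
 * The caller holds ctx.lock and has made sure there is room for one more.
 */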
209 static int
210 add_ex_region(struct regions_data *rd,
211               struct ex_region_info *new_entry)
212 {
213         unsigned int i_min, i_max, mid;
214         struct ex_region_info *array = rd->entries;
215         unsigned long size = rd->curr_nr;
216
217         if (!array)
218                 return 0;
219
220         if (size == 0) {
221                 memcpy(&array[0], new_entry, sizeof(*new_entry));
222                 return 1;
223         } else if (size == 1 && array[0].vm_start == new_entry->vm_start) {
224                 return 0;
225         }
226
227         i_min = 0;
228         i_max = size;
229
230         if (array[0].vm_start > new_entry->vm_start) {
231                 memmove(array + 1, array,
232                         size * sizeof(*array));
233                 memcpy(&array[0], new_entry, sizeof(*new_entry));
234                 return 1;
235         } else if (array[size - 1].vm_start < new_entry->vm_start) {
236                 memcpy(&array[size], new_entry, sizeof(*new_entry));
237                 return 1;
238         }
239
240         while (i_min < i_max) {
241                 mid = i_min + (i_max - i_min) / 2;
242
243                 if (new_entry->vm_start <= array[mid].vm_start)
244                         i_max = mid;
245                 else
246                         i_min = mid + 1;
247         }
248
249         if (array[i_max].vm_start == new_entry->vm_start) {
250                 return 0;
251         } else {
252                 memmove(array + i_max + 1,
253                         array + i_max,
254                         (size - i_max) * sizeof(*array));
255                 memcpy(&array[i_max], new_entry, sizeof(*new_entry));
256                 return 1;
257         }
258 }
259
260 static int
261 remove_ex_region(struct regions_data *rd,
262                  struct ex_region_info *entry)
263 {
264         unsigned int i_min, i_max, mid;
265         struct ex_region_info *array = rd->entries;
266         unsigned long size = rd->curr_nr;
267
268         if (!array)
269                 return 0;
270
271         if (size == 0)
272                 return 0;
273
274         if (size == 1) {
275                 if (array[0].vm_start == entry->vm_start)
276                         return 1;
277                 else
278                         return 0;
279         }
280
281         if (array[0].vm_start > entry->vm_start)
282                 return 0;
283         else if (array[size - 1].vm_start < entry->vm_start)
284                 return 0;
285
286         i_min = 0;
287         i_max = size;
288
289         while (i_min < i_max) {
290                 mid = i_min + (i_max - i_min) / 2;
291
292                 if (entry->vm_start <= array[mid].vm_start)
293                         i_max = mid;
294                 else
295                         i_min = mid + 1;
296         }
297
298         if (array[i_max].vm_start == entry->vm_start) {
299                 memmove(array + i_max,
300                         array + i_max + 1,
301                         (size - i_max - 1) * sizeof(*array));
302                 return 1;
303         } else {
304                 return 0;
305         }
306 }
307
308 static struct ex_region_info *
309 __search_ex_region(struct ex_region_info *array,
310                    unsigned long size,
311                    unsigned long key)
312 {
313         unsigned int i_min, i_max, mid;
314
315         if (size == 0)
316                 return NULL;
317
318         i_min = 0;
319         i_max = size;
320
321         while (i_min < i_max) {
322                 mid = i_min + (i_max - i_min) / 2;
323
324                 if (key <= array[mid].vm_start)
325                         i_max = mid;
326                 else
327                         i_min = mid + 1;
328         }
329
330         if (array[i_max].vm_start == key)
331                 return &array[i_max];
332
333         return NULL;
334 }
335
336 static long
337 search_ex_region(unsigned long key, struct ex_region_info *ri)
338 {
339         struct regions_data *rd;
340         struct ex_region_info *ri_p = NULL;
341
342         rcu_read_lock();
343
344         rd = rcu_dereference(ctx.rd);
345         if (!rd)
346                 goto out;
347
348         ri_p = __search_ex_region(rd->entries, rd->curr_nr, key);
349         if (ri_p)
350                 memcpy(ri, ri_p, sizeof(*ri));
351
352 out:
353         rcu_read_unlock();
354         return ri_p ? 0 : -ENOENT;
355 }
356
357 static long
358 get_extabs_ehabi(unsigned long key, struct ex_region_info *ri)
359 {
360         long err;
361         struct extab_info *ti_extab, *ti_exidx;
362
363         err = search_ex_region(key, ri);
364         if (err < 0)
365                 return err;
366
367         ti_extab = &ri->ex_sec[QUADD_SEC_TYPE_EXTAB];
368         ti_exidx = &ri->ex_sec[QUADD_SEC_TYPE_EXIDX];
369
370         return (ti_extab->length && ti_exidx->length) ? 0 : -ENOENT;
371 }
372
373 long
374 quadd_get_dw_frames(unsigned long key, struct ex_region_info *ri)
375 {
376         long err;
377         struct extab_info *ti, *ti_hdr;
378
379         err = search_ex_region(key, ri);
380         if (err < 0)
381                 return err;
382
383         ti = &ri->ex_sec[QUADD_SEC_TYPE_EH_FRAME];
384         ti_hdr = &ri->ex_sec[QUADD_SEC_TYPE_EH_FRAME_HDR];
385
386         if (ti->length && ti_hdr->length)
387                 return 0;
388
389         ti = &ri->ex_sec[QUADD_SEC_TYPE_DEBUG_FRAME];
390         ti_hdr = &ri->ex_sec[QUADD_SEC_TYPE_DEBUG_FRAME_HDR];
391
392         return (ti->length && ti_hdr->length) ? 0 : -ENOENT;
393 }
394
395 static struct regions_data *rd_alloc(unsigned long size)
396 {
397         struct regions_data *rd;
398
399         rd = kzalloc(sizeof(*rd), GFP_ATOMIC);
400         if (!rd)
401                 return NULL;
402
403         rd->entries = kzalloc(size * sizeof(*rd->entries), GFP_ATOMIC);
404         if (!rd->entries) {
405                 kfree(rd);
406                 return NULL;
407         }
408
409         rd->size = size;
410         rd->curr_nr = 0;
411
412         return rd;
413 }
414
415 static void rd_free(struct regions_data *rd)
416 {
417         if (rd)
418                 kfree(rd->entries);
419
420         kfree(rd);
421 }
422
423 static void rd_free_rcu(struct rcu_head *rh)
424 {
425         struct regions_data *rd = container_of(rh, struct regions_data, rcu);
426         rd_free(rd);
427 }
428
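/*
 * Register the exception-table sections described by @extabs for one mapped
 * object.  A fresh regions_data copy is allocated (growing the array by half
 * when it is full), the new region is inserted in sorted order and the copy
 * is published via RCU.  The region is also linked into mmap->ex_entries so
 * it can be dropped again when the mapping goes away.
 */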
429 int quadd_unwind_set_extab(struct quadd_sections *extabs,
430                            struct quadd_mmap_area *mmap)
431 {
432         int i, err = 0;
433         unsigned long nr_entries, nr_added, new_size;
434         struct ex_region_info ri_entry;
435         struct extab_info *ti;
436         struct regions_data *rd, *rd_new;
437         struct ex_region_info *ex_entry;
438
439         if (mmap->type != QUADD_MMAP_TYPE_EXTABS)
440                 return -EIO;
441
442         spin_lock(&ctx.lock);
443
444         rd = rcu_dereference(ctx.rd);
445         if (!rd) {
446                 pr_warn("%s: warning: rd\n", __func__);
447                 new_size = QUADD_EXTABS_SIZE;
448                 nr_entries = 0;
449         } else {
450                 new_size = rd->size;
451                 nr_entries = rd->curr_nr;
452         }
453
454         if (nr_entries >= new_size)
455                 new_size += new_size >> 1;
456
457         rd_new = rd_alloc(new_size);
458         if (IS_ERR_OR_NULL(rd_new)) {
459                 pr_err("%s: error: rd_alloc\n", __func__);
460                 err = -ENOMEM;
461                 goto error_out;
462         }
463
464         if (rd && nr_entries)
465                 memcpy(rd_new->entries, rd->entries,
466                        nr_entries * sizeof(*rd->entries));
467
468         rd_new->curr_nr = nr_entries;
469
470         ri_entry.vm_start = extabs->vm_start;
471         ri_entry.vm_end = extabs->vm_end;
472
473         ri_entry.mmap = mmap;
474
475         for (i = 0; i < QUADD_SEC_TYPE_MAX; i++) {
476                 struct quadd_sec_info *si = &extabs->sec[i];
477
478                 ti = &ri_entry.ex_sec[i];
479
480                 ti->tf_start = 0;
481                 ti->tf_end = 0;
482
483                 if (!si->addr) {
484                         ti->addr = 0;
485                         ti->length = 0;
486                         ti->mmap_offset = 0;
487
488                         continue;
489                 }
490
491                 ti->addr = si->addr;
492                 ti->length = si->length;
493                 ti->mmap_offset = si->mmap_offset;
494         }
495
496         nr_added = add_ex_region(rd_new, &ri_entry);
497         if (nr_added == 0)
498                 goto error_free;
499
500         rd_new->curr_nr += nr_added;
501
502         ex_entry = kzalloc(sizeof(*ex_entry), GFP_ATOMIC);
503         if (!ex_entry) {
504                 err = -ENOMEM;
505                 goto error_free;
506         }
507         memcpy(ex_entry, &ri_entry, sizeof(*ex_entry));
508
509         INIT_LIST_HEAD(&ex_entry->list);
510         list_add_tail(&ex_entry->list, &mmap->ex_entries);
511
512         rcu_assign_pointer(ctx.rd, rd_new);
513
514         if (rd)
515                 call_rcu(&rd->rcu, rd_free_rcu);
516
517         spin_unlock(&ctx.lock);
518
519         return 0;
520
521 error_free:
522         rd_free(rd_new);
523 error_out:
524         spin_unlock(&ctx.lock);
525         return err;
526 }
527
528 void
529 quadd_unwind_set_tail_info(unsigned long vm_start,
530                            int secid,
531                            unsigned long tf_start,
532                            unsigned long tf_end)
533 {
534         struct ex_region_info *ri;
535         unsigned long nr_entries, size;
536         struct regions_data *rd, *rd_new;
537         struct extab_info *ti;
538
539         spin_lock(&ctx.lock);
540
541         rd = rcu_dereference(ctx.rd);
542
543         if (!rd || rd->curr_nr == 0)
544                 goto error_out;
545
546         size = rd->size;
547         nr_entries = rd->curr_nr;
548
549         rd_new = rd_alloc(size);
550         if (IS_ERR_OR_NULL(rd_new)) {
551                 pr_err_once("%s: error: rd_alloc\n", __func__);
552                 goto error_out;
553         }
554
555         memcpy(rd_new->entries, rd->entries,
556                nr_entries * sizeof(*rd->entries));
557
558         rd_new->curr_nr = nr_entries;
559
560         ri = __search_ex_region(rd_new->entries, nr_entries, vm_start);
561         if (!ri)
562                 goto error_free;
563
564         ti = &ri->ex_sec[secid];
565
566         ti->tf_start = tf_start;
567         ti->tf_end = tf_end;
568
569         rcu_assign_pointer(ctx.rd, rd_new);
570
571         call_rcu(&rd->rcu, rd_free_rcu);
572         spin_unlock(&ctx.lock);
573
574         return;
575
576 error_free:
577         rd_free(rd_new);
578
579 error_out:
580         spin_unlock(&ctx.lock);
581 }
582
583 static int
584 clean_mmap(struct regions_data *rd, struct quadd_mmap_area *mmap, int rm_ext)
585 {
586         int nr_removed = 0;
587         struct ex_region_info *entry, *next;
588
589         if (!rd || !mmap)
590                 return 0;
591
592         list_for_each_entry_safe(entry, next, &mmap->ex_entries, list) {
593                 if (rm_ext)
594                         nr_removed += remove_ex_region(rd, entry);
595
596                 list_del(&entry->list);
597                 kfree(entry);
598         }
599
600         return nr_removed;
601 }
602
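/*
 * A profiled mapping is being torn down: drop the regions that belong to
 * @mmap from a fresh copy of the array and publish the copy via RCU.
 */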
603 void quadd_unwind_delete_mmap(struct quadd_mmap_area *mmap)
604 {
605         unsigned long nr_entries, nr_removed, new_size;
606         struct regions_data *rd, *rd_new;
607
608         if (!mmap)
609                 return;
610
611         spin_lock(&ctx.lock);
612
613         rd = rcu_dereference(ctx.rd);
614         if (!rd || !rd->curr_nr)
615                 goto error_out;
616
617         nr_entries = rd->curr_nr;
618         new_size = min_t(unsigned long, rd->size, nr_entries);
619
620         rd_new = rd_alloc(new_size);
621         if (IS_ERR_OR_NULL(rd_new)) {
622                 pr_err("%s: error: rd_alloc\n", __func__);
623                 goto error_out;
624         }
625         rd_new->size = new_size;
626         rd_new->curr_nr = nr_entries;
627
628         memcpy(rd_new->entries, rd->entries,
629                 nr_entries * sizeof(*rd->entries));
630
631         nr_removed = clean_mmap(rd_new, mmap, 1);
632         rd_new->curr_nr -= nr_removed;
633
634         rcu_assign_pointer(ctx.rd, rd_new);
635         call_rcu(&rd->rcu, rd_free_rcu);
636
637 error_out:
638         spin_unlock(&ctx.lock);
639 }
640
641 static const struct unwind_idx *
642 unwind_find_idx(struct ex_region_info *ri, u32 addr)
643 {
644         u32 value;
645         unsigned long length;
646         struct extab_info *ti;
647         struct unwind_idx *start;
648         struct unwind_idx *stop;
649         struct unwind_idx *mid = NULL;
650
651         ti = &ri->ex_sec[QUADD_SEC_TYPE_EXIDX];
652
653         length = ti->length / sizeof(*start);
654
655         if (unlikely(!length))
656                 return NULL;
657
658         start = (struct unwind_idx *)((char *)ri->mmap->data + ti->mmap_offset);
659         stop = start + length - 1;
660
661         value = (u32)mmap_prel31_to_addr(&start->addr_offset, ri,
662                                          QUADD_SEC_TYPE_EXIDX,
663                                          QUADD_SEC_TYPE_EXTAB, 0);
664         if (addr < value)
665                 return NULL;
666
667         value = (u32)mmap_prel31_to_addr(&stop->addr_offset, ri,
668                                          QUADD_SEC_TYPE_EXIDX,
669                                          QUADD_SEC_TYPE_EXTAB, 0);
670         if (addr >= value)
671                 return NULL;
672
673         while (start < stop - 1) {
674                 mid = start + ((stop - start) >> 1);
675
676                 value = (u32)mmap_prel31_to_addr(&mid->addr_offset, ri,
677                                                  QUADD_SEC_TYPE_EXIDX,
678                                                  QUADD_SEC_TYPE_EXTAB, 0);
679
680                 if (addr < value)
681                         stop = mid;
682                 else
683                         start = mid;
684         }
685
686         return start;
687 }
688
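/*
 * Fetch the next unwind instruction byte.  Instructions are packed
 * most-significant-byte first inside the 32-bit words of the table:
 * ctrl->byte counts down from 3 to 0 within the current word and
 * ctrl->entries is the number of words still available.
 */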
689 static unsigned long
690 unwind_get_byte(struct quadd_mmap_area *mmap,
691                 struct unwind_ctrl_block *ctrl, long *err)
692 {
693         unsigned long ret;
694         u32 insn_word;
695
696         *err = 0;
697
698         if (ctrl->entries <= 0) {
699                 pr_err_once("%s: error: corrupt unwind table\n", __func__);
700                 *err = -QUADD_URC_TBL_IS_CORRUPT;
701                 return 0;
702         }
703
704         *err = read_mmap_data(mmap, ctrl->insn, &insn_word);
705         if (*err < 0)
706                 return 0;
707
708         ret = (insn_word >> (ctrl->byte * 8)) & 0xff;
709
710         if (ctrl->byte == 0) {
711                 ctrl->insn++;
712                 ctrl->entries--;
713                 ctrl->byte = 3;
714         } else
715                 ctrl->byte--;
716
717         return ret;
718 }
719
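/*
 * Decode a ULEB128 value from the instruction stream (7 data bits per byte,
 * least significant group first, bit 7 set on all but the last byte).
 * Returns the number of bytes consumed or a negative error code.
 */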
720 static long
721 read_uleb128(struct quadd_mmap_area *mmap,
722              struct unwind_ctrl_block *ctrl,
723              unsigned long *ret)
724 {
725         long err = 0;
726         unsigned long result;
727         unsigned char byte;
728         int shift, count;
729
730         result = 0;
731         shift = 0;
732         count = 0;
733
734         while (1) {
735                 byte = unwind_get_byte(mmap, ctrl, &err);
736                 if (err < 0)
737                         return err;
738
739                 count++;
740
741                 result |= (byte & 0x7f) << shift;
742                 shift += 7;
743
744                 if (!(byte & 0x80))
745                         break;
746         }
747
748         *ret = result;
749
750         return count;
751 }
752
753 /*
754  * Execute the current unwind instruction.
755  */
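/*
 * Compact (personality routine 0/1) encodings handled below:
 *   00xxxxxx            vsp += (x << 2) + 4
 *   01xxxxxx            vsp -= (x << 2) + 4
 *   1000xxxx xxxxxxxx   pop r4-r15 under a 12-bit mask (0x8000: refuse to unwind)
 *   1001nnnn            vsp = r[n]                (n != 13, 15)
 *   10100nnn / 10101nnn pop r4-r[4+n], plus r14 for 10101nnn
 *   10110000            finish
 *   10110001 0000xxxx   pop r0-r3 under mask
 *   10110010 uleb128    vsp += 0x204 + (uleb128 << 2)
 *   10110011, 11001000, 11001001, 10111nnn, 11010nnn   VFP register pops
 * For the VFP pops only vsp is adjusted; the D registers themselves are not
 * needed for backtracing.
 */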
756 static long
757 unwind_exec_insn(struct quadd_mmap_area *mmap,
758                  struct unwind_ctrl_block *ctrl)
759 {
760         long err;
761         unsigned int i;
762         unsigned long insn = unwind_get_byte(mmap, ctrl, &err);
763
764         if (err < 0)
765                 return err;
766
767         pr_debug("%s: insn = %08lx\n", __func__, insn);
768
769         if ((insn & 0xc0) == 0x00) {
770                 ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
771
772                 pr_debug("CMD_DATA_POP: vsp = vsp + %lu (new: %#x)\n",
773                         ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
774         } else if ((insn & 0xc0) == 0x40) {
775                 ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
776
777                         pr_debug("CMD_DATA_PUSH: vsp = vsp - %lu (new: %#x)\n",
778                         ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
779         } else if ((insn & 0xf0) == 0x80) {
780                 unsigned long mask;
781                 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
782                 int load_sp, reg = 4;
783
784                 insn = (insn << 8) | unwind_get_byte(mmap, ctrl, &err);
785                 if (err < 0)
786                         return err;
787
788                 mask = insn & 0x0fff;
789                 if (mask == 0) {
790                         pr_debug("CMD_REFUSED: unwind: 'Refuse to unwind' instruction %04lx\n",
791                                    insn);
792                         return -QUADD_URC_REFUSE_TO_UNWIND;
793                 }
794
795                 /* pop R4-R15 according to mask */
796                 load_sp = mask & (1 << (13 - 4));
797                 while (mask) {
798                         if (mask & 1) {
799                                 err = read_user_data(vsp++, ctrl->vrs[reg]);
800                                 if (err < 0)
801                                         return err;
802
803                                 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
804                         }
805                         mask >>= 1;
806                         reg++;
807                 }
808                 if (!load_sp)
809                         ctrl->vrs[SP] = (unsigned long)vsp;
810
811                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
812         } else if ((insn & 0xf0) == 0x90 &&
813                    (insn & 0x0d) != 0x0d) {
814                 ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
815                 pr_debug("CMD_REG_TO_SP: vsp = {r%lu}\n", insn & 0x0f);
816         } else if ((insn & 0xf0) == 0xa0) {
817                 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
818                 unsigned int reg;
819
820                 /* pop R4-R[4+bbb] */
821                 for (reg = 4; reg <= 4 + (insn & 7); reg++) {
822                         err = read_user_data(vsp++, ctrl->vrs[reg]);
823                         if (err < 0)
824                                 return err;
825
826                         pr_debug("CMD_REG_POP: pop {r%u}\n", reg);
827                 }
828
829                 if (insn & 0x08) {
830                         err = read_user_data(vsp++, ctrl->vrs[14]);
831                         if (err < 0)
832                                 return err;
833
834                         pr_debug("CMD_REG_POP: pop {r14}\n");
835                 }
836
837                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
838                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
839         } else if (insn == 0xb0) {
840                 if (ctrl->vrs[PC] == 0)
841                         ctrl->vrs[PC] = ctrl->vrs[LR];
842                 /* no further processing */
843                 ctrl->entries = 0;
844
845                 pr_debug("CMD_FINISH\n");
846         } else if (insn == 0xb1) {
847                 unsigned long mask = unwind_get_byte(mmap, ctrl, &err);
848                 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
849                 int reg = 0;
850
851                 if (err < 0)
852                         return err;
853
854                 if (mask == 0 || mask & 0xf0) {
855                         pr_debug("unwind: Spare encoding %04lx\n",
856                                (insn << 8) | mask);
857                         return -QUADD_URC_SPARE_ENCODING;
858                 }
859
860                 /* pop R0-R3 according to mask */
861                 while (mask) {
862                         if (mask & 1) {
863                                 err = read_user_data(vsp++, ctrl->vrs[reg]);
864                                 if (err < 0)
865                                         return err;
866
867                                 pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
868                         }
869                         mask >>= 1;
870                         reg++;
871                 }
872
873                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
874                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
875         } else if (insn == 0xb2) {
876                 long count;
877                 unsigned long uleb128 = 0;
878
879                 count = read_uleb128(mmap, ctrl, &uleb128);
880                 if (count < 0)
881                         return count;
882
883                 if (count == 0)
884                         return -QUADD_URC_TBL_IS_CORRUPT;
885
886                 ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
887
888                 pr_debug("CMD_DATA_POP: vsp = vsp + %lu (%#lx), new vsp: %#x\n",
889                          0x204 + (uleb128 << 2), 0x204 + (uleb128 << 2),
890                          ctrl->vrs[SP]);
891         } else if (insn == 0xb3 || insn == 0xc8 || insn == 0xc9) {
892                 unsigned long data, reg_from, reg_to;
893                 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
894
895                 data = unwind_get_byte(mmap, ctrl, &err);
896                 if (err < 0)
897                         return err;
898
899                 reg_from = (data & 0xf0) >> 4;
900                 reg_to = reg_from + (data & 0x0f);
901
902                 if (insn == 0xc8) {
903                         reg_from += 16;
904                         reg_to += 16;
905                 }
906
907                 for (i = reg_from; i <= reg_to; i++)
908                         vsp += 2;
909
910                 if (insn == 0xb3)
911                         vsp++;
912
913                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
914
915                 pr_debug("CMD_VFP_POP (%#lx %#lx): pop {D%lu-D%lu}\n",
916                          insn, data, reg_from, reg_to);
917                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
918         } else if ((insn & 0xf8) == 0xb8 || (insn & 0xf8) == 0xd0) {
919                 unsigned long reg_to;
920                 unsigned long data = insn & 0x07;
921                 u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
922
923                 reg_to = 8 + data;
924
925                 for (i = 8; i <= reg_to; i++)
926                         vsp += 2;
927
928                 if ((insn & 0xf8) == 0xb8)
929                         vsp++;
930
931                 ctrl->vrs[SP] = (u32)(unsigned long)vsp;
932
933                 pr_debug("CMD_VFP_POP (%#lx): pop {D8-D%lu}\n",
934                          insn, reg_to);
935                 pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
936         } else {
937                 pr_debug("error: unhandled instruction %02lx\n", insn);
938                 return -QUADD_URC_UNHANDLED_INSTRUCTION;
939         }
940
941         pr_debug("%s: fp_arm: %#x, fp_thumb: %#x, sp: %#x, lr = %#x, pc: %#x\n",
942                  __func__,
943                  ctrl->vrs[FP_ARM], ctrl->vrs[FP_THUMB], ctrl->vrs[SP],
944                  ctrl->vrs[LR], ctrl->vrs[PC]);
945
946         return 0;
947 }
948
949 /*
950  * Unwind a single frame: find the unwind index entry for frame->pc and
951  * execute its instructions, updating the frame's sp, lr and pc in place.
952  */
953 static long
954 unwind_frame(struct ex_region_info *ri,
955              struct stackframe *frame,
956              struct vm_area_struct *vma_sp)
957 {
958         unsigned long high, low;
959         const struct unwind_idx *idx;
960         struct unwind_ctrl_block ctrl;
961         long err = 0;
962         u32 val;
963
964         if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32)))
965                 return -QUADD_URC_SP_INCORRECT;
966
967         /* only go to a higher address on the stack */
968         low = frame->sp;
969         high = vma_sp->vm_end;
970
971         pr_debug("pc: %#lx, lr: %#lx, sp:%#lx, low/high: %#lx/%#lx\n",
972                 frame->pc, frame->lr, frame->sp, low, high);
973
974         idx = unwind_find_idx(ri, frame->pc);
975         if (IS_ERR_OR_NULL(idx))
976                 return -QUADD_URC_IDX_NOT_FOUND;
977
978         pr_debug("index was found by pc (%#lx): %p\n", frame->pc, idx);
979
980         ctrl.vrs[FP_THUMB] = frame->fp_thumb;
981         ctrl.vrs[FP_ARM] = frame->fp_arm;
982
983         ctrl.vrs[SP] = frame->sp;
984         ctrl.vrs[LR] = frame->lr;
985         ctrl.vrs[PC] = 0;
986
987         err = read_mmap_data(ri->mmap, &idx->insn, &val);
988         if (err < 0)
989                 return err;
990
991         if (val == 1) {
992                 /* can't unwind */
993                 return -QUADD_URC_CANTUNWIND;
994         } else if ((val & 0x80000000) == 0) {
995                 /* prel31 to the unwind table */
996                 ctrl.insn = (u32 *)(unsigned long)
997                                 mmap_prel31_to_addr(&idx->insn, ri,
998                                                     QUADD_SEC_TYPE_EXIDX,
999                                                     QUADD_SEC_TYPE_EXTAB, 1);
1000                 if (!ctrl.insn)
1001                         return -QUADD_URC_EACCESS;
1002         } else if ((val & 0xff000000) == 0x80000000) {
1003                 /* only personality routine 0 supported in the index */
1004                 ctrl.insn = &idx->insn;
1005         } else {
1006                 pr_debug("unsupported personality routine %#x in the index at %p\n",
1007                          val, idx);
1008                 return -QUADD_URC_UNSUPPORTED_PR;
1009         }
1010
1011         err = read_mmap_data(ri->mmap, ctrl.insn, &val);
1012         if (err < 0)
1013                 return err;
1014
1015         /* check the personality routine */
1016         if ((val & 0xff000000) == 0x80000000) {
1017                 ctrl.byte = 2;
1018                 ctrl.entries = 1;
1019         } else if ((val & 0xff000000) == 0x81000000) {
1020                 ctrl.byte = 1;
1021                 ctrl.entries = 1 + ((val & 0x00ff0000) >> 16);
1022         } else {
1023                 pr_debug("unsupported personality routine %#x at %p\n",
1024                          val, ctrl.insn);
1025                 return -QUADD_URC_UNSUPPORTED_PR;
1026         }
1027
1028         while (ctrl.entries > 0) {
1029                 err = unwind_exec_insn(ri->mmap, &ctrl);
1030                 if (err < 0)
1031                         return err;
1032
1033                 if (ctrl.vrs[SP] & 0x03 ||
1034                     ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
1035                         return -QUADD_URC_SP_INCORRECT;
1036         }
1037
1038         if (ctrl.vrs[PC] == 0)
1039                 ctrl.vrs[PC] = ctrl.vrs[LR];
1040
1041         if (!validate_pc_addr(ctrl.vrs[PC], sizeof(u32)))
1042                 return -QUADD_URC_PC_INCORRECT;
1043
1044         frame->fp_thumb = ctrl.vrs[FP_THUMB];
1045         frame->fp_arm = ctrl.vrs[FP_ARM];
1046
1047         frame->sp = ctrl.vrs[SP];
1048         frame->lr = ctrl.vrs[LR];
1049         frame->pc = ctrl.vrs[PC];
1050
1051         return 0;
1052 }
1053
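/*
 * Walk the call chain: for every step find the VMA containing the current
 * PC, switch to that region's tables when the PC has left the previous
 * region, unwind one frame and record the new PC.  The loop stops on the
 * first error (reported through cc->urc_ut) or when the callchain buffer
 * refuses further entries.
 */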
1054 static void
1055 unwind_backtrace(struct quadd_callchain *cc,
1056                  struct ex_region_info *ri,
1057                  struct stackframe *frame,
1058                  struct vm_area_struct *vma_sp,
1059                  struct task_struct *task)
1060 {
1061         struct ex_region_info ri_new;
1062
1063         cc->urc_ut = QUADD_URC_FAILURE;
1064
1065         pr_debug("fp_arm: %#lx, fp_thumb: %#lx, sp: %#lx, lr: %#lx, pc: %#lx\n",
1066                  frame->fp_arm, frame->fp_thumb,
1067                  frame->sp, frame->lr, frame->pc);
1068         pr_debug("vma_sp: %#lx - %#lx, length: %#lx\n",
1069                  vma_sp->vm_start, vma_sp->vm_end,
1070                  vma_sp->vm_end - vma_sp->vm_start);
1071
1072         while (1) {
1073                 long err;
1074                 int nr_added;
1075                 struct extab_info *ti;
1076                 unsigned long where = frame->pc;
1077                 struct vm_area_struct *vma_pc;
1078                 struct mm_struct *mm = task->mm;
1079
1080                 if (!mm)
1081                         break;
1082
1083                 if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32))) {
1084                         cc->urc_ut = QUADD_URC_SP_INCORRECT;
1085                         break;
1086                 }
1087
1088                 vma_pc = find_vma(mm, frame->pc);
1089                 if (!vma_pc)
1090                         break;
1091
1092                 ti = &ri->ex_sec[QUADD_SEC_TYPE_EXIDX];
1093
1094                 if (!is_vma_addr(ti->addr, vma_pc, sizeof(u32))) {
1095                         err = get_extabs_ehabi(vma_pc->vm_start, &ri_new);
1096                         if (err) {
1097                                 cc->urc_ut = QUADD_URC_TBL_NOT_EXIST;
1098                                 break;
1099                         }
1100
1101                         ri = &ri_new;
1102                 }
1103
1104                 err = unwind_frame(ri, frame, vma_sp);
1105                 if (err < 0) {
1106                         pr_debug("end unwind, urc: %ld\n", err);
1107                         cc->urc_ut = -err;
1108                         break;
1109                 }
1110
1111                 pr_debug("function at [<%08lx>] from [<%08lx>]\n",
1112                          where, frame->pc);
1113
1114                 cc->curr_sp = frame->sp;
1115                 cc->curr_fp = frame->fp_arm;
1116                 cc->curr_fp_thumb = frame->fp_thumb;
1117                 cc->curr_pc = frame->pc;
1118
1119                 nr_added = quadd_callchain_store(cc, frame->pc,
1120                                                  QUADD_UNW_TYPE_UT);
1121                 if (nr_added == 0)
1122                         break;
1123         }
1124 }
1125
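/*
 * Entry point for EHABI-based unwinding of a user-space sample.  If an
 * earlier pass already produced frames (nr_prev > 0), the walk continues
 * from the state saved in cc->curr_*; otherwise the initial frame is taken
 * from the interrupted registers.  On ARM64 kernels only compat (AArch32)
 * tasks are handled.
 */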
1126 unsigned int
1127 quadd_get_user_cc_arm32_ehabi(struct pt_regs *regs,
1128                               struct quadd_callchain *cc,
1129                               struct task_struct *task)
1130 {
1131         long err;
1132         int nr_prev = cc->nr;
1133         unsigned long ip, sp, lr;
1134         struct vm_area_struct *vma, *vma_sp;
1135         struct mm_struct *mm = task->mm;
1136         struct ex_region_info ri;
1137         struct stackframe frame;
1138
1139         if (!regs || !mm)
1140                 return 0;
1141
1142 #ifdef CONFIG_ARM64
1143         if (!compat_user_mode(regs))
1144                 return 0;
1145 #endif
1146
1147         if (cc->urc_ut == QUADD_URC_LEVEL_TOO_DEEP)
1148                 return nr_prev;
1149
1150         cc->urc_ut = QUADD_URC_FAILURE;
1151
1152         if (nr_prev > 0) {
1153                 ip = cc->curr_pc;
1154                 sp = cc->curr_sp;
1155                 lr = 0;
1156
1157                 frame.fp_thumb = cc->curr_fp_thumb;
1158                 frame.fp_arm = cc->curr_fp;
1159         } else {
1160                 ip = instruction_pointer(regs);
1161                 sp = quadd_user_stack_pointer(regs);
1162                 lr = quadd_user_link_register(regs);
1163
1164 #ifdef CONFIG_ARM64
1165                 frame.fp_thumb = regs->compat_usr(7);
1166                 frame.fp_arm = regs->compat_usr(11);
1167 #else
1168                 frame.fp_thumb = regs->ARM_r7;
1169                 frame.fp_arm = regs->ARM_fp;
1170 #endif
1171         }
1172
1173         frame.pc = ip;
1174         frame.sp = sp;
1175         frame.lr = lr;
1176
1177         pr_debug("pc: %#lx, lr: %#lx\n", ip, lr);
1178         pr_debug("sp: %#lx, fp_arm: %#lx, fp_thumb: %#lx\n",
1179                  sp, frame.fp_arm, frame.fp_thumb);
1180
1181         vma = find_vma(mm, ip);
1182         if (!vma)
1183                 return 0;
1184
1185         vma_sp = find_vma(mm, sp);
1186         if (!vma_sp)
1187                 return 0;
1188
1189         err = get_extabs_ehabi(vma->vm_start, &ri);
1190         if (err) {
1191                 cc->urc_ut = QUADD_URC_TBL_NOT_EXIST;
1192                 return 0;
1193         }
1194
1195         unwind_backtrace(cc, &ri, &frame, vma_sp, task);
1196
1197         pr_debug("%s: exit, cc->nr: %d --> %d\n",
1198                  __func__, nr_prev, cc->nr);
1199
1200         return cc->nr;
1201 }
1202
1203 int
1204 quadd_is_ex_entry_exist_arm32_ehabi(struct pt_regs *regs,
1205                                     unsigned long addr,
1206                                     struct task_struct *task)
1207 {
1208         long err;
1209         u32 value;
1210         const struct unwind_idx *idx;
1211         struct ex_region_info ri;
1212         struct vm_area_struct *vma;
1213         struct mm_struct *mm = task->mm;
1214
1215         if (!regs || !mm)
1216                 return 0;
1217
1218         vma = find_vma(mm, addr);
1219         if (!vma)
1220                 return 0;
1221
1222         err = get_extabs_ehabi(vma->vm_start, &ri);
1223         if (err)
1224                 return 0;
1225
1226         idx = unwind_find_idx(&ri, addr);
1227         if (IS_ERR_OR_NULL(idx))
1228                 return 0;
1229
1230         err = read_mmap_data(ri.mmap, &idx->insn, &value);
1231         if (err < 0)
1232                 return 0;
1233
1234         /* EXIDX_CANTUNWIND */
1235         if (value == 1)
1236                 return 0;
1237
1238         return 1;
1239 }
1240
1241 int quadd_unwind_start(struct task_struct *task)
1242 {
1243         int err;
1244         struct regions_data *rd, *rd_old;
1245
1246         rd = rd_alloc(QUADD_EXTABS_SIZE);
1247         if (IS_ERR_OR_NULL(rd)) {
1248                 pr_err("%s: error: rd_alloc\n", __func__);
1249                 return -ENOMEM;
1250         }
1251
1252         err = quadd_dwarf_unwind_start();
1253         if (err) {
1254                 rd_free(rd);
1255                 return err;
1256         }
1257
1258         spin_lock(&ctx.lock);
1259
1260         rd_old = rcu_dereference(ctx.rd);
1261         if (rd_old)
1262                 pr_warn("%s: warning: rd_old\n", __func__);
1263
1264         rcu_assign_pointer(ctx.rd, rd);
1265
1266         if (rd_old)
1267                 call_rcu(&rd_old->rcu, rd_free_rcu);
1268
1269         ctx.pid = task->tgid;
1270
1271         ctx.ex_tables_size = 0;
1272
1273         spin_unlock(&ctx.lock);
1274
1275         return 0;
1276 }
1277
1278 void quadd_unwind_stop(void)
1279 {
1280         int i;
1281         unsigned long nr_entries, size;
1282         struct regions_data *rd;
1283         struct ex_region_info *ri;
1284
1285         quadd_dwarf_unwind_stop();
1286
1287         spin_lock(&ctx.lock);
1288
1289         ctx.pid = 0;
1290
1291         rd = rcu_dereference(ctx.rd);
1292         if (!rd)
1293                 goto out;
1294
1295         nr_entries = rd->curr_nr;
1296         size = rd->size;
1297
1298         for (i = 0; i < nr_entries; i++) {
1299                 ri = &rd->entries[i];
1300                 clean_mmap(rd, ri->mmap, 0);
1301         }
1302
1303         rcu_assign_pointer(ctx.rd, NULL);
1304         call_rcu(&rd->rcu, rd_free_rcu);
1305
1306 out:
1307         spin_unlock(&ctx.lock);
1308         pr_info("exception tables size: %lu bytes\n", ctx.ex_tables_size);
1309 }
1310
1311 int quadd_unwind_init(void)
1312 {
1313         int err;
1314
1315         err = quadd_dwarf_unwind_init();
1316         if (err)
1317                 return err;
1318
1319         spin_lock_init(&ctx.lock);
1320         rcu_assign_pointer(ctx.rd, NULL);
1321         ctx.pid = 0;
1322
1323         return 0;
1324 }
1325
1326 void quadd_unwind_deinit(void)
1327 {
1328         quadd_unwind_stop();
1329         rcu_barrier();
1330 }