Too deep stack level: handle it properly.
Appropriate unwind reason code has been added.
Unwinding based on frame pointers: add unwind reason codes.
Bug 200005380
Change-Id: I2199df90c746ada6a7f224a8b675638b69dc6da8
Signed-off-by: Igor Nabirushkin <inabirushkin@nvidia.com>
Reviewed-on: http://git-master/r/410717
(cherry picked from commit
e96cd9adf0ca020c55545925168671373a67a009)
Reviewed-on: http://git-master/r/454446
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Tested-by: Maxim Morin <mmorin@nvidia.com>
Reviewed-by: Mitch Luban <mluban@nvidia.com>
quadd_callchain_store(struct quadd_callchain *cc,
unsigned long ip)
{
quadd_callchain_store(struct quadd_callchain *cc,
unsigned long ip)
{
- if (ip && cc->nr < QUADD_MAX_STACK_DEPTH) {
- if (cc->cs_64)
- cc->ip_64[cc->nr++] = ip;
- else
- cc->ip_32[cc->nr++] = ip;
+ if (!ip) {
+ cc->unw_rc = QUADD_URC_PC_INCORRECT;
+ return 0;
+ }
+ if (cc->nr >= QUADD_MAX_STACK_DEPTH) {
+ cc->unw_rc = QUADD_URC_LEVEL_TOO_DEEP;
+ return 0;
+ }
+
+ if (cc->cs_64)
+ cc->ip_64[cc->nr++] = ip;
+ else
+ cc->ip_32[cc->nr++] = ip;
+
+ return 1;
}
static unsigned long __user *
}
static unsigned long __user *
struct quadd_callchain *cc,
struct vm_area_struct *stack_vma)
{
struct quadd_callchain *cc,
struct vm_area_struct *stack_vma)
{
unsigned long value, value_lr = 0, value_fp = 0;
unsigned long __user *fp_prev = NULL;
if (!is_vma_addr((unsigned long)tail, stack_vma, sizeof(*tail)))
return NULL;
unsigned long value, value_lr = 0, value_fp = 0;
unsigned long __user *fp_prev = NULL;
if (!is_vma_addr((unsigned long)tail, stack_vma, sizeof(*tail)))
return NULL;
- if (__copy_from_user_inatomic(&value, tail, sizeof(unsigned long)))
+ if (__copy_from_user_inatomic(&value, tail, sizeof(unsigned long))) {
+ cc->unw_rc = QUADD_URC_EACCESS;
if (is_vma_addr(value, stack_vma, sizeof(value))) {
/* gcc thumb/clang frame */
if (is_vma_addr(value, stack_vma, sizeof(value))) {
/* gcc thumb/clang frame */
return NULL;
if (__copy_from_user_inatomic(&value_lr, tail + 1,
return NULL;
if (__copy_from_user_inatomic(&value_lr, tail + 1,
+ sizeof(value_lr))) {
+ cc->unw_rc = QUADD_URC_EACCESS;
} else {
/* gcc arm frame */
if (__copy_from_user_inatomic(&value_fp, tail - 1,
} else {
/* gcc arm frame */
if (__copy_from_user_inatomic(&value_fp, tail - 1,
+ sizeof(value_fp))) {
+ cc->unw_rc = QUADD_URC_EACCESS;
if (!is_vma_addr(value_fp, stack_vma, sizeof(value_fp)))
return NULL;
if (!is_vma_addr(value_fp, stack_vma, sizeof(value_fp)))
return NULL;
fp_prev = (unsigned long __user *)value_fp;
fp_prev = (unsigned long __user *)value_fp;
- if (value_lr < QUADD_USER_SPACE_MIN_ADDR)
+ if (value_lr < QUADD_USER_SPACE_MIN_ADDR) {
+ cc->unw_rc = QUADD_URC_PC_INCORRECT;
- quadd_callchain_store(cc, value_lr);
+ nr_added = quadd_callchain_store(cc, value_lr);
+ if (nr_added == 0)
+ return NULL;
if (fp_prev <= tail)
return NULL;
if (fp_prev <= tail)
return NULL;
struct mm_struct *mm = task->mm;
cc->nr = 0;
struct mm_struct *mm = task->mm;
cc->nr = 0;
- cc->unw_method = QUADD_UNW_METHOD_FP;
+ cc->unw_rc = QUADD_URC_FP_INCORRECT;
+ if (!regs || !mm) {
+ cc->unw_rc = QUADD_URC_FAILURE;
sp = quadd_user_stack_pointer(regs);
pc = instruction_pointer(regs);
sp = quadd_user_stack_pointer(regs);
pc = instruction_pointer(regs);
return 0;
vma = find_vma(mm, sp);
return 0;
vma = find_vma(mm, sp);
+ if (!vma) {
+ cc->unw_rc = QUADD_URC_SP_INCORRECT;
if (!is_vma_addr(fp, vma, sizeof(fp)))
return 0;
if (!is_vma_addr(fp, vma, sizeof(fp)))
return 0;
pr_warn_once("frame error: sp/fp: %#lx/%#lx, pc/lr: %#lx/%#lx, vma: %#lx-%#lx\n",
sp, fp, pc, quadd_user_link_register(regs),
vma->vm_start, vma->vm_end);
pr_warn_once("frame error: sp/fp: %#lx/%#lx, pc/lr: %#lx/%#lx, vma: %#lx-%#lx\n",
sp, fp, pc, quadd_user_link_register(regs),
vma->vm_start, vma->vm_end);
+ cc->unw_rc = QUADD_URC_EACCESS;
if (__copy_from_user_inatomic(
&value,
(unsigned long __user *)fp + 1,
if (__copy_from_user_inatomic(
&value,
(unsigned long __user *)fp + 1,
- sizeof(unsigned long)))
+ sizeof(unsigned long))) {
+ cc->unw_rc = QUADD_URC_EACCESS;
vma_pc = find_vma(mm, pc);
read_lr = 1;
vma_pc = find_vma(mm, pc);
read_lr = 1;
if (!read_lr || !is_vma_addr(value, vma_pc, sizeof(value))) {
/* gcc: fp --> short frame tail (fp) */
if (!read_lr || !is_vma_addr(value, vma_pc, sizeof(value))) {
/* gcc: fp --> short frame tail (fp) */
unsigned long lr = quadd_user_link_register(regs);
unsigned long lr = quadd_user_link_register(regs);
- if (lr < QUADD_USER_SPACE_MIN_ADDR)
+ if (lr < QUADD_USER_SPACE_MIN_ADDR) {
+ cc->unw_rc = QUADD_URC_PC_INCORRECT;
+ }
+
+ nr_added = quadd_callchain_store(cc, lr);
+ if (nr_added == 0)
+ return cc->nr;
- quadd_callchain_store(cc, lr);
tail = (unsigned long __user *)reg;
}
}
tail = (unsigned long __user *)reg;
}
}
struct vm_area_struct *vma;
unsigned long __user *tail;
struct vm_area_struct *vma;
unsigned long __user *tail;
+ cc->unw_rc = QUADD_URC_FP_INCORRECT;
+
+ if (!mm) {
+ cc->unw_rc = QUADD_URC_FAILURE;
+ return cc->nr;
+ }
vma = find_vma(mm, cc->curr_sp);
vma = find_vma(mm, cc->curr_sp);
+ if (!vma) {
+ cc->unw_rc = QUADD_URC_SP_INCORRECT;
+ return cc->nr;
+ }
tail = (unsigned long __user *)cc->curr_fp;
while (tail && !((unsigned long)tail & 0x3))
tail = user_backtrace(tail, cc, vma);
tail = (unsigned long __user *)cc->curr_fp;
while (tail && !((unsigned long)tail & 0x3))
tail = user_backtrace(tail, cc, vma);
struct quadd_callchain *cc,
struct vm_area_struct *stack_vma)
{
struct quadd_callchain *cc,
struct vm_area_struct *stack_vma)
{
u32 value, value_lr = 0, value_fp = 0;
u32 __user *fp_prev = NULL;
if (!is_vma_addr((unsigned long)tail, stack_vma, sizeof(*tail)))
return NULL;
u32 value, value_lr = 0, value_fp = 0;
u32 __user *fp_prev = NULL;
if (!is_vma_addr((unsigned long)tail, stack_vma, sizeof(*tail)))
return NULL;
- if (__copy_from_user_inatomic(&value, tail, sizeof(value)))
+ if (__copy_from_user_inatomic(&value, tail, sizeof(value))) {
+ cc->unw_rc = QUADD_URC_EACCESS;
if (is_vma_addr(value, stack_vma, sizeof(value))) {
/* gcc thumb/clang frame */
if (is_vma_addr(value, stack_vma, sizeof(value))) {
/* gcc thumb/clang frame */
return NULL;
if (__copy_from_user_inatomic(&value_lr, tail + 1,
return NULL;
if (__copy_from_user_inatomic(&value_lr, tail + 1,
+ sizeof(value_lr))) {
+ cc->unw_rc = QUADD_URC_EACCESS;
} else {
/* gcc arm frame */
if (__copy_from_user_inatomic(&value_fp, tail - 1,
} else {
/* gcc arm frame */
if (__copy_from_user_inatomic(&value_fp, tail - 1,
+ sizeof(value_fp))) {
+ cc->unw_rc = QUADD_URC_EACCESS;
if (!is_vma_addr(value_fp, stack_vma, sizeof(value_fp)))
return NULL;
if (!is_vma_addr(value_fp, stack_vma, sizeof(value_fp)))
return NULL;
fp_prev = (u32 __user *)(unsigned long)value_fp;
fp_prev = (u32 __user *)(unsigned long)value_fp;
- if (value_lr < QUADD_USER_SPACE_MIN_ADDR)
+ if (value_lr < QUADD_USER_SPACE_MIN_ADDR) {
+ cc->unw_rc = QUADD_URC_PC_INCORRECT;
- quadd_callchain_store(cc, value_lr);
+ nr_added = quadd_callchain_store(cc, value_lr);
+ if (nr_added == 0)
+ return NULL;
if (fp_prev <= tail)
return NULL;
if (fp_prev <= tail)
return NULL;
struct mm_struct *mm = task->mm;
cc->nr = 0;
struct mm_struct *mm = task->mm;
cc->nr = 0;
+ cc->unw_rc = QUADD_URC_FP_INCORRECT;
+ if (!regs || !mm) {
+ cc->unw_rc = QUADD_URC_FAILURE;
sp = quadd_user_stack_pointer(regs);
pc = instruction_pointer(regs);
sp = quadd_user_stack_pointer(regs);
pc = instruction_pointer(regs);
return 0;
vma = find_vma(mm, sp);
return 0;
vma = find_vma(mm, sp);
+ if (!vma) {
+ cc->unw_rc = QUADD_URC_SP_INCORRECT;
if (!is_vma_addr(fp, vma, sizeof(fp)))
return 0;
if (!is_vma_addr(fp, vma, sizeof(fp)))
return 0;
pr_warn_once("frame error: sp/fp: %#x/%#x, pc/lr: %#x/%#x, vma: %#lx-%#lx\n",
sp, fp, pc, (u32)quadd_user_link_register(regs),
vma->vm_start, vma->vm_end);
pr_warn_once("frame error: sp/fp: %#x/%#x, pc/lr: %#x/%#x, vma: %#lx-%#lx\n",
sp, fp, pc, (u32)quadd_user_link_register(regs),
vma->vm_start, vma->vm_end);
+ cc->unw_rc = QUADD_URC_EACCESS;
if (__copy_from_user_inatomic(
&value,
(u32 __user *)(fp + sizeof(u32)),
if (__copy_from_user_inatomic(
&value,
(u32 __user *)(fp + sizeof(u32)),
+ sizeof(value))) {
+ cc->unw_rc = QUADD_URC_EACCESS;
vma_pc = find_vma(mm, pc);
read_lr = 1;
vma_pc = find_vma(mm, pc);
read_lr = 1;
if (!read_lr || !is_vma_addr(value, vma_pc, sizeof(value))) {
/* gcc: fp --> short frame tail (fp) */
if (!read_lr || !is_vma_addr(value, vma_pc, sizeof(value))) {
/* gcc: fp --> short frame tail (fp) */
u32 lr = quadd_user_link_register(regs);
u32 lr = quadd_user_link_register(regs);
- if (lr < QUADD_USER_SPACE_MIN_ADDR)
+ if (lr < QUADD_USER_SPACE_MIN_ADDR) {
+ cc->unw_rc = QUADD_URC_PC_INCORRECT;
+ }
+
+ nr_added = quadd_callchain_store(cc, lr);
+ if (nr_added == 0)
+ return cc->nr;
- quadd_callchain_store(cc, lr);
tail = (u32 __user *)(unsigned long)reg;
}
}
tail = (u32 __user *)(unsigned long)reg;
}
}
struct vm_area_struct *vma;
u32 __user *tail;
struct vm_area_struct *vma;
u32 __user *tail;
+ cc->unw_rc = QUADD_URC_FP_INCORRECT;
+
+ if (!mm) {
+ cc->unw_rc = QUADD_URC_FAILURE;
+ return cc->nr;
+ }
vma = find_vma(mm, cc->curr_sp);
vma = find_vma(mm, cc->curr_sp);
+ if (!vma) {
+ cc->unw_rc = QUADD_URC_SP_INCORRECT;
+ return cc->nr;
+ }
tail = (u32 __user *)cc->curr_fp;
while (tail && !((unsigned long)tail & 0x3))
tail = user_backtrace_compat(tail, cc, vma);
tail = (u32 __user *)cc->curr_fp;
while (tail && !((unsigned long)tail & 0x3))
tail = user_backtrace_compat(tail, cc, vma);
{
if (cc->nr > 0) {
int nr, nr_prev = cc->nr;
{
if (cc->nr > 0) {
int nr, nr_prev = cc->nr;
+
+ if (cc->unw_rc == QUADD_URC_LEVEL_TOO_DEEP)
+ return nr_prev;
+
#ifdef CONFIG_ARM64
if (compat_user_mode(regs))
nr = __user_backtrace_compat(cc, task);
#ifdef CONFIG_ARM64
if (compat_user_mode(regs))
nr = __user_backtrace_compat(cc, task);
struct quadd_parameters *param = &ctx->param;
cc->nr = 0;
struct quadd_parameters *param = &ctx->param;
cc->nr = 0;
+ cc->unw_rc = QUADD_URC_FAILURE;
res = misc_register(misc_dev);
if (res < 0) {
pr_err("Error: misc_register: %d\n", res);
res = misc_register(misc_dev);
if (res < 0) {
pr_err("Error: misc_register: %d\n", res);
return res;
}
comm_ctx.misc_dev = misc_dev;
return res;
}
comm_ctx.misc_dev = misc_dev;
unsigned long where = frame.pc;
struct vm_area_struct *vma_pc;
struct mm_struct *mm = task->mm;
unsigned long where = frame.pc;
struct vm_area_struct *vma_pc;
struct mm_struct *mm = task->mm;
pr_debug("function at [<%08lx>] from [<%08lx>]\n",
where, frame.pc);
pr_debug("function at [<%08lx>] from [<%08lx>]\n",
where, frame.pc);
- quadd_callchain_store(cc, frame.pc);
-
cc->curr_sp = frame.sp;
cc->curr_fp = frame.fp_arm;
cc->curr_sp = frame.sp;
cc->curr_fp = frame.fp_arm;
+
+ nr_added = quadd_callchain_store(cc, frame.pc);
+ if (nr_added == 0)
+ break;
vec_idx++;
s->reserved = 0;
vec_idx++;
s->reserved = 0;
+ cc->unw_rc = QUADD_URC_SUCCESS;
if (ctx->param.backtrace) {
bt_size = quadd_get_user_callchain(user_regs, cc, ctx, task);
if (ctx->param.backtrace) {
bt_size = quadd_get_user_callchain(user_regs, cc, ctx, task);
}
extra_data |= cc->unw_method << QUADD_SED_UNW_METHOD_SHIFT;
}
extra_data |= cc->unw_method << QUADD_SED_UNW_METHOD_SHIFT;
-
- if (cc->unw_method == QUADD_UNW_METHOD_EHT ||
- cc->unw_method == QUADD_UNW_METHOD_MIXED)
- s->reserved |= cc->unw_rc << QUADD_SAMPLE_URC_SHIFT;
+ s->reserved |= cc->unw_rc << QUADD_SAMPLE_URC_SHIFT;
}
s->callchain_nr = bt_size;
}
s->callchain_nr = bt_size;
unsigned int extra;
if (!validate_freq(p->freq)) {
unsigned int extra;
if (!validate_freq(p->freq)) {
- pr_err("%s: incorrect frequency: %u", __func__, p->freq);
+ pr_err("%s: incorrect frequency: %u\n", __func__, p->freq);
#ifndef __QUADD_VERSION_H
#define __QUADD_VERSION_H
#ifndef __QUADD_VERSION_H
#define __QUADD_VERSION_H
-#define QUADD_MODULE_VERSION "1.67"
+#define QUADD_MODULE_VERSION "1.68"
#define QUADD_MODULE_BRANCH "Dev"
#endif /* __QUADD_VERSION_H */
#define QUADD_MODULE_BRANCH "Dev"
#endif /* __QUADD_VERSION_H */
QUADD_URC_SPARE_ENCODING,
QUADD_URC_UNSUPPORTED_PR,
QUADD_URC_PC_INCORRECT,
QUADD_URC_SPARE_ENCODING,
QUADD_URC_UNSUPPORTED_PR,
QUADD_URC_PC_INCORRECT,
+ QUADD_URC_LEVEL_TOO_DEEP,
+ QUADD_URC_FP_INCORRECT,