#include "hrt.h"
#include "tegra.h"
+/*
+ * is_table_unwinding - nonzero when any table-based unwind method is
+ * enabled for this callchain context: ARM EHABI unwind tables (um.ut)
+ * or DWARF call-frame info (um.dwarf).  Frame-pointer unwinding
+ * (um.fp) intentionally does not count as table unwinding.
+ */
+static inline int
+is_table_unwinding(struct quadd_callchain *cc)
+{
+ return cc->um.ut || cc->um.dwarf;
+}
+
unsigned long
quadd_user_stack_pointer(struct pt_regs *regs)
{
unsigned long low_addr = cc->hrt->low_addr;
if (ip < low_addr || !validate_pc_addr(ip, sizeof(unsigned long))) {
- cc->unw_rc = QUADD_URC_PC_INCORRECT;
+ cc->urc_fp = QUADD_URC_PC_INCORRECT;
return 0;
}
if (cc->nr >= QUADD_MAX_STACK_DEPTH) {
- cc->unw_rc = QUADD_URC_LEVEL_TOO_DEEP;
+ cc->urc_fp = QUADD_URC_LEVEL_TOO_DEEP;
return 0;
}
return NULL;
if (__copy_from_user_inatomic(&value, tail, sizeof(unsigned long))) {
- cc->unw_rc = QUADD_URC_EACCESS;
+ cc->urc_fp = QUADD_URC_EACCESS;
return NULL;
}
if (__copy_from_user_inatomic(&value_lr, tail + 1,
sizeof(value_lr))) {
- cc->unw_rc = QUADD_URC_EACCESS;
+ cc->urc_fp = QUADD_URC_EACCESS;
return NULL;
}
/* gcc arm frame */
if (__copy_from_user_inatomic(&value_fp, tail - 1,
sizeof(value_fp))) {
- cc->unw_rc = QUADD_URC_EACCESS;
+ cc->urc_fp = QUADD_URC_EACCESS;
return NULL;
}
if (nr_added == 0)
return NULL;
- if (cc->unw_method == QUADD_UNW_METHOD_MIXED &&
+ if (is_table_unwinding(cc) &&
is_ex_entry_exist(regs, value_lr, task))
return NULL;
struct mm_struct *mm = task->mm;
cc->nr = 0;
- cc->unw_rc = QUADD_URC_FP_INCORRECT;
+ cc->urc_fp = QUADD_URC_FP_INCORRECT;
if (!regs || !mm) {
- cc->unw_rc = QUADD_URC_FAILURE;
+ cc->urc_fp = QUADD_URC_FAILURE;
return 0;
}
vma = find_vma(mm, sp);
if (!vma) {
- cc->unw_rc = QUADD_URC_SP_INCORRECT;
+ cc->urc_fp = QUADD_URC_SP_INCORRECT;
return 0;
}
if (probe_kernel_address(fp, reg)) {
pr_warn_once("%s: failed for address: %#lx\n", __func__, fp);
- cc->unw_rc = QUADD_URC_EACCESS;
+ cc->urc_fp = QUADD_URC_EACCESS;
return 0;
}
&value,
(unsigned long __user *)fp + 1,
sizeof(unsigned long))) {
- cc->unw_rc = QUADD_URC_EACCESS;
+ cc->urc_fp = QUADD_URC_EACCESS;
return 0;
}
struct vm_area_struct *vma;
unsigned long __user *tail;
- cc->unw_rc = QUADD_URC_FP_INCORRECT;
+ cc->urc_fp = QUADD_URC_FP_INCORRECT;
if (!mm) {
- cc->unw_rc = QUADD_URC_FAILURE;
+ cc->urc_fp = QUADD_URC_FAILURE;
return cc->nr;
}
vma = find_vma(mm, cc->curr_sp);
if (!vma) {
- cc->unw_rc = QUADD_URC_SP_INCORRECT;
+ cc->urc_fp = QUADD_URC_SP_INCORRECT;
return cc->nr;
}
return NULL;
if (__copy_from_user_inatomic(&value, tail, sizeof(value))) {
- cc->unw_rc = QUADD_URC_EACCESS;
+ cc->urc_fp = QUADD_URC_EACCESS;
return NULL;
}
if (__copy_from_user_inatomic(&value_lr, tail + 1,
sizeof(value_lr))) {
- cc->unw_rc = QUADD_URC_EACCESS;
+ cc->urc_fp = QUADD_URC_EACCESS;
return NULL;
}
/* gcc arm frame */
if (__copy_from_user_inatomic(&value_fp, tail - 1,
sizeof(value_fp))) {
- cc->unw_rc = QUADD_URC_EACCESS;
+ cc->urc_fp = QUADD_URC_EACCESS;
return NULL;
}
if (nr_added == 0)
return NULL;
- if (cc->unw_method == QUADD_UNW_METHOD_MIXED &&
+ if (is_table_unwinding(cc) &&
is_ex_entry_exist(regs, value_lr, task))
return NULL;
struct mm_struct *mm = task->mm;
cc->nr = 0;
- cc->unw_rc = QUADD_URC_FP_INCORRECT;
+ cc->urc_fp = QUADD_URC_FP_INCORRECT;
if (!regs || !mm) {
- cc->unw_rc = QUADD_URC_FAILURE;
+ cc->urc_fp = QUADD_URC_FAILURE;
return 0;
}
vma = find_vma(mm, sp);
if (!vma) {
- cc->unw_rc = QUADD_URC_SP_INCORRECT;
+ cc->urc_fp = QUADD_URC_SP_INCORRECT;
return 0;
}
if (probe_kernel_address((unsigned long)fp, reg)) {
pr_warn_once("%s: failed for address: %#x\n", __func__, fp);
- cc->unw_rc = QUADD_URC_EACCESS;
+ cc->urc_fp = QUADD_URC_EACCESS;
return 0;
}
&value,
(u32 __user *)(fp + sizeof(u32)),
sizeof(value))) {
- cc->unw_rc = QUADD_URC_EACCESS;
+ cc->urc_fp = QUADD_URC_EACCESS;
return 0;
}
struct vm_area_struct *vma;
u32 __user *tail;
- cc->unw_rc = QUADD_URC_FP_INCORRECT;
+ cc->urc_fp = QUADD_URC_FP_INCORRECT;
if (!mm) {
- cc->unw_rc = QUADD_URC_FAILURE;
+ cc->urc_fp = QUADD_URC_FAILURE;
return cc->nr;
}
vma = find_vma(mm, cc->curr_sp);
if (!vma) {
- cc->unw_rc = QUADD_URC_SP_INCORRECT;
+ cc->urc_fp = QUADD_URC_SP_INCORRECT;
return cc->nr;
}
struct task_struct *task)
{
if (cc->nr > 0) {
- if (cc->unw_rc == QUADD_URC_LEVEL_TOO_DEEP)
+ if (cc->urc_fp == QUADD_URC_LEVEL_TOO_DEEP)
return cc->nr;
#ifdef CONFIG_ARM64
return get_user_callchain_fp(regs, cc, task);
}
-static unsigned int
-get_user_callchain_ut(struct pt_regs *regs,
- struct quadd_callchain *cc,
- struct task_struct *task)
-{
- int nr_prev;
- unsigned long prev_sp;
-
- do {
- nr_prev = cc->nr;
- prev_sp = cc->curr_sp;
-
- quadd_get_user_cc_dwarf(regs, cc, task);
- if (nr_prev > 0 && cc->nr == nr_prev)
- break;
-
- nr_prev = cc->nr;
-
- quadd_get_user_cc_arm32_ehabi(regs, cc, task);
- } while (nr_prev != cc->nr &&
- (cc->nr <= 1 || cc->curr_sp > prev_sp));
-
- return cc->nr;
-}
-
static unsigned int
get_user_callchain_mixed(struct pt_regs *regs,
struct quadd_callchain *cc,
{
int nr_prev;
unsigned long prev_sp;
+ struct quadd_unw_methods *um = &cc->um;
do {
nr_prev = cc->nr;
prev_sp = cc->curr_sp;
- quadd_get_user_cc_dwarf(regs, cc, task);
- quadd_get_user_cc_arm32_ehabi(regs, cc, task);
+ if (um->dwarf)
+ quadd_get_user_cc_dwarf(regs, cc, task);
+ if (um->ut)
+ quadd_get_user_cc_arm32_ehabi(regs, cc, task);
if (nr_prev != cc->nr) {
if (cc->nr > 1 &&
continue;
}
- __get_user_callchain_fp(regs, cc, task);
+ if (um->fp)
+ __get_user_callchain_fp(regs, cc, task);
} while (nr_prev != cc->nr &&
(cc->nr <= 1 || cc->curr_sp > prev_sp));
struct quadd_ctx *ctx,
struct task_struct *task)
{
- unsigned int method = cc->unw_method;
-
cc->nr = 0;
if (!regs) {
- cc->unw_rc = QUADD_URC_FAILURE;
+ cc->urc_fp = QUADD_URC_FAILURE;
+ cc->urc_ut = QUADD_URC_FAILURE;
+ cc->urc_dwarf = QUADD_URC_FAILURE;
return 0;
}
cc->cs_64 = 0;
#endif
- cc->unw_rc = 0;
-
- switch (method) {
- case QUADD_UNW_METHOD_FP:
- __get_user_callchain_fp(regs, cc, task);
- break;
+ cc->urc_fp = QUADD_URC_NONE;
+ cc->urc_ut = QUADD_URC_NONE;
+ cc->urc_dwarf = QUADD_URC_NONE;
- case QUADD_UNW_METHOD_EHT:
- get_user_callchain_ut(regs, cc, task);
- break;
-
- case QUADD_UNW_METHOD_MIXED:
- get_user_callchain_mixed(regs, cc, task);
- break;
-
- case QUADD_UNW_METHOD_NONE:
- default:
- break;
- }
+ get_user_callchain_mixed(regs, cc, task);
return cc->nr;
}
struct quadd_hrt_ctx;
+/*
+ * quadd_unw_methods - bitfield selecting which user-space unwinding
+ * methods are active for a sample/session.  Bits are set from the
+ * QUADD_PARAM_EXTRA_BT_* parameter flags and reported to user space
+ * via the QUADD_HDR_BT_* header bits:
+ *   fp    - frame-pointer based unwinding
+ *   ut    - ARM EHABI exception-handling unwind tables
+ *   ut_ce - unwind-table variant selected by QUADD_PARAM_EXTRA_BT_UT_CE
+ *           (exact semantics not visible here -- confirm against caller)
+ *   dwarf - DWARF call-frame-information unwinding
+ */
+struct quadd_unw_methods {
+ unsigned int
+ fp:1,
+ ut:1,
+ ut_ce:1,
+ dwarf:1;
+};
+
struct quadd_callchain {
int nr;
int cs_64;
- unsigned int unw_method;
- unsigned int unw_rc;
+ struct quadd_unw_methods um;
+
+ unsigned int urc_fp;
+ unsigned int urc_ut;
+ unsigned int urc_dwarf;
unsigned long curr_sp;
unsigned long curr_fp;
mmap_hdr->magic = QUADD_MMAP_HEADER_MAGIC;
mmap_hdr->version = QUADD_MMAP_HEADER_VERSION;
mmap_hdr->cpu_id = cpu_id;
+ mmap_hdr->samples_version = QUADD_SAMPLES_VERSION;
rb_hdr = (struct quadd_ring_buffer_hdr *)(mmap_hdr + 1);
rb->rb_hdr = rb_hdr;
unsigned int unw_type;
int is_eh = 1, mode = sf->mode;
- cc->unw_rc = QUADD_URC_FAILURE;
+ cc->urc_dwarf = QUADD_URC_FAILURE;
user_reg_size = get_user_reg_size(mode);
while (1) {
sp = sf->vregs[regnum_sp(mode)];
if (!validate_stack_addr(sp, vma_sp, user_reg_size)) {
- cc->unw_rc = -QUADD_URC_SP_INCORRECT;
+ cc->urc_dwarf = QUADD_URC_SP_INCORRECT;
break;
}
if (!is_vma_addr(addr, vma_pc, user_reg_size)) {
err = quadd_get_dw_frames(vma_pc->vm_start, &ri_new);
if (err) {
- cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
+ cc->urc_dwarf = QUADD_URC_TBL_NOT_EXIST;
break;
}
if (!is_fde_entry_exist(ri, sf->pc, &__is_eh, &__is_debug)) {
pr_debug("eh/debug fde entries are not existed\n");
- cc->unw_rc = QUADD_URC_IDX_NOT_FOUND;
+ cc->urc_dwarf = QUADD_URC_IDX_NOT_FOUND;
break;
}
pr_debug("is_eh: %d, is_debug: %d\n", __is_eh, __is_debug);
err = unwind_frame(ri, sf, vma_sp, is_eh);
if (err < 0) {
- cc->unw_rc = -err;
+ cc->urc_dwarf = -err;
break;
}
} else {
- cc->unw_rc = -err;
+ cc->urc_dwarf = -err;
break;
}
}
if (!regs || !mm)
return 0;
- if (cc->unw_rc == QUADD_URC_LEVEL_TOO_DEEP)
+ if (cc->urc_dwarf == QUADD_URC_LEVEL_TOO_DEEP)
return nr_prev;
- cc->unw_rc = QUADD_URC_FAILURE;
+ cc->urc_dwarf = QUADD_URC_FAILURE;
if (nr_prev > 0) {
ip = cc->curr_pc;
err = quadd_get_dw_frames(vma->vm_start, &ri);
if (err) {
- cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
+ cc->urc_dwarf = QUADD_URC_TBL_NOT_EXIST;
return 0;
}
{
struct ex_region_info ri_new;
- cc->unw_rc = QUADD_URC_FAILURE;
+ cc->urc_ut = QUADD_URC_FAILURE;
pr_debug("fp_arm: %#lx, fp_thumb: %#lx, sp: %#lx, lr: %#lx, pc: %#lx\n",
frame->fp_arm, frame->fp_thumb,
break;
if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32))) {
- cc->unw_rc = -QUADD_URC_SP_INCORRECT;
+ cc->urc_ut = QUADD_URC_SP_INCORRECT;
break;
}
if (!is_vma_addr(ti->addr, vma_pc, sizeof(u32))) {
err = get_extabs_ehabi(vma_pc->vm_start, &ri_new);
if (err) {
- cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
+ cc->urc_ut = QUADD_URC_TBL_NOT_EXIST;
break;
}
err = unwind_frame(ri, frame, vma_sp);
if (err < 0) {
pr_debug("end unwind, urc: %ld\n", err);
- cc->unw_rc = -err;
+ cc->urc_ut = -err;
break;
}
return 0;
#endif
- if (cc->unw_rc == QUADD_URC_LEVEL_TOO_DEEP)
+ if (cc->urc_ut == QUADD_URC_LEVEL_TOO_DEEP)
return nr_prev;
- cc->unw_rc = QUADD_URC_FAILURE;
+ cc->urc_ut = QUADD_URC_FAILURE;
if (nr_prev > 0) {
ip = cc->curr_pc;
err = get_extabs_ehabi(vma->vm_start, &ri);
if (err) {
- cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
+ cc->urc_ut = QUADD_URC_TBL_NOT_EXIST;
return 0;
}
regs = get_irq_regs();
- if (!hrt.active)
+ if (!atomic_read(&hrt.active))
return HRTIMER_NORESTART;
qm_debug_handler_sample(regs);
hdr->reserved = 0;
hdr->extra_length = 0;
- hdr->reserved |= hrt.unw_method << QUADD_HDR_UNW_METHOD_SHIFT;
+ if (hdr->backtrace) {
+ struct quadd_unw_methods *um = &hrt.um;
+
+ hdr->reserved |= um->fp ? QUADD_HDR_BT_FP : 0;
+ hdr->reserved |= um->ut ? QUADD_HDR_BT_UT : 0;
+ hdr->reserved |= um->ut_ce ? QUADD_HDR_BT_UT_CE : 0;
+ hdr->reserved |= um->dwarf ? QUADD_HDR_BT_DWARF : 0;
+ }
if (hrt.use_arch_timer)
hdr->reserved |= QUADD_HDR_USE_ARCH_TIMER;
static void
read_all_sources(struct pt_regs *regs, struct task_struct *task)
{
- u32 state, extra_data = 0;
+ u32 state, extra_data = 0, urcs = 0;
int i, vec_idx = 0, bt_size = 0;
int nr_events = 0, nr_positive_events = 0;
struct pt_regs *user_regs;
- struct quadd_iovec vec[5];
+ struct quadd_iovec vec[6];
struct hrt_event_value events[QUADD_MAX_COUNTERS];
u32 events_extra[QUADD_MAX_COUNTERS];
cc->curr_pc = 0;
if (ctx->param.backtrace) {
- cc->unw_method = hrt.unw_method;
+ cc->um = hrt.um;
+
bt_size = quadd_get_user_callchain(user_regs, cc, ctx, task);
if (!bt_size && !user_mode(regs)) {
extra_data |= QUADD_SED_IP64;
}
- extra_data |= cc->unw_method << QUADD_SED_UNW_METHOD_SHIFT;
- s->reserved |= cc->unw_rc << QUADD_SAMPLE_URC_SHIFT;
+ urcs |= (cc->urc_fp & QUADD_SAMPLE_URC_MASK) <<
+ QUADD_SAMPLE_URC_SHIFT_FP;
+ urcs |= (cc->urc_ut & QUADD_SAMPLE_URC_MASK) <<
+ QUADD_SAMPLE_URC_SHIFT_UT;
+ urcs |= (cc->urc_dwarf & QUADD_SAMPLE_URC_MASK) <<
+ QUADD_SAMPLE_URC_SHIFT_DWARF;
+
+ s->reserved |= QUADD_SAMPLE_RES_URCS_ENABLED;
+
+ vec[vec_idx].base = &urcs;
+ vec[vec_idx].len = sizeof(urcs);
+ vec_idx++;
}
s->callchain_nr = bt_size;
struct event_data events[QUADD_MAX_COUNTERS];
/* static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 2); */
- if (likely(!hrt.active))
+ if (likely(!atomic_read(&hrt.active)))
return;
/*
if (__ratelimit(&ratelimit_state))
struct quadd_ctx *ctx = hrt.quadd_ctx;
/* static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 2); */
- if (likely(!hrt.active))
+ if (likely(!atomic_read(&hrt.active)))
return;
/*
if (__ratelimit(&ratelimit_state))
{
struct quadd_parameters *param;
- if (likely(!hrt.active))
+ if (likely(!atomic_read(&hrt.active)))
return;
if (!is_profile_process(current))
extra = param->reserved[QUADD_PARAM_IDX_EXTRA];
- if (extra & QUADD_PARAM_EXTRA_BT_MIXED)
- hrt.unw_method = QUADD_UNW_METHOD_MIXED;
- else if (extra & QUADD_PARAM_EXTRA_BT_UNWIND_TABLES)
- hrt.unw_method = QUADD_UNW_METHOD_EHT;
- else if (extra & QUADD_PARAM_EXTRA_BT_FP)
- hrt.unw_method = QUADD_UNW_METHOD_FP;
- else
- hrt.unw_method = QUADD_UNW_METHOD_NONE;
+ if (param->backtrace) {
+ struct quadd_unw_methods *um = &hrt.um;
+
+ um->fp = extra & QUADD_PARAM_EXTRA_BT_FP ? 1 : 0;
+ um->ut = extra & QUADD_PARAM_EXTRA_BT_UT ? 1 : 0;
+ um->ut_ce = extra & QUADD_PARAM_EXTRA_BT_UT_CE ? 1 : 0;
+ um->dwarf = extra & QUADD_PARAM_EXTRA_BT_DWARF ? 1 : 0;
+
+ pr_info("unw methods: fp/ut/ut_ce/dwarf: %u/%u/%u/%u\n",
+ um->fp, um->ut, um->ut_ce, um->dwarf);
+ }
if (hrt.tc && (extra & QUADD_PARAM_EXTRA_USE_ARCH_TIMER))
hrt.use_arch_timer = 1;
quadd_ma_start(&hrt);
- hrt.active = 1;
+ atomic_set(&hrt.active, 1);
pr_info("Start hrt: freq/period: %ld/%llu\n", freq, period);
return 0;
quadd_ma_stop(&hrt);
- hrt.active = 0;
+ atomic_set(&hrt.active, 0);
atomic64_set(&hrt.counter_samples, 0);
atomic64_set(&hrt.skipped_samples, 0);
void quadd_hrt_deinit(void)
{
- if (hrt.active)
+ if (atomic_read(&hrt.active))
quadd_hrt_stop();
free_percpu(hrt.cpu_ctx);
struct quadd_cpu_context *cpu_ctx;
hrt.quadd_ctx = ctx;
- hrt.active = 0;
+ atomic_set(&hrt.active, 0);
freq = ctx->param.freq;
freq = max_t(long, QUADD_HRT_MIN_FREQ, freq);
struct quadd_ctx *quadd_ctx;
- int active;
+ atomic_t active;
atomic_t nr_active_all_core;
atomic64_t counter_samples;
struct timecounter *tc;
int use_arch_timer;
- unsigned int unw_method;
+ struct quadd_unw_methods um;
int get_stack_offset;
};
struct quadd_hrt_ctx *hrt_ctx = (struct quadd_hrt_ctx *)data;
struct timer_list *timer = &hrt_ctx->ma_timer;
- if (hrt_ctx->active == 0)
+ if (!atomic_read(&hrt_ctx->active))
return;
check_ma(hrt_ctx);
int pl310_events_id;
int nr_pmu = 0, nr_pl310 = 0;
struct task_struct *task;
- unsigned int extra;
u64 *low_addr_p;
if (!validate_freq(p->freq)) {
}
}
- extra = p->reserved[QUADD_PARAM_IDX_EXTRA];
-
- if (extra & QUADD_PARAM_EXTRA_BT_UNWIND_TABLES)
- pr_info("unwinding: exception-handling tables\n");
-
- if (extra & QUADD_PARAM_EXTRA_BT_FP)
- pr_info("unwinding: frame pointers\n");
-
- if (extra & QUADD_PARAM_EXTRA_BT_MIXED)
- pr_info("unwinding: mixed mode\n");
-
low_addr_p = (u64 *)&p->reserved[QUADD_PARAM_IDX_BT_LOWER_BOUND];
ctx.hrt->low_addr = (unsigned long)*low_addr_p;
pr_info("bt lower bound: %#lx\n", ctx.hrt->low_addr);
#ifndef __QUADD_VERSION_H
#define __QUADD_VERSION_H
-#define QUADD_MODULE_VERSION "1.96"
+#define QUADD_MODULE_VERSION "1.97"
#define QUADD_MODULE_BRANCH "Dev"
#endif /* __QUADD_VERSION_H */
#include <linux/ioctl.h>
-#define QUADD_SAMPLES_VERSION 32
-#define QUADD_IO_VERSION 17
+#define QUADD_SAMPLES_VERSION 33
+#define QUADD_IO_VERSION 18
#define QUADD_IO_VERSION_DYNAMIC_RB 5
#define QUADD_IO_VERSION_RB_MAX_FILL_COUNT 6
#define QUADD_IO_VERSION_BT_LOWER_BOUND 15
#define QUADD_IO_VERSION_STACK_OFFSET 16
#define QUADD_IO_VERSION_SECTIONS_INFO 17
+#define QUADD_IO_VERSION_UNW_METHODS_OPT 18
#define QUADD_SAMPLE_VERSION_THUMB_MODE_FLAG 17
#define QUADD_SAMPLE_VERSION_GROUP_SAMPLES 18
#define QUADD_SAMPLE_VERSION_HDR_ARCH_TIMER 30
#define QUADD_SAMPLE_VERSION_STACK_OFFSET 31
#define QUADD_SAMPLE_VERSION_SCHED_TASK_STATE 32
+#define QUADD_SAMPLE_VERSION_URCS 33
#define QUADD_MMAP_HEADER_VERSION 1
#pragma pack(push, 1)
-#define QUADD_SAMPLE_UNW_METHOD_SHIFT 0
-#define QUADD_SAMPLE_UNW_METHOD_MASK (1 << QUADD_SAMPLE_UNW_METHOD_SHIFT)
+#define QUADD_SAMPLE_RES_URCS_ENABLED (1 << 0)
-enum {
- QUADD_UNW_METHOD_FP = 0,
- QUADD_UNW_METHOD_EHT,
- QUADD_UNW_METHOD_MIXED,
- QUADD_UNW_METHOD_NONE,
-};
+#define QUADD_SAMPLE_URC_MASK 0xff
-#define QUADD_SAMPLE_URC_SHIFT 1
-#define QUADD_SAMPLE_URC_MASK (0x0f << QUADD_SAMPLE_URC_SHIFT)
+#define QUADD_SAMPLE_URC_SHIFT_FP 0
+#define QUADD_SAMPLE_URC_SHIFT_UT (1 * 8)
+#define QUADD_SAMPLE_URC_SHIFT_DWARF (2 * 8)
enum {
QUADD_URC_SUCCESS = 0,
QUADD_URC_PC_INCORRECT,
QUADD_URC_LEVEL_TOO_DEEP,
QUADD_URC_FP_INCORRECT,
+ QUADD_URC_NONE,
QUADD_URC_MAX,
};
#define QUADD_SED_IP64 (1 << 0)
-#define QUADD_SED_UNW_METHOD_SHIFT 1
-#define QUADD_SED_UNW_METHOD_MASK (0x07 << QUADD_SED_UNW_METHOD_SHIFT)
-
-#define QUADD_SED_STACK_OFFSET_SHIFT 4
+#define QUADD_SED_STACK_OFFSET_SHIFT 1
#define QUADD_SED_STACK_OFFSET_MASK (0xffff << QUADD_SED_STACK_OFFSET_SHIFT)
enum {
#define QUADD_HEADER_MAGIC 0x1122
-#define QUADD_HDR_UNW_METHOD_SHIFT 0
-#define QUADD_HDR_UNW_METHOD_MASK (0x07 << QUADD_HDR_UNW_METHOD_SHIFT)
-
+#define QUADD_HDR_BT_FP (1 << 0)
+#define QUADD_HDR_BT_UT (1 << 1)
+#define QUADD_HDR_BT_UT_CE (1 << 2)
#define QUADD_HDR_USE_ARCH_TIMER (1 << 3)
#define QUADD_HDR_STACK_OFFSET (1 << 4)
+#define QUADD_HDR_BT_DWARF (1 << 5)
struct quadd_header_data {
u16 magic;
#define QUADD_PARAM_EXTRA_GET_MMAP (1 << 0)
#define QUADD_PARAM_EXTRA_BT_FP (1 << 1)
-#define QUADD_PARAM_EXTRA_BT_UNWIND_TABLES (1 << 2)
+#define QUADD_PARAM_EXTRA_BT_UT (1 << 2)
#define QUADD_PARAM_EXTRA_BT_MIXED (1 << 3)
#define QUADD_PARAM_EXTRA_USE_ARCH_TIMER (1 << 4)
#define QUADD_PARAM_EXTRA_STACK_OFFSET (1 << 5)
+#define QUADD_PARAM_EXTRA_BT_UT_CE (1 << 6)
+#define QUADD_PARAM_EXTRA_BT_DWARF (1 << 7)
struct quadd_parameters {
u32 freq;