#ifndef _LINUX_TRACE_EVENT_H
#define _LINUX_TRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>

struct trace_array;
struct trace_buffer;
struct tracer;
struct dentry;
struct bpf_prog;
const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
				  unsigned long flags,
				  const struct trace_print_flags *flag_array);
const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				    const struct trace_print_flags *symbol_array);
#if BITS_PER_LONG == 32
const char *trace_print_symbols_seq_u64(struct trace_seq *p,
					unsigned long long val,
					const struct trace_print_flags_u64
					*symbol_array);
#endif
const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
				    unsigned int bitmask_size);
const char *trace_print_hex_seq(struct trace_seq *p,
				const unsigned char *buf, int len);
const char *trace_print_array_seq(struct trace_seq *p,
				  const void *buf, int count,
				  size_t el_size);
struct trace_iterator;
struct trace_event;
int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *event);
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	unsigned short		migrate_disable;
	unsigned short		padding;
	unsigned char		preempt_lazy_count;
};
#define TRACE_EVENT_TYPE_MAX						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
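/*
 * For illustration: with the layout above, "type" is an unsigned short,
 * so TRACE_EVENT_TYPE_MAX evaluates to (1 << 16) - 1 = 65535.
 */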
/*
 * Trace iterator - used by printout routines that present trace
 * results to users; such routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	struct trace_buffer	*trace_buffer;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	cpumask_var_t		started;

	/* true when the currently open file is a snapshot */
	bool			snapshot;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			cpu;
	u64			ts;

	/* All new fields here will be zeroed out in pipe_read */
};
enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};
typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags, struct trace_event *event);
struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};
struct trace_event {
	struct hlist_node	node;
	struct list_head	list;
	int			type;
	struct trace_event_functions *funcs;
};
extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);
/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};
/*
 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
 * simplifies those functions and keeps them in sync.
 */
static inline enum print_line_t trace_handle_return(struct trace_seq *s)
{
	return trace_seq_has_overflowed(s) ?
		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
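/*
 * Example (illustrative sketch only; "my_event" and my_output() are
 * hypothetical names, not part of this header): a minimal print
 * callback that writes to iter->seq, reports overflow through
 * trace_handle_return(), and is hooked up via register_trace_event().
 * register_trace_event() assigns my_event.type on success and returns
 * the type id (0 on failure):
 *
 *	static enum print_line_t my_output(struct trace_iterator *iter,
 *					   int flags,
 *					   struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "my event fired\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 *
 *	static struct trace_event_functions my_funcs = {
 *		.trace	= my_output,
 *	};
 *	static struct trace_event my_event = { .funcs = &my_funcs };
 *
 *	register_trace_event(&my_event);
 */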
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);
struct trace_event_file;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};
struct trace_event_call;
struct trace_event_class {
	const char		*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct trace_event_call *event,
				       enum trace_reg type, void *data);
	int			(*define_fields)(struct trace_event_call *);
	struct list_head	*(*get_fields)(struct trace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct trace_event_call *);
};
extern int trace_event_reg(struct trace_event_call *event,
			   enum trace_reg type, void *data);
struct trace_event_buffer {
	struct ring_buffer		*buffer;
	struct ring_buffer_event	*event;
	struct trace_event_file		*trace_file;
	void				*entry;
	unsigned long			flags;
	int				pc;
};
void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
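/*
 * Example (illustrative sketch; struct my_entry, "my_file" and the
 * field name are hypothetical): the usual reserve/fill/commit sequence
 * used by event probe functions:
 *
 *	struct trace_event_buffer fbuffer;
 *	struct my_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, my_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->value = 42;
 *	trace_event_buffer_commit(&fbuffer);
 */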
enum {
	TRACE_EVENT_FL_FILTERED_BIT,
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
	TRACE_EVENT_FL_WAS_ENABLED_BIT,
	TRACE_EVENT_FL_TRACEPOINT_BIT,
	TRACE_EVENT_FL_KPROBE_BIT,
	TRACE_EVENT_FL_UPROBE_BIT,
};
/*
 * Event flags:
 *  FILTERED	  - The event has a filter attached
 *  CAP_ANY	  - Any user can enable for perf
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  WAS_ENABLED	  - Set and stays set when an event was ever enabled
 *		    (used for module unloading; if a module event is enabled,
 *		     it is best to clear the buffers that used it).
 *  TRACEPOINT	  - Event is a tracepoint
 *  KPROBE	  - Event is a kprobe
 *  UPROBE	  - Event is a uprobe
 */
enum {
	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
	TRACE_EVENT_FL_WAS_ENABLED	= (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
};

#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
struct trace_event_call {
	struct list_head	list;
	struct trace_event_class *class;
	union {
		char			*name;
		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
		struct tracepoint	*tp;
	};
	struct trace_event	event;
	char			*print_fmt;
	struct event_filter	*filter;
	void			*mod;
	void			*data;
	/*
	 * 32 bit flags:
	 *   bit 0:		filter_active
	 *   bit 1:		allow trace by non root (cap any)
	 *   bit 2:		failed to apply filter
	 *   bit 3:		trace internal event (do not enable)
	 *   bit 4:		event was ever enabled
	 *   bit 5:		event is a tracepoint
	 *   bit 6:		event is a kprobe
	 *   bit 7:		event is a uprobe
	 */
	int			flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
	int				perf_refcount;
	struct hlist_head __percpu	*perf_events;
	struct bpf_prog			*prog;

	int	(*perf_perm)(struct trace_event_call *,
			     struct perf_event *);
#endif
};
static inline const char *
trace_event_name(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
		return call->tp ? call->tp->name : NULL;
	else
		return call->name;
}
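/*
 * For example, in code that already holds a valid trace_event_call
 * pointer (hypothetical variable "call"):
 *
 *	pr_debug("enabling event %s\n", trace_event_name(call));
 *
 * This handles both the tracepoint-backed case (call->tp->name) and
 * the plain-name case (call->name) transparently.
 */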
struct trace_subsystem_dir;
enum {
	EVENT_FILE_FL_ENABLED_BIT,
	EVENT_FILE_FL_RECORDED_CMD_BIT,
	EVENT_FILE_FL_FILTERED_BIT,
	EVENT_FILE_FL_NO_SET_FILTER_BIT,
	EVENT_FILE_FL_SOFT_MODE_BIT,
	EVENT_FILE_FL_SOFT_DISABLED_BIT,
	EVENT_FILE_FL_TRIGGER_MODE_BIT,
	EVENT_FILE_FL_TRIGGER_COND_BIT,
	EVENT_FILE_FL_PID_FILTER_BIT,
};
/*
 * Event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  FILTERED	  - The event has a filter attached
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  SOFT_MODE	  - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *		    tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 *  PID_FILTER	  - When set, the event is filtered based on pid
 */
enum {
	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
	EVENT_FILE_FL_RECORDED_CMD	= (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
	EVENT_FILE_FL_FILTERED		= (1 << EVENT_FILE_FL_FILTERED_BIT),
	EVENT_FILE_FL_NO_SET_FILTER	= (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
	EVENT_FILE_FL_SOFT_MODE		= (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
	EVENT_FILE_FL_SOFT_DISABLED	= (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
};
struct trace_event_file {
	struct list_head		list;
	struct trace_event_call		*event_call;
	struct event_filter		*filter;
	struct dentry			*dir;
	struct trace_array		*tr;
	struct trace_subsystem_dir	*system;
	struct list_head		triggers;

	/*
	 * 32 bit flags:
	 *   bit 0:		enabled
	 *   bit 1:		enabled cmd record
	 *   bit 2:		enable/disable with the soft disable bit
	 *   bit 3:		soft disabled
	 *   bit 4:		trigger enabled
	 *
	 * Note: The bits must be set atomically to prevent races
	 * from other writers. Reads of flags do not need to be in
	 * sync as they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such. Which is mostly OK ;-)
	 */
	unsigned long		flags;
	atomic_t		sm_ref;	/* soft-mode reference counter */
	atomic_t		tm_ref;	/* trigger-mode reference counter */
};
#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)		\
	{								\
		event_##name.flags |= value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);
#define __TRACE_EVENT_PERF_PERM(name, expr...)				\
	static int perf_perm_##name(struct trace_event_call *tp_event, \
				    struct perf_event *p_event)		\
	{								\
		return ({ expr; });					\
	}								\
	static int __init trace_init_perf_perm_##name(void)		\
	{								\
		event_##name.perf_perm = &perf_perm_##name;		\
		return 0;						\
	}								\
	early_initcall(trace_init_perf_perm_##name);
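/*
 * Example (illustrative sketch; "my_event" is a hypothetical event):
 * restrict perf access to a sensitive event to CAP_SYS_ADMIN by
 * supplying an expression that evaluates to 0 (allow) or -EPERM:
 *
 *	__TRACE_EVENT_PERF_PERM(my_event,
 *				capable(CAP_SYS_ADMIN) ? 0 : -EPERM);
 */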
#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
enum event_trigger_type {
	ETT_NONE		= (0),
	ETT_TRACE_ONOFF		= (1 << 0),
	ETT_SNAPSHOT		= (1 << 1),
	ETT_STACKTRACE		= (1 << 2),
	ETT_EVENT_ENABLE	= (1 << 3),
	ETT_EVENT_HIST		= (1 << 4),
	ETT_HIST_ENABLE		= (1 << 5),
};
extern int filter_match_preds(struct event_filter *filter, void *rec);

extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
						   void *rec);
extern void event_triggers_post_call(struct trace_event_file *file,
				     enum event_trigger_type tt,
				     void *rec);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
	unsigned long eflags = file->flags;

	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
			event_triggers_call(file, NULL);
		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
			return true;
		if (eflags & EVENT_FILE_FL_PID_FILTER)
			return trace_event_ignore_this_pid(file);
	}
	return false;
}
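/*
 * Example (illustrative sketch; "my_file" is a hypothetical
 * trace_event_file): an event probe typically bails out early when the
 * event is soft disabled, before reserving any ring buffer space:
 *
 *	if (trace_trigger_soft_disabled(my_file))
 *		return;
 *	... reserve, fill and commit the event ...
 */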
#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
#else
static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	return 1;
}
#endif
enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
	FILTER_COMM,
	FILTER_CPU,
};
extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);

#define is_signed_type(type)	(((type)(-1)) < (type)1)
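/*
 * Example (illustrative sketch; "call" and struct my_entry are
 * hypothetical): defining a filterable "addr" field for an event:
 *
 *	struct my_entry {
 *		struct trace_entry	ent;
 *		unsigned long		addr;
 *	};
 *
 *	trace_define_field(call, "unsigned long", "addr",
 *			   offsetof(struct my_entry, addr),
 *			   sizeof(unsigned long),
 *			   is_signed_type(unsigned long), FILTER_OTHER);
 */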
int trace_set_clr_event(const char *system, const char *event, int set);
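/*
 * For example, enabling sched_switch from kernel code (sketch):
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);
 *
 * A NULL system or event name acts as a wildcard for that part of
 * the match.
 */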
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to allocate the static variable to fmt if it is not a
 * constant. Even with the outer if statement optimizing out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
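/*
 * Typical use (sketch; "count" is a hypothetical variable). The format
 * string is constant, so the binary __trace_bprintk() path is taken:
 *
 *	event_trace_printk(_THIS_IP_, "processed %d packets\n", count);
 */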
#ifdef CONFIG_PERF_EVENTS

struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
				      char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
			       struct trace_event_call *call, u64 count,
			       struct pt_regs *regs, struct hlist_head *head,
			       struct task_struct *task);
static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
		      u64 count, struct pt_regs *regs, void *head,
		      struct task_struct *task)
{
	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}
#endif /* CONFIG_PERF_EVENTS */
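/*
 * Example (illustrative sketch; "rec_size", "event_type" and "head"
 * are hypothetical): the usual perf probe sequence is to allocate a
 * per-context buffer, fill the record, then submit it:
 *
 *	int rctx;
 *	struct pt_regs *regs;
 *	void *entry;
 *
 *	entry = perf_trace_buf_alloc(rec_size, &regs, &rctx);
 *	if (!entry)
 *		return;
 *	... fill *entry ...
 *	perf_trace_buf_submit(entry, rec_size, rctx, event_type, 1,
 *			      regs, head, NULL);
 */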
#endif /* _LINUX_TRACE_EVENT_H */