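/*
 * Tracepoint definitions for bcache, the block-layer SSD cache.
 * Processed by the event machinery pulled in via <trace/define_trace.h>
 * at the bottom of this file; the trace_*() call sites live in
 * drivers/md/bcache/.
 */
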
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

struct search;

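/*
 * DECLARE_EVENT_CLASS() defines a shared record layout and format
 * string; DEFINE_EVENT() then stamps out individual tracepoints from a
 * class.  Each DEFINE_EVENT(class, name, ...) generates a trace_<name>()
 * function with the TP_PROTO() signature, so a call site in
 * drivers/md/bcache/ looks roughly like:
 *
 *	trace_bcache_request_start(s, bio);
 */
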
DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct search *s, struct bio *bio),
	TP_ARGS(s, bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned int,	orig_major		)
		__field(unsigned int,	orig_minor		)
		__field(sector_t,	sector			)
		__field(sector_t,	orig_sector		)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->orig_major	= s->d->disk->major;
		__entry->orig_minor	= s->d->disk->first_minor;
		__entry->sector		= bio->bi_sector;
		/* undo the 16 sector (BDEV_DATA_START_DEFAULT) sb offset */
		__entry->orig_sector	= bio->bi_sector - 16;
		__entry->nr_sector	= bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);
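
/*
 * Rendered with the TP_printk() format above, a bcache_request event
 * looks roughly like (values illustrative):
 *
 *	254,0 R 4096 + 8 (from 8,16 @ 4080)
 *
 * i.e. device major,minor, rwbs flags, start sector + sector count, and
 * the originating device plus its (superblock-relative) sector.
 */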

DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32,	size				)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(bool,	dirty				)
	),

	TP_fast_assign(
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);
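
/*
 * KEY_INODE()/KEY_OFFSET()/KEY_SIZE()/KEY_DIRTY() are bcache's bkey
 * field accessors; a bkey describes an extent as inode:offset with a
 * length and a dirty bit.
 */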

DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);
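
/*
 * PTR_BUCKET_NR() resolves a key's first pointer to its bucket number
 * on the cache device; that single number is enough to identify a
 * btree node in the trace.
 */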

/* request.c */

DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct search *s, struct bio *bio),
	TP_ARGS(s, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct search *s, struct bio *bio),
	TP_ARGS(s, bio)
);

DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
	),

	TP_printk("%d,%d  %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
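
/*
 * The bypass events fire when a request is sent straight to the backing
 * device: bcache_bypass_sequential when a stream grows past the
 * sequential_cutoff heuristic, bcache_bypass_congested when the cache
 * device is considered congested.
 */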

TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		cache_hit		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
		__entry->cache_hit = hit;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d  %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

TRACE_EVENT(bcache_write,
	TP_PROTO(struct bio *bio, bool writeback, bool bypass),
	TP_ARGS(bio, writeback, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		writeback		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
		__entry->writeback = writeback;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d  %s %llu + %u writeback %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Journal */
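
/*
 * bcache_journal_full fires when the journal runs out of free buckets,
 * bcache_journal_entry_full when the current journal entry has no room
 * for more keys; either way the write must wait for journal space.
 */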

DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,		uuid,	16 )
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bcache_bio, bcache_journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Btree */
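
/*
 * bcache_btree_cache_cannibalize fires when allocating an in-memory
 * btree node fails and bcache instead reclaims ("cannibalizes") a node
 * already in its btree node cache.
 */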

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	block			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block	= b->written;
		__entry->keys	= b->sets[b->nsets].data->keys;
	),

	TP_printk("bucket %zu block %u keys %u",
		  __entry->bucket, __entry->block, __entry->keys)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc_fail,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned,	nodes			)
	),

	TP_fast_assign(
		__entry->nodes	= nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64,	btree_node			)
		__field(u32,	btree_level			)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(u32,	size				)
		__field(u8,	dirty				)
		__field(u8,	op				)
		__field(u8,	status				)
	),

	TP_fast_assign(
		__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level = b->level;
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
		__entry->op = op;
		__entry->status = status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);
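
/*
 * The format above reads as "<status> for <op> at <bucket>(<level>):
 * <inode>:<offset> len <size> dirty <dirty>", where op and status are
 * the numeric insert operation and result codes supplied by the caller.
 */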

DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys	= keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

/* Allocator */
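
/*
 * bcache_alloc_invalidate fires when the allocator invalidates buckets
 * to refill free_inc; bcache_alloc_fail when it cannot produce a free
 * bucket at all.  fifo_used() reports how many entries each free list
 * currently holds.
 */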

TRACE_EVENT(bcache_alloc_invalidate,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca),

	TP_STRUCT__entry(
		__field(unsigned,	free			)
		__field(unsigned,	free_inc		)
		__field(unsigned,	free_inc_size		)
		__field(unsigned,	unused			)
	),

	TP_fast_assign(
		__entry->free		= fifo_used(&ca->free);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->free_inc_size	= ca->free_inc.size;
		__entry->unused		= fifo_used(&ca->unused);
	),

	TP_printk("free %u free_inc %u/%u unused %u", __entry->free,
		  __entry->free_inc, __entry->free_inc_size, __entry->unused)
);

TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca),

	TP_STRUCT__entry(
		__field(unsigned,	free			)
		__field(unsigned,	free_inc		)
		__field(unsigned,	unused			)
		__field(unsigned,	blocked			)
	),

	TP_fast_assign(
		__entry->free		= fifo_used(&ca->free);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->unused		= fifo_used(&ca->unused);
		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("free %u free_inc %u unused %u blocked %u", __entry->free,
		  __entry->free_inc, __entry->unused, __entry->blocked)
);

/* Background writeback */
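
/*
 * bcache_writeback fires for each dirty key flushed to the backing
 * device by the background writeback thread; bcache_writeback_collision
 * when the key changed under it before the flush could be recorded.
 */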

DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
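
/*
 * <trace/define_trace.h> re-includes this header several times (hence
 * TRACE_HEADER_MULTI_READ) to expand the event macros in different
 * ways.  Exactly one .c file instantiates these tracepoints by defining
 * CREATE_TRACE_POINTS before the include, e.g.:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/bcache.h>
 */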