/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints on a given cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);

/* Number of pinned task breakpoints on a given cpu */
static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);

/* Number of non-pinned cpu/task breakpoints on a given cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);

static int nr_slots[TYPE_MAX];

static int constraints_initialized;

/* Gather the total number of pinned and un-pinned breakpoints for the relevant set of cpus */
struct bp_busy_slots {
        unsigned int pinned;
        unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
        return 1;
}

static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
        if (bp->attr.bp_type & HW_BREAKPOINT_RW)
                return TYPE_DATA;

        return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * can have on this cpu.
 *
 * tsk_pinned[n] counts the tasks that carry n + 1 pinned breakpoints
 * of this type on this cpu, so the highest non-zero entry gives the
 * per-task maximum.
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
        int i;
        unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);

        for (i = nr_slots[type] - 1; i >= 0; i--) {
                if (tsk_pinned[i] > 0)
                        return i + 1;
        }

        return 0;
}

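/*
 * Worked example (illustrative, not from the original source): with
 * nr_slots[TYPE_DATA] = 4, if two tasks each hold one pinned data
 * breakpoint on this cpu and a third task holds three, then
 * tsk_pinned = { 2, 0, 1, 0 }; the highest non-zero entry is i = 2,
 * so max_task_bp_pinned() reports 3.
 */
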
static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type)
{
        struct perf_event_context *ctx = tsk->perf_event_ctxp;
        struct list_head *list;
        struct perf_event *bp;
        unsigned long flags;
        int count = 0;

        if (WARN_ONCE(!ctx, "No perf context for this task"))
                return 0;

        list = &ctx->event_list;

        raw_spin_lock_irqsave(&ctx->lock, flags);

        /*
         * The breakpoint currently being registered is not yet linked
         * into the context's event list at open() callback time.
         */
        list_for_each_entry(bp, list, event_entry) {
                if (bp->attr.type == PERF_TYPE_BREAKPOINT)
                        if (find_slot_idx(bp) == type)
                                count += hw_breakpoint_weight(bp);
        }

        raw_spin_unlock_irqrestore(&ctx->lock, flags);

        return count;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu >= 0) or across all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
                    enum bp_type_idx type)
{
        int cpu = bp->cpu;
        struct task_struct *tsk = bp->ctx->task;

        if (cpu >= 0) {
                slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
                if (!tsk)
                        slots->pinned += max_task_bp_pinned(cpu, type);
                else
                        slots->pinned += task_bp_pinned(tsk, type);
                slots->flexible = per_cpu(nr_bp_flexible[type], cpu);

                return;
        }

        for_each_online_cpu(cpu) {
                unsigned int nr;

                nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
                if (!tsk)
                        nr += max_task_bp_pinned(cpu, type);
                else
                        nr += task_bp_pinned(tsk, type);

                if (nr > slots->pinned)
                        slots->pinned = nr;

                nr = per_cpu(nr_bp_flexible[type], cpu);

                if (nr > slots->flexible)
                        slots->flexible = nr;
        }
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
        slots->pinned += weight;
}

/*
 * Add/remove a pinned task breakpoint in our constraint table.
 *
 * tsk_pinned is a histogram: slot n counts the tasks that carry
 * n + 1 pinned breakpoints, so enabling a breakpoint moves this task
 * from its old bucket to a higher one, and disabling does the reverse.
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
                                enum bp_type_idx type, int weight)
{
        unsigned int *tsk_pinned;
        int old_count = 0;
        int old_idx = 0;
        int idx = 0;

        old_count = task_bp_pinned(tsk, type);
        old_idx = old_count - 1;
        idx = old_idx + weight;

        tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
        if (enable) {
                tsk_pinned[idx]++;
                if (old_count > 0)
                        tsk_pinned[old_idx]--;
        } else {
                tsk_pinned[idx]--;
                if (old_count > 0)
                        tsk_pinned[old_idx]++;
        }
}

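/*
 * Worked example (illustrative, not from the original source):
 * continuing the example above, enabling a weight-1 breakpoint for the
 * task that already holds three gives old_count = 3, old_idx = 2 and
 * idx = 3, so tsk_pinned moves from { 2, 0, 1, 0 } to { 2, 0, 0, 1 }:
 * the task is re-filed from the "three breakpoints" bucket into the
 * "four breakpoints" bucket.
 */
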
/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
               int weight)
{
        int cpu = bp->cpu;
        struct task_struct *tsk = bp->ctx->task;

        /* Pinned counter task profiling */
        if (tsk) {
                if (cpu >= 0) {
                        toggle_bp_task_slot(tsk, cpu, enable, type, weight);
                        return;
                }

                for_each_online_cpu(cpu)
                        toggle_bp_task_slot(tsk, cpu, enable, type, weight);
                return;
        }

        /* Pinned counter cpu profiling */
        if (enable)
                per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
        else
                per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per-task
 *          breakpoints (for this cpu) plus the number of per-cpu
 *          breakpoints (for this cpu) doesn't use up all the debug
 *          registers.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per-cpu
 *          bp for every cpu and we keep the max one. Same for the per-task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible count, if any,
 *          must keep at least one register free (or the flexible events
 *          would never get scheduled).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
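/*
 * Worked example (illustrative, assuming x86 where HBP_NUM = 4 data
 * slots): if cpu 0 already has two pinned per-cpu breakpoints, its
 * busiest task holds one pinned breakpoint, and one flexible event
 * exists, then a new pinned per-cpu counter of weight 1 gives
 * slots.pinned = 2 + 1 + 1 = 4.  Since slots.flexible > 0, the check
 * below becomes 4 + 1 > 4 and __reserve_bp_slot() returns -ENOSPC.
 */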
static int __reserve_bp_slot(struct perf_event *bp)
{
        struct bp_busy_slots slots = {0};
        enum bp_type_idx type;
        int weight;

        /* We couldn't initialize breakpoint constraints on boot */
        if (!constraints_initialized)
                return -ENOMEM;

        /* Basic checks */
        if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
            bp->attr.bp_type == HW_BREAKPOINT_INVALID)
                return -EINVAL;

        type = find_slot_idx(bp);
        weight = hw_breakpoint_weight(bp);

        fetch_bp_busy_slots(&slots, bp, type);
        fetch_this_slot(&slots, weight);

        /* Flexible counters need to keep at least one slot */
        if (slots.pinned + (!!slots.flexible) > nr_slots[type])
                return -ENOSPC;

        toggle_bp_slot(bp, true, type, weight);

        return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
        int ret;

        mutex_lock(&nr_bp_mutex);

        ret = __reserve_bp_slot(bp);

        mutex_unlock(&nr_bp_mutex);

        return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
        enum bp_type_idx type;
        int weight;

        type = find_slot_idx(bp);
        weight = hw_breakpoint_weight(bp);
        toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
        mutex_lock(&nr_bp_mutex);

        __release_bp_slot(bp);

        mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;

        return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;

        __release_bp_slot(bp);

        return 0;
}

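/*
 * Hedged usage sketch (hypothetical caller, not from this file):
 * a kernel debugger such as kgdb runs with the machine stopped and
 * must not sleep on nr_bp_mutex, so it calls the dbg_* variants and
 * simply retries later when they return -1 (mutex held elsewhere):
 *
 *	if (dbg_reserve_bp_slot(bp))
 *		return -1;
 *
 * ... and only then installs the breakpoint via the arch helpers.
 */
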
static int validate_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = arch_validate_hwbkpt_settings(bp);
        if (ret)
                return ret;

        if (arch_check_bp_in_kernelspace(bp)) {
                if (bp->attr.exclude_kernel)
                        return -EINVAL;
                /*
                 * Don't let unprivileged users set a breakpoint in the trap
                 * path to avoid trap recursion attacks.
                 */
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
        }

        return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = reserve_bp_slot(bp);
        if (ret)
                return ret;

        ret = validate_hw_breakpoint(bp);

        /* if validate_hw_breakpoint() fails then release bp slot */
        if (ret)
                release_bp_slot(bp);

        return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered,
                            struct task_struct *tsk)
{
        return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk),
                                                triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

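/*
 * Hedged usage sketch (hypothetical names, modeled on the arch ptrace
 * code): watch 4 bytes of a traced task's address space for writes.
 * user_addr, my_handler and child are the caller's, not this file's:
 *
 *	struct perf_event_attr attr;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = user_addr;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, my_handler, child);
 *	if (IS_ERR(bp))
 *		err = PTR_ERR(bp);
 */
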
/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
        u64 old_addr = bp->attr.bp_addr;
        u64 old_len = bp->attr.bp_len;
        int old_type = bp->attr.bp_type;
        int err = 0;

        perf_event_disable(bp);

        bp->attr.bp_addr = attr->bp_addr;
        bp->attr.bp_type = attr->bp_type;
        bp->attr.bp_len = attr->bp_len;

        if (attr->disabled)
                goto end;

        err = validate_hw_breakpoint(bp);
        if (!err)
                perf_event_enable(bp);

        if (err) {
                bp->attr.bp_addr = old_addr;
                bp->attr.bp_type = old_type;
                bp->attr.bp_len = old_len;
                if (!bp->attr.disabled)
                        perf_event_enable(bp);

                return err;
        }

end:
        bp->attr.disabled = attr->disabled;

        return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

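/*
 * Hedged usage sketch (hypothetical caller, modeled on the x86 ptrace
 * code): update only the address of an existing breakpoint by copying
 * its current attributes and resubmitting them. new_addr is the
 * caller's value, not this file's:
 *
 *	struct perf_event_attr attr = bp->attr;
 *
 *	attr.bp_addr = new_addr;
 *	err = modify_user_hw_breakpoint(bp, &attr);
 */
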
/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
        if (!bp)
                return;
        perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered)
{
        struct perf_event * __percpu *cpu_events, **pevent, *bp;
        long err;
        int cpu;

        cpu_events = alloc_percpu(typeof(*cpu_events));
        if (!cpu_events)
                return (void __percpu __force *)ERR_PTR(-ENOMEM);

        get_online_cpus();
        for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

                *pevent = bp;

                if (IS_ERR(bp)) {
                        err = PTR_ERR(bp);
                        goto fail;
                }
        }
        put_online_cpus();

        return cpu_events;

fail:
        for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                if (IS_ERR(*pevent))
                        break;
                unregister_hw_breakpoint(*pevent);
        }
        put_online_cpus();

        free_percpu(cpu_events);
        return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);

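/*
 * Hedged usage sketch (modeled on samples/hw_breakpoint/; the watched
 * symbol is just an example): watch a kernel variable for read/write
 * on every cpu, with wp_handler as the caller's overflow callback:
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("pid_max");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *	wp = register_wide_hw_breakpoint(&attr, wp_handler);
 *	if (IS_ERR((void __force *)wp))
 *		return PTR_ERR((void __force *)wp);
 */
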
/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
        int cpu;
        struct perf_event **pevent;

        for_each_possible_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                unregister_hw_breakpoint(*pevent);
        }
        free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
        .notifier_call = hw_breakpoint_exceptions_notify,
        /* we need to be notified first */
        .priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
        unsigned int **task_bp_pinned;
        int cpu, err_cpu;
        int i;

        for (i = 0; i < TYPE_MAX; i++)
                nr_slots[i] = hw_breakpoint_slots(i);

        for_each_possible_cpu(cpu) {
                for (i = 0; i < TYPE_MAX; i++) {
                        task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
                        *task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
                                                  GFP_KERNEL);
                        if (!*task_bp_pinned)
                                goto err_alloc;
                }
        }

        constraints_initialized = 1;

        return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
        /*
         * Free everything allocated so far, including the partial
         * allocations on the cpu that failed (kfree(NULL) is a no-op
         * for the types we never reached on it).
         */
        for_each_possible_cpu(err_cpu) {
                for (i = 0; i < TYPE_MAX; i++)
                        kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
                if (err_cpu == cpu)
                        break;
        }

        return -ENOMEM;
}
core_initcall(init_hw_breakpoint);


struct pmu perf_ops_bp = {
        .enable         = arch_install_hw_breakpoint,
        .disable        = arch_uninstall_hw_breakpoint,
        .read           = hw_breakpoint_pmu_read,
};