kernel/hw_breakpoint.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);

static int nr_slots[TYPE_MAX];

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
    unsigned int pinned;
    unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
    return 1;
}

static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
    if (bp->attr.bp_type & HW_BREAKPOINT_RW)
        return TYPE_DATA;

    return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * has in this cpu
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
    int i;
    unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);

    for (i = nr_slots[type] - 1; i >= 0; i--) {
        if (tsk_pinned[i] > 0)
            return i + 1;
    }

    return 0;
}

/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
{
    struct task_struct *tsk = bp->hw.bp_target;
    struct perf_event *iter;
    int count = 0;

    list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
        if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type)
            count += hw_breakpoint_weight(iter);
    }

    return count;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
            enum bp_type_idx type)
{
    int cpu = bp->cpu;
    struct task_struct *tsk = bp->hw.bp_target;

    if (cpu >= 0) {
        slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
        if (!tsk)
            slots->pinned += max_task_bp_pinned(cpu, type);
        else
            slots->pinned += task_bp_pinned(bp, type);
        slots->flexible = per_cpu(nr_bp_flexible[type], cpu);

        return;
    }

    for_each_online_cpu(cpu) {
        unsigned int nr;

        nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
        if (!tsk)
            nr += max_task_bp_pinned(cpu, type);
        else
            nr += task_bp_pinned(bp, type);

        if (nr > slots->pinned)
            slots->pinned = nr;

        nr = per_cpu(nr_bp_flexible[type], cpu);

        if (nr > slots->flexible)
            slots->flexible = nr;
    }
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * in a same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
    slots->pinned += weight;
}

/*
 * Add or remove a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
                enum bp_type_idx type, int weight)
{
    unsigned int *tsk_pinned;
    int old_count = 0;
    int old_idx = 0;
    int idx = 0;

    old_count = task_bp_pinned(bp, type);
    old_idx = old_count - 1;
    idx = old_idx + weight;

    /* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
    tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
    if (enable) {
        tsk_pinned[idx]++;
        if (old_count > 0)
            tsk_pinned[old_idx]--;
    } else {
        tsk_pinned[idx]--;
        if (old_count > 0)
            tsk_pinned[old_idx]++;
    }
}

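/*
 * Worked example of the accounting above (illustrative only, assuming a
 * weight of 1 per breakpoint): if, on some cpu, one task owns 3 pinned
 * data breakpoints and another task owns 1, then
 *
 *    tsk_pinned[2] == 1    (one task with 3 breakpoints)
 *    tsk_pinned[0] == 1    (one task with 1 breakpoint)
 *
 * and max_task_bp_pinned() scans from the top slot down and reports 3.
 */
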
/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
           int weight)
{
    int cpu = bp->cpu;
    struct task_struct *tsk = bp->hw.bp_target;

    /* Pinned counter cpu profiling */
    if (!tsk) {

        if (enable)
            per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
        else
            per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
        return;
    }

    /* Pinned counter task profiling */

    if (!enable)
        list_del(&bp->hw.bp_list);

    if (cpu >= 0) {
        toggle_bp_task_slot(bp, cpu, enable, type, weight);
    } else {
        for_each_online_cpu(cpu)
            toggle_bp_task_slot(bp, cpu, enable, type, weight);
    }

    if (enable)
        list_add_tail(&bp->hw.bp_list, &bp_task_head);
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
    /*
     * A weak stub function here for those archs that don't define
     * it inside arch/.../kernel/hw_breakpoint.c
     */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 *  == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must keep
 *          at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
static int __reserve_bp_slot(struct perf_event *bp)
{
    struct bp_busy_slots slots = {0};
    enum bp_type_idx type;
    int weight;

    /* We couldn't initialize breakpoint constraints on boot */
    if (!constraints_initialized)
        return -ENOMEM;

    /* Basic checks */
    if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
        bp->attr.bp_type == HW_BREAKPOINT_INVALID)
        return -EINVAL;

    type = find_slot_idx(bp);
    weight = hw_breakpoint_weight(bp);

    fetch_bp_busy_slots(&slots, bp, type);
    /*
     * Simulate the addition of this breakpoint to the constraints
     * and see the result.
     */
    fetch_this_slot(&slots, weight);

    /* Flexible counters need to keep at least one slot */
    if (slots.pinned + (!!slots.flexible) > nr_slots[type])
        return -ENOSPC;

    toggle_bp_slot(bp, true, type, weight);

    return 0;
}

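/*
 * Worked example of the check above (illustrative; on x86, for instance,
 * nr_slots[TYPE_DATA] is HBP_NUM == 4): if a cpu already has 2 pinned
 * cpu-wide data breakpoints, its busiest task holds 1 pinned data
 * breakpoint, and at least one flexible breakpoint exists, then
 * slots.pinned is 3.  Adding a new weight-1 cpu-wide pinned breakpoint
 * makes it 4, and the slot reserved for flexible counters brings the
 * total to 5 > 4, so __reserve_bp_slot() returns -ENOSPC.
 */
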
int reserve_bp_slot(struct perf_event *bp)
{
    int ret;

    mutex_lock(&nr_bp_mutex);

    ret = __reserve_bp_slot(bp);

    mutex_unlock(&nr_bp_mutex);

    return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
    enum bp_type_idx type;
    int weight;

    type = find_slot_idx(bp);
    weight = hw_breakpoint_weight(bp);
    toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
    mutex_lock(&nr_bp_mutex);

    arch_unregister_hw_breakpoint(bp);
    __release_bp_slot(bp);

    mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
    if (mutex_is_locked(&nr_bp_mutex))
        return -1;

    return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
    if (mutex_is_locked(&nr_bp_mutex))
        return -1;

    __release_bp_slot(bp);

    return 0;
}

static int validate_hw_breakpoint(struct perf_event *bp)
{
    int ret;

    ret = arch_validate_hwbkpt_settings(bp);
    if (ret)
        return ret;

    if (arch_check_bp_in_kernelspace(bp)) {
        if (bp->attr.exclude_kernel)
            return -EINVAL;
        /*
         * Don't let unprivileged users set a breakpoint in the trap
         * path to avoid trap recursion attacks.
         */
        if (!capable(CAP_SYS_ADMIN))
            return -EPERM;
    }

    return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
    int ret;

    ret = reserve_bp_slot(bp);
    if (ret)
        return ret;

    ret = validate_hw_breakpoint(bp);

    /* if validate_hw_breakpoint() fails then release bp slot */
    if (ret)
        release_bp_slot(bp);

    return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
                perf_overflow_handler_t triggered,
                struct task_struct *tsk)
{
    return perf_event_create_kernel_counter(attr, -1, tsk, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

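/*
 * Illustrative usage sketch, not part of this file: a caller such as an
 * arch ptrace backend would typically fill the attributes with
 * hw_breakpoint_init() before registering.  The names watched_addr and
 * my_triggered below are hypothetical.
 *
 *    struct perf_event_attr attr;
 *    struct perf_event *bp;
 *
 *    hw_breakpoint_init(&attr);
 *    attr.bp_addr = watched_addr;
 *    attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *    attr.bp_type = HW_BREAKPOINT_W;
 *
 *    bp = register_user_hw_breakpoint(&attr, my_triggered, tsk);
 *    if (IS_ERR(bp))
 *        return PTR_ERR(bp);
 */
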
/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
    u64 old_addr = bp->attr.bp_addr;
    u64 old_len = bp->attr.bp_len;
    int old_type = bp->attr.bp_type;
    int err = 0;

    perf_event_disable(bp);

    bp->attr.bp_addr = attr->bp_addr;
    bp->attr.bp_type = attr->bp_type;
    bp->attr.bp_len = attr->bp_len;

    if (attr->disabled)
        goto end;

    err = validate_hw_breakpoint(bp);
    if (!err)
        perf_event_enable(bp);

    if (err) {
        bp->attr.bp_addr = old_addr;
        bp->attr.bp_type = old_type;
        bp->attr.bp_len = old_len;
        if (!bp->attr.disabled)
            perf_event_enable(bp);

        return err;
    }

end:
    bp->attr.disabled = attr->disabled;

    return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

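/*
 * Illustrative usage sketch, not part of this file: to move an existing
 * user breakpoint to a new (hypothetical) address new_addr, copy the
 * current attributes, patch the address and let modify_user_hw_breakpoint()
 * re-validate and re-enable the event.
 *
 *    struct perf_event_attr attr = bp->attr;
 *    int err;
 *
 *    attr.bp_addr = new_addr;
 *    err = modify_user_hw_breakpoint(bp, &attr);
 */
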
/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
    if (!bp)
        return;
    perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
                perf_overflow_handler_t triggered)
{
    struct perf_event * __percpu *cpu_events, **pevent, *bp;
    long err;
    int cpu;

    cpu_events = alloc_percpu(typeof(*cpu_events));
    if (!cpu_events)
        return (void __percpu __force *)ERR_PTR(-ENOMEM);

    get_online_cpus();
    for_each_online_cpu(cpu) {
        pevent = per_cpu_ptr(cpu_events, cpu);
        bp = perf_event_create_kernel_counter(attr, cpu, NULL, triggered);

        *pevent = bp;

        if (IS_ERR(bp)) {
            err = PTR_ERR(bp);
            goto fail;
        }
    }
    put_online_cpus();

    return cpu_events;

fail:
    for_each_online_cpu(cpu) {
        pevent = per_cpu_ptr(cpu_events, cpu);
        if (IS_ERR(*pevent))
            break;
        unregister_hw_breakpoint(*pevent);
    }
    put_online_cpus();

    free_percpu(cpu_events);
    return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);

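/*
 * Illustrative usage sketch, not part of this file (compare
 * samples/hw_breakpoint/data_breakpoint.c); the watched symbol name and
 * my_handler below are hypothetical:
 *
 *    struct perf_event_attr attr;
 *    struct perf_event * __percpu *wp;
 *
 *    hw_breakpoint_init(&attr);
 *    attr.bp_addr = kallsyms_lookup_name("some_kernel_variable");
 *    attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *    attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *    wp = register_wide_hw_breakpoint(&attr, my_handler);
 *    if (IS_ERR((void __force *)wp))
 *        return PTR_ERR((void __force *)wp);
 */
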
/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
    int cpu;
    struct perf_event **pevent;

    for_each_possible_cpu(cpu) {
        pevent = per_cpu_ptr(cpu_events, cpu);
        unregister_hw_breakpoint(*pevent);
    }
    free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
    .notifier_call = hw_breakpoint_exceptions_notify,
    /* we need to be notified first */
    .priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{
    release_bp_slot(event);
}

static int hw_breakpoint_event_init(struct perf_event *bp)
{
    int err;

    if (bp->attr.type != PERF_TYPE_BREAKPOINT)
        return -ENOENT;

    err = register_perf_hw_breakpoint(bp);
    if (err)
        return err;

    bp->destroy = bp_perf_event_destroy;

    return 0;
}

static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
    if (!(flags & PERF_EF_START))
        bp->hw.state = PERF_HES_STOPPED;

    return arch_install_hw_breakpoint(bp);
}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
    arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
    bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
    bp->hw.state = PERF_HES_STOPPED;
}

static struct pmu perf_breakpoint = {
    .task_ctx_nr = perf_sw_context, /* could eventually get its own */

    .event_init = hw_breakpoint_event_init,
    .add = hw_breakpoint_add,
    .del = hw_breakpoint_del,
    .start = hw_breakpoint_start,
    .stop = hw_breakpoint_stop,
    .read = hw_breakpoint_pmu_read,
};

int __init init_hw_breakpoint(void)
{
    unsigned int **task_bp_pinned;
    int cpu, err_cpu;
    int i;

    for (i = 0; i < TYPE_MAX; i++)
        nr_slots[i] = hw_breakpoint_slots(i);

    for_each_possible_cpu(cpu) {
        for (i = 0; i < TYPE_MAX; i++) {
            task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
            *task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
                          GFP_KERNEL);
            if (!*task_bp_pinned)
                goto err_alloc;
        }
    }

    constraints_initialized = 1;

    perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

    return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
    for_each_possible_cpu(err_cpu) {
        if (err_cpu == cpu)
            break;
        for (i = 0; i < TYPE_MAX; i++)
            kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
    }

    return -ENOMEM;
}
