kernel/events/hw_breakpoint.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2007 Alan Stern
4  * Copyright (C) IBM Corporation, 2009
5  * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
6  *
7  * Thanks to Ingo Molnar for his many suggestions.
8  *
9  * Authors: Alan Stern <stern@rowland.harvard.edu>
10  *          K.Prasad <prasad@linux.vnet.ibm.com>
11  *          Frederic Weisbecker <fweisbec@gmail.com>
12  */
13
14 /*
15  * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
16  * using the CPU's debug registers.
17  * This file contains the arch-independent routines.
18  */
19
20 #include <linux/hw_breakpoint.h>
21
22 #include <linux/bug.h>
23 #include <linux/cpu.h>
24 #include <linux/export.h>
25 #include <linux/init.h>
26 #include <linux/irqflags.h>
27 #include <linux/kdebug.h>
28 #include <linux/kernel.h>
29 #include <linux/mutex.h>
30 #include <linux/notifier.h>
31 #include <linux/percpu.h>
32 #include <linux/rhashtable.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35
36 /*
37  * Constraints data
38  */
39 struct bp_cpuinfo {
40         /* Number of pinned cpu breakpoints in a cpu */
41         unsigned int    cpu_pinned;
42         /* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
43 #ifdef hw_breakpoint_slots
44         unsigned int    tsk_pinned[hw_breakpoint_slots(0)];
45 #else
46         unsigned int    *tsk_pinned;
47 #endif
48         /* Number of non-pinned cpu/task breakpoints in a cpu */
49         unsigned int    flexible; /* XXX: placeholder, see fetch_this_slot() */
50 };
51
52 static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
53
54 static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
55 {
56         return per_cpu_ptr(bp_cpuinfo + type, cpu);
57 }
58
59 /* Keep track of the breakpoints attached to tasks */
60 static struct rhltable task_bps_ht;
61 static const struct rhashtable_params task_bps_ht_params = {
62         .head_offset = offsetof(struct hw_perf_event, bp_list),
63         .key_offset = offsetof(struct hw_perf_event, target),
64         .key_len = sizeof_field(struct hw_perf_event, target),
65         .automatic_shrinking = true,
66 };
67
68 static bool constraints_initialized __ro_after_init;
69
70 /* Gather the total number of pinned and un-pinned bp in a cpuset */
71 struct bp_busy_slots {
72         unsigned int pinned;
73         unsigned int flexible;
74 };
75
76 /* Serialize accesses to the above constraints */
77 static DEFINE_MUTEX(nr_bp_mutex);
78
79 #ifdef hw_breakpoint_slots
80 /*
81  * Number of breakpoint slots is constant, and the same for all types.
82  */
83 static_assert(hw_breakpoint_slots(TYPE_INST) == hw_breakpoint_slots(TYPE_DATA));
84 static inline int hw_breakpoint_slots_cached(int type)  { return hw_breakpoint_slots(type); }
85 static inline int init_breakpoint_slots(void)           { return 0; }
86 #else
87 /*
88  * Dynamic number of breakpoint slots.
89  */
90 static int __nr_bp_slots[TYPE_MAX] __ro_after_init;
91
92 static inline int hw_breakpoint_slots_cached(int type)
93 {
94         return __nr_bp_slots[type];
95 }
96
97 static __init int init_breakpoint_slots(void)
98 {
99         int i, cpu, err_cpu;
100
101         for (i = 0; i < TYPE_MAX; i++)
102                 __nr_bp_slots[i] = hw_breakpoint_slots(i);
103
104         for_each_possible_cpu(cpu) {
105                 for (i = 0; i < TYPE_MAX; i++) {
106                         struct bp_cpuinfo *info = get_bp_info(cpu, i);
107
108                         info->tsk_pinned = kcalloc(__nr_bp_slots[i], sizeof(int), GFP_KERNEL);
109                         if (!info->tsk_pinned)
110                                 goto err;
111                 }
112         }
113
114         return 0;
115 err:
116         for_each_possible_cpu(err_cpu) {
117                 for (i = 0; i < TYPE_MAX; i++)
118                         kfree(get_bp_info(err_cpu, i)->tsk_pinned);
119                 if (err_cpu == cpu)
120                         break;
121         }
122
123         return -ENOMEM;
124 }
125 #endif
126
127 __weak int hw_breakpoint_weight(struct perf_event *bp)
128 {
129         return 1;
130 }
131
132 static inline enum bp_type_idx find_slot_idx(u64 bp_type)
133 {
134         if (bp_type & HW_BREAKPOINT_RW)
135                 return TYPE_DATA;
136
137         return TYPE_INST;
138 }
139
140 /*
141  * Report the maximum number of pinned breakpoints any single task
142  * has on this cpu.
143  */
144 static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
145 {
146         unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
147         int i;
148
149         for (i = hw_breakpoint_slots_cached(type) - 1; i >= 0; i--) {
150                 if (tsk_pinned[i] > 0)
151                         return i + 1;
152         }
153
154         return 0;
155 }
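/*
 * Illustrative example (not part of the original file): with 4 slots per
 * type, tsk_pinned = {2, 1, 0, 0} means two tasks own exactly one
 * breakpoint each on this cpu and one task owns exactly two, so
 * max_task_bp_pinned() returns 2 (index 1 is the highest non-zero entry,
 * and tsk_pinned[n] counts tasks with n + 1 breakpoints).
 */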
156
157 /*
158  * Count the number of breakpoints of the same type and same task.
159  * The given event must be not on the list.
160  */
161 static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
162 {
163         struct rhlist_head *head, *pos;
164         struct perf_event *iter;
165         int count = 0;
166
167         rcu_read_lock();
168         head = rhltable_lookup(&task_bps_ht, &bp->hw.target, task_bps_ht_params);
169         if (!head)
170                 goto out;
171
172         rhl_for_each_entry_rcu(iter, pos, head, hw.bp_list) {
173                 if (find_slot_idx(iter->attr.bp_type) == type &&
174                     (iter->cpu < 0 || cpu == iter->cpu))
175                         count += hw_breakpoint_weight(iter);
176         }
177
178 out:
179         rcu_read_unlock();
180         return count;
181 }
182
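/*
 * Return the cpumask a breakpoint's constraints apply to: just bp->cpu if
 * the event is bound to a single cpu, otherwise all possible cpus.
 */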
183 static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
184 {
185         if (bp->cpu >= 0)
186                 return cpumask_of(bp->cpu);
187         return cpu_possible_mask;
188 }
189
190 /*
191  * Report the number of pinned/un-pinned breakpoints we have in
192  * a given cpu (cpu > -1) or in all of them (cpu = -1).
193  */
194 static void
195 fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
196                     enum bp_type_idx type)
197 {
198         const struct cpumask *cpumask = cpumask_of_bp(bp);
199         int cpu;
200
201         for_each_cpu(cpu, cpumask) {
202                 struct bp_cpuinfo *info = get_bp_info(cpu, type);
203                 int nr;
204
205                 nr = info->cpu_pinned;
206                 if (!bp->hw.target)
207                         nr += max_task_bp_pinned(cpu, type);
208                 else
209                         nr += task_bp_pinned(cpu, bp, type);
210
211                 if (nr > slots->pinned)
212                         slots->pinned = nr;
213
214                 nr = info->flexible;
215                 if (nr > slots->flexible)
216                         slots->flexible = nr;
217         }
218 }
219
220 /*
221  * For now, continue to consider flexible as pinned, until we can
222  * ensure no flexible event can ever be scheduled before a pinned event
223  * on the same cpu.
224  */
225 static void
226 fetch_this_slot(struct bp_busy_slots *slots, int weight)
227 {
228         slots->pinned += weight;
229 }
230
231 /*
232  * Add or remove a pinned breakpoint for the given task in our constraint table
233  */
234 static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
235                                 enum bp_type_idx type, int weight)
236 {
237         unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
238         int old_idx, new_idx;
239
240         old_idx = task_bp_pinned(cpu, bp, type) - 1;
241         new_idx = old_idx + weight;
242
243         if (old_idx >= 0)
244                 tsk_pinned[old_idx]--;
245         if (new_idx >= 0)
246                 tsk_pinned[new_idx]++;
247 }
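/*
 * Illustrative example (not part of the original file): if the task already
 * holds two pinned breakpoints on this cpu and one more of weight 1 is
 * added, old_idx = 1 and new_idx = 2 above, so tsk_pinned[1] is decremented
 * and tsk_pinned[2] is incremented: the task moves from the "2 breakpoints"
 * bucket to the "3 breakpoints" bucket of the histogram.
 */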
248
249 /*
250  * Add/remove the given breakpoint in our constraint table
251  */
252 static int
253 toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
254                int weight)
255 {
256         const struct cpumask *cpumask = cpumask_of_bp(bp);
257         int cpu;
258
259         if (!enable)
260                 weight = -weight;
261
262         /* Pinned counter cpu profiling */
263         if (!bp->hw.target) {
264                 get_bp_info(bp->cpu, type)->cpu_pinned += weight;
265                 return 0;
266         }
267
268         /* Pinned counter task profiling */
269         for_each_cpu(cpu, cpumask)
270                 toggle_bp_task_slot(bp, cpu, type, weight);
271
272         if (enable)
273                 return rhltable_insert(&task_bps_ht, &bp->hw.bp_list, task_bps_ht_params);
274         else
275                 return rhltable_remove(&task_bps_ht, &bp->hw.bp_list, task_bps_ht_params);
276 }
277
278 __weak int arch_reserve_bp_slot(struct perf_event *bp)
279 {
280         return 0;
281 }
282
283 __weak void arch_release_bp_slot(struct perf_event *bp)
284 {
285 }
286
287 /*
288  * Function to perform processor-specific cleanup during unregistration
289  */
290 __weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
291 {
292         /*
293          * A weak stub function here for those archs that don't define
294          * it inside arch/.../kernel/hw_breakpoint.c
295          */
296 }
297
298 /*
299  * Constraints to check before allowing this new breakpoint counter:
300  *
301  *  == Non-pinned counter == (Considered as pinned for now)
302  *
303  *   - If attached to a single cpu, check:
304  *
305  *       (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
306  *           + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
307  *
308  *       -> If there are already non-pinned counters on this cpu, it means
309  *          there is already a free slot for them.
310  *          Otherwise, we check that the maximum number of per-task
311  *          breakpoints (for this cpu) plus the number of per-cpu breakpoints
312  *          (for this cpu) doesn't cover all the registers.
313  *
314  *   - If attached to every cpu, check:
315  *
316  *       (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
317  *           + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
318  *
319  *       -> This is roughly the same, except we check the number of per-cpu
320  *          breakpoints for every cpu and keep the max one. Same for the
321  *          per-task breakpoints.
322  *
323  *
324  * == Pinned counter ==
325  *
326  *   - If attached to a single cpu, check:
327  *
328  *       ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
329  *            + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
330  *
331  *       -> Same checks as before. But now the info->flexible, if any, must keep
332  *          at least one register (or they will never be fed).
333  *
334  *   - If attached to every cpu, check:
335  *
336  *       ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
337  *            + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
338  */
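/*
 * Worked example (illustrative, assuming 4 slots per type): if a cpu already
 * has cpu_pinned = 1, max_task_bp_pinned() = 2 and flexible = 1, then adding
 * a pinned breakpoint of weight 1 gives slots.pinned = 1 + 2 + 1 = 4; the
 * extra slot kept for the flexible counter brings the total to 5 > 4, so
 * __reserve_bp_slot() below returns -ENOSPC.
 */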
339 static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
340 {
341         struct bp_busy_slots slots = {0};
342         enum bp_type_idx type;
343         int weight;
344         int ret;
345
346         /* We couldn't initialize breakpoint constraints on boot */
347         if (!constraints_initialized)
348                 return -ENOMEM;
349
350         /* Basic checks */
351         if (bp_type == HW_BREAKPOINT_EMPTY ||
352             bp_type == HW_BREAKPOINT_INVALID)
353                 return -EINVAL;
354
355         type = find_slot_idx(bp_type);
356         weight = hw_breakpoint_weight(bp);
357
358         fetch_bp_busy_slots(&slots, bp, type);
359         /*
360          * Simulate the addition of this breakpoint to the constraints
361          * and see the result.
362          */
363         fetch_this_slot(&slots, weight);
364
365         /* Flexible counters need to keep at least one slot */
366         if (slots.pinned + (!!slots.flexible) > hw_breakpoint_slots_cached(type))
367                 return -ENOSPC;
368
369         ret = arch_reserve_bp_slot(bp);
370         if (ret)
371                 return ret;
372
373         return toggle_bp_slot(bp, true, type, weight);
374 }
375
376 int reserve_bp_slot(struct perf_event *bp)
377 {
378         int ret;
379
380         mutex_lock(&nr_bp_mutex);
381
382         ret = __reserve_bp_slot(bp, bp->attr.bp_type);
383
384         mutex_unlock(&nr_bp_mutex);
385
386         return ret;
387 }
388
389 static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
390 {
391         enum bp_type_idx type;
392         int weight;
393
394         arch_release_bp_slot(bp);
395
396         type = find_slot_idx(bp_type);
397         weight = hw_breakpoint_weight(bp);
398         WARN_ON(toggle_bp_slot(bp, false, type, weight));
399 }
400
401 void release_bp_slot(struct perf_event *bp)
402 {
403         mutex_lock(&nr_bp_mutex);
404
405         arch_unregister_hw_breakpoint(bp);
406         __release_bp_slot(bp, bp->attr.bp_type);
407
408         mutex_unlock(&nr_bp_mutex);
409 }
410
411 static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
412 {
413         int err;
414
415         __release_bp_slot(bp, old_type);
416
417         err = __reserve_bp_slot(bp, new_type);
418         if (err) {
419                 /*
420                  * Reserve the old_type slot back in case
421                  * there's no space for the new type.
422                  *
423                  * This must succeed, because we just released
424                  * the old_type slot in the __release_bp_slot
425                  * call above. If not, something is broken.
426                  */
427                 WARN_ON(__reserve_bp_slot(bp, old_type));
428         }
429
430         return err;
431 }
432
433 static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
434 {
435         int ret;
436
437         mutex_lock(&nr_bp_mutex);
438         ret = __modify_bp_slot(bp, old_type, new_type);
439         mutex_unlock(&nr_bp_mutex);
440         return ret;
441 }
442
443 /*
444  * Allow the kernel debugger to reserve breakpoint slots without
445  * taking a lock, using the dbg_* variants of the reserve and
446  * release breakpoint slot functions.
447  */
448 int dbg_reserve_bp_slot(struct perf_event *bp)
449 {
450         if (mutex_is_locked(&nr_bp_mutex))
451                 return -1;
452
453         return __reserve_bp_slot(bp, bp->attr.bp_type);
454 }
455
456 int dbg_release_bp_slot(struct perf_event *bp)
457 {
458         if (mutex_is_locked(&nr_bp_mutex))
459                 return -1;
460
461         __release_bp_slot(bp, bp->attr.bp_type);
462
463         return 0;
464 }
465
466 static int hw_breakpoint_parse(struct perf_event *bp,
467                                const struct perf_event_attr *attr,
468                                struct arch_hw_breakpoint *hw)
469 {
470         int err;
471
472         err = hw_breakpoint_arch_parse(bp, attr, hw);
473         if (err)
474                 return err;
475
476         if (arch_check_bp_in_kernelspace(hw)) {
477                 if (attr->exclude_kernel)
478                         return -EINVAL;
479                 /*
480                  * Don't let unprivileged users set a breakpoint in the trap
481                  * path to avoid trap recursion attacks.
482                  */
483                 if (!capable(CAP_SYS_ADMIN))
484                         return -EPERM;
485         }
486
487         return 0;
488 }
489
490 int register_perf_hw_breakpoint(struct perf_event *bp)
491 {
492         struct arch_hw_breakpoint hw = { };
493         int err;
494
495         err = reserve_bp_slot(bp);
496         if (err)
497                 return err;
498
499         err = hw_breakpoint_parse(bp, &bp->attr, &hw);
500         if (err) {
501                 release_bp_slot(bp);
502                 return err;
503         }
504
505         bp->hw.info = hw;
506
507         return 0;
508 }
509
510 /**
511  * register_user_hw_breakpoint - register a hardware breakpoint for user space
512  * @attr: breakpoint attributes
513  * @triggered: callback to trigger when we hit the breakpoint
514  * @context: context data that could be used in the triggered callback
515  * @tsk: pointer to 'task_struct' of the process to which the address belongs
516  */
517 struct perf_event *
518 register_user_hw_breakpoint(struct perf_event_attr *attr,
519                             perf_overflow_handler_t triggered,
520                             void *context,
521                             struct task_struct *tsk)
522 {
523         return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
524                                                 context);
525 }
526 EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
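/*
 * Minimal usage sketch (illustrative only, not part of the original file;
 * "addr", "tsk" and "my_trigger" are hypothetical caller-provided values):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, my_trigger, NULL, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */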
527
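/*
 * Copy only the attribute fields a breakpoint modification is allowed to
 * change; everything else in the attr must stay identical (see the memcmp()
 * check in modify_user_hw_breakpoint_check()).
 */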
528 static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
529                                     struct perf_event_attr *from)
530 {
531         to->bp_addr = from->bp_addr;
532         to->bp_type = from->bp_type;
533         to->bp_len  = from->bp_len;
534         to->disabled = from->disabled;
535 }
536
537 int
538 modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
539                                 bool check)
540 {
541         struct arch_hw_breakpoint hw = { };
542         int err;
543
544         err = hw_breakpoint_parse(bp, attr, &hw);
545         if (err)
546                 return err;
547
548         if (check) {
549                 struct perf_event_attr old_attr;
550
551                 old_attr = bp->attr;
552                 hw_breakpoint_copy_attr(&old_attr, attr);
553                 if (memcmp(&old_attr, attr, sizeof(*attr)))
554                         return -EINVAL;
555         }
556
557         if (bp->attr.bp_type != attr->bp_type) {
558                 err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);
559                 if (err)
560                         return err;
561         }
562
563         hw_breakpoint_copy_attr(&bp->attr, attr);
564         bp->hw.info = hw;
565
566         return 0;
567 }
568
569 /**
570  * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
571  * @bp: the breakpoint structure to modify
572  * @attr: new breakpoint attributes
573  */
574 int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
575 {
576         int err;
577
578         /*
579          * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
580          * will not be possible to raise IPIs that invoke __perf_event_disable.
581          * So call the function directly after making sure we are targeting the
582          * current task.
583          */
584         if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
585                 perf_event_disable_local(bp);
586         else
587                 perf_event_disable(bp);
588
589         err = modify_user_hw_breakpoint_check(bp, attr, false);
590
591         if (!bp->attr.disabled)
592                 perf_event_enable(bp);
593
594         return err;
595 }
596 EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
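/*
 * Minimal usage sketch (illustrative only; "bp" is an already registered
 * breakpoint and "new_addr" a hypothetical replacement address):
 *
 *	struct perf_event_attr attr = bp->attr;
 *	int err;
 *
 *	attr.bp_addr = new_addr;
 *	err = modify_user_hw_breakpoint(bp, &attr);
 */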
597
598 /**
599  * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
600  * @bp: the breakpoint structure to unregister
601  */
602 void unregister_hw_breakpoint(struct perf_event *bp)
603 {
604         if (!bp)
605                 return;
606         perf_event_release_kernel(bp);
607 }
608 EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
609
610 /**
611  * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
612  * @attr: breakpoint attributes
613  * @triggered: callback to trigger when we hit the breakpoint
614  * @context: context data that could be used in the triggered callback
615  *
616  * Return: a set of per_cpu pointers to perf events
617  */
618 struct perf_event * __percpu *
619 register_wide_hw_breakpoint(struct perf_event_attr *attr,
620                             perf_overflow_handler_t triggered,
621                             void *context)
622 {
623         struct perf_event * __percpu *cpu_events, *bp;
624         long err = 0;
625         int cpu;
626
627         cpu_events = alloc_percpu(typeof(*cpu_events));
628         if (!cpu_events)
629                 return (void __percpu __force *)ERR_PTR(-ENOMEM);
630
631         cpus_read_lock();
632         for_each_online_cpu(cpu) {
633                 bp = perf_event_create_kernel_counter(attr, cpu, NULL,
634                                                       triggered, context);
635                 if (IS_ERR(bp)) {
636                         err = PTR_ERR(bp);
637                         break;
638                 }
639
640                 per_cpu(*cpu_events, cpu) = bp;
641         }
642         cpus_read_unlock();
643
644         if (likely(!err))
645                 return cpu_events;
646
647         unregister_wide_hw_breakpoint(cpu_events);
648         return (void __percpu __force *)ERR_PTR(err);
649 }
650 EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
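/*
 * Minimal usage sketch (illustrative only, loosely following
 * samples/hw_breakpoint/data_breakpoint.c; "symbol_addr" is a hypothetical
 * kernel data address to watch on every cpu):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wide_bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = symbol_addr;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *	wide_bp = register_wide_hw_breakpoint(&attr, my_trigger, NULL);
 *	if (IS_ERR((void __force *)wide_bp))
 *		return PTR_ERR((void __force *)wide_bp);
 */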
651
652 /**
653  * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
654  * @cpu_events: the per cpu set of events to unregister
655  */
656 void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
657 {
658         int cpu;
659
660         for_each_possible_cpu(cpu)
661                 unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));
662
663         free_percpu(cpu_events);
664 }
665 EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
666
667 /**
668  * hw_breakpoint_is_used - check if breakpoints are currently used
669  *
670  * Returns: true if breakpoints are used, false otherwise.
671  */
672 bool hw_breakpoint_is_used(void)
673 {
674         int cpu;
675
676         if (!constraints_initialized)
677                 return false;
678
679         for_each_possible_cpu(cpu) {
680                 for (int type = 0; type < TYPE_MAX; ++type) {
681                         struct bp_cpuinfo *info = get_bp_info(cpu, type);
682
683                         if (info->cpu_pinned)
684                                 return true;
685
686                         for (int slot = 0; slot < hw_breakpoint_slots_cached(type); ++slot) {
687                                 if (info->tsk_pinned[slot])
688                                         return true;
689                         }
690                 }
691         }
692
693         return false;
694 }
695
696 static struct notifier_block hw_breakpoint_exceptions_nb = {
697         .notifier_call = hw_breakpoint_exceptions_notify,
698         /* we need to be notified first */
699         .priority = 0x7fffffff
700 };
701
702 static void bp_perf_event_destroy(struct perf_event *event)
703 {
704         release_bp_slot(event);
705 }
706
707 static int hw_breakpoint_event_init(struct perf_event *bp)
708 {
709         int err;
710
711         if (bp->attr.type != PERF_TYPE_BREAKPOINT)
712                 return -ENOENT;
713
714         /*
715          * no branch sampling for breakpoint events
716          */
717         if (has_branch_stack(bp))
718                 return -EOPNOTSUPP;
719
720         err = register_perf_hw_breakpoint(bp);
721         if (err)
722                 return err;
723
724         bp->destroy = bp_perf_event_destroy;
725
726         return 0;
727 }
728
729 static int hw_breakpoint_add(struct perf_event *bp, int flags)
730 {
731         if (!(flags & PERF_EF_START))
732                 bp->hw.state = PERF_HES_STOPPED;
733
734         if (is_sampling_event(bp)) {
735                 bp->hw.last_period = bp->hw.sample_period;
736                 perf_swevent_set_period(bp);
737         }
738
739         return arch_install_hw_breakpoint(bp);
740 }
741
742 static void hw_breakpoint_del(struct perf_event *bp, int flags)
743 {
744         arch_uninstall_hw_breakpoint(bp);
745 }
746
747 static void hw_breakpoint_start(struct perf_event *bp, int flags)
748 {
749         bp->hw.state = 0;
750 }
751
752 static void hw_breakpoint_stop(struct perf_event *bp, int flags)
753 {
754         bp->hw.state = PERF_HES_STOPPED;
755 }
756
757 static struct pmu perf_breakpoint = {
758         .task_ctx_nr    = perf_sw_context, /* could eventually get its own */
759
760         .event_init     = hw_breakpoint_event_init,
761         .add            = hw_breakpoint_add,
762         .del            = hw_breakpoint_del,
763         .start          = hw_breakpoint_start,
764         .stop           = hw_breakpoint_stop,
765         .read           = hw_breakpoint_pmu_read,
766 };
767
768 int __init init_hw_breakpoint(void)
769 {
770         int ret;
771
772         ret = rhltable_init(&task_bps_ht, &task_bps_ht_params);
773         if (ret)
774                 return ret;
775
776         ret = init_breakpoint_slots();
777         if (ret)
778                 return ret;
779
780         constraints_initialized = true;
781
782         perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);
783
784         return register_die_notifier(&hw_breakpoint_exceptions_nb);
785 }