// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */
#include <linux/hw_breakpoint.h>

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/percpu-rwsem.h>
#include <linux/percpu.h>
#include <linux/rhashtable.h>
#include <linux/sched.h>
#include <linux/slab.h>
/*
 * Datastructure to track the total uses of N slots across tasks or CPUs;
 * bp_slots_histogram::count[N] is the number of assigned N+1 breakpoint slots.
 */
struct bp_slots_histogram {
#ifdef hw_breakpoint_slots
	atomic_t count[hw_breakpoint_slots(0)];
#else
	atomic_t *count;
#endif
};
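/*
 * Illustrative example of the histogram semantics (not from the original
 * source): if 3 tasks each have 1 pinned breakpoint of some type on a CPU,
 * and 1 task has 2, that CPU's histogram for the type reads count[0] == 3
 * and count[1] == 1; the highest non-empty bucket (here: 2 slots) is the
 * worst-case number of slots any single task needs on that CPU.
 */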
/*
 * Per-CPU constraints data.
 */
struct bp_cpuinfo {
	/* Number of pinned CPU breakpoints in a CPU. */
	unsigned int	cpu_pinned;
	/* Histogram of pinned task breakpoints in a CPU. */
	struct bp_slots_histogram tsk_pinned;
};

static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
{
	return per_cpu_ptr(bp_cpuinfo + type, cpu);
}
/* Number of pinned CPU breakpoints globally. */
static struct bp_slots_histogram cpu_pinned[TYPE_MAX];
/* Number of pinned CPU-independent task breakpoints. */
static struct bp_slots_histogram tsk_pinned_all[TYPE_MAX];
/* Keep track of the breakpoints attached to tasks */
static struct rhltable task_bps_ht;
static const struct rhashtable_params task_bps_ht_params = {
	.head_offset = offsetof(struct hw_perf_event, bp_list),
	.key_offset = offsetof(struct hw_perf_event, target),
	.key_len = sizeof_field(struct hw_perf_event, target),
	.automatic_shrinking = true,
};

static bool constraints_initialized __ro_after_init;
/*
 * Synchronizes accesses to the per-CPU constraints; the locking rules are:
 *
 *  1. Atomic updates to bp_cpuinfo::tsk_pinned only require a held read-lock
 *     (due to bp_slots_histogram::count being atomic, no updates are lost).
 *
 *  2. Holding a write-lock is required for computations that require a
 *     stable snapshot of all bp_cpuinfo::tsk_pinned.
 *
 *  3. In all other cases, non-atomic accesses require the appropriately held
 *     lock (read-lock for read-only accesses; write-lock for reads/writes).
 */
DEFINE_STATIC_PERCPU_RWSEM(bp_cpuinfo_sem);
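/*
 * Illustrative consequence of the rules above (not from the original source):
 * toggle_bp_slot() for a task breakpoint only performs atomic increments and
 * decrements on tsk_pinned counts, so it runs under the read-lock and may
 * proceed concurrently for independent tasks; max_task_bp_pinned() compares
 * counts across all buckets and therefore asserts the write-lock to freeze
 * the histograms.
 */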
/*
 * Return the mutex used to serialize accesses to the per-task lists in
 * task_bps_ht. Since rhltable synchronizes concurrent insertions/deletions,
 * independent tasks may insert/delete concurrently; therefore, a mutex per
 * task is sufficient.
 *
 * Uses task_struct::perf_event_mutex to avoid extending task_struct with a
 * hw_breakpoint-only mutex that may be used only infrequently. The caveat
 * here is that hw_breakpoint may contend with per-task perf event list
 * management. The assumption is that perf usecases involving hw_breakpoints
 * are very unlikely to result in unnecessary contention.
 */
static inline struct mutex *get_task_bps_mutex(struct perf_event *bp)
{
	struct task_struct *tsk = bp->hw.target;

	return tsk ? &tsk->perf_event_mutex : NULL;
}
static struct mutex *bp_constraints_lock(struct perf_event *bp)
{
	struct mutex *tsk_mtx = get_task_bps_mutex(bp);

	if (tsk_mtx) {
		/*
		 * Fully analogous to the perf_try_init_event() nesting
		 * argument in the comment near perf_event_ctx_lock_nested();
		 * this child->perf_event_mutex cannot ever deadlock against
		 * the parent->perf_event_mutex usage from
		 * perf_event_task_{en,dis}able().
		 *
		 * Specifically, inherited events will never occur on
		 * child->perf_event_mutex.
		 */
		mutex_lock_nested(tsk_mtx, SINGLE_DEPTH_NESTING);
		percpu_down_read(&bp_cpuinfo_sem);
	} else {
		percpu_down_write(&bp_cpuinfo_sem);
	}

	return tsk_mtx;
}
static void bp_constraints_unlock(struct mutex *tsk_mtx)
{
	if (tsk_mtx) {
		percpu_up_read(&bp_cpuinfo_sem);
		mutex_unlock(tsk_mtx);
	} else {
		percpu_up_write(&bp_cpuinfo_sem);
	}
}
static bool bp_constraints_is_locked(struct perf_event *bp)
{
	struct mutex *tsk_mtx = get_task_bps_mutex(bp);

	return percpu_is_write_locked(&bp_cpuinfo_sem) ||
	       (tsk_mtx ? mutex_is_locked(tsk_mtx) :
			  percpu_is_read_locked(&bp_cpuinfo_sem));
}
static inline void assert_bp_constraints_lock_held(struct perf_event *bp)
{
	struct mutex *tsk_mtx = get_task_bps_mutex(bp);

	if (tsk_mtx)
		lockdep_assert_held(tsk_mtx);
	lockdep_assert_held(&bp_cpuinfo_sem);
}
#ifdef hw_breakpoint_slots
/*
 * Number of breakpoint slots is constant, and the same for all types.
 */
static_assert(hw_breakpoint_slots(TYPE_INST) == hw_breakpoint_slots(TYPE_DATA));
static inline int hw_breakpoint_slots_cached(int type)	{ return hw_breakpoint_slots(type); }
static inline int init_breakpoint_slots(void)		{ return 0; }
#else
/*
 * Dynamic number of breakpoint slots.
 */
static int __nr_bp_slots[TYPE_MAX] __ro_after_init;

static inline int hw_breakpoint_slots_cached(int type)
{
	return __nr_bp_slots[type];
}
static __init bool
bp_slots_histogram_alloc(struct bp_slots_histogram *hist, enum bp_type_idx type)
{
	hist->count = kcalloc(hw_breakpoint_slots_cached(type), sizeof(*hist->count), GFP_KERNEL);
	return hist->count;
}

static __init void bp_slots_histogram_free(struct bp_slots_histogram *hist)
{
	kfree(hist->count);
}
static __init int init_breakpoint_slots(void)
{
	int i, cpu, err_cpu;

	for (i = 0; i < TYPE_MAX; i++)
		__nr_bp_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			struct bp_cpuinfo *info = get_bp_info(cpu, i);

			if (!bp_slots_histogram_alloc(&info->tsk_pinned, i))
				goto err;
		}
	}
	for (i = 0; i < TYPE_MAX; i++) {
		if (!bp_slots_histogram_alloc(&cpu_pinned[i], i))
			goto err;
		if (!bp_slots_histogram_alloc(&tsk_pinned_all[i], i))
			goto err;
	}

	return 0;
err:
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			bp_slots_histogram_free(&get_bp_info(err_cpu, i)->tsk_pinned);
		if (err_cpu == cpu)
			break;
	}
	for (i = 0; i < TYPE_MAX; i++) {
		bp_slots_histogram_free(&cpu_pinned[i]);
		bp_slots_histogram_free(&tsk_pinned_all[i]);
	}

	return -ENOMEM;
}
#endif
static inline void
bp_slots_histogram_add(struct bp_slots_histogram *hist, int old, int val)
{
	const int old_idx = old - 1;
	const int new_idx = old_idx + val;

	if (old_idx >= 0)
		WARN_ON(atomic_dec_return_relaxed(&hist->count[old_idx]) < 0);
	if (new_idx >= 0)
		WARN_ON(atomic_inc_return_relaxed(&hist->count[new_idx]) < 0);
}
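/*
 * Illustrative example (not from the original source): a task with 2 pinned
 * breakpoints on a CPU gains a third; bp_slots_histogram_add(hist, 2, 1)
 * decrements count[1] (one fewer user of exactly 2 slots) and increments
 * count[2] (one more user of exactly 3 slots). If old == 0 or the resulting
 * index is negative, the corresponding bucket update is skipped.
 */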
static int
bp_slots_histogram_max(struct bp_slots_histogram *hist, enum bp_type_idx type)
{
	for (int i = hw_breakpoint_slots_cached(type) - 1; i >= 0; i--) {
		const int count = atomic_read(&hist->count[i]);

		/* Catch unexpected writers; we want a stable snapshot. */
		ASSERT_EXCLUSIVE_WRITER(hist->count[i]);
		if (count > 0)
			return i + 1;
		WARN(count < 0, "inconsistent breakpoint slots histogram");
	}

	return 0;
}
static int
bp_slots_histogram_max_merge(struct bp_slots_histogram *hist1, struct bp_slots_histogram *hist2,
			     enum bp_type_idx type)
{
	for (int i = hw_breakpoint_slots_cached(type) - 1; i >= 0; i--) {
		const int count1 = atomic_read(&hist1->count[i]);
		const int count2 = atomic_read(&hist2->count[i]);

		/* Catch unexpected writers; we want a stable snapshot. */
		ASSERT_EXCLUSIVE_WRITER(hist1->count[i]);
		ASSERT_EXCLUSIVE_WRITER(hist2->count[i]);
		if (count1 + count2 > 0)
			return i + 1;
		WARN(count1 < 0, "inconsistent breakpoint slots histogram");
		WARN(count2 < 0, "inconsistent breakpoint slots histogram");
	}

	return 0;
}
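/*
 * Illustrative example (not from the original source): with 4 slots and
 * hist1 = {count[0] == 2}, hist2 = {count[2] == 1},
 * bp_slots_histogram_max_merge() scans from the top and returns 3
 * (index 2 + 1): the largest number of slots any single user needs across
 * both histograms, not the sum of the two maxima.
 */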
#ifndef hw_breakpoint_weight
static inline int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}
#endif

static inline enum bp_type_idx find_slot_idx(u64 bp_type)
{
	if (bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}
/*
 * Return the maximum number of pinned breakpoints a task has on this CPU.
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	struct bp_slots_histogram *tsk_pinned = &get_bp_info(cpu, type)->tsk_pinned;

	/*
	 * At this point we want to have acquired the bp_cpuinfo_sem as a
	 * writer to ensure that there are no concurrent writers in
	 * toggle_bp_slot() to tsk_pinned, and we get a stable snapshot.
	 */
	lockdep_assert_held_write(&bp_cpuinfo_sem);
	return bp_slots_histogram_max_merge(tsk_pinned, &tsk_pinned_all[type], type);
}
/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 *
 * If @cpu is -1, but the result of task_bp_pinned() is not CPU-independent,
 * returns a negative value.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
	struct rhlist_head *head, *pos;
	struct perf_event *iter;
	int count = 0;

	/*
	 * We need a stable snapshot of the per-task breakpoint list.
	 */
	assert_bp_constraints_lock_held(bp);

	rcu_read_lock();
	head = rhltable_lookup(&task_bps_ht, &bp->hw.target, task_bps_ht_params);
	if (!head)
		goto out;

	rhl_for_each_entry_rcu(iter, pos, head, hw.bp_list) {
		if (find_slot_idx(iter->attr.bp_type) != type)
			continue;

		if (iter->cpu >= 0) {
			if (cpu == -1) {
				count = -1;
				goto out;
			} else if (cpu != iter->cpu)
				continue;
		}

		count += hw_breakpoint_weight(iter);
	}

out:
	rcu_read_unlock();
	return count;
}
static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{
	if (bp->cpu >= 0)
		return cpumask_of(bp->cpu);
	return cpu_possible_mask;
}
/*
 * Returns the max pinned breakpoint slots in a given
 * CPU (cpu > -1) or across all of them (cpu = -1).
 */
static int
max_bp_pinned_slots(struct perf_event *bp, enum bp_type_idx type)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int pinned_slots = 0;
	int cpu;

	if (bp->hw.target && bp->cpu < 0) {
		int max_pinned = task_bp_pinned(-1, bp, type);

		if (max_pinned >= 0) {
			/*
			 * Fast path: task_bp_pinned() is CPU-independent and
			 * returns the same value for any CPU.
			 */
			max_pinned += bp_slots_histogram_max(&cpu_pinned[type], type);
			return max_pinned;
		}
	}

	for_each_cpu(cpu, cpumask) {
		struct bp_cpuinfo *info = get_bp_info(cpu, type);
		int nr;

		nr = info->cpu_pinned;
		if (!bp->hw.target)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(cpu, bp, type);

		pinned_slots = max(nr, pinned_slots);
	}

	return pinned_slots;
}
/*
 * Add/remove the given breakpoint in our constraint table
 */
static int
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, int weight)
{
	int cpu, next_tsk_pinned;

	if (!enable)
		weight = -weight;

	if (!bp->hw.target) {
		/*
		 * Update the pinned CPU slots, in per-CPU bp_cpuinfo and in the
		 * global histogram.
		 */
		struct bp_cpuinfo *info = get_bp_info(bp->cpu, type);

		lockdep_assert_held_write(&bp_cpuinfo_sem);
		bp_slots_histogram_add(&cpu_pinned[type], info->cpu_pinned, weight);
		info->cpu_pinned += weight;
		return 0;
	}
	/*
	 * If bp->hw.target, tsk_pinned is only modified, but not used
	 * otherwise. We can permit concurrent updates as long as there are no
	 * other uses: having acquired bp_cpuinfo_sem as a reader allows
	 * concurrent updates here. Uses of tsk_pinned will require acquiring
	 * bp_cpuinfo_sem as a writer to stabilize tsk_pinned's value.
	 */
	lockdep_assert_held_read(&bp_cpuinfo_sem);
	/*
	 * Update the pinned task slots, in per-CPU bp_cpuinfo and in the global
	 * histogram. We need to take care of 4 cases:
	 *
	 *  1. This breakpoint targets all CPUs (cpu < 0), and there may only
	 *     exist other task breakpoints targeting all CPUs. In this case we
	 *     can simply update the global slots histogram.
	 *
	 *  2. This breakpoint targets a specific CPU (cpu >= 0), but there may
	 *     only exist other task breakpoints targeting all CPUs.
	 *
	 *	a. On enable: remove the existing breakpoints from the global
	 *	   slots histogram and use the per-CPU histogram.
	 *
	 *	b. On disable: re-insert the existing breakpoints into the global
	 *	   slots histogram and remove from the per-CPU histogram.
	 *
	 *  3. Some other existing task breakpoints target specific CPUs. Only
	 *     update the per-CPU slots histogram.
	 */
	if (!enable) {
		/*
		 * Remove before updating histograms so we can determine if this
		 * was the last task breakpoint for a specific CPU.
		 */
		int ret = rhltable_remove(&task_bps_ht, &bp->hw.bp_list, task_bps_ht_params);

		if (ret)
			return ret;
	}

	/*
	 * Note: If !enable, next_tsk_pinned will not count the to-be-removed breakpoint.
	 */
	next_tsk_pinned = task_bp_pinned(-1, bp, type);

	if (next_tsk_pinned >= 0) {
		if (bp->cpu < 0) { /* Case 1: fast path */
			if (!enable)
				next_tsk_pinned += hw_breakpoint_weight(bp);
			bp_slots_histogram_add(&tsk_pinned_all[type], next_tsk_pinned, weight);
		} else if (enable) { /* Case 2.a: slow path */
			/* Add existing to per-CPU histograms. */
			for_each_possible_cpu(cpu) {
				bp_slots_histogram_add(&get_bp_info(cpu, type)->tsk_pinned,
						       0, next_tsk_pinned);
			}
			/* Add this first CPU-pinned task breakpoint. */
			bp_slots_histogram_add(&get_bp_info(bp->cpu, type)->tsk_pinned,
					       next_tsk_pinned, weight);
			/* Rebalance global task pinned histogram. */
			bp_slots_histogram_add(&tsk_pinned_all[type], next_tsk_pinned,
					       -next_tsk_pinned);
		} else { /* Case 2.b: slow path */
			/* Remove this last CPU-pinned task breakpoint. */
			bp_slots_histogram_add(&get_bp_info(bp->cpu, type)->tsk_pinned,
					       next_tsk_pinned + hw_breakpoint_weight(bp), weight);
			/* Remove all from per-CPU histograms. */
			for_each_possible_cpu(cpu) {
				bp_slots_histogram_add(&get_bp_info(cpu, type)->tsk_pinned,
						       next_tsk_pinned, -next_tsk_pinned);
			}
			/* Rebalance global task pinned histogram. */
			bp_slots_histogram_add(&tsk_pinned_all[type], 0, next_tsk_pinned);
		}
	} else { /* Case 3: slow path */
		const struct cpumask *cpumask = cpumask_of_bp(bp);

		for_each_cpu(cpu, cpumask) {
			next_tsk_pinned = task_bp_pinned(cpu, bp, type);
			if (!enable)
				next_tsk_pinned += hw_breakpoint_weight(bp);
			bp_slots_histogram_add(&get_bp_info(cpu, type)->tsk_pinned,
					       next_tsk_pinned, weight);
		}
	}

	/*
	 * Readers want a stable snapshot of the per-task breakpoint list.
	 */
	assert_bp_constraints_lock_held(bp);

	if (enable)
		return rhltable_insert(&task_bps_ht, &bp->hw.bp_list, task_bps_ht_params);

	return 0;
}
__weak int arch_reserve_bp_slot(struct perf_event *bp)
{
	return 0;
}

__weak void arch_release_bp_slot(struct perf_event *bp)
{
}

/*
 * Function to perform processor-specific cleanup during unregistration.
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}
/*
 * Constraints to check before allowing this new breakpoint counter.
 *
 * Note: Flexible breakpoints are currently unimplemented, but outlined in the
 *       below algorithm for completeness. The implementation treats flexible
 *       as pinned because there is no guarantee that flexible events are
 *       always scheduled before pinned events on the same CPU.
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
 *           + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per-task
 *          breakpoints (for this cpu) plus the number of per-cpu breakpoints
 *          (for this cpu) doesn't use up all the registers.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
 *           + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per-cpu
 *          breakpoints for every cpu and we keep the max one. Same for the
 *          per-task breakpoints.
 *
 *  == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
 *            + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the info->flexible, if any, must
 *          keep at least one register free (or the flexible counters will
 *          never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
 *            + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
 */
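/*
 * Illustrative example of the pinned check (not from the original source):
 * with HBP_NUM == 4 slots of a given type, a CPU that already has 1
 * CPU-pinned breakpoint while some task holds at most 2 pinned breakpoints
 * can still accept a new breakpoint of weight 1, since 1 + 2 + 1 <= 4. If
 * the per-task maximum were 3 instead, __reserve_bp_slot() below would
 * return -ENOSPC, since 1 + 3 + 1 > 4.
 */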
static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
{
	enum bp_type_idx type;
	int max_pinned_slots;
	int weight;
	int ret;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp_type == HW_BREAKPOINT_EMPTY ||
	    bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp_type);
	weight = hw_breakpoint_weight(bp);

	/* Check if this new breakpoint can be satisfied across all CPUs. */
	max_pinned_slots = max_bp_pinned_slots(bp, type) + weight;
	if (max_pinned_slots > hw_breakpoint_slots_cached(type))
		return -ENOSPC;

	ret = arch_reserve_bp_slot(bp);
	if (ret)
		return ret;

	return toggle_bp_slot(bp, true, type, weight);
}
int reserve_bp_slot(struct perf_event *bp)
{
	struct mutex *mtx = bp_constraints_lock(bp);
	int ret = __reserve_bp_slot(bp, bp->attr.bp_type);

	bp_constraints_unlock(mtx);
	return ret;
}
static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
{
	enum bp_type_idx type;
	int weight;

	arch_release_bp_slot(bp);

	type = find_slot_idx(bp_type);
	weight = hw_breakpoint_weight(bp);
	WARN_ON(toggle_bp_slot(bp, false, type, weight));
}
void release_bp_slot(struct perf_event *bp)
{
	struct mutex *mtx = bp_constraints_lock(bp);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp, bp->attr.bp_type);
	bp_constraints_unlock(mtx);
}
static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
	int err;

	__release_bp_slot(bp, old_type);
	err = __reserve_bp_slot(bp, new_type);
	if (err) {
		/*
		 * Reserve the old_type slot back in case
		 * there's no space for the new type.
		 *
		 * This must succeed, because we just released
		 * the old_type slot in the __release_bp_slot
		 * call above. If not, something is broken.
		 */
		WARN_ON(__reserve_bp_slot(bp, old_type));
	}

	return err;
}
static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
	struct mutex *mtx = bp_constraints_lock(bp);
	int ret = __modify_bp_slot(bp, old_type, new_type);

	bp_constraints_unlock(mtx);
	return ret;
}
/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock using the dbg_* variants of the reserve and
 * release functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	if (bp_constraints_is_locked(bp))
		return -1;

	/* Locks aren't held; disable lockdep assert checking. */
	lockdep_off();
	ret = __reserve_bp_slot(bp, bp->attr.bp_type);
	lockdep_on();

	return ret;
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (bp_constraints_is_locked(bp))
		return -1;

	/* Locks aren't held; disable lockdep assert checking. */
	lockdep_off();
	__release_bp_slot(bp, bp->attr.bp_type);
	lockdep_on();

	return 0;
}
static int hw_breakpoint_parse(struct perf_event *bp,
			       const struct perf_event_attr *attr,
			       struct arch_hw_breakpoint *hw)
{
	int err;

	err = hw_breakpoint_arch_parse(bp, attr, hw);
	if (err)
		return err;

	if (arch_check_bp_in_kernelspace(hw)) {
		if (attr->exclude_kernel)
			return -EINVAL;

		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}
int register_perf_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint hw = { };
	int err;

	err = reserve_bp_slot(bp);
	if (err)
		return err;

	err = hw_breakpoint_parse(bp, &bp->attr, &hw);
	if (err) {
		release_bp_slot(bp);
		return err;
	}

	bp->hw.info = hw;

	return 0;
}
/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data that can be used in the triggered callback
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
						context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
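/*
 * Usage sketch (illustrative, not part of the original source; assumes a
 * caller with a valid @tsk and a perf_overflow_handler_t my_handler):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr;			// address to watch
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;	// watch 4 bytes
 *	attr.bp_type = HW_BREAKPOINT_W;		// trigger on writes
 *	bp = register_user_hw_breakpoint(&attr, my_handler, NULL, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */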
static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
				    struct perf_event_attr *from)
{
	to->bp_addr = from->bp_addr;
	to->bp_type = from->bp_type;
	to->bp_len  = from->bp_len;
	to->disabled = from->disabled;
}
int
modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
				bool check)
{
	struct arch_hw_breakpoint hw = { };
	int err;

	err = hw_breakpoint_parse(bp, attr, &hw);
	if (err)
		return err;

	if (check) {
		struct perf_event_attr old_attr;

		old_attr = bp->attr;
		hw_breakpoint_copy_attr(&old_attr, attr);
		if (memcmp(&old_attr, attr, sizeof(*attr)))
			return -EINVAL;
	}

	if (bp->attr.bp_type != attr->bp_type) {
		err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);
		if (err)
			return err;
	}

	hw_breakpoint_copy_attr(&bp->attr, attr);
	bp->hw.info = hw;

	return 0;
}
/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	int err;

	/*
	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
	 * will not be possible to raise IPIs that invoke __perf_event_disable.
	 * So call the function directly after making sure we are targeting the
	 * current task.
	 */
	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
		perf_event_disable_local(bp);
	else
		perf_event_disable(bp);

	err = modify_user_hw_breakpoint_check(bp, attr, false);

	if (!bp->attr.disabled)
		perf_event_enable(bp);

	return err;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
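/*
 * Usage sketch (illustrative, not part of the original source): to move an
 * existing user breakpoint to a new address, copy its attributes, change
 * bp_addr, and let the call above re-validate and re-install it:
 *
 *	struct perf_event_attr attr = bp->attr;
 *
 *	attr.bp_addr = new_addr;
 *	err = modify_user_hw_breakpoint(bp, &attr);
 */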
/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;

	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data that can be used in the triggered callback
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
{
	struct perf_event * __percpu *cpu_events, *bp;
	long err = 0;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		bp = perf_event_create_kernel_counter(attr, cpu, NULL,
						      triggered, context);
		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			break;
		}

		per_cpu(*cpu_events, cpu) = bp;
	}
	cpus_read_unlock();

	if (likely(!err))
		return cpu_events;

	unregister_wide_hw_breakpoint(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
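/*
 * Usage sketch (illustrative, modeled on samples/hw_breakpoint; the watched
 * symbol and handler are assumptions): watch kernel-wide writes to a symbol
 * on every online CPU:
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wbp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr_of_watched_symbol;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *	wbp = register_wide_hw_breakpoint(&attr, my_handler, NULL);
 *	if (IS_ERR((void __force *)wbp))
 *		return PTR_ERR((void __force *)wbp);
 */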
/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;

	for_each_possible_cpu(cpu)
		unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));

	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
/**
 * hw_breakpoint_is_used - check if breakpoints are currently used
 *
 * Returns: true if breakpoints are used, false otherwise.
 */
bool hw_breakpoint_is_used(void)
{
	int cpu;

	if (!constraints_initialized)
		return false;

	for_each_possible_cpu(cpu) {
		for (int type = 0; type < TYPE_MAX; ++type) {
			struct bp_cpuinfo *info = get_bp_info(cpu, type);

			if (info->cpu_pinned)
				return true;

			for (int slot = 0; slot < hw_breakpoint_slots_cached(type); ++slot) {
				if (atomic_read(&info->tsk_pinned.count[slot]))
					return true;
			}
		}
	}

	for (int type = 0; type < TYPE_MAX; ++type) {
		for (int slot = 0; slot < hw_breakpoint_slots_cached(type); ++slot) {
			/*
			 * Warn, because if there are CPU pinned counters, we
			 * should never get here; bp_cpuinfo::cpu_pinned should
			 * be consistent with the global cpu_pinned histogram.
			 */
			if (WARN_ON(atomic_read(&cpu_pinned[type].count[slot])))
				return true;

			if (atomic_read(&tsk_pinned_all[type].count[slot]))
				return true;
		}
	}

	return false;
}
static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};
static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}
static int hw_breakpoint_event_init(struct perf_event *bp)
{
	int err;

	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
		return -ENOENT;

	/*
	 * no branch sampling for breakpoint events
	 */
	if (has_branch_stack(bp))
		return -EOPNOTSUPP;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return err;

	bp->destroy = bp_perf_event_destroy;

	return 0;
}
static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
	if (!(flags & PERF_EF_START))
		bp->hw.state = PERF_HES_STOPPED;

	if (is_sampling_event(bp)) {
		bp->hw.last_period = bp->hw.sample_period;
		perf_swevent_set_period(bp);
	}

	return arch_install_hw_breakpoint(bp);
}
static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
	arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
	bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
	bp->hw.state = PERF_HES_STOPPED;
}
static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,
};
int __init init_hw_breakpoint(void)
{
	int ret;

	ret = rhltable_init(&task_bps_ht, &task_bps_ht_params);
	if (ret)
		return ret;

	ret = init_breakpoint_slots();
	if (ret)
		return ret;

	constraints_initialized = true;

	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);
}