// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN core runtime.
 *
 * Copyright (C) 2019, Google LLC.
 */

#define pr_fmt(fmt) "kcsan: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include "encoding.h"
#include "kcsan.h"
#include "permissive.h"

static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kcsan."
module_param_named(early_enable, kcsan_early_enable, bool, 0);
module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);

bool kcsan_enabled;

/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
	.disable_count		= 0,
	.atomic_next		= 0,
	.atomic_nest_count	= 0,
	.in_flat_atomic		= false,
	.access_mask		= 0,
	.scoped_accesses	= {LIST_POISON1, NULL},
};

/*
 * Helper macros to index into adjacent slots, starting from the address slot
 * itself, followed by the right and left slots.
 *
 * The purpose is 2-fold:
 *
 * 1. if during insertion the address slot is already occupied, check if
 *    any adjacent slots are free;
 * 2. accesses that straddle a slot boundary due to size that exceeds a
 *    slot's range may check adjacent slots if any watchpoint matches.
 *
 * Note that accesses with very large size may still miss a watchpoint; however,
 * given this should be rare, this is a reasonable trade-off to make, since this
 * will avoid:
 *
 * 1. excessive contention between watchpoint checks and setup;
 * 2. the need for a larger number of simultaneous watchpoints, without
 *    sacrificing performance.
 *
 * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
 *
 *   slot=0:  [ 1,  2,  0]
 *   slot=9:  [10, 11,  9]
 *   slot=63: [64, 65, 63]
 */
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))

/*
 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's
 * primary slot (middle) is fine if we assume that races occur rarely. The set
 * of indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
 * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
 */
#define SLOT_IDX_FAST(slot, i) (slot + i)
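
/*
 * A minimal user-space sketch (not part of the kernel build) that checks the
 * set-equivalence claim above; NUM_SLOTS is assumed to be defined as in
 * kcsan.h, i.e. 1 + 2*KCSAN_CHECK_ADJACENT:
 *
 *	#include <assert.h>
 *	#include <stdbool.h>
 *
 *	#define KCSAN_CHECK_ADJACENT 1
 *	#define NUM_SLOTS (1 + 2 * KCSAN_CHECK_ADJACENT)
 *	#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))
 *	#define SLOT_IDX_FAST(slot, i) (slot + i)
 *
 *	int main(void)
 *	{
 *		for (int slot = 0; slot < 64; ++slot) {
 *			bool seen[NUM_SLOTS] = { false };
 *
 *			for (int i = 0; i < NUM_SLOTS; ++i)
 *				seen[SLOT_IDX(slot, i) - SLOT_IDX_FAST(slot, 0)] = true;
 *			for (int i = 0; i < NUM_SLOTS; ++i)
 *				assert(seen[i]);
 *		}
 *		return 0;
 *	}
 */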

/*
 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
 * able to safely update and access a watchpoint without introducing locking
 * overhead, we encode each watchpoint as a single atomic long. The initial
 * zero-initialized state matches INVALID_WATCHPOINT.
 *
 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
 * use a more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
 */
static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];

/*
 * Per-CPU counter of instructions to skip before watching again; used in
 * should_watch(). A per-CPU counter avoids excessive contention.
 */
static DEFINE_PER_CPU(long, kcsan_skip);

/* For kcsan_prandom_u32_max(). */
static DEFINE_PER_CPU(u32, kcsan_rand_state);

static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
						      size_t size,
						      bool expect_write,
						      long *encoded_watchpoint)
{
	const int slot = watchpoint_slot(addr);
	const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
	atomic_long_t *watchpoint;
	unsigned long wp_addr_masked;
	size_t wp_size;
	bool is_write;
	int i;

	BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
		*encoded_watchpoint = atomic_long_read(watchpoint);
		if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
				       &wp_size, &is_write))
			continue;

		/* Reads only conflict with write watchpoints. */
		if (expect_write && !is_write)
			continue;

		/* Check if the watchpoint matches the access. */
		if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
			return watchpoint;
	}

	return NULL;
}

static inline atomic_long_t *
insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{
	const int slot = watchpoint_slot(addr);
	const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
	atomic_long_t *watchpoint;
	int i;

	/* Check slot index logic, ensuring we stay within array bounds. */
	BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
	BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		long expect_val = INVALID_WATCHPOINT;

		/* Try to acquire this slot. */
		watchpoint = &watchpoints[SLOT_IDX(slot, i)];
		if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
			return watchpoint;
	}

	return NULL;
}

/*
 * Return true if watchpoint was successfully consumed, false otherwise.
 *
 * This may return false if:
 *
 *	1. another thread already consumed the watchpoint;
 *	2. the thread that set up the watchpoint already removed it;
 *	3. the watchpoint was removed and then re-used.
 */
static __always_inline bool
try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{
	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
}

/* Return true if watchpoint was not touched, false if already consumed. */
static inline bool consume_watchpoint(atomic_long_t *watchpoint)
{
	return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT;
}

/* Remove the watchpoint -- its slot may be reused after. */
static inline void remove_watchpoint(atomic_long_t *watchpoint)
{
	atomic_long_set(watchpoint, INVALID_WATCHPOINT);
}
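
/*
 * The lifecycle of a watchpoint slot, as a minimal user-space sketch using
 * C11 atomics; the sentinel values below are illustrative stand-ins for the
 * definitions in encoding.h, though INVALID_WATCHPOINT being the zero value
 * is what lets the zero-initialized array start with every slot free:
 *
 *	#include <assert.h>
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	#define INVALID_WATCHPOINT  0L
 *	#define CONSUMED_WATCHPOINT 1L
 *
 *	static atomic_long slot = INVALID_WATCHPOINT;
 *
 *	static bool insert(long encoded)
 *	{
 *		long expect = INVALID_WATCHPOINT;
 *		return atomic_compare_exchange_strong(&slot, &expect, encoded);
 *	}
 *
 *	static bool try_consume(long encoded)
 *	{
 *		return atomic_compare_exchange_strong(&slot, &encoded, CONSUMED_WATCHPOINT);
 *	}
 *
 *	int main(void)
 *	{
 *		assert(insert(42));
 *		assert(!insert(43));
 *		assert(try_consume(42));
 *		assert(!try_consume(42));
 *		atomic_store(&slot, INVALID_WATCHPOINT);
 *		return 0;
 *	}
 */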

static __always_inline struct kcsan_ctx *get_ctx(void)
{
	/*
	 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks that could
	 * also result in calls that generate warnings in uaccess regions.
	 */
	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}

static __always_inline void
check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);

/* Check scoped accesses; never inline because this is a slow-path! */
static noinline void kcsan_check_scoped_accesses(void)
{
	struct kcsan_ctx *ctx = get_ctx();
	struct list_head *prev_save = ctx->scoped_accesses.prev;
	struct kcsan_scoped_access *scoped_access;

	ctx->scoped_accesses.prev = NULL;  /* Avoid recursion. */
	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) {
		check_access(scoped_access->ptr, scoped_access->size,
			     scoped_access->type, scoped_access->ip);
	}
	ctx->scoped_accesses.prev = prev_save;
}

/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
{
	if (type & KCSAN_ACCESS_ATOMIC)
		return true;

	/*
	 * Unless explicitly declared atomic, never consider an assertion access
	 * as atomic. This allows using them also in atomic regions, such as
	 * seqlocks, without implicitly changing their semantics.
	 */
	if (type & KCSAN_ACCESS_ASSERT)
		return false;

	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
	    (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
	    !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))
		return true; /* Assume aligned writes up to word size are atomic. */

	if (ctx->atomic_next > 0) {
		/*
		 * Because we do not have separate contexts for nested
		 * interrupts, in case atomic_next is set, we simply assume that
		 * the outer interrupt set atomic_next. In the worst case, we
		 * will conservatively consider operations as atomic. This is a
		 * reasonable trade-off to make, since this case should be
		 * extremely rare; however, even if extremely rare, it could
		 * lead to false positives otherwise.
		 */
		if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
			--ctx->atomic_next; /* in task, or outer interrupt */
		return true;
	}

	return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
}
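
/*
 * Example of how the rules above classify accesses, assuming
 * CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y, an aligned "unsigned long x",
 * and no enclosing atomic region (a sketch, not real callers):
 *
 *	x = 1;		aligned word-sized plain write	-> atomic
 *	x++;		compound read-write		-> not atomic
 *	READ_ONCE(x);	marked access			-> atomic
 */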

static __always_inline bool
should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
{
	/*
	 * Never set up watchpoints when memory operations are atomic.
	 *
	 * Need to check this first, before kcsan_skip check below: (1) atomics
	 * should not count towards skipped instructions, and (2) to actually
	 * decrement kcsan_atomic_next for consecutive instruction stream.
	 */
	if (is_atomic(ctx, ptr, size, type))
		return false;

	if (this_cpu_dec_return(kcsan_skip) >= 0)
		return false;

	/*
	 * NOTE: If we get here, kcsan_skip must always be reset in slow path
	 * via reset_kcsan_skip() to avoid underflow.
	 */

	/* this operation should be watched */
	return true;
}

/*
 * Returns a pseudo-random number in interval [0, ep_ro). Simple linear
 * congruential generator, using constants from "Numerical Recipes".
 */
static u32 kcsan_prandom_u32_max(u32 ep_ro)
{
	u32 state = this_cpu_read(kcsan_rand_state);

	state = 1664525 * state + 1013904223;
	this_cpu_write(kcsan_rand_state, state);

	return state % ep_ro;
}
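
/*
 * A user-space sketch of the same generator, illustrating the recurrence; the
 * seed here is arbitrary (the kernel seeds the per-CPU state from get_cycles()
 * in kcsan_init()):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static uint32_t state = 1;
 *
 *	static uint32_t prandom_u32_max(uint32_t ep_ro)
 *	{
 *		state = 1664525 * state + 1013904223;
 *		return state % ep_ro;
 *	}
 *
 *	int main(void)
 *	{
 *		for (int i = 0; i < 4; ++i)
 *			printf("%u\n", prandom_u32_max(80));
 *		return 0;
 *	}
 *
 * The slight bias of the final "% ep_ro" is acceptable here, since the result
 * only randomizes delays and skip counts.
 */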

/* Resets the kcsan_skip counter for the current CPU. */
static inline void reset_kcsan_skip(void)
{
	long skip_count = kcsan_skip_watch -
			  (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
				   kcsan_prandom_u32_max(kcsan_skip_watch) :
				   0);
	this_cpu_write(kcsan_skip, skip_count);
}

static __always_inline bool kcsan_is_enabled(struct kcsan_ctx *ctx)
{
	return READ_ONCE(kcsan_enabled) && !ctx->disable_count;
}

/* Introduce delay depending on context and configuration. */
static void delay_access(int type)
{
	unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
	/* For certain access types, skew the random delay to be longer. */
	unsigned int skew_delay_order =
		(type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;

	delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
		       kcsan_prandom_u32_max(delay >> skew_delay_order) :
		       0;
	udelay(delay);
}
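
/*
 * Example of the resulting delay ranges, assuming the Kconfig default
 * CONFIG_KCSAN_UDELAY_TASK=80 and CONFIG_KCSAN_DELAY_RANDOMIZE=y:
 *
 *	plain access:		delay = 80 - prandom([0, 80))  ->  (0, 80] us
 *	compound/assert:	delay = 80 - prandom([0, 40))  ->  (40, 80] us
 *
 * i.e. the skew keeps the delay for compound and ASSERT accesses at least
 * half the maximum, increasing the chance of observing the racing access.
 */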

void kcsan_save_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	task->kcsan_save_irqtrace = task->irqtrace;
#endif
}

void kcsan_restore_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	task->irqtrace = task->kcsan_save_irqtrace;
#endif
}

/*
 * Pull everything together: check_access() below contains the performance
 * critical operations; the fast-path (including check_access) functions should
 * all be inlinable by the instrumentation functions.
 *
 * The slow-path functions (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
 * non-inlinable; we prefix them with "kcsan_" to ensure they can be filtered
 * from stacktraces, and to give them unique names for objtool's UACCESS
 * whitelist. Each function uses user_access_save/restore(): they do not access
 * any user memory themselves, but instrumentation is still emitted in UACCESS
 * regions.
 */

static noinline void kcsan_found_watchpoint(const volatile void *ptr,
					    size_t size,
					    int type,
					    unsigned long ip,
					    atomic_long_t *watchpoint,
					    long encoded_watchpoint)
{
	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
	struct kcsan_ctx *ctx = get_ctx();
	unsigned long flags;
	bool consumed;

	/*
	 * We know a watchpoint exists. Let's try to keep the race-window
	 * between here and finally consuming the watchpoint below as small as
	 * possible -- avoid unnecessarily complex code until consumed.
	 */

	if (!kcsan_is_enabled(ctx))
		return;

	/*
	 * The access_mask check relies on value-change comparison. To avoid
	 * reporting a race where e.g. the writer set up the watchpoint, but the
	 * reader has access_mask!=0, we have to ignore the found watchpoint.
	 */
	if (ctx->access_mask)
		return;

	/*
	 * If the other thread does not want to ignore the access, and there was
	 * a value change as a result of this thread's operation, we will still
	 * generate a report of unknown origin.
	 *
	 * Use CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=n to filter.
	 */
	if (!is_assert && kcsan_ignore_address(ptr))
		return;

	/*
	 * Consuming the watchpoint must be guarded by kcsan_is_enabled() to
	 * avoid erroneously triggering reports if the context is disabled.
	 */
	consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);

	/* keep this after try_consume_watchpoint */
	flags = user_access_save();

	if (consumed) {
		kcsan_save_irqtrace(current);
		kcsan_report_set_info(ptr, size, type, ip, watchpoint - watchpoints);
		kcsan_restore_irqtrace(current);
	} else {
		/*
		 * The other thread may not print any diagnostics, as it has
		 * already removed the watchpoint, or another thread consumed
		 * the watchpoint before this thread.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
	}

	if (is_assert)
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
	else
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);

	user_access_restore(flags);
}

static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned long ip)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
	atomic_long_t *watchpoint;
	u64 old, new, diff;
	unsigned long access_mask;
	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
	unsigned long ua_flags = user_access_save();
	struct kcsan_ctx *ctx = get_ctx();
	unsigned long irq_flags = 0;

	/*
	 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
	 * should_watch().
	 */
	reset_kcsan_skip();

	if (!kcsan_is_enabled(ctx))
		goto out;

	/*
	 * Check to-ignore addresses after kcsan_is_enabled(), as we may access
	 * memory that is not yet initialized during early boot.
	 */
	if (!is_assert && kcsan_ignore_address(ptr))
		goto out;

	if (!check_encodable((unsigned long)ptr, size)) {
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_UNENCODABLE_ACCESSES]);
		goto out;
	}

	/*
	 * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
	 * runtime is entered for every memory access, and potentially useful
	 * information is lost if dirtied by KCSAN.
	 */
	kcsan_save_irqtrace(current);
	if (!kcsan_interrupt_watcher)
		local_irq_save(irq_flags);

	watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
	if (watchpoint == NULL) {
		/*
		 * Out of capacity: the size of 'watchpoints', and the frequency
		 * with which should_watch() returns true should be tweaked so
		 * that this case happens very rarely.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_NO_CAPACITY]);
		goto out_unlock;
	}

	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_SETUP_WATCHPOINTS]);
	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);

	/*
	 * Read the current value, to later check and infer a race if the data
	 * was modified via a non-instrumented access, e.g. from a device.
	 */
	old = 0;
	switch (size) {
	case 1:
		old = READ_ONCE(*(const u8 *)ptr);
		break;
	case 2:
		old = READ_ONCE(*(const u16 *)ptr);
		break;
	case 4:
		old = READ_ONCE(*(const u32 *)ptr);
		break;
	case 8:
		old = READ_ONCE(*(const u64 *)ptr);
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	/*
	 * Delay this thread, to increase probability of observing a racy
	 * conflicting access.
	 */
	delay_access(type);

	/*
	 * Re-read value, and check if it is as expected; if not, we infer a
	 * racy access.
	 */
	access_mask = ctx->access_mask;
	new = 0;
	switch (size) {
	case 1:
		new = READ_ONCE(*(const u8 *)ptr);
		break;
	case 2:
		new = READ_ONCE(*(const u16 *)ptr);
		break;
	case 4:
		new = READ_ONCE(*(const u32 *)ptr);
		break;
	case 8:
		new = READ_ONCE(*(const u64 *)ptr);
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	diff = old ^ new;
	if (access_mask)
		diff &= access_mask;

	/*
	 * Check if we observed a value change.
	 *
	 * Also check if the data race should be ignored (the rules depend on
	 * non-zero diff); if it is to be ignored, the below rules for
	 * KCSAN_VALUE_CHANGE_MAYBE apply.
	 */
	if (diff && !kcsan_ignore_data_race(size, type, old, new, diff))
		value_change = KCSAN_VALUE_CHANGE_TRUE;

	/* Check if this access raced with another. */
	if (!consume_watchpoint(watchpoint)) {
		/*
		 * Depending on the access type, map a value_change of MAYBE to
		 * TRUE (always report) or FALSE (never report).
		 */
		if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
			if (access_mask != 0) {
				/*
				 * For access with access_mask, we require a
				 * value-change, as it is likely that races on
				 * ~access_mask bits are expected.
				 */
				value_change = KCSAN_VALUE_CHANGE_FALSE;
			} else if (size > 8 || is_assert) {
				/* Always assume a value-change. */
				value_change = KCSAN_VALUE_CHANGE_TRUE;
			}
		}

		/*
		 * No need to increment 'data_races' counter, as the racing
		 * thread already did.
		 *
		 * Count 'assert_failures' for each failed ASSERT access,
		 * therefore both this thread and the racing thread may
		 * increment this counter.
		 */
		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

		kcsan_report_known_origin(ptr, size, type, ip,
					  value_change, watchpoint - watchpoints,
					  old, new, access_mask);
	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
		/* Inferring a race, since the value should not have changed. */

		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]);
		if (is_assert)
			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert) {
			kcsan_report_unknown_origin(ptr, size, type, ip,
						    old, new, access_mask);
		}
	}

	/*
	 * Remove watchpoint; must be after reporting, since the slot may be
	 * reused after this point.
	 */
	remove_watchpoint(watchpoint);
	atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);

out_unlock:
	if (!kcsan_interrupt_watcher)
		local_irq_restore(irq_flags);
	kcsan_restore_irqtrace(current);
out:
	user_access_restore(ua_flags);
}

static __always_inline void
check_access(const volatile void *ptr, size_t size, int type, unsigned long ip)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	atomic_long_t *watchpoint;
	long encoded_watchpoint;

	/*
	 * Do nothing for 0 sized check; this comparison will be optimized out
	 * for constant sized instrumentation (__tsan_{read,write}N).
	 */
	if (unlikely(size == 0))
		return;

	/*
	 * Avoid user_access_save in fast-path: find_watchpoint is safe without
	 * user_access_save, as the address that ptr points to is only used to
	 * check if a watchpoint exists; ptr is never dereferenced.
	 */
	watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
				     &encoded_watchpoint);
	/*
	 * It is safe to check kcsan_is_enabled() after find_watchpoint in the
	 * slow-path, as long as no state changes that cause a race to be
	 * detected and reported have occurred until kcsan_is_enabled() is
	 * checked.
	 */

	if (unlikely(watchpoint != NULL))
		kcsan_found_watchpoint(ptr, size, type, ip, watchpoint, encoded_watchpoint);
	else {
		struct kcsan_ctx *ctx = get_ctx();	/* Call only once in fast-path. */

		if (unlikely(should_watch(ctx, ptr, size, type)))
			kcsan_setup_watchpoint(ptr, size, type, ip);
		else if (unlikely(ctx->scoped_accesses.prev))
			kcsan_check_scoped_accesses();
	}
}

/* === Public interface ===================================================== */

void __init kcsan_init(void)
{
	int cpu;

	BUG_ON(!in_task());

	for_each_possible_cpu(cpu)
		per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();

	/*
	 * We are in the init task, and no other tasks should be running;
	 * WRITE_ONCE without memory barrier is sufficient.
	 */
	if (kcsan_early_enable) {
		pr_info("enabled early\n");
		WRITE_ONCE(kcsan_enabled, true);
	}

	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) ||
	    IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) ||
	    IS_ENABLED(CONFIG_KCSAN_PERMISSIVE) ||
	    IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {
		pr_warn("non-strict mode configured - use CONFIG_KCSAN_STRICT=y to see all data races\n");
	} else {
		pr_info("strict mode configured\n");
	}
}

/* === Exported interface =================================================== */

void kcsan_disable_current(void)
{
	++get_ctx()->disable_count;
}
EXPORT_SYMBOL(kcsan_disable_current);

void kcsan_enable_current(void)
{
	if (get_ctx()->disable_count-- == 0) {
		/*
		 * Warn if kcsan_enable_current() calls are unbalanced with
		 * kcsan_disable_current() calls, which causes disable_count to
		 * become negative and should not happen.
		 */
		kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_enable_current);

void kcsan_enable_current_nowarn(void)
{
	if (get_ctx()->disable_count-- == 0)
		kcsan_disable_current();
}
EXPORT_SYMBOL(kcsan_enable_current_nowarn);
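
/*
 * Typical usage is a balanced pair around code whose accesses should not be
 * reported (a sketch, not a real caller):
 *
 *	kcsan_disable_current();
 *	...			plain accesses here produce no reports
 *	kcsan_enable_current();
 *
 * The pair nests: disable_count only returns to 0 once every disable has been
 * matched by an enable.
 */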

void kcsan_nestable_atomic_begin(void)
{
	/*
	 * Do *not* check and warn if we are in a flat atomic region: nestable
	 * and flat atomic regions are independent from each other.
	 * See the struct kcsan_ctx comments in include/linux/kcsan.h for
	 * details.
	 */

	++get_ctx()->atomic_nest_count;
}
EXPORT_SYMBOL(kcsan_nestable_atomic_begin);

void kcsan_nestable_atomic_end(void)
{
	if (get_ctx()->atomic_nest_count-- == 0) {
		/*
		 * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
		 * kcsan_nestable_atomic_begin() calls, which causes
		 * atomic_nest_count to become negative and should not happen.
		 */
		kcsan_nestable_atomic_begin(); /* restore to 0 */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_nestable_atomic_end);

void kcsan_flat_atomic_begin(void)
{
	get_ctx()->in_flat_atomic = true;
}
EXPORT_SYMBOL(kcsan_flat_atomic_begin);

void kcsan_flat_atomic_end(void)
{
	get_ctx()->in_flat_atomic = false;
}
EXPORT_SYMBOL(kcsan_flat_atomic_end);

void kcsan_atomic_next(int n)
{
	get_ctx()->atomic_next = n;
}
EXPORT_SYMBOL(kcsan_atomic_next);
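
/*
 * A caller that knows its next n plain accesses are part of a marked protocol
 * can treat them as atomic (a sketch; the seqlock code uses this pattern with
 * KCSAN_SEQLOCK_REGION_MAX):
 *
 *	kcsan_atomic_next(2);
 *	...			the next two plain accesses are not watched
 */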

void kcsan_set_access_mask(unsigned long mask)
{
	get_ctx()->access_mask = mask;
}
EXPORT_SYMBOL(kcsan_set_access_mask);
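
/*
 * The access mask feeds the value-change filter in kcsan_setup_watchpoint():
 * only changes to bits in the mask are reported. This is roughly what
 * ASSERT_EXCLUSIVE_BITS() in include/linux/kcsan-checks.h expands to
 * ("flags" and FLAG_MASK are hypothetical here):
 *
 *	kcsan_set_access_mask(FLAG_MASK);
 *	__kcsan_check_access(&flags, sizeof(flags), KCSAN_ACCESS_ASSERT);
 *	kcsan_set_access_mask(0);
 *
 * so that concurrent updates to the ~FLAG_MASK bits of "flags" stay
 * unreported.
 */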

struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	check_access(ptr, size, type, _RET_IP_);

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	INIT_LIST_HEAD(&sa->list);
	sa->ptr = ptr;
	sa->size = size;
	sa->type = type;
	sa->ip = _RET_IP_;

	if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
		INIT_LIST_HEAD(&ctx->scoped_accesses);
	list_add(&sa->list, &ctx->scoped_accesses);

	ctx->disable_count--;
	return sa;
}
EXPORT_SYMBOL(kcsan_begin_scoped_access);

void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__))
		return;

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	list_del(&sa->list);
	if (list_empty(&ctx->scoped_accesses))
		/*
		 * Ensure we do not enter kcsan_check_scoped_accesses()
		 * slow-path if unnecessary, and avoid requiring list_empty()
		 * in the fast-path (to avoid a READ_ONCE() and potential
		 * uaccess warning).
		 */
		ctx->scoped_accesses.prev = NULL;

	ctx->disable_count--;

	check_access(sa->ptr, sa->size, sa->type, sa->ip);
}
EXPORT_SYMBOL(kcsan_end_scoped_access);
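
/*
 * A scoped access keeps being re-checked on every subsequent runtime entry in
 * this context, via kcsan_check_scoped_accesses(), until ended; this extends
 * the detection window across a whole code region. A sketch of direct usage
 * (the ASSERT_EXCLUSIVE_*_SCOPED() macros in include/linux/kcsan-checks.h are
 * the usual way to get this; "obj" is a hypothetical variable):
 *
 *	struct kcsan_scoped_access sa;
 *
 *	kcsan_begin_scoped_access(&obj, sizeof(obj), KCSAN_ACCESS_ASSERT, &sa);
 *	...			"obj" must not be accessed concurrently here
 *	kcsan_end_scoped_access(&sa);
 */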

void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
	check_access(ptr, size, type, _RET_IP_);
}
EXPORT_SYMBOL(__kcsan_check_access);
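
/*
 * __kcsan_check_access() is the entry point behind the kcsan_check_*() and
 * ASSERT_EXCLUSIVE_*() helpers in include/linux/kcsan-checks.h. For example,
 * the following is roughly what ASSERT_EXCLUSIVE_ACCESS() expands to
 * ("shared" is a hypothetical variable):
 *
 *	__kcsan_check_access(&shared, sizeof(shared),
 *			     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
 *
 * which reports any concurrent access to "shared", marked or plain.
 */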

/*
 * KCSAN uses the same instrumentation that is emitted by supported compilers
 * for ThreadSanitizer (TSAN).
 *
 * When enabled, the compiler emits instrumentation calls (the functions
 * prefixed with "__tsan" below) for all loads and stores that it generated;
 * inline asm is not instrumented.
 *
 * Note that not all supported compiler versions distinguish aligned/unaligned
 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
 * versions to the generic versions, which can handle both.
 */

#define DEFINE_TSAN_READ_WRITE(size)                                           \
	void __tsan_read##size(void *ptr);                                     \
	void __tsan_read##size(void *ptr)                                      \
	{                                                                      \
		check_access(ptr, size, 0, _RET_IP_);                          \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_read##size);                                      \
	void __tsan_unaligned_read##size(void *ptr)                            \
		__alias(__tsan_read##size);                                    \
	EXPORT_SYMBOL(__tsan_unaligned_read##size);                            \
	void __tsan_write##size(void *ptr);                                    \
	void __tsan_write##size(void *ptr)                                     \
	{                                                                      \
		check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_);         \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_write##size);                                     \
	void __tsan_unaligned_write##size(void *ptr)                           \
		__alias(__tsan_write##size);                                   \
	EXPORT_SYMBOL(__tsan_unaligned_write##size);                           \
	void __tsan_read_write##size(void *ptr);                               \
	void __tsan_read_write##size(void *ptr)                                \
	{                                                                      \
		check_access(ptr, size,                                        \
			     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE,       \
			     _RET_IP_);                                        \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_read_write##size);                                \
	void __tsan_unaligned_read_write##size(void *ptr)                      \
		__alias(__tsan_read_write##size);                              \
	EXPORT_SYMBOL(__tsan_unaligned_read_write##size)

DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
DEFINE_TSAN_READ_WRITE(4);
DEFINE_TSAN_READ_WRITE(8);
DEFINE_TSAN_READ_WRITE(16);
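
/*
 * For example, given "x = y;" with two instrumented 8-byte globals, the
 * compiler conceptually rewrites the statement to (a sketch of the emitted
 * calls, not literal compiler output):
 *
 *	__tsan_read8(&y);
 *	tmp = y;
 *	__tsan_write8(&x);
 *	x = tmp;
 */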

void __tsan_read_range(void *ptr, size_t size);
void __tsan_read_range(void *ptr, size_t size)
{
	check_access(ptr, size, 0, _RET_IP_);
}
EXPORT_SYMBOL(__tsan_read_range);

void __tsan_write_range(void *ptr, size_t size);
void __tsan_write_range(void *ptr, size_t size)
{
	check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_);
}
EXPORT_SYMBOL(__tsan_write_range);

/*
 * Use of explicit volatile is generally disallowed [1]; however, volatile is
 * still used in various concurrent contexts, whether in low-level
 * synchronization primitives or for legacy reasons.
 * [1] https://lwn.net/Articles/233479/
 *
 * We only consider volatile accesses atomic if they are aligned and would pass
 * the size-check of compiletime_assert_rwonce_type().
 */
#define DEFINE_TSAN_VOLATILE_READ_WRITE(size)                                  \
	void __tsan_volatile_read##size(void *ptr);                            \
	void __tsan_volatile_read##size(void *ptr)                             \
	{                                                                      \
		const bool is_atomic = size <= sizeof(long long) &&            \
				       IS_ALIGNED((unsigned long)ptr, size);   \
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)      \
			return;                                                \
		check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0,   \
			     _RET_IP_);                                        \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_volatile_read##size);                             \
	void __tsan_unaligned_volatile_read##size(void *ptr)                   \
		__alias(__tsan_volatile_read##size);                           \
	EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size);                   \
	void __tsan_volatile_write##size(void *ptr);                           \
	void __tsan_volatile_write##size(void *ptr)                            \
	{                                                                      \
		const bool is_atomic = size <= sizeof(long long) &&            \
				       IS_ALIGNED((unsigned long)ptr, size);   \
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)      \
			return;                                                \
		check_access(ptr, size,                                        \
			     KCSAN_ACCESS_WRITE |                              \
				     (is_atomic ? KCSAN_ACCESS_ATOMIC : 0),    \
			     _RET_IP_);                                        \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_volatile_write##size);                            \
	void __tsan_unaligned_volatile_write##size(void *ptr)                  \
		__alias(__tsan_volatile_write##size);                          \
	EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)

DEFINE_TSAN_VOLATILE_READ_WRITE(1);
DEFINE_TSAN_VOLATILE_READ_WRITE(2);
DEFINE_TSAN_VOLATILE_READ_WRITE(4);
DEFINE_TSAN_VOLATILE_READ_WRITE(8);
DEFINE_TSAN_VOLATILE_READ_WRITE(16);

/*
 * The below are not required by KCSAN, but can still be emitted by the
 * compiler.
 */
void __tsan_func_entry(void *call_pc);
void __tsan_func_entry(void *call_pc)
{
}
EXPORT_SYMBOL(__tsan_func_entry);

void __tsan_func_exit(void);
void __tsan_func_exit(void)
{
}
EXPORT_SYMBOL(__tsan_func_exit);

void __tsan_init(void);
void __tsan_init(void)
{
}
EXPORT_SYMBOL(__tsan_init);

/*
 * Instrumentation for atomic builtins (__atomic_*, __sync_*).
 *
 * Normal kernel code _should not_ be using them directly, but some
 * architectures may implement some or all atomics using the compilers'
 * builtins.
 *
 * Note: If an architecture decides to fully implement atomics using the
 * builtins, because they are implicitly instrumented by KCSAN (and KASAN,
 * etc.), implementing the ARCH_ATOMIC interface (to get instrumentation via
 * atomic-instrumented) is no longer necessary.
 *
 * TSAN instrumentation replaces atomic accesses with calls to any of the below
 * functions, whose job is to also execute the operation itself.
 */

#define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits)                                    \
	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder);  \
	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder)   \
	{                                                                      \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                \
			check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC, _RET_IP_); \
		}                                                              \
		return __atomic_load_n(ptr, memorder);                         \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_atomic##bits##_load);                             \
	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \
	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \
	{                                                                      \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                \
			check_access(ptr, bits / BITS_PER_BYTE,                \
				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC, _RET_IP_); \
		}                                                              \
		__atomic_store_n(ptr, v, memorder);                            \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_atomic##bits##_store)

#define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix)                               \
	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \
	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \
	{                                                                      \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                \
			check_access(ptr, bits / BITS_PER_BYTE,                \
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
					     KCSAN_ACCESS_ATOMIC, _RET_IP_);   \
		}                                                              \
		return __atomic_##op##suffix(ptr, v, memorder);                \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_atomic##bits##_##op)

/*
 * Note: CAS operations are always classified as write, even in case they
 * fail. We cannot perform check_access() after a write, as it might lead to
 * false positives, in cases such as:
 *
 *	T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
 *
 *	T1: if (__atomic_load_n(&p->flag, ...)) {
 *		modify *p;
 *		p->flag = 0;
 *	    }
 *
 * The only downside is that, if there are 3 threads, with one CAS that
 * succeeds, another CAS that fails, and an unmarked racing operation, we may
 * point at the wrong CAS as the source of the race. However, if we assume that
 * all CAS can succeed in some other execution, the data race is still valid.
 */
#define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak)                       \
	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
							      u##bits val, int mo, int fail_mo); \
	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
							      u##bits val, int mo, int fail_mo) \
	{                                                                      \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                \
			check_access(ptr, bits / BITS_PER_BYTE,                \
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
					     KCSAN_ACCESS_ATOMIC, _RET_IP_);   \
		}                                                              \
		return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_##strength)

#define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)                                   \
	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
							   int mo, int fail_mo); \
	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
							   int mo, int fail_mo) \
	{                                                                      \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                \
			check_access(ptr, bits / BITS_PER_BYTE,                \
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
					     KCSAN_ACCESS_ATOMIC, _RET_IP_);   \
		}                                                              \
		__atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo);   \
		return exp;                                                    \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_val)

#define DEFINE_TSAN_ATOMIC_OPS(bits)                                           \
	DEFINE_TSAN_ATOMIC_LOAD_STORE(bits);                                   \
	DEFINE_TSAN_ATOMIC_RMW(exchange, bits, _n);                            \
	DEFINE_TSAN_ATOMIC_RMW(fetch_add, bits, );                             \
	DEFINE_TSAN_ATOMIC_RMW(fetch_sub, bits, );                             \
	DEFINE_TSAN_ATOMIC_RMW(fetch_and, bits, );                             \
	DEFINE_TSAN_ATOMIC_RMW(fetch_or, bits, );                              \
	DEFINE_TSAN_ATOMIC_RMW(fetch_xor, bits, );                             \
	DEFINE_TSAN_ATOMIC_RMW(fetch_nand, bits, );                            \
	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0);                           \
	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, weak, 1);                             \
	DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)

DEFINE_TSAN_ATOMIC_OPS(8);
DEFINE_TSAN_ATOMIC_OPS(16);
DEFINE_TSAN_ATOMIC_OPS(32);
DEFINE_TSAN_ATOMIC_OPS(64);
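
/*
 * For example, if an architecture implements its atomics with these builtins,
 * a call such as the following (a sketch; "v" is a hypothetical u32 location):
 *
 *	__atomic_fetch_add(&v, 1, __ATOMIC_RELAXED);
 *
 * is compiled under KCSAN to a call to __tsan_atomic32_fetch_add(), which
 * first marks the access via check_access() (compound, write, atomic) and
 * then performs the actual __atomic_fetch_add() itself.
 */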

void __tsan_atomic_thread_fence(int memorder);
void __tsan_atomic_thread_fence(int memorder)
{
	__atomic_thread_fence(memorder);
}
EXPORT_SYMBOL(__tsan_atomic_thread_fence);

void __tsan_atomic_signal_fence(int memorder);
void __tsan_atomic_signal_fence(int memorder) { }
EXPORT_SYMBOL(__tsan_atomic_signal_fence);