// SPDX-License-Identifier: GPL-2.0
#define CREATE_TRACE_POINTS
#include <trace/events/mmap_lock.h>

#include <linux/mm.h>
#include <linux/cgroup.h>
#include <linux/memcontrol.h>
#include <linux/mmap_lock.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/trace_events.h>
#include <linux/local_lock.h>

EXPORT_TRACEPOINT_SYMBOL(mmap_lock_start_locking);
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_acquire_returned);
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_released);

#ifdef CONFIG_MEMCG

/*
 * Our various events all share the same buffer (because we don't want or need
 * to allocate a set of buffers *per event type*), so we need to protect against
 * concurrent _reg() and _unreg() calls, and count how many _reg() calls have
 * been made.
 */
static DEFINE_MUTEX(reg_lock);
static int reg_refcount; /* Protected by reg_lock. */
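/*
 * trace_mmap_lock_reg()/trace_mmap_lock_unreg() below are wired up (in
 * trace/events/mmap_lock.h) as the register/unregister callbacks of the
 * mmap_lock trace events, so they run when the first consumer enables one of
 * the events and when the last consumer disables it; reg_refcount counts
 * those calls across all of the events sharing the buffers.
 */
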
/*
 * Size of the buffer for memcg path names. Ignoring stack trace support,
 * trace_events_hist.c uses MAX_FILTER_STR_VAL for this, so we also use it.
 */
#define MEMCG_PATH_BUF_SIZE MAX_FILTER_STR_VAL

/*
 * How many contexts our trace events might be called in: normal, softirq, irq,
 * and NMI.
 */
#define CONTEXT_COUNT 4
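/*
 * Each CPU owns a single buffer of MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT bytes
 * (allocated in trace_mmap_lock_reg()); buf_idx below carves it into
 * MEMCG_PATH_BUF_SIZE slots so that up to CONTEXT_COUNT nested contexts on
 * the same CPU each format their memcg path into a distinct slot.
 */
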
struct memcg_path {
	local_lock_t lock;
	char __rcu *buf;
	local_t buf_idx;
};
static DEFINE_PER_CPU(struct memcg_path, memcg_paths) = {
	.lock = INIT_LOCAL_LOCK(lock),
	.buf_idx = LOCAL_INIT(0),
};

static char **tmp_bufs;

/* Called with reg_lock held. */
static void free_memcg_path_bufs(void)
{
	struct memcg_path *memcg_path;
	int cpu;
	char **old = tmp_bufs;

	for_each_possible_cpu(cpu) {
		memcg_path = per_cpu_ptr(&memcg_paths, cpu);
		*(old++) = rcu_dereference_protected(memcg_path->buf,
			lockdep_is_held(&reg_lock));
		rcu_assign_pointer(memcg_path->buf, NULL);
	}

	/* Wait for inflight memcg_path_buf users to finish. */
	synchronize_rcu();

	old = tmp_bufs;
	for_each_possible_cpu(cpu) {
		kfree(*(old++));
	}

	kfree(tmp_bufs);
	tmp_bufs = NULL;
}

int trace_mmap_lock_reg(void)
{
	int cpu;
	char *new;

	mutex_lock(&reg_lock);

	/* If the refcount is going 0->1, proceed with allocating buffers. */
	if (reg_refcount++)
		goto out;

	tmp_bufs = kmalloc_array(num_possible_cpus(), sizeof(*tmp_bufs),
				 GFP_KERNEL);
	if (tmp_bufs == NULL)
		goto out_fail;

	for_each_possible_cpu(cpu) {
		new = kmalloc(MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT, GFP_KERNEL);
		if (new == NULL)
			goto out_fail_free;
		rcu_assign_pointer(per_cpu_ptr(&memcg_paths, cpu)->buf, new);
		/* Don't need to wait for inflights, they'd have gotten NULL. */
	}

out:
	mutex_unlock(&reg_lock);
	return 0;

out_fail_free:
	free_memcg_path_bufs();
out_fail:
	/* Since we failed, undo the earlier ref increment. */
	--reg_refcount;

	mutex_unlock(&reg_lock);
	return -ENOMEM;
}

void trace_mmap_lock_unreg(void)
{
	mutex_lock(&reg_lock);

	/* If the refcount is going 1->0, proceed with freeing buffers. */
	if (--reg_refcount)
		goto out;

	free_memcg_path_bufs();

out:
	mutex_unlock(&reg_lock);
}

static inline char *get_memcg_path_buf(void)
{
	struct memcg_path *memcg_path = this_cpu_ptr(&memcg_paths);
	char *buf;
	int idx;

	rcu_read_lock();
	buf = rcu_dereference(memcg_path->buf);
	if (buf == NULL) {
		rcu_read_unlock();
		return NULL;
	}
	idx = local_add_return(MEMCG_PATH_BUF_SIZE, &memcg_path->buf_idx) -
	      MEMCG_PATH_BUF_SIZE;
	return &buf[idx];
}

static inline void put_memcg_path_buf(void)
{
	local_sub(MEMCG_PATH_BUF_SIZE, &this_cpu_ptr(&memcg_paths)->buf_idx);
	rcu_read_unlock();
}
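
/*
 * get_memcg_path_buf()/put_memcg_path_buf() nest strictly (LIFO) within a
 * CPU: each get reserves the next MEMCG_PATH_BUF_SIZE slot by advancing
 * buf_idx under rcu_read_lock(), and each put gives the slot back, so an
 * interrupting context that traces an mmap_lock event uses its own slot and
 * releases it before the interrupted context resumes.
 */
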
/*
 * Write the given mm_struct's memcg path to a percpu buffer, and return a
 * pointer to it. If the path cannot be determined, or no buffer was available
 * (because the trace event is being unregistered), NULL is returned.
 *
 * Note: buffers are allocated per-cpu to avoid locking, so preemption must be
 * disabled by the caller before calling us, and re-enabled only after the
 * caller is done with the pointer.
 *
 * The caller must call put_memcg_path_buf() once the buffer is no longer
 * needed. This must be done while preemption is still disabled.
 */
static const char *get_mm_memcg_path(struct mm_struct *mm)
{
	char *buf = NULL;
	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);

	if (memcg == NULL)
		goto out;
	if (unlikely(memcg->css.cgroup == NULL))
		goto out_put;

	buf = get_memcg_path_buf();
	if (buf == NULL)
		goto out_put;

	cgroup_path(memcg->css.cgroup, buf, MEMCG_PATH_BUF_SIZE);

out_put:
	css_put(&memcg->css);
out:
	return buf;
}
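
/*
 * Emit one mmap_lock trace event. The local_lock() keeps us on this CPU
 * without being preempted, which satisfies get_mm_memcg_path()'s requirement
 * that preemption stay disabled until put_memcg_path_buf() has been called;
 * contexts that interrupt us (softirq/irq/NMI) grab their own slot via
 * buf_idx.
 */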
#define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                                   \
	do {                                                                   \
		const char *memcg_path;                                        \
		local_lock(&memcg_paths.lock);                                 \
		memcg_path = get_mm_memcg_path(mm);                            \
		trace_mmap_lock_##type(mm,                                     \
				       memcg_path != NULL ? memcg_path : "",   \
				       ##__VA_ARGS__);                         \
		if (likely(memcg_path != NULL))                                \
			put_memcg_path_buf();                                  \
		local_unlock(&memcg_paths.lock);                               \
	} while (0)

#else /* !CONFIG_MEMCG */

int trace_mmap_lock_reg(void)
{
	return 0;
}

void trace_mmap_lock_unreg(void)
{
}

#define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                                   \
	trace_mmap_lock_##type(mm, "", ##__VA_ARGS__)

#endif /* CONFIG_MEMCG */

/*
 * Trace calls must be in a separate file, as otherwise there's a circular
 * dependency between linux/mmap_lock.h and trace/events/mmap_lock.h.
 */

void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write)
{
	TRACE_MMAP_LOCK_EVENT(start_locking, mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_start_locking);

void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
					   bool success)
{
	TRACE_MMAP_LOCK_EVENT(acquire_returned, mm, write, success);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_acquire_returned);

void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write)
{
	TRACE_MMAP_LOCK_EVENT(released, mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_released);
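
/*
 * Example usage from userspace (illustrative; assumes tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *   echo 1 > /sys/kernel/tracing/events/mmap_lock/enable
 *   cat /sys/kernel/tracing/trace_pipe
 *
 * Each emitted event then carries the owning task's memcg path as formatted
 * by get_mm_memcg_path() above.
 */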