// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "dso.h"
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "map.h"
#include "maps.h"
#include "symbol.h"
#include "unwind.h"
#include "callchain.h"

#include <api/fs/fs.h>

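/*
 * Attach a maps container to a freshly created thread: a group leader (or a
 * thread with unknown pid) gets its own maps, while other threads share the
 * leader's maps via a reference.
 */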
int thread__init_maps(struct thread *thread, struct machine *machine)
{
	pid_t pid = thread__pid(thread);

	if (pid == thread__tid(thread) || pid == -1) {
		thread__set_maps(thread, maps__new(machine));
	} else {
		struct thread *leader = __machine__findnew_thread(machine, pid, pid);

		if (leader) {
			thread__set_maps(thread, maps__get(thread__maps(leader)));
			thread__put(leader);
		}
	}
	return thread__maps(thread) ? 0 : -1;
}

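/*
 * Allocate and initialize a thread. The comm defaults to ":<tid>" until a
 * comm event or a /proc lookup provides the real name.
 */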
struct thread *thread__new(pid_t pid, pid_t tid)
{
	char *comm_str;
	struct comm *comm;
	RC_STRUCT(thread) *_thread = zalloc(sizeof(*_thread));
	struct thread *thread;

	if (ADD_RC_CHK(thread, _thread) != NULL) {
		thread__set_pid(thread, pid);
		thread__set_tid(thread, tid);
		thread__set_ppid(thread, -1);
		thread__set_cpu(thread, -1);
		thread__set_guest_cpu(thread, -1);
		thread__set_lbr_stitch_enable(thread, false);
		INIT_LIST_HEAD(thread__namespaces_list(thread));
		INIT_LIST_HEAD(thread__comm_list(thread));
		init_rwsem(thread__namespaces_lock(thread));
		init_rwsem(thread__comm_lock(thread));

		comm_str = malloc(32);
		if (!comm_str)
			goto err_thread;

		snprintf(comm_str, 32, ":%d", tid);
		comm = comm__new(comm_str, 0, false);
		free(comm_str);
		if (!comm)
			goto err_thread;

		list_add(&comm->list, thread__comm_list(thread));
		refcount_set(thread__refcnt(thread), 1);
		/* Thread holds first ref to nsdata. */
		RC_CHK_ACCESS(thread)->nsinfo = nsinfo__new(pid);
		srccode_state_init(thread__srccode_state(thread));
	}

	return thread;

err_thread:
	free(thread);
	return NULL;
}

static void (*thread__priv_destructor)(void *priv);

void thread__set_priv_destructor(void (*destructor)(void *priv))
{
	assert(thread__priv_destructor == NULL);

	thread__priv_destructor = destructor;
}

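/*
 * Release everything hanging off a thread once its refcount drops to zero:
 * maps, namespace and comm lists, nsinfo, srccode state and LBR stitch data.
 */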
void thread__delete(struct thread *thread)
{
	struct namespaces *namespaces, *tmp_namespaces;
	struct comm *comm, *tmp_comm;

	thread_stack__free(thread);

	if (thread__maps(thread)) {
		maps__put(thread__maps(thread));
		thread__set_maps(thread, NULL);
	}
	down_write(thread__namespaces_lock(thread));
	list_for_each_entry_safe(namespaces, tmp_namespaces,
				 thread__namespaces_list(thread), list) {
		list_del_init(&namespaces->list);
		namespaces__free(namespaces);
	}
	up_write(thread__namespaces_lock(thread));

	down_write(thread__comm_lock(thread));
	list_for_each_entry_safe(comm, tmp_comm, thread__comm_list(thread), list) {
		list_del_init(&comm->list);
		comm__free(comm);
	}
	up_write(thread__comm_lock(thread));

	nsinfo__zput(RC_CHK_ACCESS(thread)->nsinfo);
	srccode_state_free(thread__srccode_state(thread));

	exit_rwsem(thread__namespaces_lock(thread));
	exit_rwsem(thread__comm_lock(thread));
	thread__free_stitch_list(thread);

	if (thread__priv_destructor)
		thread__priv_destructor(thread__priv(thread));

	RC_CHK_FREE(thread);
}

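/*
 * Reference counting: thread__get() grabs a reference, thread__put() drops
 * one and deletes the thread when the count reaches zero.
 */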
struct thread *thread__get(struct thread *thread)
{
	struct thread *result;

	if (RC_CHK_GET(result, thread))
		refcount_inc(thread__refcnt(thread));
	return result;
}

void thread__put(struct thread *thread)
{
	if (thread && refcount_dec_and_test(thread__refcnt(thread)))
		thread__delete(thread);
	else
		RC_CHK_PUT(thread);
}

static struct namespaces *__thread__namespaces(struct thread *thread)
{
	if (list_empty(thread__namespaces_list(thread)))
		return NULL;
	return list_first_entry(thread__namespaces_list(thread), struct namespaces, list);
}

struct namespaces *thread__namespaces(struct thread *thread)
{
	struct namespaces *ns;

	down_read(thread__namespaces_lock(thread));
	ns = __thread__namespaces(thread);
	up_read(thread__namespaces_lock(thread));
	return ns;
}

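/*
 * Record a new namespaces entry at the head of the list and, when this is an
 * update (non-zero timestamp with an existing entry), close out the previous
 * entry's lifetime.
 */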
static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
				    struct perf_record_namespaces *event)
{
	struct namespaces *new, *curr = __thread__namespaces(thread);

	new = namespaces__new(event);
	if (!new)
		return -ENOMEM;
	list_add(&new->list, thread__namespaces_list(thread));

	if (timestamp && curr) {
		/*
		 * A setns syscall must have changed some or all of the
		 * namespaces of this thread. Update the end time for the
		 * namespaces previously used.
		 */
		curr = list_next_entry(new, list);
		curr->end_time = timestamp;
	}
	return 0;
}

int thread__set_namespaces(struct thread *thread, u64 timestamp,
			   struct perf_record_namespaces *event)
{
	int ret;

	down_write(thread__namespaces_lock(thread));
	ret = __thread__set_namespaces(thread, timestamp, event);
	up_write(thread__namespaces_lock(thread));
	return ret;
}

struct comm *thread__comm(struct thread *thread)
{
	if (list_empty(thread__comm_list(thread)))
		return NULL;
	return list_first_entry(thread__comm_list(thread), struct comm, list);
}

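/*
 * Find the comm that was current at the last exec: the first entry flagged
 * exec, otherwise fall back to the oldest usable entry.
 */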
struct comm *thread__exec_comm(struct thread *thread)
{
	struct comm *comm, *last = NULL, *second_last = NULL;

	list_for_each_entry(comm, thread__comm_list(thread), list) {
		if (comm->exec)
			return comm;
		second_last = last;
		last = comm;
	}
	/*
	 * 'last' with no start time might be the parent's comm of a
	 * synthesized thread (created by processing a synthesized fork
	 * event). For a main thread, that is very probably wrong. Prefer
	 * a later comm to avoid that case.
	 */
	if (second_last && !last->start && thread__pid(thread) == thread__tid(thread))
		return second_last;
	return last;
}

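/* Caller must hold thread__comm_lock() for writing. */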
static int ____thread__set_comm(struct thread *thread, const char *str,
				u64 timestamp, bool exec)
{
	struct comm *new, *curr = thread__comm(thread);

	/* Override the default :tid entry */
	if (!thread__comm_set(thread)) {
		int err = comm__override(curr, str, timestamp, exec);

		if (err)
			return err;
	} else {
		new = comm__new(str, timestamp, exec);
		if (!new)
			return -ENOMEM;
		list_add(&new->list, thread__comm_list(thread));
		if (exec)
			unwind__flush_access(thread__maps(thread));
	}
	thread__set_comm_set(thread, true);
	return 0;
}

int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
		       bool exec)
{
	int ret;

	down_write(thread__comm_lock(thread));
	ret = ____thread__set_comm(thread, str, timestamp, exec);
	up_write(thread__comm_lock(thread));
	return ret;
}

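/* Read the comm directly from /proc/<pid>/task/<tid>/comm. */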
int thread__set_comm_from_proc(struct thread *thread)
{
	char path[64];
	char *comm = NULL;
	size_t sz;
	int err = -1;

	if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
		       thread__pid(thread), thread__tid(thread)) >= (int)sizeof(path)) &&
	    procfs__read_str(path, &comm, &sz) == 0) {
		comm[sz - 1] = '\0';
		err = thread__set_comm(thread, comm, 0);
		free(comm);
	}

	return err;
}

static const char *__thread__comm_str(struct thread *thread)
{
	const struct comm *comm = thread__comm(thread);

	if (!comm)
		return NULL;
	return comm__str(comm);
}

const char *thread__comm_str(struct thread *thread)
{
	const char *str;

	down_read(thread__comm_lock(thread));
	str = __thread__comm_str(thread);
	up_read(thread__comm_lock(thread));
	return str;
}

static int __thread__comm_len(struct thread *thread, const char *comm)
{
	if (!comm)
		return 0;
	thread__set_comm_len(thread, strlen(comm));
	return thread__var_comm_len(thread);
}

/* CHECKME: this should probably return the max comm len from the comm list */
int thread__comm_len(struct thread *thread)
{
	int comm_len = thread__var_comm_len(thread);

	if (!comm_len) {
		const char *comm;

		down_read(thread__comm_lock(thread));
		comm = __thread__comm_str(thread);
		comm_len = __thread__comm_len(thread, comm);
		up_read(thread__comm_lock(thread));
	}

	return comm_len;
}

size_t thread__fprintf(struct thread *thread, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", thread__tid(thread), thread__comm_str(thread)) +
	       maps__fprintf(thread__maps(thread), fp);
}

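/*
 * Add a map to the thread's address space: prepare unwind access for it,
 * fix up any overlapping maps, then insert it.
 */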
int thread__insert_map(struct thread *thread, struct map *map)
{
	int ret;

	ret = unwind__prepare_access(thread__maps(thread), map, NULL);
	if (ret)
		return ret;

	maps__fixup_overlappings(thread__maps(thread), map, stderr);
	return maps__insert(thread__maps(thread), map);
}

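/*
 * Prime the DWARF unwinder for each of the thread's maps; stop at the first
 * error or once the unwind state is initialized.
 */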
static int __thread__prepare_access(struct thread *thread)
{
	bool initialized = false;
	int err = 0;
	struct maps *maps = thread__maps(thread);
	struct map_rb_node *rb_node;

	down_read(maps__lock(maps));

	maps__for_each_entry(maps, rb_node) {
		err = unwind__prepare_access(thread__maps(thread), rb_node->map, &initialized);
		if (err || initialized)
			break;
	}

	up_read(maps__lock(maps));
	return err;
}

static int thread__prepare_access(struct thread *thread)
{
	int err = 0;

	if (dwarf_callchain_users)
		err = __thread__prepare_access(thread);
	return err;
}

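/*
 * On fork: a new thread in the same process just prepares unwind access to
 * the shared maps, while a new process optionally gets a copy of the
 * parent's maps.
 */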
static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone)
{
	/* This is a new thread, so share the process' map groups. */
	if (thread__pid(thread) == thread__pid(parent))
		return thread__prepare_access(thread);

	if (thread__maps(thread) == thread__maps(parent)) {
		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
			 thread__pid(thread), thread__tid(thread),
			 thread__pid(parent), thread__tid(parent));
		return 0;
	}
	/* But this one is a new process, so copy the maps. */
	return do_maps_clone ? maps__clone(thread, thread__maps(parent)) : 0;
}

int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
{
	if (thread__comm_set(parent)) {
		const char *comm = thread__comm_str(parent);
		int err;

		if (!comm)
			return -ENOMEM;
		err = thread__set_comm(thread, comm, timestamp);
		if (err)
			return err;
	}

	thread__set_ppid(thread, thread__tid(parent));
	return thread__clone_maps(thread, parent, do_maps_clone);
}

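/*
 * Resolve an address whose cpumode is unknown by trying user, kernel and
 * guest modes in turn until a map is found.
 */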
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
					struct addr_location *al)
{
	size_t i;
	const u8 cpumodes[] = {
		PERF_RECORD_MISC_USER,
		PERF_RECORD_MISC_KERNEL,
		PERF_RECORD_MISC_GUEST_USER,
		PERF_RECORD_MISC_GUEST_KERNEL
	};

	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
		thread__find_symbol(thread, cpumodes[i], addr, al);
		if (al->map)
			break;
	}
}

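/* Return the process' main (group leader) thread, taking a reference. */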
struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
	if (thread__pid(thread) == thread__tid(thread))
		return thread__get(thread);

	if (thread__pid(thread) == -1)
		return NULL;

	return machine__find_thread(machine, thread__pid(thread), thread__pid(thread));
}

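/*
 * Copy up to 'len' bytes of the target's memory at 'ip' by reading them from
 * the backing DSO; returns the number of bytes read, or -1 on error.
 */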
int thread__memcpy(struct thread *thread, struct machine *machine,
		   void *buf, u64 ip, int len, bool *is64bit)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct addr_location al;
	struct dso *dso;
	long offset;

	if (machine__kernel_ip(machine, ip))
		cpumode = PERF_RECORD_MISC_KERNEL;

	addr_location__init(&al);
	if (!thread__find_map(thread, cpumode, ip, &al)) {
		addr_location__exit(&al);
		return -1;
	}

	dso = map__dso(al.map);

	if (!dso || dso->data.status == DSO_DATA_STATUS_ERROR || map__load(al.map) < 0) {
		addr_location__exit(&al);
		return -1;
	}

	offset = map__map_ip(al.map, ip);
	if (is64bit)
		*is64bit = dso->is_64_bit;

	addr_location__exit(&al);

	return dso__data_read_offset(dso, machine, offset, buf, len);
}

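/* Free the LBR stitching state: used and free lists plus the saved LBR cursor. */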
void thread__free_stitch_list(struct thread *thread)
{
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
	struct stitch_list *pos, *tmp;

	if (!lbr_stitch)
		return;

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
		list_del_init(&pos->node);
		free(pos);
	}

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->free_lists, node) {
		list_del_init(&pos->node);
		free(pos);
	}

	zfree(&lbr_stitch->prev_lbr_cursor);
	free(thread__lbr_stitch(thread));
	thread__set_lbr_stitch(thread, NULL);
}