// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtual Processor Dispatch Trace Log
 *
 * (C) Copyright IBM Corporation 2009
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <asm/lppaca.h>
#include <asm/debugfs.h>
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>

struct dtl {
	struct dtl_entry	*buf;
	struct dentry		*file;
	int			cpu;
	int			buf_entries;
	u64			last_idx;
	spinlock_t		lock;
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);
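
/*
 * Mask of event types the hypervisor should log; defaults to logging
 * everything. Writable at run time through the dtl_event_mask debugfs
 * file created in dtl_init().
 */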
static u8 dtl_event_mask = DTL_LOG_ALL;

/*
 * Size of per-cpu log buffers. Firmware requires that the buffer does
 * not cross a 4k boundary.
 */
static u32 dtl_buf_entries = N_DISPATCH_LOG;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
struct dtl_ring {
	u64	write_index;
	struct dtl_entry *write_ptr;
	struct dtl_entry *buf;
	struct dtl_entry *buf_end;
};

static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);

static atomic_t dtl_count;
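
/*
 * dtl_count tracks how many CPUs currently have logging enabled; the
 * dtl_consumer hook stays installed while it is non-zero and is torn
 * down again when the last user stops.
 */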

/*
 * The cpu accounting code controls the DTL ring buffer, and we get
 * given entries as they are processed.
 */
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
	struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
	struct dtl_entry *wp = dtlr->write_ptr;
	struct lppaca *vpa = local_paca->lppaca_ptr;

	if (!wp)
		return;

	*wp = *dtle;
	barrier();

	/* check for hypervisor ring buffer overflow, ignore this entry if so */
	if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
		return;

	++wp;
	if (wp == dtlr->buf_end)
		wp = dtlr->buf;
	dtlr->write_ptr = wp;

	/* incrementing write_index makes the new entry visible */
	smp_wmb();
	++dtlr->write_index;
}
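
/*
 * With native CPU accounting built in, the accounting code already owns
 * the hypervisor-facing buffer, so starting a trace only means pointing
 * the per-cpu ring at our buffer and hooking consume_dtle() in above.
 */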
static int dtl_start(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->buf = dtl->buf;
	dtlr->buf_end = dtl->buf + dtl->buf_entries;
	dtlr->write_index = 0;

	/* setting write_ptr enables logging into our buffer */
	smp_wmb();
	dtlr->write_ptr = dtl->buf;

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;

	dtl_consumer = consume_dtle;
	atomic_inc(&dtl_count);
	return 0;
}

static void dtl_stop(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->write_ptr = NULL;
	smp_wmb();

	dtlr->buf = NULL;

	/* restore dtl_enable_mask */
	lppaca_of(dtl->cpu).dtl_enable_mask = DTL_LOG_PREEMPT;

	if (atomic_dec_and_test(&dtl_count))
		dtl_consumer = NULL;
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return per_cpu(dtl_rings, dtl->cpu).write_index;
}

#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
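
/*
 * Without native CPU accounting there is no in-kernel consumer: the
 * buffer is registered straight with the hypervisor via register_dtl()
 * and firmware appends entries to it directly.
 */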
static int dtl_start(struct dtl *dtl)
{
	unsigned long addr;
	int ret, hwcpu;

	/* Register our dtl buffer with the hypervisor. The HV expects the
	 * buffer size to be passed in the second word of the buffer */
	((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);

	hwcpu = get_hard_smp_processor_id(dtl->cpu);
	addr = __pa(dtl->buf);
	ret = register_dtl(hwcpu, addr);
	if (ret) {
		printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
		       "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
		return -EIO;
	}

	/* set our initial buffer indices */
	lppaca_of(dtl->cpu).dtl_idx = 0;

	/* ensure that our updates to the lppaca fields have occurred before
	 * we actually enable the logging */
	smp_wmb();

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;

	return 0;
}

static void dtl_stop(struct dtl *dtl)
{
	int hwcpu = get_hard_smp_processor_id(dtl->cpu);

	lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;

	unregister_dtl(hwcpu);
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
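
/*
 * Common setup for a reader: allocate the per-cpu buffer, take
 * dtl_access_lock for reading to keep other DTL users out, and hand
 * off to the config-specific dtl_start() above.
 */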
static int dtl_enable(struct dtl *dtl)
{
	long int n_entries;
	long int rc;
	struct dtl_entry *buf = NULL;

	if (!dtl_cache)
		return -ENOMEM;

	/* only allow one reader */
	if (dtl->buf)
		return -EBUSY;

	/* ensure there are no other conflicting dtl users */
	if (!read_trylock(&dtl_access_lock))
		return -EBUSY;

	n_entries = dtl_buf_entries;
	buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
	if (!buf) {
		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
				__func__, dtl->cpu);
		read_unlock(&dtl_access_lock);
		return -ENOMEM;
	}

	spin_lock(&dtl->lock);
	rc = -EBUSY;
	if (!dtl->buf) {
		/* store the original allocation size for use during read */
		dtl->buf_entries = n_entries;
		dtl->buf = buf;
		dtl->last_idx = 0;
		rc = dtl_start(dtl);
		if (rc)
			dtl->buf = NULL;
	}
	spin_unlock(&dtl->lock);

	if (rc) {
		read_unlock(&dtl_access_lock);
		kmem_cache_free(dtl_cache, buf);
	}

	return rc;
}

static void dtl_disable(struct dtl *dtl)
{
	spin_lock(&dtl->lock);
	dtl_stop(dtl);
	kmem_cache_free(dtl_cache, dtl->buf);
	dtl->buf = NULL;
	dtl->buf_entries = 0;
	spin_unlock(&dtl->lock);
	read_unlock(&dtl_access_lock);
}

/* file interface */

static int dtl_file_open(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	int rc;

	rc = dtl_enable(dtl);
	if (rc)
		return rc;

	filp->private_data = dtl;
	return 0;
}

static int dtl_file_release(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	dtl_disable(dtl);
	return 0;
}
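
/*
 * Copy out whole dtl_entry records the reader has not seen yet. If the
 * reader has fallen more than a buffer's worth behind the write index,
 * resync to the oldest entry still present; a wrapped window is copied
 * in two pieces, tail of the buffer first.
 */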
static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
		loff_t *pos)
{
	long int rc, n_read, n_req, read_size;
	struct dtl *dtl;
	u64 cur_idx, last_idx, i;

	if ((len % sizeof(struct dtl_entry)) != 0)
		return -EINVAL;

	dtl = filp->private_data;

	/* requested number of entries to read */
	n_req = len / sizeof(struct dtl_entry);

	/* actual number of entries read */
	n_read = 0;

	spin_lock(&dtl->lock);

	cur_idx = dtl_current_index(dtl);
	last_idx = dtl->last_idx;

	if (last_idx + dtl->buf_entries <= cur_idx)
		last_idx = cur_idx - dtl->buf_entries + 1;

	if (last_idx + n_req > cur_idx)
		n_req = cur_idx - last_idx;

	if (n_req > 0)
		dtl->last_idx = last_idx + n_req;

	spin_unlock(&dtl->lock);

	if (n_req <= 0)
		return 0;

	i = last_idx % dtl->buf_entries;

	/* read the tail of the buffer if we've wrapped */
	if (i + n_req > dtl->buf_entries) {
		read_size = dtl->buf_entries - i;

		rc = copy_to_user(buf, &dtl->buf[i],
				read_size * sizeof(struct dtl_entry));
		if (rc)
			return -EFAULT;

		i = 0;
		n_req -= read_size;
		n_read += read_size;
		buf += read_size * sizeof(struct dtl_entry);
	}

	/* .. and now the head */
	rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
	if (rc)
		return -EFAULT;

	n_read += n_req;

	return n_read * sizeof(struct dtl_entry);
}

static const struct file_operations dtl_fops = {
	.open		= dtl_file_open,
	.release	= dtl_file_release,
	.read		= dtl_file_read,
	.llseek		= no_llseek,
};
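
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug): each
 * possible CPU gets a /sys/kernel/debug/powerpc/dtl/cpu-N file. Opening
 * it allocates the buffer and enables logging; read() then returns raw
 * struct dtl_entry records, rejecting lengths that are not a multiple
 * of the entry size. Closing the file disables logging and frees the
 * buffer.
 */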
static struct dentry *dtl_dir;

static int dtl_setup_file(struct dtl *dtl)
{
	char name[10];

	sprintf(name, "cpu-%d", dtl->cpu);

	dtl->file = debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
	if (!dtl->file)
		return -ENOMEM;

	return 0;
}

static int dtl_init(void)
{
	struct dentry *event_mask_file, *buf_entries_file;
	int rc, i;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -ENODEV;

	/* set up common debugfs structure */

	rc = -ENOMEM;
	dtl_dir = debugfs_create_dir("dtl", powerpc_debugfs_root);
	if (!dtl_dir) {
		printk(KERN_WARNING "%s: can't create dtl root dir\n",
				__func__);
		goto err;
	}

	event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
				dtl_dir, &dtl_event_mask);
	buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
				dtl_dir, &dtl_buf_entries);

	if (!event_mask_file || !buf_entries_file) {
		printk(KERN_WARNING "%s: can't create dtl files\n", __func__);
		goto err_remove_dir;
	}

	/* set up the per-cpu log structures */
	for_each_possible_cpu(i) {
		struct dtl *dtl = &per_cpu(cpu_dtl, i);
		spin_lock_init(&dtl->lock);
		dtl->cpu = i;

		rc = dtl_setup_file(dtl);
		if (rc)
			goto err_remove_dir;
	}

	return 0;

err_remove_dir:
	debugfs_remove_recursive(dtl_dir);
err:
	return rc;
}
machine_arch_initcall(pseries, dtl_init);