arch/powerpc/platforms/pseries/dtl.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtual Processor Dispatch Trace Log
 *
 * (C) Copyright IBM Corporation 2009
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/lppaca.h>
#include <asm/debugfs.h>
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>

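/*
 * Per-CPU state backing the debugfs interface: the entry buffer (either
 * filled by the hypervisor directly, or mirrored from it by the cpu
 * accounting code), its debugfs file, and read-side bookkeeping.
 */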
struct dtl {
        struct dtl_entry        *buf;
        struct dentry           *file;
        int                     cpu;
        int                     buf_entries;
        u64                     last_idx;
        spinlock_t              lock;
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);

static u8 dtl_event_mask = DTL_LOG_ALL;

/*
 * Size of per-cpu log buffers. Firmware requires that the buffer does
 * not cross a 4k boundary.
 */
static u32 dtl_buf_entries = N_DISPATCH_LOG;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
struct dtl_ring {
        u64     write_index;
        struct dtl_entry *write_ptr;
        struct dtl_entry *buf;
        struct dtl_entry *buf_end;
};

static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);

static atomic_t dtl_count;

/*
 * The cpu accounting code controls the DTL ring buffer, and we are
 * handed entries as they are processed.
 */
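/*
 * Publication ordering: the entry is copied before the hypervisor
 * index is rechecked (barrier() stops the compiler reordering the
 * copy past the check), and smp_wmb() orders the copy and write_ptr
 * update before the write_index increment, so a reader that observes
 * the new index also sees a complete entry.
 */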
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
        struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
        struct dtl_entry *wp = dtlr->write_ptr;
        struct lppaca *vpa = local_paca->lppaca_ptr;

        if (!wp)
                return;

        *wp = *dtle;
        barrier();

        /* check for hypervisor ring buffer overflow, ignore this entry if so */
        if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
                return;

        ++wp;
        if (wp == dtlr->buf_end)
                wp = dtlr->buf;
        dtlr->write_ptr = wp;

        /* incrementing write_index makes the new entry visible */
        smp_wmb();
        ++dtlr->write_index;
}

static int dtl_start(struct dtl *dtl)
{
        struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

        dtlr->buf = dtl->buf;
        dtlr->buf_end = dtl->buf + dtl->buf_entries;
        dtlr->write_index = 0;

        /* setting write_ptr enables logging into our buffer */
        smp_wmb();
        dtlr->write_ptr = dtl->buf;

        /* enable event logging */
        lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;

        dtl_consumer = consume_dtle;
        atomic_inc(&dtl_count);
        return 0;
}

static void dtl_stop(struct dtl *dtl)
{
        struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

        dtlr->write_ptr = NULL;
        smp_wmb();

        dtlr->buf = NULL;

        /* restore dtl_enable_mask */
        lppaca_of(dtl->cpu).dtl_enable_mask = DTL_LOG_PREEMPT;

        if (atomic_dec_and_test(&dtl_count))
                dtl_consumer = NULL;
}

static u64 dtl_current_index(struct dtl *dtl)
{
        return per_cpu(dtl_rings, dtl->cpu).write_index;
}

#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

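/*
 * Without the accounting hook, the hypervisor writes entries straight
 * into our buffer: start/stop just (un)register the buffer with the
 * hypervisor, and the current index is read from the lppaca.
 */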
static int dtl_start(struct dtl *dtl)
{
        unsigned long addr;
        int ret, hwcpu;

        /*
         * Register our dtl buffer with the hypervisor. The HV expects the
         * buffer size to be passed in the second word of the buffer.
         */
        ((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);

        hwcpu = get_hard_smp_processor_id(dtl->cpu);
        addr = __pa(dtl->buf);
        ret = register_dtl(hwcpu, addr);
        if (ret) {
                printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) failed with %d\n",
                       __func__, dtl->cpu, hwcpu, ret);
                return -EIO;
        }

        /* set our initial buffer indices */
        lppaca_of(dtl->cpu).dtl_idx = 0;

        /*
         * Ensure that our updates to the lppaca fields have occurred before
         * we actually enable the logging.
         */
        smp_wmb();

        /* enable event logging */
        lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;

        return 0;
}

static void dtl_stop(struct dtl *dtl)
{
        int hwcpu = get_hard_smp_processor_id(dtl->cpu);

        lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;

        unregister_dtl(hwcpu);
}

static u64 dtl_current_index(struct dtl *dtl)
{
        return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
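
/*
 * Serialisation: dtl_access_lock taken in read mode excludes other DTL
 * users (such as the vcpu dispatch statistics code); dtl->lock protects
 * the per-CPU buffer state against a concurrent open of the same file.
 */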

static int dtl_enable(struct dtl *dtl)
{
        long int n_entries;
        long int rc;
        struct dtl_entry *buf = NULL;

        if (!dtl_cache)
                return -ENOMEM;

        /* only allow one reader */
        if (dtl->buf)
                return -EBUSY;

        /* ensure there are no other conflicting dtl users */
        if (!read_trylock(&dtl_access_lock))
                return -EBUSY;

        n_entries = dtl_buf_entries;
        buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
        if (!buf) {
                printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
                                __func__, dtl->cpu);
                read_unlock(&dtl_access_lock);
                return -ENOMEM;
        }

        spin_lock(&dtl->lock);
        rc = -EBUSY;
        if (!dtl->buf) {
                /* store the original allocation size for use during read */
                dtl->buf_entries = n_entries;
                dtl->buf = buf;
                dtl->last_idx = 0;
                rc = dtl_start(dtl);
                if (rc)
                        dtl->buf = NULL;
        }
        spin_unlock(&dtl->lock);

        if (rc) {
                read_unlock(&dtl_access_lock);
                kmem_cache_free(dtl_cache, buf);
        }

        return rc;
}

static void dtl_disable(struct dtl *dtl)
{
        spin_lock(&dtl->lock);
        dtl_stop(dtl);
        kmem_cache_free(dtl_cache, dtl->buf);
        dtl->buf = NULL;
        dtl->buf_entries = 0;
        spin_unlock(&dtl->lock);
        read_unlock(&dtl_access_lock);
}

/* file interface */
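
/*
 * Each possible CPU gets a cpu-N file under the "dtl" debugfs directory.
 * A minimal userspace reader might look like the sketch below (not part
 * of this file; it assumes debugfs is mounted at /sys/kernel/debug and
 * that struct dtl_entry matches the layout in asm/lppaca.h):
 *
 *	int fd = open("/sys/kernel/debug/powerpc/dtl/cpu-0", O_RDONLY);
 *	struct dtl_entry log[32];
 *	ssize_t bytes = read(fd, log, sizeof(log));
 *
 * Reads must be a multiple of sizeof(struct dtl_entry) and return whole
 * entries only.
 */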

static int dtl_file_open(struct inode *inode, struct file *filp)
{
        struct dtl *dtl = inode->i_private;
        int rc;

        rc = dtl_enable(dtl);
        if (rc)
                return rc;

        filp->private_data = dtl;
        return 0;
}

static int dtl_file_release(struct inode *inode, struct file *filp)
{
        struct dtl *dtl = inode->i_private;

        dtl_disable(dtl);
        return 0;
}
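
/*
 * If the reader falls behind and the log wraps, last_idx is pulled
 * forward to the oldest entry still present in the buffer; anything
 * older has been overwritten and is silently dropped.
 */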

static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
                loff_t *pos)
{
        long int rc, n_read, n_req, read_size;
        struct dtl *dtl;
        u64 cur_idx, last_idx, i;

        if ((len % sizeof(struct dtl_entry)) != 0)
                return -EINVAL;

        dtl = filp->private_data;

        /* requested number of entries to read */
        n_req = len / sizeof(struct dtl_entry);

        /* actual number of entries read */
        n_read = 0;

        spin_lock(&dtl->lock);

        cur_idx = dtl_current_index(dtl);
        last_idx = dtl->last_idx;

        if (last_idx + dtl->buf_entries <= cur_idx)
                last_idx = cur_idx - dtl->buf_entries + 1;

        if (last_idx + n_req > cur_idx)
                n_req = cur_idx - last_idx;

        if (n_req > 0)
                dtl->last_idx = last_idx + n_req;

        spin_unlock(&dtl->lock);

        if (n_req <= 0)
                return 0;

        i = last_idx % dtl->buf_entries;

        /* read the tail of the buffer if we've wrapped */
        if (i + n_req > dtl->buf_entries) {
                read_size = dtl->buf_entries - i;

                rc = copy_to_user(buf, &dtl->buf[i],
                                read_size * sizeof(struct dtl_entry));
                if (rc)
                        return -EFAULT;

                i = 0;
                n_req -= read_size;
                n_read += read_size;
                buf += read_size * sizeof(struct dtl_entry);
        }

        /* .. and now the head */
        rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
        if (rc)
                return -EFAULT;

        n_read += n_req;

        return n_read * sizeof(struct dtl_entry);
}

static const struct file_operations dtl_fops = {
        .open           = dtl_file_open,
        .release        = dtl_file_release,
        .read           = dtl_file_read,
        .llseek         = no_llseek,
};

static struct dentry *dtl_dir;
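
/*
 * Note: since the v5.0 debugfs conversion, debugfs_create_file() and
 * friends report failure with an ERR_PTR rather than NULL, so the NULL
 * checks below can no longer trigger; later kernels dropped them.
 */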

static int dtl_setup_file(struct dtl *dtl)
{
        char name[10];

        sprintf(name, "cpu-%d", dtl->cpu);

        dtl->file = debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
        if (!dtl->file)
                return -ENOMEM;

        return 0;
}

static int dtl_init(void)
{
        struct dentry *event_mask_file, *buf_entries_file;
        int rc, i;

        if (!firmware_has_feature(FW_FEATURE_SPLPAR))
                return -ENODEV;

        /* set up common debugfs structure */

        rc = -ENOMEM;
        dtl_dir = debugfs_create_dir("dtl", powerpc_debugfs_root);
        if (!dtl_dir) {
                printk(KERN_WARNING "%s: can't create dtl root dir\n",
                                __func__);
                goto err;
        }

        event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
                                dtl_dir, &dtl_event_mask);
        buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
                                dtl_dir, &dtl_buf_entries);

        if (!event_mask_file || !buf_entries_file) {
                printk(KERN_WARNING "%s: can't create dtl files\n", __func__);
                goto err_remove_dir;
        }

        /* set up the per-cpu log structures */
        for_each_possible_cpu(i) {
                struct dtl *dtl = &per_cpu(cpu_dtl, i);

                spin_lock_init(&dtl->lock);
                dtl->cpu = i;

                rc = dtl_setup_file(dtl);
                if (rc)
                        goto err_remove_dir;
        }

        return 0;

err_remove_dir:
        debugfs_remove_recursive(dtl_dir);
err:
        return rc;
}
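
/* Runs at arch_initcall time, and only on pseries machines. */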
machine_arch_initcall(pseries, dtl_init);