Merge tag 'sound-fix-6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai...
[platform/kernel/linux-starfive.git] / drivers / base / devcoredump.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright(c) 2014 Intel Mobile Communications GmbH
4  * Copyright(c) 2015 Intel Deutschland GmbH
5  *
6  * Author: Johannes Berg <johannes@sipsolutions.net>
7  */
8 #include <linux/module.h>
9 #include <linux/device.h>
10 #include <linux/devcoredump.h>
11 #include <linux/list.h>
12 #include <linux/slab.h>
13 #include <linux/fs.h>
14 #include <linux/workqueue.h>
15
/* Forward declaration; the class itself is defined below its attribute groups. */
static struct class devcd_class;

/*
 * global disable flag, for security purposes
 * Set (write-once) via the class "disabled" attribute; once set, new dumps
 * are dropped and all pending dumps are flushed (see disabled_store()).
 */
static bool devcd_disabled;

/* if data isn't read by userspace after 5 minutes then delete it */
#define DEVCD_TIMEOUT   (HZ * 60 * 5)
/* One pending device coredump; embeds its own struct device under devcd_class. */
struct devcd_entry {
        struct device devcd_dev;
        void *data;             /* dump payload, owned by this entry; released via ->free() */
        size_t datalen;
        /*
         * The mutex serializes accesses to del_wk between kernel and user
         * space. When a devcd is added with device_add(), a uevent is sent to
         * user space; a user-space process may react by writing to the devcd
         * fd, reaching devcd_data_write(), and try to modify del_wk before
         * this CPU has even initialized/queued it:
         *
         *        cpu0(X)                                 cpu1(Y)
         *
         *        dev_coredump() uevent sent to user space
         *        device_add()  ======================> user space process Y reads the
         *                                              uevents writes to devcd fd
         *                                              which results into writes to
         *
         *                                             devcd_data_write()
         *                                               mod_delayed_work()
         *                                                 try_to_grab_pending()
         *                                                   del_timer()
         *                                                     debug_assert_init()
         *       INIT_DELAYED_WORK()
         *       schedule_delayed_work()
         *
         *
         * Also, the mutex alone is not enough to prevent del_wk from being
         * rescheduled after it has been flushed by a call to devcd_free(),
         * as shown below:
         *
         *      disabled_store()
         *        devcd_free()
         *          mutex_lock()             devcd_data_write()
         *          flush_delayed_work()
         *          mutex_unlock()
         *                                   mutex_lock()
         *                                   mod_delayed_work()
         *                                   mutex_unlock()
         *
         * So the delete_work flag is required as well.
         */
        struct mutex mutex;
        bool delete_work;       /* del_wk has been committed to (immediate) deletion */
        struct module *owner;   /* module providing read/free; pinned until release */
        ssize_t (*read)(char *buffer, loff_t offset, size_t count,
                        void *data, size_t datalen);
        void (*free)(void *data);       /* frees @data when the entry is released */
        struct delayed_work del_wk;     /* deferred device_del()/put_device() */
        struct device *failing_dev;     /* device the dump belongs to (referenced) */
};
76
77 static struct devcd_entry *dev_to_devcd(struct device *dev)
78 {
79         return container_of(dev, struct devcd_entry, devcd_dev);
80 }
81
/*
 * Release callback for the entry's embedded struct device; runs when its
 * last reference is dropped. Frees the dump data via the caller-supplied
 * free(), drops the module pin and the failing-device reference, and
 * finally frees the entry itself.
 */
static void devcd_dev_release(struct device *dev)
{
        struct devcd_entry *devcd = dev_to_devcd(dev);

        devcd->free(devcd->data);
        module_put(devcd->owner);

        /*
         * this seems racy, but I don't see a notifier or such on
         * a struct device to know when it goes away?
         */
        if (devcd->failing_dev->kobj.sd)
                sysfs_delete_link(&devcd->failing_dev->kobj, &dev->kobj,
                                  "devcoredump");

        put_device(devcd->failing_dev);
        kfree(devcd);
}
100
101 static void devcd_del(struct work_struct *wk)
102 {
103         struct devcd_entry *devcd;
104
105         devcd = container_of(wk, struct devcd_entry, del_wk.work);
106
107         device_del(&devcd->devcd_dev);
108         put_device(&devcd->devcd_dev);
109 }
110
111 static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
112                                struct bin_attribute *bin_attr,
113                                char *buffer, loff_t offset, size_t count)
114 {
115         struct device *dev = kobj_to_dev(kobj);
116         struct devcd_entry *devcd = dev_to_devcd(dev);
117
118         return devcd->read(buffer, offset, count, devcd->data, devcd->datalen);
119 }
120
121 static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
122                                 struct bin_attribute *bin_attr,
123                                 char *buffer, loff_t offset, size_t count)
124 {
125         struct device *dev = kobj_to_dev(kobj);
126         struct devcd_entry *devcd = dev_to_devcd(dev);
127
128         mutex_lock(&devcd->mutex);
129         if (!devcd->delete_work) {
130                 devcd->delete_work = true;
131                 mod_delayed_work(system_wq, &devcd->del_wk, 0);
132         }
133         mutex_unlock(&devcd->mutex);
134
135         return count;
136 }
137
/*
 * Binary sysfs attribute "data": reading returns the dump contents
 * (devcd_data_read), any write deletes the dump (devcd_data_write).
 * .size = 0 means no fixed size is advertised to userspace.
 */
static struct bin_attribute devcd_attr_data = {
        .attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, },
        .size = 0,
        .read = devcd_data_read,
        .write = devcd_data_write,
};

static struct bin_attribute *devcd_dev_bin_attrs[] = {
        &devcd_attr_data, NULL,
};

static const struct attribute_group devcd_dev_group = {
        .bin_attrs = devcd_dev_bin_attrs,
};

/* Per-device groups, attached to every devcd device via devcd_class.dev_groups. */
static const struct attribute_group *devcd_dev_groups[] = {
        &devcd_dev_group, NULL,
};
156
157 static int devcd_free(struct device *dev, void *data)
158 {
159         struct devcd_entry *devcd = dev_to_devcd(dev);
160
161         mutex_lock(&devcd->mutex);
162         if (!devcd->delete_work)
163                 devcd->delete_work = true;
164
165         flush_delayed_work(&devcd->del_wk);
166         mutex_unlock(&devcd->mutex);
167         return 0;
168 }
169
170 static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
171                              char *buf)
172 {
173         return sysfs_emit(buf, "%d\n", devcd_disabled);
174 }
175
/*
 *
 *      disabled_store()                                        worker()
 *       class_for_each_device(&devcd_class,
 *              NULL, NULL, devcd_free)
 *         ...
 *         ...
 *         while ((dev = class_dev_iter_next(&iter))
 *                                                             devcd_del()
 *                                                               device_del()
 *                                                                 put_device() <- last reference
 *             error = fn(dev, data)                           devcd_dev_release()
 *             devcd_free(dev, data)                           kfree(devcd)
 *             mutex_lock(&devcd->mutex);
 *
 *
 * In the diagram above it looks as if disabled_store() could race with a
 * concurrently running devcd_del() and cause a memory abort while acquiring
 * devcd->mutex, because the mutex would be taken after the devcd memory has
 * been kfree()d once its last reference was dropped via put_device(). However,
 * this cannot happen: fn(dev, data) runs with its own reference to the device
 * (held via its klist_node), so the reference dropped in devcd_del() is not
 * the last one, and the situation described above does not occur.
 */
199
200 static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
201                               const char *buf, size_t count)
202 {
203         long tmp = simple_strtol(buf, NULL, 10);
204
205         /*
206          * This essentially makes the attribute write-once, since you can't
207          * go back to not having it disabled. This is intentional, it serves
208          * as a system lockdown feature.
209          */
210         if (tmp != 1)
211                 return -EINVAL;
212
213         devcd_disabled = true;
214
215         class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
216
217         return count;
218 }
219 static CLASS_ATTR_RW(disabled);
220
/* Class-level attributes: just the write-once "disabled" knob. */
static struct attribute *devcd_class_attrs[] = {
        &class_attr_disabled.attr,
        NULL,
};
ATTRIBUTE_GROUPS(devcd_class);  /* generates devcd_class_groups */

/* The "devcoredump" class: one child device is created per pending dump. */
static struct class devcd_class = {
        .name           = "devcoredump",
        .owner          = THIS_MODULE,
        .dev_release    = devcd_dev_release,
        .dev_groups     = devcd_dev_groups,
        .class_groups   = devcd_class_groups,
};
234
235 static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
236                            void *data, size_t datalen)
237 {
238         return memory_read_from_buffer(buffer, count, &offset, data, datalen);
239 }
240
/* Default free callback for dumps created with dev_coredumpv(). */
static void devcd_freev(void *data)
{
        vfree(data);
}
245
/**
 * dev_coredumpv - create device coredump with vmalloc data
 * @dev: the struct device for the crashed device
 * @data: vmalloc data containing the device coredump
 * @datalen: length of the data
 * @gfp: allocation flags
 *
 * This function takes ownership of the vmalloc'ed data and will free
 * it when it is no longer used. See dev_coredumpm() for more information.
 */
void dev_coredumpv(struct device *dev, void *data, size_t datalen,
                   gfp_t gfp)
{
        /* Thin wrapper: buffer-copy read callback + vfree() as the free callback. */
        dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, devcd_freev);
}
EXPORT_SYMBOL_GPL(dev_coredumpv);
262
263 static int devcd_match_failing(struct device *dev, const void *failing)
264 {
265         struct devcd_entry *devcd = dev_to_devcd(dev);
266
267         return devcd->failing_dev == failing;
268 }
269
/**
 * devcd_free_sgtable - free all the memory of the given scatterlist table
 * (i.e. both pages and scatterlist instances)
 * NOTE: if two tables allocated with devcd_alloc_sgtable and then chained
 * using the sg_chain function then that function should be called only once
 * on the chained table
 * @data: pointer to sg_table to free
 */
static void devcd_free_sgtable(void *data)
{
        /* Delegate to the devcoredump helper that walks and frees the table. */
        _devcd_free_sgtable(data);
}
282
283 /**
284  * devcd_read_from_sgtable - copy data from sg_table to a given buffer
285  * and return the number of bytes read
286  * @buffer: the buffer to copy the data to it
287  * @buf_len: the length of the buffer
288  * @data: the scatterlist table to copy from
289  * @offset: start copy from @offset@ bytes from the head of the data
290  *      in the given scatterlist
291  * @data_len: the length of the data in the sg_table
292  */
293 static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
294                                        size_t buf_len, void *data,
295                                        size_t data_len)
296 {
297         struct scatterlist *table = data;
298
299         if (offset > data_len)
300                 return -EINVAL;
301
302         if (offset + buf_len > data_len)
303                 buf_len = data_len - offset;
304         return sg_pcopy_to_buffer(table, sg_nents(table), buffer, buf_len,
305                                   offset);
306 }
307
/**
 * dev_coredumpm - create device coredump with read/free methods
 * @dev: the struct device for the crashed device
 * @owner: the module that contains the read/free functions, use %THIS_MODULE
 * @data: data cookie for the @read/@free functions
 * @datalen: length of the data
 * @gfp: allocation flags
 * @read: function to read from the given buffer
 * @free: function to free the given buffer
 *
 * Creates a new device coredump for the given device. If a previous one hasn't
 * been read yet, the new coredump is discarded. The data lifetime is determined
 * by the device coredump framework and when it is no longer needed the @free
 * function will be called to free the data.
 */
void dev_coredumpm(struct device *dev, struct module *owner,
                   void *data, size_t datalen, gfp_t gfp,
                   ssize_t (*read)(char *buffer, loff_t offset, size_t count,
                                   void *data, size_t datalen),
                   void (*free)(void *data))
{
        static atomic_t devcd_count = ATOMIC_INIT(0);
        struct devcd_entry *devcd;
        struct device *existing;

        /* Facility disabled via the class "disabled" attribute: drop the data. */
        if (devcd_disabled)
                goto free;

        /* At most one pending coredump per failing device; discard the new one. */
        existing = class_find_device(&devcd_class, NULL, dev,
                                     devcd_match_failing);
        if (existing) {
                put_device(existing);
                goto free;
        }

        /* Pin the module providing @read/@free until devcd_dev_release(). */
        if (!try_module_get(owner))
                goto free;

        devcd = kzalloc(sizeof(*devcd), gfp);
        if (!devcd)
                goto put_module;

        devcd->owner = owner;
        devcd->data = data;
        devcd->datalen = datalen;
        devcd->read = read;
        devcd->free = free;
        devcd->failing_dev = get_device(dev);
        devcd->delete_work = false;

        mutex_init(&devcd->mutex);
        device_initialize(&devcd->devcd_dev);

        dev_set_name(&devcd->devcd_dev, "devcd%d",
                     atomic_inc_return(&devcd_count));
        devcd->devcd_dev.class = &devcd_class;

        /*
         * Hold the mutex across device_add() and the del_wk setup so a
         * user-space write racing in via the add uevent cannot touch del_wk
         * before it is initialized (see the comment in struct devcd_entry).
         */
        mutex_lock(&devcd->mutex);
        if (device_add(&devcd->devcd_dev))
                goto put_device;

        /*
         * These should normally not fail, but there is no problem
         * continuing without the links, so just warn instead of
         * failing.
         */
        if (sysfs_create_link(&devcd->devcd_dev.kobj, &dev->kobj,
                              "failing_device") ||
            sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj,
                              "devcoredump"))
                dev_warn(dev, "devcoredump create_link failed\n");

        INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
        schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
        mutex_unlock(&devcd->mutex);
        return;
        /* Error unwind: the put_device() triggers devcd_dev_release(),
         * which frees @data, drops the @dev reference and frees devcd. */
 put_device:
        put_device(&devcd->devcd_dev);
        mutex_unlock(&devcd->mutex);
 put_module:
        module_put(owner);
 free:
        free(data);
}
EXPORT_SYMBOL_GPL(dev_coredumpm);
393
/**
 * dev_coredumpsg - create device coredump that uses scatterlist as data
 * parameter
 * @dev: the struct device for the crashed device
 * @table: the dump data
 * @datalen: length of the data
 * @gfp: allocation flags
 *
 * Creates a new device coredump for the given device. If a previous one hasn't
 * been read yet, the new coredump is discarded. The data lifetime is determined
 * by the device coredump framework and when it is no longer needed
 * it will free the data.
 */
void dev_coredumpsg(struct device *dev, struct scatterlist *table,
                    size_t datalen, gfp_t gfp)
{
        /* Thin wrapper: sg-aware read callback + sg-table free callback. */
        dev_coredumpm(dev, NULL, table, datalen, gfp, devcd_read_from_sgtable,
                      devcd_free_sgtable);
}
EXPORT_SYMBOL_GPL(dev_coredumpsg);
414
415 static int __init devcoredump_init(void)
416 {
417         return class_register(&devcd_class);
418 }
419 __initcall(devcoredump_init);
420
/* Flush and delete every pending coredump, then unregister the class. */
static void __exit devcoredump_exit(void)
{
        class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
        class_unregister(&devcd_class);
}
__exitcall(devcoredump_exit);