1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2023 Google Corporation
6 #include <linux/devcoredump.h>
8 #include <asm/unaligned.h>
9 #include <net/bluetooth/bluetooth.h>
10 #include <net/bluetooth/hci_core.h>
/* Control-packet types queued on hdev->dump.dump_q; the tag is stored
 * in the skb control block (see hci_dmp_cb) and dispatched on by the
 * hci_devcd_rx() worker.
 */
12 enum hci_devcoredump_pkt_type {
13 HCI_DEVCOREDUMP_PKT_INIT,
14 HCI_DEVCOREDUMP_PKT_SKB,
15 HCI_DEVCOREDUMP_PKT_PATTERN,
16 HCI_DEVCOREDUMP_PKT_COMPLETE,
17 HCI_DEVCOREDUMP_PKT_ABORT,
/* Per-skb control block carrying the hci_devcoredump_pkt_type tag.
 * NOTE(review): field list elided in this view — confirm against the
 * full file.
 */
20 struct hci_devcoredump_skb_cb {
/* Payload layout of a PKT_PATTERN request (fill byte + length).
 * NOTE(review): field list elided in this view; hci_devcd_handle_pkt_pattern()
 * reads ->pattern and ->len from it.
 */
24 struct hci_devcoredump_skb_pattern {
/* Access the devcoredump control block stashed in skb->cb. */
29 #define hci_dmp_cb(skb) ((struct hci_devcoredump_skb_cb *)((skb)->cb))
/* Debug-log a packet that arrived in a state that does not accept it. */
31 #define DBG_UNEXPECTED_STATE() \
33 "Unexpected packet (%d) for state (%d). ", \
34 hci_dmp_cb(skb)->pkt_type, hdev->dump.state)
36 #define MAX_DEVCOREDUMP_HDR_SIZE 512 /* bytes */

/* Rewrite the "Bluetooth devcoredump\nState: %d\n" banner at the start
 * of @buf, bounded by @size. Returns the formatted length plus one so
 * callers account for the terminating NUL that scnprintf writes.
 */
38 static int hci_devcd_update_hdr_state(char *buf, size_t size, int state)
45 len = scnprintf(buf, size, "Bluetooth devcoredump\nState: %d\n", state);
47 return len + 1; /* scnprintf adds \0 at the end upon state rewrite */
50 /* Call with hci_dev_lock only. */
/* Record the new devcoredump state and rewrite the state line of the
 * header already placed at dump.head. Returns the header length as
 * reported by hci_devcd_update_hdr_state().
 */
51 static int hci_devcd_update_state(struct hci_dev *hdev, int state)
53 bt_dev_dbg(hdev, "Updating devcoredump state from %d to %d.",
54 hdev->dump.state, state);
56 hdev->dump.state = state;
58 return hci_devcd_update_hdr_state(hdev->dump.head,
59 hdev->dump.alloc_size, state);
/* Build the devcoredump header into @skb: the state banner, an optional
 * driver-supplied header via the registered dmp_hdr() callback, then a
 * "--- Start dump ---" marker. NOTE(review): local declarations and the
 * return statement are elided in this view — presumably returns skb->len.
 */
62 static int hci_devcd_mkheader(struct hci_dev *hdev, struct sk_buff *skb)
64 char dump_start[] = "--- Start dump ---\n";
68 hdr_len = hci_devcd_update_hdr_state(hdr, sizeof(hdr),
69 HCI_DEVCOREDUMP_IDLE);
70 skb_put_data(skb, hdr, hdr_len);
/* dmp_hdr is optional for this step; hci_devcd_register() requires it,
 * but guard anyway before appending the driver's header.
 */
72 if (hdev->dump.dmp_hdr)
73 hdev->dump.dmp_hdr(hdev, skb);
75 skb_put_data(skb, dump_start, strlen(dump_start));
80 /* Do not call with hci_dev_lock since this calls driver code. */
/* Invoke the driver's optional notify_change() callback with the new
 * devcoredump state.
 */
81 static void hci_devcd_notify(struct hci_dev *hdev, int state)
83 if (hdev->dump.notify_change)
84 hdev->dump.notify_change(hdev, state);
87 /* Call with hci_dev_lock only. */
/* Return the state machine to its default: clear the buffer pointers
 * (without freeing — see hci_devcd_free), go back to IDLE, stop the
 * timeout timer and drop any queued control packets.
 */
88 void hci_devcd_reset(struct hci_dev *hdev)
90 hdev->dump.head = NULL;
91 hdev->dump.tail = NULL;
92 hdev->dump.alloc_size = 0;
94 hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE);
96 cancel_delayed_work(&hdev->dump.dump_timeout);
97 skb_queue_purge(&hdev->dump.dump_q);
100 /* Call with hci_dev_lock only. */
/* Release the vmalloc'ed dump buffer and reset the state machine.
 * vfree(NULL) is a no-op, so this is safe when no buffer is allocated.
 */
101 static void hci_devcd_free(struct hci_dev *hdev)
104 vfree(hdev->dump.head);
106 hci_devcd_reset(hdev);
109 /* Call with hci_dev_lock only. */
/* Allocate the dump buffer of @size bytes and initialize head/tail/end
 * bookkeeping; the state banner is written via the IDLE state update.
 * NOTE(review): return statements elided in this view — presumably
 * returns -ENOMEM on allocation failure and 0 on success.
 */
110 static int hci_devcd_alloc(struct hci_dev *hdev, u32 size)
112 hdev->dump.head = vmalloc(size);
113 if (!hdev->dump.head)
116 hdev->dump.alloc_size = size;
117 hdev->dump.tail = hdev->dump.head;
118 hdev->dump.end = hdev->dump.head + size;
120 hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE);
125 /* Call with hci_dev_lock only. */
/* Append @size bytes from @buf at the current tail, bounds-checked
 * against the end of the allocation. Returns false (elided in this
 * view) when the data would overflow the buffer, true on success.
 */
126 static bool hci_devcd_copy(struct hci_dev *hdev, char *buf, u32 size)
128 if (hdev->dump.tail + size > hdev->dump.end)
131 memcpy(hdev->dump.tail, buf, size);
132 hdev->dump.tail += size;
137 /* Call with hci_dev_lock only. */
/* Fill @len bytes at the current tail with @pattern, bounds-checked
 * against the end of the allocation. Returns false (elided in this
 * view) on overflow, true on success.
 */
138 static bool hci_devcd_memset(struct hci_dev *hdev, u8 pattern, u32 len)
140 if (hdev->dump.tail + len > hdev->dump.end)
143 memset(hdev->dump.tail, pattern, len);
144 hdev->dump.tail += len;
149 /* Call with hci_dev_lock only. */
/* Prepare a new dump: build the header in a temporary skb, allocate a
 * buffer sized for header + @dump_size, and copy the header in.
 * GFP_ATOMIC because this runs under hci_dev_lock. On failure the
 * partially-set-up dump is torn down via hci_devcd_free().
 * NOTE(review): return and skb-free paths are elided in this view.
 */
150 static int hci_devcd_prepare(struct hci_dev *hdev, u32 dump_size)
156 skb = alloc_skb(MAX_DEVCOREDUMP_HDR_SIZE, GFP_ATOMIC);
160 dump_hdr_size = hci_devcd_mkheader(hdev, skb);
162 if (hci_devcd_alloc(hdev, dump_hdr_size + dump_size)) {
167 /* Insert the device header */
168 if (!hci_devcd_copy(hdev, skb->data, skb->len)) {
169 bt_dev_err(hdev, "Failed to insert header");
170 hci_devcd_free(hdev);
/* Handle PKT_INIT: only valid in IDLE. Validates that the packet
 * carries exactly a little-endian u32 dump size, prepares the buffer,
 * moves to ACTIVE and arms the dump timeout. Early-return statements
 * after the error logs are elided in this view.
 */
182 static void hci_devcd_handle_pkt_init(struct hci_dev *hdev, struct sk_buff *skb)
186 if (hdev->dump.state != HCI_DEVCOREDUMP_IDLE) {
187 DBG_UNEXPECTED_STATE();
191 if (skb->len != sizeof(dump_size)) {
192 bt_dev_dbg(hdev, "Invalid dump init pkt");
/* Size was queued LE32 by hci_devcd_init(); length was checked above. */
196 dump_size = get_unaligned_le32(skb_pull_data(skb, 4));
198 bt_dev_err(hdev, "Zero size dump init pkt");
202 if (hci_devcd_prepare(hdev, dump_size)) {
203 bt_dev_err(hdev, "Failed to prepare for dump");
207 hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ACTIVE);
208 queue_delayed_work(hdev->workqueue, &hdev->dump.dump_timeout,
/* Handle PKT_SKB: append the packet payload to the dump buffer; only
 * valid in ACTIVE. A failed copy (buffer overflow) is logged but does
 * not abort the dump.
 */
212 static void hci_devcd_handle_pkt_skb(struct hci_dev *hdev, struct sk_buff *skb)
214 if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
215 DBG_UNEXPECTED_STATE();
219 if (!hci_devcd_copy(hdev, skb->data, skb->len))
220 bt_dev_dbg(hdev, "Failed to insert skb");
/* Handle PKT_PATTERN: fill a region of the dump buffer with a repeated
 * byte; only valid in ACTIVE. The payload must be exactly one
 * hci_devcoredump_skb_pattern (checked before skb_pull_data, so the
 * pointer is valid). A failed memset is logged but non-fatal.
 */
223 static void hci_devcd_handle_pkt_pattern(struct hci_dev *hdev,
226 struct hci_devcoredump_skb_pattern *pattern;
228 if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
229 DBG_UNEXPECTED_STATE();
233 if (skb->len != sizeof(*pattern)) {
234 bt_dev_dbg(hdev, "Invalid pattern skb");
238 pattern = skb_pull_data(skb, sizeof(*pattern));
240 if (!hci_devcd_memset(hdev, pattern->pattern, pattern->len))
241 bt_dev_dbg(hdev, "Failed to set pattern");
/* Handle PKT_COMPLETE: only valid in ACTIVE. Marks the dump DONE and
 * emits a devcoredump containing everything written so far
 * (tail - head bytes); the caller (hci_devcd_rx) resets the state
 * machine afterwards.
 */
244 static void hci_devcd_handle_pkt_complete(struct hci_dev *hdev,
249 if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
250 DBG_UNEXPECTED_STATE();
254 hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_DONE);
255 dump_size = hdev->dump.tail - hdev->dump.head;
257 bt_dev_dbg(hdev, "complete with size %u (expect %zu)", dump_size,
258 hdev->dump.alloc_size);
/* devcoredump takes ownership policy per its API; head stays valid
 * until hci_devcd_reset() clears the pointers.
 */
260 dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);
/* Handle PKT_ABORT: only valid in ACTIVE. Marks the dump ABORT but
 * still emits a devcoredump with whatever data was collected, so a
 * partial dump is not lost.
 */
263 static void hci_devcd_handle_pkt_abort(struct hci_dev *hdev,
268 if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
269 DBG_UNEXPECTED_STATE();
273 hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ABORT);
274 dump_size = hdev->dump.tail - hdev->dump.head;
276 bt_dev_dbg(hdev, "aborted with size %u (expect %zu)", dump_size,
277 hdev->dump.alloc_size);
279 /* Emit a devcoredump with the available data */
280 dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);
283 /* Bluetooth devcoredump state machine.
285 * Devcoredump states:
287 * HCI_DEVCOREDUMP_IDLE: The default state.
289 * HCI_DEVCOREDUMP_ACTIVE: A devcoredump will be in this state once it has
290 * been initialized using hci_devcd_init(). Once active, the driver
291 * can append data using hci_devcd_append() or insert a pattern
292 * using hci_devcd_append_pattern().
294 * HCI_DEVCOREDUMP_DONE: Once the dump collection is complete, the driver
295 * can signal the completion using hci_devcd_complete(). A
296 * devcoredump is generated indicating the completion event and
297 * then the state machine is reset to the default state.
299 * HCI_DEVCOREDUMP_ABORT: The driver can cancel ongoing dump collection in
300 * case of any error using hci_devcd_abort(). A devcoredump is
301 * still generated with the available data indicating the abort
302 * event and then the state machine is reset to the default state.
304 * HCI_DEVCOREDUMP_TIMEOUT: A timeout timer for HCI_DEVCOREDUMP_TIMEOUT sec
305 * is started during devcoredump initialization. Once the timeout
306 * occurs, the driver is notified, a devcoredump is generated with
307 * the available data indicating the timeout event and then the
308 * state machine is reset to the default state.
310 * The driver must register using hci_devcd_register() before using the hci
/* Worker for hdev->dump.dump_rx: drains dump_q and runs the state
 * machine described above. For each packet it dispatches on the
 * pkt_type tag, notifies the driver (outside hci_dev_lock) when the
 * state changed, and resets the machine once the dump is DONE or
 * ABORTed. NOTE(review): the matching hci_dev_lock() calls are elided
 * in this view; the unlock at the end of each iteration pairs with one.
 */
313 void hci_devcd_rx(struct work_struct *work)
315 struct hci_dev *hdev = container_of(work, struct hci_dev, dump.dump_rx);
319 while ((skb = skb_dequeue(&hdev->dump.dump_q))) {
320 /* Return if timeout occurs. The timeout handler function
321 * hci_devcd_timeout() will report the available dump data.
323 if (hdev->dump.state == HCI_DEVCOREDUMP_TIMEOUT) {
/* Snapshot so a post-dispatch state change can be detected below. */
329 start_state = hdev->dump.state;
331 switch (hci_dmp_cb(skb)->pkt_type) {
332 case HCI_DEVCOREDUMP_PKT_INIT:
333 hci_devcd_handle_pkt_init(hdev, skb);
336 case HCI_DEVCOREDUMP_PKT_SKB:
337 hci_devcd_handle_pkt_skb(hdev, skb);
340 case HCI_DEVCOREDUMP_PKT_PATTERN:
341 hci_devcd_handle_pkt_pattern(hdev, skb);
344 case HCI_DEVCOREDUMP_PKT_COMPLETE:
345 hci_devcd_handle_pkt_complete(hdev, skb);
348 case HCI_DEVCOREDUMP_PKT_ABORT:
349 hci_devcd_handle_pkt_abort(hdev, skb);
/* Unknown tag: log and drop; state is left untouched. */
353 bt_dev_dbg(hdev, "Unknown packet (%d) for state (%d). ",
354 hci_dmp_cb(skb)->pkt_type, hdev->dump.state);
358 hci_dev_unlock(hdev);
361 /* Notify the driver about any state changes before resetting
364 if (start_state != hdev->dump.state)
365 hci_devcd_notify(hdev, hdev->dump.state);
367 /* Reset the state machine if the devcoredump is complete */
369 if (hdev->dump.state == HCI_DEVCOREDUMP_DONE ||
370 hdev->dump.state == HCI_DEVCOREDUMP_ABORT)
371 hci_devcd_reset(hdev);
372 hci_dev_unlock(hdev);
375 EXPORT_SYMBOL(hci_devcd_rx);
/* Delayed-work handler for hdev->dump.dump_timeout: fires if the dump
 * does not complete within the configured timeout. Notifies the driver
 * (before taking the lock, since notify calls driver code), cancels the
 * rx worker, emits a devcoredump with the data collected so far, and
 * resets the state machine. NOTE(review): the matching hci_dev_lock()
 * call is elided in this view.
 */
377 void hci_devcd_timeout(struct work_struct *work)
379 struct hci_dev *hdev = container_of(work, struct hci_dev,
380 dump.dump_timeout.work);
383 hci_devcd_notify(hdev, HCI_DEVCOREDUMP_TIMEOUT);
/* Stop processing further queued packets; timeout wins the race. */
387 cancel_work(&hdev->dump.dump_rx);
389 hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_TIMEOUT);
391 dump_size = hdev->dump.tail - hdev->dump.head;
392 bt_dev_dbg(hdev, "timeout with size %u (expect %zu)", dump_size,
393 hdev->dump.alloc_size);
395 /* Emit a devcoredump with the available data */
396 dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);
398 hci_devcd_reset(hdev);
400 hci_dev_unlock(hdev);
402 EXPORT_SYMBOL(hci_devcd_timeout);
/* Register a driver for bluetooth devcoredump support. coredump() and
 * dmp_hdr() are mandatory; notify_change() is optional (guarded at each
 * call site). Marks the device as dump-capable and installs the default
 * timeout. NOTE(review): the error return for missing callbacks and the
 * matching hci_dev_lock() are elided in this view.
 */
404 int hci_devcd_register(struct hci_dev *hdev, coredump_t coredump,
405 dmp_hdr_t dmp_hdr, notify_change_t notify_change)
407 /* Driver must implement coredump() and dmp_hdr() functions for
408 * bluetooth devcoredump. The coredump() should trigger a coredump
409 * event on the controller when the device's coredump sysfs entry is
410 * written to. The dmp_hdr() should create a dump header to identify
411 * the controller/fw/driver info.
413 if (!coredump || !dmp_hdr)
417 hdev->dump.coredump = coredump;
418 hdev->dump.dmp_hdr = dmp_hdr;
419 hdev->dump.notify_change = notify_change;
420 hdev->dump.supported = true;
421 hdev->dump.timeout = DEVCOREDUMP_TIMEOUT;
422 hci_dev_unlock(hdev);
426 EXPORT_SYMBOL(hci_devcd_register);
/* True once a driver has registered via hci_devcd_register(). */
428 static inline bool hci_devcd_enabled(struct hci_dev *hdev)
430 return hdev->dump.supported;
/* Driver entry point: start a dump of @dump_size bytes. Queues a
 * PKT_INIT control packet carrying the size as LE32 and kicks the rx
 * worker; actual state transition happens in hci_devcd_handle_pkt_init().
 * NOTE(review): the -EOPNOTSUPP/-ENOMEM returns are elided in this view.
 */
433 int hci_devcd_init(struct hci_dev *hdev, u32 dump_size)
437 if (!hci_devcd_enabled(hdev))
440 skb = alloc_skb(sizeof(dump_size), GFP_ATOMIC);
444 hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_INIT;
445 put_unaligned_le32(dump_size, skb_put(skb, 4));
447 skb_queue_tail(&hdev->dump.dump_q, skb);
448 queue_work(hdev->workqueue, &hdev->dump.dump_rx);
452 EXPORT_SYMBOL(hci_devcd_init);
/* Driver entry point: append @skb's payload to an active dump. Takes
 * ownership of @skb (it is queued, or — per the elided error path —
 * presumably freed when dumps are unsupported). Tags it PKT_SKB and
 * kicks the rx worker.
 */
454 int hci_devcd_append(struct hci_dev *hdev, struct sk_buff *skb)
459 if (!hci_devcd_enabled(hdev)) {
464 hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_SKB;
466 skb_queue_tail(&hdev->dump.dump_q, skb);
467 queue_work(hdev->workqueue, &hdev->dump.dump_rx);
471 EXPORT_SYMBOL(hci_devcd_append);
/* Driver entry point: request that @len bytes of @pattern be written
 * into the active dump. Encodes the request as a
 * hci_devcoredump_skb_pattern payload in a PKT_PATTERN control packet.
 * NOTE(review): the lines populating p.pattern/p.len and the error
 * returns are elided in this view.
 */
473 int hci_devcd_append_pattern(struct hci_dev *hdev, u8 pattern, u32 len)
475 struct hci_devcoredump_skb_pattern p;
478 if (!hci_devcd_enabled(hdev))
481 skb = alloc_skb(sizeof(p), GFP_ATOMIC);
488 hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_PATTERN;
489 skb_put_data(skb, &p, sizeof(p));
491 skb_queue_tail(&hdev->dump.dump_q, skb);
492 queue_work(hdev->workqueue, &hdev->dump.dump_rx);
496 EXPORT_SYMBOL(hci_devcd_append_pattern);
/* Driver entry point: signal that dump collection finished. Queues an
 * empty PKT_COMPLETE control packet; the rx worker emits the coredump
 * and resets the state machine.
 */
498 int hci_devcd_complete(struct hci_dev *hdev)
502 if (!hci_devcd_enabled(hdev))
505 skb = alloc_skb(0, GFP_ATOMIC);
509 hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_COMPLETE;
511 skb_queue_tail(&hdev->dump.dump_q, skb);
512 queue_work(hdev->workqueue, &hdev->dump.dump_rx);
516 EXPORT_SYMBOL(hci_devcd_complete);
/* Driver entry point: cancel an ongoing dump. Queues an empty
 * PKT_ABORT control packet; the rx worker still emits a coredump with
 * the data collected so far, then resets the state machine.
 */
518 int hci_devcd_abort(struct hci_dev *hdev)
522 if (!hci_devcd_enabled(hdev))
525 skb = alloc_skb(0, GFP_ATOMIC);
529 hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_ABORT;
531 skb_queue_tail(&hdev->dump.dump_q, skb);
532 queue_work(hdev->workqueue, &hdev->dump.dump_rx);
536 EXPORT_SYMBOL(hci_devcd_abort);