1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI AML interfacing support
 *
 * Copyright (C) 2015, Intel Corporation
 * Authors: Lv Zheng <lv.zheng@intel.com>
 */
10 #define pr_fmt(fmt) "ACPI: AML: " fmt
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/wait.h>
15 #include <linux/poll.h>
16 #include <linux/sched.h>
17 #include <linux/kthread.h>
18 #include <linux/proc_fs.h>
19 #include <linux/debugfs.h>
20 #include <linux/circ_buf.h>
21 #include <linux/acpi.h>
24 #define ACPI_AML_BUF_ALIGN (sizeof (acpi_size))
25 #define ACPI_AML_BUF_SIZE PAGE_SIZE
27 #define circ_count(circ) \
28 (CIRC_CNT((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
29 #define circ_count_to_end(circ) \
30 (CIRC_CNT_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
31 #define circ_space(circ) \
32 (CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
33 #define circ_space_to_end(circ) \
34 (CIRC_SPACE_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
36 #define ACPI_AML_OPENED 0x0001
37 #define ACPI_AML_CLOSED 0x0002
38 #define ACPI_AML_IN_USER 0x0004 /* user space is writing cmd */
39 #define ACPI_AML_IN_KERN 0x0008 /* kernel space is reading cmd */
40 #define ACPI_AML_OUT_USER 0x0010 /* user space is reading log */
41 #define ACPI_AML_OUT_KERN 0x0020 /* kernel space is writing log */
42 #define ACPI_AML_USER (ACPI_AML_IN_USER | ACPI_AML_OUT_USER)
43 #define ACPI_AML_KERN (ACPI_AML_IN_KERN | ACPI_AML_OUT_KERN)
44 #define ACPI_AML_BUSY (ACPI_AML_USER | ACPI_AML_KERN)
45 #define ACPI_AML_OPEN (ACPI_AML_OPENED | ACPI_AML_CLOSED)
48 wait_queue_head_t wait;
52 struct task_struct *thread;
53 char out_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
54 struct circ_buf out_crc;
55 char in_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
56 struct circ_buf in_crc;
57 acpi_osd_exec_callback function;
62 static struct acpi_aml_io acpi_aml_io;
63 static bool acpi_aml_initialized;
64 static struct file *acpi_aml_active_reader;
65 static struct dentry *acpi_aml_dentry;
67 static inline bool __acpi_aml_running(void)
69 return acpi_aml_io.thread ? true : false;
72 static inline bool __acpi_aml_access_ok(unsigned long flag)
75 * The debugger interface is in opened state (OPENED && !CLOSED),
76 * then it is allowed to access the debugger buffers from either
77 * user space or the kernel space.
78 * In addition, for the kernel space, only the debugger thread
79 * (thread ID matched) is allowed to access.
81 if (!(acpi_aml_io.flags & ACPI_AML_OPENED) ||
82 (acpi_aml_io.flags & ACPI_AML_CLOSED) ||
83 !__acpi_aml_running())
85 if ((flag & ACPI_AML_KERN) &&
86 current != acpi_aml_io.thread)
91 static inline bool __acpi_aml_readable(struct circ_buf *circ, unsigned long flag)
94 * Another read is not in progress and there is data in buffer
97 if (!(acpi_aml_io.flags & flag) && circ_count(circ))
102 static inline bool __acpi_aml_writable(struct circ_buf *circ, unsigned long flag)
105 * Another write is not in progress and there is buffer space
106 * available for write.
108 if (!(acpi_aml_io.flags & flag) && circ_space(circ))
113 static inline bool __acpi_aml_busy(void)
115 if (acpi_aml_io.flags & ACPI_AML_BUSY)
120 static inline bool __acpi_aml_used(void)
122 return acpi_aml_io.usages ? true : false;
125 static inline bool acpi_aml_running(void)
129 mutex_lock(&acpi_aml_io.lock);
130 ret = __acpi_aml_running();
131 mutex_unlock(&acpi_aml_io.lock);
135 static bool acpi_aml_busy(void)
139 mutex_lock(&acpi_aml_io.lock);
140 ret = __acpi_aml_busy();
141 mutex_unlock(&acpi_aml_io.lock);
145 static bool acpi_aml_used(void)
150 * The usage count is prepared to avoid race conditions between the
151 * starts and the stops of the debugger thread.
153 mutex_lock(&acpi_aml_io.lock);
154 ret = __acpi_aml_used();
155 mutex_unlock(&acpi_aml_io.lock);
159 static bool acpi_aml_kern_readable(void)
163 mutex_lock(&acpi_aml_io.lock);
164 ret = !__acpi_aml_access_ok(ACPI_AML_IN_KERN) ||
165 __acpi_aml_readable(&acpi_aml_io.in_crc, ACPI_AML_IN_KERN);
166 mutex_unlock(&acpi_aml_io.lock);
170 static bool acpi_aml_kern_writable(void)
174 mutex_lock(&acpi_aml_io.lock);
175 ret = !__acpi_aml_access_ok(ACPI_AML_OUT_KERN) ||
176 __acpi_aml_writable(&acpi_aml_io.out_crc, ACPI_AML_OUT_KERN);
177 mutex_unlock(&acpi_aml_io.lock);
181 static bool acpi_aml_user_readable(void)
185 mutex_lock(&acpi_aml_io.lock);
186 ret = !__acpi_aml_access_ok(ACPI_AML_OUT_USER) ||
187 __acpi_aml_readable(&acpi_aml_io.out_crc, ACPI_AML_OUT_USER);
188 mutex_unlock(&acpi_aml_io.lock);
192 static bool acpi_aml_user_writable(void)
196 mutex_lock(&acpi_aml_io.lock);
197 ret = !__acpi_aml_access_ok(ACPI_AML_IN_USER) ||
198 __acpi_aml_writable(&acpi_aml_io.in_crc, ACPI_AML_IN_USER);
199 mutex_unlock(&acpi_aml_io.lock);
203 static int acpi_aml_lock_write(struct circ_buf *circ, unsigned long flag)
207 mutex_lock(&acpi_aml_io.lock);
208 if (!__acpi_aml_access_ok(flag)) {
212 if (!__acpi_aml_writable(circ, flag)) {
216 acpi_aml_io.flags |= flag;
218 mutex_unlock(&acpi_aml_io.lock);
222 static int acpi_aml_lock_read(struct circ_buf *circ, unsigned long flag)
226 mutex_lock(&acpi_aml_io.lock);
227 if (!__acpi_aml_access_ok(flag)) {
231 if (!__acpi_aml_readable(circ, flag)) {
235 acpi_aml_io.flags |= flag;
237 mutex_unlock(&acpi_aml_io.lock);
241 static void acpi_aml_unlock_fifo(unsigned long flag, bool wakeup)
243 mutex_lock(&acpi_aml_io.lock);
244 acpi_aml_io.flags &= ~flag;
246 wake_up_interruptible(&acpi_aml_io.wait);
247 mutex_unlock(&acpi_aml_io.lock);
250 static int acpi_aml_write_kern(const char *buf, int len)
253 struct circ_buf *crc = &acpi_aml_io.out_crc;
257 ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
260 /* sync tail before inserting logs */
262 p = &crc->buf[crc->head];
263 n = min(len, circ_space_to_end(crc));
265 /* sync head after inserting logs */
267 crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
268 acpi_aml_unlock_fifo(ACPI_AML_OUT_KERN, true);
272 static int acpi_aml_readb_kern(void)
275 struct circ_buf *crc = &acpi_aml_io.in_crc;
278 ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
281 /* sync head before removing cmds */
283 p = &crc->buf[crc->tail];
285 /* sync tail before inserting cmds */
287 crc->tail = (crc->tail + 1) & (ACPI_AML_BUF_SIZE - 1);
288 acpi_aml_unlock_fifo(ACPI_AML_IN_KERN, true);
293 * acpi_aml_write_log() - Capture debugger output
294 * @msg: the debugger output
296 * This function should be used to implement acpi_os_printf() to filter out
297 * the debugger output and store the output into the debugger interface
298 * buffer. Return the size of stored logs or errno.
300 static ssize_t acpi_aml_write_log(const char *msg)
303 int count = 0, size = 0;
305 if (!acpi_aml_initialized)
311 ret = acpi_aml_write_kern(msg + size, count);
312 if (ret == -EAGAIN) {
313 ret = wait_event_interruptible(acpi_aml_io.wait,
314 acpi_aml_kern_writable());
316 * We need to retry when the condition
328 return size > 0 ? size : ret;
332 * acpi_aml_read_cmd() - Capture debugger input
333 * @msg: the debugger input
334 * @size: the size of the debugger input
336 * This function should be used to implement acpi_os_get_line() to capture
337 * the debugger input commands and store the input commands into the
338 * debugger interface buffer. Return the size of stored commands or errno.
340 static ssize_t acpi_aml_read_cmd(char *msg, size_t count)
346 * This is ensured by the running fact of the debugger thread
347 * unless a bug is introduced.
349 BUG_ON(!acpi_aml_initialized);
353 * Check each input byte to find the end of the command.
355 ret = acpi_aml_readb_kern();
356 if (ret == -EAGAIN) {
357 ret = wait_event_interruptible(acpi_aml_io.wait,
358 acpi_aml_kern_readable());
360 * We need to retry when the condition becomes
368 *(msg + size) = (char)ret;
373 * acpi_os_get_line() requires a zero terminated command
376 *(msg + size - 1) = '\0';
380 return size > 0 ? size : ret;
383 static int acpi_aml_thread(void *unused)
385 acpi_osd_exec_callback function = NULL;
388 mutex_lock(&acpi_aml_io.lock);
389 if (acpi_aml_io.function) {
390 acpi_aml_io.usages++;
391 function = acpi_aml_io.function;
392 context = acpi_aml_io.context;
394 mutex_unlock(&acpi_aml_io.lock);
399 mutex_lock(&acpi_aml_io.lock);
400 acpi_aml_io.usages--;
401 if (!__acpi_aml_used()) {
402 acpi_aml_io.thread = NULL;
403 wake_up(&acpi_aml_io.wait);
405 mutex_unlock(&acpi_aml_io.lock);
411 * acpi_aml_create_thread() - Create AML debugger thread
412 * @function: the debugger thread callback
413 * @context: the context to be passed to the debugger thread
415 * This function should be used to implement acpi_os_execute() which is
416 * used by the ACPICA debugger to create the debugger thread.
418 static int acpi_aml_create_thread(acpi_osd_exec_callback function, void *context)
420 struct task_struct *t;
422 mutex_lock(&acpi_aml_io.lock);
423 acpi_aml_io.function = function;
424 acpi_aml_io.context = context;
425 mutex_unlock(&acpi_aml_io.lock);
427 t = kthread_create(acpi_aml_thread, NULL, "aml");
429 pr_err("Failed to create AML debugger thread.\n");
433 mutex_lock(&acpi_aml_io.lock);
434 acpi_aml_io.thread = t;
435 acpi_set_debugger_thread_id((acpi_thread_id)(unsigned long)t);
437 mutex_unlock(&acpi_aml_io.lock);
441 static int acpi_aml_wait_command_ready(bool single_step,
442 char *buffer, size_t length)
447 acpi_os_printf("\n%1c ", ACPI_DEBUGGER_EXECUTE_PROMPT);
449 acpi_os_printf("\n%1c ", ACPI_DEBUGGER_COMMAND_PROMPT);
451 status = acpi_os_get_line(buffer, length, NULL);
452 if (ACPI_FAILURE(status))
457 static int acpi_aml_notify_command_complete(void)
462 static int acpi_aml_open(struct inode *inode, struct file *file)
467 mutex_lock(&acpi_aml_io.lock);
469 * The debugger interface is being closed, no new user is allowed
470 * during this period.
472 if (acpi_aml_io.flags & ACPI_AML_CLOSED) {
476 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
478 * Only one reader is allowed to initiate the debugger
481 if (acpi_aml_active_reader) {
485 pr_debug("Opening debugger reader.\n");
486 acpi_aml_active_reader = file;
490 * No writer is allowed unless the debugger thread is
493 if (!(acpi_aml_io.flags & ACPI_AML_OPENED)) {
498 if (acpi_aml_active_reader == file) {
499 pr_debug("Opening debugger interface.\n");
500 mutex_unlock(&acpi_aml_io.lock);
502 pr_debug("Initializing debugger thread.\n");
503 status = acpi_initialize_debugger();
504 if (ACPI_FAILURE(status)) {
505 pr_err("Failed to initialize debugger.\n");
509 pr_debug("Debugger thread initialized.\n");
511 mutex_lock(&acpi_aml_io.lock);
512 acpi_aml_io.flags |= ACPI_AML_OPENED;
513 acpi_aml_io.out_crc.head = acpi_aml_io.out_crc.tail = 0;
514 acpi_aml_io.in_crc.head = acpi_aml_io.in_crc.tail = 0;
515 pr_debug("Debugger interface opened.\n");
520 if (acpi_aml_active_reader == file)
521 acpi_aml_active_reader = NULL;
523 mutex_unlock(&acpi_aml_io.lock);
528 static int acpi_aml_release(struct inode *inode, struct file *file)
530 mutex_lock(&acpi_aml_io.lock);
532 if (file == acpi_aml_active_reader) {
533 pr_debug("Closing debugger reader.\n");
534 acpi_aml_active_reader = NULL;
536 pr_debug("Closing debugger interface.\n");
537 acpi_aml_io.flags |= ACPI_AML_CLOSED;
540 * Wake up all user space/kernel space blocked
543 wake_up_interruptible(&acpi_aml_io.wait);
544 mutex_unlock(&acpi_aml_io.lock);
546 * Wait all user space/kernel space readers/writers to
547 * stop so that ACPICA command loop of the debugger thread
548 * should fail all its command line reads after this point.
550 wait_event(acpi_aml_io.wait, !acpi_aml_busy());
553 * Then we try to terminate the debugger thread if it is
556 pr_debug("Terminating debugger thread.\n");
557 acpi_terminate_debugger();
558 wait_event(acpi_aml_io.wait, !acpi_aml_used());
559 pr_debug("Debugger thread terminated.\n");
561 mutex_lock(&acpi_aml_io.lock);
562 acpi_aml_io.flags &= ~ACPI_AML_OPENED;
564 if (acpi_aml_io.users == 0) {
565 pr_debug("Debugger interface closed.\n");
566 acpi_aml_io.flags &= ~ACPI_AML_CLOSED;
568 mutex_unlock(&acpi_aml_io.lock);
572 static int acpi_aml_read_user(char __user *buf, int len)
575 struct circ_buf *crc = &acpi_aml_io.out_crc;
579 ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
582 /* sync head before removing logs */
584 p = &crc->buf[crc->tail];
585 n = min(len, circ_count_to_end(crc));
586 if (copy_to_user(buf, p, n)) {
590 /* sync tail after removing logs */
592 crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
595 acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
599 static ssize_t acpi_aml_read(struct file *file, char __user *buf,
600 size_t count, loff_t *ppos)
607 if (!access_ok(buf, count))
612 ret = acpi_aml_read_user(buf + size, count);
613 if (ret == -EAGAIN) {
614 if (file->f_flags & O_NONBLOCK)
617 ret = wait_event_interruptible(acpi_aml_io.wait,
618 acpi_aml_user_readable());
620 * We need to retry when the condition
628 if (!acpi_aml_running())
639 return size > 0 ? size : ret;
642 static int acpi_aml_write_user(const char __user *buf, int len)
645 struct circ_buf *crc = &acpi_aml_io.in_crc;
649 ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
652 /* sync tail before inserting cmds */
654 p = &crc->buf[crc->head];
655 n = min(len, circ_space_to_end(crc));
656 if (copy_from_user(p, buf, n)) {
660 /* sync head after inserting cmds */
662 crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
665 acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
669 static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
670 size_t count, loff_t *ppos)
677 if (!access_ok(buf, count))
682 ret = acpi_aml_write_user(buf + size, count);
683 if (ret == -EAGAIN) {
684 if (file->f_flags & O_NONBLOCK)
687 ret = wait_event_interruptible(acpi_aml_io.wait,
688 acpi_aml_user_writable());
690 * We need to retry when the condition
698 if (!acpi_aml_running())
708 return size > 0 ? size : ret;
711 static __poll_t acpi_aml_poll(struct file *file, poll_table *wait)
715 poll_wait(file, &acpi_aml_io.wait, wait);
716 if (acpi_aml_user_readable())
717 masks |= EPOLLIN | EPOLLRDNORM;
718 if (acpi_aml_user_writable())
719 masks |= EPOLLOUT | EPOLLWRNORM;
724 static const struct file_operations acpi_aml_operations = {
725 .read = acpi_aml_read,
726 .write = acpi_aml_write,
727 .poll = acpi_aml_poll,
728 .open = acpi_aml_open,
729 .release = acpi_aml_release,
730 .llseek = generic_file_llseek,
733 static const struct acpi_debugger_ops acpi_aml_debugger = {
734 .create_thread = acpi_aml_create_thread,
735 .read_cmd = acpi_aml_read_cmd,
736 .write_log = acpi_aml_write_log,
737 .wait_command_ready = acpi_aml_wait_command_ready,
738 .notify_command_complete = acpi_aml_notify_command_complete,
741 static int __init acpi_aml_init(void)
748 /* Initialize AML IO interface */
749 mutex_init(&acpi_aml_io.lock);
750 init_waitqueue_head(&acpi_aml_io.wait);
751 acpi_aml_io.out_crc.buf = acpi_aml_io.out_buf;
752 acpi_aml_io.in_crc.buf = acpi_aml_io.in_buf;
754 acpi_aml_dentry = debugfs_create_file("acpidbg",
755 S_IFREG | S_IRUGO | S_IWUSR,
756 acpi_debugfs_dir, NULL,
757 &acpi_aml_operations);
759 ret = acpi_register_debugger(THIS_MODULE, &acpi_aml_debugger);
761 debugfs_remove(acpi_aml_dentry);
762 acpi_aml_dentry = NULL;
766 acpi_aml_initialized = true;
770 static void __exit acpi_aml_exit(void)
772 if (acpi_aml_initialized) {
773 acpi_unregister_debugger(&acpi_aml_debugger);
774 debugfs_remove(acpi_aml_dentry);
775 acpi_aml_dentry = NULL;
776 acpi_aml_initialized = false;
780 module_init(acpi_aml_init);
781 module_exit(acpi_aml_exit);
783 MODULE_AUTHOR("Lv Zheng");
784 MODULE_DESCRIPTION("ACPI debugger userspace IO driver");
785 MODULE_LICENSE("GPL");