// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ec.c - ACPI Embedded Controller Driver (v3)
 *
 * Copyright (C) 2001-2015 Intel Corporation
 *   Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
 *           2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
 *           2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *           2004       Luming Yu <luming.yu@intel.com>
 *           2001, 2002 Andy Grover <andrew.grover@intel.com>
 *           2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2008      Alexey Starikovskiy <astarikovskiy@suse.de>
 */

/* Uncomment next line to get verbose printout */
/* #define DEBUG */
#define pr_fmt(fmt) "ACPI: EC: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#define ACPI_EC_CLASS			"embedded_controller"
#define ACPI_EC_DEVICE_NAME		"Embedded Controller"

/* EC status register */
#define ACPI_EC_FLAG_OBF	0x01	/* Output buffer full */
#define ACPI_EC_FLAG_IBF	0x02	/* Input buffer full */
#define ACPI_EC_FLAG_CMD	0x08	/* Input buffer contains a command */
#define ACPI_EC_FLAG_BURST	0x10	/* Burst mode */
#define ACPI_EC_FLAG_SCI	0x20	/* EC-SCI occurred */
/*
 * The SCI_EVT clearing timing is not defined by the ACPI specification.
 * This leads to lots of practical timing issues for the host EC driver.
 * The following variations are defined (from the target EC firmware's
 * perspective):
 * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
 *         target can clear SCI_EVT at any time so long as the host can see
 *         the indication by reading the status register (EC_SC). So the
 *         host should re-check SCI_EVT after the first time the SCI_EVT
 *         indication is seen, which is the same time the query request
 *         (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
 *         at any later time could indicate another event. Normally such
 *         kind of EC firmware has implemented an event queue and will
 *         return 0x00 to indicate "no outstanding event".
 * QUERY: After seeing the query request (QR_EC) written to the command
 *        register (EC_CMD) by the host and having prepared the responding
 *        event value in the data register (EC_DATA), the target can safely
 *        clear SCI_EVT because the target can confirm that the current
 *        event is being handled by the host. The host then should check
 *        SCI_EVT right after reading the event response from the data
 *        register (EC_DATA).
 * EVENT: After seeing the event response read from the data register
 *        (EC_DATA) by the host, the target can clear SCI_EVT. As the
 *        target requires time to notice the change in the data register
 *        (EC_DATA), the host may be required to wait additional guarding
 *        time before checking the SCI_EVT again. Such guarding may not be
 *        necessary if the host is notified via another IRQ.
 */
#define ACPI_EC_EVT_TIMING_STATUS	0x00
#define ACPI_EC_EVT_TIMING_QUERY	0x01
#define ACPI_EC_EVT_TIMING_EVENT	0x02
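
/*
 * Illustrative sketch (not part of the driver) of how the host-side
 * QR_EC flow differs per timing model above. The sequence is schematic:
 * register names follow the comment above, and details such as waiting
 * for IBF/OBF are omitted.
 *
 *	status = inb(EC_SC);
 *	if (status & ACPI_EC_FLAG_SCI) {
 *		outb(ACPI_EC_COMMAND_QUERY, EC_CMD);	// STATUS: SCI_EVT may
 *							// clear any time from here
 *		event = inb(EC_DATA);			// QUERY: SCI_EVT is already
 *							// clear once data is ready
 *		udelay(guard);				// EVENT: wait a guard period
 *		status = inb(EC_SC);			// before trusting SCI_EVT again
 *	}
 */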
enum ec_command {
	ACPI_EC_COMMAND_READ = 0x80,
	ACPI_EC_COMMAND_WRITE = 0x81,
	ACPI_EC_BURST_ENABLE = 0x82,
	ACPI_EC_BURST_DISABLE = 0x83,
	ACPI_EC_COMMAND_QUERY = 0x84,
};
#define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
#define ACPI_EC_UDELAY_POLL	550	/* Wait 550us between EC accesses when polling */
#define ACPI_EC_CLEAR_MAX	100	/* Maximum number of events to query
					 * when trying to clear the EC */
#define ACPI_EC_MAX_QUERIES	16	/* Maximum number of parallel queries */
enum {
	EC_FLAGS_QUERY_ENABLED,		/* Query is enabled */
	EC_FLAGS_EVENT_HANDLER_INSTALLED,	/* Event handler installed */
	EC_FLAGS_EC_HANDLER_INSTALLED,	/* OpReg handler installed */
	EC_FLAGS_QUERY_METHODS_INSTALLED,	/* _Qxx handlers installed */
	EC_FLAGS_STARTED,		/* Driver is started */
	EC_FLAGS_STOPPED,		/* Driver is stopped */
	EC_FLAGS_EVENTS_MASKED,		/* Events masked */
};
#define ACPI_EC_COMMAND_POLL		0x01 /* Available for command byte */
#define ACPI_EC_COMMAND_COMPLETE	0x02 /* Completed last byte */
/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
MODULE_PARM_DESC(ec_delay, "Timeout (ms) to wait for an EC command to complete");

static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
module_param(ec_max_queries, uint, 0644);
MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");

static bool ec_busy_polling __read_mostly;
module_param(ec_busy_polling, bool, 0644);
MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");

static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
module_param(ec_polling_guard, uint, 0644);
MODULE_PARM_DESC(ec_polling_guard, "Guard time (us) between EC accesses in polling modes");

static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;

/*
 * If the number of false interrupts per one transaction exceeds
 * this threshold, the driver assumes that a GPE storm has occurred
 * and disables the GPE for normal transactions.
 */
static unsigned int ec_storm_threshold __read_mostly = 8;
module_param(ec_storm_threshold, uint, 0644);
MODULE_PARM_DESC(ec_storm_threshold, "Maximum number of false GPEs not considered a GPE storm");

static bool ec_freeze_events __read_mostly;
module_param(ec_freeze_events, bool, 0644);
MODULE_PARM_DESC(ec_freeze_events, "Disable event handling during suspend/resume");

static bool ec_no_wakeup __read_mostly;
module_param(ec_no_wakeup, bool, 0644);
MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
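
/*
 * Example usage of the parameters above. Because this file is built into
 * the "acpi" module namespace (see the note above ec_delay), they are
 * spelled with an "acpi." prefix on the kernel command line, e.g.:
 *
 *	acpi.ec_delay=1000 acpi.ec_no_wakeup=1 acpi.ec_freeze_events=1
 *
 * and, given the 0644 permissions, they are also writable at run time
 * under /sys/module/acpi/parameters/ (assuming sysfs is mounted at /sys).
 */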
struct acpi_ec_query_handler {
	struct list_head node;
	acpi_ec_query_func func;
	acpi_handle handle;
	void *data;
	u8 query_bit;
	struct kref kref;
};

struct transaction {
	const u8 *wdata;
	u8 *rdata;
	unsigned short irq_count;
	u8 command;
	u8 wi;
	u8 ri;
	u8 wlen;
	u8 rlen;
	u8 flags;
};

struct acpi_ec_query {
	struct transaction transaction;
	struct work_struct work;
	struct acpi_ec_query_handler *handler;
	struct acpi_ec *ec;
};
static int acpi_ec_submit_query(struct acpi_ec *ec);
static void advance_transaction(struct acpi_ec *ec, bool interrupt);
static void acpi_ec_event_handler(struct work_struct *work);

struct acpi_ec *first_ec;
EXPORT_SYMBOL(first_ec);

static struct acpi_ec *boot_ec;
static bool boot_ec_is_ecdt;
static struct workqueue_struct *ec_wq;
static struct workqueue_struct *ec_query_wq;

static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
static int EC_FLAGS_TRUST_DSDT_GPE; /* Needs DSDT GPE as correction setting */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
/* --------------------------------------------------------------------------
 *                           Logging/Debugging
 * -------------------------------------------------------------------------- */

/*
 * Splitters used by the developers to track the boundary of the EC
 * handling processes.
 */
#ifdef DEBUG
#define EC_DBG_SEP	" "
#define EC_DBG_DRV	"+++++"
#define EC_DBG_STM	"====="
#define EC_DBG_REQ	"*****"
#define EC_DBG_EVT	"#####"
#else
#define EC_DBG_SEP	""
#define EC_DBG_DRV
#define EC_DBG_STM
#define EC_DBG_REQ
#define EC_DBG_EVT
#endif
#define ec_log_raw(fmt, ...) \
	pr_info(fmt "\n", ##__VA_ARGS__)
#define ec_dbg_raw(fmt, ...) \
	pr_debug(fmt "\n", ##__VA_ARGS__)
#define ec_log(filter, fmt, ...) \
	ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
#define ec_dbg(filter, fmt, ...) \
	ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)

#define ec_log_drv(fmt, ...) \
	ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_drv(fmt, ...) \
	ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_stm(fmt, ...) \
	ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
#define ec_dbg_req(fmt, ...) \
	ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
#define ec_dbg_evt(fmt, ...) \
	ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
#define ec_dbg_ref(ec, fmt, ...) \
	ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
/* --------------------------------------------------------------------------
 *                           Device Flags
 * -------------------------------------------------------------------------- */
static bool acpi_ec_started(struct acpi_ec *ec)
{
	return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	       !test_bit(EC_FLAGS_STOPPED, &ec->flags);
}

static bool acpi_ec_event_enabled(struct acpi_ec *ec)
{
	/*
	 * There is an OSPM early stage logic. During the early stages
	 * (boot/resume), OSPMs shouldn't enable the event handling, only
	 * the EC transactions are allowed to be performed.
	 */
	if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		return false;
	/*
	 * However, disabling the event handling is experimental for late
	 * stage (suspend), and is controlled by the boot parameter of
	 * "ec_freeze_events":
	 * 1. true:  The EC event handling is disabled before entering
	 *           the noirq stage.
	 * 2. false: The EC event handling is automatically disabled as
	 *           soon as the EC driver is stopped.
	 */
	if (ec_freeze_events)
		return acpi_ec_started(ec);
	else
		return test_bit(EC_FLAGS_STARTED, &ec->flags);
}

static bool acpi_ec_flushed(struct acpi_ec *ec)
{
	return ec->reference_count == 1;
}
/* --------------------------------------------------------------------------
 *                           EC Registers
 * -------------------------------------------------------------------------- */
static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
	u8 x = inb(ec->command_addr);

	ec_dbg_raw("EC_SC(R) = 0x%2.2x "
		   "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
		   x,
		   !!(x & ACPI_EC_FLAG_SCI),
		   !!(x & ACPI_EC_FLAG_BURST),
		   !!(x & ACPI_EC_FLAG_CMD),
		   !!(x & ACPI_EC_FLAG_IBF),
		   !!(x & ACPI_EC_FLAG_OBF));
	return x;
}

static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
	u8 x = inb(ec->data_addr);

	ec->timestamp = jiffies;
	ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
	return x;
}

static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
	ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
	outb(command, ec->command_addr);
	ec->timestamp = jiffies;
}

static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
	ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
	outb(data, ec->data_addr);
	ec->timestamp = jiffies;
}
#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
static const char *acpi_ec_cmd_string(u8 cmd)
{
	switch (cmd) {
	case 0x80:
		return "RD_EC";
	case 0x81:
		return "WR_EC";
	case 0x82:
		return "BE_EC";
	case 0x83:
		return "BD_EC";
	case 0x84:
		return "QR_EC";
	}
	return "UNKNOWN";
}
#else
#define acpi_ec_cmd_string(cmd)		"UNDEF"
#endif
/* --------------------------------------------------------------------------
 *                           GPE Registers
 * -------------------------------------------------------------------------- */

static inline bool acpi_ec_gpe_status_set(struct acpi_ec *ec)
{
	acpi_event_status gpe_status = 0;

	(void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
	return !!(gpe_status & ACPI_EVENT_FLAG_STATUS_SET);
}

static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
{
	if (open)
		acpi_enable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
	}
	if (acpi_ec_gpe_status_set(ec)) {
		/*
		 * On some platforms, EN=1 writes cannot trigger GPE. So
		 * software needs to manually trigger a pseudo GPE event on
		 * EN=1 writes.
		 */
		ec_dbg_raw("Polling quirk");
		advance_transaction(ec, false);
	}
}

static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
{
	if (close)
		acpi_disable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
	}
}
/* --------------------------------------------------------------------------
 * Transaction Management
 * -------------------------------------------------------------------------- */

static void acpi_ec_submit_request(struct acpi_ec *ec)
{
	ec->reference_count++;
	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count == 1)
		acpi_ec_enable_gpe(ec, true);
}

static void acpi_ec_complete_request(struct acpi_ec *ec)
{
	bool flushed = false;

	ec->reference_count--;
	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count == 0)
		acpi_ec_disable_gpe(ec, true);
	flushed = acpi_ec_flushed(ec);
	if (flushed)
		wake_up(&ec->wait);
}
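
/*
 * Typical pairing of the two helpers above (see
 * acpi_ec_transaction_unlocked() below): a request reference is taken
 * before a command is programmed and dropped when it completes, so the
 * first submission enables the GPE and the last completion disables it
 * and wakes up a pending flush:
 *
 *	if (acpi_ec_submit_flushable_request(ec)) {
 *		... program and poll the transaction ...
 *		acpi_ec_complete_request(ec);
 *	}
 */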
static void acpi_ec_mask_events(struct acpi_ec *ec)
{
	if (!test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
		if (ec->gpe >= 0)
			acpi_ec_disable_gpe(ec, false);
		else
			disable_irq_nosync(ec->irq);

		ec_dbg_drv("Polling enabled");
		set_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
	}
}

static void acpi_ec_unmask_events(struct acpi_ec *ec)
{
	if (test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
		clear_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
		if (ec->gpe >= 0)
			acpi_ec_enable_gpe(ec, false);
		else
			enable_irq(ec->irq);

		ec_dbg_drv("Polling disabled");
	}
}
/**
 * acpi_ec_submit_flushable_request() - Increase the reference count unless
 *                                      a flush operation is in progress
 * @ec: the EC device
 *
 * This function must be used before taking a new action that should hold
 * the reference count. If this function returns false, then the action
 * must be discarded or it will prevent the flush operation from being
 * completed.
 */
static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
{
	if (!acpi_ec_started(ec))
		return false;
	acpi_ec_submit_request(ec);
	return true;
}
static void acpi_ec_submit_event(struct acpi_ec *ec)
{
	/*
	 * It is safe to mask the events here, because acpi_ec_close_event()
	 * will run at least once after this.
	 */
	acpi_ec_mask_events(ec);
	if (!acpi_ec_event_enabled(ec))
		return;

	if (ec->event_state != EC_EVENT_READY)
		return;

	ec_dbg_evt("Command(%s) submitted/blocked",
		   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));

	ec->event_state = EC_EVENT_IN_PROGRESS;
	/*
	 * If events_to_process is greater than 0 at this point, the while ()
	 * loop in acpi_ec_event_handler() is still running and incrementing
	 * events_to_process will cause it to invoke acpi_ec_submit_query() once
	 * more, so it is not necessary to queue up the event work to start the
	 * same loop again.
	 */
	if (ec->events_to_process++ > 0)
		return;

	ec->events_in_progress++;
	queue_work(ec_wq, &ec->work);
}
static void acpi_ec_complete_event(struct acpi_ec *ec)
{
	if (ec->event_state == EC_EVENT_IN_PROGRESS)
		ec->event_state = EC_EVENT_COMPLETE;
}

static void acpi_ec_close_event(struct acpi_ec *ec)
{
	if (ec->event_state != EC_EVENT_READY)
		ec_dbg_evt("Command(%s) unblocked",
			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));

	ec->event_state = EC_EVENT_READY;
	acpi_ec_unmask_events(ec);
}

static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
{
	if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		ec_log_drv("event unblocked");
	/*
	 * Unconditionally invoke this once after enabling the event
	 * handling mechanism to detect the pending events.
	 */
	advance_transaction(ec, false);
}

static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
{
	if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		ec_log_drv("event blocked");
}
/*
 * Process _Q events that might have accumulated in the EC.
 * Run with locked ec mutex.
 */
static void acpi_ec_clear(struct acpi_ec *ec)
{
	int i;

	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
		if (acpi_ec_submit_query(ec))
			break;
	}
	if (unlikely(i == ACPI_EC_CLEAR_MAX))
		pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
	else
		pr_info("%d stale EC events cleared\n", i);
}
static void acpi_ec_enable_event(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec))
		__acpi_ec_enable_event(ec);
	spin_unlock_irqrestore(&ec->lock, flags);

	/* Drain additional events if hardware requires that */
	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);
}
#ifdef CONFIG_PM_SLEEP
static void __acpi_ec_flush_work(void)
{
	flush_workqueue(ec_wq); /* flush ec->work */
	flush_workqueue(ec_query_wq); /* flush queries */
}

static void acpi_ec_disable_event(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	__acpi_ec_disable_event(ec);
	spin_unlock_irqrestore(&ec->lock, flags);

	/*
	 * When ec_freeze_events is true, we need to flush events in
	 * the proper position before entering the noirq stage.
	 */
	__acpi_ec_flush_work();
}

void acpi_ec_flush_work(void)
{
	/* Without ec_wq there is nothing to flush. */
	if (!ec_wq)
		return;

	__acpi_ec_flush_work();
}
#endif /* CONFIG_PM_SLEEP */
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
	unsigned long flags;
	bool guarded;

	spin_lock_irqsave(&ec->lock, flags);
	/*
	 * If firmware SCI_EVT clearing timing is "event", we actually
	 * don't know when the SCI_EVT will be cleared by firmware after
	 * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
	 * acceptable period.
	 *
	 * The guarding period is applicable if the event state is not
	 * EC_EVENT_READY, but otherwise if the current transaction is of the
	 * ACPI_EC_COMMAND_QUERY type, the guarding should have elapsed already
	 * and it should not be applied to let the transaction transition into
	 * the ACPI_EC_COMMAND_POLL state immediately.
	 */
	guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		ec->event_state != EC_EVENT_READY &&
		(!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY);
	spin_unlock_irqrestore(&ec->lock, flags);
	return guarded;
}
static int ec_transaction_polled(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}

static int ec_transaction_completed(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}
static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
{
	ec->curr->flags |= flag;

	if (ec->curr->command != ACPI_EC_COMMAND_QUERY)
		return;

	switch (ec_event_clearing) {
	case ACPI_EC_EVT_TIMING_STATUS:
		if (flag == ACPI_EC_COMMAND_POLL)
			acpi_ec_close_event(ec);

		return;

	case ACPI_EC_EVT_TIMING_QUERY:
		if (flag == ACPI_EC_COMMAND_COMPLETE)
			acpi_ec_close_event(ec);

		return;

	case ACPI_EC_EVT_TIMING_EVENT:
		if (flag == ACPI_EC_COMMAND_COMPLETE)
			acpi_ec_complete_event(ec);
	}
}
static void acpi_ec_spurious_interrupt(struct acpi_ec *ec, struct transaction *t)
{
	if (t->irq_count < ec_storm_threshold)
		++t->irq_count;

	/* Trigger if the threshold is 0 too. */
	if (t->irq_count == ec_storm_threshold)
		acpi_ec_mask_events(ec);
}
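
/*
 * advance_transaction() below is the single state machine step used from
 * both IRQ and task context: it clears GPE_STS, samples EC_SC once and,
 * based on that, either writes the next command/data byte, reads a
 * response byte, or marks the transaction complete, finally submitting
 * an event if SCI_EVT is set in the sampled status.
 */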
static void advance_transaction(struct acpi_ec *ec, bool interrupt)
{
	struct transaction *t = ec->curr;
	bool wakeup = false;
	u8 status;

	ec_dbg_stm("%s (%d)", interrupt ? "IRQ" : "TASK", smp_processor_id());

	/*
	 * Clear GPE_STS upfront to allow subsequent hardware GPE_STS 0->1
	 * changes to always trigger a GPE interrupt.
	 *
	 * GPE STS is a W1C register, which means:
	 *
	 * 1. Software can clear it without worrying about clearing the other
	 *    GPEs' STS bits when the hardware sets them in parallel.
	 *
	 * 2. As long as software can ensure only clearing it when it is set,
	 *    hardware won't set it in parallel.
	 */
	if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec))
		acpi_clear_gpe(NULL, ec->gpe);

	status = acpi_ec_read_status(ec);

	/*
	 * Another IRQ or a guarded polling mode advancement is detected,
	 * the next QR_EC submission is then allowed.
	 */
	if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    ec->event_state == EC_EVENT_COMPLETE)
			acpi_ec_close_event(ec);

		if (!t)
			goto out;
	}

	if (t->flags & ACPI_EC_COMMAND_POLL) {
		if (t->wlen > t->wi) {
			if (!(status & ACPI_EC_FLAG_IBF))
				acpi_ec_write_data(ec, t->wdata[t->wi++]);
			else if (interrupt && !(status & ACPI_EC_FLAG_SCI))
				acpi_ec_spurious_interrupt(ec, t);
		} else if (t->rlen > t->ri) {
			if (status & ACPI_EC_FLAG_OBF) {
				t->rdata[t->ri++] = acpi_ec_read_data(ec);
				if (t->rlen == t->ri) {
					ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
					wakeup = true;
					if (t->command == ACPI_EC_COMMAND_QUERY)
						ec_dbg_evt("Command(%s) completed by hardware",
							   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
				}
			} else if (interrupt && !(status & ACPI_EC_FLAG_SCI)) {
				acpi_ec_spurious_interrupt(ec, t);
			}
		} else if (t->wlen == t->wi && !(status & ACPI_EC_FLAG_IBF)) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			wakeup = true;
		}
	} else if (!(status & ACPI_EC_FLAG_IBF)) {
		acpi_ec_write_cmd(ec, t->command);
		ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
	}

out:
	if (status & ACPI_EC_FLAG_SCI)
		acpi_ec_submit_event(ec);

	if (wakeup && interrupt)
		wake_up(&ec->wait);
}
static void start_transaction(struct acpi_ec *ec)
{
	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
	ec->curr->flags = 0;
}
static int ec_guard(struct acpi_ec *ec)
{
	unsigned long guard = usecs_to_jiffies(ec->polling_guard);
	unsigned long timeout = ec->timestamp + guard;

	/* Ensure guarding period before polling EC status */
	do {
		if (ec->busy_polling) {
			/* Perform busy polling */
			if (ec_transaction_completed(ec))
				return 0;
			udelay(jiffies_to_usecs(guard));
		} else {
			/*
			 * Perform wait polling
			 * 1. Wait the transaction to be completed by the
			 *    GPE handler after the transaction enters
			 *    ACPI_EC_COMMAND_POLL state.
			 * 2. A special guarding logic is also required
			 *    for event clearing mode "event" before the
			 *    transaction enters ACPI_EC_COMMAND_POLL
			 *    state.
			 */
			if (!ec_transaction_polled(ec) &&
			    !acpi_ec_guard_event(ec))
				break;
			if (wait_event_timeout(ec->wait,
					       ec_transaction_completed(ec),
					       guard))
				return 0;
		}
	} while (time_before(jiffies, timeout));
	return -ETIME;
}
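
/*
 * Worked example for the guard interval above: with the default
 * ec_polling_guard of 550us and HZ=1000 (an assumption; other HZ values
 * scale accordingly), usecs_to_jiffies(550) rounds up to 1 jiffy, so
 * busy polling waits in ~1ms steps (jiffies_to_usecs(1) == 1000us) and
 * wait polling sleeps at most one jiffy per wait_event_timeout() round.
 */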
static int ec_poll(struct acpi_ec *ec)
{
	unsigned long flags;
	int repeat = 5; /* number of command restarts */

	while (repeat--) {
		unsigned long delay = jiffies +
			msecs_to_jiffies(ec_delay);
		do {
			if (!ec_guard(ec))
				return 0;
			spin_lock_irqsave(&ec->lock, flags);
			advance_transaction(ec, false);
			spin_unlock_irqrestore(&ec->lock, flags);
		} while (time_before(jiffies, delay));
		pr_debug("controller reset, restart transaction\n");
		spin_lock_irqsave(&ec->lock, flags);
		start_transaction(ec);
		spin_unlock_irqrestore(&ec->lock, flags);
	}
	return -ETIME;
}
static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
					struct transaction *t)
{
	unsigned long tmp;
	int ret = 0;

	/* start transaction */
	spin_lock_irqsave(&ec->lock, tmp);
	/* Enable GPE for command processing (IBF=0/OBF=1) */
	if (!acpi_ec_submit_flushable_request(ec)) {
		ret = -EINVAL;
		goto unlock;
	}
	ec_dbg_ref(ec, "Increase command");
	/* following two actions should be kept atomic */
	ec->curr = t;
	ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
	start_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, tmp);

	ret = ec_poll(ec);

	spin_lock_irqsave(&ec->lock, tmp);
	if (t->irq_count == ec_storm_threshold)
		acpi_ec_unmask_events(ec);
	ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
	ec->curr = NULL;
	/* Disable GPE for command processing (IBF=0/OBF=1) */
	acpi_ec_complete_request(ec);
	ec_dbg_ref(ec, "Decrease command");
unlock:
	spin_unlock_irqrestore(&ec->lock, tmp);
	return ret;
}
static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
	int status;
	u32 glk;

	if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
		return -EINVAL;
	if (t->rdata)
		memset(t->rdata, 0, t->rlen);

	mutex_lock(&ec->mutex);
	if (ec->global_lock) {
		status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
		if (ACPI_FAILURE(status)) {
			status = -ENODEV;
			goto unlock;
		}
	}

	status = acpi_ec_transaction_unlocked(ec, t);

	if (ec->global_lock)
		acpi_release_global_lock(glk);
unlock:
	mutex_unlock(&ec->mutex);
	return status;
}
static int acpi_ec_burst_enable(struct acpi_ec *ec)
{
	u8 d;
	struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
				.wdata = NULL, .rdata = &d,
				.wlen = 0, .rlen = 1};

	return acpi_ec_transaction(ec, &t);
}

static int acpi_ec_burst_disable(struct acpi_ec *ec)
{
	struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
				.wdata = NULL, .rdata = NULL,
				.wlen = 0, .rlen = 0};

	return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
				acpi_ec_transaction(ec, &t) : 0;
}
static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
{
	int result;
	u8 d;
	struct transaction t = {.command = ACPI_EC_COMMAND_READ,
				.wdata = &address, .rdata = &d,
				.wlen = 1, .rlen = 1};

	result = acpi_ec_transaction(ec, &t);
	*data = d;
	return result;
}

static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
	u8 wdata[2] = { address, data };
	struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
				.wdata = wdata, .rdata = NULL,
				.wlen = 2, .rlen = 0};

	return acpi_ec_transaction(ec, &t);
}
int ec_read(u8 addr, u8 *val)
{
	int err;
	u8 temp_data;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_read(first_ec, addr, &temp_data);

	if (!err) {
		*val = temp_data;
		return 0;
	}

	return err;
}
EXPORT_SYMBOL(ec_read);

int ec_write(u8 addr, u8 val)
{
	if (!first_ec)
		return -ENODEV;

	return acpi_ec_write(first_ec, addr, val);
}
EXPORT_SYMBOL(ec_write);

int ec_transaction(u8 command,
		   const u8 *wdata, unsigned wdata_len,
		   u8 *rdata, unsigned rdata_len)
{
	struct transaction t = {.command = command,
				.wdata = wdata, .rdata = rdata,
				.wlen = wdata_len, .rlen = rdata_len};

	if (!first_ec)
		return -EINVAL;

	return acpi_ec_transaction(first_ec, &t);
}
EXPORT_SYMBOL(ec_transaction);
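
/*
 * Minimal usage sketch for the exported helpers above (hypothetical
 * consumer code, not part of this file; the 0x50 offset is made up):
 *
 *	u8 val;
 *
 *	if (!ec_read(0x50, &val))
 *		pr_info("EC reg 0x50 = %#x\n", val);
 *	if (ec_write(0x50, val | 0x01))
 *		pr_warn("EC write failed\n");
 *
 * ec_transaction() covers vendor-specific commands that do not fit the
 * fixed read/write layout, by passing raw wdata/rdata buffers.
 */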
/* Get the handle to the EC device */
acpi_handle ec_get_handle(void)
{
	if (!first_ec)
		return NULL;

	return first_ec->handle;
}
EXPORT_SYMBOL(ec_get_handle);
static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
		ec_dbg_drv("Starting EC");
		/* Enable GPE for event processing (SCI_EVT=1) */
		if (!resuming) {
			acpi_ec_submit_request(ec);
			ec_dbg_ref(ec, "Increase driver");
		}
		ec_log_drv("EC started");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}

static bool acpi_ec_stopped(struct acpi_ec *ec)
{
	unsigned long flags;
	bool flushed;

	spin_lock_irqsave(&ec->lock, flags);
	flushed = acpi_ec_flushed(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
	return flushed;
}

static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec)) {
		ec_dbg_drv("Stopping EC");
		set_bit(EC_FLAGS_STOPPED, &ec->flags);
		spin_unlock_irqrestore(&ec->lock, flags);
		wait_event(ec->wait, acpi_ec_stopped(ec));
		spin_lock_irqsave(&ec->lock, flags);
		/* Disable GPE for event processing (SCI_EVT=1) */
		if (!suspending) {
			acpi_ec_complete_request(ec);
			ec_dbg_ref(ec, "Decrease driver");
		} else if (!ec_freeze_events)
			__acpi_ec_disable_event(ec);
		clear_bit(EC_FLAGS_STARTED, &ec->flags);
		clear_bit(EC_FLAGS_STOPPED, &ec->flags);
		ec_log_drv("EC stopped");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}
static void acpi_ec_enter_noirq(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	ec->busy_polling = true;
	ec->polling_guard = 0;
	ec_log_drv("interrupt blocked");
	spin_unlock_irqrestore(&ec->lock, flags);
}

static void acpi_ec_leave_noirq(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	ec->busy_polling = ec_busy_polling;
	ec->polling_guard = ec_polling_guard;
	ec_log_drv("interrupt unblocked");
	spin_unlock_irqrestore(&ec->lock, flags);
}
void acpi_ec_block_transactions(void)
{
	struct acpi_ec *ec = first_ec;

	if (!ec)
		return;

	mutex_lock(&ec->mutex);
	/* Prevent transactions from being carried out */
	acpi_ec_stop(ec, true);
	mutex_unlock(&ec->mutex);
}

void acpi_ec_unblock_transactions(void)
{
	/*
	 * Allow transactions to happen again (this function is called from
	 * atomic context during wakeup, so we don't need to acquire the mutex).
	 */
	if (first_ec)
		acpi_ec_start(first_ec, true);
}
/* --------------------------------------------------------------------------
 *                           Event Management
 * -------------------------------------------------------------------------- */
static struct acpi_ec_query_handler *
acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
{
	struct acpi_ec_query_handler *handler;

	mutex_lock(&ec->mutex);
	list_for_each_entry(handler, &ec->list, node) {
		if (value == handler->query_bit) {
			kref_get(&handler->kref);
			mutex_unlock(&ec->mutex);
			return handler;
		}
	}
	mutex_unlock(&ec->mutex);
	return NULL;
}

static void acpi_ec_query_handler_release(struct kref *kref)
{
	struct acpi_ec_query_handler *handler =
		container_of(kref, struct acpi_ec_query_handler, kref);

	kfree(handler);
}

static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
{
	kref_put(&handler->kref, acpi_ec_query_handler_release);
}
int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
			      acpi_handle handle, acpi_ec_query_func func,
			      void *data)
{
	struct acpi_ec_query_handler *handler =
		kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);

	if (!handler)
		return -ENOMEM;

	handler->query_bit = query_bit;
	handler->handle = handle;
	handler->func = func;
	handler->data = data;
	mutex_lock(&ec->mutex);
	kref_init(&handler->kref);
	list_add(&handler->node, &ec->list);
	mutex_unlock(&ec->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
					  bool remove_all, u8 query_bit)
{
	struct acpi_ec_query_handler *handler, *tmp;
	LIST_HEAD(free_list);

	mutex_lock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &ec->list, node) {
		if (remove_all || query_bit == handler->query_bit) {
			list_del_init(&handler->node);
			list_add(&handler->node, &free_list);
		}
	}
	mutex_unlock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &free_list, node)
		acpi_ec_put_query_handler(handler);
}

void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
	acpi_ec_remove_query_handlers(ec, false, query_bit);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
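
/*
 * Usage sketch (hypothetical consumer, e.g. a laptop platform driver):
 * register a callback for query value 0xD0 instead of an AML _QD0
 * handler, and drop it on teardown. The 0xD0 value and names are
 * illustrative only:
 *
 *	static void my_hotkey_event(void *data) { ... }
 *
 *	acpi_ec_add_query_handler(first_ec, 0xD0, NULL,
 *				  my_hotkey_event, my_data);
 *	...
 *	acpi_ec_remove_query_handler(first_ec, 0xD0);
 *
 * Handlers are kref-counted, so removal is safe even while a query for
 * the same value is still executing on the query workqueue.
 */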
static void acpi_ec_event_processor(struct work_struct *work)
{
	struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
	struct acpi_ec_query_handler *handler = q->handler;
	struct acpi_ec *ec = q->ec;

	ec_dbg_evt("Query(0x%02x) started", handler->query_bit);

	if (handler->func)
		handler->func(handler->data);
	else if (handler->handle)
		acpi_evaluate_object(handler->handle, NULL, NULL, NULL);

	ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);

	spin_lock_irq(&ec->lock);
	ec->queries_in_progress--;
	spin_unlock_irq(&ec->lock);

	acpi_ec_put_query_handler(handler);
	kfree(q);
}
static struct acpi_ec_query *acpi_ec_create_query(struct acpi_ec *ec, u8 *pval)
{
	struct acpi_ec_query *q;
	struct transaction *t;

	q = kzalloc(sizeof(struct acpi_ec_query), GFP_KERNEL);
	if (!q)
		return NULL;

	INIT_WORK(&q->work, acpi_ec_event_processor);
	t = &q->transaction;
	t->command = ACPI_EC_COMMAND_QUERY;
	t->rdata = pval;
	t->rlen = 1;
	q->ec = ec;
	return q;
}
static int acpi_ec_submit_query(struct acpi_ec *ec)
{
	struct acpi_ec_query *q;
	u8 value = 0;
	int result;

	q = acpi_ec_create_query(ec, &value);
	if (!q)
		return -ENOMEM;

	/*
	 * Query the EC to find out which _Qxx method we need to evaluate.
	 * Note that successful completion of the query causes the ACPI_EC_SCI
	 * bit to be cleared (and thus clearing the interrupt source).
	 */
	result = acpi_ec_transaction(ec, &q->transaction);
	if (result)
		goto err_exit;

	if (!value) {
		result = -ENODATA;
		goto err_exit;
	}

	q->handler = acpi_ec_get_query_handler_by_value(ec, value);
	if (!q->handler) {
		result = -ENODATA;
		goto err_exit;
	}

	/*
	 * It is reported that _Qxx are evaluated in a parallel way on Windows:
	 * https://bugzilla.kernel.org/show_bug.cgi?id=94411
	 *
	 * Put this log entry before queue_work() to make it appear in the log
	 * before any other messages emitted during workqueue handling.
	 */
	ec_dbg_evt("Query(0x%02x) scheduled", value);

	spin_lock_irq(&ec->lock);

	ec->queries_in_progress++;
	queue_work(ec_query_wq, &q->work);

	spin_unlock_irq(&ec->lock);

	return 0;

err_exit:
	kfree(q);
	return result;
}
static void acpi_ec_event_handler(struct work_struct *work)
{
	struct acpi_ec *ec = container_of(work, struct acpi_ec, work);

	ec_dbg_evt("Event started");

	spin_lock_irq(&ec->lock);

	while (ec->events_to_process) {
		spin_unlock_irq(&ec->lock);

		acpi_ec_submit_query(ec);

		spin_lock_irq(&ec->lock);

		ec->events_to_process--;
	}

	/*
	 * Before exiting, make sure that it will be possible to queue up the
	 * event handling work again regardless of whether or not the query
	 * queued up above is processed successfully.
	 */
	if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
		bool guard_timeout;

		acpi_ec_complete_event(ec);

		ec_dbg_evt("Event stopped");

		spin_unlock_irq(&ec->lock);

		guard_timeout = !!ec_guard(ec);

		spin_lock_irq(&ec->lock);

		/* Take care of SCI_EVT unless someone else is doing that. */
		if (guard_timeout && !ec->curr)
			advance_transaction(ec, false);
	} else {
		acpi_ec_close_event(ec);

		ec_dbg_evt("Event stopped");
	}

	ec->events_in_progress--;

	spin_unlock_irq(&ec->lock);
}
static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	advance_transaction(ec, true);
	spin_unlock_irqrestore(&ec->lock, flags);
}

static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
			       u32 gpe_number, void *data)
{
	acpi_ec_handle_interrupt(data);
	return ACPI_INTERRUPT_HANDLED;
}

static irqreturn_t acpi_ec_irq_handler(int irq, void *data)
{
	acpi_ec_handle_interrupt(data);
	return IRQ_HANDLED;
}
/* --------------------------------------------------------------------------
 * Address Space Management
 * -------------------------------------------------------------------------- */

static acpi_status
acpi_ec_space_handler(u32 function, acpi_physical_address address,
		      u32 bits, u64 *value64,
		      void *handler_context, void *region_context)
{
	struct acpi_ec *ec = handler_context;
	int result = 0, i, bytes = bits / 8;
	u8 *value = (u8 *)value64;

	if ((address > 0xFF) || !value || !handler_context)
		return AE_BAD_PARAMETER;

	if (function != ACPI_READ && function != ACPI_WRITE)
		return AE_BAD_PARAMETER;

	if (ec->busy_polling || bits > 8)
		acpi_ec_burst_enable(ec);

	for (i = 0; i < bytes; ++i, ++address, ++value)
		result = (function == ACPI_READ) ?
			acpi_ec_read(ec, address, value) :
			acpi_ec_write(ec, address, *value);

	if (ec->busy_polling || bits > 8)
		acpi_ec_burst_disable(ec);

	switch (result) {
	case -EINVAL:
		return AE_BAD_PARAMETER;
	case -ENODEV:
		return AE_NOT_FOUND;
	case -ETIME:
		return AE_TIME;
	default:
		return AE_OK;
	}
}
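
/*
 * For example, a 16-bit AML field read at EC offset 0x10 arrives here as
 * function == ACPI_READ, address == 0x10, bits == 16, and is decomposed
 * by the loop above into two single-byte transactions (offsets 0x10 and
 * 0x11) with burst mode held across both. (Offset 0x10 is illustrative
 * only.)
 */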
/* --------------------------------------------------------------------------
 *                           Driver Interface
 * -------------------------------------------------------------------------- */

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context);

static void acpi_ec_free(struct acpi_ec *ec)
{
	if (first_ec == ec)
		first_ec = NULL;
	if (boot_ec == ec)
		boot_ec = NULL;
	kfree(ec);
}

static struct acpi_ec *acpi_ec_alloc(void)
{
	struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);

	if (!ec)
		return NULL;
	mutex_init(&ec->mutex);
	init_waitqueue_head(&ec->wait);
	INIT_LIST_HEAD(&ec->list);
	spin_lock_init(&ec->lock);
	INIT_WORK(&ec->work, acpi_ec_event_handler);
	ec->timestamp = jiffies;
	ec->busy_polling = true;
	ec->polling_guard = 0;
	ec->gpe = -1;
	ec->irq = -1;
	return ec;
}
static acpi_status
acpi_ec_register_query_methods(acpi_handle handle, u32 level,
			       void *context, void **return_value)
{
	char node_name[5];
	struct acpi_buffer buffer = { sizeof(node_name), node_name };
	struct acpi_ec *ec = context;
	int value = 0;
	acpi_status status;

	status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);

	if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
		acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
	return AE_OK;
}
static acpi_status
ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
{
	acpi_status status;
	unsigned long long tmp = 0;
	struct acpi_ec *ec = context;

	/* clear addr values, ec_parse_io_ports depends on it */
	ec->command_addr = ec->data_addr = 0;

	status = acpi_walk_resources(handle, METHOD_NAME__CRS,
				     ec_parse_io_ports, ec);
	if (ACPI_FAILURE(status))
		return status;
	if (ec->data_addr == 0 || ec->command_addr == 0)
		return AE_OK;

	/* Get GPE bit assignment (EC events). */
	/* TODO: Add support for _GPE returning a package */
	status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
	if (ACPI_SUCCESS(status))
		ec->gpe = tmp;
	/*
	 * Errors are non-fatal, allowing for ACPI Reduced Hardware
	 * platforms which use GpioInt instead of GPE.
	 */

	/* Use the global lock for all EC transactions? */
	tmp = 0;
	acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
	ec->global_lock = tmp;
	ec->handle = handle;
	return AE_CTRL_TERMINATE;
}
static bool install_gpe_event_handler(struct acpi_ec *ec)
{
	acpi_status status;

	status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
					      ACPI_GPE_EDGE_TRIGGERED,
					      &acpi_ec_gpe_handler, ec);
	if (ACPI_FAILURE(status))
		return false;

	if (test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1)
		acpi_ec_enable_gpe(ec, true);

	return true;
}

static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
{
	return request_irq(ec->irq, acpi_ec_irq_handler, IRQF_SHARED,
			   "ACPI EC", ec) >= 0;
}
/**
 * ec_install_handlers - Install service callbacks and register query methods.
 * @ec: Target EC.
 * @device: ACPI device object corresponding to @ec.
 *
 * Install a handler for the EC address space type unless it has been installed
 * already. If @device is not NULL, also look for EC query methods in the
 * namespace and register them, and install an event (either GPE or GPIO IRQ)
 * handler for the EC, if possible.
 *
 * Return:
 * -ENODEV if the address space handler cannot be installed, which means
 *  "unable to handle transactions",
 * -EPROBE_DEFER if GPIO IRQ acquisition needs to be deferred,
 * or 0 (success) otherwise.
 */
static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device)
{
	acpi_status status;

	acpi_ec_start(ec, false);

	if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
		acpi_ec_enter_noirq(ec);
		status = acpi_install_address_space_handler(ec->handle,
							    ACPI_ADR_SPACE_EC,
							    &acpi_ec_space_handler,
							    NULL, ec);
		if (ACPI_FAILURE(status)) {
			acpi_ec_stop(ec, false);
			return -ENODEV;
		}
		set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
	}

	if (!device)
		return 0;

	if (ec->gpe < 0) {
		/* ACPI reduced hardware platforms use a GpioInt from _CRS. */
		int irq = acpi_dev_gpio_irq_get(device, 0);
		/*
		 * Bail out right away for deferred probing or complete the
		 * initialization regardless of any other errors.
		 */
		if (irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (irq >= 0)
			ec->irq = irq;
	}

	if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
		/* Find and register all query methods */
		acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
				    acpi_ec_register_query_methods,
				    NULL, ec, NULL);
		set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
	}

	if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
		bool ready = false;

		if (ec->gpe >= 0)
			ready = install_gpe_event_handler(ec);
		else if (ec->irq >= 0)
			ready = install_gpio_irq_event_handler(ec);

		if (ready) {
			set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
			acpi_ec_leave_noirq(ec);
		}
		/*
		 * Failures to install an event handler are not fatal, because
		 * the EC can be polled for events.
		 */
	}
	/* EC is fully operational, allow queries */
	acpi_ec_enable_event(ec);

	return 0;
}
static void ec_remove_handlers(struct acpi_ec *ec)
{
	if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
		if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
					ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
			pr_err("failed to remove space handler\n");
		clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
	}

	/*
	 * Stops handling the EC transactions after removing the operation
	 * region handler. This is required because _REG(DISCONNECT)
	 * invoked during the removal can result in new EC transactions.
	 *
	 * Flushes the EC requests and thus disables the GPE before
	 * removing the GPE handler. This is required by the current ACPICA
	 * GPE core. ACPICA GPE core will automatically disable a GPE when
	 * it is indicated but there is no way to handle it. So the drivers
	 * must disable the GPEs prior to removing the GPE handlers.
	 */
	acpi_ec_stop(ec, false);

	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
		if (ec->gpe >= 0 &&
		    ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
				 &acpi_ec_gpe_handler)))
			pr_err("failed to remove gpe handler\n");

		if (ec->irq >= 0)
			free_irq(ec->irq, ec);

		clear_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
	}
	if (test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
		acpi_ec_remove_query_handlers(ec, true, 0);
		clear_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
	}
}
static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device)
{
	int ret;

	ret = ec_install_handlers(ec, device);
	if (ret)
		return ret;

	/* First EC capable of handling transactions */
	if (!first_ec)
		first_ec = ec;

	pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr,
		ec->data_addr);

	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
		if (ec->gpe >= 0)
			pr_info("GPE=0x%x\n", ec->gpe);
		else
			pr_info("IRQ=%d\n", ec->irq);
	}

	return ret;
}
static int acpi_ec_add(struct acpi_device *device)
{
	struct acpi_ec *ec;
	int ret;

	strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_EC_CLASS);

	if (boot_ec && (boot_ec->handle == device->handle ||
	    !strcmp(acpi_device_hid(device), ACPI_ECDT_HID))) {
		/* Fast path: this device corresponds to the boot EC. */
		ec = boot_ec;
	} else {
		acpi_status status;

		ec = acpi_ec_alloc();
		if (!ec)
			return -ENOMEM;

		status = ec_parse_device(device->handle, 0, ec, NULL);
		if (status != AE_CTRL_TERMINATE) {
			ret = -EINVAL;
			goto err;
		}

		if (boot_ec && ec->command_addr == boot_ec->command_addr &&
		    ec->data_addr == boot_ec->data_addr) {
			/*
			 * Trust PNP0C09 namespace location rather than ECDT ID.
			 * But trust ECDT GPE rather than _GPE because of ASUS
			 * quirks. So do not change boot_ec->gpe to ec->gpe,
			 * except when the TRUST_DSDT_GPE quirk is set.
			 */
			boot_ec->handle = ec->handle;

			if (EC_FLAGS_TRUST_DSDT_GPE)
				boot_ec->gpe = ec->gpe;

			acpi_handle_debug(ec->handle, "duplicated.\n");
			acpi_ec_free(ec);
			ec = boot_ec;
		}
	}

	ret = acpi_ec_setup(ec, device);
	if (ret)
		goto err;

	if (ec == boot_ec)
		acpi_handle_info(boot_ec->handle,
				 "Boot %s EC initialization complete\n",
				 boot_ec_is_ecdt ? "ECDT" : "DSDT");

	acpi_handle_info(ec->handle,
			 "EC: Used to handle transactions and events\n");

	device->driver_data = ec;

	ret = !!request_region(ec->data_addr, 1, "EC data");
	WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
	ret = !!request_region(ec->command_addr, 1, "EC cmd");
	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);

	/* Reprobe devices depending on the EC */
	acpi_dev_clear_dependencies(device);

	acpi_handle_debug(ec->handle, "enumerated.\n");
	return 0;

err:
	if (ec != boot_ec)
		acpi_ec_free(ec);

	return ret;
}
static int acpi_ec_remove(struct acpi_device *device)
{
	struct acpi_ec *ec;

	if (!device)
		return -EINVAL;

	ec = acpi_driver_data(device);
	release_region(ec->data_addr, 1);
	release_region(ec->command_addr, 1);
	device->driver_data = NULL;
	if (ec != boot_ec) {
		ec_remove_handlers(ec);
		acpi_ec_free(ec);
	}

	return 0;
}
static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context)
{
	struct acpi_ec *ec = context;

	if (resource->type != ACPI_RESOURCE_TYPE_IO)
		return AE_OK;

	/*
	 * The first address region returned is the data port, and
	 * the second address region returned is the status/command
	 * port.
	 */
	if (ec->data_addr == 0)
		ec->data_addr = resource->data.io.minimum;
	else if (ec->command_addr == 0)
		ec->command_addr = resource->data.io.minimum;
	else
		return AE_CTRL_TERMINATE;

	return AE_OK;
}
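
/*
 * For example, the conventional EC declares something like this in _CRS
 * (sketch in ASL; the traditional fixed ports are 0x62 and 0x66):
 *
 *	IO (Decode16, 0x62, 0x62, 0, 1)	// data port -> ec->data_addr
 *	IO (Decode16, 0x66, 0x66, 0, 1)	// command/status -> ec->command_addr
 *
 * Firmware may use other addresses, which is why they are parsed from
 * the resources rather than hard-coded.
 */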
static const struct acpi_device_id ec_device_ids[] = {
	{"PNP0C09", 0},
	{ACPI_ECDT_HID, 0},
	{"", 0},
};
/*
 * This function is not Windows-compatible as Windows never enumerates the
 * namespace EC before the main ACPI device enumeration process. It is
 * retained for historical reason and will be deprecated in the future.
 */
void __init acpi_ec_dsdt_probe(void)
{
	struct acpi_ec *ec;
	acpi_status status;
	int ret;

	/*
	 * If a platform has ECDT, there is no need to proceed as the
	 * following probe is not a part of the ACPI device enumeration,
	 * executing _STA is not safe, and thus this probe risks
	 * picking up an invalid EC device.
	 */
	if (boot_ec)
		return;

	ec = acpi_ec_alloc();
	if (!ec)
		return;

	/*
	 * At this point, the namespace is initialized, so start to find
	 * the namespace objects.
	 */
	status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, ec, NULL);
	if (ACPI_FAILURE(status) || !ec->handle) {
		acpi_ec_free(ec);
		return;
	}

	/*
	 * When the DSDT EC is available, always re-configure boot EC to
	 * have _REG evaluated. _REG can only be evaluated after the
	 * namespace initialization.
	 * At this point, the GPE is not fully initialized, so do not
	 * handle the events.
	 */
	ret = acpi_ec_setup(ec, NULL);
	if (ret) {
		acpi_ec_free(ec);
		return;
	}

	boot_ec = ec;

	acpi_handle_info(ec->handle,
			 "Boot DSDT EC used to handle transactions\n");
}
/*
 * acpi_ec_ecdt_start - Finalize the boot ECDT EC initialization.
 *
 * First, look for an ACPI handle for the boot ECDT EC if acpi_ec_add() has not
 * found a matching object in the namespace.
 *
 * Next, in case the DSDT EC is not functioning, it is still necessary to
 * provide a functional ECDT EC to handle events, so add an extra device object
 * to represent it (see https://bugzilla.kernel.org/show_bug.cgi?id=115021).
 *
 * This is useful on platforms with valid ECDT and invalid DSDT EC settings,
 * like ASUS X550ZE (see https://bugzilla.kernel.org/show_bug.cgi?id=196847).
 */
static void __init acpi_ec_ecdt_start(void)
{
	struct acpi_table_ecdt *ecdt_ptr;
	acpi_handle handle;
	acpi_status status;

	/* Bail out if a matching EC has been found in the namespace. */
	if (!boot_ec || boot_ec->handle != ACPI_ROOT_OBJECT)
		return;

	/* Look up the object pointed to from the ECDT in the namespace. */
	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_FAILURE(status))
		return;

	status = acpi_get_handle(NULL, ecdt_ptr->id, &handle);
	if (ACPI_SUCCESS(status)) {
		boot_ec->handle = handle;

		/* Add a special ACPI device object to represent the boot EC. */
		acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
	}

	acpi_put_table((struct acpi_table_header *)ecdt_ptr);
}
/*
 * On some hardware it is necessary to clear events accumulated by the EC during
 * sleep. These ECs stop reporting GPEs until they are manually polled, if too
 * many events are accumulated (e.g. Samsung Series 5/9 notebooks).
 *
 * https://bugzilla.kernel.org/show_bug.cgi?id=44161
 *
 * Ideally, the EC should also be instructed NOT to accumulate events during
 * sleep (which Windows seems to do somehow), but the interface to control this
 * behaviour is not known at this time.
 *
 * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
 * however it is very likely that other Samsung models are affected.
 *
 * On systems which don't accumulate _Q events during sleep, this extra check
 * should be harmless.
 */
static int ec_clear_on_resume(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing EC poll on resume.\n");
	EC_FLAGS_CLEAR_ON_RESUME = 1;
	ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
	return 0;
}
/*
 * Some ECDTs contain wrong register addresses.
 * https://bugzilla.kernel.org/show_bug.cgi?id=12461
 */
static int ec_correct_ecdt(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing ECDT address correction.\n");
	EC_FLAGS_CORRECT_ECDT = 1;
	return 0;
}

/*
 * Some ECDTs contain wrong GPE setting, but they share the same port addresses
 * with DSDT EC, don't duplicate the DSDT EC with ECDT EC in this case.
 * https://bugzilla.kernel.org/show_bug.cgi?id=209989
 */
static int ec_honor_dsdt_gpe(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing DSDT GPE setting.\n");
	EC_FLAGS_TRUST_DSDT_GPE = 1;
	return 0;
}
static const struct dmi_system_id ec_dmi_table[] __initconst = {
	{
		/*
		 * MSI MS-171F
		 * https://bugzilla.kernel.org/show_bug.cgi?id=12461
		 */
		.callback = ec_correct_ecdt,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
			DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),
		},
	},
	{
		/*
		 * HP Pavilion Gaming Laptop 15-cx0xxx
		 * https://bugzilla.kernel.org/show_bug.cgi?id=209989
		 */
		.callback = ec_honor_dsdt_gpe,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"),
		},
	},
	{
		/*
		 * Samsung hardware
		 * https://bugzilla.kernel.org/show_bug.cgi?id=44161
		 */
		.callback = ec_clear_on_resume,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
		},
	},
	{}
};
void __init acpi_ec_ecdt_probe(void)
{
	struct acpi_table_ecdt *ecdt_ptr;
	struct acpi_ec *ec;
	acpi_status status;
	int ret;

	/* Generate a boot ec context. */
	dmi_check_system(ec_dmi_table);
	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_FAILURE(status))
		return;

	if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) {
		/*
		 * Asus X50GL:
		 * https://bugzilla.kernel.org/show_bug.cgi?id=11880
		 */
		goto out;
	}

	ec = acpi_ec_alloc();
	if (!ec)
		goto out;

	if (EC_FLAGS_CORRECT_ECDT) {
		ec->command_addr = ecdt_ptr->data.address;
		ec->data_addr = ecdt_ptr->control.address;
	} else {
		ec->command_addr = ecdt_ptr->control.address;
		ec->data_addr = ecdt_ptr->data.address;
	}

	/*
	 * Ignore the GPE value on Reduced Hardware platforms.
	 * Some products have this set to an erroneous value.
	 */
	if (!acpi_gbl_reduced_hardware)
		ec->gpe = ecdt_ptr->gpe;

	ec->handle = ACPI_ROOT_OBJECT;

	/*
	 * At this point, the namespace is not initialized, so do not find
	 * the namespace objects, or handle the events.
	 */
	ret = acpi_ec_setup(ec, NULL);
	if (ret) {
		acpi_ec_free(ec);
		goto out;
	}

	boot_ec = ec;
	boot_ec_is_ecdt = true;

	pr_info("Boot ECDT EC used to handle transactions\n");

out:
	acpi_put_table((struct acpi_table_header *)ecdt_ptr);
}
#ifdef CONFIG_PM_SLEEP
static int acpi_ec_suspend(struct device *dev)
{
	struct acpi_ec *ec =
		acpi_driver_data(to_acpi_device(dev));

	if (!pm_suspend_no_platform() && ec_freeze_events)
		acpi_ec_disable_event(ec);
	return 0;
}

static int acpi_ec_suspend_noirq(struct device *dev)
{
	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

	/*
	 * The SCI handler doesn't run at this point, so the GPE can be
	 * masked at the low level without side effects.
	 */
	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count >= 1)
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);

	acpi_ec_enter_noirq(ec);

	return 0;
}

static int acpi_ec_resume_noirq(struct device *dev)
{
	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

	acpi_ec_leave_noirq(ec);

	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count >= 1)
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);

	return 0;
}

static int acpi_ec_resume(struct device *dev)
{
	struct acpi_ec *ec =
		acpi_driver_data(to_acpi_device(dev));

	acpi_ec_enable_event(ec);
	return 0;
}
void acpi_ec_mark_gpe_for_wake(void)
{
	if (first_ec && !ec_no_wakeup)
		acpi_mark_gpe_for_wake(NULL, first_ec->gpe);
}
EXPORT_SYMBOL_GPL(acpi_ec_mark_gpe_for_wake);

void acpi_ec_set_gpe_wake_mask(u8 action)
{
	if (pm_suspend_no_platform() && first_ec && !ec_no_wakeup)
		acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
}
static bool acpi_ec_work_in_progress(struct acpi_ec *ec)
{
	return ec->events_in_progress + ec->queries_in_progress > 0;
}

bool acpi_ec_dispatch_gpe(void)
{
	bool work_in_progress = false;

	if (!first_ec)
		return acpi_any_gpe_status_set(U32_MAX);

	/*
	 * Report wakeup if the status bit is set for any enabled GPE other
	 * than the EC one.
	 */
	if (acpi_any_gpe_status_set(first_ec->gpe))
		return true;

	/*
	 * Cancel the SCI wakeup and process all pending events in case there
	 * are any wakeup ones in there.
	 *
	 * Note that if any non-EC GPEs are active at this point, the SCI will
	 * retrigger after the rearming in acpi_s2idle_wake(), so no events
	 * should be missed by canceling the wakeup here.
	 */
	pm_system_cancel_wakeup();

	/*
	 * Dispatch the EC GPE in-band, but do not report wakeup in any case
	 * to allow the caller to process events properly after that.
	 */
	spin_lock_irq(&first_ec->lock);

	if (acpi_ec_gpe_status_set(first_ec)) {
		pm_pr_dbg("ACPI EC GPE status set\n");

		advance_transaction(first_ec, false);
		work_in_progress = acpi_ec_work_in_progress(first_ec);
	}

	spin_unlock_irq(&first_ec->lock);

	if (!work_in_progress)
		return false;

	pm_pr_dbg("ACPI EC GPE dispatched\n");

	/* Drain EC work. */
	do {
		acpi_ec_flush_work();

		pm_pr_dbg("ACPI EC work flushed\n");

		spin_lock_irq(&first_ec->lock);

		work_in_progress = acpi_ec_work_in_progress(first_ec);

		spin_unlock_irq(&first_ec->lock);
	} while (work_in_progress && !pm_wakeup_pending());

	return false;
}
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops acpi_ec_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
	SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};
static int param_set_event_clearing(const char *val,
				    const struct kernel_param *kp)
{
	int result = 0;

	if (!strncmp(val, "status", sizeof("status") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
		pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
	} else if (!strncmp(val, "query", sizeof("query") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
		pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
	} else if (!strncmp(val, "event", sizeof("event") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
		pr_info("Assuming SCI_EVT clearing on event reads\n");
	} else
		result = -EINVAL;
	return result;
}
static int param_get_event_clearing(char *buffer,
				    const struct kernel_param *kp)
{
	switch (ec_event_clearing) {
	case ACPI_EC_EVT_TIMING_STATUS:
		return sprintf(buffer, "status\n");
	case ACPI_EC_EVT_TIMING_QUERY:
		return sprintf(buffer, "query\n");
	case ACPI_EC_EVT_TIMING_EVENT:
		return sprintf(buffer, "event\n");
	default:
		return sprintf(buffer, "invalid\n");
	}
	return 0;
}

module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
		  NULL, 0644);
MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");
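
/*
 * Example: the assumed timing model can be selected at boot with
 * acpi.ec_event_clearing=status|query|event, or at run time with e.g.:
 *
 *	echo event > /sys/module/acpi/parameters/ec_event_clearing
 *
 * (This assumes sysfs is mounted at /sys; "query" is the default, per
 * the ec_event_clearing initializer above.)
 */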
static struct acpi_driver acpi_ec_driver = {
	.name = "ec",
	.class = ACPI_EC_CLASS,
	.ids = ec_device_ids,
	.ops = {
		.add = acpi_ec_add,
		.remove = acpi_ec_remove,
	},
	.drv.pm = &acpi_ec_pm,
};
static void acpi_ec_destroy_workqueues(void)
{
	if (ec_wq) {
		destroy_workqueue(ec_wq);
		ec_wq = NULL;
	}
	if (ec_query_wq) {
		destroy_workqueue(ec_query_wq);
		ec_query_wq = NULL;
	}
}

static int acpi_ec_init_workqueues(void)
{
	if (!ec_wq)
		ec_wq = alloc_ordered_workqueue("kec", 0);

	if (!ec_query_wq)
		ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);

	if (!ec_wq || !ec_query_wq) {
		acpi_ec_destroy_workqueues();
		return -ENODEV;
	}

	return 0;
}
static const struct dmi_system_id acpi_ec_no_wakeup[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
		},
	},
	{ },
};
void __init acpi_ec_init(void)
{
	int result;

	result = acpi_ec_init_workqueues();
	if (result)
		return;

	/*
	 * Disable EC wakeup on following systems to prevent periodic
	 * wakeup from EC GPE.
	 */
	if (dmi_check_system(acpi_ec_no_wakeup)) {
		ec_no_wakeup = true;
		pr_debug("Disabling EC wakeup on suspend-to-idle\n");
	}

	/* Driver must be registered after acpi_ec_init_workqueues(). */
	acpi_bus_register_driver(&acpi_ec_driver);

	acpi_ec_ecdt_start();
}
/* EC driver currently not unloadable */
#if 0
static void __exit acpi_ec_exit(void)
{
	acpi_bus_unregister_driver(&acpi_ec_driver);
	acpi_ec_destroy_workqueues();
}
#endif	/* 0 */