1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * ec.c - ACPI Embedded Controller Driver (v3)
5 * Copyright (C) 2001-2015 Intel Corporation
6 * Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
7 * 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
8 * 2006 Denis Sadykov <denis.m.sadykov@intel.com>
9 * 2004 Luming Yu <luming.yu@intel.com>
10 * 2001, 2002 Andy Grover <andrew.grover@intel.com>
11 * 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
12 * Copyright (C) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
15 /* Uncomment next line to get verbose printout */
17 #define pr_fmt(fmt) "ACPI: EC: " fmt
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/types.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/list.h>
26 #include <linux/spinlock.h>
27 #include <linux/slab.h>
28 #include <linux/suspend.h>
29 #include <linux/acpi.h>
30 #include <linux/dmi.h>
35 #define ACPI_EC_CLASS "embedded_controller"
36 #define ACPI_EC_DEVICE_NAME "Embedded Controller"
38 /* EC status register */
39 #define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
40 #define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */
41 #define ACPI_EC_FLAG_CMD 0x08 /* Input buffer contains a command */
42 #define ACPI_EC_FLAG_BURST 0x10 /* burst mode */
43 #define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */
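/*
 * Worked example (illustrative only): an EC_SC read returning 0x28 decodes as
 * SCI_EVT=1, BURST=0, CMD=1, IBF=0, OBF=0, i.e. an event is pending and the
 * last byte written to the controller was a command.
 */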
46 * The SCI_EVT clearing timing is not defined by the ACPI specification.
47 * This leads to lots of practical timing issues for the host EC driver.
48 * The following variations are defined (from the target EC firmware's
49 * perspective):
50 * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
51 * target can clear SCI_EVT at any time so long as the host can see
52 * the indication by reading the status register (EC_SC). So the
53 * host should re-check SCI_EVT after the first time the SCI_EVT
54 * indication is seen, which is the same time the query request
55 * (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
56 * at any later time could indicate another event. Normally such
57 * kind of EC firmware has implemented an event queue and will
58 * return 0x00 to indicate "no outstanding event".
59 * QUERY: After seeing the query request (QR_EC) written to the command
60 * register (EC_CMD) by the host and having prepared the responding
61 * event value in the data register (EC_DATA), the target can safely
62 * clear SCI_EVT because the target can confirm that the current
63 * event is being handled by the host. The host then should check
64 * SCI_EVT right after reading the event response from the data
65 * register (EC_DATA).
66 * EVENT: After seeing the event response read from the data register
67 * (EC_DATA) by the host, the target can clear SCI_EVT. As the
68 * target requires time to notice the change in the data register
69 * (EC_DATA), the host may be required to wait additional guarding
70 * time before checking the SCI_EVT again. Such guarding may not be
71 * necessary if the host is notified via another IRQ.
73 #define ACPI_EC_EVT_TIMING_STATUS 0x00
74 #define ACPI_EC_EVT_TIMING_QUERY 0x01
75 #define ACPI_EC_EVT_TIMING_EVENT 0x02
79 ACPI_EC_COMMAND_READ = 0x80,
80 ACPI_EC_COMMAND_WRITE = 0x81,
81 ACPI_EC_BURST_ENABLE = 0x82,
82 ACPI_EC_BURST_DISABLE = 0x83,
83 ACPI_EC_COMMAND_QUERY = 0x84,
86 #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
87 #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
88 #define ACPI_EC_UDELAY_POLL 550 /* Wait 550us for EC transaction polling */
89 #define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
90 * when trying to clear the EC */
91 #define ACPI_EC_MAX_QUERIES 16 /* Maximum number of parallel queries */
94 EC_FLAGS_QUERY_ENABLED, /* Query is enabled */
95 EC_FLAGS_QUERY_PENDING, /* Query is pending */
96 EC_FLAGS_QUERY_GUARDING, /* Guard for SCI_EVT check */
97 EC_FLAGS_EVENT_HANDLER_INSTALLED, /* Event handler installed */
98 EC_FLAGS_EC_HANDLER_INSTALLED, /* OpReg handler installed */
99 EC_FLAGS_QUERY_METHODS_INSTALLED, /* _Qxx handlers installed */
100 EC_FLAGS_STARTED, /* Driver is started */
101 EC_FLAGS_STOPPED, /* Driver is stopped */
102 EC_FLAGS_EVENTS_MASKED, /* Events masked */
105 #define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
106 #define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */
108 /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
109 static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
110 module_param(ec_delay, uint, 0644);
111 MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
113 static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
114 module_param(ec_max_queries, uint, 0644);
115 MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");
117 static bool ec_busy_polling __read_mostly;
118 module_param(ec_busy_polling, bool, 0644);
119 MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
121 static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
122 module_param(ec_polling_guard, uint, 0644);
123 MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");
125 static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;
128 * If the number of false interrupts per transaction exceeds this
129 * threshold, the driver assumes a GPE storm has occurred and disables
130 * the GPE for normal transactions.
132 static unsigned int ec_storm_threshold __read_mostly = 8;
133 module_param(ec_storm_threshold, uint, 0644);
134 MODULE_PARM_DESC(ec_storm_threshold, "Maximum number of false GPEs not considered as a GPE storm");
136 static bool ec_freeze_events __read_mostly = false;
137 module_param(ec_freeze_events, bool, 0644);
138 MODULE_PARM_DESC(ec_freeze_events, "Disable event handling during suspend/resume");
140 static bool ec_no_wakeup __read_mostly;
141 module_param(ec_no_wakeup, bool, 0644);
142 MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
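/*
 * Usage sketch (illustrative only): since this file is compiled into the
 * "acpi" module namespace, the parameters above are typically set on the
 * kernel command line, e.g.:
 *
 *     acpi.ec_delay=1000 acpi.ec_no_wakeup=1
 *
 * and the writable ones can also be changed at runtime through sysfs, e.g.:
 *
 *     echo 1 > /sys/module/acpi/parameters/ec_freeze_events
 *
 * The values shown are arbitrary examples, not recommendations.
 */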
144 struct acpi_ec_query_handler {
145 struct list_head node;
146 acpi_ec_query_func func;
156 unsigned short irq_count;
165 struct acpi_ec_query {
166 struct transaction transaction;
167 struct work_struct work;
168 struct acpi_ec_query_handler *handler;
172 static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
173 static void advance_transaction(struct acpi_ec *ec, bool interrupt);
174 static void acpi_ec_event_handler(struct work_struct *work);
175 static void acpi_ec_event_processor(struct work_struct *work);
177 struct acpi_ec *first_ec;
178 EXPORT_SYMBOL(first_ec);
180 static struct acpi_ec *boot_ec;
181 static bool boot_ec_is_ecdt = false;
182 static struct workqueue_struct *ec_wq;
183 static struct workqueue_struct *ec_query_wq;
185 static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
186 static int EC_FLAGS_TRUST_DSDT_GPE; /* Trust the DSDT GPE over the ECDT GPE */
187 static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
189 /* --------------------------------------------------------------------------
190 * Logging/Debugging
191 * -------------------------------------------------------------------------- */
194 * Separators used by the developers to mark the boundaries of the EC
195 * handling processes in the logs.
198 #define EC_DBG_SEP " "
199 #define EC_DBG_DRV "+++++"
200 #define EC_DBG_STM "====="
201 #define EC_DBG_REQ "*****"
202 #define EC_DBG_EVT "#####"
204 #define EC_DBG_SEP ""
211 #define ec_log_raw(fmt, ...) \
212 pr_info(fmt "\n", ##__VA_ARGS__)
213 #define ec_dbg_raw(fmt, ...) \
214 pr_debug(fmt "\n", ##__VA_ARGS__)
215 #define ec_log(filter, fmt, ...) \
216 ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
217 #define ec_dbg(filter, fmt, ...) \
218 ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
220 #define ec_log_drv(fmt, ...) \
221 ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
222 #define ec_dbg_drv(fmt, ...) \
223 ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
224 #define ec_dbg_stm(fmt, ...) \
225 ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
226 #define ec_dbg_req(fmt, ...) \
227 ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
228 #define ec_dbg_evt(fmt, ...) \
229 ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
230 #define ec_dbg_ref(ec, fmt, ...) \
231 ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
233 /* --------------------------------------------------------------------------
234 * Device Flags
235 * -------------------------------------------------------------------------- */
237 static bool acpi_ec_started(struct acpi_ec *ec)
239 return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
240 !test_bit(EC_FLAGS_STOPPED, &ec->flags);
243 static bool acpi_ec_event_enabled(struct acpi_ec *ec)
246 * During the early stages (boot/resume), OSPM should not enable the
247 * event handling; only the EC transactions are allowed to be
248 * performed during these stages.
250 if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
253 * However, disabling the event handling is experimental for late
254 * stage (suspend), and is controlled by the boot parameter of
255 * "ec_freeze_events":
256 * 1. true: The EC event handling is disabled before entering
257 * the noirq stage.
258 * 2. false: The EC event handling is automatically disabled as
259 * soon as the EC driver is stopped.
261 if (ec_freeze_events)
262 return acpi_ec_started(ec);
264 return test_bit(EC_FLAGS_STARTED, &ec->flags);
267 static bool acpi_ec_flushed(struct acpi_ec *ec)
269 return ec->reference_count == 1;
272 /* --------------------------------------------------------------------------
273 * EC Registers
274 * -------------------------------------------------------------------------- */
276 static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
278 u8 x = inb(ec->command_addr);
280 ec_dbg_raw("EC_SC(R) = 0x%2.2x "
281 "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
283 !!(x & ACPI_EC_FLAG_SCI),
284 !!(x & ACPI_EC_FLAG_BURST),
285 !!(x & ACPI_EC_FLAG_CMD),
286 !!(x & ACPI_EC_FLAG_IBF),
287 !!(x & ACPI_EC_FLAG_OBF));
291 static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
293 u8 x = inb(ec->data_addr);
295 ec->timestamp = jiffies;
296 ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
300 static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
302 ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
303 outb(command, ec->command_addr);
304 ec->timestamp = jiffies;
307 static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
309 ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
310 outb(data, ec->data_addr);
311 ec->timestamp = jiffies;
314 #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
315 static const char *acpi_ec_cmd_string(u8 cmd)
332 #define acpi_ec_cmd_string(cmd) "UNDEF"
335 /* --------------------------------------------------------------------------
336 * GPE Register Access
337 * -------------------------------------------------------------------------- */
339 static inline bool acpi_ec_gpe_status_set(struct acpi_ec *ec)
341 acpi_event_status gpe_status = 0;
343 (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
344 return !!(gpe_status & ACPI_EVENT_FLAG_STATUS_SET);
347 static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
350 acpi_enable_gpe(NULL, ec->gpe);
352 BUG_ON(ec->reference_count < 1);
353 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
355 if (acpi_ec_gpe_status_set(ec)) {
357 * On some platforms, EN=1 writes cannot trigger GPE. So
358 * software needs to manually trigger a pseudo GPE event on
359 * EN=1 writes.
361 ec_dbg_raw("Polling quirk");
362 advance_transaction(ec, false);
366 static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
369 acpi_disable_gpe(NULL, ec->gpe);
371 BUG_ON(ec->reference_count < 1);
372 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
376 /* --------------------------------------------------------------------------
377 * Transaction Management
378 * -------------------------------------------------------------------------- */
380 static void acpi_ec_submit_request(struct acpi_ec *ec)
382 ec->reference_count++;
383 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
384 ec->gpe >= 0 && ec->reference_count == 1)
385 acpi_ec_enable_gpe(ec, true);
388 static void acpi_ec_complete_request(struct acpi_ec *ec)
390 bool flushed = false;
392 ec->reference_count--;
393 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
394 ec->gpe >= 0 && ec->reference_count == 0)
395 acpi_ec_disable_gpe(ec, true);
396 flushed = acpi_ec_flushed(ec);
401 static void acpi_ec_mask_events(struct acpi_ec *ec)
403 if (!test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
405 acpi_ec_disable_gpe(ec, false);
407 disable_irq_nosync(ec->irq);
409 ec_dbg_drv("Polling enabled");
410 set_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
414 static void acpi_ec_unmask_events(struct acpi_ec *ec)
416 if (test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
417 clear_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
419 acpi_ec_enable_gpe(ec, false);
423 ec_dbg_drv("Polling disabled");
428 * acpi_ec_submit_flushable_request() - Increase the reference count unless
429 * the flush operation is in progress
433 * This function must be used before taking a new action that should hold
434 * the reference count. If this function returns false, then the action
435 * must be discarded or it will prevent the flush operation from being
436 * completed.
438 static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
440 if (!acpi_ec_started(ec))
442 acpi_ec_submit_request(ec);
446 static void acpi_ec_submit_query(struct acpi_ec *ec)
448 acpi_ec_mask_events(ec);
449 if (!acpi_ec_event_enabled(ec))
451 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
452 ec_dbg_evt("Command(%s) submitted/blocked",
453 acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
454 ec->nr_pending_queries++;
455 ec->events_in_progress++;
456 queue_work(ec_wq, &ec->work);
460 static void acpi_ec_complete_query(struct acpi_ec *ec)
462 if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
463 ec_dbg_evt("Command(%s) unblocked",
464 acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
465 acpi_ec_unmask_events(ec);
468 static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
470 if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
471 ec_log_drv("event unblocked");
473 * Unconditionally invoke this once after enabling the event
474 * handling mechanism to detect the pending events.
476 advance_transaction(ec, false);
479 static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
481 if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
482 ec_log_drv("event blocked");
486 * Process _Q events that might have accumulated in the EC.
487 * Run with locked ec mutex.
489 static void acpi_ec_clear(struct acpi_ec *ec)
494 for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
495 status = acpi_ec_query(ec, &value);
496 if (status || !value)
499 if (unlikely(i == ACPI_EC_CLEAR_MAX))
500 pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
502 pr_info("%d stale EC events cleared\n", i);
505 static void acpi_ec_enable_event(struct acpi_ec *ec)
509 spin_lock_irqsave(&ec->lock, flags);
510 if (acpi_ec_started(ec))
511 __acpi_ec_enable_event(ec);
512 spin_unlock_irqrestore(&ec->lock, flags);
514 /* Drain additional events if hardware requires that */
515 if (EC_FLAGS_CLEAR_ON_RESUME)
519 #ifdef CONFIG_PM_SLEEP
520 static void __acpi_ec_flush_work(void)
522 flush_workqueue(ec_wq); /* flush ec->work */
523 flush_workqueue(ec_query_wq); /* flush queries */
526 static void acpi_ec_disable_event(struct acpi_ec *ec)
530 spin_lock_irqsave(&ec->lock, flags);
531 __acpi_ec_disable_event(ec);
532 spin_unlock_irqrestore(&ec->lock, flags);
535 * When ec_freeze_events is true, we need to flush events in
536 * the proper position before entering the noirq stage.
538 __acpi_ec_flush_work();
541 void acpi_ec_flush_work(void)
543 /* Without ec_wq there is nothing to flush. */
547 __acpi_ec_flush_work();
549 #endif /* CONFIG_PM_SLEEP */
551 static bool acpi_ec_guard_event(struct acpi_ec *ec)
556 spin_lock_irqsave(&ec->lock, flags);
558 * If firmware SCI_EVT clearing timing is "event", we actually
559 * don't know when the SCI_EVT will be cleared by firmware after
560 * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
561 * additional guarding period.
563 * The guarding period begins when EC_FLAGS_QUERY_PENDING is
564 * flagged, which means SCI_EVT check has just been performed.
565 * But if the current transaction is ACPI_EC_COMMAND_QUERY, the
566 * guarding should have already been performed (via
567 * EC_FLAGS_QUERY_GUARDING) and should not be applied so that the
568 * ACPI_EC_COMMAND_QUERY transaction can be transitioned into
569 * ACPI_EC_COMMAND_POLL state immediately.
571 if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
572 ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
573 !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
574 (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
576 spin_unlock_irqrestore(&ec->lock, flags);
580 static int ec_transaction_polled(struct acpi_ec *ec)
585 spin_lock_irqsave(&ec->lock, flags);
586 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
588 spin_unlock_irqrestore(&ec->lock, flags);
592 static int ec_transaction_completed(struct acpi_ec *ec)
597 spin_lock_irqsave(&ec->lock, flags);
598 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
600 spin_unlock_irqrestore(&ec->lock, flags);
604 static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
606 ec->curr->flags |= flag;
607 if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
608 if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
609 flag == ACPI_EC_COMMAND_POLL)
610 acpi_ec_complete_query(ec);
611 if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
612 flag == ACPI_EC_COMMAND_COMPLETE)
613 acpi_ec_complete_query(ec);
614 if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
615 flag == ACPI_EC_COMMAND_COMPLETE)
616 set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
620 static void acpi_ec_spurious_interrupt(struct acpi_ec *ec, struct transaction *t)
622 if (t->irq_count < ec_storm_threshold)
625 /* Trigger if the threshold is 0 too. */
626 if (t->irq_count == ec_storm_threshold)
627 acpi_ec_mask_events(ec);
630 static void advance_transaction(struct acpi_ec *ec, bool interrupt)
632 struct transaction *t = ec->curr;
636 ec_dbg_stm("%s (%d)", interrupt ? "IRQ" : "TASK", smp_processor_id());
639 * Clear GPE_STS upfront to allow subsequent hardware GPE_STS 0->1
640 * changes to always trigger a GPE interrupt.
642 * GPE STS is a W1C register, which means:
644 * 1. Software can clear it without worrying about clearing the other
645 * GPEs' STS bits when the hardware sets them in parallel.
647 * 2. As long as software can ensure only clearing it when it is set,
648 * hardware won't set it in parallel.
650 if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec))
651 acpi_clear_gpe(NULL, ec->gpe);
653 status = acpi_ec_read_status(ec);
656 * If another IRQ or a guarded polling mode advancement is detected,
657 * the next QR_EC submission is allowed.
659 if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
660 if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
661 (!ec->nr_pending_queries ||
662 test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) {
663 clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
664 acpi_ec_complete_query(ec);
670 if (t->flags & ACPI_EC_COMMAND_POLL) {
671 if (t->wlen > t->wi) {
672 if (!(status & ACPI_EC_FLAG_IBF))
673 acpi_ec_write_data(ec, t->wdata[t->wi++]);
674 else if (interrupt && !(status & ACPI_EC_FLAG_SCI))
675 acpi_ec_spurious_interrupt(ec, t);
676 } else if (t->rlen > t->ri) {
677 if (status & ACPI_EC_FLAG_OBF) {
678 t->rdata[t->ri++] = acpi_ec_read_data(ec);
679 if (t->rlen == t->ri) {
680 ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
682 if (t->command == ACPI_EC_COMMAND_QUERY)
683 ec_dbg_evt("Command(%s) completed by hardware",
684 acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
686 } else if (interrupt && !(status & ACPI_EC_FLAG_SCI)) {
687 acpi_ec_spurious_interrupt(ec, t);
689 } else if (t->wlen == t->wi && !(status & ACPI_EC_FLAG_IBF)) {
690 ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
693 } else if (!(status & ACPI_EC_FLAG_IBF)) {
694 acpi_ec_write_cmd(ec, t->command);
695 ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
699 if (status & ACPI_EC_FLAG_SCI)
700 acpi_ec_submit_query(ec);
702 if (wakeup && interrupt)
706 static void start_transaction(struct acpi_ec *ec)
708 ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
712 static int ec_guard(struct acpi_ec *ec)
714 unsigned long guard = usecs_to_jiffies(ec->polling_guard);
715 unsigned long timeout = ec->timestamp + guard;
717 /* Ensure guarding period before polling EC status */
719 if (ec->busy_polling) {
720 /* Perform busy polling */
721 if (ec_transaction_completed(ec))
723 udelay(jiffies_to_usecs(guard));
726 * Perform wait polling
727 * 1. Wait for the transaction to be completed by the
728 * GPE handler after the transaction enters
729 * ACPI_EC_COMMAND_POLL state.
730 * 2. A special guarding logic is also required
731 * for event clearing mode "event" before the
732 * transaction enters ACPI_EC_COMMAND_POLL
733 * state.
735 if (!ec_transaction_polled(ec) &&
736 !acpi_ec_guard_event(ec))
738 if (wait_event_timeout(ec->wait,
739 ec_transaction_completed(ec),
743 } while (time_before(jiffies, timeout));
747 static int ec_poll(struct acpi_ec *ec)
750 int repeat = 5; /* number of command restarts */
753 unsigned long delay = jiffies +
754 msecs_to_jiffies(ec_delay);
758 spin_lock_irqsave(&ec->lock, flags);
759 advance_transaction(ec, false);
760 spin_unlock_irqrestore(&ec->lock, flags);
761 } while (time_before(jiffies, delay));
762 pr_debug("controller reset, restart transaction\n");
763 spin_lock_irqsave(&ec->lock, flags);
764 start_transaction(ec);
765 spin_unlock_irqrestore(&ec->lock, flags);
770 static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
771 struct transaction *t)
776 /* start transaction */
777 spin_lock_irqsave(&ec->lock, tmp);
778 /* Enable GPE for command processing (IBF=0/OBF=1) */
779 if (!acpi_ec_submit_flushable_request(ec)) {
783 ec_dbg_ref(ec, "Increase command");
784 /* following two actions should be kept atomic */
786 ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
787 start_transaction(ec);
788 spin_unlock_irqrestore(&ec->lock, tmp);
792 spin_lock_irqsave(&ec->lock, tmp);
793 if (t->irq_count == ec_storm_threshold)
794 acpi_ec_unmask_events(ec);
795 ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
797 /* Disable GPE for command processing (IBF=0/OBF=1) */
798 acpi_ec_complete_request(ec);
799 ec_dbg_ref(ec, "Decrease command");
801 spin_unlock_irqrestore(&ec->lock, tmp);
805 static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
810 if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
813 memset(t->rdata, 0, t->rlen);
815 mutex_lock(&ec->mutex);
816 if (ec->global_lock) {
817 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
818 if (ACPI_FAILURE(status)) {
824 status = acpi_ec_transaction_unlocked(ec, t);
827 acpi_release_global_lock(glk);
829 mutex_unlock(&ec->mutex);
833 static int acpi_ec_burst_enable(struct acpi_ec *ec)
836 struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
837 .wdata = NULL, .rdata = &d,
838 .wlen = 0, .rlen = 1};
840 return acpi_ec_transaction(ec, &t);
843 static int acpi_ec_burst_disable(struct acpi_ec *ec)
845 struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
846 .wdata = NULL, .rdata = NULL,
847 .wlen = 0, .rlen = 0};
849 return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
850 acpi_ec_transaction(ec, &t) : 0;
853 static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
857 struct transaction t = {.command = ACPI_EC_COMMAND_READ,
858 .wdata = &address, .rdata = &d,
859 .wlen = 1, .rlen = 1};
861 result = acpi_ec_transaction(ec, &t);
866 static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
868 u8 wdata[2] = { address, data };
869 struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
870 .wdata = wdata, .rdata = NULL,
871 .wlen = 2, .rlen = 0};
873 return acpi_ec_transaction(ec, &t);
876 int ec_read(u8 addr, u8 *val)
884 err = acpi_ec_read(first_ec, addr, &temp_data);
892 EXPORT_SYMBOL(ec_read);
894 int ec_write(u8 addr, u8 val)
901 err = acpi_ec_write(first_ec, addr, val);
905 EXPORT_SYMBOL(ec_write);
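/*
 * Usage sketch (illustrative only, not part of this driver): other kernel
 * code can access EC address space through the exported helpers above. The
 * offset 0x2f is a made-up example register.
 *
 *     u8 val;
 *
 *     if (!ec_read(0x2f, &val)) {
 *             val |= 0x01;
 *             if (ec_write(0x2f, val))
 *                     pr_warn("example: EC write failed\n");
 *     }
 *
 * Both helpers return 0 on success or a negative error code and operate on
 * first_ec, so they only work once the first EC has been set up.
 */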
907 int ec_transaction(u8 command,
908 const u8 *wdata, unsigned wdata_len,
909 u8 *rdata, unsigned rdata_len)
911 struct transaction t = {.command = command,
912 .wdata = wdata, .rdata = rdata,
913 .wlen = wdata_len, .rlen = rdata_len};
918 return acpi_ec_transaction(first_ec, &t);
920 EXPORT_SYMBOL(ec_transaction);
922 /* Get the handle to the EC device */
923 acpi_handle ec_get_handle(void)
927 return first_ec->handle;
929 EXPORT_SYMBOL(ec_get_handle);
931 static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
935 spin_lock_irqsave(&ec->lock, flags);
936 if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
937 ec_dbg_drv("Starting EC");
938 /* Enable GPE for event processing (SCI_EVT=1) */
940 acpi_ec_submit_request(ec);
941 ec_dbg_ref(ec, "Increase driver");
943 ec_log_drv("EC started");
945 spin_unlock_irqrestore(&ec->lock, flags);
948 static bool acpi_ec_stopped(struct acpi_ec *ec)
953 spin_lock_irqsave(&ec->lock, flags);
954 flushed = acpi_ec_flushed(ec);
955 spin_unlock_irqrestore(&ec->lock, flags);
959 static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
963 spin_lock_irqsave(&ec->lock, flags);
964 if (acpi_ec_started(ec)) {
965 ec_dbg_drv("Stopping EC");
966 set_bit(EC_FLAGS_STOPPED, &ec->flags);
967 spin_unlock_irqrestore(&ec->lock, flags);
968 wait_event(ec->wait, acpi_ec_stopped(ec));
969 spin_lock_irqsave(&ec->lock, flags);
970 /* Disable GPE for event processing (SCI_EVT=1) */
972 acpi_ec_complete_request(ec);
973 ec_dbg_ref(ec, "Decrease driver");
974 } else if (!ec_freeze_events)
975 __acpi_ec_disable_event(ec);
976 clear_bit(EC_FLAGS_STARTED, &ec->flags);
977 clear_bit(EC_FLAGS_STOPPED, &ec->flags);
978 ec_log_drv("EC stopped");
980 spin_unlock_irqrestore(&ec->lock, flags);
983 static void acpi_ec_enter_noirq(struct acpi_ec *ec)
987 spin_lock_irqsave(&ec->lock, flags);
988 ec->busy_polling = true;
989 ec->polling_guard = 0;
990 ec_log_drv("interrupt blocked");
991 spin_unlock_irqrestore(&ec->lock, flags);
994 static void acpi_ec_leave_noirq(struct acpi_ec *ec)
998 spin_lock_irqsave(&ec->lock, flags);
999 ec->busy_polling = ec_busy_polling;
1000 ec->polling_guard = ec_polling_guard;
1001 ec_log_drv("interrupt unblocked");
1002 spin_unlock_irqrestore(&ec->lock, flags);
1005 void acpi_ec_block_transactions(void)
1007 struct acpi_ec *ec = first_ec;
1012 mutex_lock(&ec->mutex);
1013 /* Prevent transactions from being carried out */
1014 acpi_ec_stop(ec, true);
1015 mutex_unlock(&ec->mutex);
1018 void acpi_ec_unblock_transactions(void)
1021 * Allow transactions to happen again (this function is called from
1022 * atomic context during wakeup, so we don't need to acquire the mutex).
1025 acpi_ec_start(first_ec, true);
1028 /* --------------------------------------------------------------------------
1029 Event Management
1030 -------------------------------------------------------------------------- */
1031 static struct acpi_ec_query_handler *
1032 acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
1034 struct acpi_ec_query_handler *handler;
1036 mutex_lock(&ec->mutex);
1037 list_for_each_entry(handler, &ec->list, node) {
1038 if (value == handler->query_bit) {
1039 kref_get(&handler->kref);
1040 mutex_unlock(&ec->mutex);
1044 mutex_unlock(&ec->mutex);
1048 static void acpi_ec_query_handler_release(struct kref *kref)
1050 struct acpi_ec_query_handler *handler =
1051 container_of(kref, struct acpi_ec_query_handler, kref);
1056 static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
1058 kref_put(&handler->kref, acpi_ec_query_handler_release);
1061 int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
1062 acpi_handle handle, acpi_ec_query_func func,
1065 struct acpi_ec_query_handler *handler =
1066 kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);
1071 handler->query_bit = query_bit;
1072 handler->handle = handle;
1073 handler->func = func;
1074 handler->data = data;
1075 mutex_lock(&ec->mutex);
1076 kref_init(&handler->kref);
1077 list_add(&handler->node, &ec->list);
1078 mutex_unlock(&ec->mutex);
1081 EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
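/*
 * Registration sketch (illustrative only): a driver may hook a specific _Qxx
 * event value with a callback; acpi_ec_event_processor() then invokes the
 * callback (and ignores @handle) from the query workqueue. The event value
 * 0x50 and the example_* names below are made up for illustration.
 *
 *     static int example_ec_notify(void *data)
 *     {
 *             struct example_priv *priv = data;
 *
 *             schedule_work(&priv->work);
 *             return 0;
 *     }
 *
 *     acpi_ec_add_query_handler(first_ec, 0x50, NULL, example_ec_notify, priv);
 */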
1083 static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
1084 bool remove_all, u8 query_bit)
1086 struct acpi_ec_query_handler *handler, *tmp;
1087 LIST_HEAD(free_list);
1089 mutex_lock(&ec->mutex);
1090 list_for_each_entry_safe(handler, tmp, &ec->list, node) {
1091 if (remove_all || query_bit == handler->query_bit) {
1092 list_del_init(&handler->node);
1093 list_add(&handler->node, &free_list);
1096 mutex_unlock(&ec->mutex);
1097 list_for_each_entry_safe(handler, tmp, &free_list, node)
1098 acpi_ec_put_query_handler(handler);
1101 void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
1103 acpi_ec_remove_query_handlers(ec, false, query_bit);
1105 EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
1107 static struct acpi_ec_query *acpi_ec_create_query(struct acpi_ec *ec, u8 *pval)
1109 struct acpi_ec_query *q;
1110 struct transaction *t;
1112 q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
1116 INIT_WORK(&q->work, acpi_ec_event_processor);
1117 t = &q->transaction;
1118 t->command = ACPI_EC_COMMAND_QUERY;
1125 static void acpi_ec_delete_query(struct acpi_ec_query *q)
1129 acpi_ec_put_query_handler(q->handler);
1134 static void acpi_ec_event_processor(struct work_struct *work)
1136 struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
1137 struct acpi_ec_query_handler *handler = q->handler;
1138 struct acpi_ec *ec = q->ec;
1140 ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
1143 handler->func(handler->data);
1144 else if (handler->handle)
1145 acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
1147 ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
1149 spin_lock_irq(&ec->lock);
1150 ec->queries_in_progress--;
1151 spin_unlock_irq(&ec->lock);
1153 acpi_ec_delete_query(q);
1156 static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
1160 struct acpi_ec_query *q;
1162 q = acpi_ec_create_query(ec, &value);
1167 * Query the EC to find out which _Qxx method we need to evaluate.
1168 * Note that successful completion of the query causes the ACPI_EC_SCI
1169 * bit to be cleared (and thus clearing the interrupt source).
1171 result = acpi_ec_transaction(ec, &q->transaction);
1177 q->handler = acpi_ec_get_query_handler_by_value(ec, value);
1184 * It is reported that _Qxx are evaluated in a parallel way on Windows:
1185 * https://bugzilla.kernel.org/show_bug.cgi?id=94411
1187 * Put this log entry before queue_work() to make it appear in the log
1188 * before any other messages emitted during workqueue handling.
1190 ec_dbg_evt("Query(0x%02x) scheduled", value);
1192 spin_lock_irq(&ec->lock);
1194 ec->queries_in_progress++;
1195 queue_work(ec_query_wq, &q->work);
1197 spin_unlock_irq(&ec->lock);
1201 acpi_ec_delete_query(q);
1207 static void acpi_ec_check_event(struct acpi_ec *ec)
1209 unsigned long flags;
1211 if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
1213 spin_lock_irqsave(&ec->lock, flags);
1215 * Take care of the SCI_EVT unless someone else is already
1216 * taking care of it.
1219 advance_transaction(ec, false);
1220 spin_unlock_irqrestore(&ec->lock, flags);
1225 static void acpi_ec_event_handler(struct work_struct *work)
1227 unsigned long flags;
1228 struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
1230 ec_dbg_evt("Event started");
1232 spin_lock_irqsave(&ec->lock, flags);
1233 while (ec->nr_pending_queries) {
1234 spin_unlock_irqrestore(&ec->lock, flags);
1235 (void)acpi_ec_query(ec, NULL);
1236 spin_lock_irqsave(&ec->lock, flags);
1237 ec->nr_pending_queries--;
1239 * Before exit, make sure that this work item can be
1240 * scheduled again. There might be QR_EC failures, leaving
1241 * EC_FLAGS_QUERY_PENDING uncleared and preventing this work
1242 * item from being scheduled again.
1244 if (!ec->nr_pending_queries) {
1245 if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
1246 ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY)
1247 acpi_ec_complete_query(ec);
1250 spin_unlock_irqrestore(&ec->lock, flags);
1252 ec_dbg_evt("Event stopped");
1254 acpi_ec_check_event(ec);
1256 spin_lock_irqsave(&ec->lock, flags);
1257 ec->events_in_progress--;
1258 spin_unlock_irqrestore(&ec->lock, flags);
1261 static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
1263 unsigned long flags;
1265 spin_lock_irqsave(&ec->lock, flags);
1266 advance_transaction(ec, true);
1267 spin_unlock_irqrestore(&ec->lock, flags);
1270 static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
1271 u32 gpe_number, void *data)
1273 acpi_ec_handle_interrupt(data);
1274 return ACPI_INTERRUPT_HANDLED;
1277 static irqreturn_t acpi_ec_irq_handler(int irq, void *data)
1279 acpi_ec_handle_interrupt(data);
1283 /* --------------------------------------------------------------------------
1284 * Address Space Management
1285 * -------------------------------------------------------------------------- */
1288 acpi_ec_space_handler(u32 function, acpi_physical_address address,
1289 u32 bits, u64 *value64,
1290 void *handler_context, void *region_context)
1292 struct acpi_ec *ec = handler_context;
1293 int result = 0, i, bytes = bits / 8;
1294 u8 *value = (u8 *)value64;
1296 if ((address > 0xFF) || !value || !handler_context)
1297 return AE_BAD_PARAMETER;
1299 if (function != ACPI_READ && function != ACPI_WRITE)
1300 return AE_BAD_PARAMETER;
1302 if (ec->busy_polling || bits > 8)
1303 acpi_ec_burst_enable(ec);
1305 for (i = 0; i < bytes; ++i, ++address, ++value)
1306 result = (function == ACPI_READ) ?
1307 acpi_ec_read(ec, address, value) :
1308 acpi_ec_write(ec, address, *value);
1310 if (ec->busy_polling || bits > 8)
1311 acpi_ec_burst_disable(ec);
1315 return AE_BAD_PARAMETER;
1317 return AE_NOT_FOUND;
1325 /* --------------------------------------------------------------------------
1326 * Driver Interface
1327 * -------------------------------------------------------------------------- */
1330 ec_parse_io_ports(struct acpi_resource *resource, void *context);
1332 static void acpi_ec_free(struct acpi_ec *ec)
1341 static struct acpi_ec *acpi_ec_alloc(void)
1343 struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
1347 mutex_init(&ec->mutex);
1348 init_waitqueue_head(&ec->wait);
1349 INIT_LIST_HEAD(&ec->list);
1350 spin_lock_init(&ec->lock);
1351 INIT_WORK(&ec->work, acpi_ec_event_handler);
1352 ec->timestamp = jiffies;
1353 ec->busy_polling = true;
1354 ec->polling_guard = 0;
1361 acpi_ec_register_query_methods(acpi_handle handle, u32 level,
1362 void *context, void **return_value)
1365 struct acpi_buffer buffer = { sizeof(node_name), node_name };
1366 struct acpi_ec *ec = context;
1370 status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
1372 if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
1373 acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
1378 ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
1381 unsigned long long tmp = 0;
1382 struct acpi_ec *ec = context;
1384 /* clear addr values, ec_parse_io_ports depends on it */
1385 ec->command_addr = ec->data_addr = 0;
1387 status = acpi_walk_resources(handle, METHOD_NAME__CRS,
1388 ec_parse_io_ports, ec);
1389 if (ACPI_FAILURE(status))
1391 if (ec->data_addr == 0 || ec->command_addr == 0)
1394 /* Get GPE bit assignment (EC events). */
1395 /* TODO: Add support for _GPE returning a package */
1396 status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
1397 if (ACPI_SUCCESS(status))
1400 * Errors are non-fatal, allowing for ACPI Reduced Hardware
1401 * platforms which use GpioInt instead of GPE.
1404 /* Use the global lock for all EC transactions? */
1406 acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
1407 ec->global_lock = tmp;
1408 ec->handle = handle;
1409 return AE_CTRL_TERMINATE;
1412 static bool install_gpe_event_handler(struct acpi_ec *ec)
1416 status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
1417 ACPI_GPE_EDGE_TRIGGERED,
1418 &acpi_ec_gpe_handler, ec);
1419 if (ACPI_FAILURE(status))
1422 if (test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1)
1423 acpi_ec_enable_gpe(ec, true);
1428 static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
1430 return request_irq(ec->irq, acpi_ec_irq_handler, IRQF_SHARED,
1431 "ACPI EC", ec) >= 0;
1435 * ec_install_handlers - Install service callbacks and register query methods.
1436 * @ec: Target EC.
1437 * @device: ACPI device object corresponding to @ec.
1439 * Install a handler for the EC address space type unless it has been installed
1440 * already. If @device is not NULL, also look for EC query methods in the
1441 * namespace and register them, and install an event (either GPE or GPIO IRQ)
1442 * handler for the EC, if possible.
1445 * -ENODEV if the address space handler cannot be installed, which means
1446 * "unable to handle transactions",
1447 * -EPROBE_DEFER if GPIO IRQ acquisition needs to be deferred,
1448 * or 0 (success) otherwise.
1450 static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device)
1454 acpi_ec_start(ec, false);
1456 if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
1457 acpi_ec_enter_noirq(ec);
1458 status = acpi_install_address_space_handler(ec->handle,
1460 &acpi_ec_space_handler,
1462 if (ACPI_FAILURE(status)) {
1463 acpi_ec_stop(ec, false);
1466 set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
1473 /* ACPI reduced hardware platforms use a GpioInt from _CRS. */
1474 int irq = acpi_dev_gpio_irq_get(device, 0);
1476 * Bail out right away for deferred probing or complete the
1477 * initialization regardless of any other errors.
1479 if (irq == -EPROBE_DEFER)
1480 return -EPROBE_DEFER;
1485 if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
1486 /* Find and register all query methods */
1487 acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
1488 acpi_ec_register_query_methods,
1490 set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
1492 if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
1496 ready = install_gpe_event_handler(ec);
1497 else if (ec->irq >= 0)
1498 ready = install_gpio_irq_event_handler(ec);
1501 set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
1502 acpi_ec_leave_noirq(ec);
1505 * Failures to install an event handler are not fatal, because
1506 * the EC can be polled for events.
1509 /* EC is fully operational, allow queries */
1510 acpi_ec_enable_event(ec);
1515 static void ec_remove_handlers(struct acpi_ec *ec)
1517 if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
1518 if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
1519 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
1520 pr_err("failed to remove space handler\n");
1521 clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
1525 * Stops handling the EC transactions after removing the operation
1526 * region handler. This is required because _REG(DISCONNECT)
1527 * invoked during the removal can result in new EC transactions.
1529 * Flushes the EC requests and thus disables the GPE before
1530 * removing the GPE handler. This is required by the current ACPICA
1531 * GPE core, which automatically disables a GPE when it is triggered
1532 * but there is no handler to service it. So the drivers must disable
1533 * the GPEs prior to removing the GPE handlers.
1535 acpi_ec_stop(ec, false);
1537 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
1539 ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
1540 &acpi_ec_gpe_handler)))
1541 pr_err("failed to remove gpe handler\n");
1544 free_irq(ec->irq, ec);
1546 clear_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
1548 if (test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
1549 acpi_ec_remove_query_handlers(ec, true, 0);
1550 clear_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
1554 static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device)
1558 ret = ec_install_handlers(ec, device);
1562 /* First EC capable of handling transactions */
1566 pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr,
1569 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
1571 pr_info("GPE=0x%x\n", ec->gpe);
1573 pr_info("IRQ=%d\n", ec->irq);
1579 static int acpi_ec_add(struct acpi_device *device)
1584 strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
1585 strcpy(acpi_device_class(device), ACPI_EC_CLASS);
1587 if (boot_ec && (boot_ec->handle == device->handle ||
1588 !strcmp(acpi_device_hid(device), ACPI_ECDT_HID))) {
1589 /* Fast path: this device corresponds to the boot EC. */
1594 ec = acpi_ec_alloc();
1598 status = ec_parse_device(device->handle, 0, ec, NULL);
1599 if (status != AE_CTRL_TERMINATE) {
1604 if (boot_ec && ec->command_addr == boot_ec->command_addr &&
1605 ec->data_addr == boot_ec->data_addr &&
1606 !EC_FLAGS_TRUST_DSDT_GPE) {
1608 * Trust PNP0C09 namespace location rather than
1609 * ECDT ID. But trust ECDT GPE rather than _GPE
1610 * because of ASUS quirks, so do not change
1611 * boot_ec->gpe to ec->gpe.
1613 boot_ec->handle = ec->handle;
1614 acpi_handle_debug(ec->handle, "duplicated.\n");
1620 ret = acpi_ec_setup(ec, device);
1625 acpi_handle_info(boot_ec->handle,
1626 "Boot %s EC initialization complete\n",
1627 boot_ec_is_ecdt ? "ECDT" : "DSDT");
1629 acpi_handle_info(ec->handle,
1630 "EC: Used to handle transactions and events\n");
1632 device->driver_data = ec;
1634 ret = !!request_region(ec->data_addr, 1, "EC data");
1635 WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
1636 ret = !!request_region(ec->command_addr, 1, "EC cmd");
1637 WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
1639 /* Reprobe devices depending on the EC */
1640 acpi_dev_clear_dependencies(device);
1642 acpi_handle_debug(ec->handle, "enumerated.\n");
1652 static int acpi_ec_remove(struct acpi_device *device)
1659 ec = acpi_driver_data(device);
1660 release_region(ec->data_addr, 1);
1661 release_region(ec->command_addr, 1);
1662 device->driver_data = NULL;
1663 if (ec != boot_ec) {
1664 ec_remove_handlers(ec);
1671 ec_parse_io_ports(struct acpi_resource *resource, void *context)
1673 struct acpi_ec *ec = context;
1675 if (resource->type != ACPI_RESOURCE_TYPE_IO)
1679 * The first address region returned is the data port, and
1680 * the second address region returned is the status/command
1681 * port.
1683 if (ec->data_addr == 0)
1684 ec->data_addr = resource->data.io.minimum;
1685 else if (ec->command_addr == 0)
1686 ec->command_addr = resource->data.io.minimum;
1688 return AE_CTRL_TERMINATE;
1693 static const struct acpi_device_id ec_device_ids[] = {
1700 * This function is not Windows-compatible as Windows never enumerates the
1701 * namespace EC before the main ACPI device enumeration process. It is
1702 * retained for historical reasons and will be deprecated in the future.
1704 void __init acpi_ec_dsdt_probe(void)
1711 * If a platform has ECDT, there is no need to proceed as the
1712 * following probe is not a part of the ACPI device enumeration,
1713 * executing _STA is not safe, and thus this probe risks picking
1714 * up an invalid EC device.
1719 ec = acpi_ec_alloc();
1724 * At this point, the namespace is initialized, so start to find
1725 * the namespace objects.
1727 status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, ec, NULL);
1728 if (ACPI_FAILURE(status) || !ec->handle) {
1734 * When the DSDT EC is available, always re-configure boot EC to
1735 * have _REG evaluated. _REG can only be evaluated after the
1736 * namespace initialization.
1737 * At this point, the GPE is not fully initialized, so do not
1738 * handle the events.
1740 ret = acpi_ec_setup(ec, NULL);
1748 acpi_handle_info(ec->handle,
1749 "Boot DSDT EC used to handle transactions\n");
1753 * acpi_ec_ecdt_start - Finalize the boot ECDT EC initialization.
1755 * First, look for an ACPI handle for the boot ECDT EC if acpi_ec_add() has not
1756 * found a matching object in the namespace.
1758 * Next, in case the DSDT EC is not functioning, it is still necessary to
1759 * provide a functional ECDT EC to handle events, so add an extra device object
1760 * to represent it (see https://bugzilla.kernel.org/show_bug.cgi?id=115021).
1762 * This is useful on platforms with valid ECDT and invalid DSDT EC settings,
1763 * like ASUS X550ZE (see https://bugzilla.kernel.org/show_bug.cgi?id=196847).
1765 static void __init acpi_ec_ecdt_start(void)
1767 struct acpi_table_ecdt *ecdt_ptr;
1771 /* Bail out if a matching EC has been found in the namespace. */
1772 if (!boot_ec || boot_ec->handle != ACPI_ROOT_OBJECT)
1775 /* Look up the object pointed to from the ECDT in the namespace. */
1776 status = acpi_get_table(ACPI_SIG_ECDT, 1,
1777 (struct acpi_table_header **)&ecdt_ptr);
1778 if (ACPI_FAILURE(status))
1781 status = acpi_get_handle(NULL, ecdt_ptr->id, &handle);
1782 if (ACPI_SUCCESS(status)) {
1783 boot_ec->handle = handle;
1785 /* Add a special ACPI device object to represent the boot EC. */
1786 acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
1789 acpi_put_table((struct acpi_table_header *)ecdt_ptr);
1793 * On some hardware it is necessary to clear events accumulated by the EC during
1794 * sleep. These ECs stop reporting GPEs until they are manually polled, if too
1795 * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
1797 * https://bugzilla.kernel.org/show_bug.cgi?id=44161
1799 * Ideally, the EC should also be instructed NOT to accumulate events during
1800 * sleep (which Windows seems to do somehow), but the interface to control this
1801 * behaviour is not known at this time.
1803 * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
1804 * however it is very likely that other Samsung models are affected.
1806 * On systems which don't accumulate _Q events during sleep, this extra check
1807 * should be harmless.
1809 static int ec_clear_on_resume(const struct dmi_system_id *id)
1811 pr_debug("Detected system needing EC poll on resume.\n");
1812 EC_FLAGS_CLEAR_ON_RESUME = 1;
1813 ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
1818 * Some ECDTs contain wrong register addresses.
1820 * https://bugzilla.kernel.org/show_bug.cgi?id=12461
1822 static int ec_correct_ecdt(const struct dmi_system_id *id)
1824 pr_debug("Detected system needing ECDT address correction.\n");
1825 EC_FLAGS_CORRECT_ECDT = 1;
1830 * Some ECDTs contain a wrong GPE setting but share the same port addresses
1831 * with the DSDT EC; don't duplicate the DSDT EC with the ECDT EC in this case.
1832 * https://bugzilla.kernel.org/show_bug.cgi?id=209989
1834 static int ec_honor_dsdt_gpe(const struct dmi_system_id *id)
1836 pr_debug("Detected system needing DSDT GPE setting.\n");
1837 EC_FLAGS_TRUST_DSDT_GPE = 1;
1841 static const struct dmi_system_id ec_dmi_table[] __initconst = {
1843 ec_correct_ecdt, "MSI MS-171F", {
1844 DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
1845 DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
1847 /* https://bugzilla.kernel.org/show_bug.cgi?id=209989 */
1848 ec_honor_dsdt_gpe, "HP Pavilion Gaming Laptop 15-cx0xxx", {
1849 DMI_MATCH(DMI_SYS_VENDOR, "HP"),
1850 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"),}, NULL},
1852 ec_clear_on_resume, "Samsung hardware", {
1853 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
1857 void __init acpi_ec_ecdt_probe(void)
1859 struct acpi_table_ecdt *ecdt_ptr;
1864 /* Generate a boot ec context. */
1865 dmi_check_system(ec_dmi_table);
1866 status = acpi_get_table(ACPI_SIG_ECDT, 1,
1867 (struct acpi_table_header **)&ecdt_ptr);
1868 if (ACPI_FAILURE(status))
1871 if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) {
1874 * https://bugzilla.kernel.org/show_bug.cgi?id=11880
1879 ec = acpi_ec_alloc();
1883 if (EC_FLAGS_CORRECT_ECDT) {
1884 ec->command_addr = ecdt_ptr->data.address;
1885 ec->data_addr = ecdt_ptr->control.address;
1887 ec->command_addr = ecdt_ptr->control.address;
1888 ec->data_addr = ecdt_ptr->data.address;
1892 * Ignore the GPE value on Reduced Hardware platforms.
1893 * Some products have this set to an erroneous value.
1895 if (!acpi_gbl_reduced_hardware)
1896 ec->gpe = ecdt_ptr->gpe;
1898 ec->handle = ACPI_ROOT_OBJECT;
1901 * At this point, the namespace is not initialized, so do not look up
1902 * the namespace objects or handle the events.
1904 ret = acpi_ec_setup(ec, NULL);
1911 boot_ec_is_ecdt = true;
1913 pr_info("Boot ECDT EC used to handle transactions\n");
1916 acpi_put_table((struct acpi_table_header *)ecdt_ptr);
1919 #ifdef CONFIG_PM_SLEEP
1920 static int acpi_ec_suspend(struct device *dev)
1922 struct acpi_ec *ec =
1923 acpi_driver_data(to_acpi_device(dev));
1925 if (!pm_suspend_no_platform() && ec_freeze_events)
1926 acpi_ec_disable_event(ec);
1930 static int acpi_ec_suspend_noirq(struct device *dev)
1932 struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
1935 * The SCI handler doesn't run at this point, so the GPE can be
1936 * masked at the low level without side effects.
1938 if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
1939 ec->gpe >= 0 && ec->reference_count >= 1)
1940 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
1942 acpi_ec_enter_noirq(ec);
1947 static int acpi_ec_resume_noirq(struct device *dev)
1949 struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
1951 acpi_ec_leave_noirq(ec);
1953 if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
1954 ec->gpe >= 0 && ec->reference_count >= 1)
1955 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
1960 static int acpi_ec_resume(struct device *dev)
1962 struct acpi_ec *ec =
1963 acpi_driver_data(to_acpi_device(dev));
1965 acpi_ec_enable_event(ec);
1969 void acpi_ec_mark_gpe_for_wake(void)
1971 if (first_ec && !ec_no_wakeup)
1972 acpi_mark_gpe_for_wake(NULL, first_ec->gpe);
1974 EXPORT_SYMBOL_GPL(acpi_ec_mark_gpe_for_wake);
1976 void acpi_ec_set_gpe_wake_mask(u8 action)
1978 if (pm_suspend_no_platform() && first_ec && !ec_no_wakeup)
1979 acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
1982 bool acpi_ec_dispatch_gpe(void)
1984 bool work_in_progress;
1988 return acpi_any_gpe_status_set(U32_MAX);
1991 * Report wakeup if the status bit is set for any enabled GPE other
1992 * than the EC one.
1994 if (acpi_any_gpe_status_set(first_ec->gpe))
1998 * Dispatch the EC GPE in-band, but do not report wakeup in any case
1999 * to allow the caller to process events properly after that.
2001 ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
2002 if (ret == ACPI_INTERRUPT_HANDLED)
2003 pm_pr_dbg("ACPI EC GPE dispatched\n");
2005 /* Drain EC work. */
2007 acpi_ec_flush_work();
2009 pm_pr_dbg("ACPI EC work flushed\n");
2011 spin_lock_irq(&first_ec->lock);
2013 work_in_progress = first_ec->events_in_progress +
2014 first_ec->queries_in_progress > 0;
2016 spin_unlock_irq(&first_ec->lock);
2017 } while (work_in_progress && !pm_wakeup_pending());
2021 #endif /* CONFIG_PM_SLEEP */
2023 static const struct dev_pm_ops acpi_ec_pm = {
2024 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
2025 SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
2028 static int param_set_event_clearing(const char *val,
2029 const struct kernel_param *kp)
2033 if (!strncmp(val, "status", sizeof("status") - 1)) {
2034 ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
2035 pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
2036 } else if (!strncmp(val, "query", sizeof("query") - 1)) {
2037 ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
2038 pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
2039 } else if (!strncmp(val, "event", sizeof("event") - 1)) {
2040 ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
2041 pr_info("Assuming SCI_EVT clearing on event reads\n");
2047 static int param_get_event_clearing(char *buffer,
2048 const struct kernel_param *kp)
2050 switch (ec_event_clearing) {
2051 case ACPI_EC_EVT_TIMING_STATUS:
2052 return sprintf(buffer, "status\n");
2053 case ACPI_EC_EVT_TIMING_QUERY:
2054 return sprintf(buffer, "query\n");
2055 case ACPI_EC_EVT_TIMING_EVENT:
2056 return sprintf(buffer, "event\n");
2058 return sprintf(buffer, "invalid\n");
2063 module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
2065 MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");
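/*
 * Example (illustrative): the assumed SCI_EVT clearing timing can be
 * overridden on the kernel command line, e.g.:
 *
 *     acpi.ec_event_clearing=status
 *
 * Accepted values are "status", "query" and "event", matching the
 * ACPI_EC_EVT_TIMING_* definitions above; the default is "query".
 */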
2067 static struct acpi_driver acpi_ec_driver = {
2069 .class = ACPI_EC_CLASS,
2070 .ids = ec_device_ids,
2073 .remove = acpi_ec_remove,
2075 .drv.pm = &acpi_ec_pm,
2078 static void acpi_ec_destroy_workqueues(void)
2081 destroy_workqueue(ec_wq);
2085 destroy_workqueue(ec_query_wq);
2090 static int acpi_ec_init_workqueues(void)
2093 ec_wq = alloc_ordered_workqueue("kec", 0);
2096 ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);
2098 if (!ec_wq || !ec_query_wq) {
2099 acpi_ec_destroy_workqueues();
2105 static const struct dmi_system_id acpi_ec_no_wakeup[] = {
2107 .ident = "Thinkpad X1 Carbon 6th",
2109 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2110 DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
2114 .ident = "ThinkPad X1 Yoga 3rd",
2116 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2117 DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"),
2123 void __init acpi_ec_init(void)
2127 result = acpi_ec_init_workqueues();
2132 * Disable EC wakeup on the following systems to prevent periodic
2133 * wakeup from the EC GPE.
2135 if (dmi_check_system(acpi_ec_no_wakeup)) {
2136 ec_no_wakeup = true;
2137 pr_debug("Disabling EC wakeup on suspend-to-idle\n");
2140 /* Driver must be registered after acpi_ec_init_workqueues(). */
2141 acpi_bus_register_driver(&acpi_ec_driver);
2143 acpi_ec_ecdt_start();
2146 /* EC driver currently not unloadable */
2148 static void __exit acpi_ec_exit(void)
2151 acpi_bus_unregister_driver(&acpi_ec_driver);
2152 acpi_ec_destroy_workqueues();