1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * ec.c - ACPI Embedded Controller Driver (v3)
4 *
5 * Copyright (C) 2001-2015 Intel Corporation
6 * Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
7 * 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
8 * 2006 Denis Sadykov <denis.m.sadykov@intel.com>
9 * 2004 Luming Yu <luming.yu@intel.com>
10 * 2001, 2002 Andy Grover <andrew.grover@intel.com>
11 * 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
12 * Copyright (C) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
13 */
14
15/* Uncomment next line to get verbose printout */
16/* #define DEBUG */
17#define pr_fmt(fmt) "ACPI: EC: " fmt
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/types.h>
23#include <linux/delay.h>
24#include <linux/interrupt.h>
25#include <linux/list.h>
26#include <linux/spinlock.h>
27#include <linux/slab.h>
28#include <linux/suspend.h>
29#include <linux/acpi.h>
30#include <linux/dmi.h>
31#include <asm/io.h>
32
33#include "internal.h"
34
35#define ACPI_EC_CLASS "embedded_controller"
36#define ACPI_EC_DEVICE_NAME "Embedded Controller"
37#define ACPI_EC_FILE_INFO "info"
38
39/* EC status register */
40#define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
41#define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */
42#define ACPI_EC_FLAG_CMD 0x08 /* Input buffer contains a command */
43#define ACPI_EC_FLAG_BURST 0x10 /* burst mode */
44#define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */
45
46/*
47 * The SCI_EVT clearing timing is not defined by the ACPI specification.
48 * This leads to lots of practical timing issues for the host EC driver.
49 * The following variations are defined (from the target EC firmware's
50 * perspective):
51 * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
52 * target can clear SCI_EVT at any time so long as the host can see
53 * the indication by reading the status register (EC_SC). So the
54 * host should re-check SCI_EVT after the first time the SCI_EVT
55 * indication is seen, which is the same time the query request
56 * (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
57 * at any later time could indicate another event. Normally such
58 * EC firmware implements an event queue and will
59 * return 0x00 to indicate "no outstanding event".
60 * QUERY: After seeing the query request (QR_EC) written to the command
61 * register (EC_CMD) by the host and having prepared the responding
62 * event value in the data register (EC_DATA), the target can safely
63 * clear SCI_EVT because the target can confirm that the current
64 * event is being handled by the host. The host then should check
65 * SCI_EVT right after reading the event response from the data
66 * register (EC_DATA).
67 * EVENT: After seeing the event response read from the data register
68 * (EC_DATA) by the host, the target can clear SCI_EVT. As the
69 * target requires time to notice the change in the data register
70 * (EC_DATA), the host may be required to wait additional guarding
71 * time before checking the SCI_EVT again. Such guarding may not be
72 * necessary if the host is notified via another IRQ.
73 */
74#define ACPI_EC_EVT_TIMING_STATUS 0x00
75#define ACPI_EC_EVT_TIMING_QUERY 0x01
76#define ACPI_EC_EVT_TIMING_EVENT 0x02
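
/*
 * Informational sketch (not compiled, not part of the driver): one way to
 * read the three timing modes above is by asking when the host may safely
 * re-check SCI_EVT for the next event.  The PHASE_* markers below are
 * hypothetical; the real decisions are made in ec_transaction_transition()
 * and advance_transaction() further down.
 */
#if 0
enum { PHASE_QR_EC_WRITTEN, PHASE_EVENT_READ, PHASE_GUARD_ELAPSED };

static bool host_may_recheck_sci_evt(unsigned int mode, int phase)
{
	switch (mode) {
	case ACPI_EC_EVT_TIMING_STATUS:	/* after QR_EC reaches EC_CMD */
		return phase >= PHASE_QR_EC_WRITTEN;
	case ACPI_EC_EVT_TIMING_QUERY:	/* after the event is read from EC_DATA */
		return phase >= PHASE_EVENT_READ;
	case ACPI_EC_EVT_TIMING_EVENT:	/* after an extra guard period */
		return phase >= PHASE_GUARD_ELAPSED;
	}
	return false;
}
#endif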
77
78/* EC commands */
79enum ec_command {
80 ACPI_EC_COMMAND_READ = 0x80,
81 ACPI_EC_COMMAND_WRITE = 0x81,
82 ACPI_EC_BURST_ENABLE = 0x82,
83 ACPI_EC_BURST_DISABLE = 0x83,
84 ACPI_EC_COMMAND_QUERY = 0x84,
85};
86
87#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
88#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
89#define ACPI_EC_UDELAY_POLL 550 /* Wait 550us for EC transaction polling */
90#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
91 * when trying to clear the EC */
92#define ACPI_EC_MAX_QUERIES 16 /* Maximum number of parallel queries */
93
94enum {
95 EC_FLAGS_QUERY_ENABLED, /* Query is enabled */
96 EC_FLAGS_QUERY_PENDING, /* Query is pending */
97 EC_FLAGS_QUERY_GUARDING, /* Guard for SCI_EVT check */
98 EC_FLAGS_GPE_HANDLER_INSTALLED, /* GPE handler installed */
99 EC_FLAGS_EC_HANDLER_INSTALLED, /* OpReg handler installed */
100 EC_FLAGS_EVT_HANDLER_INSTALLED, /* _Qxx handlers installed */
101 EC_FLAGS_STARTED, /* Driver is started */
102 EC_FLAGS_STOPPED, /* Driver is stopped */
103 EC_FLAGS_GPE_MASKED, /* GPE masked */
104};
105
106#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
107#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */
108
109/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
110static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
111module_param(ec_delay, uint, 0644);
112MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
113
114static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
115module_param(ec_max_queries, uint, 0644);
116MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");
117
118static bool ec_busy_polling __read_mostly;
119module_param(ec_busy_polling, bool, 0644);
120MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
121
122static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
123module_param(ec_polling_guard, uint, 0644);
124MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");
125
126static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;
127
128/*
129 * If the number of false interrupts per transaction exceeds this
130 * threshold, assume that a GPE storm has happened and disable the
131 * GPE for the remainder of the transaction (fall back to polling).
132 */
133static unsigned int ec_storm_threshold __read_mostly = 8;
134module_param(ec_storm_threshold, uint, 0644);
135MODULE_PARM_DESC(ec_storm_threshold, "Maximum number of false GPEs not considered as a GPE storm");
136
137static bool ec_freeze_events __read_mostly = false;
138module_param(ec_freeze_events, bool, 0644);
139MODULE_PARM_DESC(ec_freeze_events, "Disable event handling during suspend/resume");
140
141static bool ec_no_wakeup __read_mostly;
142module_param(ec_no_wakeup, bool, 0644);
143MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
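
/*
 * Example usage: because ec.c is compiled in the acpi namespace (see the
 * comment above ec_delay), the parameters above are set on the kernel
 * command line with an "acpi." prefix, e.g.:
 *
 *   acpi.ec_delay=1000 acpi.ec_no_wakeup=1
 *
 * and, being mode 0644, can also be changed at runtime through sysfs
 * (path assumed from the standard module_param convention):
 *
 *   echo 1 > /sys/module/acpi/parameters/ec_no_wakeup
 */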
144
145struct acpi_ec_query_handler {
146 struct list_head node;
147 acpi_ec_query_func func;
148 acpi_handle handle;
149 void *data;
150 u8 query_bit;
151 struct kref kref;
152};
153
154struct transaction {
155 const u8 *wdata;
156 u8 *rdata;
157 unsigned short irq_count;
158 u8 command;
159 u8 wi;
160 u8 ri;
161 u8 wlen;
162 u8 rlen;
163 u8 flags;
164};
165
166struct acpi_ec_query {
167 struct transaction transaction;
168 struct work_struct work;
169 struct acpi_ec_query_handler *handler;
170 struct acpi_ec *ec;
171};
172
173static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
174static void advance_transaction(struct acpi_ec *ec);
175static void acpi_ec_event_handler(struct work_struct *work);
176static void acpi_ec_event_processor(struct work_struct *work);
177
178struct acpi_ec *first_ec;
179EXPORT_SYMBOL(first_ec);
180
181static struct acpi_ec *boot_ec;
182static bool boot_ec_is_ecdt = false;
183static struct workqueue_struct *ec_wq;
184static struct workqueue_struct *ec_query_wq;
185
186static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
187static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
188static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
189static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
190
191/* --------------------------------------------------------------------------
192 * Logging/Debugging
193 * -------------------------------------------------------------------------- */
194
195/*
196 * Splitters used by developers to mark the boundaries of the EC
197 * handling processes in the logs.
198 */
199#ifdef DEBUG
200#define EC_DBG_SEP " "
201#define EC_DBG_DRV "+++++"
202#define EC_DBG_STM "====="
203#define EC_DBG_REQ "*****"
204#define EC_DBG_EVT "#####"
205#else
206#define EC_DBG_SEP ""
207#define EC_DBG_DRV
208#define EC_DBG_STM
209#define EC_DBG_REQ
210#define EC_DBG_EVT
211#endif
212
213#define ec_log_raw(fmt, ...) \
214 pr_info(fmt "\n", ##__VA_ARGS__)
215#define ec_dbg_raw(fmt, ...) \
216 pr_debug(fmt "\n", ##__VA_ARGS__)
217#define ec_log(filter, fmt, ...) \
218 ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
219#define ec_dbg(filter, fmt, ...) \
220 ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
221
222#define ec_log_drv(fmt, ...) \
223 ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
224#define ec_dbg_drv(fmt, ...) \
225 ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
226#define ec_dbg_stm(fmt, ...) \
227 ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
228#define ec_dbg_req(fmt, ...) \
229 ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
230#define ec_dbg_evt(fmt, ...) \
231 ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
232#define ec_dbg_ref(ec, fmt, ...) \
233 ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
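
/*
 * For illustration: with DEBUG defined, the wrappers above bracket each
 * message with its splitter, so ec_dbg_req("Command(%s) started", "RD_EC")
 * would be logged (with the pr_fmt prefix) as:
 *
 *   ACPI: EC: ***** Command(RD_EC) started *****
 */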
234
235/* --------------------------------------------------------------------------
236 * Device Flags
237 * -------------------------------------------------------------------------- */
238
239static bool acpi_ec_started(struct acpi_ec *ec)
240{
241 return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
242 !test_bit(EC_FLAGS_STOPPED, &ec->flags);
243}
244
245static bool acpi_ec_event_enabled(struct acpi_ec *ec)
246{
247 /*
248 * During the early stages (boot/resume), the OSPM shouldn't
249 * enable event handling; only EC transactions are allowed to be
250 * performed.
251 */
252 if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
253 return false;
254 /*
255 * However, disabling event handling during the late stage
256 * (suspend) is experimental and controlled by the boot parameter
257 * "ec_freeze_events":
258 * 1. true: The EC event handling is disabled before entering
259 * the noirq stage.
260 * 2. false: The EC event handling is automatically disabled as
261 * soon as the EC driver is stopped.
262 */
263 if (ec_freeze_events)
264 return acpi_ec_started(ec);
265 else
266 return test_bit(EC_FLAGS_STARTED, &ec->flags);
267}
268
269static bool acpi_ec_flushed(struct acpi_ec *ec)
270{
271 return ec->reference_count == 1;
272}
273
274/* --------------------------------------------------------------------------
275 * EC Registers
276 * -------------------------------------------------------------------------- */
277
278static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
279{
280 u8 x = inb(ec->command_addr);
281
282 ec_dbg_raw("EC_SC(R) = 0x%2.2x "
283 "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
284 x,
285 !!(x & ACPI_EC_FLAG_SCI),
286 !!(x & ACPI_EC_FLAG_BURST),
287 !!(x & ACPI_EC_FLAG_CMD),
288 !!(x & ACPI_EC_FLAG_IBF),
289 !!(x & ACPI_EC_FLAG_OBF));
290 return x;
291}
292
293static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
294{
295 u8 x = inb(ec->data_addr);
296
297 ec->timestamp = jiffies;
298 ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
299 return x;
300}
301
302static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
303{
304 ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
305 outb(command, ec->command_addr);
306 ec->timestamp = jiffies;
307}
308
309static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
310{
311 ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
312 outb(data, ec->data_addr);
313 ec->timestamp = jiffies;
314}
315
316#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
317static const char *acpi_ec_cmd_string(u8 cmd)
318{
319 switch (cmd) {
320 case 0x80:
321 return "RD_EC";
322 case 0x81:
323 return "WR_EC";
324 case 0x82:
325 return "BE_EC";
326 case 0x83:
327 return "BD_EC";
328 case 0x84:
329 return "QR_EC";
330 }
331 return "UNKNOWN";
332}
333#else
334#define acpi_ec_cmd_string(cmd) "UNDEF"
335#endif
336
337/* --------------------------------------------------------------------------
338 * GPE Registers
339 * -------------------------------------------------------------------------- */
340
341static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
342{
343 acpi_event_status gpe_status = 0;
344
345 (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
346 return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? true : false;
347}
348
349static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
350{
351 if (open)
352 acpi_enable_gpe(NULL, ec->gpe);
353 else {
354 BUG_ON(ec->reference_count < 1);
355 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
356 }
357 if (acpi_ec_is_gpe_raised(ec)) {
358 /*
359 * On some platforms, EN=1 writes cannot trigger GPEs, so
360 * software needs to manually trigger a pseudo GPE event on
361 * EN=1 writes.
362 */
363 ec_dbg_raw("Polling quirk");
364 advance_transaction(ec);
365 }
366}
367
368static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
369{
370 if (close)
371 acpi_disable_gpe(NULL, ec->gpe);
372 else {
373 BUG_ON(ec->reference_count < 1);
374 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
375 }
376}
377
378static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
379{
380 /*
381 * GPE STS is a W1C register, which means:
382 * 1. Software can clear it without worrying about clearing other
383 * GPEs' STS bits when the hardware sets them in parallel.
384 * 2. As long as software can ensure only clearing it when it is
385 * set, hardware won't set it in parallel.
386 * So software can clear GPE in any contexts.
387 * Warning: do not move the check into advance_transaction() as the
388 * EC commands will be sent without GPE raised.
389 */
390 if (!acpi_ec_is_gpe_raised(ec))
391 return;
392 acpi_clear_gpe(NULL, ec->gpe);
393}
394
395/* --------------------------------------------------------------------------
396 * Transaction Management
397 * -------------------------------------------------------------------------- */
398
399static void acpi_ec_submit_request(struct acpi_ec *ec)
400{
401 ec->reference_count++;
402 if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
403 ec->reference_count == 1)
404 acpi_ec_enable_gpe(ec, true);
405}
406
407static void acpi_ec_complete_request(struct acpi_ec *ec)
408{
409 bool flushed = false;
410
411 ec->reference_count--;
412 if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
413 ec->reference_count == 0)
414 acpi_ec_disable_gpe(ec, true);
415 flushed = acpi_ec_flushed(ec);
416 if (flushed)
417 wake_up(&ec->wait);
418}
419
420static void acpi_ec_mask_gpe(struct acpi_ec *ec)
421{
422 if (!test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
423 acpi_ec_disable_gpe(ec, false);
424 ec_dbg_drv("Polling enabled");
425 set_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
426 }
427}
428
429static void acpi_ec_unmask_gpe(struct acpi_ec *ec)
430{
431 if (test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
432 clear_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
433 acpi_ec_enable_gpe(ec, false);
434 ec_dbg_drv("Polling disabled");
435 }
436}
437
438/*
439 * acpi_ec_submit_flushable_request() - Increase the reference count
440 * unless a flush operation is in
441 * progress
442 * @ec: the EC device
443 *
444 * This function must be used before taking a new action that should hold
445 * the reference count. If this function returns false, then the action
446 * must be discarded or it will prevent the flush operation from being
447 * completed.
448 */
449static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
450{
451 if (!acpi_ec_started(ec))
452 return false;
453 acpi_ec_submit_request(ec);
454 return true;
455}
456
457static void acpi_ec_submit_query(struct acpi_ec *ec)
458{
459 acpi_ec_mask_gpe(ec);
460 if (!acpi_ec_event_enabled(ec))
461 return;
462 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
463 ec_dbg_evt("Command(%s) submitted/blocked",
464 acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
465 ec->nr_pending_queries++;
466 ec->events_in_progress++;
467 queue_work(ec_wq, &ec->work);
468 }
469}
470
471static void acpi_ec_complete_query(struct acpi_ec *ec)
472{
473 if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
474 ec_dbg_evt("Command(%s) unblocked",
475 acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
476 acpi_ec_unmask_gpe(ec);
477}
478
479static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
480{
481 if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
482 ec_log_drv("event unblocked");
483 /*
484 * Unconditionally invoke this once after enabling the event
485 * handling mechanism to detect the pending events.
486 */
487 advance_transaction(ec);
488}
489
490static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
491{
492 if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
493 ec_log_drv("event blocked");
494}
495
496/*
497 * Process _Q events that might have accumulated in the EC.
498 * Must be called with the ec mutex held.
499 */
500static void acpi_ec_clear(struct acpi_ec *ec)
501{
502 int i, status;
503 u8 value = 0;
504
505 for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
506 status = acpi_ec_query(ec, &value);
507 if (status || !value)
508 break;
509 }
510 if (unlikely(i == ACPI_EC_CLEAR_MAX))
511 pr_warn("Maximum of %d stale EC events cleared\n", i);
512 else
513 pr_info("%d stale EC events cleared\n", i);
514}
515
516static void acpi_ec_enable_event(struct acpi_ec *ec)
517{
518 unsigned long flags;
519
520 spin_lock_irqsave(&ec->lock, flags);
521 if (acpi_ec_started(ec))
522 __acpi_ec_enable_event(ec);
523 spin_unlock_irqrestore(&ec->lock, flags);
524
525 /* Drain additional events if hardware requires that */
526 if (EC_FLAGS_CLEAR_ON_RESUME)
527 acpi_ec_clear(ec);
528}
529
530#ifdef CONFIG_PM_SLEEP
531static void __acpi_ec_flush_work(void)
532{
533 flush_workqueue(ec_wq); /* flush ec->work */
534 flush_workqueue(ec_query_wq); /* flush queries */
535}
536
537static void acpi_ec_disable_event(struct acpi_ec *ec)
538{
539 unsigned long flags;
540
541 spin_lock_irqsave(&ec->lock, flags);
542 __acpi_ec_disable_event(ec);
543 spin_unlock_irqrestore(&ec->lock, flags);
544
545 /*
546 * When ec_freeze_events is true, we need to flush events in
547 * the proper position before entering the noirq stage.
548 */
549 __acpi_ec_flush_work();
550}
551
552void acpi_ec_flush_work(void)
553{
554 /* Without ec_wq there is nothing to flush. */
555 if (!ec_wq)
556 return;
557
558 __acpi_ec_flush_work();
559}
560#endif /* CONFIG_PM_SLEEP */
561
562static bool acpi_ec_guard_event(struct acpi_ec *ec)
563{
564 bool guarded = true;
565 unsigned long flags;
566
567 spin_lock_irqsave(&ec->lock, flags);
568 /*
569 * If firmware SCI_EVT clearing timing is "event", we actually
570 * don't know when the SCI_EVT will be cleared by firmware after
571 * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
572 * acceptable period.
573 *
574 * The guarding period begins when EC_FLAGS_QUERY_PENDING is
575 * flagged, which means SCI_EVT check has just been performed.
576 * But if the current transaction is ACPI_EC_COMMAND_QUERY, the
577 * guarding should have already been performed (via
578 * EC_FLAGS_QUERY_GUARDING) and should not be applied so that the
579 * ACPI_EC_COMMAND_QUERY transaction can be transitioned into
580 * ACPI_EC_COMMAND_POLL state immediately.
581 */
582 if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
583 ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
584 !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
585 (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
586 guarded = false;
587 spin_unlock_irqrestore(&ec->lock, flags);
588 return guarded;
589}
590
591static int ec_transaction_polled(struct acpi_ec *ec)
592{
593 unsigned long flags;
594 int ret = 0;
595
596 spin_lock_irqsave(&ec->lock, flags);
597 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
598 ret = 1;
599 spin_unlock_irqrestore(&ec->lock, flags);
600 return ret;
601}
602
603static int ec_transaction_completed(struct acpi_ec *ec)
604{
605 unsigned long flags;
606 int ret = 0;
607
608 spin_lock_irqsave(&ec->lock, flags);
609 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
610 ret = 1;
611 spin_unlock_irqrestore(&ec->lock, flags);
612 return ret;
613}
614
615static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
616{
617 ec->curr->flags |= flag;
618 if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
619 if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
620 flag == ACPI_EC_COMMAND_POLL)
621 acpi_ec_complete_query(ec);
622 if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
623 flag == ACPI_EC_COMMAND_COMPLETE)
624 acpi_ec_complete_query(ec);
625 if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
626 flag == ACPI_EC_COMMAND_COMPLETE)
627 set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
628 }
629}
630
631static void advance_transaction(struct acpi_ec *ec)
632{
633 struct transaction *t;
634 u8 status;
635 bool wakeup = false;
636
637 ec_dbg_stm("%s (%d)", in_interrupt() ? "IRQ" : "TASK",
638 smp_processor_id());
639 /*
640 * By clearing STS before handling the indications, we ensure that
641 * a hardware STS 0->1 change after this clearing can always
642 * trigger a GPE interrupt.
643 */
644 acpi_ec_clear_gpe(ec);
645 status = acpi_ec_read_status(ec);
646 t = ec->curr;
647 /*
648 * If another IRQ or a guarded polling mode advancement is
649 * detected, the next QR_EC submission is allowed.
650 */
651 if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
652 if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
653 (!ec->nr_pending_queries ||
654 test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) {
655 clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
656 acpi_ec_complete_query(ec);
657 }
658 }
659 if (!t)
660 goto err;
661 if (t->flags & ACPI_EC_COMMAND_POLL) {
662 if (t->wlen > t->wi) {
663 if ((status & ACPI_EC_FLAG_IBF) == 0)
664 acpi_ec_write_data(ec, t->wdata[t->wi++]);
665 else
666 goto err;
667 } else if (t->rlen > t->ri) {
668 if ((status & ACPI_EC_FLAG_OBF) == 1) {
669 t->rdata[t->ri++] = acpi_ec_read_data(ec);
670 if (t->rlen == t->ri) {
671 ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
672 if (t->command == ACPI_EC_COMMAND_QUERY)
673 ec_dbg_evt("Command(%s) completed by hardware",
674 acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
675 wakeup = true;
676 }
677 } else
678 goto err;
679 } else if (t->wlen == t->wi &&
680 (status & ACPI_EC_FLAG_IBF) == 0) {
681 ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
682 wakeup = true;
683 }
684 goto out;
685 } else {
686 if (EC_FLAGS_QUERY_HANDSHAKE &&
687 !(status & ACPI_EC_FLAG_SCI) &&
688 (t->command == ACPI_EC_COMMAND_QUERY)) {
689 ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
690 t->rdata[t->ri++] = 0x00;
691 ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
692 ec_dbg_evt("Command(%s) completed by software",
693 acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
694 wakeup = true;
695 } else if ((status & ACPI_EC_FLAG_IBF) == 0) {
696 acpi_ec_write_cmd(ec, t->command);
697 ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
698 } else
699 goto err;
700 goto out;
701 }
702err:
703 /*
704 * If the SCI bit is set, don't treat this as a false IRQ;
705 * otherwise an unhandled IRQ would be counted as a false one.
706 */
707 if (!(status & ACPI_EC_FLAG_SCI)) {
708 if (in_interrupt() && t) {
709 if (t->irq_count < ec_storm_threshold)
710 ++t->irq_count;
711 /* Allow triggering on 0 threshold */
712 if (t->irq_count == ec_storm_threshold)
713 acpi_ec_mask_gpe(ec);
714 }
715 }
716out:
717 if (status & ACPI_EC_FLAG_SCI)
718 acpi_ec_submit_query(ec);
719 if (wakeup && in_interrupt())
720 wake_up(&ec->wait);
721}
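
/*
 * Worked example: a one-byte ACPI_EC_COMMAND_READ transaction as driven by
 * advance_transaction() above, where each step is triggered by a GPE IRQ
 * or a poll:
 *
 *   1. IBF=0 -> write 0x80 to EC_CMD, transition to ACPI_EC_COMMAND_POLL
 *   2. IBF=0 -> write the address byte (t->wdata[0]) to EC_DATA
 *   3. OBF=1 -> read the result byte from EC_DATA into t->rdata[0],
 *      transition to ACPI_EC_COMMAND_COMPLETE and wake up the waiter
 */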
722
723static void start_transaction(struct acpi_ec *ec)
724{
725 ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
726 ec->curr->flags = 0;
727}
728
729static int ec_guard(struct acpi_ec *ec)
730{
731 unsigned long guard = usecs_to_jiffies(ec->polling_guard);
732 unsigned long timeout = ec->timestamp + guard;
733
734 /* Ensure guarding period before polling EC status */
735 do {
736 if (ec->busy_polling) {
737 /* Perform busy polling */
738 if (ec_transaction_completed(ec))
739 return 0;
740 udelay(jiffies_to_usecs(guard));
741 } else {
742 /*
743 * Perform wait polling
744 * 1. Wait the transaction to be completed by the
745 * GPE handler after the transaction enters
746 * ACPI_EC_COMMAND_POLL state.
747 * 2. A special guarding logic is also required
748 * for event clearing mode "event" before the
749 * transaction enters ACPI_EC_COMMAND_POLL
750 * state.
751 */
752 if (!ec_transaction_polled(ec) &&
753 !acpi_ec_guard_event(ec))
754 break;
755 if (wait_event_timeout(ec->wait,
756 ec_transaction_completed(ec),
757 guard))
758 return 0;
759 }
760 } while (time_before(jiffies, timeout));
761 return -ETIME;
762}
763
764static int ec_poll(struct acpi_ec *ec)
765{
766 unsigned long flags;
767 int repeat = 5; /* number of command restarts */
768
769 while (repeat--) {
770 unsigned long delay = jiffies +
771 msecs_to_jiffies(ec_delay);
772 do {
773 if (!ec_guard(ec))
774 return 0;
775 spin_lock_irqsave(&ec->lock, flags);
776 advance_transaction(ec);
777 spin_unlock_irqrestore(&ec->lock, flags);
778 } while (time_before(jiffies, delay));
779 pr_debug("controller reset, restart transaction\n");
780 spin_lock_irqsave(&ec->lock, flags);
781 start_transaction(ec);
782 spin_unlock_irqrestore(&ec->lock, flags);
783 }
784 return -ETIME;
785}
786
787static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
788 struct transaction *t)
789{
790 unsigned long tmp;
791 int ret = 0;
792
793 if (t->rdata)
794 memset(t->rdata, 0, t->rlen);
795
796 /* start transaction */
797 spin_lock_irqsave(&ec->lock, tmp);
798 /* Enable GPE for command processing (IBF=0/OBF=1) */
799 if (!acpi_ec_submit_flushable_request(ec)) {
800 ret = -EINVAL;
801 goto unlock;
802 }
803 ec_dbg_ref(ec, "Increase command");
804 /* following two actions should be kept atomic */
805 ec->curr = t;
806 ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
807 start_transaction(ec);
808 spin_unlock_irqrestore(&ec->lock, tmp);
809
810 ret = ec_poll(ec);
811
812 spin_lock_irqsave(&ec->lock, tmp);
813 if (t->irq_count == ec_storm_threshold)
814 acpi_ec_unmask_gpe(ec);
815 ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
816 ec->curr = NULL;
817 /* Disable GPE for command processing (IBF=0/OBF=1) */
818 acpi_ec_complete_request(ec);
819 ec_dbg_ref(ec, "Decrease command");
820unlock:
821 spin_unlock_irqrestore(&ec->lock, tmp);
822 return ret;
823}
824
825static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
826{
827 int status;
828 u32 glk;
829
830 if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
831 return -EINVAL;
832
833 mutex_lock(&ec->mutex);
834 if (ec->global_lock) {
835 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
836 if (ACPI_FAILURE(status)) {
837 status = -ENODEV;
838 goto unlock;
839 }
840 }
841
842 status = acpi_ec_transaction_unlocked(ec, t);
843
844 if (ec->global_lock)
845 acpi_release_global_lock(glk);
846unlock:
847 mutex_unlock(&ec->mutex);
848 return status;
849}
850
851static int acpi_ec_burst_enable(struct acpi_ec *ec)
852{
853 u8 d;
854 struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
855 .wdata = NULL, .rdata = &d,
856 .wlen = 0, .rlen = 1};
857
858 return acpi_ec_transaction_unlocked(ec, &t);
859}
860
861static int acpi_ec_burst_disable(struct acpi_ec *ec)
862{
863 struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
864 .wdata = NULL, .rdata = NULL,
865 .wlen = 0, .rlen = 0};
866
867 return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
868 acpi_ec_transaction_unlocked(ec, &t) : 0;
869}
870
871static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
872{
873 int result;
874 u8 d;
875 struct transaction t = {.command = ACPI_EC_COMMAND_READ,
876 .wdata = &address, .rdata = &d,
877 .wlen = 1, .rlen = 1};
878
879 result = acpi_ec_transaction(ec, &t);
880 *data = d;
881 return result;
882}
883
884static int acpi_ec_read_unlocked(struct acpi_ec *ec, u8 address, u8 *data)
885{
886 int result;
887 u8 d;
888 struct transaction t = {.command = ACPI_EC_COMMAND_READ,
889 .wdata = &address, .rdata = &d,
890 .wlen = 1, .rlen = 1};
891
892 result = acpi_ec_transaction_unlocked(ec, &t);
893 *data = d;
894 return result;
895}
896
897static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
898{
899 u8 wdata[2] = { address, data };
900 struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
901 .wdata = wdata, .rdata = NULL,
902 .wlen = 2, .rlen = 0};
903
904 return acpi_ec_transaction(ec, &t);
905}
906
907static int acpi_ec_write_unlocked(struct acpi_ec *ec, u8 address, u8 data)
908{
909 u8 wdata[2] = { address, data };
910 struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
911 .wdata = wdata, .rdata = NULL,
912 .wlen = 2, .rlen = 0};
913
914 return acpi_ec_transaction_unlocked(ec, &t);
915}
916
917int ec_read(u8 addr, u8 *val)
918{
919 int err;
920 u8 temp_data;
921
922 if (!first_ec)
923 return -ENODEV;
924
925 err = acpi_ec_read(first_ec, addr, &temp_data);
926
927 if (!err) {
928 *val = temp_data;
929 return 0;
930 }
931 return err;
932}
933EXPORT_SYMBOL(ec_read);
934
935int ec_write(u8 addr, u8 val)
936{
937 int err;
938
939 if (!first_ec)
940 return -ENODEV;
941
942 err = acpi_ec_write(first_ec, addr, val);
943
944 return err;
945}
946EXPORT_SYMBOL(ec_write);
947
948int ec_transaction(u8 command,
949 const u8 *wdata, unsigned wdata_len,
950 u8 *rdata, unsigned rdata_len)
951{
952 struct transaction t = {.command = command,
953 .wdata = wdata, .rdata = rdata,
954 .wlen = wdata_len, .rlen = rdata_len};
955
956 if (!first_ec)
957 return -ENODEV;
958
959 return acpi_ec_transaction(first_ec, &t);
960}
961EXPORT_SYMBOL(ec_transaction);
962
963/* Get the handle to the EC device */
964acpi_handle ec_get_handle(void)
965{
966 if (!first_ec)
967 return NULL;
968 return first_ec->handle;
969}
970EXPORT_SYMBOL(ec_get_handle);
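
/*
 * Usage sketch (hypothetical caller, not part of this file): platform
 * drivers use the exported helpers above to access the EC address space.
 * The register offset 0x2f is a made-up example.
 */
#if 0
static int example_set_ec_bit0(void)
{
	u8 val;
	int err;

	err = ec_read(0x2f, &val);	/* hypothetical EC register */
	if (err)
		return err;
	return ec_write(0x2f, val | 0x01);	/* set bit 0, write it back */
}
#endif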
971
972static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
973{
974 unsigned long flags;
975
976 spin_lock_irqsave(&ec->lock, flags);
977 if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
978 ec_dbg_drv("Starting EC");
979 /* Enable GPE for event processing (SCI_EVT=1) */
980 if (!resuming) {
981 acpi_ec_submit_request(ec);
982 ec_dbg_ref(ec, "Increase driver");
983 }
984 ec_log_drv("EC started");
985 }
986 spin_unlock_irqrestore(&ec->lock, flags);
987}
988
989static bool acpi_ec_stopped(struct acpi_ec *ec)
990{
991 unsigned long flags;
992 bool flushed;
993
994 spin_lock_irqsave(&ec->lock, flags);
995 flushed = acpi_ec_flushed(ec);
996 spin_unlock_irqrestore(&ec->lock, flags);
997 return flushed;
998}
999
1000static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
1001{
1002 unsigned long flags;
1003
1004 spin_lock_irqsave(&ec->lock, flags);
1005 if (acpi_ec_started(ec)) {
1006 ec_dbg_drv("Stopping EC");
1007 set_bit(EC_FLAGS_STOPPED, &ec->flags);
1008 spin_unlock_irqrestore(&ec->lock, flags);
1009 wait_event(ec->wait, acpi_ec_stopped(ec));
1010 spin_lock_irqsave(&ec->lock, flags);
1011 /* Disable GPE for event processing (SCI_EVT=1) */
1012 if (!suspending) {
1013 acpi_ec_complete_request(ec);
1014 ec_dbg_ref(ec, "Decrease driver");
1015 } else if (!ec_freeze_events)
1016 __acpi_ec_disable_event(ec);
1017 clear_bit(EC_FLAGS_STARTED, &ec->flags);
1018 clear_bit(EC_FLAGS_STOPPED, &ec->flags);
1019 ec_log_drv("EC stopped");
1020 }
1021 spin_unlock_irqrestore(&ec->lock, flags);
1022}
1023
1024static void acpi_ec_enter_noirq(struct acpi_ec *ec)
1025{
1026 unsigned long flags;
1027
1028 spin_lock_irqsave(&ec->lock, flags);
1029 ec->busy_polling = true;
1030 ec->polling_guard = 0;
1031 ec_log_drv("interrupt blocked");
1032 spin_unlock_irqrestore(&ec->lock, flags);
1033}
1034
1035static void acpi_ec_leave_noirq(struct acpi_ec *ec)
1036{
1037 unsigned long flags;
1038
1039 spin_lock_irqsave(&ec->lock, flags);
1040 ec->busy_polling = ec_busy_polling;
1041 ec->polling_guard = ec_polling_guard;
1042 ec_log_drv("interrupt unblocked");
1043 spin_unlock_irqrestore(&ec->lock, flags);
1044}
1045
1046void acpi_ec_block_transactions(void)
1047{
1048 struct acpi_ec *ec = first_ec;
1049
1050 if (!ec)
1051 return;
1052
1053 mutex_lock(&ec->mutex);
1054 /* Prevent transactions from being carried out */
1055 acpi_ec_stop(ec, true);
1056 mutex_unlock(&ec->mutex);
1057}
1058
1059void acpi_ec_unblock_transactions(void)
1060{
1061 /*
1062 * Allow transactions to happen again (this function is called from
1063 * atomic context during wakeup, so we don't need to acquire the mutex).
1064 */
1065 if (first_ec)
1066 acpi_ec_start(first_ec, true);
1067}
1068
1069/* --------------------------------------------------------------------------
1070 Event Management
1071 -------------------------------------------------------------------------- */
1072static struct acpi_ec_query_handler *
1073acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
1074{
1075 struct acpi_ec_query_handler *handler;
1076
1077 mutex_lock(&ec->mutex);
1078 list_for_each_entry(handler, &ec->list, node) {
1079 if (value == handler->query_bit) {
1080 kref_get(&handler->kref);
1081 mutex_unlock(&ec->mutex);
1082 return handler;
1083 }
1084 }
1085 mutex_unlock(&ec->mutex);
1086 return NULL;
1087}
1088
1089static void acpi_ec_query_handler_release(struct kref *kref)
1090{
1091 struct acpi_ec_query_handler *handler =
1092 container_of(kref, struct acpi_ec_query_handler, kref);
1093
1094 kfree(handler);
1095}
1096
1097static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
1098{
1099 kref_put(&handler->kref, acpi_ec_query_handler_release);
1100}
1101
1102int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
1103 acpi_handle handle, acpi_ec_query_func func,
1104 void *data)
1105{
1106 struct acpi_ec_query_handler *handler =
1107 kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);
1108
1109 if (!handler)
1110 return -ENOMEM;
1111
1112 handler->query_bit = query_bit;
1113 handler->handle = handle;
1114 handler->func = func;
1115 handler->data = data;
1116 mutex_lock(&ec->mutex);
1117 kref_init(&handler->kref);
1118 list_add(&handler->node, &ec->list);
1119 mutex_unlock(&ec->mutex);
1120 return 0;
1121}
1122EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
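
/*
 * Usage sketch (hypothetical caller): a driver may attach a callback to a
 * _Qxx event number instead of an AML handle.  The 0x42 query bit is a
 * made-up example, and the callback prototype assumes the
 * acpi_ec_query_func form used by handler->func above.
 */
#if 0
static int example_query_handler(void *data)
{
	pr_info("EC query 0x42 fired\n");
	return 0;
}

static int example_register(void)
{
	if (!first_ec)
		return -ENODEV;
	return acpi_ec_add_query_handler(first_ec, 0x42, NULL,
					 example_query_handler, NULL);
}
#endif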
1123
1124static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
1125 bool remove_all, u8 query_bit)
1126{
1127 struct acpi_ec_query_handler *handler, *tmp;
1128 LIST_HEAD(free_list);
1129
1130 mutex_lock(&ec->mutex);
1131 list_for_each_entry_safe(handler, tmp, &ec->list, node) {
1132 if (remove_all || query_bit == handler->query_bit) {
1133 list_del_init(&handler->node);
1134 list_add(&handler->node, &free_list);
1135 }
1136 }
1137 mutex_unlock(&ec->mutex);
1138 list_for_each_entry_safe(handler, tmp, &free_list, node)
1139 acpi_ec_put_query_handler(handler);
1140}
1141
1142void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
1143{
1144 acpi_ec_remove_query_handlers(ec, false, query_bit);
1145 flush_workqueue(ec_query_wq);
1146}
1147EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
1148
1149static struct acpi_ec_query *acpi_ec_create_query(struct acpi_ec *ec, u8 *pval)
1150{
1151 struct acpi_ec_query *q;
1152 struct transaction *t;
1153
1154 q = kzalloc(sizeof(struct acpi_ec_query), GFP_KERNEL);
1155 if (!q)
1156 return NULL;
1157
1158 INIT_WORK(&q->work, acpi_ec_event_processor);
1159 t = &q->transaction;
1160 t->command = ACPI_EC_COMMAND_QUERY;
1161 t->rdata = pval;
1162 t->rlen = 1;
1163 q->ec = ec;
1164 return q;
1165}
1166
1167static void acpi_ec_delete_query(struct acpi_ec_query *q)
1168{
1169 if (q) {
1170 if (q->handler)
1171 acpi_ec_put_query_handler(q->handler);
1172 kfree(q);
1173 }
1174}
1175
1176static void acpi_ec_event_processor(struct work_struct *work)
1177{
1178 struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
1179 struct acpi_ec_query_handler *handler = q->handler;
1180 struct acpi_ec *ec = q->ec;
1181
1182 ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
1183
1184 if (handler->func)
1185 handler->func(handler->data);
1186 else if (handler->handle)
1187 acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
1188
1189 ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
1190
1191 spin_lock_irq(&ec->lock);
1192 ec->queries_in_progress--;
1193 spin_unlock_irq(&ec->lock);
1194
1195 acpi_ec_delete_query(q);
1196}
1197
1198static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
1199{
1200 u8 value = 0;
1201 int result;
1202 struct acpi_ec_query *q;
1203
1204 q = acpi_ec_create_query(ec, &value);
1205 if (!q)
1206 return -ENOMEM;
1207
1208 /*
1209 * Query the EC to find out which _Qxx method we need to evaluate.
1210 * Note that successful completion of the query causes the ACPI_EC_SCI
1211 * bit to be cleared (thus clearing the interrupt source).
1212 */
1213 result = acpi_ec_transaction(ec, &q->transaction);
1214 if (!value)
1215 result = -ENODATA;
1216 if (result)
1217 goto err_exit;
1218
1219 q->handler = acpi_ec_get_query_handler_by_value(ec, value);
1220 if (!q->handler) {
1221 result = -ENODATA;
1222 goto err_exit;
1223 }
1224
1225 /*
1226 * It is reported that _Qxx are evaluated in a parallel way on Windows:
1227 * https://bugzilla.kernel.org/show_bug.cgi?id=94411
1228 *
1229 * Put this log entry before queue_work() to make it appear in the log
1230 * before any other messages emitted during workqueue handling.
1231 */
1232 ec_dbg_evt("Query(0x%02x) scheduled", value);
1233
1234 spin_lock_irq(&ec->lock);
1235
1236 ec->queries_in_progress++;
1237 queue_work(ec_query_wq, &q->work);
1238
1239 spin_unlock_irq(&ec->lock);
1240
1241err_exit:
1242 if (result)
1243 acpi_ec_delete_query(q);
1244 if (data)
1245 *data = value;
1246 return result;
1247}
1248
1249static void acpi_ec_check_event(struct acpi_ec *ec)
1250{
1251 unsigned long flags;
1252
1253 if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
1254 if (ec_guard(ec)) {
1255 spin_lock_irqsave(&ec->lock, flags);
1256 /*
1257 * Take care of SCI_EVT if no one else is
1258 * taking care of it (i.e. no transaction is in flight).
1259 */
1260 if (!ec->curr)
1261 advance_transaction(ec);
1262 spin_unlock_irqrestore(&ec->lock, flags);
1263 }
1264 }
1265}
1266
1267static void acpi_ec_event_handler(struct work_struct *work)
1268{
1269 unsigned long flags;
1270 struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
1271
1272 ec_dbg_evt("Event started");
1273
1274 spin_lock_irqsave(&ec->lock, flags);
1275 while (ec->nr_pending_queries) {
1276 spin_unlock_irqrestore(&ec->lock, flags);
1277 (void)acpi_ec_query(ec, NULL);
1278 spin_lock_irqsave(&ec->lock, flags);
1279 ec->nr_pending_queries--;
1280 /*
1281 * Before exit, make sure that this work item can be
1282 * scheduled again. There might be QR_EC failures, leaving
1283 * EC_FLAGS_QUERY_PENDING uncleared and preventing this work
1284 * item from being scheduled again.
1285 */
1286 if (!ec->nr_pending_queries) {
1287 if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
1288 ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY)
1289 acpi_ec_complete_query(ec);
1290 }
1291 }
1292 spin_unlock_irqrestore(&ec->lock, flags);
1293
1294 ec_dbg_evt("Event stopped");
1295
1296 acpi_ec_check_event(ec);
1297
1298 spin_lock_irqsave(&ec->lock, flags);
1299 ec->events_in_progress--;
1300 spin_unlock_irqrestore(&ec->lock, flags);
1301}
1302
1303static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
1304 u32 gpe_number, void *data)
1305{
1306 unsigned long flags;
1307 struct acpi_ec *ec = data;
1308
1309 spin_lock_irqsave(&ec->lock, flags);
1310 advance_transaction(ec);
1311 spin_unlock_irqrestore(&ec->lock, flags);
1312 return ACPI_INTERRUPT_HANDLED;
1313}
1314
1315/* --------------------------------------------------------------------------
1316 * Address Space Management
1317 * -------------------------------------------------------------------------- */
1318
1319static acpi_status
1320acpi_ec_space_handler(u32 function, acpi_physical_address address,
1321 u32 bits, u64 *value64,
1322 void *handler_context, void *region_context)
1323{
1324 struct acpi_ec *ec = handler_context;
1325 int result = 0, i, bytes = bits / 8;
1326 u8 *value = (u8 *)value64;
1327 u32 glk;
1328
1329 if ((address > 0xFF) || !value || !handler_context)
1330 return AE_BAD_PARAMETER;
1331
1332 if (function != ACPI_READ && function != ACPI_WRITE)
1333 return AE_BAD_PARAMETER;
1334
1335 mutex_lock(&ec->mutex);
1336
1337 if (ec->global_lock) {
1338 acpi_status status;
1339
1340 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
1341 if (ACPI_FAILURE(status)) {
1342 result = -ENODEV;
1343 goto unlock;
1344 }
1345 }
1346
1347 if (ec->busy_polling || bits > 8)
1348 acpi_ec_burst_enable(ec);
1349
1350 for (i = 0; i < bytes; ++i, ++address, ++value) {
1351 result = (function == ACPI_READ) ?
1352 acpi_ec_read_unlocked(ec, address, value) :
1353 acpi_ec_write_unlocked(ec, address, *value);
1354 if (result < 0)
1355 break;
1356 }
1357
1358 if (ec->busy_polling || bits > 8)
1359 acpi_ec_burst_disable(ec);
1360
1361 if (ec->global_lock)
1362 acpi_release_global_lock(glk);
1363
1364unlock:
1365 mutex_unlock(&ec->mutex);
1366
1367 switch (result) {
1368 case -EINVAL:
1369 return AE_BAD_PARAMETER;
1370 case -ENODEV:
1371 return AE_NOT_FOUND;
1372 case -ETIME:
1373 return AE_TIME;
1374 case 0:
1375 return AE_OK;
1376 default:
1377 return AE_ERROR;
1378 }
1379}
1380
1381/* --------------------------------------------------------------------------
1382 * Driver Interface
1383 * -------------------------------------------------------------------------- */
1384
1385static acpi_status
1386ec_parse_io_ports(struct acpi_resource *resource, void *context);
1387
1388static void acpi_ec_free(struct acpi_ec *ec)
1389{
1390 if (first_ec == ec)
1391 first_ec = NULL;
1392 if (boot_ec == ec)
1393 boot_ec = NULL;
1394 kfree(ec);
1395}
1396
1397static struct acpi_ec *acpi_ec_alloc(void)
1398{
1399 struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
1400
1401 if (!ec)
1402 return NULL;
1403 mutex_init(&ec->mutex);
1404 init_waitqueue_head(&ec->wait);
1405 INIT_LIST_HEAD(&ec->list);
1406 spin_lock_init(&ec->lock);
1407 INIT_WORK(&ec->work, acpi_ec_event_handler);
1408 ec->timestamp = jiffies;
1409 ec->busy_polling = true;
1410 ec->polling_guard = 0;
1411 return ec;
1412}
1413
1414static acpi_status
1415acpi_ec_register_query_methods(acpi_handle handle, u32 level,
1416 void *context, void **return_value)
1417{
1418 char node_name[5];
1419 struct acpi_buffer buffer = { sizeof(node_name), node_name };
1420 struct acpi_ec *ec = context;
1421 int value = 0;
1422 acpi_status status;
1423
1424 status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
1425
1426 if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
1427 acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
1428 return AE_OK;
1429}
1430
1431static acpi_status
1432ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
1433{
1434 acpi_status status;
1435 unsigned long long tmp = 0;
1436 struct acpi_ec *ec = context;
1437
1438 /* Clear the addr values; ec_parse_io_ports() depends on them. */
1439 ec->command_addr = ec->data_addr = 0;
1440
1441 status = acpi_walk_resources(handle, METHOD_NAME__CRS,
1442 ec_parse_io_ports, ec);
1443 if (ACPI_FAILURE(status))
1444 return status;
1445 if (ec->data_addr == 0 || ec->command_addr == 0)
1446 return AE_OK;
1447
1448 if (boot_ec && boot_ec_is_ecdt && EC_FLAGS_IGNORE_DSDT_GPE) {
1449 /*
1450 * Always inherit the GPE number setting from the ECDT
1451 * EC.
1452 */
1453 ec->gpe = boot_ec->gpe;
1454 } else {
1455 /* Get GPE bit assignment (EC events). */
1456 /* TODO: Add support for _GPE returning a package */
1457 status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
1458 if (ACPI_FAILURE(status))
1459 return status;
1460 ec->gpe = tmp;
1461 }
1462 /* Use the global lock for all EC transactions? */
1463 tmp = 0;
1464 acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
1465 ec->global_lock = tmp;
1466 ec->handle = handle;
1467 return AE_CTRL_TERMINATE;
1468}
1469
1470/*
1471 * Note: This function returns an error code only when the address space
1472 * handler is not installed, which means "not able to handle
1473 * transactions".
1474 */
1475static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
1476{
1477 acpi_status status;
1478
1479 acpi_ec_start(ec, false);
1480
1481 if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
1482 acpi_ec_enter_noirq(ec);
1483 status = acpi_install_address_space_handler(ec->handle,
1484 ACPI_ADR_SPACE_EC,
1485 &acpi_ec_space_handler,
1486 NULL, ec);
1487 if (ACPI_FAILURE(status)) {
1488 if (status == AE_NOT_FOUND) {
1489 /*
1490 * Maybe the OS failed in evaluating the _REG
1491 * object. The AE_NOT_FOUND error will be
1492 * ignored and the OS will continue to
1493 * initialize the EC.
1494 */
1495 pr_err("Failed to evaluate the _REG object"
1496 " of the EC device; a broken BIOS is suspected.\n");
1497 } else {
1498 acpi_ec_stop(ec, false);
1499 return -ENODEV;
1500 }
1501 }
1502 set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
1503 }
1504
1505 if (!handle_events)
1506 return 0;
1507
1508 if (!test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) {
1509 /* Find and register all query methods */
1510 acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
1511 acpi_ec_register_query_methods,
1512 NULL, ec, NULL);
1513 set_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags);
1514 }
1515 if (!test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
1516 status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
1517 ACPI_GPE_EDGE_TRIGGERED,
1518 &acpi_ec_gpe_handler, ec);
1519 /* This is not fatal as we can poll EC events */
1520 if (ACPI_SUCCESS(status)) {
1521 set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
1522 acpi_ec_leave_noirq(ec);
1523 if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
1524 ec->reference_count >= 1)
1525 acpi_ec_enable_gpe(ec, true);
1526 }
1527 }
1528 /* EC is fully operational, allow queries */
1529 acpi_ec_enable_event(ec);
1530
1531 return 0;
1532}
1533
1534static void ec_remove_handlers(struct acpi_ec *ec)
1535{
1536 if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
1537 if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
1538 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
1539 pr_err("failed to remove space handler\n");
1540 clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
1541 }
1542
1543 /*
1544 * Stops handling the EC transactions after removing the operation
1545 * region handler. This is required because _REG(DISCONNECT)
1546 * invoked during the removal can result in new EC transactions.
1547 *
1548 * Flushes the EC requests and thus disables the GPE before
1549 * removing the GPE handler. This is required by the current ACPICA
1550 * GPE core. ACPICA GPE core will automatically disable a GPE when
1551 * it is indicated but there is no way to handle it. So the drivers
1552 * must disable the GPEs prior to removing the GPE handlers.
1553 */
1554 acpi_ec_stop(ec, false);
1555
1556 if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
1557 if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
1558 &acpi_ec_gpe_handler)))
1559 pr_err("failed to remove gpe handler\n");
1560 clear_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
1561 }
1562 if (test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) {
1563 acpi_ec_remove_query_handlers(ec, true, 0);
1564 clear_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags);
1565 }
1566}
1567
1568static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
1569{
1570 int ret;
1571
1572 ret = ec_install_handlers(ec, handle_events);
1573 if (ret)
1574 return ret;
1575
1576 /* First EC capable of handling transactions */
1577 if (!first_ec) {
1578 first_ec = ec;
1579 acpi_handle_info(first_ec->handle, "Used as first EC\n");
1580 }
1581
1582 acpi_handle_info(ec->handle,
1583 "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
1584 ec->gpe, ec->command_addr, ec->data_addr);
1585 return ret;
1586}
1587
1588static bool acpi_ec_ecdt_get_handle(acpi_handle *phandle)
1589{
1590 struct acpi_table_ecdt *ecdt_ptr;
1591 acpi_status status;
1592 acpi_handle handle;
1593
1594 status = acpi_get_table(ACPI_SIG_ECDT, 1,
1595 (struct acpi_table_header **)&ecdt_ptr);
1596 if (ACPI_FAILURE(status))
1597 return false;
1598
1599 status = acpi_get_handle(NULL, ecdt_ptr->id, &handle);
1600 if (ACPI_FAILURE(status))
1601 return false;
1602
1603 *phandle = handle;
1604 return true;
1605}
1606
1607static int acpi_ec_add(struct acpi_device *device)
1608{
1609 struct acpi_ec *ec = NULL;
1610 bool dep_update = true;
1611 acpi_status status;
1612 int ret;
1613
1614 strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
1615 strcpy(acpi_device_class(device), ACPI_EC_CLASS);
1616
1617 if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) {
1618 boot_ec_is_ecdt = true;
1619 ec = boot_ec;
1620 dep_update = false;
1621 } else {
1622 ec = acpi_ec_alloc();
1623 if (!ec)
1624 return -ENOMEM;
1625
1626 status = ec_parse_device(device->handle, 0, ec, NULL);
1627 if (status != AE_CTRL_TERMINATE) {
1628 ret = -EINVAL;
1629 goto err_alloc;
1630 }
1631
1632 if (boot_ec && ec->command_addr == boot_ec->command_addr &&
1633 ec->data_addr == boot_ec->data_addr) {
1634 boot_ec_is_ecdt = false;
1635 /*
1636 * Trust PNP0C09 namespace location rather than
1637 * ECDT ID. But trust ECDT GPE rather than _GPE
1638 * because of ASUS quirks, so do not change
1639 * boot_ec->gpe to ec->gpe.
1640 */
1641 boot_ec->handle = ec->handle;
1642 acpi_handle_debug(ec->handle, "duplicated.\n");
1643 acpi_ec_free(ec);
1644 ec = boot_ec;
1645 }
1646 }
1647
1648 ret = acpi_ec_setup(ec, true);
1649 if (ret)
1650 goto err_query;
1651
1652 if (ec == boot_ec)
1653 acpi_handle_info(boot_ec->handle,
1654 "Boot %s EC used to handle transactions and events\n",
1655 boot_ec_is_ecdt ? "ECDT" : "DSDT");
1656
1657 device->driver_data = ec;
1658
1659 ret = !!request_region(ec->data_addr, 1, "EC data");
1660 WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
1661 ret = !!request_region(ec->command_addr, 1, "EC cmd");
1662 WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
1663
1664 if (dep_update) {
1665 /* Reprobe devices depending on the EC */
1666 acpi_walk_dep_device_list(ec->handle);
1667 }
1668 acpi_handle_debug(ec->handle, "enumerated.\n");
1669 return 0;
1670
1671err_query:
1672 if (ec != boot_ec)
1673 acpi_ec_remove_query_handlers(ec, true, 0);
1674err_alloc:
1675 if (ec != boot_ec)
1676 acpi_ec_free(ec);
1677 return ret;
1678}
1679
1680static int acpi_ec_remove(struct acpi_device *device)
1681{
1682 struct acpi_ec *ec;
1683
1684 if (!device)
1685 return -EINVAL;
1686
1687 ec = acpi_driver_data(device);
1688 release_region(ec->data_addr, 1);
1689 release_region(ec->command_addr, 1);
1690 device->driver_data = NULL;
1691 if (ec != boot_ec) {
1692 ec_remove_handlers(ec);
1693 acpi_ec_free(ec);
1694 }
1695 return 0;
1696}
1697
1698static acpi_status
1699ec_parse_io_ports(struct acpi_resource *resource, void *context)
1700{
1701 struct acpi_ec *ec = context;
1702
1703 if (resource->type != ACPI_RESOURCE_TYPE_IO)
1704 return AE_OK;
1705
1706 /*
1707 * The first address region returned is the data port, and
1708 * the second address region returned is the status/command
1709 * port.
1710 */
1711 if (ec->data_addr == 0)
1712 ec->data_addr = resource->data.io.minimum;
1713 else if (ec->command_addr == 0)
1714 ec->command_addr = resource->data.io.minimum;
1715 else
1716 return AE_CTRL_TERMINATE;
1717
1718 return AE_OK;
1719}
1720
1721static const struct acpi_device_id ec_device_ids[] = {
1722 {"PNP0C09", 0},
1723 {ACPI_ECDT_HID, 0},
1724 {"", 0},
1725};
1726
1727/*
1728 * This function is not Windows-compatible as Windows never enumerates the
1729 * namespace EC before the main ACPI device enumeration process. It is
1730 * retained for historical reasons and will be deprecated in the future.
1731 */
1732void __init acpi_ec_dsdt_probe(void)
1733{
1734 struct acpi_ec *ec;
1735 acpi_status status;
1736 int ret;
1737
1738 /*
1739 * If a platform has ECDT, there is no need to proceed as the
1740 * following probe is not a part of the ACPI device enumeration,
1741 * executing _STA is not safe, and thus this probe risks
1742 * picking up an invalid EC device.
1743 */
1744 if (boot_ec)
1745 return;
1746
1747 ec = acpi_ec_alloc();
1748 if (!ec)
1749 return;
1750
1751 /*
1752 * At this point, the namespace is initialized, so start to find
1753 * the namespace objects.
1754 */
1755 status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, ec, NULL);
1756 if (ACPI_FAILURE(status) || !ec->handle) {
1757 acpi_ec_free(ec);
1758 return;
1759 }
1760
1761 /*
1762 * When the DSDT EC is available, always re-configure boot EC to
1763 * have _REG evaluated. _REG can only be evaluated after the
1764 * namespace initialization.
1765 * At this point, the GPE is not fully initialized, so do not
1766 * handle the events.
1767 */
1768 ret = acpi_ec_setup(ec, false);
1769 if (ret) {
1770 acpi_ec_free(ec);
1771 return;
1772 }
1773
1774 boot_ec = ec;
1775
1776 acpi_handle_info(ec->handle,
1777 "Boot DSDT EC used to handle transactions\n");
1778}
1779
1780/*
1781 * If the DSDT EC is not functioning, we still need to prepare a fully
1782 * functioning ECDT EC first in order to handle the events.
1783 * https://bugzilla.kernel.org/show_bug.cgi?id=115021
1784 */
1785static int __init acpi_ec_ecdt_start(void)
1786{
1787 acpi_handle handle;
1788
1789 if (!boot_ec)
1790 return -ENODEV;
1791 /* In case acpi_ec_ecdt_start() is called after acpi_ec_add() */
1792 if (!boot_ec_is_ecdt)
1793 return -ENODEV;
1794
1795 /*
1796 * At this point, the namespace and the GPE are initialized, so
1797 * start to find the namespace objects and handle the events.
1798 *
1799 * Note: ec->handle can be valid if this function is called after
1800 * acpi_ec_add(), hence the fast path.
1801 */
1802 if (boot_ec->handle == ACPI_ROOT_OBJECT) {
1803 if (!acpi_ec_ecdt_get_handle(&handle))
1804 return -ENODEV;
1805 boot_ec->handle = handle;
1806 }
1807
1808 /* Register to ACPI bus with PM ops attached */
1809 return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
1810}
1811
1812#if 0
1813/*
1814 * Some EC firmware variations refuse to respond to QR_EC when SCI_EVT is
1815 * not set; in that case, we complete the QR_EC without issuing it to the
1816 * firmware.
1817 * https://bugzilla.kernel.org/show_bug.cgi?id=82611
1818 * https://bugzilla.kernel.org/show_bug.cgi?id=97381
1819 */
1820static int ec_flag_query_handshake(const struct dmi_system_id *id)
1821{
1822 pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
1823 EC_FLAGS_QUERY_HANDSHAKE = 1;
1824 return 0;
1825}
1826#endif
1827
1828/*
1829 * On some hardware it is necessary to clear events accumulated by the EC during
1830 * sleep. If too many events accumulate, these ECs stop reporting GPEs
1831 * until they are manually polled (e.g. Samsung Series 5/9 notebooks).
1832 *
1833 * https://bugzilla.kernel.org/show_bug.cgi?id=44161
1834 *
1835 * Ideally, the EC should also be instructed NOT to accumulate events during
1836 * sleep (which Windows seems to do somehow), but the interface to control this
1837 * behaviour is not known at this time.
1838 *
1839 * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
1840 * however it is very likely that other Samsung models are affected.
1841 *
1842 * On systems which don't accumulate _Q events during sleep, this extra check
1843 * should be harmless.
1844 */
1845static int ec_clear_on_resume(const struct dmi_system_id *id)
1846{
1847 pr_debug("Detected system needing EC poll on resume.\n");
1848 EC_FLAGS_CLEAR_ON_RESUME = 1;
1849 ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
1850 return 0;
1851}
1852
1853/*
1854 * Some ECDTs contain wrong register addresses.
1855 * MSI MS-171F
1856 * https://bugzilla.kernel.org/show_bug.cgi?id=12461
1857 */
1858static int ec_correct_ecdt(const struct dmi_system_id *id)
1859{
1860 pr_debug("Detected system needing ECDT address correction.\n");
1861 EC_FLAGS_CORRECT_ECDT = 1;
1862 return 0;
1863}
1864
1865/*
1866 * Some DSDTs contain wrong GPE setting.
1867 * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD
1868 * https://bugzilla.kernel.org/show_bug.cgi?id=195651
1869 */
1870static int ec_honor_ecdt_gpe(const struct dmi_system_id *id)
1871{
1872 pr_debug("Detected system that needs to ignore the DSDT GPE setting.\n");
1873 EC_FLAGS_IGNORE_DSDT_GPE = 1;
1874 return 0;
1875}

static const struct dmi_system_id ec_dmi_table[] __initconst = {
	{
		ec_correct_ecdt, "MSI MS-171F", {
			DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
			DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
	{
		ec_honor_ecdt_gpe, "ASUS FX502VD", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "FX502VD"),}, NULL},
	{
		ec_honor_ecdt_gpe, "ASUS FX502VE", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "FX502VE"),}, NULL},
	{
		ec_honor_ecdt_gpe, "ASUS GL702VMK", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL},
	{
		ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BA", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "X505BA"),}, NULL},
	{
		ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BP", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "X505BP"),}, NULL},
	{
		ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BA", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "X542BA"),}, NULL},
	{
		ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BP", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "X542BP"),}, NULL},
	{
		ec_honor_ecdt_gpe, "ASUS X550VXK", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL},
	{
		ec_honor_ecdt_gpe, "ASUS X580VD", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
	{
		ec_clear_on_resume, "Samsung hardware", {
			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
	{},
};
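
/*
 * Extending the quirk table follows the same pattern. A hypothetical entry
 * (the vendor and product strings below are placeholders, not a real quirk)
 * for another machine with a swapped ECDT would read:
 *
 *	{
 *		ec_correct_ecdt, "Some Vendor Some-Model", {
 *			DMI_MATCH(DMI_SYS_VENDOR, "Some Vendor"),
 *			DMI_MATCH(DMI_PRODUCT_NAME, "Some-Model"),}, NULL},
 *
 * dmi_check_system() invokes the callback of every entry whose DMI_MATCH
 * patterns all match the firmware-provided DMI fields.
 */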

void __init acpi_ec_ecdt_probe(void)
{
	struct acpi_table_ecdt *ecdt_ptr;
	struct acpi_ec *ec;
	acpi_status status;
	int ret;

	/* Generate a boot EC context. */
	dmi_check_system(ec_dmi_table);
	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_FAILURE(status))
		return;

	if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) {
		/*
		 * Asus X50GL:
		 * https://bugzilla.kernel.org/show_bug.cgi?id=11880
		 */
		return;
	}

	ec = acpi_ec_alloc();
	if (!ec)
		return;

	if (EC_FLAGS_CORRECT_ECDT) {
		ec->command_addr = ecdt_ptr->data.address;
		ec->data_addr = ecdt_ptr->control.address;
	} else {
		ec->command_addr = ecdt_ptr->control.address;
		ec->data_addr = ecdt_ptr->data.address;
	}
	ec->gpe = ecdt_ptr->gpe;
	ec->handle = ACPI_ROOT_OBJECT;

	/*
	 * At this point, the namespace is not initialized, so do not look up
	 * the namespace objects or handle events.
	 */
	ret = acpi_ec_setup(ec, false);
	if (ret) {
		acpi_ec_free(ec);
		return;
	}

	boot_ec = ec;
	boot_ec_is_ecdt = true;

	pr_info("Boot ECDT EC used to handle transactions\n");
}
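
/*
 * On most x86 boards the ECDT describes the legacy 0x62/0x66 port pair, so
 * after a successful probe the boot EC typically ends up looking like the
 * sketch below (the port values are common defaults, not guaranteed):
 *
 *	ec->command_addr == 0x66;	// EC_SC: status/command register
 *	ec->data_addr    == 0x62;	// EC_DATA: data register
 *	ec->handle       == ACPI_ROOT_OBJECT;	// replaced with the real
 *						// handle by acpi_ec_ecdt_start()
 */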

#ifdef CONFIG_PM_SLEEP
static int acpi_ec_suspend(struct device *dev)
{
	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

	if (!pm_suspend_no_platform() && ec_freeze_events)
		acpi_ec_disable_event(ec);
	return 0;
}

static int acpi_ec_suspend_noirq(struct device *dev)
{
	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

	/*
	 * The SCI handler doesn't run at this point, so the GPE can be
	 * masked at the low level without side effects.
	 */
	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	    ec->reference_count >= 1)
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);

	acpi_ec_enter_noirq(ec);

	return 0;
}

static int acpi_ec_resume_noirq(struct device *dev)
{
	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

	acpi_ec_leave_noirq(ec);

	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	    ec->reference_count >= 1)
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);

	return 0;
}

static int acpi_ec_resume(struct device *dev)
{
	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

	acpi_ec_enable_event(ec);
	return 0;
}

void acpi_ec_mark_gpe_for_wake(void)
{
	if (first_ec && !ec_no_wakeup)
		acpi_mark_gpe_for_wake(NULL, first_ec->gpe);
}
EXPORT_SYMBOL_GPL(acpi_ec_mark_gpe_for_wake);

void acpi_ec_set_gpe_wake_mask(u8 action)
{
	if (pm_suspend_no_platform() && first_ec && !ec_no_wakeup)
		acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
}

bool acpi_ec_dispatch_gpe(void)
{
	bool work_in_progress;
	u32 ret;

	if (!first_ec)
		return acpi_any_gpe_status_set(U32_MAX);

	/*
	 * Report wakeup if the status bit is set for any enabled GPE other
	 * than the EC one.
	 */
	if (acpi_any_gpe_status_set(first_ec->gpe))
		return true;

	/*
	 * Dispatch the EC GPE in-band, but do not report wakeup in any case
	 * to allow the caller to process events properly after that.
	 */
	ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
	if (ret == ACPI_INTERRUPT_HANDLED)
		pm_pr_dbg("EC GPE dispatched\n");

	/* Drain EC work. */
	do {
		acpi_ec_flush_work();

		pm_pr_dbg("ACPI EC work flushed\n");

		spin_lock_irq(&first_ec->lock);

		work_in_progress = first_ec->events_in_progress +
			first_ec->queries_in_progress > 0;

		spin_unlock_irq(&first_ec->lock);
	} while (work_in_progress && !pm_wakeup_pending());

	return false;
}
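
/*
 * acpi_ec_dispatch_gpe() serves the suspend-to-idle wakeup path. A simplified
 * sketch of its caller (acpi_s2idle_wake() in drivers/acpi/sleep.c is the
 * real one; suspended and wait_for_irq() are placeholders here):
 *
 *	while (suspended) {
 *		wait_for_irq();
 *		if (acpi_ec_dispatch_gpe())
 *			break;	// a non-EC GPE is pending: genuine wakeup
 *		// Only the EC was active; its events were already handled
 *		// in-band above, so go back to sleep silently.
 *	}
 */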
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops acpi_ec_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
	SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};

static int param_set_event_clearing(const char *val,
				    const struct kernel_param *kp)
{
	int result = 0;

	if (!strncmp(val, "status", sizeof("status") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
		pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
	} else if (!strncmp(val, "query", sizeof("query") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
		pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
	} else if (!strncmp(val, "event", sizeof("event") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
		pr_info("Assuming SCI_EVT clearing on event reads\n");
	} else {
		result = -EINVAL;
	}
	return result;
}
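
/*
 * Usage note: as ec.c is linked into the "acpi" module, the parameter set up
 * below is expected to surface as acpi.ec_event_clearing (assumed; verify on
 * the target kernel):
 *
 *	acpi.ec_event_clearing=query	on the kernel command line, or
 *	echo status > /sys/module/acpi/parameters/ec_event_clearing
 *
 * Note that the strncmp() prefix matching above also accepts longer strings
 * such as "queryfoo"; only the listed prefixes are meaningful.
 */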

static int param_get_event_clearing(char *buffer,
				    const struct kernel_param *kp)
{
	switch (ec_event_clearing) {
	case ACPI_EC_EVT_TIMING_STATUS:
		return sprintf(buffer, "status");
	case ACPI_EC_EVT_TIMING_QUERY:
		return sprintf(buffer, "query");
	case ACPI_EC_EVT_TIMING_EVENT:
		return sprintf(buffer, "event");
	default:
		return sprintf(buffer, "invalid");
	}
	/* Not reached: every case above returns. */
}

module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
		  NULL, 0644);
MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");

static struct acpi_driver acpi_ec_driver = {
	.name = "ec",
	.class = ACPI_EC_CLASS,
	.ids = ec_device_ids,
	.ops = {
		.add = acpi_ec_add,
		.remove = acpi_ec_remove,
	},
	.drv.pm = &acpi_ec_pm,
};

static void acpi_ec_destroy_workqueues(void)
{
	if (ec_wq) {
		destroy_workqueue(ec_wq);
		ec_wq = NULL;
	}
	if (ec_query_wq) {
		destroy_workqueue(ec_query_wq);
		ec_query_wq = NULL;
	}
}

static int acpi_ec_init_workqueues(void)
{
	if (!ec_wq)
		ec_wq = alloc_ordered_workqueue("kec", 0);

	if (!ec_query_wq)
		ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);

	if (!ec_wq || !ec_query_wq) {
		acpi_ec_destroy_workqueues();
		return -ENODEV;
	}
	return 0;
}
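
/*
 * Two workqueues on purpose: ec_wq is ordered, so transaction-level event
 * work executes strictly one item at a time, while ec_query_wq may run up to
 * ec_max_queries _Qxx handlers concurrently, since those call back into AML
 * and can block for a while. A queueing site would then look like the sketch
 * below (q->work and ec->work are assumed work_struct members):
 *
 *	queue_work(ec_query_wq, &q->work);	// concurrent _Qxx handler
 *	queue_work(ec_wq, &ec->work);		// serialized event work
 */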

static const struct dmi_system_id acpi_ec_no_wakeup[] = {
	{
		.ident = "Thinkpad X1 Carbon 6th",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
		},
	},
	{
		.ident = "ThinkPad X1 Yoga 3rd",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"),
		},
	},
	{ },
};

int __init acpi_ec_init(void)
{
	int result;
	int ecdt_fail, dsdt_fail;

	result = acpi_ec_init_workqueues();
	if (result)
		return result;

	/*
	 * Disable EC wakeup on the following systems to prevent periodic
	 * wakeups from the EC GPE.
	 */
	if (dmi_check_system(acpi_ec_no_wakeup)) {
		ec_no_wakeup = true;
		pr_debug("Disabling EC wakeup on suspend-to-idle\n");
	}

	/* Drivers must be started after acpi_ec_query_init() */
	dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
	/*
	 * Register the ECDT EC with the ACPI bus only when the PNP0C09 probe
	 * fails. This is useful for platforms (confirmed on ASUS X550ZE) with
	 * a valid ECDT but invalid DSDT settings.
	 * https://bugzilla.kernel.org/show_bug.cgi?id=196847
	 */
	ecdt_fail = acpi_ec_ecdt_start();
	return ecdt_fail && dsdt_fail ? -ENODEV : 0;
}

/* The EC driver is currently not unloadable. */
#if 0
static void __exit acpi_ec_exit(void)
{
	acpi_bus_unregister_driver(&acpi_ec_driver);
	acpi_ec_destroy_workqueues();
}
#endif /* 0 */