From 6d392d8daa7514a431678521c2af8be10fc31bc1 Mon Sep 17 00:00:00 2001
From: Mika Westerberg
Date: Tue, 14 Nov 2023 14:06:11 +0200
Subject: ACPI: Run USB4 _OSC() first with query bit set

The platform can deny certain tunneling from the OS, and it does that
by clearing the control bits it does not want the OS to get and
returning with the OSC_CAPABILITIES_MASK_ERROR bit set. Currently we do
not handle this properly, so if this happens, for example when the
platform denies PCIe tunneling, we simply fail the whole negotiation
and fall back to whatever the Thunderbolt driver does to figure out
whether the controller is running the firmware connection manager or
not.

However, we should honor what the platform returns. For this reason,
run the USB4 _OSC() first with the query bit set, then take the
returned control double word (which may have some of its bits cleared
by the platform) and run _OSC() a second time with the query bit clear.

While there, remove an extra space from the assignment of the control
double word.

Reported-by: NaamaX Shachar
Signed-off-by: Mika Westerberg
Signed-off-by: Rafael J. Wysocki
---
 drivers/acpi/bus.c | 32 +++++++++++++++++++++++++++++---
 1 file changed, 29 insertions(+), 3 deletions(-)

diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 72e64c0718c9..569bd15f211b 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -408,7 +408,7 @@ static void acpi_bus_decode_usb_osc(const char *msg, u32 bits)
 static u8 sb_usb_uuid_str[] = "23A0D13A-26AB-486C-9C5F-0FFA525A575A";
 static void acpi_bus_osc_negotiate_usb_control(void)
 {
-        u32 capbuf[3];
+        u32 capbuf[3], *capbuf_ret;
         struct acpi_osc_context context = {
                 .uuid_str = sb_usb_uuid_str,
                 .rev = 1,
@@ -428,7 +428,12 @@ static void acpi_bus_osc_negotiate_usb_control(void)
         control = OSC_USB_USB3_TUNNELING | OSC_USB_DP_TUNNELING |
                   OSC_USB_PCIE_TUNNELING | OSC_USB_XDOMAIN;
 
-        capbuf[OSC_QUERY_DWORD] = 0;
+        /*
+         * Run _OSC first with query bit set, trying to get control over
+         * all tunneling. The platform can then clear out bits in the
+         * control dword that it does not want to grant to the OS.
+         */
+        capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
         capbuf[OSC_SUPPORT_DWORD] = 0;
         capbuf[OSC_CONTROL_DWORD] = control;
 
@@ -441,8 +446,29 @@ static void acpi_bus_osc_negotiate_usb_control(void)
                 goto out_free;
         }
 
+        /*
+         * Run _OSC again now with query bit clear and the control dword
+         * matching what the platform granted (which may not have all
+         * the control bits set).
+         */
+        capbuf_ret = context.ret.pointer;
+
+        capbuf[OSC_QUERY_DWORD] = 0;
+        capbuf[OSC_CONTROL_DWORD] = capbuf_ret[OSC_CONTROL_DWORD];
+
+        kfree(context.ret.pointer);
+
+        status = acpi_run_osc(handle, &context);
+        if (ACPI_FAILURE(status))
+                return;
+
+        if (context.ret.length != sizeof(capbuf)) {
+                pr_info("USB4 _OSC: returned invalid length buffer\n");
+                goto out_free;
+        }
+
         osc_sb_native_usb4_control =
-                control &  acpi_osc_ctx_get_pci_control(&context);
+                control & acpi_osc_ctx_get_pci_control(&context);
 
         acpi_bus_decode_usb_osc("USB4 _OSC: OS supports", control);
         acpi_bus_decode_usb_osc("USB4 _OSC: OS controls",
-- cgit
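The change above is an instance of the generic two-pass _OSC protocol: one
call with the query bit set to learn which control bits the platform will
grant, then one call with the query bit clear requesting exactly the granted
set. Below is a minimal sketch of that pattern for readers unfamiliar with
it; it uses the kernel's acpi_run_osc() helper and OSC_* dword indices, but
the wrapper function itself is hypothetical and it omits the return-length
validation the real code performs.

#include <linux/acpi.h>
#include <linux/slab.h>

/* Hypothetical wrapper illustrating two-pass _OSC negotiation. */
static acpi_status negotiate_usb4_control(acpi_handle handle, u32 wanted)
{
        u32 capbuf[3];
        struct acpi_osc_context context = {
                .uuid_str = "23A0D13A-26AB-486C-9C5F-0FFA525A575A",
                .rev = 1,
                .cap.length = sizeof(capbuf),
                .cap.pointer = capbuf,
        };
        acpi_status status;
        u32 *ret;

        /* Pass 1: query which of the wanted control bits may be granted. */
        capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
        capbuf[OSC_SUPPORT_DWORD] = 0;
        capbuf[OSC_CONTROL_DWORD] = wanted;

        status = acpi_run_osc(handle, &context);
        if (ACPI_FAILURE(status))
                return status;

        ret = context.ret.pointer;

        /* Pass 2: request only the bits the platform left set. */
        capbuf[OSC_QUERY_DWORD] = 0;
        capbuf[OSC_CONTROL_DWORD] = ret[OSC_CONTROL_DWORD];
        kfree(context.ret.pointer);

        return acpi_run_osc(handle, &context);
}

Freeing the first return buffer before the second call matters because
acpi_run_osc() allocates a fresh return buffer on every successful
invocation.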
From 7a36b901a6eb0e9945341db71ed3c45c7721cfa9 Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Mon, 27 Nov 2023 20:57:43 +0100
Subject: ACPI: OSL: Use a threaded interrupt handler for SCI

In the current arrangement, all of the acpi_ev_sci_xrupt_handler() code
is run as an interrupt handler for the SCI, in interrupt context. Among
other things, this causes it to run with local interrupts off, which
can be problematic if many GPEs are enabled and they are located in the
I/O address space, for example (because in that case local interrupts
will be off for the combined duration of all of the GPE hardware
accesses carried out while handling an SCI, which may be quite a bit of
time in extreme scenarios).

However, there is no particular reason why the code in question really
needs to run in interrupt context and, in particular, it has no
specific reason to run with local interrupts off. The only real
requirement is to prevent multiple instances of it from running in
parallel with each other, but that can be achieved regardless.

For this reason, use request_threaded_irq() instead of request_irq()
for the ACPI SCI and pass IRQF_ONESHOT to it in flags, to indicate that
the interrupt needs to be masked while its handling thread is running,
so as to prevent it from re-triggering while it is being handled (and
in particular until the final handled/not handled outcome is
determined).

While at it, drop a redundant local variable from acpi_irq().

Signed-off-by: Rafael J. Wysocki
Tested-by: Mario Limonciello
Tested-by: Mika Westerberg
---
 drivers/acpi/osl.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c09cc3c68633..d56dda795118 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -544,11 +544,7 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
 
 static irqreturn_t acpi_irq(int irq, void *dev_id)
 {
-        u32 handled;
-
-        handled = (*acpi_irq_handler) (acpi_irq_context);
-
-        if (handled) {
+        if ((*acpi_irq_handler)(acpi_irq_context)) {
                 acpi_irq_handled++;
                 return IRQ_HANDLED;
         } else {
@@ -582,7 +578,8 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
         acpi_irq_handler = handler;
         acpi_irq_context = context;
 
-        if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
+        if (request_threaded_irq(irq, NULL, acpi_irq, IRQF_SHARED | IRQF_ONESHOT,
+                                 "acpi", acpi_irq)) {
                 pr_err("SCI (IRQ%d) allocation failed\n", irq);
                 acpi_irq_handler = NULL;
                 return AE_NOT_ACQUIRED;
-- cgit
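For reference, the pattern being adopted: with a NULL primary handler, the
IRQ core installs a default hardirq handler that merely wakes the handler
thread, and IRQF_ONESHOT (generally required when the primary handler is
NULL) keeps the line masked until the thread function returns. A
self-contained, driver-style sketch; the device structure and all names in
it are invented for illustration:

#include <linux/interrupt.h>

struct my_dev {
        int irq;
        unsigned long events;
};

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
        struct my_dev *dev = dev_id;

        /*
         * Runs in process context with local interrupts enabled, so slow
         * (e.g. port I/O) accesses no longer block interrupts.
         * IRQF_ONESHOT keeps the line masked until this function returns.
         */
        dev->events++;
        return IRQ_HANDLED;
}

static int my_request_irq(struct my_dev *dev)
{
        /* NULL primary handler: the core only wakes my_thread_fn(). */
        return request_threaded_irq(dev->irq, NULL, my_thread_fn,
                                    IRQF_SHARED | IRQF_ONESHOT,
                                    "my_dev", dev);
}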
From 4b3805daaacb2168665c6222f261e68accb120dc Mon Sep 17 00:00:00 2001
From: Yuntao Wang
Date: Mon, 20 Nov 2023 19:41:43 +0800
Subject: ACPI: tables: Correct and clean up the logic of acpi_parse_entries_array()

The original intention of acpi_parse_entries_array() is to return the
number of all matching entries on success. This number may be greater
than the value of the max_entries parameter. When this happens, the
function will output a warning message, indicating that
`count - max_entries` matching entries remain unprocessed and have been
ignored.

However, commit 4ceacd02f5a1 ("ACPI / table: Always count matched and
successfully parsed entries") changed this logic to return the number
of entries successfully processed by the handler. In this case, when
the max_entries parameter is not zero, the number of entries
successfully processed can never be greater than the value of
max_entries. In other words, the expression `count > max_entries` will
always evaluate to false. This means that the logic in the final if
statement will never be executed.

Commit 99b0efd7c886 ("ACPI / tables: do not report the number of
entries ignored by acpi_parse_entries()") mentioned this issue, but it
tried to fix it by removing part of the warning message. This is
meaningless because the pr_warn statement will never be executed in the
first place.

Commit 8726d4f44150 ("ACPI / tables: fix acpi_parse_entries_array() so
it traverses all subtables") introduced an errs variable, which is
intended to make acpi_parse_entries_array() always traverse all of the
subtables, calling as many of the callbacks as possible. However, it
seems that the commit does not achieve this goal. For example, when a
handler returns an error, none of the handlers will be called again in
the subsequent iterations. This result appears to be no different from
before the change.

This patch corrects and cleans up the logic of
acpi_parse_entries_array(), making it return the number of all matching
entries, rather than the number of entries successfully processed by
handlers. Additionally, if an error occurs when executing a handler,
the function will return -EINVAL immediately.

This patch should not affect existing users of
acpi_parse_entries_array().

Signed-off-by: Yuntao Wang
Reviewed-by: Dave Jiang
Signed-off-by: Rafael J. Wysocki
---
 lib/fw_table.c | 30 +++++++++---------------------
 1 file changed, 9 insertions(+), 21 deletions(-)

diff --git a/lib/fw_table.c b/lib/fw_table.c
index 294df54e33b6..c49a09ee3853 100644
--- a/lib/fw_table.c
+++ b/lib/fw_table.c
@@ -85,11 +85,6 @@ acpi_get_subtable_type(char *id)
         return ACPI_SUBTABLE_COMMON;
 }
 
-static __init_or_acpilib bool has_handler(struct acpi_subtable_proc *proc)
-{
-        return proc->handler || proc->handler_arg;
-}
-
 static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
                                           union acpi_subtable_headers *hdr,
                                           unsigned long end)
@@ -133,7 +128,6 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
         unsigned long table_end, subtable_len, entry_len;
         struct acpi_subtable_entry entry;
         int count = 0;
-        int errs = 0;
         int i;
 
         table_end = (unsigned long)table_header + table_header->length;
@@ -145,25 +139,19 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
                 ((unsigned long)table_header + table_size);
         subtable_len = acpi_get_subtable_header_length(&entry);
 
-        while (((unsigned long)entry.hdr) + subtable_len  < table_end) {
-                if (max_entries && count >= max_entries)
-                        break;
-
+        while (((unsigned long)entry.hdr) + subtable_len < table_end) {
                 for (i = 0; i < proc_num; i++) {
                         if (acpi_get_entry_type(&entry) != proc[i].id)
                                 continue;
-                        if (!has_handler(&proc[i]) ||
-                            (!errs &&
-                             call_handler(&proc[i], entry.hdr, table_end))) {
-                                errs++;
-                                continue;
-                        }
+
+                        if (!max_entries || count < max_entries)
+                                if (call_handler(&proc[i], entry.hdr, table_end))
+                                        return -EINVAL;
 
                         proc[i].count++;
+                        count++;
                         break;
                 }
-                if (i != proc_num)
-                        count++;
 
                 /*
                  * If entry->length is 0, break from this loop to avoid
@@ -180,9 +168,9 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
         }
 
         if (max_entries && count > max_entries) {
-                pr_warn("[%4.4s:0x%02x] found the maximum %i entries\n",
-                        id, proc->id, count);
+                pr_warn("[%4.4s:0x%02x] ignored %i entries of %i found\n",
+                        id, proc->id, count - max_entries, count);
         }
 
-        return errs ? -EINVAL : count;
+        return count;
 }
-- cgit
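To make the corrected return-value semantics concrete, here is a
hypothetical caller sketch going through the acpi_table_parse_madt()
wrapper, which funnels into acpi_parse_entries_array(); the handler and
both function names below are made up for illustration:

#include <linux/acpi.h>

/* Hypothetical handler: called once per matching subtable. */
static int __init lapic_handler(union acpi_subtable_headers *header,
                                const unsigned long end)
{
        /* A nonzero return now makes the whole walk fail with -EINVAL. */
        return 0;
}

static void __init count_lapic_example(void)
{
        int ret;

        /*
         * With the corrected logic, a non-negative ret is the number of
         * ALL matching MADT entries, even though max_entries (here 4)
         * caps how many are passed to lapic_handler(). ret > 4 therefore
         * means some matching entries were counted but not handled, which
         * is exactly what the restored warning reports.
         */
        ret = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
                                    lapic_handler, 4);
        if (ret < 0)
                pr_err("MADT parse failed: %d\n", ret);
        else
                pr_info("%d LAPIC entries found\n", ret);
}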
From 392829ede37f36efa2e0f034631594786a9c8139 Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Wed, 29 Nov 2023 14:46:51 +0100
Subject: ACPI: OSL: Rework error handling in acpi_os_execute()

Reduce the number of checks and goto labels related to error handling
in acpi_os_execute() and drop the status local variable, which turns
out to be redundant, from it.

No intentional functional impact.

Signed-off-by: Rafael J. Wysocki
Reviewed-by: Andy Shevchenko
---
 drivers/acpi/osl.c | 23 ++++++++++-------------
 1 file changed, 10 insertions(+), 13 deletions(-)

diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index d56dda795118..603057f6c63e 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1060,7 +1060,6 @@ int __init acpi_debugger_init(void)
 acpi_status acpi_os_execute(acpi_execute_type type,
                             acpi_osd_exec_callback function, void *context)
 {
-        acpi_status status = AE_OK;
         struct acpi_os_dpc *dpc;
         struct workqueue_struct *queue;
         int ret;
@@ -1073,9 +1072,9 @@ acpi_status acpi_os_execute(acpi_execute_type type,
                 ret = acpi_debugger_create_thread(function, context);
                 if (ret) {
                         pr_err("Kernel thread creation failed\n");
-                        status = AE_ERROR;
+                        return AE_ERROR;
                 }
-                goto out_thread;
+                return AE_OK;
         }
 
         /*
@@ -1107,12 +1106,9 @@ acpi_status acpi_os_execute(acpi_execute_type type,
                 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
         } else {
                 pr_err("Unsupported os_execute type %d.\n", type);
-                status = AE_ERROR;
+                goto err;
         }
 
-        if (ACPI_FAILURE(status))
-                goto err_workqueue;
-
         /*
          * On some machines, a software-initiated SMI causes corruption unless
          * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
@@ -1123,13 +1119,14 @@ acpi_status acpi_os_execute(acpi_execute_type type,
         ret = queue_work_on(0, queue, &dpc->work);
         if (!ret) {
                 pr_err("Unable to queue work\n");
-                status = AE_ERROR;
+                goto err;
         }
-err_workqueue:
-        if (ACPI_FAILURE(status))
-                kfree(dpc);
-out_thread:
-        return status;
+
+        return AE_OK;
+
+err:
+        kfree(dpc);
+        return AE_ERROR;
 }
 EXPORT_SYMBOL(acpi_os_execute);
-- cgit
From 3f3a2599374ede5ac47ca89981ff8dd8f304d915 Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Wed, 29 Nov 2023 14:48:22 +0100
Subject: ACPI: OSL: Rearrange workqueue selection in acpi_os_execute()

Replace the 3-branch if () statement used for selecting the target
workqueue in acpi_os_execute() with a switch () one that is more
suitable for this purpose, and carry out the work item initialization
before it to avoid code duplication.

No intentional functional impact.

Signed-off-by: Rafael J. Wysocki
Reviewed-by: Andy Shevchenko
---
 drivers/acpi/osl.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 603057f6c63e..5eacf807d552 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1092,19 +1092,21 @@ acpi_status acpi_os_execute(acpi_execute_type type,
 
         dpc->function = function;
         dpc->context = context;
+        INIT_WORK(&dpc->work, acpi_os_execute_deferred);
 
         /*
          * To prevent lockdep from complaining unnecessarily, make sure that
          * there is a different static lockdep key for each workqueue by using
          * INIT_WORK() for each of them separately.
          */
-        if (type == OSL_NOTIFY_HANDLER) {
+        switch (type) {
+        case OSL_NOTIFY_HANDLER:
                 queue = kacpi_notify_wq;
-                INIT_WORK(&dpc->work, acpi_os_execute_deferred);
-        } else if (type == OSL_GPE_HANDLER) {
+                break;
+        case OSL_GPE_HANDLER:
                 queue = kacpid_wq;
-                INIT_WORK(&dpc->work, acpi_os_execute_deferred);
-        } else {
+                break;
+        default:
                 pr_err("Unsupported os_execute type %d.\n", type);
                 goto err;
         }
-- cgit
From e2ffcda1629012a2c1a3706432bc45fdc899a584 Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Wed, 29 Nov 2023 14:50:54 +0100
Subject: ACPI: OSL: Allow Notify () handlers to run on all CPUs

Notify () handlers, like GPE handlers, are only allowed to run on CPU0
now out of the concern that they might trigger an SMM trap leading to
memory corruption. Namely, in some cases, SMM code might corrupt memory
if not run on CPU0.

However, Notify () handlers are registered by kernel code and they are
not likely to evaluate AML that would trigger an SMM trap. In fact,
many of them don't even evaluate any AML at all and even if they do,
that AML may as well be evaluated in other code paths. In other words,
they are not special from the AML evaluation perspective, so there is
no real reason to treat them in any special way.

Accordingly, allow Notify () handlers, unlike GPE handlers, to be
executed by all CPUs in the system. Also adjust the allocation of the
"notify" workqueue to allow multiple handlers to be executed at the
same time, because they need not be serialized.

Signed-off-by: Rafael J. Wysocki
Reviewed-by: Andy Shevchenko
---
 drivers/acpi/osl.c | 23 ++++++++++-------------
 1 file changed, 10 insertions(+), 13 deletions(-)

diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 5eacf807d552..a55cb578741a 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1061,7 +1061,6 @@ acpi_status acpi_os_execute(acpi_execute_type type,
                             acpi_osd_exec_callback function, void *context)
 {
         struct acpi_os_dpc *dpc;
-        struct workqueue_struct *queue;
         int ret;
 
         ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
@@ -1101,24 +1100,22 @@ acpi_status acpi_os_execute(acpi_execute_type type,
          */
         switch (type) {
         case OSL_NOTIFY_HANDLER:
-                queue = kacpi_notify_wq;
+                ret = queue_work(kacpi_notify_wq, &dpc->work);
                 break;
         case OSL_GPE_HANDLER:
-                queue = kacpid_wq;
+                /*
+                 * On some machines, a software-initiated SMI causes corruption
+                 * unless the SMI runs on CPU 0. An SMI can be initiated by
+                 * any AML, but typically it's done in GPE-related methods that
+                 * are run via workqueues, so we can avoid the known corruption
+                 * cases by always queueing on CPU 0.
+                 */
+                ret = queue_work_on(0, kacpid_wq, &dpc->work);
                 break;
         default:
                 pr_err("Unsupported os_execute type %d.\n", type);
                 goto err;
         }
-
-        /*
-         * On some machines, a software-initiated SMI causes corruption unless
-         * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
-         * typically it's done in GPE-related methods that are run via
-         * workqueues, so we can avoid the known corruption cases by always
-         * queueing on CPU 0.
-         */
-        ret = queue_work_on(0, queue, &dpc->work);
         if (!ret) {
                 pr_err("Unable to queue work\n");
                 goto err;
@@ -1668,7 +1665,7 @@ acpi_status __init acpi_os_initialize(void)
 acpi_status __init acpi_os_initialize1(void)
 {
         kacpid_wq = alloc_workqueue("kacpid", 0, 1);
-        kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
+        kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 0);
         kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
         BUG_ON(!kacpid_wq);
         BUG_ON(!kacpi_notify_wq);
-- cgit
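The mechanics here are plain workqueue API: queue_work() lets any CPU run
the item, queue_work_on(0, ...) pins it to CPU 0, and passing 0 as
max_active to alloc_workqueue() selects the default concurrency limit,
whereas 1 serializes the queue. A small sketch with invented work items:

#include <linux/workqueue.h>

static void my_notify_fn(struct work_struct *work)
{
        /* May run on whichever CPU the workqueue picks. */
}

static void my_gpe_fn(struct work_struct *work)
{
        /* Always entered on CPU 0; see queue_work_on() below. */
}

static DECLARE_WORK(notify_work, my_notify_fn);
static DECLARE_WORK(gpe_work, my_gpe_fn);

static void queue_examples(struct workqueue_struct *notify_wq,
                           struct workqueue_struct *gpe_wq)
{
        /* No CPU restriction, and items may run concurrently. */
        queue_work(notify_wq, &notify_work);

        /* Pinned to CPU 0 to avoid the known SMM corruption cases. */
        queue_work_on(0, gpe_wq, &gpe_work);
}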
From 8e57de43076477c5cce113f2579bef02ce3e8b27 Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Fri, 15 Dec 2023 12:25:15 +0100
Subject: ACPI: OSL: Use spin locks without disabling interrupts

After commit 7a36b901a6eb ("ACPI: OSL: Use a threaded interrupt handler
for SCI"), ACPICA code never runs in a hardirq handler, so it need not
disable interrupts on the local CPU when acquiring a spin lock.

Make it use spin locks without disabling interrupts.

Signed-off-by: Rafael J. Wysocki
---
 drivers/acpi/osl.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index a55cb578741a..70af3fbbebe5 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1515,20 +1515,18 @@ void acpi_os_delete_lock(acpi_spinlock handle)
 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
         __acquires(lockp)
 {
-        acpi_cpu_flags flags;
-
-        spin_lock_irqsave(lockp, flags);
-        return flags;
+        spin_lock(lockp);
+        return 0;
 }
 
 /*
  * Release a spinlock. See above.
  */
-void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
+void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags not_used)
         __releases(lockp)
 {
-        spin_unlock_irqrestore(lockp, flags);
+        spin_unlock(lockp);
 }
 
 #ifndef ACPI_USE_LOCAL_CACHE
-- cgit
From 655a6e7c0d83d47c36218525708c9fcfdd7f4b43 Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Fri, 15 Dec 2023 12:26:33 +0100
Subject: ACPI: EC: Use a threaded handler for dedicated IRQ

After commit 7a36b901a6eb ("ACPI: OSL: Use a threaded interrupt handler
for SCI"), all of the EC code runs in thread context on all systems
where EC events are signaled through a GPE. It may as well run in
thread context on systems using a dedicated IRQ for EC events
signaling, so make it use a threaded handler for that IRQ.

Signed-off-by: Rafael J. Wysocki
---
 drivers/acpi/ec.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index a59c11df7375..02255795b800 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1458,8 +1458,8 @@ static bool install_gpe_event_handler(struct acpi_ec *ec)
 
 static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
 {
-        return request_irq(ec->irq, acpi_ec_irq_handler, IRQF_SHARED,
-                           "ACPI EC", ec) >= 0;
+        return request_threaded_irq(ec->irq, NULL, acpi_ec_irq_handler,
+                                    IRQF_SHARED | IRQF_ONESHOT, "ACPI EC", ec) >= 0;
 }
 
 /**
-- cgit
From eb9299beadbdd7be8de1e97f1059e89bcb64b05d Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Fri, 15 Dec 2023 12:27:30 +0100
Subject: ACPI: EC: Use a spin lock without disabling interrupts

Since all of the ACPI EC driver code runs in thread context after
recent changes, it does not need to disable interrupts on the local CPU
when acquiring a spin lock.

Make it use the spin lock without disabling interrupts.

Signed-off-by: Rafael J. Wysocki
---
 drivers/acpi/ec.c | 112 ++++++++++++++++++++++--------------------------
 1 file changed, 46 insertions(+), 66 deletions(-)

diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 02255795b800..dbdee2924594 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -525,12 +525,10 @@ static void acpi_ec_clear(struct acpi_ec *ec)
 
 static void acpi_ec_enable_event(struct acpi_ec *ec)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&ec->lock, flags);
+        spin_lock(&ec->lock);
         if (acpi_ec_started(ec))
                 __acpi_ec_enable_event(ec);
-        spin_unlock_irqrestore(&ec->lock, flags);
+        spin_unlock(&ec->lock);
 
         /* Drain additional events if hardware requires that */
         if (EC_FLAGS_CLEAR_ON_RESUME)
@@ -546,11 +544,9 @@ static void __acpi_ec_flush_work(void)
 
 static void acpi_ec_disable_event(struct acpi_ec *ec)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&ec->lock, flags);
+        spin_lock(&ec->lock);
         __acpi_ec_disable_event(ec);
-        spin_unlock_irqrestore(&ec->lock, flags);
+        spin_unlock(&ec->lock);
 
         /*
          * When ec_freeze_events is true, we need to flush events in
@@ -571,10 +567,9 @@ void acpi_ec_flush_work(void)
 
 static bool acpi_ec_guard_event(struct acpi_ec *ec)
 {
-        unsigned long flags;
         bool guarded;
 
-        spin_lock_irqsave(&ec->lock, flags);
+        spin_lock(&ec->lock);
         /*
          * If firmware SCI_EVT clearing timing is "event", we actually
          * don't know when the SCI_EVT will be cleared by firmware after
@@ -590,31 +585,29 @@ static bool acpi_ec_guard_event(struct acpi_ec *ec)
         guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
                 ec->event_state != EC_EVENT_READY &&
                 (!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY);
-        spin_unlock_irqrestore(&ec->lock, flags);
+        spin_unlock(&ec->lock);
         return guarded;
 }
 
 static int ec_transaction_polled(struct acpi_ec *ec)
 {
-        unsigned long flags;
         int ret = 0;
 
-        spin_lock_irqsave(&ec->lock, flags);
+        spin_lock(&ec->lock);
         if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
                 ret = 1;
-        spin_unlock_irqrestore(&ec->lock, flags);
+        spin_unlock(&ec->lock);
         return ret;
 }
 
 static int ec_transaction_completed(struct acpi_ec *ec)
 {
-        unsigned long flags;
         int ret = 0;
 
-        spin_lock_irqsave(&ec->lock, flags);
+        spin_lock(&ec->lock);
         if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
                 ret = 1;
-        spin_unlock_irqrestore(&ec->lock, flags);
+        spin_unlock(&ec->lock);
         return ret;
 }
 
@@ -756,7 +749,6 @@ static int ec_guard(struct acpi_ec *ec)
 
 static int ec_poll(struct acpi_ec *ec)
 {
-        unsigned long flags;
         int repeat = 5; /* number of command restarts */
 
         while (repeat--) {
@@ -765,14 +757,14 @@ static int ec_poll(struct acpi_ec *ec)
                 do {
                         if (!ec_guard(ec))
                                 return 0;
-                        spin_lock_irqsave(&ec->lock, flags);
+                        spin_lock(&ec->lock);
                         advance_transaction(ec, false);
-                        spin_unlock_irqrestore(&ec->lock, flags);
+                        spin_unlock(&ec->lock);
                 } while (time_before(jiffies, delay));
                 pr_debug("controller reset, restart transaction\n");
-                spin_lock_irqsave(&ec->lock, flags);
+                spin_lock(&ec->lock);
                 start_transaction(ec);
-                spin_unlock_irqrestore(&ec->lock, flags);
+                spin_unlock(&ec->lock);
         }
         return -ETIME;
 }
@@ -780,11 +772,10 @@ static int ec_poll(struct acpi_ec *ec)
 static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
                                         struct transaction *t)
 {
-        unsigned long tmp;
         int ret = 0;
 
         /* start transaction */
-        spin_lock_irqsave(&ec->lock, tmp);
+        spin_lock(&ec->lock);
         /* Enable GPE for command processing (IBF=0/OBF=1) */
         if (!acpi_ec_submit_flushable_request(ec)) {
                 ret = -EINVAL;
@@ -795,11 +786,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
         ec->curr = t;
         ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
         start_transaction(ec);
-        spin_unlock_irqrestore(&ec->lock, tmp);
+        spin_unlock(&ec->lock);
 
         ret = ec_poll(ec);
 
-        spin_lock_irqsave(&ec->lock, tmp);
+        spin_lock(&ec->lock);
         if (t->irq_count == ec_storm_threshold)
                 acpi_ec_unmask_events(ec);
         ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
@@ -808,7 +799,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
         acpi_ec_complete_request(ec);
         ec_dbg_ref(ec, "Decrease command");
 unlock:
-        spin_unlock_irqrestore(&ec->lock, tmp);
+        spin_unlock(&ec->lock);
         return ret;
 }
 
@@ -936,9 +927,7 @@ EXPORT_SYMBOL(ec_get_handle);
 
 static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&ec->lock, flags);
+        spin_lock(&ec->lock);
         if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
                 ec_dbg_drv("Starting EC");
                 /* Enable GPE for event processing (SCI_EVT=1) */
@@ -948,31 +937,28 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
                 }
                 ec_log_drv("EC started");
         }
-        spin_unlock_irqrestore(&ec->lock, flags);
+        spin_unlock(&ec->lock);
 }
 
 static bool acpi_ec_stopped(struct acpi_ec *ec)
 {
-        unsigned long flags;
         bool flushed;
 
-        spin_lock_irqsave(&ec->lock, flags);
+        spin_lock(&ec->lock);
         flushed = acpi_ec_flushed(ec);
-        spin_unlock_irqrestore(&ec->lock, flags);
+        spin_unlock(&ec->lock);
         return flushed;
 }
 
 static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&ec->lock, flags);
+        spin_lock(&ec->lock);
         if (acpi_ec_started(ec)) {
                 ec_dbg_drv("Stopping EC");
                 set_bit(EC_FLAGS_STOPPED, &ec->flags);
-                spin_unlock_irqrestore(&ec->lock, flags);
+                spin_unlock(&ec->lock);
                 wait_event(ec->wait, acpi_ec_stopped(ec));
-                spin_lock_irqsave(&ec->lock, flags);
+                spin_lock(&ec->lock);
                 /* Disable GPE for event processing (SCI_EVT=1) */
                 if (!suspending) {
                         acpi_ec_complete_request(ec);
@@ -983,29 +969,25 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
                 clear_bit(EC_FLAGS_STOPPED, &ec->flags);
                 ec_log_drv("EC stopped");
         }
-        spin_unlock_irqrestore(&ec->lock, flags);
+        spin_unlock(&ec->lock);
 }
 
 static void acpi_ec_enter_noirq(struct acpi_ec *ec)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&ec->lock, flags);
+        spin_lock(&ec->lock);
         ec->busy_polling = true;
         ec->polling_guard = 0;
         ec_log_drv("interrupt blocked");
-        spin_unlock_irqrestore(&ec->lock, flags);
+        spin_unlock(&ec->lock);
 }
 
 static void acpi_ec_leave_noirq(struct acpi_ec *ec)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&ec->lock, flags);
+        spin_lock(&ec->lock);
         ec->busy_polling = ec_busy_polling;
         ec->polling_guard = ec_polling_guard;
         ec_log_drv("interrupt unblocked");
-        spin_unlock_irqrestore(&ec->lock, flags);
+        spin_unlock(&ec->lock);
 }
 
 void acpi_ec_block_transactions(void)
@@ -1137,9 +1119,9 @@ static void acpi_ec_event_processor(struct work_struct *work)
 
         ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
 
-        spin_lock_irq(&ec->lock);
+        spin_lock(&ec->lock);
         ec->queries_in_progress--;
-        spin_unlock_irq(&ec->lock);
+        spin_unlock(&ec->lock);
 
         acpi_ec_put_query_handler(handler);
         kfree(q);
@@ -1202,12 +1184,12 @@ static int acpi_ec_submit_query(struct acpi_ec *ec)
          */
         ec_dbg_evt("Query(0x%02x) scheduled", value);
 
-        spin_lock_irq(&ec->lock);
+        spin_lock(&ec->lock);
 
         ec->queries_in_progress++;
         queue_work(ec_query_wq, &q->work);
 
-        spin_unlock_irq(&ec->lock);
+        spin_unlock(&ec->lock);
 
         return 0;
 
@@ -1223,14 +1205,14 @@ static void acpi_ec_event_handler(struct work_struct *work)
 
         ec_dbg_evt("Event started");
 
-        spin_lock_irq(&ec->lock);
+        spin_lock(&ec->lock);
 
         while (ec->events_to_process) {
-                spin_unlock_irq(&ec->lock);
+                spin_unlock(&ec->lock);
 
                 acpi_ec_submit_query(ec);
 
-                spin_lock_irq(&ec->lock);
+                spin_lock(&ec->lock);
 
                 ec->events_to_process--;
         }
@@ -1247,11 +1229,11 @@ static void acpi_ec_event_handler(struct work_struct *work)
 
                 ec_dbg_evt("Event stopped");
 
-                spin_unlock_irq(&ec->lock);
+                spin_unlock(&ec->lock);
 
                 guard_timeout = !!ec_guard(ec);
 
-                spin_lock_irq(&ec->lock);
+                spin_lock(&ec->lock);
 
                 /* Take care of SCI_EVT unless someone else is doing that. */
                 if (guard_timeout && !ec->curr)
@@ -1264,7 +1246,7 @@ static void acpi_ec_event_handler(struct work_struct *work)
 
         ec->events_in_progress--;
 
-        spin_unlock_irq(&ec->lock);
+        spin_unlock(&ec->lock);
 }
 
 static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt)
@@ -1289,13 +1271,11 @@ static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt)
 
 static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&ec->lock, flags);
+        spin_lock(&ec->lock);
 
         clear_gpe_and_advance_transaction(ec, true);
 
-        spin_unlock_irqrestore(&ec->lock, flags);
+        spin_unlock(&ec->lock);
 }
 
 static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -2105,7 +2085,7 @@ bool acpi_ec_dispatch_gpe(void)
          * Dispatch the EC GPE in-band, but do not report wakeup in any case
          * to allow the caller to process events properly after that.
          */
-        spin_lock_irq(&first_ec->lock);
+        spin_lock(&first_ec->lock);
 
         if (acpi_ec_gpe_status_set(first_ec)) {
                 pm_pr_dbg("ACPI EC GPE status set\n");
@@ -2114,7 +2094,7 @@ bool acpi_ec_dispatch_gpe(void)
                 work_in_progress = acpi_ec_work_in_progress(first_ec);
         }
 
-        spin_unlock_irq(&first_ec->lock);
+        spin_unlock(&first_ec->lock);
 
         if (!work_in_progress)
                 return false;
@@ -2127,11 +2107,11 @@ bool acpi_ec_dispatch_gpe(void)
 
                 pm_pr_dbg("ACPI EC work flushed\n");
 
-                spin_lock_irq(&first_ec->lock);
+                spin_lock(&first_ec->lock);
 
                 work_in_progress = acpi_ec_work_in_progress(first_ec);
 
-                spin_unlock_irq(&first_ec->lock);
+                spin_unlock(&first_ec->lock);
         } while (work_in_progress && !pm_wakeup_pending());
 
         return false;
-- cgit
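The rule of thumb behind this commit and 8e57de430764 above: the
_irqsave/_irq spin lock variants are only needed when the same lock can be
taken from interrupt context on the local CPU, because a hardirq arriving
while the lock is held would otherwise self-deadlock. Once every user of
the lock runs in thread context, plain spin_lock() suffices. A schematic
illustration; the lock, counter, and function names are invented:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned int demo_counter;

/* Needed only while demo_lock may also be taken from a hardirq handler. */
static void demo_touch_from_anywhere(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        demo_counter++;
        spin_unlock_irqrestore(&demo_lock, flags);
}

/*
 * Sufficient once all users run in thread context (as the EC code does
 * after the SCI handler became threaded): no hardirq can interrupt a
 * holder and try to re-acquire the lock on the same CPU.
 */
static void demo_touch_thread_only(void)
{
        spin_lock(&demo_lock);
        demo_counter++;
        spin_unlock(&demo_lock);
}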