2 * acpi_osl.c - OS-dependent functions ($Revision: 83 $)
4 * Copyright (C) 2000 Andrew Henroid
5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7 * Copyright (c) 2008 Intel Corporation
8 * Author: Matthew Wilcox <willy@linux.intel.com>
10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
30 #include <linux/highmem.h>
31 #include <linux/pci.h>
32 #include <linux/interrupt.h>
33 #include <linux/kmod.h>
34 #include <linux/delay.h>
35 #include <linux/workqueue.h>
36 #include <linux/nmi.h>
37 #include <linux/acpi.h>
38 #include <linux/efi.h>
39 #include <linux/ioport.h>
40 #include <linux/list.h>
41 #include <linux/jiffies.h>
42 #include <linux/semaphore.h>
45 #include <asm/uaccess.h>
46 #include <linux/io-64-nonatomic-lo-hi.h>
50 #define _COMPONENT ACPI_OS_SERVICES
51 ACPI_MODULE_NAME("osl");
54 acpi_osd_exec_callback function;
56 struct work_struct work;
59 #ifdef ENABLE_DEBUGGER
60 #include <linux/kdb.h>
62 /* stuff for debugger support */
64 EXPORT_SYMBOL(acpi_in_debugger);
65 #endif /*ENABLE_DEBUGGER */
67 static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
69 static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
72 static acpi_osd_handler acpi_irq_handler;
73 static void *acpi_irq_context;
74 static struct workqueue_struct *kacpid_wq;
75 static struct workqueue_struct *kacpi_notify_wq;
76 static struct workqueue_struct *kacpi_hotplug_wq;
77 static bool acpi_os_initialized;
78 unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
79 bool acpi_permanent_mmap = false;
82 * This list of permanent mappings is for memory that may be accessed from
83 * interrupt context, where we can't do the ioremap().
86 struct list_head list;
88 acpi_physical_address phys;
90 unsigned long refcount;
93 static LIST_HEAD(acpi_ioremaps);
94 static DEFINE_MUTEX(acpi_ioremap_lock);
96 static void __init acpi_request_region (struct acpi_generic_address *gas,
97 unsigned int length, char *desc)
101 /* Handle possible alignment issues */
102 memcpy(&addr, &gas->address, sizeof(addr));
103 if (!addr || !length)
106 /* Resources are never freed */
107 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
108 request_region(addr, length, desc);
109 else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
110 request_mem_region(addr, length, desc);
113 static int __init acpi_reserve_resources(void)
115 acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
116 "ACPI PM1a_EVT_BLK");
118 acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
119 "ACPI PM1b_EVT_BLK");
121 acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
122 "ACPI PM1a_CNT_BLK");
124 acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
125 "ACPI PM1b_CNT_BLK");
127 if (acpi_gbl_FADT.pm_timer_length == 4)
128 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
130 acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
133 /* Length of GPE blocks must be a non-negative multiple of 2 */
135 if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
136 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
137 acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
139 if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
140 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
141 acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
145 fs_initcall_sync(acpi_reserve_resources);
147 void acpi_os_printf(const char *fmt, ...)
151 acpi_os_vprintf(fmt, args);
154 EXPORT_SYMBOL(acpi_os_printf);
156 void acpi_os_vprintf(const char *fmt, va_list args)
158 static char buffer[512];
160 vsprintf(buffer, fmt, args);
162 #ifdef ENABLE_DEBUGGER
163 if (acpi_in_debugger) {
164 kdb_printf("%s", buffer);
166 if (printk_get_level(buffer))
167 printk("%s", buffer);
169 printk(KERN_CONT "%s", buffer);
172 if (acpi_debugger_write_log(buffer) < 0) {
173 if (printk_get_level(buffer))
174 printk("%s", buffer);
176 printk(KERN_CONT "%s", buffer);
182 static unsigned long acpi_rsdp;
183 static int __init setup_acpi_rsdp(char *arg)
185 if (kstrtoul(arg, 16, &acpi_rsdp))
189 early_param("acpi_rsdp", setup_acpi_rsdp);
192 acpi_physical_address __init acpi_os_get_root_pointer(void)
199 if (efi_enabled(EFI_CONFIG_TABLES)) {
200 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
202 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
205 printk(KERN_ERR PREFIX
206 "System description tables not found\n");
209 } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
210 acpi_physical_address pa = 0;
212 acpi_find_root_pointer(&pa);
219 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
220 static struct acpi_ioremap *
221 acpi_map_lookup(acpi_physical_address phys, acpi_size size)
223 struct acpi_ioremap *map;
225 list_for_each_entry_rcu(map, &acpi_ioremaps, list)
226 if (map->phys <= phys &&
227 phys + size <= map->phys + map->size)
233 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
234 static void __iomem *
235 acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
237 struct acpi_ioremap *map;
239 map = acpi_map_lookup(phys, size);
241 return map->virt + (phys - map->phys);
246 void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
248 struct acpi_ioremap *map;
249 void __iomem *virt = NULL;
251 mutex_lock(&acpi_ioremap_lock);
252 map = acpi_map_lookup(phys, size);
254 virt = map->virt + (phys - map->phys);
257 mutex_unlock(&acpi_ioremap_lock);
260 EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
262 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
263 static struct acpi_ioremap *
264 acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
266 struct acpi_ioremap *map;
268 list_for_each_entry_rcu(map, &acpi_ioremaps, list)
269 if (map->virt <= virt &&
270 virt + size <= map->virt + map->size)
276 #if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
277 /* ioremap will take care of cache attributes */
278 #define should_use_kmap(pfn) 0
280 #define should_use_kmap(pfn) page_is_ram(pfn)
283 static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
287 pfn = pg_off >> PAGE_SHIFT;
288 if (should_use_kmap(pfn)) {
289 if (pg_sz > PAGE_SIZE)
291 return (void __iomem __force *)kmap(pfn_to_page(pfn));
293 return acpi_os_ioremap(pg_off, pg_sz);
296 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
300 pfn = pg_off >> PAGE_SHIFT;
301 if (should_use_kmap(pfn))
302 kunmap(pfn_to_page(pfn));
308 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
309 * @phys: Start of the physical address range to map.
310 * @size: Size of the physical address range to map.
312 * Look up the given physical address range in the list of existing ACPI memory
313 * mappings. If found, get a reference to it and return a pointer to it (its
314 * virtual address). If not found, map it, add it to that list and return a
317 * During early init (when acpi_permanent_mmap has not been set yet) this
318 * routine simply calls __acpi_map_table() to get the job done.
321 acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
323 struct acpi_ioremap *map;
325 acpi_physical_address pg_off;
328 if (phys > ULONG_MAX) {
329 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
333 if (!acpi_permanent_mmap)
334 return __acpi_map_table((unsigned long)phys, size);
336 mutex_lock(&acpi_ioremap_lock);
337 /* Check if there's a suitable mapping already. */
338 map = acpi_map_lookup(phys, size);
344 map = kzalloc(sizeof(*map), GFP_KERNEL);
346 mutex_unlock(&acpi_ioremap_lock);
350 pg_off = round_down(phys, PAGE_SIZE);
351 pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
352 virt = acpi_map(pg_off, pg_sz);
354 mutex_unlock(&acpi_ioremap_lock);
359 INIT_LIST_HEAD(&map->list);
365 list_add_tail_rcu(&map->list, &acpi_ioremaps);
368 mutex_unlock(&acpi_ioremap_lock);
369 return map->virt + (phys - map->phys);
371 EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
373 void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
375 return (void *)acpi_os_map_iomem(phys, size);
377 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
379 static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
381 if (!--map->refcount)
382 list_del_rcu(&map->list);
385 static void acpi_os_map_cleanup(struct acpi_ioremap *map)
387 if (!map->refcount) {
388 synchronize_rcu_expedited();
389 acpi_unmap(map->phys, map->virt);
395 * acpi_os_unmap_iomem - Drop a memory mapping reference.
396 * @virt: Start of the address range to drop a reference to.
397 * @size: Size of the address range to drop a reference to.
399 * Look up the given virtual address range in the list of existing ACPI memory
400 * mappings, drop a reference to it and unmap it if there are no more active
403 * During early init (when acpi_permanent_mmap has not been set yet) this
404 * routine simply calls __acpi_unmap_table() to get the job done. Since
405 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
408 void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
410 struct acpi_ioremap *map;
412 if (!acpi_permanent_mmap) {
413 __acpi_unmap_table(virt, size);
417 mutex_lock(&acpi_ioremap_lock);
418 map = acpi_map_lookup_virt(virt, size);
420 mutex_unlock(&acpi_ioremap_lock);
421 WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
424 acpi_os_drop_map_ref(map);
425 mutex_unlock(&acpi_ioremap_lock);
427 acpi_os_map_cleanup(map);
429 EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
431 void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
433 return acpi_os_unmap_iomem((void __iomem *)virt, size);
435 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
437 int acpi_os_map_generic_address(struct acpi_generic_address *gas)
442 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
445 /* Handle possible alignment issues */
446 memcpy(&addr, &gas->address, sizeof(addr));
447 if (!addr || !gas->bit_width)
450 virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
456 EXPORT_SYMBOL(acpi_os_map_generic_address);
458 void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
461 struct acpi_ioremap *map;
463 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
466 /* Handle possible alignment issues */
467 memcpy(&addr, &gas->address, sizeof(addr));
468 if (!addr || !gas->bit_width)
471 mutex_lock(&acpi_ioremap_lock);
472 map = acpi_map_lookup(addr, gas->bit_width / 8);
474 mutex_unlock(&acpi_ioremap_lock);
477 acpi_os_drop_map_ref(map);
478 mutex_unlock(&acpi_ioremap_lock);
480 acpi_os_map_cleanup(map);
482 EXPORT_SYMBOL(acpi_os_unmap_generic_address);
#ifdef ACPI_FUTURE_USAGE
/* Translate a kernel virtual address to its physical counterpart. */
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif
497 #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
498 static bool acpi_rev_override;
500 int __init acpi_rev_override_setup(char *str)
502 acpi_rev_override = true;
505 __setup("acpi_rev_override", acpi_rev_override_setup);
507 #define acpi_rev_override false
510 #define ACPI_MAX_OVERRIDE_LEN 100
512 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
515 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
516 acpi_string *new_val)
518 if (!init_val || !new_val)
519 return AE_BAD_PARAMETER;
522 if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
523 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
525 *new_val = acpi_os_name;
528 if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
529 printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
530 *new_val = (char *)5;
536 static irqreturn_t acpi_irq(int irq, void *dev_id)
540 handled = (*acpi_irq_handler) (acpi_irq_context);
546 acpi_irq_not_handled++;
552 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
557 acpi_irq_stats_init();
560 * ACPI interrupts different from the SCI in our copy of the FADT are
563 if (gsi != acpi_gbl_FADT.sci_interrupt)
564 return AE_BAD_PARAMETER;
566 if (acpi_irq_handler)
567 return AE_ALREADY_ACQUIRED;
569 if (acpi_gsi_to_irq(gsi, &irq) < 0) {
570 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
575 acpi_irq_handler = handler;
576 acpi_irq_context = context;
577 if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
578 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
579 acpi_irq_handler = NULL;
580 return AE_NOT_ACQUIRED;
587 acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
589 if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
590 return AE_BAD_PARAMETER;
592 free_irq(acpi_sci_irq, acpi_irq);
593 acpi_irq_handler = NULL;
594 acpi_sci_irq = INVALID_ACPI_IRQ;
600 * Running in interpreter thread context, safe to sleep
603 void acpi_os_sleep(u64 ms)
608 void acpi_os_stall(u32 us)
616 touch_nmi_watchdog();
622 * Support ACPI 3.0 AML Timer operand
623 * Returns 64-bit free-running, monotonically increasing timer
624 * with 100ns granularity
626 u64 acpi_os_get_timer(void)
628 u64 time_ns = ktime_to_ns(ktime_get());
629 do_div(time_ns, 100);
633 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
642 *(u8 *) value = inb(port);
643 } else if (width <= 16) {
644 *(u16 *) value = inw(port);
645 } else if (width <= 32) {
646 *(u32 *) value = inl(port);
654 EXPORT_SYMBOL(acpi_os_read_port);
656 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
660 } else if (width <= 16) {
662 } else if (width <= 32) {
671 EXPORT_SYMBOL(acpi_os_write_port);
674 acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
676 void __iomem *virt_addr;
677 unsigned int size = width / 8;
682 virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
685 virt_addr = acpi_os_ioremap(phys_addr, size);
687 return AE_BAD_ADDRESS;
696 *(u8 *) value = readb(virt_addr);
699 *(u16 *) value = readw(virt_addr);
702 *(u32 *) value = readl(virt_addr);
705 *(u64 *) value = readq(virt_addr);
720 acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
722 void __iomem *virt_addr;
723 unsigned int size = width / 8;
727 virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
730 virt_addr = acpi_os_ioremap(phys_addr, size);
732 return AE_BAD_ADDRESS;
738 writeb(value, virt_addr);
741 writew(value, virt_addr);
744 writel(value, virt_addr);
747 writeq(value, virt_addr);
762 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
763 u64 *value, u32 width)
769 return AE_BAD_PARAMETER;
785 result = raw_pci_read(pci_id->segment, pci_id->bus,
786 PCI_DEVFN(pci_id->device, pci_id->function),
787 reg, size, &value32);
790 return (result ? AE_ERROR : AE_OK);
794 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
795 u64 value, u32 width)
813 result = raw_pci_write(pci_id->segment, pci_id->bus,
814 PCI_DEVFN(pci_id->device, pci_id->function),
817 return (result ? AE_ERROR : AE_OK);
820 static void acpi_os_execute_deferred(struct work_struct *work)
822 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
824 dpc->function(dpc->context);
828 #ifdef CONFIG_ACPI_DEBUGGER
829 static struct acpi_debugger acpi_debugger;
830 static bool acpi_debugger_initialized;
832 int acpi_register_debugger(struct module *owner,
833 const struct acpi_debugger_ops *ops)
837 mutex_lock(&acpi_debugger.lock);
838 if (acpi_debugger.ops) {
843 acpi_debugger.owner = owner;
844 acpi_debugger.ops = ops;
847 mutex_unlock(&acpi_debugger.lock);
850 EXPORT_SYMBOL(acpi_register_debugger);
852 void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
854 mutex_lock(&acpi_debugger.lock);
855 if (ops == acpi_debugger.ops) {
856 acpi_debugger.ops = NULL;
857 acpi_debugger.owner = NULL;
859 mutex_unlock(&acpi_debugger.lock);
861 EXPORT_SYMBOL(acpi_unregister_debugger);
863 int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
866 int (*func)(acpi_osd_exec_callback, void *);
867 struct module *owner;
869 if (!acpi_debugger_initialized)
871 mutex_lock(&acpi_debugger.lock);
872 if (!acpi_debugger.ops) {
876 if (!try_module_get(acpi_debugger.owner)) {
880 func = acpi_debugger.ops->create_thread;
881 owner = acpi_debugger.owner;
882 mutex_unlock(&acpi_debugger.lock);
884 ret = func(function, context);
886 mutex_lock(&acpi_debugger.lock);
889 mutex_unlock(&acpi_debugger.lock);
893 ssize_t acpi_debugger_write_log(const char *msg)
896 ssize_t (*func)(const char *);
897 struct module *owner;
899 if (!acpi_debugger_initialized)
901 mutex_lock(&acpi_debugger.lock);
902 if (!acpi_debugger.ops) {
906 if (!try_module_get(acpi_debugger.owner)) {
910 func = acpi_debugger.ops->write_log;
911 owner = acpi_debugger.owner;
912 mutex_unlock(&acpi_debugger.lock);
916 mutex_lock(&acpi_debugger.lock);
919 mutex_unlock(&acpi_debugger.lock);
923 ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
926 ssize_t (*func)(char *, size_t);
927 struct module *owner;
929 if (!acpi_debugger_initialized)
931 mutex_lock(&acpi_debugger.lock);
932 if (!acpi_debugger.ops) {
936 if (!try_module_get(acpi_debugger.owner)) {
940 func = acpi_debugger.ops->read_cmd;
941 owner = acpi_debugger.owner;
942 mutex_unlock(&acpi_debugger.lock);
944 ret = func(buffer, buffer_length);
946 mutex_lock(&acpi_debugger.lock);
949 mutex_unlock(&acpi_debugger.lock);
953 int acpi_debugger_wait_command_ready(void)
956 int (*func)(bool, char *, size_t);
957 struct module *owner;
959 if (!acpi_debugger_initialized)
961 mutex_lock(&acpi_debugger.lock);
962 if (!acpi_debugger.ops) {
966 if (!try_module_get(acpi_debugger.owner)) {
970 func = acpi_debugger.ops->wait_command_ready;
971 owner = acpi_debugger.owner;
972 mutex_unlock(&acpi_debugger.lock);
974 ret = func(acpi_gbl_method_executing,
975 acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);
977 mutex_lock(&acpi_debugger.lock);
980 mutex_unlock(&acpi_debugger.lock);
984 int acpi_debugger_notify_command_complete(void)
988 struct module *owner;
990 if (!acpi_debugger_initialized)
992 mutex_lock(&acpi_debugger.lock);
993 if (!acpi_debugger.ops) {
997 if (!try_module_get(acpi_debugger.owner)) {
1001 func = acpi_debugger.ops->notify_command_complete;
1002 owner = acpi_debugger.owner;
1003 mutex_unlock(&acpi_debugger.lock);
1007 mutex_lock(&acpi_debugger.lock);
1010 mutex_unlock(&acpi_debugger.lock);
1014 int __init acpi_debugger_init(void)
1016 mutex_init(&acpi_debugger.lock);
1017 acpi_debugger_initialized = true;
1022 /*******************************************************************************
1024 * FUNCTION: acpi_os_execute
1026 * PARAMETERS: Type - Type of the callback
1027 * Function - Function to be executed
1028 * Context - Function parameters
1032 * DESCRIPTION: Depending on type, either queues function for deferred execution or
1033 * immediately executes function on a separate thread.
1035 ******************************************************************************/
1037 acpi_status acpi_os_execute(acpi_execute_type type,
1038 acpi_osd_exec_callback function, void *context)
1040 acpi_status status = AE_OK;
1041 struct acpi_os_dpc *dpc;
1042 struct workqueue_struct *queue;
1044 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
1045 "Scheduling function [%p(%p)] for deferred execution.\n",
1046 function, context));
1048 if (type == OSL_DEBUGGER_MAIN_THREAD) {
1049 ret = acpi_debugger_create_thread(function, context);
1051 pr_err("Call to kthread_create() failed.\n");
1058 * Allocate/initialize DPC structure. Note that this memory will be
1059 * freed by the callee. The kernel handles the work_struct list in a
1060 * way that allows us to also free its memory inside the callee.
1061 * Because we may want to schedule several tasks with different
1062 * parameters we can't use the approach some kernel code uses of
1063 * having a static work_struct.
1066 dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
1068 return AE_NO_MEMORY;
1070 dpc->function = function;
1071 dpc->context = context;
1074 * To prevent lockdep from complaining unnecessarily, make sure that
1075 * there is a different static lockdep key for each workqueue by using
1076 * INIT_WORK() for each of them separately.
1078 if (type == OSL_NOTIFY_HANDLER) {
1079 queue = kacpi_notify_wq;
1080 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
1081 } else if (type == OSL_GPE_HANDLER) {
1083 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
1085 pr_err("Unsupported os_execute type %d.\n", type);
1089 if (ACPI_FAILURE(status))
1093 * On some machines, a software-initiated SMI causes corruption unless
1094 * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
1095 * typically it's done in GPE-related methods that are run via
1096 * workqueues, so we can avoid the known corruption cases by always
1097 * queueing on CPU 0.
1099 ret = queue_work_on(0, queue, &dpc->work);
1101 printk(KERN_ERR PREFIX
1102 "Call to queue_work() failed.\n");
1106 if (ACPI_FAILURE(status))
1111 EXPORT_SYMBOL(acpi_os_execute);
1113 void acpi_os_wait_events_complete(void)
1116 * Make sure the GPE handler or the fixed event handler is not used
1117 * on another CPU after removal.
1119 if (acpi_sci_irq_valid())
1120 synchronize_hardirq(acpi_sci_irq);
1121 flush_workqueue(kacpid_wq);
1122 flush_workqueue(kacpi_notify_wq);
1125 struct acpi_hp_work {
1126 struct work_struct work;
1127 struct acpi_device *adev;
1131 static void acpi_hotplug_work_fn(struct work_struct *work)
1133 struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
1135 acpi_os_wait_events_complete();
1136 acpi_device_hotplug(hpw->adev, hpw->src);
1140 acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
1142 struct acpi_hp_work *hpw;
1144 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
1145 "Scheduling hotplug event (%p, %u) for deferred execution.\n",
1148 hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
1150 return AE_NO_MEMORY;
1152 INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
1156 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
1157 * the hotplug code may call driver .remove() functions, which may
1158 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
1161 if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
1168 bool acpi_queue_hotplug_work(struct work_struct *work)
1170 return queue_work(kacpi_hotplug_wq, work);
1174 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
1176 struct semaphore *sem = NULL;
1178 sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
1180 return AE_NO_MEMORY;
1182 sema_init(sem, initial_units);
1184 *handle = (acpi_handle *) sem;
1186 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
1187 *handle, initial_units));
1193 * TODO: A better way to delete semaphores? Linux doesn't have a
1194 * 'delete_semaphore()' function -- may result in an invalid
1195 * pointer dereference for non-synchronized consumers. Should
1196 * we at least check for blocked threads and signal/cancel them?
1199 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
1201 struct semaphore *sem = (struct semaphore *)handle;
1204 return AE_BAD_PARAMETER;
1206 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
1208 BUG_ON(!list_empty(&sem->wait_list));
1216 * TODO: Support for units > 1?
1218 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
1220 acpi_status status = AE_OK;
1221 struct semaphore *sem = (struct semaphore *)handle;
1225 if (!acpi_os_initialized)
1228 if (!sem || (units < 1))
1229 return AE_BAD_PARAMETER;
1234 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
1235 handle, units, timeout));
1237 if (timeout == ACPI_WAIT_FOREVER)
1238 jiffies = MAX_SCHEDULE_TIMEOUT;
1240 jiffies = msecs_to_jiffies(timeout);
1242 ret = down_timeout(sem, jiffies);
1246 if (ACPI_FAILURE(status)) {
1247 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1248 "Failed to acquire semaphore[%p|%d|%d], %s",
1249 handle, units, timeout,
1250 acpi_format_exception(status)));
1252 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1253 "Acquired semaphore[%p|%d|%d]", handle,
1261 * TODO: Support for units > 1?
1263 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
1265 struct semaphore *sem = (struct semaphore *)handle;
1267 if (!acpi_os_initialized)
1270 if (!sem || (units < 1))
1271 return AE_BAD_PARAMETER;
1276 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
1284 acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
1286 #ifdef ENABLE_DEBUGGER
1287 if (acpi_in_debugger) {
1290 kdb_read(buffer, buffer_length);
1292 /* remove the CR kdb includes */
1293 chars = strlen(buffer) - 1;
1294 buffer[chars] = '\0';
1299 ret = acpi_debugger_read_cmd(buffer, buffer_length);
1308 EXPORT_SYMBOL(acpi_os_get_line);
1310 acpi_status acpi_os_wait_command_ready(void)
1314 ret = acpi_debugger_wait_command_ready();
1320 acpi_status acpi_os_notify_command_complete(void)
1324 ret = acpi_debugger_notify_command_complete();
1330 acpi_status acpi_os_signal(u32 function, void *info)
1333 case ACPI_SIGNAL_FATAL:
1334 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
1336 case ACPI_SIGNAL_BREAKPOINT:
1339 * ACPI spec. says to treat it as a NOP unless
1340 * you are debugging. So if/when we integrate
1341 * AML debugger into the kernel debugger its
1342 * hook will go here. But until then it is
1343 * not useful to print anything on breakpoints.
1353 static int __init acpi_os_name_setup(char *str)
1355 char *p = acpi_os_name;
1356 int count = ACPI_MAX_OVERRIDE_LEN - 1;
1361 for (; count-- && *str; str++) {
1362 if (isalnum(*str) || *str == ' ' || *str == ':')
1364 else if (*str == '\'' || *str == '"')
1375 __setup("acpi_os_name=", acpi_os_name_setup);
1378 * Disable the auto-serialization of named objects creation methods.
1380 * This feature is enabled by default. It marks the AML control methods
1381 * that contain the opcodes to create named objects as "Serialized".
1383 static int __init acpi_no_auto_serialize_setup(char *str)
1385 acpi_gbl_auto_serialize_methods = FALSE;
1386 pr_info("ACPI: auto-serialization disabled\n");
1391 __setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);
1393 /* Check of resource interference between native drivers and ACPI
1394 * OperationRegions (SystemIO and System Memory only).
1395 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1396 * in arbitrary AML code and can interfere with legacy drivers.
1397 * acpi_enforce_resources= can be set to:
1399 * - strict (default) (2)
1400 * -> further driver trying to access the resources will not load
1402 * -> further driver trying to access the resources will load, but you
1403 * get a system message that something might go wrong...
1406 * -> ACPI Operation Region resources will not be registered
1409 #define ENFORCE_RESOURCES_STRICT 2
1410 #define ENFORCE_RESOURCES_LAX 1
1411 #define ENFORCE_RESOURCES_NO 0
1413 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1415 static int __init acpi_enforce_resources_setup(char *str)
1417 if (str == NULL || *str == '\0')
1420 if (!strcmp("strict", str))
1421 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1422 else if (!strcmp("lax", str))
1423 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1424 else if (!strcmp("no", str))
1425 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1430 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1432 /* Check for resource conflicts between ACPI OperationRegions and native
1434 int acpi_check_resource_conflict(const struct resource *res)
1436 acpi_adr_space_type space_id;
1441 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1443 if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1446 if (res->flags & IORESOURCE_IO)
1447 space_id = ACPI_ADR_SPACE_SYSTEM_IO;
1449 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
1451 length = resource_size(res);
1452 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
1454 clash = acpi_check_address_range(space_id, res->start, length, warn);
1457 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1458 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1459 printk(KERN_NOTICE "ACPI: This conflict may"
1460 " cause random problems and system"
1462 printk(KERN_INFO "ACPI: If an ACPI driver is available"
1463 " for this device, you should use it instead of"
1464 " the native driver\n");
1466 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1471 EXPORT_SYMBOL(acpi_check_resource_conflict);
1473 int acpi_check_region(resource_size_t start, resource_size_t n,
1476 struct resource res = {
1478 .end = start + n - 1,
1480 .flags = IORESOURCE_IO,
1483 return acpi_check_resource_conflict(&res);
1485 EXPORT_SYMBOL(acpi_check_region);
1488 * Let drivers know whether the resource checks are effective
1490 int acpi_resources_are_enforced(void)
1492 return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
1494 EXPORT_SYMBOL(acpi_resources_are_enforced);
1497 * Deallocate the memory for a spinlock.
1499 void acpi_os_delete_lock(acpi_spinlock handle)
1505 * Acquire a spinlock.
1507 * handle is a pointer to the spinlock_t.
1510 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1512 acpi_cpu_flags flags;
1513 spin_lock_irqsave(lockp, flags);
1518 * Release a spinlock. See above.
1521 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1523 spin_unlock_irqrestore(lockp, flags);
1526 #ifndef ACPI_USE_LOCAL_CACHE
1528 /*******************************************************************************
1530 * FUNCTION: acpi_os_create_cache
1532 * PARAMETERS: name - Ascii name for the cache
1533 * size - Size of each cached object
1534 * depth - Maximum depth of the cache (in objects) <ignored>
1535 * cache - Where the new cache object is returned
1539 * DESCRIPTION: Create a cache object
1541 ******************************************************************************/
1544 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1546 *cache = kmem_cache_create(name, size, 0, 0, NULL);
1553 /*******************************************************************************
1555 * FUNCTION: acpi_os_purge_cache
1557 * PARAMETERS: Cache - Handle to cache object
1561 * DESCRIPTION: Free all objects within the requested cache.
1563 ******************************************************************************/
1565 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1567 kmem_cache_shrink(cache);
1571 /*******************************************************************************
1573 * FUNCTION: acpi_os_delete_cache
1575 * PARAMETERS: Cache - Handle to cache object
1579 * DESCRIPTION: Free all objects within the requested cache and delete the
1582 ******************************************************************************/
1584 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1586 kmem_cache_destroy(cache);
1590 /*******************************************************************************
1592 * FUNCTION: acpi_os_release_object
1594 * PARAMETERS: Cache - Handle to cache object
1595 * Object - The object to be released
1599 * DESCRIPTION: Release an object to the specified cache. If cache is full,
1600 * the object is deleted.
1602 ******************************************************************************/
1604 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1606 kmem_cache_free(cache, object);
1611 static int __init acpi_no_static_ssdt_setup(char *s)
1613 acpi_gbl_disable_ssdt_table_install = TRUE;
1614 pr_info("ACPI: static SSDT installation disabled\n");
1619 early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);
1621 static int __init acpi_disable_return_repair(char *s)
1623 printk(KERN_NOTICE PREFIX
1624 "ACPI: Predefined validation mechanism disabled\n");
1625 acpi_gbl_disable_auto_repair = TRUE;
1630 __setup("acpica_no_return_repair", acpi_disable_return_repair);
1632 acpi_status __init acpi_os_initialize(void)
1634 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
1635 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
1636 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
1637 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
1638 if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
1640 * Use acpi_os_map_generic_address to pre-map the reset
1641 * register if it's in system memory.
1645 rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
1646 pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
1648 acpi_os_initialized = true;
1653 acpi_status __init acpi_os_initialize1(void)
1655 kacpid_wq = alloc_workqueue("kacpid", 0, 1);
1656 kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
1657 kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
1659 BUG_ON(!kacpi_notify_wq);
1660 BUG_ON(!kacpi_hotplug_wq);
1665 acpi_status acpi_os_terminate(void)
1667 if (acpi_irq_handler) {
1668 acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
1672 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
1673 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
1674 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
1675 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
1676 if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
1677 acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);
1679 destroy_workqueue(kacpid_wq);
1680 destroy_workqueue(kacpi_notify_wq);
1681 destroy_workqueue(kacpi_hotplug_wq);
1686 acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
1690 if (__acpi_os_prepare_sleep)
1691 rc = __acpi_os_prepare_sleep(sleep_state,
1692 pm1a_control, pm1b_control);
1696 return AE_CTRL_SKIP;
1701 void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
1702 u32 pm1a_ctrl, u32 pm1b_ctrl))
1704 __acpi_os_prepare_sleep = func;
1707 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
1711 if (__acpi_os_prepare_extended_sleep)
1712 rc = __acpi_os_prepare_extended_sleep(sleep_state,
1717 return AE_CTRL_SKIP;
1722 void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
1723 u32 val_a, u32 val_b))
1725 __acpi_os_prepare_extended_sleep = func;