ACPI / osl: Remove deprecated acpi_get_table_with_size()/early_acpi_os_unmap_memory()
platform/kernel/linux-rpi.git: drivers/acpi/osl.c
/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "internal.h"

#define _COMPONENT              ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
        acpi_osd_exec_callback function;
        void *context;
        struct work_struct work;
};

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif                          /*ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
                                      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
                                      u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
bool acpi_permanent_mmap = false;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
        struct list_head list;
        void __iomem *virt;
        acpi_physical_address phys;
        acpi_size size;
        unsigned long refcount;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);

static void __init acpi_request_region (struct acpi_generic_address *gas,
        unsigned int length, char *desc)
{
        u64 addr;

        /* Handle possible alignment issues */
        memcpy(&addr, &gas->address, sizeof(addr));
        if (!addr || !length)
                return;

        /* Resources are never freed */
        if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
                request_region(addr, length, desc);
        else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
        acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
                "ACPI PM1a_EVT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
                "ACPI PM1b_EVT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
                "ACPI PM1a_CNT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
                "ACPI PM1b_CNT_BLK");

        if (acpi_gbl_FADT.pm_timer_length == 4)
                acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

        acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
                "ACPI PM2_CNT_BLK");

        /* Length of GPE blocks must be a non-negative multiple of 2 */

        if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
                               acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

        if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
                               acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

        return 0;
}
fs_initcall_sync(acpi_reserve_resources);

void acpi_os_printf(const char *fmt, ...)
{
        va_list args;
        va_start(args, fmt);
        acpi_os_vprintf(fmt, args);
        va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);

void acpi_os_vprintf(const char *fmt, va_list args)
{
        static char buffer[512];

        vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                kdb_printf("%s", buffer);
        } else {
                if (printk_get_level(buffer))
                        printk("%s", buffer);
                else
                        printk(KERN_CONT "%s", buffer);
        }
#else
        if (acpi_debugger_write_log(buffer) < 0) {
                if (printk_get_level(buffer))
                        printk("%s", buffer);
                else
                        printk(KERN_CONT "%s", buffer);
        }
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
        if (kstrtoul(arg, 16, &acpi_rsdp))
                return -EINVAL;
        return 0;
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
#ifdef CONFIG_KEXEC
        if (acpi_rsdp)
                return acpi_rsdp;
#endif

        if (efi_enabled(EFI_CONFIG_TABLES)) {
                if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi20;
                else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi;
                else {
                        printk(KERN_ERR PREFIX
                               "System description tables not found\n");
                        return 0;
                }
        } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
                acpi_physical_address pa = 0;

                acpi_find_root_pointer(&pa);
                return pa;
        }

        return 0;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
        struct acpi_ioremap *map;

        list_for_each_entry_rcu(map, &acpi_ioremaps, list)
                if (map->phys <= phys &&
                    phys + size <= map->phys + map->size)
                        return map;

        return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
        struct acpi_ioremap *map;

        map = acpi_map_lookup(phys, size);
        if (map)
                return map->virt + (phys - map->phys);

        return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
        struct acpi_ioremap *map;
        void __iomem *virt = NULL;

        mutex_lock(&acpi_ioremap_lock);
        map = acpi_map_lookup(phys, size);
        if (map) {
                virt = map->virt + (phys - map->phys);
                map->refcount++;
        }
        mutex_unlock(&acpi_ioremap_lock);
        return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
        struct acpi_ioremap *map;

        list_for_each_entry_rcu(map, &acpi_ioremaps, list)
                if (map->virt <= virt &&
                    virt + size <= map->virt + map->size)
                        return map;

        return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
        unsigned long pfn;

        pfn = pg_off >> PAGE_SHIFT;
        if (should_use_kmap(pfn)) {
                if (pg_sz > PAGE_SIZE)
                        return NULL;
                return (void __iomem __force *)kmap(pfn_to_page(pfn));
        } else
                return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
        unsigned long pfn;

        pfn = pg_off >> PAGE_SHIFT;
        if (should_use_kmap(pfn))
                kunmap(pfn_to_page(pfn));
        else
                iounmap(vaddr);
}

/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings.  If found, get a reference to it and return a pointer to it (its
 * virtual address).  If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
void __iomem *__ref
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
        struct acpi_ioremap *map;
        void __iomem *virt;
        acpi_physical_address pg_off;
        acpi_size pg_sz;

        if (phys > ULONG_MAX) {
                printk(KERN_ERR PREFIX "Cannot map memory that high\n");
                return NULL;
        }

        if (!acpi_permanent_mmap)
                return __acpi_map_table((unsigned long)phys, size);

        mutex_lock(&acpi_ioremap_lock);
        /* Check if there's a suitable mapping already. */
        map = acpi_map_lookup(phys, size);
        if (map) {
                map->refcount++;
                goto out;
        }

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (!map) {
                mutex_unlock(&acpi_ioremap_lock);
                return NULL;
        }

        pg_off = round_down(phys, PAGE_SIZE);
        pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
        virt = acpi_map(pg_off, pg_sz);
        if (!virt) {
                mutex_unlock(&acpi_ioremap_lock);
                kfree(map);
                return NULL;
        }

        INIT_LIST_HEAD(&map->list);
        map->virt = virt;
        map->phys = pg_off;
        map->size = pg_sz;
        map->refcount = 1;

        list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
        mutex_unlock(&acpi_ioremap_lock);
        return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
        return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
        if (!--map->refcount)
                list_del_rcu(&map->list);
}

static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
        if (!map->refcount) {
                synchronize_rcu_expedited();
                acpi_unmap(map->phys, map->virt);
                kfree(map);
        }
}

/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and unmap it if there are no more active
 * references to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done.  Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
        struct acpi_ioremap *map;

        if (!acpi_permanent_mmap) {
                __acpi_unmap_table(virt, size);
                return;
        }

        mutex_lock(&acpi_ioremap_lock);
        map = acpi_map_lookup_virt(virt, size);
        if (!map) {
                mutex_unlock(&acpi_ioremap_lock);
                WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
                return;
        }
        acpi_os_drop_map_ref(map);
        mutex_unlock(&acpi_ioremap_lock);

        acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
        return acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
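
/*
 * Illustrative sketch (not part of the original file): a typical
 * map/read/unmap round trip through the refcounted mapping list above.
 * The helper and the physical address are hypothetical; real callers
 * pass an address taken from a firmware table entry.
 */
static int __maybe_unused acpi_osl_map_example(acpi_physical_address pa)
{
        void *virt;
        u8 first_byte;

        virt = acpi_os_map_memory(pa, 16);      /* takes a reference */
        if (!virt)
                return -ENOMEM;

        first_byte = *(u8 *)virt;               /* use the mapping */

        acpi_os_unmap_memory(virt, 16);         /* drops the reference */
        return first_byte;
}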

int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
        u64 addr;
        void __iomem *virt;

        if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
                return 0;

        /* Handle possible alignment issues */
        memcpy(&addr, &gas->address, sizeof(addr));
        if (!addr || !gas->bit_width)
                return -EINVAL;

        virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
        if (!virt)
                return -EIO;

        return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
        u64 addr;
        struct acpi_ioremap *map;

        if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
                return;

        /* Handle possible alignment issues */
        memcpy(&addr, &gas->address, sizeof(addr));
        if (!addr || !gas->bit_width)
                return;

        mutex_lock(&acpi_ioremap_lock);
        map = acpi_map_lookup(addr, gas->bit_width / 8);
        if (!map) {
                mutex_unlock(&acpi_ioremap_lock);
                return;
        }
        acpi_os_drop_map_ref(map);
        mutex_unlock(&acpi_ioremap_lock);

        acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
        if (!phys || !virt)
                return AE_BAD_PARAMETER;

        *phys = virt_to_phys(virt);

        return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
        acpi_rev_override = true;
        return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override       false
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
                            acpi_string *new_val)
{
        if (!init_val || !new_val)
                return AE_BAD_PARAMETER;

        *new_val = NULL;
        if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
                printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
                       acpi_os_name);
                *new_val = acpi_os_name;
        }

        if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
                printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
                *new_val = (char *)5;
        }

        return AE_OK;
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
        u32 handled;

        handled = (*acpi_irq_handler) (acpi_irq_context);

        if (handled) {
                acpi_irq_handled++;
                return IRQ_HANDLED;
        } else {
                acpi_irq_not_handled++;
                return IRQ_NONE;
        }
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
                                  void *context)
{
        unsigned int irq;

        acpi_irq_stats_init();

        /*
         * ACPI interrupts different from the SCI in our copy of the FADT are
         * not supported.
         */
        if (gsi != acpi_gbl_FADT.sci_interrupt)
                return AE_BAD_PARAMETER;

        if (acpi_irq_handler)
                return AE_ALREADY_ACQUIRED;

        if (acpi_gsi_to_irq(gsi, &irq) < 0) {
                printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
                       gsi);
                return AE_OK;
        }

        acpi_irq_handler = handler;
        acpi_irq_context = context;
        if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
                printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
                acpi_irq_handler = NULL;
                return AE_NOT_ACQUIRED;
        }
        acpi_sci_irq = irq;

        return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
        if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
                return AE_BAD_PARAMETER;

        free_irq(acpi_sci_irq, acpi_irq);
        acpi_irq_handler = NULL;
        acpi_sci_irq = INVALID_ACPI_IRQ;

        return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
        msleep(ms);
}

void acpi_os_stall(u32 us)
{
        while (us) {
                u32 delay = 1000;

                if (delay > us)
                        delay = us;
                udelay(delay);
                touch_nmi_watchdog();
                us -= delay;
        }
}

/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
u64 acpi_os_get_timer(void)
{
        u64 time_ns = ktime_to_ns(ktime_get());
        do_div(time_ns, 100);
        return time_ns;
}

acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
        u32 dummy;

        if (!value)
                value = &dummy;

        *value = 0;
        if (width <= 8) {
                *(u8 *) value = inb(port);
        } else if (width <= 16) {
                *(u16 *) value = inw(port);
        } else if (width <= 32) {
                *(u32 *) value = inl(port);
        } else {
                BUG();
        }

        return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
        if (width <= 8) {
                outb(value, port);
        } else if (width <= 16) {
                outw(value, port);
        } else if (width <= 32) {
                outl(value, port);
        } else {
                BUG();
        }

        return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);
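
/*
 * Illustrative sketch (not part of the original file): the port
 * accessors above dispatch on the access width in bits, so asking for
 * an 8-bit read ends up in inb().  The helper and port number below
 * are made up for demonstration only.
 */
static acpi_status __maybe_unused acpi_osl_port_example(void)
{
        u32 val;
        acpi_status status;

        status = acpi_os_read_port(0x80, &val, 8);      /* inb() path */
        if (ACPI_FAILURE(status))
                return status;

        return acpi_os_write_port(0x80, val, 8);        /* outb() path */
}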

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
        void __iomem *virt_addr;
        unsigned int size = width / 8;
        bool unmap = false;
        u64 dummy;

        rcu_read_lock();
        virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
        if (!virt_addr) {
                rcu_read_unlock();
                virt_addr = acpi_os_ioremap(phys_addr, size);
                if (!virt_addr)
                        return AE_BAD_ADDRESS;
                unmap = true;
        }

        if (!value)
                value = &dummy;

        switch (width) {
        case 8:
                *(u8 *) value = readb(virt_addr);
                break;
        case 16:
                *(u16 *) value = readw(virt_addr);
                break;
        case 32:
                *(u32 *) value = readl(virt_addr);
                break;
        case 64:
                *(u64 *) value = readq(virt_addr);
                break;
        default:
                BUG();
        }

        if (unmap)
                iounmap(virt_addr);
        else
                rcu_read_unlock();

        return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
        void __iomem *virt_addr;
        unsigned int size = width / 8;
        bool unmap = false;

        rcu_read_lock();
        virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
        if (!virt_addr) {
                rcu_read_unlock();
                virt_addr = acpi_os_ioremap(phys_addr, size);
                if (!virt_addr)
                        return AE_BAD_ADDRESS;
                unmap = true;
        }

        switch (width) {
        case 8:
                writeb(value, virt_addr);
                break;
        case 16:
                writew(value, virt_addr);
                break;
        case 32:
                writel(value, virt_addr);
                break;
        case 64:
                writeq(value, virt_addr);
                break;
        default:
                BUG();
        }

        if (unmap)
                iounmap(virt_addr);
        else
                rcu_read_unlock();

        return AE_OK;
}

acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
                               u64 *value, u32 width)
{
        int result, size;
        u32 value32;

        if (!value)
                return AE_BAD_PARAMETER;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                return AE_ERROR;
        }

        result = raw_pci_read(pci_id->segment, pci_id->bus,
                                PCI_DEVFN(pci_id->device, pci_id->function),
                                reg, size, &value32);
        *value = value32;

        return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
                                u64 value, u32 width)
{
        int result, size;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                return AE_ERROR;
        }

        result = raw_pci_write(pci_id->segment, pci_id->bus,
                                PCI_DEVFN(pci_id->device, pci_id->function),
                                reg, size, value);

        return (result ? AE_ERROR : AE_OK);
}

static void acpi_os_execute_deferred(struct work_struct *work)
{
        struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

        dpc->function(dpc->context);
        kfree(dpc);
}

#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

int acpi_register_debugger(struct module *owner,
                           const struct acpi_debugger_ops *ops)
{
        int ret = 0;

        mutex_lock(&acpi_debugger.lock);
        if (acpi_debugger.ops) {
                ret = -EBUSY;
                goto err_lock;
        }

        acpi_debugger.owner = owner;
        acpi_debugger.ops = ops;

err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);

void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
        mutex_lock(&acpi_debugger.lock);
        if (ops == acpi_debugger.ops) {
                acpi_debugger.ops = NULL;
                acpi_debugger.owner = NULL;
        }
        mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);

int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
        int ret;
        int (*func)(acpi_osd_exec_callback, void *);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->create_thread;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func(function, context);

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}

ssize_t acpi_debugger_write_log(const char *msg)
{
        ssize_t ret;
        ssize_t (*func)(const char *);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->write_log;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func(msg);

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}

ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
        ssize_t ret;
        ssize_t (*func)(char *, size_t);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->read_cmd;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func(buffer, buffer_length);

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}

int acpi_debugger_wait_command_ready(void)
{
        int ret;
        int (*func)(bool, char *, size_t);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->wait_command_ready;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func(acpi_gbl_method_executing,
                   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}

int acpi_debugger_notify_command_complete(void)
{
        int ret;
        int (*func)(void);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->notify_command_complete;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func();

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}

int __init acpi_debugger_init(void)
{
        mutex_init(&acpi_debugger.lock);
        acpi_debugger_initialized = true;
        return 0;
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
                            acpi_osd_exec_callback function, void *context)
{
        acpi_status status = AE_OK;
        struct acpi_os_dpc *dpc;
        struct workqueue_struct *queue;
        int ret;
        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                          "Scheduling function [%p(%p)] for deferred execution.\n",
                          function, context));

        if (type == OSL_DEBUGGER_MAIN_THREAD) {
                ret = acpi_debugger_create_thread(function, context);
                if (ret) {
                        pr_err("Call to kthread_create() failed.\n");
                        status = AE_ERROR;
                }
                goto out_thread;
        }

        /*
         * Allocate/initialize DPC structure.  Note that this memory will be
         * freed by the callee.  The kernel handles the work_struct list in a
         * way that allows us to also free its memory inside the callee.
         * Because we may want to schedule several tasks with different
         * parameters, we can't use the approach some kernel code uses of
         * having a static work_struct.
         */

        dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
        if (!dpc)
                return AE_NO_MEMORY;

        dpc->function = function;
        dpc->context = context;

        /*
         * To prevent lockdep from complaining unnecessarily, make sure that
         * there is a different static lockdep key for each workqueue by using
         * INIT_WORK() for each of them separately.
         */
        if (type == OSL_NOTIFY_HANDLER) {
                queue = kacpi_notify_wq;
                INIT_WORK(&dpc->work, acpi_os_execute_deferred);
        } else if (type == OSL_GPE_HANDLER) {
                queue = kacpid_wq;
                INIT_WORK(&dpc->work, acpi_os_execute_deferred);
        } else {
                pr_err("Unsupported os_execute type %d.\n", type);
                status = AE_ERROR;
        }

        if (ACPI_FAILURE(status))
                goto err_workqueue;

        /*
         * On some machines, a software-initiated SMI causes corruption unless
         * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
         * typically it's done in GPE-related methods that are run via
         * workqueues, so we can avoid the known corruption cases by always
         * queueing on CPU 0.
         */
        ret = queue_work_on(0, queue, &dpc->work);
        if (!ret) {
                printk(KERN_ERR PREFIX
                          "Call to queue_work() failed.\n");
                status = AE_ERROR;
        }
err_workqueue:
        if (ACPI_FAILURE(status))
                kfree(dpc);
out_thread:
        return status;
}
EXPORT_SYMBOL(acpi_os_execute);
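
/*
 * Illustrative sketch (not part of the original file): deferring a
 * callback through acpi_os_execute().  The callback runs later from
 * one of the workqueues above, queued on CPU 0 as explained in the
 * comment in acpi_os_execute().  Both helpers here are hypothetical.
 */
static void acpi_osl_example_callback(void *context)
{
        pr_info("deferred ACPI work ran with context %p\n", context);
}

static acpi_status __maybe_unused acpi_osl_execute_example(void *context)
{
        return acpi_os_execute(OSL_NOTIFY_HANDLER,
                               acpi_osl_example_callback, context);
}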

void acpi_os_wait_events_complete(void)
{
        /*
         * Make sure the GPE handler or the fixed event handler is not used
         * on another CPU after removal.
         */
        if (acpi_sci_irq_valid())
                synchronize_hardirq(acpi_sci_irq);
        flush_workqueue(kacpid_wq);
        flush_workqueue(kacpi_notify_wq);
}

struct acpi_hp_work {
        struct work_struct work;
        struct acpi_device *adev;
        u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
        struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

        acpi_os_wait_events_complete();
        acpi_device_hotplug(hpw->adev, hpw->src);
        kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
        struct acpi_hp_work *hpw;

        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
                  adev, src));

        hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
        if (!hpw)
                return AE_NO_MEMORY;

        INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
        hpw->adev = adev;
        hpw->src = src;
        /*
         * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
         * the hotplug code may call driver .remove() functions, which may
         * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
         * these workqueues.
         */
        if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
                kfree(hpw);
                return AE_ERROR;
        }
        return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
        return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
        struct semaphore *sem = NULL;

        sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
        if (!sem)
                return AE_NO_MEMORY;

        sema_init(sem, initial_units);

        *handle = (acpi_handle *) sem;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
                          *handle, initial_units));

        return AE_OK;
}

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
        struct semaphore *sem = (struct semaphore *)handle;

        if (!sem)
                return AE_BAD_PARAMETER;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

        BUG_ON(!list_empty(&sem->wait_list));
        kfree(sem);
        sem = NULL;

        return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
        acpi_status status = AE_OK;
        struct semaphore *sem = (struct semaphore *)handle;
        long jiffies;
        int ret = 0;

        if (!acpi_os_initialized)
                return AE_OK;

        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;

        if (units > 1)
                return AE_SUPPORT;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
                          handle, units, timeout));

        if (timeout == ACPI_WAIT_FOREVER)
                jiffies = MAX_SCHEDULE_TIMEOUT;
        else
                jiffies = msecs_to_jiffies(timeout);

        ret = down_timeout(sem, jiffies);
        if (ret)
                status = AE_TIME;

        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                                  "Failed to acquire semaphore[%p|%d|%d], %s",
                                  handle, units, timeout,
                                  acpi_format_exception(status)));
        } else {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                                  "Acquired semaphore[%p|%d|%d]", handle,
                                  units, timeout));
        }

        return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
        struct semaphore *sem = (struct semaphore *)handle;

        if (!acpi_os_initialized)
                return AE_OK;

        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;

        if (units > 1)
                return AE_SUPPORT;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
                          units));

        up(sem);

        return AE_OK;
}
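
/*
 * Illustrative sketch (not part of the original file): the semaphore
 * OSL primitives above compose into the usual create/wait/signal/
 * delete lifecycle.  ACPICA itself drives them this way; the helper
 * below only exists to show the calling convention.
 */
static acpi_status __maybe_unused acpi_osl_sem_example(void)
{
        acpi_handle sem;
        acpi_status status;

        status = acpi_os_create_semaphore(1, 1, &sem);
        if (ACPI_FAILURE(status))
                return status;

        /* one unit, 100 ms timeout (ACPI_WAIT_FOREVER would block) */
        status = acpi_os_wait_semaphore(sem, 1, 100);
        if (ACPI_SUCCESS(status))
                acpi_os_signal_semaphore(sem, 1);

        return acpi_os_delete_semaphore(sem);
}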

acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                u32 chars;

                kdb_read(buffer, buffer_length);

                /* remove the trailing CR that kdb includes */
                chars = strlen(buffer) - 1;
                buffer[chars] = '\0';
        }
#else
        int ret;

        ret = acpi_debugger_read_cmd(buffer, buffer_length);
        if (ret < 0)
                return AE_ERROR;
        if (bytes_read)
                *bytes_read = ret;
#endif

        return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{
        int ret;

        ret = acpi_debugger_wait_command_ready();
        if (ret < 0)
                return AE_ERROR;
        return AE_OK;
}

acpi_status acpi_os_notify_command_complete(void)
{
        int ret;

        ret = acpi_debugger_notify_command_complete();
        if (ret < 0)
                return AE_ERROR;
        return AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
        switch (function) {
        case ACPI_SIGNAL_FATAL:
                printk(KERN_ERR PREFIX "Fatal opcode executed\n");
                break;
        case ACPI_SIGNAL_BREAKPOINT:
                /*
                 * AML Breakpoint
                 * ACPI spec. says to treat it as a NOP unless
                 * you are debugging.  So if/when we integrate
                 * AML debugger into the kernel debugger its
                 * hook will go here.  But until then it is
                 * not useful to print anything on breakpoints.
                 */
                break;
        default:
                break;
        }

        return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
        char *p = acpi_os_name;
        int count = ACPI_MAX_OVERRIDE_LEN - 1;

        if (!str || !*str)
                return 0;

        for (; count-- && *str; str++) {
                if (isalnum(*str) || *str == ' ' || *str == ':')
                        *p++ = *str;
                else if (*str == '\'' || *str == '"')
                        continue;
                else
                        break;
        }
        *p = 0;

        return 1;

}

__setup("acpi_os_name=", acpi_os_name_setup);

/*
 * Disable the auto-serialization of named object creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
        acpi_gbl_auto_serialize_methods = FALSE;
        pr_info("ACPI: auto-serialization disabled\n");

        return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/* Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and SystemMemory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> a driver trying to access the resources will not load
 *   - lax              (1)
 *     -> a driver trying to access the resources will load, but you
 *     get a system message that something might go wrong...
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
        if (str == NULL || *str == '\0')
                return 0;

        if (!strcmp("strict", str))
                acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
        else if (!strcmp("lax", str))
                acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
        else if (!strcmp("no", str))
                acpi_enforce_resources = ENFORCE_RESOURCES_NO;

        return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
        acpi_adr_space_type space_id;
        acpi_size length;
        u8 warn = 0;
        int clash = 0;

        if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
                return 0;
        if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
                return 0;

        if (res->flags & IORESOURCE_IO)
                space_id = ACPI_ADR_SPACE_SYSTEM_IO;
        else
                space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

        length = resource_size(res);
        if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
                warn = 1;
        clash = acpi_check_address_range(space_id, res->start, length, warn);

        if (clash) {
                if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
                        if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
                                printk(KERN_NOTICE "ACPI: This conflict may"
                                       " cause random problems and system"
                                       " instability\n");
                        printk(KERN_INFO "ACPI: If an ACPI driver is available"
                               " for this device, you should use it instead of"
                               " the native driver\n");
                }
                if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
                        return -EBUSY;
        }
        return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
                      const char *name)
{
        struct resource res = {
                .start = start,
                .end   = start + n - 1,
                .name  = name,
                .flags = IORESOURCE_IO,
        };

        return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
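
/*
 * Illustrative sketch (not part of the original file): a native driver
 * would typically call acpi_check_region() before claiming an I/O
 * range, so that acpi_enforce_resources=strict can veto the request.
 * The helper, the port range and the name below are hypothetical.
 */
static int __maybe_unused acpi_osl_check_region_example(void)
{
        int err;

        err = acpi_check_region(0x295, 6, "example-hwmon");
        if (err)
                return err;     /* conflicts with an ACPI OpRegion */

        if (!request_region(0x295, 6, "example-hwmon"))
                return -EBUSY;

        return 0;
}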

/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
        return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
        ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
        acpi_cpu_flags flags;
        spin_lock_irqsave(lockp, flags);
        return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
        spin_unlock_irqrestore(lockp, flags);
}
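
/*
 * Illustrative sketch (not part of the original file): the lock
 * routines above return and consume the saved IRQ flags, so a caller
 * must keep them paired, as in this hypothetical critical section.
 */
static void __maybe_unused acpi_osl_lock_example(acpi_spinlock lockp)
{
        acpi_cpu_flags flags;

        flags = acpi_os_acquire_lock(lockp);    /* disables local IRQs */
        /* ... critical section ... */
        acpi_os_release_lock(lockp, flags);     /* restores IRQ state */
}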

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
        *cache = kmem_cache_create(name, size, 0, 0, NULL);
        if (*cache == NULL)
                return AE_ERROR;
        else
                return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
        kmem_cache_shrink(cache);
        return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
        kmem_cache_destroy(cache);
        return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
        kmem_cache_free(cache, object);
        return (AE_OK);
}
#endif
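
/*
 * Illustrative sketch (not part of the original file): when
 * ACPI_USE_LOCAL_CACHE is not set, the cache calls above map straight
 * onto a slab cache, so a full lifecycle looks roughly like this.
 * The helper, the cache name and the sizes are hypothetical, and the
 * kmem_cache_zalloc() call assumes acpi_cache_t is struct kmem_cache,
 * as it is on Linux.
 */
static acpi_status __maybe_unused acpi_osl_cache_example(void)
{
        acpi_cache_t *cache;
        acpi_status status;
        void *obj;

        status = acpi_os_create_cache("example_cache", 64, 16, &cache);
        if (ACPI_FAILURE(status))
                return status;

        obj = kmem_cache_zalloc(cache, GFP_KERNEL);     /* allocation side */
        if (obj)
                acpi_os_release_object(cache, obj);     /* frees the object */

        return acpi_os_delete_cache(cache);
}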

static int __init acpi_no_static_ssdt_setup(char *s)
{
        acpi_gbl_disable_ssdt_table_install = TRUE;
        pr_info("ACPI: static SSDT installation disabled\n");

        return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
        printk(KERN_NOTICE PREFIX
               "ACPI: Predefined validation mechanism disabled\n");
        acpi_gbl_disable_auto_repair = TRUE;

        return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
        acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
        acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
        acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
        acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
        if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
                /*
                 * Use acpi_os_map_generic_address to pre-map the reset
                 * register if it's in system memory.
                 */
                int rv;

                rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
                pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
        }
        acpi_os_initialized = true;

        return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
        kacpid_wq = alloc_workqueue("kacpid", 0, 1);
        kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
        kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
        BUG_ON(!kacpid_wq);
        BUG_ON(!kacpi_notify_wq);
        BUG_ON(!kacpi_hotplug_wq);
        acpi_osi_init();
        return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
        if (acpi_irq_handler) {
                acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
                                                 acpi_irq_handler);
        }

        acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
        acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
        acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
        acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
        if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
                acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

        destroy_workqueue(kacpid_wq);
        destroy_workqueue(kacpi_notify_wq);
        destroy_workqueue(kacpi_hotplug_wq);

        return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
                                  u32 pm1b_control)
{
        int rc = 0;
        if (__acpi_os_prepare_sleep)
                rc = __acpi_os_prepare_sleep(sleep_state,
                                             pm1a_control, pm1b_control);
        if (rc < 0)
                return AE_ERROR;
        else if (rc > 0)
                return AE_CTRL_SKIP;

        return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
                               u32 pm1a_ctrl, u32 pm1b_ctrl))
{
        __acpi_os_prepare_sleep = func;
}
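
/*
 * Illustrative sketch (not part of the original file): a platform
 * layer (Xen does this in practice) can interpose on the final step
 * of the sleep sequence by registering a callback here.  Returning a
 * positive value makes acpi_os_prepare_sleep() return AE_CTRL_SKIP so
 * the native PM1x control writes are skipped; both helpers below are
 * hypothetical.
 */
static int __maybe_unused example_prepare_sleep(u8 sleep_state,
                                                u32 pm1a_ctrl, u32 pm1b_ctrl)
{
        pr_info("entering S%d\n", sleep_state);
        return 0;       /* 0: continue with the normal register writes */
}

static void __maybe_unused example_register_prepare_sleep(void)
{
        acpi_os_set_prepare_sleep(example_prepare_sleep);
}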

acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
                                  u32 val_b)
{
        int rc = 0;
        if (__acpi_os_prepare_extended_sleep)
                rc = __acpi_os_prepare_extended_sleep(sleep_state,
                                             val_a, val_b);
        if (rc < 0)
                return AE_ERROR;
        else if (rc > 0)
                return AE_CTRL_SKIP;

        return AE_OK;
}

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
                               u32 val_a, u32 val_b))
{
        __acpi_os_prepare_extended_sleep = func;
}