ACPI: tables: complete searching upon RSDP w/ bad checksum.
[platform/kernel/linux-rpi.git] / drivers / acpi / osl.c
1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *
8  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9  *
10  *  This program is free software; you can redistribute it and/or modify
11  *  it under the terms of the GNU General Public License as published by
12  *  the Free Software Foundation; either version 2 of the License, or
13  *  (at your option) any later version.
14  *
15  *  This program is distributed in the hope that it will be useful,
16  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  *  GNU General Public License for more details.
19  *
20  *  You should have received a copy of the GNU General Public License
21  *  along with this program; if not, write to the Free Software
22  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23  *
24  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25  *
26  */
27
28 #include <linux/module.h>
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/mm.h>
32 #include <linux/pci.h>
33 #include <linux/interrupt.h>
34 #include <linux/kmod.h>
35 #include <linux/delay.h>
36 #include <linux/dmi.h>
37 #include <linux/workqueue.h>
38 #include <linux/nmi.h>
39 #include <linux/acpi.h>
40 #include <acpi/acpi.h>
41 #include <asm/io.h>
42 #include <acpi/acpi_bus.h>
43 #include <acpi/processor.h>
44 #include <asm/uaccess.h>
45
46 #include <linux/efi.h>
47
48 #define _COMPONENT              ACPI_OS_SERVICES
49 ACPI_MODULE_NAME("osl");
50 #define PREFIX          "ACPI: "
51 struct acpi_os_dpc {
52         acpi_osd_exec_callback function;
53         void *context;
54         struct work_struct work;
55 };
56
57 #ifdef CONFIG_ACPI_CUSTOM_DSDT
58 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
59 #endif
60
61 #ifdef ENABLE_DEBUGGER
62 #include <linux/kdb.h>
63
64 /* stuff for debugger support */
65 int acpi_in_debugger;
66 EXPORT_SYMBOL(acpi_in_debugger);
67
68 extern char line_buf[80];
69 #endif                          /*ENABLE_DEBUGGER */
70
71 static unsigned int acpi_irq_irq;
72 static acpi_osd_handler acpi_irq_handler;
73 static void *acpi_irq_context;
74 static struct workqueue_struct *kacpid_wq;
75 static struct workqueue_struct *kacpi_notify_wq;
76
77 #define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
78 static char osi_additional_string[OSI_STRING_LENGTH_MAX];
79
80 static int osi_linux;           /* disable _OSI(Linux) by default */
81
82 #ifdef CONFIG_DMI
83 static struct __initdata dmi_system_id acpi_osl_dmi_table[];
84 #endif
85
86 static void __init acpi_request_region (struct acpi_generic_address *addr,
87         unsigned int length, char *desc)
88 {
89         struct resource *res;
90
91         if (!addr->address || !length)
92                 return;
93
94         if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
95                 res = request_region(addr->address, length, desc);
96         else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
97                 res = request_mem_region(addr->address, length, desc);
98 }
99
/*
 * Reserve the fixed-hardware register blocks declared in the FADT so they
 * are visible in the resource tree and other drivers will not claim them.
 * Always returns 0 so the initcall is treated as successful regardless of
 * individual reservation outcomes.
 */
static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	/* The PM timer block is only valid when the FADT declares 4 bytes. */
	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
132 device_initcall(acpi_reserve_resources);
133
/* Early OSL init: apply the DMI quirk table before ACPICA starts up. */
acpi_status __init acpi_os_initialize(void)
{
	dmi_check_system(acpi_osl_dmi_table);
	return AE_OK;
}
139
/*
 * Second-stage OSL init.  Verifies that raw PCI config-space access is
 * available (needed while walking the namespace) and creates the two
 * ACPI work queues.  Returns AE_NULL_ENTRY when PCI config space cannot
 * be accessed, AE_OK otherwise.
 */
acpi_status acpi_os_initialize1(void)
{
	/*
	 * Initialize PCI configuration space access, as we'll need to access
	 * it while walking the namespace (bus 0 and root bridges w/ _BBNs).
	 */
	if (!raw_pci_ops) {
		printk(KERN_ERR PREFIX
		       "Access to PCI configuration space unavailable\n");
		return AE_NULL_ENTRY;
	}
	kacpid_wq = create_singlethread_workqueue("kacpid");
	kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
	/* Workqueue creation failure this early is unrecoverable. */
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	return AE_OK;
}
157
/*
 * OSL teardown: release the SCI handler (if one was installed) and the
 * work queues created in acpi_os_initialize1().
 */
acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_irq_irq,
						 acpi_irq_handler);
	}

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);

	return AE_OK;
}
170
/* printf-style front end for ACPICA messages; defers to acpi_os_vprintf(). */
void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}

EXPORT_SYMBOL(acpi_os_printf);
180
181 void acpi_os_vprintf(const char *fmt, va_list args)
182 {
183         static char buffer[512];
184
185         vsprintf(buffer, fmt, args);
186
187 #ifdef ENABLE_DEBUGGER
188         if (acpi_in_debugger) {
189                 kdb_printf("%s", buffer);
190         } else {
191                 printk("%s", buffer);
192         }
193 #else
194         printk("%s", buffer);
195 #endif
196 }
197
198 acpi_physical_address __init acpi_os_get_root_pointer(void)
199 {
200         if (efi_enabled) {
201                 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
202                         return efi.acpi20;
203                 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
204                         return efi.acpi;
205                 else {
206                         printk(KERN_ERR PREFIX
207                                "System description tables not found\n");
208                         return 0;
209                 }
210         } else {
211                 acpi_physical_address pa = 0;
212
213                 acpi_find_root_pointer(&pa);
214                 return pa;
215         }
216 }
217
218 void __iomem *acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
219 {
220         if (phys > ULONG_MAX) {
221                 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
222                 return NULL;
223         }
224         if (acpi_gbl_permanent_mmap)
225                 /*
226                 * ioremap checks to ensure this is in reserved space
227                 */
228                 return ioremap((unsigned long)phys, size);
229         else
230                 return __acpi_map_table((unsigned long)phys, size);
231 }
232 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
233
234 void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
235 {
236         if (acpi_gbl_permanent_mmap) {
237                 iounmap(virt);
238         }
239 }
240 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
241
242 #ifdef ACPI_FUTURE_USAGE
/*
 * Translate a kernel virtual address to physical for ACPICA.  Only
 * meaningful for directly-mapped pointers, per virt_to_phys() semantics.
 */
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
253 #endif
254
255 #define ACPI_MAX_OVERRIDE_LEN 100
256
257 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
258
259 acpi_status
260 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
261                             acpi_string * new_val)
262 {
263         if (!init_val || !new_val)
264                 return AE_BAD_PARAMETER;
265
266         *new_val = NULL;
267         if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
268                 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
269                        acpi_os_name);
270                 *new_val = acpi_os_name;
271         }
272
273         return AE_OK;
274 }
275
/*
 * Allow a compiled-in table to replace one supplied by the firmware.
 * Only the DSDT can be overridden, and only when the kernel was built
 * with CONFIG_ACPI_CUSTOM_DSDT.  *new_table is set to NULL when no
 * override applies.
 */
acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
		       struct acpi_table_header ** new_table)
{
	if (!existing_table || !new_table)
		return AE_BAD_PARAMETER;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
		*new_table = (struct acpi_table_header *)AmlCode;
	else
		*new_table = NULL;
#else
	*new_table = NULL;
#endif
	return AE_OK;
}
293
/* SCI interrupt entry: forward to the handler ACPICA registered. */
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	return (*acpi_irq_handler) (acpi_irq_context) ? IRQ_HANDLED : IRQ_NONE;
}
298
299 acpi_status
300 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
301                                   void *context)
302 {
303         unsigned int irq;
304
305         /*
306          * Ignore the GSI from the core, and use the value in our copy of the
307          * FADT. It may not be the same if an interrupt source override exists
308          * for the SCI.
309          */
310         gsi = acpi_gbl_FADT.sci_interrupt;
311         if (acpi_gsi_to_irq(gsi, &irq) < 0) {
312                 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
313                        gsi);
314                 return AE_OK;
315         }
316
317         acpi_irq_handler = handler;
318         acpi_irq_context = context;
319         if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
320                 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
321                 return AE_NOT_ACQUIRED;
322         }
323         acpi_irq_irq = irq;
324
325         return AE_OK;
326 }
327
/*
 * Remove the SCI handler.  @handler is unused here; the IRQ number
 * alone identifies the registration.  A zero IRQ means none installed.
 */
acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
	if (irq) {
		/* dev_id must match the cookie passed to request_irq() */
		free_irq(irq, acpi_irq);
		acpi_irq_handler = NULL;
		acpi_irq_irq = 0;
	}

	return AE_OK;
}
338
339 /*
340  * Running in interpreter thread context, safe to sleep
341  */
342
343 void acpi_os_sleep(acpi_integer ms)
344 {
345         schedule_timeout_interruptible(msecs_to_jiffies(ms));
346 }
347
348 EXPORT_SYMBOL(acpi_os_sleep);
349
350 void acpi_os_stall(u32 us)
351 {
352         while (us) {
353                 u32 delay = 1000;
354
355                 if (delay > us)
356                         delay = us;
357                 udelay(delay);
358                 touch_nmi_watchdog();
359                 us -= delay;
360         }
361 }
362
363 EXPORT_SYMBOL(acpi_os_stall);
364
365 /*
366  * Support ACPI 3.0 AML Timer operand
367  * Returns 64-bit free-running, monotonically increasing timer
368  * with 100ns granularity
369  */
/*
 * NOTE: stub implementation.  Returns a simple monotonically increasing
 * counter rather than real 100 ns-granularity time, and logs an error
 * once on first use (while t is still zero).
 */
u64 acpi_os_get_timer(void)
{
	static u64 t;

#ifdef	CONFIG_HPET
	/* TBD: use HPET if available */
#endif

#ifdef	CONFIG_X86_PM_TIMER
	/* TBD: default to PM timer if HPET was not available */
#endif
	if (!t)
		printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");

	return ++t;
}
386
387 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
388 {
389         u32 dummy;
390
391         if (!value)
392                 value = &dummy;
393
394         switch (width) {
395         case 8:
396                 *(u8 *) value = inb(port);
397                 break;
398         case 16:
399                 *(u16 *) value = inw(port);
400                 break;
401         case 32:
402                 *(u32 *) value = inl(port);
403                 break;
404         default:
405                 BUG();
406         }
407
408         return AE_OK;
409 }
410
411 EXPORT_SYMBOL(acpi_os_read_port);
412
413 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
414 {
415         switch (width) {
416         case 8:
417                 outb(value, port);
418                 break;
419         case 16:
420                 outw(value, port);
421                 break;
422         case 32:
423                 outl(value, port);
424                 break;
425         default:
426                 BUG();
427         }
428
429         return AE_OK;
430 }
431
432 EXPORT_SYMBOL(acpi_os_write_port);
433
434 acpi_status
435 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
436 {
437         u32 dummy;
438         void __iomem *virt_addr;
439
440         virt_addr = ioremap(phys_addr, width);
441         if (!value)
442                 value = &dummy;
443
444         switch (width) {
445         case 8:
446                 *(u8 *) value = readb(virt_addr);
447                 break;
448         case 16:
449                 *(u16 *) value = readw(virt_addr);
450                 break;
451         case 32:
452                 *(u32 *) value = readl(virt_addr);
453                 break;
454         default:
455                 BUG();
456         }
457
458         iounmap(virt_addr);
459
460         return AE_OK;
461 }
462
463 acpi_status
464 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
465 {
466         void __iomem *virt_addr;
467
468         virt_addr = ioremap(phys_addr, width);
469
470         switch (width) {
471         case 8:
472                 writeb(value, virt_addr);
473                 break;
474         case 16:
475                 writew(value, virt_addr);
476                 break;
477         case 32:
478                 writel(value, virt_addr);
479                 break;
480         default:
481                 BUG();
482         }
483
484         iounmap(virt_addr);
485
486         return AE_OK;
487 }
488
489 acpi_status
490 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
491                                void *value, u32 width)
492 {
493         int result, size;
494
495         if (!value)
496                 return AE_BAD_PARAMETER;
497
498         switch (width) {
499         case 8:
500                 size = 1;
501                 break;
502         case 16:
503                 size = 2;
504                 break;
505         case 32:
506                 size = 4;
507                 break;
508         default:
509                 return AE_ERROR;
510         }
511
512         BUG_ON(!raw_pci_ops);
513
514         result = raw_pci_ops->read(pci_id->segment, pci_id->bus,
515                                    PCI_DEVFN(pci_id->device, pci_id->function),
516                                    reg, size, value);
517
518         return (result ? AE_ERROR : AE_OK);
519 }
520
521 EXPORT_SYMBOL(acpi_os_read_pci_configuration);
522
523 acpi_status
524 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
525                                 acpi_integer value, u32 width)
526 {
527         int result, size;
528
529         switch (width) {
530         case 8:
531                 size = 1;
532                 break;
533         case 16:
534                 size = 2;
535                 break;
536         case 32:
537                 size = 4;
538                 break;
539         default:
540                 return AE_ERROR;
541         }
542
543         BUG_ON(!raw_pci_ops);
544
545         result = raw_pci_ops->write(pci_id->segment, pci_id->bus,
546                                     PCI_DEVFN(pci_id->device, pci_id->function),
547                                     reg, size, value);
548
549         return (result ? AE_ERROR : AE_OK);
550 }
551
/* TODO: Change code to take advantage of driver model more */
/*
 * Walk up the namespace from @chandle to @rhandle, refining *id
 * (device/function, and bus when crossing bridges) from each ancestor's
 * _ADR and from PCI config space.  Recursive: ancestors are processed
 * first so bus numbers propagate downward toward the leaf device.
 */
static void acpi_os_derive_pci_id_2(acpi_handle rhandle,	/* upper bound  */
				    acpi_handle chandle,	/* current node */
				    struct acpi_pci_id **id,
				    int *is_bridge, u8 * bus_number)
{
	acpi_handle handle;
	struct acpi_pci_id *pci_id = *id;
	acpi_status status;
	unsigned long temp;
	acpi_object_type type;
	u8 tu8;

	acpi_get_parent(chandle, &handle);
	if (handle != rhandle) {
		/* Recurse toward the root first, then refine at this level. */
		acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
					bus_number);

		status = acpi_get_type(handle, &type);
		if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
			return;

		/* _ADR packs device in the high word, function in the low. */
		status =
		    acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
					  &temp);
		if (ACPI_SUCCESS(status)) {
			pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
			pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));

			if (*is_bridge)
				pci_id->bus = *bus_number;

			/* any nicer way to get bus number of bridge ? */
			/* Config offset 0x0e = header type: 1 is a PCI-PCI
			 * bridge, 2 a CardBus bridge. */
			status =
			    acpi_os_read_pci_configuration(pci_id, 0x0e, &tu8,
							   8);
			if (ACPI_SUCCESS(status)
			    && ((tu8 & 0x7f) == 1 || (tu8 & 0x7f) == 2)) {
				/* 0x18 = primary bus number register */
				status =
				    acpi_os_read_pci_configuration(pci_id, 0x18,
								   &tu8, 8);
				if (!ACPI_SUCCESS(status)) {
					/* Certainly broken...  FIX ME */
					return;
				}
				*is_bridge = 1;
				pci_id->bus = tu8;
				/* 0x19 = secondary bus number register */
				status =
				    acpi_os_read_pci_configuration(pci_id, 0x19,
								   &tu8, 8);
				if (ACPI_SUCCESS(status)) {
					*bus_number = tu8;
				}
			} else
				*is_bridge = 0;
		}
	}
}
610
/*
 * Public entry point: derive the PCI address of the device at @chandle
 * by walking up to @rhandle, starting from the bus number already in
 * *id.  is_bridge starts at 1 so the root bus number is trusted.
 */
void acpi_os_derive_pci_id(acpi_handle rhandle,	/* upper bound  */
			   acpi_handle chandle,	/* current node */
			   struct acpi_pci_id **id)
{
	int is_bridge = 1;
	u8 bus_number = (*id)->bus;

	acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
}
620
621 static void acpi_os_execute_deferred(struct work_struct *work)
622 {
623         struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
624         if (!dpc) {
625                 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
626                 return;
627         }
628
629         dpc->function(dpc->context);
630         kfree(dpc);
631
632         /* Yield cpu to notify thread */
633         cond_resched();
634
635         return;
636 }
637
638 static void acpi_os_execute_notify(struct work_struct *work)
639 {
640         struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
641
642         if (!dpc) {
643                 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
644                 return;
645         }
646
647         dpc->function(dpc->context);
648
649         kfree(dpc);
650
651         return;
652 }
653
654 /*******************************************************************************
655  *
656  * FUNCTION:    acpi_os_execute
657  *
658  * PARAMETERS:  Type               - Type of the callback
659  *              Function           - Function to be executed
660  *              Context            - Function parameters
661  *
662  * RETURN:      Status
663  *
664  * DESCRIPTION: Depending on type, either queues function for deferred execution or
665  *              immediately executes function on a separate thread.
666  *
667  ******************************************************************************/
668
acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (!function)
		return AE_BAD_PARAMETER;

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list  in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	/* GFP_ATOMIC: this path may be reached from atomic context. */
	dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return_ACPI_STATUS(AE_NO_MEMORY);

	dpc->function = function;
	dpc->context = context;

	/* Notify handlers go to their own queue (kacpi_notify_wq);
	 * everything else runs on kacpid_wq. */
	if (type == OSL_NOTIFY_HANDLER) {
		INIT_WORK(&dpc->work, acpi_os_execute_notify);
		if (!queue_work(kacpi_notify_wq, &dpc->work)) {
			status = AE_ERROR;
			kfree(dpc);	/* not queued, so we still own it */
		}
	} else {
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
		if (!queue_work(kacpid_wq, &dpc->work)) {
			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
				  "Call to queue_work() failed.\n"));
			status = AE_ERROR;
			kfree(dpc);
		}
	}
	return_ACPI_STATUS(status);
}
715
716 EXPORT_SYMBOL(acpi_os_execute);
717
/*
 * Wait for previously queued deferred work to finish; @context is
 * unused.  NOTE(review): only kacpid_wq is flushed — work queued on
 * kacpi_notify_wq is not waited for.  Confirm whether that is intended.
 */
void acpi_os_wait_events_complete(void *context)
{
	flush_workqueue(kacpid_wq);
}

EXPORT_SYMBOL(acpi_os_wait_events_complete);
724
/*
 * Initialize the spinlock referenced by @handle.  Despite ACPICA's
 * "create" naming, no memory is allocated here — the storage must be
 * supplied by the caller.
 */
acpi_status acpi_os_create_lock(acpi_spinlock * handle)
{
	/* NOTE(review): *handle is passed straight to spin_lock_init(), so
	 * the caller must already point it at valid storage — no allocation
	 * happens here despite the "create" naming.  Confirm against the
	 * acpi_spinlock definition for this kernel. */
	spin_lock_init(*handle);

	return AE_OK;
}
734
735 /*
736  * Deallocate the memory for a spinlock.
737  */
738 void acpi_os_delete_lock(acpi_spinlock handle)
739 {
740         return;
741 }
742
743 acpi_status
744 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
745 {
746         struct semaphore *sem = NULL;
747
748
749         sem = acpi_os_allocate(sizeof(struct semaphore));
750         if (!sem)
751                 return AE_NO_MEMORY;
752         memset(sem, 0, sizeof(struct semaphore));
753
754         sema_init(sem, initial_units);
755
756         *handle = (acpi_handle *) sem;
757
758         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
759                           *handle, initial_units));
760
761         return AE_OK;
762 }
763
764 EXPORT_SYMBOL(acpi_os_create_semaphore);
765
766 /*
767  * TODO: A better way to delete semaphores?  Linux doesn't have a
768  * 'delete_semaphore()' function -- may result in an invalid
769  * pointer dereference for non-synchronized consumers.  Should
770  * we at least check for blocked threads and signal/cancel them?
771  */
772
773 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
774 {
775         struct semaphore *sem = (struct semaphore *)handle;
776
777
778         if (!sem)
779                 return AE_BAD_PARAMETER;
780
781         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
782
783         kfree(sem);
784         sem = NULL;
785
786         return AE_OK;
787 }
788
789 EXPORT_SYMBOL(acpi_os_delete_semaphore);
790
791 /*
792  * TODO: The kernel doesn't have a 'down_timeout' function -- had to
793  * improvise.  The process is to sleep for one scheduler quantum
794  * until the semaphore becomes available.  Downside is that this
795  * may result in starvation for timeout-based waits when there's
796  * lots of semaphore activity.
797  *
798  * TODO: Support for units > 1?
799  */
/*
 * Acquire @units (only 1 supported) of the semaphore, waiting at most
 * @timeout ms (0 = no wait, ACPI_WAIT_FOREVER = block).  Timed waits
 * poll down_trylock() roughly once per jiffy, so the effective timeout
 * is quantized to scheduler ticks.  Returns AE_TIME when the semaphore
 * could not be acquired in time.
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	int ret = 0;


	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	/*
	 * This can be called during resume with interrupts off.
	 * Like boot-time, we should be single threaded and will
	 * always get the lock if we try -- timeout or not.
	 * If this doesn't succeed, then we will oops courtesy of
	 * might_sleep() in down().
	 */
	if (!down_trylock(sem))
		return AE_OK;

	switch (timeout) {
		/*
		 * No Wait:
		 * --------
		 * A zero timeout value indicates that we shouldn't wait - just
		 * acquire the semaphore if available otherwise return AE_TIME
		 * (a.k.a. 'would block').
		 */
	case 0:
		if (down_trylock(sem))
			status = AE_TIME;
		break;

		/*
		 * Wait Indefinitely:
		 * ------------------
		 */
	case ACPI_WAIT_FOREVER:
		down(sem);
		break;

		/*
		 * Wait w/ Timeout:
		 * ----------------
		 */
	default:
		// TODO: A better timeout algorithm?
		{
			int i = 0;
			/* one scheduler quantum, in milliseconds */
			static const int quantum_ms = 1000 / HZ;

			ret = down_trylock(sem);
			for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
				schedule_timeout_interruptible(1);
				ret = down_trylock(sem);
			}

			if (ret != 0)
				status = AE_TIME;
		}
		break;
	}

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}

EXPORT_SYMBOL(acpi_os_wait_semaphore);
884
885 /*
886  * TODO: Support for units > 1?
887  */
888 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
889 {
890         struct semaphore *sem = (struct semaphore *)handle;
891
892
893         if (!sem || (units < 1))
894                 return AE_BAD_PARAMETER;
895
896         if (units > 1)
897                 return AE_SUPPORT;
898
899         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
900                           units));
901
902         up(sem);
903
904         return AE_OK;
905 }
906
907 EXPORT_SYMBOL(acpi_os_signal_semaphore);
908
909 #ifdef ACPI_FUTURE_USAGE
910 u32 acpi_os_get_line(char *buffer)
911 {
912
913 #ifdef ENABLE_DEBUGGER
914         if (acpi_in_debugger) {
915                 u32 chars;
916
917                 kdb_read(buffer, sizeof(line_buf));
918
919                 /* remove the CR kdb includes */
920                 chars = strlen(buffer) - 1;
921                 buffer[chars] = '\0';
922         }
923 #endif
924
925         return 0;
926 }
927 #endif                          /*  ACPI_FUTURE_USAGE  */
928
929 acpi_status acpi_os_signal(u32 function, void *info)
930 {
931         switch (function) {
932         case ACPI_SIGNAL_FATAL:
933                 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
934                 break;
935         case ACPI_SIGNAL_BREAKPOINT:
936                 /*
937                  * AML Breakpoint
938                  * ACPI spec. says to treat it as a NOP unless
939                  * you are debugging.  So if/when we integrate
940                  * AML debugger into the kernel debugger its
941                  * hook will go here.  But until then it is
942                  * not useful to print anything on breakpoints.
943                  */
944                 break;
945         default:
946                 break;
947         }
948
949         return AE_OK;
950 }
951
952 EXPORT_SYMBOL(acpi_os_signal);
953
954 static int __init acpi_os_name_setup(char *str)
955 {
956         char *p = acpi_os_name;
957         int count = ACPI_MAX_OVERRIDE_LEN - 1;
958
959         if (!str || !*str)
960                 return 0;
961
962         for (; count-- && str && *str; str++) {
963                 if (isalnum(*str) || *str == ' ' || *str == ':')
964                         *p++ = *str;
965                 else if (*str == '\'' || *str == '"')
966                         continue;
967                 else
968                         break;
969         }
970         *p = 0;
971
972         return 1;
973
974 }
975
976 __setup("acpi_os_name=", acpi_os_name_setup);
977
978 static void enable_osi_linux(int enable) {
979
980         if (osi_linux != enable)
981                 printk(KERN_INFO PREFIX "%sabled _OSI(Linux)\n",
982                         enable ? "En": "Dis");
983
984         osi_linux = enable;
985         return;
986 }
987
988 /*
989  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
990  *
991  * empty string disables _OSI
992  * string starting with '!' disables that string
993  * otherwise string is added to list, augmenting built-in strings
994  */
995 static int __init acpi_osi_setup(char *str)
996 {
997         if (str == NULL || *str == '\0') {
998                 printk(KERN_INFO PREFIX "_OSI method disabled\n");
999                 acpi_gbl_create_osi_method = FALSE;
1000         } else if (!strcmp("!Linux", str)) {
1001                 enable_osi_linux(0);
1002         } else if (*str == '!') {
1003                 if (acpi_osi_invalidate(++str) == AE_OK)
1004                         printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1005         } else if (!strcmp("Linux", str)) {
1006                 enable_osi_linux(1);
1007         } else if (*osi_additional_string == '\0') {
1008                 strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
1009                 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1010         }
1011
1012         return 1;
1013 }
1014
1015 __setup("acpi_osi=", acpi_osi_setup);
1016
1017 /* enable serialization to combat AE_ALREADY_EXISTS errors */
1018 static int __init acpi_serialize_setup(char *str)
1019 {
1020         printk(KERN_INFO PREFIX "serialize enabled\n");
1021
1022         acpi_gbl_all_methods_serialized = TRUE;
1023
1024         return 1;
1025 }
1026
1027 __setup("acpi_serialize", acpi_serialize_setup);
1028
1029 /*
1030  * Wake and Run-Time GPES are expected to be separate.
1031  * We disable wake-GPEs at run-time to prevent spurious
1032  * interrupts.
1033  *
1034  * However, if a system exists that shares Wake and
1035  * Run-time events on the same GPE this flag is available
1036  * to tell Linux to keep the wake-time GPEs enabled at run-time.
1037  */
1038 static int __init acpi_wake_gpes_always_on_setup(char *str)
1039 {
1040         printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
1041
1042         acpi_gbl_leave_wake_gpes_disabled = FALSE;
1043
1044         return 1;
1045 }
1046
1047 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
1048
1049 /*
1050  * Acquire a spinlock.
1051  *
1052  * handle is a pointer to the spinlock_t.
1053  */
1054
1055 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1056 {
1057         acpi_cpu_flags flags;
1058         spin_lock_irqsave(lockp, flags);
1059         return flags;
1060 }
1061
1062 /*
1063  * Release a spinlock. See above.
1064  */
1065
1066 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1067 {
1068         spin_unlock_irqrestore(lockp, flags);
1069 }
1070
1071 #ifndef ACPI_USE_LOCAL_CACHE
1072
1073 /*******************************************************************************
1074  *
1075  * FUNCTION:    acpi_os_create_cache
1076  *
1077  * PARAMETERS:  name      - Ascii name for the cache
1078  *              size      - Size of each cached object
1079  *              depth     - Maximum depth of the cache (in objects) <ignored>
1080  *              cache     - Where the new cache object is returned
1081  *
1082  * RETURN:      status
1083  *
1084  * DESCRIPTION: Create a cache object
1085  *
1086  ******************************************************************************/
1087
1088 acpi_status
1089 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1090 {
1091         *cache = kmem_cache_create(name, size, 0, 0, NULL);
1092         if (*cache == NULL)
1093                 return AE_ERROR;
1094         else
1095                 return AE_OK;
1096 }
1097
1098 /*******************************************************************************
1099  *
1100  * FUNCTION:    acpi_os_purge_cache
1101  *
1102  * PARAMETERS:  Cache           - Handle to cache object
1103  *
1104  * RETURN:      Status
1105  *
1106  * DESCRIPTION: Free all objects within the requested cache.
1107  *
1108  ******************************************************************************/
1109
1110 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1111 {
1112         kmem_cache_shrink(cache);
1113         return (AE_OK);
1114 }
1115
1116 /*******************************************************************************
1117  *
1118  * FUNCTION:    acpi_os_delete_cache
1119  *
1120  * PARAMETERS:  Cache           - Handle to cache object
1121  *
1122  * RETURN:      Status
1123  *
1124  * DESCRIPTION: Free all objects within the requested cache and delete the
1125  *              cache object.
1126  *
1127  ******************************************************************************/
1128
1129 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1130 {
1131         kmem_cache_destroy(cache);
1132         return (AE_OK);
1133 }
1134
1135 /*******************************************************************************
1136  *
1137  * FUNCTION:    acpi_os_release_object
1138  *
1139  * PARAMETERS:  Cache       - Handle to cache object
1140  *              Object      - The object to be released
1141  *
1142  * RETURN:      None
1143  *
1144  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1145  *              the object is deleted.
1146  *
1147  ******************************************************************************/
1148
1149 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1150 {
1151         kmem_cache_free(cache, object);
1152         return (AE_OK);
1153 }
1154
1155 /******************************************************************************
1156  *
1157  * FUNCTION:    acpi_os_validate_interface
1158  *
1159  * PARAMETERS:  interface           - Requested interface to be validated
1160  *
1161  * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
1162  *
1163  * DESCRIPTION: Match an interface string to the interfaces supported by the
1164  *              host. Strings originate from an AML call to the _OSI method.
1165  *
1166  *****************************************************************************/
1167
1168 acpi_status
1169 acpi_os_validate_interface (char *interface)
1170 {
1171         if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
1172                 return AE_OK;
1173         if (!strcmp("Linux", interface)) {
1174                 printk(KERN_WARNING PREFIX
1175                         "System BIOS is requesting _OSI(Linux)\n");
1176                 printk(KERN_WARNING PREFIX
1177                         "If \"acpi_osi=Linux\" works better,\n"
1178                         "Please send dmidecode "
1179                         "to linux-acpi@vger.kernel.org\n");
1180                 if(osi_linux)
1181                         return AE_OK;
1182         }
1183         return AE_SUPPORT;
1184 }
1185
1186 /******************************************************************************
1187  *
1188  * FUNCTION:    acpi_os_validate_address
1189  *
1190  * PARAMETERS:  space_id             - ACPI space ID
1191  *              address             - Physical address
1192  *              length              - Address length
1193  *
1194  * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
1195  *              should return AE_AML_ILLEGAL_ADDRESS.
1196  *
1197  * DESCRIPTION: Validate a system address via the host OS. Used to validate
1198  *              the addresses accessed by AML operation regions.
1199  *
1200  *****************************************************************************/
1201
1202 acpi_status
1203 acpi_os_validate_address (
1204     u8                   space_id,
1205     acpi_physical_address   address,
1206     acpi_size               length)
1207 {
1208
1209     return AE_OK;
1210 }
1211
1212 #ifdef CONFIG_DMI
1213 static int dmi_osi_linux(const struct dmi_system_id *d)
1214 {
1215         printk(KERN_NOTICE "%s detected: enabling _OSI(Linux)\n", d->ident);
1216         enable_osi_linux(1);
1217         return 0;
1218 }
1219
/* DMI quirk table, scanned once at init to apply per-machine defaults. */
static struct dmi_system_id acpi_osl_dmi_table[] __initdata = {
	/*
	 * Boxes that need _OSI(Linux)
	 */
	{
	 .callback = dmi_osi_linux,
	 .ident = "Intel Napa CRB",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
		     DMI_MATCH(DMI_BOARD_NAME, "MPAD-MSAE Customer Reference Boards"),
		     },
	 },
	{}	/* empty entry terminates the table */
};
1234 #endif /* CONFIG_DMI */
1235
1236 #endif