1 // SPDX-License-Identifier: GPL-2.0
/*
 * sysfs.c - ACPI sysfs interface to userspace.
 */
6 #define pr_fmt(fmt) "ACPI: " fmt
8 #include <linux/acpi.h>
9 #include <linux/bitmap.h>
10 #include <linux/init.h>
11 #include <linux/kernel.h>
12 #include <linux/kstrtox.h>
13 #include <linux/moduleparam.h>
17 #ifdef CONFIG_ACPI_DEBUG
/*
 * ACPI debug sysfs I/F, including:
 * /sys/modules/acpi/parameters/debug_layer
 * /sys/modules/acpi/parameters/debug_level
 * /sys/modules/acpi/parameters/trace_method_name
 * /sys/modules/acpi/parameters/trace_state
 * /sys/modules/acpi/parameters/trace_debug_layer
 * /sys/modules/acpi/parameters/trace_debug_level
 */
36 #define ACPI_DEBUG_INIT(v) { .name = #v, .value = v }
38 static const struct acpi_dlayer acpi_debug_layers[] = {
39 ACPI_DEBUG_INIT(ACPI_UTILITIES),
40 ACPI_DEBUG_INIT(ACPI_HARDWARE),
41 ACPI_DEBUG_INIT(ACPI_EVENTS),
42 ACPI_DEBUG_INIT(ACPI_TABLES),
43 ACPI_DEBUG_INIT(ACPI_NAMESPACE),
44 ACPI_DEBUG_INIT(ACPI_PARSER),
45 ACPI_DEBUG_INIT(ACPI_DISPATCHER),
46 ACPI_DEBUG_INIT(ACPI_EXECUTER),
47 ACPI_DEBUG_INIT(ACPI_RESOURCES),
48 ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
49 ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
50 ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
51 ACPI_DEBUG_INIT(ACPI_COMPILER),
52 ACPI_DEBUG_INIT(ACPI_TOOLS),
55 static const struct acpi_dlevel acpi_debug_levels[] = {
56 ACPI_DEBUG_INIT(ACPI_LV_INIT),
57 ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
58 ACPI_DEBUG_INIT(ACPI_LV_INFO),
59 ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
60 ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),
62 ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
63 ACPI_DEBUG_INIT(ACPI_LV_PARSE),
64 ACPI_DEBUG_INIT(ACPI_LV_LOAD),
65 ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
66 ACPI_DEBUG_INIT(ACPI_LV_EXEC),
67 ACPI_DEBUG_INIT(ACPI_LV_NAMES),
68 ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
69 ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
70 ACPI_DEBUG_INIT(ACPI_LV_TABLES),
71 ACPI_DEBUG_INIT(ACPI_LV_VALUES),
72 ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
73 ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
74 ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
75 ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),
77 ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
78 ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
79 ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),
81 ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
82 ACPI_DEBUG_INIT(ACPI_LV_THREADS),
83 ACPI_DEBUG_INIT(ACPI_LV_IO),
84 ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),
86 ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
87 ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
88 ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
89 ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
92 static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
97 result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
99 for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
100 result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
101 acpi_debug_layers[i].name,
102 acpi_debug_layers[i].value,
103 (acpi_dbg_layer & acpi_debug_layers[i].value)
107 sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
109 (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
110 ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
113 sprintf(buffer + result,
114 "--\ndebug_layer = 0x%08X ( * = enabled)\n",
120 static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
125 result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
127 for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
128 result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
129 acpi_debug_levels[i].name,
130 acpi_debug_levels[i].value,
131 (acpi_dbg_level & acpi_debug_levels[i].value)
135 sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
141 static const struct kernel_param_ops param_ops_debug_layer = {
142 .set = param_set_uint,
143 .get = param_get_debug_layer,
146 static const struct kernel_param_ops param_ops_debug_level = {
147 .set = param_set_uint,
148 .get = param_get_debug_level,
151 module_param_cb(debug_layer, ¶m_ops_debug_layer, &acpi_dbg_layer, 0644);
152 module_param_cb(debug_level, ¶m_ops_debug_level, &acpi_dbg_level, 0644);
154 static char trace_method_name[1024];
156 static int param_set_trace_method_name(const char *val,
157 const struct kernel_param *kp)
160 bool is_abs_path = true;
165 if ((is_abs_path && strlen(val) > 1023) ||
166 (!is_abs_path && strlen(val) > 1022)) {
167 pr_err("%s: string parameter too long\n", kp->name);
172 * It's not safe to update acpi_gbl_trace_method_name without
173 * having the tracer stopped, so we save the original tracer
174 * state and disable it.
176 saved_flags = acpi_gbl_trace_flags;
177 (void)acpi_debug_trace(NULL,
178 acpi_gbl_trace_dbg_level,
179 acpi_gbl_trace_dbg_layer,
182 /* This is a hack. We can't kmalloc in early boot. */
184 strcpy(trace_method_name, val);
186 trace_method_name[0] = '\\';
187 strcpy(trace_method_name+1, val);
190 /* Restore the original tracer state */
191 (void)acpi_debug_trace(trace_method_name,
192 acpi_gbl_trace_dbg_level,
193 acpi_gbl_trace_dbg_layer,
199 static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
201 return sysfs_emit(buffer, "%s\n", acpi_gbl_trace_method_name);
204 static const struct kernel_param_ops param_ops_trace_method = {
205 .set = param_set_trace_method_name,
206 .get = param_get_trace_method_name,
209 static const struct kernel_param_ops param_ops_trace_attrib = {
210 .set = param_set_uint,
211 .get = param_get_uint,
214 module_param_cb(trace_method_name, ¶m_ops_trace_method, &trace_method_name, 0644);
215 module_param_cb(trace_debug_layer, ¶m_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
216 module_param_cb(trace_debug_level, ¶m_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
218 static int param_set_trace_state(const char *val,
219 const struct kernel_param *kp)
222 const char *method = trace_method_name;
225 /* So "xxx-once" comparison should go prior than "xxx" comparison */
226 #define acpi_compare_param(val, key) \
227 strncmp((val), (key), sizeof(key) - 1)
229 if (!acpi_compare_param(val, "enable")) {
231 flags = ACPI_TRACE_ENABLED;
232 } else if (!acpi_compare_param(val, "disable"))
234 else if (!acpi_compare_param(val, "method-once"))
235 flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
236 else if (!acpi_compare_param(val, "method"))
237 flags = ACPI_TRACE_ENABLED;
238 else if (!acpi_compare_param(val, "opcode-once"))
239 flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
240 else if (!acpi_compare_param(val, "opcode"))
241 flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
245 status = acpi_debug_trace(method,
246 acpi_gbl_trace_dbg_level,
247 acpi_gbl_trace_dbg_layer,
249 if (ACPI_FAILURE(status))
255 static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
257 if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
258 return sprintf(buffer, "disable\n");
259 if (!acpi_gbl_trace_method_name)
260 return sprintf(buffer, "enable\n");
261 if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
262 return sprintf(buffer, "method-once\n");
264 return sprintf(buffer, "method\n");
267 module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
269 #endif /* CONFIG_ACPI_DEBUG */
272 /* /sys/modules/acpi/parameters/aml_debug_output */
274 module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
276 MODULE_PARM_DESC(aml_debug_output,
277 "To enable/disable the ACPI Debug Object output.");
279 /* /sys/module/acpi/parameters/acpica_version */
280 static int param_get_acpica_version(char *buffer,
281 const struct kernel_param *kp)
285 result = sprintf(buffer, "%x\n", ACPI_CA_VERSION);
290 module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
293 * ACPI table sysfs I/F:
294 * /sys/firmware/acpi/tables/
295 * /sys/firmware/acpi/tables/data/
296 * /sys/firmware/acpi/tables/dynamic/
299 static LIST_HEAD(acpi_table_attr_list);
300 static struct kobject *tables_kobj;
301 static struct kobject *tables_data_kobj;
302 static struct kobject *dynamic_tables_kobj;
303 static struct kobject *hotplug_kobj;
305 #define ACPI_MAX_TABLE_INSTANCES 999
306 #define ACPI_INST_SIZE 4 /* including trailing 0 */
308 struct acpi_table_attr {
309 struct bin_attribute attr;
310 char name[ACPI_NAMESEG_SIZE];
312 char filename[ACPI_NAMESEG_SIZE+ACPI_INST_SIZE];
313 struct list_head node;
316 struct acpi_data_attr {
317 struct bin_attribute attr;
321 static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
322 struct bin_attribute *bin_attr, char *buf,
323 loff_t offset, size_t count)
325 struct acpi_table_attr *table_attr =
326 container_of(bin_attr, struct acpi_table_attr, attr);
327 struct acpi_table_header *table_header = NULL;
331 status = acpi_get_table(table_attr->name, table_attr->instance,
333 if (ACPI_FAILURE(status))
336 rc = memory_read_from_buffer(buf, count, &offset, table_header,
337 table_header->length);
338 acpi_put_table(table_header);
342 static int acpi_table_attr_init(struct kobject *tables_obj,
343 struct acpi_table_attr *table_attr,
344 struct acpi_table_header *table_header)
346 struct acpi_table_header *header = NULL;
347 struct acpi_table_attr *attr = NULL;
348 char instance_str[ACPI_INST_SIZE];
350 sysfs_attr_init(&table_attr->attr.attr);
351 ACPI_COPY_NAMESEG(table_attr->name, table_header->signature);
353 list_for_each_entry(attr, &acpi_table_attr_list, node) {
354 if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name))
355 if (table_attr->instance < attr->instance)
356 table_attr->instance = attr->instance;
358 table_attr->instance++;
359 if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
360 pr_warn("%4.4s: too many table instances\n", table_attr->name);
364 ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature);
365 table_attr->filename[ACPI_NAMESEG_SIZE] = '\0';
366 if (table_attr->instance > 1 || (table_attr->instance == 1 &&
368 (table_header->signature, 2, &header))) {
369 snprintf(instance_str, sizeof(instance_str), "%u",
370 table_attr->instance);
371 strcat(table_attr->filename, instance_str);
374 table_attr->attr.size = table_header->length;
375 table_attr->attr.read = acpi_table_show;
376 table_attr->attr.attr.name = table_attr->filename;
377 table_attr->attr.attr.mode = 0400;
379 return sysfs_create_bin_file(tables_obj, &table_attr->attr);
382 acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
384 struct acpi_table_attr *table_attr;
387 case ACPI_TABLE_EVENT_INSTALL:
388 table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
392 if (acpi_table_attr_init(dynamic_tables_kobj,
393 table_attr, table)) {
397 list_add_tail(&table_attr->node, &acpi_table_attr_list);
399 case ACPI_TABLE_EVENT_LOAD:
400 case ACPI_TABLE_EVENT_UNLOAD:
401 case ACPI_TABLE_EVENT_UNINSTALL:
403 * we do not need to do anything right now
404 * because the table is not deleted from the
405 * global table list when unloading it.
409 return AE_BAD_PARAMETER;
414 static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
415 struct bin_attribute *bin_attr, char *buf,
416 loff_t offset, size_t count)
418 struct acpi_data_attr *data_attr;
422 data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
423 size = data_attr->attr.size;
431 if (count > size - offset)
432 count = size - offset;
434 base = acpi_os_map_iomem(data_attr->addr, size);
438 memcpy_fromio(buf, base + offset, count);
440 acpi_os_unmap_iomem(base, size);
445 static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
447 struct acpi_table_bert *bert = th;
449 if (bert->header.length < sizeof(struct acpi_table_bert) ||
450 bert->region_length < sizeof(struct acpi_hest_generic_status)) {
454 data_attr->addr = bert->address;
455 data_attr->attr.size = bert->region_length;
456 data_attr->attr.attr.name = "BERT";
458 return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
461 static int acpi_ccel_data_init(void *th, struct acpi_data_attr *data_attr)
463 struct acpi_table_ccel *ccel = th;
465 if (ccel->header.length < sizeof(struct acpi_table_ccel) ||
466 !ccel->log_area_start_address || !ccel->log_area_minimum_length) {
470 data_attr->addr = ccel->log_area_start_address;
471 data_attr->attr.size = ccel->log_area_minimum_length;
472 data_attr->attr.attr.name = "CCEL";
474 return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
477 static struct acpi_data_obj {
479 int (*fn)(void *, struct acpi_data_attr *);
480 } acpi_data_objs[] = {
481 { ACPI_SIG_BERT, acpi_bert_data_init },
482 { ACPI_SIG_CCEL, acpi_ccel_data_init },
485 #define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)
487 static int acpi_table_data_init(struct acpi_table_header *th)
489 struct acpi_data_attr *data_attr;
492 for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
493 if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) {
494 data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
497 sysfs_attr_init(&data_attr->attr.attr);
498 data_attr->attr.read = acpi_data_show;
499 data_attr->attr.attr.mode = 0400;
500 return acpi_data_objs[i].fn(th, data_attr);
506 static int acpi_tables_sysfs_init(void)
508 struct acpi_table_attr *table_attr;
509 struct acpi_table_header *table_header = NULL;
514 tables_kobj = kobject_create_and_add("tables", acpi_kobj);
518 tables_data_kobj = kobject_create_and_add("data", tables_kobj);
519 if (!tables_data_kobj)
520 goto err_tables_data;
522 dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
523 if (!dynamic_tables_kobj)
524 goto err_dynamic_tables;
526 for (table_index = 0;; table_index++) {
527 status = acpi_get_table_by_index(table_index, &table_header);
529 if (status == AE_BAD_PARAMETER)
532 if (ACPI_FAILURE(status))
535 table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
539 ret = acpi_table_attr_init(tables_kobj,
540 table_attr, table_header);
545 list_add_tail(&table_attr->node, &acpi_table_attr_list);
546 acpi_table_data_init(table_header);
549 kobject_uevent(tables_kobj, KOBJ_ADD);
550 kobject_uevent(tables_data_kobj, KOBJ_ADD);
551 kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
555 kobject_put(tables_data_kobj);
557 kobject_put(tables_kobj);
563 * Detailed ACPI IRQ counters:
564 * /sys/firmware/acpi/interrupts/
567 u32 acpi_irq_handled;
568 u32 acpi_irq_not_handled;
571 #define COUNT_SCI 1 /* acpi_irq_handled */
572 #define COUNT_SCI_NOT 2 /* acpi_irq_not_handled */
573 #define COUNT_ERROR 3 /* other */
574 #define NUM_COUNTERS_EXTRA 4
576 struct event_counter {
581 static struct event_counter *all_counters;
583 static u32 num_counters;
584 static struct attribute **all_attrs;
585 static u32 acpi_gpe_count;
587 static struct attribute_group interrupt_stats_attr_group = {
588 .name = "interrupts",
591 static struct kobj_attribute *counter_attrs;
593 static void delete_gpe_attr_array(void)
595 struct event_counter *tmp = all_counters;
603 for (i = 0; i < num_gpes; i++)
604 kfree(counter_attrs[i].attr.name);
606 kfree(counter_attrs);
611 static void gpe_count(u32 gpe_number)
618 if (gpe_number < num_gpes)
619 all_counters[gpe_number].count++;
621 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
622 COUNT_ERROR].count++;
625 static void fixed_event_count(u32 event_number)
630 if (event_number < ACPI_NUM_FIXED_EVENTS)
631 all_counters[num_gpes + event_number].count++;
633 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
634 COUNT_ERROR].count++;
637 static void acpi_global_event_handler(u32 event_type, acpi_handle device,
638 u32 event_number, void *context)
640 if (event_type == ACPI_EVENT_TYPE_GPE) {
641 gpe_count(event_number);
642 pr_debug("GPE event 0x%02x\n", event_number);
643 } else if (event_type == ACPI_EVENT_TYPE_FIXED) {
644 fixed_event_count(event_number);
645 pr_debug("Fixed event 0x%02x\n", event_number);
647 pr_debug("Other event 0x%02x\n", event_number);
651 static int get_status(u32 index, acpi_event_status *ret,
656 if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
659 if (index < num_gpes) {
660 status = acpi_get_gpe_device(index, handle);
661 if (ACPI_FAILURE(status)) {
662 pr_warn("Invalid GPE 0x%x", index);
665 status = acpi_get_gpe_status(*handle, index, ret);
667 status = acpi_get_event_status(index - num_gpes, ret);
669 if (ACPI_FAILURE(status))
675 static ssize_t counter_show(struct kobject *kobj,
676 struct kobj_attribute *attr, char *buf)
678 int index = attr - counter_attrs;
681 acpi_event_status status;
684 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
686 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
687 acpi_irq_not_handled;
688 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
690 size = sprintf(buf, "%8u", all_counters[index].count);
692 /* "gpe_all" or "sci" */
693 if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
696 result = get_status(index, &status, &handle);
700 if (status & ACPI_EVENT_FLAG_ENABLE_SET)
701 size += sprintf(buf + size, " EN");
703 size += sprintf(buf + size, " ");
704 if (status & ACPI_EVENT_FLAG_STATUS_SET)
705 size += sprintf(buf + size, " STS");
707 size += sprintf(buf + size, " ");
709 if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
710 size += sprintf(buf + size, " invalid ");
711 else if (status & ACPI_EVENT_FLAG_ENABLED)
712 size += sprintf(buf + size, " enabled ");
713 else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
714 size += sprintf(buf + size, " wake_enabled");
716 size += sprintf(buf + size, " disabled ");
717 if (status & ACPI_EVENT_FLAG_MASKED)
718 size += sprintf(buf + size, " masked ");
720 size += sprintf(buf + size, " unmasked");
723 size += sprintf(buf + size, "\n");
724 return result ? result : size;
728 * counter_set() sets the specified counter.
729 * setting the total "sci" file to any value clears all counters.
730 * enable/disable/clear a gpe/fixed event in user space.
732 static ssize_t counter_set(struct kobject *kobj,
733 struct kobj_attribute *attr, const char *buf,
736 int index = attr - counter_attrs;
737 acpi_event_status status;
742 if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
744 for (i = 0; i < num_counters; ++i)
745 all_counters[i].count = 0;
747 acpi_irq_handled = 0;
748 acpi_irq_not_handled = 0;
752 /* show the event status for both GPEs and Fixed Events */
753 result = get_status(index, &status, &handle);
757 if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
758 pr_warn("Can not change Invalid GPE/Fixed Event status\n");
762 if (index < num_gpes) {
763 if (!strcmp(buf, "disable\n") &&
764 (status & ACPI_EVENT_FLAG_ENABLED))
765 result = acpi_disable_gpe(handle, index);
766 else if (!strcmp(buf, "enable\n") &&
767 !(status & ACPI_EVENT_FLAG_ENABLED))
768 result = acpi_enable_gpe(handle, index);
769 else if (!strcmp(buf, "clear\n") &&
770 (status & ACPI_EVENT_FLAG_STATUS_SET))
771 result = acpi_clear_gpe(handle, index);
772 else if (!strcmp(buf, "mask\n"))
773 result = acpi_mask_gpe(handle, index, TRUE);
774 else if (!strcmp(buf, "unmask\n"))
775 result = acpi_mask_gpe(handle, index, FALSE);
776 else if (!kstrtoul(buf, 0, &tmp))
777 all_counters[index].count = tmp;
780 } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
781 int event = index - num_gpes;
782 if (!strcmp(buf, "disable\n") &&
783 (status & ACPI_EVENT_FLAG_ENABLE_SET))
784 result = acpi_disable_event(event, ACPI_NOT_ISR);
785 else if (!strcmp(buf, "enable\n") &&
786 !(status & ACPI_EVENT_FLAG_ENABLE_SET))
787 result = acpi_enable_event(event, ACPI_NOT_ISR);
788 else if (!strcmp(buf, "clear\n") &&
789 (status & ACPI_EVENT_FLAG_STATUS_SET))
790 result = acpi_clear_event(event);
791 else if (!kstrtoul(buf, 0, &tmp))
792 all_counters[index].count = tmp;
796 all_counters[index].count = strtoul(buf, NULL, 0);
798 if (ACPI_FAILURE(result))
801 return result ? result : size;
/*
 * A Quirk Mechanism for GPE Flooding Prevention:
 *
 * Quirks may be needed to prevent GPE flooding on a specific GPE. The
 * flooding typically cannot be detected and automatically prevented by
 * ACPI_GPE_DISPATCH_NONE check because there is a _Lxx/_Exx prepared in
 * the AML tables. This normally indicates a feature gap in Linux, thus
 * instead of providing endless quirk tables, we provide a boot parameter
 * for those who want this quirk. For example, if the users want to prevent
 * the GPE flooding for GPE 00, they need to specify the following boot
 * parameter:
 *   acpi_mask_gpe=0x00
 * Note, the parameter can be a list (see bitmap_parselist() for the details).
 * The masking status can be modified by the following runtime controlling
 * interface:
 *   echo unmask > /sys/firmware/acpi/interrupts/gpe00
 */
821 #define ACPI_MASKABLE_GPE_MAX 0x100
822 static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
824 static int __init acpi_gpe_set_masked_gpes(char *val)
829 ret = kstrtou8(val, 0, &gpe);
831 ret = bitmap_parselist(val, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX);
835 set_bit(gpe, acpi_masked_gpes_map);
839 __setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
841 void __init acpi_gpe_apply_masked_gpes(void)
847 for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
848 status = acpi_get_gpe_device(gpe, &handle);
849 if (ACPI_SUCCESS(status)) {
850 pr_info("Masking GPE 0x%x.\n", gpe);
851 (void)acpi_mask_gpe(handle, gpe, TRUE);
856 void acpi_irq_stats_init(void)
864 num_gpes = acpi_current_gpe_count;
865 num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
867 all_attrs = kcalloc(num_counters + 1, sizeof(*all_attrs), GFP_KERNEL);
868 if (all_attrs == NULL)
871 all_counters = kcalloc(num_counters, sizeof(*all_counters), GFP_KERNEL);
872 if (all_counters == NULL)
875 status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
876 if (ACPI_FAILURE(status))
879 counter_attrs = kcalloc(num_counters, sizeof(*counter_attrs), GFP_KERNEL);
880 if (counter_attrs == NULL)
883 for (i = 0; i < num_counters; ++i) {
888 sprintf(buffer, "gpe%02X", i);
889 else if (i == num_gpes + ACPI_EVENT_PMTIMER)
890 sprintf(buffer, "ff_pmtimer");
891 else if (i == num_gpes + ACPI_EVENT_GLOBAL)
892 sprintf(buffer, "ff_gbl_lock");
893 else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
894 sprintf(buffer, "ff_pwr_btn");
895 else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
896 sprintf(buffer, "ff_slp_btn");
897 else if (i == num_gpes + ACPI_EVENT_RTC)
898 sprintf(buffer, "ff_rt_clk");
899 else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
900 sprintf(buffer, "gpe_all");
901 else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
902 sprintf(buffer, "sci");
903 else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
904 sprintf(buffer, "sci_not");
905 else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
906 sprintf(buffer, "error");
908 sprintf(buffer, "bug%02X", i);
910 name = kstrdup(buffer, GFP_KERNEL);
914 sysfs_attr_init(&counter_attrs[i].attr);
915 counter_attrs[i].attr.name = name;
916 counter_attrs[i].attr.mode = 0644;
917 counter_attrs[i].show = counter_show;
918 counter_attrs[i].store = counter_set;
920 all_attrs[i] = &counter_attrs[i].attr;
923 interrupt_stats_attr_group.attrs = all_attrs;
924 if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
928 delete_gpe_attr_array();
931 static void __exit interrupt_stats_exit(void)
933 sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
935 delete_gpe_attr_array();
938 static ssize_t pm_profile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
940 return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
943 static const struct kobj_attribute pm_profile_attr = __ATTR_RO(pm_profile);
945 static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
947 struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
949 return sprintf(buf, "%d\n", hotplug->enabled);
952 static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
953 const char *buf, size_t size)
955 struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
958 if (kstrtouint(buf, 10, &val) || val > 1)
961 acpi_scan_hotplug_enabled(hotplug, val);
965 static struct kobj_attribute hotplug_enabled_attr = __ATTR_RW(enabled);
967 static struct attribute *hotplug_profile_attrs[] = {
968 &hotplug_enabled_attr.attr,
971 ATTRIBUTE_GROUPS(hotplug_profile);
973 static const struct kobj_type acpi_hotplug_profile_ktype = {
974 .sysfs_ops = &kobj_sysfs_ops,
975 .default_groups = hotplug_profile_groups,
978 void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
986 error = kobject_init_and_add(&hotplug->kobj,
987 &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
989 kobject_put(&hotplug->kobj);
993 kobject_uevent(&hotplug->kobj, KOBJ_ADD);
997 pr_err("Unable to add hotplug profile '%s'\n", name);
1000 static ssize_t force_remove_show(struct kobject *kobj,
1001 struct kobj_attribute *attr, char *buf)
1003 return sprintf(buf, "%d\n", 0);
1006 static ssize_t force_remove_store(struct kobject *kobj,
1007 struct kobj_attribute *attr,
1008 const char *buf, size_t size)
1013 ret = kstrtobool(buf, &val);
1018 pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
1024 static const struct kobj_attribute force_remove_attr = __ATTR_RW(force_remove);
1026 int __init acpi_sysfs_init(void)
1030 result = acpi_tables_sysfs_init();
1034 hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
1038 result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
1042 result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);