---------------------------
+ What: AR9170USB
+ When: 2.6.40
+
+ Why: This driver is deprecated and the firmware is no longer
+ maintained. The replacement driver "carl9170" has been
+ available for a while, so the devices remain supported.
+
+ Who: Christian Lamparter <chunkeey@googlemail.com>
+
+ ---------------------------
+
What: IRQF_SAMPLE_RANDOM
Check: IRQF_SAMPLE_RANDOM
When: July 2009
---------------------------
-What: /proc/acpi/button
-When: August 2007
-Why: /proc/acpi/button has been replaced by events to the input layer
- since 2.6.20.
-Who: Len Brown <len.brown@intel.com>
-
----------------------------
-
What: /proc/acpi/event
When: February 2008
Why: /proc/acpi/event has been replaced by events via the input layer
----------------------------
- What: i2c_adapter.id
- When: June 2011
- Why: This field is deprecated. I2C device drivers shouldn't change their
- behavior based on the underlying I2C adapter. Instead, the I2C
- adapter driver should instantiate the I2C devices and provide the
- needed platform-specific information.
- Who: Jean Delvare <khali@linux-fr.org>
-
- ----------------------------
-
What: cancel_rearming_delayed_work[queue]()
When: 2.6.39
----------------------------
+ What: xt_connlimit rev 0
+ When: 2012
+ Who: Jan Engelhardt <jengelh@medozas.de>
+ Files: net/netfilter/xt_connlimit.c
+
+ ----------------------------
+
What: noswapaccount kernel command line parameter
When: 2.6.40
Why: The original implementation of memsw feature enabled by
Who: Michal Hocko <mhocko@suse.cz>
----------------------------
+
+ What: ipt_addrtype match include file
+ When: 2012
+ Why: superseded by xt_addrtype
+ Who: Florian Westphal <fw@strlen.de>
+ Files: include/linux/netfilter_ipv4/ipt_addrtype.h
+
+ ----------------------------
+
+ What: i2c_driver.attach_adapter
+ i2c_driver.detach_adapter
+ When: September 2011
+ Why: These legacy callbacks should no longer be used as i2c-core offers
+ a variety of preferable alternative ways to instantiate I2C devices.
+ Who: Jean Delvare <khali@linux-fr.org>
+
+ ----------------------------
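For reference, a minimal sketch (not part of this patch) of one of the preferred alternatives mentioned above: pre-declaring the devices from board code with i2c_register_board_info() so that i2c-core instantiates them once the adapter is registered. The bus number, the device type "24c02" and the address 0x50 are hypothetical examples, and the init function would be wired into the board's early setup.

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/i2c.h>

	/* Hypothetical board-level I2C device table. */
	static struct i2c_board_info __initdata board_i2c_devices[] = {
		{
			I2C_BOARD_INFO("24c02", 0x50),
		},
	};

	static int __init board_declare_i2c_devices(void)
	{
		/* Pre-declare the devices on bus 0; i2c-core creates
		 * them when the matching adapter shows up. */
		return i2c_register_board_info(0, board_i2c_devices,
					       ARRAY_SIZE(board_i2c_devices));
	}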
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mpspec.h>
+ #include <asm/trampoline.h>
#define COMPILER_DEPENDENT_INT64 long long
#define COMPILER_DEPENDENT_UINT64 unsigned long long
acpi_noirq_set();
}
-/* routines for saving/restoring kernel state */
-extern int acpi_save_state_mem(void);
-extern void acpi_restore_state_mem(void);
+/* Low-level suspend routine. */
+extern int acpi_suspend_lowlevel(void);
- extern unsigned long acpi_wakeup_address;
+ extern const unsigned char acpi_wakeup_code[];
+ #define acpi_wakeup_address (__pa(TRAMPOLINE_SYM(acpi_wakeup_code)))
/* early initialization routine */
extern void acpi_reserve_wakeup_memory(void);
#ifdef CONFIG_ACPI_NUMA
extern int acpi_numa;
- extern void acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
- unsigned long end);
- extern int acpi_scan_nodes(unsigned long start, unsigned long end);
- #define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-
- #ifdef CONFIG_NUMA_EMU
- extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
- int num_nodes);
- #endif
+ extern int x86_acpi_numa_init(void);
#endif /* CONFIG_ACPI_NUMA */
#define acpi_unlazy_tlb(x) leave_mm(x)
#include "realmode/wakeup.h"
#include "sleep.h"
- unsigned long acpi_wakeup_address;
unsigned long acpi_realmode_flags;
- /* address in low memory of the wakeup routine. */
- static unsigned long acpi_realmode;
-
#if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
static char temp_stack[4096];
#endif
/**
- * acpi_save_state_mem - save kernel state
+ * acpi_suspend_lowlevel - save kernel state
*
* Create an identity mapped page table and copy the wakeup routine to
* low memory.
- *
- * Note that this is too late to change acpi_wakeup_address.
*/
-int acpi_save_state_mem(void)
+int acpi_suspend_lowlevel(void)
{
struct wakeup_header *header;
+ /* address in low memory of the wakeup routine. */
+ char *acpi_realmode;
- if (!acpi_realmode) {
- printk(KERN_ERR "Could not allocate memory during boot, "
- "S3 disabled\n");
- return -ENOMEM;
- }
- memcpy((void *)acpi_realmode, &wakeup_code_start, WAKEUP_SIZE);
+ acpi_realmode = TRAMPOLINE_SYM(acpi_wakeup_code);
- header = (struct wakeup_header *)(acpi_realmode + HEADER_OFFSET);
- if (header->signature != 0x51ee1111) {
+ header = (struct wakeup_header *)(acpi_realmode + WAKEUP_HEADER_OFFSET);
+ if (header->signature != WAKEUP_HEADER_SIGNATURE) {
printk(KERN_ERR "wakeup header does not match\n");
return -EINVAL;
}
/* GDT[0]: GDT self-pointer */
header->wakeup_gdt[0] =
(u64)(sizeof(header->wakeup_gdt) - 1) +
- ((u64)(acpi_wakeup_address +
- ((char *)&header->wakeup_gdt - (char *)acpi_realmode))
- << 16);
+ ((u64)__pa(&header->wakeup_gdt) << 16);
/* GDT[1]: big real mode-like code segment */
header->wakeup_gdt[1] =
GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff);
header->pmode_cr3 = (u32)__pa(&initial_page_table);
saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
- header->trampoline_segment = setup_trampoline() >> 4;
+ header->trampoline_segment = trampoline_address() >> 4;
#ifdef CONFIG_SMP
stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
early_gdt_descr.address =
saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */
+ do_suspend_lowlevel();
return 0;
}
- /**
-  * acpi_reserve_wakeup_memory - do _very_ early ACPI initialisation
-  *
-  * We allocate a page from the first 1MB of memory for the wakeup
-  * routine for when we come back from a sleep state. The
-  * runtime allocator allows specification of <16MB pages, but not
-  * <1MB pages.
-  */
- void __init acpi_reserve_wakeup_memory(void)
- {
- phys_addr_t mem;
-
- if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) {
- printk(KERN_ERR
- "ACPI: Wakeup code way too big, S3 disabled.\n");
- return;
- }
-
- mem = memblock_find_in_range(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE);
-
- if (mem == MEMBLOCK_ERROR) {
- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
- return;
- }
- acpi_realmode = (unsigned long) phys_to_virt(mem);
- acpi_wakeup_address = mem;
- memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
- }
-
- int __init acpi_configure_wakeup_memory(void)
- {
- if (acpi_realmode)
- set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT);
-
- return 0;
- }
- arch_initcall(acpi_configure_wakeup_memory);
-
- /*
-  * acpi_restore_state - undo effects of acpi_save_state_mem
-  */
- void acpi_restore_state_mem(void)
- {
- }
-
static int __init acpi_sleep_setup(char *str)
{
while ((str != NULL) && (*str != '\0')) {
#include <asm/trampoline.h>
- extern char wakeup_code_start, wakeup_code_end;
-
extern unsigned long saved_video_mode;
extern long saved_magic;
extern int wakeup_pmode_return;
- extern char swsusp_pg_dir[PAGE_SIZE];
extern unsigned long acpi_copy_wakeup_routine(unsigned long);
extern void wakeup_long64(void);
+
+extern void do_suspend_lowlevel(void);
config ACPI_APEI
bool "ACPI Platform Error Interface (APEI)"
+ select PSTORE
depends on X86
help
APEI allows to report errors (for example from the chipset)
by firmware to produce more valuable hardware error
information for Linux.
+config ACPI_APEI_PCIEAER
+ bool "APEI PCIe AER logging/recovering support"
+ depends on ACPI_APEI && PCIEAER
+ help
+ PCIe AER errors may be reported via APEI firmware first mode.
+ Turn on this option to enable the corresponding support.
+
config ACPI_APEI_EINJ
tristate "APEI Error INJection (EINJ)"
depends on ACPI_APEI && DEBUG_FS
#include <linux/cper.h>
#include <linux/nmi.h>
#include <linux/hardirq.h>
+ #include <linux/pstore.h>
#include <acpi/apei.h>
#include "apei-internal.h"
}
EXPORT_SYMBOL_GPL(erst_get_record_count);
+#define ERST_RECORD_ID_CACHE_SIZE_MIN 16
+#define ERST_RECORD_ID_CACHE_SIZE_MAX 1024
+
+struct erst_record_id_cache {
+ struct mutex lock;
+ u64 *entries;
+ int len;
+ int size;
+ int refcount;
+};
+
+static struct erst_record_id_cache erst_record_id_cache = {
+ .lock = __MUTEX_INITIALIZER(erst_record_id_cache.lock),
+ .refcount = 0,
+};
+
static int __erst_get_next_record_id(u64 *record_id)
{
struct apei_exec_context ctx;
return 0;
}
+int erst_get_record_id_begin(int *pos)
+{
+ int rc;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
+ if (rc)
+ return rc;
+ erst_record_id_cache.refcount++;
+ mutex_unlock(&erst_record_id_cache.lock);
+
+ *pos = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(erst_get_record_id_begin);
+
+/* erst_record_id_cache.lock must be held by caller */
+static int __erst_record_id_cache_add_one(void)
+{
+ u64 id, prev_id, first_id;
+ int i, rc;
+ u64 *entries;
+ unsigned long flags;
+
+ id = prev_id = first_id = APEI_ERST_INVALID_RECORD_ID;
+retry:
+ raw_spin_lock_irqsave(&erst_lock, flags);
+ rc = __erst_get_next_record_id(&id);
+ raw_spin_unlock_irqrestore(&erst_lock, flags);
+ if (rc == -ENOENT)
+ return 0;
+ if (rc)
+ return rc;
+ if (id == APEI_ERST_INVALID_RECORD_ID)
+ return 0;
+ /* the ID did not advance or looped back to the first ID: no more new records */
+ if (id == prev_id || id == first_id)
+ return 0;
+ if (first_id == APEI_ERST_INVALID_RECORD_ID)
+ first_id = id;
+ prev_id = id;
+
+ entries = erst_record_id_cache.entries;
+ for (i = 0; i < erst_record_id_cache.len; i++) {
+ if (entries[i] == id)
+ break;
+ }
+ /* record id already in cache, try next */
+ if (i < erst_record_id_cache.len)
+ goto retry;
+ if (erst_record_id_cache.len >= erst_record_id_cache.size) {
+ int new_size, alloc_size;
+ u64 *new_entries;
+
+ new_size = erst_record_id_cache.size * 2;
+ new_size = clamp_val(new_size, ERST_RECORD_ID_CACHE_SIZE_MIN,
+ ERST_RECORD_ID_CACHE_SIZE_MAX);
+ if (new_size <= erst_record_id_cache.size) {
+ if (printk_ratelimit())
+ pr_warning(FW_WARN ERST_PFX
+ "too many record IDs!\n");
+ return 0;
+ }
+ alloc_size = new_size * sizeof(entries[0]);
+ if (alloc_size < PAGE_SIZE)
+ new_entries = kmalloc(alloc_size, GFP_KERNEL);
+ else
+ new_entries = vmalloc(alloc_size);
+ if (!new_entries)
+ return -ENOMEM;
+ memcpy(new_entries, entries,
+ erst_record_id_cache.len * sizeof(entries[0]));
+ if (erst_record_id_cache.size < PAGE_SIZE)
+ kfree(entries);
+ else
+ vfree(entries);
+ erst_record_id_cache.entries = entries = new_entries;
+ erst_record_id_cache.size = new_size;
+ }
+ entries[i] = id;
+ erst_record_id_cache.len++;
+
+ return 1;
+}
+
/*
* Get the record ID of an existing error record on the persistent
* storage. If there is no error record on the persistent storage, the
* returned record_id is APEI_ERST_INVALID_RECORD_ID.
*/
-int erst_get_next_record_id(u64 *record_id)
+int erst_get_record_id_next(int *pos, u64 *record_id)
{
- int rc;
- unsigned long flags;
+ int rc = 0;
+ u64 *entries;
if (erst_disable)
return -ENODEV;
- raw_spin_lock_irqsave(&erst_lock, flags);
- rc = __erst_get_next_record_id(record_id);
- raw_spin_unlock_irqrestore(&erst_lock, flags);
+ /* must be enclosed by erst_get_record_id_begin/end */
+ BUG_ON(!erst_record_id_cache.refcount);
+ BUG_ON(*pos < 0 || *pos > erst_record_id_cache.len);
+
+ mutex_lock(&erst_record_id_cache.lock);
+ entries = erst_record_id_cache.entries;
+ for (; *pos < erst_record_id_cache.len; (*pos)++)
+ if (entries[*pos] != APEI_ERST_INVALID_RECORD_ID)
+ break;
+ /* found next record id in cache */
+ if (*pos < erst_record_id_cache.len) {
+ *record_id = entries[*pos];
+ (*pos)++;
+ goto out_unlock;
+ }
+
+ /* Try to add one more record ID to cache */
+ rc = __erst_record_id_cache_add_one();
+ if (rc < 0)
+ goto out_unlock;
+ /* successfully added one new ID */
+ if (rc == 1) {
+ *record_id = erst_record_id_cache.entries[*pos];
+ (*pos)++;
+ rc = 0;
+ } else {
+ *pos = -1;
+ *record_id = APEI_ERST_INVALID_RECORD_ID;
+ }
+out_unlock:
+ mutex_unlock(&erst_record_id_cache.lock);
return rc;
}
-EXPORT_SYMBOL_GPL(erst_get_next_record_id);
+EXPORT_SYMBOL_GPL(erst_get_record_id_next);
+
+/* erst_record_id_cache.lock must be held by caller */
+static void __erst_record_id_cache_compact(void)
+{
+ int i, wpos = 0;
+ u64 *entries;
+
+ if (erst_record_id_cache.refcount)
+ return;
+
+ entries = erst_record_id_cache.entries;
+ for (i = 0; i < erst_record_id_cache.len; i++) {
+ if (entries[i] == APEI_ERST_INVALID_RECORD_ID)
+ continue;
+ if (wpos != i)
+ memcpy(&entries[wpos], &entries[i], sizeof(entries[i]));
+ wpos++;
+ }
+ erst_record_id_cache.len = wpos;
+}
+
+void erst_get_record_id_end(void)
+{
+ /*
+ * erst_disable != 0 should be detected by invoker via the
+ * return value of erst_get_record_id_begin/next, so this
+ * function should not be called for erst_disable != 0.
+ */
+ BUG_ON(erst_disable);
+
+ mutex_lock(&erst_record_id_cache.lock);
+ erst_record_id_cache.refcount--;
+ BUG_ON(erst_record_id_cache.refcount < 0);
+ __erst_record_id_cache_compact();
+ mutex_unlock(&erst_record_id_cache.lock);
+}
+EXPORT_SYMBOL_GPL(erst_get_record_id_end);
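A hedged usage sketch (not part of this patch) of the new iterator introduced above: callers bracket the walk with erst_get_record_id_begin()/erst_get_record_id_end() and fetch IDs with erst_get_record_id_next(). The helper name and the caller-supplied buffer are hypothetical.

	/* Assumes a file that already includes <linux/cper.h> and <acpi/apei.h>. */
	static void example_walk_erst(struct cper_record_header *buf, size_t buflen)
	{
		int rc, pos;
		u64 record_id;
		ssize_t len;

		rc = erst_get_record_id_begin(&pos);
		if (rc)
			return;
		for (;;) {
			rc = erst_get_record_id_next(&pos, &record_id);
			if (rc || record_id == APEI_ERST_INVALID_RECORD_ID)
				break;
			len = erst_read(record_id, buf, buflen);
			if (len > 0)
				pr_info("ERST record %llu: %zd bytes\n",
					(unsigned long long)record_id, len);
		}
		erst_get_record_id_end();
	}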
static int __erst_write_to_storage(u64 offset)
{
}
EXPORT_SYMBOL_GPL(erst_read);
-/*
- * If return value > buflen, the buffer size is not big enough,
- * else if return value = 0, there is no more record to read,
- * else if return value < 0, something goes wrong,
- * else everything is OK, and return value is record length
- */
-ssize_t erst_read_next(struct cper_record_header *record, size_t buflen)
-{
- int rc;
- ssize_t len;
- unsigned long flags;
- u64 record_id;
-
- if (erst_disable)
- return -ENODEV;
-
- raw_spin_lock_irqsave(&erst_lock, flags);
- rc = __erst_get_next_record_id(&record_id);
- if (rc) {
- raw_spin_unlock_irqrestore(&erst_lock, flags);
- return rc;
- }
- /* no more record */
- if (record_id == APEI_ERST_INVALID_RECORD_ID) {
- raw_spin_unlock_irqrestore(&erst_lock, flags);
- return 0;
- }
-
- len = __erst_read(record_id, record, buflen);
- raw_spin_unlock_irqrestore(&erst_lock, flags);
-
- return len;
-}
-EXPORT_SYMBOL_GPL(erst_read_next);
-
int erst_clear(u64 record_id)
{
- int rc;
+ int rc, i;
unsigned long flags;
+ u64 *entries;
if (erst_disable)
return -ENODEV;
+ rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
+ if (rc)
+ return rc;
raw_spin_lock_irqsave(&erst_lock, flags);
if (erst_erange.attr & ERST_RANGE_NVRAM)
rc = __erst_clear_from_nvram(record_id);
else
rc = __erst_clear_from_storage(record_id);
raw_spin_unlock_irqrestore(&erst_lock, flags);
-
+ if (rc)
+ goto out;
+ entries = erst_record_id_cache.entries;
+ for (i = 0; i < erst_record_id_cache.len; i++) {
+ if (entries[i] == record_id)
+ entries[i] = APEI_ERST_INVALID_RECORD_ID;
+ }
+ __erst_record_id_cache_compact();
+out:
+ mutex_unlock(&erst_record_id_cache.lock);
return rc;
}
EXPORT_SYMBOL_GPL(erst_clear);
return 0;
}
+ static size_t erst_reader(u64 *id, enum pstore_type_id *type,
+ struct timespec *time);
+ static u64 erst_writer(enum pstore_type_id type, size_t size);
+
+ static struct pstore_info erst_info = {
+ .owner = THIS_MODULE,
+ .name = "erst",
+ .read = erst_reader,
+ .write = erst_writer,
+ .erase = erst_clear
+ };
+
+ #define CPER_CREATOR_PSTORE \
+ UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \
+ 0x64, 0x90, 0xb8, 0x9d)
+ #define CPER_SECTION_TYPE_DMESG \
+ UUID_LE(0xc197e04e, 0xd545, 0x4a70, 0x9c, 0x17, 0xa5, 0x54, \
+ 0x94, 0x19, 0xeb, 0x12)
+ #define CPER_SECTION_TYPE_MCE \
+ UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \
+ 0x04, 0x4a, 0x38, 0xfc)
+
+ struct cper_pstore_record {
+ struct cper_record_header hdr;
+ struct cper_section_descriptor sec_hdr;
+ char data[];
+ } __packed;
+
+ static size_t erst_reader(u64 *id, enum pstore_type_id *type,
+ struct timespec *time)
+ {
+ int rc;
+ ssize_t len;
+ unsigned long flags;
+ u64 record_id;
+ struct cper_pstore_record *rcd = (struct cper_pstore_record *)
+ (erst_info.buf - sizeof(*rcd));
+
+ if (erst_disable)
+ return -ENODEV;
+
+ raw_spin_lock_irqsave(&erst_lock, flags);
+ skip:
+ rc = __erst_get_next_record_id(&record_id);
+ if (rc) {
+ raw_spin_unlock_irqrestore(&erst_lock, flags);
+ return rc;
+ }
+ /* no more record */
+ if (record_id == APEI_ERST_INVALID_RECORD_ID) {
+ raw_spin_unlock_irqrestore(&erst_lock, flags);
+ return 0;
+ }
+
+ len = __erst_read(record_id, &rcd->hdr, sizeof(*rcd) +
+ erst_erange.size);
+ if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0)
+ goto skip;
+ raw_spin_unlock_irqrestore(&erst_lock, flags);
+
+ *id = record_id;
+ if (uuid_le_cmp(rcd->sec_hdr.section_type,
+ CPER_SECTION_TYPE_DMESG) == 0)
+ *type = PSTORE_TYPE_DMESG;
+ else if (uuid_le_cmp(rcd->sec_hdr.section_type,
+ CPER_SECTION_TYPE_MCE) == 0)
+ *type = PSTORE_TYPE_MCE;
+ else
+ *type = PSTORE_TYPE_UNKNOWN;
+
+ if (rcd->hdr.validation_bits & CPER_VALID_TIMESTAMP)
+ time->tv_sec = rcd->hdr.timestamp;
+ else
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+
+ return len - sizeof(*rcd);
+ }
+
+ static u64 erst_writer(enum pstore_type_id type, size_t size)
+ {
+ struct cper_pstore_record *rcd = (struct cper_pstore_record *)
+ (erst_info.buf - sizeof(*rcd));
+
+ memset(rcd, 0, sizeof(*rcd));
+ memcpy(rcd->hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
+ rcd->hdr.revision = CPER_RECORD_REV;
+ rcd->hdr.signature_end = CPER_SIG_END;
+ rcd->hdr.section_count = 1;
+ rcd->hdr.error_severity = CPER_SEV_FATAL;
+ /* timestamp valid. platform_id, partition_id are invalid */
+ rcd->hdr.validation_bits = CPER_VALID_TIMESTAMP;
+ rcd->hdr.timestamp = get_seconds();
+ rcd->hdr.record_length = sizeof(*rcd) + size;
+ rcd->hdr.creator_id = CPER_CREATOR_PSTORE;
+ rcd->hdr.notification_type = CPER_NOTIFY_MCE;
+ rcd->hdr.record_id = cper_next_record_id();
+ rcd->hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR;
+
+ rcd->sec_hdr.section_offset = sizeof(*rcd);
+ rcd->sec_hdr.section_length = size;
+ rcd->sec_hdr.revision = CPER_SEC_REV;
+ /* fru_id and fru_text are invalid */
+ rcd->sec_hdr.validation_bits = 0;
+ rcd->sec_hdr.flags = CPER_SEC_PRIMARY;
+ switch (type) {
+ case PSTORE_TYPE_DMESG:
+ rcd->sec_hdr.section_type = CPER_SECTION_TYPE_DMESG;
+ break;
+ case PSTORE_TYPE_MCE:
+ rcd->sec_hdr.section_type = CPER_SECTION_TYPE_MCE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ rcd->sec_hdr.section_severity = CPER_SEV_FATAL;
+
+ erst_write(&rcd->hdr);
+
+ return rcd->hdr.record_id;
+ }
+
static int __init erst_init(void)
{
int rc = 0;
struct apei_exec_context ctx;
struct apei_resources erst_resources;
struct resource *r;
+ char *buf;
if (acpi_disabled)
goto err;
if (!erst_erange.vaddr)
goto err_release_erange;
+ buf = kmalloc(erst_erange.size, GFP_KERNEL);
+ mutex_init(&erst_info.buf_mutex);
+ if (buf) {
+ erst_info.buf = buf + sizeof(struct cper_pstore_record);
+ erst_info.bufsize = erst_erange.size -
+ sizeof(struct cper_pstore_record);
+ if (pstore_register(&erst_info)) {
+ pr_info(ERST_PFX "Could not register with persistent store\n");
+ kfree(buf);
+ }
+ }
+
pr_info(ERST_PFX
"Error Record Serialization Table (ERST) support is initialized.\n");
extern char line_buf[80];
#endif /*ENABLE_DEBUGGER */
-static unsigned int acpi_irq_irq;
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
void __iomem *virt;
acpi_physical_address phys;
acpi_size size;
- struct kref ref;
+ unsigned long refcount;
};
static LIST_HEAD(acpi_ioremaps);
-static DEFINE_SPINLOCK(acpi_ioremap_lock);
+static DEFINE_MUTEX(acpi_ioremap_lock);
static void __init acpi_osi_setup_late(void);
return NULL;
}
+void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
+{
+ struct acpi_ioremap *map;
+ void __iomem *virt = NULL;
+
+ mutex_lock(&acpi_ioremap_lock);
+ map = acpi_map_lookup(phys, size);
+ if (map) {
+ virt = map->virt + (phys - map->phys);
+ map->refcount++;
+ }
+ mutex_unlock(&acpi_ioremap_lock);
+ return virt;
+}
+EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
+
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
void __iomem *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
- struct acpi_ioremap *map, *tmp_map;
- unsigned long flags;
+ struct acpi_ioremap *map;
void __iomem *virt;
acpi_physical_address pg_off;
acpi_size pg_sz;
if (!acpi_gbl_permanent_mmap)
return __acpi_map_table((unsigned long)phys, size);
+ mutex_lock(&acpi_ioremap_lock);
+ /* Check if there's a suitable mapping already. */
+ map = acpi_map_lookup(phys, size);
+ if (map) {
+ map->refcount++;
+ goto out;
+ }
+
map = kzalloc(sizeof(*map), GFP_KERNEL);
- if (!map)
+ if (!map) {
+ mutex_unlock(&acpi_ioremap_lock);
return NULL;
+ }
pg_off = round_down(phys, PAGE_SIZE);
pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
virt = acpi_os_ioremap(pg_off, pg_sz);
if (!virt) {
+ mutex_unlock(&acpi_ioremap_lock);
kfree(map);
return NULL;
}
map->virt = virt;
map->phys = pg_off;
map->size = pg_sz;
- kref_init(&map->ref);
-
- spin_lock_irqsave(&acpi_ioremap_lock, flags);
- /* Check if page has already been mapped. */
- tmp_map = acpi_map_lookup(phys, size);
- if (tmp_map) {
- kref_get(&tmp_map->ref);
- spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
- iounmap(map->virt);
- kfree(map);
- return tmp_map->virt + (phys - tmp_map->phys);
- }
+ map->refcount = 1;
+
list_add_tail_rcu(&map->list, &acpi_ioremaps);
- spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+ out:
+ mutex_unlock(&acpi_ioremap_lock);
return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
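A small sketch (not from this patch) of what the refcounting buys: overlapping acpi_os_map_memory() calls now share a single ioremap, and each call must still be balanced by acpi_os_unmap_memory(). The physical address and length below are arbitrary examples; NULL checks are omitted for brevity.

	static void example_shared_acpi_mapping(void)
	{
		void __iomem *a, *b;

		a = acpi_os_map_memory(0xfed40000, 0x80);
		b = acpi_os_map_memory(0xfed40000, 0x80);	/* same range: mapping reused */

		acpi_os_unmap_memory(b, 0x80);	/* refcount drops, mapping kept */
		acpi_os_unmap_memory(a, 0x80);	/* last reference: iounmap() + kfree() */
	}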
-static void acpi_kref_del_iomap(struct kref *ref)
+static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
- struct acpi_ioremap *map;
+ if (!--map->refcount)
+ list_del_rcu(&map->list);
+}
- map = container_of(ref, struct acpi_ioremap, ref);
- list_del_rcu(&map->list);
+static void acpi_os_map_cleanup(struct acpi_ioremap *map)
+{
+ if (!map->refcount) {
+ synchronize_rcu();
+ iounmap(map->virt);
+ kfree(map);
+ }
}
void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
struct acpi_ioremap *map;
- unsigned long flags;
- int del;
if (!acpi_gbl_permanent_mmap) {
__acpi_unmap_table(virt, size);
return;
}
- spin_lock_irqsave(&acpi_ioremap_lock, flags);
+ mutex_lock(&acpi_ioremap_lock);
map = acpi_map_lookup_virt(virt, size);
if (!map) {
- spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
- printk(KERN_ERR PREFIX "%s: bad address %p\n", __func__, virt);
- dump_stack();
+ mutex_unlock(&acpi_ioremap_lock);
+ WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
return;
}
+ acpi_os_drop_map_ref(map);
+ mutex_unlock(&acpi_ioremap_lock);
- del = kref_put(&map->ref, acpi_kref_del_iomap);
- spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
-
- if (!del)
- return;
-
- synchronize_rcu();
- iounmap(map->virt);
- kfree(map);
+ acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
__acpi_unmap_table(virt, size);
}
-int acpi_os_map_generic_address(struct acpi_generic_address *addr)
+static int acpi_os_map_generic_address(struct acpi_generic_address *addr)
{
void __iomem *virt;
return 0;
}
-EXPORT_SYMBOL_GPL(acpi_os_map_generic_address);
-void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
+static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
{
- void __iomem *virt;
- unsigned long flags;
- acpi_size size = addr->bit_width / 8;
+ struct acpi_ioremap *map;
if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return;
if (!addr->address || !addr->bit_width)
return;
- spin_lock_irqsave(&acpi_ioremap_lock, flags);
- virt = acpi_map_vaddr_lookup(addr->address, size);
- spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+ mutex_lock(&acpi_ioremap_lock);
+ map = acpi_map_lookup(addr->address, addr->bit_width / 8);
+ if (!map) {
+ mutex_unlock(&acpi_ioremap_lock);
+ return;
+ }
+ acpi_os_drop_map_ref(map);
+ mutex_unlock(&acpi_ioremap_lock);
- acpi_os_unmap_memory(virt, size);
+ acpi_os_map_cleanup(map);
}
-EXPORT_SYMBOL_GPL(acpi_os_unmap_generic_address);
#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_irq_stats_init();
/*
- * Ignore the GSI from the core, and use the value in our copy of the
- * FADT. It may not be the same if an interrupt source override exists
- * for the SCI.
+ * ACPI interrupts different from the SCI in our copy of the FADT are
+ * not supported.
*/
- gsi = acpi_gbl_FADT.sci_interrupt;
+ if (gsi != acpi_gbl_FADT.sci_interrupt)
+ return AE_BAD_PARAMETER;
+
+ if (acpi_irq_handler)
+ return AE_ALREADY_ACQUIRED;
+
if (acpi_gsi_to_irq(gsi, &irq) < 0) {
printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
gsi);
acpi_irq_context = context;
if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
+ acpi_irq_handler = NULL;
return AE_NOT_ACQUIRED;
}
- acpi_irq_irq = irq;
return AE_OK;
}
acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
- if (irq) {
- free_irq(irq, acpi_irq);
- acpi_irq_handler = NULL;
- acpi_irq_irq = 0;
- }
+ if (irq != acpi_gbl_FADT.sci_interrupt)
+ return AE_BAD_PARAMETER;
+
+ free_irq(irq, acpi_irq);
+ acpi_irq_handler = NULL;
return AE_OK;
}
acpi_status __init acpi_os_initialize1(void)
{
- kacpid_wq = create_workqueue("kacpid");
- kacpi_notify_wq = create_workqueue("kacpi_notify");
- kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
+ kacpid_wq = alloc_workqueue("kacpid", 0, 1);
+ kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
+ kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
BUG_ON(!kacpi_hotplug_wq);
acpi_status acpi_os_terminate(void)
{
if (acpi_irq_handler) {
- acpi_os_remove_interrupt_handler(acpi_irq_irq,
+ acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
acpi_irq_handler);
}
#include <linux/device.h>
#include <linux/suspend.h>
#include <linux/reboot.h>
+ #include <linux/acpi.h>
#include <asm/io.h>
#endif /* CONFIG_ACPI_SLEEP */
#ifdef CONFIG_SUSPEND
-extern void do_suspend_lowlevel(void);
-
static u32 acpi_suspend_states[] = {
[PM_SUSPEND_ON] = ACPI_STATE_S0,
[PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
static int acpi_suspend_enter(suspend_state_t pm_state)
{
acpi_status status = AE_OK;
- unsigned long flags = 0;
u32 acpi_state = acpi_target_sleep_state;
+ int error;
ACPI_FLUSH_CPU_CACHE();
- /* Do arch specific saving of state. */
- if (acpi_state == ACPI_STATE_S3) {
- int error = acpi_save_state_mem();
-
- if (error)
- return error;
- }
-
- local_irq_save(flags);
switch (acpi_state) {
case ACPI_STATE_S1:
barrier();
break;
case ACPI_STATE_S3:
- do_suspend_lowlevel();
+ error = acpi_suspend_lowlevel();
+ if (error)
+ return error;
+ pr_info(PREFIX "Low-level resume complete\n");
break;
}
/* Allow EC transactions to happen. */
acpi_ec_unblock_transactions_early();
- local_irq_restore(flags);
- printk(KERN_DEBUG "Back to C!\n");
-
- /* restore processor state */
- if (acpi_state == ACPI_STATE_S3)
- acpi_restore_state_mem();
-
suspend_nvs_restore();
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
static int acpi_hibernation_enter(void)
{
acpi_status status = AE_OK;
- unsigned long flags = 0;
ACPI_FLUSH_CPU_CACHE();
- local_irq_save(flags);
/* This shouldn't return. If it returns, we have a problem */
status = acpi_enter_sleep_state(ACPI_STATE_S4);
/* Reprogram control registers and execute _BFS */
acpi_leave_sleep_state_prep(ACPI_STATE_S4);
- local_irq_restore(flags);
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
return -EINVAL;
}
- #ifdef CONFIG_PM_OPS
+ #ifdef CONFIG_PM
/**
* acpi_pm_device_sleep_state - return preferred power state of ACPI device
* in the system sleep state given by %acpi_target_sleep_state
*d_min_p = d_min;
return d_max;
}
- #endif /* CONFIG_PM_OPS */
+ #endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
/**
struct acpi_handle_list resources;
struct acpi_device_wakeup_flags flags;
int prepare_count;
- int run_wake_count;
};
/* Device */
int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state);
int acpi_disable_wakeup_device_power(struct acpi_device *dev);
- #ifdef CONFIG_PM_OPS
+ #ifdef CONFIG_PM
int acpi_pm_device_sleep_state(struct device *, int *);
#else
static inline int acpi_pm_device_sleep_state(struct device *d, int *p)