Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 25 Jan 2012 06:05:44 +0000 (22:05 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 25 Jan 2012 06:05:44 +0000 (22:05 -0800)
Quoth Len:
 "This fixes a merge-window regression due to a conflict
  between error injection and preparation to remove atomicio.c.
  Here we fix that regression and complete the removal
  of atomicio.c.

  This also re-orders some idle initialization code to
  complete the merge window series that allows cpuidle
  to cope with bringing processors on-line after boot."
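
Editor's note: the EINJ part of this pull reduces to one pattern, visible in the
drivers/acpi/apei/einj.c hunks below: the injection parameter area lives in
firmware-reserved RAM, so it is now mapped through acpi_os_map_memory() (which
may use kmap() for RAM pages) and accessed as ordinary memory, instead of
ioremap() plus readl()/writeq(). A minimal sketch of that pattern, with
illustrative struct and function names that are not taken from the patch:

    #include <linux/acpi.h>	/* acpi_os_map_memory()/acpi_os_unmap_memory() */

    /* Illustrative layout, not a real ACPI table definition. */
    struct example_param {
            u64 param1;
            u64 param2;
    };

    static int example_read_params(u64 paddr, u64 *p1, u64 *p2)
    {
            struct example_param *p;

            /* Map firmware-reserved RAM via the ACPI OSL, as einj.c now does. */
            p = acpi_os_map_memory(paddr, sizeof(*p));
            if (!p)
                    return -ENOMEM;

            *p1 = p->param1;	/* plain loads, no readq()/readl() pairs */
            *p2 = p->param2;

            acpi_os_unmap_memory(p, sizeof(*p));
            return 0;
    }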

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux:
  Use acpi_os_map_memory() instead of ioremap() in einj driver
  ACPI, APEI, EINJ, cleanup 0 vs NULL confusion
  ACPI, APEI, EINJ Allow empty Trigger Error Action Table
  thermal: Rename generate_netlink_event
  ACPI / PM: Add Sony Vaio VPCCW29FX to nonvs blacklist.
  ACPI: Remove ./drivers/acpi/atomicio.[ch]
  ACPI, APEI: Add RAM mapping support to ACPI
  ACPI, APEI: Add 64-bit read/write support for APEI on i386
  ACPI processor hotplug: Delay acpi_processor_start() call for hotplugged cores
  ACPI processor hotplug: Split up acpi_processor_add

14 files changed:
Documentation/thermal/sysfs-api.txt
drivers/acpi/Makefile
drivers/acpi/apei/apei-base.c
drivers/acpi/apei/einj.c
drivers/acpi/atomicio.c [deleted file]
drivers/acpi/osl.c
drivers/acpi/processor_driver.c
drivers/acpi/sleep.c
drivers/idle/intel_idle.c
drivers/thermal/thermal_sys.c
include/acpi/acpiosxf.h
include/acpi/atomicio.h [deleted file]
include/acpi/processor.h
include/linux/thermal.h

diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt
index b61e46f..1733ab9 100644
@@ -284,7 +284,7 @@ method, the sys I/F structure will be built like this:
 The framework includes a simple notification mechanism, in the form of a
 netlink event. Netlink socket initialization is done during the _init_
 of the framework. Drivers which intend to use the notification mechanism
-just need to call generate_netlink_event() with two arguments viz
+just need to call thermal_generate_netlink_event() with two arguments viz
 (originator, event). Typically the originator will be an integer assigned
 to a thermal_zone_device when it registers itself with the framework. The
 event will be one of:{THERMAL_AUX0, THERMAL_AUX1, THERMAL_CRITICAL,
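
For illustration, a hypothetical driver would report an event through the
renamed helper roughly as follows; 'my_tz_id' stands for whatever originator
id the driver registered with the framework and is not part of the patch:

    #include <linux/thermal.h>

    /* Sketch only: notify userspace of a critical trip via the thermal netlink event. */
    static void my_report_critical(u32 my_tz_id)
    {
            thermal_generate_netlink_event(my_tz_id, THERMAL_CRITICAL);
    }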
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index c07f44f..1567028 100644
@@ -19,7 +19,6 @@ obj-y                         += acpi.o \
 
 # All the builtin files are in the "acpi." module_param namespace.
 acpi-y                         += osl.o utils.o reboot.o
-acpi-y                         += atomicio.o
 acpi-y                         += nvs.o
 
 # sleep related files
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index e45350c..e5d53b7 100644
@@ -596,33 +596,19 @@ int apei_read(u64 *val, struct acpi_generic_address *reg)
 {
        int rc;
        u64 address;
-       u32 tmp, width = reg->bit_width;
        acpi_status status;
 
        rc = apei_check_gar(reg, &address);
        if (rc)
                return rc;
 
-       if (width == 64)
-               width = 32;     /* Break into two 32-bit transfers */
-
        *val = 0;
        switch(reg->space_id) {
        case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-               status = acpi_os_read_memory((acpi_physical_address)
-                                            address, &tmp, width);
+               status = acpi_os_read_memory64((acpi_physical_address)
+                                            address, val, reg->bit_width);
                if (ACPI_FAILURE(status))
                        return -EIO;
-               *val = tmp;
-
-               if (reg->bit_width == 64) {
-                       /* Read the top 32 bits */
-                       status = acpi_os_read_memory((acpi_physical_address)
-                                                    (address + 4), &tmp, 32);
-                       if (ACPI_FAILURE(status))
-                               return -EIO;
-                       *val |= ((u64)tmp << 32);
-               }
                break;
        case ACPI_ADR_SPACE_SYSTEM_IO:
                status = acpi_os_read_port(address, (u32 *)val, reg->bit_width);
@@ -642,31 +628,18 @@ int apei_write(u64 val, struct acpi_generic_address *reg)
 {
        int rc;
        u64 address;
-       u32 width = reg->bit_width;
        acpi_status status;
 
        rc = apei_check_gar(reg, &address);
        if (rc)
                return rc;
 
-       if (width == 64)
-               width = 32;     /* Break into two 32-bit transfers */
-
        switch (reg->space_id) {
        case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-               status = acpi_os_write_memory((acpi_physical_address)
-                                             address, ACPI_LODWORD(val),
-                                             width);
+               status = acpi_os_write_memory64((acpi_physical_address)
+                                             address, val, reg->bit_width);
                if (ACPI_FAILURE(status))
                        return -EIO;
-
-               if (reg->bit_width == 64) {
-                       status = acpi_os_write_memory((acpi_physical_address)
-                                                     (address + 4),
-                                                     ACPI_HIDWORD(val), 32);
-                       if (ACPI_FAILURE(status))
-                               return -EIO;
-               }
                break;
        case ACPI_ADR_SPACE_SYSTEM_IO:
                status = acpi_os_write_port(address, val, reg->bit_width);
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 5b898d4..4ca087d 100644
@@ -141,21 +141,6 @@ static DEFINE_MUTEX(einj_mutex);
 
 static void *einj_param;
 
-#ifndef readq
-static inline __u64 readq(volatile void __iomem *addr)
-{
-       return ((__u64)readl(addr+4) << 32) + readl(addr);
-}
-#endif
-
-#ifndef writeq
-static inline void writeq(__u64 val, volatile void __iomem *addr)
-{
-       writel(val, addr);
-       writel(val >> 32, addr+4);
-}
-#endif
-
 static void einj_exec_ctx_init(struct apei_exec_context *ctx)
 {
        apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type),
@@ -204,22 +189,21 @@ static int einj_timedout(u64 *t)
 static void check_vendor_extension(u64 paddr,
                                   struct set_error_type_with_address *v5param)
 {
-       int     offset = readl(&v5param->vendor_extension);
+       int     offset = v5param->vendor_extension;
        struct  vendor_error_type_extension *v;
        u32     sbdf;
 
        if (!offset)
                return;
-       v = ioremap(paddr + offset, sizeof(*v));
+       v = acpi_os_map_memory(paddr + offset, sizeof(*v));
        if (!v)
                return;
-       sbdf = readl(&v->pcie_sbdf);
+       sbdf = v->pcie_sbdf;
        sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n",
                sbdf >> 24, (sbdf >> 16) & 0xff,
                (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7,
-                readw(&v->vendor_id), readw(&v->device_id),
-               readb(&v->rev_id));
-       iounmap(v);
+                v->vendor_id, v->device_id, v->rev_id);
+       acpi_os_unmap_memory(v, sizeof(*v));
 }
 
 static void *einj_get_parameter_address(void)
@@ -247,7 +231,7 @@ static void *einj_get_parameter_address(void)
        if (paddrv5) {
                struct set_error_type_with_address *v5param;
 
-               v5param = ioremap(paddrv5, sizeof(*v5param));
+               v5param = acpi_os_map_memory(paddrv5, sizeof(*v5param));
                if (v5param) {
                        acpi5 = 1;
                        check_vendor_extension(paddrv5, v5param);
@@ -257,17 +241,17 @@ static void *einj_get_parameter_address(void)
        if (paddrv4) {
                struct einj_parameter *v4param;
 
-               v4param = ioremap(paddrv4, sizeof(*v4param));
+               v4param = acpi_os_map_memory(paddrv4, sizeof(*v4param));
                if (!v4param)
-                       return 0;
-               if (readq(&v4param->reserved1) || readq(&v4param->reserved2)) {
-                       iounmap(v4param);
-                       return 0;
+                       return NULL;
+               if (v4param->reserved1 || v4param->reserved2) {
+                       acpi_os_unmap_memory(v4param, sizeof(*v4param));
+                       return NULL;
                }
                return v4param;
        }
 
-       return 0;
+       return NULL;
 }
 
 /* do sanity check to trigger table */
@@ -276,7 +260,7 @@ static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
        if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger))
                return -EINVAL;
        if (trigger_tab->table_size > PAGE_SIZE ||
-           trigger_tab->table_size <= trigger_tab->header_size)
+           trigger_tab->table_size < trigger_tab->header_size)
                return -EINVAL;
        if (trigger_tab->entry_count !=
            (trigger_tab->table_size - trigger_tab->header_size) /
@@ -340,6 +324,11 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
                           "The trigger error action table is invalid\n");
                goto out_rel_header;
        }
+
+       /* No action structures in the TRIGGER_ERROR table, nothing to do */
+       if (!trigger_tab->entry_count)
+               goto out_rel_header;
+
        rc = -EIO;
        table_size = trigger_tab->table_size;
        r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
@@ -435,41 +424,41 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
        if (acpi5) {
                struct set_error_type_with_address *v5param = einj_param;
 
-               writel(type, &v5param->type);
+               v5param->type = type;
                if (type & 0x80000000) {
                        switch (vendor_flags) {
                        case SETWA_FLAGS_APICID:
-                               writel(param1, &v5param->apicid);
+                               v5param->apicid = param1;
                                break;
                        case SETWA_FLAGS_MEM:
-                               writeq(param1, &v5param->memory_address);
-                               writeq(param2, &v5param->memory_address_range);
+                               v5param->memory_address = param1;
+                               v5param->memory_address_range = param2;
                                break;
                        case SETWA_FLAGS_PCIE_SBDF:
-                               writel(param1, &v5param->pcie_sbdf);
+                               v5param->pcie_sbdf = param1;
                                break;
                        }
-                       writel(vendor_flags, &v5param->flags);
+                       v5param->flags = vendor_flags;
                } else {
                        switch (type) {
                        case ACPI_EINJ_PROCESSOR_CORRECTABLE:
                        case ACPI_EINJ_PROCESSOR_UNCORRECTABLE:
                        case ACPI_EINJ_PROCESSOR_FATAL:
-                               writel(param1, &v5param->apicid);
-                               writel(SETWA_FLAGS_APICID, &v5param->flags);
+                               v5param->apicid = param1;
+                               v5param->flags = SETWA_FLAGS_APICID;
                                break;
                        case ACPI_EINJ_MEMORY_CORRECTABLE:
                        case ACPI_EINJ_MEMORY_UNCORRECTABLE:
                        case ACPI_EINJ_MEMORY_FATAL:
-                               writeq(param1, &v5param->memory_address);
-                               writeq(param2, &v5param->memory_address_range);
-                               writel(SETWA_FLAGS_MEM, &v5param->flags);
+                               v5param->memory_address = param1;
+                               v5param->memory_address_range = param2;
+                               v5param->flags = SETWA_FLAGS_MEM;
                                break;
                        case ACPI_EINJ_PCIX_CORRECTABLE:
                        case ACPI_EINJ_PCIX_UNCORRECTABLE:
                        case ACPI_EINJ_PCIX_FATAL:
-                               writel(param1, &v5param->pcie_sbdf);
-                               writel(SETWA_FLAGS_PCIE_SBDF, &v5param->flags);
+                               v5param->pcie_sbdf = param1;
+                               v5param->flags = SETWA_FLAGS_PCIE_SBDF;
                                break;
                        }
                }
@@ -479,8 +468,8 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
                        return rc;
                if (einj_param) {
                        struct einj_parameter *v4param = einj_param;
-                       writeq(param1, &v4param->param1);
-                       writeq(param2, &v4param->param2);
+                       v4param->param1 = param1;
+                       v4param->param2 = param2;
                }
        }
        rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
@@ -731,8 +720,13 @@ static int __init einj_init(void)
        return 0;
 
 err_unmap:
-       if (einj_param)
-               iounmap(einj_param);
+       if (einj_param) {
+               acpi_size size = (acpi5) ?
+                       sizeof(struct set_error_type_with_address) :
+                       sizeof(struct einj_parameter);
+
+               acpi_os_unmap_memory(einj_param, size);
+       }
        apei_exec_post_unmap_gars(&ctx);
 err_release:
        apei_resources_release(&einj_resources);
@@ -748,8 +742,13 @@ static void __exit einj_exit(void)
 {
        struct apei_exec_context ctx;
 
-       if (einj_param)
-               iounmap(einj_param);
+       if (einj_param) {
+               acpi_size size = (acpi5) ?
+                       sizeof(struct set_error_type_with_address) :
+                       sizeof(struct einj_parameter);
+
+               acpi_os_unmap_memory(einj_param, size);
+       }
        einj_exec_ctx_init(&ctx);
        apei_exec_post_unmap_gars(&ctx);
        apei_resources_release(&einj_resources);
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
deleted file mode 100644
index d4a5b3d..0000000
+++ /dev/null
@@ -1,422 +0,0 @@
-/*
- * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
- * accessing in atomic context.
- *
- * This is used for NMI handler to access IO memory area, because
- * ioremap/iounmap can not be used in NMI handler. The IO memory area
- * is pre-mapped in process context and accessed in NMI handler.
- *
- * Copyright (C) 2009-2010, Intel Corp.
- *     Author: Huang Ying <ying.huang@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-#include <linux/io.h>
-#include <linux/kref.h>
-#include <linux/rculist.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <acpi/atomicio.h>
-
-#define ACPI_PFX "ACPI: "
-
-static LIST_HEAD(acpi_iomaps);
-/*
- * Used for mutual exclusion between writers of acpi_iomaps list, for
- * synchronization between readers and writer, RCU is used.
- */
-static DEFINE_SPINLOCK(acpi_iomaps_lock);
-
-struct acpi_iomap {
-       struct list_head list;
-       void __iomem *vaddr;
-       unsigned long size;
-       phys_addr_t paddr;
-       struct kref ref;
-};
-
-/* acpi_iomaps_lock or RCU read lock must be held before calling */
-static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
-                                           unsigned long size)
-{
-       struct acpi_iomap *map;
-
-       list_for_each_entry_rcu(map, &acpi_iomaps, list) {
-               if (map->paddr + map->size >= paddr + size &&
-                   map->paddr <= paddr)
-                       return map;
-       }
-       return NULL;
-}
-
-/*
- * Atomic "ioremap" used by NMI handler, if the specified IO memory
- * area is not pre-mapped, NULL will be returned.
- *
- * acpi_iomaps_lock or RCU read lock must be held before calling
- */
-static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
-                                        unsigned long size)
-{
-       struct acpi_iomap *map;
-
-       map = __acpi_find_iomap(paddr, size/8);
-       if (map)
-               return map->vaddr + (paddr - map->paddr);
-       else
-               return NULL;
-}
-
-/* acpi_iomaps_lock must be held before calling */
-static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
-                                       unsigned long size)
-{
-       struct acpi_iomap *map;
-
-       map = __acpi_find_iomap(paddr, size);
-       if (map) {
-               kref_get(&map->ref);
-               return map->vaddr + (paddr - map->paddr);
-       } else
-               return NULL;
-}
-
-#ifndef CONFIG_IA64
-#define should_use_kmap(pfn)   page_is_ram(pfn)
-#else
-/* ioremap will take care of cache attributes */
-#define should_use_kmap(pfn)   0
-#endif
-
-static void __iomem *acpi_map(phys_addr_t pg_off, unsigned long pg_sz)
-{
-       unsigned long pfn;
-
-       pfn = pg_off >> PAGE_SHIFT;
-       if (should_use_kmap(pfn)) {
-               if (pg_sz > PAGE_SIZE)
-                       return NULL;
-               return (void __iomem __force *)kmap(pfn_to_page(pfn));
-       } else
-               return ioremap(pg_off, pg_sz);
-}
-
-static void acpi_unmap(phys_addr_t pg_off, void __iomem *vaddr)
-{
-       unsigned long pfn;
-
-       pfn = pg_off >> PAGE_SHIFT;
-       if (page_is_ram(pfn))
-               kunmap(pfn_to_page(pfn));
-       else
-               iounmap(vaddr);
-}
-
-/*
- * Used to pre-map the specified IO memory area. First try to find
- * whether the area is already pre-mapped, if it is, increase the
- * reference count (in __acpi_try_ioremap) and return; otherwise, do
- * the real ioremap, and add the mapping into acpi_iomaps list.
- */
-static void __iomem *acpi_pre_map(phys_addr_t paddr,
-                                 unsigned long size)
-{
-       void __iomem *vaddr;
-       struct acpi_iomap *map;
-       unsigned long pg_sz, flags;
-       phys_addr_t pg_off;
-
-       spin_lock_irqsave(&acpi_iomaps_lock, flags);
-       vaddr = __acpi_try_ioremap(paddr, size);
-       spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
-       if (vaddr)
-               return vaddr;
-
-       pg_off = paddr & PAGE_MASK;
-       pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
-       vaddr = acpi_map(pg_off, pg_sz);
-       if (!vaddr)
-               return NULL;
-       map = kmalloc(sizeof(*map), GFP_KERNEL);
-       if (!map)
-               goto err_unmap;
-       INIT_LIST_HEAD(&map->list);
-       map->paddr = pg_off;
-       map->size = pg_sz;
-       map->vaddr = vaddr;
-       kref_init(&map->ref);
-
-       spin_lock_irqsave(&acpi_iomaps_lock, flags);
-       vaddr = __acpi_try_ioremap(paddr, size);
-       if (vaddr) {
-               spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
-               acpi_unmap(pg_off, map->vaddr);
-               kfree(map);
-               return vaddr;
-       }
-       list_add_tail_rcu(&map->list, &acpi_iomaps);
-       spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
-
-       return map->vaddr + (paddr - map->paddr);
-err_unmap:
-       acpi_unmap(pg_off, vaddr);
-       return NULL;
-}
-
-/* acpi_iomaps_lock must be held before calling */
-static void __acpi_kref_del_iomap(struct kref *ref)
-{
-       struct acpi_iomap *map;
-
-       map = container_of(ref, struct acpi_iomap, ref);
-       list_del_rcu(&map->list);
-}
-
-/*
- * Used to post-unmap the specified IO memory area. The iounmap is
- * done only if the reference count goes zero.
- */
-static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
-{
-       struct acpi_iomap *map;
-       unsigned long flags;
-       int del;
-
-       spin_lock_irqsave(&acpi_iomaps_lock, flags);
-       map = __acpi_find_iomap(paddr, size);
-       BUG_ON(!map);
-       del = kref_put(&map->ref, __acpi_kref_del_iomap);
-       spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
-
-       if (!del)
-               return;
-
-       synchronize_rcu();
-       acpi_unmap(map->paddr, map->vaddr);
-       kfree(map);
-}
-
-/* In NMI handler, should set silent = 1 */
-static int acpi_check_gar(struct acpi_generic_address *reg,
-                         u64 *paddr, int silent)
-{
-       u32 width, space_id;
-
-       width = reg->bit_width;
-       space_id = reg->space_id;
-       /* Handle possible alignment issues */
-       memcpy(paddr, &reg->address, sizeof(*paddr));
-       if (!*paddr) {
-               if (!silent)
-                       pr_warning(FW_BUG ACPI_PFX
-                       "Invalid physical address in GAR [0x%llx/%u/%u]\n",
-                                  *paddr, width, space_id);
-               return -EINVAL;
-       }
-
-       if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
-               if (!silent)
-                       pr_warning(FW_BUG ACPI_PFX
-                                  "Invalid bit width in GAR [0x%llx/%u/%u]\n",
-                                  *paddr, width, space_id);
-               return -EINVAL;
-       }
-
-       if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
-           space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
-               if (!silent)
-                       pr_warning(FW_BUG ACPI_PFX
-                       "Invalid address space type in GAR [0x%llx/%u/%u]\n",
-                                  *paddr, width, space_id);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-/* Pre-map, working on GAR */
-int acpi_pre_map_gar(struct acpi_generic_address *reg)
-{
-       u64 paddr;
-       void __iomem *vaddr;
-       int rc;
-
-       if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
-               return 0;
-
-       rc = acpi_check_gar(reg, &paddr, 0);
-       if (rc)
-               return rc;
-
-       vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
-       if (!vaddr)
-               return -EIO;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(acpi_pre_map_gar);
-
-/* Post-unmap, working on GAR */
-int acpi_post_unmap_gar(struct acpi_generic_address *reg)
-{
-       u64 paddr;
-       int rc;
-
-       if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
-               return 0;
-
-       rc = acpi_check_gar(reg, &paddr, 0);
-       if (rc)
-               return rc;
-
-       acpi_post_unmap(paddr, reg->bit_width / 8);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
-
-#ifdef readq
-static inline u64 read64(const volatile void __iomem *addr)
-{
-       return readq(addr);
-}
-#else
-static inline u64 read64(const volatile void __iomem *addr)
-{
-       u64 l, h;
-       l = readl(addr);
-       h = readl(addr+4);
-       return l | (h << 32);
-}
-#endif
-
-/*
- * Can be used in atomic (including NMI) or process context. RCU read
- * lock can only be released after the IO memory area accessing.
- */
-static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
-{
-       void __iomem *addr;
-
-       rcu_read_lock();
-       addr = __acpi_ioremap_fast(paddr, width);
-       switch (width) {
-       case 8:
-               *val = readb(addr);
-               break;
-       case 16:
-               *val = readw(addr);
-               break;
-       case 32:
-               *val = readl(addr);
-               break;
-       case 64:
-               *val = read64(addr);
-               break;
-       default:
-               return -EINVAL;
-       }
-       rcu_read_unlock();
-
-       return 0;
-}
-
-#ifdef writeq
-static inline void write64(u64 val, volatile void __iomem *addr)
-{
-       writeq(val, addr);
-}
-#else
-static inline void write64(u64 val, volatile void __iomem *addr)
-{
-       writel(val, addr);
-       writel(val>>32, addr+4);
-}
-#endif
-
-static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
-{
-       void __iomem *addr;
-
-       rcu_read_lock();
-       addr = __acpi_ioremap_fast(paddr, width);
-       switch (width) {
-       case 8:
-               writeb(val, addr);
-               break;
-       case 16:
-               writew(val, addr);
-               break;
-       case 32:
-               writel(val, addr);
-               break;
-       case 64:
-               write64(val, addr);
-               break;
-       default:
-               return -EINVAL;
-       }
-       rcu_read_unlock();
-
-       return 0;
-}
-
-/* GAR accessing in atomic (including NMI) or process context */
-int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
-{
-       u64 paddr;
-       int rc;
-
-       rc = acpi_check_gar(reg, &paddr, 1);
-       if (rc)
-               return rc;
-
-       *val = 0;
-       switch (reg->space_id) {
-       case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-               return acpi_atomic_read_mem(paddr, val, reg->bit_width);
-       case ACPI_ADR_SPACE_SYSTEM_IO:
-               return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
-       default:
-               return -EINVAL;
-       }
-}
-EXPORT_SYMBOL_GPL(acpi_atomic_read);
-
-int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
-{
-       u64 paddr;
-       int rc;
-
-       rc = acpi_check_gar(reg, &paddr, 1);
-       if (rc)
-               return rc;
-
-       switch (reg->space_id) {
-       case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-               return acpi_atomic_write_mem(paddr, val, reg->bit_width);
-       case ACPI_ADR_SPACE_SYSTEM_IO:
-               return acpi_os_write_port(paddr, val, reg->bit_width);
-       default:
-               return -EINVAL;
-       }
-}
-EXPORT_SYMBOL_GPL(acpi_atomic_write);
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index fcc12d8..412a1e0 100644
@@ -31,6 +31,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/kmod.h>
@@ -321,6 +322,37 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
        return NULL;
 }
 
+#ifndef CONFIG_IA64
+#define should_use_kmap(pfn)   page_is_ram(pfn)
+#else
+/* ioremap will take care of cache attributes */
+#define should_use_kmap(pfn)   0
+#endif
+
+static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
+{
+       unsigned long pfn;
+
+       pfn = pg_off >> PAGE_SHIFT;
+       if (should_use_kmap(pfn)) {
+               if (pg_sz > PAGE_SIZE)
+                       return NULL;
+               return (void __iomem __force *)kmap(pfn_to_page(pfn));
+       } else
+               return acpi_os_ioremap(pg_off, pg_sz);
+}
+
+static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
+{
+       unsigned long pfn;
+
+       pfn = pg_off >> PAGE_SHIFT;
+       if (page_is_ram(pfn))
+               kunmap(pfn_to_page(pfn));
+       else
+               iounmap(vaddr);
+}
+
 void __iomem *__init_refok
 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 {
@@ -353,7 +385,7 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 
        pg_off = round_down(phys, PAGE_SIZE);
        pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
-       virt = acpi_os_ioremap(pg_off, pg_sz);
+       virt = acpi_map(pg_off, pg_sz);
        if (!virt) {
                mutex_unlock(&acpi_ioremap_lock);
                kfree(map);
@@ -384,7 +416,7 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map)
 {
        if (!map->refcount) {
                synchronize_rcu();
-               iounmap(map->virt);
+               acpi_unmap(map->phys, map->virt);
                kfree(map);
        }
 }
@@ -710,6 +742,67 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
        return AE_OK;
 }
 
+#ifdef readq
+static inline u64 read64(const volatile void __iomem *addr)
+{
+       return readq(addr);
+}
+#else
+static inline u64 read64(const volatile void __iomem *addr)
+{
+       u64 l, h;
+       l = readl(addr);
+       h = readl(addr+4);
+       return l | (h << 32);
+}
+#endif
+
+acpi_status
+acpi_os_read_memory64(acpi_physical_address phys_addr, u64 *value, u32 width)
+{
+       void __iomem *virt_addr;
+       unsigned int size = width / 8;
+       bool unmap = false;
+       u64 dummy;
+
+       rcu_read_lock();
+       virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
+       if (!virt_addr) {
+               rcu_read_unlock();
+               virt_addr = acpi_os_ioremap(phys_addr, size);
+               if (!virt_addr)
+                       return AE_BAD_ADDRESS;
+               unmap = true;
+       }
+
+       if (!value)
+               value = &dummy;
+
+       switch (width) {
+       case 8:
+               *(u8 *) value = readb(virt_addr);
+               break;
+       case 16:
+               *(u16 *) value = readw(virt_addr);
+               break;
+       case 32:
+               *(u32 *) value = readl(virt_addr);
+               break;
+       case 64:
+               *(u64 *) value = read64(virt_addr);
+               break;
+       default:
+               BUG();
+       }
+
+       if (unmap)
+               iounmap(virt_addr);
+       else
+               rcu_read_unlock();
+
+       return AE_OK;
+}
+
 acpi_status
 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
 {
@@ -749,6 +842,61 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
        return AE_OK;
 }
 
+#ifdef writeq
+static inline void write64(u64 val, volatile void __iomem *addr)
+{
+       writeq(val, addr);
+}
+#else
+static inline void write64(u64 val, volatile void __iomem *addr)
+{
+       writel(val, addr);
+       writel(val>>32, addr+4);
+}
+#endif
+
+acpi_status
+acpi_os_write_memory64(acpi_physical_address phys_addr, u64 value, u32 width)
+{
+       void __iomem *virt_addr;
+       unsigned int size = width / 8;
+       bool unmap = false;
+
+       rcu_read_lock();
+       virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
+       if (!virt_addr) {
+               rcu_read_unlock();
+               virt_addr = acpi_os_ioremap(phys_addr, size);
+               if (!virt_addr)
+                       return AE_BAD_ADDRESS;
+               unmap = true;
+       }
+
+       switch (width) {
+       case 8:
+               writeb(value, virt_addr);
+               break;
+       case 16:
+               writew(value, virt_addr);
+               break;
+       case 32:
+               writel(value, virt_addr);
+               break;
+       case 64:
+               write64(value, virt_addr);
+               break;
+       default:
+               BUG();
+       }
+
+       if (unmap)
+               iounmap(virt_addr);
+       else
+               rcu_read_unlock();
+
+       return AE_OK;
+}
+
 acpi_status
 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
                               u64 *value, u32 width)
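
A caller of the new 64-bit OSL accessors looks much like the apei_read()/
apei_write() hunks earlier in this diff: one 64-bit transfer replaces the old
pair of 32-bit accesses. A minimal sketch, with an illustrative wrapper name
and error mapping that are not part of the patch:

    #include <linux/acpi.h>	/* acpi_os_read_memory64() declaration */
    #include <linux/errno.h>

    /* Read one 64-bit value from a physical address through the ACPI OSL. */
    static int example_read_u64(acpi_physical_address paddr, u64 *out)
    {
            acpi_status status;

            status = acpi_os_read_memory64(paddr, out, 64);
            return ACPI_FAILURE(status) ? -EIO : 0;
    }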
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 0034ede..2b805d7 100644
@@ -84,7 +84,7 @@ static int acpi_processor_remove(struct acpi_device *device, int type);
 static void acpi_processor_notify(struct acpi_device *device, u32 event);
 static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr);
 static int acpi_processor_handle_eject(struct acpi_processor *pr);
-
+static int acpi_processor_start(struct acpi_processor *pr);
 
 static const struct acpi_device_id processor_device_ids[] = {
        {ACPI_PROCESSOR_OBJECT_HID, 0},
@@ -423,10 +423,29 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
        struct acpi_processor *pr = per_cpu(processors, cpu);
 
        if (action == CPU_ONLINE && pr) {
-               acpi_processor_ppc_has_changed(pr, 0);
-               acpi_processor_hotplug(pr);
-               acpi_processor_reevaluate_tstate(pr, action);
-               acpi_processor_tstate_has_changed(pr);
+               /* CPU got physically hotplugged and onlined the first time:
+                * Initialize missing things
+                */
+               if (pr->flags.need_hotplug_init) {
+                       struct cpuidle_driver *idle_driver =
+                               cpuidle_get_driver();
+
+                       printk(KERN_INFO "Will online and init hotplugged "
+                              "CPU: %d\n", pr->id);
+                       WARN(acpi_processor_start(pr), "Failed to start CPU:"
+                               " %d\n", pr->id);
+                       pr->flags.need_hotplug_init = 0;
+                       if (idle_driver && !strcmp(idle_driver->name,
+                                                  "intel_idle")) {
+                               intel_idle_cpu_init(pr->id);
+                       }
+               /* Normal CPU soft online event */
+               } else {
+                       acpi_processor_ppc_has_changed(pr, 0);
+                       acpi_processor_cst_has_changed(pr);
+                       acpi_processor_reevaluate_tstate(pr, action);
+                       acpi_processor_tstate_has_changed(pr);
+               }
        }
        if (action == CPU_DEAD && pr) {
                /* invalidate the flag.throttling after one CPU is offline */
@@ -440,6 +459,71 @@ static struct notifier_block acpi_cpu_notifier =
            .notifier_call = acpi_cpu_soft_notify,
 };
 
+/*
+ * acpi_processor_start() is called by the cpu_hotplug_notifier func:
+ * acpi_cpu_soft_notify(). Getting it __cpuinit{data} is difficult, the
+ * root cause seem to be that acpi_processor_uninstall_hotplug_notify()
+ * is in the module_exit (__exit) func. Allowing acpi_processor_start()
+ * to not be in __cpuinit section, but being called from __cpuinit funcs
+ * via __ref looks like the right thing to do here.
+ */
+static __ref int acpi_processor_start(struct acpi_processor *pr)
+{
+       struct acpi_device *device = per_cpu(processor_device_array, pr->id);
+       int result = 0;
+
+#ifdef CONFIG_CPU_FREQ
+       acpi_processor_ppc_has_changed(pr, 0);
+#endif
+       acpi_processor_get_throttling_info(pr);
+       acpi_processor_get_limit_info(pr);
+
+       if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
+               acpi_processor_power_init(pr, device);
+
+       pr->cdev = thermal_cooling_device_register("Processor", device,
+                                                  &processor_cooling_ops);
+       if (IS_ERR(pr->cdev)) {
+               result = PTR_ERR(pr->cdev);
+               goto err_power_exit;
+       }
+
+       dev_dbg(&device->dev, "registered as cooling_device%d\n",
+               pr->cdev->id);
+
+       result = sysfs_create_link(&device->dev.kobj,
+                                  &pr->cdev->device.kobj,
+                                  "thermal_cooling");
+       if (result) {
+               printk(KERN_ERR PREFIX "Create sysfs link\n");
+               goto err_thermal_unregister;
+       }
+       result = sysfs_create_link(&pr->cdev->device.kobj,
+                                  &device->dev.kobj,
+                                  "device");
+       if (result) {
+               printk(KERN_ERR PREFIX "Create sysfs link\n");
+               goto err_remove_sysfs_thermal;
+       }
+
+       return 0;
+
+err_remove_sysfs_thermal:
+       sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+err_thermal_unregister:
+       thermal_cooling_device_unregister(pr->cdev);
+err_power_exit:
+       acpi_processor_power_exit(pr, device);
+
+       return result;
+}
+
+/*
+ * Do not put anything in here which needs the core to be online.
+ * For example MSR access or setting up things which check for cpuinfo_x86
+ * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
+ * Such things have to be put in and set up above in acpi_processor_start()
+ */
 static int __cpuinit acpi_processor_add(struct acpi_device *device)
 {
        struct acpi_processor *pr = NULL;
@@ -495,48 +579,27 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
                goto err_free_cpumask;
        }
 
-#ifdef CONFIG_CPU_FREQ
-       acpi_processor_ppc_has_changed(pr, 0);
-#endif
-       acpi_processor_get_throttling_info(pr);
-       acpi_processor_get_limit_info(pr);
-
-       if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
-               acpi_processor_power_init(pr, device);
-
-       pr->cdev = thermal_cooling_device_register("Processor", device,
-                                               &processor_cooling_ops);
-       if (IS_ERR(pr->cdev)) {
-               result = PTR_ERR(pr->cdev);
-               goto err_power_exit;
-       }
+       /*
+        * Do not start hotplugged CPUs now, but when they
+        * are onlined the first time
+        */
+       if (pr->flags.need_hotplug_init)
+               return 0;
 
-       dev_dbg(&device->dev, "registered as cooling_device%d\n",
-                pr->cdev->id);
+       /*
+        * Do not start hotplugged CPUs now, but when they
+        * are onlined the first time
+        */
+       if (pr->flags.need_hotplug_init)
+               return 0;
 
-       result = sysfs_create_link(&device->dev.kobj,
-                                  &pr->cdev->device.kobj,
-                                  "thermal_cooling");
-       if (result) {
-               printk(KERN_ERR PREFIX "Create sysfs link\n");
-               goto err_thermal_unregister;
-       }
-       result = sysfs_create_link(&pr->cdev->device.kobj,
-                                  &device->dev.kobj,
-                                  "device");
-       if (result) {
-               printk(KERN_ERR PREFIX "Create sysfs link\n");
+       result = acpi_processor_start(pr);
+       if (result)
                goto err_remove_sysfs;
-       }
 
        return 0;
 
 err_remove_sysfs:
-       sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
-err_thermal_unregister:
-       thermal_cooling_device_unregister(pr->cdev);
-err_power_exit:
-       acpi_processor_power_exit(pr, device);
        sysfs_remove_link(&device->dev.kobj, "sysdev");
 err_free_cpumask:
        free_cpumask_var(pr->throttling.shared_cpu_map);
@@ -735,6 +798,17 @@ static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
                return AE_ERROR;
        }
 
+       /* CPU got hot-plugged, but cpu_data is not initialized yet
+        * Set flag to delay cpu_idle/throttling initialization
+        * in:
+        * acpi_processor_add()
+        *   acpi_processor_get_info()
+        * and do it when the CPU gets online the first time
+        * TBD: Cleanup above functions and try to do this more elegant.
+        */
+       printk(KERN_INFO "CPU %d got hotplugged\n", pr->id);
+       pr->flags.need_hotplug_init = 1;
+
        return AE_OK;
 }
 
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 0a7ed69..ca191ff 100644
@@ -438,6 +438,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
        },
        {
        .callback = init_nvs_nosave,
+       .ident = "Sony Vaio VPCCW29FX",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
+               },
+       },
+       {
+       .callback = init_nvs_nosave,
        .ident = "Averatec AV1020-ED2",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 20bce51..54ab97b 100644
@@ -527,7 +527,7 @@ int intel_idle_cpu_init(int cpu)
 
        return 0;
 }
-
+EXPORT_SYMBOL_GPL(intel_idle_cpu_init);
 
 static int __init intel_idle_init(void)
 {
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index dd9a574..220ce7e 100644
@@ -1304,7 +1304,7 @@ static struct genl_multicast_group thermal_event_mcgrp = {
        .name = THERMAL_GENL_MCAST_GROUP_NAME,
 };
 
-int generate_netlink_event(u32 orig, enum events event)
+int thermal_generate_netlink_event(u32 orig, enum events event)
 {
        struct sk_buff *skb;
        struct nlattr *attr;
@@ -1363,7 +1363,7 @@ int generate_netlink_event(u32 orig, enum events event)
 
        return result;
 }
-EXPORT_SYMBOL(generate_netlink_event);
+EXPORT_SYMBOL(thermal_generate_netlink_event);
 
 static int genetlink_init(void)
 {
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 2fe8639..7c9aebe 100644
@@ -218,9 +218,13 @@ acpi_status acpi_os_write_port(acpi_io_address address, u32 value, u32 width);
  */
 acpi_status
 acpi_os_read_memory(acpi_physical_address address, u32 * value, u32 width);
+acpi_status
+acpi_os_read_memory64(acpi_physical_address address, u64 *value, u32 width);
 
 acpi_status
 acpi_os_write_memory(acpi_physical_address address, u32 value, u32 width);
+acpi_status
+acpi_os_write_memory64(acpi_physical_address address, u64 value, u32 width);
 
 /*
  * Platform and hardware-independent PCI configuration space access
diff --git a/include/acpi/atomicio.h b/include/acpi/atomicio.h
deleted file mode 100644
index 8b9fb4b..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef ACPI_ATOMIC_IO_H
-#define ACPI_ATOMIC_IO_H
-
-int acpi_pre_map_gar(struct acpi_generic_address *reg);
-int acpi_post_unmap_gar(struct acpi_generic_address *reg);
-
-int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg);
-int acpi_atomic_write(u64 val, struct acpi_generic_address *reg);
-
-#endif
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 610f6fb..8cf7e98 100644
@@ -195,6 +195,7 @@ struct acpi_processor_flags {
        u8 has_cst:1;
        u8 power_setup_done:1;
        u8 bm_rld_set:1;
+       u8 need_hotplug_init:1;
 };
 
 struct acpi_processor {
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 47b4a27..796f1ff 100644
@@ -152,9 +152,9 @@ struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
 void thermal_cooling_device_unregister(struct thermal_cooling_device *);
 
 #ifdef CONFIG_NET
-extern int generate_netlink_event(u32 orig, enum events event);
+extern int thermal_generate_netlink_event(u32 orig, enum events event);
 #else
-static inline int generate_netlink_event(u32 orig, enum events event)
+static inline int thermal_generate_netlink_event(u32 orig, enum events event)
 {
        return 0;
 }