Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 17 Sep 2013 15:40:49 +0000 (11:40 -0400)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 17 Sep 2013 15:40:49 +0000 (11:40 -0400)
Pull more tile architecture updates from Chris Metcalf:
 "This second batch of changes is just cleanup of various kinds from
  doing some tidying work in the sources.

  Some dead code is removed, comment typos fixed, whitespace and style
  issues cleaned up, and some header updates from our internal
  "upstream" architecture team"

* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  tile: remove stray blank space
  tile: <arch/> header updates from upstream
  tile: improve gxio iorpc autogenerated code style
  tile: double default VMALLOC space
  tile: remove stale arch/tile/kernel/futex_64.S
  tile: remove HUGE_VMAP dead code
  tile: use pmd_pfn() instead of casting via pte_t
  tile: fix typos in comment in arch/tile/kernel/unaligned.c

119 files changed:
Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
Documentation/filesystems/cifs/cifs.txt
Documentation/scsi/ChangeLog.megaraid_sas
Makefile
arch/arm/common/timer-sp.c
arch/arm/mach-mvebu/armada-370-xp.c
arch/mips/dec/ioasic-irq.c
arch/mips/dec/time.c
arch/mips/include/asm/dec/ioasic.h
arch/mips/kernel/csrc-ioasic.c
arch/mips/kernel/smp-cmp.c
arch/mips/kernel/vpe.c
arch/xtensa/Makefile
arch/xtensa/boot/Makefile
arch/xtensa/include/asm/regs.h
arch/xtensa/include/asm/timex.h
arch/xtensa/kernel/align.S
arch/xtensa/kernel/coprocessor.S
arch/xtensa/kernel/entry.S
arch/xtensa/kernel/setup.c
arch/xtensa/kernel/time.c
arch/xtensa/kernel/vectors.S
arch/xtensa/kernel/xtensa_ksyms.c
block/partitions/efi.c
drivers/clocksource/em_sti.c
drivers/clocksource/nomadik-mtu.c
drivers/clocksource/sh_cmt.c
drivers/clocksource/time-armada-370-xp.c
drivers/hwmon/amc6821.c
drivers/hwmon/emc2103.c
drivers/hwmon/ibmaem.c
drivers/hwmon/k10temp.c
drivers/hwmon/tmp421.c
drivers/input/evdev.c
drivers/mtd/ubi/fastmap.c
drivers/mtd/ubi/wl.c
drivers/net/ethernet/amd/declance.c
drivers/platform/x86/Kconfig
drivers/platform/x86/amilo-rfkill.c
drivers/platform/x86/classmate-laptop.c
drivers/platform/x86/compal-laptop.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/intel-rst.c
drivers/platform/x86/intel-smartconnect.c
drivers/platform/x86/intel_mid_powerbtn.c
drivers/platform/x86/intel_mid_thermal.c
drivers/platform/x86/panasonic-laptop.c
drivers/platform/x86/samsung-q10.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/wmi.c
drivers/scsi/aic7xxx/aic7xxx_pci.c
drivers/scsi/esas2r/esas2r_flash.c
drivers/scsi/esas2r/esas2r_init.c
drivers/scsi/esas2r/esas2r_ioctl.c
drivers/scsi/esas2r/esas2r_vda.c
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_main.c
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/fnic/vnic_scsi.h
drivers/scsi/hpsa.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/ibmvscsi/viosrp.h
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli.h
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fp.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/megaraid/megaraid_sas_fusion.h
drivers/scsi/mpt3sas/Makefile
drivers/scsi/sd.c
drivers/scsi/ufs/ufs.h
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/scsi/ufs/ufshci.h
drivers/scsi/ufs/unipro.h [new file with mode: 0644]
drivers/staging/android/logger.c
drivers/staging/lustre/lustre/llite/file.c
drivers/usb/gadget/inode.c
fs/aio.c
fs/anon_inodes.c
fs/block_dev.c
fs/cifs/file.c
fs/dcache.c
fs/fs-writeback.c
fs/nfs/direct.c
fs/ocfs2/file.c
fs/read_write.c
fs/ubifs/debug.c
fs/udf/file.c
include/linux/aio.h
include/linux/anon_inodes.h
include/linux/fs.h
include/linux/migrate.h
include/linux/mm_types.h
include/linux/pci_ids.h
include/linux/slab.h
include/linux/slab_def.h
include/linux/slob_def.h [deleted file]
include/linux/slub_def.h
include/linux/time-armada-370-xp.h [deleted file]
include/uapi/linux/input.h
init/Kconfig
kernel/fork.c
mm/migrate.c
mm/page_io.c
mm/slab_common.c
mm/slob.c
mm/slub.c
net/socket.c

index 3638112..f455182 100644 (file)
@@ -2,14 +2,40 @@ Marvell Armada 370 and Armada XP Timers
 ---------------------------------------
 
 Required properties:
-- compatible: Should be "marvell,armada-370-xp-timer"
+- compatible: Should be either "marvell,armada-370-timer" or
+  "marvell,armada-xp-timer" as appropriate.
 - interrupts: Should contain the list of Global Timer interrupts and
   then local timer interrupts
 - reg: Should contain location and length for timers register. First
   pair for the Global Timer registers, second pair for the
   local/private timers.
-- clocks: clock driving the timer hardware
 
-Optional properties:
-- marvell,timer-25Mhz: Tells whether the Global timer supports the 25
-  Mhz fixed mode (available on Armada XP and not on Armada 370)
+Clocks required for compatible = "marvell,armada-370-timer":
+- clocks : Must contain a single entry describing the clock input
+
+Clocks required for compatible = "marvell,armada-xp-timer":
+- clocks : Must contain an entry for each entry in clock-names.
+- clock-names : Must include the following entries:
+  "nbclk" (L2/coherency fabric clock),
+  "fixed" (Reference 25 MHz fixed-clock).
+
+Examples:
+
+- Armada 370:
+
+       timer {
+               compatible = "marvell,armada-370-timer";
+               reg = <0x20300 0x30>, <0x21040 0x30>;
+               interrupts = <37>, <38>, <39>, <40>, <5>, <6>;
+               clocks = <&coreclk 2>;
+       };
+
+- Armada XP:
+
+       timer {
+               compatible = "marvell,armada-xp-timer";
+               reg = <0x20300 0x30>, <0x21040 0x30>;
+               interrupts = <37>, <38>, <39>, <40>, <5>, <6>;
+               clocks = <&coreclk 2>, <&refclk>;
+               clock-names = "nbclk", "fixed";
+       };
index 49cc923..2fac91a 100644 (file)
@@ -1,18 +1,14 @@
   This is the client VFS module for the Common Internet File System
   (CIFS) protocol which is the successor to the Server Message Block 
   (SMB) protocol, the native file sharing mechanism for most early
-  PC operating systems.  CIFS is fully supported by current network
-  file servers such as Windows 2000, Windows 2003 (including  
-  Windows XP) as well by Samba (which provides excellent CIFS
+  PC operating systems. New and improved versions of CIFS are now
+  called SMB2 and SMB3. These dialects are also supported by the
+  CIFS VFS module. CIFS is fully supported by network
+  file servers such as Windows 2000, 2003, 2008 and 2012
+  as well as by Samba (which provides excellent CIFS
   server support for Linux and many other operating systems), so
   this network filesystem client can mount to a wide variety of
-  servers.  The smbfs module should be used instead of this cifs module
-  for mounting to older SMB servers such as OS/2.  The smbfs and cifs
-  modules can coexist and do not conflict.  The CIFS VFS filesystem
-  module is designed to work well with servers that implement the
-  newer versions (dialects) of the SMB/CIFS protocol such as Samba, 
-  the program written by Andrew Tridgell that turns any Unix host 
-  into a SMB/CIFS file server.
+  servers.
 
   The intent of this module is to provide the most advanced network
   file system function for CIFS compliant servers, including better
   alternative to NFSv4 for fileserving in some Linux to Linux environments,
   not just in Linux to Windows environments.
 
-  This filesystem has an optional mount utility (mount.cifs) that can
-  be obtained from the project page and installed in the path in the same
-  directory with the other mount helpers (such as mount.smbfs). 
-  Mounting using the cifs filesystem without installing the mount helper
-  requires specifying the server's ip address.
+  This filesystem has a mount utility (mount.cifs) that can be obtained from
 
-  For Linux 2.4:
-    mount //anything/here /mnt_target -o
-            user=username,pass=password,unc=//ip_address_of_server/sharename
+      https://ftp.samba.org/pub/linux-cifs/cifs-utils/
 
-  For Linux 2.5: 
-    mount //ip_address_of_server/sharename /mnt_target -o user=username, pass=password
+  It must be installed in the directory with the other mount helpers.
 
+  For more information on the module see the project wiki page at
 
-  For more information on the module see the project page at
-
-      http://us1.samba.org/samba/Linux_CIFS_client.html 
-
-  For more information on CIFS see:
-
-      http://www.snia.org/tech_activities/CIFS
-
-  or the Samba site:
-     
-      http://www.samba.org
+      https://wiki.samba.org/index.php/LinuxCIFS_utils
index cc92ca8..6edaa65 100644 (file)
@@ -1,3 +1,13 @@
+Release Date    : Sat. Aug 31, 2013 17:00:00 PST 2013 -
+                       (emaild-id:megaraidlinux@lsi.com)
+                       Adam Radford
+                       Kashyap Desai
+                       Sumit Saxena
+Current Version : 06.700.06.00-rc1
+Old Version     : 06.600.18.00-rc1
+    1. Add High Availability clustering support using shared Logical Disks.
+    2. Version and Changelog update.
+-------------------------------------------------------------------------------
 Release Date    : Wed. May 15, 2013 17:00:00 PST 2013 -
                        (emaild-id:megaraidlinux@lsi.com)
                        Adam Radford
index e73f758..de004ce 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 3
-PATCHLEVEL = 11
+PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION =
-NAME = Suicidal Squirrel
+EXTRAVERSION = -rc1
+NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
index 023ee63..e901d0f 100644 (file)
@@ -166,7 +166,8 @@ static int sp804_set_next_event(unsigned long next,
 }
 
 static struct clock_event_device sp804_clockevent = {
-       .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+       .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
+               CLOCK_EVT_FEAT_DYNIRQ,
        .set_mode       = sp804_set_mode,
        .set_next_event = sp804_set_next_event,
        .rating         = 300,
index 829b573..e2acff9 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/io.h>
-#include <linux/time-armada-370-xp.h>
+#include <linux/clocksource.h>
 #include <linux/dma-mapping.h>
 #include <linux/mbus.h>
 #include <asm/hardware/cache-l2x0.h>
@@ -37,7 +37,7 @@ static void __init armada_370_xp_map_io(void)
 static void __init armada_370_xp_timer_and_clk_init(void)
 {
        of_clk_init(NULL);
-       armada_370_xp_timer_init();
+       clocksource_of_init();
        coherency_init();
        BUG_ON(mvebu_mbus_dt_init());
 #ifdef CONFIG_CACHE_L2X0
index 824e08c..4b3e3a4 100644 (file)
@@ -51,6 +51,14 @@ static struct irq_chip ioasic_irq_type = {
        .irq_unmask = unmask_ioasic_irq,
 };
 
+void clear_ioasic_dma_irq(unsigned int irq)
+{
+       u32 sir;
+
+       sir = ~(1 << (irq - ioasic_irq_base));
+       ioasic_write(IO_REG_SIR, sir);
+}
+
 static struct irq_chip ioasic_dma_irq_type = {
        .name = "IO-ASIC-DMA",
        .irq_ack = ack_ioasic_irq,
index 56ebc7f..1914e56 100644 (file)
@@ -125,12 +125,16 @@ int rtc_mips_set_mmss(unsigned long nowtime)
 
 void __init plat_time_init(void)
 {
+       int ioasic_clock = 0;
        u32 start, end;
        int i = HZ / 8;
 
        /* Set up the rate of periodic DS1287 interrupts. */
        ds1287_set_base_clock(HZ);
 
+       /* On some I/O ASIC systems we have the I/O ASIC's counter.  */
+       if (IOASIC)
+               ioasic_clock = dec_ioasic_clocksource_init() == 0;
        if (cpu_has_counter) {
                ds1287_timer_state();
                while (!ds1287_timer_state())
@@ -147,9 +151,21 @@ void __init plat_time_init(void)
                mips_hpt_frequency = (end - start) * 8;
                printk(KERN_INFO "MIPS counter frequency %dHz\n",
                        mips_hpt_frequency);
-       } else if (IOASIC)
-               /* For pre-R4k systems we use the I/O ASIC's counter.  */
-               dec_ioasic_clocksource_init();
+
+               /*
+                * All R4k DECstations suffer from the CP0 Count erratum,
+                * so we can't use the timer as a clock source, and a clock
+                * event both at a time.  An accurate wall clock is more
+                * important than a high-precision interval timer so only
+                * use the timer as a clock source, and not a clock event
+                * if there's no I/O ASIC counter available to serve as a
+                * clock source.
+                */
+               if (!ioasic_clock) {
+                       init_r4k_clocksource();
+                       mips_hpt_frequency = 0;
+               }
+       }
 
        ds1287_clockevent_init(dec_interrupt[DEC_IRQ_RTC]);
 }
index 98badd6..a6e505a 100644 (file)
@@ -31,8 +31,10 @@ static inline u32 ioasic_read(unsigned int reg)
        return ioasic_base[reg / 4];
 }
 
+extern void clear_ioasic_dma_irq(unsigned int irq);
+
 extern void init_ioasic_irqs(int base);
 
-extern void dec_ioasic_clocksource_init(void);
+extern int dec_ioasic_clocksource_init(void);
 
 #endif /* __ASM_DEC_IOASIC_H */
index 87e88fe..6cbbf6e 100644 (file)
@@ -37,7 +37,7 @@ static struct clocksource clocksource_dec = {
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-void __init dec_ioasic_clocksource_init(void)
+int __init dec_ioasic_clocksource_init(void)
 {
        unsigned int freq;
        u32 start, end;
@@ -56,8 +56,14 @@ void __init dec_ioasic_clocksource_init(void)
        end = dec_ioasic_hpt_read(&clocksource_dec);
 
        freq = (end - start) * 8;
+
+       /* An early revision of the I/O ASIC didn't have the counter.  */
+       if (!freq)
+               return -ENXIO;
+
        printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq);
 
        clocksource_dec.rating = 200 + freq / 10000000;
        clocksource_register_hz(&clocksource_dec, freq);
+       return 0;
 }
index c2e5d74..5969f1e 100644 (file)
@@ -99,7 +99,9 @@ static void cmp_init_secondary(void)
 
        c->core = (read_c0_ebase() >> 1) & 0x1ff;
 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
-       c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
+       if (cpu_has_mipsmt)
+               c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
+                       TCBIND_CURVPE;
 #endif
 #ifdef CONFIG_MIPS_MT_SMTC
        c->tc_id  = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
@@ -177,9 +179,16 @@ void __init cmp_smp_setup(void)
        }
 
        if (cpu_has_mipsmt) {
-               unsigned int nvpe, mvpconf0 = read_c0_mvpconf0();
+               unsigned int nvpe = 1;
+#ifdef CONFIG_MIPS_MT_SMP
+               unsigned int mvpconf0 = read_c0_mvpconf0();
+
+               nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+#elif defined(CONFIG_MIPS_MT_SMTC)
+               unsigned int mvpconf0 = read_c0_mvpconf0();
 
                nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+#endif
                smp_num_siblings = nvpe;
        }
        pr_info("Detected %i available secondary CPU(s)\n", ncpu);
index faf84c5..59b2b3c 100644 (file)
@@ -1368,7 +1368,7 @@ out_einval:
 }
 static DEVICE_ATTR_RW(ntcs);
 
-static struct attribute vpe_attrs[] = {
+static struct attribute *vpe_attrs[] = {
        &dev_attr_kill.attr,
        &dev_attr_ntcs.attr,
        NULL,
index 136224b..81250ec 100644 (file)
@@ -55,10 +55,10 @@ ifneq ($(CONFIG_LD_NO_RELAX),)
 LDFLAGS := --no-relax
 endif
 
-ifeq ($(shell echo -e __XTENSA_EB__ | $(CC) -E - | grep -v "\#"),1)
+ifeq ($(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#"),1)
 CHECKFLAGS += -D__XTENSA_EB__
 endif
-ifeq ($(shell echo -e __XTENSA_EL__ | $(CC) -E - | grep -v "\#"),1)
+ifeq ($(shell echo __XTENSA_EL__ | $(CC) -E - | grep -v "\#"),1)
 CHECKFLAGS += -D__XTENSA_EL__
 endif
 
index 64ffc4b..ca20a89 100644 (file)
@@ -12,7 +12,7 @@
 KBUILD_CFLAGS  += -fno-builtin -Iarch/$(ARCH)/boot/include
 HOSTFLAGS      += -Iarch/$(ARCH)/boot/include
 
-BIG_ENDIAN     := $(shell echo -e __XTENSA_EB__ | $(CC) -E - | grep -v "\#")
+BIG_ENDIAN     := $(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#")
 
 export ccflags-y
 export BIG_ENDIAN
index b24de67..4ba9f51 100644 (file)
@@ -82,6 +82,7 @@
 #define PS_CALLINC_SHIFT       16
 #define PS_CALLINC_MASK                0x00030000
 #define PS_OWB_SHIFT           8
+#define PS_OWB_WIDTH           4
 #define PS_OWB_MASK            0x00000F00
 #define PS_RING_SHIFT          6
 #define PS_RING_MASK           0x000000C0
index 69f9017..27fa3c1 100644 (file)
 # error "Bad timer number for Linux configurations!"
 #endif
 
-#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
 extern unsigned long ccount_freq;
-#define CCOUNT_PER_JIFFY (ccount_freq / HZ)
-#else
-#define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ))
-#endif
-
 
 typedef unsigned long long cycles_t;
 
index aa2e87b..d4cef60 100644 (file)
  *   a0:       trashed, original value saved on stack (PT_AREG0)
  *   a1:       a1
  *   a2:       new stack pointer, original in DEPC
- *   a3:       dispatch table
+ *   a3:       a3
  *   depc:     a2, original value saved on stack (PT_DEPC)
- *   excsave_1:        a3
+ *   excsave_1:        dispatch table
  *
  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  *          <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -171,7 +171,6 @@ ENTRY(fast_unaligned)
        s32i    a8, a2, PT_AREG8
 
        rsr     a0, depc
-       xsr     a3, excsave1
        s32i    a0, a2, PT_AREG2
        s32i    a3, a2, PT_AREG3
 
index 6476574..a482df5 100644 (file)
@@ -32,9 +32,9 @@
  *   a0:       trashed, original value saved on stack (PT_AREG0)
  *   a1:       a1
  *   a2:       new stack pointer, original in DEPC
- *   a3:       dispatch table
+ *   a3:       a3
  *   depc:     a2, original value saved on stack (PT_DEPC)
- *   excsave_1:        a3
+ *   excsave_1:        dispatch table
  *
  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  *          <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -225,9 +225,9 @@ ENDPROC(coprocessor_restore)
  *   a0:       trashed, original value saved on stack (PT_AREG0)
  *   a1:       a1
  *   a2:       new stack pointer, original in DEPC
- *   a3:       dispatch table
+ *   a3:       a3
  *   depc:     a2, original value saved on stack (PT_DEPC)
- *   excsave_1:        a3
+ *   excsave_1:        dispatch table
  *
  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  *          <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -245,7 +245,6 @@ ENTRY(fast_coprocessor)
 
        /* Save remaining registers a1-a3 and SAR */
 
-       xsr     a3, excsave1
        s32i    a3, a2, PT_AREG3
        rsr     a3, sar
        s32i    a1, a2, PT_AREG1
index 9298742..de1dfa1 100644 (file)
@@ -31,8 +31,6 @@
 /* Unimplemented features. */
 
 #undef KERNEL_STACK_OVERFLOW_CHECK
-#undef PREEMPTIBLE_KERNEL
-#undef ALLOCA_EXCEPTION_IN_IRAM
 
 /* Not well tested.
  *
@@ -92,9 +90,9 @@
  *   a0:       trashed, original value saved on stack (PT_AREG0)
  *   a1:       a1
  *   a2:       new stack pointer, original value in depc
- *   a3:       dispatch table
+ *   a3:       a3
  *   depc:     a2, original value saved on stack (PT_DEPC)
- *   excsave1: a3
+ *   excsave1: dispatch table
  *
  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  *          <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 
 ENTRY(user_exception)
 
-       /* Save a2, a3, and depc, restore excsave_1 and set SP. */
+       /* Save a1, a2, a3, and set SP. */
 
-       xsr     a3, excsave1
        rsr     a0, depc
        s32i    a1, a2, PT_AREG1
        s32i    a0, a2, PT_AREG2
@@ -238,9 +235,9 @@ ENDPROC(user_exception)
  *   a0:       trashed, original value saved on stack (PT_AREG0)
  *   a1:       a1
  *   a2:       new stack pointer, original in DEPC
- *   a3:       dispatch table
+ *   a3:       a3
  *   depc:     a2, original value saved on stack (PT_DEPC)
- *   excsave_1:        a3
+ *   excsave_1:        dispatch table
  *
  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  *          <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -256,9 +253,8 @@ ENDPROC(user_exception)
 
 ENTRY(kernel_exception)
 
-       /* Save a0, a2, a3, DEPC and set SP. */
+       /* Save a1, a2, a3, and set SP. */
 
-       xsr     a3, excsave1            # restore a3, excsave_1
        rsr     a0, depc                # get a2
        s32i    a1, a2, PT_AREG1
        s32i    a0, a2, PT_AREG2
@@ -409,7 +405,7 @@ common_exception:
         * exception handler and call the exception handler.
         */
 
-       movi    a4, exc_table
+       rsr     a4, excsave1
        mov     a6, a1                  # pass stack frame
        mov     a7, a0                  # pass EXCCAUSE
        addx4   a4, a0, a4
@@ -423,28 +419,15 @@ common_exception:
        .global common_exception_return
 common_exception_return:
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-       l32i    a4, a1, PT_DEPC
-       /* Double exception means we came here with an exception
-        * while PS.EXCM was set, i.e. interrupts disabled.
-        */
-       bgeui   a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
-       l32i    a4, a1, PT_EXCCAUSE
-       bnei    a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
-       /* We came here with an interrupt means interrupts were enabled
-        * and we'll reenable them on return.
-        */
-       movi    a4, trace_hardirqs_on
-       callx4  a4
 1:
-#endif
+       rsil    a2, LOCKLEVEL
 
        /* Jump if we are returning from kernel exceptions. */
 
-1:     l32i    a3, a1, PT_PS
-       _bbci.l a3, PS_UM_BIT, 4f
-
-       rsil    a2, 0
+       l32i    a3, a1, PT_PS
+       GET_THREAD_INFO(a2, a1)
+       l32i    a4, a2, TI_FLAGS
+       _bbci.l a3, PS_UM_BIT, 6f
 
        /* Specific to a user exception exit:
         * We need to check some flags for signal handling and rescheduling,
@@ -453,9 +436,6 @@ common_exception_return:
         * Note that we don't disable interrupts here. 
         */
 
-       GET_THREAD_INFO(a2,a1)
-       l32i    a4, a2, TI_FLAGS
-
        _bbsi.l a4, TIF_NEED_RESCHED, 3f
        _bbsi.l a4, TIF_NOTIFY_RESUME, 2f
        _bbci.l a4, TIF_SIGPENDING, 5f
@@ -465,6 +445,7 @@ common_exception_return:
 
        /* Call do_signal() */
 
+       rsil    a2, 0
        movi    a4, do_notify_resume    # int do_notify_resume(struct pt_regs*)
        mov     a6, a1
        callx4  a4
@@ -472,10 +453,24 @@ common_exception_return:
 
 3:     /* Reschedule */
 
+       rsil    a2, 0
        movi    a4, schedule    # void schedule (void)
        callx4  a4
        j       1b
 
+#ifdef CONFIG_PREEMPT
+6:
+       _bbci.l a4, TIF_NEED_RESCHED, 4f
+
+       /* Check current_thread_info->preempt_count */
+
+       l32i    a4, a2, TI_PRE_COUNT
+       bnez    a4, 4f
+       movi    a4, preempt_schedule_irq
+       callx4  a4
+       j       1b
+#endif
+
 5:
 #ifdef CONFIG_DEBUG_TLB_SANITY
        l32i    a4, a1, PT_DEPC
@@ -483,7 +478,24 @@ common_exception_return:
        movi    a4, check_tlb_sanity
        callx4  a4
 #endif
-4:     /* Restore optional registers. */
+6:
+4:
+#ifdef CONFIG_TRACE_IRQFLAGS
+       l32i    a4, a1, PT_DEPC
+       /* Double exception means we came here with an exception
+        * while PS.EXCM was set, i.e. interrupts disabled.
+        */
+       bgeui   a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+       l32i    a4, a1, PT_EXCCAUSE
+       bnei    a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
+       /* We came here with an interrupt means interrupts were enabled
+        * and we'll reenable them on return.
+        */
+       movi    a4, trace_hardirqs_on
+       callx4  a4
+1:
+#endif
+       /* Restore optional registers. */
 
        load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
 
@@ -570,29 +582,6 @@ user_exception_exit:
 
 kernel_exception_exit:
 
-#ifdef PREEMPTIBLE_KERNEL
-
-#ifdef CONFIG_PREEMPT
-
-       /*
-        * Note: We've just returned from a call4, so we have
-        * at least 4 addt'l regs.
-        */
-
-       /* Check current_thread_info->preempt_count */
-
-       GET_THREAD_INFO(a2)
-       l32i    a3, a2, TI_PREEMPT
-       bnez    a3, 1f
-
-       l32i    a2, a2, TI_FLAGS
-
-1:
-
-#endif
-
-#endif
-
        /* Check if we have to do a movsp.
         *
         * We only have to do a movsp if the previous window-frame has
@@ -829,176 +818,63 @@ ENDPROC(unrecoverable_exception)
  *
  *  The ALLOCA handler is entered when user code executes the MOVSP
  *  instruction and the caller's frame is not in the register file.
- *  In this case, the caller frame's a0..a3 are on the stack just
- *  below sp (a1), and this handler moves them.
  *
- *  For "MOVSP <ar>,<as>" without destination register a1, this routine
- *  simply moves the value from <as> to <ar> without moving the save area.
+ * This algorithm was taken from Ross Morley's RTOS Porting Layer:
+ *
+ *    /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
+ *
+ * It leverages the existing window spill/fill routines and their support for
+ * double exceptions. The 'movsp' instruction will only cause an exception if
+ * the next window needs to be loaded. In fact this ALLOCA exception may be
+ * replaced at some point by changing the hardware to do an underflow exception
+ * of the proper size instead.
+ *
+ * This algorithm simply backs out the register changes started by the user
+ * exception handler, makes it appear that we have started a window underflow
+ * by rotating the window back and then setting the old window base (OWB) in
+ * the 'ps' register with the rolled back window base. The 'movsp' instruction
+ * will be re-executed and this time since the next window frame is in the
+ * active AR registers it won't cause an exception.
+ *
+ * If the WindowUnderflow code gets a TLB miss the page will get mapped
+ * and the partial WindowUnderflow will be handled in the double exception
+ * handler.
  *
  * Entry condition:
  *
  *   a0:       trashed, original value saved on stack (PT_AREG0)
  *   a1:       a1
  *   a2:       new stack pointer, original in DEPC
- *   a3:       dispatch table
+ *   a3:       a3
  *   depc:     a2, original value saved on stack (PT_DEPC)
- *   excsave_1:        a3
+ *   excsave_1:        dispatch table
  *
  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  *          <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
  */
 
-#if XCHAL_HAVE_BE
-#define _EXTUI_MOVSP_SRC(ar)   extui ar, ar, 4, 4
-#define _EXTUI_MOVSP_DST(ar)   extui ar, ar, 0, 4
-#else
-#define _EXTUI_MOVSP_SRC(ar)   extui ar, ar, 0, 4
-#define _EXTUI_MOVSP_DST(ar)   extui ar, ar, 4, 4
-#endif
-
 ENTRY(fast_alloca)
+       rsr     a0, windowbase
+       rotw    -1
+       rsr     a2, ps
+       extui   a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
+       xor     a3, a3, a4
+       l32i    a4, a6, PT_AREG0
+       l32i    a1, a6, PT_DEPC
+       rsr     a6, depc
+       wsr     a1, depc
+       slli    a3, a3, PS_OWB_SHIFT
+       xor     a2, a2, a3
+       wsr     a2, ps
+       rsync
 
-       /* We shouldn't be in a double exception. */
-
-       l32i    a0, a2, PT_DEPC
-       _bgeui  a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double
-
-       rsr     a0, depc                # get a2
-       s32i    a4, a2, PT_AREG4        # save a4 and
-       s32i    a0, a2, PT_AREG2        # a2 to stack
-
-       /* Exit critical section. */
-
-       movi    a0, 0
-       s32i    a0, a3, EXC_TABLE_FIXUP
-
-       /* Restore a3, excsave_1 */
-
-       xsr     a3, excsave1            # make sure excsave_1 is valid for dbl.
-       rsr     a4, epc1                # get exception address
-       s32i    a3, a2, PT_AREG3        # save a3 to stack
-
-#ifdef ALLOCA_EXCEPTION_IN_IRAM
-#error iram not supported
-#else
-       /* Note: l8ui not allowed in IRAM/IROM!! */
-       l8ui    a0, a4, 1               # read as(src) from MOVSP instruction
-#endif
-       movi    a3, .Lmovsp_src
-       _EXTUI_MOVSP_SRC(a0)            # extract source register number
-       addx8   a3, a0, a3
-       jx      a3
-
-.Lunhandled_double:
-       wsr     a0, excsave1
-       movi    a0, unrecoverable_exception
-       callx0  a0
-
-       .align 8
-.Lmovsp_src:
-       l32i    a3, a2, PT_AREG0;       _j 1f;  .align 8
-       mov     a3, a1;                 _j 1f;  .align 8
-       l32i    a3, a2, PT_AREG2;       _j 1f;  .align 8
-       l32i    a3, a2, PT_AREG3;       _j 1f;  .align 8
-       l32i    a3, a2, PT_AREG4;       _j 1f;  .align 8
-       mov     a3, a5;                 _j 1f;  .align 8
-       mov     a3, a6;                 _j 1f;  .align 8
-       mov     a3, a7;                 _j 1f;  .align 8
-       mov     a3, a8;                 _j 1f;  .align 8
-       mov     a3, a9;                 _j 1f;  .align 8
-       mov     a3, a10;                _j 1f;  .align 8
-       mov     a3, a11;                _j 1f;  .align 8
-       mov     a3, a12;                _j 1f;  .align 8
-       mov     a3, a13;                _j 1f;  .align 8
-       mov     a3, a14;                _j 1f;  .align 8
-       mov     a3, a15;                _j 1f;  .align 8
-
-1:
-
-#ifdef ALLOCA_EXCEPTION_IN_IRAM
-#error iram not supported
-#else
-       l8ui    a0, a4, 0               # read ar(dst) from MOVSP instruction
-#endif
-       addi    a4, a4, 3               # step over movsp
-       _EXTUI_MOVSP_DST(a0)            # extract destination register
-       wsr     a4, epc1                # save new epc_1
-
-       _bnei   a0, 1, 1f               # no 'movsp a1, ax': jump
-
-       /* Move the save area. This implies the use of the L32E
-        * and S32E instructions, because this move must be done with
-        * the user's PS.RING privilege levels, not with ring 0
-        * (kernel's) privileges currently active with PS.EXCM
-        * set. Note that we have stil registered a fixup routine with the
-        * double exception vector in case a double exception occurs.
-        */
-
-       /* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */
-
-       l32e    a0, a1, -16
-       l32e    a4, a1, -12
-       s32e    a0, a3, -16
-       s32e    a4, a3, -12
-       l32e    a0, a1, -8
-       l32e    a4, a1, -4
-       s32e    a0, a3, -8
-       s32e    a4, a3, -4
-
-       /* Restore stack-pointer and all the other saved registers. */
-
-       mov     a1, a3
-
-       l32i    a4, a2, PT_AREG4
-       l32i    a3, a2, PT_AREG3
-       l32i    a0, a2, PT_AREG0
-       l32i    a2, a2, PT_AREG2
-       rfe
-
-       /*  MOVSP <at>,<as>  was invoked with <at> != a1.
-        *  Because the stack pointer is not being modified,
-        *  we should be able to just modify the pointer
-        *  without moving any save area.
-        *  The processor only traps these occurrences if the
-        *  caller window isn't live, so unfortunately we can't
-        *  use this as an alternate trap mechanism.
-        *  So we just do the move.  This requires that we
-        *  resolve the destination register, not just the source,
-        *  so there's some extra work.
-        *  (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
-        */
-
-       /* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */
-
-1:     movi    a4, .Lmovsp_dst
-       addx8   a4, a0, a4
-       jx      a4
-
-       .align 8
-.Lmovsp_dst:
-       s32i    a3, a2, PT_AREG0;       _j 1f;  .align 8
-       mov     a1, a3;                 _j 1f;  .align 8
-       s32i    a3, a2, PT_AREG2;       _j 1f;  .align 8
-       s32i    a3, a2, PT_AREG3;       _j 1f;  .align 8
-       s32i    a3, a2, PT_AREG4;       _j 1f;  .align 8
-       mov     a5, a3;                 _j 1f;  .align 8
-       mov     a6, a3;                 _j 1f;  .align 8
-       mov     a7, a3;                 _j 1f;  .align 8
-       mov     a8, a3;                 _j 1f;  .align 8
-       mov     a9, a3;                 _j 1f;  .align 8
-       mov     a10, a3;                _j 1f;  .align 8
-       mov     a11, a3;                _j 1f;  .align 8
-       mov     a12, a3;                _j 1f;  .align 8
-       mov     a13, a3;                _j 1f;  .align 8
-       mov     a14, a3;                _j 1f;  .align 8
-       mov     a15, a3;                _j 1f;  .align 8
-
-1:     l32i    a4, a2, PT_AREG4
-       l32i    a3, a2, PT_AREG3
-       l32i    a0, a2, PT_AREG0
-       l32i    a2, a2, PT_AREG2
-       rfe
-
+       _bbci.l a4, 31, 4f
+       rotw    -1
+       _bbci.l a8, 30, 8f
+       rotw    -1
+       j       _WindowUnderflow12
+8:     j       _WindowUnderflow8
+4:     j       _WindowUnderflow4
 ENDPROC(fast_alloca)
 
 /*
@@ -1015,9 +891,9 @@ ENDPROC(fast_alloca)
  *   a0:       trashed, original value saved on stack (PT_AREG0)
  *   a1:       a1
  *   a2:       new stack pointer, original in DEPC
- *   a3:       dispatch table
+ *   a3:       a3
  *   depc:     a2, original value saved on stack (PT_DEPC)
- *   excsave_1:        a3
+ *   excsave_1:        dispatch table
  */
 
 ENTRY(fast_syscall_kernel)
@@ -1064,7 +940,6 @@ ENTRY(fast_syscall_unrecoverable)
 
        l32i    a0, a2, PT_AREG0        # restore a0
        xsr     a2, depc                # restore a2, depc
-       rsr     a3, excsave1
 
        wsr     a0, excsave1
        movi    a0, unrecoverable_exception
@@ -1086,10 +961,10 @@ ENDPROC(fast_syscall_unrecoverable)
  *   a0:       a2 (syscall-nr), original value saved on stack (PT_AREG0)
  *   a1:       a1
  *   a2:       new stack pointer, original in a0 and DEPC
- *   a3:       dispatch table, original in excsave_1
+ *   a3:       a3
  *   a4..a15:  unchanged
  *   depc:     a2, original value saved on stack (PT_DEPC)
- *   excsave_1:        a3
+ *   excsave_1:        dispatch table
  *
  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  *          <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -1122,8 +997,6 @@ ENDPROC(fast_syscall_unrecoverable)
 
 ENTRY(fast_syscall_xtensa)
 
-       xsr     a3, excsave1            # restore a3, excsave1
-
        s32i    a7, a2, PT_AREG7        # we need an additional register
        movi    a7, 4                   # sizeof(unsigned int)
        access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp
@@ -1186,9 +1059,9 @@ ENDPROC(fast_syscall_xtensa)
  *   a0:       trashed, original value saved on stack (PT_AREG0)
  *   a1:       a1
  *   a2:       new stack pointer, original in DEPC
- *   a3:       dispatch table
+ *   a3:       a3
  *   depc:     a2, original value saved on stack (PT_DEPC)
- *   excsave_1:        a3
+ *   excsave_1:        dispatch table
  *
  * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
  */
@@ -1197,15 +1070,16 @@ ENTRY(fast_syscall_spill_registers)
 
        /* Register a FIXUP handler (pass current wb as a parameter) */
 
+       xsr     a3, excsave1
        movi    a0, fast_syscall_spill_registers_fixup
        s32i    a0, a3, EXC_TABLE_FIXUP
        rsr     a0, windowbase
        s32i    a0, a3, EXC_TABLE_PARAM
+       xsr     a3, excsave1            # restore a3 and excsave_1
 
-       /* Save a3 and SAR on stack. */
+       /* Save a3, a4 and SAR on stack. */
 
        rsr     a0, sar
-       xsr     a3, excsave1            # restore a3 and excsave_1
        s32i    a3, a2, PT_AREG3
        s32i    a4, a2, PT_AREG4
        s32i    a0, a2, PT_AREG5        # store SAR to PT_AREG5
@@ -1259,14 +1133,14 @@ fast_syscall_spill_registers_fixup:
         * in WS, so that the exception handlers save them to the task stack.
         */
 
-       rsr     a3, excsave1    # get spill-mask
+       xsr     a3, excsave1    # get spill-mask
        slli    a2, a3, 1       # shift left by one
 
        slli    a3, a2, 32-WSBITS
        src     a2, a2, a3      # a1 = xxwww1yyxxxwww1yy......
        wsr     a2, windowstart # set corrected windowstart
 
-       movi    a3, exc_table
+       rsr     a3, excsave1
        l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE   # restore a2
        l32i    a3, a3, EXC_TABLE_PARAM # original WB (in user task)
 
@@ -1303,7 +1177,7 @@ fast_syscall_spill_registers_fixup:
 
        /* Jump to the exception handler. */
 
-       movi    a3, exc_table
+       rsr     a3, excsave1
        rsr     a0, exccause
        addx4   a0, a0, a3                      # find entry in table
        l32i    a0, a0, EXC_TABLE_FAST_USER     # load handler
@@ -1320,6 +1194,7 @@ fast_syscall_spill_registers_fixup_return:
        xsr     a3, excsave1
        movi    a2, fast_syscall_spill_registers_fixup
        s32i    a2, a3, EXC_TABLE_FIXUP
+       s32i    a0, a3, EXC_TABLE_DOUBLE_SAVE
        rsr     a2, windowbase
        s32i    a2, a3, EXC_TABLE_PARAM
        l32i    a2, a3, EXC_TABLE_KSTK
@@ -1331,11 +1206,6 @@ fast_syscall_spill_registers_fixup_return:
        wsr     a3, windowbase
        rsync
 
-       /* Restore a3 and return. */
-
-       movi    a3, exc_table
-       xsr     a3, excsave1
-
        rfde
 
 
@@ -1522,9 +1392,8 @@ ENTRY(_spill_registers)
 
        movi    a0, 0
 
-       movi    a3, exc_table
+       rsr     a3, excsave1
        l32i    a1, a3, EXC_TABLE_KSTK
-       wsr     a3, excsave1
 
        movi    a4, (1 << PS_WOE_BIT) | LOCKLEVEL
        wsr     a4, ps
@@ -1568,9 +1437,9 @@ ENDPROC(fast_second_level_miss_double_kernel)
  *   a0:       trashed, original value saved on stack (PT_AREG0)
  *   a1:       a1
  *   a2:       new stack pointer, original in DEPC
- *   a3:       dispatch table
+ *   a3:       a3
  *   depc:     a2, original value saved on stack (PT_DEPC)
- *   excsave_1:        a3
+ *   excsave_1:        dispatch table
  *
  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  *          <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -1578,9 +1447,10 @@ ENDPROC(fast_second_level_miss_double_kernel)
 
 ENTRY(fast_second_level_miss)
 
-       /* Save a1. Note: we don't expect a double exception. */
+       /* Save a1 and a3. Note: we don't expect a double exception. */
 
        s32i    a1, a2, PT_AREG1
+       s32i    a3, a2, PT_AREG3
 
        /* We need to map the page of PTEs for the user task.  Find
         * the pointer to that page.  Also, it's possible for tsk->mm
@@ -1602,9 +1472,6 @@ ENTRY(fast_second_level_miss)
        l32i    a0, a1, TASK_MM         # tsk->mm
        beqz    a0, 9f
 
-
-       /* We deliberately destroy a3 that holds the exception table. */
-
 8:     rsr     a3, excvaddr            # fault address
        _PGD_OFFSET(a0, a3, a1)
        l32i    a0, a0, 0               # read pmdval
@@ -1655,7 +1522,7 @@ ENTRY(fast_second_level_miss)
 
        /* Exit critical section. */
 
-4:     movi    a3, exc_table           # restore a3
+4:     rsr     a3, excsave1
        movi    a0, 0
        s32i    a0, a3, EXC_TABLE_FIXUP
 
@@ -1663,8 +1530,8 @@ ENTRY(fast_second_level_miss)
 
        l32i    a0, a2, PT_AREG0
        l32i    a1, a2, PT_AREG1
+       l32i    a3, a2, PT_AREG3
        l32i    a2, a2, PT_DEPC
-       xsr     a3, excsave1
 
        bgeui   a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
 
@@ -1751,11 +1618,8 @@ ENTRY(fast_second_level_miss)
 
 2:     /* Invalid PGD, default exception handling */
 
-       movi    a3, exc_table
        rsr     a1, depc
-       xsr     a3, excsave1
        s32i    a1, a2, PT_AREG2
-       s32i    a3, a2, PT_AREG3
        mov     a1, a2
 
        rsr     a2, ps
@@ -1775,9 +1639,9 @@ ENDPROC(fast_second_level_miss)
  *   a0:       trashed, original value saved on stack (PT_AREG0)
  *   a1:       a1
  *   a2:       new stack pointer, original in DEPC
- *   a3:       dispatch table
+ *   a3:       a3
  *   depc:     a2, original value saved on stack (PT_DEPC)
- *   excsave_1:        a3
+ *   excsave_1:        dispatch table
  *
  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  *          <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -1785,17 +1649,17 @@ ENDPROC(fast_second_level_miss)
 
 ENTRY(fast_store_prohibited)
 
-       /* Save a1 and a4. */
+       /* Save a1 and a3. */
 
        s32i    a1, a2, PT_AREG1
-       s32i    a4, a2, PT_AREG4
+       s32i    a3, a2, PT_AREG3
 
        GET_CURRENT(a1,a2)
        l32i    a0, a1, TASK_MM         # tsk->mm
        beqz    a0, 9f
 
 8:     rsr     a1, excvaddr            # fault address
-       _PGD_OFFSET(a0, a1, a4)
+       _PGD_OFFSET(a0, a1, a3)
        l32i    a0, a0, 0
        beqz    a0, 2f
 
@@ -1804,39 +1668,37 @@ ENTRY(fast_store_prohibited)
         * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
         */
 
-       _PTE_OFFSET(a0, a1, a4)
-       l32i    a4, a0, 0               # read pteval
+       _PTE_OFFSET(a0, a1, a3)
+       l32i    a3, a0, 0               # read pteval
        movi    a1, _PAGE_CA_INVALID
-       ball    a4, a1, 2f
-       bbci.l  a4, _PAGE_WRITABLE_BIT, 2f
+       ball    a3, a1, 2f
+       bbci.l  a3, _PAGE_WRITABLE_BIT, 2f
 
        movi    a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
-       or      a4, a4, a1
+       or      a3, a3, a1
        rsr     a1, excvaddr
-       s32i    a4, a0, 0
+       s32i    a3, a0, 0
 
        /* We need to flush the cache if we have page coloring. */
 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
        dhwb    a0, 0
 #endif
        pdtlb   a0, a1
-       wdtlb   a4, a0
+       wdtlb   a3, a0
 
        /* Exit critical section. */
 
        movi    a0, 0
+       rsr     a3, excsave1
        s32i    a0, a3, EXC_TABLE_FIXUP
 
        /* Restore the working registers, and return. */
 
-       l32i    a4, a2, PT_AREG4
+       l32i    a3, a2, PT_AREG3
        l32i    a1, a2, PT_AREG1
        l32i    a0, a2, PT_AREG0
        l32i    a2, a2, PT_DEPC
 
-       /* Restore excsave1 and a3. */
-
-       xsr     a3, excsave1
        bgeui   a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
 
        rsr     a2, depc
@@ -1853,11 +1715,8 @@ ENTRY(fast_store_prohibited)
 
 2:     /* If there was a problem, handle fault in C */
 
-       rsr     a4, depc        # still holds a2
-       xsr     a3, excsave1
-       s32i    a4, a2, PT_AREG2
-       s32i    a3, a2, PT_AREG3
-       l32i    a4, a2, PT_AREG4
+       rsr     a3, depc        # still holds a2
+       s32i    a3, a2, PT_AREG2
        mov     a1, a2
 
        rsr     a2, ps
index 101012b..946fb8d 100644 (file)
@@ -584,8 +584,8 @@ c_show(struct seq_file *f, void *slot)
                     "bogomips\t: %lu.%02lu\n",
                     XCHAL_BUILD_UNIQUE_ID,
                     XCHAL_HAVE_BE ?  "big" : "little",
-                    CCOUNT_PER_JIFFY/(1000000/HZ),
-                    (CCOUNT_PER_JIFFY/(10000/HZ)) % 100,
+                    ccount_freq/1000000,
+                    (ccount_freq/10000) % 100,
                     loops_per_jiffy/(500000/HZ),
                     (loops_per_jiffy/(5000/HZ)) % 100);
 
index 24bb0c1..9af3dd8 100644 (file)
@@ -29,9 +29,7 @@
 #include <asm/timex.h>
 #include <asm/platform.h>
 
-#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
 unsigned long ccount_freq;             /* ccount Hz */
-#endif
 
 static cycle_t ccount_read(struct clocksource *cs)
 {
@@ -129,8 +127,10 @@ void __init time_init(void)
        platform_calibrate_ccount();
        printk("%d.%02d MHz\n", (int)ccount_freq/1000000,
                        (int)(ccount_freq/10000)%100);
+#else
+       ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL;
 #endif
-       clocksource_register_hz(&ccount_clocksource, CCOUNT_PER_JIFFY * HZ);
+       clocksource_register_hz(&ccount_clocksource, ccount_freq);
 
        ccount_timer.evt.cpumask = cpumask_of(0);
        ccount_timer.evt.irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
@@ -164,7 +164,7 @@ irqreturn_t timer_interrupt (int irq, void *dev_id)
 #ifndef CONFIG_GENERIC_CALIBRATE_DELAY
 void calibrate_delay(void)
 {
-       loops_per_jiffy = CCOUNT_PER_JIFFY;
+       loops_per_jiffy = ccount_freq / HZ;
        printk("Calibrating delay loop (skipped)... "
               "%lu.%02lu BogoMIPS preset\n",
               loops_per_jiffy/(1000000/HZ),
index f9e1753..cb8fd44 100644 (file)
@@ -78,6 +78,7 @@ ENTRY(_UserExceptionVector)
        s32i    a0, a2, PT_DEPC         # mark it as a regular exception
        addx4   a0, a0, a3              # find entry in table
        l32i    a0, a0, EXC_TABLE_FAST_USER     # load handler
+       xsr     a3, excsave1            # restore a3 and dispatch table
        jx      a0
 
 ENDPROC(_UserExceptionVector)
@@ -104,6 +105,7 @@ ENTRY(_KernelExceptionVector)
        s32i    a0, a2, PT_DEPC         # mark it as a regular exception
        addx4   a0, a0, a3              # find entry in table
        l32i    a0, a0, EXC_TABLE_FAST_KERNEL   # load handler address
+       xsr     a3, excsave1            # restore a3 and dispatch table
        jx      a0
 
 ENDPROC(_KernelExceptionVector)
@@ -168,7 +170,7 @@ ENDPROC(_KernelExceptionVector)
  *
  *     a0:        DEPC
  *     a1:        a1
- *     a2:        trashed, original value in EXC_TABLE_DOUBLE_A2
+ *     a2:        trashed, original value in EXC_TABLE_DOUBLE_SAVE
  *     a3:        exctable
  *     depc:      a0
  *     excsave_1: a3
@@ -204,47 +206,46 @@ ENDPROC(_KernelExceptionVector)
 
        .section .DoubleExceptionVector.text, "ax"
        .begin literal_prefix .DoubleExceptionVector
+       .globl _DoubleExceptionVector_WindowUnderflow
+       .globl _DoubleExceptionVector_WindowOverflow
 
 ENTRY(_DoubleExceptionVector)
 
-       /* Deliberately destroy excsave (don't assume it's value was valid). */
-
-       wsr     a3, excsave1            # save a3
+       xsr     a3, excsave1
+       s32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
 
        /* Check for kernel double exception (usually fatal). */
 
-       rsr     a3, ps
-       _bbci.l a3, PS_UM_BIT, .Lksp
+       rsr     a2, ps
+       _bbci.l a2, PS_UM_BIT, .Lksp
 
        /* Check if we are currently handling a window exception. */
        /* Note: We don't need to indicate that we enter a critical section. */
 
        xsr     a0, depc                # get DEPC, save a0
 
-       movi    a3, WINDOW_VECTORS_VADDR
-       _bltu   a0, a3, .Lfixup
-       addi    a3, a3, WINDOW_VECTORS_SIZE
-       _bgeu   a0, a3, .Lfixup
+       movi    a2, WINDOW_VECTORS_VADDR
+       _bltu   a0, a2, .Lfixup
+       addi    a2, a2, WINDOW_VECTORS_SIZE
+       _bgeu   a0, a2, .Lfixup
 
        /* Window overflow/underflow exception. Get stack pointer. */
 
-       mov     a3, a2
-       /* This explicit literal and the following references to it are made
-        * in order to fit DoubleExceptionVector.literals into the available
-        * 16-byte gap before DoubleExceptionVector.text in the absence of
-        * link time relaxation. See kernel/vmlinux.lds.S
-        */
-       .literal .Lexc_table, exc_table
-       l32r    a2, .Lexc_table
-       l32i    a2, a2, EXC_TABLE_KSTK
+       l32i    a2, a3, EXC_TABLE_KSTK
 
        /* Check for overflow/underflow exception, jump if overflow. */
 
-       _bbci.l a0, 6, .Lovfl
-
-       /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3  */
+       _bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow
 
-       /* Restart window underflow exception.
+       /*
+        * Restart window underflow exception.
+        * Currently:
+        *      depc = orig a0,
+        *      a0 = orig DEPC,
+        *      a2 = new sp based on KSTK from exc_table
+        *      a3 = excsave_1
+        *      excsave_1 = orig a3
+        *
         * We return to the instruction in user space that caused the window
         * underflow exception. Therefore, we change window base to the value
         * before we entered the window underflow exception and prepare the
@@ -252,10 +253,11 @@ ENTRY(_DoubleExceptionVector)
         * by changing depc (in a0).
         * Note: We can trash the current window frame (a0...a3) and depc!
         */
-
+_DoubleExceptionVector_WindowUnderflow:
+       xsr     a3, excsave1
        wsr     a2, depc                # save stack pointer temporarily
        rsr     a0, ps
-       extui   a0, a0, PS_OWB_SHIFT, 4
+       extui   a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
        wsr     a0, windowbase
        rsync
 
@@ -263,28 +265,57 @@ ENTRY(_DoubleExceptionVector)
 
        xsr     a2, depc                # save a2 and get stack pointer
        s32i    a0, a2, PT_AREG0
-
-       wsr     a3, excsave1            # save a3
-       l32r    a3, .Lexc_table
-
+       xsr     a3, excsave1
        rsr     a0, exccause
        s32i    a0, a2, PT_DEPC         # mark it as a regular exception
        addx4   a0, a0, a3
+       xsr     a3, excsave1
        l32i    a0, a0, EXC_TABLE_FAST_USER
        jx      a0
 
-.Lfixup:/* Check for a fixup handler or if we were in a critical section. */
+       /*
+        * We only allow the ITLB miss exception if we are in kernel space.
+        * All other exceptions are unexpected and thus unrecoverable!
+        */
+
+#ifdef CONFIG_MMU
+       .extern fast_second_level_miss_double_kernel
+
+.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
+
+       rsr     a3, exccause
+       beqi    a3, EXCCAUSE_ITLB_MISS, 1f
+       addi    a3, a3, -EXCCAUSE_DTLB_MISS
+       bnez    a3, .Lunrecoverable
+1:     movi    a3, fast_second_level_miss_double_kernel
+       jx      a3
+#else
+.equ   .Lksp,  .Lunrecoverable
+#endif
+
+       /* Critical! We can't handle this situation. PANIC! */
 
-       /* a0: depc, a1: a1, a2: a2, a3: trashed, depc: a0, excsave1: a3 */
+       .extern unrecoverable_exception
 
-       l32r    a3, .Lexc_table
-       s32i    a2, a3, EXC_TABLE_DOUBLE_SAVE   # temporary variable
+.Lunrecoverable_fixup:
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a0, depc
+
+.Lunrecoverable:
+       rsr     a3, excsave1
+       wsr     a0, excsave1
+       movi    a0, unrecoverable_exception
+       callx0  a0
+
+.Lfixup:/* Check for a fixup handler or if we were in a critical section. */
+
+       /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave1: a3 */
 
        /* Enter critical section. */
 
        l32i    a2, a3, EXC_TABLE_FIXUP
        s32i    a3, a3, EXC_TABLE_FIXUP
-       beq     a2, a3, .Lunrecoverable_fixup   # critical!
+       beq     a2, a3, .Lunrecoverable_fixup   # critical section
        beqz    a2, .Ldflt                      # no handler was registered
 
        /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave: a3 */
@@ -293,58 +324,145 @@ ENTRY(_DoubleExceptionVector)
 
 .Ldflt:        /* Get stack pointer. */
 
-       l32i    a3, a3, EXC_TABLE_DOUBLE_SAVE
-       addi    a2, a3, -PT_USER_SIZE
-
-.Lovfl:        /* Jump to default handlers. */
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       addi    a2, a2, -PT_USER_SIZE
 
-       /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */
+       /* a0: depc, a1: a1, a2: kstk, a3: exctable, depc: a0, excsave: a3 */
 
-       xsr     a3, depc
        s32i    a0, a2, PT_DEPC
-       s32i    a3, a2, PT_AREG0
+       l32i    a0, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a0, depc
+       s32i    a0, a2, PT_AREG0
 
-       /* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */
+       /* a0: avail, a1: a1, a2: kstk, a3: exctable, depc: a2, excsave: a3 */
 
-       l32r    a3, .Lexc_table
        rsr     a0, exccause
        addx4   a0, a0, a3
+       xsr     a3, excsave1
        l32i    a0, a0, EXC_TABLE_FAST_USER
        jx      a0
 
        /*
-        * We only allow the ITLB miss exception if we are in kernel space.
-        * All other exceptions are unexpected and thus unrecoverable!
+        * Restart window OVERFLOW exception.
+        * Currently:
+        *      depc = orig a0,
+        *      a0 = orig DEPC,
+        *      a2 = new sp based on KSTK from exc_table
+        *      a3 = EXCSAVE_1
+        *      excsave_1 = orig a3
+        *
+        * We return to the instruction in user space that caused the window
+        * overflow exception. Therefore, we change window base to the value
+        * before we entered the window overflow exception and prepare the
+        * registers to return as if we were coming from a regular exception
+        * by changing DEPC (in a0).
+        *
+        * NOTE: We CANNOT trash the current window frame (a0...a3), but we
+        * can clobber depc.
+        *
+        * The tricky part here is that overflow8 and overflow12 handlers
+        * save a0, then clobber a0.  To restart the handler, we have to restore
+        * a0 if the double exception was past the point where a0 was clobbered.
+        *
+        * To keep things simple, we take advantage of the fact all overflow
+        * handlers save a0 in their very first instruction.  If DEPC was past
+        * that instruction, we can safely restore a0 from where it was saved
+        * on the stack.
+        *
+        * a0: depc, a1: a1, a2: kstk, a3: exc_table, depc: a0, excsave1: a3
         */
+_DoubleExceptionVector_WindowOverflow:
+       extui   a2, a0, 0, 6    # get offset into 64-byte vector handler
+       beqz    a2, 1f          # if at start of vector, don't restore
 
-#ifdef CONFIG_MMU
-       .extern fast_second_level_miss_double_kernel
+       addi    a0, a0, -128
+       bbsi    a0, 8, 1f       # don't restore except for overflow 8 and 12
+       bbsi    a0, 7, 2f
 
-.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
+       /*
+        * Restore a0 as saved by _WindowOverflow8().
+        *
+        * FIXME:  we really need a fixup handler for this L32E,
+        * for the extremely unlikely case where the overflow handler's
+        * reference thru a0 gets a hardware TLB refill that bumps out
+        * the (distinct, aliasing) TLB entry that mapped its prior
+        * references thru a9, and where our reference now thru a9
+        * gets a 2nd-level miss exception (not hardware TLB refill).
+        */
 
-       rsr     a3, exccause
-       beqi    a3, EXCCAUSE_ITLB_MISS, 1f
-       addi    a3, a3, -EXCCAUSE_DTLB_MISS
-       bnez    a3, .Lunrecoverable
-1:     movi    a3, fast_second_level_miss_double_kernel
-       jx      a3
-#else
-.equ   .Lksp,  .Lunrecoverable
-#endif
+       l32e    a2, a9, -16
+       wsr     a2, depc        # replace the saved a0
+       j       1f
 
-       /* Critical! We can't handle this situation. PANIC! */
+2:
+       /*
+        * Restore a0 as saved by _WindowOverflow12().
+        *
+        * FIXME:  we really need a fixup handler for this L32E,
+        * for the extremely unlikely case where the overflow handler's
+        * reference thru a0 gets a hardware TLB refill that bumps out
+        * the (distinct, aliasing) TLB entry that mapped its prior
+        * references thru a13, and where our reference now thru a13
+        * gets a 2nd-level miss exception (not hardware TLB refill).
+        */
 
-       .extern unrecoverable_exception
+       l32e    a2, a13, -16
+       wsr     a2, depc        # replace the saved a0
+1:
+       /*
+        * Restore WindowBase while leaving all address registers restored.
+        * We have to use ROTW for this, because WSR.WINDOWBASE requires
+        * an address register (which would prevent restore).
+        *
+        * Window Base goes from 0 ... 7 (Modulo 8)
+        * Window Start is 8 bits; Ex: (0b1010 1010):0x55 from series of call4s
+        */
+
+       rsr     a0, ps
+       extui   a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+       rsr     a2, windowbase
+       sub     a0, a2, a0
+       extui   a0, a0, 0, 3
 
-.Lunrecoverable_fixup:
        l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
-       xsr     a0, depc
+       xsr     a3, excsave1
+       beqi    a0, 1, .L1pane
+       beqi    a0, 3, .L3pane
 
-.Lunrecoverable:
-       rsr     a3, excsave1
-       wsr     a0, excsave1
-       movi    a0, unrecoverable_exception
-       callx0  a0
+       rsr     a0, depc
+       rotw    -2
+
+       /*
+        * We are now in the user code's original window frame.
+        * Process the exception as a user exception as if it was
+        * taken by the user code.
+        *
+        * This is similar to the user exception vector,
+        * except that PT_DEPC isn't set to EXCCAUSE.
+        */
+1:
+       xsr     a3, excsave1
+       wsr     a2, depc
+       l32i    a2, a3, EXC_TABLE_KSTK
+       s32i    a0, a2, PT_AREG0
+       rsr     a0, exccause
+
+       s32i    a0, a2, PT_DEPC
+
+       addx4   a0, a0, a3
+       l32i    a0, a0, EXC_TABLE_FAST_USER
+       xsr     a3, excsave1
+       jx      a0
+
+.L1pane:
+       rsr     a0, depc
+       rotw    -1
+       j       1b
+
+.L3pane:
+       rsr     a0, depc
+       rotw    -3
+       j       1b
 
        .end literal_prefix
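
The ROTW sequence above computes how many 4-register panes separate the current
WindowBase from the owner's WindowBase (taken from PS.OWB), modulo 8, and then
rotates back by 1, 2 or 3 panes. A minimal user-space sketch of the same
arithmetic performed by the RSR/EXTUI/SUB instructions (the sample values are
hypothetical):

#include <stdio.h>

/* (current_wb - owner_wb) mod 8, mirroring "sub a0, a2, a0" followed by
 * "extui a0, a0, 0, 3" in the vector code above. */
static unsigned int panes_to_rotate(unsigned int current_wb, unsigned int owner_wb)
{
        return (current_wb - owner_wb) & 7;     /* WindowBase is modulo 8 */
}

int main(void)
{
        /* e.g. owner frame at WindowBase 5, exception handled at WindowBase 7 */
        printf("rotate back %u pane(s)\n", panes_to_rotate(7, 5));
        return 0;
}
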
 
index d8507f8..74a60c7 100644 (file)
@@ -25,6 +25,7 @@
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
+#include <asm/ftrace.h>
 #ifdef CONFIG_BLK_DEV_FD
 #include <asm/floppy.h>
 #endif
index 1a5ec9a..1eb09ee 100644 (file)
@@ -186,6 +186,7 @@ invalid:
  */
 static int is_pmbr_valid(legacy_mbr *mbr, sector_t total_sectors)
 {
+       uint32_t sz = 0;
        int i, part = 0, ret = 0; /* invalid by default */
 
        if (!mbr || le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE)
@@ -216,12 +217,15 @@ check_hybrid:
        /*
         * Protective MBRs take up the lesser of the whole disk
         * or 2 TiB (32bit LBA), ignoring the rest of the disk.
+        * Some partitioning programs, nonetheless, choose to set
+        * the size to the maximum 32-bit limitation, disregarding
+        * the disk size.
         *
         * Hybrid MBRs do not necessarily comply with this.
         */
        if (ret == GPT_MBR_PROTECTIVE) {
-               if (le32_to_cpu(mbr->partition_record[part].size_in_lba) !=
-                   min((uint32_t) total_sectors - 1, 0xFFFFFFFF))
+               sz = le32_to_cpu(mbr->partition_record[part].size_in_lba);
+               if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF)
                        ret = 0;
        }
 done:
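
The hunk above relaxes the protective-MBR check: the partition size recorded in
the MBR may be either the real disk size (in LBAs, minus the MBR sector itself)
or the saturated 32-bit value 0xFFFFFFFF that some partitioning tools write for
disks larger than 2 TiB. A standalone sketch of the relaxed test (the function
name is hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool pmbr_size_ok(uint32_t size_in_lba, uint64_t total_sectors)
{
        /* accept the exact size or the 32-bit maximum */
        return size_in_lba == (uint32_t)total_sectors - 1 ||
               size_in_lba == 0xFFFFFFFFu;
}

int main(void)
{
        /* 2 TiB disk (0x100000000 sectors) with a saturated size field,
         * and a small disk with an exact size field */
        printf("%d %d\n", pmbr_size_ok(0xFFFFFFFFu, 0x100000000ULL),
               pmbr_size_ok(2047, 2048));
        return 0;
}
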
index 4329a29..b9c81b7 100644 (file)
@@ -315,68 +315,47 @@ static int em_sti_probe(struct platform_device *pdev)
 {
        struct em_sti_priv *p;
        struct resource *res;
-       int irq, ret;
+       int irq;
 
-       p = kzalloc(sizeof(*p), GFP_KERNEL);
+       p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
        if (p == NULL) {
                dev_err(&pdev->dev, "failed to allocate driver data\n");
-               ret = -ENOMEM;
-               goto err0;
+               return -ENOMEM;
        }
 
        p->pdev = pdev;
        platform_set_drvdata(pdev, p);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "failed to get I/O memory\n");
-               ret = -EINVAL;
-               goto err0;
-       }
-
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "failed to get irq\n");
-               ret = -EINVAL;
-               goto err0;
+               return -EINVAL;
        }
 
        /* map memory, let base point to the STI instance */
-       p->base = ioremap_nocache(res->start, resource_size(res));
-       if (p->base == NULL) {
-               dev_err(&pdev->dev, "failed to remap I/O memory\n");
-               ret = -ENXIO;
-               goto err0;
-       }
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       p->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(p->base))
+               return PTR_ERR(p->base);
 
        /* get hold of clock */
-       p->clk = clk_get(&pdev->dev, "sclk");
+       p->clk = devm_clk_get(&pdev->dev, "sclk");
        if (IS_ERR(p->clk)) {
                dev_err(&pdev->dev, "cannot get clock\n");
-               ret = PTR_ERR(p->clk);
-               goto err1;
+               return PTR_ERR(p->clk);
        }
 
-       if (request_irq(irq, em_sti_interrupt,
-                       IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
-                       dev_name(&pdev->dev), p)) {
+       if (devm_request_irq(&pdev->dev, irq, em_sti_interrupt,
+                            IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+                            dev_name(&pdev->dev), p)) {
                dev_err(&pdev->dev, "failed to request low IRQ\n");
-               ret = -ENOENT;
-               goto err2;
+               return -ENOENT;
        }
 
        raw_spin_lock_init(&p->lock);
        em_sti_register_clockevent(p);
        em_sti_register_clocksource(p);
        return 0;
-
-err2:
-       clk_put(p->clk);
-err1:
-       iounmap(p->base);
-err0:
-       kfree(p);
-       return ret;
 }
 
 static int em_sti_remove(struct platform_device *pdev)
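
The em_sti conversion above replaces kzalloc()/ioremap_nocache()/clk_get()/
request_irq() plus goto-based unwinding with their devm_* managed counterparts,
so every failure path simply returns and the driver core releases the resources
on probe failure or device removal. A minimal probe() skeleton in the same
style (the foo_* names are hypothetical, not part of the patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
        void __iomem *base;
        struct clk *clk;
};

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        struct foo_priv *p;
        struct resource *res;
        int irq;

        p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        p->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(p->base))
                return PTR_ERR(p->base);

        p->clk = devm_clk_get(&pdev->dev, "sclk");
        if (IS_ERR(p->clk))
                return PTR_ERR(p->clk);

        return devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
                                dev_name(&pdev->dev), p);
}
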
index 7d2c2c5..1b74bea 100644 (file)
@@ -165,7 +165,8 @@ static void nmdk_clkevt_resume(struct clock_event_device *cedev)
 
 static struct clock_event_device nmdk_clkevt = {
        .name           = "mtu_1",
-       .features       = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+       .features       = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC |
+                         CLOCK_EVT_FEAT_DYNIRQ,
        .rating         = 200,
        .set_mode       = nmdk_clkevt_mode,
        .set_next_event = nmdk_clkevt_next,
index 08d0c41..0965e98 100644 (file)
@@ -37,6 +37,7 @@
 
 struct sh_cmt_priv {
        void __iomem *mapbase;
+       void __iomem *mapbase_str;
        struct clk *clk;
        unsigned long width; /* 16 or 32 bit version of hardware block */
        unsigned long overflow_bit;
@@ -79,6 +80,12 @@ struct sh_cmt_priv {
  * CMCSR 0xffca0060 16-bit
  * CMCNT 0xffca0064 32-bit
  * CMCOR 0xffca0068 32-bit
+ *
+ * "32-bit counter and 32-bit control" as found on r8a73a4 and r8a7790:
+ * CMSTR 0xffca0500 32-bit
+ * CMCSR 0xffca0510 32-bit
+ * CMCNT 0xffca0514 32-bit
+ * CMCOR 0xffca0518 32-bit
  */
 
 static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
@@ -109,9 +116,7 @@ static void sh_cmt_write32(void __iomem *base, unsigned long offs,
 
 static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p)
 {
-       struct sh_timer_config *cfg = p->pdev->dev.platform_data;
-
-       return p->read_control(p->mapbase - cfg->channel_offset, 0);
+       return p->read_control(p->mapbase_str, 0);
 }
 
 static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p)
@@ -127,9 +132,7 @@ static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_priv *p)
 static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p,
                                      unsigned long value)
 {
-       struct sh_timer_config *cfg = p->pdev->dev.platform_data;
-
-       p->write_control(p->mapbase - cfg->channel_offset, 0, value);
+       p->write_control(p->mapbase_str, 0, value);
 }
 
 static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p,
@@ -676,7 +679,7 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
 static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
 {
        struct sh_timer_config *cfg = pdev->dev.platform_data;
-       struct resource *res;
+       struct resource *res, *res2;
        int irq, ret;
        ret = -ENXIO;
 
@@ -694,6 +697,9 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
                goto err0;
        }
 
+       /* optional resource for the shared timer start/stop register */
+       res2 = platform_get_resource(p->pdev, IORESOURCE_MEM, 1);
+
        irq = platform_get_irq(p->pdev, 0);
        if (irq < 0) {
                dev_err(&p->pdev->dev, "failed to get irq\n");
@@ -707,6 +713,15 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
                goto err0;
        }
 
+       /* map second resource for CMSTR */
+       p->mapbase_str = ioremap_nocache(res2 ? res2->start :
+                                        res->start - cfg->channel_offset,
+                                        res2 ? resource_size(res2) : 2);
+       if (p->mapbase_str == NULL) {
+               dev_err(&p->pdev->dev, "failed to remap second I/O memory\n");
+               goto err1;
+       }
+
        /* request irq using setup_irq() (too early for request_irq()) */
        p->irqaction.name = dev_name(&p->pdev->dev);
        p->irqaction.handler = sh_cmt_interrupt;
@@ -719,11 +734,17 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
        if (IS_ERR(p->clk)) {
                dev_err(&p->pdev->dev, "cannot get clock\n");
                ret = PTR_ERR(p->clk);
-               goto err1;
+               goto err2;
        }
 
-       p->read_control = sh_cmt_read16;
-       p->write_control = sh_cmt_write16;
+       if (res2 && (resource_size(res2) == 4)) {
+               /* assume both CMSTR and CMCSR to be 32-bit */
+               p->read_control = sh_cmt_read32;
+               p->write_control = sh_cmt_write32;
+       } else {
+               p->read_control = sh_cmt_read16;
+               p->write_control = sh_cmt_write16;
+       }
 
        if (resource_size(res) == 6) {
                p->width = 16;
@@ -752,22 +773,23 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
                              cfg->clocksource_rating);
        if (ret) {
                dev_err(&p->pdev->dev, "registration failed\n");
-               goto err2;
+               goto err3;
        }
        p->cs_enabled = false;
 
        ret = setup_irq(irq, &p->irqaction);
        if (ret) {
                dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
-               goto err2;
+               goto err3;
        }
 
        platform_set_drvdata(pdev, p);
 
        return 0;
-err2:
+err3:
        clk_put(p->clk);
-
+err2:
+       iounmap(p->mapbase_str);
 err1:
        iounmap(p->mapbase);
 err0:
index 847cab6..0198504 100644 (file)
  *
  * Timer 0 is used as free-running clocksource, while timer 1 is
  * used as clock_event_device.
+ *
+ * ---
+ * Clocksource driver for Armada 370 and Armada XP SoC.
+ * This driver implements one compatible string for each SoC, given
+ * each has its own characteristics:
+ *
+ *   * Armada 370 has no 25 MHz fixed timer.
+ *
+ *   * Armada XP cannot work properly without such a 25 MHz fixed timer, as
+ *     doing otherwise leads to using a clocksource whose frequency varies
+ *     whenever cpufreq changes the CPU frequency.
+ *
+ * See Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
  */
 
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/sched_clock.h>
 #include <linux/percpu.h>
-#include <linux/time-armada-370-xp.h>
 
 /*
  * Timer block registers.
  */
 #define TIMER_CTRL_OFF         0x0000
-#define  TIMER0_EN              0x0001
-#define  TIMER0_RELOAD_EN       0x0002
-#define  TIMER0_25MHZ            0x0800
+#define  TIMER0_EN              BIT(0)
+#define  TIMER0_RELOAD_EN       BIT(1)
+#define  TIMER0_25MHZ            BIT(11)
 #define  TIMER0_DIV(div)         ((div) << 19)
-#define  TIMER1_EN              0x0004
-#define  TIMER1_RELOAD_EN       0x0008
-#define  TIMER1_25MHZ            0x1000
+#define  TIMER1_EN              BIT(2)
+#define  TIMER1_RELOAD_EN       BIT(3)
+#define  TIMER1_25MHZ            BIT(12)
 #define  TIMER1_DIV(div)         ((div) << 22)
 #define TIMER_EVENTS_STATUS    0x0004
 #define  TIMER0_CLR_MASK         (~0x1)
@@ -72,6 +84,18 @@ static u32 ticks_per_jiffy;
 
 static struct clock_event_device __percpu *armada_370_xp_evt;
 
+static void timer_ctrl_clrset(u32 clr, u32 set)
+{
+       writel((readl(timer_base + TIMER_CTRL_OFF) & ~clr) | set,
+               timer_base + TIMER_CTRL_OFF);
+}
+
+static void local_timer_ctrl_clrset(u32 clr, u32 set)
+{
+       writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set,
+               local_base + TIMER_CTRL_OFF);
+}
+
 static u32 notrace armada_370_xp_read_sched_clock(void)
 {
        return ~readl(timer_base + TIMER0_VAL_OFF);
@@ -84,7 +108,6 @@ static int
 armada_370_xp_clkevt_next_event(unsigned long delta,
                                struct clock_event_device *dev)
 {
-       u32 u;
        /*
         * Clear clockevent timer interrupt.
         */
@@ -98,11 +121,8 @@ armada_370_xp_clkevt_next_event(unsigned long delta,
        /*
         * Enable the timer.
         */
-       u = readl(local_base + TIMER_CTRL_OFF);
-       u = ((u & ~TIMER0_RELOAD_EN) | TIMER0_EN |
-            TIMER0_DIV(TIMER_DIVIDER_SHIFT));
-       writel(u, local_base + TIMER_CTRL_OFF);
-
+       local_timer_ctrl_clrset(TIMER0_RELOAD_EN,
+                               TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT));
        return 0;
 }
 
@@ -110,8 +130,6 @@ static void
 armada_370_xp_clkevt_mode(enum clock_event_mode mode,
                          struct clock_event_device *dev)
 {
-       u32 u;
-
        if (mode == CLOCK_EVT_MODE_PERIODIC) {
 
                /*
@@ -123,18 +141,14 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode,
                /*
                 * Enable timer.
                 */
-
-               u = readl(local_base + TIMER_CTRL_OFF);
-
-               writel((u | TIMER0_EN | TIMER0_RELOAD_EN |
-                       TIMER0_DIV(TIMER_DIVIDER_SHIFT)),
-                       local_base + TIMER_CTRL_OFF);
+               local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN |
+                                          TIMER0_EN |
+                                          TIMER0_DIV(TIMER_DIVIDER_SHIFT));
        } else {
                /*
                 * Disable timer.
                 */
-               u = readl(local_base + TIMER_CTRL_OFF);
-               writel(u & ~TIMER0_EN, local_base + TIMER_CTRL_OFF);
+               local_timer_ctrl_clrset(TIMER0_EN, 0);
 
                /*
                 * ACK pending timer interrupt.
@@ -163,14 +177,14 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
  */
 static int armada_370_xp_timer_setup(struct clock_event_device *evt)
 {
-       u32 u;
+       u32 clr = 0, set = 0;
        int cpu = smp_processor_id();
 
-       u = readl(local_base + TIMER_CTRL_OFF);
        if (timer25Mhz)
-               writel(u | TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
+               set = TIMER0_25MHZ;
        else
-               writel(u & ~TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
+               clr = TIMER0_25MHZ;
+       local_timer_ctrl_clrset(clr, set);
 
        evt->name               = "armada_370_xp_per_cpu_tick",
        evt->features           = CLOCK_EVT_FEAT_ONESHOT |
@@ -217,36 +231,21 @@ static struct notifier_block armada_370_xp_timer_cpu_nb = {
        .notifier_call = armada_370_xp_timer_cpu_notify,
 };
 
-void __init armada_370_xp_timer_init(void)
+static void __init armada_370_xp_timer_common_init(struct device_node *np)
 {
-       u32 u;
-       struct device_node *np;
+       u32 clr = 0, set = 0;
        int res;
 
-       np = of_find_compatible_node(NULL, NULL, "marvell,armada-370-xp-timer");
        timer_base = of_iomap(np, 0);
        WARN_ON(!timer_base);
        local_base = of_iomap(np, 1);
 
-       if (of_find_property(np, "marvell,timer-25Mhz", NULL)) {
-               /* The fixed 25MHz timer is available so let's use it */
-               u = readl(timer_base + TIMER_CTRL_OFF);
-               writel(u | TIMER0_25MHZ,
-                      timer_base + TIMER_CTRL_OFF);
-               timer_clk = 25000000;
-       } else {
-               unsigned long rate = 0;
-               struct clk *clk = of_clk_get(np, 0);
-               WARN_ON(IS_ERR(clk));
-               rate =  clk_get_rate(clk);
-
-               u = readl(timer_base + TIMER_CTRL_OFF);
-               writel(u & ~(TIMER0_25MHZ),
-                      timer_base + TIMER_CTRL_OFF);
-
-               timer_clk = rate / TIMER_DIVIDER;
-               timer25Mhz = false;
-       }
+       if (timer25Mhz)
+               set = TIMER0_25MHZ;
+       else
+               clr = TIMER0_25MHZ;
+       timer_ctrl_clrset(clr, set);
+       local_timer_ctrl_clrset(clr, set);
 
        /*
         * We use timer 0 as clocksource, and private(local) timer 0
@@ -268,10 +267,8 @@ void __init armada_370_xp_timer_init(void)
        writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
        writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
 
-       u = readl(timer_base + TIMER_CTRL_OFF);
-
-       writel((u | TIMER0_EN | TIMER0_RELOAD_EN |
-               TIMER0_DIV(TIMER_DIVIDER_SHIFT)), timer_base + TIMER_CTRL_OFF);
+       timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN |
+                            TIMER0_DIV(TIMER_DIVIDER_SHIFT));
 
        clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
                              "armada_370_xp_clocksource",
@@ -293,3 +290,29 @@ void __init armada_370_xp_timer_init(void)
        if (!res)
                armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
 }
+
+static void __init armada_xp_timer_init(struct device_node *np)
+{
+       struct clk *clk = of_clk_get_by_name(np, "fixed");
+
+       /* The 25 MHz fixed clock is mandatory, and must always be available */
+       BUG_ON(IS_ERR(clk));
+       timer_clk = clk_get_rate(clk);
+
+       armada_370_xp_timer_common_init(np);
+}
+CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
+                      armada_xp_timer_init);
+
+static void __init armada_370_timer_init(struct device_node *np)
+{
+       struct clk *clk = of_clk_get(np, 0);
+
+       BUG_ON(IS_ERR(clk));
+       timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
+       timer25Mhz = false;
+
+       armada_370_xp_timer_common_init(np);
+}
+CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer",
+                      armada_370_timer_init);
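
Two patterns stand out in the hunks above: the repeated open-coded
read-modify-write of TIMER_CTRL_OFF is factored into clrset helpers, and the
driver now registers one init function per compatible string through
CLOCKSOURCE_OF_DECLARE() instead of exporting armada_370_xp_timer_init(). A
generalized sketch of the clrset helper (not itself part of the patch, which
uses the fixed timer_base/local_base globals):

#include <linux/io.h>

/* clear the "clr" bits, then set the "set" bits, in one MMIO register */
static void mmio_clrset(void __iomem *reg, u32 clr, u32 set)
{
        writel((readl(reg) & ~clr) | set, reg);
}

With such a helper, enabling timer 0 reduces to a single call, e.g.
mmio_clrset(timer_base + TIMER_CTRL_OFF, 0, TIMER0_EN | TIMER0_RELOAD_EN |
TIMER0_DIV(TIMER_DIVIDER_SHIFT)), which is the shape the patch uses.
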
index 4fe49d2..eea8172 100644 (file)
@@ -364,7 +364,7 @@ static ssize_t set_pwm1_enable(
        if (config < 0) {
                        dev_err(&client->dev,
                        "Error reading configuration register, aborting.\n");
-                       return -EIO;
+                       return config;
        }
 
        switch (val) {
@@ -416,11 +416,9 @@ static ssize_t get_temp_auto_point_temp(
        case 1:
                return sprintf(buf, "%d\n",
                        data->temp1_auto_point_temp[ix] * 1000);
-               break;
        case 2:
                return sprintf(buf, "%d\n",
                        data->temp2_auto_point_temp[ix] * 1000);
-               break;
        default:
                dev_dbg(dev, "Unknown attr->nr (%d).\n", nr);
                return -EINVAL;
@@ -513,7 +511,6 @@ static ssize_t set_temp_auto_point_temp(
                                count = -EIO;
                }
                goto EXIT;
-               break;
        case 1:
                ptemp[1] = clamp_val(val / 1000, (ptemp[0] & 0x7C) + 4, 124);
                ptemp[1] &= 0x7C;
@@ -665,7 +662,7 @@ static ssize_t set_fan1_div(
        if (config < 0) {
                dev_err(&client->dev,
                        "Error reading configuration register, aborting.\n");
-               return -EIO;
+               return config;
        }
        mutex_lock(&data->update_lock);
        switch (val) {
index b073056..2c137b2 100644 (file)
@@ -248,7 +248,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
 
        int result = kstrtol(buf, 10, &val);
        if (result < 0)
-               return -EINVAL;
+               return result;
 
        val = DIV_ROUND_CLOSEST(val, 1000);
        if ((val < -63) || (val > 127))
@@ -272,7 +272,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
 
        int result = kstrtol(buf, 10, &val);
        if (result < 0)
-               return -EINVAL;
+               return result;
 
        val = DIV_ROUND_CLOSEST(val, 1000);
        if ((val < -63) || (val > 127))
@@ -320,7 +320,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
 
        int status = kstrtol(buf, 10, &new_div);
        if (status < 0)
-               return -EINVAL;
+               return status;
 
        if (new_div == old_div) /* No change */
                return count;
@@ -394,7 +394,7 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
 
        int result = kstrtol(buf, 10, &rpm_target);
        if (result < 0)
-               return -EINVAL;
+               return result;
 
        /* Datasheet states 16384 as maximum RPM target (table 3.2) */
        if ((rpm_target < 0) || (rpm_target > 16384))
@@ -440,7 +440,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
 
        int result = kstrtol(buf, 10, &new_value);
        if (result < 0)
-               return -EINVAL;
+               return result;
 
        mutex_lock(&data->update_lock);
        switch (new_value) {
index e2b56a2..632f1dc 100644 (file)
@@ -292,7 +292,7 @@ static int aem_init_ipmi_data(struct aem_ipmi_data *data, int iface,
                dev_err(bmc,
                        "Unable to register user with IPMI interface %d\n",
                        data->interface);
-               return -EACCES;
+               return err;
        }
 
        return 0;
index e633856..d65f3fd 100644 (file)
@@ -202,7 +202,6 @@ static void k10temp_remove(struct pci_dev *pdev)
                           &sensor_dev_attr_temp1_crit.dev_attr);
        device_remove_file(&pdev->dev,
                           &sensor_dev_attr_temp1_crit_hyst.dev_attr);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static DEFINE_PCI_DEVICE_TABLE(k10temp_id_table) = {
index 964c1d6..ae26b06 100644 (file)
@@ -210,7 +210,7 @@ static int tmp421_init_client(struct i2c_client *client)
        if (config < 0) {
                dev_err(&client->dev,
                        "Could not read configuration register (%d)\n", config);
-               return -ENODEV;
+               return config;
        }
 
        config_orig = config;
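
The hwmon hunks above (amc6821, emc2103, ibmaem, tmp421) share one theme:
return the negative errno actually produced by kstrtol() or by the failed
register read, instead of flattening it to -EIO, -EINVAL or -ENODEV. A sketch
of the preferred sysfs store-callback shape (attribute name and limits are
hypothetical):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t set_some_limit(struct device *dev, struct device_attribute *da,
                              const char *buf, size_t count)
{
        long val;
        int err;

        err = kstrtol(buf, 10, &val);
        if (err < 0)
                return err;     /* propagate the real parse error */

        if (val < -63 || val > 127)
                return -EINVAL; /* range checks still use -EINVAL */

        /* ... write val to the device, propagating any I/O error ... */
        return count;
}
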
index d2b34fb..b6ded17 100644 (file)
@@ -48,6 +48,7 @@ struct evdev_client {
        struct evdev *evdev;
        struct list_head node;
        int clkid;
+       bool revoked;
        unsigned int bufsize;
        struct input_event buffer[];
 };
@@ -164,6 +165,9 @@ static void evdev_pass_values(struct evdev_client *client,
        struct input_event event;
        bool wakeup = false;
 
+       if (client->revoked)
+               return;
+
        event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ?
                                      mono : real);
 
@@ -240,7 +244,7 @@ static int evdev_flush(struct file *file, fl_owner_t id)
        if (retval)
                return retval;
 
-       if (!evdev->exist)
+       if (!evdev->exist || client->revoked)
                retval = -ENODEV;
        else
                retval = input_flush_device(&evdev->handle, file);
@@ -429,7 +433,7 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
        if (retval)
                return retval;
 
-       if (!evdev->exist) {
+       if (!evdev->exist || client->revoked) {
                retval = -ENODEV;
                goto out;
        }
@@ -482,7 +486,7 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
                return -EINVAL;
 
        for (;;) {
-               if (!evdev->exist)
+               if (!evdev->exist || client->revoked)
                        return -ENODEV;
 
                if (client->packet_head == client->tail &&
@@ -511,7 +515,7 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
                if (!(file->f_flags & O_NONBLOCK)) {
                        error = wait_event_interruptible(evdev->wait,
                                        client->packet_head != client->tail ||
-                                       !evdev->exist);
+                                       !evdev->exist || client->revoked);
                        if (error)
                                return error;
                }
@@ -529,7 +533,11 @@ static unsigned int evdev_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &evdev->wait, wait);
 
-       mask = evdev->exist ? POLLOUT | POLLWRNORM : POLLHUP | POLLERR;
+       if (evdev->exist && !client->revoked)
+               mask = POLLOUT | POLLWRNORM;
+       else
+               mask = POLLHUP | POLLERR;
+
        if (client->packet_head != client->tail)
                mask |= POLLIN | POLLRDNORM;
 
@@ -795,6 +803,17 @@ static int evdev_handle_mt_request(struct input_dev *dev,
        return 0;
 }
 
+static int evdev_revoke(struct evdev *evdev, struct evdev_client *client,
+                       struct file *file)
+{
+       client->revoked = true;
+       evdev_ungrab(evdev, client);
+       input_flush_device(&evdev->handle, file);
+       wake_up_interruptible(&evdev->wait);
+
+       return 0;
+}
+
 static long evdev_do_ioctl(struct file *file, unsigned int cmd,
                           void __user *p, int compat_mode)
 {
@@ -857,6 +876,12 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
                else
                        return evdev_ungrab(evdev, client);
 
+       case EVIOCREVOKE:
+               if (p)
+                       return -EINVAL;
+               else
+                       return evdev_revoke(evdev, client, file);
+
        case EVIOCSCLOCKID:
                if (copy_from_user(&i, p, sizeof(unsigned int)))
                        return -EFAULT;
@@ -1002,7 +1027,7 @@ static long evdev_ioctl_handler(struct file *file, unsigned int cmd,
        if (retval)
                return retval;
 
-       if (!evdev->exist) {
+       if (!evdev->exist || client->revoked) {
                retval = -ENODEV;
                goto out;
        }
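
EVIOCREVOKE, added above, lets a process that handed an evdev file descriptor
to a less privileged client later revoke that client's access: the descriptor
stays open, but reads, writes and new events behave as if the device had gone
away (-ENODEV / POLLHUP). A user-space usage sketch (the ioctl takes no
argument, so NULL is passed; it needs kernel headers that define EVIOCREVOKE,
and the device node below is hypothetical):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
        int fd = open("/dev/input/event0", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* ... pass fd to a client, e.g. over an SCM_RIGHTS message ... */

        if (ioctl(fd, EVIOCREVOKE, NULL) < 0)   /* client's access is now dead */
                perror("EVIOCREVOKE");

        close(fd);
        return 0;
}
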
index 1542751..f5aa4b0 100644 (file)
@@ -1343,7 +1343,7 @@ out:
 static int invalidate_fastmap(struct ubi_device *ubi,
                              struct ubi_fastmap_layout *fm)
 {
-       int ret, i;
+       int ret;
        struct ubi_vid_hdr *vh;
 
        ret = erase_block(ubi, fm->e[0]->pnum);
@@ -1360,9 +1360,6 @@ static int invalidate_fastmap(struct ubi_device *ubi,
        vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
        ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
 
-       for (i = 0; i < fm->used_blocks; i++)
-               ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]);
-
        return ret;
 }
 
index 5df49d3..c95bfb1 100644 (file)
@@ -1069,6 +1069,9 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
                        dbg_wl("no WL needed: min used EC %d, max free EC %d",
                               e1->ec, e2->ec);
+
+                       /* Give the unused PEB back */
+                       wl_tree_add(e2, &ubi->free);
                        goto out_cancel;
                }
                self_check_in_wl_tree(ubi, e1, &ubi->used);
index 3d86ffe..94edc9c 100644 (file)
@@ -725,6 +725,7 @@ static irqreturn_t lance_dma_merr_int(int irq, void *dev_id)
 {
        struct net_device *dev = dev_id;
 
+       clear_ioasic_dma_irq(irq);
        printk(KERN_ERR "%s: DMA error\n", dev->name);
        return IRQ_HANDLED;
 }
index 36a9e60..96d6b2e 100644 (file)
@@ -732,6 +732,7 @@ config SAMSUNG_LAPTOP
        tristate "Samsung Laptop driver"
        depends on X86
        depends on RFKILL || RFKILL = n
+       depends on ACPI_VIDEO || ACPI_VIDEO = n
        depends on BACKLIGHT_CLASS_DEVICE
        select LEDS_CLASS
        select NEW_LEDS
@@ -764,7 +765,7 @@ config INTEL_OAKTRAIL
 
 config SAMSUNG_Q10
        tristate "Samsung Q10 Extras"
-       depends on SERIO_I8042
+       depends on ACPI
        select BACKLIGHT_CLASS_DEVICE
        ---help---
          This driver provides support for backlight control on Samsung Q10
index 6296f07..da36b5e 100644 (file)
@@ -85,6 +85,13 @@ static const struct dmi_system_id amilo_rfkill_id_table[] = {
        {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+                       DMI_MATCH(DMI_BOARD_NAME, "AMILO L1310"),
+               },
+               .driver_data = (void *)&amilo_a1655_rfkill_ops
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
                        DMI_MATCH(DMI_BOARD_NAME, "AMILO M7440"),
                },
                .driver_data = (void *)&amilo_m7440_rfkill_ops
index 36e5e6c..6dfa8d3 100644 (file)
@@ -590,7 +590,7 @@ static ssize_t cmpc_accel_sensitivity_store(struct device *dev,
        inputdev = dev_get_drvdata(&acpi->dev);
        accel = dev_get_drvdata(&inputdev->dev);
 
-       r = strict_strtoul(buf, 0, &sensitivity);
+       r = kstrtoul(buf, 0, &sensitivity);
        if (r)
                return r;
 
index 475cc52..eaa78ed 100644 (file)
@@ -425,7 +425,8 @@ static ssize_t pwm_enable_store(struct device *dev,
        struct compal_data *data = dev_get_drvdata(dev);
        long val;
        int err;
-       err = strict_strtol(buf, 10, &val);
+
+       err = kstrtol(buf, 10, &val);
        if (err)
                return err;
        if (val < 0)
@@ -463,7 +464,8 @@ static ssize_t pwm_store(struct device *dev, struct device_attribute *attr,
        struct compal_data *data = dev_get_drvdata(dev);
        long val;
        int err;
-       err = strict_strtol(buf, 10, &val);
+
+       err = kstrtol(buf, 10, &val);
        if (err)
                return err;
        if (val < 0 || val > 255)
@@ -1081,7 +1083,6 @@ static int compal_remove(struct platform_device *pdev)
        hwmon_device_unregister(data->hwmon_dev);
        power_supply_unregister(&data->psy);
 
-       platform_set_drvdata(pdev, NULL);
        kfree(data);
 
        sysfs_remove_group(&pdev->dev.kobj, &compal_attribute_group);
index d6970f4..1c86fa0 100644 (file)
@@ -725,7 +725,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
                                           (void *) HPWMI_WWAN);
                if (!wwan_rfkill) {
                        err = -ENOMEM;
-                       goto register_gps_error;
+                       goto register_bluetooth_error;
                }
                rfkill_init_sw_state(wwan_rfkill,
                                     hp_wmi_get_sw_state(HPWMI_WWAN));
@@ -733,7 +733,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
                                    hp_wmi_get_hw_state(HPWMI_WWAN));
                err = rfkill_register(wwan_rfkill);
                if (err)
-                       goto register_wwan_err;
+                       goto register_wwan_error;
        }
 
        if (wireless & 0x8) {
@@ -743,7 +743,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
                                                (void *) HPWMI_GPS);
                if (!gps_rfkill) {
                        err = -ENOMEM;
-                       goto register_bluetooth_error;
+                       goto register_wwan_error;
                }
                rfkill_init_sw_state(gps_rfkill,
                                     hp_wmi_get_sw_state(HPWMI_GPS));
@@ -755,16 +755,16 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
        }
 
        return 0;
-register_wwan_err:
-       rfkill_destroy(wwan_rfkill);
-       wwan_rfkill = NULL;
-       if (gps_rfkill)
-               rfkill_unregister(gps_rfkill);
 register_gps_error:
        rfkill_destroy(gps_rfkill);
        gps_rfkill = NULL;
        if (bluetooth_rfkill)
                rfkill_unregister(bluetooth_rfkill);
+register_wwan_error:
+       rfkill_destroy(wwan_rfkill);
+       wwan_rfkill = NULL;
+       if (gps_rfkill)
+               rfkill_unregister(gps_rfkill);
 register_bluetooth_error:
        rfkill_destroy(bluetooth_rfkill);
        bluetooth_rfkill = NULL;
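
The hp-wmi reordering above restores the usual error-unwinding idiom: cleanup
labels appear in reverse order of acquisition, so a failure at step N jumps to
the label that releases step N-1 and then falls through the remaining
cleanups, never touching objects that were not yet created. A skeletal sketch
of the pattern (all names hypothetical):

static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int setup_all(void)
{
        int err;

        err = acquire_a();
        if (err)
                return err;

        err = acquire_b();
        if (err)
                goto undo_a;

        err = acquire_c();
        if (err)
                goto undo_b;

        return 0;

undo_b:
        release_b();
undo_a:
        release_a();
        return err;
}
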
index 9385afd..41b740c 100644 (file)
@@ -193,17 +193,6 @@ static struct acpi_driver irst_driver = {
        },
 };
 
-static int irst_init(void)
-{
-       return acpi_bus_register_driver(&irst_driver);
-}
-
-static void irst_exit(void)
-{
-       acpi_bus_unregister_driver(&irst_driver);
-}
-
-module_init(irst_init);
-module_exit(irst_exit);
+module_acpi_driver(irst_driver);
 
 MODULE_DEVICE_TABLE(acpi, irst_ids);
index f74e93d..52259dc 100644 (file)
@@ -74,17 +74,6 @@ static struct acpi_driver smartconnect_driver = {
        },
 };
 
-static int smartconnect_init(void)
-{
-       return acpi_bus_register_driver(&smartconnect_driver);
-}
-
-static void smartconnect_exit(void)
-{
-       acpi_bus_unregister_driver(&smartconnect_driver);
-}
-
-module_init(smartconnect_init);
-module_exit(smartconnect_exit);
+module_acpi_driver(smartconnect_driver);
 
 MODULE_DEVICE_TABLE(acpi, smartconnect_ids);
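
Both the intel-rst and intel-smartconnect hunks delete hand-written module
init/exit functions in favour of module_acpi_driver(), which generates the
same register/unregister boilerplate. Roughly, the macro stands in for the
kind of code the hunks remove (driver name hypothetical):

#include <linux/acpi.h>
#include <linux/module.h>

static struct acpi_driver my_driver = {
        .name = "my_driver",
};

static int __init my_driver_init(void)
{
        return acpi_bus_register_driver(&my_driver);
}
module_init(my_driver_init);

static void __exit my_driver_exit(void)
{
        acpi_bus_unregister_driver(&my_driver);
}
module_exit(my_driver_exit);
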
index f59683a..6b18aba 100644 (file)
@@ -128,7 +128,6 @@ static int mfld_pb_remove(struct platform_device *pdev)
 
        free_irq(irq, input);
        input_unregister_device(input);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
index 81c491e..93fab8b 100644 (file)
@@ -542,7 +542,6 @@ static int mid_thermal_remove(struct platform_device *pdev)
        }
 
        kfree(pinfo);
-       platform_set_drvdata(pdev, NULL);
 
        /* Stop the ADC */
        return configure_adc(0);
index 984253d..10d12b2 100644 (file)
@@ -643,23 +643,6 @@ out_hotkey:
        return result;
 }
 
-static int __init acpi_pcc_init(void)
-{
-       int result = 0;
-
-       if (acpi_disabled)
-               return -ENODEV;
-
-       result = acpi_bus_register_driver(&acpi_pcc_driver);
-       if (result < 0) {
-               ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-                                 "Error registering hotkey driver\n"));
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
 static int acpi_pcc_hotkey_remove(struct acpi_device *device)
 {
        struct pcc_acpi *pcc = acpi_driver_data(device);
@@ -679,10 +662,4 @@ static int acpi_pcc_hotkey_remove(struct acpi_device *device)
        return 0;
 }
 
-static void __exit acpi_pcc_exit(void)
-{
-       acpi_bus_unregister_driver(&acpi_pcc_driver);
-}
-
-module_init(acpi_pcc_init);
-module_exit(acpi_pcc_exit);
+module_acpi_driver(acpi_pcc_driver);
index 4430b8c..cae7098 100644 (file)
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/backlight.h>
-#include <linux/i8042.h>
 #include <linux/dmi.h>
+#include <acpi/acpi_drivers.h>
 
-#define SAMSUNGQ10_BL_MAX_INTENSITY      255
-#define SAMSUNGQ10_BL_DEFAULT_INTENSITY  185
+#define SAMSUNGQ10_BL_MAX_INTENSITY 7
 
-#define SAMSUNGQ10_BL_8042_CMD           0xbe
-#define SAMSUNGQ10_BL_8042_DATA          { 0x89, 0x91 }
-
-static int samsungq10_bl_brightness;
+static acpi_handle ec_handle;
 
 static bool force;
 module_param(force, bool, 0);
@@ -33,21 +29,26 @@ MODULE_PARM_DESC(force,
 static int samsungq10_bl_set_intensity(struct backlight_device *bd)
 {
 
-       int brightness = bd->props.brightness;
-       unsigned char c[3] = SAMSUNGQ10_BL_8042_DATA;
+       acpi_status status;
+       int i;
 
-       c[2] = (unsigned char)brightness;
-       i8042_lock_chip();
-       i8042_command(c, (0x30 << 8) | SAMSUNGQ10_BL_8042_CMD);
-       i8042_unlock_chip();
-       samsungq10_bl_brightness = brightness;
+       for (i = 0; i < SAMSUNGQ10_BL_MAX_INTENSITY; i++) {
+               status = acpi_evaluate_object(ec_handle, "_Q63", NULL, NULL);
+               if (ACPI_FAILURE(status))
+                       return -EIO;
+       }
+       for (i = 0; i < bd->props.brightness; i++) {
+               status = acpi_evaluate_object(ec_handle, "_Q64", NULL, NULL);
+               if (ACPI_FAILURE(status))
+                       return -EIO;
+       }
 
        return 0;
 }
 
 static int samsungq10_bl_get_intensity(struct backlight_device *bd)
 {
-       return samsungq10_bl_brightness;
+       return bd->props.brightness;
 }
 
 static const struct backlight_ops samsungq10_bl_ops = {
@@ -55,28 +56,6 @@ static const struct backlight_ops samsungq10_bl_ops = {
        .update_status  = samsungq10_bl_set_intensity,
 };
 
-#ifdef CONFIG_PM_SLEEP
-static int samsungq10_suspend(struct device *dev)
-{
-       return 0;
-}
-
-static int samsungq10_resume(struct device *dev)
-{
-
-       struct backlight_device *bd = dev_get_drvdata(dev);
-
-       samsungq10_bl_set_intensity(bd);
-       return 0;
-}
-#else
-#define samsungq10_suspend NULL
-#define samsungq10_resume  NULL
-#endif
-
-static SIMPLE_DEV_PM_OPS(samsungq10_pm_ops,
-                         samsungq10_suspend, samsungq10_resume);
-
 static int samsungq10_probe(struct platform_device *pdev)
 {
 
@@ -93,9 +72,6 @@ static int samsungq10_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, bd);
 
-       bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY;
-       samsungq10_bl_set_intensity(bd);
-
        return 0;
 }
 
@@ -104,9 +80,6 @@ static int samsungq10_remove(struct platform_device *pdev)
 
        struct backlight_device *bd = platform_get_drvdata(pdev);
 
-       bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY;
-       samsungq10_bl_set_intensity(bd);
-
        backlight_device_unregister(bd);
 
        return 0;
@@ -116,7 +89,6 @@ static struct platform_driver samsungq10_driver = {
        .driver         = {
                .name   = KBUILD_MODNAME,
                .owner  = THIS_MODULE,
-               .pm     = &samsungq10_pm_ops,
        },
        .probe          = samsungq10_probe,
        .remove         = samsungq10_remove,
@@ -172,6 +144,11 @@ static int __init samsungq10_init(void)
        if (!force && !dmi_check_system(samsungq10_dmi_table))
                return -ENODEV;
 
+       ec_handle = ec_get_handle();
+
+       if (!ec_handle)
+               return -ENODEV;
+
        samsungq10_device = platform_create_bundle(&samsungq10_driver,
                                                   samsungq10_probe,
                                                   NULL, 0, NULL, 0);
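
The new samsung-q10 backlight path has no way to set an absolute level, so
samsungq10_bl_set_intensity() first steps the EC all the way down by evaluating
_Q63 SAMSUNGQ10_BL_MAX_INTENSITY times and then steps up to the requested level
with _Q64. A small sketch of that "drive to a known floor, then walk up"
approach with the ACPI calls abstracted away (all names hypothetical):

static int set_level(int max_level, int wanted,
                     int (*step_down)(void), int (*step_up)(void))
{
        int i, err;

        for (i = 0; i < max_level; i++) {       /* force the level to 0 */
                err = step_down();
                if (err < 0)
                        return err;
        }
        for (i = 0; i < wanted; i++) {          /* walk up to the target */
                err = step_up();
                if (err < 0)
                        return err;
        }
        return 0;
}
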
index be67e5e..03ca6c1 100644 (file)
@@ -369,7 +369,7 @@ struct tpacpi_led_classdev {
        struct led_classdev led_classdev;
        struct work_struct work;
        enum led_status_t new_state;
-       unsigned int led;
+       int led;
 };
 
 /* brightness level capabilities */
@@ -5296,6 +5296,16 @@ static int __init led_init(struct ibm_init_struct *iibm)
 
        led_supported = led_init_detect_mode();
 
+       if (led_supported != TPACPI_LED_NONE) {
+               useful_leds = tpacpi_check_quirks(led_useful_qtable,
+                               ARRAY_SIZE(led_useful_qtable));
+
+               if (!useful_leds) {
+                       led_handle = NULL;
+                       led_supported = TPACPI_LED_NONE;
+               }
+       }
+
        vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n",
                str_supported(led_supported), led_supported);
 
@@ -5309,10 +5319,9 @@ static int __init led_init(struct ibm_init_struct *iibm)
                return -ENOMEM;
        }
 
-       useful_leds = tpacpi_check_quirks(led_useful_qtable,
-                                         ARRAY_SIZE(led_useful_qtable));
-
        for (i = 0; i < TPACPI_LED_NUMLEDS; i++) {
+               tpacpi_leds[i].led = -1;
+
                if (!tpacpi_is_led_restricted(i) &&
                    test_bit(i, &useful_leds)) {
                        rc = tpacpi_init_led(i);
@@ -5370,9 +5379,13 @@ static int led_write(char *buf)
                return -ENODEV;
 
        while ((cmd = next_cmd(&buf))) {
-               if (sscanf(cmd, "%d", &led) != 1 || led < 0 || led > 15)
+               if (sscanf(cmd, "%d", &led) != 1)
                        return -EINVAL;
 
+               if (led < 0 || led > (TPACPI_LED_NUMLEDS - 1) ||
+                               tpacpi_leds[led].led < 0)
+                       return -ENODEV;
+
                if (strstr(cmd, "off")) {
                        s = TPACPI_LED_OFF;
                } else if (strstr(cmd, "on")) {
index 6e02c95..601ea95 100644 (file)
@@ -780,7 +780,7 @@ static bool guid_already_parsed(const char *guid_string)
 /*
  * Parse the _WDG method for the GUID data blocks
  */
-static acpi_status parse_wdg(acpi_handle handle)
+static int parse_wdg(acpi_handle handle)
 {
        struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *obj;
@@ -812,7 +812,7 @@ static acpi_status parse_wdg(acpi_handle handle)
 
                wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
                if (!wblock)
-                       return AE_NO_MEMORY;
+                       return -ENOMEM;
 
                wblock->handle = handle;
                wblock->gblock = gblock[i];
index 6917b4f..22d5a94 100644 (file)
@@ -692,7 +692,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
         * ID as valid.
         */
        if (ahc_get_pci_function(pci) > 0
-        && ahc_9005_subdevinfo_valid(vendor, device, subvendor, subdevice)
+        && ahc_9005_subdevinfo_valid(device, vendor, subdevice, subvendor)
         && SUBID_9005_MFUNCENB(subdevice) == 0)
                return (NULL);
 
index 8582929..2ec3c23 100644 (file)
@@ -860,8 +860,13 @@ bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
                return false;
        }
 
+       if (fsc->command >= cmdcnt) {
+               fs->status = ATTO_STS_INV_FUNC;
+               return false;
+       }
+
        func = cmd_to_fls_func[fsc->command];
-       if (fsc->command >= cmdcnt || func == 0xFF) {
+       if (func == 0xFF) {
                fs->status = ATTO_STS_INV_FUNC;
                return false;
        }
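
The esas2r_flash.c change above is a bounds-check ordering fix: the command
index must be validated against cmdcnt before it is used to index
cmd_to_fls_func[], not in the same condition that already reads the array. A
minimal standalone sketch of the safe ordering (table contents hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const uint8_t cmd_table[] = { 0x01, 0x02, 0xFF, 0x04 };

/* return the handler id for cmd, or -1 if cmd is out of range or unmapped */
static int lookup(size_t cmd)
{
        if (cmd >= sizeof(cmd_table))   /* bound check first ... */
                return -1;
        if (cmd_table[cmd] == 0xFF)     /* ... then inspect the entry */
                return -1;
        return cmd_table[cmd];
}

int main(void)
{
        printf("%d %d %d\n", lookup(0), lookup(2), lookup(9));
        return 0;
}
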
@@ -1355,7 +1360,7 @@ void esas2r_nvram_set_defaults(struct esas2r_adapter *a)
        u32 time = jiffies_to_msecs(jiffies);
 
        esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
-       memcpy(n, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
+       *n = default_sas_nvram;
        n->sas_addr[3] |= 0x0F;
        n->sas_addr[4] = HIBYTE(LOWORD(time));
        n->sas_addr[5] = LOBYTE(LOWORD(time));
@@ -1373,7 +1378,7 @@ void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
         * address out first.
         */
        memcpy(&sas_addr[0], a->nvram->sas_addr, 8);
-       memcpy(nvram, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
+       *nvram = default_sas_nvram;
        memcpy(&nvram->sas_addr[0], &sas_addr[0], 8);
 }
 
index 3a798e7..da1869d 100644 (file)
@@ -665,7 +665,7 @@ void esas2r_kill_adapter(int i)
 
 int esas2r_cleanup(struct Scsi_Host *host)
 {
-       struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
+       struct esas2r_adapter *a;
        int index;
 
        if (host == NULL) {
@@ -678,6 +678,7 @@ int esas2r_cleanup(struct Scsi_Host *host)
        }
 
        esas2r_debug("esas2r_cleanup called for host %p", host);
+       a = (struct esas2r_adapter *)host->hostdata;
        index = a->index;
        esas2r_kill_adapter(index);
        return index;
@@ -808,7 +809,7 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
        int pcie_cap_reg;
 
        pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
-       if (0xffff && pcie_cap_reg) {
+       if (0xffff & pcie_cap_reg) {
                u16 devcontrol;
 
                pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
@@ -1550,8 +1551,7 @@ void esas2r_reset_chip(struct esas2r_adapter *a)
         * to not overwrite a previous crash that was saved.
         */
        if ((a->flags2 & AF2_COREDUMP_AVAIL)
-           && !(a->flags2 & AF2_COREDUMP_SAVED)
-           && a->fw_coredump_buff) {
+           && !(a->flags2 & AF2_COREDUMP_SAVED)) {
                esas2r_read_mem_block(a,
                                      a->fw_coredump_buff,
                                      MW_DATA_ADDR_SRAM + 0x80000,
index f3d0cb8..e5b0902 100644 (file)
@@ -415,7 +415,7 @@ static int csmi_ioctl_callback(struct esas2r_adapter *a,
                lun = tm->lun;
        }
 
-       if (path > 0 || tid > ESAS2R_MAX_ID) {
+       if (path > 0) {
                rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
                        CSMI_STS_INV_PARAM);
                return false;
index f8ec6d6..fd13928 100644 (file)
@@ -302,6 +302,7 @@ static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
                if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
                        struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg;
                        struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp;
+                       char buf[sizeof(cfg->data.init.fw_release) + 1];
 
                        cfg->data_length =
                                cpu_to_le32(sizeof(struct atto_vda_cfg_init));
@@ -309,11 +310,13 @@ static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
                                le32_to_cpu(rsp->vda_version);
                        cfg->data.init.fw_build = rsp->fw_build;
 
-                       sprintf((char *)&cfg->data.init.fw_release,
-                               "%1d.%02d",
+                       snprintf(buf, sizeof(buf), "%1d.%02d",
                                (int)LOBYTE(le16_to_cpu(rsp->fw_release)),
                                (int)HIBYTE(le16_to_cpu(rsp->fw_release)));
 
+                       memcpy(&cfg->data.init.fw_release, buf,
+                              sizeof(cfg->data.init.fw_release));
+
                        if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A')
                                cfg->data.init.fw_version =
                                        cfg->data.init.fw_build;
index c18c681..e4dd3d7 100644 (file)
@@ -43,6 +43,8 @@
 #define DFX                     DRV_NAME "%d: "
 
 #define DESC_CLEAN_LOW_WATERMARK 8
+#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD        16 /* UCSM default throttle count */
+#define FNIC_MIN_IO_REQ                        256 /* Min IO throttle count */
 #define FNIC_MAX_IO_REQ                2048 /* scsi_cmnd tag map entries */
 #define        FNIC_IO_LOCKS           64 /* IO locks: power of 2 */
 #define FNIC_DFLT_QUEUE_DEPTH  32
@@ -154,6 +156,9 @@ do {                                                                \
        FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING,                    \
                         shost_printk(kern_level, host, fmt, ##args);)
 
+#define FNIC_MAIN_NOTE(kern_level, host, fmt, args...)          \
+       shost_printk(kern_level, host, fmt, ##args)
+
 extern const char *fnic_state_str[];
 
 enum fnic_intx_intr_index {
@@ -215,10 +220,12 @@ struct fnic {
 
        struct vnic_stats *stats;
        unsigned long stats_time;       /* time of stats update */
+       unsigned long stats_reset_time; /* time of stats reset */
        struct vnic_nic_cfg *nic_cfg;
        char name[IFNAMSIZ];
        struct timer_list notify_timer; /* used for MSI interrupts */
 
+       unsigned int fnic_max_tag_id;
        unsigned int err_intr_offset;
        unsigned int link_intr_offset;
 
@@ -359,4 +366,5 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
        return ((fnic->state_flags & st_flags) == st_flags);
 }
 void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
+void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *);
 #endif /* _FNIC_H_ */
index 42e15ee..bbf81ea 100644 (file)
@@ -74,6 +74,10 @@ module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
                                        "for fnic trace buffer");
 
+static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
+module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
+
 static struct libfc_function_template fnic_transport_template = {
        .frame_send = fnic_send,
        .lport_set_port_id = fnic_set_port_id,
@@ -91,7 +95,7 @@ static int fnic_slave_alloc(struct scsi_device *sdev)
        if (!rport || fc_remote_port_chkready(rport))
                return -ENXIO;
 
-       scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH);
+       scsi_activate_tcq(sdev, fnic_max_qdepth);
        return 0;
 }
 
@@ -126,6 +130,7 @@ fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
 static void fnic_get_host_speed(struct Scsi_Host *shost);
 static struct scsi_transport_template *fnic_fc_transport;
 static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
+static void fnic_reset_host_stats(struct Scsi_Host *);
 
 static struct fc_function_template fnic_fc_functions = {
 
@@ -153,6 +158,7 @@ static struct fc_function_template fnic_fc_functions = {
        .set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
        .issue_fc_host_lip = fnic_reset,
        .get_fc_host_stats = fnic_get_stats,
+       .reset_fc_host_stats = fnic_reset_host_stats,
        .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
        .terminate_rport_io = fnic_terminate_rport_io,
        .bsg_request = fc_lport_bsg_request,
@@ -206,13 +212,116 @@ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
        stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
        stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
        stats->invalid_crc_count = vs->rx.rx_crc_errors;
-       stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ;
+       stats->seconds_since_last_reset =
+                       (jiffies - fnic->stats_reset_time) / HZ;
        stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
        stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
 
        return stats;
 }
 
+/*
+ * fnic_dump_fchost_stats
+ * note : dumps fc_statistics into system logs
+ */
+void fnic_dump_fchost_stats(struct Scsi_Host *host,
+                               struct fc_host_statistics *stats)
+{
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: seconds since last reset = %llu\n",
+                       stats->seconds_since_last_reset);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: tx frames                = %llu\n",
+                       stats->tx_frames);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: tx words         = %llu\n",
+                       stats->tx_words);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: rx frames                = %llu\n",
+                       stats->rx_frames);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: rx words         = %llu\n",
+                       stats->rx_words);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: lip count                = %llu\n",
+                       stats->lip_count);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: nos count                = %llu\n",
+                       stats->nos_count);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: error frames             = %llu\n",
+                       stats->error_frames);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: dumped frames    = %llu\n",
+                       stats->dumped_frames);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: link failure count       = %llu\n",
+                       stats->link_failure_count);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: loss of sync count       = %llu\n",
+                       stats->loss_of_sync_count);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: loss of signal count     = %llu\n",
+                       stats->loss_of_signal_count);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: prim seq protocol err count = %llu\n",
+                       stats->prim_seq_protocol_err_count);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: invalid tx word count    = %llu\n",
+                       stats->invalid_tx_word_count);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: invalid crc count        = %llu\n",
+                       stats->invalid_crc_count);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: fcp input requests       = %llu\n",
+                       stats->fcp_input_requests);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: fcp output requests      = %llu\n",
+                       stats->fcp_output_requests);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: fcp control requests     = %llu\n",
+                       stats->fcp_control_requests);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: fcp input megabytes      = %llu\n",
+                       stats->fcp_input_megabytes);
+       FNIC_MAIN_NOTE(KERN_NOTICE, host,
+                       "fnic: fcp output megabytes     = %llu\n",
+                       stats->fcp_output_megabytes);
+       return;
+}
+
+/*
+ * fnic_reset_host_stats : clears host stats
+ * note : called when reset_statistics set under sysfs dir
+ */
+static void fnic_reset_host_stats(struct Scsi_Host *host)
+{
+       int ret;
+       struct fc_lport *lp = shost_priv(host);
+       struct fnic *fnic = lport_priv(lp);
+       struct fc_host_statistics *stats;
+       unsigned long flags;
+
+       /* dump current stats, before clearing them */
+       stats = fnic_get_stats(host);
+       fnic_dump_fchost_stats(host, stats);
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       ret = vnic_dev_stats_clear(fnic->vdev);
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+       if (ret) {
+               FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
+                               "fnic: Reset vnic stats failed"
+                               " 0x%x", ret);
+               return;
+       }
+       fnic->stats_reset_time = jiffies;
+       memset(stats, 0, sizeof(*stats));
+
+       return;
+}
+
 void fnic_log_q_error(struct fnic *fnic)
 {
        unsigned int i;
@@ -447,13 +556,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        host->transportt = fnic_fc_transport;
 
-       err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
-       if (err) {
-               shost_printk(KERN_ERR, fnic->lport->host,
-                            "Unable to alloc shared tag map\n");
-               goto err_out_free_hba;
-       }
-
        /* Setup PCI resources */
        pci_set_drvdata(pdev, fnic);
 
@@ -476,10 +578,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_master(pdev);
 
        /* Query PCI controller on system for DMA addressing
-        * limitation for the device.  Try 40-bit first, and
+        * limitation for the device.  Try 64-bit first, and
         * fail to 32-bit.
         */
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
@@ -496,10 +598,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        goto err_out_release_regions;
                }
        } else {
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (err) {
                        shost_printk(KERN_ERR, fnic->lport->host,
-                                    "Unable to obtain 40-bit DMA "
+                                    "Unable to obtain 64-bit DMA "
                                     "for consistent allocations, aborting.\n");
                        goto err_out_release_regions;
                }
@@ -566,6 +668,22 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                             "aborting.\n");
                goto err_out_dev_close;
        }
+
+       /* Configure Maximum Outstanding IO reqs*/
+       if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
+               host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
+                                       max_t(u32, FNIC_MIN_IO_REQ,
+                                       fnic->config.io_throttle_count));
+       }
+       fnic->fnic_max_tag_id = host->can_queue;
+
+       err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id);
+       if (err) {
+               shost_printk(KERN_ERR, fnic->lport->host,
+                         "Unable to alloc shared tag map\n");
+               goto err_out_dev_close;
+       }
+
        host->max_lun = fnic->config.luns_per_tgt;
        host->max_id = FNIC_MAX_FCP_TARGET;
        host->max_cmd_len = FCOE_MAX_CMD_LEN;
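
When the adapter reports an io_throttle_count other than the UCSM build
default, the probe above clamps it to [FNIC_MIN_IO_REQ, FNIC_MAX_IO_REQ] and
uses the result both as host->can_queue and as the shared tag-map size. A
standalone sketch of the clamp:

#include <stdio.h>

#define FNIC_MIN_IO_REQ         256
#define FNIC_MAX_IO_REQ         2048

static unsigned int fnic_can_queue(unsigned int io_throttle_count)
{
        if (io_throttle_count < FNIC_MIN_IO_REQ)
                return FNIC_MIN_IO_REQ;
        if (io_throttle_count > FNIC_MAX_IO_REQ)
                return FNIC_MAX_IO_REQ;
        return io_throttle_count;
}

int main(void)
{
        /* prints "256 1024 2048" */
        printf("%u %u %u\n", fnic_can_queue(64), fnic_can_queue(1024),
               fnic_can_queue(4096));
        return 0;
}
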
@@ -719,6 +837,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        fc_lport_init_stats(lp);
+       fnic->stats_reset_time = jiffies;
 
        fc_lport_config(lp);
 
index a97e6e5..d014aae 100644 (file)
@@ -111,6 +111,12 @@ static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
        return &fnic->io_req_lock[hash];
 }
 
+static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
+                                           int tag)
+{
+       return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
+}
+
 /*
  * Unmap the data buffer and sense buffer for an io_req,
  * also unmap and free the device-private scatter/gather list.
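
The new fnic_io_lock_tag() helper above lets the cleanup and reset paths take
the per-IO lock by tag before calling scsi_host_find_tag(), instead of hashing
from the scsi_cmnd after the lookup. A standalone sketch of the same
tag-to-lock mapping, assuming a placeholder FNIC_IO_LOCKS value (the mask only
works because the array size is a power of two):

#include <assert.h>
#include <stdio.h>

#define FNIC_IO_LOCKS 64	/* placeholder; must be a power of two */

static unsigned int io_lock_index(unsigned int tag)
{
	/* same indexing as &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)] */
	return tag & (FNIC_IO_LOCKS - 1);
}

int main(void)
{
	assert((FNIC_IO_LOCKS & (FNIC_IO_LOCKS - 1)) == 0);
	printf("tag 5   -> lock %u\n", io_lock_index(5));	/* 5 */
	printf("tag 70  -> lock %u\n", io_lock_index(70));	/* 6 */
	printf("tag 197 -> lock %u\n", io_lock_index(197));	/* 5 */
	return 0;
}
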
@@ -730,7 +736,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
        fcpio_tag_id_dec(&tag, &id);
        icmnd_cmpl = &desc->u.icmnd_cmpl;
 
-       if (id >= FNIC_MAX_IO_REQ) {
+       if (id >= fnic->fnic_max_tag_id) {
                shost_printk(KERN_ERR, fnic->lport->host,
                        "Tag out of range tag %x hdr status = %s\n",
                             id, fnic_fcpio_status_to_str(hdr_status));
@@ -818,38 +824,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
                if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
                        xfer_len -= icmnd_cmpl->residual;
 
-               /*
-                * If queue_full, then try to reduce queue depth for all
-                * LUNS on the target. Todo: this should be accompanied
-                * by a periodic queue_depth rampup based on successful
-                * IO completion.
-                */
-               if (icmnd_cmpl->scsi_status == QUEUE_FULL) {
-                       struct scsi_device *t_sdev;
-                       int qd = 0;
-
-                       shost_for_each_device(t_sdev, sc->device->host) {
-                               if (t_sdev->id != sc->device->id)
-                                       continue;
-
-                               if (t_sdev->queue_depth > 1) {
-                                       qd = scsi_track_queue_full
-                                               (t_sdev,
-                                                t_sdev->queue_depth - 1);
-                                       if (qd == -1)
-                                               qd = t_sdev->host->cmd_per_lun;
-                                       shost_printk(KERN_INFO,
-                                                    fnic->lport->host,
-                                                    "scsi[%d:%d:%d:%d"
-                                                    "] queue full detected,"
-                                                    "new depth = %d\n",
-                                                    t_sdev->host->host_no,
-                                                    t_sdev->channel,
-                                                    t_sdev->id, t_sdev->lun,
-                                                    t_sdev->queue_depth);
-                               }
-                       }
-               }
                break;
 
        case FCPIO_TIMEOUT:          /* request was timed out */
@@ -939,7 +913,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
        fcpio_tag_id_dec(&tag, &id);
 
-       if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) {
+       if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
                shost_printk(KERN_ERR, fnic->lport->host,
                "Tag out of range tag %x hdr status = %s\n",
                id, fnic_fcpio_status_to_str(hdr_status));
@@ -988,9 +962,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
                        spin_unlock_irqrestore(io_lock, flags);
                        return;
                }
-               CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
                CMD_ABTS_STATUS(sc) = hdr_status;
-
                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "abts cmpl recd. id %d status %s\n",
@@ -1148,23 +1120,25 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
 
 static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
 {
-       unsigned int i;
+       int i;
        struct fnic_io_req *io_req;
        unsigned long flags = 0;
        struct scsi_cmnd *sc;
        spinlock_t *io_lock;
        unsigned long start_time = 0;
 
-       for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
+       for (i = 0; i < fnic->fnic_max_tag_id; i++) {
                if (i == exclude_id)
                        continue;
 
+               io_lock = fnic_io_lock_tag(fnic, i);
+               spin_lock_irqsave(io_lock, flags);
                sc = scsi_host_find_tag(fnic->lport->host, i);
-               if (!sc)
+               if (!sc) {
+                       spin_unlock_irqrestore(io_lock, flags);
                        continue;
+               }
 
-               io_lock = fnic_io_lock_hash(fnic, sc);
-               spin_lock_irqsave(io_lock, flags);
                io_req = (struct fnic_io_req *)CMD_SP(sc);
                if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
                        !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
@@ -1236,7 +1210,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
        fcpio_tag_id_dec(&desc->hdr.tag, &id);
        id &= FNIC_TAG_MASK;
 
-       if (id >= FNIC_MAX_IO_REQ)
+       if (id >= fnic->fnic_max_tag_id)
                return;
 
        sc = scsi_host_find_tag(fnic->lport->host, id);
@@ -1340,14 +1314,15 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
        if (fnic->in_remove)
                return;
 
-       for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+       for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
                abt_tag = tag;
+               io_lock = fnic_io_lock_tag(fnic, tag);
+               spin_lock_irqsave(io_lock, flags);
                sc = scsi_host_find_tag(fnic->lport->host, tag);
-               if (!sc)
+               if (!sc) {
+                       spin_unlock_irqrestore(io_lock, flags);
                        continue;
-
-               io_lock = fnic_io_lock_hash(fnic, sc);
-               spin_lock_irqsave(io_lock, flags);
+               }
 
                io_req = (struct fnic_io_req *)CMD_SP(sc);
 
@@ -1441,12 +1416,29 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
        unsigned long flags;
        struct scsi_cmnd *sc;
        struct scsi_lun fc_lun;
-       struct fc_rport_libfc_priv *rdata = rport->dd_data;
-       struct fc_lport *lport = rdata->local_port;
-       struct fnic *fnic = lport_priv(lport);
+       struct fc_rport_libfc_priv *rdata;
+       struct fc_lport *lport;
+       struct fnic *fnic;
        struct fc_rport *cmd_rport;
        enum fnic_ioreq_state old_ioreq_state;
 
+       if (!rport) {
+               printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
+               return;
+       }
+       rdata = rport->dd_data;
+
+       if (!rdata) {
+               printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
+               return;
+       }
+       lport = rdata->local_port;
+
+       if (!lport) {
+               printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
+               return;
+       }
+       fnic = lport_priv(lport);
        FNIC_SCSI_DBG(KERN_DEBUG,
                      fnic->lport->host, "fnic_terminate_rport_io called"
                      " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
@@ -1456,18 +1448,21 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
        if (fnic->in_remove)
                return;
 
-       for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+       for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
                abt_tag = tag;
+               io_lock = fnic_io_lock_tag(fnic, tag);
+               spin_lock_irqsave(io_lock, flags);
                sc = scsi_host_find_tag(fnic->lport->host, tag);
-               if (!sc)
+               if (!sc) {
+                       spin_unlock_irqrestore(io_lock, flags);
                        continue;
+               }
 
                cmd_rport = starget_to_rport(scsi_target(sc->device));
-               if (rport != cmd_rport)
+               if (rport != cmd_rport) {
+                       spin_unlock_irqrestore(io_lock, flags);
                        continue;
-
-               io_lock = fnic_io_lock_hash(fnic, sc);
-               spin_lock_irqsave(io_lock, flags);
+               }
 
                io_req = (struct fnic_io_req *)CMD_SP(sc);
 
@@ -1680,13 +1675,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
        io_req->abts_done = NULL;
 
        /* fw did not complete abort, timed out */
-       if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+       if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
                spin_unlock_irqrestore(io_lock, flags);
                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
                ret = FAILED;
                goto fnic_abort_cmd_end;
        }
 
+       CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+
        /*
         * firmware completed the abort, check the status,
         * free the io_req irrespective of failure or success
@@ -1784,17 +1781,18 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
        DECLARE_COMPLETION_ONSTACK(tm_done);
        enum fnic_ioreq_state old_ioreq_state;
 
-       for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+       for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
+               io_lock = fnic_io_lock_tag(fnic, tag);
+               spin_lock_irqsave(io_lock, flags);
                sc = scsi_host_find_tag(fnic->lport->host, tag);
                /*
                 * ignore this lun reset cmd or cmds that do not belong to
                 * this lun
                 */
-               if (!sc || sc == lr_sc || sc->device != lun_dev)
+               if (!sc || sc == lr_sc || sc->device != lun_dev) {
+                       spin_unlock_irqrestore(io_lock, flags);
                        continue;
-
-               io_lock = fnic_io_lock_hash(fnic, sc);
-               spin_lock_irqsave(io_lock, flags);
+               }
 
                io_req = (struct fnic_io_req *)CMD_SP(sc);
 
@@ -1823,6 +1821,11 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
                        spin_unlock_irqrestore(io_lock, flags);
                        continue;
                }
+
+               if (io_req->abts_done)
+                       shost_printk(KERN_ERR, fnic->lport->host,
+                         "%s: io_req->abts_done is set state is %s\n",
+                         __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
                old_ioreq_state = CMD_STATE(sc);
                /*
                 * Any pending IO issued prior to reset is expected to be
@@ -1833,11 +1836,6 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
                 */
                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
 
-               if (io_req->abts_done)
-                       shost_printk(KERN_ERR, fnic->lport->host,
-                         "%s: io_req->abts_done is set state is %s\n",
-                         __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
-
                BUG_ON(io_req->abts_done);
 
                abt_tag = tag;
@@ -1890,12 +1888,13 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
                io_req->abts_done = NULL;
 
                /* if abort is still pending with fw, fail */
-               if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+               if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
                        spin_unlock_irqrestore(io_lock, flags);
                        CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
                        ret = 1;
                        goto clean_pending_aborts_end;
                }
+               CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
                CMD_SP(sc) = NULL;
                spin_unlock_irqrestore(io_lock, flags);
 
@@ -2093,8 +2092,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
                spin_unlock_irqrestore(io_lock, flags);
                int_to_scsilun(sc->device->lun, &fc_lun);
                /*
-                * Issue abort and terminate on the device reset request.
-                * If q'ing of the abort fails, retry issue it after a delay.
+                * Issue abort and terminate on device reset request.
+                * If q'ing of terminate fails, retry it after a delay.
                 */
                while (1) {
                        spin_lock_irqsave(io_lock, flags);
@@ -2405,7 +2404,7 @@ int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
                lun_dev = lr_sc->device;
 
        /* walk again to check, if IOs are still pending in fw */
-       for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+       for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
                sc = scsi_host_find_tag(fnic->lport->host, tag);
                /*
                 * ignore this lun reset cmd or cmds that do not belong to
index fbb5536..e343e1d 100644
@@ -54,8 +54,8 @@
 #define VNIC_FNIC_PLOGI_TIMEOUT_MIN         1000
 #define VNIC_FNIC_PLOGI_TIMEOUT_MAX         255000
 
-#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN     256
-#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX     4096
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN     1
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX     2048
 
 #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN     0
 #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX     240000
index fac8cf5..891c86b 100644
@@ -54,7 +54,7 @@
 #include "hpsa.h"
 
 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "2.0.2-1"
+#define HPSA_DRIVER_VERSION "3.4.0-1"
 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
 #define HPSA "hpsa"
 
@@ -89,13 +89,14 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
-       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324a},
-       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324b},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x334D},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
@@ -107,7 +108,19 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1925},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
-       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x334d},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
        {PCI_VENDOR_ID_HP,     PCI_ANY_ID,      PCI_ANY_ID, PCI_ANY_ID,
                PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
        {0,}
@@ -125,24 +138,35 @@ static struct board_type products[] = {
        {0x3245103C, "Smart Array P410i", &SA5_access},
        {0x3247103C, "Smart Array P411", &SA5_access},
        {0x3249103C, "Smart Array P812", &SA5_access},
-       {0x324a103C, "Smart Array P712m", &SA5_access},
-       {0x324b103C, "Smart Array P711m", &SA5_access},
+       {0x324A103C, "Smart Array P712m", &SA5_access},
+       {0x324B103C, "Smart Array P711m", &SA5_access},
        {0x3350103C, "Smart Array P222", &SA5_access},
        {0x3351103C, "Smart Array P420", &SA5_access},
        {0x3352103C, "Smart Array P421", &SA5_access},
        {0x3353103C, "Smart Array P822", &SA5_access},
+       {0x334D103C, "Smart Array P822se", &SA5_access},
        {0x3354103C, "Smart Array P420i", &SA5_access},
        {0x3355103C, "Smart Array P220i", &SA5_access},
        {0x3356103C, "Smart Array P721m", &SA5_access},
-       {0x1920103C, "Smart Array", &SA5_access},
-       {0x1921103C, "Smart Array", &SA5_access},
-       {0x1922103C, "Smart Array", &SA5_access},
-       {0x1923103C, "Smart Array", &SA5_access},
-       {0x1924103C, "Smart Array", &SA5_access},
-       {0x1925103C, "Smart Array", &SA5_access},
-       {0x1926103C, "Smart Array", &SA5_access},
-       {0x1928103C, "Smart Array", &SA5_access},
-       {0x334d103C, "Smart Array P822se", &SA5_access},
+       {0x1921103C, "Smart Array P830i", &SA5_access},
+       {0x1922103C, "Smart Array P430", &SA5_access},
+       {0x1923103C, "Smart Array P431", &SA5_access},
+       {0x1924103C, "Smart Array P830", &SA5_access},
+       {0x1926103C, "Smart Array P731m", &SA5_access},
+       {0x1928103C, "Smart Array P230i", &SA5_access},
+       {0x1929103C, "Smart Array P530", &SA5_access},
+       {0x21BD103C, "Smart Array", &SA5_access},
+       {0x21BE103C, "Smart Array", &SA5_access},
+       {0x21BF103C, "Smart Array", &SA5_access},
+       {0x21C0103C, "Smart Array", &SA5_access},
+       {0x21C1103C, "Smart Array", &SA5_access},
+       {0x21C2103C, "Smart Array", &SA5_access},
+       {0x21C3103C, "Smart Array", &SA5_access},
+       {0x21C4103C, "Smart Array", &SA5_access},
+       {0x21C5103C, "Smart Array", &SA5_access},
+       {0x21C7103C, "Smart Array", &SA5_access},
+       {0x21C8103C, "Smart Array", &SA5_access},
+       {0x21C9103C, "Smart Array", &SA5_access},
        {0xFFFF103C, "Unknown Smart Array", &SA5_access},
 };
 
index 4e31caa..23f5ba5 100644
@@ -2208,7 +2208,10 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
 
        if (rsp_rc != 0) {
                sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
-               return -EIO;
+               /* If a failure is received, the host adapter is most likely
+                * going through reset. Return success so the caller will wait
+                * for the command being cancelled to be returned.
+                */
+               return 0;
        }
 
        sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
@@ -2221,7 +2224,15 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
 
        if (status != IBMVFC_MAD_SUCCESS) {
                sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
-               return -EIO;
+               switch (status) {
+               case IBMVFC_MAD_DRIVER_FAILED:
+               case IBMVFC_MAD_CRQ_ERROR:
+                       /* Host adapter is most likely going through reset.
+                        * Return success so the caller will wait for the
+                        * command being cancelled to be returned.
+                        */
+                       return 0;
+               default:
+                       return -EIO;
+               }
        }
 
        sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
index d0fa4b6..fa76440 100644
@@ -241,7 +241,7 @@ static void gather_partition_info(void)
        struct device_node *rootdn;
 
        const char *ppartition_name;
-       const unsigned int *p_number_ptr;
+       const __be32 *p_number_ptr;
 
        /* Retrieve information about this partition */
        rootdn = of_find_node_by_path("/");
@@ -255,7 +255,7 @@ static void gather_partition_info(void)
                                sizeof(partition_name));
        p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
        if (p_number_ptr)
-               partition_number = *p_number_ptr;
+               partition_number = of_read_number(p_number_ptr, 1);
        of_node_put(rootdn);
 }
 
@@ -270,10 +270,11 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
        strncpy(hostdata->madapter_info.partition_name, partition_name,
                        sizeof(hostdata->madapter_info.partition_name));
 
-       hostdata->madapter_info.partition_number = partition_number;
+       hostdata->madapter_info.partition_number =
+                                       cpu_to_be32(partition_number);
 
-       hostdata->madapter_info.mad_version = 1;
-       hostdata->madapter_info.os_type = 2;
+       hostdata->madapter_info.mad_version = cpu_to_be32(1);
+       hostdata->madapter_info.os_type = cpu_to_be32(2);
 }
 
 /**
@@ -464,9 +465,9 @@ static int initialize_event_pool(struct event_pool *pool,
                memset(&evt->crq, 0x00, sizeof(evt->crq));
                atomic_set(&evt->free, 1);
                evt->crq.valid = 0x80;
-               evt->crq.IU_length = sizeof(*evt->xfer_iu);
-               evt->crq.IU_data_ptr = pool->iu_token + 
-                       sizeof(*evt->xfer_iu) * i;
+               evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu));
+               evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token +
+                       sizeof(*evt->xfer_iu) * i);
                evt->xfer_iu = pool->iu_storage + i;
                evt->hostdata = hostdata;
                evt->ext_list = NULL;
@@ -588,7 +589,7 @@ static void init_event_struct(struct srp_event_struct *evt_struct,
        evt_struct->cmnd_done = NULL;
        evt_struct->sync_srp = NULL;
        evt_struct->crq.format = format;
-       evt_struct->crq.timeout = timeout;
+       evt_struct->crq.timeout = cpu_to_be16(timeout);
        evt_struct->done = done;
 }
 
@@ -659,8 +660,8 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
 
        scsi_for_each_sg(cmd, sg, nseg, i) {
                struct srp_direct_buf *descr = md + i;
-               descr->va = sg_dma_address(sg);
-               descr->len = sg_dma_len(sg);
+               descr->va = cpu_to_be64(sg_dma_address(sg));
+               descr->len = cpu_to_be32(sg_dma_len(sg));
                descr->key = 0;
                total_length += sg_dma_len(sg);
        }
@@ -703,13 +704,14 @@ static int map_sg_data(struct scsi_cmnd *cmd,
        }
 
        indirect->table_desc.va = 0;
-       indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
+       indirect->table_desc.len = cpu_to_be32(sg_mapped *
+                                              sizeof(struct srp_direct_buf));
        indirect->table_desc.key = 0;
 
        if (sg_mapped <= MAX_INDIRECT_BUFS) {
                total_length = map_sg_list(cmd, sg_mapped,
                                           &indirect->desc_list[0]);
-               indirect->len = total_length;
+               indirect->len = cpu_to_be32(total_length);
                return 1;
        }
 
@@ -731,9 +733,10 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 
        total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
 
-       indirect->len = total_length;
-       indirect->table_desc.va = evt_struct->ext_list_token;
-       indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
+       indirect->len = cpu_to_be32(total_length);
+       indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token);
+       indirect->table_desc.len = cpu_to_be32(sg_mapped *
+                                              sizeof(indirect->desc_list[0]));
        memcpy(indirect->desc_list, evt_struct->ext_list,
               MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
        return 1;
@@ -849,7 +852,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
                                   struct ibmvscsi_host_data *hostdata,
                                   unsigned long timeout)
 {
-       u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
+       __be64 *crq_as_u64 = (__be64 *)&evt_struct->crq;
        int request_status = 0;
        int rc;
        int srp_req = 0;
@@ -920,8 +923,9 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
                add_timer(&evt_struct->timer);
        }
 
-       if ((rc =
-            ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
+       rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]),
+                              be64_to_cpu(crq_as_u64[1]));
+       if (rc != 0) {
                list_del(&evt_struct->list);
                del_timer(&evt_struct->timer);
 
@@ -987,15 +991,16 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
                if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
                        memcpy(cmnd->sense_buffer,
                               rsp->data,
-                              rsp->sense_data_len);
+                              be32_to_cpu(rsp->sense_data_len));
                unmap_cmd_data(&evt_struct->iu.srp.cmd, 
                               evt_struct, 
                               evt_struct->hostdata->dev);
 
                if (rsp->flags & SRP_RSP_FLAG_DOOVER)
-                       scsi_set_resid(cmnd, rsp->data_out_res_cnt);
+                       scsi_set_resid(cmnd,
+                                      be32_to_cpu(rsp->data_out_res_cnt));
                else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
-                       scsi_set_resid(cmnd, rsp->data_in_res_cnt);
+                       scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt));
        }
 
        if (evt_struct->cmnd_done)
@@ -1037,7 +1042,7 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
        memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
        srp_cmd->opcode = SRP_CMD;
        memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
-       srp_cmd->lun = ((u64) lun) << 48;
+       srp_cmd->lun = cpu_to_be64(((u64)lun) << 48);
 
        if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
                if (!firmware_has_feature(FW_FEATURE_CMO))
@@ -1062,9 +1067,10 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
        if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
             out_fmt == SRP_DATA_DESC_INDIRECT) &&
            indirect->table_desc.va == 0) {
-               indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
+               indirect->table_desc.va =
+                       cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) +
                        offsetof(struct srp_cmd, add_data) +
-                       offsetof(struct srp_indirect_buf, desc_list);
+                       offsetof(struct srp_indirect_buf, desc_list));
        }
 
        return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
@@ -1158,7 +1164,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
         * request_limit could have been set to -1 by this client.
         */
        atomic_set(&hostdata->request_limit,
-                  evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
+                  be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
 
        /* If we had any pending I/Os, kick them */
        scsi_unblock_requests(hostdata->host);
@@ -1184,8 +1190,9 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
        login = &evt_struct->iu.srp.login_req;
        memset(login, 0, sizeof(*login));
        login->opcode = SRP_LOGIN_REQ;
-       login->req_it_iu_len = sizeof(union srp_iu);
-       login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
+       login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu));
+       login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+                                        SRP_BUF_FORMAT_INDIRECT);
 
        spin_lock_irqsave(hostdata->host->host_lock, flags);
        /* Start out with a request limit of 0, since this is negotiated in
@@ -1214,12 +1221,13 @@ static void capabilities_rsp(struct srp_event_struct *evt_struct)
                dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
                        evt_struct->xfer_iu->mad.capabilities.common.status);
        } else {
-               if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
+               if (hostdata->caps.migration.common.server_support !=
+                   cpu_to_be16(SERVER_SUPPORTS_CAP))
                        dev_info(hostdata->dev, "Partition migration not supported\n");
 
                if (client_reserve) {
                        if (hostdata->caps.reserve.common.server_support ==
-                           SERVER_SUPPORTS_CAP)
+                           cpu_to_be16(SERVER_SUPPORTS_CAP))
                                dev_info(hostdata->dev, "Client reserve enabled\n");
                        else
                                dev_info(hostdata->dev, "Client reserve not supported\n");
@@ -1251,9 +1259,9 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
        req = &evt_struct->iu.mad.capabilities;
        memset(req, 0, sizeof(*req));
 
-       hostdata->caps.flags = CAP_LIST_SUPPORTED;
+       hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED);
        if (hostdata->client_migrated)
-               hostdata->caps.flags |= CLIENT_MIGRATED;
+               hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);
 
        strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
                sizeof(hostdata->caps.name));
@@ -1264,22 +1272,31 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
        strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
        hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
 
-       req->common.type = VIOSRP_CAPABILITIES_TYPE;
-       req->buffer = hostdata->caps_addr;
+       req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
+       req->buffer = cpu_to_be64(hostdata->caps_addr);
 
-       hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
-       hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
-       hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
-       hostdata->caps.migration.ecl = 1;
+       hostdata->caps.migration.common.cap_type =
+                               cpu_to_be32(MIGRATION_CAPABILITIES);
+       hostdata->caps.migration.common.length =
+                               cpu_to_be16(sizeof(hostdata->caps.migration));
+       hostdata->caps.migration.common.server_support =
+                               cpu_to_be16(SERVER_SUPPORTS_CAP);
+       hostdata->caps.migration.ecl = cpu_to_be32(1);
 
        if (client_reserve) {
-               hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
-               hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
-               hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
-               hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
-               req->common.length = sizeof(hostdata->caps);
+               hostdata->caps.reserve.common.cap_type =
+                                       cpu_to_be32(RESERVATION_CAPABILITIES);
+               hostdata->caps.reserve.common.length =
+                               cpu_to_be16(sizeof(hostdata->caps.reserve));
+               hostdata->caps.reserve.common.server_support =
+                               cpu_to_be16(SERVER_SUPPORTS_CAP);
+               hostdata->caps.reserve.type =
+                               cpu_to_be32(CLIENT_RESERVE_SCSI_2);
+               req->common.length =
+                               cpu_to_be16(sizeof(hostdata->caps));
        } else
-               req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
+               req->common.length = cpu_to_be16(sizeof(hostdata->caps) -
+                                               sizeof(hostdata->caps.reserve));
 
        spin_lock_irqsave(hostdata->host->host_lock, flags);
        if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
@@ -1297,7 +1314,7 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
 static void fast_fail_rsp(struct srp_event_struct *evt_struct)
 {
        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
-       u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;
+       u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status);
 
        if (status == VIOSRP_MAD_NOT_SUPPORTED)
                dev_err(hostdata->dev, "fast_fail not supported in server\n");
@@ -1334,8 +1351,8 @@ static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
 
        fast_fail_mad = &evt_struct->iu.mad.fast_fail;
        memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
-       fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
-       fast_fail_mad->common.length = sizeof(*fast_fail_mad);
+       fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL);
+       fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad));
 
        spin_lock_irqsave(hostdata->host->host_lock, flags);
        rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
@@ -1362,15 +1379,15 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
                         "host partition %s (%d), OS %d, max io %u\n",
                         hostdata->madapter_info.srp_version,
                         hostdata->madapter_info.partition_name,
-                        hostdata->madapter_info.partition_number,
-                        hostdata->madapter_info.os_type,
-                        hostdata->madapter_info.port_max_txu[0]);
+                        be32_to_cpu(hostdata->madapter_info.partition_number),
+                        be32_to_cpu(hostdata->madapter_info.os_type),
+                        be32_to_cpu(hostdata->madapter_info.port_max_txu[0]));
                
                if (hostdata->madapter_info.port_max_txu[0]) 
                        hostdata->host->max_sectors = 
-                               hostdata->madapter_info.port_max_txu[0] >> 9;
+                               be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9;
                
-               if (hostdata->madapter_info.os_type == 3 &&
+               if (be32_to_cpu(hostdata->madapter_info.os_type) == 3 &&
                    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
                        dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
                                hostdata->madapter_info.srp_version);
@@ -1379,7 +1396,7 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
                        hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
                }
 
-               if (hostdata->madapter_info.os_type == 3) {
+               if (be32_to_cpu(hostdata->madapter_info.os_type) == 3) {
                        enable_fast_fail(hostdata);
                        return;
                }
@@ -1414,9 +1431,9 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
        req = &evt_struct->iu.mad.adapter_info;
        memset(req, 0x00, sizeof(*req));
        
-       req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
-       req->common.length = sizeof(hostdata->madapter_info);
-       req->buffer = hostdata->adapter_info_addr;
+       req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE);
+       req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info));
+       req->buffer = cpu_to_be64(hostdata->adapter_info_addr);
 
        spin_lock_irqsave(hostdata->host->host_lock, flags);
        if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
@@ -1501,7 +1518,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
                /* Set up an abort SRP command */
                memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
                tsk_mgmt->opcode = SRP_TSK_MGMT;
-               tsk_mgmt->lun = ((u64) lun) << 48;
+               tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
                tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
                tsk_mgmt->task_tag = (u64) found_evt;
 
@@ -1624,7 +1641,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
                /* Set up a lun reset SRP command */
                memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
                tsk_mgmt->opcode = SRP_TSK_MGMT;
-               tsk_mgmt->lun = ((u64) lun) << 48;
+               tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
                tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
 
                evt->sync_srp = &srp_rsp;
@@ -1735,8 +1752,9 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 {
        long rc;
        unsigned long flags;
+       /* The hypervisor copies our tag value here so no byteswapping */
        struct srp_event_struct *evt_struct =
-           (struct srp_event_struct *)crq->IU_data_ptr;
+                       (__force struct srp_event_struct *)crq->IU_data_ptr;
        switch (crq->valid) {
        case 0xC0:              /* initialization */
                switch (crq->format) {
@@ -1792,18 +1810,18 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
         */
        if (!valid_event_struct(&hostdata->pool, evt_struct)) {
                dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
-                      (void *)crq->IU_data_ptr);
+                      evt_struct);
                return;
        }
 
        if (atomic_read(&evt_struct->free)) {
                dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
-                       (void *)crq->IU_data_ptr);
+                       evt_struct);
                return;
        }
 
        if (crq->format == VIOSRP_SRP_FORMAT)
-               atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
+               atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta),
                           &hostdata->request_limit);
 
        del_timer(&evt_struct->timer);
@@ -1856,13 +1874,11 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 
        /* Set up a lun reset SRP command */
        memset(host_config, 0x00, sizeof(*host_config));
-       host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
-       host_config->common.length = length;
-       host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
-                                                   length,
-                                                   DMA_BIDIRECTIONAL);
+       host_config->common.type = cpu_to_be32(VIOSRP_HOST_CONFIG_TYPE);
+       host_config->common.length = cpu_to_be16(length);
+       addr = dma_map_single(hostdata->dev, buffer, length, DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
+       if (dma_mapping_error(hostdata->dev, addr)) {
                if (!firmware_has_feature(FW_FEATURE_CMO))
                        dev_err(hostdata->dev,
                                "dma_mapping error getting host config\n");
@@ -1870,6 +1886,8 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
                return -1;
        }
 
+       host_config->buffer = cpu_to_be64(addr);
+
        init_completion(&evt_struct->comp);
        spin_lock_irqsave(hostdata->host->host_lock, flags);
        rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
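
The ibmvscsi changes above make the byte order of the shared CRQ/MAD
structures explicit: fields are stored big-endian with cpu_to_be*() and read
back with be*_to_cpu(), so the client behaves the same on little-endian hosts.
A rough userspace equivalent using the glibc <endian.h> helpers rather than
the kernel API, with a simplified stand-in structure (not the real
struct mad_common layout):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct mad_common_example {	/* loosely modeled on struct mad_common */
	uint32_t type;		/* kept big-endian, as the firmware expects */
	uint16_t status;
	uint16_t length;
};

int main(void)
{
	struct mad_common_example m;

	m.type   = htobe32(2);				/* cpu_to_be32() */
	m.length = htobe16((uint16_t)sizeof(m));	/* cpu_to_be16() */
	m.status = 0;

	/* read back in CPU byte order, as the driver does with be*_to_cpu() */
	printf("type=%u length=%u\n",
	       (unsigned)be32toh(m.type), (unsigned)be16toh(m.length));
	return 0;
}
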
index 2cd735d..1162430 100644
@@ -75,9 +75,9 @@ struct viosrp_crq {
        u8 format;              /* SCSI vs out-of-band */
        u8 reserved;
        u8 status;              /* non-scsi failure? (e.g. DMA failure) */
-       u16 timeout;            /* in seconds */
-       u16 IU_length;          /* in bytes */
-       u64 IU_data_ptr;        /* the TCE for transferring data */
+       __be16 timeout;         /* in seconds */
+       __be16 IU_length;               /* in bytes */
+       __be64 IU_data_ptr;     /* the TCE for transferring data */
 };
 
 /* MADs are Management requests above and beyond the IUs defined in the SRP
@@ -124,10 +124,10 @@ enum viosrp_capability_flag {
  * Common MAD header
  */
 struct mad_common {
-       u32 type;
-       u16 status;
-       u16 length;
-       u64 tag;
+       __be32 type;
+       __be16 status;
+       __be16 length;
+       __be64 tag;
 };
 
 /*
@@ -139,23 +139,23 @@ struct mad_common {
  */
 struct viosrp_empty_iu {
        struct mad_common common;
-       u64 buffer;
-       u32 port;
+       __be64 buffer;
+       __be32 port;
 };
 
 struct viosrp_error_log {
        struct mad_common common;
-       u64 buffer;
+       __be64 buffer;
 };
 
 struct viosrp_adapter_info {
        struct mad_common common;
-       u64 buffer;
+       __be64 buffer;
 };
 
 struct viosrp_host_config {
        struct mad_common common;
-       u64 buffer;
+       __be64 buffer;
 };
 
 struct viosrp_fast_fail {
@@ -164,27 +164,27 @@ struct viosrp_fast_fail {
 
 struct viosrp_capabilities {
        struct mad_common common;
-       u64 buffer;
+       __be64 buffer;
 };
 
 struct mad_capability_common {
-       u32 cap_type;
-       u16 length;
-       u16 server_support;
+       __be32 cap_type;
+       __be16 length;
+       __be16 server_support;
 };
 
 struct mad_reserve_cap {
        struct mad_capability_common common;
-       u32 type;
+       __be32 type;
 };
 
 struct mad_migration_cap {
        struct mad_capability_common common;
-       u32 ecl;
+       __be32 ecl;
 };
 
 struct capabilities{
-       u32 flags;
+       __be32 flags;
        char name[SRP_MAX_LOC_LEN];
        char loc[SRP_MAX_LOC_LEN];
        struct mad_migration_cap migration;
@@ -208,10 +208,10 @@ union viosrp_iu {
 struct mad_adapter_info_data {
        char srp_version[8];
        char partition_name[96];
-       u32 partition_number;
-       u32 mad_version;
-       u32 os_type;
-       u32 port_max_txu[8];    /* per-port maximum transfer */
+       __be32 partition_number;
+       __be32 mad_version;
+       __be32 os_type;
+       __be32 port_max_txu[8]; /* per-port maximum transfer */
 };
 
 #endif
index df43bfe..4e1b75c 100644
@@ -708,6 +708,7 @@ struct lpfc_hba {
        uint32_t cfg_multi_ring_type;
        uint32_t cfg_poll;
        uint32_t cfg_poll_tmo;
+       uint32_t cfg_task_mgmt_tmo;
        uint32_t cfg_use_msi;
        uint32_t cfg_fcp_imax;
        uint32_t cfg_fcp_cpu_map;
index 16498e0..00656fc 100644
@@ -1865,8 +1865,10 @@ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
 { \
        if (val >= minval && val <= maxval) {\
                lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
-                       "3053 lpfc_" #attr " changed from %d to %d\n", \
-                       vport->cfg_##attr, val); \
+                       "3053 lpfc_" #attr \
+                       " changed from %d (x%x) to %d (x%x)\n", \
+                       vport->cfg_##attr, vport->cfg_##attr, \
+                       val, val); \
                vport->cfg_##attr = val;\
                return 0;\
        }\
@@ -4011,8 +4013,11 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
 # For [0], FCP commands are issued to Work Queues in a round robin fashion.
 # For [1], FCP commands are issued to a Work Queue associated with the
 #          current CPU.
+# The driver will set this to 1 if it is able to set up CPU affinity for
+# FCP I/Os through the Work Queue associated with the current CPU. Otherwise,
+# round-robin scheduling of FCP I/Os across the WQs will be used.
 */
-LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for "
+LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
                "issuing commands [0] - Round Robin, [1] - Current CPU");
 
 /*
@@ -4110,6 +4115,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
             "Milliseconds driver will wait between polling FCP ring");
 
 /*
+# lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
+# to complete in seconds. Value range is [5,180], default value is 60.
+*/
+LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
+            "Maximum time to wait for task management commands to complete");
+/*
 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
 #              support this feature
 #       0  = MSI disabled
@@ -4295,6 +4306,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_issue_reset,
        &dev_attr_lpfc_poll,
        &dev_attr_lpfc_poll_tmo,
+       &dev_attr_lpfc_task_mgmt_tmo,
        &dev_attr_lpfc_use_msi,
        &dev_attr_lpfc_fcp_imax,
        &dev_attr_lpfc_fcp_cpu_map,
@@ -5274,6 +5286,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        lpfc_topology_init(phba, lpfc_topology);
        lpfc_link_speed_init(phba, lpfc_link_speed);
        lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
+       lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
        lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
        lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
        lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
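
For illustration, the lpfc_##attr##_set() logging change and the new
lpfc_task_mgmt_tmo attribute above amount to a range-checked setter that logs
the old and new values in decimal and hex. A small userspace sketch under
those assumptions; the function name and the -1 error return are placeholders
for the driver's generated macro code and its real error value:

#include <stdio.h>

static int set_task_mgmt_tmo(unsigned int *cur, unsigned int val)
{
	const unsigned int minval = 5, maxval = 180;	/* range documented above */

	if (val >= minval && val <= maxval) {
		printf("lpfc_task_mgmt_tmo changed from %u (0x%x) to %u (0x%x)\n",
		       *cur, *cur, val, val);
		*cur = val;
		return 0;
	}
	return -1;	/* out of range; placeholder for the driver's error */
}

int main(void)
{
	unsigned int tmo = 60;			/* default per the new attribute */

	set_task_mgmt_tmo(&tmo, 90);		/* accepted, logged */
	if (set_task_mgmt_tmo(&tmo, 500))	/* rejected, value unchanged */
		printf("rejected out-of-range value, tmo still %u\n", tmo);
	return 0;
}
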
index 79c13c3..b92aec9 100644
@@ -317,6 +317,11 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
        }
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+       /* Close the timeout handler abort window */
+       spin_lock_irqsave(&phba->hbalock, flags);
+       cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+       spin_unlock_irqrestore(&phba->hbalock, flags);
+
        iocb = &dd_data->context_un.iocb;
        ndlp = iocb->ndlp;
        rmp = iocb->rmp;
@@ -387,6 +392,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
        int request_nseg;
        int reply_nseg;
        struct bsg_job_data *dd_data;
+       unsigned long flags;
        uint32_t creg_val;
        int rc = 0;
        int iocb_stat;
@@ -501,14 +507,24 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
        }
 
        iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
-       if (iocb_stat == IOCB_SUCCESS)
+
+       if (iocb_stat == IOCB_SUCCESS) {
+               spin_lock_irqsave(&phba->hbalock, flags);
+               /* make sure the I/O had not been completed yet */
+               if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+                       /* open up abort window to timeout handler */
+                       cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+               }
+               spin_unlock_irqrestore(&phba->hbalock, flags);
                return 0; /* done for now */
-       else if (iocb_stat == IOCB_BUSY)
+       } else if (iocb_stat == IOCB_BUSY) {
                rc = -EAGAIN;
-       else
+       } else {
                rc = -EIO;
+       }
 
        /* iocb failed so cleanup */
+       job->dd_data = NULL;
 
 free_rmp:
        lpfc_free_bsg_buffers(phba, rmp);
@@ -577,6 +593,11 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
        }
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+       /* Close the timeout handler abort window */
+       spin_lock_irqsave(&phba->hbalock, flags);
+       cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+       spin_unlock_irqrestore(&phba->hbalock, flags);
+
        rsp = &rspiocbq->iocb;
        pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
        prsp = (struct lpfc_dmabuf *)pcmd->list.next;
@@ -639,6 +660,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
        struct lpfc_iocbq *cmdiocbq;
        uint16_t rpi = 0;
        struct bsg_job_data *dd_data;
+       unsigned long flags;
        uint32_t creg_val;
        int rc = 0;
 
@@ -721,15 +743,25 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
 
-       if (rc == IOCB_SUCCESS)
+       if (rc == IOCB_SUCCESS) {
+               spin_lock_irqsave(&phba->hbalock, flags);
+               /* make sure the I/O had not been completed/released */
+               if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+                       /* open up abort window to timeout handler */
+                       cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+               }
+               spin_unlock_irqrestore(&phba->hbalock, flags);
                return 0; /* done for now */
-       else if (rc == IOCB_BUSY)
+       } else if (rc == IOCB_BUSY) {
                rc = -EAGAIN;
-       else
+       } else {
                rc = -EIO;
+       }
 
-linkdown_err:
+       /* iocb failed so cleanup */
+       job->dd_data = NULL;
 
+linkdown_err:
        cmdiocbq->context1 = ndlp;
        lpfc_els_free_iocb(phba, cmdiocbq);
 
@@ -1249,7 +1281,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
        struct lpfc_hba *phba = vport->phba;
        struct get_ct_event *event_req;
        struct get_ct_event_reply *event_reply;
-       struct lpfc_bsg_event *evt;
+       struct lpfc_bsg_event *evt, *evt_next;
        struct event_data *evt_dat = NULL;
        unsigned long flags;
        uint32_t rc = 0;
@@ -1269,7 +1301,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
        event_reply = (struct get_ct_event_reply *)
                job->reply->reply_data.vendor_reply.vendor_rsp;
        spin_lock_irqsave(&phba->ct_ev_lock, flags);
-       list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+       list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
                if (evt->reg_id == event_req->ev_reg_id) {
                        if (list_empty(&evt->events_to_get))
                                break;
@@ -1370,6 +1402,11 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
        }
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+       /* Close the timeout handler abort window */
+       spin_lock_irqsave(&phba->hbalock, flags);
+       cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+       spin_unlock_irqrestore(&phba->hbalock, flags);
+
        ndlp = dd_data->context_un.iocb.ndlp;
        cmp = cmdiocbq->context2;
        bmp = cmdiocbq->context3;
@@ -1433,6 +1470,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
        int rc = 0;
        struct lpfc_nodelist *ndlp = NULL;
        struct bsg_job_data *dd_data;
+       unsigned long flags;
        uint32_t creg_val;
 
        /* allocate our bsg tracking structure */
@@ -1542,8 +1580,19 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
 
-       if (rc == IOCB_SUCCESS)
+       if (rc == IOCB_SUCCESS) {
+               spin_lock_irqsave(&phba->hbalock, flags);
+               /* make sure the I/O had not been completed/released */
+               if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
+                       /* open up abort window to timeout handler */
+                       ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+               }
+               spin_unlock_irqrestore(&phba->hbalock, flags);
                return 0; /* done for now */
+       }
+
+       /* iocb failed so cleanup */
+       job->dd_data = NULL;
 
 issue_ct_rsp_exit:
        lpfc_sli_release_iocbq(phba, ctiocb);
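
The LPFC_IO_CMD_OUTSTANDING handling added in the hunks above, and checked by
lpfc_bsg_timeout() below, forms an "abort window": it is opened under
phba->hbalock once the iocb has been issued successfully, closed by the
completion handler, and the timeout handler bails out with -EAGAIN if the
window is already closed. A simplified userspace sketch of that protocol, with
a pthread mutex standing in for hbalock and a placeholder flag value; the real
timeout handler keeps the lock held while issuing the abort, which this sketch
does not show:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define LPFC_IO_CMD_OUTSTANDING 0x1	/* placeholder flag value */

struct iocb_example {
	pthread_mutex_t lock;		/* stands in for phba->hbalock */
	unsigned int iocb_flag;
};

static void issue_done(struct iocb_example *io)
{
	pthread_mutex_lock(&io->lock);
	io->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;	/* open abort window */
	pthread_mutex_unlock(&io->lock);
}

static void completion(struct iocb_example *io)
{
	pthread_mutex_lock(&io->lock);
	io->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;	/* close abort window */
	pthread_mutex_unlock(&io->lock);
}

static bool timeout_may_abort(struct iocb_example *io)
{
	bool open;

	pthread_mutex_lock(&io->lock);
	open = io->iocb_flag & LPFC_IO_CMD_OUTSTANDING;
	pthread_mutex_unlock(&io->lock);
	return open;	/* if false, the timeout path would return -EAGAIN */
}

int main(void)
{
	struct iocb_example io = { PTHREAD_MUTEX_INITIALIZER, 0 };

	issue_done(&io);
	printf("timeout may abort: %d\n", timeout_may_abort(&io));	/* 1 */
	completion(&io);
	printf("timeout may abort: %d\n", timeout_may_abort(&io));	/* 0 */
	return 0;
}
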
@@ -5284,9 +5333,15 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
                 * remove it from the txq queue and call cancel iocbs.
                 * Otherwise, call abort iotag
                 */
-
                cmdiocb = dd_data->context_un.iocb.cmdiocbq;
-               spin_lock_irq(&phba->hbalock);
+               spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+               spin_lock_irqsave(&phba->hbalock, flags);
+               /* make sure the I/O abort window is still open */
+               if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
+                       spin_unlock_irqrestore(&phba->hbalock, flags);
+                       return -EAGAIN;
+               }
                list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
                                         list) {
                        if (check_iocb == cmdiocb) {
@@ -5296,8 +5351,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
                }
                if (list_empty(&completions))
                        lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
-               spin_unlock_irq(&phba->hbalock);
-               spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+               spin_unlock_irqrestore(&phba->hbalock, flags);
                if (!list_empty(&completions)) {
                        lpfc_sli_cancel_iocbs(phba, &completions,
                                              IOSTAT_LOCAL_REJECT,
@@ -5321,9 +5375,10 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
                 * remove it from the txq queue and call cancel iocbs.
                 * Otherwise, call abort iotag.
                 */
-
                cmdiocb = dd_data->context_un.menlo.cmdiocbq;
-               spin_lock_irq(&phba->hbalock);
+               spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+               spin_lock_irqsave(&phba->hbalock, flags);
                list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
                                         list) {
                        if (check_iocb == cmdiocb) {
@@ -5333,8 +5388,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
                }
                if (list_empty(&completions))
                        lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
-               spin_unlock_irq(&phba->hbalock);
-               spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+               spin_unlock_irqrestore(&phba->hbalock, flags);
                if (!list_empty(&completions)) {
                        lpfc_sli_cancel_iocbs(phba, &completions,
                                              IOSTAT_LOCAL_REJECT,
index 60d6ca2..7801601 100644
@@ -4437,6 +4437,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        if (!ndlp)
                return;
        lpfc_issue_els_logo(vport, ndlp, 0);
+       mempool_free(pmb, phba->mbox_mem_pool);
 }
 
 /*
@@ -4456,7 +4457,15 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        int rc;
        uint16_t rpi;
 
-       if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+       if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
+           ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
+               if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+                       lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+                                        "3366 RPI x%x needs to be "
+                                        "unregistered nlp_flag x%x "
+                                        "did x%x\n",
+                                        ndlp->nlp_rpi, ndlp->nlp_flag,
+                                        ndlp->nlp_DID);
                mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (mbox) {
                        /* SLI4 ports require the physical rpi value. */
index 501147c..647f5bf 100644
@@ -3031,10 +3031,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
                        phba->sli4_hba.scsi_xri_max);
 
        spin_lock_irq(&phba->scsi_buf_list_get_lock);
-       spin_lock_irq(&phba->scsi_buf_list_put_lock);
+       spin_lock(&phba->scsi_buf_list_put_lock);
        list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
        list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
-       spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+       spin_unlock(&phba->scsi_buf_list_put_lock);
        spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
        if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
@@ -3070,10 +3070,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
                psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
        }
        spin_lock_irq(&phba->scsi_buf_list_get_lock);
-       spin_lock_irq(&phba->scsi_buf_list_put_lock);
+       spin_lock(&phba->scsi_buf_list_put_lock);
        list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
        INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-       spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+       spin_unlock(&phba->scsi_buf_list_put_lock);
        spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
        return 0;
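
Both hunks above downgrade the inner lock from spin_lock_irq() to spin_lock(): interrupts are already disabled by the outer spin_lock_irq() on scsi_buf_list_get_lock, and a spin_unlock_irq() on the inner lock would have re-enabled them while the outer lock was still held. The shape of the nesting, with the surrounding context elided:

	spin_lock_irq(&phba->scsi_buf_list_get_lock);	/* outer lock disables interrupts */
	spin_lock(&phba->scsi_buf_list_put_lock);	/* inner lock: plain variant, irqs already off */
	list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
	list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
	spin_unlock(&phba->scsi_buf_list_put_lock);	/* not the _irq variant: keep irqs off ... */
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);	/* ... until the outer unlock */
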
@@ -4859,6 +4859,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        struct lpfc_mqe *mqe;
        int longs;
 
+       /* Get all the module params for configuring this host */
+       lpfc_get_cfgparam(phba);
+
        /* Before proceed, wait for POST done and device ready */
        rc = lpfc_sli4_post_status_check(phba);
        if (rc)
@@ -4902,15 +4905,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                sizeof(struct lpfc_mbox_ext_buf_ctx));
        INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
 
-       /*
-        * We need to do a READ_CONFIG mailbox command here before
-        * calling lpfc_get_cfgparam. For VFs this will report the
-        * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
-        * All of the resources allocated
-        * for this Port are tied to these values.
-        */
-       /* Get all the module params for configuring this host */
-       lpfc_get_cfgparam(phba);
        phba->max_vpi = LPFC_MAX_VPI;
 
        /* This will be set to correct value after the read_config mbox */
@@ -7141,19 +7135,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
                phba->sli4_hba.fcp_wq = NULL;
        }
 
-       if (phba->pci_bar0_memmap_p) {
-               iounmap(phba->pci_bar0_memmap_p);
-               phba->pci_bar0_memmap_p = NULL;
-       }
-       if (phba->pci_bar2_memmap_p) {
-               iounmap(phba->pci_bar2_memmap_p);
-               phba->pci_bar2_memmap_p = NULL;
-       }
-       if (phba->pci_bar4_memmap_p) {
-               iounmap(phba->pci_bar4_memmap_p);
-               phba->pci_bar4_memmap_p = NULL;
-       }
-
        /* Release FCP CQ mapping array */
        if (phba->sli4_hba.fcp_cq_map != NULL) {
                kfree(phba->sli4_hba.fcp_cq_map);
@@ -7942,9 +7923,9 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
         * particular PCI BARs regions is dependent on the type of
         * SLI4 device.
         */
-       if (pci_resource_start(pdev, 0)) {
-               phba->pci_bar0_map = pci_resource_start(pdev, 0);
-               bar0map_len = pci_resource_len(pdev, 0);
+       if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
+               phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
+               bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
 
                /*
                 * Map SLI4 PCI Config Space Register base to a kernel virtual
@@ -7958,6 +7939,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
                                   "registers.\n");
                        goto out;
                }
+               phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
                /* Set up BAR0 PCI config space register memory map */
                lpfc_sli4_bar0_register_memmap(phba, if_type);
        } else {
@@ -7980,13 +7962,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
        }
 
        if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
-           (pci_resource_start(pdev, 2))) {
+           (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
                /*
                 * Map SLI4 if type 0 HBA Control Register base to a kernel
                 * virtual address and setup the registers.
                 */
-               phba->pci_bar1_map = pci_resource_start(pdev, 2);
-               bar1map_len = pci_resource_len(pdev, 2);
+               phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
+               bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
                phba->sli4_hba.ctrl_regs_memmap_p =
                                ioremap(phba->pci_bar1_map, bar1map_len);
                if (!phba->sli4_hba.ctrl_regs_memmap_p) {
@@ -7994,17 +7976,18 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
                           "ioremap failed for SLI4 HBA control registers.\n");
                        goto out_iounmap_conf;
                }
+               phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
                lpfc_sli4_bar1_register_memmap(phba);
        }
 
        if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
-           (pci_resource_start(pdev, 4))) {
+           (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
                /*
                 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
                 * virtual address and setup the registers.
                 */
-               phba->pci_bar2_map = pci_resource_start(pdev, 4);
-               bar2map_len = pci_resource_len(pdev, 4);
+               phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
+               bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
                phba->sli4_hba.drbl_regs_memmap_p =
                                ioremap(phba->pci_bar2_map, bar2map_len);
                if (!phba->sli4_hba.drbl_regs_memmap_p) {
@@ -8012,6 +7995,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
                           "ioremap failed for SLI4 HBA doorbell registers.\n");
                        goto out_iounmap_ctrl;
                }
+               phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
                error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
                if (error)
                        goto out_iounmap_all;
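
The lpfc_sli4_pci_mem_setup() hunks replace the literal BAR numbers 0/2/4 with the PCI_64BIT_BAR0/2/4 constants and record each ioremap()ed region in phba->pci_barN_memmap_p right where the mapping is created. With the pointers cached at map time, lpfc_dual_chute_pci_bar_map() (in a later hunk) can simply return them, and lpfc_sli4_queue_destroy() no longer needs its own iounmap() calls, so each BAR is mapped and unmapped exactly once. A condensed sketch of the runtime lookup side (illustrative; other BAR cases and error handling elided):

	static void __iomem *bar_map_lookup(struct lpfc_hba *phba, uint16_t pci_barset)
	{
		/* Runtime users get the mapping cached during pci_mem_setup();
		 * nothing is ioremap()ed lazily any more. */
		if (pci_barset == WQ_PCI_BAR_4_AND_5)
			return phba->pci_bar4_memmap_p;
		return NULL;
	}
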
@@ -8405,7 +8389,8 @@ static int
 lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 {
        int i, idx, saved_chann, used_chann, cpu, phys_id;
-       int max_phys_id, num_io_channel, first_cpu;
+       int max_phys_id, min_phys_id;
+       int num_io_channel, first_cpu, chan;
        struct lpfc_vector_map_info *cpup;
 #ifdef CONFIG_X86
        struct cpuinfo_x86 *cpuinfo;
@@ -8423,6 +8408,7 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
                phba->sli4_hba.num_present_cpu));
 
        max_phys_id = 0;
+       min_phys_id = 0xff;
        phys_id = 0;
        num_io_channel = 0;
        first_cpu = LPFC_VECTOR_MAP_EMPTY;
@@ -8446,9 +8432,12 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 
                if (cpup->phys_id > max_phys_id)
                        max_phys_id = cpup->phys_id;
+               if (cpup->phys_id < min_phys_id)
+                       min_phys_id = cpup->phys_id;
                cpup++;
        }
 
+       phys_id = min_phys_id;
        /* Now associate the HBA vectors with specific CPUs */
        for (idx = 0; idx < vectors; idx++) {
                cpup = phba->sli4_hba.cpu_map;
@@ -8459,13 +8448,25 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
                        for (i = 1; i < max_phys_id; i++) {
                                phys_id++;
                                if (phys_id > max_phys_id)
-                                       phys_id = 0;
+                                       phys_id = min_phys_id;
                                cpu = lpfc_find_next_cpu(phba, phys_id);
                                if (cpu == LPFC_VECTOR_MAP_EMPTY)
                                        continue;
                                goto found;
                        }
 
+                       /* Use round robin for scheduling */
+                       phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
+                       chan = 0;
+                       cpup = phba->sli4_hba.cpu_map;
+                       for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+                               cpup->channel_id = chan;
+                               cpup++;
+                               chan++;
+                               if (chan >= phba->cfg_fcp_io_channel)
+                                       chan = 0;
+                       }
+
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "3329 Cannot set affinity:"
                                        "Error mapping vector %d (%d)\n",
@@ -8503,7 +8504,7 @@ found:
                /* Spread vector mapping across multiple physical CPU nodes */
                phys_id++;
                if (phys_id > max_phys_id)
-                       phys_id = 0;
+                       phys_id = min_phys_id;
        }
 
        /*
@@ -8513,7 +8514,7 @@ found:
         * Base the remaining IO channel assigned, to IO channels already
         * assigned to other CPUs on the same phys_id.
         */
-       for (i = 0; i <= max_phys_id; i++) {
+       for (i = min_phys_id; i <= max_phys_id; i++) {
                /*
                 * If there are no io channels already mapped to
                 * this phys_id, just round robin thru the io_channels.
@@ -8595,10 +8596,11 @@ out:
        if (num_io_channel != phba->sli4_hba.num_present_cpu)
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "3333 Set affinity mismatch:"
-                               "%d chann != %d cpus: %d vactors\n",
+                               "%d chann != %d cpus: %d vectors\n",
                                num_io_channel, phba->sli4_hba.num_present_cpu,
                                vectors);
 
+       /* Enable using cpu affinity for scheduling */
        phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
        return 1;
 }
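
The affinity hunks above track min_phys_id alongside max_phys_id so the wrap-around restarts at the lowest physical package actually present, and when a vector cannot be mapped they fall back to spreading channel ids over every present CPU in round-robin order (switching cfg_fcp_io_sched to LPFC_FCP_SCHED_ROUND_ROBIN). The fallback assignment, as a small stand-alone C sketch:

	#include <stdio.h>

	/* Hand out channel ids 0..nchan-1 cyclically across ncpu CPUs. */
	static void assign_round_robin(int *channel_id, int ncpu, int nchan)
	{
		int cpu, chan = 0;

		for (cpu = 0; cpu < ncpu; cpu++) {
			channel_id[cpu] = chan;
			if (++chan >= nchan)
				chan = 0;
		}
	}

	int main(void)
	{
		int map[8], i;

		assign_round_robin(map, 8, 3);
		for (i = 0; i < 8; i++)
			printf("cpu %d -> channel %d\n", i, map[i]);
		return 0;
	}
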
@@ -8689,9 +8691,12 @@ enable_msix_vectors:
 
 cfg_fail_out:
        /* free the irq already requested */
-       for (--index; index >= 0; index--)
+       for (--index; index >= 0; index--) {
+               irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
+                                         vector, NULL);
                free_irq(phba->sli4_hba.msix_entries[index].vector,
                         &phba->sli4_hba.fcp_eq_hdl[index]);
+       }
 
 msi_fail_out:
        /* Unconfigure MSI-X capability structure */
@@ -8712,9 +8717,12 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
        int index;
 
        /* Free up MSI-X multi-message vectors */
-       for (index = 0; index < phba->cfg_fcp_io_channel; index++)
+       for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
+               irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
+                                         vector, NULL);
                free_irq(phba->sli4_hba.msix_entries[index].vector,
                         &phba->sli4_hba.fcp_eq_hdl[index]);
+       }
 
        /* Disable MSI-X */
        pci_disable_msix(phba->pcidev);
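
Both MSI-X teardown paths above (the request-failure unwind at cfg_fail_out and lpfc_sli4_disable_msix()) now clear each vector's affinity hint before releasing it, so no stale hint survives the free_irq(). The resulting teardown order, with the vector and handler arrays named as in the driver:

	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
		/* drop the hint first, then release the vector */
		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].vector,
				      NULL);
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index]);
	}
	pci_disable_msix(phba->pcidev);
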
index 1242b6c..c913e8c 100644 (file)
@@ -926,10 +926,10 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
 
        /* get all SCSI buffers need to repost to a local list */
        spin_lock_irq(&phba->scsi_buf_list_get_lock);
-       spin_lock_irq(&phba->scsi_buf_list_put_lock);
+       spin_lock(&phba->scsi_buf_list_put_lock);
        list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
        list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
-       spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+       spin_unlock(&phba->scsi_buf_list_put_lock);
        spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
        /* post the list of scsi buffer sgls to port if available */
@@ -1000,9 +1000,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
                }
                memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
 
-               /* Page alignment is CRITICAL, double check to be sure */
-               if (((unsigned long)(psb->data) &
-                   (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
+               /*
+                * 4K Page alignment is CRITICAL to BlockGuard, double check
+                * to be sure.
+                */
+               if (phba->cfg_enable_bg  && (((unsigned long)(psb->data) &
+                   (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
                        pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
                                      psb->data, psb->dma_handle);
                        kfree(psb);
@@ -1134,22 +1137,21 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
        struct  lpfc_scsi_buf * lpfc_cmd = NULL;
        struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
-       unsigned long gflag = 0;
-       unsigned long pflag = 0;
+       unsigned long iflag = 0;
 
-       spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+       spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
        list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
                         list);
        if (!lpfc_cmd) {
-               spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+               spin_lock(&phba->scsi_buf_list_put_lock);
                list_splice(&phba->lpfc_scsi_buf_list_put,
                            &phba->lpfc_scsi_buf_list_get);
                INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
                list_remove_head(scsi_buf_list_get, lpfc_cmd,
                                 struct lpfc_scsi_buf, list);
-               spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
+               spin_unlock(&phba->scsi_buf_list_put_lock);
        }
-       spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
+       spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
        return  lpfc_cmd;
 }
 /**
@@ -1167,11 +1169,10 @@ static struct lpfc_scsi_buf*
 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
        struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
-       unsigned long gflag = 0;
-       unsigned long pflag = 0;
+       unsigned long iflag = 0;
        int found = 0;
 
-       spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+       spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
        list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
                                 &phba->lpfc_scsi_buf_list_get, list) {
                if (lpfc_test_rrq_active(phba, ndlp,
@@ -1182,11 +1183,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
                break;
        }
        if (!found) {
-               spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+               spin_lock(&phba->scsi_buf_list_put_lock);
                list_splice(&phba->lpfc_scsi_buf_list_put,
                            &phba->lpfc_scsi_buf_list_get);
                INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-               spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
+               spin_unlock(&phba->scsi_buf_list_put_lock);
                list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
                                         &phba->lpfc_scsi_buf_list_get, list) {
                        if (lpfc_test_rrq_active(
@@ -1197,7 +1198,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
                        break;
                }
        }
-       spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
+       spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
        if (!found)
                return NULL;
        return  lpfc_cmd;
@@ -3966,11 +3967,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 
        /*
         * Check SLI validation that all the transfer was actually done
-        * (fcpi_parm should be zero).
+        * (fcpi_parm should be zero). Apply check only to reads.
         */
-       } else if (fcpi_parm) {
+       } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
-                                "9029 FCP Data Transfer Check Error: "
+                                "9029 FCP Read Check Error Data: "
                                 "x%x x%x x%x x%x x%x\n",
                                 be32_to_cpu(fcpcmd->fcpDl),
                                 be32_to_cpu(fcprsp->rspResId),
@@ -4342,6 +4343,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
        char tag[2];
        uint8_t *ptr;
        bool sli4;
+       uint32_t fcpdl;
 
        if (!pnode || !NLP_CHK_NODE_ACT(pnode))
                return;
@@ -4389,8 +4391,12 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
                        iocb_cmd->ulpPU = PARM_READ_CHECK;
                        if (vport->cfg_first_burst_size &&
                            (pnode->nlp_flag & NLP_FIRSTBURST)) {
-                               piocbq->iocb.un.fcpi.fcpi_XRdy =
-                                       vport->cfg_first_burst_size;
+                               fcpdl = scsi_bufflen(scsi_cmnd);
+                               if (fcpdl < vport->cfg_first_burst_size)
+                                       piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
+                               else
+                                       piocbq->iocb.un.fcpi.fcpi_XRdy =
+                                               vport->cfg_first_burst_size;
                        }
                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
                        phba->fc4OutputRequests++;
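
The write-path hunk above clamps the advertised first-burst length: fcpi_XRdy becomes the smaller of the command's transfer length and the configured cfg_first_burst_size, so a short write never claims more first-burst data than it will actually send. As arithmetic it is just a min(); a stand-alone sketch with illustrative names:

	#include <stdint.h>

	static uint32_t first_burst(uint32_t xfer_len, uint32_t cfg_first_burst_size)
	{
		return xfer_len < cfg_first_burst_size ? xfer_len
						       : cfg_first_burst_size;
	}
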
@@ -4878,6 +4884,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
                goto out_unlock;
        }
 
+       /* Indicate the IO is being aborted by the driver. */
+       iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+
        /*
         * The scsi command can not be in txq and it is in flight because the
         * pCmd is still pointing at the SCSI command we have to abort. There
@@ -5006,7 +5015,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
        lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
        if (lpfc_cmd == NULL)
                return FAILED;
-       lpfc_cmd->timeout = 60;
+       lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
        lpfc_cmd->rdata = rdata;
 
        status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
index 0392e11..612f489 100644 (file)
@@ -9831,6 +9831,13 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
                                               abort_cmd) != 0)
                        continue;
 
+               /*
+                * If the iocbq is already being aborted, don't take a second
+                * action, but do count it.
+                */
+               if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+                       continue;
+
                /* issue ABTS for this IOCB based on iotag */
                abtsiocb = lpfc_sli_get_iocbq(phba);
                if (abtsiocb == NULL) {
@@ -9838,6 +9845,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
                        continue;
                }
 
+               /* indicate the IO is being aborted by the driver. */
+               iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
                cmd = &iocbq->iocb;
                abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
                abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
@@ -9847,7 +9857,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
                        abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
                abtsiocb->iocb.ulpLe = 1;
                abtsiocb->iocb.ulpClass = cmd->ulpClass;
-               abtsiocb->vport = phba->pport;
+               abtsiocb->vport = vport;
 
                /* ABTS WQE must go to the same WQ as the WQE to be aborted */
                abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
@@ -12233,7 +12243,6 @@ static void __iomem *
 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
 {
        struct pci_dev *pdev;
-       unsigned long bar_map, bar_map_len;
 
        if (!phba->pcidev)
                return NULL;
@@ -12242,25 +12251,10 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
 
        switch (pci_barset) {
        case WQ_PCI_BAR_0_AND_1:
-               if (!phba->pci_bar0_memmap_p) {
-                       bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
-                       bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
-                       phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
-               }
                return phba->pci_bar0_memmap_p;
        case WQ_PCI_BAR_2_AND_3:
-               if (!phba->pci_bar2_memmap_p) {
-                       bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
-                       bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
-                       phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
-               }
                return phba->pci_bar2_memmap_p;
        case WQ_PCI_BAR_4_AND_5:
-               if (!phba->pci_bar4_memmap_p) {
-                       bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
-                       bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
-                       phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
-               }
                return phba->pci_bar4_memmap_p;
        default:
                break;
@@ -15808,7 +15802,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
 void
 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
 {
-       struct lpfc_fcf_pri *fcf_pri;
+       struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
        if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
                                "2762 FCF (x%x) reached driver's book "
@@ -15818,7 +15812,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
        }
        /* Clear the eligible FCF record index bmask */
        spin_lock_irq(&phba->hbalock);
-       list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+       list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
+                                list) {
                if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
                        list_del_init(&fcf_pri->list);
                        break;
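
lpfc_sli4_fcf_rr_index_clear() above now walks fcf_pri_list with list_for_each_entry_safe(), the idiom for loops that unlink the entry they are standing on: the second cursor is loaded before list_del_init() runs, so iteration would stay valid even if the loop continued past the deletion. The generic shape of the idiom, with hypothetical names (struct item, head, wanted):

	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, &head, list) {
		if (it->index == wanted) {
			list_del_init(&it->list);	/* safe: 'tmp' already points past it */
			break;
		}
	}
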
index 9761799..6b0f247 100644 (file)
@@ -58,7 +58,7 @@ struct lpfc_iocbq {
 
        IOCB_t iocb;            /* IOCB cmd */
        uint8_t retry;          /* retry counter for IOCB cmd - if needed */
-       uint16_t iocb_flag;
+       uint32_t iocb_flag;
 #define LPFC_IO_LIBDFC         1       /* libdfc iocb */
 #define LPFC_IO_WAKE           2       /* Synchronous I/O completed */
 #define LPFC_IO_WAKE_TMO       LPFC_IO_WAKE /* Synchronous I/O timed out */
@@ -73,11 +73,11 @@ struct lpfc_iocbq {
 #define LPFC_IO_DIF_PASS       0x400   /* T10 DIF IO pass-thru prot */
 #define LPFC_IO_DIF_STRIP      0x800   /* T10 DIF IO strip prot */
 #define LPFC_IO_DIF_INSERT     0x1000  /* T10 DIF IO insert prot */
+#define LPFC_IO_CMD_OUTSTANDING        0x2000 /* timeout handler abort window */
 
 #define LPFC_FIP_ELS_ID_MASK   0xc000  /* ELS_ID range 0-3, non-shifted mask */
 #define LPFC_FIP_ELS_ID_SHIFT  14
 
-       uint8_t rsvd2;
        uint32_t drvrTimeout;   /* driver timeout in seconds */
        uint32_t fcp_wqidx;     /* index to FCP work queue */
        struct lpfc_vport *vport;/* virtual port pointer */
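
In struct lpfc_iocbq the flag word grows from u16 to u32 (and the adjacent rsvd2 byte goes away): with LPFC_FIP_ELS_ID_MASK occupying bits 14-15, the new LPFC_IO_CMD_OUTSTANDING bit (0x2000, bit 13) used up the last bit a 16-bit field could offer, so widening the field leaves room for future flags. A build-time check of that kind of assumption might look like the following (illustrative only, not in the driver):

	#define LPFC_IO_CMD_OUTSTANDING	0x2000
	#define LPFC_FIP_ELS_ID_MASK	0xc000

	_Static_assert((LPFC_IO_CMD_OUTSTANDING & LPFC_FIP_ELS_ID_MASK) == 0,
		       "iocb_flag bit collides with the FIP ELS ID field");
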
index 5bcc382..85120b7 100644 (file)
@@ -523,7 +523,7 @@ struct lpfc_sli4_hba {
        struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
        struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
 
-       uint8_t fw_func_mode;   /* FW function protocol mode */
+       uint32_t fw_func_mode;  /* FW function protocol mode */
        uint32_t ulp0_mode;     /* ULP0 protocol mode */
        uint32_t ulp1_mode;     /* ULP1 protocol mode */
 
index 21859d2..f58f183 100644 (file)
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.41"
+#define LPFC_DRIVER_VERSION "8.3.42"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index 04a42a5..0c73ba4 100644 (file)
@@ -33,9 +33,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION                                "06.600.18.00-rc1"
-#define MEGASAS_RELDATE                                "May. 15, 2013"
-#define MEGASAS_EXT_VERSION                    "Wed. May. 15 17:00:00 PDT 2013"
+#define MEGASAS_VERSION                                "06.700.06.00-rc1"
+#define MEGASAS_RELDATE                                "Aug. 31, 2013"
+#define MEGASAS_EXT_VERSION                    "Sat. Aug. 31 17:00:00 PDT 2013"
 
 /*
  * Device IDs
 
 #define MR_DCMD_CTRL_GET_INFO                  0x01010000
 #define MR_DCMD_LD_GET_LIST                    0x03010000
+#define MR_DCMD_LD_LIST_QUERY                  0x03010100
 
 #define MR_DCMD_CTRL_CACHE_FLUSH               0x01101000
 #define MR_FLUSH_CTRL_CACHE                    0x01
@@ -345,6 +346,15 @@ enum MR_PD_QUERY_TYPE {
        MR_PD_QUERY_TYPE_EXPOSED_TO_HOST    = 5,
 };
 
+enum MR_LD_QUERY_TYPE {
+       MR_LD_QUERY_TYPE_ALL             = 0,
+       MR_LD_QUERY_TYPE_EXPOSED_TO_HOST = 1,
+       MR_LD_QUERY_TYPE_USED_TGT_IDS    = 2,
+       MR_LD_QUERY_TYPE_CLUSTER_ACCESS  = 3,
+       MR_LD_QUERY_TYPE_CLUSTER_LOCALE  = 4,
+};
+
+
 #define MR_EVT_CFG_CLEARED                              0x0004
 #define MR_EVT_LD_STATE_CHANGE                          0x0051
 #define MR_EVT_PD_INSERTED                              0x005b
@@ -435,6 +445,14 @@ struct MR_LD_LIST {
        } ldList[MAX_LOGICAL_DRIVES];
 } __packed;
 
+struct MR_LD_TARGETID_LIST {
+       u32     size;
+       u32     count;
+       u8      pad[3];
+       u8      targetId[MAX_LOGICAL_DRIVES];
+};
+
+
 /*
  * SAS controller properties
  */
@@ -474,21 +492,39 @@ struct megasas_ctrl_prop {
        * a bit in the following structure.
        */
        struct {
-               u32     copyBackDisabled            : 1;
-               u32     SMARTerEnabled              : 1;
-               u32     prCorrectUnconfiguredAreas  : 1;
-               u32     useFdeOnly                  : 1;
-               u32     disableNCQ                  : 1;
-               u32     SSDSMARTerEnabled           : 1;
-               u32     SSDPatrolReadEnabled        : 1;
-               u32     enableSpinDownUnconfigured  : 1;
-               u32     autoEnhancedImport          : 1;
-               u32     enableSecretKeyControl      : 1;
-               u32     disableOnlineCtrlReset      : 1;
-               u32     allowBootWithPinnedCache    : 1;
-               u32     disableSpinDownHS           : 1;
-               u32     enableJBOD                  : 1;
-               u32     reserved                    :18;
+#if   defined(__BIG_ENDIAN_BITFIELD)
+               u32     reserved:18;
+               u32     enableJBOD:1;
+               u32     disableSpinDownHS:1;
+               u32     allowBootWithPinnedCache:1;
+               u32     disableOnlineCtrlReset:1;
+               u32     enableSecretKeyControl:1;
+               u32     autoEnhancedImport:1;
+               u32     enableSpinDownUnconfigured:1;
+               u32     SSDPatrolReadEnabled:1;
+               u32     SSDSMARTerEnabled:1;
+               u32     disableNCQ:1;
+               u32     useFdeOnly:1;
+               u32     prCorrectUnconfiguredAreas:1;
+               u32     SMARTerEnabled:1;
+               u32     copyBackDisabled:1;
+#else
+               u32     copyBackDisabled:1;
+               u32     SMARTerEnabled:1;
+               u32     prCorrectUnconfiguredAreas:1;
+               u32     useFdeOnly:1;
+               u32     disableNCQ:1;
+               u32     SSDSMARTerEnabled:1;
+               u32     SSDPatrolReadEnabled:1;
+               u32     enableSpinDownUnconfigured:1;
+               u32     autoEnhancedImport:1;
+               u32     enableSecretKeyControl:1;
+               u32     disableOnlineCtrlReset:1;
+               u32     allowBootWithPinnedCache:1;
+               u32     disableSpinDownHS:1;
+               u32     enableJBOD:1;
+               u32     reserved:18;
+#endif
        } OnOffProperties;
        u8 autoSnapVDSpace;
        u8 viewSpace;
@@ -802,6 +838,30 @@ struct megasas_ctrl_info {
        u16 cacheMemorySize;                    /*7A2h */
 
        struct {                                /*7A4h */
+#if   defined(__BIG_ENDIAN_BITFIELD)
+               u32     reserved:11;
+               u32     supportUnevenSpans:1;
+               u32     dedicatedHotSparesLimited:1;
+               u32     headlessMode:1;
+               u32     supportEmulatedDrives:1;
+               u32     supportResetNow:1;
+               u32     realTimeScheduler:1;
+               u32     supportSSDPatrolRead:1;
+               u32     supportPerfTuning:1;
+               u32     disableOnlinePFKChange:1;
+               u32     supportJBOD:1;
+               u32     supportBootTimePFKChange:1;
+               u32     supportSetLinkSpeed:1;
+               u32     supportEmergencySpares:1;
+               u32     supportSuspendResumeBGops:1;
+               u32     blockSSDWriteCacheChange:1;
+               u32     supportShieldState:1;
+               u32     supportLdBBMInfo:1;
+               u32     supportLdPIType3:1;
+               u32     supportLdPIType2:1;
+               u32     supportLdPIType1:1;
+               u32     supportPIcontroller:1;
+#else
                u32     supportPIcontroller:1;
                u32     supportLdPIType1:1;
                u32     supportLdPIType2:1;
@@ -827,6 +887,7 @@ struct megasas_ctrl_info {
 
                u32     supportUnevenSpans:1;
                u32     reserved:11;
+#endif
        } adapterOperations2;
 
        u8  driverVersion[32];                  /*7A8h */
@@ -863,7 +924,7 @@ struct megasas_ctrl_info {
  * ===============================
  */
 #define MEGASAS_MAX_PD_CHANNELS                        2
-#define MEGASAS_MAX_LD_CHANNELS                        2
+#define MEGASAS_MAX_LD_CHANNELS                        1
 #define MEGASAS_MAX_CHANNELS                   (MEGASAS_MAX_PD_CHANNELS + \
                                                MEGASAS_MAX_LD_CHANNELS)
 #define MEGASAS_MAX_DEV_PER_CHANNEL            128
@@ -1051,9 +1112,15 @@ union megasas_sgl_frame {
 
 typedef union _MFI_CAPABILITIES {
        struct {
+#if   defined(__BIG_ENDIAN_BITFIELD)
+               u32     reserved:30;
+               u32     support_additional_msix:1;
+               u32     support_fp_remote_lun:1;
+#else
                u32     support_fp_remote_lun:1;
                u32     support_additional_msix:1;
                u32     reserved:30;
+#endif
        } mfi_capabilities;
        u32     reg;
 } MFI_CAPABILITIES;
@@ -1656,4 +1723,16 @@ struct megasas_mgmt_info {
        int max_index;
 };
 
+u8
+MR_BuildRaidContext(struct megasas_instance *instance,
+                   struct IO_REQUEST_INFO *io_info,
+                   struct RAID_CONTEXT *pRAID_Context,
+                   struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN);
+u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+
 #endif                         /*LSI_MEGARAID_SAS_H */
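
The megaraid_sas.h hunks above wrap every firmware-visible bitfield struct in #if defined(__BIG_ENDIAN_BITFIELD) / #else, so that whichever way the compiler allocates bitfields on the host, the bits land where the little-endian firmware expects them. The pattern reduced to two flags, with made-up field names:

	struct fw_onoff_bits {
	#if defined(__BIG_ENDIAN_BITFIELD)
		u32	reserved:30;
		u32	feature_b:1;
		u32	feature_a:1;	/* lowest bit of the little-endian word */
	#else
		u32	feature_a:1;
		u32	feature_b:1;
		u32	reserved:30;
	#endif
	};
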
index 1f0ca68..3020921 100644 (file)
@@ -18,7 +18,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  *  FILE: megaraid_sas_base.c
- *  Version : 06.600.18.00-rc1
+ *  Version : 06.700.06.00-rc1
  *
  *  Authors: LSI Corporation
  *           Sreenivas Bagalkote
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
 
 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
 static int megasas_get_pd_list(struct megasas_instance *instance);
+static int megasas_ld_list_query(struct megasas_instance *instance,
+                                u8 query_type);
 static int megasas_issue_init_mfi(struct megasas_instance *instance);
 static int megasas_register_aen(struct megasas_instance *instance,
                                u32 seq_num, u32 class_locale_word);
@@ -374,13 +376,11 @@ static int
 megasas_check_reset_xscale(struct megasas_instance *instance,
                struct megasas_register_set __iomem *regs)
 {
-       u32 consumer;
-       consumer = *instance->consumer;
 
        if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
-               (*instance->consumer == MEGASAS_ADPRESET_INPROG_SIGN)) {
+           (le32_to_cpu(*instance->consumer) ==
+               MEGASAS_ADPRESET_INPROG_SIGN))
                return 1;
-       }
        return 0;
 }
 
@@ -629,9 +629,10 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
 {
        unsigned long flags;
        spin_lock_irqsave(&instance->hba_lock, flags);
-       writel(0, &(regs)->inbound_high_queue_port);
-       writel((frame_phys_addr | (frame_count<<1))|1,
-               &(regs)->inbound_low_queue_port);
+       writel(upper_32_bits(frame_phys_addr),
+              &(regs)->inbound_high_queue_port);
+       writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
+              &(regs)->inbound_low_queue_port);
        spin_unlock_irqrestore(&instance->hba_lock, flags);
 }
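
megasas_fire_cmd_skinny() above now posts the full 64-bit frame address: the high half goes to inbound_high_queue_port via upper_32_bits() (previously hard-coded to zero), and the low half, combined with the frame count and the valid bit, to inbound_low_queue_port. The same split appears later when the abort frame is filled in, there with explicit cpu_to_le32() annotations on the little-endian fields; schematically, with illustrative field names:

	/* 64-bit DMA address -> two little-endian 32-bit frame fields */
	frame->addr_hi = cpu_to_le32(upper_32_bits(dma_addr));
	frame->addr_lo = cpu_to_le32(lower_32_bits(dma_addr));
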
 
@@ -879,8 +880,8 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
 
        struct megasas_header *frame_hdr = &cmd->frame->hdr;
 
-       frame_hdr->cmd_status = 0xFF;
-       frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+       frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
+       frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
 
        /*
         * Issue the frame using inbound queue port
@@ -944,10 +945,12 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
         */
        abort_fr->cmd = MFI_CMD_ABORT;
        abort_fr->cmd_status = 0xFF;
-       abort_fr->flags = 0;
-       abort_fr->abort_context = cmd_to_abort->index;
-       abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
-       abort_fr->abort_mfi_phys_addr_hi = 0;
+       abort_fr->flags = cpu_to_le16(0);
+       abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
+       abort_fr->abort_mfi_phys_addr_lo =
+               cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
+       abort_fr->abort_mfi_phys_addr_hi =
+               cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
 
        cmd->sync_cmd = 1;
        cmd->cmd_status = 0xFF;
@@ -986,8 +989,8 @@ megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
 
        if (sge_count) {
                scsi_for_each_sg(scp, os_sgl, sge_count, i) {
-                       mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
-                       mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
+                       mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+                       mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
                }
        }
        return sge_count;
@@ -1015,8 +1018,8 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
 
        if (sge_count) {
                scsi_for_each_sg(scp, os_sgl, sge_count, i) {
-                       mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
-                       mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
+                       mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+                       mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
                }
        }
        return sge_count;
@@ -1043,10 +1046,11 @@ megasas_make_sgl_skinny(struct megasas_instance *instance,
 
        if (sge_count) {
                scsi_for_each_sg(scp, os_sgl, sge_count, i) {
-                       mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl);
+                       mfi_sgl->sge_skinny[i].length =
+                               cpu_to_le32(sg_dma_len(os_sgl));
                        mfi_sgl->sge_skinny[i].phys_addr =
-                                               sg_dma_address(os_sgl);
-                       mfi_sgl->sge_skinny[i].flag = 0;
+                               cpu_to_le64(sg_dma_address(os_sgl));
+                       mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
                }
        }
        return sge_count;
@@ -1155,8 +1159,8 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
        pthru->cdb_len = scp->cmd_len;
        pthru->timeout = 0;
        pthru->pad_0 = 0;
-       pthru->flags = flags;
-       pthru->data_xfer_len = scsi_bufflen(scp);
+       pthru->flags = cpu_to_le16(flags);
+       pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
 
        memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
 
@@ -1168,18 +1172,18 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
                if ((scp->request->timeout / HZ) > 0xFFFF)
                        pthru->timeout = 0xFFFF;
                else
-                       pthru->timeout = scp->request->timeout / HZ;
+                       pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
        }
 
        /*
         * Construct SGL
         */
        if (instance->flag_ieee == 1) {
-               pthru->flags |= MFI_FRAME_SGL64;
+               pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
                pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
                                                      &pthru->sgl);
        } else if (IS_DMA64) {
-               pthru->flags |= MFI_FRAME_SGL64;
+               pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
                pthru->sge_count = megasas_make_sgl64(instance, scp,
                                                      &pthru->sgl);
        } else
@@ -1196,8 +1200,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
         * Sense info specific
         */
        pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
-       pthru->sense_buf_phys_addr_hi = 0;
-       pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
+       pthru->sense_buf_phys_addr_hi =
+               cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
+       pthru->sense_buf_phys_addr_lo =
+               cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
 
        /*
         * Compute the total number of frames this command consumes. FW uses
@@ -1248,7 +1254,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
        ldio->timeout = 0;
        ldio->reserved_0 = 0;
        ldio->pad_0 = 0;
-       ldio->flags = flags;
+       ldio->flags = cpu_to_le16(flags);
        ldio->start_lba_hi = 0;
        ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
 
@@ -1256,52 +1262,59 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
         * 6-byte READ(0x08) or WRITE(0x0A) cdb
         */
        if (scp->cmd_len == 6) {
-               ldio->lba_count = (u32) scp->cmnd[4];
-               ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) |
-                   ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
+               ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
+               ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
+                                                ((u32) scp->cmnd[2] << 8) |
+                                                (u32) scp->cmnd[3]);
 
-               ldio->start_lba_lo &= 0x1FFFFF;
+               ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
        }
 
        /*
         * 10-byte READ(0x28) or WRITE(0x2A) cdb
         */
        else if (scp->cmd_len == 10) {
-               ldio->lba_count = (u32) scp->cmnd[8] |
-                   ((u32) scp->cmnd[7] << 8);
-               ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
-                   ((u32) scp->cmnd[3] << 16) |
-                   ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+               ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
+                                             ((u32) scp->cmnd[7] << 8));
+               ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+                                                ((u32) scp->cmnd[3] << 16) |
+                                                ((u32) scp->cmnd[4] << 8) |
+                                                (u32) scp->cmnd[5]);
        }
 
        /*
         * 12-byte READ(0xA8) or WRITE(0xAA) cdb
         */
        else if (scp->cmd_len == 12) {
-               ldio->lba_count = ((u32) scp->cmnd[6] << 24) |
-                   ((u32) scp->cmnd[7] << 16) |
-                   ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+               ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
+                                             ((u32) scp->cmnd[7] << 16) |
+                                             ((u32) scp->cmnd[8] << 8) |
+                                             (u32) scp->cmnd[9]);
 
-               ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
-                   ((u32) scp->cmnd[3] << 16) |
-                   ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+               ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+                                                ((u32) scp->cmnd[3] << 16) |
+                                                ((u32) scp->cmnd[4] << 8) |
+                                                (u32) scp->cmnd[5]);
        }
 
        /*
         * 16-byte READ(0x88) or WRITE(0x8A) cdb
         */
        else if (scp->cmd_len == 16) {
-               ldio->lba_count = ((u32) scp->cmnd[10] << 24) |
-                   ((u32) scp->cmnd[11] << 16) |
-                   ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
+               ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
+                                             ((u32) scp->cmnd[11] << 16) |
+                                             ((u32) scp->cmnd[12] << 8) |
+                                             (u32) scp->cmnd[13]);
 
-               ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) |
-                   ((u32) scp->cmnd[7] << 16) |
-                   ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+               ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
+                                                ((u32) scp->cmnd[7] << 16) |
+                                                ((u32) scp->cmnd[8] << 8) |
+                                                (u32) scp->cmnd[9]);
 
-               ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) |
-                   ((u32) scp->cmnd[3] << 16) |
-                   ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+               ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+                                                ((u32) scp->cmnd[3] << 16) |
+                                                ((u32) scp->cmnd[4] << 8) |
+                                                (u32) scp->cmnd[5]);
 
        }
 
@@ -1309,11 +1322,11 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
         * Construct SGL
         */
        if (instance->flag_ieee) {
-               ldio->flags |= MFI_FRAME_SGL64;
+               ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
                ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
                                              &ldio->sgl);
        } else if (IS_DMA64) {
-               ldio->flags |= MFI_FRAME_SGL64;
+               ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
                ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
        } else
                ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
@@ -1329,7 +1342,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
         */
        ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
        ldio->sense_buf_phys_addr_hi = 0;
-       ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
+       ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
 
        /*
         * Compute the total number of frames this command consumes. FW uses
@@ -1400,20 +1413,32 @@ megasas_dump_pending_frames(struct megasas_instance *instance)
                        ldio = (struct megasas_io_frame *)cmd->frame;
                        mfi_sgl = &ldio->sgl;
                        sgcount = ldio->sge_count;
-                       printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no, cmd->frame_count,ldio->cmd,ldio->target_id, ldio->start_lba_lo,ldio->start_lba_hi,ldio->sense_buf_phys_addr_lo,sgcount);
+                       printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
+                       " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
+                       instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
+                       le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
+                       le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
                }
                else {
                        pthru = (struct megasas_pthru_frame *) cmd->frame;
                        mfi_sgl = &pthru->sgl;
                        sgcount = pthru->sge_count;
-                       printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no,cmd->frame_count,pthru->cmd,pthru->target_id,pthru->lun,pthru->cdb_len , pthru->data_xfer_len,pthru->sense_buf_phys_addr_lo,sgcount);
+                       printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
+                       "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
+                       instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
+                       pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
+                       le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
                }
        if(megasas_dbg_lvl & MEGASAS_DBG_LVL){
                for (n = 0; n < sgcount; n++){
                        if (IS_DMA64)
-                               printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%08lx ",mfi_sgl->sge64[n].length , (unsigned long)mfi_sgl->sge64[n].phys_addr) ;
+                               printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%llx ",
+                                       le32_to_cpu(mfi_sgl->sge64[n].length),
+                                       le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
                        else
-                               printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",mfi_sgl->sge32[n].length , mfi_sgl->sge32[n].phys_addr) ;
+                               printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",
+                                       le32_to_cpu(mfi_sgl->sge32[n].length),
+                                       le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
                        }
                }
                printk(KERN_ERR "\n");
@@ -1674,11 +1699,11 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
 
        spin_lock_irqsave(&instance->completion_lock, flags);
 
-       producer = *instance->producer;
-       consumer = *instance->consumer;
+       producer = le32_to_cpu(*instance->producer);
+       consumer = le32_to_cpu(*instance->consumer);
 
        while (consumer != producer) {
-               context = instance->reply_queue[consumer];
+               context = le32_to_cpu(instance->reply_queue[consumer]);
                if (context >= instance->max_fw_cmds) {
                        printk(KERN_ERR "Unexpected context value %x\n",
                                context);
@@ -1695,7 +1720,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
                }
        }
 
-       *instance->consumer = producer;
+       *instance->consumer = cpu_to_le32(producer);
 
        spin_unlock_irqrestore(&instance->completion_lock, flags);
 
@@ -1716,7 +1741,7 @@ void megasas_do_ocr(struct megasas_instance *instance)
        if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
        (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
        (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
-               *instance->consumer     = MEGASAS_ADPRESET_INPROG_SIGN;
+               *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
        }
        instance->instancet->disable_intr(instance);
        instance->adprecovery   = MEGASAS_ADPRESET_SM_INFAULT;
@@ -2186,6 +2211,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
        struct megasas_header *hdr = &cmd->frame->hdr;
        unsigned long flags;
        struct fusion_context *fusion = instance->ctrl_context;
+       u32 opcode;
 
        /* flag for the retry reset */
        cmd->retry_for_fw_reset = 0;
@@ -2287,9 +2313,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
        case MFI_CMD_SMP:
        case MFI_CMD_STP:
        case MFI_CMD_DCMD:
+               opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
                /* Check for LD map update */
-               if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
-                   (cmd->frame->dcmd.mbox.b[1] == 1)) {
+               if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
+                       && (cmd->frame->dcmd.mbox.b[1] == 1)) {
                        fusion->fast_path_io = 0;
                        spin_lock_irqsave(instance->host->host_lock, flags);
                        if (cmd->frame->hdr.cmd_status != 0) {
@@ -2323,8 +2350,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
                                               flags);
                        break;
                }
-               if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
-                       cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
+               if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
+                   opcode == MR_DCMD_CTRL_EVENT_GET) {
                        spin_lock_irqsave(&poll_aen_lock, flags);
                        megasas_poll_wait_aen = 0;
                        spin_unlock_irqrestore(&poll_aen_lock, flags);
@@ -2333,7 +2360,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
                /*
                 * See if got an event notification
                 */
-               if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
+               if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
                        megasas_service_aen(instance, cmd);
                else
                        megasas_complete_int_cmd(instance, cmd);
@@ -2606,7 +2633,7 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
                                        PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
 
                                *instance->consumer =
-                                       MEGASAS_ADPRESET_INPROG_SIGN;
+                                       cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
                        }
 
 
@@ -2983,7 +3010,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
                }
 
                memset(cmd->frame, 0, total_sz);
-               cmd->frame->io.context = cmd->index;
+               cmd->frame->io.context = cpu_to_le32(cmd->index);
                cmd->frame->io.pad_0 = 0;
                if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
                    (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
@@ -3143,13 +3170,13 @@ megasas_get_pd_list(struct megasas_instance *instance)
        dcmd->cmd = MFI_CMD_DCMD;
        dcmd->cmd_status = 0xFF;
        dcmd->sge_count = 1;
-       dcmd->flags = MFI_FRAME_DIR_READ;
+       dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
        dcmd->timeout = 0;
        dcmd->pad_0 = 0;
-       dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
-       dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
-       dcmd->sgl.sge32[0].phys_addr = ci_h;
-       dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
+       dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
+       dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
+       dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+       dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
 
        if (!megasas_issue_polled(instance, cmd)) {
                ret = 0;
@@ -3164,16 +3191,16 @@ megasas_get_pd_list(struct megasas_instance *instance)
        pd_addr = ci->addr;
 
        if ( ret == 0 &&
-               (ci->count <
+            (le32_to_cpu(ci->count) <
                  (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
 
                memset(instance->pd_list, 0,
                        MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
 
-               for (pd_index = 0; pd_index < ci->count; pd_index++) {
+               for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
 
                        instance->pd_list[pd_addr->deviceId].tid        =
-                                                       pd_addr->deviceId;
+                               le16_to_cpu(pd_addr->deviceId);
                        instance->pd_list[pd_addr->deviceId].driveType  =
                                                        pd_addr->scsiDevType;
                        instance->pd_list[pd_addr->deviceId].driveState =
@@ -3207,6 +3234,7 @@ megasas_get_ld_list(struct megasas_instance *instance)
        struct megasas_dcmd_frame *dcmd;
        struct MR_LD_LIST *ci;
        dma_addr_t ci_h = 0;
+       u32 ld_count;
 
        cmd = megasas_get_cmd(instance);
 
@@ -3233,12 +3261,12 @@ megasas_get_ld_list(struct megasas_instance *instance)
        dcmd->cmd = MFI_CMD_DCMD;
        dcmd->cmd_status = 0xFF;
        dcmd->sge_count = 1;
-       dcmd->flags = MFI_FRAME_DIR_READ;
+       dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
        dcmd->timeout = 0;
-       dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
-       dcmd->opcode = MR_DCMD_LD_GET_LIST;
-       dcmd->sgl.sge32[0].phys_addr = ci_h;
-       dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
+       dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
+       dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
+       dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+       dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
        dcmd->pad_0  = 0;
 
        if (!megasas_issue_polled(instance, cmd)) {
@@ -3247,12 +3275,14 @@ megasas_get_ld_list(struct megasas_instance *instance)
                ret = -1;
        }
 
+       ld_count = le32_to_cpu(ci->ldCount);
+
        /* the following function will get the instance PD LIST */
 
-       if ((ret == 0) && (ci->ldCount <= MAX_LOGICAL_DRIVES)) {
+       if ((ret == 0) && (ld_count <= MAX_LOGICAL_DRIVES)) {
                memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
 
-               for (ld_index = 0; ld_index < ci->ldCount; ld_index++) {
+               for (ld_index = 0; ld_index < ld_count; ld_index++) {
                        if (ci->ldList[ld_index].state != 0) {
                                ids = ci->ldList[ld_index].ref.targetId;
                                instance->ld_ids[ids] =
@@ -3271,6 +3301,87 @@ megasas_get_ld_list(struct megasas_instance *instance)
 }
 
 /**
+ * megasas_ld_list_query -     Returns FW's ld_list structure
+ * @instance:                          Adapter soft state
+ * @query_type:                                LD list query type (MR_LD_QUERY_TYPE_*)
+ *
+ * Issues an internal command (DCMD) to get the FW's LD target ID list
+ * structure.  This information is mainly used to find out which logical
+ * drives the FW exposes to the host.
+ */
+static int
+megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
+{
+       int ret = 0, ld_index = 0, ids = 0;
+       struct megasas_cmd *cmd;
+       struct megasas_dcmd_frame *dcmd;
+       struct MR_LD_TARGETID_LIST *ci;
+       dma_addr_t ci_h = 0;
+       u32 tgtid_count;
+
+       cmd = megasas_get_cmd(instance);
+
+       if (!cmd) {
+               printk(KERN_WARNING
+                      "megasas:(megasas_ld_list_query): Failed to get cmd\n");
+               return -ENOMEM;
+       }
+
+       dcmd = &cmd->frame->dcmd;
+
+       ci = pci_alloc_consistent(instance->pdev,
+                                 sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
+
+       if (!ci) {
+               printk(KERN_WARNING
+                      "megasas: Failed to alloc mem for ld_list_query\n");
+               megasas_return_cmd(instance, cmd);
+               return -ENOMEM;
+       }
+
+       memset(ci, 0, sizeof(*ci));
+       memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+       dcmd->mbox.b[0] = query_type;
+
+       dcmd->cmd = MFI_CMD_DCMD;
+       dcmd->cmd_status = 0xFF;
+       dcmd->sge_count = 1;
+       dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+       dcmd->timeout = 0;
+       dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
+       dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
+       dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+       dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
+       dcmd->pad_0  = 0;
+
+       if (!megasas_issue_polled(instance, cmd) && !dcmd->cmd_status) {
+               ret = 0;
+       } else {
+               /* On failure, call older LD list DCMD */
+               ret = 1;
+       }
+
+       tgtid_count = le32_to_cpu(ci->count);
+
+       if ((ret == 0) && (tgtid_count <= (MAX_LOGICAL_DRIVES))) {
+               memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+               for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
+                       ids = ci->targetId[ld_index];
+                       instance->ld_ids[ids] = ci->targetId[ld_index];
+               }
+
+       }
+
+       pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
+                           ci, ci_h);
+
+       megasas_return_cmd(instance, cmd);
+
+       return ret;
+}
+
+/**
  * megasas_get_controller_info -       Returns FW's controller structure
  * @instance:                          Adapter soft state
  * @ctrl_info:                         Controller information structure
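
The DCMD hunks above all apply the same conversion pattern: every field the firmware parses (flags, opcode, transfer length, SGL address and length) is stored with cpu_to_le16()/cpu_to_le32() before the frame is issued, and firmware-written counts such as ci->count are read back through le32_to_cpu() before being used as loop bounds. A minimal userspace sketch of that round trip, using <endian.h> helpers as stand-ins for the kernel macros and an invented two-struct layout rather than the real megasas_dcmd_frame:

    #include <stdint.h>
    #include <stdio.h>
    #include <endian.h>   /* htole16()/htole32()/le32toh() stand in for cpu_to_le16() etc. */

    /* Invented, simplified frame layout -- not the driver's megasas_dcmd_frame. */
    struct demo_dcmd {
            uint16_t flags;          /* little-endian as the firmware reads it */
            uint32_t opcode;         /* little-endian as the firmware reads it */
            uint32_t data_xfer_len;  /* little-endian as the firmware reads it */
    };

    struct demo_pd_list {
            uint32_t count;          /* written by the firmware in little-endian */
    };

    int main(void)
    {
            struct demo_dcmd dcmd;
            struct demo_pd_list reply = { .count = htole32(3) }; /* pretend the FW reported 3 */

            /* Host -> FW: convert CPU-order values before handing the frame over. */
            dcmd.flags = htole16(0x0010);
            dcmd.opcode = htole32(0x02010000);        /* made-up opcode value */
            dcmd.data_xfer_len = htole32(sizeof(reply));

            /* FW -> host: convert once, then use the native value as a loop bound. */
            uint32_t count = le32toh(reply.count);
            for (uint32_t i = 0; i < count; i++)
                    printf("device %u\n", i);

            (void)dcmd;
            return 0;
    }
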
@@ -3313,13 +3424,13 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
        dcmd->cmd = MFI_CMD_DCMD;
        dcmd->cmd_status = 0xFF;
        dcmd->sge_count = 1;
-       dcmd->flags = MFI_FRAME_DIR_READ;
+       dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
        dcmd->timeout = 0;
        dcmd->pad_0 = 0;
-       dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
-       dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
-       dcmd->sgl.sge32[0].phys_addr = ci_h;
-       dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info);
+       dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
+       dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
+       dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+       dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
 
        if (!megasas_issue_polled(instance, cmd)) {
                ret = 0;
@@ -3375,17 +3486,20 @@ megasas_issue_init_mfi(struct megasas_instance *instance)
        memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
        init_frame->context = context;
 
-       initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
-       initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;
+       initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
+       initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
 
-       initq_info->producer_index_phys_addr_lo = instance->producer_h;
-       initq_info->consumer_index_phys_addr_lo = instance->consumer_h;
+       initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
+       initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
 
        init_frame->cmd = MFI_CMD_INIT;
        init_frame->cmd_status = 0xFF;
-       init_frame->queue_info_new_phys_addr_lo = initq_info_h;
+       init_frame->queue_info_new_phys_addr_lo =
+               cpu_to_le32(lower_32_bits(initq_info_h));
+       init_frame->queue_info_new_phys_addr_hi =
+               cpu_to_le32(upper_32_bits(initq_info_h));
 
-       init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
+       init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
 
        /*
         * disable the intr before firing the init frame to FW
@@ -3648,7 +3762,9 @@ static int megasas_init_fw(struct megasas_instance *instance)
        megasas_get_pd_list(instance);
 
        memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
-       megasas_get_ld_list(instance);
+       if (megasas_ld_list_query(instance,
+                                 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+               megasas_get_ld_list(instance);
 
        ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
 
@@ -3665,8 +3781,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
        if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
 
                max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
-                   ctrl_info->max_strips_per_io;
-               max_sectors_2 = ctrl_info->max_request_size;
+                       le16_to_cpu(ctrl_info->max_strips_per_io);
+               max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
 
                tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
 
@@ -3675,14 +3791,18 @@ static int megasas_init_fw(struct megasas_instance *instance)
                        instance->is_imr = 0;
                        dev_info(&instance->pdev->dev, "Controller type: MR,"
                                "Memory size is: %dMB\n",
-                               ctrl_info->memory_size);
+                               le16_to_cpu(ctrl_info->memory_size));
                } else {
                        instance->is_imr = 1;
                        dev_info(&instance->pdev->dev,
                                "Controller type: iMR\n");
                }
+               /* OnOffProperties are converted to CPU byte order */
+               le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
                instance->disableOnlineCtrlReset =
                ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+               /* adapterOperations2 are converted to CPU byte order */
+               le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
                instance->UnevenSpanSupport =
                        ctrl_info->adapterOperations2.supportUnevenSpans;
                if (instance->UnevenSpanSupport) {
@@ -3696,7 +3816,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
 
                }
        }
-
        instance->max_sectors_per_req = instance->max_num_sge *
                                                PAGE_SIZE / 512;
        if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
@@ -3802,20 +3921,24 @@ megasas_get_seq_num(struct megasas_instance *instance,
        dcmd->cmd = MFI_CMD_DCMD;
        dcmd->cmd_status = 0x0;
        dcmd->sge_count = 1;
-       dcmd->flags = MFI_FRAME_DIR_READ;
+       dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
        dcmd->timeout = 0;
        dcmd->pad_0 = 0;
-       dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
-       dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
-       dcmd->sgl.sge32[0].phys_addr = el_info_h;
-       dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info);
+       dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
+       dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
+       dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
+       dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
 
        megasas_issue_blocked_cmd(instance, cmd);
 
        /*
         * Copy the data back into callers buffer
         */
-       memcpy(eli, el_info, sizeof(struct megasas_evt_log_info));
+       eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
+       eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
+       eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
+       eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
+       eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
 
        pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
                            el_info, el_info_h);
@@ -3862,6 +3985,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
        if (instance->aen_cmd) {
 
                prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
+               prev_aen.members.locale = le16_to_cpu(prev_aen.members.locale);
 
                /*
                 * A class whose enum value is smaller is inclusive of all
@@ -3874,7 +3998,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
                 * values
                 */
                if ((prev_aen.members.class <= curr_aen.members.class) &&
-                   !((prev_aen.members.locale & curr_aen.members.locale) ^
+                   !((le16_to_cpu(prev_aen.members.locale) & curr_aen.members.locale) ^
                      curr_aen.members.locale)) {
                        /*
                         * Previously issued event registration includes
@@ -3882,7 +4006,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
                         */
                        return 0;
                } else {
-                       curr_aen.members.locale |= prev_aen.members.locale;
+                       curr_aen.members.locale |= le16_to_cpu(prev_aen.members.locale);
 
                        if (prev_aen.members.class < curr_aen.members.class)
                                curr_aen.members.class = prev_aen.members.class;
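
The class/locale handling above keeps a single outstanding AEN registration that covers both the previous and the new request: locales are a bitmap and get OR'ed together, while the class keeps the smaller (more inclusive) value. A small worked illustration of that merge; the numeric values are placeholders, not the driver's MR_EVT_LOCALE_*/MR_EVT_CLASS_* definitions:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t prev_locale = 0x0001, curr_locale = 0x0004;  /* locale bitmaps */
            int8_t   prev_class  = -1,     curr_class  = 0;       /* smaller = more inclusive */

            if ((prev_class <= curr_class) &&
                !((prev_locale & curr_locale) ^ curr_locale)) {
                    /* Previous registration already covers the new request. */
                    printf("nothing to do\n");
            } else {
                    curr_locale |= prev_locale;
                    if (prev_class < curr_class)
                            curr_class = prev_class;
                    printf("re-register: locale=0x%x class=%d\n", curr_locale, curr_class);
            }
            return 0;
    }
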
@@ -3917,16 +4041,16 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
        dcmd->cmd = MFI_CMD_DCMD;
        dcmd->cmd_status = 0x0;
        dcmd->sge_count = 1;
-       dcmd->flags = MFI_FRAME_DIR_READ;
+       dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
        dcmd->timeout = 0;
        dcmd->pad_0 = 0;
+       dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
+       dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
+       dcmd->mbox.w[0] = cpu_to_le32(seq_num);
        instance->last_seq_num = seq_num;
-       dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
-       dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
-       dcmd->mbox.w[0] = seq_num;
-       dcmd->mbox.w[1] = curr_aen.word;
-       dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
-       dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);
+       dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
+       dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
+       dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
 
        if (instance->aen_cmd != NULL) {
                megasas_return_cmd(instance, cmd);
@@ -3972,8 +4096,9 @@ static int megasas_start_aen(struct megasas_instance *instance)
        class_locale.members.locale = MR_EVT_LOCALE_ALL;
        class_locale.members.class = MR_EVT_CLASS_DEBUG;
 
-       return megasas_register_aen(instance, eli.newest_seq_num + 1,
-                                   class_locale.word);
+       return megasas_register_aen(instance,
+                       le32_to_cpu(eli.newest_seq_num) + 1,
+                       class_locale.word);
 }
 
 /**
@@ -4068,6 +4193,7 @@ megasas_set_dma_mask(struct pci_dev *pdev)
                if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
                        goto fail_set_dma_mask;
        }
+
        return 0;
 
 fail_set_dma_mask:
@@ -4386,11 +4512,11 @@ static void megasas_flush_cache(struct megasas_instance *instance)
        dcmd->cmd = MFI_CMD_DCMD;
        dcmd->cmd_status = 0x0;
        dcmd->sge_count = 0;
-       dcmd->flags = MFI_FRAME_DIR_NONE;
+       dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
        dcmd->timeout = 0;
        dcmd->pad_0 = 0;
        dcmd->data_xfer_len = 0;
-       dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
+       dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
        dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
 
        megasas_issue_blocked_cmd(instance, cmd);
@@ -4431,11 +4557,11 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
        dcmd->cmd = MFI_CMD_DCMD;
        dcmd->cmd_status = 0x0;
        dcmd->sge_count = 0;
-       dcmd->flags = MFI_FRAME_DIR_NONE;
+       dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
        dcmd->timeout = 0;
        dcmd->pad_0 = 0;
        dcmd->data_xfer_len = 0;
-       dcmd->opcode = opcode;
+       dcmd->opcode = cpu_to_le32(opcode);
 
        megasas_issue_blocked_cmd(instance, cmd);
 
@@ -4850,10 +4976,11 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
         * alone separately
         */
        memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
-       cmd->frame->hdr.context = cmd->index;
+       cmd->frame->hdr.context = cpu_to_le32(cmd->index);
        cmd->frame->hdr.pad_0 = 0;
-       cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
-                                  MFI_FRAME_SENSE64);
+       cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
+                                              MFI_FRAME_SGL64 |
+                                              MFI_FRAME_SENSE64));
 
        /*
         * The management interface between applications and the fw uses
@@ -4887,8 +5014,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
                 * We don't change the dma_coherent_mask, so
                 * pci_alloc_consistent only returns 32bit addresses
                 */
-               kern_sge32[i].phys_addr = (u32) buf_handle;
-               kern_sge32[i].length = ioc->sgl[i].iov_len;
+               kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
+               kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
 
                /*
                 * We created a kernel buffer corresponding to the
@@ -4911,7 +5038,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
 
                sense_ptr =
                (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
-               *sense_ptr = sense_handle;
+               *sense_ptr = cpu_to_le32(sense_handle);
        }
 
        /*
@@ -4971,9 +5098,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
        for (i = 0; i < ioc->sge_count; i++) {
                if (kbuff_arr[i])
                        dma_free_coherent(&instance->pdev->dev,
-                                         kern_sge32[i].length,
+                                         le32_to_cpu(kern_sge32[i].length),
                                          kbuff_arr[i],
-                                         kern_sge32[i].phys_addr);
+                                         le32_to_cpu(kern_sge32[i].phys_addr));
        }
 
        megasas_return_cmd(instance, cmd);
@@ -5327,7 +5454,7 @@ megasas_aen_polling(struct work_struct *work)
        host = instance->host;
        if (instance->evt_detail) {
 
-               switch (instance->evt_detail->code) {
+               switch (le32_to_cpu(instance->evt_detail->code)) {
                case MR_EVT_PD_INSERTED:
                        if (megasas_get_pd_list(instance) == 0) {
                        for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
@@ -5389,7 +5516,9 @@ megasas_aen_polling(struct work_struct *work)
                case MR_EVT_LD_OFFLINE:
                case MR_EVT_CFG_CLEARED:
                case MR_EVT_LD_DELETED:
-                       megasas_get_ld_list(instance);
+                       if (megasas_ld_list_query(instance,
+                                       MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+                               megasas_get_ld_list(instance);
                        for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
                                for (j = 0;
                                j < MEGASAS_MAX_DEV_PER_CHANNEL;
@@ -5399,7 +5528,7 @@ megasas_aen_polling(struct work_struct *work)
                                (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
 
                                sdev1 = scsi_device_lookup(host,
-                                       i + MEGASAS_MAX_LD_CHANNELS,
+                                       MEGASAS_MAX_PD_CHANNELS + i,
                                        j,
                                        0);
 
@@ -5418,7 +5547,9 @@ megasas_aen_polling(struct work_struct *work)
                        doscan = 0;
                        break;
                case MR_EVT_LD_CREATED:
-                       megasas_get_ld_list(instance);
+                       if (megasas_ld_list_query(instance,
+                                       MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+                               megasas_get_ld_list(instance);
                        for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
                                for (j = 0;
                                        j < MEGASAS_MAX_DEV_PER_CHANNEL;
@@ -5427,14 +5558,14 @@ megasas_aen_polling(struct work_struct *work)
                                        (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
 
                                        sdev1 = scsi_device_lookup(host,
-                                               i+MEGASAS_MAX_LD_CHANNELS,
+                                               MEGASAS_MAX_PD_CHANNELS + i,
                                                j, 0);
 
                                        if (instance->ld_ids[ld_index] !=
                                                                0xff) {
                                                if (!sdev1) {
                                                        scsi_add_device(host,
-                                                               i + 2,
+                                               MEGASAS_MAX_PD_CHANNELS + i,
                                                                j, 0);
                                                }
                                        }
@@ -5483,18 +5614,20 @@ megasas_aen_polling(struct work_struct *work)
                        }
                }
 
-               megasas_get_ld_list(instance);
+               if (megasas_ld_list_query(instance,
+                                         MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+                       megasas_get_ld_list(instance);
                for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
                        for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
                                ld_index =
                                (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
 
                                sdev1 = scsi_device_lookup(host,
-                                       i+MEGASAS_MAX_LD_CHANNELS, j, 0);
+                                       MEGASAS_MAX_PD_CHANNELS + i, j, 0);
                                if (instance->ld_ids[ld_index] != 0xff) {
                                        if (!sdev1) {
                                                scsi_add_device(host,
-                                                               i+2,
+                                               MEGASAS_MAX_PD_CHANNELS + i,
                                                                j, 0);
                                        } else {
                                                scsi_device_put(sdev1);
@@ -5514,7 +5647,7 @@ megasas_aen_polling(struct work_struct *work)
                return ;
        }
 
-       seq_num = instance->evt_detail->seq_num + 1;
+       seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
 
        /* Register AEN with FW for latest sequence number plus 1 */
        class_locale.members.reserved = 0;
index 4f401f7..e24b6eb 100644
@@ -126,17 +126,17 @@ static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map)
        return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
 }
 
-static u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
 {
-       return map->raidMap.arMapInfo[ar].pd[arm];
+       return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
 }
 
-static u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
 {
-       return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef;
+       return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
 }
 
-static u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
 {
        return map->raidMap.devHndlInfo[pd].curDevHdl;
 }
@@ -148,7 +148,7 @@ u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
 
 u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
 {
-       return map->raidMap.ldTgtIdToLd[ldTgtId];
+       return le16_to_cpu(map->raidMap.ldTgtIdToLd[ldTgtId]);
 }
 
 static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
@@ -167,18 +167,22 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
        struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info;
        PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
        struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
+       struct MR_LD_RAID         *raid;
+       int ldCount, num_lds;
+       u16 ld;
 
-       if (pFwRaidMap->totalSize !=
+       if (le32_to_cpu(pFwRaidMap->totalSize) !=
            (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) +
-            (sizeof(struct MR_LD_SPAN_MAP) *pFwRaidMap->ldCount))) {
+            (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) {
                printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n",
                       (unsigned int)((sizeof(struct MR_FW_RAID_MAP) -
                                       sizeof(struct MR_LD_SPAN_MAP)) +
                                      (sizeof(struct MR_LD_SPAN_MAP) *
-                                      pFwRaidMap->ldCount)));
+                                       le32_to_cpu(pFwRaidMap->ldCount))));
                printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize "
                       ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
-                      pFwRaidMap->totalSize);
+                       le32_to_cpu(pFwRaidMap->totalSize));
                return 0;
        }
 
@@ -187,6 +191,15 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
 
        mr_update_load_balance_params(map, lbInfo);
 
+       num_lds = le32_to_cpu(map->raidMap.ldCount);
+
+       /* Convert RAID capability values to CPU byte order */
+       for (ldCount = 0; ldCount < num_lds; ldCount++) {
+               ld = MR_TargetIdToLdGet(ldCount, map);
+               raid = MR_LdRaidGet(ld, map);
+               le32_to_cpus((u32 *)&raid->capability);
+       }
+
        return 1;
 }
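
Several hunks (raid->capability here, OnOffProperties and adapterOperations2 in megasas_init_fw(), mfi_capabilities in megasas_ioc_init_fusion()) convert a 32-bit word in place with le32_to_cpus()/cpu_to_le32s() so the rest of the code can keep testing its bits natively. A userspace sketch of that in-place swap, with an invented capability bit standing in for the driver's bitfield struct:

    #include <stdint.h>
    #include <endian.h>

    /* Stand-in for the kernel's le32_to_cpus(): byte-swap a 32-bit word in place. */
    static inline void demo_le32_to_cpus(uint32_t *p)
    {
            *p = le32toh(*p);
    }

    #define DEMO_FP_NON_RW_CAPABLE  0x00000001u   /* invented capability bit */

    int main(void)
    {
            uint32_t capability = htole32(DEMO_FP_NON_RW_CAPABLE); /* as the FW wrote it */

            /* Convert once, in place; later code tests bits in CPU byte order. */
            demo_le32_to_cpus(&capability);

            return (capability & DEMO_FP_NON_RW_CAPABLE) ? 0 : 1;
    }
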
 
@@ -200,23 +213,20 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
 
        for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
 
-               for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
+               for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
                        quad = &pSpanBlock->block_span_info.quad[j];
 
-                       if (quad->diff == 0)
+                       if (le32_to_cpu(quad->diff) == 0)
                                return SPAN_INVALID;
-                       if (quad->logStart <= row  &&  row <= quad->logEnd  &&
-                           (mega_mod64(row-quad->logStart, quad->diff)) == 0) {
+                       if (le64_to_cpu(quad->logStart) <= row && row <=
+                               le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
+                               le32_to_cpu(quad->diff))) == 0) {
                                if (span_blk != NULL) {
                                        u64  blk, debugBlk;
-                                       blk =
-                                               mega_div64_32(
-                                                       (row-quad->logStart),
-                                                       quad->diff);
+                                       blk =  mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
                                        debugBlk = blk;
 
-                                       blk = (blk + quad->offsetInSpan) <<
-                                               raid->stripeShift;
+                                       blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
                                        *span_blk = blk;
                                }
                                return span;
@@ -257,8 +267,8 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
                for (span = 0; span < raid->spanDepth; span++)
                        dev_dbg(&instance->pdev->dev, "Span=%x,"
                        " number of quads=%x\n", span,
-                       map->raidMap.ldSpanMap[ld].spanBlock[span].
-                       block_span_info.noElements);
+                       le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+                       block_span_info.noElements));
                for (element = 0; element < MAX_QUAD_DEPTH; element++) {
                        span_set = &(ldSpanInfo[ld].span_set[element]);
                        if (span_set->span_row_data_width == 0)
@@ -286,22 +296,22 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
                                (long unsigned int)span_set->data_strip_end);
 
                        for (span = 0; span < raid->spanDepth; span++) {
-                               if (map->raidMap.ldSpanMap[ld].spanBlock[span].
-                                       block_span_info.noElements >=
+                               if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+                                       block_span_info.noElements) >=
                                        element + 1) {
                                        quad = &map->raidMap.ldSpanMap[ld].
                                                spanBlock[span].block_span_info.
                                                quad[element];
                                dev_dbg(&instance->pdev->dev, "Span=%x,"
                                        "Quad=%x, diff=%x\n", span,
-                                       element, quad->diff);
+                                       element, le32_to_cpu(quad->diff));
                                dev_dbg(&instance->pdev->dev,
                                        "offset_in_span=0x%08lx\n",
-                                       (long unsigned int)quad->offsetInSpan);
+                                       (long unsigned int)le64_to_cpu(quad->offsetInSpan));
                                dev_dbg(&instance->pdev->dev,
                                        "logical start=0x%08lx, end=0x%08lx\n",
-                                       (long unsigned int)quad->logStart,
-                                       (long unsigned int)quad->logEnd);
+                                       (long unsigned int)le64_to_cpu(quad->logStart),
+                                       (long unsigned int)le64_to_cpu(quad->logEnd));
                                }
                        }
                }
@@ -348,23 +358,23 @@ u32 mr_spanset_get_span_block(struct megasas_instance *instance,
                        continue;
 
                for (span = 0; span < raid->spanDepth; span++)
-                       if (map->raidMap.ldSpanMap[ld].spanBlock[span].
-                               block_span_info.noElements >= info+1) {
+                       if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+                               block_span_info.noElements) >= info+1) {
                                quad = &map->raidMap.ldSpanMap[ld].
                                        spanBlock[span].
                                        block_span_info.quad[info];
-                               if (quad->diff == 0)
+                               if (le32_to_cpu(quad->diff) == 0)
                                        return SPAN_INVALID;
-                               if (quad->logStart <= row  &&
-                                       row <= quad->logEnd  &&
-                                       (mega_mod64(row - quad->logStart,
-                                               quad->diff)) == 0) {
+                               if (le64_to_cpu(quad->logStart) <= row  &&
+                                       row <= le64_to_cpu(quad->logEnd)  &&
+                                       (mega_mod64(row - le64_to_cpu(quad->logStart),
+                                               le32_to_cpu(quad->diff))) == 0) {
                                        if (span_blk != NULL) {
                                                u64  blk;
                                                blk = mega_div64_32
-                                                   ((row - quad->logStart),
-                                                   quad->diff);
-                                               blk = (blk + quad->offsetInSpan)
+                                                   ((row - le64_to_cpu(quad->logStart)),
+                                                   le32_to_cpu(quad->diff));
+                                               blk = (blk + le64_to_cpu(quad->offsetInSpan))
                                                         << raid->stripeShift;
                                                *span_blk = blk;
                                        }
@@ -415,8 +425,8 @@ static u64  get_row_from_strip(struct megasas_instance *instance,
                span_set_Row = mega_div64_32(span_set_Strip,
                                span_set->span_row_data_width) * span_set->diff;
                for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
-                       if (map->raidMap.ldSpanMap[ld].spanBlock[span].
-                               block_span_info.noElements >= info+1) {
+                       if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+                               block_span_info.noElements) >= info+1)) {
                                if (strip_offset >=
                                        span_set->strip_offset[span])
                                        span_offset++;
@@ -480,18 +490,18 @@ static u64 get_strip_from_row(struct megasas_instance *instance,
                        continue;
 
                for (span = 0; span < raid->spanDepth; span++)
-                       if (map->raidMap.ldSpanMap[ld].spanBlock[span].
-                               block_span_info.noElements >= info+1) {
+                       if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+                               block_span_info.noElements) >= info+1) {
                                quad = &map->raidMap.ldSpanMap[ld].
                                        spanBlock[span].block_span_info.quad[info];
-                               if (quad->logStart <= row  &&
-                                       row <= quad->logEnd  &&
-                                       mega_mod64((row - quad->logStart),
-                                       quad->diff) == 0) {
+                               if (le64_to_cpu(quad->logStart) <= row  &&
+                                       row <= le64_to_cpu(quad->logEnd)  &&
+                                       mega_mod64((row - le64_to_cpu(quad->logStart)),
+                                       le32_to_cpu(quad->diff)) == 0) {
                                        strip = mega_div64_32
                                                (((row - span_set->data_row_start)
-                                                       - quad->logStart),
-                                                       quad->diff);
+                                                       - le64_to_cpu(quad->logStart)),
+                                                       le32_to_cpu(quad->diff));
                                        strip *= span_set->span_row_data_width;
                                        strip += span_set->data_strip_start;
                                        strip += span_set->strip_offset[span];
@@ -543,8 +553,8 @@ static u32 get_arm_from_strip(struct megasas_instance *instance,
                                span_set->span_row_data_width);
 
                for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
-                       if (map->raidMap.ldSpanMap[ld].spanBlock[span].
-                               block_span_info.noElements >= info+1) {
+                       if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+                               block_span_info.noElements) >= info+1) {
                                if (strip_offset >=
                                        span_set->strip_offset[span])
                                        span_offset =
@@ -669,7 +679,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
                }
        }
 
-       *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+       *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
        pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
                                        physArm;
        return retval;
@@ -765,7 +775,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
                }
        }
 
-       *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+       *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
        pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
                physArm;
        return retval;
@@ -784,7 +794,7 @@ u8
 MR_BuildRaidContext(struct megasas_instance *instance,
                    struct IO_REQUEST_INFO *io_info,
                    struct RAID_CONTEXT *pRAID_Context,
-                   struct MR_FW_RAID_MAP_ALL *map)
+                   struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN)
 {
        struct MR_LD_RAID  *raid;
        u32         ld, stripSize, stripe_mask;
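
MR_BuildRaidContext() now also hands back a pointer to the LD's 8-byte LUN array through the new raidLUN argument, so the fast-path caller can memcpy it into io_request->LUN. A small sketch of that out-parameter pattern, with invented types standing in for MR_LD_RAID and the caller's request:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Invented stand-in for the relevant part of MR_LD_RAID. */
    struct demo_ld_raid {
            uint8_t LUN[8];
    };

    /* Like MR_BuildRaidContext(), exposes raid->LUN to the caller via **raidLUN. */
    static int demo_build_raid_context(struct demo_ld_raid *raid, uint8_t **raidLUN)
    {
            *raidLUN = raid->LUN;   /* save pointer to raid->LUN array */
            return 1;
    }

    int main(void)
    {
            struct demo_ld_raid raid = { .LUN = { 0, 1, 0, 0, 0, 0, 0, 0 } };
            uint8_t io_request_LUN[8] = { 0 };
            uint8_t *raidLUN;

            if (demo_build_raid_context(&raid, &raidLUN))
                    memcpy(io_request_LUN, raidLUN, 8);   /* populate the LUN field */

            printf("LUN[1] = %u\n", io_request_LUN[1]);
            return 0;
    }
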
@@ -965,7 +975,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
                        regSize += stripSize;
        }
 
-       pRAID_Context->timeoutValue     = map->raidMap.fpPdIoTimeoutSec;
+       pRAID_Context->timeoutValue     = cpu_to_le16(map->raidMap.fpPdIoTimeoutSec);
        if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
                (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
                pRAID_Context->regLockFlags = (isRead) ?
@@ -974,9 +984,12 @@ MR_BuildRaidContext(struct megasas_instance *instance,
                pRAID_Context->regLockFlags = (isRead) ?
                        REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
        pRAID_Context->VirtualDiskTgtId = raid->targetId;
-       pRAID_Context->regLockRowLBA    = regStart;
-       pRAID_Context->regLockLength    = regSize;
+       pRAID_Context->regLockRowLBA    = cpu_to_le64(regStart);
+       pRAID_Context->regLockLength    = cpu_to_le32(regSize);
        pRAID_Context->configSeqNum     = raid->seqNum;
+       /* save pointer to raid->LUN array */
+       *raidLUN = raid->LUN;
        /*Get Phy Params only if FP capable, or else leave it to MR firmware
          to do the calculation.*/
@@ -1047,8 +1060,8 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
                raid = MR_LdRaidGet(ld, map);
                for (element = 0; element < MAX_QUAD_DEPTH; element++) {
                        for (span = 0; span < raid->spanDepth; span++) {
-                               if (map->raidMap.ldSpanMap[ld].spanBlock[span].
-                                       block_span_info.noElements <
+                               if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+                                       block_span_info.noElements) <
                                        element + 1)
                                        continue;
                                span_set = &(ldSpanInfo[ld].span_set[element]);
@@ -1056,14 +1069,14 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
                                        spanBlock[span].block_span_info.
                                        quad[element];
 
-                               span_set->diff = quad->diff;
+                               span_set->diff = le32_to_cpu(quad->diff);
 
                                for (count = 0, span_row_width = 0;
                                        count < raid->spanDepth; count++) {
-                                       if (map->raidMap.ldSpanMap[ld].
+                                       if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
                                                spanBlock[count].
                                                block_span_info.
-                                               noElements >= element + 1) {
+                                               noElements) >= element + 1) {
                                                span_set->strip_offset[count] =
                                                        span_row_width;
                                                span_row_width +=
@@ -1077,9 +1090,9 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
                                }
 
                                span_set->span_row_data_width = span_row_width;
-                               span_row = mega_div64_32(((quad->logEnd -
-                                       quad->logStart) + quad->diff),
-                                       quad->diff);
+                               span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
+                                       le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
+                                       le32_to_cpu(quad->diff));
 
                                if (element == 0) {
                                        span_set->log_start_lba = 0;
@@ -1096,7 +1109,7 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
 
                                        span_set->data_row_start = 0;
                                        span_set->data_row_end =
-                                               (span_row * quad->diff) - 1;
+                                               (span_row * le32_to_cpu(quad->diff)) - 1;
                                } else {
                                        span_set_prev = &(ldSpanInfo[ld].
                                                        span_set[element - 1]);
@@ -1122,7 +1135,7 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
                                                span_set_prev->data_row_end + 1;
                                        span_set->data_row_end =
                                                span_set->data_row_start +
-                                               (span_row * quad->diff) - 1;
+                                               (span_row * le32_to_cpu(quad->diff)) - 1;
                                }
                                break;
                }
index 417d5f1..f655592 100644
@@ -72,17 +72,6 @@ megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
 int
 megasas_issue_polled(struct megasas_instance *instance,
                     struct megasas_cmd *cmd);
-
-u8
-MR_BuildRaidContext(struct megasas_instance *instance,
-                   struct IO_REQUEST_INFO *io_info,
-                   struct RAID_CONTEXT *pRAID_Context,
-                   struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
-struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
-
-u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
-
 void
 megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
 
@@ -626,23 +615,20 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
 
        IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
        IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
-       IOCInitMessage->MsgVersion = MPI2_VERSION;
-       IOCInitMessage->HeaderVersion = MPI2_HEADER_VERSION;
-       IOCInitMessage->SystemRequestFrameSize =
-               MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
-
-       IOCInitMessage->ReplyDescriptorPostQueueDepth = fusion->reply_q_depth;
-       IOCInitMessage->ReplyDescriptorPostQueueAddress =
-               fusion->reply_frames_desc_phys;
-       IOCInitMessage->SystemRequestFrameBaseAddress =
-               fusion->io_request_frames_phys;
+       IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
+       IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
+       IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
+
+       IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
+       IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys);
+       IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
        IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
        init_frame = (struct megasas_init_frame *)cmd->frame;
        memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
 
        frame_hdr = &cmd->frame->hdr;
        frame_hdr->cmd_status = 0xFF;
-       frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+       frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
 
        init_frame->cmd = MFI_CMD_INIT;
        init_frame->cmd_status = 0xFF;
@@ -652,17 +638,24 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
                (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
                init_frame->driver_operations.
                        mfi_capabilities.support_additional_msix = 1;
+       /* driver supports HA / Remote LUN over Fast Path interface */
+       init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun
+               = 1;
+       /* Convert capability to LE32 */
+       cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
 
-       init_frame->queue_info_new_phys_addr_lo = ioc_init_handle;
-       init_frame->data_xfer_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
+       init_frame->queue_info_new_phys_addr_lo = cpu_to_le32((u32)ioc_init_handle);
+       init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
 
        req_desc =
          (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc;
 
-       req_desc->Words = cmd->frame_phys_addr;
+       req_desc->Words = 0;
        req_desc->MFAIo.RequestFlags =
                (MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
                 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+       cpu_to_le32s((u32 *)&req_desc->MFAIo);
+       req_desc->Words |= cpu_to_le64(cmd->frame_phys_addr);
 
        /*
         * disable the intr before firing the init frame
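
Two related 64-bit patterns appear in these init paths: megasas_issue_init_mfi() splits the queue-info DMA address into lower_32_bits()/upper_32_bits() halves and converts each to little-endian, while megasas_ioc_init_fusion() now starts the MFA request descriptor from zero, sets and byte-swaps the 32-bit MFAIo flags, and then ORs in the little-endian frame address. A userspace sketch of the address handling in both steps, with a made-up DMA address:

    #include <stdint.h>
    #include <endian.h>
    #include <assert.h>

    /* Stand-ins for the kernel's lower_32_bits()/upper_32_bits(). */
    static inline uint32_t demo_lower_32_bits(uint64_t v) { return (uint32_t)v; }
    static inline uint32_t demo_upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
            uint64_t dma_addr = 0x0000001234567000ULL;   /* hypothetical DMA address */

            /* megasas_issue_init_mfi() style: two little-endian 32-bit fields. */
            uint32_t addr_lo = htole32(demo_lower_32_bits(dma_addr));
            uint32_t addr_hi = htole32(demo_upper_32_bits(dma_addr));

            /* megasas_ioc_init_fusion() style: start from 0, then OR in the
             * little-endian 64-bit frame address. */
            uint64_t words = 0;
            words |= htole64(dma_addr);

            assert(le32toh(addr_lo) == 0x34567000u);
            assert(le32toh(addr_hi) == 0x00000012u);
            assert(le64toh(words) == dma_addr);
            return 0;
    }
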
@@ -753,13 +746,13 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
        dcmd->cmd = MFI_CMD_DCMD;
        dcmd->cmd_status = 0xFF;
        dcmd->sge_count = 1;
-       dcmd->flags = MFI_FRAME_DIR_READ;
+       dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
        dcmd->timeout = 0;
        dcmd->pad_0 = 0;
-       dcmd->data_xfer_len = size_map_info;
-       dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
-       dcmd->sgl.sge32[0].phys_addr = ci_h;
-       dcmd->sgl.sge32[0].length = size_map_info;
+       dcmd->data_xfer_len = cpu_to_le32(size_map_info);
+       dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+       dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+       dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
 
        if (!megasas_issue_polled(instance, cmd))
                ret = 0;
@@ -828,7 +821,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
 
        map = fusion->ld_map[instance->map_id & 1];
 
-       num_lds = map->raidMap.ldCount;
+       num_lds = le32_to_cpu(map->raidMap.ldCount);
 
        dcmd = &cmd->frame->dcmd;
 
@@ -856,15 +849,15 @@ megasas_sync_map_info(struct megasas_instance *instance)
        dcmd->cmd = MFI_CMD_DCMD;
        dcmd->cmd_status = 0xFF;
        dcmd->sge_count = 1;
-       dcmd->flags = MFI_FRAME_DIR_WRITE;
+       dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
        dcmd->timeout = 0;
        dcmd->pad_0 = 0;
-       dcmd->data_xfer_len = size_map_info;
+       dcmd->data_xfer_len = cpu_to_le32(size_map_info);
        dcmd->mbox.b[0] = num_lds;
        dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
-       dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
-       dcmd->sgl.sge32[0].phys_addr = ci_h;
-       dcmd->sgl.sge32[0].length = size_map_info;
+       dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+       dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+       dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
 
        instance->map_update_cmd = cmd;
 
@@ -1067,9 +1060,8 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
 
        spin_lock_irqsave(&instance->hba_lock, flags);
 
-       writel(req_desc_lo,
-              &(regs)->inbound_low_queue_port);
-       writel(req_desc_hi, &(regs)->inbound_high_queue_port);
+       writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port);
+       writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port);
        spin_unlock_irqrestore(&instance->hba_lock, flags);
 }
 
@@ -1157,8 +1149,8 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
                return sge_count;
 
        scsi_for_each_sg(scp, os_sgl, sge_count, i) {
-               sgl_ptr->Length = sg_dma_len(os_sgl);
-               sgl_ptr->Address = sg_dma_address(os_sgl);
+               sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
+               sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
                sgl_ptr->Flags = 0;
                if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
                        (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
@@ -1177,9 +1169,9 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
                                PCI_DEVICE_ID_LSI_INVADER) ||
                                (instance->pdev->device ==
                                PCI_DEVICE_ID_LSI_FURY)) {
-                               if ((cmd->io_request->IoFlags &
-                               MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
-                               MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
+                               if ((le16_to_cpu(cmd->io_request->IoFlags) &
+                                       MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
+                                       MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
                                        cmd->io_request->ChainOffset =
                                                fusion->
                                                chain_offset_io_request;
@@ -1201,9 +1193,8 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
                                sg_chain->Flags =
                                        (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
                                         MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
-                       sg_chain->Length =  (sizeof(union MPI2_SGE_IO_UNION)
-                                            *(sge_count - sg_processed));
-                       sg_chain->Address = cmd->sg_frame_phys_addr;
+                       sg_chain->Length =  cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
+                       sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
 
                        sgl_ptr =
                          (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
@@ -1261,7 +1252,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
                io_request->CDB.EEDP32.PrimaryReferenceTag =
                        cpu_to_be32(ref_tag);
                io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
-               io_request->IoFlags = 32; /* Specify 32-byte cdb */
+               io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
 
                /* Transfer length */
                cdb[28] = (u8)((num_blocks >> 24) & 0xff);
@@ -1271,19 +1262,19 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
 
                /* set SCSI IO EEDPFlags */
                if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
-                       io_request->EEDPFlags =
+                       io_request->EEDPFlags = cpu_to_le16(
                                MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG  |
                                MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
                                MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
                                MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
-                               MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+                               MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
                } else {
-                       io_request->EEDPFlags =
+                       io_request->EEDPFlags = cpu_to_le16(
                                MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
-                               MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
+                               MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
                }
-               io_request->Control |= (0x4 << 26);
-               io_request->EEDPBlockSize = scp->device->sector_size;
+               io_request->Control |= cpu_to_le32((0x4 << 26));
+               io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
        } else {
                /* Some drives don't support 16/12 byte CDB's, convert to 10 */
                if (((cdb_len == 12) || (cdb_len == 16)) &&
@@ -1311,7 +1302,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
                        cdb[8] = (u8)(num_blocks & 0xff);
                        cdb[7] = (u8)((num_blocks >> 8) & 0xff);
 
-                       io_request->IoFlags = 10; /* Specify 10-byte cdb */
+                       io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
                        cdb_len = 10;
                } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
                        /* Convert to 16 byte CDB for large LBA's */
@@ -1349,7 +1340,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
                        cdb[11] = (u8)((num_blocks >> 16) & 0xff);
                        cdb[10] = (u8)((num_blocks >> 24) & 0xff);
 
-                       io_request->IoFlags = 16; /* Specify 16-byte cdb */
+                       io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
                        cdb_len = 16;
                }
 
@@ -1410,13 +1401,14 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
        struct IO_REQUEST_INFO io_info;
        struct fusion_context *fusion;
        struct MR_FW_RAID_MAP_ALL *local_map_ptr;
+       u8 *raidLUN;
 
        device_id = MEGASAS_DEV_INDEX(instance, scp);
 
        fusion = instance->ctrl_context;
 
        io_request = cmd->io_request;
-       io_request->RaidContext.VirtualDiskTgtId = device_id;
+       io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
        io_request->RaidContext.status = 0;
        io_request->RaidContext.exStatus = 0;
 
@@ -1480,7 +1472,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
        io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
        io_info.numBlocks = datalength;
        io_info.ldTgtId = device_id;
-       io_request->DataLength = scsi_bufflen(scp);
+       io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));
 
        if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
                io_info.isRead = 1;
@@ -1494,7 +1486,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
        } else {
                if (MR_BuildRaidContext(instance, &io_info,
                                        &io_request->RaidContext,
-                                       local_map_ptr))
+                                       local_map_ptr, &raidLUN))
                        fp_possible = io_info.fpOkForIo;
        }
 
@@ -1520,8 +1512,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
                                        MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
                        io_request->RaidContext.Type = MPI2_TYPE_CUDA;
                        io_request->RaidContext.nseg = 0x1;
-                       io_request->IoFlags |=
-                         MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+                       io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
                        io_request->RaidContext.regLockFlags |=
                          (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
                           MR_RL_FLAGS_SEQ_NUM_ENABLE);
@@ -1537,9 +1528,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
                        scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
                cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
                io_request->DevHandle = io_info.devHandle;
+               /* populate the LUN field */
+               memcpy(io_request->LUN, raidLUN, 8);
        } else {
                io_request->RaidContext.timeoutValue =
-                       local_map_ptr->raidMap.fpPdIoTimeoutSec;
+                       cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
                cmd->request_desc->SCSIIO.RequestFlags =
                        (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
                         << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -1557,7 +1550,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
                        io_request->RaidContext.nseg = 0x1;
                }
                io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
-               io_request->DevHandle = device_id;
+               io_request->DevHandle = cpu_to_le16(device_id);
        } /* Not FP */
 }
 
@@ -1579,6 +1572,11 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
        u16 pd_index = 0;
        struct MR_FW_RAID_MAP_ALL *local_map_ptr;
        struct fusion_context *fusion = instance->ctrl_context;
+       u8                          span, physArm;
+       u16                         devHandle;
+       u32                         ld, arRef, pd;
+       struct MR_LD_RAID                  *raid;
+       struct RAID_CONTEXT                *pRAID_Context;
 
        io_request = cmd->io_request;
        device_id = MEGASAS_DEV_INDEX(instance, scmd);
@@ -1586,6 +1584,9 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
                +scmd->device->id;
        local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
 
+       io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+
        /* Check if this is a system PD I/O */
        if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
            instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
@@ -1623,15 +1624,62 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
                                        scmd->request->timeout / HZ;
                }
        } else {
+               if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS)
+                       goto NonFastPath;
+
+               ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+               if ((ld >= MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io))
+                       goto NonFastPath;
+
+               raid = MR_LdRaidGet(ld, local_map_ptr);
+
+               /* check if this LD is FP capable */
+               if (!(raid->capability.fpNonRWCapable))
+                       /* not FP capable, send as non-FP */
+                       goto NonFastPath;
+
+               /* get RAID_Context pointer */
+               pRAID_Context = &io_request->RaidContext;
+
+               /* set RAID context values */
+               pRAID_Context->regLockFlags     = REGION_TYPE_SHARED_READ;
+               pRAID_Context->timeoutValue     = raid->fpIoTimeoutForLd;
+               pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
+               pRAID_Context->regLockRowLBA    = 0;
+               pRAID_Context->regLockLength    = 0;
+               pRAID_Context->configSeqNum     = raid->seqNum;
+
+               /* get the DevHandle for the PD (since this is
+                  fpNonRWCapable, this is a single disk RAID0) */
+               span = physArm = 0;
+               arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
+               pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
+               devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
+
+               /* build request descriptor */
+               cmd->request_desc->SCSIIO.RequestFlags =
+                       (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+                        MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+               cmd->request_desc->SCSIIO.DevHandle = devHandle;
+
+               /* populate the LUN field */
+               memcpy(io_request->LUN, raid->LUN, 8);
+
+               /* build the raidScsiIO structure */
+               io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+               io_request->DevHandle = devHandle;
+
+               return;
+
+NonFastPath:
                io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
-               io_request->DevHandle = device_id;
+               io_request->DevHandle = cpu_to_le16(device_id);
                cmd->request_desc->SCSIIO.RequestFlags =
                        (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
                         MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
        }
-       io_request->RaidContext.VirtualDiskTgtId = device_id;
+       io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
        io_request->LUN[1] = scmd->device->lun;
-       io_request->DataLength = scsi_bufflen(scmd);
 }
 
 /**
@@ -1670,7 +1718,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
         * Just the CDB length, rest of the Flags are zero
         * This will be modified for FP in build_ldio_fusion
         */
-       io_request->IoFlags = scp->cmd_len;
+       io_request->IoFlags = cpu_to_le16(scp->cmd_len);
 
        if (megasas_is_ldio(scp))
                megasas_build_ldio_fusion(instance, scp, cmd);
@@ -1695,17 +1743,17 @@ megasas_build_io_fusion(struct megasas_instance *instance,
 
        io_request->RaidContext.numSGE = sge_count;
 
-       io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
+       io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
 
        if (scp->sc_data_direction == PCI_DMA_TODEVICE)
-               io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;
+               io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
        else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
-               io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
+               io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
 
        io_request->SGLOffset0 =
                offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
 
-       io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
+       io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
        io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
 
        cmd->scmd = scp;
@@ -1770,7 +1818,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
        }
 
        req_desc = cmd->request_desc;
-       req_desc->SCSIIO.SMID = index;
+       req_desc->SCSIIO.SMID = cpu_to_le16(index);
 
        if (cmd->io_request->ChainOffset != 0 &&
            cmd->io_request->ChainOffset != 0xF)
@@ -1832,7 +1880,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
        num_completed = 0;
 
        while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
-               smid = reply_desc->SMID;
+               smid = le16_to_cpu(reply_desc->SMID);
 
                cmd_fusion = fusion->cmd_list[smid - 1];
 
@@ -2050,12 +2098,12 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
                                       SGL) / 4;
        io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
 
-       mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
+       mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
 
        mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
                MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
 
-       mpi25_ieee_chain->Length = MEGASAS_MAX_SZ_CHAIN_FRAME;
+       mpi25_ieee_chain->Length = cpu_to_le32(MEGASAS_MAX_SZ_CHAIN_FRAME);
 
        return 0;
 }
@@ -2088,7 +2136,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
        req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
                                         MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 
-       req_desc->SCSIIO.SMID = index;
+       req_desc->SCSIIO.SMID = cpu_to_le16(index);
 
        return req_desc;
 }
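
The common thread in the megaraid_sas fusion hunks above is endianness annotation: every value that lands in a firmware-visible structure is wrapped in cpu_to_le16()/cpu_to_le32() on the way out, and read back with le16_to_cpu() (the reply SMID), so the driver produces the same byte layout on big-endian hosts. A minimal sketch of the idiom, with hypothetical structure and field names rather than code from the patch:

/*
 * Sketch only: converting CPU-native values into a little-endian,
 * firmware-visible layout and back.  The struct below is hypothetical.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

struct fw_request {
	__le16 dev_handle;	/* firmware expects little-endian */
	__le32 data_length;
};

static void fill_request(struct fw_request *req, u16 handle, u32 len)
{
	req->dev_handle  = cpu_to_le16(handle);	/* byte swap on BE, no-op on LE */
	req->data_length = cpu_to_le32(len);
}

static u16 read_handle(const struct fw_request *req)
{
	return le16_to_cpu(req->dev_handle);	/* back to CPU byte order */
}
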
index 4eb8401..35a5139 100644 (file)
@@ -93,8 +93,13 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
  */
 
 struct RAID_CONTEXT {
+#if   defined(__BIG_ENDIAN_BITFIELD)
+       u8      nseg:4;
+       u8      Type:4;
+#else
        u8      Type:4;
        u8      nseg:4;
+#endif
        u8      resvd0;
        u16     timeoutValue;
        u8      regLockFlags;
@@ -298,8 +303,13 @@ struct MPI2_RAID_SCSI_IO_REQUEST {
  * MPT RAID MFA IO Descriptor.
  */
 struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
+#if   defined(__BIG_ENDIAN_BITFIELD)
+       u32     MessageAddress1:24; /* bits 31:8*/
+       u32     RequestFlags:8;
+#else
        u32     RequestFlags:8;
        u32     MessageAddress1:24; /* bits 31:8*/
+#endif
        u32     MessageAddress2;      /* bits 61:32 */
 };
 
@@ -518,6 +528,19 @@ struct MR_SPAN_BLOCK_INFO {
 
 struct MR_LD_RAID {
        struct {
+#if   defined(__BIG_ENDIAN_BITFIELD)
+               u32     reserved4:7;
+               u32     fpNonRWCapable:1;
+               u32     fpReadAcrossStripe:1;
+               u32     fpWriteAcrossStripe:1;
+               u32     fpReadCapable:1;
+               u32     fpWriteCapable:1;
+               u32     encryptionType:8;
+               u32     pdPiMode:4;
+               u32     ldPiMode:4;
+               u32     reserved5:3;
+               u32     fpCapable:1;
+#else
                u32     fpCapable:1;
                u32     reserved5:3;
                u32     ldPiMode:4;
@@ -527,7 +550,9 @@ struct MR_LD_RAID {
                u32     fpReadCapable:1;
                u32     fpWriteAcrossStripe:1;
                u32     fpReadAcrossStripe:1;
-               u32     reserved4:8;
+               u32     fpNonRWCapable:1;
+               u32     reserved4:7;
+#endif
        } capability;
        u32     reserved6;
        u64     size;
@@ -551,7 +576,9 @@ struct MR_LD_RAID {
                u32 reserved:31;
        } flags;
 
-       u8      reserved3[0x5C];
+       u8      LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
+       u8      fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/
+       u8      reserved3[0x80-0x2D]; /* 0x2D */
 };
 
 struct MR_LD_SPAN_MAP {
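
The #if defined(__BIG_ENDIAN_BITFIELD) blocks added to RAID_CONTEXT, MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR and MR_LD_RAID.capability exist because, on the compilers Linux supports, little-endian targets allocate bit-fields starting at the least significant bit while big-endian targets start at the most significant bit; a structure that must match a fixed firmware layout therefore declares its bit-fields in reverse order for big-endian builds. A small sketch of the idiom, with hypothetical names:

/*
 * Sketch only: keeping a hardware-defined nibble layout stable across
 * endianness by reversing the bit-field declaration order.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

struct hw_byte {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8	nseg:4;		/* occupies the high nibble */
	u8	type:4;		/* occupies the low nibble */
#else
	u8	type:4;		/* low nibble, as the firmware defines it */
	u8	nseg:4;		/* high nibble */
#endif
};
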
index 4c1d2e7..efb0c4c 100644 (file)
@@ -1,5 +1,5 @@
 # mpt3sas makefile
-obj-m += mpt3sas.o
+obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o
 mpt3sas-y +=  mpt3sas_base.o     \
                mpt3sas_config.o \
                mpt3sas_scsih.o      \
index b58e8f8..e62d17d 100644 (file)
@@ -2420,14 +2420,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
                        }
                }
 
-               if (modepage == 0x3F) {
-                       sd_printk(KERN_ERR, sdkp, "No Caching mode page "
-                                 "present\n");
-                       goto defaults;
-               } else if ((buffer[offset] & 0x3f) != modepage) {
-                       sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
-                       goto defaults;
-               }
+               sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
+               goto defaults;
+
        Page_found:
                if (modepage == 8) {
                        sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
index bce09a6..7210500 100644 (file)
@@ -177,6 +177,7 @@ enum {
        MASK_TASK_RESPONSE              = 0xFF00,
        MASK_RSP_UPIU_RESULT            = 0xFFFF,
        MASK_QUERY_DATA_SEG_LEN         = 0xFFFF,
+       MASK_RSP_UPIU_DATA_SEG_LEN      = 0xFFFF,
        MASK_RSP_EXCEPTION_EVENT        = 0x10000,
 };
 
index b36ca9a..04884d6 100644 (file)
 #include <linux/async.h>
 
 #include "ufshcd.h"
+#include "unipro.h"
 
 #define UFSHCD_ENABLE_INTRS    (UTP_TRANSFER_REQ_COMPL |\
                                 UTP_TASK_REQ_COMPL |\
+                                UIC_POWER_MODE |\
                                 UFSHCD_ERROR_MASK)
 /* UIC command timeout, unit: ms */
 #define UIC_CMD_TIMEOUT        500
@@ -56,6 +58,9 @@
 /* Expose the flag value from utp_upiu_query.value */
 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
 
+/* Interrupt aggregation default timeout, unit: 40us */
+#define INT_AGGR_DEF_TO        0x02
+
 enum {
        UFSHCD_MAX_CHANNEL      = 0,
        UFSHCD_MAX_ID           = 1,
@@ -78,12 +83,6 @@ enum {
        UFSHCD_INT_CLEAR,
 };
 
-/* Interrupt aggregation options */
-enum {
-       INT_AGGR_RESET,
-       INT_AGGR_CONFIG,
-};
-
 /*
  * ufshcd_wait_for_register - wait for register value to change
  * @hba - per-adapter interface
@@ -238,6 +237,18 @@ static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
 }
 
 /**
+ * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
+ * @hba: Pointer to adapter instance
+ *
+ * This function reads UIC command argument3
+ * Returns the value of the UIC command argument3 register
+ */
+static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
+{
+       return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
+}
+
+/**
  * ufshcd_get_req_rsp - returns the TR response transaction type
  * @ucd_rsp_ptr: pointer to response UPIU
  */
@@ -260,6 +271,20 @@ ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
        return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
 }
 
+/*
+ * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
+ *                             from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * Return the data segment length.
+ */
+static inline unsigned int
+ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+       return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+               MASK_RSP_UPIU_DATA_SEG_LEN;
+}
+
 /**
  * ufshcd_is_exception_event - Check if the device raised an exception event
  * @ucd_rsp_ptr: pointer to response UPIU
@@ -276,30 +301,30 @@ static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
 }
 
 /**
- * ufshcd_config_int_aggr - Configure interrupt aggregation values.
- *             Currently there is no use case where we want to configure
- *             interrupt aggregation dynamically. So to configure interrupt
- *             aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and
- *             INT_AGGR_TIMEOUT_VALUE are used.
+ * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
  * @hba: per adapter instance
- * @option: Interrupt aggregation option
  */
 static inline void
-ufshcd_config_int_aggr(struct ufs_hba *hba, int option)
+ufshcd_reset_intr_aggr(struct ufs_hba *hba)
 {
-       switch (option) {
-       case INT_AGGR_RESET:
-               ufshcd_writel(hba, INT_AGGR_ENABLE |
-                             INT_AGGR_COUNTER_AND_TIMER_RESET,
-                             REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
-               break;
-       case INT_AGGR_CONFIG:
-               ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
-                             INT_AGGR_COUNTER_THRESHOLD_VALUE |
-                             INT_AGGR_TIMEOUT_VALUE,
-                             REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
-               break;
-       }
+       ufshcd_writel(hba, INT_AGGR_ENABLE |
+                     INT_AGGR_COUNTER_AND_TIMER_RESET,
+                     REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
+ * @hba: per adapter instance
+ * @cnt: Interrupt aggregation counter threshold
+ * @tmout: Interrupt aggregation timeout value
+ */
+static inline void
+ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
+{
+       ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
+                     INT_AGGR_COUNTER_THLD_VAL(cnt) |
+                     INT_AGGR_TIMEOUT_VAL(tmout),
+                     REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
 }
 
 /**
@@ -355,7 +380,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
 {
        int len;
-       if (lrbp->sense_buffer) {
+       if (lrbp->sense_buffer &&
+           ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
                len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
                memcpy(lrbp->sense_buffer,
                        lrbp->ucd_rsp_ptr->sr.sense_data,
@@ -446,6 +472,18 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
 }
 
 /**
+ * ufshcd_get_upmcrs - Get the power mode change request status
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the UPMCRS field of HCS register
+ * Returns value of UPMCRS field
+ */
+static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
+{
+       return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
+}
+
+/**
  * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
  * @hba: per adapter instance
  * @uic_cmd: UIC command
@@ -1362,6 +1400,202 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
 }
 
 /**
+ * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @attr_set: attribute set type as uic command argument2
+ * @mib_val: setting value as uic command argument3
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+                       u8 attr_set, u32 mib_val, u8 peer)
+{
+       struct uic_command uic_cmd = {0};
+       static const char *const action[] = {
+               "dme-set",
+               "dme-peer-set"
+       };
+       const char *set = action[!!peer];
+       int ret;
+
+       uic_cmd.command = peer ?
+               UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
+       uic_cmd.argument1 = attr_sel;
+       uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
+       uic_cmd.argument3 = mib_val;
+
+       ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+       if (ret)
+               dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+                       set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
+
+/**
+ * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @mib_val: the value of the attribute as returned by the UIC command
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+                       u32 *mib_val, u8 peer)
+{
+       struct uic_command uic_cmd = {0};
+       static const char *const action[] = {
+               "dme-get",
+               "dme-peer-get"
+       };
+       const char *get = action[!!peer];
+       int ret;
+
+       uic_cmd.command = peer ?
+               UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
+       uic_cmd.argument1 = attr_sel;
+
+       ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+       if (ret) {
+               dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
+                       get, UIC_GET_ATTR_ID(attr_sel), ret);
+               goto out;
+       }
+
+       if (mib_val)
+               *mib_val = uic_cmd.argument3;
+out:
+       return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
+
+/**
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+ *                             using DME_SET primitives.
+ * @hba: per adapter instance
+ * @mode: power mode value
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+{
+       struct uic_command uic_cmd = {0};
+       struct completion pwr_done;
+       unsigned long flags;
+       u8 status;
+       int ret;
+
+       uic_cmd.command = UIC_CMD_DME_SET;
+       uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+       uic_cmd.argument3 = mode;
+       init_completion(&pwr_done);
+
+       mutex_lock(&hba->uic_cmd_mutex);
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->pwr_done = &pwr_done;
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       ret = __ufshcd_send_uic_cmd(hba, &uic_cmd);
+       if (ret) {
+               dev_err(hba->dev,
+                       "pwr mode change with mode 0x%x uic error %d\n",
+                       mode, ret);
+               goto out;
+       }
+
+       if (!wait_for_completion_timeout(hba->pwr_done,
+                                        msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+               dev_err(hba->dev,
+                       "pwr mode change with mode 0x%x completion timeout\n",
+                       mode);
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+
+       status = ufshcd_get_upmcrs(hba);
+       if (status != PWR_LOCAL) {
+               dev_err(hba->dev,
+                       "pwr mode change failed, host upmcrs:0x%x\n",
+                       status);
+               ret = (status != PWR_OK) ? status : -1;
+       }
+out:
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->pwr_done = NULL;
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       mutex_unlock(&hba->uic_cmd_mutex);
+       return ret;
+}
+
+/**
+ * ufshcd_config_max_pwr_mode - Set & Change power mode with
+ *     maximum capability attribute information.
+ * @hba: per adapter instance
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
+{
+       enum {RX = 0, TX = 1};
+       u32 lanes[] = {1, 1};
+       u32 gear[] = {1, 1};
+       u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE};
+       int ret;
+
+       /* Get the connected lane count */
+       ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]);
+       ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]);
+
+       /*
+        * First, get the maximum HS gear.
+        * If it is zero, the link has no HS gear capability,
+        * so fall back to the maximum PWM gear.
+        */
+       ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]);
+       if (!gear[RX]) {
+               ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]);
+               pwr[RX] = SLOWAUTO_MODE;
+       }
+
+       ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]);
+       if (!gear[TX]) {
+               ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+                                   &gear[TX]);
+               pwr[TX] = SLOWAUTO_MODE;
+       }
+
+       /*
+        * Configure attributes for power mode change with below.
+        * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
+        * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
+        * - PA_HSSERIES
+        */
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]);
+       if (pwr[RX] == FASTAUTO_MODE)
+               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]);
+       if (pwr[TX] == FASTAUTO_MODE)
+               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+
+       if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE)
+               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);
+
+       ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]);
+       if (ret)
+               dev_err(hba->dev,
+                       "pwr_mode: power mode change failed %d\n", ret);
+
+       return ret;
+}
+
+/**
  * ufshcd_complete_dev_init() - checks device readiness
  * @hba: per-adapter instance
  *
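
Two details in the new ufshcd code above are worth spelling out: the argument passed to ufshcd_uic_change_pwr_mode() packs the RX power mode into the upper nibble and the TX power mode into the lower nibble of PA_PWRMODE, and the completion status is read back from the UPMCRS field in bits 10:8 of the host controller status register, as ufshcd_get_upmcrs() does. A stand-alone sketch of that arithmetic, with the mode values copied from unipro.h:

/*
 * Sketch only: PA_PWRMODE nibble packing and UPMCRS extraction.
 */
#include <linux/types.h>

#define FASTAUTO_MODE	4	/* values from unipro.h */
#define SLOWAUTO_MODE	5

static inline u8 pack_pwr_mode(u8 rx_mode, u8 tx_mode)
{
	return (rx_mode << 4) | tx_mode;	/* RX in bits 7:4, TX in bits 3:0 */
}

static inline u8 extract_upmcrs(u32 hcs)
{
	return (hcs >> 8) & 0x7;		/* UPMCRS is HCS bits 10:8 */
}

/* e.g. pack_pwr_mode(FASTAUTO_MODE, SLOWAUTO_MODE) == 0x45 */
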
@@ -1442,7 +1676,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
        ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
 
        /* Configure interrupt aggregation */
-       ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);
+       ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 
        /* Configure UTRL and UTMRL base address registers */
        ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
@@ -1788,32 +2022,24 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
        int result = 0;
 
        switch (scsi_status) {
-       case SAM_STAT_GOOD:
-               result |= DID_OK << 16 |
-                         COMMAND_COMPLETE << 8 |
-                         SAM_STAT_GOOD;
-               break;
        case SAM_STAT_CHECK_CONDITION:
+               ufshcd_copy_sense_data(lrbp);
+       case SAM_STAT_GOOD:
                result |= DID_OK << 16 |
                          COMMAND_COMPLETE << 8 |
-                         SAM_STAT_CHECK_CONDITION;
-               ufshcd_copy_sense_data(lrbp);
-               break;
-       case SAM_STAT_BUSY:
-               result |= SAM_STAT_BUSY;
+                         scsi_status;
                break;
        case SAM_STAT_TASK_SET_FULL:
-
                /*
                 * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
                 * depth needs to be adjusted to the exact number of
                 * outstanding commands the LUN can handle at any given time.
                 */
                ufshcd_adjust_lun_qdepth(lrbp->cmd);
-               result |= SAM_STAT_TASK_SET_FULL;
-               break;
+       case SAM_STAT_BUSY:
        case SAM_STAT_TASK_ABORTED:
-               result |= SAM_STAT_TASK_ABORTED;
+               ufshcd_copy_sense_data(lrbp);
+               result |= scsi_status;
                break;
        default:
                result |= DID_ERROR << 16;
@@ -1898,14 +2124,20 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 /**
  * ufshcd_uic_cmd_compl - handle completion of uic command
  * @hba: per adapter instance
+ * @intr_status: interrupt status generated by the controller
  */
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba)
+static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 {
-       if (hba->active_uic_cmd) {
+       if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
                hba->active_uic_cmd->argument2 |=
                        ufshcd_get_uic_cmd_result(hba);
+               hba->active_uic_cmd->argument3 =
+                       ufshcd_get_dme_attr_val(hba);
                complete(&hba->active_uic_cmd->done);
        }
+
+       if ((intr_status & UIC_POWER_MODE) && hba->pwr_done)
+               complete(hba->pwr_done);
 }
 
 /**
@@ -1960,7 +2192,7 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
 
        /* Reset interrupt aggregation counters */
        if (int_aggr_reset)
-               ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
+               ufshcd_reset_intr_aggr(hba);
 }
 
 /**
@@ -2251,8 +2483,8 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
        if (hba->errors)
                ufshcd_err_handler(hba);
 
-       if (intr_status & UIC_COMMAND_COMPL)
-               ufshcd_uic_cmd_compl(hba);
+       if (intr_status & UFSHCD_UIC_MASK)
+               ufshcd_uic_cmd_compl(hba, intr_status);
 
        if (intr_status & UTP_TASK_REQ_COMPL)
                ufshcd_tmc_handler(hba);
@@ -2494,6 +2726,8 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
        if (ret)
                goto out;
 
+       ufshcd_config_max_pwr_mode(hba);
+
        ret = ufshcd_verify_dev_init(hba);
        if (ret)
                goto out;
index 59c9c48..577679a 100644 (file)
@@ -175,6 +175,7 @@ struct ufs_dev_cmd {
  * @active_uic_cmd: handle of active UIC command
  * @uic_cmd_mutex: mutex for uic command
  * @ufshcd_tm_wait_queue: wait queue for task management
+ * @pwr_done: completion for power mode change
  * @tm_condition: condition variable for task management
  * @ufshcd_state: UFSHCD states
  * @intr_mask: Interrupt Mask Bits
@@ -219,6 +220,8 @@ struct ufs_hba {
        wait_queue_head_t ufshcd_tm_wait_queue;
        unsigned long tm_condition;
 
+       struct completion *pwr_done;
+
        u32 ufshcd_state;
        u32 intr_mask;
        u16 ee_ctrl_mask;
@@ -263,4 +266,55 @@ static inline void check_upiu_size(void)
 extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
 extern int ufshcd_runtime_resume(struct ufs_hba *hba);
 extern int ufshcd_runtime_idle(struct ufs_hba *hba);
+extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+                              u8 attr_set, u32 mib_val, u8 peer);
+extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+                              u32 *mib_val, u8 peer);
+
+/* UIC command interfaces for DME primitives */
+#define DME_LOCAL      0
+#define DME_PEER       1
+#define ATTR_SET_NOR   0       /* NORMAL */
+#define ATTR_SET_ST    1       /* STATIC */
+
+static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
+                                u32 mib_val)
+{
+       return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+                                  mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
+                                   u32 mib_val)
+{
+       return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
+                                  mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
+                                     u32 mib_val)
+{
+       return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+                                  mib_val, DME_PEER);
+}
+
+static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
+                                        u32 mib_val)
+{
+       return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
+                                  mib_val, DME_PEER);
+}
+
+static inline int ufshcd_dme_get(struct ufs_hba *hba,
+                                u32 attr_sel, u32 *mib_val)
+{
+       return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
+                                     u32 attr_sel, u32 *mib_val)
+{
+       return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
+}
+
 #endif /* End of Header */
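
The inline ufshcd_dme_*() wrappers above all funnel into ufshcd_dme_set_attr()/ufshcd_dme_get_attr(); the attr_sel argument they take is built with UIC_ARG_MIB()/UIC_ARG_MIB_SEL() from the register-header hunk that follows, which packs the attribute ID into the top 16 bits of UIC command argument 1 and the GenSelectorIndex into the low 16 bits. A self-contained sketch of that packing, with the macro bodies repeated locally:

/*
 * Sketch only: composing UIC command argument 1 for DME get/set.
 * Macro bodies match the ones added in the header changes below.
 */
#include <linux/types.h>

#define UIC_ARG_MIB_SEL(attr, sel)	((((attr) & 0xFFFF) << 16) | ((sel) & 0xFFFF))
#define UIC_ARG_MIB(attr)		UIC_ARG_MIB_SEL(attr, 0)
#define UIC_GET_ATTR_ID(v)		(((v) >> 16) & 0xFFFF)

/* PA_PWRMODE is 0x1571, so UIC_ARG_MIB(0x1571) == 0x15710000,
 * and UIC_GET_ATTR_ID(0x15710000) == 0x1571 again.
 */
static const u32 pwrmode_arg1 = UIC_ARG_MIB(0x1571);
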
index f1e1b74..0475c66 100644 (file)
@@ -124,6 +124,9 @@ enum {
 #define CONTROLLER_FATAL_ERROR                 UFS_BIT(16)
 #define SYSTEM_BUS_FATAL_ERROR                 UFS_BIT(17)
 
+#define UFSHCD_UIC_MASK                (UIC_COMMAND_COMPL |\
+                                UIC_POWER_MODE)
+
 #define UFSHCD_ERROR_MASK      (UIC_ERROR |\
                                DEVICE_FATAL_ERROR |\
                                CONTROLLER_FATAL_ERROR |\
@@ -142,6 +145,15 @@ enum {
 #define DEVICE_ERROR_INDICATOR                 UFS_BIT(5)
 #define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK  UFS_MASK(0x7, 8)
 
+enum {
+       PWR_OK          = 0x0,
+       PWR_LOCAL       = 0x01,
+       PWR_REMOTE      = 0x02,
+       PWR_BUSY        = 0x03,
+       PWR_ERROR_CAP   = 0x04,
+       PWR_FATAL_ERROR = 0x05,
+};
+
 /* HCE - Host Controller Enable 34h */
 #define CONTROLLER_ENABLE      UFS_BIT(0)
 #define CONTROLLER_DISABLE     0x0
@@ -191,6 +203,12 @@ enum {
 #define CONFIG_RESULT_CODE_MASK                0xFF
 #define GENERIC_ERROR_CODE_MASK                0xFF
 
+#define UIC_ARG_MIB_SEL(attr, sel)     ((((attr) & 0xFFFF) << 16) |\
+                                        ((sel) & 0xFFFF))
+#define UIC_ARG_MIB(attr)              UIC_ARG_MIB_SEL(attr, 0)
+#define UIC_ARG_ATTR_TYPE(t)           (((t) & 0xFF) << 16)
+#define UIC_GET_ATTR_ID(v)             (((v) >> 16) & 0xFFFF)
+
 /* UIC Commands */
 enum {
        UIC_CMD_DME_GET                 = 0x01,
@@ -226,8 +244,8 @@ enum {
 
 #define MASK_UIC_COMMAND_RESULT                        0xFF
 
-#define INT_AGGR_COUNTER_THRESHOLD_VALUE       (0x1F << 8)
-#define INT_AGGR_TIMEOUT_VALUE                 (0x02)
+#define INT_AGGR_COUNTER_THLD_VAL(c)   (((c) & 0x1F) << 8)
+#define INT_AGGR_TIMEOUT_VAL(t)                (((t) & 0xFF) << 0)
 
 /* Interrupt disable masks */
 enum {
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
new file mode 100644 (file)
index 0000000..0bb8041
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * drivers/scsi/ufs/unipro.h
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _UNIPRO_H_
+#define _UNIPRO_H_
+
+/*
+ * PHY Adapter attributes
+ */
+#define PA_ACTIVETXDATALANES   0x1560
+#define PA_ACTIVERXDATALANES   0x1580
+#define PA_TXTRAILINGCLOCKS    0x1564
+#define PA_PHY_TYPE            0x1500
+#define PA_AVAILTXDATALANES    0x1520
+#define PA_AVAILRXDATALANES    0x1540
+#define PA_MINRXTRAILINGCLOCKS 0x1543
+#define PA_TXPWRSTATUS         0x1567
+#define PA_RXPWRSTATUS         0x1582
+#define PA_TXFORCECLOCK                0x1562
+#define PA_TXPWRMODE           0x1563
+#define PA_LEGACYDPHYESCDL     0x1570
+#define PA_MAXTXSPEEDFAST      0x1521
+#define PA_MAXTXSPEEDSLOW      0x1522
+#define PA_MAXRXSPEEDFAST      0x1541
+#define PA_MAXRXSPEEDSLOW      0x1542
+#define PA_TXLINKSTARTUPHS     0x1544
+#define PA_TXSPEEDFAST         0x1565
+#define PA_TXSPEEDSLOW         0x1566
+#define PA_REMOTEVERINFO       0x15A0
+#define PA_TXGEAR              0x1568
+#define PA_TXTERMINATION       0x1569
+#define PA_HSSERIES            0x156A
+#define PA_PWRMODE             0x1571
+#define PA_RXGEAR              0x1583
+#define PA_RXTERMINATION       0x1584
+#define PA_MAXRXPWMGEAR                0x1586
+#define PA_MAXRXHSGEAR         0x1587
+#define PA_RXHSUNTERMCAP       0x15A5
+#define PA_RXLSTERMCAP         0x15A6
+#define PA_PACPREQTIMEOUT      0x1590
+#define PA_PACPREQEOBTIMEOUT   0x1591
+#define PA_HIBERN8TIME         0x15A7
+#define PA_LOCALVERINFO                0x15A9
+#define PA_TACTIVATE           0x15A8
+#define PA_PACPFRAMECOUNT      0x15C0
+#define PA_PACPERRORCOUNT      0x15C1
+#define PA_PHYTESTCONTROL      0x15C2
+#define PA_PWRMODEUSERDATA0    0x15B0
+#define PA_PWRMODEUSERDATA1    0x15B1
+#define PA_PWRMODEUSERDATA2    0x15B2
+#define PA_PWRMODEUSERDATA3    0x15B3
+#define PA_PWRMODEUSERDATA4    0x15B4
+#define PA_PWRMODEUSERDATA5    0x15B5
+#define PA_PWRMODEUSERDATA6    0x15B6
+#define PA_PWRMODEUSERDATA7    0x15B7
+#define PA_PWRMODEUSERDATA8    0x15B8
+#define PA_PWRMODEUSERDATA9    0x15B9
+#define PA_PWRMODEUSERDATA10   0x15BA
+#define PA_PWRMODEUSERDATA11   0x15BB
+#define PA_CONNECTEDTXDATALANES        0x1561
+#define PA_CONNECTEDRXDATALANES        0x1581
+#define PA_LOGICALLANEMAP      0x15A1
+#define PA_SLEEPNOCONFIGTIME   0x15A2
+#define PA_STALLNOCONFIGTIME   0x15A3
+#define PA_SAVECONFIGTIME      0x15A4
+
+/* PA power modes */
+enum {
+       FAST_MODE       = 1,
+       SLOW_MODE       = 2,
+       FASTAUTO_MODE   = 4,
+       SLOWAUTO_MODE   = 5,
+       UNCHANGED       = 7,
+};
+
+/* PA TX/RX Frequency Series */
+enum {
+       PA_HS_MODE_A    = 1,
+       PA_HS_MODE_B    = 2,
+};
+
+/*
+ * Data Link Layer Attributes
+ */
+#define DL_TC0TXFCTHRESHOLD    0x2040
+#define DL_FC0PROTTIMEOUTVAL   0x2041
+#define DL_TC0REPLAYTIMEOUTVAL 0x2042
+#define DL_AFC0REQTIMEOUTVAL   0x2043
+#define DL_AFC0CREDITTHRESHOLD 0x2044
+#define DL_TC0OUTACKTHRESHOLD  0x2045
+#define DL_TC1TXFCTHRESHOLD    0x2060
+#define DL_FC1PROTTIMEOUTVAL   0x2061
+#define DL_TC1REPLAYTIMEOUTVAL 0x2062
+#define DL_AFC1REQTIMEOUTVAL   0x2063
+#define DL_AFC1CREDITTHRESHOLD 0x2064
+#define DL_TC1OUTACKTHRESHOLD  0x2065
+#define DL_TXPREEMPTIONCAP     0x2000
+#define DL_TC0TXMAXSDUSIZE     0x2001
+#define DL_TC0RXINITCREDITVAL  0x2002
+#define DL_TC0TXBUFFERSIZE     0x2005
+#define DL_PEERTC0PRESENT      0x2046
+#define DL_PEERTC0RXINITCREVAL 0x2047
+#define DL_TC1TXMAXSDUSIZE     0x2003
+#define DL_TC1RXINITCREDITVAL  0x2004
+#define DL_TC1TXBUFFERSIZE     0x2006
+#define DL_PEERTC1PRESENT      0x2066
+#define DL_PEERTC1RXINITCREVAL 0x2067
+
+/*
+ * Network Layer Attributes
+ */
+#define N_DEVICEID             0x3000
+#define N_DEVICEID_VALID       0x3001
+#define N_TC0TXMAXSDUSIZE      0x3020
+#define N_TC1TXMAXSDUSIZE      0x3021
+
+/*
+ * Transport Layer Attributes
+ */
+#define T_NUMCPORTS            0x4000
+#define T_NUMTESTFEATURES      0x4001
+#define T_CONNECTIONSTATE      0x4020
+#define T_PEERDEVICEID         0x4021
+#define T_PEERCPORTID          0x4022
+#define T_TRAFFICCLASS         0x4023
+#define T_PROTOCOLID           0x4024
+#define T_CPORTFLAGS           0x4025
+#define T_TXTOKENVALUE         0x4026
+#define T_RXTOKENVALUE         0x4027
+#define T_LOCALBUFFERSPACE     0x4028
+#define T_PEERBUFFERSPACE      0x4029
+#define T_CREDITSTOSEND                0x402A
+#define T_CPORTMODE            0x402B
+#define T_TC0TXMAXSDUSIZE      0x4060
+#define T_TC1TXMAXSDUSIZE      0x4061
+
+/* Boolean attribute values */
+enum {
+       FALSE = 0,
+       TRUE,
+};
+
+#endif /* _UNIPRO_H_ */
index a8c3444..d42f578 100644 (file)
@@ -481,7 +481,7 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
        header.sec = now.tv_sec;
        header.nsec = now.tv_nsec;
        header.euid = current_euid();
-       header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
+       header.len = min_t(size_t, iocb->ki_nbytes, LOGGER_ENTRY_MAX_PAYLOAD);
        header.hdr_size = sizeof(struct logger_entry);
 
        /* null writes succeed, return zero */
index 253f026..bc534db 100644 (file)
@@ -1009,7 +1009,7 @@ static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
        local_iov->iov_len = count;
        init_sync_kiocb(kiocb, file);
        kiocb->ki_pos = *ppos;
-       kiocb->ki_left = count;
+       kiocb->ki_nbytes = count;
 
        result = ll_file_aio_read(kiocb, local_iov, 1, kiocb->ki_pos);
        *ppos = kiocb->ki_pos;
@@ -1068,7 +1068,7 @@ static ssize_t ll_file_write(struct file *file, const char *buf, size_t count,
        local_iov->iov_len = count;
        init_sync_kiocb(kiocb, file);
        kiocb->ki_pos = *ppos;
-       kiocb->ki_left = count;
+       kiocb->ki_nbytes = count;
 
        result = ll_file_aio_write(kiocb, local_iov, 1, kiocb->ki_pos);
        *ppos = kiocb->ki_pos;
index 465ef8e..b94c049 100644 (file)
@@ -524,7 +524,7 @@ struct kiocb_priv {
        unsigned                actual;
 };
 
-static int ep_aio_cancel(struct kiocb *iocb, struct io_event *e)
+static int ep_aio_cancel(struct kiocb *iocb)
 {
        struct kiocb_priv       *priv = iocb->private;
        struct ep_data          *epdata;
@@ -540,7 +540,6 @@ static int ep_aio_cancel(struct kiocb *iocb, struct io_event *e)
        // spin_unlock(&epdata->dev->lock);
        local_irq_enable();
 
-       aio_put_req(iocb);
        return value;
 }
 
@@ -709,11 +708,11 @@ ep_aio_read(struct kiocb *iocb, const struct iovec *iov,
        if (unlikely(usb_endpoint_dir_in(&epdata->desc)))
                return -EINVAL;
 
-       buf = kmalloc(iocb->ki_left, GFP_KERNEL);
+       buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
        if (unlikely(!buf))
                return -ENOMEM;
 
-       return ep_aio_rwtail(iocb, buf, iocb->ki_left, epdata, iov, nr_segs);
+       return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs);
 }
 
 static ssize_t
@@ -728,7 +727,7 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
        if (unlikely(!usb_endpoint_dir_in(&epdata->desc)))
                return -EINVAL;
 
-       buf = kmalloc(iocb->ki_left, GFP_KERNEL);
+       buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
        if (unlikely(!buf))
                return -ENOMEM;
 
index 9b5ca11..6b868f0 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -26,6 +26,7 @@
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/mmu_context.h>
+#include <linux/percpu.h>
 #include <linux/slab.h>
 #include <linux/timer.h>
 #include <linux/aio.h>
 #include <linux/eventfd.h>
 #include <linux/blkdev.h>
 #include <linux/compat.h>
+#include <linux/anon_inodes.h>
+#include <linux/migrate.h>
+#include <linux/ramfs.h>
+#include <linux/percpu-refcount.h>
 
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
@@ -61,14 +66,29 @@ struct aio_ring {
 
 #define AIO_RING_PAGES 8
 
+struct kioctx_table {
+       struct rcu_head rcu;
+       unsigned        nr;
+       struct kioctx   *table[];
+};
+
+struct kioctx_cpu {
+       unsigned                reqs_available;
+};
+
 struct kioctx {
-       atomic_t                users;
+       struct percpu_ref       users;
        atomic_t                dead;
 
-       /* This needs improving */
        unsigned long           user_id;
-       struct hlist_node       list;
 
+       struct __percpu kioctx_cpu *cpu;
+
+       /*
+        * For percpu reqs_available, number of slots we move to/from global
+        * counter at a time:
+        */
+       unsigned                req_batch;
        /*
         * This is what userspace passed to io_setup(), it's not used for
         * anything but counting against the global max_reqs quota.
@@ -88,10 +108,18 @@ struct kioctx {
        long                    nr_pages;
 
        struct rcu_head         rcu_head;
-       struct work_struct      rcu_work;
+       struct work_struct      free_work;
 
        struct {
-               atomic_t        reqs_active;
+               /*
+                * This counts the number of available slots in the ringbuffer,
+                * so we avoid overflowing it: it's decremented (if positive)
+                * when allocating a kiocb and incremented when the resulting
+                * io_event is pulled off the ringbuffer.
+                *
+                * We batch accesses to it with a percpu version.
+                */
+               atomic_t        reqs_available;
        } ____cacheline_aligned_in_smp;
 
        struct {
@@ -110,6 +138,9 @@ struct kioctx {
        } ____cacheline_aligned_in_smp;
 
        struct page             *internal_pages[AIO_RING_PAGES];
+       struct file             *aio_ring_file;
+
+       unsigned                id;
 };
 
 /*------ sysctl variables----*/
@@ -138,15 +169,77 @@ __initcall(aio_setup);
 
 static void aio_free_ring(struct kioctx *ctx)
 {
-       long i;
+       int i;
+       struct file *aio_ring_file = ctx->aio_ring_file;
 
-       for (i = 0; i < ctx->nr_pages; i++)
+       for (i = 0; i < ctx->nr_pages; i++) {
+               pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
+                               page_count(ctx->ring_pages[i]));
                put_page(ctx->ring_pages[i]);
+       }
 
        if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
                kfree(ctx->ring_pages);
+
+       if (aio_ring_file) {
+               truncate_setsize(aio_ring_file->f_inode, 0);
+               fput(aio_ring_file);
+               ctx->aio_ring_file = NULL;
+       }
+}
+
+static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       vma->vm_ops = &generic_file_vm_ops;
+       return 0;
 }
 
+static const struct file_operations aio_ring_fops = {
+       .mmap = aio_ring_mmap,
+};
+
+static int aio_set_page_dirty(struct page *page)
+{
+       return 0;
+}
+
+#if IS_ENABLED(CONFIG_MIGRATION)
+static int aio_migratepage(struct address_space *mapping, struct page *new,
+                       struct page *old, enum migrate_mode mode)
+{
+       struct kioctx *ctx = mapping->private_data;
+       unsigned long flags;
+       unsigned idx = old->index;
+       int rc;
+
+       /* Writeback must be complete */
+       BUG_ON(PageWriteback(old));
+       put_page(old);
+
+       rc = migrate_page_move_mapping(mapping, new, old, NULL, mode);
+       if (rc != MIGRATEPAGE_SUCCESS) {
+               get_page(old);
+               return rc;
+       }
+
+       get_page(new);
+
+       spin_lock_irqsave(&ctx->completion_lock, flags);
+       migrate_page_copy(new, old);
+       ctx->ring_pages[idx] = new;
+       spin_unlock_irqrestore(&ctx->completion_lock, flags);
+
+       return rc;
+}
+#endif
+
+static const struct address_space_operations aio_ctx_aops = {
+       .set_page_dirty = aio_set_page_dirty,
+#if IS_ENABLED(CONFIG_MIGRATION)
+       .migratepage    = aio_migratepage,
+#endif
+};
+
 static int aio_setup_ring(struct kioctx *ctx)
 {
        struct aio_ring *ring;
@@ -154,20 +247,45 @@ static int aio_setup_ring(struct kioctx *ctx)
        struct mm_struct *mm = current->mm;
        unsigned long size, populate;
        int nr_pages;
+       int i;
+       struct file *file;
 
        /* Compensate for the ring buffer's head/tail overlap entry */
        nr_events += 2; /* 1 is required, 2 for good luck */
 
        size = sizeof(struct aio_ring);
        size += sizeof(struct io_event) * nr_events;
-       nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
 
+       nr_pages = PFN_UP(size);
        if (nr_pages < 0)
                return -EINVAL;
 
-       nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
+       file = anon_inode_getfile_private("[aio]", &aio_ring_fops, ctx, O_RDWR);
+       if (IS_ERR(file)) {
+               ctx->aio_ring_file = NULL;
+               return -EAGAIN;
+       }
+
+       file->f_inode->i_mapping->a_ops = &aio_ctx_aops;
+       file->f_inode->i_mapping->private_data = ctx;
+       file->f_inode->i_size = PAGE_SIZE * (loff_t)nr_pages;
+
+       for (i = 0; i < nr_pages; i++) {
+               struct page *page;
+               page = find_or_create_page(file->f_inode->i_mapping,
+                                          i, GFP_HIGHUSER | __GFP_ZERO);
+               if (!page)
+                       break;
+               pr_debug("pid(%d) page[%d]->count=%d\n",
+                        current->pid, i, page_count(page));
+               SetPageUptodate(page);
+               SetPageDirty(page);
+               unlock_page(page);
+       }
+       ctx->aio_ring_file = file;
+       nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
+                       / sizeof(struct io_event);
 
-       ctx->nr_events = 0;
        ctx->ring_pages = ctx->internal_pages;
        if (nr_pages > AIO_RING_PAGES) {
                ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
@@ -178,10 +296,11 @@ static int aio_setup_ring(struct kioctx *ctx)
 
        ctx->mmap_size = nr_pages * PAGE_SIZE;
        pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
+
        down_write(&mm->mmap_sem);
-       ctx->mmap_base = do_mmap_pgoff(NULL, 0, ctx->mmap_size,
-                                      PROT_READ|PROT_WRITE,
-                                      MAP_ANONYMOUS|MAP_PRIVATE, 0, &populate);
+       ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
+                                      PROT_READ | PROT_WRITE,
+                                      MAP_SHARED | MAP_POPULATE, 0, &populate);
        if (IS_ERR((void *)ctx->mmap_base)) {
                up_write(&mm->mmap_sem);
                ctx->mmap_size = 0;
@@ -190,23 +309,34 @@ static int aio_setup_ring(struct kioctx *ctx)
        }
 
        pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
+
+       /* We must do this while still holding mmap_sem for write, as we
+        * need to be protected against userspace attempting to mremap()
+        * or munmap() the ring buffer.
+        */
        ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
                                       1, 0, ctx->ring_pages, NULL);
+
+       /* Dropping the reference here is safe as the page cache will hold
+        * onto the pages for us.  It is also required so that page migration
+        * can unmap the pages and get the right reference count.
+        */
+       for (i = 0; i < ctx->nr_pages; i++)
+               put_page(ctx->ring_pages[i]);
+
        up_write(&mm->mmap_sem);
 
        if (unlikely(ctx->nr_pages != nr_pages)) {
                aio_free_ring(ctx);
                return -EAGAIN;
        }
-       if (populate)
-               mm_populate(ctx->mmap_base, populate);
 
        ctx->user_id = ctx->mmap_base;
        ctx->nr_events = nr_events; /* trusted copy */
 
        ring = kmap_atomic(ctx->ring_pages[0]);
        ring->nr = nr_events;   /* user copy */
-       ring->id = ctx->user_id;
+       ring->id = ~0U;
        ring->head = ring->tail = 0;
        ring->magic = AIO_RING_MAGIC;
        ring->compat_features = AIO_RING_COMPAT_FEATURES;
@@ -238,11 +368,9 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
 }
 EXPORT_SYMBOL(kiocb_set_cancel_fn);
 
-static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
-                       struct io_event *res)
+static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
 {
        kiocb_cancel_fn *old, *cancel;
-       int ret = -EINVAL;
 
        /*
         * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it
@@ -252,28 +380,20 @@ static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
        cancel = ACCESS_ONCE(kiocb->ki_cancel);
        do {
                if (!cancel || cancel == KIOCB_CANCELLED)
-                       return ret;
+                       return -EINVAL;
 
                old = cancel;
                cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
        } while (cancel != old);
 
-       atomic_inc(&kiocb->ki_users);
-       spin_unlock_irq(&ctx->ctx_lock);
-
-       memset(res, 0, sizeof(*res));
-       res->obj = (u64)(unsigned long)kiocb->ki_obj.user;
-       res->data = kiocb->ki_user_data;
-       ret = cancel(kiocb, res);
-
-       spin_lock_irq(&ctx->ctx_lock);
-
-       return ret;
+       return cancel(kiocb);
 }
 
 static void free_ioctx_rcu(struct rcu_head *head)
 {
        struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
+
+       free_percpu(ctx->cpu);
        kmem_cache_free(kioctx_cachep, ctx);
 }
 
@@ -282,12 +402,13 @@ static void free_ioctx_rcu(struct rcu_head *head)
  * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
  * now it's safe to cancel any that need to be.
  */
-static void free_ioctx(struct kioctx *ctx)
+static void free_ioctx(struct work_struct *work)
 {
+       struct kioctx *ctx = container_of(work, struct kioctx, free_work);
        struct aio_ring *ring;
-       struct io_event res;
        struct kiocb *req;
-       unsigned head, avail;
+       unsigned cpu, avail;
+       DEFINE_WAIT(wait);
 
        spin_lock_irq(&ctx->ctx_lock);
 
@@ -296,28 +417,38 @@ static void free_ioctx(struct kioctx *ctx)
                                       struct kiocb, ki_list);
 
                list_del_init(&req->ki_list);
-               kiocb_cancel(ctx, req, &res);
+               kiocb_cancel(ctx, req);
        }
 
        spin_unlock_irq(&ctx->ctx_lock);
 
-       ring = kmap_atomic(ctx->ring_pages[0]);
-       head = ring->head;
-       kunmap_atomic(ring);
+       for_each_possible_cpu(cpu) {
+               struct kioctx_cpu *kcpu = per_cpu_ptr(ctx->cpu, cpu);
 
-       while (atomic_read(&ctx->reqs_active) > 0) {
-               wait_event(ctx->wait,
-                               head != ctx->tail ||
-                               atomic_read(&ctx->reqs_active) <= 0);
+               atomic_add(kcpu->reqs_available, &ctx->reqs_available);
+               kcpu->reqs_available = 0;
+       }
 
-               avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
+       while (1) {
+               prepare_to_wait(&ctx->wait, &wait, TASK_UNINTERRUPTIBLE);
 
-               atomic_sub(avail, &ctx->reqs_active);
-               head += avail;
-               head %= ctx->nr_events;
+               ring = kmap_atomic(ctx->ring_pages[0]);
+               avail = (ring->head <= ring->tail)
+                        ? ring->tail - ring->head
+                        : ctx->nr_events - ring->head + ring->tail;
+
+               atomic_add(avail, &ctx->reqs_available);
+               ring->head = ring->tail;
+               kunmap_atomic(ring);
+
+               if (atomic_read(&ctx->reqs_available) >= ctx->nr_events - 1)
+                       break;
+
+               schedule();
        }
+       finish_wait(&ctx->wait, &wait);
 
-       WARN_ON(atomic_read(&ctx->reqs_active) < 0);
+       WARN_ON(atomic_read(&ctx->reqs_available) > ctx->nr_events - 1);
 
        aio_free_ring(ctx);
 
@@ -333,10 +464,68 @@ static void free_ioctx(struct kioctx *ctx)
        call_rcu(&ctx->rcu_head, free_ioctx_rcu);
 }
 
-static void put_ioctx(struct kioctx *ctx)
+static void free_ioctx_ref(struct percpu_ref *ref)
 {
-       if (unlikely(atomic_dec_and_test(&ctx->users)))
-               free_ioctx(ctx);
+       struct kioctx *ctx = container_of(ref, struct kioctx, users);
+
+       INIT_WORK(&ctx->free_work, free_ioctx);
+       schedule_work(&ctx->free_work);
+}
+
+static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
+{
+       unsigned i, new_nr;
+       struct kioctx_table *table, *old;
+       struct aio_ring *ring;
+
+       spin_lock(&mm->ioctx_lock);
+       rcu_read_lock();
+       table = rcu_dereference(mm->ioctx_table);
+
+       while (1) {
+               if (table)
+                       for (i = 0; i < table->nr; i++)
+                               if (!table->table[i]) {
+                                       ctx->id = i;
+                                       table->table[i] = ctx;
+                                       rcu_read_unlock();
+                                       spin_unlock(&mm->ioctx_lock);
+
+                                       ring = kmap_atomic(ctx->ring_pages[0]);
+                                       ring->id = ctx->id;
+                                       kunmap_atomic(ring);
+                                       return 0;
+                               }
+
+               new_nr = (table ? table->nr : 1) * 4;
+
+               rcu_read_unlock();
+               spin_unlock(&mm->ioctx_lock);
+
+               table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
+                               new_nr, GFP_KERNEL);
+               if (!table)
+                       return -ENOMEM;
+
+               table->nr = new_nr;
+
+               spin_lock(&mm->ioctx_lock);
+               rcu_read_lock();
+               old = rcu_dereference(mm->ioctx_table);
+
+               if (!old) {
+                       rcu_assign_pointer(mm->ioctx_table, table);
+               } else if (table->nr > old->nr) {
+                       memcpy(table->table, old->table,
+                              old->nr * sizeof(struct kioctx *));
+
+                       rcu_assign_pointer(mm->ioctx_table, table);
+                       kfree_rcu(old, rcu);
+               } else {
+                       kfree(table);
+                       table = old;
+               }
+       }
 }
 
 /* ioctx_alloc
@@ -348,6 +537,18 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
        struct kioctx *ctx;
        int err = -ENOMEM;
 
+       /*
+        * We keep track of the number of available ringbuffer slots, to prevent
+        * overflow (reqs_available), and we also use percpu counters for this.
+        *
+        * So since up to half the slots might be on other cpu's percpu counters
+        * and unavailable, double nr_events so userspace sees what they
+        * expected: additionally, we move req_batch slots to/from percpu
+        * counters at a time, so make sure that isn't 0:
+        */
+       nr_events = max(nr_events, num_possible_cpus() * 4);
+       nr_events *= 2;
+
        /* Prevent overflows */
        if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
            (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
@@ -355,7 +556,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
                return ERR_PTR(-EINVAL);
        }
 
-       if (!nr_events || (unsigned long)nr_events > aio_max_nr)
+       if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL))
                return ERR_PTR(-EAGAIN);
 
        ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
@@ -364,8 +565,9 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
        ctx->max_reqs = nr_events;
 
-       atomic_set(&ctx->users, 2);
-       atomic_set(&ctx->dead, 0);
+       if (percpu_ref_init(&ctx->users, free_ioctx_ref))
+               goto out_freectx;
+
        spin_lock_init(&ctx->ctx_lock);
        spin_lock_init(&ctx->completion_lock);
        mutex_init(&ctx->ring_lock);
@@ -373,12 +575,21 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
        INIT_LIST_HEAD(&ctx->active_reqs);
 
+       ctx->cpu = alloc_percpu(struct kioctx_cpu);
+       if (!ctx->cpu)
+               goto out_freeref;
+
        if (aio_setup_ring(ctx) < 0)
-               goto out_freectx;
+               goto out_freepcpu;
+
+       atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
+       ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
+       if (ctx->req_batch < 1)
+               ctx->req_batch = 1;
 
        /* limit the number of system wide aios */
        spin_lock(&aio_nr_lock);
-       if (aio_nr + nr_events > aio_max_nr ||
+       if (aio_nr + nr_events > (aio_max_nr * 2UL) ||
            aio_nr + nr_events < aio_nr) {
                spin_unlock(&aio_nr_lock);
                goto out_cleanup;
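
The comment added to ioctx_alloc() explains the sizing: the requested event count is doubled (and floored at four per possible CPU) because up to half the available slots may be parked in per-CPU counters, and req_batch is the number of slots shuttled between a per-CPU counter and the global reqs_available at a time. A worked example under assumed numbers, ignoring ring page rounding, not taken from the patch:

/*
 * Sketch only: nr_events doubling and req_batch derivation for an
 * assumed 4-CPU machine and an io_setup(128) request.
 */
#include <stdio.h>

int main(void)
{
	unsigned int requested = 128;		/* assumed io_setup() argument */
	unsigned int possible_cpus = 4;		/* assumed num_possible_cpus() */

	unsigned int nr_events = requested;
	if (nr_events < possible_cpus * 4)
		nr_events = possible_cpus * 4;
	nr_events *= 2;				/* 256 ring slots requested */

	unsigned int req_batch = (nr_events - 1) / (possible_cpus * 4);
	if (req_batch < 1)
		req_batch = 1;			/* here: 255 / 16 = 15 */

	printf("nr_events=%u req_batch=%u\n", nr_events, req_batch);
	return 0;
}
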
@@ -386,49 +597,54 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
        aio_nr += ctx->max_reqs;
        spin_unlock(&aio_nr_lock);
 
-       /* now link into global list. */
-       spin_lock(&mm->ioctx_lock);
-       hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
-       spin_unlock(&mm->ioctx_lock);
+       percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
+
+       err = ioctx_add_table(ctx, mm);
+       if (err)
+               goto out_cleanup_put;
 
        pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
                 ctx, ctx->user_id, mm, ctx->nr_events);
        return ctx;
 
+out_cleanup_put:
+       percpu_ref_put(&ctx->users);
 out_cleanup:
        err = -EAGAIN;
        aio_free_ring(ctx);
+out_freepcpu:
+       free_percpu(ctx->cpu);
+out_freeref:
+       free_percpu(ctx->users.pcpu_count);
 out_freectx:
+       if (ctx->aio_ring_file)
+               fput(ctx->aio_ring_file);
        kmem_cache_free(kioctx_cachep, ctx);
        pr_debug("error allocating ioctx %d\n", err);
        return ERR_PTR(err);
 }
 
-static void kill_ioctx_work(struct work_struct *work)
-{
-       struct kioctx *ctx = container_of(work, struct kioctx, rcu_work);
-
-       wake_up_all(&ctx->wait);
-       put_ioctx(ctx);
-}
-
-static void kill_ioctx_rcu(struct rcu_head *head)
-{
-       struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
-
-       INIT_WORK(&ctx->rcu_work, kill_ioctx_work);
-       schedule_work(&ctx->rcu_work);
-}
-
 /* kill_ioctx
  *     Cancels all outstanding aio requests on an aio context.  Used
  *     when the processes owning a context have all exited to encourage
  *     the rapid destruction of the kioctx.
  */
-static void kill_ioctx(struct kioctx *ctx)
+static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
 {
        if (!atomic_xchg(&ctx->dead, 1)) {
-               hlist_del_rcu(&ctx->list);
+               struct kioctx_table *table;
+
+               spin_lock(&mm->ioctx_lock);
+               rcu_read_lock();
+               table = rcu_dereference(mm->ioctx_table);
+
+               WARN_ON(ctx != table->table[ctx->id]);
+               table->table[ctx->id] = NULL;
+               rcu_read_unlock();
+               spin_unlock(&mm->ioctx_lock);
+
+               /* percpu_ref_kill() will do the necessary call_rcu() */
+               wake_up_all(&ctx->wait);
 
                /*
                 * It'd be more correct to do this in free_ioctx(), after all
@@ -445,24 +661,23 @@ static void kill_ioctx(struct kioctx *ctx)
                if (ctx->mmap_size)
                        vm_munmap(ctx->mmap_base, ctx->mmap_size);
 
-               /* Between hlist_del_rcu() and dropping the initial ref */
-               call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
+               percpu_ref_kill(&ctx->users);
        }
 }
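
For orientation, a sketch of the percpu-refcount lifecycle that ctx->users now follows, mirroring the call sites in ioctx_alloc(), lookup_ioctx(), kill_ioctx() and the io_* syscalls further down. struct my_ctx and the my_ctx_* names are illustrative, not part of the patch; the two-argument percpu_ref_init() is the form this series uses.

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_ctx {
        struct percpu_ref users;
};

/* release callback: runs once every reference has been dropped */
static void my_ctx_release(struct percpu_ref *ref)
{
        kfree(container_of(ref, struct my_ctx, users));
}

static struct my_ctx *my_ctx_alloc(void)
{
        struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (!ctx)
                return NULL;
        /* like ioctx_alloc(): take the base reference */
        if (percpu_ref_init(&ctx->users, my_ctx_release)) {
                kfree(ctx);
                return NULL;
        }
        return ctx;
}

static void my_ctx_example(struct my_ctx *ctx)
{
        percpu_ref_get(&ctx->users);    /* like lookup_ioctx() */
        percpu_ref_put(&ctx->users);    /* like io_submit()/io_destroy() when done */
        percpu_ref_kill(&ctx->users);   /* like kill_ioctx(): drops the base ref;
                                         * my_ctx_release() runs once the count
                                         * reaches zero */
}
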
 
 /* wait_on_sync_kiocb:
  *     Waits on the given sync kiocb to complete.
  */
-ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
+ssize_t wait_on_sync_kiocb(struct kiocb *req)
 {
-       while (atomic_read(&iocb->ki_users)) {
+       while (!req->ki_ctx) {
                set_current_state(TASK_UNINTERRUPTIBLE);
-               if (!atomic_read(&iocb->ki_users))
+               if (req->ki_ctx)
                        break;
                io_schedule();
        }
        __set_current_state(TASK_RUNNING);
-       return iocb->ki_user_data;
+       return req->ki_user_data;
 }
 EXPORT_SYMBOL(wait_on_sync_kiocb);
 
@@ -476,16 +691,28 @@ EXPORT_SYMBOL(wait_on_sync_kiocb);
  */
 void exit_aio(struct mm_struct *mm)
 {
+       struct kioctx_table *table;
        struct kioctx *ctx;
-       struct hlist_node *n;
-
-       hlist_for_each_entry_safe(ctx, n, &mm->ioctx_list, list) {
-               if (1 != atomic_read(&ctx->users))
-                       printk(KERN_DEBUG
-                               "exit_aio:ioctx still alive: %d %d %d\n",
-                               atomic_read(&ctx->users),
-                               atomic_read(&ctx->dead),
-                               atomic_read(&ctx->reqs_active));
+       unsigned i = 0;
+
+       while (1) {
+               rcu_read_lock();
+               table = rcu_dereference(mm->ioctx_table);
+
+               do {
+                       if (!table || i >= table->nr) {
+                               rcu_read_unlock();
+                               rcu_assign_pointer(mm->ioctx_table, NULL);
+                               if (table)
+                                       kfree(table);
+                               return;
+                       }
+
+                       ctx = table->table[i++];
+               } while (!ctx);
+
+               rcu_read_unlock();
+
                /*
                 * We don't need to bother with munmap() here -
                 * exit_mmap(mm) is coming and it'll unmap everything.
@@ -496,40 +723,75 @@ void exit_aio(struct mm_struct *mm)
                 */
                ctx->mmap_size = 0;
 
-               kill_ioctx(ctx);
+               kill_ioctx(mm, ctx);
+       }
+}
+
+static void put_reqs_available(struct kioctx *ctx, unsigned nr)
+{
+       struct kioctx_cpu *kcpu;
+
+       preempt_disable();
+       kcpu = this_cpu_ptr(ctx->cpu);
+
+       kcpu->reqs_available += nr;
+       while (kcpu->reqs_available >= ctx->req_batch * 2) {
+               kcpu->reqs_available -= ctx->req_batch;
+               atomic_add(ctx->req_batch, &ctx->reqs_available);
+       }
+
+       preempt_enable();
+}
+
+static bool get_reqs_available(struct kioctx *ctx)
+{
+       struct kioctx_cpu *kcpu;
+       bool ret = false;
+
+       preempt_disable();
+       kcpu = this_cpu_ptr(ctx->cpu);
+
+       if (!kcpu->reqs_available) {
+               int old, avail = atomic_read(&ctx->reqs_available);
+
+               do {
+                       if (avail < ctx->req_batch)
+                               goto out;
+
+                       old = avail;
+                       avail = atomic_cmpxchg(&ctx->reqs_available,
+                                              avail, avail - ctx->req_batch);
+               } while (avail != old);
+
+               kcpu->reqs_available += ctx->req_batch;
        }
+
+       ret = true;
+       kcpu->reqs_available--;
+out:
+       preempt_enable();
+       return ret;
 }
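
A rough model of the accounting above: each CPU keeps a small local cache of request slots and only refills from, or drains back to, the shared ctx->reqs_available counter in units of req_batch, so the atomic is touched once per batch rather than once per request. The stand-alone user-space sketch below (single-threaded, fixed batch size of 4; all names and numbers are illustrative) shows the resulting bookkeeping:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int global_avail = 128;          /* models ctx->reqs_available */
static int cpu_avail;                   /* models kcpu->reqs_available */
static const int req_batch = 4;         /* models ctx->req_batch */

static bool get_req(void)
{
        if (!cpu_avail) {
                if (global_avail < req_batch)
                        return false;           /* the -EAGAIN path */
                global_avail -= req_batch;      /* refill the local cache */
                cpu_avail += req_batch;
        }
        cpu_avail--;
        return true;
}

static void put_reqs(int nr)
{
        cpu_avail += nr;
        while (cpu_avail >= req_batch * 2) {    /* return surplus in batches */
                cpu_avail -= req_batch;
                global_avail += req_batch;
        }
}

int main(void)
{
        int i;

        for (i = 0; i < 6; i++)
                assert(get_req());
        /* two refills of 4 were taken, two slots remain cached locally */
        printf("global=%d local=%d\n", global_avail, cpu_avail);  /* 120 2 */

        put_reqs(6);
        /* 8 >= 2 * req_batch once, so one batch went back to the counter */
        printf("global=%d local=%d\n", global_avail, cpu_avail);  /* 124 4 */
        return 0;
}
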
 
 /* aio_get_req
- *     Allocate a slot for an aio request.  Increments the ki_users count
- * of the kioctx so that the kioctx stays around until all requests are
- * complete.  Returns NULL if no requests are free.
- *
- * Returns with kiocb->ki_users set to 2.  The io submit code path holds
- * an extra reference while submitting the i/o.
- * This prevents races between the aio code path referencing the
- * req (after submitting it) and aio_complete() freeing the req.
+ *     Allocate a slot for an aio request.
+ * Returns NULL if no requests are free.
  */
 static inline struct kiocb *aio_get_req(struct kioctx *ctx)
 {
        struct kiocb *req;
 
-       if (atomic_read(&ctx->reqs_active) >= ctx->nr_events)
+       if (!get_reqs_available(ctx))
                return NULL;
 
-       if (atomic_inc_return(&ctx->reqs_active) > ctx->nr_events - 1)
-               goto out_put;
-
        req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
        if (unlikely(!req))
                goto out_put;
 
-       atomic_set(&req->ki_users, 2);
        req->ki_ctx = ctx;
-
        return req;
 out_put:
-       atomic_dec(&ctx->reqs_active);
+       put_reqs_available(ctx, 1);
        return NULL;
 }
 
@@ -539,35 +801,32 @@ static void kiocb_free(struct kiocb *req)
                fput(req->ki_filp);
        if (req->ki_eventfd != NULL)
                eventfd_ctx_put(req->ki_eventfd);
-       if (req->ki_dtor)
-               req->ki_dtor(req);
-       if (req->ki_iovec != &req->ki_inline_vec)
-               kfree(req->ki_iovec);
        kmem_cache_free(kiocb_cachep, req);
 }
 
-void aio_put_req(struct kiocb *req)
-{
-       if (atomic_dec_and_test(&req->ki_users))
-               kiocb_free(req);
-}
-EXPORT_SYMBOL(aio_put_req);
-
 static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 {
+       struct aio_ring __user *ring  = (void __user *)ctx_id;
        struct mm_struct *mm = current->mm;
        struct kioctx *ctx, *ret = NULL;
+       struct kioctx_table *table;
+       unsigned id;
+
+       if (get_user(id, &ring->id))
+               return NULL;
 
        rcu_read_lock();
+       table = rcu_dereference(mm->ioctx_table);
 
-       hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
-               if (ctx->user_id == ctx_id) {
-                       atomic_inc(&ctx->users);
-                       ret = ctx;
-                       break;
-               }
-       }
+       if (!table || id >= table->nr)
+               goto out;
 
+       ctx = table->table[id];
+       if (ctx && ctx->user_id == ctx_id) {
+               percpu_ref_get(&ctx->users);
+               ret = ctx;
+       }
+out:
        rcu_read_unlock();
        return ret;
 }
@@ -591,16 +850,16 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
         *  - the sync task helpfully left a reference to itself in the iocb
         */
        if (is_sync_kiocb(iocb)) {
-               BUG_ON(atomic_read(&iocb->ki_users) != 1);
                iocb->ki_user_data = res;
-               atomic_set(&iocb->ki_users, 0);
+               smp_wmb();
+               iocb->ki_ctx = ERR_PTR(-EXDEV);
                wake_up_process(iocb->ki_obj.tsk);
                return;
        }
 
        /*
         * Take rcu_read_lock() in case the kioctx is being destroyed, as we
-        * need to issue a wakeup after decrementing reqs_active.
+        * need to issue a wakeup after incrementing reqs_available.
         */
        rcu_read_lock();
 
@@ -613,17 +872,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
        }
 
        /*
-        * cancelled requests don't get events, userland was given one
-        * when the event got cancelled.
-        */
-       if (unlikely(xchg(&iocb->ki_cancel,
-                         KIOCB_CANCELLED) == KIOCB_CANCELLED)) {
-               atomic_dec(&ctx->reqs_active);
-               /* Still need the wake_up in case free_ioctx is waiting */
-               goto put_rq;
-       }
-
-       /*
         * Add a completion event to the ring buffer. Must be done holding
         * ctx->completion_lock to prevent other code from messing with the tail
         * pointer since we might be called from irq context.
@@ -675,9 +923,8 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
        if (iocb->ki_eventfd != NULL)
                eventfd_signal(iocb->ki_eventfd, 1);
 
-put_rq:
        /* everything turned out well, dispose of the aiocb. */
-       aio_put_req(iocb);
+       kiocb_free(iocb);
 
        /*
         * We have to order our ring_info tail store above and test
@@ -702,7 +949,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
                                 struct io_event __user *event, long nr)
 {
        struct aio_ring *ring;
-       unsigned head, pos;
+       unsigned head, tail, pos;
        long ret = 0;
        int copy_ret;
 
@@ -710,11 +957,12 @@ static long aio_read_events_ring(struct kioctx *ctx,
 
        ring = kmap_atomic(ctx->ring_pages[0]);
        head = ring->head;
+       tail = ring->tail;
        kunmap_atomic(ring);
 
-       pr_debug("h%u t%u m%u\n", head, ctx->tail, ctx->nr_events);
+       pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
 
-       if (head == ctx->tail)
+       if (head == tail)
                goto out;
 
        while (ret < nr) {
@@ -722,8 +970,8 @@ static long aio_read_events_ring(struct kioctx *ctx,
                struct io_event *ev;
                struct page *page;
 
-               avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
-               if (head == ctx->tail)
+               avail = (head <= tail ?  tail : ctx->nr_events) - head;
+               if (head == tail)
                        break;
 
                avail = min(avail, nr - ret);
@@ -754,9 +1002,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
        kunmap_atomic(ring);
        flush_dcache_page(ctx->ring_pages[0]);
 
-       pr_debug("%li  h%u t%u\n", ret, head, ctx->tail);
+       pr_debug("%li  h%u t%u\n", ret, head, tail);
 
-       atomic_sub(ret, &ctx->reqs_active);
+       put_reqs_available(ctx, ret);
 out:
        mutex_unlock(&ctx->ring_lock);
 
@@ -854,8 +1102,8 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
        if (!IS_ERR(ioctx)) {
                ret = put_user(ioctx->user_id, ctxp);
                if (ret)
-                       kill_ioctx(ioctx);
-               put_ioctx(ioctx);
+                       kill_ioctx(current->mm, ioctx);
+               percpu_ref_put(&ioctx->users);
        }
 
 out:
@@ -872,101 +1120,37 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 {
        struct kioctx *ioctx = lookup_ioctx(ctx);
        if (likely(NULL != ioctx)) {
-               kill_ioctx(ioctx);
-               put_ioctx(ioctx);
+               kill_ioctx(current->mm, ioctx);
+               percpu_ref_put(&ioctx->users);
                return 0;
        }
        pr_debug("EINVAL: io_destroy: invalid context id\n");
        return -EINVAL;
 }
 
-static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
-{
-       struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];
-
-       BUG_ON(ret <= 0);
-
-       while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
-               ssize_t this = min((ssize_t)iov->iov_len, ret);
-               iov->iov_base += this;
-               iov->iov_len -= this;
-               iocb->ki_left -= this;
-               ret -= this;
-               if (iov->iov_len == 0) {
-                       iocb->ki_cur_seg++;
-                       iov++;
-               }
-       }
-
-       /* the caller should not have done more io than what fit in
-        * the remaining iovecs */
-       BUG_ON(ret > 0 && iocb->ki_left == 0);
-}
-
 typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *,
                            unsigned long, loff_t);
 
-static ssize_t aio_rw_vect_retry(struct kiocb *iocb, int rw, aio_rw_op *rw_op)
-{
-       struct file *file = iocb->ki_filp;
-       struct address_space *mapping = file->f_mapping;
-       struct inode *inode = mapping->host;
-       ssize_t ret = 0;
-
-       /* This matches the pread()/pwrite() logic */
-       if (iocb->ki_pos < 0)
-               return -EINVAL;
-
-       if (rw == WRITE)
-               file_start_write(file);
-       do {
-               ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
-                           iocb->ki_nr_segs - iocb->ki_cur_seg,
-                           iocb->ki_pos);
-               if (ret > 0)
-                       aio_advance_iovec(iocb, ret);
-
-       /* retry all partial writes.  retry partial reads as long as its a
-        * regular file. */
-       } while (ret > 0 && iocb->ki_left > 0 &&
-                (rw == WRITE ||
-                 (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));
-       if (rw == WRITE)
-               file_end_write(file);
-
-       /* This means we must have transferred all that we could */
-       /* No need to retry anymore */
-       if ((ret == 0) || (iocb->ki_left == 0))
-               ret = iocb->ki_nbytes - iocb->ki_left;
-
-       /* If we managed to write some out we return that, rather than
-        * the eventual error. */
-       if (rw == WRITE
-           && ret < 0 && ret != -EIOCBQUEUED
-           && iocb->ki_nbytes - iocb->ki_left)
-               ret = iocb->ki_nbytes - iocb->ki_left;
-
-       return ret;
-}
-
-static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
+static ssize_t aio_setup_vectored_rw(struct kiocb *kiocb,
+                                    int rw, char __user *buf,
+                                    unsigned long *nr_segs,
+                                    struct iovec **iovec,
+                                    bool compat)
 {
        ssize_t ret;
 
-       kiocb->ki_nr_segs = kiocb->ki_nbytes;
+       *nr_segs = kiocb->ki_nbytes;
 
 #ifdef CONFIG_COMPAT
        if (compat)
                ret = compat_rw_copy_check_uvector(rw,
-                               (struct compat_iovec __user *)kiocb->ki_buf,
-                               kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
-                               &kiocb->ki_iovec);
+                               (struct compat_iovec __user *)buf,
+                               *nr_segs, 1, *iovec, iovec);
        else
 #endif
                ret = rw_copy_check_uvector(rw,
-                               (struct iovec __user *)kiocb->ki_buf,
-                               kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
-                               &kiocb->ki_iovec);
+                               (struct iovec __user *)buf,
+                               *nr_segs, 1, *iovec, iovec);
        if (ret < 0)
                return ret;
 
@@ -975,15 +1159,17 @@ static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
        return 0;
 }
 
-static ssize_t aio_setup_single_vector(int rw, struct kiocb *kiocb)
+static ssize_t aio_setup_single_vector(struct kiocb *kiocb,
+                                      int rw, char __user *buf,
+                                      unsigned long *nr_segs,
+                                      struct iovec *iovec)
 {
-       if (unlikely(!access_ok(!rw, kiocb->ki_buf, kiocb->ki_nbytes)))
+       if (unlikely(!access_ok(!rw, buf, kiocb->ki_nbytes)))
                return -EFAULT;
 
-       kiocb->ki_iovec = &kiocb->ki_inline_vec;
-       kiocb->ki_iovec->iov_base = kiocb->ki_buf;
-       kiocb->ki_iovec->iov_len = kiocb->ki_nbytes;
-       kiocb->ki_nr_segs = 1;
+       iovec->iov_base = buf;
+       iovec->iov_len = kiocb->ki_nbytes;
+       *nr_segs = 1;
        return 0;
 }
 
@@ -992,15 +1178,18 @@ static ssize_t aio_setup_single_vector(int rw, struct kiocb *kiocb)
  *     Performs the initial checks and aio retry method
  *     setup for the kiocb at the time of io submission.
  */
-static ssize_t aio_run_iocb(struct kiocb *req, bool compat)
+static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
+                           char __user *buf, bool compat)
 {
        struct file *file = req->ki_filp;
        ssize_t ret;
+       unsigned long nr_segs;
        int rw;
        fmode_t mode;
        aio_rw_op *rw_op;
+       struct iovec inline_vec, *iovec = &inline_vec;
 
-       switch (req->ki_opcode) {
+       switch (opcode) {
        case IOCB_CMD_PREAD:
        case IOCB_CMD_PREADV:
                mode    = FMODE_READ;
@@ -1021,21 +1210,38 @@ rw_common:
                if (!rw_op)
                        return -EINVAL;
 
-               ret = (req->ki_opcode == IOCB_CMD_PREADV ||
-                      req->ki_opcode == IOCB_CMD_PWRITEV)
-                       ? aio_setup_vectored_rw(rw, req, compat)
-                       : aio_setup_single_vector(rw, req);
+               ret = (opcode == IOCB_CMD_PREADV ||
+                      opcode == IOCB_CMD_PWRITEV)
+                       ? aio_setup_vectored_rw(req, rw, buf, &nr_segs,
+                                               &iovec, compat)
+                       : aio_setup_single_vector(req, rw, buf, &nr_segs,
+                                                 iovec);
                if (ret)
                        return ret;
 
                ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
-               if (ret < 0)
+               if (ret < 0) {
+                       if (iovec != &inline_vec)
+                               kfree(iovec);
                        return ret;
+               }
 
                req->ki_nbytes = ret;
-               req->ki_left = ret;
 
-               ret = aio_rw_vect_retry(req, rw, rw_op);
+               /* XXX: move/kill - rw_verify_area()? */
+               /* This matches the pread()/pwrite() logic */
+               if (req->ki_pos < 0) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               if (rw == WRITE)
+                       file_start_write(file);
+
+               ret = rw_op(req, iovec, nr_segs, req->ki_pos);
+
+               if (rw == WRITE)
+                       file_end_write(file);
                break;
 
        case IOCB_CMD_FDSYNC:
@@ -1057,6 +1263,9 @@ rw_common:
                return -EINVAL;
        }
 
+       if (iovec != &inline_vec)
+               kfree(iovec);
+
        if (ret != -EIOCBQUEUED) {
                /*
                 * There's no easy way to restart the syscall since other AIO's
@@ -1128,21 +1337,18 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
        req->ki_obj.user = user_iocb;
        req->ki_user_data = iocb->aio_data;
        req->ki_pos = iocb->aio_offset;
+       req->ki_nbytes = iocb->aio_nbytes;
 
-       req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
-       req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
-       req->ki_opcode = iocb->aio_lio_opcode;
-
-       ret = aio_run_iocb(req, compat);
+       ret = aio_run_iocb(req, iocb->aio_lio_opcode,
+                          (char __user *)(unsigned long)iocb->aio_buf,
+                          compat);
        if (ret)
                goto out_put_req;
 
-       aio_put_req(req);       /* drop extra ref to req */
        return 0;
 out_put_req:
-       atomic_dec(&ctx->reqs_active);
-       aio_put_req(req);       /* drop extra ref to req */
-       aio_put_req(req);       /* drop i/o ref to req */
+       put_reqs_available(ctx, 1);
+       kiocb_free(req);
        return ret;
 }
 
@@ -1195,7 +1401,7 @@ long do_io_submit(aio_context_t ctx_id, long nr,
        }
        blk_finish_plug(&plug);
 
-       put_ioctx(ctx);
+       percpu_ref_put(&ctx->users);
        return i ? i : ret;
 }
 
@@ -1252,7 +1458,6 @@ static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
 SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
                struct io_event __user *, result)
 {
-       struct io_event res;
        struct kioctx *ctx;
        struct kiocb *kiocb;
        u32 key;
@@ -1270,21 +1475,22 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 
        kiocb = lookup_kiocb(ctx, iocb, key);
        if (kiocb)
-               ret = kiocb_cancel(ctx, kiocb, &res);
+               ret = kiocb_cancel(ctx, kiocb);
        else
                ret = -EINVAL;
 
        spin_unlock_irq(&ctx->ctx_lock);
 
        if (!ret) {
-               /* Cancellation succeeded -- copy the result
-                * into the user's buffer.
+               /*
+                * The result argument is no longer used - the io_event is
+                * always delivered via the ring buffer. -EINPROGRESS indicates
+                * cancellation is in progress.
                 */
-               if (copy_to_user(result, &res, sizeof(res)))
-                       ret = -EFAULT;
+               ret = -EINPROGRESS;
        }
 
-       put_ioctx(ctx);
+       percpu_ref_put(&ctx->users);
 
        return ret;
 }
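
From user space, the visible effect of this hunk is that a successful io_cancel() no longer fills the result buffer: the syscall now returns -EINPROGRESS and the completion event, if the request produces one, arrives through the ring via io_getevents(). A hedged sketch of a caller that copes with both the old and the new behaviour (raw syscall(2) usage; names are illustrative):

#include <errno.h>
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>

static int cancel_iocb(aio_context_t ctx, struct iocb *iocb)
{
        struct io_event ev;     /* still required by the ABI, no longer written */
        long ret = syscall(__NR_io_cancel, ctx, iocb, &ev);

        if (ret == 0)           /* older kernels: ev holds the result */
                return 0;
        if (errno == EINPROGRESS)
                return 0;       /* this change: cancellation accepted, the
                                 * event (if any) comes via io_getevents() */
        return -errno;          /* e.g. EINVAL: iocb not found */
}
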
@@ -1313,7 +1519,7 @@ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
        if (likely(ioctx)) {
                if (likely(min_nr <= nr && min_nr >= 0))
                        ret = read_events(ioctx, min_nr, nr, events, timeout);
-               put_ioctx(ioctx);
+               percpu_ref_put(&ioctx->users);
        }
        return ret;
 }
index 47a65df..85c9618 100644 (file)
@@ -109,6 +109,72 @@ static struct file_system_type anon_inode_fs_type = {
 };
 
 /**
+ * anon_inode_getfile_private - creates a new file instance by hooking it up to an
+ *                      anonymous inode, and a dentry that describes the "class"
+ *                      of the file
+ *
+ * @name:    [in]    name of the "class" of the new file
+ * @fops:    [in]    file operations for the new file
+ * @priv:    [in]    private data for the new file (will be file's private_data)
+ * @flags:   [in]    flags
+ *
+ * Similar to anon_inode_getfile, but each file holds a single inode.
+ *
+ */
+struct file *anon_inode_getfile_private(const char *name,
+                                       const struct file_operations *fops,
+                                       void *priv, int flags)
+{
+       struct qstr this;
+       struct path path;
+       struct file *file;
+       struct inode *inode;
+
+       if (fops->owner && !try_module_get(fops->owner))
+               return ERR_PTR(-ENOENT);
+
+       inode = anon_inode_mkinode(anon_inode_mnt->mnt_sb);
+       if (IS_ERR(inode)) {
+               file = ERR_PTR(-ENOMEM);
+               goto err_module;
+       }
+
+       /*
+        * Link the inode to a directory entry by creating a unique name
+        * using the inode sequence number.
+        */
+       file = ERR_PTR(-ENOMEM);
+       this.name = name;
+       this.len = strlen(name);
+       this.hash = 0;
+       path.dentry = d_alloc_pseudo(anon_inode_mnt->mnt_sb, &this);
+       if (!path.dentry)
+               goto err_module;
+
+       path.mnt = mntget(anon_inode_mnt);
+
+       d_instantiate(path.dentry, inode);
+
+       file = alloc_file(&path, OPEN_FMODE(flags), fops);
+       if (IS_ERR(file))
+               goto err_dput;
+
+       file->f_mapping = inode->i_mapping;
+       file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
+       file->private_data = priv;
+
+       return file;
+
+err_dput:
+       path_put(&path);
+err_module:
+       module_put(fops->owner);
+       return file;
+}
+EXPORT_SYMBOL_GPL(anon_inode_getfile_private);
+
+/**
  * anon_inode_getfile - creates a new file instance by hooking it up to an
  *                      anonymous inode, and a dentry that describe the "class"
  *                      of the file
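
A hedged usage sketch for the new helper: a driver that wants every handle to carry its own inode (so per-file state such as the address_space is not shared between handles) could do something like the following. foo_fops, foo_create_handle() and the priv pointer are illustrative; only the anon_inode_getfile_private() call reflects the interface added above.

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>

static const struct file_operations foo_fops = {
        .owner  = THIS_MODULE,
        /* .read, .mmap, ... as the driver requires */
};

static int foo_create_handle(void *priv)
{
        struct file *file;
        int fd;

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;

        /* unlike anon_inode_getfile(), this backs the file with its own inode */
        file = anon_inode_getfile_private("[foo]", &foo_fops, priv, O_RDWR);
        if (IS_ERR(file)) {
                put_unused_fd(fd);
                return PTR_ERR(file);
        }

        fd_install(fd, file);
        return fd;
}
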
index 1173a4e..1e86823 100644 (file)
@@ -592,7 +592,7 @@ static struct block_device *bd_acquire(struct inode *inode)
        return bdev;
 }
 
-static inline int sb_is_blkdev_sb(struct super_block *sb)
+int sb_is_blkdev_sb(struct super_block *sb)
 {
        return sb == blockdev_superblock;
 }
@@ -1542,7 +1542,7 @@ static ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
                return 0;
 
        size -= pos;
-       if (size < iocb->ki_left)
+       if (size < iocb->ki_nbytes)
                nr_segs = iov_shorten((struct iovec *)iov, nr_segs, size);
        return generic_file_aio_read(iocb, iov, nr_segs, pos);
 }
index d044b35..eb955b5 100644 (file)
@@ -3379,6 +3379,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
        return rc;
 }
 
+/*
+ * cifs_readpage_worker must be called with the page pinned
+ */
 static int cifs_readpage_worker(struct file *file, struct page *page,
        loff_t *poffset)
 {
@@ -3390,7 +3393,6 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
        if (rc == 0)
                goto read_complete;
 
-       page_cache_get(page);
        read_data = kmap(page);
        /* for reads over a certain size could initiate async read ahead */
 
@@ -3417,7 +3419,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
 
 io_error:
        kunmap(page);
-       page_cache_release(page);
+       unlock_page(page);
 
 read_complete:
        return rc;
@@ -3442,8 +3444,6 @@ static int cifs_readpage(struct file *file, struct page *page)
 
        rc = cifs_readpage_worker(file, page, &offset);
 
-       unlock_page(page);
-
        free_xid(xid);
        return rc;
 }
@@ -3497,6 +3497,7 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
 {
+       int oncethru = 0;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
        loff_t page_start = pos & PAGE_MASK;
@@ -3506,6 +3507,7 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
 
        cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
 
+start:
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page) {
                rc = -ENOMEM;
@@ -3547,13 +3549,16 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
                }
        }
 
-       if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
+       if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
                /*
                 * might as well read a page, it is fast enough. If we get
                 * an error, we don't need to return it. cifs_write_end will
                 * do a sync write instead since PG_uptodate isn't set.
                 */
                cifs_readpage_worker(file, page, &page_start);
+               page_cache_release(page);
+               oncethru = 1;
+               goto start;
        } else {
                /* we could try using another file handle if there is one -
                   but how would we lock it to prevent close of that handle
index 1bd4614..4100030 100644 (file)
@@ -357,15 +357,80 @@ static void dentry_unlink_inode(struct dentry * dentry)
 }
 
 /*
+ * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
+ * is in use - which includes both the "real" per-superblock
+ * LRU list _and_ the DCACHE_SHRINK_LIST use.
+ *
+ * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
+ * on the shrink list (ie not on the superblock LRU list).
+ *
+ * The per-cpu "nr_dentry_unused" counters are updated whenever
+ * the DCACHE_LRU_LIST bit is set or cleared.
+ *
+ * These helper functions make sure we always follow the
+ * rules. d_lock must be held by the caller.
+ */
+#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
+static void d_lru_add(struct dentry *dentry)
+{
+       D_FLAG_VERIFY(dentry, 0);
+       dentry->d_flags |= DCACHE_LRU_LIST;
+       this_cpu_inc(nr_dentry_unused);
+       WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
+}
+
+static void d_lru_del(struct dentry *dentry)
+{
+       D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
+       dentry->d_flags &= ~DCACHE_LRU_LIST;
+       this_cpu_dec(nr_dentry_unused);
+       WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
+}
+
+static void d_shrink_del(struct dentry *dentry)
+{
+       D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
+       list_del_init(&dentry->d_lru);
+       dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
+       this_cpu_dec(nr_dentry_unused);
+}
+
+static void d_shrink_add(struct dentry *dentry, struct list_head *list)
+{
+       D_FLAG_VERIFY(dentry, 0);
+       list_add(&dentry->d_lru, list);
+       dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
+       this_cpu_inc(nr_dentry_unused);
+}
+
+/*
+ * These can only be called under the global LRU lock, ie during the
+ * callback for freeing the LRU list. "isolate" removes it from the
+ * LRU lists entirely, while shrink_move moves it to the indicated
+ * private list.
+ */
+static void d_lru_isolate(struct dentry *dentry)
+{
+       D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
+       dentry->d_flags &= ~DCACHE_LRU_LIST;
+       this_cpu_dec(nr_dentry_unused);
+       list_del_init(&dentry->d_lru);
+}
+
+static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list)
+{
+       D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
+       dentry->d_flags |= DCACHE_SHRINK_LIST;
+       list_move_tail(&dentry->d_lru, list);
+}
+
+/*
  * dentry_lru_(add|del)_list) must be called with d_lock held.
  */
 static void dentry_lru_add(struct dentry *dentry)
 {
-       if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) {
-               if (list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
-                       this_cpu_inc(nr_dentry_unused);
-               dentry->d_flags |= DCACHE_LRU_LIST;
-       }
+       if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
+               d_lru_add(dentry);
 }
 
 /*
@@ -377,15 +442,11 @@ static void dentry_lru_add(struct dentry *dentry)
  */
 static void dentry_lru_del(struct dentry *dentry)
 {
-       if (dentry->d_flags & DCACHE_SHRINK_LIST) {
-               list_del_init(&dentry->d_lru);
-               dentry->d_flags &= ~DCACHE_SHRINK_LIST;
-               return;
+       if (dentry->d_flags & DCACHE_LRU_LIST) {
+               if (dentry->d_flags & DCACHE_SHRINK_LIST)
+                       return d_shrink_del(dentry);
+               d_lru_del(dentry);
        }
-
-       if (list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
-               this_cpu_dec(nr_dentry_unused);
-       dentry->d_flags &= ~DCACHE_LRU_LIST;
 }
 
 /**
@@ -837,6 +898,12 @@ static void shrink_dentry_list(struct list_head *list)
                dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
                if (&dentry->d_lru == list)
                        break; /* empty */
+
+               /*
+                * Get the dentry lock, and re-verify that the dentry is
+                * still on the shrink list. If it is, we know that
+                * DCACHE_SHRINK_LIST and DCACHE_LRU_LIST are set.
+                */
                spin_lock(&dentry->d_lock);
                if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
                        spin_unlock(&dentry->d_lock);
@@ -848,8 +915,7 @@ static void shrink_dentry_list(struct list_head *list)
                 * to the LRU here, so we can simply remove it from the list
                 * here regardless of whether it is referenced or not.
                 */
-               list_del_init(&dentry->d_lru);
-               dentry->d_flags &= ~DCACHE_SHRINK_LIST;
+               d_shrink_del(dentry);
 
                /*
                 * We found an inuse dentry which was not removed from
@@ -861,12 +927,20 @@ static void shrink_dentry_list(struct list_head *list)
                }
                rcu_read_unlock();
 
+               /*
+                * If try_prune_one_dentry() returns a dentry, it will
+                * be the same one we passed in, and d_lock will
+                * have been held the whole time, so it will not
+                * have been added to any other lists. We failed
+                * to get the inode lock.
+                *
+                * We just add it back to the shrink list.
+                */
                dentry = try_prune_one_dentry(dentry);
 
                rcu_read_lock();
                if (dentry) {
-                       dentry->d_flags |= DCACHE_SHRINK_LIST;
-                       list_add(&dentry->d_lru, list);
+                       d_shrink_add(dentry, list);
                        spin_unlock(&dentry->d_lock);
                }
        }
@@ -894,7 +968,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
         * another pass through the LRU.
         */
        if (dentry->d_lockref.count) {
-               list_del_init(&dentry->d_lru);
+               d_lru_isolate(dentry);
                spin_unlock(&dentry->d_lock);
                return LRU_REMOVED;
        }
@@ -925,9 +999,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
                return LRU_ROTATE;
        }
 
-       dentry->d_flags |= DCACHE_SHRINK_LIST;
-       list_move_tail(&dentry->d_lru, freeable);
-       this_cpu_dec(nr_dentry_unused);
+       d_lru_shrink_move(dentry, freeable);
        spin_unlock(&dentry->d_lock);
 
        return LRU_REMOVED;
@@ -972,9 +1044,7 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
        if (!spin_trylock(&dentry->d_lock))
                return LRU_SKIP;
 
-       dentry->d_flags |= DCACHE_SHRINK_LIST;
-       list_move_tail(&dentry->d_lru, freeable);
-       this_cpu_dec(nr_dentry_unused);
+       d_lru_shrink_move(dentry, freeable);
        spin_unlock(&dentry->d_lock);
 
        return LRU_REMOVED;
@@ -1362,9 +1432,13 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
        if (dentry->d_lockref.count) {
                dentry_lru_del(dentry);
        } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
-               dentry_lru_del(dentry);
-               list_add_tail(&dentry->d_lru, &data->dispose);
-               dentry->d_flags |= DCACHE_SHRINK_LIST;
+               /*
+                * We can't use d_lru_shrink_move() because we
+                * need to get the global LRU lock and do the
+                * LRU accounting.
+                */
+               d_lru_del(dentry);
+               d_shrink_add(dentry, &data->dispose);
                data->found++;
                ret = D_WALK_NORETRY;
        }
index 30f6f27..9f4935b 100644 (file)
@@ -69,7 +69,7 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
 {
        struct super_block *sb = inode->i_sb;
 
-       if (strcmp(sb->s_type->name, "bdev") == 0)
+       if (sb_is_blkdev_sb(sb))
                return inode->i_mapping->backing_dev_info;
 
        return sb->s_bdi;
@@ -251,11 +251,13 @@ static int move_expired_inodes(struct list_head *delaying_queue,
                if (work->older_than_this &&
                    inode_dirtied_after(inode, *work->older_than_this))
                        break;
+               list_move(&inode->i_wb_list, &tmp);
+               moved++;
+               if (sb_is_blkdev_sb(inode->i_sb))
+                       continue;
                if (sb && sb != inode->i_sb)
                        do_sb_sort = 1;
                sb = inode->i_sb;
-               list_move(&inode->i_wb_list, &tmp);
-               moved++;
        }
 
        /* just one sb in list, splice to dispatch_queue and we're done */
index 0bd7a55..91ff089 100644 (file)
@@ -130,7 +130,6 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_
 
        return -EINVAL;
 #else
-       VM_BUG_ON(iocb->ki_left != PAGE_SIZE);
        VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE);
 
        if (rw == READ || rw == KERNEL_READ)
index 4f8197c..d71903c 100644 (file)
@@ -2242,7 +2242,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
                file->f_path.dentry->d_name.name,
                (unsigned int)nr_segs);
 
-       if (iocb->ki_left == 0)
+       if (iocb->ki_nbytes == 0)
                return 0;
 
        appending = file->f_flags & O_APPEND ? 1 : 0;
@@ -2293,7 +2293,7 @@ relock:
 
        can_do_direct = direct_io;
        ret = ocfs2_prepare_inode_for_write(file, ppos,
-                                           iocb->ki_left, appending,
+                                           iocb->ki_nbytes, appending,
                                            &can_do_direct, &has_refcount);
        if (ret < 0) {
                mlog_errno(ret);
@@ -2301,7 +2301,7 @@ relock:
        }
 
        if (direct_io && !is_sync_kiocb(iocb))
-               unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_left,
+               unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_nbytes,
                                                      *ppos);
 
        /*
index 122a384..e3cd280 100644 (file)
@@ -367,7 +367,6 @@ ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *pp
 
        init_sync_kiocb(&kiocb, filp);
        kiocb.ki_pos = *ppos;
-       kiocb.ki_left = len;
        kiocb.ki_nbytes = len;
 
        ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
@@ -417,7 +416,6 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof
 
        init_sync_kiocb(&kiocb, filp);
        kiocb.ki_pos = *ppos;
-       kiocb.ki_left = len;
        kiocb.ki_nbytes = len;
 
        ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
@@ -599,7 +597,6 @@ static ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
 
        init_sync_kiocb(&kiocb, filp);
        kiocb.ki_pos = *ppos;
-       kiocb.ki_left = len;
        kiocb.ki_nbytes = len;
 
        ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
index 7f60e90..6e025e0 100644 (file)
@@ -2587,10 +2587,11 @@ int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf,
                return -EROFS;
 
        failing = power_cut_emulated(c, lnum, 1);
-       if (failing)
+       if (failing) {
                len = corrupt_data(c, buf, len);
-       ubifs_warn("actually write %d bytes to LEB %d:%d (the buffer was corrupted)",
-                  len, lnum, offs);
+               ubifs_warn("actually write %d bytes to LEB %d:%d (the buffer was corrupted)",
+                          len, lnum, offs);
+       }
        err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
        if (err)
                return err;
index 29569dd..c02a27a 100644 (file)
@@ -141,7 +141,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        int err, pos;
-       size_t count = iocb->ki_left;
+       size_t count = iocb->ki_nbytes;
        struct udf_inode_info *iinfo = UDF_I(inode);
 
        down_write(&iinfo->i_data_sem);
index 1bdf965..d9c92da 100644 (file)
@@ -27,15 +27,13 @@ struct kiocb;
  */
 #define KIOCB_CANCELLED                ((void *) (~0ULL))
 
-typedef int (kiocb_cancel_fn)(struct kiocb *, struct io_event *);
+typedef int (kiocb_cancel_fn)(struct kiocb *);
 
 struct kiocb {
-       atomic_t                ki_users;
-
        struct file             *ki_filp;
        struct kioctx           *ki_ctx;        /* NULL for sync ops */
        kiocb_cancel_fn         *ki_cancel;
-       void                    (*ki_dtor)(struct kiocb *);
+       void                    *private;
 
        union {
                void __user             *user;
@@ -44,17 +42,7 @@ struct kiocb {
 
        __u64                   ki_user_data;   /* user's data for completion */
        loff_t                  ki_pos;
-
-       void                    *private;
-       /* State that we remember to be able to restart/retry  */
-       unsigned short          ki_opcode;
-       size_t                  ki_nbytes;      /* copy of iocb->aio_nbytes */
-       char                    __user *ki_buf; /* remaining iocb->aio_buf */
-       size_t                  ki_left;        /* remaining bytes */
-       struct iovec            ki_inline_vec;  /* inline vector */
-       struct iovec            *ki_iovec;
-       unsigned long           ki_nr_segs;
-       unsigned long           ki_cur_seg;
+       size_t                  ki_nbytes;      /* copy of iocb->aio_nbytes */
 
        struct list_head        ki_list;        /* the aio core uses this
                                                 * for cancellation */
@@ -74,7 +62,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
 static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
 {
        *kiocb = (struct kiocb) {
-                       .ki_users = ATOMIC_INIT(1),
                        .ki_ctx = NULL,
                        .ki_filp = filp,
                        .ki_obj.tsk = current,
@@ -84,7 +71,6 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
 /* prototypes */
 #ifdef CONFIG_AIO
 extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
-extern void aio_put_req(struct kiocb *iocb);
 extern void aio_complete(struct kiocb *iocb, long res, long res2);
 struct mm_struct;
 extern void exit_aio(struct mm_struct *mm);
@@ -93,7 +79,6 @@ extern long do_io_submit(aio_context_t ctx_id, long nr,
 void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
 #else
 static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
-static inline void aio_put_req(struct kiocb *iocb) { }
 static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
 struct mm_struct;
 static inline void exit_aio(struct mm_struct *mm) { }
index 8013a45..cf573c2 100644 (file)
@@ -13,6 +13,9 @@ struct file_operations;
 struct file *anon_inode_getfile(const char *name,
                                const struct file_operations *fops,
                                void *priv, int flags);
+struct file *anon_inode_getfile_private(const char *name,
+                               const struct file_operations *fops,
+                               void *priv, int flags);
 int anon_inode_getfd(const char *name, const struct file_operations *fops,
                     void *priv, int flags);
 
index a4acd3c..3f40547 100644 (file)
@@ -2069,6 +2069,7 @@ extern struct super_block *freeze_bdev(struct block_device *);
 extern void emergency_thaw_all(void);
 extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
 extern int fsync_bdev(struct block_device *);
+extern int sb_is_blkdev_sb(struct super_block *sb);
 #else
 static inline void bd_forget(struct inode *inode) {}
 static inline int sync_blockdev(struct block_device *bdev) { return 0; }
@@ -2088,6 +2089,11 @@ static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
 static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg)
 {
 }
+
+static inline int sb_is_blkdev_sb(struct super_block *sb)
+{
+       return 0;
+}
 #endif
 extern int sync_filesystem(struct super_block *);
 extern const struct file_operations def_blk_fops;
index 6fe5214..8d3c57f 100644 (file)
@@ -53,6 +53,9 @@ extern int migrate_vmas(struct mm_struct *mm,
 extern void migrate_page_copy(struct page *newpage, struct page *page);
 extern int migrate_huge_page_move_mapping(struct address_space *mapping,
                                  struct page *newpage, struct page *page);
+extern int migrate_page_move_mapping(struct address_space *mapping,
+               struct page *newpage, struct page *page,
+               struct buffer_head *head, enum migrate_mode mode);
 #else
 
 static inline void putback_lru_pages(struct list_head *l) {}
index faf4b7c..d9851ee 100644 (file)
@@ -322,6 +322,7 @@ struct mm_rss_stat {
        atomic_long_t count[NR_MM_COUNTERS];
 };
 
+struct kioctx_table;
 struct mm_struct {
        struct vm_area_struct * mmap;           /* list of VMAs */
        struct rb_root mm_rb;
@@ -383,8 +384,8 @@ struct mm_struct {
 
        struct core_state *core_state; /* coredumping support */
 #ifdef CONFIG_AIO
-       spinlock_t              ioctx_lock;
-       struct hlist_head       ioctx_list;
+       spinlock_t                      ioctx_lock;
+       struct kioctx_table __rcu       *ioctx_table;
 #endif
 #ifdef CONFIG_MM_OWNER
        /*
index bc95b2b..97fbecd 100644 (file)
 #define PCI_DEVICE_ID_HP_CISSE         0x323a
 #define PCI_DEVICE_ID_HP_CISSF         0x323b
 #define PCI_DEVICE_ID_HP_CISSH         0x323c
+#define PCI_DEVICE_ID_HP_CISSI         0x3239
 #define PCI_DEVICE_ID_HP_ZX2_IOC       0x4031
 
 #define PCI_VENDOR_ID_PCTECH           0x1042
index 6c5cc0e..74f1058 100644 (file)
@@ -4,6 +4,8 @@
  * (C) SGI 2006, Christoph Lameter
  *     Cleaned up and restructured to ease the addition of alternative
  *     implementations of SLAB allocators.
+ * (C) Linux Foundation 2008-2013
+ *      Unified interface for all slab allocators
  */
 
 #ifndef _LINUX_SLAB_H
@@ -94,6 +96,7 @@
 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
                                (unsigned long)ZERO_SIZE_PTR)
 
+#include <linux/kmemleak.h>
 
 struct mem_cgroup;
 /*
@@ -289,6 +292,57 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */
 
+void *__kmalloc(size_t size, gfp_t flags);
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+#else
+static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+       return __kmalloc(size, flags);
+}
+
+static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
+{
+       return kmem_cache_alloc(s, flags);
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+                                          gfp_t gfpflags,
+                                          int node, size_t size);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+                             gfp_t gfpflags,
+                             int node, size_t size)
+{
+       return kmem_cache_alloc_trace(s, gfpflags, size);
+}
+#endif /* CONFIG_NUMA */
+
+#else /* CONFIG_TRACING */
+static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
+               gfp_t flags, size_t size)
+{
+       return kmem_cache_alloc(s, flags);
+}
+
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+                             gfp_t gfpflags,
+                             int node, size_t size)
+{
+       return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif /* CONFIG_TRACING */
+
 #ifdef CONFIG_SLAB
 #include <linux/slab_def.h>
 #endif
@@ -297,9 +351,60 @@ static __always_inline int kmalloc_index(size_t size)
 #include <linux/slub_def.h>
 #endif
 
-#ifdef CONFIG_SLOB
-#include <linux/slob_def.h>
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+       void *ret;
+
+       flags |= (__GFP_COMP | __GFP_KMEMCG);
+       ret = (void *) __get_free_pages(flags, order);
+       kmemleak_alloc(ret, size, 1, flags);
+       return ret;
+}
+
+#ifdef CONFIG_TRACING
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+#else
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+       return kmalloc_order(size, flags, order);
+}
+#endif
+
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+       unsigned int order = get_order(size);
+       return kmalloc_order_trace(size, flags, order);
+}
+
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kcalloc).
+ *
+ * kmalloc is the normal method of allocating memory
+ * for objects smaller than page size in the kernel.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+       if (__builtin_constant_p(size)) {
+               if (size > KMALLOC_MAX_CACHE_SIZE)
+                       return kmalloc_large(size, flags);
+#ifndef CONFIG_SLOB
+               if (!(flags & GFP_DMA)) {
+                       int index = kmalloc_index(size);
+
+                       if (!index)
+                               return ZERO_SIZE_PTR;
+
+                       return kmem_cache_alloc_trace(kmalloc_caches[index],
+                                       flags, size);
+               }
 #endif
+       }
+       return __kmalloc(size, flags);
+}
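
The reason kmalloc() can stay inline here: when the size is a compile-time constant and small enough, kmalloc_index() folds to a constant and the call becomes a direct kmem_cache_alloc_trace() on the matching cache; oversized constants go to kmalloc_large(), and run-time sizes fall back to __kmalloc(). A small illustrative caller, with placeholder names:

#include <linux/slab.h>

struct foo {
        int a, b;
};

static struct foo *kmalloc_paths_example(size_t runtime_len)
{
        /* compile-time-constant size: the inline above picks the kmalloc
         * cache at build time and calls kmem_cache_alloc_trace() directly */
        struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

        /* run-time size: falls through to __kmalloc() */
        void *buf = kmalloc(runtime_len, GFP_KERNEL);

        if (!f || !buf) {
                kfree(buf);
                kfree(f);
                return NULL;
        }
        f->a = 0;
        f->b = 0;
        kfree(buf);             /* only here to demonstrate the second path */
        return f;
}
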
 
 /*
  * Determine size used for the nth kmalloc cache.
@@ -321,6 +426,23 @@ static __always_inline int kmalloc_size(int n)
        return 0;
 }
 
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+#ifndef CONFIG_SLOB
+       if (__builtin_constant_p(size) &&
+               size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
+               int i = kmalloc_index(size);
+
+               if (!i)
+                       return ZERO_SIZE_PTR;
+
+               return kmem_cache_alloc_node_trace(kmalloc_caches[i],
+                                               flags, node, size);
+       }
+#endif
+       return __kmalloc_node(size, flags, node);
+}
+
 /*
  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
  * Intended for arches that get misalignment faults even for 64 bit integer
@@ -451,36 +573,6 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
        return kmalloc_array(n, size, flags | __GFP_ZERO);
 }
 
-#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
-/**
- * kmalloc_node - allocate memory from a specific node
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- * @node: node to allocate from.
- *
- * kmalloc() for non-local nodes, used to allocate from a specific node
- * if available. Equivalent to kmalloc() in the non-NUMA single-node
- * case.
- */
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-       return kmalloc(size, flags);
-}
-
-static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-       return __kmalloc(size, flags);
-}
-
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-
-static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
-                                       gfp_t flags, int node)
-{
-       return kmem_cache_alloc(cachep, flags);
-}
-#endif /* !CONFIG_NUMA && !CONFIG_SLOB */
-
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
  * calling function of the routine calling it for slab leak tracking instead
index cd40158..e9346b4 100644 (file)
@@ -3,20 +3,6 @@
 
 /*
  * Definitions unique to the original Linux SLAB allocator.
- *
- * What we provide here is a way to optimize the frequent kmalloc
- * calls in the kernel by selecting the appropriate general cache
- * if kmalloc was called with a size that can be established at
- * compile time.
- */
-
-#include <linux/init.h>
-#include <linux/compiler.h>
-
-/*
- * struct kmem_cache
- *
- * manages a cache.
  */
 
 struct kmem_cache {
@@ -102,96 +88,4 @@ struct kmem_cache {
         */
 };
 
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
-{
-       return kmem_cache_alloc(cachep, flags);
-}
-#endif
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-       struct kmem_cache *cachep;
-       void *ret;
-
-       if (__builtin_constant_p(size)) {
-               int i;
-
-               if (!size)
-                       return ZERO_SIZE_PTR;
-
-               if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
-                       return NULL;
-
-               i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
-               if (flags & GFP_DMA)
-                       cachep = kmalloc_dma_caches[i];
-               else
-#endif
-                       cachep = kmalloc_caches[i];
-
-               ret = kmem_cache_alloc_trace(cachep, flags, size);
-
-               return ret;
-       }
-       return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
-extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
-                                        gfp_t flags,
-                                        int nodeid,
-                                        size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
-                           gfp_t flags,
-                           int nodeid,
-                           size_t size)
-{
-       return kmem_cache_alloc_node(cachep, flags, nodeid);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-       struct kmem_cache *cachep;
-
-       if (__builtin_constant_p(size)) {
-               int i;
-
-               if (!size)
-                       return ZERO_SIZE_PTR;
-
-               if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
-                       return NULL;
-
-               i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
-               if (flags & GFP_DMA)
-                       cachep = kmalloc_dma_caches[i];
-               else
-#endif
-                       cachep = kmalloc_caches[i];
-
-               return kmem_cache_alloc_node_trace(cachep, flags, node, size);
-       }
-       return __kmalloc_node(size, flags, node);
-}
-
-#endif /* CONFIG_NUMA */
-
 #endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
deleted file mode 100644 (file)
index 095a5a4..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __LINUX_SLOB_DEF_H
-#define __LINUX_SLOB_DEF_H
-
-#include <linux/numa.h>
-
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
-                                             gfp_t flags)
-{
-       return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
-}
-
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-       return __kmalloc_node(size, flags, node);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-       return __kmalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
-{
-       return kmalloc(size, flags);
-}
-
-#endif /* __LINUX_SLOB_DEF_H */
index 027276f..cc0b67e 100644 (file)
@@ -6,14 +6,8 @@
  *
  * (C) 2007 SGI, Christoph Lameter
  */
-#include <linux/types.h>
-#include <linux/gfp.h>
-#include <linux/bug.h>
-#include <linux/workqueue.h>
 #include <linux/kobject.h>
 
-#include <linux/kmemleak.h>
-
 enum stat_item {
        ALLOC_FASTPATH,         /* Allocation from cpu slab */
        ALLOC_SLOWPATH,         /* Allocation by getting a new cpu slab */
@@ -104,108 +98,4 @@ struct kmem_cache {
        struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-static __always_inline void *
-kmalloc_order(size_t size, gfp_t flags, unsigned int order)
-{
-       void *ret;
-
-       flags |= (__GFP_COMP | __GFP_KMEMCG);
-       ret = (void *) __get_free_pages(flags, order);
-       kmemleak_alloc(ret, size, 1, flags);
-       return ret;
-}
-
-/**
- * Calling this on allocated memory will check that the memory
- * is expected to be in use, and print warnings if not.
- */
-#ifdef CONFIG_SLUB_DEBUG
-extern bool verify_mem_not_deleted(const void *x);
-#else
-static inline bool verify_mem_not_deleted(const void *x)
-{
-       return true;
-}
-#endif
-
-#ifdef CONFIG_TRACING
-extern void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
-       return kmem_cache_alloc(s, gfpflags);
-}
-
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-       return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
-       unsigned int order = get_order(size);
-       return kmalloc_order_trace(size, flags, order);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-       if (__builtin_constant_p(size)) {
-               if (size > KMALLOC_MAX_CACHE_SIZE)
-                       return kmalloc_large(size, flags);
-
-               if (!(flags & GFP_DMA)) {
-                       int index = kmalloc_index(size);
-
-                       if (!index)
-                               return ZERO_SIZE_PTR;
-
-                       return kmem_cache_alloc_trace(kmalloc_caches[index],
-                                       flags, size);
-               }
-       }
-       return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
-                                          gfp_t gfpflags,
-                                          int node, size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
-                             gfp_t gfpflags,
-                             int node, size_t size)
-{
-       return kmem_cache_alloc_node(s, gfpflags, node);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-       if (__builtin_constant_p(size) &&
-               size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
-               int index = kmalloc_index(size);
-
-               if (!index)
-                       return ZERO_SIZE_PTR;
-
-               return kmem_cache_alloc_node_trace(kmalloc_caches[index],
-                              flags, node, size);
-       }
-       return __kmalloc_node(size, flags, node);
-}
-#endif
-
 #endif /* _LINUX_SLUB_DEF_H */
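
The inline kmalloc() and kmalloc_node() bodies removed above (and their SLAB/SLOB counterparts earlier in this diff) are consolidated into a single definition in the shared <linux/slab.h>. The trick they all rely on is the same: when the requested size is a compile-time constant, __builtin_constant_p() lets the compiler resolve the cache index at build time and call straight into the matching kmalloc cache, while runtime sizes fall back to the generic allocator. Below is a minimal user-space sketch of that dispatch pattern, with hypothetical stand-in names (sketch_kmalloc, cache_alloc, generic_alloc) and a simplified power-of-two bucket calculation in place of kmalloc_index():

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for kmem_cache_alloc_trace(kmalloc_caches[index], ...). */
static void *cache_alloc(int index, size_t size)
{
        printf("constant-size path: cache index %d for %zu bytes\n", index, size);
        return malloc(size);
}

/* Stand-in for __kmalloc(). */
static void *generic_alloc(size_t size)
{
        printf("generic path for %zu bytes\n", size);
        return malloc(size);
}

/* Rough analogue of kmalloc_index(): smallest power-of-two bucket >= size. */
static inline int size_to_index(size_t size)
{
        int i = 3;                      /* 2^3 == 8-byte minimum object, as in SLUB */

        while (((size_t)1 << i) < size)
                i++;
        return i;
}

static inline void *sketch_kmalloc(size_t size)
{
        /* Once inlined, a constant size folds the whole index computation away. */
        if (__builtin_constant_p(size))
                return cache_alloc(size_to_index(size), size);
        return generic_alloc(size);
}

int main(void)
{
        size_t runtime_size = (size_t)(rand() % 100) + 1;

        free(sketch_kmalloc(64));               /* constant: direct cache lookup */
        free(sketch_kmalloc(runtime_size));     /* variable: generic path */
        return 0;
}

Built with optimization the first call compiles down to cache_alloc(6, 64); without inlining (e.g. -O0) __builtin_constant_p() simply reports false and both calls take the generic path.
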
diff --git a/include/linux/time-armada-370-xp.h b/include/linux/time-armada-370-xp.h
deleted file mode 100644
index 6fb0856..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Marvell Armada 370/XP SoC timer handling.
- *
- * Copyright (C) 2012 Marvell
- *
- * Lior Amsalem <alior@marvell.com>
- * Gregory CLEMENT <gregory.clement@free-electrons.com>
- * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
- *
- */
-#ifndef __TIME_ARMADA_370_XPPRCMU_H
-#define __TIME_ARMADA_370_XPPRCMU_H
-
-void armada_370_xp_timer_init(void);
-
-#endif
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index d08abf9..a372627 100644
@@ -152,6 +152,7 @@ struct input_keymap_entry {
 #define EVIOCGEFFECTS          _IOR('E', 0x84, int)                    /* Report number of effects playable at the same time */
 
 #define EVIOCGRAB              _IOW('E', 0x90, int)                    /* Grab/Release device */
+#define EVIOCREVOKE            _IOW('E', 0x91, int)                    /* Revoke device access */
 
 #define EVIOCSCLOCKID          _IOW('E', 0xa0, int)                    /* Set clockid to be used for timestamps */
 
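
The only functional change to this header is the new EVIOCREVOKE ioctl: a process that hands an evdev file descriptor to a less privileged client (the logind/display-server use case) can later revoke that client's access without reclaiming the descriptor, because dup()ed descriptors share the open file description the revoke acts on. Below is a minimal user-space sketch, assuming /dev/input/event0 exists and the running kernel carries this change; the fallback define simply mirrors the value added above:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

#ifndef EVIOCREVOKE                             /* older installed headers */
#define EVIOCREVOKE _IOW('E', 0x91, int)
#endif

int main(void)
{
        int fd = open("/dev/input/event0", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* The argument is reserved and is expected to be zero/NULL here. */
        if (ioctl(fd, EVIOCREVOKE, NULL))
                perror("EVIOCREVOKE");
        else
                printf("access revoked; further reads on this open file will fail\n");

        close(fd);
        return 0;
}

After a successful revoke the evdev client is detached, so subsequent reads and writes on that open file description are expected to fail (the driver reports -ENODEV), while other clients of the same device are unaffected.
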
diff --git a/init/Kconfig b/init/Kconfig
index 18bd9e3..3ecd8a1 100644
@@ -1602,7 +1602,7 @@ endchoice
 
 config SLUB_CPU_PARTIAL
        default y
-       depends on SLUB
+       depends on SLUB && SMP
        bool "SLUB per cpu partial cache"
        help
          Per cpu partial caches accelerate objects allocation and freeing
diff --git a/kernel/fork.c b/kernel/fork.c
index 81ccb4f..086fe73 100644
@@ -519,7 +519,7 @@ static void mm_init_aio(struct mm_struct *mm)
 {
 #ifdef CONFIG_AIO
        spin_lock_init(&mm->ioctx_lock);
-       INIT_HLIST_HEAD(&mm->ioctx_list);
+       mm->ioctx_table = NULL;
 #endif
 }
 
diff --git a/mm/migrate.c b/mm/migrate.c
index b7ded7e..9c8d5f5 100644
@@ -311,7 +311,7 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
  * 2 for pages with a mapping
  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
  */
-static int migrate_page_move_mapping(struct address_space *mapping,
+int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page,
                struct buffer_head *head, enum migrate_mode mode)
 {
diff --git a/mm/page_io.c b/mm/page_io.c
index ba05b64..8c79a47 100644
@@ -266,7 +266,6 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
 
                init_sync_kiocb(&kiocb, swap_file);
                kiocb.ki_pos = page_file_offset(page);
-               kiocb.ki_left = PAGE_SIZE;
                kiocb.ki_nbytes = PAGE_SIZE;
 
                set_page_writeback(page);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 538bade..a344327 100644
@@ -19,6 +19,7 @@
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <linux/memcontrol.h>
+#include <trace/events/kmem.h>
 
 #include "slab.h"
 
@@ -373,7 +374,7 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
 {
        int index;
 
-       if (size > KMALLOC_MAX_SIZE) {
+       if (unlikely(size > KMALLOC_MAX_SIZE)) {
                WARN_ON_ONCE(!(flags & __GFP_NOWARN));
                return NULL;
        }
@@ -495,6 +496,15 @@ void __init create_kmalloc_caches(unsigned long flags)
 }
 #endif /* !CONFIG_SLOB */
 
+#ifdef CONFIG_TRACING
+void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+       void *ret = kmalloc_order(size, flags, order);
+       trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+       return ret;
+}
+EXPORT_SYMBOL(kmalloc_order_trace);
+#endif
 
 #ifdef CONFIG_SLABINFO
 
index 91bd3f2..4bf8809 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -462,11 +462,11 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
        return ret;
 }
 
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+void *__kmalloc(size_t size, gfp_t gfp)
 {
-       return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+       return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
 }
-EXPORT_SYMBOL(__kmalloc_node);
+EXPORT_SYMBOL(__kmalloc);
 
 #ifdef CONFIG_TRACING
 void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
@@ -534,7 +534,7 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
        return 0;
 }
 
-void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
        void *b;
 
@@ -560,7 +560,27 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
        kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
        return b;
 }
+EXPORT_SYMBOL(slob_alloc_node);
+
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+       return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
+}
+EXPORT_SYMBOL(kmem_cache_alloc);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+       return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc_node);
+
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
+{
+       return slob_alloc_node(cachep, gfp, node);
+}
 EXPORT_SYMBOL(kmem_cache_alloc_node);
+#endif
 
 static void __kmem_cache_free(void *b, int size)
 {
index 51df827..c3eb3d3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -373,7 +373,8 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 #endif
        {
                slab_lock(page);
-               if (page->freelist == freelist_old && page->counters == counters_old) {
+               if (page->freelist == freelist_old &&
+                                       page->counters == counters_old) {
                        page->freelist = freelist_new;
                        page->counters = counters_new;
                        slab_unlock(page);
@@ -411,7 +412,8 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 
                local_irq_save(flags);
                slab_lock(page);
-               if (page->freelist == freelist_old && page->counters == counters_old) {
+               if (page->freelist == freelist_old &&
+                                       page->counters == counters_old) {
                        page->freelist = freelist_new;
                        page->counters = counters_new;
                        slab_unlock(page);
@@ -553,8 +555,9 @@ static void print_tracking(struct kmem_cache *s, void *object)
 
 static void print_page_info(struct page *page)
 {
-       printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
-               page, page->objects, page->inuse, page->freelist, page->flags);
+       printk(KERN_ERR
+              "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+              page, page->objects, page->inuse, page->freelist, page->flags);
 
 }
 
@@ -629,7 +632,8 @@ static void object_err(struct kmem_cache *s, struct page *page,
        print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page,
+                       const char *fmt, ...)
 {
        va_list args;
        char buf[100];
@@ -788,7 +792,8 @@ static int check_object(struct kmem_cache *s, struct page *page,
        } else {
                if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
                        check_bytes_and_report(s, page, p, "Alignment padding",
-                               endobject, POISON_INUSE, s->inuse - s->object_size);
+                               endobject, POISON_INUSE,
+                               s->inuse - s->object_size);
                }
        }
 
@@ -873,7 +878,6 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
                                object_err(s, page, object,
                                        "Freechain corrupt");
                                set_freepointer(s, object, NULL);
-                               break;
                        } else {
                                slab_err(s, page, "Freepointer corrupt");
                                page->freelist = NULL;
@@ -918,7 +922,8 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
                        page->freelist);
 
                if (!alloc)
-                       print_section("Object ", (void *)object, s->object_size);
+                       print_section("Object ", (void *)object,
+                                       s->object_size);
 
                dump_stack();
        }
@@ -937,7 +942,8 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
        return should_failslab(s->object_size, flags, s->flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+                                       gfp_t flags, void *object)
 {
        flags &= gfp_allowed_mask;
        kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
@@ -1039,7 +1045,8 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
        init_tracking(s, object);
 }
 
-static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+static noinline int alloc_debug_processing(struct kmem_cache *s,
+                                       struct page *page,
                                        void *object, unsigned long addr)
 {
        if (!check_slab(s, page))
@@ -1743,7 +1750,8 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 /*
  * Remove the cpu slab
  */
-static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
+static void deactivate_slab(struct kmem_cache *s, struct page *page,
+                               void *freelist)
 {
        enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -1999,7 +2007,8 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                page->pobjects = pobjects;
                page->next = oldpage;
 
-       } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+       } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
+                                                               != oldpage);
 #endif
 }
 
@@ -2169,8 +2178,8 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
 }
 
 /*
- * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
- * or deactivate the page.
+ * Check the page->freelist of a page and either transfer the freelist to the
+ * per cpu freelist or deactivate the page.
  *
  * The page is still frozen if the return value is not NULL.
  *
@@ -2314,7 +2323,8 @@ new_slab:
                goto load_freelist;
 
        /* Only entered in the debug case */
-       if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
+       if (kmem_cache_debug(s) &&
+                       !alloc_debug_processing(s, page, freelist, addr))
                goto new_slab;  /* Slab failed checks. Next slab needed */
 
        deactivate_slab(s, page, get_freepointer(s, freelist));
@@ -2372,7 +2382,7 @@ redo:
 
        object = c->freelist;
        page = c->page;
-       if (unlikely(!object || !page || !node_match(page, node)))
+       if (unlikely(!object || !node_match(page, node)))
                object = __slab_alloc(s, gfpflags, node, addr, c);
 
        else {
@@ -2382,13 +2392,15 @@ redo:
                 * The cmpxchg will only match if there was no additional
                 * operation and if we are on the right processor.
                 *
-                * The cmpxchg does the following atomically (without lock semantics!)
+                * The cmpxchg does the following atomically (without lock
+                * semantics!)
                 * 1. Relocate first pointer to the current per cpu area.
                 * 2. Verify that tid and freelist have not been changed
                 * 3. If they were not changed replace tid and freelist
                 *
-                * Since this is without lock semantics the protection is only against
-                * code executing on this cpu *not* from access by other cpus.
+                * Since this is without lock semantics the protection is only
+                * against code executing on this cpu *not* from access by
+                * other cpus.
                 */
                if (unlikely(!this_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
@@ -2420,7 +2432,8 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
        void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 
-       trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
+       trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+                               s->size, gfpflags);
 
        return ret;
 }
@@ -2434,14 +2447,6 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
        return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
-
-void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-       void *ret = kmalloc_order(size, flags, order);
-       trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
-       return ret;
-}
-EXPORT_SYMBOL(kmalloc_order_trace);
 #endif
 
 #ifdef CONFIG_NUMA
@@ -2512,8 +2517,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                        if (kmem_cache_has_cpu_partial(s) && !prior)
 
                                /*
-                                * Slab was on no list before and will be partially empty
-                                * We can defer the list move and instead freeze it.
+                                * Slab was on no list before and will be
+                                * partially empty
+                                * We can defer the list move and instead
+                                * freeze it.
                                 */
                                new.frozen = 1;
 
@@ -3071,8 +3078,8 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
         * A) The number of objects from per cpu partial slabs dumped to the
         *    per node list when we reach the limit.
         * B) The number of objects in cpu partial slabs to extract from the
-        *    per node list when we run out of per cpu objects. We only fetch 50%
-        *    to keep some capacity around for frees.
+        *    per node list when we run out of per cpu objects. We only fetch
+        *    50% to keep some capacity around for frees.
         */
        if (!kmem_cache_has_cpu_partial(s))
                s->cpu_partial = 0;
@@ -3099,8 +3106,8 @@ error:
        if (flags & SLAB_PANIC)
                panic("Cannot create slab %s size=%lu realsize=%u "
                        "order=%u offset=%u flags=%lx\n",
-                       s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
-                       s->offset, flags);
+                       s->name, (unsigned long)s->size, s->size,
+                       oo_order(s->oo), s->offset, flags);
        return -EINVAL;
 }
 
@@ -3316,42 +3323,6 @@ size_t ksize(const void *object)
 }
 EXPORT_SYMBOL(ksize);
 
-#ifdef CONFIG_SLUB_DEBUG
-bool verify_mem_not_deleted(const void *x)
-{
-       struct page *page;
-       void *object = (void *)x;
-       unsigned long flags;
-       bool rv;
-
-       if (unlikely(ZERO_OR_NULL_PTR(x)))
-               return false;
-
-       local_irq_save(flags);
-
-       page = virt_to_head_page(x);
-       if (unlikely(!PageSlab(page))) {
-               /* maybe it was from stack? */
-               rv = true;
-               goto out_unlock;
-       }
-
-       slab_lock(page);
-       if (on_freelist(page->slab_cache, page, object)) {
-               object_err(page->slab_cache, page, object, "Object is on free-list");
-               rv = false;
-       } else {
-               rv = true;
-       }
-       slab_unlock(page);
-
-out_unlock:
-       local_irq_restore(flags);
-       return rv;
-}
-EXPORT_SYMBOL(verify_mem_not_deleted);
-#endif
-
 void kfree(const void *x)
 {
        struct page *page;
@@ -4162,15 +4133,17 @@ static int list_locations(struct kmem_cache *s, char *buf,
                                !cpumask_empty(to_cpumask(l->cpus)) &&
                                len < PAGE_SIZE - 60) {
                        len += sprintf(buf + len, " cpus=");
-                       len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
+                       len += cpulist_scnprintf(buf + len,
+                                                PAGE_SIZE - len - 50,
                                                 to_cpumask(l->cpus));
                }
 
                if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
                                len < PAGE_SIZE - 60) {
                        len += sprintf(buf + len, " nodes=");
-                       len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-                                       l->nodes);
+                       len += nodelist_scnprintf(buf + len,
+                                                 PAGE_SIZE - len - 50,
+                                                 l->nodes);
                }
 
                len += sprintf(buf + len, "\n");
@@ -4268,18 +4241,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
        int node;
        int x;
        unsigned long *nodes;
-       unsigned long *per_cpu;
 
-       nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
+       nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
        if (!nodes)
                return -ENOMEM;
-       per_cpu = nodes + nr_node_ids;
 
        if (flags & SO_CPU) {
                int cpu;
 
                for_each_possible_cpu(cpu) {
-                       struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+                       struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
+                                                              cpu);
                        int node;
                        struct page *page;
 
@@ -4304,8 +4276,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                                total += x;
                                nodes[node] += x;
                        }
-
-                       per_cpu[node]++;
                }
        }
 
@@ -4315,12 +4285,11 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                for_each_node_state(node, N_NORMAL_MEMORY) {
                        struct kmem_cache_node *n = get_node(s, node);
 
-               if (flags & SO_TOTAL)
-                       x = atomic_long_read(&n->total_objects);
-               else if (flags & SO_OBJECTS)
-                       x = atomic_long_read(&n->total_objects) -
-                               count_partial(n, count_free);
-
+                       if (flags & SO_TOTAL)
+                               x = atomic_long_read(&n->total_objects);
+                       else if (flags & SO_OBJECTS)
+                               x = atomic_long_read(&n->total_objects) -
+                                       count_partial(n, count_free);
                        else
                                x = atomic_long_read(&n->nr_slabs);
                        total += x;
@@ -5136,7 +5105,8 @@ static char *create_unique_id(struct kmem_cache *s)
 
 #ifdef CONFIG_MEMCG_KMEM
        if (!is_root_cache(s))
-               p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg));
+               p += sprintf(p, "-%08d",
+                               memcg_cache_id(s->memcg_params->memcg));
 #endif
 
        BUG_ON(p > name + ID_STR_LENGTH - 1);
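
Several of the comments rewrapped above describe the slab_alloc() lockless fastpath: the per-cpu freelist pointer is paired with a transaction id, and both words are replaced in one cmpxchg_double, so any allocation or free that slips in between bumps the tid and makes the exchange fail, forcing a retry. Below is a rough user-space analogue of that pairing using C11 atomics on a two-word struct (hypothetical names; it ignores the per-cpu placement, preemption and ABA details the kernel handles, and may need -latomic to link):

#include <stdatomic.h>
#include <stdio.h>

struct node { struct node *next; };

/* Freelist head plus transaction id, loosely modelled on struct kmem_cache_cpu. */
struct cpu_slab {
        struct node *freelist;          /* first free object */
        unsigned long tid;              /* bumped by every alloc/free */
};

static _Atomic struct cpu_slab slab;

static struct node *fastpath_alloc(void)
{
        struct cpu_slab old, new;

        do {
                old = atomic_load(&slab);
                if (!old.freelist)
                        return NULL;    /* the kernel falls back to __slab_alloc() */
                new.freelist = old.freelist->next;
                new.tid = old.tid + 1;
                /* Succeeds only if neither freelist nor tid changed meanwhile. */
        } while (!atomic_compare_exchange_weak(&slab, &old, new));

        return old.freelist;
}

int main(void)
{
        static struct node pool[3] = { { &pool[1] }, { &pool[2] }, { NULL } };
        struct cpu_slab init = { &pool[0], 0 };
        struct node *obj;

        atomic_store(&slab, init);
        while ((obj = fastpath_alloc()))
                printf("popped %p, tid now %lu\n", (void *)obj,
                       atomic_load(&slab).tid);
        return 0;
}

In slub.c the same compare is issued as this_cpu_cmpxchg_double() against the current CPU's kmem_cache_cpu, which is why the rewrapped comment stresses that the protection is only against code running on the same cpu, not against access from other cpus.
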
diff --git a/net/socket.c b/net/socket.c
index 0ceaa5c..ebed4b6 100644
@@ -854,11 +854,6 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
 }
 EXPORT_SYMBOL(kernel_recvmsg);
 
-static void sock_aio_dtor(struct kiocb *iocb)
-{
-       kfree(iocb->private);
-}
-
 static ssize_t sock_sendpage(struct file *file, struct page *page,
                             int offset, size_t size, loff_t *ppos, int more)
 {
@@ -889,12 +884,8 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
 static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
                                         struct sock_iocb *siocb)
 {
-       if (!is_sync_kiocb(iocb)) {
-               siocb = kmalloc(sizeof(*siocb), GFP_KERNEL);
-               if (!siocb)
-                       return NULL;
-               iocb->ki_dtor = sock_aio_dtor;
-       }
+       if (!is_sync_kiocb(iocb))
+               BUG();
 
        siocb->kiocb = iocb;
        iocb->private = siocb;
@@ -931,7 +922,7 @@ static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
        if (pos != 0)
                return -ESPIPE;
 
-       if (iocb->ki_left == 0) /* Match SYS5 behaviour */
+       if (iocb->ki_nbytes == 0)       /* Match SYS5 behaviour */
                return 0;