Merge branch 'fbmem'
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 14 May 2011 18:24:32 +0000 (11:24 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 14 May 2011 18:24:32 +0000 (11:24 -0700)
* fbmem:
  Further fbcon sanity checking
  fbmem: fix remove_conflicting_framebuffers races

104 files changed:
MAINTAINERS
arch/alpha/include/asm/unistd.h
arch/alpha/kernel/systbls.S
arch/alpha/kernel/time.c
arch/arm/boot/compressed/Makefile
arch/arm/boot/compressed/head.S
arch/arm/boot/compressed/vmlinux.lds.in
arch/arm/include/asm/system.h
arch/arm/kernel/signal.c
arch/arm/mach-realview/include/mach/barriers.h
arch/arm/mach-tegra/include/mach/barriers.h
arch/arm/mm/init.c
arch/s390/include/asm/diag.h
arch/s390/include/asm/mmu_context.h
arch/s390/kernel/diag.c
arch/s390/kernel/dis.c
arch/s390/kernel/entry.S
arch/s390/kernel/entry64.S
arch/s390/mm/cmm.c
arch/s390/oprofile/hwsampler.c
arch/s390/oprofile/hwsampler.h
arch/s390/oprofile/init.c
arch/sparc/kernel/apc.c
arch/sparc/kernel/pmc.c
arch/sparc/kernel/smp_32.c
arch/sparc/kernel/time_32.c
arch/sparc/lib/checksum_32.S
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/x86_init.h
arch/x86/kernel/x86_init.c
arch/x86/mm/init.c
arch/x86/xen/mmu.c
drivers/ata/libahci.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/i2c/busses/i2c-pnx.c
drivers/input/touchscreen/ads7846.c
drivers/mfd/asic3.c
drivers/mfd/omap-usb-host.c
drivers/mfd/twl4030-power.c
drivers/net/Makefile
drivers/net/arm/etherh.c
drivers/net/bonding/bond_3ad.h
drivers/net/ehea/ehea_main.c
drivers/net/hydra.c
drivers/net/ne-h8300.c
drivers/net/sfc/mcdi.c
drivers/net/sfc/nic.h
drivers/net/sfc/siena.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/iwlegacy/iwl-core.c
drivers/net/wireless/iwlegacy/iwl-dev.h
drivers/net/wireless/libertas/cmd.c
drivers/net/zorro8390.c
drivers/rtc/rtc-s3c.c
drivers/s390/block/dasd.c
drivers/s390/char/sclp_cmd.c
drivers/video/acornfb.c
fs/ceph/caps.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/snap.c
fs/ceph/super.h
fs/fuse/dir.c
fs/namei.c
fs/nfs/nfs4filelayout.c
fs/nfs/nfs4filelayout.h
fs/nfs/nfs4filelayoutdev.c
fs/nfs/nfs4proc.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/read.c
fs/nfs/write.c
include/linux/bootmem.h
include/linux/capability.h
include/linux/cred.h
include/linux/gfp.h
include/linux/nfs_xdr.h
include/net/inet_ecn.h
include/net/llc_pdu.h
include/trace/events/gfpflags.h
kernel/capability.c
kernel/cred.c
kernel/power/suspend.c
kernel/power/user.c
lib/vsprintf.c
mm/page_alloc.c
mm/page_cgroup.c
mm/shmem.c
mm/swap.c
net/9p/protocol.c
net/bluetooth/sco.c
net/bridge/br_netfilter.c
net/mac80211/tx.c
security/selinux/ss/policydb.c
sound/soc/codecs/ssm2602.c
sound/soc/codecs/uda134x.c
sound/soc/codecs/wm8903.c
sound/soc/jz4740/jz4740-i2s.c
sound/soc/mid-x86/sst_platform.c
sound/soc/soc-core.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 16a5c5f..69f19f1 100644
@@ -2813,38 +2813,19 @@ F:      Documentation/gpio.txt
 F:     drivers/gpio/
 F:     include/linux/gpio*
 
+GRE DEMULTIPLEXER DRIVER
+M:     Dmitry Kozlov <xeb@mail.ru>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     net/ipv4/gre.c
+F:     include/net/gre.h
+
 GRETH 10/100/1G Ethernet MAC device driver
 M:     Kristoffer Glembo <kristoffer@gaisler.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/greth*
 
-HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER
-M:     Frank Seidel <frank@f-seidel.de>
-L:     platform-driver-x86@vger.kernel.org
-W:     http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
-S:     Maintained
-F:     drivers/platform/x86/hdaps.c
-
-HWPOISON MEMORY FAILURE HANDLING
-M:     Andi Kleen <andi@firstfloor.org>
-L:     linux-mm@kvack.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison
-S:     Maintained
-F:     mm/memory-failure.c
-F:     mm/hwpoison-inject.c
-
-HYPERVISOR VIRTUAL CONSOLE DRIVER
-L:     linuxppc-dev@lists.ozlabs.org
-S:     Odd Fixes
-F:     drivers/tty/hvc/
-
-iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER
-M:     Peter Jones <pjones@redhat.com>
-M:     Konrad Rzeszutek Wilk <konrad@kernel.org>
-S:     Maintained
-F:     drivers/firmware/iscsi_ibft*
-
 GSPCA FINEPIX SUBDRIVER
 M:     Frank Zago <frank@zago.net>
 L:     linux-media@vger.kernel.org
@@ -2895,6 +2876,26 @@ T:       git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:     Maintained
 F:     drivers/media/video/gspca/
 
+HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER
+M:     Frank Seidel <frank@f-seidel.de>
+L:     platform-driver-x86@vger.kernel.org
+W:     http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
+S:     Maintained
+F:     drivers/platform/x86/hdaps.c
+
+HWPOISON MEMORY FAILURE HANDLING
+M:     Andi Kleen <andi@firstfloor.org>
+L:     linux-mm@kvack.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison
+S:     Maintained
+F:     mm/memory-failure.c
+F:     mm/hwpoison-inject.c
+
+HYPERVISOR VIRTUAL CONSOLE DRIVER
+L:     linuxppc-dev@lists.ozlabs.org
+S:     Odd Fixes
+F:     drivers/tty/hvc/
+
 HARDWARE MONITORING
 M:     Jean Delvare <khali@linux-fr.org>
 M:     Guenter Roeck <guenter.roeck@ericsson.com>
@@ -3478,6 +3479,12 @@ F:       Documentation/isapnp.txt
 F:     drivers/pnp/isapnp/
 F:     include/linux/isapnp.h
 
+iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER
+M:     Peter Jones <pjones@redhat.com>
+M:     Konrad Rzeszutek Wilk <konrad@kernel.org>
+S:     Maintained
+F:     drivers/firmware/iscsi_ibft*
+
 ISCSI
 M:     Mike Christie <michaelc@cs.wisc.edu>
 L:     open-iscsi@googlegroups.com
@@ -4989,6 +4996,13 @@ F:       Documentation/pps/
 F:     drivers/pps/
 F:     include/linux/pps*.h
 
+PPTP DRIVER
+M:     Dmitry Kozlov <xeb@mail.ru>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/pptp.c
+W:     http://sourceforge.net/projects/accel-pptp
+
 PREEMPTIBLE KERNEL
 M:     Robert Love <rml@tech9.net>
 L:     kpreempt-tech@lists.sourceforge.net
@@ -7024,20 +7038,6 @@ M:       "Maciej W. Rozycki" <macro@linux-mips.org>
 S:     Maintained
 F:     drivers/tty/serial/zs.*
 
-GRE DEMULTIPLEXER DRIVER
-M:     Dmitry Kozlov <xeb@mail.ru>
-L:     netdev@vger.kernel.org
-S:     Maintained
-F:     net/ipv4/gre.c
-F:     include/net/gre.h
-
-PPTP DRIVER
-M:     Dmitry Kozlov <xeb@mail.ru>
-L:     netdev@vger.kernel.org
-S:     Maintained
-F:     drivers/net/pptp.c
-W:     http://sourceforge.net/projects/accel-pptp
-
 THE REST
 M:     Linus Torvalds <torvalds@linux-foundation.org>
 L:     linux-kernel@vger.kernel.org
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 058937b..b183416 100644
 #define __NR_fanotify_init             494
 #define __NR_fanotify_mark             495
 #define __NR_prlimit64                 496
+#define __NR_name_to_handle_at         497
+#define __NR_open_by_handle_at         498
+#define __NR_clock_adjtime             499
+#define __NR_syncfs                    500
 
 #ifdef __KERNEL__
 
-#define NR_SYSCALLS                    497
+#define NR_SYSCALLS                    501
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index a6a1de9..15f999d 100644
@@ -498,23 +498,27 @@ sys_call_table:
        .quad sys_ni_syscall                    /* sys_timerfd */
        .quad sys_eventfd
        .quad sys_recvmmsg
-       .quad sys_fallocate                             /* 480 */
+       .quad sys_fallocate                     /* 480 */
        .quad sys_timerfd_create
        .quad sys_timerfd_settime
        .quad sys_timerfd_gettime
        .quad sys_signalfd4
-       .quad sys_eventfd2                              /* 485 */
+       .quad sys_eventfd2                      /* 485 */
        .quad sys_epoll_create1
        .quad sys_dup3
        .quad sys_pipe2
        .quad sys_inotify_init1
-       .quad sys_preadv                                /* 490 */
+       .quad sys_preadv                        /* 490 */
        .quad sys_pwritev
        .quad sys_rt_tgsigqueueinfo
        .quad sys_perf_event_open
        .quad sys_fanotify_init
-       .quad sys_fanotify_mark                         /* 495 */
+       .quad sys_fanotify_mark                 /* 495 */
        .quad sys_prlimit64
+       .quad sys_name_to_handle_at
+       .quad sys_open_by_handle_at
+       .quad sys_clock_adjtime
+       .quad sys_syncfs                        /* 500 */
 
        .size sys_call_table, . - sys_call_table
        .type sys_call_table, @object
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 918e8e0..818e74e 100644
@@ -375,8 +375,7 @@ static struct clocksource clocksource_rpcc = {
 
 static inline void register_rpcc_clocksource(long cycle_freq)
 {
-       clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4);
-       clocksource_register(&clocksource_rpcc);
+       clocksource_register_hz(&clocksource_rpcc, cycle_freq);
 }
 #else /* !CONFIG_SMP */
 static inline void register_rpcc_clocksource(long cycle_freq)
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 8ebbb51..0c6852d 100644
@@ -74,7 +74,7 @@ ZTEXTADDR     := $(CONFIG_ZBOOT_ROM_TEXT)
 ZBSSADDR       := $(CONFIG_ZBOOT_ROM_BSS)
 else
 ZTEXTADDR      := 0
-ZBSSADDR       := ALIGN(4)
+ZBSSADDR       := ALIGN(8)
 endif
 
 SEDFLAGS       = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index adf583c..49f5b2e 100644
@@ -179,15 +179,14 @@ not_angel:
                bl      cache_on
 
 restart:       adr     r0, LC0
-               ldmia   r0, {r1, r2, r3, r5, r6, r9, r11, r12}
-               ldr     sp, [r0, #32]
+               ldmia   r0, {r1, r2, r3, r6, r9, r11, r12}
+               ldr     sp, [r0, #28]
 
                /*
                 * We might be running at a different address.  We need
                 * to fix up various pointers.
                 */
                sub     r0, r0, r1              @ calculate the delta offset
-               add     r5, r5, r0              @ _start
                add     r6, r6, r0              @ _edata
 
 #ifndef CONFIG_ZBOOT_ROM
@@ -206,31 +205,40 @@ restart:  adr     r0, LC0
 /*
  * Check to see if we will overwrite ourselves.
  *   r4  = final kernel address
- *   r5  = start of this image
  *   r9  = size of decompressed image
  *   r10 = end of this image, including  bss/stack/malloc space if non XIP
  * We basically want:
- *   r4 >= r10 -> OK
- *   r4 + image length <= r5 -> OK
+ *   r4 - 16k page directory >= r10 -> OK
+ *   r4 + image length <= current position (pc) -> OK
  */
+               add     r10, r10, #16384
                cmp     r4, r10
                bhs     wont_overwrite
                add     r10, r4, r9
-               cmp     r10, r5
+   ARM(                cmp     r10, pc         )
+ THUMB(                mov     lr, pc          )
+ THUMB(                cmp     r10, lr         )
                bls     wont_overwrite
 
 /*
  * Relocate ourselves past the end of the decompressed kernel.
- *   r5  = start of this image
  *   r6  = _edata
  *   r10 = end of the decompressed kernel
  * Because we always copy ahead, we need to do it from the end and go
  * backward in case the source and destination overlap.
  */
-               /* Round up to next 256-byte boundary. */
-               add     r10, r10, #256
+               /*
+                * Bump to the next 256-byte boundary with the size of
+                * the relocation code added. This avoids overwriting
+                * ourself when the offset is small.
+                */
+               add     r10, r10, #((reloc_code_end - restart + 256) & ~255)
                bic     r10, r10, #255
 
+               /* Get start of code we want to copy and align it down. */
+               adr     r5, restart
+               bic     r5, r5, #31
+
                sub     r9, r6, r5              @ size to copy
                add     r9, r9, #31             @ rounded up to a multiple
                bic     r9, r9, #31             @ ... of 32 bytes
@@ -245,6 +253,11 @@ restart:   adr     r0, LC0
                /* Preserve offset to relocated code. */
                sub     r6, r9, r6
 
+#ifndef CONFIG_ZBOOT_ROM
+               /* cache_clean_flush may use the stack, so relocate it */
+               add     sp, sp, r6
+#endif
+
                bl      cache_clean_flush
 
                adr     r0, BSYM(restart)
@@ -333,7 +346,6 @@ not_relocated:      mov     r0, #0
 LC0:           .word   LC0                     @ r1
                .word   __bss_start             @ r2
                .word   _end                    @ r3
-               .word   _start                  @ r5
                .word   _edata                  @ r6
                .word   _image_size             @ r9
                .word   _got_start              @ r11
@@ -1062,6 +1074,7 @@ memdump:  mov     r12, r0
 #endif
 
                .ltorg
+reloc_code_end:
 
                .align
                .section ".stack", "aw", %nobits
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
index 5309909..ea80abe 100644
@@ -54,6 +54,7 @@ SECTIONS
   .bss                 : { *(.bss) }
   _end = .;
 
+  . = ALIGN(8);                /* the stack must be 64-bit aligned */
   .stack               : { *(.stack) }
 
   .stab 0              : { *(.stab) }
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 885be09..832888d 100644
@@ -159,7 +159,7 @@ extern unsigned int user_debug;
 #include <mach/barriers.h>
 #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
 #define mb()           do { dsb(); outer_sync(); } while (0)
-#define rmb()          dmb()
+#define rmb()          dsb()
 #define wmb()          mb()
 #else
 #include <asm/memory.h>
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index cb83983..0340224 100644
@@ -597,19 +597,13 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
        return err;
 }
 
-static inline void setup_syscall_restart(struct pt_regs *regs)
-{
-       regs->ARM_r0 = regs->ARM_ORIG_r0;
-       regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
-}
-
 /*
  * OK, we're invoking a handler
  */    
 static int
 handle_signal(unsigned long sig, struct k_sigaction *ka,
              siginfo_t *info, sigset_t *oldset,
-             struct pt_regs * regs, int syscall)
+             struct pt_regs * regs)
 {
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = current;
@@ -617,26 +611,6 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
        int ret;
 
        /*
-        * If we were from a system call, check for system call restarting...
-        */
-       if (syscall) {
-               switch (regs->ARM_r0) {
-               case -ERESTART_RESTARTBLOCK:
-               case -ERESTARTNOHAND:
-                       regs->ARM_r0 = -EINTR;
-                       break;
-               case -ERESTARTSYS:
-                       if (!(ka->sa.sa_flags & SA_RESTART)) {
-                               regs->ARM_r0 = -EINTR;
-                               break;
-                       }
-                       /* fallthrough */
-               case -ERESTARTNOINTR:
-                       setup_syscall_restart(regs);
-               }
-       }
-
-       /*
         * translate the signal
         */
        if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
@@ -685,6 +659,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
  */
 static void do_signal(struct pt_regs *regs, int syscall)
 {
+       unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
        struct k_sigaction ka;
        siginfo_t info;
        int signr;
@@ -698,18 +673,61 @@ static void do_signal(struct pt_regs *regs, int syscall)
        if (!user_mode(regs))
                return;
 
+       /*
+        * If we were from a system call, check for system call restarting...
+        */
+       if (syscall) {
+               continue_addr = regs->ARM_pc;
+               restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
+               retval = regs->ARM_r0;
+
+               /*
+                * Prepare for system call restart.  We do this here so that a
+                * debugger will see the already changed PSW.
+                */
+               switch (retval) {
+               case -ERESTARTNOHAND:
+               case -ERESTARTSYS:
+               case -ERESTARTNOINTR:
+                       regs->ARM_r0 = regs->ARM_ORIG_r0;
+                       regs->ARM_pc = restart_addr;
+                       break;
+               case -ERESTART_RESTARTBLOCK:
+                       regs->ARM_r0 = -EINTR;
+                       break;
+               }
+       }
+
        if (try_to_freeze())
                goto no_signal;
 
+       /*
+        * Get the signal to deliver.  When running under ptrace, at this
+        * point the debugger may change all our registers ...
+        */
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                sigset_t *oldset;
 
+               /*
+                * Depending on the signal settings we may need to revert the
+                * decision to restart the system call.  But skip this if a
+                * debugger has chosen to restart at a different PC.
+                */
+               if (regs->ARM_pc == restart_addr) {
+                       if (retval == -ERESTARTNOHAND
+                           || (retval == -ERESTARTSYS
+                               && !(ka.sa.sa_flags & SA_RESTART))) {
+                               regs->ARM_r0 = -EINTR;
+                               regs->ARM_pc = continue_addr;
+                       }
+               }
+
                if (test_thread_flag(TIF_RESTORE_SIGMASK))
                        oldset = &current->saved_sigmask;
                else
                        oldset = &current->blocked;
-               if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) {
+               if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
                        /*
                         * A signal was successfully delivered; the saved
                         * sigmask will have been stored in the signal frame,
@@ -723,11 +741,14 @@ static void do_signal(struct pt_regs *regs, int syscall)
        }
 
  no_signal:
-       /*
-        * No signal to deliver to the process - restart the syscall.
-        */
        if (syscall) {
-               if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
+               /*
+                * Handle restarting a different system call.  As above,
+                * if a debugger has chosen to restart at a different PC,
+                * ignore the restart.
+                */
+               if (retval == -ERESTART_RESTARTBLOCK
+                   && regs->ARM_pc == continue_addr) {
                        if (thumb_mode(regs)) {
                                regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
                                regs->ARM_pc -= 2;
@@ -750,11 +771,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
 #endif
                        }
                }
-               if (regs->ARM_r0 == -ERESTARTNOHAND ||
-                   regs->ARM_r0 == -ERESTARTSYS ||
-                   regs->ARM_r0 == -ERESTARTNOINTR) {
-                       setup_syscall_restart(regs);
-               }
 
                /* If there's no signal to deliver, we just put the saved sigmask
                 * back.
diff --git a/arch/arm/mach-realview/include/mach/barriers.h b/arch/arm/mach-realview/include/mach/barriers.h
index 0c5d749..9a73219 100644
@@ -4,5 +4,5 @@
  * operation to deadlock the system.
  */
 #define mb()           dsb()
-#define rmb()          dmb()
+#define rmb()          dsb()
 #define wmb()          mb()
diff --git a/arch/arm/mach-tegra/include/mach/barriers.h b/arch/arm/mach-tegra/include/mach/barriers.h
index cc11517..425b42e 100644
@@ -23,7 +23,7 @@
 
 #include <asm/outercache.h>
 
-#define rmb()          dmb()
+#define rmb()          dsb()
 #define wmb()          do { dsb(); outer_sync(); } while (0)
 #define mb()           wmb()
 
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index e5f6fc4..e591513 100644
@@ -392,7 +392,7 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
-       end_pg = pfn_to_page(end_pfn);
+       end_pg = pfn_to_page(end_pfn - 1) + 1;
 
        /*
         * Convert to physical addresses, and
@@ -426,6 +426,14 @@ static void __init free_unused_memmap(struct meminfo *mi)
 
                bank_start = bank_pfn_start(bank);
 
+#ifdef CONFIG_SPARSEMEM
+               /*
+                * Take care not to free memmap entries that don't exist
+                * due to SPARSEMEM sections which aren't present.
+                */
+               bank_start = min(bank_start,
+                                ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#endif
                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
@@ -440,6 +448,12 @@ static void __init free_unused_memmap(struct meminfo *mi)
                 */
                prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
        }
+
+#ifdef CONFIG_SPARSEMEM
+       if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
+               free_memmap(prev_bank_end,
+                           ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#endif
 }
 
 static void __init free_highpages(void)
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 72b2e2f..7e91c58 100644
@@ -9,9 +9,22 @@
 #define _ASM_S390_DIAG_H
 
 /*
- * Diagnose 10: Release pages
+ * Diagnose 10: Release page range
  */
-extern void diag10(unsigned long addr);
+static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)
+{
+       unsigned long start_addr, end_addr;
+
+       start_addr = start_pfn << PAGE_SHIFT;
+       end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT;
+
+       asm volatile(
+               "0:     diag    %0,%1,0x10\n"
+               "1:\n"
+               EX_TABLE(0b, 1b)
+               EX_TABLE(1b, 1b)
+               : : "a" (start_addr), "a" (end_addr));
+}
 
 /*
  * Diagnose 14: Input spool file manipulation
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index a6f0e7c..8c277ca 100644
@@ -23,7 +23,7 @@ static inline int init_new_context(struct task_struct *tsk,
 #ifdef CONFIG_64BIT
        mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #endif
-       if (current->mm->context.alloc_pgste) {
+       if (current->mm && current->mm->context.alloc_pgste) {
                /*
                 * alloc_pgste indicates, that any NEW context will be created
                 * with extended page tables. The old context is unchanged. The
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index c032d11..8237fc0 100644
@@ -9,27 +9,6 @@
 #include <asm/diag.h>
 
 /*
- * Diagnose 10: Release pages
- */
-void diag10(unsigned long addr)
-{
-       if (addr >= 0x7ff00000)
-               return;
-       asm volatile(
-#ifdef CONFIG_64BIT
-               "       sam31\n"
-               "       diag    %0,%0,0x10\n"
-               "0:     sam64\n"
-#else
-               "       diag    %0,%0,0x10\n"
-               "0:\n"
-#endif
-               EX_TABLE(0b, 0b)
-               : : "a" (addr));
-}
-EXPORT_SYMBOL(diag10);
-
-/*
  * Diagnose 14: Input spool file manipulation
  */
 int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index c83726c..3d4a78f 100644
@@ -672,6 +672,7 @@ static struct insn opcode_b2[] = {
        { "rp", 0x77, INSTR_S_RD },
        { "stcke", 0x78, INSTR_S_RD },
        { "sacf", 0x79, INSTR_S_RD },
+       { "spp", 0x80, INSTR_S_RD },
        { "stsi", 0x7d, INSTR_S_RD },
        { "srnm", 0x99, INSTR_S_RD },
        { "stfpc", 0x9c, INSTR_S_RD },
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 648f642..1b67fc6 100644
@@ -836,7 +836,7 @@ restart_base:
        stosm   __SF_EMPTY(%r15),0x04   # now we can turn dat on
        basr    %r14,0
        l       %r14,restart_addr-.(%r14)
-       br      %r14                    # branch to start_secondary
+       basr    %r14,%r14               # branch to start_secondary
 restart_addr:
        .long   start_secondary
        .align  8
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 9d3603d..9fd8645 100644
@@ -841,7 +841,7 @@ restart_base:
        mvc     __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
        xc      __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
        stosm   __SF_EMPTY(%r15),0x04   # now we can turn dat on
-       jg      start_secondary
+       brasl   %r14,start_secondary
        .align  8
 restart_vtime:
        .long   0x7fffffff,0xffffffff
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index c66ffd8..1f1dba9 100644
@@ -91,7 +91,7 @@ static long cmm_alloc_pages(long nr, long *counter,
                        } else
                                free_page((unsigned long) npa);
                }
-               diag10(addr);
+               diag10_range(addr >> PAGE_SHIFT, 1);
                pa->pages[pa->index++] = addr;
                (*counter)++;
                spin_unlock(&cmm_lock);
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index 4952872..33cbd37 100644
@@ -1021,20 +1021,14 @@ deallocate_exit:
        return rc;
 }
 
-long hwsampler_query_min_interval(void)
+unsigned long hwsampler_query_min_interval(void)
 {
-       if (min_sampler_rate)
-               return min_sampler_rate;
-       else
-               return -EINVAL;
+       return min_sampler_rate;
 }
 
-long hwsampler_query_max_interval(void)
+unsigned long hwsampler_query_max_interval(void)
 {
-       if (max_sampler_rate)
-               return max_sampler_rate;
-       else
-               return -EINVAL;
+       return max_sampler_rate;
 }
 
 unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu)
diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h
index 8c72b59..1912f3b 100644
@@ -102,8 +102,8 @@ int hwsampler_setup(void);
 int hwsampler_shutdown(void);
 int hwsampler_allocate(unsigned long sdbt, unsigned long sdb);
 int hwsampler_deallocate(void);
-long hwsampler_query_min_interval(void);
-long hwsampler_query_max_interval(void);
+unsigned long hwsampler_query_min_interval(void);
+unsigned long hwsampler_query_max_interval(void);
 int hwsampler_start_all(unsigned long interval);
 int hwsampler_stop_all(void);
 int hwsampler_deactivate(unsigned int cpu);
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index c63d7e5..5995e9b 100644
@@ -145,15 +145,11 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
         * create hwsampler files only if hwsampler_setup() succeeds.
         */
        oprofile_min_interval = hwsampler_query_min_interval();
-       if (oprofile_min_interval < 0) {
-               oprofile_min_interval = 0;
+       if (oprofile_min_interval == 0)
                return -ENODEV;
-       }
        oprofile_max_interval = hwsampler_query_max_interval();
-       if (oprofile_max_interval < 0) {
-               oprofile_max_interval = 0;
+       if (oprofile_max_interval == 0)
                return -ENODEV;
-       }
 
        if (oprofile_timer_init(ops))
                return -ENODEV;
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index f679c57..1e34f29 100644
@@ -165,7 +165,7 @@ static int __devinit apc_probe(struct platform_device *op)
        return 0;
 }
 
-static struct of_device_id __initdata apc_match[] = {
+static struct of_device_id apc_match[] = {
        {
                .name = APC_OBPNAME,
        },
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
index 93d7b44..6a585d3 100644
@@ -69,7 +69,7 @@ static int __devinit pmc_probe(struct platform_device *op)
        return 0;
 }
 
-static struct of_device_id __initdata pmc_match[] = {
+static struct of_device_id pmc_match[] = {
        {
                .name = PMC_OBPNAME,
        },
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 91c10fb..850a136 100644
@@ -53,6 +53,7 @@ cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 void __cpuinit smp_store_cpu_info(int id)
 {
        int cpu_node;
+       int mid;
 
        cpu_data(id).udelay_val = loops_per_jiffy;
 
@@ -60,10 +61,13 @@ void __cpuinit smp_store_cpu_info(int id)
        cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
                                                     "clock-frequency", 0);
        cpu_data(id).prom_node = cpu_node;
-       cpu_data(id).mid = cpu_get_hwmid(cpu_node);
+       mid = cpu_get_hwmid(cpu_node);
 
-       if (cpu_data(id).mid < 0)
-               panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
+       if (mid < 0) {
+               printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node);
+               mid = 0;
+       }
+       cpu_data(id).mid = mid;
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 4e23639..96046a4 100644
@@ -168,7 +168,7 @@ static int __devinit clock_probe(struct platform_device *op)
        return 0;
 }
 
-static struct of_device_id __initdata clock_match[] = {
+static struct of_device_id clock_match[] = {
        {
                .name = "eeprom",
        },
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 3632cb3..0084c33 100644
@@ -289,10 +289,16 @@ cc_end_cruft:
 
        /* Also, handle the alignment code out of band. */
 cc_dword_align:
-       cmp     %g1, 6
-       bl,a    ccte
+       cmp     %g1, 16
+       bge     1f
+        srl    %g1, 1, %o3
+2:     cmp     %o3, 0
+       be,a    ccte
         andcc  %g1, 0xf, %o3
-       andcc   %o0, 0x1, %g0
+       andcc   %o3, %o0, %g0   ! Check %o0 only (%o1 has the same last 2 bits)
+       be,a    2b
+        srl    %o3, 1, %o3
+1:     andcc   %o0, 0x1, %g0
        bne     ccslow
         andcc  %o0, 0x2, %g0
        be      1f
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 7db7723..d56187c 100644
@@ -299,6 +299,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 /* Install a pte for a particular vaddr in kernel space. */
 void set_pte_vaddr(unsigned long vaddr, pte_t pte);
 
+extern void native_pagetable_reserve(u64 start, u64 end);
 #ifdef CONFIG_X86_32
 extern void native_pagetable_setup_start(pgd_t *base);
 extern void native_pagetable_setup_done(pgd_t *base);
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 643ebf2..d3d8590 100644
@@ -68,6 +68,17 @@ struct x86_init_oem {
 };
 
 /**
+ * struct x86_init_mapping - platform specific initial kernel pagetable setup
+ * @pagetable_reserve: reserve a range of addresses for kernel pagetable usage
+ *
+ * For more details on the purpose of this hook, look in
+ * init_memory_mapping and the commit that added it.
+ */
+struct x86_init_mapping {
+       void (*pagetable_reserve)(u64 start, u64 end);
+};
+
+/**
  * struct x86_init_paging - platform specific paging functions
  * @pagetable_setup_start:     platform specific pre paging_init() call
  * @pagetable_setup_done:      platform specific post paging_init() call
@@ -123,6 +134,7 @@ struct x86_init_ops {
        struct x86_init_mpparse         mpparse;
        struct x86_init_irqs            irqs;
        struct x86_init_oem             oem;
+       struct x86_init_mapping         mapping;
        struct x86_init_paging          paging;
        struct x86_init_timers          timers;
        struct x86_init_iommu           iommu;
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index c11514e..75ef4b1 100644
@@ -61,6 +61,10 @@ struct x86_init_ops x86_init __initdata = {
                .banner                 = default_banner,
        },
 
+       .mapping = {
+               .pagetable_reserve              = native_pagetable_reserve,
+       },
+
        .paging = {
                .pagetable_setup_start  = native_pagetable_setup_start,
                .pagetable_setup_done   = native_pagetable_setup_done,
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 286d289..37b8b0f 100644
@@ -81,6 +81,11 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
                end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
 }
 
+void __init native_pagetable_reserve(u64 start, u64 end)
+{
+       memblock_x86_reserve_range(start, end, "PGTABLE");
+}
+
 struct map_range {
        unsigned long start;
        unsigned long end;
@@ -272,9 +277,24 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
        __flush_tlb_all();
 
+       /*
+        * Reserve the kernel pagetable pages we used (pgt_buf_start -
+        * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
+        * so that they can be reused for other purposes.
+        *
+        * On native it just means calling memblock_x86_reserve_range, on Xen it
+        * also means marking RW the pagetable pages that we allocated before
+        * but that haven't been used.
+        *
+        * In fact on xen we mark RO the whole range pgt_buf_start -
+        * pgt_buf_top, because we have to make sure that when
+        * init_memory_mapping reaches the pagetable pages area, it maps
+        * RO all the pagetable pages, including the ones that are beyond
+        * pgt_buf_end at that time.
+        */
        if (!after_bootmem && pgt_buf_end > pgt_buf_start)
-               memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT,
-                                pgt_buf_end << PAGE_SHIFT, "PGTABLE");
+               x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start),
+                               PFN_PHYS(pgt_buf_end));
 
        if (!after_bootmem)
                early_memtest(start, end);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 55c965b..0684f3c 100644
@@ -1275,6 +1275,20 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
 {
 }
 
+static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
+{
+       /* reserve the range used */
+       native_pagetable_reserve(start, end);
+
+       /* set as RW the rest */
+       printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
+                       PFN_PHYS(pgt_buf_top));
+       while (end < PFN_PHYS(pgt_buf_top)) {
+               make_lowmem_page_readwrite(__va(end));
+               end += PAGE_SIZE;
+       }
+}
+
 static void xen_post_allocator_init(void);
 
 static __init void xen_pagetable_setup_done(pgd_t *base)
@@ -1463,119 +1477,6 @@ static int xen_pgd_alloc(struct mm_struct *mm)
        return ret;
 }
 
-#ifdef CONFIG_X86_64
-static __initdata u64 __last_pgt_set_rw = 0;
-static __initdata u64 __pgt_buf_start = 0;
-static __initdata u64 __pgt_buf_end = 0;
-static __initdata u64 __pgt_buf_top = 0;
-/*
- * As a consequence of the commit:
- * 
- * commit 4b239f458c229de044d6905c2b0f9fe16ed9e01e
- * Author: Yinghai Lu <yinghai@kernel.org>
- * Date:   Fri Dec 17 16:58:28 2010 -0800
- * 
- *     x86-64, mm: Put early page table high
- * 
- * at some point init_memory_mapping is going to reach the pagetable pages
- * area and map those pages too (mapping them as normal memory that falls
- * in the range of addresses passed to init_memory_mapping as argument).
- * Some of those pages are already pagetable pages (they are in the range
- * pgt_buf_start-pgt_buf_end) therefore they are going to be mapped RO and
- * everything is fine.
- * Some of these pages are not pagetable pages yet (they fall in the range
- * pgt_buf_end-pgt_buf_top; for example the page at pgt_buf_end) so they
- * are going to be mapped RW.  When these pages become pagetable pages and
- * are hooked into the pagetable, xen will find that the guest has already
- * a RW mapping of them somewhere and fail the operation.
- * The reason Xen requires pagetables to be RO is that the hypervisor needs
- * to verify that the pagetables are valid before using them. The validation
- * operations are called "pinning".
- * 
- * In order to fix the issue we mark all the pages in the entire range
- * pgt_buf_start-pgt_buf_top as RO, however when the pagetable allocation
- * is completed only the range pgt_buf_start-pgt_buf_end is reserved by
- * init_memory_mapping. Hence the kernel is going to crash as soon as one
- * of the pages in the range pgt_buf_end-pgt_buf_top is reused (b/c those
- * ranges are RO).
- * 
- * For this reason, 'mark_rw_past_pgt' is introduced which is called _after_
- * the init_memory_mapping has completed (in a perfect world we would
- * call this function from init_memory_mapping, but lets ignore that).
- * 
- * Because we are called _after_ init_memory_mapping the pgt_buf_[start,
- * end,top] have all changed to new values (b/c init_memory_mapping
- * is called and setting up another new page-table). Hence, the first time
- * we enter this function, we save away the pgt_buf_start value and update
- * the pgt_buf_[end,top].
- * 
- * When we detect that the "old" pgt_buf_start through pgt_buf_end
- * PFNs have been reserved (so memblock_x86_reserve_range has been called),
- * we immediately set out to RW the "old" pgt_buf_end through pgt_buf_top.
- * 
- * And then we update those "old" pgt_buf_[end|top] with the new ones
- * so that we can redo this on the next pagetable.
- */
-static __init void mark_rw_past_pgt(void) {
-
-       if (pgt_buf_end > pgt_buf_start) {
-               u64 addr, size;
-
-               /* Save it away. */
-               if (!__pgt_buf_start) {
-                       __pgt_buf_start = pgt_buf_start;
-                       __pgt_buf_end = pgt_buf_end;
-                       __pgt_buf_top = pgt_buf_top;
-                       return;
-               }
-               /* If we get the range that starts at __pgt_buf_end that means
-                * the range is reserved, and that in 'init_memory_mapping'
-                * the 'memblock_x86_reserve_range' has been called with the
-                * outdated __pgt_buf_start, __pgt_buf_end (the "new"
-                * pgt_buf_[start|end|top] refer now to a new pagetable.
-                * Note: we are called _after_ the pgt_buf_[..] have been
-                * updated.*/
-
-               addr = memblock_x86_find_in_range_size(PFN_PHYS(__pgt_buf_start),
-                                                      &size, PAGE_SIZE);
-
-               /* Still not reserved, meaning 'memblock_x86_reserve_range'
-                * hasn't been called yet. Update the _end and _top.*/
-               if (addr == PFN_PHYS(__pgt_buf_start)) {
-                       __pgt_buf_end = pgt_buf_end;
-                       __pgt_buf_top = pgt_buf_top;
-                       return;
-               }
-
-               /* OK, the area is reserved, meaning it is time for us to
-                * set RW for the old end->top PFNs. */
-
-               /* ..unless we had already done this. */
-               if (__pgt_buf_end == __last_pgt_set_rw)
-                       return;
-
-               addr = PFN_PHYS(__pgt_buf_end);
-               
-               /* set as RW the rest */
-               printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n",
-                       PFN_PHYS(__pgt_buf_end), PFN_PHYS(__pgt_buf_top));
-               
-               while (addr < PFN_PHYS(__pgt_buf_top)) {
-                       make_lowmem_page_readwrite(__va(addr));
-                       addr += PAGE_SIZE;
-               }
-               /* And update everything so that we are ready for the next
-                * pagetable (the one created for regions past 4GB) */
-               __last_pgt_set_rw = __pgt_buf_end;
-               __pgt_buf_start = pgt_buf_start;
-               __pgt_buf_end = pgt_buf_end;
-               __pgt_buf_top = pgt_buf_top;
-       }
-       return;
-}
-#else
-static __init void mark_rw_past_pgt(void) { }
-#endif
 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
 #ifdef CONFIG_X86_64
@@ -1602,14 +1503,6 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
        unsigned long pfn = pte_pfn(pte);
 
        /*
-        * A bit of optimization. We do not need to call the workaround
-        * when xen_set_pte_init is called with a PTE with 0 as PFN.
-        * That is b/c the pagetable at that point are just being populated
-        * with empty values and we can save some cycles by not calling
-        * the 'memblock' code.*/
-       if (pfn)
-               mark_rw_past_pgt();
-       /*
         * If the new pfn is within the range of the newly allocated
         * kernel pagetable, and it isn't being mapped into an
         * early_ioremap fixmap slot as a freshly allocated page, make sure
@@ -2118,8 +2011,6 @@ __init void xen_ident_map_ISA(void)
 
 static __init void xen_post_allocator_init(void)
 {
-       mark_rw_past_pgt();
-
 #ifdef CONFIG_XEN_DEBUG
        pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
 #endif
@@ -2228,6 +2119,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 
 void __init xen_init_mmu_ops(void)
 {
+       x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
        x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
        x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
        pv_mmu_ops = xen_mmu_ops;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index ff9d832..d38c40f 100644
@@ -561,27 +561,6 @@ void ahci_start_engine(struct ata_port *ap)
 {
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 tmp;
-       u8 status;
-
-       status = readl(port_mmio + PORT_TFDATA) & 0xFF;
-
-       /*
-        * At end of section 10.1 of AHCI spec (rev 1.3), it states
-        * Software shall not set PxCMD.ST to 1 until it is determined
-        * that a functoinal device is present on the port as determined by
-        * PxTFD.STS.BSY=0, PxTFD.STS.DRQ=0 and PxSSTS.DET=3h
-        *
-        * Even though most AHCI host controllers work without this check,
-        * specific controller will fail under this condition
-        */
-       if (status & (ATA_BUSY | ATA_DRQ))
-               return;
-       else {
-               ahci_scr_read(&ap->link, SCR_STATUS, &tmp);
-
-               if ((tmp & 0xf) != 0x3)
-                       return;
-       }
 
        /* start DMA */
        tmp = readl(port_mmio + PORT_CMD);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c34a8dd..32d1b3e 100644
@@ -49,7 +49,7 @@ module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
 unsigned int i915_powersave = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
 
-unsigned int i915_semaphores = 1;
+unsigned int i915_semaphores = 0;
 module_param_named(semaphores, i915_semaphores, int, 0600);
 
 unsigned int i915_enable_rc6 = 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 373c2a0..2166ee0 100644
@@ -5154,6 +5154,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
        I915_WRITE(DSPCNTR(plane), dspcntr);
        POSTING_READ(DSPCNTR(plane));
+       if (!HAS_PCH_SPLIT(dev))
+               intel_enable_plane(dev_priv, plane, pipe);
 
        ret = intel_pipe_set_base(crtc, x, y, old_fb);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 4bce801..c77111e 100644
@@ -42,7 +42,8 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 
        nvbe->nr_pages = 0;
        while (num_pages--) {
-               if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
+               /* this code path isn't called and is incorrect anyways */
+               if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/
                        nvbe->pages[nvbe->nr_pages] =
                                        dma_addrs[nvbe->nr_pages];
                        nvbe->ttm_alloced[nvbe->nr_pages] = true;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7aade20..3d8a763 100644
@@ -674,7 +674,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
        cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
        cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
-       cgts_tcc_disable = RREG32(CGTS_TCC_DISABLE);
+       cgts_tcc_disable = 0xff000000;
        gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
        gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
        cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
@@ -871,7 +871,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
        smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
        smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
-       smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
+       smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
        WREG32(SMX_DC_CTL0, smx_dc_ctl0);
 
        WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);
@@ -887,20 +887,20 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
        WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);
 
-       WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
-                                       POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
-                                       SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
+       WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
+                                       POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
+                                       SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));
 
-       WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
-                                SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
-                                SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
+       WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
+                                SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
+                                SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));
 
 
        WREG32(VGT_NUM_INSTANCES, 1);
 
        WREG32(CP_PERFMON_CNTL, 0);
 
-       WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
+       WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
                                  FETCH_FIFO_HIWATER(0x4) |
                                  DONE_FIFO_HIWATER(0xe0) |
                                  ALU_UPDATE_FIFO_HIWATER(0x8)));
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 8a955bb..a533f52 100644
@@ -181,9 +181,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
        p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
        for (i = 0; i < pages; i++, p++) {
-               /* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32
-                * is requested. */
-               if (dma_addr[i] != DMA_ERROR_CODE) {
+               /* we reverted the patch using dma_addr in TTM for now but this
+                * code stops building on alpha so just comment it out for now */
+               if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */
                        rdev->gart.ttm_alloced[p] = true;
                        rdev->gart.pages_addr[p] = dma_addr[i];
                } else {
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index a97e3fe..04be9f8 100644
@@ -65,7 +65,7 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
                jiffies, expires);
 
        timer->expires = jiffies + expires;
-       timer->data = (unsigned long)&alg_data;
+       timer->data = (unsigned long)alg_data;
 
        add_timer(timer);
 }
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index c24946f..1de1c19 100644
@@ -281,17 +281,24 @@ struct ser_req {
        u8                      command;
        u8                      ref_off;
        u16                     scratch;
-       __be16                  sample;
        struct spi_message      msg;
        struct spi_transfer     xfer[6];
+       /*
+        * DMA (thus cache coherency maintenance) requires the
+        * transfer buffers to live in their own cache lines.
+        */
+       __be16 sample ____cacheline_aligned;
 };
 
 struct ads7845_ser_req {
        u8                      command[3];
-       u8                      pwrdown[3];
-       u8                      sample[3];
        struct spi_message      msg;
        struct spi_transfer     xfer[2];
+       /*
+        * DMA (thus cache coherency maintenance) requires the
+        * transfer buffers to live in their own cache lines.
+        */
+       u8 sample[3] ____cacheline_aligned;
 };
 
 static int ads7846_read12_ser(struct device *dev, unsigned command)
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index d4a851c..0b4d5b2 100644
@@ -144,7 +144,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
        int iter, i;
        unsigned long flags;
 
-       data->chip->irq_ack(irq_data);
+       data->chip->irq_ack(data);
 
        for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
                u32 status;
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 2e16511..3ab9ffa 100644
@@ -717,14 +717,14 @@ static int usbhs_enable(struct device *dev)
                        gpio_request(pdata->ehci_data->reset_gpio_port[0],
                                                "USB1 PHY reset");
                        gpio_direction_output
-                               (pdata->ehci_data->reset_gpio_port[0], 1);
+                               (pdata->ehci_data->reset_gpio_port[0], 0);
                }
 
                if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) {
                        gpio_request(pdata->ehci_data->reset_gpio_port[1],
                                                "USB2 PHY reset");
                        gpio_direction_output
-                               (pdata->ehci_data->reset_gpio_port[1], 1);
+                               (pdata->ehci_data->reset_gpio_port[1], 0);
                }
 
                /* Hold the PHY in RESET for enough time till DIR is high */
@@ -904,11 +904,11 @@ static int usbhs_enable(struct device *dev)
 
                if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
                        gpio_set_value
-                               (pdata->ehci_data->reset_gpio_port[0], 0);
+                               (pdata->ehci_data->reset_gpio_port[0], 1);
 
                if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
                        gpio_set_value
-                               (pdata->ehci_data->reset_gpio_port[1], 0);
+                               (pdata->ehci_data->reset_gpio_port[1], 1);
        }
 
 end_count:
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 16422de..2c0d4d1 100644
@@ -447,12 +447,13 @@ static int __init load_twl4030_script(struct twl4030_script *tscript,
                if (err)
                        goto out;
        }
-       if (tscript->flags & TWL4030_SLEEP_SCRIPT)
+       if (tscript->flags & TWL4030_SLEEP_SCRIPT) {
                if (order)
                        pr_warning("TWL4030: Bad order of scripts (sleep "\
                                        "script before wakeup) Leads to boot"\
                                        "failure on some boards\n");
                err = twl4030_config_sleep_sequence(address);
+       }
 out:
        return err;
 }
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 01b604a..e5a7375 100644
@@ -144,7 +144,7 @@ obj-$(CONFIG_NE3210) += ne3210.o 8390.o
 obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
 obj-$(CONFIG_B44) += b44.o
 obj-$(CONFIG_FORCEDETH) += forcedeth.o
-obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
+obj-$(CONFIG_NE_H8300) += ne-h8300.o
 obj-$(CONFIG_AX88796) += ax88796.o
 obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
 obj-$(CONFIG_FTMAC100) += ftmac100.o
@@ -219,7 +219,7 @@ obj-$(CONFIG_SC92031) += sc92031.o
 obj-$(CONFIG_LP486E) += lp486e.o
 
 obj-$(CONFIG_ETH16I) += eth16i.o
-obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
+obj-$(CONFIG_ZORRO8390) += zorro8390.o
 obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
 obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
 obj-$(CONFIG_EQUALIZER) += eql.o
@@ -231,7 +231,7 @@ obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o
 obj-$(CONFIG_DECLANCE) += declance.o
 obj-$(CONFIG_ATARILANCE) += atarilance.o
 obj-$(CONFIG_A2065) += a2065.o
-obj-$(CONFIG_HYDRA) += hydra.o 8390.o
+obj-$(CONFIG_HYDRA) += hydra.o
 obj-$(CONFIG_ARIADNE) += ariadne.o
 obj-$(CONFIG_CS89x0) += cs89x0.o
 obj-$(CONFIG_MACSONIC) += macsonic.o
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 4af235d..fbfb5b4 100644
@@ -527,7 +527,7 @@ static void __init etherh_banner(void)
  * Read the ethernet address string from the on board rom.
  * This is an ascii string...
  */
-static int __init etherh_addr(char *addr, struct expansion_card *ec)
+static int __devinit etherh_addr(char *addr, struct expansion_card *ec)
 {
        struct in_chunk_dir cd;
        char *s;
@@ -655,7 +655,7 @@ static const struct net_device_ops etherh_netdev_ops = {
 static u32 etherh_regoffsets[16];
 static u32 etherm_regoffsets[16];
 
-static int __init
+static int __devinit
 etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
 {
        const struct etherh_data *data = id->data;
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index b28baff..01b8a6a 100644
@@ -39,7 +39,7 @@
 
 typedef struct mac_addr {
        u8 mac_addr_value[ETH_ALEN];
-} mac_addr_t;
+} __packed mac_addr_t;
 
 enum {
        BOND_AD_STABLE = 0,
@@ -134,12 +134,12 @@ typedef struct lacpdu {
        u8 tlv_type_terminator;      // = terminator
        u8 terminator_length;        // = 0
        u8 reserved_50[50];          // = 0
-} lacpdu_t;
+} __packed lacpdu_t;
 
 typedef struct lacpdu_header {
        struct ethhdr hdr;
        struct lacpdu lacpdu;
-} lacpdu_header_t;
+} __packed lacpdu_header_t;
 
 // Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard)
 typedef struct bond_marker {
@@ -155,12 +155,12 @@ typedef struct bond_marker {
        u8 tlv_type_terminator;      //  = 0x00
        u8 terminator_length;        //  = 0x00
        u8 reserved_90[90];          //  = 0
-} bond_marker_t;
+} __packed bond_marker_t;
 
 typedef struct bond_marker_header {
        struct ethhdr hdr;
        struct bond_marker marker;
-} bond_marker_header_t;
+} __packed bond_marker_header_t;
 
 #pragma pack()
 
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 53c0f04..cf79cf7 100644
@@ -2688,9 +2688,6 @@ static int ehea_open(struct net_device *dev)
                netif_start_queue(dev);
        }
 
-       init_waitqueue_head(&port->swqe_avail_wq);
-       init_waitqueue_head(&port->restart_wq);
-
        mutex_unlock(&port->port_lock);
 
        return ret;
@@ -3276,6 +3273,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 
        INIT_WORK(&port->reset_task, ehea_reset_port);
 
+       init_waitqueue_head(&port->swqe_avail_wq);
+       init_waitqueue_head(&port->restart_wq);
+
        ret = register_netdev(dev);
        if (ret) {
                pr_err("register_netdev failed. ret=%d\n", ret);
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index c5ef62c..1cd481c 100644
@@ -98,15 +98,15 @@ static const struct net_device_ops hydra_netdev_ops = {
        .ndo_open               = hydra_open,
        .ndo_stop               = hydra_close,
 
-       .ndo_start_xmit         = ei_start_xmit,
-       .ndo_tx_timeout         = ei_tx_timeout,
-       .ndo_get_stats          = ei_get_stats,
-       .ndo_set_multicast_list = ei_set_multicast_list,
+       .ndo_start_xmit         = __ei_start_xmit,
+       .ndo_tx_timeout         = __ei_tx_timeout,
+       .ndo_get_stats          = __ei_get_stats,
+       .ndo_set_multicast_list = __ei_set_multicast_list,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_mac_address    = eth_mac_addr,
        .ndo_change_mtu         = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ei_poll,
+       .ndo_poll_controller    = __ei_poll,
 #endif
 };
 
@@ -125,7 +125,7 @@ static int __devinit hydra_init(struct zorro_dev *z)
        0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
     };
 
-    dev = alloc_ei_netdev();
+    dev = ____alloc_ei_netdev(0);
     if (!dev)
        return -ENOMEM;
 
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index 30be8c6..7298a34 100644
@@ -167,7 +167,7 @@ static void cleanup_card(struct net_device *dev)
 #ifndef MODULE
 struct net_device * __init ne_probe(int unit)
 {
-       struct net_device *dev = alloc_ei_netdev();
+       struct net_device *dev = ____alloc_ei_netdev(0);
        int err;
 
        if (!dev)
@@ -197,15 +197,15 @@ static const struct net_device_ops ne_netdev_ops = {
        .ndo_open               = ne_open,
        .ndo_stop               = ne_close,
 
-       .ndo_start_xmit         = ei_start_xmit,
-       .ndo_tx_timeout         = ei_tx_timeout,
-       .ndo_get_stats          = ei_get_stats,
-       .ndo_set_multicast_list = ei_set_multicast_list,
+       .ndo_start_xmit         = __ei_start_xmit,
+       .ndo_tx_timeout         = __ei_tx_timeout,
+       .ndo_get_stats          = __ei_get_stats,
+       .ndo_set_multicast_list = __ei_set_multicast_list,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_mac_address    = eth_mac_addr,
        .ndo_change_mtu         = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ei_poll,
+       .ndo_poll_controller    = __ei_poll,
 #endif
 };
 
@@ -637,7 +637,7 @@ int init_module(void)
        int err;
 
        for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
-               struct net_device *dev = alloc_ei_netdev();
+               struct net_device *dev = ____alloc_ei_netdev(0);
                if (!dev)
                        break;
                if (io[this_dev]) {
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index d984790..3dd45ed 100644
@@ -50,6 +50,20 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
        return &nic_data->mcdi;
 }
 
+static inline void
+efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
+{
+       struct siena_nic_data *nic_data = efx->nic_data;
+       value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
+}
+
+static inline void
+efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
+{
+       struct siena_nic_data *nic_data = efx->nic_data;
+       __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
+}
+
 void efx_mcdi_init(struct efx_nic *efx)
 {
        struct efx_mcdi_iface *mcdi;
@@ -70,8 +84,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
                            const u8 *inbuf, size_t inlen)
 {
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-       unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
-       unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
+       unsigned pdu = MCDI_PDU(efx);
+       unsigned doorbell = MCDI_DOORBELL(efx);
        unsigned int i;
        efx_dword_t hdr;
        u32 xflags, seqno;
@@ -92,30 +106,28 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
                             MCDI_HEADER_SEQ, seqno,
                             MCDI_HEADER_XFLAGS, xflags);
 
-       efx_writed(efx, &hdr, pdu);
+       efx_mcdi_writed(efx, &hdr, pdu);
 
-       for (i = 0; i < inlen; i += 4) {
-               _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
-               /* use wmb() within loop to inhibit write combining */
-               wmb();
-       }
+       for (i = 0; i < inlen; i += 4)
+               efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i),
+                               pdu + 4 + i);
 
        /* ring the doorbell with a distinctive value */
-       _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
-       wmb();
+       EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc);
+       efx_mcdi_writed(efx, &hdr, doorbell);
 }
 
 static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
 {
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-       unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+       unsigned int pdu = MCDI_PDU(efx);
        int i;
 
        BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
        BUG_ON(outlen & 3 || outlen >= 0x100);
 
        for (i = 0; i < outlen; i += 4)
-               *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
+               efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i);
 }
 
 static int efx_mcdi_poll(struct efx_nic *efx)
@@ -123,7 +135,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
        unsigned int time, finish;
        unsigned int respseq, respcmd, error;
-       unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+       unsigned int pdu = MCDI_PDU(efx);
        unsigned int rc, spins;
        efx_dword_t reg;
 
@@ -149,8 +161,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
 
                time = get_seconds();
 
-               rmb();
-               efx_readd(efx, &reg, pdu);
+               efx_mcdi_readd(efx, &reg, pdu);
 
                /* All 1's indicates that shared memory is in reset (and is
                 * not a valid header). Wait for it to come out reset before
@@ -177,7 +188,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
                          respseq, mcdi->seqno);
                rc = EIO;
        } else if (error) {
-               efx_readd(efx, &reg, pdu + 4);
+               efx_mcdi_readd(efx, &reg, pdu + 4);
                switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
 #define TRANSLATE_ERROR(name)                                  \
                case MC_CMD_ERR_ ## name:                       \
@@ -211,21 +222,21 @@ out:
 /* Test and clear MC-rebooted flag for this port/function */
 int efx_mcdi_poll_reboot(struct efx_nic *efx)
 {
-       unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
+       unsigned int addr = MCDI_REBOOT_FLAG(efx);
        efx_dword_t reg;
        uint32_t value;
 
        if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
                return false;
 
-       efx_readd(efx, &reg, addr);
+       efx_mcdi_readd(efx, &reg, addr);
        value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
 
        if (value == 0)
                return 0;
 
        EFX_ZERO_DWORD(reg);
-       efx_writed(efx, &reg, addr);
+       efx_mcdi_writed(efx, &reg, addr);
 
        if (value == MC_STATUS_DWORD_ASSERT)
                return -EINTR;
index a42db6e..d91701a 100644 (file)
@@ -143,10 +143,12 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
 /**
  * struct siena_nic_data - Siena NIC state
  * @mcdi: Management-Controller-to-Driver Interface
+ * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
  * @wol_filter_id: Wake-on-LAN packet filter id
  */
 struct siena_nic_data {
        struct efx_mcdi_iface mcdi;
+       void __iomem *mcdi_smem;
        int wol_filter_id;
 };
 
index e4dd898..837869b 100644 (file)
@@ -220,12 +220,26 @@ static int siena_probe_nic(struct efx_nic *efx)
        efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
        efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
 
+       /* Initialise MCDI */
+       nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
+                                             FR_CZ_MC_TREG_SMEM,
+                                             FR_CZ_MC_TREG_SMEM_STEP *
+                                             FR_CZ_MC_TREG_SMEM_ROWS);
+       if (!nic_data->mcdi_smem) {
+               netif_err(efx, probe, efx->net_dev,
+                         "could not map MCDI at %llx+%x\n",
+                         (unsigned long long)efx->membase_phys +
+                         FR_CZ_MC_TREG_SMEM,
+                         FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
+               rc = -ENOMEM;
+               goto fail1;
+       }
        efx_mcdi_init(efx);
 
        /* Recover from a failed assertion before probing */
        rc = efx_mcdi_handle_assertion(efx);
        if (rc)
-               goto fail1;
+               goto fail2;
 
        /* Let the BMC know that the driver is now in charge of link and
         * filter settings. We must do this before we reset the NIC */
@@ -280,6 +294,7 @@ fail4:
 fail3:
        efx_mcdi_drv_attach(efx, false, NULL);
 fail2:
+       iounmap(nic_data->mcdi_smem);
 fail1:
        kfree(efx->nic_data);
        return rc;
@@ -359,6 +374,8 @@ static int siena_init_nic(struct efx_nic *efx)
 
 static void siena_remove_nic(struct efx_nic *efx)
 {
+       struct siena_nic_data *nic_data = efx->nic_data;
+
        efx_nic_free_buffer(efx, &efx->irq_status);
 
        siena_reset_hw(efx, RESET_TYPE_ALL);
@@ -368,7 +385,8 @@ static void siena_remove_nic(struct efx_nic *efx)
                efx_mcdi_drv_attach(efx, false, NULL);
 
        /* Tear down the private nic state */
-       kfree(efx->nic_data);
+       iounmap(nic_data->mcdi_smem);
+       kfree(nic_data);
        efx->nic_data = NULL;
 }
 
@@ -606,8 +624,7 @@ struct efx_nic_type siena_a0_nic_type = {
        .default_mac_ops = &efx_mcdi_mac_operations,
 
        .revision = EFX_REV_SIENA_A0,
-       .mem_map_size = (FR_CZ_MC_TREG_SMEM +
-                        FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
+       .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */
        .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
        .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
        .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
index 17d04ff..1482fa6 100644 (file)
@@ -2141,6 +2141,8 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
 static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
 {
        struct ath_softc *sc = hw->priv;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
        int timeout = 200; /* ms */
        int i, j;
 
@@ -2149,6 +2151,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
 
        cancel_delayed_work_sync(&sc->tx_complete_work);
 
+       if (sc->sc_flags & SC_OP_INVALID) {
+               ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
+               mutex_unlock(&sc->mutex);
+               return;
+       }
+
        if (drop)
                timeout = 1;
 
index c1511b1..42db0fc 100644 (file)
@@ -2155,6 +2155,13 @@ int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
                        goto set_ch_out;
                }
 
+               if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
+                   !iwl_legacy_is_channel_ibss(ch_info)) {
+                       IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n");
+                       ret = -EINVAL;
+                       goto set_ch_out;
+               }
+
                spin_lock_irqsave(&priv->lock, flags);
 
                for_each_context(priv, ctx) {
index 9ee849d..f43ac1e 100644 (file)
@@ -1411,6 +1411,12 @@ iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
        return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
 }
 
+static inline int
+iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch)
+{
+       return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
+}
+
 static inline void
 __iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
 {
index 7e8a658..f3ac624 100644 (file)
@@ -1339,8 +1339,8 @@ int lbs_execute_next_command(struct lbs_private *priv)
                                    cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) {
                                        lbs_deb_host(
                                               "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n");
-                                       list_del(&cmdnode->list);
                                        spin_lock_irqsave(&priv->driver_lock, flags);
+                                       list_del(&cmdnode->list);
                                        lbs_complete_command(priv, cmdnode, 0);
                                        spin_unlock_irqrestore(&priv->driver_lock, flags);
 
@@ -1352,8 +1352,8 @@ int lbs_execute_next_command(struct lbs_private *priv)
                                    (priv->psstate == PS_STATE_PRE_SLEEP)) {
                                        lbs_deb_host(
                                               "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n");
-                                       list_del(&cmdnode->list);
                                        spin_lock_irqsave(&priv->driver_lock, flags);
+                                       list_del(&cmdnode->list);
                                        lbs_complete_command(priv, cmdnode, 0);
                                        spin_unlock_irqrestore(&priv->driver_lock, flags);
                                        priv->needtowakeup = 1;
@@ -1366,7 +1366,9 @@ int lbs_execute_next_command(struct lbs_private *priv)
                                       "EXEC_NEXT_CMD: sending EXIT_PS\n");
                        }
                }
+               spin_lock_irqsave(&priv->driver_lock, flags);
                list_del(&cmdnode->list);
+               spin_unlock_irqrestore(&priv->driver_lock, flags);
                lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n",
                            le16_to_cpu(cmd->command));
                lbs_submit_command(priv, cmdnode);
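All three cmd.c hunks apply the same locking rule: a command node is only unlinked from the queue while priv->driver_lock is held, so each list_del() moves under the spinlock instead of running just before it (in the two power-save branches the completion already sat under the lock; the final hunk adds the lock purely around the unlink before submission). The pattern, restated from the hunks above:

        spin_lock_irqsave(&priv->driver_lock, flags);
        list_del(&cmdnode->list);               /* unlink only under the lock */
        spin_unlock_irqrestore(&priv->driver_lock, flags);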
index b78a38d..8c7c522 100644 (file)
@@ -126,7 +126,7 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z,
 
     board = z->resource.start;
     ioaddr = board+cards[i].offset;
-    dev = alloc_ei_netdev();
+    dev = ____alloc_ei_netdev(0);
     if (!dev)
        return -ENOMEM;
     if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) {
@@ -146,15 +146,15 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z,
 static const struct net_device_ops zorro8390_netdev_ops = {
        .ndo_open               = zorro8390_open,
        .ndo_stop               = zorro8390_close,
-       .ndo_start_xmit         = ei_start_xmit,
-       .ndo_tx_timeout         = ei_tx_timeout,
-       .ndo_get_stats          = ei_get_stats,
-       .ndo_set_multicast_list = ei_set_multicast_list,
+       .ndo_start_xmit         = __ei_start_xmit,
+       .ndo_tx_timeout         = __ei_tx_timeout,
+       .ndo_get_stats          = __ei_get_stats,
+       .ndo_set_multicast_list = __ei_set_multicast_list,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_change_mtu         = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ei_poll,
+       .ndo_poll_controller    = __ei_poll,
 #endif
 };
 
index b3466c4..16512ec 100644 (file)
@@ -46,6 +46,7 @@ static struct clk *rtc_clk;
 static void __iomem *s3c_rtc_base;
 static int s3c_rtc_alarmno = NO_IRQ;
 static int s3c_rtc_tickno  = NO_IRQ;
+static bool wake_en;
 static enum s3c_cpu_type s3c_rtc_cpu_type;
 
 static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
@@ -562,8 +563,12 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
        }
        s3c_rtc_enable(pdev, 0);
 
-       if (device_may_wakeup(&pdev->dev))
-               enable_irq_wake(s3c_rtc_alarmno);
+       if (device_may_wakeup(&pdev->dev) && !wake_en) {
+               if (enable_irq_wake(s3c_rtc_alarmno) == 0)
+                       wake_en = true;
+               else
+                       dev_err(&pdev->dev, "enable_irq_wake failed\n");
+       }
 
        return 0;
 }
@@ -579,8 +584,10 @@ static int s3c_rtc_resume(struct platform_device *pdev)
                writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
        }
 
-       if (device_may_wakeup(&pdev->dev))
+       if (device_may_wakeup(&pdev->dev) && wake_en) {
                disable_irq_wake(s3c_rtc_alarmno);
+               wake_en = false;
+       }
 
        return 0;
 }
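The new wake_en flag keeps enable_irq_wake()/disable_irq_wake() balanced across suspend/resume cycles: the alarm wake source is armed at most once, only if arming succeeded, and only a successfully armed source is disarmed on resume. Sketch of the pattern, names as in the hunks above:

        static bool wake_en;

        /* suspend path */
        if (device_may_wakeup(&pdev->dev) && !wake_en) {
                if (enable_irq_wake(s3c_rtc_alarmno) == 0)
                        wake_en = true;
                else
                        dev_err(&pdev->dev, "enable_irq_wake failed\n");
        }

        /* resume path */
        if (device_may_wakeup(&pdev->dev) && wake_en) {
                disable_irq_wake(s3c_rtc_alarmno);
                wake_en = false;
        }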
index 475e603..86b6f1c 100644 (file)
@@ -1742,11 +1742,20 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
 static inline int _dasd_term_running_cqr(struct dasd_device *device)
 {
        struct dasd_ccw_req *cqr;
+       int rc;
 
        if (list_empty(&device->ccw_queue))
                return 0;
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
-       return device->discipline->term_IO(cqr);
+       rc = device->discipline->term_IO(cqr);
+       if (!rc)
+               /*
+                * CQR terminated because a more important request is pending.
+                * Undo decreasing of retry counter because this is
+                * not an error case.
+                */
+               cqr->retries++;
+       return rc;
 }
 
 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
index 4b60ede..be55fb2 100644 (file)
@@ -518,6 +518,8 @@ static void __init insert_increment(u16 rn, int standby, int assigned)
                return;
        new_incr->rn = rn;
        new_incr->standby = standby;
+       if (!standby)
+               new_incr->usecount = 1;
        last_rn = 0;
        prev = &sclp_mem_list;
        list_for_each_entry(incr, &sclp_mem_list, list) {
index 82acb8d..6183a57 100644 (file)
@@ -66,7 +66,7 @@
  * have.  Allow 1% either way on the nominal for TVs.
  */
 #define NR_MONTYPES    6
-static struct fb_monspecs monspecs[NR_MONTYPES] __initdata = {
+static struct fb_monspecs monspecs[NR_MONTYPES] __devinitdata = {
        {       /* TV           */
                .hfmin  = 15469,
                .hfmax  = 15781,
@@ -873,7 +873,7 @@ static struct fb_ops acornfb_ops = {
 /*
  * Everything after here is initialisation!!!
  */
-static struct fb_videomode modedb[] __initdata = {
+static struct fb_videomode modedb[] __devinitdata = {
        {       /* 320x256 @ 50Hz */
                NULL, 50,  320,  256, 125000,  92,  62,  35, 19,  38, 2,
                FB_SYNC_COMP_HIGH_ACT,
@@ -925,8 +925,7 @@ static struct fb_videomode modedb[] __initdata = {
        }
 };
 
-static struct fb_videomode __initdata
-acornfb_default_mode = {
+static struct fb_videomode acornfb_default_mode __devinitdata = {
        .name =         NULL,
        .refresh =      60,
        .xres =         640,
@@ -942,7 +941,7 @@ acornfb_default_mode = {
        .vmode =        FB_VMODE_NONINTERLACED
 };
 
-static void __init acornfb_init_fbinfo(void)
+static void __devinit acornfb_init_fbinfo(void)
 {
        static int first = 1;
 
@@ -1018,8 +1017,7 @@ static void __init acornfb_init_fbinfo(void)
  *     size can optionally be followed by 'M' or 'K' for
  *     MB or KB respectively.
  */
-static void __init
-acornfb_parse_mon(char *opt)
+static void __devinit acornfb_parse_mon(char *opt)
 {
        char *p = opt;
 
@@ -1066,8 +1064,7 @@ bad:
        current_par.montype = -1;
 }
 
-static void __init
-acornfb_parse_montype(char *opt)
+static void __devinit acornfb_parse_montype(char *opt)
 {
        current_par.montype = -2;
 
@@ -1108,8 +1105,7 @@ acornfb_parse_montype(char *opt)
        }
 }
 
-static void __init
-acornfb_parse_dram(char *opt)
+static void __devinit acornfb_parse_dram(char *opt)
 {
        unsigned int size;
 
@@ -1134,15 +1130,14 @@ acornfb_parse_dram(char *opt)
 static struct options {
        char *name;
        void (*parse)(char *opt);
-} opt_table[] __initdata = {
+} opt_table[] __devinitdata = {
        { "mon",     acornfb_parse_mon     },
        { "montype", acornfb_parse_montype },
        { "dram",    acornfb_parse_dram    },
        { NULL, NULL }
 };
 
-int __init
-acornfb_setup(char *options)
+static int __devinit acornfb_setup(char *options)
 {
        struct options *optp;
        char *opt;
@@ -1179,8 +1174,7 @@ acornfb_setup(char *options)
  * Detect type of monitor connected
  *  For now, we just assume SVGA
  */
-static int __init
-acornfb_detect_monitortype(void)
+static int __devinit acornfb_detect_monitortype(void)
 {
        return 4;
 }
index 9fa0866..2a5404c 100644 (file)
@@ -819,7 +819,7 @@ int __ceph_caps_used(struct ceph_inode_info *ci)
                used |= CEPH_CAP_FILE_CACHE;
        if (ci->i_wr_ref)
                used |= CEPH_CAP_FILE_WR;
-       if (ci->i_wrbuffer_ref)
+       if (ci->i_wb_ref || ci->i_wrbuffer_ref)
                used |= CEPH_CAP_FILE_BUFFER;
        return used;
 }
@@ -1990,11 +1990,11 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got)
        if (got & CEPH_CAP_FILE_WR)
                ci->i_wr_ref++;
        if (got & CEPH_CAP_FILE_BUFFER) {
-               if (ci->i_wrbuffer_ref == 0)
+               if (ci->i_wb_ref == 0)
                        ihold(&ci->vfs_inode);
-               ci->i_wrbuffer_ref++;
-               dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n",
-                    &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref);
+               ci->i_wb_ref++;
+               dout("__take_cap_refs %p wb %d -> %d (?)\n",
+                    &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
        }
 }
 
@@ -2169,12 +2169,12 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
                if (--ci->i_rdcache_ref == 0)
                        last++;
        if (had & CEPH_CAP_FILE_BUFFER) {
-               if (--ci->i_wrbuffer_ref == 0) {
+               if (--ci->i_wb_ref == 0) {
                        last++;
                        put++;
                }
-               dout("put_cap_refs %p wrbuffer %d -> %d (?)\n",
-                    inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref);
+               dout("put_cap_refs %p wb %d -> %d (?)\n",
+                    inode, ci->i_wb_ref+1, ci->i_wb_ref);
        }
        if (had & CEPH_CAP_FILE_WR)
                if (--ci->i_wr_ref == 0) {
index 03d6daf..70b6a48 100644 (file)
@@ -355,6 +355,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
        ci->i_rd_ref = 0;
        ci->i_rdcache_ref = 0;
        ci->i_wr_ref = 0;
+       ci->i_wb_ref = 0;
        ci->i_wrbuffer_ref = 0;
        ci->i_wrbuffer_ref_head = 0;
        ci->i_shared_gen = 0;
index f60b07b..d0fae4c 100644 (file)
@@ -3304,8 +3304,8 @@ static void con_put(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
 
+       dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
        ceph_put_mds_session(s);
-       dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref));
 }
 
 /*
index e86ec11..24067d6 100644 (file)
@@ -206,7 +206,7 @@ void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
                up_write(&mdsc->snap_rwsem);
        } else {
                spin_lock(&mdsc->snap_empty_lock);
-               list_add(&mdsc->snap_empty, &realm->empty_item);
+               list_add(&realm->empty_item, &mdsc->snap_empty);
                spin_unlock(&mdsc->snap_empty_lock);
        }
 }
index b1f1b8b..f5cabef 100644 (file)
@@ -293,7 +293,7 @@ struct ceph_inode_info {
 
        /* held references to caps */
        int i_pin_ref;
-       int i_rd_ref, i_rdcache_ref, i_wr_ref;
+       int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref;
        int i_wrbuffer_ref, i_wrbuffer_ref_head;
        u32 i_shared_gen;       /* increment each time we get FILE_SHARED */
        u32 i_rdcache_gen;      /* incremented each time we get FILE_CACHE. */
index c6ba49b..b32eb29 100644 (file)
@@ -174,7 +174,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
                if (!inode)
                        return 0;
 
-               if (nd->flags & LOOKUP_RCU)
+               if (nd && (nd->flags & LOOKUP_RCU))
                        return -ECHILD;
 
                fc = get_fuse_conn(inode);
index 54fc993..e3c4f11 100644 (file)
@@ -179,7 +179,7 @@ EXPORT_SYMBOL(putname);
 static int acl_permission_check(struct inode *inode, int mask, unsigned int flags,
                int (*check_acl)(struct inode *inode, int mask, unsigned int flags))
 {
-       umode_t                 mode = inode->i_mode;
+       unsigned int mode = inode->i_mode;
 
        mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
 
index 6f8192f..be79dc9 100644 (file)
@@ -117,6 +117,8 @@ static int filelayout_async_handle_error(struct rpc_task *task,
        case -EKEYEXPIRED:
                rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX);
                break;
+       case -NFS4ERR_RETRY_UNCACHED_REP:
+               break;
        default:
                dprintk("%s DS error. Retry through MDS %d\n", __func__,
                        task->tk_status);
@@ -416,7 +418,8 @@ static int
 filelayout_check_layout(struct pnfs_layout_hdr *lo,
                        struct nfs4_filelayout_segment *fl,
                        struct nfs4_layoutget_res *lgr,
-                       struct nfs4_deviceid *id)
+                       struct nfs4_deviceid *id,
+                       gfp_t gfp_flags)
 {
        struct nfs4_file_layout_dsaddr *dsaddr;
        int status = -EINVAL;
@@ -439,7 +442,7 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
        /* find and reference the deviceid */
        dsaddr = nfs4_fl_find_get_deviceid(id);
        if (dsaddr == NULL) {
-               dsaddr = get_device_info(lo->plh_inode, id);
+               dsaddr = get_device_info(lo->plh_inode, id, gfp_flags);
                if (dsaddr == NULL)
                        goto out;
        }
@@ -500,7 +503,8 @@ static int
 filelayout_decode_layout(struct pnfs_layout_hdr *flo,
                         struct nfs4_filelayout_segment *fl,
                         struct nfs4_layoutget_res *lgr,
-                        struct nfs4_deviceid *id)
+                        struct nfs4_deviceid *id,
+                        gfp_t gfp_flags)
 {
        struct xdr_stream stream;
        struct xdr_buf buf = {
@@ -516,7 +520,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
 
        dprintk("%s: set_layout_map Begin\n", __func__);
 
-       scratch = alloc_page(GFP_KERNEL);
+       scratch = alloc_page(gfp_flags);
        if (!scratch)
                return -ENOMEM;
 
@@ -554,13 +558,13 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
                goto out_err;
 
        fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
-                              GFP_KERNEL);
+                              gfp_flags);
        if (!fl->fh_array)
                goto out_err;
 
        for (i = 0; i < fl->num_fh; i++) {
                /* Do we want to use a mempool here? */
-               fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL);
+               fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags);
                if (!fl->fh_array[i])
                        goto out_err_free;
 
@@ -605,19 +609,20 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg)
 
 static struct pnfs_layout_segment *
 filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
-                     struct nfs4_layoutget_res *lgr)
+                     struct nfs4_layoutget_res *lgr,
+                     gfp_t gfp_flags)
 {
        struct nfs4_filelayout_segment *fl;
        int rc;
        struct nfs4_deviceid id;
 
        dprintk("--> %s\n", __func__);
-       fl = kzalloc(sizeof(*fl), GFP_KERNEL);
+       fl = kzalloc(sizeof(*fl), gfp_flags);
        if (!fl)
                return NULL;
 
-       rc = filelayout_decode_layout(layoutid, fl, lgr, &id);
-       if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id)) {
+       rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags);
+       if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) {
                _filelayout_free_lseg(fl);
                return NULL;
        }
@@ -633,7 +638,7 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
                int size = (fl->stripe_type == STRIPE_SPARSE) ?
                        fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
 
-               fl->commit_buckets = kcalloc(size, sizeof(struct list_head), GFP_KERNEL);
+               fl->commit_buckets = kcalloc(size, sizeof(struct list_head), gfp_flags);
                if (!fl->commit_buckets) {
                        filelayout_free_lseg(&fl->generic_hdr);
                        return NULL;
index 7c44579..2b461d7 100644 (file)
@@ -104,6 +104,6 @@ extern struct nfs4_file_layout_dsaddr *
 nfs4_fl_find_get_deviceid(struct nfs4_deviceid *dev_id);
 extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
 struct nfs4_file_layout_dsaddr *
-get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id);
+get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags);
 
 #endif /* FS_NFS_NFS4FILELAYOUT_H */
index de5350f..db07c7a 100644 (file)
@@ -225,11 +225,11 @@ nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
 }
 
 static struct nfs4_pnfs_ds *
-nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port)
+nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port, gfp_t gfp_flags)
 {
        struct nfs4_pnfs_ds *tmp_ds, *ds;
 
-       ds = kzalloc(sizeof(*tmp_ds), GFP_KERNEL);
+       ds = kzalloc(sizeof(*tmp_ds), gfp_flags);
        if (!ds)
                goto out;
 
@@ -261,7 +261,7 @@ out:
  * Currently only support ipv4, and one multi-path address.
  */
 static struct nfs4_pnfs_ds *
-decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode)
+decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode, gfp_t gfp_flags)
 {
        struct nfs4_pnfs_ds *ds = NULL;
        char *buf;
@@ -303,7 +303,7 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode)
                        rlen);
                goto out_err;
        }
-       buf = kmalloc(rlen + 1, GFP_KERNEL);
+       buf = kmalloc(rlen + 1, gfp_flags);
        if (!buf) {
                dprintk("%s: Not enough memory\n", __func__);
                goto out_err;
@@ -333,7 +333,7 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode)
        sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]);
        port = htons((tmp[0] << 8) | (tmp[1]));
 
-       ds = nfs4_pnfs_ds_add(inode, ip_addr, port);
+       ds = nfs4_pnfs_ds_add(inode, ip_addr, port, gfp_flags);
        dprintk("%s: Decoded address and port %s\n", __func__, buf);
 out_free:
        kfree(buf);
@@ -343,7 +343,7 @@ out_err:
 
 /* Decode opaque device data and return the result */
 static struct nfs4_file_layout_dsaddr*
-decode_device(struct inode *ino, struct pnfs_device *pdev)
+decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
 {
        int i;
        u32 cnt, num;
@@ -362,7 +362,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev)
        struct page *scratch;
 
        /* set up xdr stream */
-       scratch = alloc_page(GFP_KERNEL);
+       scratch = alloc_page(gfp_flags);
        if (!scratch)
                goto out_err;
 
@@ -384,7 +384,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev)
        }
 
        /* read stripe indices */
-       stripe_indices = kcalloc(cnt, sizeof(u8), GFP_KERNEL);
+       stripe_indices = kcalloc(cnt, sizeof(u8), gfp_flags);
        if (!stripe_indices)
                goto out_err_free_scratch;
 
@@ -423,7 +423,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev)
 
        dsaddr = kzalloc(sizeof(*dsaddr) +
                        (sizeof(struct nfs4_pnfs_ds *) * (num - 1)),
-                       GFP_KERNEL);
+                       gfp_flags);
        if (!dsaddr)
                goto out_err_free_stripe_indices;
 
@@ -452,7 +452,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev)
                for (j = 0; j < mp_count; j++) {
                        if (j == 0) {
                                dsaddr->ds_list[i] = decode_and_add_ds(&stream,
-                                       ino);
+                                       ino, gfp_flags);
                                if (dsaddr->ds_list[i] == NULL)
                                        goto out_err_free_deviceid;
                        } else {
@@ -503,12 +503,12 @@ out_err:
  * available devices.
  */
 static struct nfs4_file_layout_dsaddr *
-decode_and_add_device(struct inode *inode, struct pnfs_device *dev)
+decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_flags)
 {
        struct nfs4_file_layout_dsaddr *d, *new;
        long hash;
 
-       new = decode_device(inode, dev);
+       new = decode_device(inode, dev, gfp_flags);
        if (!new) {
                printk(KERN_WARNING "%s: Could not decode or add device\n",
                        __func__);
@@ -537,7 +537,7 @@ decode_and_add_device(struct inode *inode, struct pnfs_device *dev)
  * of available devices, and return it.
  */
 struct nfs4_file_layout_dsaddr *
-get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
+get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags)
 {
        struct pnfs_device *pdev = NULL;
        u32 max_resp_sz;
@@ -556,17 +556,17 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
        dprintk("%s inode %p max_resp_sz %u max_pages %d\n",
                __func__, inode, max_resp_sz, max_pages);
 
-       pdev = kzalloc(sizeof(struct pnfs_device), GFP_KERNEL);
+       pdev = kzalloc(sizeof(struct pnfs_device), gfp_flags);
        if (pdev == NULL)
                return NULL;
 
-       pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL);
+       pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
        if (pages == NULL) {
                kfree(pdev);
                return NULL;
        }
        for (i = 0; i < max_pages; i++) {
-               pages[i] = alloc_page(GFP_KERNEL);
+               pages[i] = alloc_page(gfp_flags);
                if (!pages[i])
                        goto out_free;
        }
@@ -587,7 +587,7 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
         * Found new device, need to decode it and then add it to the
         * list of known devices for this mountpoint.
         */
-       dsaddr = decode_and_add_device(inode, pdev);
+       dsaddr = decode_and_add_device(inode, pdev, gfp_flags);
 out_free:
        for (i = 0; i < max_pages; i++)
                __free_page(pages[i]);
index 69c0f3c..cf1b339 100644 (file)
@@ -300,6 +300,7 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
                        ret = nfs4_delay(server->client, &exception->timeout);
                        if (ret != 0)
                                break;
+               case -NFS4ERR_RETRY_UNCACHED_REP:
                case -NFS4ERR_OLD_STATEID:
                        exception->retry = 1;
                        break;
@@ -3695,6 +3696,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
                        rpc_delay(task, NFS4_POLL_RETRY_MAX);
                        task->tk_status = 0;
                        return -EAGAIN;
+               case -NFS4ERR_RETRY_UNCACHED_REP:
                case -NFS4ERR_OLD_STATEID:
                        task->tk_status = 0;
                        return -EAGAIN;
@@ -4844,6 +4846,8 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
                dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
                rpc_delay(task, NFS4_POLL_RETRY_MIN);
                task->tk_status = 0;
+               /* fall through */
+       case -NFS4ERR_RETRY_UNCACHED_REP:
                nfs_restart_rpc(task, data->clp);
                return;
        }
@@ -5479,6 +5483,8 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
                break;
        case -NFS4ERR_DELAY:
                rpc_delay(task, NFS4_POLL_RETRY_MAX);
+               /* fall through */
+       case -NFS4ERR_RETRY_UNCACHED_REP:
                return -EAGAIN;
        default:
                nfs4_schedule_lease_recovery(clp);
index ff681ab..f57f528 100644 (file)
@@ -383,6 +383,7 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
                                plh_layouts);
                dprintk("%s freeing layout for inode %lu\n", __func__,
                        lo->plh_inode->i_ino);
+               list_del_init(&lo->plh_layouts);
                pnfs_destroy_layout(NFS_I(lo->plh_inode));
        }
 }
@@ -466,7 +467,8 @@ pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
 static struct pnfs_layout_segment *
 send_layoutget(struct pnfs_layout_hdr *lo,
           struct nfs_open_context *ctx,
-          u32 iomode)
+          u32 iomode,
+          gfp_t gfp_flags)
 {
        struct inode *ino = lo->plh_inode;
        struct nfs_server *server = NFS_SERVER(ino);
@@ -479,7 +481,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
        dprintk("--> %s\n", __func__);
 
        BUG_ON(ctx == NULL);
-       lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
+       lgp = kzalloc(sizeof(*lgp), gfp_flags);
        if (lgp == NULL)
                return NULL;
 
@@ -487,12 +489,12 @@ send_layoutget(struct pnfs_layout_hdr *lo,
        max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
        max_pages = max_resp_sz >> PAGE_SHIFT;
 
-       pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL);
+       pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
        if (!pages)
                goto out_err_free;
 
        for (i = 0; i < max_pages; i++) {
-               pages[i] = alloc_page(GFP_KERNEL);
+               pages[i] = alloc_page(gfp_flags);
                if (!pages[i])
                        goto out_err_free;
        }
@@ -508,6 +510,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
        lgp->args.layout.pages = pages;
        lgp->args.layout.pglen = max_pages * PAGE_SIZE;
        lgp->lsegpp = &lseg;
+       lgp->gfp_flags = gfp_flags;
 
        /* Synchronously retrieve layout information from server and
         * store in lseg.
@@ -665,11 +668,11 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo,
 }
 
 static struct pnfs_layout_hdr *
-alloc_init_layout_hdr(struct inode *ino)
+alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags)
 {
        struct pnfs_layout_hdr *lo;
 
-       lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
+       lo = kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
        if (!lo)
                return NULL;
        atomic_set(&lo->plh_refcount, 1);
@@ -681,7 +684,7 @@ alloc_init_layout_hdr(struct inode *ino)
 }
 
 static struct pnfs_layout_hdr *
-pnfs_find_alloc_layout(struct inode *ino)
+pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags)
 {
        struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_hdr *new = NULL;
@@ -696,7 +699,7 @@ pnfs_find_alloc_layout(struct inode *ino)
                        return nfsi->layout;
        }
        spin_unlock(&ino->i_lock);
-       new = alloc_init_layout_hdr(ino);
+       new = alloc_init_layout_hdr(ino, gfp_flags);
        spin_lock(&ino->i_lock);
 
        if (likely(nfsi->layout == NULL))       /* Won the race? */
@@ -756,7 +759,8 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
 struct pnfs_layout_segment *
 pnfs_update_layout(struct inode *ino,
                   struct nfs_open_context *ctx,
-                  enum pnfs_iomode iomode)
+                  enum pnfs_iomode iomode,
+                  gfp_t gfp_flags)
 {
        struct nfs_inode *nfsi = NFS_I(ino);
        struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
@@ -767,7 +771,7 @@ pnfs_update_layout(struct inode *ino,
        if (!pnfs_enabled_sb(NFS_SERVER(ino)))
                return NULL;
        spin_lock(&ino->i_lock);
-       lo = pnfs_find_alloc_layout(ino);
+       lo = pnfs_find_alloc_layout(ino, gfp_flags);
        if (lo == NULL) {
                dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
                goto out_unlock;
@@ -807,7 +811,7 @@ pnfs_update_layout(struct inode *ino,
                spin_unlock(&clp->cl_lock);
        }
 
-       lseg = send_layoutget(lo, ctx, iomode);
+       lseg = send_layoutget(lo, ctx, iomode, gfp_flags);
        if (!lseg && first) {
                spin_lock(&clp->cl_lock);
                list_del_init(&lo->plh_layouts);
@@ -846,7 +850,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
                goto out;
        }
        /* Inject layout blob into I/O device driver */
-       lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
+       lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
        if (!lseg || IS_ERR(lseg)) {
                if (!lseg)
                        status = -ENOMEM;
@@ -899,7 +903,8 @@ static int pnfs_read_pg_test(struct nfs_pageio_descriptor *pgio,
                /* This is first coelesce call for a series of nfs_pages */
                pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                                   prev->wb_context,
-                                                  IOMODE_READ);
+                                                  IOMODE_READ,
+                                                  GFP_KERNEL);
        }
        return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
 }
@@ -921,7 +926,8 @@ static int pnfs_write_pg_test(struct nfs_pageio_descriptor *pgio,
                /* This is first coelesce call for a series of nfs_pages */
                pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                                   prev->wb_context,
-                                                  IOMODE_RW);
+                                                  IOMODE_RW,
+                                                  GFP_NOFS);
        }
        return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
 }
index bc48272..0c015ba 100644 (file)
@@ -70,7 +70,7 @@ struct pnfs_layoutdriver_type {
        const u32 id;
        const char *name;
        struct module *owner;
-       struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr);
+       struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags);
        void (*free_lseg) (struct pnfs_layout_segment *lseg);
 
        /* test for nfs page cache coalescing */
@@ -126,7 +126,7 @@ void get_layout_hdr(struct pnfs_layout_hdr *lo);
 void put_lseg(struct pnfs_layout_segment *lseg);
 struct pnfs_layout_segment *
 pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
-                  enum pnfs_iomode access_type);
+                  enum pnfs_iomode access_type, gfp_t gfp_flags);
 void set_pnfs_layoutdriver(struct nfs_server *, u32 id);
 void unset_pnfs_layoutdriver(struct nfs_server *);
 enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *,
@@ -245,7 +245,7 @@ static inline void put_lseg(struct pnfs_layout_segment *lseg)
 
 static inline struct pnfs_layout_segment *
 pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
-                  enum pnfs_iomode access_type)
+                  enum pnfs_iomode access_type, gfp_t gfp_flags)
 {
        return NULL;
 }
index 7cded2b..2bcf0dc 100644 (file)
@@ -288,7 +288,7 @@ static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc)
        atomic_set(&req->wb_complete, requests);
 
        BUG_ON(desc->pg_lseg != NULL);
-       lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ);
+       lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL);
        ClearPageError(page);
        offset = 0;
        nbytes = desc->pg_count;
@@ -351,7 +351,7 @@ static int nfs_pagein_one(struct nfs_pageio_descriptor *desc)
        }
        req = nfs_list_entry(data->pages.next);
        if ((!lseg) && list_is_singular(&data->pages))
-               lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ);
+               lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL);
 
        ret = nfs_read_rpcsetup(req, data, &nfs_read_full_ops, desc->pg_count,
                                0, lseg);
index 3bd5d7e..49c715b 100644 (file)
@@ -939,7 +939,7 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc)
        atomic_set(&req->wb_complete, requests);
 
        BUG_ON(desc->pg_lseg);
-       lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW);
+       lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS);
        ClearPageError(page);
        offset = 0;
        nbytes = desc->pg_count;
@@ -1013,7 +1013,7 @@ static int nfs_flush_one(struct nfs_pageio_descriptor *desc)
        }
        req = nfs_list_entry(data->pages.next);
        if ((!lseg) && list_is_singular(&data->pages))
-               lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW);
+               lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS);
 
        if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
            (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
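The gfp_flags parameter threaded through filelayout_alloc_lseg(), get_device_info(), pnfs_update_layout() and their helpers lets the caller state its allocation context: read paths keep GFP_KERNEL, while writeback paths pass GFP_NOFS so layoutget allocations cannot recurse back into the filesystem under memory pressure. Condensed to the two kinds of call site (argument names generalised from the read.c and write.c hunks above):

        /* read path: may sleep and re-enter the filesystem */
        lseg = pnfs_update_layout(inode, ctx, IOMODE_READ, GFP_KERNEL);

        /* writeback path: must not recurse into the FS */
        lseg = pnfs_update_layout(inode, ctx, IOMODE_RW, GFP_NOFS);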
index b8613e8..01eca17 100644 (file)
@@ -111,6 +111,8 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
        __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_node(pgdat, x) \
        __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_node_nopanic(pgdat, x) \
+       __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node(pgdat, x) \
        __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node_nopanic(pgdat, x) \
index 16ee8b4..d4675af 100644 (file)
@@ -546,18 +546,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap);
 extern bool capable(int cap);
 extern bool ns_capable(struct user_namespace *ns, int cap);
 extern bool task_ns_capable(struct task_struct *t, int cap);
-
-/**
- * nsown_capable - Check superior capability to one's own user_ns
- * @cap: The capability in question
- *
- * Return true if the current task has the given superior capability
- * targeted at its own user namespace.
- */
-static inline bool nsown_capable(int cap)
-{
-       return ns_capable(current_user_ns(), cap);
-}
+extern bool nsown_capable(int cap);
 
 /* audit system wants to get cap info from files as well */
 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
index 9aeeb0b..be16b61 100644 (file)
@@ -146,6 +146,7 @@ struct cred {
        void            *security;      /* subjective LSM security */
 #endif
        struct user_struct *user;       /* real user ID subscription */
+       struct user_namespace *user_ns; /* cached user->user_ns */
        struct group_info *group_info;  /* supplementary groups for euid/fsgid */
        struct rcu_head rcu;            /* RCU deletion hook */
 };
@@ -354,10 +355,15 @@ static inline void put_cred(const struct cred *_cred)
 #define current_fsgid()        (current_cred_xxx(fsgid))
 #define current_cap()          (current_cred_xxx(cap_effective))
 #define current_user()         (current_cred_xxx(user))
-#define _current_user_ns()     (current_cred_xxx(user)->user_ns)
 #define current_security()     (current_cred_xxx(security))
 
-extern struct user_namespace *current_user_ns(void);
+#ifdef CONFIG_USER_NS
+#define current_user_ns() (current_cred_xxx(user_ns))
+#else
+extern struct user_namespace init_user_ns;
+#define current_user_ns() (&init_user_ns)
+#endif
+
 
 #define current_uid_gid(_uid, _gid)            \
 do {                                           \
index bfb8f93..56d8fc8 100644 (file)
@@ -353,6 +353,8 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
 
 void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
 void free_pages_exact(void *virt, size_t size);
+/* This is different from alloc_pages_exact_node !!! */
+void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
 
 #define __get_free_page(gfp_mask) \
                __get_free_pages((gfp_mask), 0)
index 890dce2..7e371f7 100644 (file)
@@ -233,6 +233,7 @@ struct nfs4_layoutget {
        struct nfs4_layoutget_args args;
        struct nfs4_layoutget_res res;
        struct pnfs_layout_segment **lsegpp;
+       gfp_t gfp_flags;
 };
 
 struct nfs4_getdeviceinfo_args {
index 88bdd01..2fa8d13 100644 (file)
@@ -38,9 +38,19 @@ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
        return outer;
 }
 
-#define        INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0)
-#define        INET_ECN_dontxmit(sk) \
-       do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0)
+static inline void INET_ECN_xmit(struct sock *sk)
+{
+       inet_sk(sk)->tos |= INET_ECN_ECT_0;
+       if (inet6_sk(sk) != NULL)
+               inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
+}
+
+static inline void INET_ECN_dontxmit(struct sock *sk)
+{
+       inet_sk(sk)->tos &= ~INET_ECN_MASK;
+       if (inet6_sk(sk) != NULL)
+               inet6_sk(sk)->tclass &= ~INET_ECN_MASK;
+}
 
 #define IP6_ECN_flow_init(label) do {          \
       (label) &= ~htonl(INET_ECN_MASK << 20);  \
index 75b8e29..f57e7d4 100644 (file)
@@ -199,7 +199,7 @@ struct llc_pdu_sn {
        u8 ssap;
        u8 ctrl_1;
        u8 ctrl_2;
-};
+} __packed;
 
 static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb)
 {
@@ -211,7 +211,7 @@ struct llc_pdu_un {
        u8 dsap;
        u8 ssap;
        u8 ctrl_1;
-};
+} __packed;
 
 static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
 {
@@ -359,7 +359,7 @@ struct llc_xid_info {
        u8 fmt_id;      /* always 0x81 for LLC */
        u8 type;        /* different if NULL/non-NULL LSAP */
        u8 rw;          /* sender receive window */
-};
+} __packed;
 
 /**
  *     llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID
@@ -415,7 +415,7 @@ struct llc_frmr_info {
        u8  curr_ssv;           /* current send state variable val */
        u8  curr_rsv;           /* current receive state variable */
        u8  ind_bits;           /* indicator bits set with macro */
-};
+} __packed;
 
 extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
 extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
index e3615c0..9fe3a36 100644 (file)
@@ -10,6 +10,7 @@
  */
 #define show_gfp_flags(flags)                                          \
        (flags) ? __print_flags(flags, "|",                             \
+       {(unsigned long)GFP_TRANSHUGE,          "GFP_TRANSHUGE"},       \
        {(unsigned long)GFP_HIGHUSER_MOVABLE,   "GFP_HIGHUSER_MOVABLE"}, \
        {(unsigned long)GFP_HIGHUSER,           "GFP_HIGHUSER"},        \
        {(unsigned long)GFP_USER,               "GFP_USER"},            \
@@ -32,6 +33,9 @@
        {(unsigned long)__GFP_HARDWALL,         "GFP_HARDWALL"},        \
        {(unsigned long)__GFP_THISNODE,         "GFP_THISNODE"},        \
        {(unsigned long)__GFP_RECLAIMABLE,      "GFP_RECLAIMABLE"},     \
-       {(unsigned long)__GFP_MOVABLE,          "GFP_MOVABLE"}          \
+       {(unsigned long)__GFP_MOVABLE,          "GFP_MOVABLE"},         \
+       {(unsigned long)__GFP_NOTRACK,          "GFP_NOTRACK"},         \
+       {(unsigned long)__GFP_NO_KSWAPD,        "GFP_NO_KSWAPD"},       \
+       {(unsigned long)__GFP_OTHER_NODE,       "GFP_OTHER_NODE"}       \
        ) : "GFP_NOWAIT"
 
index bf0c734..32a80e0 100644 (file)
@@ -399,3 +399,15 @@ bool task_ns_capable(struct task_struct *t, int cap)
        return ns_capable(task_cred_xxx(t, user)->user_ns, cap);
 }
 EXPORT_SYMBOL(task_ns_capable);
+
+/**
+ * nsown_capable - Check superior capability to one's own user_ns
+ * @cap: The capability in question
+ *
+ * Return true if the current task has the given superior capability
+ * targeted at its own user namespace.
+ */
+bool nsown_capable(int cap)
+{
+       return ns_capable(current_user_ns(), cap);
+}
index 5557b55..8093c16 100644 (file)
@@ -54,6 +54,7 @@ struct cred init_cred = {
        .cap_effective          = CAP_INIT_EFF_SET,
        .cap_bset               = CAP_INIT_BSET,
        .user                   = INIT_USER,
+       .user_ns                = &init_user_ns,
        .group_info             = &init_groups,
 #ifdef CONFIG_KEYS
        .tgcred                 = &init_tgcred,
@@ -410,6 +411,11 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
                        goto error_put;
        }
 
+       /* cache user_ns in cred.  Doesn't need a refcount because it will
+        * stay pinned by cred->user
+        */
+       new->user_ns = new->user->user_ns;
+
 #ifdef CONFIG_KEYS
        /* new threads get their own thread keyrings if their parent already
         * had one */
@@ -741,12 +747,6 @@ int set_create_files_as(struct cred *new, struct inode *inode)
 }
 EXPORT_SYMBOL(set_create_files_as);
 
-struct user_namespace *current_user_ns(void)
-{
-       return _current_user_ns();
-}
-EXPORT_SYMBOL(current_user_ns);
-
 #ifdef CONFIG_DEBUG_CREDENTIALS
 
 bool creds_are_invalid(const struct cred *cred)
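Read together, the capability.h, cred.h, kernel/capability.c and kernel/cred.c hunks cache the user namespace in struct cred so that current_user_ns() becomes a plain field read instead of an out-of-line helper chasing cred->user->user_ns; nsown_capable() moves the other way, from a header inline to a real function in kernel/capability.c (presumably so capability.h need not pull in the cred definitions the new macro depends on). The moving parts, collated from the hunks — no extra refcount is taken because cred->user already pins the namespace:

        /* copy_creds(): cache the namespace alongside the user */
        new->user_ns = new->user->user_ns;

        /* cred.h: cheap accessor */
        #define current_user_ns() (current_cred_xxx(user_ns))

        /* kernel/capability.c: check a capability against our own namespace */
        bool nsown_capable(int cap)
        {
                return ns_capable(current_user_ns(), cap);
        }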
index 8935369..6275970 100644 (file)
@@ -216,7 +216,6 @@ int suspend_devices_and_enter(suspend_state_t state)
                        goto Close;
        }
        suspend_console();
-       pm_restrict_gfp_mask();
        suspend_test_start();
        error = dpm_suspend_start(PMSG_SUSPEND);
        if (error) {
@@ -233,7 +232,6 @@ int suspend_devices_and_enter(suspend_state_t state)
        suspend_test_start();
        dpm_resume_end(PMSG_RESUME);
        suspend_test_finish("resume devices");
-       pm_restore_gfp_mask();
        resume_console();
  Close:
        if (suspend_ops->end)
@@ -294,7 +292,9 @@ int enter_state(suspend_state_t state)
                goto Finish;
 
        pr_debug("PM: Entering %s sleep\n", pm_states[state]);
+       pm_restrict_gfp_mask();
        error = suspend_devices_and_enter(state);
+       pm_restore_gfp_mask();
 
  Finish:
        pr_debug("PM: Finishing wakeup.\n");
index c36c3b9..7d02d33 100644 (file)
@@ -135,8 +135,10 @@ static int snapshot_release(struct inode *inode, struct file *filp)
        free_basic_memory_bitmaps();
        data = filp->private_data;
        free_all_swap_pages(data->swap);
-       if (data->frozen)
+       if (data->frozen) {
+               pm_restore_gfp_mask();
                thaw_processes();
+       }
        pm_notifier_call_chain(data->mode == O_RDONLY ?
                        PM_POST_HIBERNATION : PM_POST_RESTORE);
        atomic_inc(&snapshot_device_available);
@@ -379,6 +381,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                 * PM_HIBERNATION_PREPARE
                 */
                error = suspend_devices_and_enter(PM_SUSPEND_MEM);
+               data->ready = 0;
                break;
 
        case SNAPSHOT_PLATFORM_SUPPORT:
index bc0ac6b..dfd6019 100644 (file)
@@ -797,7 +797,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
        return string(buf, end, uuid, spec);
 }
 
-int kptr_restrict = 1;
+int kptr_restrict __read_mostly;
 
 /*
  * Show a '%p' thing.  A kernel extension is that the '%p' is followed
index 9f8a97b..570d944 100644 (file)
@@ -2317,6 +2317,21 @@ void free_pages(unsigned long addr, unsigned int order)
 
 EXPORT_SYMBOL(free_pages);
 
+static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
+{
+       if (addr) {
+               unsigned long alloc_end = addr + (PAGE_SIZE << order);
+               unsigned long used = addr + PAGE_ALIGN(size);
+
+               split_page(virt_to_page((void *)addr), order);
+               while (used < alloc_end) {
+                       free_page(used);
+                       used += PAGE_SIZE;
+               }
+       }
+       return (void *)addr;
+}
+
 /**
  * alloc_pages_exact - allocate an exact number physically-contiguous pages.
  * @size: the number of bytes to allocate
@@ -2336,22 +2351,32 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
        unsigned long addr;
 
        addr = __get_free_pages(gfp_mask, order);
-       if (addr) {
-               unsigned long alloc_end = addr + (PAGE_SIZE << order);
-               unsigned long used = addr + PAGE_ALIGN(size);
-
-               split_page(virt_to_page((void *)addr), order);
-               while (used < alloc_end) {
-                       free_page(used);
-                       used += PAGE_SIZE;
-               }
-       }
-
-       return (void *)addr;
+       return make_alloc_exact(addr, order, size);
 }
 EXPORT_SYMBOL(alloc_pages_exact);
 
 /**
+ * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
+ *                        pages on a node.
+ * @size: the number of bytes to allocate
+ * @gfp_mask: GFP flags for the allocation
+ *
+ * Like alloc_pages_exact(), but try to allocate on node nid first before falling
+ * back.
+ * Note this is not alloc_pages_exact_node() which allocates on a specific node,
+ * but is not exact.
+ */
+void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
+{
+       unsigned order = get_order(size);
+       struct page *p = alloc_pages_node(nid, gfp_mask, order);
+       if (!p)
+               return NULL;
+       return make_alloc_exact((unsigned long)page_address(p), order, size);
+}
+EXPORT_SYMBOL(alloc_pages_exact_nid);
+
+/**
  * free_pages_exact - release memory allocated via alloc_pages_exact()
  * @virt: the value returned by alloc_pages_exact.
  * @size: size of allocation, same value as passed to alloc_pages_exact().
@@ -3564,7 +3589,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 
        if (!slab_is_available()) {
                zone->wait_table = (wait_queue_head_t *)
-                       alloc_bootmem_node(pgdat, alloc_size);
+                       alloc_bootmem_node_nopanic(pgdat, alloc_size);
        } else {
                /*
                 * This case means that a zone whose size was 0 gets new memory
@@ -4141,7 +4166,8 @@ static void __init setup_usemap(struct pglist_data *pgdat,
        unsigned long usemapsize = usemap_size(zonesize);
        zone->pageblock_flags = NULL;
        if (usemapsize)
-               zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
+               zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
+                                                                  usemapsize);
 }
 #else
 static inline void setup_usemap(struct pglist_data *pgdat,
@@ -4307,7 +4333,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
                size =  (end - start) * sizeof(struct page);
                map = alloc_remap(pgdat->node_id, size);
                if (!map)
-                       map = alloc_bootmem_node(pgdat, size);
+                       map = alloc_bootmem_node_nopanic(pgdat, size);
                pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
        }
 #ifndef CONFIG_NEED_MULTIPLE_NODES
index 9905501..2daadc3 100644 (file)
@@ -134,7 +134,7 @@ static void *__init_refok alloc_page_cgroup(size_t size, int nid)
 {
        void *addr = NULL;
 
-       addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_NOWARN);
+       addr = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN);
        if (addr)
                return addr;
 
index 8fa27e4..9e755c1 100644 (file)
@@ -852,7 +852,7 @@ static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_
 
 static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
 {
-       struct inode *inode;
+       struct address_space *mapping;
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
@@ -875,8 +875,10 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
        if (size > SHMEM_NR_DIRECT)
                size = SHMEM_NR_DIRECT;
        offset = shmem_find_swp(entry, ptr, ptr+size);
-       if (offset >= 0)
+       if (offset >= 0) {
+               shmem_swp_balance_unmap();
                goto found;
+       }
        if (!info->i_indirect)
                goto lost2;
 
@@ -914,11 +916,11 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        offset = shmem_find_swp(entry, ptr, ptr+size);
-                       shmem_swp_unmap(ptr);
                        if (offset >= 0) {
                                shmem_dir_unmap(dir);
                                goto found;
                        }
+                       shmem_swp_unmap(ptr);
                }
        }
 lost1:
@@ -928,8 +930,7 @@ lost2:
        return 0;
 found:
        idx += offset;
-       inode = igrab(&info->vfs_inode);
-       spin_unlock(&info->lock);
+       ptr += offset;
 
        /*
         * Move _head_ to start search for next from here.
@@ -940,37 +941,18 @@ found:
         */
        if (shmem_swaplist.next != &info->swaplist)
                list_move_tail(&shmem_swaplist, &info->swaplist);
-       mutex_unlock(&shmem_swaplist_mutex);
 
-       error = 1;
-       if (!inode)
-               goto out;
        /*
-        * Charge page using GFP_KERNEL while we can wait.
-        * Charged back to the user(not to caller) when swap account is used.
-        * add_to_page_cache() will be called with GFP_NOWAIT.
+        * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
+        * but also to hold up shmem_evict_inode(): so inode cannot be freed
+        * beneath us (pagelock doesn't help until the page is in pagecache).
         */
-       error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
-       if (error)
-               goto out;
-       error = radix_tree_preload(GFP_KERNEL);
-       if (error) {
-               mem_cgroup_uncharge_cache_page(page);
-               goto out;
-       }
-       error = 1;
-
-       spin_lock(&info->lock);
-       ptr = shmem_swp_entry(info, idx, NULL);
-       if (ptr && ptr->val == entry.val) {
-               error = add_to_page_cache_locked(page, inode->i_mapping,
-                                               idx, GFP_NOWAIT);
-               /* does mem_cgroup_uncharge_cache_page on error */
-       } else  /* we must compensate for our precharge above */
-               mem_cgroup_uncharge_cache_page(page);
+       mapping = info->vfs_inode.i_mapping;
+       error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT);
+       /* which does mem_cgroup_uncharge_cache_page on error */
 
        if (error == -EEXIST) {
-               struct page *filepage = find_get_page(inode->i_mapping, idx);
+               struct page *filepage = find_get_page(mapping, idx);
                error = 1;
                if (filepage) {
                        /*
@@ -990,14 +972,8 @@ found:
                swap_free(entry);
                error = 1;      /* not an error, but entry was found */
        }
-       if (ptr)
-               shmem_swp_unmap(ptr);
+       shmem_swp_unmap(ptr);
        spin_unlock(&info->lock);
-       radix_tree_preload_end();
-out:
-       unlock_page(page);
-       page_cache_release(page);
-       iput(inode);            /* allows for NULL */
        return error;
 }
 
@@ -1009,6 +985,26 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
        struct list_head *p, *next;
        struct shmem_inode_info *info;
        int found = 0;
+       int error;
+
+       /*
+        * Charge page using GFP_KERNEL while we can wait, before taking
+        * the shmem_swaplist_mutex which might hold up shmem_writepage().
+        * Charged back to the user (not to caller) when swap account is used.
+        * add_to_page_cache() will be called with GFP_NOWAIT.
+        */
+       error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
+       if (error)
+               goto out;
+       /*
+        * Try to preload while we can wait, to not make a habit of
+        * draining atomic reserves; but don't latch on to this cpu,
+        * it's okay if sometimes we get rescheduled after this.
+        */
+       error = radix_tree_preload(GFP_KERNEL);
+       if (error)
+               goto uncharge;
+       radix_tree_preload_end();
 
        mutex_lock(&shmem_swaplist_mutex);
        list_for_each_safe(p, next, &shmem_swaplist) {
@@ -1016,17 +1012,19 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
                found = shmem_unuse_inode(info, entry, page);
                cond_resched();
                if (found)
-                       goto out;
+                       break;
        }
        mutex_unlock(&shmem_swaplist_mutex);
-       /*
-        * Can some race bring us here?  We've been holding page lock,
-        * so I think not; but would rather try again later than BUG()
-        */
+
+uncharge:
+       if (!found)
+               mem_cgroup_uncharge_cache_page(page);
+       if (found < 0)
+               error = found;
+out:
        unlock_page(page);
        page_cache_release(page);
-out:
-       return (found < 0) ? found : 0;
+       return error;
 }
 
 /*
@@ -1039,6 +1037,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
        struct address_space *mapping;
        unsigned long index;
        struct inode *inode;
+       bool unlock_mutex = false;
 
        BUG_ON(!PageLocked(page));
        mapping = page->mapping;
@@ -1064,7 +1063,26 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
        else
                swap.val = 0;
 
+       /*
+        * Add inode to shmem_unuse()'s list of swapped-out inodes,
+        * if it's not already there.  Do it now because we cannot take
+        * mutex while holding spinlock, and must do so before the page
+        * is moved to swap cache, when its pagelock no longer protects
+        * the inode from eviction.  But don't unlock the mutex until
+        * we've taken the spinlock, because shmem_unuse_inode() will
+        * prune a !swapped inode from the swaplist under both locks.
+        */
+       if (swap.val && list_empty(&info->swaplist)) {
+               mutex_lock(&shmem_swaplist_mutex);
+               /* move instead of add in case we're racing */
+               list_move_tail(&info->swaplist, &shmem_swaplist);
+               unlock_mutex = true;
+       }
+
        spin_lock(&info->lock);
+       if (unlock_mutex)
+               mutex_unlock(&shmem_swaplist_mutex);
+
        if (index >= info->next_index) {
                BUG_ON(!(info->flags & SHMEM_TRUNCATE));
                goto unlock;
@@ -1084,21 +1102,10 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
                delete_from_page_cache(page);
                shmem_swp_set(info, entry, swap.val);
                shmem_swp_unmap(entry);
-               if (list_empty(&info->swaplist))
-                       inode = igrab(inode);
-               else
-                       inode = NULL;
                spin_unlock(&info->lock);
                swap_shmem_alloc(swap);
                BUG_ON(page_mapped(page));
                swap_writepage(page, wbc);
-               if (inode) {
-                       mutex_lock(&shmem_swaplist_mutex);
-                       /* move instead of add in case we're racing */
-                       list_move_tail(&info->swaplist, &shmem_swaplist);
-                       mutex_unlock(&shmem_swaplist_mutex);
-                       iput(inode);
-               }
                return 0;
        }
 
@@ -1400,20 +1407,14 @@ repeat:
                if (sbinfo->max_blocks) {
                        if (percpu_counter_compare(&sbinfo->used_blocks,
                                                sbinfo->max_blocks) >= 0 ||
-                           shmem_acct_block(info->flags)) {
-                               spin_unlock(&info->lock);
-                               error = -ENOSPC;
-                               goto failed;
-                       }
+                           shmem_acct_block(info->flags))
+                               goto nospace;
                        percpu_counter_inc(&sbinfo->used_blocks);
                        spin_lock(&inode->i_lock);
                        inode->i_blocks += BLOCKS_PER_PAGE;
                        spin_unlock(&inode->i_lock);
-               } else if (shmem_acct_block(info->flags)) {
-                       spin_unlock(&info->lock);
-                       error = -ENOSPC;
-                       goto failed;
-               }
+               } else if (shmem_acct_block(info->flags))
+                       goto nospace;
 
                if (!filepage) {
                        int ret;
@@ -1493,6 +1494,24 @@ done:
        error = 0;
        goto out;
 
+nospace:
+       /*
+        * Perhaps the page was brought in from swap between find_lock_page
+        * and taking info->lock?  We allow for that at add_to_page_cache_lru,
+        * but must also avoid reporting a spurious ENOSPC while working on a
+        * full tmpfs.  (When filepage has been passed in to shmem_getpage, it
+        * is already in page cache, which prevents this race from occurring.)
+        */
+       if (!filepage) {
+               struct page *page = find_get_page(mapping, idx);
+               if (page) {
+                       spin_unlock(&info->lock);
+                       page_cache_release(page);
+                       goto repeat;
+               }
+       }
+       spin_unlock(&info->lock);
+       error = -ENOSPC;
 failed:
        if (*pagep != filepage) {
                unlock_page(filepage);
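
To isolate the lock-ordering idea used in the shmem_writepage() hunk above (take the sleeping swaplist mutex before the spinlock, drop it only once the spinlock is held), here is a generic sketch; every name in it is hypothetical and only illustrates the pattern, it is not shmem code:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_MUTEX(demo_list_mutex);	/* sleeping lock guarding demo_list */
static LIST_HEAD(demo_list);

struct demo_item {
	struct list_head link;
	spinlock_t lock;
	int swapped;
};

static void demo_item_init(struct demo_item *item)
{
	INIT_LIST_HEAD(&item->link);	/* empty but valid, so list_move_tail() is safe */
	spin_lock_init(&item->lock);
	item->swapped = 0;
}

static void demo_mark_swapped(struct demo_item *item)
{
	bool unlock_mutex = false;

	/* The mutex may sleep, so it must be taken before the spinlock. */
	if (list_empty(&item->link)) {
		mutex_lock(&demo_list_mutex);
		/* move rather than add, in case another path raced us onto the list */
		list_move_tail(&item->link, &demo_list);
		unlock_mutex = true;
	}

	spin_lock(&item->lock);
	if (unlock_mutex)
		mutex_unlock(&demo_list_mutex);	/* item is now pinned by its spinlock */

	item->swapped = 1;
	spin_unlock(&item->lock);
}
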
index a448db3..5602f1a 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -396,6 +396,9 @@ static void lru_deactivate_fn(struct page *page, void *arg)
        if (!PageLRU(page))
                return;
 
+       if (PageUnevictable(page))
+               return;
+
        /* Some processes are using the page */
        if (page_mapped(page))
                return;
index b58a501..a873277 100644 (file)
@@ -674,6 +674,7 @@ int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
        }
 
        strcpy(dirent->d_name, nameptr);
+       kfree(nameptr);
 
 out:
        return fake_pdu.offset;
index 94954c7..42fdffd 100644 (file)
@@ -369,15 +369,6 @@ static void __sco_sock_close(struct sock *sk)
 
        case BT_CONNECTED:
        case BT_CONFIG:
-               if (sco_pi(sk)->conn) {
-                       sk->sk_state = BT_DISCONN;
-                       sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
-                       hci_conn_put(sco_pi(sk)->conn->hcon);
-                       sco_pi(sk)->conn = NULL;
-               } else
-                       sco_chan_del(sk, ECONNRESET);
-               break;
-
        case BT_CONNECT:
        case BT_DISCONN:
                sco_chan_del(sk, ECONNRESET);
index f3bc322..74ef4d4 100644 (file)
@@ -737,7 +737,7 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
                nf_bridge->mask |= BRNF_PKT_TYPE;
        }
 
-       if (br_parse_ip_options(skb))
+       if (pf == PF_INET && br_parse_ip_options(skb))
                return NF_DROP;
 
        /* The physdev module checks on this */
index ce4596e..bd1224f 100644 (file)
@@ -237,6 +237,10 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
                                     &local->dynamic_ps_disable_work);
        }
 
+       /* Don't restart the timer if we're not associated */
+       if (!ifmgd->associated)
+               return TX_CONTINUE;
+
        mod_timer(&local->dynamic_ps_timer, jiffies +
                  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
 
index e6e7ce0..7102457 100644 (file)
@@ -1819,8 +1819,6 @@ static int filename_trans_read(struct policydb *p, void *fp)
                goto out;
        nel = le32_to_cpu(buf[0]);
 
-       printk(KERN_ERR "%s: nel=%d\n", __func__, nel);
-
        last = p->filename_trans;
        while (last && last->next)
                last = last->next;
@@ -1857,8 +1855,6 @@ static int filename_trans_read(struct policydb *p, void *fp)
                        goto out;
                name[len] = 0;
 
-               printk(KERN_ERR "%s: ft=%p ft->name=%p ft->name=%s\n", __func__, ft, ft->name, ft->name);
-
                rc = next_entry(buf, fp, sizeof(u32) * 4);
                if (rc)
                        goto out;
index 2727bef..b04d280 100644 (file)
@@ -139,7 +139,7 @@ SOC_DOUBLE_R("Capture Volume", SSM2602_LINVOL, SSM2602_RINVOL, 0, 31, 0),
 SOC_DOUBLE_R("Capture Switch", SSM2602_LINVOL, SSM2602_RINVOL, 7, 1, 1),
 
 SOC_SINGLE("Mic Boost (+20dB)", SSM2602_APANA, 0, 1, 0),
-SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 7, 1, 0),
+SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 8, 1, 0),
 SOC_SINGLE("Mic Switch", SSM2602_APANA, 1, 1, 1),
 
 SOC_SINGLE("Sidetone Playback Volume", SSM2602_APANA, 6, 3, 1),
@@ -602,7 +602,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = {
        .read = ssm2602_read_reg_cache,
        .write = ssm2602_write,
        .set_bias_level = ssm2602_set_bias_level,
-       .reg_cache_size = sizeof(ssm2602_reg),
+       .reg_cache_size = ARRAY_SIZE(ssm2602_reg),
        .reg_word_size = sizeof(u16),
        .reg_cache_default = ssm2602_reg,
 };
@@ -614,7 +614,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = {
  *    low  = 0x1a
  *    high = 0x1b
  */
-static int ssm2602_i2c_probe(struct i2c_client *i2c,
+static int __devinit ssm2602_i2c_probe(struct i2c_client *i2c,
                             const struct i2c_device_id *id)
 {
        struct ssm2602_priv *ssm2602;
@@ -635,7 +635,7 @@ static int ssm2602_i2c_probe(struct i2c_client *i2c,
        return ret;
 }
 
-static int ssm2602_i2c_remove(struct i2c_client *client)
+static int __devexit ssm2602_i2c_remove(struct i2c_client *client)
 {
        snd_soc_unregister_codec(&client->dev);
        kfree(i2c_get_clientdata(client));
@@ -655,7 +655,7 @@ static struct i2c_driver ssm2602_i2c_driver = {
                .owner = THIS_MODULE,
        },
        .probe = ssm2602_i2c_probe,
-       .remove = ssm2602_i2c_remove,
+       .remove = __devexit_p(ssm2602_i2c_remove),
        .id_table = ssm2602_i2c_id,
 };
 #endif
index 48ffd40..a7b8f30 100644 (file)
@@ -601,9 +601,7 @@ static struct snd_soc_codec_driver soc_codec_dev_uda134x = {
        .reg_cache_step = 1,
        .read = uda134x_read_reg_cache,
        .write = uda134x_write,
-#ifdef POWER_OFF_ON_STANDBY
        .set_bias_level = uda134x_set_bias_level,
-#endif
 };
 
 static int __devinit uda134x_codec_probe(struct platform_device *pdev)
index f52b623..824d1c8 100644 (file)
@@ -692,7 +692,7 @@ SOC_ENUM("DRC Smoothing Threshold", drc_smoothing),
 SOC_SINGLE_TLV("DRC Startup Volume", WM8903_DRC_0, 6, 18, 0, drc_tlv_startup),
 
 SOC_DOUBLE_R_TLV("Digital Capture Volume", WM8903_ADC_DIGITAL_VOLUME_LEFT,
-                WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 96, 0, digital_tlv),
+                WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 120, 0, digital_tlv),
 SOC_ENUM("ADC Companding Mode", adc_companding),
 SOC_SINGLE("ADC Companding Switch", WM8903_AUDIO_INTERFACE_0, 3, 1, 0),
 
index 419bf4f..cd22a54 100644 (file)
@@ -133,7 +133,7 @@ static void jz4740_i2s_shutdown(struct snd_pcm_substream *substream,
        struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
        uint32_t conf;
 
-       if (!dai->active)
+       if (dai->active)
                return;
 
        conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
index d567c32..6b1f9d3 100644 (file)
@@ -376,6 +376,11 @@ static int sst_platform_pcm_hw_params(struct snd_pcm_substream *substream,
        return 0;
 }
 
+static int sst_platform_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+       return snd_pcm_lib_free_pages(substream);
+}
+
 static struct snd_pcm_ops sst_platform_ops = {
        .open = sst_platform_open,
        .close = sst_platform_close,
@@ -384,6 +389,7 @@ static struct snd_pcm_ops sst_platform_ops = {
        .trigger = sst_platform_pcm_trigger,
        .pointer = sst_platform_pcm_pointer,
        .hw_params = sst_platform_pcm_hw_params,
+       .hw_free = sst_platform_pcm_hw_free,
 };
 
 static void sst_pcm_free(struct snd_pcm *pcm)
index d8562ce..dd55d10 100644 (file)
@@ -3291,6 +3291,8 @@ int snd_soc_register_card(struct snd_soc_card *card)
        if (!card->name || !card->dev)
                return -EINVAL;
 
+       dev_set_drvdata(card->dev, card);
+
        snd_soc_initialize_card_lists(card);
 
        soc_init_card_debugfs(card);