Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 3 Jan 2013 19:41:43 +0000 (11:41 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 3 Jan 2013 19:41:43 +0000 (11:41 -0800)
Pull f2fs bug fixes from Jaegeuk Kim:
 "This patch-set includes two major bug fixes:
   - incorrect IUsed provided by *df -i*, and
   - lookup failure of parent inodes in corner cases.

  [Other Bug Fixes]
   - Fix error handling routines
   - Trigger recovery process correctly
   - Resolve build failures due to missing header files

  [Etc]
   - Add a MAINTAINERS entry for f2fs
   - Fix and clean up variables, functions, and equations
   - Avoid warnings during compilation"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs:
  f2fs: unify string length declarations and usage
  f2fs: clean up unused variables and return values
  f2fs: clean up the start_bidx_of_node function
  f2fs: remove unneeded variable from f2fs_sync_fs
  f2fs: fix fsync_inode list addition logic and avoid invalid access to memory
  f2fs: remove unneeded initialization of nr_dirty in dirty_seglist_info
  f2fs: handle error from f2fs_iget_nowait
  f2fs: fix equation of has_not_enough_free_secs()
  f2fs: add MAINTAINERS entry
  f2fs: return a default value for non-void function
  f2fs: invalidate the node page if allocation is failed
  f2fs: add missing #include <linux/prefetch.h>
  f2fs: do f2fs_balance_fs in front of dir operations
  f2fs: should recover orphan and fsync data
  f2fs: fix handling errors got by f2fs_write_inode
  f2fs: fix up f2fs_get_parent issue to retrieve correct parent inode number
  f2fs: fix wrong calculation on f_files in statfs
  f2fs: remove set_page_dirty for atomic f2fs_end_io_write
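
As background for the statfs-related fixes above ("incorrect IUsed provided by df -i" and "f2fs: fix wrong calculation on f_files in statfs"): df -i derives its IUsed column from the inode counters returned by statfs(2), so a wrong f_files value reported by the filesystem shows up directly as a bogus IUsed. The following is only a minimal user-space sketch of that calculation, not code from this series; the mount point path is hypothetical.

/* Sketch: how a df -i style IUsed figure is derived from statfs(2). */
#include <stdio.h>
#include <sys/vfs.h>

int main(void)
{
	struct statfs st;

	/* "/mnt/f2fs" is a placeholder mount point. */
	if (statfs("/mnt/f2fs", &st) != 0) {
		perror("statfs");
		return 1;
	}

	/* IUsed = total inodes (f_files) - free inodes (f_ffree) */
	printf("Inodes: %llu  IUsed: %llu  IFree: %llu\n",
	       (unsigned long long)st.f_files,
	       (unsigned long long)(st.f_files - st.f_ffree),
	       (unsigned long long)st.f_ffree);
	return 0;
}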

151 files changed:
Documentation/devicetree/bindings/watchdog/twl4030-wdt.txt [new file with mode: 0644]
MAINTAINERS
Makefile
arch/arm/boot/dts/twl4030.dtsi
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/mach-omap1/Makefile
arch/arm/mach-omap1/fb.c [new file with mode: 0644]
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/control.h
arch/arm/mach-omap2/dpll3xxx.c
arch/arm/mach-omap2/drm.c
arch/arm/mach-omap2/dss-common.c
arch/arm/mach-omap2/fb.c [moved from arch/arm/plat-omap/fb.c with 76% similarity]
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/omap_twl.c
arch/arm/mach-omap2/pmu.c
arch/arm/mach-omap2/prm2xxx.c
arch/arm/mach-omap2/prm3xxx.c
arch/arm/mach-sunxi/sunxi.c
arch/arm/plat-omap/Makefile
arch/arm/plat-omap/dmtimer.c
arch/arm/plat-omap/include/plat/cpu.h
arch/ia64/include/asm/unistd.h
arch/ia64/include/uapi/asm/unistd.h
arch/ia64/kernel/entry.S
arch/powerpc/kernel/time.c
arch/powerpc/platforms/40x/ppc40x_simple.c
arch/x86/pci/common.c
drivers/atm/solos-pci.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
drivers/gpu/drm/nouveau/core/include/subdev/bios.h
drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
drivers/gpu/drm/nouveau/core/subdev/bios/base.c
drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
drivers/gpu/drm/nouveau/core/subdev/bios/init.c
drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/drm.h
drivers/gpu/drm/tegra/hdmi.c
drivers/gpu/drm/tegra/host1x.c
drivers/hwmon/emc6w201.c
drivers/hwmon/lm73.c
drivers/leds/leds-gpio.c
drivers/media/platform/omap3isp/isp.c
drivers/net/ethernet/marvell/mvmdio.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/ti/cpts.c
drivers/net/ethernet/ti/cpts.h
drivers/net/tun.c
drivers/net/vxlan.c
drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
drivers/pci/pci-sysfs.c
drivers/pci/pcie/portdrv_pci.c
drivers/pci/quirks.c
drivers/power/avs/smartreflex.c
drivers/watchdog/da9055_wdt.c
drivers/watchdog/omap_wdt.c
drivers/watchdog/twl4030_wdt.c
fs/ecryptfs/crypto.c
fs/ecryptfs/kthread.c
fs/ecryptfs/mmap.c
fs/eventpoll.c
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/fsync.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/ext4/super.c
fs/f2fs/acl.c
fs/gfs2/lock_dlm.c
fs/gfs2/rgrp.c
fs/jbd2/transaction.c
fs/proc/generic.c
fs/proc/task_mmu.c
include/Kbuild
include/drm/drm_mm.h
include/linux/Kbuild [deleted file]
include/linux/hdlc/Kbuild [deleted file]
include/linux/hsi/Kbuild [deleted file]
include/linux/jbd2.h
include/linux/mempolicy.h
include/linux/netdevice.h
include/linux/page-flags.h
include/linux/pci_ids.h
include/linux/pid.h
include/linux/pid_namespace.h
include/linux/raid/Kbuild [deleted file]
include/linux/usb/Kbuild [deleted file]
include/net/sock.h
include/rdma/Kbuild [deleted file]
include/sound/Kbuild [deleted file]
include/trace/events/ext4.h
include/uapi/drm/i915_drm.h
include/uapi/linux/pci_regs.h
kernel/fork.c
kernel/pid.c
kernel/pid_namespace.c
mm/mempolicy.c
mm/shmem.c
mm/vmscan.c
net/batman-adv/bat_iv_ogm.c
net/bridge/br_if.c
net/ceph/messenger.c
net/ceph/osd_client.c
net/core/dev.c
net/core/net-sysfs.c
net/core/sock.c
net/ipv4/arp.c
net/ipv4/ip_gre.c
net/ipv4/tcp_input.c
net/ipv6/ip6_gre.c
net/rds/ib_cm.c
net/rds/ib_recv.c
net/sched/sch_htb.c
net/wireless/reg.c
net/wireless/sysfs.c
scripts/headers_install.pl

diff --git a/Documentation/devicetree/bindings/watchdog/twl4030-wdt.txt b/Documentation/devicetree/bindings/watchdog/twl4030-wdt.txt
new file mode 100644 (file)
index 0000000..80a3719
--- /dev/null
@@ -0,0 +1,10 @@
+Device tree bindings for twl4030-wdt driver (TWL4030 watchdog)
+
+Required properties:
+       compatible = "ti,twl4030-wdt";
+
+Example:
+
+watchdog {
+       compatible = "ti,twl4030-wdt";
+};
index 599934c..7a58a25 100644 (file)
@@ -5395,6 +5395,15 @@ F:       arch/arm/*omap*/
 F:     drivers/i2c/busses/i2c-omap.c
 F:     include/linux/i2c-omap.h
 
+OMAP DEVICE TREE SUPPORT
+M:     Benoît Cousson <b-cousson@ti.com>
+M:     Tony Lindgren <tony@atomide.com>
+L:     linux-omap@vger.kernel.org
+L:     devicetree-discuss@lists.ozlabs.org (moderated for non-subscribers)
+S:     Maintained
+F:     arch/arm/boot/dts/*omap*
+F:     arch/arm/boot/dts/*am3*
+
 OMAP CLOCK FRAMEWORK SUPPORT
 M:     Paul Walmsley <paul@pwsan.com>
 L:     linux-omap@vger.kernel.org
index 275b956..80c5694 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Terrified Chipmunk
 
 # *DOCUMENTATION*
index 63411b0..ed0bc95 100644 (file)
                interrupts = <11>;
        };
 
+       watchdog {
+               compatible = "ti,twl4030-wdt";
+       };
+
        vdac: regulator-vdac {
                compatible = "ti,twl4030-vdac";
                regulator-min-microvolt = <1800000>;
index dbea6f4..2eeff1e 100644 (file)
@@ -6,6 +6,7 @@ CONFIG_MACH_ARMADA_370=y
 CONFIG_MACH_ARMADA_XP=y
 CONFIG_ARCH_HIGHBANK=y
 CONFIG_ARCH_SOCFPGA=y
+CONFIG_ARCH_SUNXI=y
 # CONFIG_ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA is not set
 CONFIG_ARM_ERRATA_754322=y
 CONFIG_SMP=y
index a1dc5c0..82ce8d7 100644 (file)
@@ -65,6 +65,8 @@ CONFIG_MAC80211_RC_PID=y
 CONFIG_MAC80211_RC_DEFAULT_PID=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_CONNECTOR=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
@@ -132,9 +134,11 @@ CONFIG_POWER_SUPPLY=y
 CONFIG_WATCHDOG=y
 CONFIG_OMAP_WATCHDOG=y
 CONFIG_TWL4030_WATCHDOG=y
+CONFIG_MFD_TPS65217=y
 CONFIG_REGULATOR_TWL4030=y
 CONFIG_REGULATOR_TPS65023=y
 CONFIG_REGULATOR_TPS6507X=y
+CONFIG_REGULATOR_TPS65217=y
 CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
 CONFIG_FB_MODE_HELPERS=y
@@ -170,6 +174,7 @@ CONFIG_SND_DEBUG=y
 CONFIG_SND_USB_AUDIO=m
 CONFIG_SND_SOC=m
 CONFIG_SND_OMAP_SOC=m
+CONFIG_SND_OMAP_SOC_OMAP_TWL4030=m
 CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m
 CONFIG_USB=y
 CONFIG_USB_DEBUG=y
index f0e69cb..222d58c 100644 (file)
@@ -4,7 +4,7 @@
 
 # Common support
 obj-y := io.o id.o sram-init.o sram.o time.o irq.o mux.o flash.o \
-        serial.o devices.o dma.o
+        serial.o devices.o dma.o fb.o
 obj-y += clock.o clock_data.o opp_data.o reset.o pm_bus.o timer.o
 
 ifneq ($(CONFIG_SND_OMAP_SOC_MCBSP),)
diff --git a/arch/arm/mach-omap1/fb.c b/arch/arm/mach-omap1/fb.c
new file mode 100644 (file)
index 0000000..c770d45
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * File: arch/arm/plat-omap/fb.c
+ *
+ * Framebuffer device registration for TI OMAP platforms
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/memblock.h>
+#include <linux/io.h>
+#include <linux/omapfb.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/mach/map.h>
+
+#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)
+
+static bool omapfb_lcd_configured;
+static struct omapfb_platform_data omapfb_config;
+
+static u64 omap_fb_dma_mask = ~(u32)0;
+
+static struct platform_device omap_fb_device = {
+       .name           = "omapfb",
+       .id             = -1,
+       .dev = {
+               .dma_mask               = &omap_fb_dma_mask,
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
+               .platform_data          = &omapfb_config,
+       },
+       .num_resources = 0,
+};
+
+void __init omapfb_set_lcd_config(const struct omap_lcd_config *config)
+{
+       omapfb_config.lcd = *config;
+       omapfb_lcd_configured = true;
+}
+
+static int __init omap_init_fb(void)
+{
+       /*
+        * If the board file has not set the lcd config with
+        * omapfb_set_lcd_config(), don't bother registering the omapfb device
+        */
+       if (!omapfb_lcd_configured)
+               return 0;
+
+       return platform_device_register(&omap_fb_device);
+}
+
+arch_initcall(omap_init_fb);
+
+#else
+
+void __init omapfb_set_lcd_config(const struct omap_lcd_config *config)
+{
+}
+
+#endif
index a8004f3..947cafe 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 # Common support
-obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer.o pm.o \
+obj-y := id.o io.o control.o mux.o devices.o fb.o serial.o gpmc.o timer.o pm.o \
         common.o gpio.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \
         omap_device.o sram.o
 
index 3d944d3..e6c3281 100644 (file)
 #define OMAP343X_PADCONF_ETK_D14       OMAP343X_PADCONF_ETK(16)
 #define OMAP343X_PADCONF_ETK_D15       OMAP343X_PADCONF_ETK(17)
 
-/* 34xx GENERAL_WKUP regist offsets */
+/* 34xx GENERAL_WKUP register offsets */
 #define OMAP343X_CONTROL_WKUP_DEBOBSMUX(i) (OMAP343X_CONTROL_GENERAL_WKUP + \
                                                0x008 + (i))
 #define OMAP343X_CONTROL_WKUP_DEBOBS0 (OMAP343X_CONTROL_GENERAL_WKUP + 0x008)
index 2bb1883..0a02aab 100644 (file)
@@ -504,8 +504,7 @@ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
                if (!cpu_is_omap44xx() && !cpu_is_omap3630()) {
                        freqsel = _omap3_dpll_compute_freqsel(clk,
                                                dd->last_rounded_n);
-                       if (!freqsel)
-                               WARN_ON(1);
+                       WARN_ON(!freqsel);
                }
 
                pr_debug("%s: %s: set rate: locking rate to %lu.\n",
index fce5aa3..4c7566c 100644 (file)
@@ -27,7 +27,6 @@
 
 #include "omap_device.h"
 #include "omap_hwmod.h"
-#include <plat/cpu.h>
 
 #if defined(CONFIG_DRM_OMAP) || (CONFIG_DRM_OMAP_MODULE)
 
index 679a047..4be5cfc 100644 (file)
@@ -31,8 +31,7 @@
 #include <video/omap-panel-nokia-dsi.h>
 #include <video/omap-panel-picodlp.h>
 
-#include <plat/cpu.h>
-
+#include "soc.h"
 #include "dss-common.h"
 #include "mux.h"
 
similarity index 76%
rename from arch/arm/plat-omap/fb.c
rename to arch/arm/mach-omap2/fb.c
index a3367b7..d9bd965 100644 (file)
@@ -1,6 +1,4 @@
 /*
- * File: arch/arm/plat-omap/fb.c
- *
  * Framebuffer device registration for TI OMAP platforms
  *
  * Copyright (C) 2006 Nokia Corporation
@@ -33,7 +31,7 @@
 
 #include <asm/mach/map.h>
 
-#include <plat/cpu.h>
+#include "soc.h"
 
 #ifdef CONFIG_OMAP2_VRFB
 
@@ -94,45 +92,7 @@ static int __init omap_init_vrfb(void)
 arch_initcall(omap_init_vrfb);
 #endif
 
-#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)
-
-static bool omapfb_lcd_configured;
-static struct omapfb_platform_data omapfb_config;
-
-static u64 omap_fb_dma_mask = ~(u32)0;
-
-static struct platform_device omap_fb_device = {
-       .name           = "omapfb",
-       .id             = -1,
-       .dev = {
-               .dma_mask               = &omap_fb_dma_mask,
-               .coherent_dma_mask      = DMA_BIT_MASK(32),
-               .platform_data          = &omapfb_config,
-       },
-       .num_resources = 0,
-};
-
-void __init omapfb_set_lcd_config(const struct omap_lcd_config *config)
-{
-       omapfb_config.lcd = *config;
-       omapfb_lcd_configured = true;
-}
-
-static int __init omap_init_fb(void)
-{
-       /*
-        * If the board file has not set the lcd config with
-        * omapfb_set_lcd_config(), don't bother registering the omapfb device
-        */
-       if (!omapfb_lcd_configured)
-               return 0;
-
-       return platform_device_register(&omap_fb_device);
-}
-
-arch_initcall(omap_init_fb);
-
-#elif defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
 
 static u64 omap_fb_dma_mask = ~(u32)0;
 static struct omapfb_platform_data omapfb_config;
@@ -155,10 +115,4 @@ static int __init omap_init_fb(void)
 
 arch_initcall(omap_init_fb);
 
-#else
-
-void __init omapfb_set_lcd_config(const struct omap_lcd_config *config)
-{
-}
-
 #endif
index f9fab94..129d508 100644 (file)
@@ -26,7 +26,6 @@
 
 #include <linux/omap-dma.h>
 
-#include <linux/platform_data/omap_ocp2scp.h>
 #include <linux/platform_data/spi-omap2-mcspi.h>
 #include <linux/platform_data/asoc-ti-mcbsp.h>
 #include <linux/platform_data/iommu-omap.h>
index fefd401..615e5b1 100644 (file)
@@ -292,8 +292,8 @@ int __init omap3_twl_set_sr_bit(bool enable)
        if (twl_sr_enable_autoinit)
                pr_warning("%s: unexpected multiple calls\n", __func__);
 
-       ret = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &temp,
-                                       TWL4030_DCDC_GLOBAL_CFG);
+       ret = twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &temp,
+                             TWL4030_DCDC_GLOBAL_CFG);
        if (ret)
                goto err;
 
@@ -302,8 +302,8 @@ int __init omap3_twl_set_sr_bit(bool enable)
        else
                temp &= ~SMARTREFLEX_ENABLE;
 
-       ret = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, temp,
-                               TWL4030_DCDC_GLOBAL_CFG);
+       ret = twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, temp,
+                              TWL4030_DCDC_GLOBAL_CFG);
        if (!ret) {
                twl_sr_enable_autoinit = true;
                return 0;
index 250d909..eb78ae7 100644 (file)
@@ -11,8 +11,6 @@
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
  */
-#include <linux/pm_runtime.h>
-
 #include <asm/pmu.h>
 
 #include "soc.h"
index faeab18..cc0e714 100644 (file)
@@ -18,9 +18,8 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 
+#include "soc.h"
 #include "common.h"
-#include <plat/cpu.h>
-
 #include "vp.h"
 #include "powerdomain.h"
 #include "clockdomain.h"
index db198d0..39822aa 100644 (file)
@@ -18,9 +18,8 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 
+#include "soc.h"
 #include "common.h"
-#include <plat/cpu.h>
-
 #include "vp.h"
 #include "powerdomain.h"
 #include "prm3xxx.h"
index 9be910f..1dc8a92 100644 (file)
@@ -80,8 +80,8 @@ static void __init sunxi_dt_init(void)
 }
 
 static const char * const sunxi_board_dt_compat[] = {
-       "allwinner,sun4i",
-       "allwinner,sun5i",
+       "allwinner,sun4i-a10",
+       "allwinner,sun5i-a13",
        NULL,
 };
 
index 9d9aa2f..a14a78a 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 # Common support
-obj-y := sram.o dma.o fb.o counter_32k.o
+obj-y := sram.o dma.o counter_32k.o
 obj-m :=
 obj-n :=
 obj-  :=
index 89585c2..d51b75b 100644 (file)
@@ -898,19 +898,8 @@ static struct platform_driver omap_dm_timer_driver = {
        },
 };
 
-static int __init omap_dm_timer_driver_init(void)
-{
-       return platform_driver_register(&omap_dm_timer_driver);
-}
-
-static void __exit omap_dm_timer_driver_exit(void)
-{
-       platform_driver_unregister(&omap_dm_timer_driver);
-}
-
 early_platform_init("earlytimer", &omap_dm_timer_driver);
-module_init(omap_dm_timer_driver_init);
-module_exit(omap_dm_timer_driver_exit);
+module_platform_driver(omap_dm_timer_driver);
 
 MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
 MODULE_LICENSE("GPL");
index b4516ab..c9a66bf 100644 (file)
@@ -32,8 +32,4 @@
 #include <mach/soc.h>
 #endif
 
-#ifdef CONFIG_ARCH_OMAP2PLUS
-#include "../../mach-omap2/soc.h"
-#endif
-
 #endif
index 8b3ff2f..c3cc42a 100644 (file)
@@ -11,7 +11,7 @@
 
 
 
-#define NR_syscalls                    311 /* length of syscall table */
+#define NR_syscalls                    312 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
index b706aa5..34fd6fe 100644 (file)
 #define __NR_process_vm_readv          1332
 #define __NR_process_vm_writev         1333
 #define __NR_accept4                   1334
+#define __NR_finit_module              1335
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
index e25b784..6bfd842 100644 (file)
@@ -1785,6 +1785,7 @@ sys_call_table:
        data8 sys_process_vm_readv
        data8 sys_process_vm_writev
        data8 sys_accept4
+       data8 sys_finit_module                  // 1335
 
        .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
index b3b1435..6f6b1cc 100644 (file)
@@ -770,13 +770,8 @@ void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
 
 void update_vsyscall_tz(void)
 {
-       /* Make userspace gettimeofday spin until we're done. */
-       ++vdso_data->tb_update_count;
-       smp_mb();
        vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
        vdso_data->tz_dsttime = sys_tz.tz_dsttime;
-       smp_mb();
-       ++vdso_data->tb_update_count;
 }
 
 static void __init clocksource_init(void)
index 969dddc..8f3920e 100644 (file)
@@ -57,7 +57,8 @@ static const char * const board[] __initconst = {
        "amcc,makalu",
        "apm,klondike",
        "est,hotfoot",
-       "plathome,obs600"
+       "plathome,obs600",
+       NULL
 };
 
 static int __init ppc40x_probe(void)
index 1b1dda9..412e128 100644 (file)
@@ -434,7 +434,8 @@ static const struct dmi_system_id __devinitconst pciprobe_dmi_table[] = {
                .callback = set_scan_all,
                .ident = "Stratus/NEC ftServer",
                .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "ftServer"),
+                       DMI_MATCH(DMI_SYS_VENDOR, "Stratus"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"),
                },
        },
        {}
index d70abe7..d47db40 100644 (file)
@@ -538,7 +538,7 @@ static ssize_t geos_gpio_store(struct device *dev, struct device_attribute *attr
        } else {
                count = -EINVAL;
        }
-       spin_lock_irq(&card->param_queue_lock);
+       spin_unlock_irq(&card->param_queue_lock);
        return count;
 }
 
index 0761a03..2bf9670 100644 (file)
@@ -184,19 +184,27 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
  * -ENOSPC if no suitable free area is available. The preallocated memory node
  * must be cleared.
  */
-int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
-                      unsigned long size, unsigned alignment)
+int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
+                              unsigned long size, unsigned alignment,
+                              unsigned long color)
 {
        struct drm_mm_node *hole_node;
 
-       hole_node = drm_mm_search_free(mm, size, alignment, false);
+       hole_node = drm_mm_search_free_generic(mm, size, alignment,
+                                              color, 0);
        if (!hole_node)
                return -ENOSPC;
 
-       drm_mm_insert_helper(hole_node, node, size, alignment, 0);
-
+       drm_mm_insert_helper(hole_node, node, size, alignment, color);
        return 0;
 }
+EXPORT_SYMBOL(drm_mm_insert_node_generic);
+
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+                      unsigned long size, unsigned alignment)
+{
+       return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
+}
 EXPORT_SYMBOL(drm_mm_insert_node);
 
 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
@@ -275,22 +283,31 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
  * -ENOSPC if no suitable free area is available. This is for range
  * restricted allocations. The preallocated memory node must be cleared.
  */
-int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
-                               unsigned long size, unsigned alignment,
-                               unsigned long start, unsigned long end)
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
+                                       unsigned long size, unsigned alignment, unsigned long color,
+                                       unsigned long start, unsigned long end)
 {
        struct drm_mm_node *hole_node;
 
-       hole_node = drm_mm_search_free_in_range(mm, size, alignment,
-                                               start, end, false);
+       hole_node = drm_mm_search_free_in_range_generic(mm,
+                                                       size, alignment, color,
+                                                       start, end, 0);
        if (!hole_node)
                return -ENOSPC;
 
-       drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
+       drm_mm_insert_helper_range(hole_node, node,
+                                  size, alignment, color,
                                   start, end);
-
        return 0;
 }
+EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
+
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+                               unsigned long size, unsigned alignment,
+                               unsigned long start, unsigned long end)
+{
+       return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
+}
 EXPORT_SYMBOL(drm_mm_insert_node_in_range);
 
 /**
index 8f63cd5..99daa89 100644 (file)
@@ -989,6 +989,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_HAS_SECURE_BATCHES:
                value = capable(CAP_SYS_ADMIN);
                break;
+       case I915_PARAM_HAS_PINNED_BATCHES:
+               value = 1;
+               break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
index 557843d..ed30595 100644 (file)
@@ -780,6 +780,7 @@ typedef struct drm_i915_private {
                struct i915_hw_ppgtt *aliasing_ppgtt;
 
                struct shrinker inactive_shrinker;
+               bool shrinker_no_lock_stealing;
 
                /**
                 * List of objects currently involved in rendering.
@@ -1100,6 +1101,7 @@ struct drm_i915_gem_object {
         */
        atomic_t pending_flip;
 };
+#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
 
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
@@ -1166,6 +1168,9 @@ struct drm_i915_file_private {
 #define IS_IVB_GT1(dev)                ((dev)->pci_device == 0x0156 || \
                                 (dev)->pci_device == 0x0152 || \
                                 (dev)->pci_device == 0x015a)
+#define IS_SNB_GT1(dev)                ((dev)->pci_device == 0x0102 || \
+                                (dev)->pci_device == 0x0106 || \
+                                (dev)->pci_device == 0x010A)
 #define IS_VALLEYVIEW(dev)     (INTEL_INFO(dev)->is_valleyview)
 #define IS_HASWELL(dev)        (INTEL_INFO(dev)->is_haswell)
 #define IS_MOBILE(dev)         (INTEL_INFO(dev)->is_mobile)
@@ -1196,6 +1201,9 @@ struct drm_i915_file_private {
 #define HAS_OVERLAY(dev)               (INTEL_INFO(dev)->has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev)    (INTEL_INFO(dev)->overlay_needs_physical)
 
+/* Early gen2 have a totally busted CS tlb and require pinned batches. */
+#define HAS_BROKEN_CS_TLB(dev)         (IS_I830(dev) || IS_845G(dev))
+
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
index 742206e..da3c82e 100644 (file)
@@ -1517,9 +1517,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
        if (obj->base.map_list.map)
                return 0;
 
+       dev_priv->mm.shrinker_no_lock_stealing = true;
+
        ret = drm_gem_create_mmap_offset(&obj->base);
        if (ret != -ENOSPC)
-               return ret;
+               goto out;
 
        /* Badly fragmented mmap space? The only way we can recover
         * space is by destroying unwanted objects. We can't randomly release
@@ -1531,10 +1533,14 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
        i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
        ret = drm_gem_create_mmap_offset(&obj->base);
        if (ret != -ENOSPC)
-               return ret;
+               goto out;
 
        i915_gem_shrink_all(dev_priv);
-       return drm_gem_create_mmap_offset(&obj->base);
+       ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+       dev_priv->mm.shrinker_no_lock_stealing = false;
+
+       return ret;
 }
 
 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -2890,7 +2896,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_mm_node *free_space;
+       struct drm_mm_node *node;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
        bool mappable, fenceable;
        int ret;
@@ -2936,66 +2942,54 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
        i915_gem_object_pin_pages(obj);
 
+       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       if (node == NULL) {
+               i915_gem_object_unpin_pages(obj);
+               return -ENOMEM;
+       }
+
  search_free:
        if (map_and_fenceable)
-               free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
-                                                              size, alignment, obj->cache_level,
-                                                              0, dev_priv->mm.gtt_mappable_end,
-                                                              false);
+               ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+                                                         size, alignment, obj->cache_level,
+                                                         0, dev_priv->mm.gtt_mappable_end);
        else
-               free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
-                                                     size, alignment, obj->cache_level,
-                                                     false);
-
-       if (free_space != NULL) {
-               if (map_and_fenceable)
-                       free_space =
-                               drm_mm_get_block_range_generic(free_space,
-                                                              size, alignment, obj->cache_level,
-                                                              0, dev_priv->mm.gtt_mappable_end,
-                                                              false);
-               else
-                       free_space =
-                               drm_mm_get_block_generic(free_space,
-                                                        size, alignment, obj->cache_level,
-                                                        false);
-       }
-       if (free_space == NULL) {
+               ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
+                                                size, alignment, obj->cache_level);
+       if (ret) {
                ret = i915_gem_evict_something(dev, size, alignment,
                                               obj->cache_level,
                                               map_and_fenceable,
                                               nonblocking);
-               if (ret) {
-                       i915_gem_object_unpin_pages(obj);
-                       return ret;
-               }
+               if (ret == 0)
+                       goto search_free;
 
-               goto search_free;
+               i915_gem_object_unpin_pages(obj);
+               kfree(node);
+               return ret;
        }
-       if (WARN_ON(!i915_gem_valid_gtt_space(dev,
-                                             free_space,
-                                             obj->cache_level))) {
+       if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
                i915_gem_object_unpin_pages(obj);
-               drm_mm_put_block(free_space);
+               drm_mm_put_block(node);
                return -EINVAL;
        }
 
        ret = i915_gem_gtt_prepare_object(obj);
        if (ret) {
                i915_gem_object_unpin_pages(obj);
-               drm_mm_put_block(free_space);
+               drm_mm_put_block(node);
                return ret;
        }
 
        list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
        list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-       obj->gtt_space = free_space;
-       obj->gtt_offset = free_space->start;
+       obj->gtt_space = node;
+       obj->gtt_offset = node->start;
 
        fenceable =
-               free_space->size == fence_size &&
-               (free_space->start & (fence_alignment - 1)) == 0;
+               node->size == fence_size &&
+               (node->start & (fence_alignment - 1)) == 0;
 
        mappable =
                obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
@@ -4392,6 +4386,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
                        return 0;
 
+               if (dev_priv->mm.shrinker_no_lock_stealing)
+                       return 0;
+
                unlock = false;
        }
 
index 773ef77..7be4241 100644 (file)
@@ -226,7 +226,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 {
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 
-       return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
+       return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
index ee8f97f..d6a994a 100644 (file)
@@ -808,6 +808,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
                flags |= I915_DISPATCH_SECURE;
        }
+       if (args->flags & I915_EXEC_IS_PINNED)
+               flags |= I915_DISPATCH_PINNED;
 
        switch (args->flags & I915_EXEC_RING_MASK) {
        case I915_EXEC_DEFAULT:
index a4dc97f..2220dec 100644 (file)
@@ -1087,6 +1087,18 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
        if (!ring->get_seqno)
                return NULL;
 
+       if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
+               u32 acthd = I915_READ(ACTHD);
+
+               if (WARN_ON(ring->id != RCS))
+                       return NULL;
+
+               obj = ring->private;
+               if (acthd >= obj->gtt_offset &&
+                   acthd < obj->gtt_offset + obj->base.size)
+                       return i915_error_object_create(dev_priv, obj);
+       }
+
        seqno = ring->get_seqno(ring, false);
        list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
                if (obj->ring != ring)
index 3f75cfa..186ee5c 100644 (file)
  * the enables for writing to the corresponding low bit.
  */
 #define _3D_CHICKEN    0x02084
+#define  _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB     (1 << 10)
 #define _3D_CHICKEN2   0x0208c
 /* Disables pipelining of read flushes past the SF-WIZ interface.
  * Required on all Ironlake steppings according to the B-Spec, but the
 # define MI_FLUSH_ENABLE                               (1 << 12)
 
 #define GEN6_GT_MODE   0x20d0
-#define   GEN6_GT_MODE_HI      (1 << 9)
+#define   GEN6_GT_MODE_HI                              (1 << 9)
+#define   GEN6_TD_FOUR_ROW_DISPATCH_DISABLE            (1 << 5)
 
 #define GFX_MODE       0x02520
 #define GFX_MODE_GEN7  0x0229c
index 5d127e0..a9fb046 100644 (file)
@@ -8144,10 +8144,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
                        DRM_DEBUG_KMS("encoder changed, full mode switch\n");
                        config->mode_changed = true;
                }
-
-               /* Disable all disconnected encoders. */
-               if (connector->base.status == connector_status_disconnected)
-                       connector->new_encoder = NULL;
        }
        /* connector->new_encoder is now updated for all connectors. */
 
@@ -9167,6 +9163,23 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
         * the crtc fixup. */
 }
 
+static void i915_redisable_vga(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 vga_reg;
+
+       if (HAS_PCH_SPLIT(dev))
+               vga_reg = CPU_VGACNTRL;
+       else
+               vga_reg = VGACNTRL;
+
+       if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+               DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+               I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+               POSTING_READ(vga_reg);
+       }
+}
+
 /* Scan out the current hw modeset state, sanitizes it and maps it into the drm
  * and i915 state tracking structures. */
 void intel_modeset_setup_hw_state(struct drm_device *dev,
@@ -9275,6 +9288,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
                        intel_set_mode(&crtc->base, &crtc->base.mode,
                                       crtc->base.x, crtc->base.y, crtc->base.fb);
                }
+
+               i915_redisable_vga(dev);
        } else {
                intel_modeset_update_staged_output_state(dev);
        }
index 496caa7..e6f54ff 100644 (file)
@@ -405,7 +405,7 @@ void intel_update_fbc(struct drm_device *dev)
         *   - going to an unsupported config (interlace, pixel multiply, etc.)
         */
        list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
-               if (tmp_crtc->enabled &&
+               if (to_intel_crtc(tmp_crtc)->active &&
                    !to_intel_crtc(tmp_crtc)->primary_disabled &&
                    tmp_crtc->fb) {
                        if (crtc) {
@@ -992,7 +992,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
        struct drm_crtc *crtc, *enabled = NULL;
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               if (crtc->enabled && crtc->fb) {
+               if (to_intel_crtc(crtc)->active && crtc->fb) {
                        if (enabled)
                                return NULL;
                        enabled = crtc;
@@ -1086,7 +1086,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
        int entries, tlb_miss;
 
        crtc = intel_get_crtc_for_plane(dev, plane);
-       if (crtc->fb == NULL || !crtc->enabled) {
+       if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
                *cursor_wm = cursor->guard_size;
                *plane_wm = display->guard_size;
                return false;
@@ -1215,7 +1215,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
        int entries;
 
        crtc = intel_get_crtc_for_plane(dev, plane);
-       if (crtc->fb == NULL || !crtc->enabled)
+       if (crtc->fb == NULL || !to_intel_crtc(crtc)->active)
                return false;
 
        clock = crtc->mode.clock;       /* VESA DOT Clock */
@@ -1286,6 +1286,7 @@ static void valleyview_update_wm(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
        int plane_sr, cursor_sr;
+       int ignore_plane_sr, ignore_cursor_sr;
        unsigned int enabled = 0;
 
        vlv_update_drain_latency(dev);
@@ -1302,17 +1303,23 @@ static void valleyview_update_wm(struct drm_device *dev)
                            &planeb_wm, &cursorb_wm))
                enabled |= 2;
 
-       plane_sr = cursor_sr = 0;
        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
                             sr_latency_ns,
                             &valleyview_wm_info,
                             &valleyview_cursor_wm_info,
-                            &plane_sr, &cursor_sr))
+                            &plane_sr, &ignore_cursor_sr) &&
+           g4x_compute_srwm(dev, ffs(enabled) - 1,
+                            2*sr_latency_ns,
+                            &valleyview_wm_info,
+                            &valleyview_cursor_wm_info,
+                            &ignore_plane_sr, &cursor_sr)) {
                I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
-       else
+       } else {
                I915_WRITE(FW_BLC_SELF_VLV,
                           I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+               plane_sr = cursor_sr = 0;
+       }
 
        DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
                      planea_wm, cursora_wm,
@@ -1352,17 +1359,18 @@ static void g4x_update_wm(struct drm_device *dev)
                            &planeb_wm, &cursorb_wm))
                enabled |= 2;
 
-       plane_sr = cursor_sr = 0;
        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
                             sr_latency_ns,
                             &g4x_wm_info,
                             &g4x_cursor_wm_info,
-                            &plane_sr, &cursor_sr))
+                            &plane_sr, &cursor_sr)) {
                I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
-       else
+       } else {
                I915_WRITE(FW_BLC_SELF,
                           I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+               plane_sr = cursor_sr = 0;
+       }
 
        DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
                      planea_wm, cursora_wm,
@@ -1468,7 +1476,7 @@ static void i9xx_update_wm(struct drm_device *dev)
 
        fifo_size = dev_priv->display.get_fifo_size(dev, 0);
        crtc = intel_get_crtc_for_plane(dev, 0);
-       if (crtc->enabled && crtc->fb) {
+       if (to_intel_crtc(crtc)->active && crtc->fb) {
                int cpp = crtc->fb->bits_per_pixel / 8;
                if (IS_GEN2(dev))
                        cpp = 4;
@@ -1482,7 +1490,7 @@ static void i9xx_update_wm(struct drm_device *dev)
 
        fifo_size = dev_priv->display.get_fifo_size(dev, 1);
        crtc = intel_get_crtc_for_plane(dev, 1);
-       if (crtc->enabled && crtc->fb) {
+       if (to_intel_crtc(crtc)->active && crtc->fb) {
                int cpp = crtc->fb->bits_per_pixel / 8;
                if (IS_GEN2(dev))
                        cpp = 4;
@@ -1811,8 +1819,110 @@ static void sandybridge_update_wm(struct drm_device *dev)
                enabled |= 2;
        }
 
-       if ((dev_priv->num_pipe == 3) &&
-           g4x_compute_wm0(dev, 2,
+       /*
+        * Calculate and update the self-refresh watermark only when one
+        * display plane is used.
+        *
+        * SNB support 3 levels of watermark.
+        *
+        * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
+        * and disabled in the descending order
+        *
+        */
+       I915_WRITE(WM3_LP_ILK, 0);
+       I915_WRITE(WM2_LP_ILK, 0);
+       I915_WRITE(WM1_LP_ILK, 0);
+
+       if (!single_plane_enabled(enabled) ||
+           dev_priv->sprite_scaling_enabled)
+               return;
+       enabled = ffs(enabled) - 1;
+
+       /* WM1 */
+       if (!ironlake_compute_srwm(dev, 1, enabled,
+                                  SNB_READ_WM1_LATENCY() * 500,
+                                  &sandybridge_display_srwm_info,
+                                  &sandybridge_cursor_srwm_info,
+                                  &fbc_wm, &plane_wm, &cursor_wm))
+               return;
+
+       I915_WRITE(WM1_LP_ILK,
+                  WM1_LP_SR_EN |
+                  (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+                  (fbc_wm << WM1_LP_FBC_SHIFT) |
+                  (plane_wm << WM1_LP_SR_SHIFT) |
+                  cursor_wm);
+
+       /* WM2 */
+       if (!ironlake_compute_srwm(dev, 2, enabled,
+                                  SNB_READ_WM2_LATENCY() * 500,
+                                  &sandybridge_display_srwm_info,
+                                  &sandybridge_cursor_srwm_info,
+                                  &fbc_wm, &plane_wm, &cursor_wm))
+               return;
+
+       I915_WRITE(WM2_LP_ILK,
+                  WM2_LP_EN |
+                  (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+                  (fbc_wm << WM1_LP_FBC_SHIFT) |
+                  (plane_wm << WM1_LP_SR_SHIFT) |
+                  cursor_wm);
+
+       /* WM3 */
+       if (!ironlake_compute_srwm(dev, 3, enabled,
+                                  SNB_READ_WM3_LATENCY() * 500,
+                                  &sandybridge_display_srwm_info,
+                                  &sandybridge_cursor_srwm_info,
+                                  &fbc_wm, &plane_wm, &cursor_wm))
+               return;
+
+       I915_WRITE(WM3_LP_ILK,
+                  WM3_LP_EN |
+                  (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+                  (fbc_wm << WM1_LP_FBC_SHIFT) |
+                  (plane_wm << WM1_LP_SR_SHIFT) |
+                  cursor_wm);
+}
+
+static void ivybridge_update_wm(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
+       u32 val;
+       int fbc_wm, plane_wm, cursor_wm;
+       int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
+       unsigned int enabled;
+
+       enabled = 0;
+       if (g4x_compute_wm0(dev, 0,
+                           &sandybridge_display_wm_info, latency,
+                           &sandybridge_cursor_wm_info, latency,
+                           &plane_wm, &cursor_wm)) {
+               val = I915_READ(WM0_PIPEA_ILK);
+               val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+               I915_WRITE(WM0_PIPEA_ILK, val |
+                          ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+               DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+                             " plane %d, " "cursor: %d\n",
+                             plane_wm, cursor_wm);
+               enabled |= 1;
+       }
+
+       if (g4x_compute_wm0(dev, 1,
+                           &sandybridge_display_wm_info, latency,
+                           &sandybridge_cursor_wm_info, latency,
+                           &plane_wm, &cursor_wm)) {
+               val = I915_READ(WM0_PIPEB_ILK);
+               val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+               I915_WRITE(WM0_PIPEB_ILK, val |
+                          ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+               DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+                             " plane %d, cursor: %d\n",
+                             plane_wm, cursor_wm);
+               enabled |= 2;
+       }
+
+       if (g4x_compute_wm0(dev, 2,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1875,12 +1985,17 @@ static void sandybridge_update_wm(struct drm_device *dev)
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);
 
-       /* WM3 */
+       /* WM3, note we have to correct the cursor latency */
        if (!ironlake_compute_srwm(dev, 3, enabled,
                                   SNB_READ_WM3_LATENCY() * 500,
                                   &sandybridge_display_srwm_info,
                                   &sandybridge_cursor_srwm_info,
-                                  &fbc_wm, &plane_wm, &cursor_wm))
+                                  &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
+           !ironlake_compute_srwm(dev, 3, enabled,
+                                  2 * SNB_READ_WM3_LATENCY() * 500,
+                                  &sandybridge_display_srwm_info,
+                                  &sandybridge_cursor_srwm_info,
+                                  &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
                return;
 
        I915_WRITE(WM3_LP_ILK,
@@ -1929,7 +2044,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
        int entries, tlb_miss;
 
        crtc = intel_get_crtc_for_plane(dev, plane);
-       if (crtc->fb == NULL || !crtc->enabled) {
+       if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
                *sprite_wm = display->guard_size;
                return false;
        }
@@ -3471,6 +3586,15 @@ static void gen6_init_clock_gating(struct drm_device *dev)
                   I915_READ(ILK_DISPLAY_CHICKEN2) |
                   ILK_ELPIN_409_SELECT);
 
+       /* WaDisableHiZPlanesWhenMSAAEnabled */
+       I915_WRITE(_3D_CHICKEN,
+                  _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
+
+       /* WaSetupGtModeTdRowDispatch */
+       if (IS_SNB_GT1(dev))
+               I915_WRITE(GEN6_GT_MODE,
+                          _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
+
        I915_WRITE(WM3_LP_ILK, 0);
        I915_WRITE(WM2_LP_ILK, 0);
        I915_WRITE(WM1_LP_ILK, 0);
@@ -3999,7 +4123,7 @@ void intel_init_pm(struct drm_device *dev)
                } else if (IS_IVYBRIDGE(dev)) {
                        /* FIXME: detect B0+ stepping and use auto training */
                        if (SNB_READ_WM0_LATENCY()) {
-                               dev_priv->display.update_wm = sandybridge_update_wm;
+                               dev_priv->display.update_wm = ivybridge_update_wm;
                                dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
                        } else {
                                DRM_DEBUG_KMS("Failed to read display plane latency. "
index 2346b92..ae253e0 100644 (file)
@@ -547,9 +547,14 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 
 static void render_ring_cleanup(struct intel_ring_buffer *ring)
 {
+       struct drm_device *dev = ring->dev;
+
        if (!ring->private)
                return;
 
+       if (HAS_BROKEN_CS_TLB(dev))
+               drm_gem_object_unreference(to_gem_object(ring->private));
+
        cleanup_pipe_control(ring);
 }
 
@@ -969,6 +974,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
        return 0;
 }
 
+/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
+#define I830_BATCH_LIMIT (256*1024)
 static int
 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
                                u32 offset, u32 len,
@@ -976,15 +983,47 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 {
        int ret;
 
-       ret = intel_ring_begin(ring, 4);
-       if (ret)
-               return ret;
+       if (flags & I915_DISPATCH_PINNED) {
+               ret = intel_ring_begin(ring, 4);
+               if (ret)
+                       return ret;
 
-       intel_ring_emit(ring, MI_BATCH_BUFFER);
-       intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-       intel_ring_emit(ring, offset + len - 8);
-       intel_ring_emit(ring, 0);
-       intel_ring_advance(ring);
+               intel_ring_emit(ring, MI_BATCH_BUFFER);
+               intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+               intel_ring_emit(ring, offset + len - 8);
+               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_advance(ring);
+       } else {
+               struct drm_i915_gem_object *obj = ring->private;
+               u32 cs_offset = obj->gtt_offset;
+
+               if (len > I830_BATCH_LIMIT)
+                       return -ENOSPC;
+
+               ret = intel_ring_begin(ring, 9+3);
+               if (ret)
+                       return ret;
+               /* Blit the batch (which has now all relocs applied) to the stable batch
+                * scratch bo area (so that the CS never stumbles over its tlb
+                * invalidation bug) ... */
+               intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+                               XY_SRC_COPY_BLT_WRITE_ALPHA |
+                               XY_SRC_COPY_BLT_WRITE_RGB);
+               intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+               intel_ring_emit(ring, cs_offset);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, 4096);
+               intel_ring_emit(ring, offset);
+               intel_ring_emit(ring, MI_FLUSH);
+
+               /* ... and execute it. */
+               intel_ring_emit(ring, MI_BATCH_BUFFER);
+               intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+               intel_ring_emit(ring, cs_offset + len - 8);
+               intel_ring_advance(ring);
+       }
 
        return 0;
 }
@@ -1596,6 +1635,27 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
        ring->init = init_render_ring;
        ring->cleanup = render_ring_cleanup;
 
+       /* Workaround batchbuffer to combat CS tlb bug. */
+       if (HAS_BROKEN_CS_TLB(dev)) {
+               struct drm_i915_gem_object *obj;
+               int ret;
+
+               obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+               if (obj == NULL) {
+                       DRM_ERROR("Failed to allocate batch bo\n");
+                       return -ENOMEM;
+               }
+
+               ret = i915_gem_object_pin(obj, 0, true, false);
+               if (ret != 0) {
+                       drm_gem_object_unreference(&obj->base);
+                       DRM_ERROR("Failed to ping batch bo\n");
+                       return ret;
+               }
+
+               ring->private = obj;
+       }
+
        return intel_init_ring_buffer(dev, ring);
 }
 
index 526182e..6af87cd 100644 (file)
@@ -94,6 +94,7 @@ struct  intel_ring_buffer {
                                               u32 offset, u32 length,
                                               unsigned flags);
 #define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
        void            (*cleanup)(struct intel_ring_buffer *ring);
        int             (*sync_to)(struct intel_ring_buffer *ring,
                                   struct intel_ring_buffer *to,
index 7b715fd..62ab231 100644 (file)
@@ -57,6 +57,11 @@ chipsets:
 .b16 #nve4_gpc_mmio_tail
 .b16 #nve4_tpc_mmio_head
 .b16 #nve4_tpc_mmio_tail
+.b8  0xe6 0 0 0
+.b16 #nve4_gpc_mmio_head
+.b16 #nve4_gpc_mmio_tail
+.b16 #nve4_tpc_mmio_head
+.b16 #nve4_tpc_mmio_tail
 .b8  0 0 0 0
 
 // GPC mmio lists
index 26c2165..09ee470 100644 (file)
@@ -34,13 +34,16 @@ uint32_t nve0_grgpc_data[] = {
        0x00000000,
 /* 0x0064: chipsets */
        0x000000e4,
-       0x01040080,
-       0x014c0104,
+       0x0110008c,
+       0x01580110,
        0x000000e7,
-       0x01040080,
-       0x014c0104,
+       0x0110008c,
+       0x01580110,
+       0x000000e6,
+       0x0110008c,
+       0x01580110,
        0x00000000,
-/* 0x0080: nve4_gpc_mmio_head */
+/* 0x008c: nve4_gpc_mmio_head */
        0x00000380,
        0x04000400,
        0x0800040c,
@@ -74,8 +77,8 @@ uint32_t nve0_grgpc_data[] = {
        0x14003100,
        0x000031d0,
        0x040031e0,
-/* 0x0104: nve4_gpc_mmio_tail */
-/* 0x0104: nve4_tpc_mmio_head */
+/* 0x0110: nve4_gpc_mmio_tail */
+/* 0x0110: nve4_tpc_mmio_head */
        0x00000048,
        0x00000064,
        0x00000088,
index acfc457..0bcfa4d 100644 (file)
@@ -754,6 +754,16 @@ ctx_mmio_exec:
 //             on load it means: "a save preceeded this load"
 //
 ctx_xfer:
+       // according to mwk, some kind of wait for idle
+       mov $r15 0xc00
+       shl b32 $r15 6
+       mov $r14 4
+       iowr I[$r15 + 0x200] $r14
+       ctx_xfer_idle:
+               iord $r14 I[$r15 + 0x000]
+               and $r14 0x2000
+               bra ne #ctx_xfer_idle
+
        bra not $p1 #ctx_xfer_pre
        bra $p2 #ctx_xfer_pre_load
        ctx_xfer_pre:
index 85a8d55..bb03d2a 100644 (file)
@@ -799,79 +799,80 @@ uint32_t nvc0_grhub_code[] = {
        0x01fa0613,
        0xf803f806,
 /* 0x0829: ctx_xfer */
-       0x0611f400,
-/* 0x082f: ctx_xfer_pre */
-       0xf01102f4,
-       0x21f510f7,
-       0x21f50698,
-       0x11f40631,
-/* 0x083d: ctx_xfer_pre_load */
-       0x02f7f01c,
-       0x065721f5,
-       0x066621f5,
-       0x067821f5,
-       0x21f5f4bd,
-       0x21f50657,
-/* 0x0856: ctx_xfer_exec */
-       0x019806b8,
-       0x1427f116,
-       0x0624b604,
-       0xf10020d0,
-       0xf0a500e7,
-       0x1fb941e3,
-       0x8d21f402,
-       0xf004e0b6,
-       0x2cf001fc,
-       0x0124b602,
-       0xf405f2fd,
-       0x17f18d21,
-       0x13f04afc,
-       0x0c27f002,
-       0xf50012d0,
-       0xf1020721,
-       0xf047fc27,
-       0x20d00223,
-       0x012cf000,
-       0xd00320b6,
-       0xacf00012,
-       0x06a5f001,
-       0x9800b7f0,
-       0x0d98140c,
-       0x00e7f015,
-       0x015c21f5,
-       0xf508a7f0,
-       0xf5010321,
-       0xf4020721,
-       0xa7f02201,
-       0xc921f40c,
-       0x0a1017f1,
-       0xf00614b6,
-       0x12d00527,
-/* 0x08dd: ctx_xfer_post_save_wait */
-       0x0012cf00,
-       0xf40522fd,
-       0x02f4fa1b,
-/* 0x08e9: ctx_xfer_post */
-       0x02f7f032,
-       0x065721f5,
-       0x21f5f4bd,
-       0x21f50698,
-       0x21f50226,
-       0xf4bd0666,
-       0x065721f5,
-       0x981011f4,
-       0x11fd8001,
-       0x070bf405,
-       0x07df21f5,
-/* 0x0914: ctx_xfer_no_post_mmio */
-       0x064921f5,
-/* 0x0918: ctx_xfer_done */
-       0x000000f8,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
+       0x00f7f100,
+       0x06f4b60c,
+       0xd004e7f0,
+/* 0x0836: ctx_xfer_idle */
+       0xfecf80fe,
+       0x00e4f100,
+       0xf91bf420,
+       0xf40611f4,
+/* 0x0846: ctx_xfer_pre */
+       0xf7f01102,
+       0x9821f510,
+       0x3121f506,
+       0x1c11f406,
+/* 0x0854: ctx_xfer_pre_load */
+       0xf502f7f0,
+       0xf5065721,
+       0xf5066621,
+       0xbd067821,
+       0x5721f5f4,
+       0xb821f506,
+/* 0x086d: ctx_xfer_exec */
+       0x16019806,
+       0x041427f1,
+       0xd00624b6,
+       0xe7f10020,
+       0xe3f0a500,
+       0x021fb941,
+       0xb68d21f4,
+       0xfcf004e0,
+       0x022cf001,
+       0xfd0124b6,
+       0x21f405f2,
+       0xfc17f18d,
+       0x0213f04a,
+       0xd00c27f0,
+       0x21f50012,
+       0x27f10207,
+       0x23f047fc,
+       0x0020d002,
+       0xb6012cf0,
+       0x12d00320,
+       0x01acf000,
+       0xf006a5f0,
+       0x0c9800b7,
+       0x150d9814,
+       0xf500e7f0,
+       0xf0015c21,
+       0x21f508a7,
+       0x21f50103,
+       0x01f40207,
+       0x0ca7f022,
+       0xf1c921f4,
+       0xb60a1017,
+       0x27f00614,
+       0x0012d005,
+/* 0x08f4: ctx_xfer_post_save_wait */
+       0xfd0012cf,
+       0x1bf40522,
+       0x3202f4fa,
+/* 0x0900: ctx_xfer_post */
+       0xf502f7f0,
+       0xbd065721,
+       0x9821f5f4,
+       0x2621f506,
+       0x6621f502,
+       0xf5f4bd06,
+       0xf4065721,
+       0x01981011,
+       0x0511fd80,
+       0xf5070bf4,
+/* 0x092b: ctx_xfer_no_post_mmio */
+       0xf507df21,
+/* 0x092f: ctx_xfer_done */
+       0xf8064921,
        0x00000000,
        0x00000000,
        0x00000000,
index 138eeaa..7fe9d7c 100644 (file)
@@ -44,6 +44,9 @@ chipsets:
 .b8  0xe7 0 0 0
 .b16 #nve4_hub_mmio_head
 .b16 #nve4_hub_mmio_tail
+.b8  0xe6 0 0 0
+.b16 #nve4_hub_mmio_head
+.b16 #nve4_hub_mmio_tail
 .b8  0 0 0 0
 
 nve4_hub_mmio_head:
@@ -680,6 +683,16 @@ ctx_mmio_exec:
 //             on load it means: "a save preceeded this load"
 //
 ctx_xfer:
+       // according to mwk, some kind of wait for idle
+       mov $r15 0xc00
+       shl b32 $r15 6
+       mov $r14 4
+       iowr I[$r15 + 0x200] $r14
+       ctx_xfer_idle:
+               iord $r14 I[$r15 + 0x000]
+               and $r14 0x2000
+               bra ne #ctx_xfer_idle
+
        bra not $p1 #ctx_xfer_pre
        bra $p2 #ctx_xfer_pre_load
        ctx_xfer_pre:
index decf0c6..e3421af 100644 (file)
@@ -30,11 +30,13 @@ uint32_t nve0_grhub_data[] = {
        0x00000000,
 /* 0x005c: chipsets */
        0x000000e4,
-       0x013c0070,
+       0x01440078,
        0x000000e7,
-       0x013c0070,
+       0x01440078,
+       0x000000e6,
+       0x01440078,
        0x00000000,
-/* 0x0070: nve4_hub_mmio_head */
+/* 0x0078: nve4_hub_mmio_head */
        0x0417e91c,
        0x04400204,
        0x18404010,
@@ -86,9 +88,7 @@ uint32_t nve0_grhub_data[] = {
        0x00408840,
        0x08408900,
        0x00408980,
-/* 0x013c: nve4_hub_mmio_tail */
-       0x00000000,
-       0x00000000,
+/* 0x0144: nve4_hub_mmio_tail */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -781,77 +781,78 @@ uint32_t nve0_grhub_code[] = {
        0x0613f002,
        0xf80601fa,
 /* 0x07fb: ctx_xfer */
-       0xf400f803,
-       0x02f40611,
-/* 0x0801: ctx_xfer_pre */
-       0x10f7f00d,
-       0x067221f5,
-/* 0x080b: ctx_xfer_pre_load */
-       0xf01c11f4,
-       0x21f502f7,
-       0x21f50631,
-       0x21f50640,
-       0xf4bd0652,
-       0x063121f5,
-       0x069221f5,
-/* 0x0824: ctx_xfer_exec */
-       0xf1160198,
-       0xb6041427,
-       0x20d00624,
-       0x00e7f100,
-       0x41e3f0a5,
-       0xf4021fb9,
-       0xe0b68d21,
-       0x01fcf004,
-       0xb6022cf0,
-       0xf2fd0124,
-       0x8d21f405,
-       0x4afc17f1,
-       0xf00213f0,
-       0x12d00c27,
-       0x0721f500,
-       0xfc27f102,
-       0x0223f047,
-       0xf00020d0,
-       0x20b6012c,
-       0x0012d003,
-       0xf001acf0,
-       0xb7f006a5,
-       0x140c9800,
-       0xf0150d98,
-       0x21f500e7,
-       0xa7f0015c,
-       0x0321f508,
-       0x0721f501,
-       0x2201f402,
-       0xf40ca7f0,
-       0x17f1c921,
-       0x14b60a10,
-       0x0527f006,
-/* 0x08ab: ctx_xfer_post_save_wait */
-       0xcf0012d0,
-       0x22fd0012,
-       0xfa1bf405,
-/* 0x08b7: ctx_xfer_post */
-       0xf02e02f4,
-       0x21f502f7,
-       0xf4bd0631,
-       0x067221f5,
-       0x022621f5,
-       0x064021f5,
-       0x21f5f4bd,
-       0x11f40631,
-       0x80019810,
-       0xf40511fd,
-       0x21f5070b,
-/* 0x08e2: ctx_xfer_no_post_mmio */
-/* 0x08e2: ctx_xfer_done */
-       0x00f807b1,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
+       0xf100f803,
+       0xb60c00f7,
+       0xe7f006f4,
+       0x80fed004,
+/* 0x0808: ctx_xfer_idle */
+       0xf100fecf,
+       0xf42000e4,
+       0x11f4f91b,
+       0x0d02f406,
+/* 0x0818: ctx_xfer_pre */
+       0xf510f7f0,
+       0xf4067221,
+/* 0x0822: ctx_xfer_pre_load */
+       0xf7f01c11,
+       0x3121f502,
+       0x4021f506,
+       0x5221f506,
+       0xf5f4bd06,
+       0xf5063121,
+/* 0x083b: ctx_xfer_exec */
+       0x98069221,
+       0x27f11601,
+       0x24b60414,
+       0x0020d006,
+       0xa500e7f1,
+       0xb941e3f0,
+       0x21f4021f,
+       0x04e0b68d,
+       0xf001fcf0,
+       0x24b6022c,
+       0x05f2fd01,
+       0xf18d21f4,
+       0xf04afc17,
+       0x27f00213,
+       0x0012d00c,
+       0x020721f5,
+       0x47fc27f1,
+       0xd00223f0,
+       0x2cf00020,
+       0x0320b601,
+       0xf00012d0,
+       0xa5f001ac,
+       0x00b7f006,
+       0x98140c98,
+       0xe7f0150d,
+       0x5c21f500,
+       0x08a7f001,
+       0x010321f5,
+       0x020721f5,
+       0xf02201f4,
+       0x21f40ca7,
+       0x1017f1c9,
+       0x0614b60a,
+       0xd00527f0,
+/* 0x08c2: ctx_xfer_post_save_wait */
+       0x12cf0012,
+       0x0522fd00,
+       0xf4fa1bf4,
+/* 0x08ce: ctx_xfer_post */
+       0xf7f02e02,
+       0x3121f502,
+       0xf5f4bd06,
+       0xf5067221,
+       0xf5022621,
+       0xbd064021,
+       0x3121f5f4,
+       0x1011f406,
+       0xfd800198,
+       0x0bf40511,
+       0xb121f507,
+/* 0x08f9: ctx_xfer_no_post_mmio */
+/* 0x08f9: ctx_xfer_done */
+       0x0000f807,
        0x00000000,
 };
index 47a0208..45aff5f 100644 (file)
@@ -516,18 +516,9 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 {
        struct nouveau_device *device = nv_device(parent);
        struct nvc0_graph_priv *priv;
-       bool enable = true;
        int ret, i;
 
-       switch (device->chipset) {
-       case 0xd9: /* known broken without binary driver firmware */
-               enable = false;
-               break;
-       default:
-               break;
-       }
-
-       ret = nouveau_graph_create(parent, engine, oclass, enable, &priv);
+       ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
index 18d2210..a1e78de 100644 (file)
@@ -121,6 +121,7 @@ nvc0_graph_class(void *obj)
                return 0x9297;
        case 0xe4:
        case 0xe7:
+       case 0xe6:
                return 0xa097;
        default:
                return 0;
index 539d4c7..9f82e97 100644 (file)
@@ -203,7 +203,7 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nvc0_graph_priv *priv;
        int ret, i;
 
-       ret = nouveau_graph_create(parent, engine, oclass, false, &priv);
+       ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
@@ -252,6 +252,7 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                        priv->magic_not_rop_nr = 1;
                break;
        case 0xe7:
+       case 0xe6:
                priv->magic_not_rop_nr = 1;
                break;
        default:
index d145b25..5bd1ca8 100644 (file)
@@ -17,6 +17,7 @@ struct nouveau_bios {
                u8 chip;
                u8 minor;
                u8 micro;
+               u8 patch;
        } version;
 };
 
index 2bf1780..e6563b5 100644 (file)
@@ -25,9 +25,11 @@ struct dcb_gpio_func {
        u8 param;
 };
 
-u16 dcb_gpio_table(struct nouveau_bios *);
-u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver);
-int dcb_gpio_parse(struct nouveau_bios *, int idx, u8 func, u8 line,
+u16 dcb_gpio_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len);
+u16 dcb_gpio_parse(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len,
                   struct dcb_gpio_func *);
+u16 dcb_gpio_match(struct nouveau_bios *, int idx, u8 func, u8 line,
+                  u8 *ver, u8 *len, struct dcb_gpio_func *);
 
 #endif
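
The reworked interface above splits table lookup (dcb_gpio_table), raw entry access (dcb_gpio_entry), per-entry decoding (dcb_gpio_parse) and function/line matching (dcb_gpio_match). A minimal caller sketch, assuming only the prototypes shown here and the DCB_GPIO_UNUSED token used elsewhere in this series (the function name and the logging are illustrative, not from the patches):

static void example_dump_gpio_funcs(struct nouveau_bios *bios)
{
        struct dcb_gpio_func func;
        u8  ver, len;
        int ent = 0;
        u16 data;

        /* dcb_gpio_parse() now takes an entry index and returns the entry's
         * offset within the VBIOS image, or 0x0000 when no more entries exist.
         */
        while ((data = dcb_gpio_parse(bios, 0, ent++, &ver, &len, &func))) {
                if (func.func == DCB_GPIO_UNUSED)
                        continue;
                nv_debug(bios, "GPIO func 0x%02x on line %d (DCB %02x)\n",
                         func.func, func.line, ver);
        }
}

dcb_gpio_match() wraps the same loop and additionally filters by function tag and/or line, which is what nouveau_gpio_find() switches to further down.
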
index e69a8bd..ca2f6bf 100644 (file)
@@ -13,6 +13,7 @@ struct nvbios_init {
        u32 nested;
        u16 repeat;
        u16 repend;
+       u32 ramcfg;
 };
 
 int nvbios_exec(struct nvbios_init *);
index 9ea2b12..b75e8f1 100644 (file)
@@ -11,7 +11,7 @@ struct nouveau_gpio {
        struct nouveau_subdev base;
 
        /* hardware interfaces */
-       void (*reset)(struct nouveau_gpio *);
+       void (*reset)(struct nouveau_gpio *, u8 func);
        int  (*drive)(struct nouveau_gpio *, int line, int dir, int out);
        int  (*sense)(struct nouveau_gpio *, int line);
        void (*irq_enable)(struct nouveau_gpio *, int line, bool);
index dd11194..f621f69 100644 (file)
@@ -447,6 +447,7 @@ nouveau_bios_ctor(struct nouveau_object *parent,
                bios->version.chip  = nv_ro08(bios, bit_i.offset + 2);
                bios->version.minor = nv_ro08(bios, bit_i.offset + 1);
                bios->version.micro = nv_ro08(bios, bit_i.offset + 0);
+               bios->version.patch = nv_ro08(bios, bit_i.offset + 4);
        } else
        if (bmp_version(bios)) {
                bios->version.major = nv_ro08(bios, bios->bmp_offset + 13);
@@ -455,9 +456,9 @@ nouveau_bios_ctor(struct nouveau_object *parent,
                bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10);
        }
 
-       nv_info(bios, "version %02x.%02x.%02x.%02x\n",
+       nv_info(bios, "version %02x.%02x.%02x.%02x.%02x\n",
                bios->version.major, bios->version.chip,
-               bios->version.minor, bios->version.micro);
+               bios->version.minor, bios->version.micro, bios->version.patch);
 
        return 0;
 }
index c90d4aa..c84e93f 100644 (file)
 #include <subdev/bios/gpio.h>
 
 u16
-dcb_gpio_table(struct nouveau_bios *bios)
+dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
-       u8  ver, hdr, cnt, len;
-       u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
+       u16 data = 0x0000;
+       u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
        if (dcb) {
-               if (ver >= 0x30 && hdr >= 0x0c)
-                       return nv_ro16(bios, dcb + 0x0a);
-               if (ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
-                       return nv_ro16(bios, dcb - 0x0f);
+               if (*ver >= 0x30 && *hdr >= 0x0c)
+                       data = nv_ro16(bios, dcb + 0x0a);
+               else
+               if (*ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
+                       data = nv_ro16(bios, dcb - 0x0f);
+
+               if (data) {
+                       *ver = nv_ro08(bios, data + 0x00);
+                       if (*ver < 0x30) {
+                               *hdr = 3;
+                               *cnt = nv_ro08(bios, data + 0x02);
+                               *len = nv_ro08(bios, data + 0x01);
+                       } else
+                       if (*ver <= 0x41) {
+                               *hdr = nv_ro08(bios, data + 0x01);
+                               *cnt = nv_ro08(bios, data + 0x02);
+                               *len = nv_ro08(bios, data + 0x03);
+                       } else {
+                               data = 0x0000;
+                       }
+               }
        }
-       return 0x0000;
+       return data;
 }
 
 u16
-dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver)
+dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len)
 {
-       u16 gpio = dcb_gpio_table(bios);
-       if (gpio) {
-               *ver = nv_ro08(bios, gpio);
-               if (*ver < 0x30 && ent < nv_ro08(bios, gpio + 2))
-                       return gpio + 3 + (ent * nv_ro08(bios, gpio + 1));
-               else if (ent < nv_ro08(bios, gpio + 2))
-                       return gpio + nv_ro08(bios, gpio + 1) +
-                              (ent * nv_ro08(bios, gpio + 3));
-       }
+       u8  hdr, cnt;
+       u16 gpio = !idx ? dcb_gpio_table(bios, ver, &hdr, &cnt, len) : 0x0000;
+       if (gpio && ent < cnt)
+               return gpio + hdr + (ent * *len);
        return 0x0000;
 }
 
-int
-dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
+u16
+dcb_gpio_parse(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len,
               struct dcb_gpio_func *gpio)
 {
-       u8  ver, hdr, cnt, len;
-       u16 entry;
-       int i = -1;
-
-       while ((entry = dcb_gpio_entry(bios, idx, ++i, &ver))) {
-               if (ver < 0x40) {
-                       u16 data = nv_ro16(bios, entry);
+       u16 data = dcb_gpio_entry(bios, idx, ent, ver, len);
+       if (data) {
+               if (*ver < 0x40) {
+                       u16 info = nv_ro16(bios, data);
                        *gpio = (struct dcb_gpio_func) {
-                               .line = (data & 0x001f) >> 0,
-                               .func = (data & 0x07e0) >> 5,
-                               .log[0] = (data & 0x1800) >> 11,
-                               .log[1] = (data & 0x6000) >> 13,
-                               .param = !!(data & 0x8000),
+                               .line = (info & 0x001f) >> 0,
+                               .func = (info & 0x07e0) >> 5,
+                               .log[0] = (info & 0x1800) >> 11,
+                               .log[1] = (info & 0x6000) >> 13,
+                               .param = !!(info & 0x8000),
                        };
                } else
-               if (ver < 0x41) {
-                       u32 data = nv_ro32(bios, entry);
+               if (*ver < 0x41) {
+                       u32 info = nv_ro32(bios, data);
                        *gpio = (struct dcb_gpio_func) {
-                               .line = (data & 0x0000001f) >> 0,
-                               .func = (data & 0x0000ff00) >> 8,
-                               .log[0] = (data & 0x18000000) >> 27,
-                               .log[1] = (data & 0x60000000) >> 29,
-                               .param = !!(data & 0x80000000),
+                               .line = (info & 0x0000001f) >> 0,
+                               .func = (info & 0x0000ff00) >> 8,
+                               .log[0] = (info & 0x18000000) >> 27,
+                               .log[1] = (info & 0x60000000) >> 29,
+                               .param = !!(info & 0x80000000),
                        };
                } else {
-                       u32 data = nv_ro32(bios, entry + 0);
-                       u8 data1 = nv_ro32(bios, entry + 4);
+                       u32 info = nv_ro32(bios, data + 0);
+                       u8 info1 = nv_ro32(bios, data + 4);
                        *gpio = (struct dcb_gpio_func) {
-                               .line = (data & 0x0000003f) >> 0,
-                               .func = (data & 0x0000ff00) >> 8,
-                               .log[0] = (data1 & 0x30) >> 4,
-                               .log[1] = (data1 & 0xc0) >> 6,
-                               .param = !!(data & 0x80000000),
+                               .line = (info & 0x0000003f) >> 0,
+                               .func = (info & 0x0000ff00) >> 8,
+                               .log[0] = (info1 & 0x30) >> 4,
+                               .log[1] = (info1 & 0xc0) >> 6,
+                               .param = !!(info & 0x80000000),
                        };
                }
+       }
+
+       return data;
+}
 
+u16
+dcb_gpio_match(struct nouveau_bios *bios, int idx, u8 func, u8 line,
+              u8 *ver, u8 *len, struct dcb_gpio_func *gpio)
+{
+       u8  hdr, cnt, i = 0;
+       u16 data;
+
+       while ((data = dcb_gpio_parse(bios, idx, i++, ver, len, gpio))) {
                if ((line == 0xff || line == gpio->line) &&
                    (func == 0xff || func == gpio->func))
-                       return 0;
+                       return data;
        }
 
        /* DCB 2.2, fixed TVDAC GPIO data */
-       if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len))) {
-               if (ver >= 0x22 && ver < 0x30 && func == DCB_GPIO_TVDAC0) {
-                       u8 conf = nv_ro08(bios, entry - 5);
-                       u8 addr = nv_ro08(bios, entry - 4);
+       if ((data = dcb_table(bios, ver, &hdr, &cnt, len))) {
+               if (*ver >= 0x22 && *ver < 0x30 && func == DCB_GPIO_TVDAC0) {
+                       u8 conf = nv_ro08(bios, data - 5);
+                       u8 addr = nv_ro08(bios, data - 4);
                        if (conf & 0x01) {
                                *gpio = (struct dcb_gpio_func) {
                                        .func = DCB_GPIO_TVDAC0,
@@ -112,10 +133,11 @@ dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
                                        .log[0] = !!(conf & 0x02),
                                        .log[1] =  !(conf & 0x02),
                                };
-                               return 0;
+                               *ver = 0x00;
+                               return data;
                        }
                }
        }
 
-       return -EINVAL;
+       return 0x0000;
 }
index ae168bb..2917d55 100644 (file)
@@ -2,11 +2,12 @@
 #include <core/device.h>
 
 #include <subdev/bios.h>
-#include <subdev/bios/conn.h>
 #include <subdev/bios/bmp.h>
 #include <subdev/bios/bit.h>
+#include <subdev/bios/conn.h>
 #include <subdev/bios/dcb.h>
 #include <subdev/bios/dp.h>
+#include <subdev/bios/gpio.h>
 #include <subdev/bios/init.h>
 #include <subdev/devinit.h>
 #include <subdev/clock.h>
@@ -410,9 +411,25 @@ init_ram_restrict_group_count(struct nvbios_init *init)
 }
 
 static u8
+init_ram_restrict_strap(struct nvbios_init *init)
+{
+       /* This appears to be the behaviour of the VBIOS parser, and *is*
+        * important to cache the NV_PEXTDEV_BOOT0 on later chipsets to
+        * avoid fucking up the memory controller (somehow) by reading it
+        * on every INIT_RAM_RESTRICT_ZM_GROUP opcode.
+        *
+        * Preserving the non-caching behaviour on earlier chipsets just
+        * in case *not* re-reading the strap causes similar breakage.
+        */
+       if (!init->ramcfg || init->bios->version.major < 0x70)
+               init->ramcfg = init_rd32(init, 0x101000);
+       return (init->ramcfg & 0x0000003c) >> 2;
+}
+
+static u8
 init_ram_restrict(struct nvbios_init *init)
 {
-       u32 strap = (init_rd32(init, 0x101000) & 0x0000003c) >> 2;
+       u8  strap = init_ram_restrict_strap(init);
        u16 table = init_ram_restrict_table(init);
        if (table)
                return nv_ro08(init->bios, table + strap);
@@ -1781,7 +1798,7 @@ init_gpio(struct nvbios_init *init)
        init->offset += 1;
 
        if (init_exec(init) && gpio && gpio->reset)
-               gpio->reset(gpio);
+               gpio->reset(gpio, DCB_GPIO_UNUSED);
 }
 
 /**
@@ -1995,6 +2012,47 @@ init_i2c_long_if(struct nvbios_init *init)
        init_exec_set(init, false);
 }
 
+/**
+ * INIT_GPIO_NE - opcode 0xa9
+ *
+ */
+static void
+init_gpio_ne(struct nvbios_init *init)
+{
+       struct nouveau_bios *bios = init->bios;
+       struct nouveau_gpio *gpio = nouveau_gpio(bios);
+       struct dcb_gpio_func func;
+       u8 count = nv_ro08(bios, init->offset + 1);
+       u8 idx = 0, ver, len;
+       u16 data, i;
+
+       trace("GPIO_NE\t");
+       init->offset += 2;
+
+       for (i = init->offset; i < init->offset + count; i++)
+               cont("0x%02x ", nv_ro08(bios, i));
+       cont("\n");
+
+       while ((data = dcb_gpio_parse(bios, 0, idx++, &ver, &len, &func))) {
+               if (func.func != DCB_GPIO_UNUSED) {
+                       for (i = init->offset; i < init->offset + count; i++) {
+                               if (func.func == nv_ro08(bios, i))
+                                       break;
+                       }
+
+                       trace("\tFUNC[0x%02x]", func.func);
+                       if (i == (init->offset + count)) {
+                               cont(" *");
+                               if (init_exec(init) && gpio && gpio->reset)
+                                       gpio->reset(gpio, func.func);
+                       }
+                       cont("\n");
+               }
+       }
+
+       init->offset += count;
+}
+
 static struct nvbios_init_opcode {
        void (*exec)(struct nvbios_init *);
 } init_opcode[] = {
@@ -2059,6 +2117,7 @@ static struct nvbios_init_opcode {
        [0x98] = { init_auxch },
        [0x99] = { init_zm_auxch },
        [0x9a] = { init_i2c_long_if },
+       [0xa9] = { init_gpio_ne },
 };
 
 #define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
index 9b7881e..03a6528 100644 (file)
@@ -109,6 +109,34 @@ nve0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                break;
+       case 0xe6:
+               device->cname = "GK106";
+               device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+               device->oclass[NVDEV_SUBDEV_GPIO   ] = &nvd0_gpio_oclass;
+               device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+               device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+               device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+               device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+               device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+               device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+               device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+               device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+               device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
+               device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+               device->oclass[NVDEV_ENGINE_GR     ] = &nve0_graph_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
+               device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
+               device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+               break;
        default:
                nv_fatal(device, "unknown Kepler chipset\n");
                return -EINVAL;
index acf818c..9fb0f9b 100644 (file)
@@ -43,10 +43,15 @@ static int
 nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
                  struct dcb_gpio_func *func)
 {
+       struct nouveau_bios *bios = nouveau_bios(gpio);
+       u8  ver, len;
+       u16 data;
+
        if (line == 0xff && tag == 0xff)
                return -EINVAL;
 
-       if (!dcb_gpio_parse(nouveau_bios(gpio), idx, tag, line, func))
+       data = dcb_gpio_match(bios, idx, tag, line, &ver, &len, func);
+       if (data)
                return 0;
 
        /* Apple iMac G4 NV18 */
@@ -265,7 +270,7 @@ nouveau_gpio_init(struct nouveau_gpio *gpio)
        int ret = nouveau_subdev_init(&gpio->base);
        if (ret == 0 && gpio->reset) {
                if (dmi_check_system(gpio_reset_ids))
-                       gpio->reset(gpio);
+                       gpio->reset(gpio, DCB_GPIO_UNUSED);
        }
        return ret;
 }
index f3502c9..bf13a12 100644 (file)
@@ -29,15 +29,15 @@ struct nv50_gpio_priv {
 };
 
 static void
-nv50_gpio_reset(struct nouveau_gpio *gpio)
+nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
 {
        struct nouveau_bios *bios = nouveau_bios(gpio);
        struct nv50_gpio_priv *priv = (void *)gpio;
+       u8 ver, len;
        u16 entry;
-       u8 ver;
        int ent = -1;
 
-       while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
+       while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
                static const u32 regs[] = { 0xe100, 0xe28c };
                u32 data = nv_ro32(bios, entry);
                u8  line =   (data & 0x0000001f);
@@ -48,7 +48,8 @@ nv50_gpio_reset(struct nouveau_gpio *gpio)
                u32 val = (unk1 << 16) | unk0;
                u32 reg = regs[line >> 4]; line &= 0x0f;
 
-               if (func == 0xff)
+               if ( func  == DCB_GPIO_UNUSED ||
+                   (match != DCB_GPIO_UNUSED && match != func))
                        continue;
 
                gpio->set(gpio, 0, func, line, defs);
index 8d18fca..83e8b8f 100644 (file)
@@ -29,15 +29,15 @@ struct nvd0_gpio_priv {
 };
 
 static void
-nvd0_gpio_reset(struct nouveau_gpio *gpio)
+nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
 {
        struct nouveau_bios *bios = nouveau_bios(gpio);
        struct nvd0_gpio_priv *priv = (void *)gpio;
+       u8 ver, len;
        u16 entry;
-       u8 ver;
        int ent = -1;
 
-       while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
+       while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
                u32 data = nv_ro32(bios, entry);
                u8  line =   (data & 0x0000003f);
                u8  defs = !!(data & 0x00000080);
@@ -45,7 +45,8 @@ nvd0_gpio_reset(struct nouveau_gpio *gpio)
                u8  unk0 =   (data & 0x00ff0000) >> 16;
                u8  unk1 =   (data & 0x1f000000) >> 24;
 
-               if (func == 0xff)
+               if ( func  == DCB_GPIO_UNUSED ||
+                   (match != DCB_GPIO_UNUSED && match != func))
                        continue;
 
                gpio->set(gpio, 0, func, line, defs);
index 93e3ddf..e286e13 100644 (file)
@@ -260,7 +260,7 @@ nouveau_mxm_create_(struct nouveau_object *parent,
 
        data = mxm_table(bios, &ver, &len);
        if (!data || !(ver = nv_ro08(bios, data))) {
-               nv_info(mxm, "no VBIOS data, nothing to do\n");
+               nv_debug(mxm, "no VBIOS data, nothing to do\n");
                return 0;
        }
 
index 74c6b42..7a44566 100644 (file)
@@ -2654,6 +2654,35 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                        ib[idx+4] = upper_32_bits(offset) & 0xff;
                }
                break;
+       case PACKET3_MEM_WRITE:
+       {
+               u64 offset;
+
+               if (pkt->count != 3) {
+                       DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+                       return -EINVAL;
+               }
+               r = evergreen_cs_packet_next_reloc(p, &reloc);
+               if (r) {
+                       DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+                       return -EINVAL;
+               }
+               offset = radeon_get_ib_value(p, idx+0);
+               offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+               if (offset & 0x7) {
+                       DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+                       return -EINVAL;
+               }
+               if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+                       DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+                                 offset + 8, radeon_bo_size(reloc->robj));
+                       return -EINVAL;
+               }
+               offset += reloc->lobj.gpu_offset;
+               ib[idx+0] = offset;
+               ib[idx+1] = upper_32_bits(offset) & 0xff;
+               break;
+       }
        case PACKET3_COPY_DW:
                if (pkt->count != 4) {
                        DRM_ERROR("bad COPY_DW (invalid count)\n");
@@ -3287,6 +3316,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
 
        /* check config regs */
        switch (reg) {
+       case WAIT_UNTIL:
        case GRBM_GFX_INDEX:
        case CP_STRMOUT_CNTL:
        case CP_COHER_CNTL:
index 0be768b..9ea13d0 100644 (file)
@@ -2294,6 +2294,35 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        ib[idx+4] = upper_32_bits(offset) & 0xff;
                }
                break;
+       case PACKET3_MEM_WRITE:
+       {
+               u64 offset;
+
+               if (pkt->count != 3) {
+                       DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+                       return -EINVAL;
+               }
+               r = r600_cs_packet_next_reloc(p, &reloc);
+               if (r) {
+                       DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+                       return -EINVAL;
+               }
+               offset = radeon_get_ib_value(p, idx+0);
+               offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+               if (offset & 0x7) {
+                       DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+                       return -EINVAL;
+               }
+               if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+                       DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+                                 offset + 8, radeon_bo_size(reloc->robj));
+                       return -EINVAL;
+               }
+               offset += reloc->lobj.gpu_offset;
+               ib[idx+0] = offset;
+               ib[idx+1] = upper_32_bits(offset) & 0xff;
+               break;
+       }
        case PACKET3_COPY_DW:
                if (pkt->count != 4) {
                        DRM_ERROR("bad COPY_DW (invalid count)\n");
index 5dc744d..9b9422c 100644 (file)
@@ -225,12 +225,13 @@ struct radeon_fence {
 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
 int radeon_fence_driver_init(struct radeon_device *rdev);
 void radeon_fence_driver_fini(struct radeon_device *rdev);
+void radeon_fence_driver_force_completion(struct radeon_device *rdev);
 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
 void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
-void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
 int radeon_fence_wait_any(struct radeon_device *rdev,
                          struct radeon_fence **fences,
                          bool intr);
index 49b0659..cd75626 100644 (file)
@@ -1164,6 +1164,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
        struct drm_crtc *crtc;
        struct drm_connector *connector;
        int i, r;
+       bool force_completion = false;
 
        if (dev == NULL || dev->dev_private == NULL) {
                return -ENODEV;
@@ -1206,8 +1207,16 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 
        mutex_lock(&rdev->ring_lock);
        /* wait for gpu to finish processing current batch */
-       for (i = 0; i < RADEON_NUM_RINGS; i++)
-               radeon_fence_wait_empty_locked(rdev, i);
+       for (i = 0; i < RADEON_NUM_RINGS; i++) {
+               r = radeon_fence_wait_empty_locked(rdev, i);
+               if (r) {
+                       /* delay GPU reset to resume */
+                       force_completion = true;
+               }
+       }
+       if (force_completion) {
+               radeon_fence_driver_force_completion(rdev);
+       }
        mutex_unlock(&rdev->ring_lock);
 
        radeon_save_bios_scratch_regs(rdev);
@@ -1338,7 +1347,6 @@ retry:
        }
 
        radeon_restore_bios_scratch_regs(rdev);
-       drm_helper_resume_force_mode(rdev->ddev);
 
        if (!r) {
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -1358,11 +1366,14 @@ retry:
                        }
                }
        } else {
+               radeon_fence_driver_force_completion(rdev);
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                        kfree(ring_data[i]);
                }
        }
 
+       drm_helper_resume_force_mode(rdev->ddev);
+
        ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
        if (r) {
                /* bad news, how to tell it to userspace ? */
index 9b1a727..ff75934 100644 (file)
  *   2.25.0 - eg+: new info request for num SE and num SH
  *   2.26.0 - r600-eg: fix htile size computation
  *   2.27.0 - r600-SI: Add CS ioctl support for async DMA
+ *   2.28.0 - r600-eg: Add MEM_WRITE packet support
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       27
+#define KMS_DRIVER_MINOR       28
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
index 410a975..3435625 100644 (file)
@@ -609,26 +609,20 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
  * Returns 0 if the fences have passed, error for all other cases.
  * Caller must hold ring lock.
  */
-void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
 {
        uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+       int r;
 
-       while(1) {
-               int r;
-               r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+       r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+       if (r) {
                if (r == -EDEADLK) {
-                       mutex_unlock(&rdev->ring_lock);
-                       r = radeon_gpu_reset(rdev);
-                       mutex_lock(&rdev->ring_lock);
-                       if (!r)
-                               continue;
-               }
-               if (r) {
-                       dev_err(rdev->dev, "error waiting for ring to become"
-                               " idle (%d)\n", r);
+                       return -EDEADLK;
                }
-               return;
+               dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
+                       ring, r);
        }
+       return 0;
 }
 
 /**
@@ -854,13 +848,17 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
  */
 void radeon_fence_driver_fini(struct radeon_device *rdev)
 {
-       int ring;
+       int ring, r;
 
        mutex_lock(&rdev->ring_lock);
        for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
                if (!rdev->fence_drv[ring].initialized)
                        continue;
-               radeon_fence_wait_empty_locked(rdev, ring);
+               r = radeon_fence_wait_empty_locked(rdev, ring);
+               if (r) {
+                       /* no need to trigger GPU reset as we are unloading */
+                       radeon_fence_driver_force_completion(rdev);
+               }
                wake_up_all(&rdev->fence_queue);
                radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
                rdev->fence_drv[ring].initialized = false;
@@ -868,6 +866,25 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
        mutex_unlock(&rdev->ring_lock);
 }
 
+/**
+ * radeon_fence_driver_force_completion - force all fence waiters to complete
+ *
+ * @rdev: radeon device pointer
+ *
+ * In case of GPU reset failure, make sure no process keeps waiting on a
+ * fence that will never complete.
+ */
+void radeon_fence_driver_force_completion(struct radeon_device *rdev)
+{
+       int ring;
+
+       for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+               if (!rdev->fence_drv[ring].initialized)
+                       continue;
+               radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
+       }
+}
+
 
 /*
  * Fence debugfs
index aa14dbb..0bfa656 100644 (file)
@@ -234,7 +234,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
 
 static void radeon_pm_set_clocks(struct radeon_device *rdev)
 {
-       int i;
+       int i, r;
 
        /* no need to take locks, etc. if nothing's going to change */
        if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
@@ -248,8 +248,17 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
        /* wait for the rings to drain */
        for (i = 0; i < RADEON_NUM_RINGS; i++) {
                struct radeon_ring *ring = &rdev->ring[i];
-               if (ring->ready)
-                       radeon_fence_wait_empty_locked(rdev, i);
+               if (!ring->ready) {
+                       continue;
+               }
+               r = radeon_fence_wait_empty_locked(rdev, i);
+               if (r) {
+                       /* needs a GPU reset; don't reset here */
+                       mutex_unlock(&rdev->ring_lock);
+                       up_write(&rdev->pm.mclk_lock);
+                       mutex_unlock(&rdev->ddev->struct_mutex);
+                       return;
+               }
        }
 
        radeon_unmap_vram_bos(rdev);
index 0744103..656b2e3 100644 (file)
@@ -102,12 +102,12 @@ static int tegra_dc_set_timings(struct tegra_dc *dc,
                ((mode->hsync_end - mode->hsync_start) <<  0);
        tegra_dc_writel(dc, value, DC_DISP_SYNC_WIDTH);
 
-       value = ((mode->vsync_start - mode->vdisplay) << 16) |
-               ((mode->hsync_start - mode->hdisplay) <<  0);
-       tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH);
-
        value = ((mode->vtotal - mode->vsync_end) << 16) |
                ((mode->htotal - mode->hsync_end) <<  0);
+       tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH);
+
+       value = ((mode->vsync_start - mode->vdisplay) << 16) |
+               ((mode->hsync_start - mode->hdisplay) <<  0);
        tegra_dc_writel(dc, value, DC_DISP_FRONT_PORCH);
 
        value = (mode->vdisplay << 16) | mode->hdisplay;
@@ -221,8 +221,7 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
        win.stride = crtc->fb->pitches[0];
 
        /* program window registers */
-       value = tegra_dc_readl(dc, DC_CMD_DISPLAY_WINDOW_HEADER);
-       value |= WINDOW_A_SELECT;
+       value = WINDOW_A_SELECT;
        tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
 
        tegra_dc_writel(dc, win.fmt, DC_WIN_COLOR_DEPTH);
index 3a843a7..741b5dc 100644 (file)
@@ -204,24 +204,6 @@ extern int tegra_output_parse_dt(struct tegra_output *output);
 extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
 extern int tegra_output_exit(struct tegra_output *output);
 
-/* from gem.c */
-extern struct tegra_gem_object *tegra_gem_alloc(struct drm_device *drm,
-                                               size_t size);
-extern int tegra_gem_handle_create(struct drm_device *drm,
-                                  struct drm_file *file, size_t size,
-                                  unsigned long flags, uint32_t *handle);
-extern int tegra_gem_dumb_create(struct drm_file *file, struct drm_device *drm,
-                                struct drm_mode_create_dumb *args);
-extern int tegra_gem_dumb_map_offset(struct drm_file *file,
-                                    struct drm_device *drm, uint32_t handle,
-                                    uint64_t *offset);
-extern int tegra_gem_dumb_destroy(struct drm_file *file,
-                                 struct drm_device *drm, uint32_t handle);
-extern int tegra_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-extern int tegra_gem_init_object(struct drm_gem_object *obj);
-extern void tegra_gem_free_object(struct drm_gem_object *obj);
-extern struct vm_operations_struct tegra_gem_vm_ops;
-
 /* from fb.c */
 extern int tegra_drm_fb_init(struct drm_device *drm);
 extern void tegra_drm_fb_exit(struct drm_device *drm);
index ab40164..e060c7e 100644 (file)
@@ -149,7 +149,7 @@ struct tmds_config {
 };
 
 static const struct tmds_config tegra2_tmds_config[] = {
-       { /* 480p modes */
+       { /* slow pixel clock modes */
                .pclk = 27000000,
                .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
                        SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
@@ -163,21 +163,8 @@ static const struct tmds_config tegra2_tmds_config[] = {
                        DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
                        DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
                        DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
-       }, { /* 720p modes */
-               .pclk = 74250000,
-               .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
-                       SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
-                       SOR_PLL_TX_REG_LOAD(3),
-               .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
-               .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
-                       PE_CURRENT1(PE_CURRENT_6_0_mA) |
-                       PE_CURRENT2(PE_CURRENT_6_0_mA) |
-                       PE_CURRENT3(PE_CURRENT_6_0_mA),
-               .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
-                       DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
-                       DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
-                       DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
-       }, { /* 1080p modes */
+       },
+       { /* high pixel clock modes */
                .pclk = UINT_MAX,
                .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
                        SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
@@ -479,7 +466,7 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
                return;
        }
 
-       h_front_porch = mode->htotal - mode->hsync_end;
+       h_front_porch = mode->hsync_start - mode->hdisplay;
        memset(&frame, 0, sizeof(frame));
        frame.r = HDMI_AVI_R_SAME;
 
@@ -634,8 +621,8 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
 
        pclk = mode->clock * 1000;
        h_sync_width = mode->hsync_end - mode->hsync_start;
-       h_front_porch = mode->htotal - mode->hsync_end;
-       h_back_porch = mode->hsync_start - mode->hdisplay;
+       h_back_porch = mode->htotal - mode->hsync_end;
+       h_front_porch = mode->hsync_start - mode->hdisplay;
 
        err = regulator_enable(hdmi->vdd);
        if (err < 0) {
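
Both Tegra display fixes above reduce to the same struct drm_display_mode arithmetic: the front porch runs from the end of the active region to the start of sync, and the back porch from the end of sync to htotal. A reference sketch of those relationships (the helper name is illustrative; drm_display_mode lived in drm_crtc.h at this point):

#include <drm/drm_crtc.h>

/* Horizontal timing terms used by the corrected dc.c and hdmi.c code above;
 * the vertical terms are computed the same way from the v* fields.
 */
static void example_h_timings(const struct drm_display_mode *mode,
                              unsigned int *front, unsigned int *sync,
                              unsigned int *back)
{
        *front = mode->hsync_start - mode->hdisplay;    /* front porch */
        *sync  = mode->hsync_end - mode->hsync_start;   /* sync width  */
        *back  = mode->htotal - mode->hsync_end;        /* back porch  */
}
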
index bdb97a5..5d17b11 100644 (file)
@@ -239,6 +239,8 @@ int host1x_register_client(struct host1x *host1x, struct host1x_client *client)
                }
        }
 
+       client->host1x = host1x;
+
        return 0;
 }
 
index a98c917..789bd4f 100644 (file)
@@ -187,7 +187,7 @@ static struct emc6w201_data *emc6w201_update_device(struct device *dev)
  * Sysfs callback functions
  */
 
-static const u16 nominal_mv[6] = { 2500, 1500, 3300, 5000, 1500, 1500 };
+static const s16 nominal_mv[6] = { 2500, 1500, 3300, 5000, 1500, 1500 };
 
 static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
        char *buf)
index 8fa2632..7272176 100644 (file)
@@ -49,6 +49,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
        struct i2c_client *client = to_i2c_client(dev);
        long temp;
        short value;
+       s32 err;
 
        int status = kstrtol(buf, 10, &temp);
        if (status < 0)
@@ -57,8 +58,8 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
        /* Write value */
        value = (short) SENSORS_LIMIT(temp/250, (LM73_TEMP_MIN*4),
                (LM73_TEMP_MAX*4)) << 5;
-       i2c_smbus_write_word_swapped(client, attr->index, value);
-       return count;
+       err = i2c_smbus_write_word_swapped(client, attr->index, value);
+       return (err < 0) ? err : count;
 }
 
 static ssize_t show_temp(struct device *dev, struct device_attribute *da,
@@ -66,11 +67,16 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
        struct i2c_client *client = to_i2c_client(dev);
+       int temp;
+
+       s32 err = i2c_smbus_read_word_swapped(client, attr->index);
+       if (err < 0)
+               return err;
+
        /* use integer division instead of equivalent right shift to
           guarantee arithmetic shift and preserve the sign */
-       int temp = ((s16) (i2c_smbus_read_word_swapped(client,
-                   attr->index))*250) / 32;
-       return sprintf(buf, "%d\n", temp);
+       temp = (((s16) err) * 250) / 32;
+       return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
 }
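
The lm73 change follows the usual SMBus convention: the word helpers return an s32 that is a negative errno on failure and carries the register data in its low 16 bits on success. A minimal sketch of that convention (hypothetical helper, not part of the driver):

#include <linux/i2c.h>

/* Sketch only: read a signed 16-bit register and propagate bus errors to the
 * caller instead of formatting garbage, as the lm73 fix above now does.
 */
static int example_read_s16(struct i2c_client *client, u8 reg, s16 *out)
{
        s32 err = i2c_smbus_read_word_swapped(client, reg);

        if (err < 0)            /* negative errno from the adapter */
                return err;

        *out = (s16)err;        /* data lives in the low 16 bits */
        return 0;
}
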
 
 
index 1885a26..a0d931b 100644 (file)
@@ -127,8 +127,9 @@ static int create_gpio_led(const struct gpio_led *template,
                led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
 
        ret = devm_gpio_request_one(parent, template->gpio,
-                       GPIOF_DIR_OUT | (led_dat->active_low ^ state),
-                       template->name);
+                                   (led_dat->active_low ^ state) ?
+                                   GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+                                   template->name);
        if (ret < 0)
                return ret;
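
The leds-gpio fix matters because GPIOF_DIR_OUT is the zero value of the direction bit, so OR-ing the computed level into the flags could set the GPIOF_DIR_IN bit instead of requesting a high initial output. For reference, the relevant flag layout from <linux/gpio.h> of this era (quoted from memory, treat as illustrative):

/* bit 0 selects direction, bit 1 the initial output level; the old
 * "GPIOF_DIR_OUT | level" therefore requested an *input* whenever the
 * desired level was 1.
 */
#define GPIOF_DIR_OUT           (0 << 0)
#define GPIOF_DIR_IN            (1 << 0)
#define GPIOF_INIT_LOW          (0 << 1)
#define GPIOF_INIT_HIGH         (1 << 1)
#define GPIOF_OUT_INIT_LOW      (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
#define GPIOF_OUT_INIT_HIGH     (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
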
 
index a9f6de5..2e8c0cb 100644 (file)
@@ -71,8 +71,6 @@
 #include <media/v4l2-common.h>
 #include <media/v4l2-device.h>
 
-#include <plat/cpu.h>
-
 #include "isp.h"
 #include "ispreg.h"
 #include "ispccdc.h"
index 6d6002b..74f1c15 100644 (file)
@@ -141,7 +141,7 @@ static int orion_mdio_reset(struct mii_bus *bus)
        return 0;
 }
 
-static int __devinit orion_mdio_probe(struct platform_device *pdev)
+static int orion_mdio_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        struct mii_bus *bus;
@@ -197,7 +197,7 @@ static int __devinit orion_mdio_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int __devexit orion_mdio_remove(struct platform_device *pdev)
+static int orion_mdio_remove(struct platform_device *pdev)
 {
        struct mii_bus *bus = platform_get_drvdata(pdev);
        mdiobus_unregister(bus);
@@ -214,7 +214,7 @@ MODULE_DEVICE_TABLE(of, orion_mdio_match);
 
 static struct platform_driver orion_mdio_driver = {
        .probe = orion_mdio_probe,
-       .remove = __devexit_p(orion_mdio_remove),
+       .remove = orion_mdio_remove,
        .driver = {
                .name = "orion-mdio",
                .of_match_table = orion_mdio_match,
index 3f8086b..b6025c3 100644 (file)
@@ -635,7 +635,7 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
 
 
 /* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
-static void __devinit mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
+static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
 {
        u32  val;
 
@@ -650,7 +650,7 @@ static void __devinit mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
 }
 
 /* Config SGMII port */
-static void __devinit mvneta_port_sgmii_config(struct mvneta_port *pp)
+static void mvneta_port_sgmii_config(struct mvneta_port *pp)
 {
        u32 val;
 
@@ -2564,7 +2564,7 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
 };
 
 /* Initialize hw */
-static int __devinit mvneta_init(struct mvneta_port *pp, int phy_addr)
+static int mvneta_init(struct mvneta_port *pp, int phy_addr)
 {
        int queue;
 
@@ -2613,9 +2613,8 @@ static void mvneta_deinit(struct mvneta_port *pp)
 }
 
 /* platform glue : initialize decoding windows */
-static void __devinit
-mvneta_conf_mbus_windows(struct mvneta_port *pp,
-                        const struct mbus_dram_target_info *dram)
+static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
+                                    const struct mbus_dram_target_info *dram)
 {
        u32 win_enable;
        u32 win_protect;
@@ -2648,7 +2647,7 @@ mvneta_conf_mbus_windows(struct mvneta_port *pp,
 }
 
 /* Power up the port */
-static void __devinit mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
+static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
 {
        u32 val;
 
@@ -2671,7 +2670,7 @@ static void __devinit mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
 }
 
 /* Device initialization routine */
-static int __devinit mvneta_probe(struct platform_device *pdev)
+static int mvneta_probe(struct platform_device *pdev)
 {
        const struct mbus_dram_target_info *dram_target_info;
        struct device_node *dn = pdev->dev.of_node;
@@ -2803,7 +2802,7 @@ err_free_netdev:
 }
 
 /* Device removal routine */
-static int __devexit mvneta_remove(struct platform_device *pdev)
+static int mvneta_remove(struct platform_device *pdev)
 {
        struct net_device  *dev = platform_get_drvdata(pdev);
        struct mvneta_port *pp = netdev_priv(dev);
@@ -2828,7 +2827,7 @@ MODULE_DEVICE_TABLE(of, mvneta_match);
 
 static struct platform_driver mvneta_driver = {
        .probe = mvneta_probe,
-       .remove = __devexit_p(mvneta_remove),
+       .remove = mvneta_remove,
        .driver = {
                .name = MVNETA_DRIVER_NAME,
                .of_match_table = mvneta_match,
index 5e62c1a..463597f 100644 (file)
@@ -247,8 +247,7 @@ static void cpts_clk_init(struct cpts *cpts)
                cpts->refclk = NULL;
                return;
        }
-       clk_enable(cpts->refclk);
-       cpts->freq = cpts->refclk->recalc(cpts->refclk);
+       clk_prepare_enable(cpts->refclk);
 }
 
 static void cpts_clk_release(struct cpts *cpts)
index e1bba3a..fe993cd 100644 (file)
@@ -120,7 +120,6 @@ struct cpts {
        struct delayed_work overflow_work;
        int phc_index;
        struct clk *refclk;
-       unsigned long freq;
        struct list_head events;
        struct list_head pool;
        struct cpts_event pool_data[CPTS_MAX_EVENTS];
index 504f7f1..fbd106e 100644 (file)
@@ -180,7 +180,6 @@ struct tun_struct {
        int debug;
 #endif
        spinlock_t lock;
-       struct kmem_cache *flow_cache;
        struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
        struct timer_list flow_gc_timer;
        unsigned long ageing_time;
@@ -209,8 +208,8 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
                                              struct hlist_head *head,
                                              u32 rxhash, u16 queue_index)
 {
-       struct tun_flow_entry *e = kmem_cache_alloc(tun->flow_cache,
-                                                   GFP_ATOMIC);
+       struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
+
        if (e) {
                tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
                          rxhash, queue_index);
@@ -223,19 +222,12 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
        return e;
 }
 
-static void tun_flow_free(struct rcu_head *head)
-{
-       struct tun_flow_entry *e
-               = container_of(head, struct tun_flow_entry, rcu);
-       kmem_cache_free(e->tun->flow_cache, e);
-}
-
 static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
 {
        tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
                  e->rxhash, e->queue_index);
        hlist_del_rcu(&e->hash_link);
-       call_rcu(&e->rcu, tun_flow_free);
+       kfree_rcu(e, rcu);
 }
 
 static void tun_flow_flush(struct tun_struct *tun)
@@ -833,12 +825,6 @@ static int tun_flow_init(struct tun_struct *tun)
 {
        int i;
 
-       tun->flow_cache = kmem_cache_create("tun_flow_cache",
-                                           sizeof(struct tun_flow_entry), 0, 0,
-                                           NULL);
-       if (!tun->flow_cache)
-               return -ENOMEM;
-
        for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
                INIT_HLIST_HEAD(&tun->flows[i]);
 
@@ -854,10 +840,6 @@ static void tun_flow_uninit(struct tun_struct *tun)
 {
        del_timer_sync(&tun->flow_gc_timer);
        tun_flow_flush(tun);
-
-       /* Wait for completion of call_rcu()'s */
-       rcu_barrier();
-       kmem_cache_destroy(tun->flow_cache);
 }
 
 /* Initialize net device. */
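
The tun conversion above replaces a private slab cache plus call_rcu() callback with kfree_rcu(), which frees the object after a grace period using only the embedded struct rcu_head; that is also why the teardown path no longer needs rcu_barrier() before destroying a cache. A minimal sketch of the pattern (hypothetical structure, not from the driver):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_entry {
        int value;
        struct rcu_head rcu;    /* reclamation handle used by kfree_rcu() */
};

static void example_delete(struct example_entry *e)
{
        /* Equivalent to call_rcu() with a callback that just kfree()s the
         * containing object; the second argument names the rcu_head member.
         */
        kfree_rcu(e, rcu);
}
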
index 3b3fdf6..40f2cc1 100644 (file)
@@ -505,7 +505,8 @@ static int vxlan_join_group(struct net_device *dev)
        struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        struct sock *sk = vn->sock->sk;
        struct ip_mreqn mreq = {
-               .imr_multiaddr.s_addr = vxlan->gaddr,
+               .imr_multiaddr.s_addr   = vxlan->gaddr,
+               .imr_ifindex            = vxlan->link,
        };
        int err;
 
@@ -532,7 +533,8 @@ static int vxlan_leave_group(struct net_device *dev)
        int err = 0;
        struct sock *sk = vn->sock->sk;
        struct ip_mreqn mreq = {
-               .imr_multiaddr.s_addr = vxlan->gaddr,
+               .imr_multiaddr.s_addr   = vxlan->gaddr,
+               .imr_ifindex            = vxlan->link,
        };
 
        /* Only leave group when last vxlan is done. */
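
Both vxlan hunks add imr_ifindex so the multicast join and leave are pinned to the device referenced by vxlan->link rather than whichever interface the routing table would choose. The same structure drives IP_ADD_MEMBERSHIP from userspace; an illustrative sketch (not part of the patch):

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Illustrative only: join a multicast group on one specific interface by
 * filling in ip_mreqn.imr_ifindex, mirroring what the kernel hunks above do.
 */
static int join_group_on_ifindex(int sock, in_addr_t group, int ifindex)
{
        struct ip_mreqn mreq;

        memset(&mreq, 0, sizeof(mreq));
        mreq.imr_multiaddr.s_addr = group;      /* e.g. the vxlan group address */
        mreq.imr_ifindex = ifindex;             /* bind the membership to one link */

        return setsockopt(sock, IPPROTO_IP, IP_ADD_MEMBERSHIP,
                          &mreq, sizeof(mreq));
}
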
index 18b0bc5..bb7cc90 100644 (file)
@@ -341,7 +341,7 @@ static struct rtl_hal_cfg rtl8723ae_hal_cfg = {
        .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
 };
 
-static struct pci_device_id rtl8723ae_pci_ids[] __devinitdata = {
+static struct pci_device_id rtl8723ae_pci_ids[] = {
        {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8723, rtl8723ae_hal_cfg)},
        {},
 };
index 05b78b1..9c6e9bb 100644 (file)
@@ -422,77 +422,60 @@ static ssize_t sriov_numvfs_show(struct device *dev,
 }
 
 /*
- * num_vfs > 0; number of vfs to enable
- * num_vfs = 0; disable all vfs
+ * num_vfs > 0; number of VFs to enable
+ * num_vfs = 0; disable all VFs
  *
  * Note: SRIOV spec doesn't allow partial VF
- *       disable, so its all or none.
+ *       disable, so it's all or none.
  */
 static ssize_t sriov_numvfs_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
-       int num_vfs_enabled = 0;
-       int num_vfs;
-       int ret = 0;
-       u16 total;
+       int ret;
+       u16 num_vfs;
 
-       if (kstrtoint(buf, 0, &num_vfs) < 0)
-               return -EINVAL;
+       ret = kstrtou16(buf, 0, &num_vfs);
+       if (ret < 0)
+               return ret;
+
+       if (num_vfs > pci_sriov_get_totalvfs(pdev))
+               return -ERANGE;
+
+       if (num_vfs == pdev->sriov->num_VFs)
+               return count;           /* no change */
 
        /* is PF driver loaded w/callback */
        if (!pdev->driver || !pdev->driver->sriov_configure) {
-               dev_info(&pdev->dev,
-                        "Driver doesn't support SRIOV configuration via sysfs\n");
+               dev_info(&pdev->dev, "Driver doesn't support SRIOV configuration via sysfs\n");
                return -ENOSYS;
        }
 
-       /* if enabling vf's ... */
-       total = pci_sriov_get_totalvfs(pdev);
-       /* Requested VFs to enable < totalvfs and none enabled already */
-       if ((num_vfs > 0) && (num_vfs <= total)) {
-               if (pdev->sriov->num_VFs == 0) {
-                       num_vfs_enabled =
-                               pdev->driver->sriov_configure(pdev, num_vfs);
-                       if ((num_vfs_enabled >= 0) &&
-                           (num_vfs_enabled != num_vfs)) {
-                               dev_warn(&pdev->dev,
-                                        "Only %d VFs enabled\n",
-                                        num_vfs_enabled);
-                               return count;
-                       } else if (num_vfs_enabled < 0)
-                               /* error code from driver callback */
-                               return num_vfs_enabled;
-               } else if (num_vfs == pdev->sriov->num_VFs) {
-                       dev_warn(&pdev->dev,
-                                "%d VFs already enabled; no enable action taken\n",
-                                num_vfs);
-                       return count;
-               } else {
-                       dev_warn(&pdev->dev,
-                                "%d VFs already enabled. Disable before enabling %d VFs\n",
-                                pdev->sriov->num_VFs, num_vfs);
-                       return -EINVAL;
-               }
+       if (num_vfs == 0) {
+               /* disable VFs */
+               ret = pdev->driver->sriov_configure(pdev, 0);
+               if (ret < 0)
+                       return ret;
+               return count;
        }
 
-       /* disable vfs */
-       if (num_vfs == 0) {
-               if (pdev->sriov->num_VFs != 0) {
-                       ret = pdev->driver->sriov_configure(pdev, 0);
-                       return ret ? ret : count;
-               } else {
-                       dev_warn(&pdev->dev,
-                                "All VFs disabled; no disable action taken\n");
-                       return count;
-               }
+       /* enable VFs */
+       if (pdev->sriov->num_VFs) {
+               dev_warn(&pdev->dev, "%d VFs already enabled. Disable before enabling %d VFs\n",
+                        pdev->sriov->num_VFs, num_vfs);
+               return -EBUSY;
        }
 
-       dev_err(&pdev->dev,
-               "Invalid value for number of VFs to enable: %d\n", num_vfs);
+       ret = pdev->driver->sriov_configure(pdev, num_vfs);
+       if (ret < 0)
+               return ret;
 
-       return -EINVAL;
+       if (ret != num_vfs)
+               dev_warn(&pdev->dev, "%d VFs requested; only %d enabled\n",
+                        num_vfs, ret);
+
+       return count;
 }
 
 static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs);
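
After the rewrite, sriov_numvfs has simple semantics: writing N enables N VFs (rejected with -EBUSY if any are already enabled), writing 0 disables them all, values above totalvfs get -ERANGE, writing the current value is a no-op, and -ENOSYS means the PF driver has no sriov_configure callback. A minimal userspace sketch; the PCI address below is made up:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Ask the PF at 0000:01:00.0 (example address) for 4 VFs. */
        const char *path = "/sys/bus/pci/devices/0000:01:00.0/sriov_numvfs";
        const char *val = "4\n";
        int fd = open(path, O_WRONLY);

        if (fd < 0 || write(fd, val, strlen(val)) < 0)
                perror(path);   /* EBUSY, ERANGE, ENOSYS as described above */
        if (fd >= 0)
                close(fd);
        return 0;
}
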
index d4824cb..08c243a 100644 (file)
@@ -134,10 +134,28 @@ static int pcie_port_runtime_resume(struct device *dev)
        return 0;
 }
 
+static int pci_dev_pme_poll(struct pci_dev *pdev, void *data)
+{
+       bool *pme_poll = data;
+
+       if (pdev->pme_poll)
+               *pme_poll = true;
+       return 0;
+}
+
 static int pcie_port_runtime_idle(struct device *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev);
+       bool pme_poll = false;
+
+       /*
+        * If any subordinate device needs pme poll, we should keep
+        * the port in D0, because we need port in D0 to poll it.
+        */
+       pci_walk_bus(pdev->subordinate, pci_dev_pme_poll, &pme_poll);
        /* Delay for a short while to prevent too frequent suspend/resume */
-       pm_schedule_suspend(dev, 10);
+       if (!pme_poll)
+               pm_schedule_suspend(dev, 10);
        return -EBUSY;
 }
 #else
index 8f7a634..0369fb6 100644 (file)
@@ -2725,7 +2725,7 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
        if (PCI_FUNC(dev->devfn))
                return;
        /*
-        * RICOH 0xe823 SD/MMC card reader fails to recognize
+        * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize
         * certain types of SD/MMC cards. Lowering the SD base
         * clock frequency from 200Mhz to 50Mhz fixes this issue.
         *
@@ -2736,7 +2736,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
         * 0xf9  - Key register for 0x150
         * 0xfc  - key register for 0xe1
         */
-       if (dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
+       if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
+           dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
                pci_write_config_byte(dev, 0xf9, 0xfc);
                pci_write_config_byte(dev, 0x150, 0x10);
                pci_write_config_byte(dev, 0xf9, 0x00);
@@ -2763,6 +2764,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
 #endif /*CONFIG_MMC_RICOH_MMC*/
index a17d084..6b2238b 100644 (file)
@@ -27,8 +27,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/power/smartreflex.h>
 
-#include <plat/cpu.h>
-
 #define SMARTREFLEX_NAME_LEN   16
 #define NVALUE_NAME_LEN                40
 #define SR_DISABLE_TIMEOUT     200
index 709ea1a..f5ad105 100644 (file)
@@ -72,20 +72,21 @@ static int da9055_wdt_set_timeout(struct watchdog_device *wdt_dev,
                                        DA9055_TWDSCALE_MASK,
                                        da9055_wdt_maps[i].reg_val <<
                                        DA9055_TWDSCALE_SHIFT);
-       if (ret < 0)
+       if (ret < 0) {
                dev_err(da9055->dev,
                        "Failed to update timescale bit, %d\n", ret);
+               return ret;
+       }
 
        wdt_dev->timeout = timeout;
 
-       return ret;
+       return 0;
 }
 
 static int da9055_wdt_ping(struct watchdog_device *wdt_dev)
 {
        struct da9055_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
        struct da9055 *da9055 = driver_data->da9055;
-       int ret;
 
        /*
         * We have a minimum time for watchdog window called TWDMIN. A write
@@ -94,18 +95,12 @@ static int da9055_wdt_ping(struct watchdog_device *wdt_dev)
        mdelay(DA9055_TWDMIN);
 
        /* Reset the watchdog timer */
-       ret = da9055_reg_update(da9055, DA9055_REG_CONTROL_E,
-                               DA9055_WATCHDOG_MASK, 1);
-
-       return ret;
+       return da9055_reg_update(da9055, DA9055_REG_CONTROL_E,
+                                DA9055_WATCHDOG_MASK, 1);
 }
 
 static void da9055_wdt_release_resources(struct kref *r)
 {
-       struct da9055_wdt_data *driver_data =
-               container_of(r, struct da9055_wdt_data, kref);
-
-       kfree(driver_data);
 }
 
 static void da9055_wdt_ref(struct watchdog_device *wdt_dev)
index 34ed61e..b0e541d 100644 (file)
@@ -296,7 +296,6 @@ static int omap_wdt_remove(struct platform_device *pdev)
 {
        struct watchdog_device *wdog = platform_get_drvdata(pdev);
        struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
-       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
        pm_runtime_disable(wdev->dev);
        watchdog_unregister_device(wdog);
index 81918cf..0f03106 100644 (file)
@@ -131,14 +131,21 @@ static int twl4030_wdt_resume(struct platform_device *pdev)
 #define twl4030_wdt_resume         NULL
 #endif
 
+static const struct of_device_id twl_wdt_of_match[] = {
+       { .compatible = "ti,twl4030-wdt", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, twl_wdt_of_match);
+
 static struct platform_driver twl4030_wdt_driver = {
        .probe          = twl4030_wdt_probe,
        .remove         = twl4030_wdt_remove,
        .suspend        = twl4030_wdt_suspend,
        .resume         = twl4030_wdt_resume,
        .driver         = {
-               .owner  = THIS_MODULE,
-               .name   = "twl4030_wdt",
+               .owner          = THIS_MODULE,
+               .name           = "twl4030_wdt",
+               .of_match_table = twl_wdt_of_match,
        },
 };
 
index ea99312..a7b0c2d 100644 (file)
@@ -1935,7 +1935,7 @@ static const unsigned char filename_rev_map[256] = {
  * @src: Source location for the filename to encode
  * @src_size: Size of the source in bytes
  */
-void ecryptfs_encode_for_filename(unsigned char *dst, size_t *dst_size,
+static void ecryptfs_encode_for_filename(unsigned char *dst, size_t *dst_size,
                                  unsigned char *src, size_t src_size)
 {
        size_t num_blocks;
index 809e67d..f1ea610 100644 (file)
@@ -102,12 +102,12 @@ int __init ecryptfs_init_kthread(void)
 
 void ecryptfs_destroy_kthread(void)
 {
-       struct ecryptfs_open_req *req;
+       struct ecryptfs_open_req *req, *tmp;
 
        mutex_lock(&ecryptfs_kthread_ctl.mux);
        ecryptfs_kthread_ctl.flags |= ECRYPTFS_KTHREAD_ZOMBIE;
-       list_for_each_entry(req, &ecryptfs_kthread_ctl.req_list,
-                           kthread_ctl_list) {
+       list_for_each_entry_safe(req, tmp, &ecryptfs_kthread_ctl.req_list,
+                                kthread_ctl_list) {
                list_del(&req->kthread_ctl_list);
                *req->lower_file = ERR_PTR(-EIO);
                complete(&req->done);
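
The switch to list_for_each_entry_safe() is needed because the loop body deletes the entry it is standing on; the plain iterator would read the next pointer after the node has been unlinked. The _safe variant remembers the next entry before the body runs. The same idea with a hand-rolled userspace list (names invented):

#include <stdio.h>
#include <stdlib.h>

struct node { int v; struct node *next; };

int main(void)
{
        struct node *head = NULL, *n, *next;
        int i;

        for (i = 0; i < 3; i++) {       /* build 2 -> 1 -> 0 */
                n = malloc(sizeof(*n));
                n->v = i;
                n->next = head;
                head = n;
        }

        for (n = head; n; n = next) {   /* "safe" traversal */
                next = n->next;         /* saved before n is freed */
                printf("freeing %d\n", n->v);
                free(n);
        }
        return 0;
}
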
index bd1d57f..564a1fa 100644 (file)
@@ -338,7 +338,8 @@ static int ecryptfs_write_begin(struct file *file,
                        if (prev_page_end_size
                            >= i_size_read(page->mapping->host)) {
                                zero_user(page, 0, PAGE_CACHE_SIZE);
-                       } else {
+                               SetPageUptodate(page);
+                       } else if (len < PAGE_CACHE_SIZE) {
                                rc = ecryptfs_decrypt_page(page);
                                if (rc) {
                                        printk(KERN_ERR "%s: Error decrypting "
@@ -348,8 +349,8 @@ static int ecryptfs_write_begin(struct file *file,
                                        ClearPageUptodate(page);
                                        goto out;
                                }
+                               SetPageUptodate(page);
                        }
-                       SetPageUptodate(page);
                }
        }
        /* If creating a page or more of holes, zero them out via truncate.
@@ -499,6 +500,13 @@ static int ecryptfs_write_end(struct file *file,
                }
                goto out;
        }
+       if (!PageUptodate(page)) {
+               if (copied < PAGE_CACHE_SIZE) {
+                       rc = 0;
+                       goto out;
+               }
+               SetPageUptodate(page);
+       }
        /* Fills in zeros if 'to' goes beyond inode size */
        rc = fill_zeros_to_end_of_page(page, to);
        if (rc) {
index be56b21..9fec183 100644 (file)
@@ -1313,7 +1313,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
         * otherwise we might miss an event that happens between the
         * f_op->poll() call and the new event set registering.
         */
-       epi->event.events = event->events;
+       epi->event.events = event->events; /* need barrier below */
        pt._key = event->events;
        epi->event.data = event->data; /* protected by mtx */
        if (epi->event.events & EPOLLWAKEUP) {
@@ -1324,6 +1324,26 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
        }
 
        /*
+        * The following barrier has two effects:
+        *
+        * 1) Flush epi changes above to other CPUs.  This ensures
+        *    we do not miss events from ep_poll_callback if an
+        *    event occurs immediately after we call f_op->poll().
+        *    We need this because we did not take ep->lock while
+        *    changing epi above (but ep_poll_callback does take
+        *    ep->lock).
+        *
+        * 2) We also need to ensure we do not miss _past_ events
+        *    when calling f_op->poll().  This barrier also
+        *    pairs with the barrier in wq_has_sleeper (see
+        *    comments for wq_has_sleeper).
+        *
+        * This barrier will now guarantee ep_poll_callback or f_op->poll
+        * (or both) will notice the readiness of an item.
+        */
+       smp_mb();
+
+       /*
         * Get current event bits. We can safely use the file* here because
         * its usage count has been increased by the caller of this function.
         */
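
The comment added above describes a store/load pairing: ep_modify() publishes the new event mask and then polls the file, while ep_poll_callback() publishes readiness and then checks for waiters; with a full barrier on each side, at least one of them must observe the other's store. The bare pattern as a userspace C11 sketch, not the epoll code itself:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int x, y;
static int rx, ry;

static void *t1(void *arg)
{
        atomic_store_explicit(&x, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* like smp_mb() */
        ry = atomic_load_explicit(&y, memory_order_relaxed);
        return arg;
}

static void *t2(void *arg)
{
        atomic_store_explicit(&y, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        rx = atomic_load_explicit(&x, memory_order_relaxed);
        return arg;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, t1, NULL);
        pthread_create(&b, NULL, t2, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        /* The fences rule out rx == 0 && ry == 0 (store buffering). */
        printf("rx=%d ry=%d\n", rx, ry);
        return 0;
}
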
index 26af228..5ae1674 100644 (file)
@@ -2226,13 +2226,14 @@ errout:
  * removes index from the index block.
  */
 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
-                       struct ext4_ext_path *path)
+                       struct ext4_ext_path *path, int depth)
 {
        int err;
        ext4_fsblk_t leaf;
 
        /* free index block */
-       path--;
+       depth--;
+       path = path + depth;
        leaf = ext4_idx_pblock(path->p_idx);
        if (unlikely(path->p_hdr->eh_entries == 0)) {
                EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
@@ -2257,6 +2258,19 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
 
        ext4_free_blocks(handle, inode, NULL, leaf, 1,
                         EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
+
+       while (--depth >= 0) {
+               if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
+                       break;
+               path--;
+               err = ext4_ext_get_access(handle, inode, path);
+               if (err)
+                       break;
+               path->p_idx->ei_block = (path+1)->p_idx->ei_block;
+               err = ext4_ext_dirty(handle, inode, path);
+               if (err)
+                       break;
+       }
        return err;
 }
 
@@ -2599,7 +2613,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
        /* if this leaf is free, then we should
         * remove it from index block above */
        if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
-               err = ext4_ext_rm_idx(handle, inode, path + depth);
+               err = ext4_ext_rm_idx(handle, inode, path, depth);
 
 out:
        return err;
@@ -2802,7 +2816,7 @@ again:
                                /* index is empty, remove it;
                                 * handle must be already prepared by the
                                 * truncatei_leaf() */
-                               err = ext4_ext_rm_idx(handle, inode, path + i);
+                               err = ext4_ext_rm_idx(handle, inode, path, i);
                        }
                        /* root level has p_bh == NULL, brelse() eats this */
                        brelse(path[i].p_bh);
index d07c27c..405565a 100644 (file)
@@ -108,14 +108,6 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
 
        /* Unaligned direct AIO must be serialized; see comment above */
        if (unaligned_aio) {
-               static unsigned long unaligned_warn_time;
-
-               /* Warn about this once per day */
-               if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ))
-                       ext4_msg(inode->i_sb, KERN_WARNING,
-                                "Unaligned AIO/DIO on inode %ld by %s; "
-                                "performance will be poor.",
-                                inode->i_ino, current->comm);
                mutex_lock(ext4_aio_mutex(inode));
                ext4_unwritten_wait(inode);
        }
index dfbc1fe..3278e64 100644 (file)
@@ -109,8 +109,6 @@ static int __sync_inode(struct inode *inode, int datasync)
  *
  * What we do is just kick off a commit and wait on it.  This will snapshot the
  * inode to disk.
- *
- * i_mutex lock is held when entering and exiting this function
  */
 
 int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
index cb1c1ab..cbfe13b 100644 (file)
@@ -2880,8 +2880,6 @@ static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offs
 
 static void ext4_invalidatepage(struct page *page, unsigned long offset)
 {
-       journal_t *journal = EXT4_JOURNAL(page->mapping->host);
-
        trace_ext4_invalidatepage(page, offset);
 
        /*
@@ -2889,16 +2887,34 @@ static void ext4_invalidatepage(struct page *page, unsigned long offset)
         */
        if (ext4_should_dioread_nolock(page->mapping->host))
                ext4_invalidatepage_free_endio(page, offset);
+
+       /* No journalling happens on data buffers when this function is used */
+       WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
+
+       block_invalidatepage(page, offset);
+}
+
+static int __ext4_journalled_invalidatepage(struct page *page,
+                                           unsigned long offset)
+{
+       journal_t *journal = EXT4_JOURNAL(page->mapping->host);
+
+       trace_ext4_journalled_invalidatepage(page, offset);
+
        /*
         * If it's a full truncate we just forget about the pending dirtying
         */
        if (offset == 0)
                ClearPageChecked(page);
 
-       if (journal)
-               jbd2_journal_invalidatepage(journal, page, offset);
-       else
-               block_invalidatepage(page, offset);
+       return jbd2_journal_invalidatepage(journal, page, offset);
+}
+
+/* Wrapper for aops... */
+static void ext4_journalled_invalidatepage(struct page *page,
+                                          unsigned long offset)
+{
+       WARN_ON(__ext4_journalled_invalidatepage(page, offset) < 0);
 }
 
 static int ext4_releasepage(struct page *page, gfp_t wait)
@@ -3264,7 +3280,7 @@ static const struct address_space_operations ext4_journalled_aops = {
        .write_end              = ext4_journalled_write_end,
        .set_page_dirty         = ext4_journalled_set_page_dirty,
        .bmap                   = ext4_bmap,
-       .invalidatepage         = ext4_invalidatepage,
+       .invalidatepage         = ext4_journalled_invalidatepage,
        .releasepage            = ext4_releasepage,
        .direct_IO              = ext4_direct_IO,
        .is_partially_uptodate  = block_is_partially_uptodate,
@@ -4305,6 +4321,47 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
 }
 
 /*
+ * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
+ * buffers that are attached to a page stradding i_size and are undergoing
+ * commit. In that case we have to wait for commit to finish and try again.
+ */
+static void ext4_wait_for_tail_page_commit(struct inode *inode)
+{
+       struct page *page;
+       unsigned offset;
+       journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+       tid_t commit_tid = 0;
+       int ret;
+
+       offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
+       /*
+        * All buffers in the last page remain valid? Then there's nothing to
+        * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
+        * blocksize case
+        */
+       if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits))
+               return;
+       while (1) {
+               page = find_lock_page(inode->i_mapping,
+                                     inode->i_size >> PAGE_CACHE_SHIFT);
+               if (!page)
+                       return;
+               ret = __ext4_journalled_invalidatepage(page, offset);
+               unlock_page(page);
+               page_cache_release(page);
+               if (ret != -EBUSY)
+                       return;
+               commit_tid = 0;
+               read_lock(&journal->j_state_lock);
+               if (journal->j_committing_transaction)
+                       commit_tid = journal->j_committing_transaction->t_tid;
+               read_unlock(&journal->j_state_lock);
+               if (commit_tid)
+                       jbd2_log_wait_commit(journal, commit_tid);
+       }
+}
+
+/*
  * ext4_setattr()
  *
  * Called from notify_change.
@@ -4417,16 +4474,28 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
        }
 
        if (attr->ia_valid & ATTR_SIZE) {
-               if (attr->ia_size != i_size_read(inode)) {
-                       truncate_setsize(inode, attr->ia_size);
-                       /* Inode size will be reduced, wait for dio in flight.
-                        * Temporarily disable dioread_nolock to prevent
-                        * livelock. */
+               if (attr->ia_size != inode->i_size) {
+                       loff_t oldsize = inode->i_size;
+
+                       i_size_write(inode, attr->ia_size);
+                       /*
+                        * Blocks are going to be removed from the inode. Wait
+                        * for dio in flight.  Temporarily disable
+                        * dioread_nolock to prevent livelock.
+                        */
                        if (orphan) {
-                               ext4_inode_block_unlocked_dio(inode);
-                               inode_dio_wait(inode);
-                               ext4_inode_resume_unlocked_dio(inode);
+                               if (!ext4_should_journal_data(inode)) {
+                                       ext4_inode_block_unlocked_dio(inode);
+                                       inode_dio_wait(inode);
+                                       ext4_inode_resume_unlocked_dio(inode);
+                               } else
+                                       ext4_wait_for_tail_page_commit(inode);
                        }
+                       /*
+                        * Truncate pagecache after we've waited for commit
+                        * in data=journal mode to make pages freeable.
+                        */
+                       truncate_pagecache(inode, oldsize, inode->i_size);
                }
                ext4_truncate(inode);
        }
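
ext4_wait_for_tail_page_commit() above leans on two pieces of mask/shift arithmetic: i_size & (PAGE_CACHE_SIZE - 1) is the in-page position of EOF, and if that position falls inside the last block of the page then no buffer straddles i_size and there is nothing to invalidate. A small worked example, assuming 4 KiB pages and 1 KiB blocks:

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096, blkbits = 10;   /* 1 KiB blocks */
        long long i_size = 10000;                       /* example size */

        unsigned long offset = i_size & (page_size - 1);        /* 1808 */
        unsigned long index  = i_size >> 12;    /* shift for 4 KiB: page 2 */
        int nothing_to_do = offset > page_size - (1UL << blkbits);

        printf("tail page %lu, offset %lu, skip=%d\n",
               index, offset, nothing_to_do);   /* 2, 1808, 0 */
        return 0;
}
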
index cac4482..8990165 100644 (file)
@@ -2648,7 +2648,8 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
        struct ext4_iloc iloc;
        int err = 0;
 
-       if (!EXT4_SB(inode->i_sb)->s_journal)
+       if ((!EXT4_SB(inode->i_sb)->s_journal) &&
+           !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS))
                return 0;
 
        mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
index 3cdb0a2..3d4fb81 100644 (file)
@@ -1645,9 +1645,7 @@ static int parse_options(char *options, struct super_block *sb,
                         unsigned int *journal_ioprio,
                         int is_remount)
 {
-#ifdef CONFIG_QUOTA
        struct ext4_sb_info *sbi = EXT4_SB(sb);
-#endif
        char *p;
        substring_t args[MAX_OPT_ARGS];
        int token;
@@ -1696,6 +1694,16 @@ static int parse_options(char *options, struct super_block *sb,
                }
        }
 #endif
+       if (test_opt(sb, DIOREAD_NOLOCK)) {
+               int blocksize =
+                       BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
+
+               if (blocksize < PAGE_CACHE_SIZE) {
+                       ext4_msg(sb, KERN_ERR, "can't mount with "
+                                "dioread_nolock if block size != PAGE_SIZE");
+                       return 0;
+               }
+       }
        return 1;
 }
 
@@ -2212,7 +2220,9 @@ static void ext4_orphan_cleanup(struct super_block *sb,
                                __func__, inode->i_ino, inode->i_size);
                        jbd_debug(2, "truncating inode %lu to %lld bytes\n",
                                  inode->i_ino, inode->i_size);
+                       mutex_lock(&inode->i_mutex);
                        ext4_truncate(inode);
+                       mutex_unlock(&inode->i_mutex);
                        nr_truncates++;
                } else {
                        ext4_msg(sb, KERN_DEBUG,
@@ -3223,6 +3233,10 @@ int ext4_calculate_overhead(struct super_block *sb)
                        memset(buf, 0, PAGE_SIZE);
                cond_resched();
        }
+       /* Add the journal blocks as well */
+       if (sbi->s_journal)
+               overhead += EXT4_B2C(sbi, sbi->s_journal->j_maxlen);
+
        sbi->s_overhead = overhead;
        smp_wmb();
        free_page((unsigned long) buf);
@@ -3436,15 +3450,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                        clear_opt(sb, DELALLOC);
        }
 
-       blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
-       if (test_opt(sb, DIOREAD_NOLOCK)) {
-               if (blocksize < PAGE_SIZE) {
-                       ext4_msg(sb, KERN_ERR, "can't mount with "
-                                "dioread_nolock if block size != PAGE_SIZE");
-                       goto failed_mount;
-               }
-       }
-
        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
 
@@ -3486,6 +3491,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
                goto failed_mount;
 
+       blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
        if (blocksize < EXT4_MIN_BLOCK_SIZE ||
            blocksize > EXT4_MAX_BLOCK_SIZE) {
                ext4_msg(sb, KERN_ERR,
@@ -4725,7 +4731,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        }
 
        ext4_setup_system_zone(sb);
-       if (sbi->s_journal == NULL)
+       if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
                ext4_commit_super(sb, 1);
 
 #ifdef CONFIG_QUOTA
index fed74d1..e95b949 100644 (file)
@@ -82,7 +82,6 @@ static struct posix_acl *f2fs_acl_from_disk(const char *value, size_t size)
                case ACL_GROUP_OBJ:
                case ACL_MASK:
                case ACL_OTHER:
-                       acl->a_entries[i].e_id = ACL_UNDEFINED_ID;
                        entry = (struct f2fs_acl_entry *)((char *)entry +
                                        sizeof(struct f2fs_acl_entry_short));
                        break;
index 8dad6b0..b906ed1 100644 (file)
@@ -241,6 +241,7 @@ static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
 
 static void gfs2_reverse_hex(char *c, u64 value)
 {
+       *c = '0';
        while (value) {
                *c-- = hex_asc[value & 0x0f];
                value >>= 4;
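
The added *c = '0' seeds the buffer so that a value of zero still renders as "0"; the loop writes hex digits right to left and would otherwise leave the field blank. A standalone sketch of the helper:

#include <stdio.h>
#include <string.h>

static void reverse_hex(char *c, unsigned long long value)
{
        *c = '0';                               /* the fix above */
        while (value) {
                *c-- = "0123456789abcdef"[value & 0x0f];
                value >>= 4;
        }
}

int main(void)
{
        char buf[17];

        memset(buf, ' ', 16);
        buf[16] = '\0';
        reverse_hex(buf + 15, 0x2a);
        printf("[%s]\n", buf);          /* [              2a] */

        memset(buf, ' ', 16);
        reverse_hex(buf + 15, 0);
        printf("[%s]\n", buf);          /* [               0], not blank */
        return 0;
}
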
index 37ee061..b7eff07 100644 (file)
@@ -350,10 +350,14 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
                BUG_ON(len < chunk_size);
                len -= chunk_size;
                block = gfs2_rbm_to_block(&rbm);
-               gfs2_rbm_from_block(&rbm, block + chunk_size);
-               n_unaligned = 3;
-               if (ptr)
+               if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
+                       n_unaligned = 0;
                        break;
+               }
+               if (ptr) {
+                       n_unaligned = 3;
+                       break;
+               }
                n_unaligned = len & 3;
        }
 
@@ -557,22 +561,20 @@ void gfs2_free_clones(struct gfs2_rgrpd *rgd)
  */
 int gfs2_rs_alloc(struct gfs2_inode *ip)
 {
-       struct gfs2_blkreserv *res;
+       int error = 0;
 
+       down_write(&ip->i_rw_mutex);
        if (ip->i_res)
-               return 0;
-
-       res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
-       if (!res)
-               return -ENOMEM;
+               goto out;
 
-       RB_CLEAR_NODE(&res->rs_node);
+       ip->i_res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
+       if (!ip->i_res) {
+               error = -ENOMEM;
+               goto out;
+       }
 
-       down_write(&ip->i_rw_mutex);
-       if (ip->i_res)
-               kmem_cache_free(gfs2_rsrv_cachep, res);
-       else
-               ip->i_res = res;
+       RB_CLEAR_NODE(&ip->i_res->rs_node);
+out:
        up_write(&ip->i_rw_mutex);
        return 0;
 }
@@ -1424,6 +1426,9 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
                rs->rs_free = extlen;
                rs->rs_inum = ip->i_no_addr;
                rs_insert(ip);
+       } else {
+               if (goal == rgd->rd_last_alloc + rgd->rd_data0)
+                       rgd->rd_last_alloc = 0;
        }
 }
 
index 42f6615..df9f297 100644 (file)
@@ -209,7 +209,8 @@ repeat:
                if (!new_transaction)
                        goto alloc_transaction;
                write_lock(&journal->j_state_lock);
-               if (!journal->j_running_transaction) {
+               if (!journal->j_running_transaction &&
+                   !journal->j_barrier_count) {
                        jbd2_get_transaction(journal, new_transaction);
                        new_transaction = NULL;
                }
@@ -1839,7 +1840,6 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
 
        BUFFER_TRACE(bh, "entry");
 
-retry:
        /*
         * It is safe to proceed here without the j_list_lock because the
         * buffers cannot be stolen by try_to_free_buffers as long as we are
@@ -1934,14 +1934,11 @@ retry:
                 * for commit and try again.
                 */
                if (partial_page) {
-                       tid_t tid = journal->j_committing_transaction->t_tid;
-
                        jbd2_journal_put_journal_head(jh);
                        spin_unlock(&journal->j_list_lock);
                        jbd_unlock_bh_state(bh);
                        write_unlock(&journal->j_state_lock);
-                       jbd2_log_wait_commit(journal, tid);
-                       goto retry;
+                       return -EBUSY;
                }
                /*
                 * OK, buffer won't be reachable after truncate. We just set
@@ -2002,21 +1999,23 @@ zap_buffer_unlocked:
  * @page:    page to flush
  * @offset:  length of page to invalidate.
  *
- * Reap page buffers containing data after offset in page.
- *
+ * Reap page buffers containing data after offset in page. Can return -EBUSY
+ * if buffers are part of the committing transaction and the page is straddling
+ * i_size. Caller then has to wait for current commit and try again.
  */
-void jbd2_journal_invalidatepage(journal_t *journal,
-                     struct page *page,
-                     unsigned long offset)
+int jbd2_journal_invalidatepage(journal_t *journal,
+                               struct page *page,
+                               unsigned long offset)
 {
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;
        int may_free = 1;
+       int ret = 0;
 
        if (!PageLocked(page))
                BUG();
        if (!page_has_buffers(page))
-               return;
+               return 0;
 
        /* We will potentially be playing with lists other than just the
         * data lists (especially for journaled data mode), so be
@@ -2030,9 +2029,11 @@ void jbd2_journal_invalidatepage(journal_t *journal,
                if (offset <= curr_off) {
                        /* This block is wholly outside the truncation point */
                        lock_buffer(bh);
-                       may_free &= journal_unmap_buffer(journal, bh,
-                                                        offset > 0);
+                       ret = journal_unmap_buffer(journal, bh, offset > 0);
                        unlock_buffer(bh);
+                       if (ret < 0)
+                               return ret;
+                       may_free &= ret;
                }
                curr_off = next_off;
                bh = next;
@@ -2043,6 +2044,7 @@ void jbd2_journal_invalidatepage(journal_t *journal,
                if (may_free && try_to_free_buffers(page))
                        J_ASSERT(!page_has_buffers(page));
        }
+       return 0;
 }
 
 /*
index e064f56..76ddae8 100644 (file)
@@ -352,18 +352,18 @@ retry:
        if (!ida_pre_get(&proc_inum_ida, GFP_KERNEL))
                return -ENOMEM;
 
-       spin_lock_bh(&proc_inum_lock);
+       spin_lock_irq(&proc_inum_lock);
        error = ida_get_new(&proc_inum_ida, &i);
-       spin_unlock_bh(&proc_inum_lock);
+       spin_unlock_irq(&proc_inum_lock);
        if (error == -EAGAIN)
                goto retry;
        else if (error)
                return error;
 
        if (i > UINT_MAX - PROC_DYNAMIC_FIRST) {
-               spin_lock_bh(&proc_inum_lock);
+               spin_lock_irq(&proc_inum_lock);
                ida_remove(&proc_inum_ida, i);
-               spin_unlock_bh(&proc_inum_lock);
+               spin_unlock_irq(&proc_inum_lock);
                return -ENOSPC;
        }
        *inum = PROC_DYNAMIC_FIRST + i;
@@ -372,9 +372,10 @@ retry:
 
 void proc_free_inum(unsigned int inum)
 {
-       spin_lock_bh(&proc_inum_lock);
+       unsigned long flags;
+       spin_lock_irqsave(&proc_inum_lock, flags);
        ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
-       spin_unlock_bh(&proc_inum_lock);
+       spin_unlock_irqrestore(&proc_inum_lock, flags);
 }
 
 static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
index 448455b..ca5ce7f 100644 (file)
@@ -1278,7 +1278,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
        walk.mm = mm;
 
        pol = get_vma_policy(task, vma, vma->vm_start);
-       mpol_to_str(buffer, sizeof(buffer), pol, 0);
+       mpol_to_str(buffer, sizeof(buffer), pol);
        mpol_cond_put(pol);
 
        seq_printf(m, "%08lx %s", vma->vm_start, buffer);
index 83256b6..1dfd33e 100644 (file)
@@ -1,8 +1,5 @@
 # Top-level Makefile calls into asm-$(ARCH)
 # List only non-arch directories below
 
-header-y += linux/
-header-y += sound/
-header-y += rdma/
 header-y += video/
 header-y += scsi/
index 06d7f79..0f4a366 100644 (file)
@@ -158,12 +158,29 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
        return drm_mm_get_block_range_generic(parent, size, alignment, 0,
                                                start, end, 1);
 }
-extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
-                             unsigned long size, unsigned alignment);
+
+extern int drm_mm_insert_node(struct drm_mm *mm,
+                             struct drm_mm_node *node,
+                             unsigned long size,
+                             unsigned alignment);
 extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
                                       struct drm_mm_node *node,
-                                      unsigned long size, unsigned alignment,
-                                      unsigned long start, unsigned long end);
+                                      unsigned long size,
+                                      unsigned alignment,
+                                      unsigned long start,
+                                      unsigned long end);
+extern int drm_mm_insert_node_generic(struct drm_mm *mm,
+                                     struct drm_mm_node *node,
+                                     unsigned long size,
+                                     unsigned alignment,
+                                     unsigned long color);
+extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
+                                      struct drm_mm_node *node,
+                                      unsigned long size,
+                                      unsigned alignment,
+                                      unsigned long color,
+                                      unsigned long start,
+                                      unsigned long end);
 extern void drm_mm_put_block(struct drm_mm_node *cur);
 extern void drm_mm_remove_node(struct drm_mm_node *node);
 extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
deleted file mode 100644 (file)
index 7fe2dae..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-header-y += dvb/
-header-y += hdlc/
-header-y += hsi/
-header-y += raid/
-header-y += usb/
diff --git a/include/linux/hdlc/Kbuild b/include/linux/hdlc/Kbuild
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/include/linux/hsi/Kbuild b/include/linux/hsi/Kbuild
deleted file mode 100644 (file)
index e69de29..0000000
index 1be23d9..e30b663 100644 (file)
@@ -1098,7 +1098,7 @@ void               jbd2_journal_set_triggers(struct buffer_head *,
 extern int      jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
 extern int      jbd2_journal_forget (handle_t *, struct buffer_head *);
 extern void     journal_sync_buffer (struct buffer_head *);
-extern void     jbd2_journal_invalidatepage(journal_t *,
+extern int      jbd2_journal_invalidatepage(journal_t *,
                                struct page *, unsigned long);
 extern int      jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
 extern int      jbd2_journal_stop(handle_t *);
index 9adc270..0d7df39 100644 (file)
@@ -123,7 +123,7 @@ struct sp_node {
 
 struct shared_policy {
        struct rb_root root;
-       struct mutex mutex;
+       spinlock_t lock;
 };
 
 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
@@ -165,11 +165,10 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 
 
 #ifdef CONFIG_TMPFS
-extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
+extern int mpol_parse_str(char *str, struct mempolicy **mpol);
 #endif
 
-extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
-                       int no_context);
+extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
 
 /* Check if a vma is migratable */
 static inline int vma_migratable(struct vm_area_struct *vma)
@@ -296,15 +295,13 @@ static inline void check_highest_zone(int k)
 }
 
 #ifdef CONFIG_TMPFS
-static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
-                               int no_context)
+static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
 {
        return 1;       /* error */
 }
 #endif
 
-static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
-                               int no_context)
+static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
 {
        return 0;
 }
index 02e0f6b..c599e47 100644 (file)
@@ -1576,7 +1576,7 @@ extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 
 extern rwlock_t                                dev_base_lock;          /* Device list lock */
 
-extern seqlock_t       devnet_rename_seq;      /* Device rename lock */
+extern seqcount_t      devnet_rename_seq;      /* Device rename seq */
 
 
 #define for_each_netdev(net, d)                \
index b5d1384..70473da 100644 (file)
@@ -362,7 +362,7 @@ static inline void ClearPageCompound(struct page *page)
  * pages on the LRU and/or pagecache.
  */
 TESTPAGEFLAG(Compound, compound)
-__PAGEFLAG(Head, compound)
+__SETPAGEFLAG(Head, compound)  __CLEARPAGEFLAG(Head, compound)
 
 /*
  * PG_reclaim is used in combination with PG_compound to mark the
@@ -374,8 +374,14 @@ __PAGEFLAG(Head, compound)
  * PG_compound & PG_reclaim    => Tail page
  * PG_compound & ~PG_reclaim   => Head page
  */
+#define PG_head_mask ((1L << PG_compound))
 #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
 
+static inline int PageHead(struct page *page)
+{
+       return ((page->flags & PG_head_tail_mask) == PG_head_mask);
+}
+
 static inline int PageTail(struct page *page)
 {
        return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
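
The hunk encodes three page states in two flag bits: PG_compound alone means head, PG_compound plus PG_reclaim means tail, neither means an ordinary page, and one masked comparison tells them apart. The same encoding in isolation; the bit positions here are arbitrary, not the real PG_* values:

#include <stdio.h>

#define F_COMPOUND      (1UL << 0)
#define F_RECLAIM       (1UL << 1)
#define HEAD_TAIL_MASK  (F_COMPOUND | F_RECLAIM)
#define HEAD_MASK       (F_COMPOUND)

static int is_head(unsigned long flags)
{
        return (flags & HEAD_TAIL_MASK) == HEAD_MASK;
}

static int is_tail(unsigned long flags)
{
        return (flags & HEAD_TAIL_MASK) == HEAD_TAIL_MASK;
}

int main(void)
{
        unsigned long head = F_COMPOUND;
        unsigned long tail = F_COMPOUND | F_RECLAIM;
        unsigned long plain = 0;

        printf("head %d/%d  tail %d/%d  plain %d/%d\n",
               is_head(head), is_tail(head),
               is_head(tail), is_tail(tail),
               is_head(plain), is_tail(plain));         /* 1/0 0/1 0/0 */
        return 0;
}
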
index 0f84473..0eb6579 100644 (file)
 #define PCI_DEVICE_ID_RICOH_RL5C476    0x0476
 #define PCI_DEVICE_ID_RICOH_RL5C478    0x0478
 #define PCI_DEVICE_ID_RICOH_R5C822     0x0822
+#define PCI_DEVICE_ID_RICOH_R5CE822    0xe822
 #define PCI_DEVICE_ID_RICOH_R5CE823    0xe823
 #define PCI_DEVICE_ID_RICOH_R5C832     0x0832
 #define PCI_DEVICE_ID_RICOH_R5C843     0x0843
index b152d44..2381c97 100644 (file)
@@ -121,6 +121,7 @@ int next_pidmap(struct pid_namespace *pid_ns, unsigned int last);
 
 extern struct pid *alloc_pid(struct pid_namespace *ns);
 extern void free_pid(struct pid *pid);
+extern void disable_pid_allocation(struct pid_namespace *ns);
 
 /*
  * ns_of_pid() returns the pid namespace in which the specified pid was
index bf28599..215e5e3 100644 (file)
@@ -21,7 +21,7 @@ struct pid_namespace {
        struct kref kref;
        struct pidmap pidmap[PIDMAP_ENTRIES];
        int last_pid;
-       int nr_hashed;
+       unsigned int nr_hashed;
        struct task_struct *child_reaper;
        struct kmem_cache *pid_cachep;
        unsigned int level;
@@ -42,6 +42,8 @@ struct pid_namespace {
 
 extern struct pid_namespace init_pid_ns;
 
+#define PIDNS_HASH_ADDING (1U << 31)
+
 #ifdef CONFIG_PID_NS
 static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
 {
diff --git a/include/linux/raid/Kbuild b/include/linux/raid/Kbuild
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/include/linux/usb/Kbuild b/include/linux/usb/Kbuild
deleted file mode 100644 (file)
index e69de29..0000000
index 93a6745..182ca99 100644 (file)
@@ -367,7 +367,7 @@ struct sock {
        unsigned short          sk_ack_backlog;
        unsigned short          sk_max_ack_backlog;
        __u32                   sk_priority;
-#ifdef CONFIG_CGROUPS
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
        __u32                   sk_cgrp_prioidx;
 #endif
        struct pid              *sk_peer_pid;
diff --git a/include/rdma/Kbuild b/include/rdma/Kbuild
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/include/sound/Kbuild b/include/sound/Kbuild
deleted file mode 100644 (file)
index e69de29..0000000
index f6372b0..7e8c36b 100644 (file)
@@ -451,7 +451,7 @@ DEFINE_EVENT(ext4__page_op, ext4_releasepage,
        TP_ARGS(page)
 );
 
-TRACE_EVENT(ext4_invalidatepage,
+DECLARE_EVENT_CLASS(ext4_invalidatepage_op,
        TP_PROTO(struct page *page, unsigned long offset),
 
        TP_ARGS(page, offset),
@@ -477,6 +477,18 @@ TRACE_EVENT(ext4_invalidatepage,
                  (unsigned long) __entry->index, __entry->offset)
 );
 
+DEFINE_EVENT(ext4_invalidatepage_op, ext4_invalidatepage,
+       TP_PROTO(struct page *page, unsigned long offset),
+
+       TP_ARGS(page, offset)
+);
+
+DEFINE_EVENT(ext4_invalidatepage_op, ext4_journalled_invalidatepage,
+       TP_PROTO(struct page *page, unsigned long offset),
+
+       TP_ARGS(page, offset)
+);
+
 TRACE_EVENT(ext4_discard_blocks,
        TP_PROTO(struct super_block *sb, unsigned long long blk,
                        unsigned long long count),
index b746a3c..c4d2e9c 100644 (file)
@@ -307,6 +307,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH         21
 #define I915_PARAM_RSVD_FOR_FUTURE_USE  22
 #define I915_PARAM_HAS_SECURE_BATCHES   23
+#define I915_PARAM_HAS_PINNED_BATCHES   24
 
 typedef struct drm_i915_getparam {
        int param;
@@ -677,6 +678,15 @@ struct drm_i915_gem_execbuffer2 {
  */
 #define I915_EXEC_SECURE               (1<<9)
 
+/** Inform the kernel that the batch is and will always be pinned. This
+ * negates the requirement for a workaround to be performed to avoid
+ * an incoherent CS (such as can be found on 830/845). If this flag is
+ * not passed, the kernel will endeavour to make sure the batch is
+ * coherent with the CS before execution. If this flag is passed,
+ * userspace assumes the responsibility for ensuring the same.
+ */
+#define I915_EXEC_IS_PINNED            (1<<10)
+
 #define I915_EXEC_CONTEXT_ID_MASK      (0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
        (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
index 6b7b6f1..ebfadc5 100644 (file)
 #define  PCI_EXP_DEVSTA_TRPND  0x20    /* Transactions Pending */
 #define PCI_EXP_LNKCAP         12      /* Link Capabilities */
 #define  PCI_EXP_LNKCAP_SLS    0x0000000f /* Supported Link Speeds */
+#define  PCI_EXP_LNKCAP_SLS_2_5GB 0x1  /* LNKCAP2 SLS Vector bit 0 (2.5GT/s) */
+#define  PCI_EXP_LNKCAP_SLS_5_0GB 0x2  /* LNKCAP2 SLS Vector bit 1 (5.0GT/s) */
 #define  PCI_EXP_LNKCAP_MLW    0x000003f0 /* Maximum Link Width */
 #define  PCI_EXP_LNKCAP_ASPMS  0x00000c00 /* ASPM Support */
 #define  PCI_EXP_LNKCAP_L0SEL  0x00007000 /* L0s Exit Latency */
index a31b823..65ca6d2 100644 (file)
@@ -1166,6 +1166,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                                current->signal->flags & SIGNAL_UNKILLABLE)
                return ERR_PTR(-EINVAL);
 
+       /*
+        * If the new process will be in a different pid namespace
+        * don't allow the creation of threads.
+        */
+       if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
+           (task_active_pid_ns(current) != current->nsproxy->pid_ns))
+               return ERR_PTR(-EINVAL);
+
        retval = security_task_create(clone_flags);
        if (retval)
                goto fork_out;
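
The new check makes clone fail once the caller's nsproxy pid namespace differs from the namespace it is actually running in, which is the situation after unshare(CLONE_NEWPID): the next fork becomes init of the new namespace, but creating a thread (CLONE_VM) there can no longer work. A hedged userspace sketch; it needs CAP_SYS_ADMIN, and the exact error reported by pthread_create may vary:

#define _GNU_SOURCE
#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

static void *noop(void *arg) { return arg; }

int main(void)
{
        pthread_t t;
        int err;

        if (unshare(CLONE_NEWPID) < 0) {        /* needs CAP_SYS_ADMIN */
                perror("unshare");
                return 1;
        }
        /* On kernels with the check above this is expected to fail,
         * typically with EINVAL. */
        err = pthread_create(&t, NULL, noop, NULL);
        printf("pthread_create: %s\n", err ? strerror(err) : "ok");
        if (!err)
                pthread_join(t, NULL);
        return 0;
}
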
index 36aa02f..de9af60 100644 (file)
@@ -270,7 +270,6 @@ void free_pid(struct pid *pid)
                        wake_up_process(ns->child_reaper);
                        break;
                case 0:
-                       ns->nr_hashed = -1;
                        schedule_work(&ns->proc_work);
                        break;
                }
@@ -319,7 +318,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
 
        upid = pid->numbers + ns->level;
        spin_lock_irq(&pidmap_lock);
-       if (ns->nr_hashed < 0)
+       if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
                goto out_unlock;
        for ( ; upid >= pid->numbers; --upid) {
                hlist_add_head_rcu(&upid->pid_chain,
@@ -342,6 +341,13 @@ out_free:
        goto out;
 }
 
+void disable_pid_allocation(struct pid_namespace *ns)
+{
+       spin_lock_irq(&pidmap_lock);
+       ns->nr_hashed &= ~PIDNS_HASH_ADDING;
+       spin_unlock_irq(&pidmap_lock);
+}
+
 struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
 {
        struct hlist_node *elem;
@@ -573,6 +579,9 @@ void __init pidhash_init(void)
 
 void __init pidmap_init(void)
 {
+       /* Veryify no one has done anything silly */
+       BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);
+
        /* bump default and minimum pid_max based on number of cpus */
        pid_max = min(pid_max_max, max_t(int, pid_max,
                                PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
@@ -584,7 +593,7 @@ void __init pidmap_init(void)
        /* Reserve PID 0. We never call free_pidmap(0) */
        set_bit(0, init_pid_ns.pidmap[0].page);
        atomic_dec(&init_pid_ns.pidmap[0].nr_free);
-       init_pid_ns.nr_hashed = 1;
+       init_pid_ns.nr_hashed = PIDNS_HASH_ADDING;
 
        init_pid_ns.pid_cachep = KMEM_CACHE(pid,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC);
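
nr_hashed now does double duty: the low bits keep counting hashed pids while bit 31 (PIDNS_HASH_ADDING) says whether new allocations are still allowed, so alloc_pid() tests a single word under pidmap_lock and disable_pid_allocation() only has to clear the bit; the BUILD_BUG_ON ensures the count can never reach the flag. The trick in isolation, with invented names and no locking:

#include <assert.h>
#include <stdio.h>

#define ADDING          (1U << 31)
#define COUNT_LIMIT     (1U << 22)      /* must stay below the flag bit */

static unsigned int nr_hashed = ADDING; /* flag set, count 0 */

static int try_add(void)
{
        if (!(nr_hashed & ADDING))
                return -1;              /* allocation disabled */
        nr_hashed++;                    /* count lives in the low bits */
        return 0;
}

int main(void)
{
        assert(COUNT_LIMIT < ADDING);   /* BUILD_BUG_ON in the kernel */
        printf("add: %d\n", try_add()); /* 0  */
        nr_hashed &= ~ADDING;           /* what disable_pid_allocation does */
        printf("add: %d\n", try_add()); /* -1 */
        printf("count: %u\n", nr_hashed & ~ADDING);     /* 1 */
        return 0;
}
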
index fdbd0cd..c1c3dc1 100644 (file)
@@ -115,6 +115,7 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns
        ns->level = level;
        ns->parent = get_pid_ns(parent_pid_ns);
        ns->user_ns = get_user_ns(user_ns);
+       ns->nr_hashed = PIDNS_HASH_ADDING;
        INIT_WORK(&ns->proc_work, proc_cleanup_work);
 
        set_bit(0, ns->pidmap[0].page);
@@ -181,6 +182,9 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
        int rc;
        struct task_struct *task, *me = current;
 
+       /* Don't allow any more processes into the pid namespace */
+       disable_pid_allocation(pid_ns);
+
        /* Ignore SIGCHLD causing any terminated children to autoreap */
        spin_lock_irq(&me->sighand->siglock);
        me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
index d1b315e..e2df1c1 100644 (file)
@@ -2132,7 +2132,7 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
  */
 
 /* lookup first element intersecting start-end */
-/* Caller holds sp->mutex */
+/* Caller holds sp->lock */
 static struct sp_node *
 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
 {
@@ -2196,13 +2196,13 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
 
        if (!sp->root.rb_node)
                return NULL;
-       mutex_lock(&sp->mutex);
+       spin_lock(&sp->lock);
        sn = sp_lookup(sp, idx, idx+1);
        if (sn) {
                mpol_get(sn->policy);
                pol = sn->policy;
        }
-       mutex_unlock(&sp->mutex);
+       spin_unlock(&sp->lock);
        return pol;
 }
 
@@ -2328,6 +2328,14 @@ static void sp_delete(struct shared_policy *sp, struct sp_node *n)
        sp_free(n);
 }
 
+static void sp_node_init(struct sp_node *node, unsigned long start,
+                       unsigned long end, struct mempolicy *pol)
+{
+       node->start = start;
+       node->end = end;
+       node->policy = pol;
+}
+
 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
                                struct mempolicy *pol)
 {
@@ -2344,10 +2352,7 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
                return NULL;
        }
        newpol->flags |= MPOL_F_SHARED;
-
-       n->start = start;
-       n->end = end;
-       n->policy = newpol;
+       sp_node_init(n, start, end, newpol);
 
        return n;
 }
@@ -2357,9 +2362,12 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
                                 unsigned long end, struct sp_node *new)
 {
        struct sp_node *n;
+       struct sp_node *n_new = NULL;
+       struct mempolicy *mpol_new = NULL;
        int ret = 0;
 
-       mutex_lock(&sp->mutex);
+restart:
+       spin_lock(&sp->lock);
        n = sp_lookup(sp, start, end);
        /* Take care of old policies in the same range. */
        while (n && n->start < end) {
@@ -2372,14 +2380,16 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
                } else {
                        /* Old policy spanning whole new range. */
                        if (n->end > end) {
-                               struct sp_node *new2;
-                               new2 = sp_alloc(end, n->end, n->policy);
-                               if (!new2) {
-                                       ret = -ENOMEM;
-                                       goto out;
-                               }
+                               if (!n_new)
+                                       goto alloc_new;
+
+                               *mpol_new = *n->policy;
+                               atomic_set(&mpol_new->refcnt, 1);
+                               sp_node_init(n_new, n->end, end, mpol_new);
+                               sp_insert(sp, n_new);
                                n->end = start;
-                               sp_insert(sp, new2);
+                               n_new = NULL;
+                               mpol_new = NULL;
                                break;
                        } else
                                n->end = start;
@@ -2390,9 +2400,27 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
        }
        if (new)
                sp_insert(sp, new);
-out:
-       mutex_unlock(&sp->mutex);
+       spin_unlock(&sp->lock);
+       ret = 0;
+
+err_out:
+       if (mpol_new)
+               mpol_put(mpol_new);
+       if (n_new)
+               kmem_cache_free(sn_cache, n_new);
+
        return ret;
+
+alloc_new:
+       spin_unlock(&sp->lock);
+       ret = -ENOMEM;
+       n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
+       if (!n_new)
+               goto err_out;
+       mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
+       if (!mpol_new)
+               goto err_out;
+       goto restart;
 }
 
 /**
@@ -2410,7 +2438,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
        int ret;
 
        sp->root = RB_ROOT;             /* empty tree == default mempolicy */
-       mutex_init(&sp->mutex);
+       spin_lock_init(&sp->lock);
 
        if (mpol) {
                struct vm_area_struct pvma;
@@ -2476,14 +2504,14 @@ void mpol_free_shared_policy(struct shared_policy *p)
 
        if (!p->root.rb_node)
                return;
-       mutex_lock(&p->mutex);
+       spin_lock(&p->lock);
        next = rb_first(&p->root);
        while (next) {
                n = rb_entry(next, struct sp_node, nd);
                next = rb_next(&n->nd);
                sp_delete(p, n);
        }
-       mutex_unlock(&p->mutex);
+       spin_unlock(&p->lock);
 }
 
 #ifdef CONFIG_NUMA_BALANCING
@@ -2595,8 +2623,7 @@ void numa_default_policy(void)
  */
 
 /*
- * "local" is pseudo-policy:  MPOL_PREFERRED with MPOL_F_LOCAL flag
- * Used only for mpol_parse_str() and mpol_to_str()
+ * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
  */
 static const char * const policy_modes[] =
 {
@@ -2610,28 +2637,20 @@ static const char * const policy_modes[] =
 
 #ifdef CONFIG_TMPFS
 /**
- * mpol_parse_str - parse string to mempolicy
+ * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
  * @str:  string containing mempolicy to parse
  * @mpol:  pointer to struct mempolicy pointer, returned on success.
- * @no_context:  flag whether to "contextualize" the mempolicy
  *
  * Format of input:
  *     <mode>[=<flags>][:<nodelist>]
  *
- * if @no_context is true, save the input nodemask in w.user_nodemask in
- * the returned mempolicy.  This will be used to "clone" the mempolicy in
- * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
- * mount option.  Note that if 'static' or 'relative' mode flags were
- * specified, the input nodemask will already have been saved.  Saving
- * it again is redundant, but safe.
- *
  * On success, returns 0, else 1
  */
-int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
+int mpol_parse_str(char *str, struct mempolicy **mpol)
 {
        struct mempolicy *new = NULL;
        unsigned short mode;
-       unsigned short uninitialized_var(mode_flags);
+       unsigned short mode_flags;
        nodemask_t nodes;
        char *nodelist = strchr(str, ':');
        char *flags = strchr(str, '=');
@@ -2719,24 +2738,23 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
        if (IS_ERR(new))
                goto out;
 
-       if (no_context) {
-               /* save for contextualization */
-               new->w.user_nodemask = nodes;
-       } else {
-               int ret;
-               NODEMASK_SCRATCH(scratch);
-               if (scratch) {
-                       task_lock(current);
-                       ret = mpol_set_nodemask(new, &nodes, scratch);
-                       task_unlock(current);
-               } else
-                       ret = -ENOMEM;
-               NODEMASK_SCRATCH_FREE(scratch);
-               if (ret) {
-                       mpol_put(new);
-                       goto out;
-               }
-       }
+       /*
+        * Save nodes for mpol_to_str() to show the tmpfs mount options
+        * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
+        */
+       if (mode != MPOL_PREFERRED)
+               new->v.nodes = nodes;
+       else if (nodelist)
+               new->v.preferred_node = first_node(nodes);
+       else
+               new->flags |= MPOL_F_LOCAL;
+
+       /*
+        * Save nodes for contextualization: this will be used to "clone"
+        * the mempolicy in a specific context [cpuset] at a later time.
+        */
+       new->w.user_nodemask = nodes;
+
        err = 0;
 
 out:
@@ -2756,13 +2774,12 @@ out:
  * @buffer:  to contain formatted mempolicy string
  * @maxlen:  length of @buffer
  * @pol:  pointer to mempolicy to be formatted
- * @no_context:  "context free" mempolicy - use nodemask in w.user_nodemask
  *
  * Convert a mempolicy into a string.
  * Returns the number of characters in buffer (if positive)
  * or an error (negative)
  */
-int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
+int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
 {
        char *p = buffer;
        int l;
@@ -2788,7 +2805,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
        case MPOL_PREFERRED:
                nodes_clear(nodes);
                if (flags & MPOL_F_LOCAL)
-                       mode = MPOL_LOCAL;      /* pseudo-policy */
+                       mode = MPOL_LOCAL;
                else
                        node_set(pol->v.preferred_node, nodes);
                break;
@@ -2796,10 +2813,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
        case MPOL_BIND:
                /* Fall through */
        case MPOL_INTERLEAVE:
-               if (no_context)
-                       nodes = pol->w.user_nodemask;
-               else
-                       nodes = pol->v.nodes;
+               nodes = pol->v.nodes;
                break;
 
        default:
index 5c90d84..5dd56f6 100644 (file)
@@ -889,7 +889,7 @@ static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
        if (!mpol || mpol->mode == MPOL_DEFAULT)
                return;         /* show nothing */
 
-       mpol_to_str(buffer, sizeof(buffer), mpol, 1);
+       mpol_to_str(buffer, sizeof(buffer), mpol);
 
        seq_printf(seq, ",mpol=%s", buffer);
 }
@@ -2463,7 +2463,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
                        if (!gid_valid(sbinfo->gid))
                                goto bad_val;
                } else if (!strcmp(this_char,"mpol")) {
-                       if (mpol_parse_str(value, &sbinfo->mpol, 1))
+                       if (mpol_parse_str(value, &sbinfo->mpol))
                                goto bad_val;
                } else {
                        printk(KERN_ERR "tmpfs: Bad mount option %s\n",
index 23291b9..16b42af 100644 (file)
@@ -2775,7 +2775,7 @@ loop_again:
                if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) {
                        if (has_under_min_watermark_zone)
                                count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
-                       else
+                       else if (unbalanced_zone)
                                wait_iff_congested(unbalanced_zone, BLK_RW_ASYNC, HZ/10);
                }
 
index 9f3925a..7d02ebd 100644 (file)
@@ -123,7 +123,7 @@ batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
        unsigned int msecs;
 
        msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
-       msecs += (random32() % 2 * BATADV_JITTER);
+       msecs += random32() % (2 * BATADV_JITTER);
 
        return jiffies + msecs_to_jiffies(msecs);
 }
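
The one-character batman-adv fix above is pure operator precedence: "%" binds
tighter than "*", so the old expression evaluated as (random32() % 2) *
BATADV_JITTER and could only ever add 0 or exactly BATADV_JITTER, instead of
a value spread over [0, 2 * BATADV_JITTER).  A quick userspace check of the
two expressions, with rand() standing in for random32() and JITTER for
BATADV_JITTER:

	#include <stdio.h>
	#include <stdlib.h>

	#define JITTER 20

	int main(void)
	{
		unsigned int r = rand();

		printf("old: %u (only ever 0 or %d)\n", r % 2 * JITTER, JITTER);
		printf("new: %u (anywhere in [0, %d))\n", r % (2 * JITTER), 2 * JITTER);
		return 0;
	}
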
index 1c8fdc3..37fe693 100644 (file)
@@ -366,11 +366,11 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 
        err = netdev_set_master(dev, br->dev);
        if (err)
-               goto err3;
+               goto err4;
 
        err = netdev_rx_handler_register(dev, br_handle_frame, p);
        if (err)
-               goto err4;
+               goto err5;
 
        dev->priv_flags |= IFF_BRIDGE_PORT;
 
@@ -402,8 +402,10 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 
        return 0;
 
-err4:
+err5:
        netdev_set_master(dev, NULL);
+err4:
+       br_netpoll_disable(p);
 err3:
        sysfs_remove_link(br->ifobj, p->dev->name);
 err2:
index 4d111fd..5ccf87e 100644 (file)
@@ -506,6 +506,7 @@ static void reset_connection(struct ceph_connection *con)
 {
        /* reset connection, out_queue, msg_ and connect_seq */
        /* discard existing out_queue and msg_seq */
+       dout("reset_connection %p\n", con);
        ceph_msg_remove_list(&con->out_queue);
        ceph_msg_remove_list(&con->out_sent);
 
@@ -561,7 +562,7 @@ void ceph_con_open(struct ceph_connection *con,
        mutex_lock(&con->mutex);
        dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
 
-       BUG_ON(con->state != CON_STATE_CLOSED);
+       WARN_ON(con->state != CON_STATE_CLOSED);
        con->state = CON_STATE_PREOPEN;
 
        con->peer_name.type = (__u8) entity_type;
@@ -1506,13 +1507,6 @@ static int process_banner(struct ceph_connection *con)
        return 0;
 }
 
-static void fail_protocol(struct ceph_connection *con)
-{
-       reset_connection(con);
-       BUG_ON(con->state != CON_STATE_NEGOTIATING);
-       con->state = CON_STATE_CLOSED;
-}
-
 static int process_connect(struct ceph_connection *con)
 {
        u64 sup_feat = con->msgr->supported_features;
@@ -1530,7 +1524,7 @@ static int process_connect(struct ceph_connection *con)
                       ceph_pr_addr(&con->peer_addr.in_addr),
                       sup_feat, server_feat, server_feat & ~sup_feat);
                con->error_msg = "missing required protocol features";
-               fail_protocol(con);
+               reset_connection(con);
                return -1;
 
        case CEPH_MSGR_TAG_BADPROTOVER:
@@ -1541,7 +1535,7 @@ static int process_connect(struct ceph_connection *con)
                       le32_to_cpu(con->out_connect.protocol_version),
                       le32_to_cpu(con->in_reply.protocol_version));
                con->error_msg = "protocol version mismatch";
-               fail_protocol(con);
+               reset_connection(con);
                return -1;
 
        case CEPH_MSGR_TAG_BADAUTHORIZER:
@@ -1631,11 +1625,11 @@ static int process_connect(struct ceph_connection *con)
                               ceph_pr_addr(&con->peer_addr.in_addr),
                               req_feat, server_feat, req_feat & ~server_feat);
                        con->error_msg = "missing required protocol features";
-                       fail_protocol(con);
+                       reset_connection(con);
                        return -1;
                }
 
-               BUG_ON(con->state != CON_STATE_NEGOTIATING);
+               WARN_ON(con->state != CON_STATE_NEGOTIATING);
                con->state = CON_STATE_OPEN;
 
                con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
@@ -2132,7 +2126,6 @@ more:
                if (ret < 0)
                        goto out;
 
-               BUG_ON(con->state != CON_STATE_CONNECTING);
                con->state = CON_STATE_NEGOTIATING;
 
                /*
@@ -2160,7 +2153,7 @@ more:
                goto more;
        }
 
-       BUG_ON(con->state != CON_STATE_OPEN);
+       WARN_ON(con->state != CON_STATE_OPEN);
 
        if (con->in_base_pos < 0) {
                /*
@@ -2382,7 +2375,7 @@ static void ceph_fault(struct ceph_connection *con)
        dout("fault %p state %lu to peer %s\n",
             con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
 
-       BUG_ON(con->state != CON_STATE_CONNECTING &&
+       WARN_ON(con->state != CON_STATE_CONNECTING &&
               con->state != CON_STATE_NEGOTIATING &&
               con->state != CON_STATE_OPEN);
 
index 780caf6..eb9a444 100644 (file)
@@ -1270,7 +1270,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
  * Requeue requests whose mapping to an OSD has changed.  If requests map to
  * no osd, request a new map.
  *
- * Caller should hold map_sem for read and request_mutex.
+ * Caller should hold map_sem for read.
  */
 static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
 {
@@ -1284,6 +1284,24 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
        for (p = rb_first(&osdc->requests); p; ) {
                req = rb_entry(p, struct ceph_osd_request, r_node);
                p = rb_next(p);
+
+               /*
+                * For linger requests that have not yet been
+                * registered, move them to the linger list; they'll
+                * be sent to the osd in the loop below.  Unregister
+                * the request before re-registering it as a linger
+                * request to ensure the __map_request() below
+                * will decide it needs to be sent.
+                */
+               if (req->r_linger && list_empty(&req->r_linger_item)) {
+                       dout("%p tid %llu restart on osd%d\n",
+                            req, req->r_tid,
+                            req->r_osd ? req->r_osd->o_osd : -1);
+                       __unregister_request(osdc, req);
+                       __register_linger_request(osdc, req);
+                       continue;
+               }
+
                err = __map_request(osdc, req, force_resend);
                if (err < 0)
                        continue;  /* error */
@@ -1298,17 +1316,6 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
                                req->r_flags |= CEPH_OSD_FLAG_RETRY;
                        }
                }
-               if (req->r_linger && list_empty(&req->r_linger_item)) {
-                       /*
-                        * register as a linger so that we will
-                        * re-submit below and get a new tid
-                        */
-                       dout("%p tid %llu restart on osd%d\n",
-                            req, req->r_tid,
-                            req->r_osd ? req->r_osd->o_osd : -1);
-                       __register_linger_request(osdc, req);
-                       __unregister_request(osdc, req);
-               }
        }
 
        list_for_each_entry_safe(req, nreq, &osdc->req_linger,
@@ -1316,6 +1323,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
                dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
 
                err = __map_request(osdc, req, force_resend);
+               dout("__map_request returned %d\n", err);
                if (err == 0)
                        continue;  /* no change and no osd was specified */
                if (err < 0)
@@ -1337,6 +1345,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
                dout("%d requests for down osds, need new map\n", needmap);
                ceph_monc_request_next_osdmap(&osdc->client->monc);
        }
+       reset_changed_osds(osdc);
 }
 
 
@@ -1393,7 +1402,6 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
                                osdc->osdmap = newmap;
                        }
                        kick_requests(osdc, 0);
-                       reset_changed_osds(osdc);
                } else {
                        dout("ignoring incremental map %u len %d\n",
                             epoch, maplen);
index d0cbc93..515473e 100644 (file)
@@ -203,7 +203,7 @@ static struct list_head offload_base __read_mostly;
 DEFINE_RWLOCK(dev_base_lock);
 EXPORT_SYMBOL(dev_base_lock);
 
-DEFINE_SEQLOCK(devnet_rename_seq);
+seqcount_t devnet_rename_seq;
 
 static inline void dev_base_seq_inc(struct net *net)
 {
@@ -1093,10 +1093,10 @@ int dev_change_name(struct net_device *dev, const char *newname)
        if (dev->flags & IFF_UP)
                return -EBUSY;
 
-       write_seqlock(&devnet_rename_seq);
+       write_seqcount_begin(&devnet_rename_seq);
 
        if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
-               write_sequnlock(&devnet_rename_seq);
+               write_seqcount_end(&devnet_rename_seq);
                return 0;
        }
 
@@ -1104,7 +1104,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
 
        err = dev_get_valid_name(net, dev, newname);
        if (err < 0) {
-               write_sequnlock(&devnet_rename_seq);
+               write_seqcount_end(&devnet_rename_seq);
                return err;
        }
 
@@ -1112,11 +1112,11 @@ rollback:
        ret = device_rename(&dev->dev, dev->name);
        if (ret) {
                memcpy(dev->name, oldname, IFNAMSIZ);
-               write_sequnlock(&devnet_rename_seq);
+               write_seqcount_end(&devnet_rename_seq);
                return ret;
        }
 
-       write_sequnlock(&devnet_rename_seq);
+       write_seqcount_end(&devnet_rename_seq);
 
        write_lock_bh(&dev_base_lock);
        hlist_del_rcu(&dev->name_hlist);
@@ -1135,7 +1135,7 @@ rollback:
                /* err >= 0 after dev_alloc_name() or stores the first errno */
                if (err >= 0) {
                        err = ret;
-                       write_seqlock(&devnet_rename_seq);
+                       write_seqcount_begin(&devnet_rename_seq);
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        goto rollback;
                } else {
@@ -4180,7 +4180,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
                return -EFAULT;
 
 retry:
-       seq = read_seqbegin(&devnet_rename_seq);
+       seq = read_seqcount_begin(&devnet_rename_seq);
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
        if (!dev) {
@@ -4190,7 +4190,7 @@ retry:
 
        strcpy(ifr.ifr_name, dev->name);
        rcu_read_unlock();
-       if (read_seqretry(&devnet_rename_seq, seq))
+       if (read_seqcount_retry(&devnet_rename_seq, seq))
                goto retry;
 
        if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
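
The dev.c hunks here and the sock.c hunk further below convert
devnet_rename_seq from a full seqlock to a bare seqcount, presumably because
the only writer, dev_change_name(), is already serialized under RTNL, so the
spinlock half of a seqlock buys nothing; the readers keep the same
begin/copy/retry shape.  A minimal userspace analogue of that read side, with
illustrative names (rename_seq, ifname) rather than kernel symbols:

	#include <stdatomic.h>
	#include <stdio.h>
	#include <string.h>

	static atomic_uint rename_seq;		/* even: quiescent, odd: write in progress */
	static char ifname[16] = "eth0";

	static unsigned read_begin(void)
	{
		unsigned s;

		while ((s = atomic_load_explicit(&rename_seq, memory_order_acquire)) & 1)
			;			/* writer active, spin */
		return s;
	}

	static int read_retry(unsigned s)
	{
		atomic_thread_fence(memory_order_acquire);
		return atomic_load_explicit(&rename_seq, memory_order_relaxed) != s;
	}

	int main(void)
	{
		char copy[sizeof(ifname)];
		unsigned seq;

		do {
			seq = read_begin();
			memcpy(copy, ifname, sizeof(copy));
		} while (read_retry(seq));	/* retry if a rename raced with the copy */

		printf("stable name: %s\n", copy);
		return 0;
	}
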
index 334efd5..28c5f5a 100644 (file)
@@ -1334,7 +1334,6 @@ struct kobj_ns_type_operations net_ns_type_operations = {
 };
 EXPORT_SYMBOL_GPL(net_ns_type_operations);
 
-#ifdef CONFIG_HOTPLUG
 static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
 {
        struct net_device *dev = to_net_dev(d);
@@ -1353,7 +1352,6 @@ static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
 exit:
        return retval;
 }
-#endif
 
 /*
  *     netdev_release -- destroy and free a dead device.
@@ -1382,9 +1380,7 @@ static struct class net_class = {
 #ifdef CONFIG_SYSFS
        .dev_attrs = net_class_attributes,
 #endif /* CONFIG_SYSFS */
-#ifdef CONFIG_HOTPLUG
        .dev_uevent = netdev_uevent,
-#endif
        .ns_type = &net_ns_type_operations,
        .namespace = net_namespace,
 };
index a692ef4..bc131d4 100644 (file)
@@ -583,7 +583,7 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
                goto out;
 
 retry:
-       seq = read_seqbegin(&devnet_rename_seq);
+       seq = read_seqcount_begin(&devnet_rename_seq);
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
        ret = -ENODEV;
@@ -594,7 +594,7 @@ retry:
 
        strcpy(devname, dev->name);
        rcu_read_unlock();
-       if (read_seqretry(&devnet_rename_seq, seq))
+       if (read_seqcount_retry(&devnet_rename_seq, seq))
                goto retry;
 
        len = strlen(devname) + 1;
index ce6fbdf..9547a27 100644 (file)
@@ -321,7 +321,7 @@ static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb)
 static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
 {
        __be32 saddr = 0;
-       u8  *dst_ha = NULL;
+       u8 dst_ha[MAX_ADDR_LEN], *dst_hw = NULL;
        struct net_device *dev = neigh->dev;
        __be32 target = *(__be32 *)neigh->primary_key;
        int probes = atomic_read(&neigh->probes);
@@ -363,8 +363,8 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
        if (probes < 0) {
                if (!(neigh->nud_state & NUD_VALID))
                        pr_debug("trying to ucast probe in NUD_INVALID\n");
-               dst_ha = neigh->ha;
-               read_lock_bh(&neigh->lock);
+               neigh_ha_snapshot(dst_ha, neigh, dev);
+               dst_hw = dst_ha;
        } else {
                probes -= neigh->parms->app_probes;
                if (probes < 0) {
@@ -376,9 +376,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
        }
 
        arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
-                dst_ha, dev->dev_addr, NULL);
-       if (dst_ha)
-               read_unlock_bh(&neigh->lock);
+                dst_hw, dev->dev_addr, NULL);
 }
 
 static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
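
In arp_solicit() above, neigh_ha_snapshot() copies the neighbour's hardware
address into a stack buffer, so neigh->lock no longer has to stay held all
the way across arp_send().  A userspace sketch of the same snapshot-then-use
pattern; the names (addr_lock, hw_addr, send_probe) are illustrative only:

	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	#define ADDR_LEN 6

	static pthread_mutex_t addr_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned char hw_addr[ADDR_LEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	static void ha_snapshot(unsigned char *dst)
	{
		pthread_mutex_lock(&addr_lock);
		memcpy(dst, hw_addr, ADDR_LEN);	/* consistent copy taken under the lock */
		pthread_mutex_unlock(&addr_lock);
	}

	static void send_probe(const unsigned char *dst)
	{
		/* stands in for the slow send path; no locks held here */
		printf("probing %02x:%02x:%02x:%02x:%02x:%02x\n",
		       dst[0], dst[1], dst[2], dst[3], dst[4], dst[5]);
	}

	int main(void)
	{
		unsigned char snap[ADDR_LEN];

		ha_snapshot(snap);	/* lock held only for the memcpy */
		send_probe(snap);	/* lock already dropped */
		return 0;
	}
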
index a85ae2f..303012a 100644 (file)
@@ -750,6 +750,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
        int    gre_hlen;
        __be32 dst;
        int    mtu;
+       u8     ttl;
 
        if (skb->ip_summed == CHECKSUM_PARTIAL &&
            skb_checksum_help(skb))
@@ -760,7 +761,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 
        if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
                gre_hlen = 0;
-               tiph = (const struct iphdr *)skb->data;
+               if (skb->protocol == htons(ETH_P_IP))
+                       tiph = (const struct iphdr *)skb->data;
+               else
+                       tiph = &tunnel->parms.iph;
        } else {
                gre_hlen = tunnel->hlen;
                tiph = &tunnel->parms.iph;
@@ -812,6 +816,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                        goto tx_error;
        }
 
+       ttl = tiph->ttl;
        tos = tiph->tos;
        if (tos == 1) {
                tos = 0;
@@ -904,11 +909,12 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                dev_kfree_skb(skb);
                skb = new_skb;
                old_iph = ip_hdr(skb);
+               /* Warning: tiph value might point to freed memory */
        }
 
-       skb_reset_transport_header(skb);
        skb_push(skb, gre_hlen);
        skb_reset_network_header(skb);
+       skb_set_transport_header(skb, sizeof(*iph));
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
                              IPSKB_REROUTED);
@@ -927,8 +933,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
        iph->tos                =       ipgre_ecn_encapsulate(tos, old_iph, skb);
        iph->daddr              =       fl4.daddr;
        iph->saddr              =       fl4.saddr;
+       iph->ttl                =       ttl;
 
-       if ((iph->ttl = tiph->ttl) == 0) {
+       if (ttl == 0) {
                if (skb->protocol == htons(ETH_P_IP))
                        iph->ttl = old_iph->ttl;
 #if IS_ENABLED(CONFIG_IPV6)
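
The new ttl local above exists because tiph can point straight into
skb->data, and the skb may be reallocated (see the added warning) before the
outgoing header is filled in; any scalar still needed afterwards has to be
copied out first.  The same rule in miniature with plain realloc(); the
struct and field names are illustrative:

	#include <stdio.h>
	#include <stdlib.h>

	struct hdr { unsigned char ttl; unsigned char tos; };

	int main(void)
	{
		char *buf = malloc(128);
		struct hdr *h;
		unsigned char ttl;

		if (!buf)
			return 1;
		h = (struct hdr *)buf;
		h->ttl = 64;

		ttl = h->ttl;			/* cache before the buffer can move */

		buf = realloc(buf, 4096);	/* analogue of skb_realloc_headroom() */
		if (!buf)
			return 1;
		/* h may now dangle; dereferencing it would be undefined behaviour */

		printf("ttl = %u\n", ttl);	/* the cached copy stays valid */
		free(buf);
		return 0;
	}
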
index a136925..a28e4db 100644 (file)
@@ -5543,6 +5543,9 @@ slow_path:
        if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
                goto csum_error;
 
+       if (!th->ack)
+               goto discard;
+
        /*
         *      Standard slow path.
         */
@@ -5551,7 +5554,7 @@ slow_path:
                return 0;
 
 step5:
-       if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
+       if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
                goto discard;
 
        /* ts_recent update must be made after we are sure that the packet
@@ -5984,11 +5987,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
                        goto discard;
        }
+
+       if (!th->ack)
+               goto discard;
+
        if (!tcp_validate_incoming(sk, skb, th, 0))
                return 0;
 
        /* step 5: check the ACK field */
-       if (th->ack) {
+       if (true) {
                int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
 
                switch (sk->sk_state) {
@@ -6138,8 +6145,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        }
                        break;
                }
-       } else
-               goto discard;
+       }
 
        /* ts_recent update must be made after we are sure that the packet
         * is in window.
index 867466c..c727e47 100644 (file)
@@ -758,8 +758,6 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
                skb_dst_set_noref(skb, dst);
        }
 
-       skb->transport_header = skb->network_header;
-
        proto = NEXTHDR_GRE;
        if (encap_limit >= 0) {
                init_tel_txopt(&opt, encap_limit);
@@ -768,6 +766,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 
        skb_push(skb, gre_hlen);
        skb_reset_network_header(skb);
+       skb_set_transport_header(skb, sizeof(*ipv6h));
 
        /*
         *      Push down and install the IP header.
index a1e1162..31b74f5 100644 (file)
@@ -434,12 +434,11 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
                version = RDS_PROTOCOL_3_0;
                while ((common >>= 1) != 0)
                        version++;
-       }
-       printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using "
-                       "incompatible protocol version %u.%u\n",
-                       &dp->dp_saddr,
-                       dp->dp_protocol_major,
-                       dp->dp_protocol_minor);
+       } else
+               printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
+                               &dp->dp_saddr,
+                               dp->dp_protocol_major,
+                               dp->dp_protocol_minor);
        return version;
 }
 
index 8c5bc85..8eb9501 100644 (file)
@@ -339,8 +339,8 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
        sge->length = sizeof(struct rds_header);
 
        sge = &recv->r_sge[1];
-       sge->addr = sg_dma_address(&recv->r_frag->f_sg);
-       sge->length = sg_dma_len(&recv->r_frag->f_sg);
+       sge->addr = ib_sg_dma_address(ic->i_cm_id->device, &recv->r_frag->f_sg);
+       sge->length = ib_sg_dma_len(ic->i_cm_id->device, &recv->r_frag->f_sg);
 
        ret = 0;
 out:
@@ -381,7 +381,10 @@ void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
                ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
                rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
                         recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
-                        (long) sg_dma_address(&recv->r_frag->f_sg), ret);
+                        (long) ib_sg_dma_address(
+                               ic->i_cm_id->device,
+                               &recv->r_frag->f_sg),
+                       ret);
                if (ret) {
                        rds_ib_conn_error(conn, "recv post on "
                               "%pI4 returned %d, disconnecting and "
index d2922c0..51561ea 100644 (file)
@@ -919,7 +919,7 @@ ok:
        q->now = ktime_to_ns(ktime_get());
        start_at = jiffies;
 
-       next_event = q->now + 5 * NSEC_PER_SEC;
+       next_event = q->now + 5LLU * NSEC_PER_SEC;
 
        for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
                /* common case optimization - skip event handler quickly */
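
The 5LLU suffix matters because NSEC_PER_SEC is a long, which is only 32 bits
wide on 32-bit kernels, so the old 5 * NSEC_PER_SEC wrapped before it was
ever added to the 64-bit q->now.  A demonstration with an explicitly 32-bit
stand-in (unsigned, to keep the overflow well defined):

	#include <stdio.h>
	#include <stdint.h>

	#define NSEC_PER_SEC_32 1000000000u	/* stand-in for a 32-bit long */

	int main(void)
	{
		uint64_t now = 0;

		uint64_t bad  = now + 5 * NSEC_PER_SEC_32;	/* product wraps in 32 bits */
		uint64_t good = now + 5LLU * NSEC_PER_SEC_32;	/* product done in 64 bits */

		printf("bad  = %llu\n", (unsigned long long)bad);	/* 705032704 */
		printf("good = %llu\n", (unsigned long long)good);	/* 5000000000 */
		return 0;
	}
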
index 6e53089..82c4fc7 100644 (file)
@@ -2365,7 +2365,6 @@ int set_regdom(const struct ieee80211_regdomain *rd)
        return r;
 }
 
-#ifdef CONFIG_HOTPLUG
 int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
        if (last_request && !last_request->processed) {
@@ -2377,12 +2376,6 @@ int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
 
        return 0;
 }
-#else
-int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
-       return -ENODEV;
-}
-#endif /* CONFIG_HOTPLUG */
 
 void wiphy_regulatory_register(struct wiphy *wiphy)
 {
index 9bf6d5e..1f6f01e 100644 (file)
@@ -77,13 +77,11 @@ static void wiphy_dev_release(struct device *dev)
        cfg80211_dev_free(rdev);
 }
 
-#ifdef CONFIG_HOTPLUG
 static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
        /* TODO, we probably need stuff here */
        return 0;
 }
-#endif
 
 static int wiphy_suspend(struct device *dev, pm_message_t state)
 {
@@ -134,9 +132,7 @@ struct class ieee80211_class = {
        .owner = THIS_MODULE,
        .dev_release = wiphy_dev_release,
        .dev_attrs = ieee80211_dev_attrs,
-#ifdef CONFIG_HOTPLUG
        .dev_uevent = wiphy_uevent,
-#endif
        .suspend = wiphy_suspend,
        .resume = wiphy_resume,
        .ns_type = &net_ns_type_operations,
index 6c353ae..581ca99 100644 (file)
@@ -42,9 +42,9 @@ foreach my $filename (@files) {
                $line =~ s/(^|\s)(inline)\b/$1__$2__/g;
                $line =~ s/(^|\s)(asm)\b(\s|[(]|$)/$1__$2__$3/g;
                $line =~ s/(^|\s|[(])(volatile)\b(\s|[(]|$)/$1__$2__$3/g;
-               $line =~ s/#ifndef _UAPI/#ifndef /;
-               $line =~ s/#define _UAPI/#define /;
-               $line =~ s!#endif /[*] _UAPI!#endif /* !;
+               $line =~ s/#ifndef\s+_UAPI/#ifndef /;
+               $line =~ s/#define\s+_UAPI/#define /;
+               $line =~ s!#endif\s+/[*]\s*_UAPI!#endif /* !;
                printf {$out} "%s", $line;
        }
        close $out;