Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author     David S. Miller <davem@davemloft.net>
           Fri, 31 Aug 2012 19:14:10 +0000 (15:14 -0400)
committer  David S. Miller <davem@davemloft.net>
           Fri, 31 Aug 2012 19:14:18 +0000 (15:14 -0400)
Merge the 'net' tree to get the recent set of netfilter bug fixes in
order to assist with some merge hassles Pablo is going to have to deal
with for upcoming changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
125 files changed:
Makefile
drivers/acpi/acpica/tbxface.c
drivers/char/agp/intel-agp.h
drivers/char/agp/intel-gtt.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_proc.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_modes.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/reg_srcs/r600
drivers/gpu/drm/udl/udl_modeset.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/i2c/busses/i2c-diolan-u2c.c
drivers/i2c/busses/i2c-nomadik.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-tegra.c
drivers/net/can/sja1000/sja1000_platform.c
drivers/net/can/softing/softing_fw.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/descs.h
drivers/net/ethernet/stmicro/stmmac/descs_com.h
drivers/net/ethernet/stmicro/stmmac/dwmac100.h
drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
drivers/net/ethernet/stmicro/stmmac/mmc.h
drivers/net/ethernet/stmicro/stmmac/mmc_core.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
drivers/net/wireless/ath/ath5k/eeprom.c
drivers/net/wireless/ath/ath5k/eeprom.h
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/iwlwifi/dvm/debugfs.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/xen-netfront.c
drivers/pwm/Kconfig
drivers/pwm/core.c
drivers/pwm/pwm-samsung.c
drivers/pwm/pwm-tegra.c
drivers/pwm/pwm-tiecap.c
drivers/pwm/pwm-tiehrpwm.c
drivers/pwm/pwm-vt8500.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tcm_fc.h
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_sess.c
drivers/vfio/vfio.c
drivers/vhost/tcm_vhost.c
drivers/vhost/tcm_vhost.h
drivers/video/console/fbcon.c
fs/ceph/debugfs.c
fs/ceph/inode.c
fs/ceph/ioctl.c
fs/eventpoll.c
fs/namei.c
fs/nfs/Makefile
fs/nfs/client.c
fs/nfs/idmap.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4super.c
fs/nfs/nfs4xdr.c
fs/nfs/objlayout/objio_osd.c
fs/nfs/pagelist.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/super.c
fs/nfs/write.c
include/drm/drm_crtc.h
include/linux/kref.h
include/linux/nfs_page.h
include/linux/nfs_xdr.h
include/linux/pci_ids.h
include/net/netfilter/nf_conntrack_ecache.h
include/target/target_core_base.h
ipc/mqueue.c
net/ceph/ceph_common.c
net/ceph/debugfs.c
net/ceph/messenger.c
net/ceph/mon_client.c
net/core/netpoll.c
net/ipv4/ipmr.c
net/ipv4/netfilter/nf_nat_sip.c
net/ipv4/route.c
net/ipv4/tcp_input.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/mac80211/tx.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nfnetlink_log.c
net/netlink/af_netlink.c
net/packet/af_packet.c

index 9cc77ac..3540268 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
index ea4c6d5..29e51bc 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -387,6 +387,7 @@ acpi_get_table_with_size(char *signature,
 
        return (AE_NOT_FOUND);
 }
+ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
 
 acpi_status
 acpi_get_table(char *signature,
index 6f007b6..6ec0fff 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -64,6 +64,7 @@
 #define I830_PTE_SYSTEM_CACHED  0x00000006
 /* GT PTE cache control fields */
 #define GEN6_PTE_UNCACHED      0x00000002
+#define HSW_PTE_UNCACHED       0x00000000
 #define GEN6_PTE_LLC           0x00000004
 #define GEN6_PTE_LLC_MLC       0x00000006
 #define GEN6_PTE_GFDT          0x00000008
index 08fc5cb..58e32f7 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1156,6 +1156,30 @@ static bool gen6_check_flags(unsigned int flags)
        return true;
 }
 
+static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
+                               unsigned int flags)
+{
+       unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
+       unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
+       u32 pte_flags;
+
+       if (type_mask == AGP_USER_MEMORY)
+               pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
+       else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
+               pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+               if (gfdt)
+                       pte_flags |= GEN6_PTE_GFDT;
+       } else { /* set 'normal'/'cached' to LLC by default */
+               pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+               if (gfdt)
+                       pte_flags |= GEN6_PTE_GFDT;
+       }
+
+       /* gen6 has bit11-4 for physical addr bit39-32 */
+       addr |= (addr >> 28) & 0xff0;
+       writel(addr | pte_flags, intel_private.gtt + entry);
+}
+
 static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
                             unsigned int flags)
 {
@@ -1382,6 +1406,15 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
        .check_flags = gen6_check_flags,
        .chipset_flush = i9xx_chipset_flush,
 };
+static const struct intel_gtt_driver haswell_gtt_driver = {
+       .gen = 6,
+       .setup = i9xx_setup,
+       .cleanup = gen6_cleanup,
+       .write_entry = haswell_write_entry,
+       .dma_mask_size = 40,
+       .check_flags = gen6_check_flags,
+       .chipset_flush = i9xx_chipset_flush,
+};
 static const struct intel_gtt_driver valleyview_gtt_driver = {
        .gen = 7,
        .setup = i9xx_setup,
@@ -1499,77 +1532,77 @@ static const struct intel_gtt_driver_description {
        { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
            "ValleyView", &valleyview_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
-           "Haswell", &sandybridge_gtt_driver },
+           "Haswell", &haswell_gtt_driver },
        { 0, NULL, NULL }
 };
 
index b7adb4a..28637c1 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -706,9 +706,6 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
        p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
        p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
        p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
-
-       p->crtc_hadjusted = false;
-       p->crtc_vadjusted = false;
 }
 EXPORT_SYMBOL(drm_mode_set_crtcinfo);
 
index 371c695..da457b1 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -89,7 +89,7 @@ static const struct file_operations drm_proc_fops = {
  * Create a given set of proc files represented by an array of
  * gdm_proc_lists in the given root directory.
  */
-int drm_proc_create_files(struct drm_info_list *files, int count,
+static int drm_proc_create_files(struct drm_info_list *files, int count,
                          struct proc_dir_entry *root, struct drm_minor *minor)
 {
        struct drm_device *dev = minor->dev;
@@ -172,7 +172,7 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
        return 0;
 }
 
-int drm_proc_remove_files(struct drm_info_list *files, int count,
+static int drm_proc_remove_files(struct drm_info_list *files, int count,
                          struct drm_minor *minor)
 {
        struct list_head *pos, *q;
index 5c4657a..489e2b1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2365,6 +2365,10 @@ int i915_gpu_idle(struct drm_device *dev)
 
        /* Flush everything onto the inactive list. */
        for_each_ring(ring, dev_priv, i) {
+               ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
+               if (ret)
+                       return ret;
+
                ret = i915_ring_idle(ring);
                if (ret)
                        return ret;
@@ -2372,10 +2376,6 @@ int i915_gpu_idle(struct drm_device *dev)
                /* Is the device fubar? */
                if (WARN_ON(!list_empty(&ring->gpu_write_list)))
                        return -EBUSY;
-
-               ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
-               if (ret)
-                       return ret;
        }
 
        return 0;
index ee9b68f..d9a5372 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -261,7 +261,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                pte_flags |= GEN6_PTE_CACHE_LLC;
                break;
        case I915_CACHE_NONE:
-               pte_flags |= GEN6_PTE_UNCACHED;
+               if (IS_HASWELL(dev))
+                       pte_flags |= HSW_PTE_UNCACHED;
+               else
+                       pte_flags |= GEN6_PTE_UNCACHED;
                break;
        default:
                BUG();
index acc99b2..28725ce 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
 
 #define GEN6_PTE_VALID                 (1 << 0)
 #define GEN6_PTE_UNCACHED              (1 << 1)
+#define HSW_PTE_UNCACHED               (0)
 #define GEN6_PTE_CACHE_LLC             (2 << 1)
 #define GEN6_PTE_CACHE_LLC_MLC         (3 << 1)
 #define GEN6_PTE_CACHE_BITS            (3 << 1)
index 7ed4a41..23bdc8c 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -326,6 +326,36 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
        return ret;
 }
 
+static struct edid *intel_crt_get_edid(struct drm_connector *connector,
+                               struct i2c_adapter *i2c)
+{
+       struct edid *edid;
+
+       edid = drm_get_edid(connector, i2c);
+
+       if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+               DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
+               intel_gmbus_force_bit(i2c, true);
+               edid = drm_get_edid(connector, i2c);
+               intel_gmbus_force_bit(i2c, false);
+       }
+
+       return edid;
+}
+
+/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
+static int intel_crt_ddc_get_modes(struct drm_connector *connector,
+                               struct i2c_adapter *adapter)
+{
+       struct edid *edid;
+
+       edid = intel_crt_get_edid(connector, adapter);
+       if (!edid)
+               return 0;
+
+       return intel_connector_update_modes(connector, edid);
+}
+
 static bool intel_crt_detect_ddc(struct drm_connector *connector)
 {
        struct intel_crt *crt = intel_attached_crt(connector);
@@ -336,7 +366,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
        BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
 
        i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
-       edid = drm_get_edid(connector, i2c);
+       edid = intel_crt_get_edid(connector, i2c);
 
        if (edid) {
                bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
@@ -544,13 +574,13 @@ static int intel_crt_get_modes(struct drm_connector *connector)
        struct i2c_adapter *i2c;
 
        i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
-       ret = intel_ddc_get_modes(connector, i2c);
+       ret = intel_crt_ddc_get_modes(connector, i2c);
        if (ret || !IS_G4X(dev))
                return ret;
 
        /* Try to probe digital port for output in DVI-I -> VGA mode. */
        i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
-       return intel_ddc_get_modes(connector, i2c);
+       return intel_crt_ddc_get_modes(connector, i2c);
 }
 
 static int intel_crt_set_property(struct drm_connector *connector,
index 132ab51..cd54cf8 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -342,6 +342,8 @@ struct intel_fbc_work {
        int interval;
 };
 
+int intel_connector_update_modes(struct drm_connector *connector,
+                               struct edid *edid);
 int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
 
 extern void intel_attach_force_audio_property(struct drm_connector *connector);
index 45848b9..29b7259 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
 #include "i915_drv.h"
 
 /**
+ * intel_connector_update_modes - update connector from edid
+ * @connector: DRM connector device to use
+ * @edid: previously read EDID information
+ */
+int intel_connector_update_modes(struct drm_connector *connector,
+                               struct edid *edid)
+{
+       int ret;
+
+       drm_mode_connector_update_edid_property(connector, edid);
+       ret = drm_add_edid_modes(connector, edid);
+       drm_edid_to_eld(connector, edid);
+       connector->display_info.raw_edid = NULL;
+       kfree(edid);
+
+       return ret;
+}
+
+/**
  * intel_ddc_get_modes - get modelist from monitor
  * @connector: DRM connector device to use
  * @adapter: i2c adapter
@@ -43,18 +62,12 @@ int intel_ddc_get_modes(struct drm_connector *connector,
                        struct i2c_adapter *adapter)
 {
        struct edid *edid;
-       int ret = 0;
 
        edid = drm_get_edid(connector, adapter);
-       if (edid) {
-               drm_mode_connector_update_edid_property(connector, edid);
-               ret = drm_add_edid_modes(connector, edid);
-               drm_edid_to_eld(connector, edid);
-               connector->display_info.raw_edid = NULL;
-               kfree(edid);
-       }
+       if (!edid)
+               return 0;
 
-       return ret;
+       return intel_connector_update_modes(connector, edid);
 }
 
 static const struct drm_prop_enum_list force_audio_names[] = {
index 58c07cd..1881c8c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2441,17 +2441,10 @@ static void gen6_enable_rps(struct drm_device *dev)
                   dev_priv->max_delay << 24 |
                   dev_priv->min_delay << 16);
 
-       if (IS_HASWELL(dev)) {
-               I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
-               I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
-               I915_WRITE(GEN6_RP_UP_EI, 66000);
-               I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-       } else {
-               I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
-               I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
-               I915_WRITE(GEN6_RP_UP_EI, 100000);
-               I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
-       }
+       I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+       I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+       I915_WRITE(GEN6_RP_UP_EI, 66000);
+       I915_WRITE(GEN6_RP_DOWN_EI, 350000);
 
        I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
        I915_WRITE(GEN6_RP_CONTROL,
index d172e98..d81bb0b 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1692,6 +1692,7 @@ static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
        edid = intel_sdvo_get_edid(connector);
        if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
                has_audio = drm_detect_monitor_audio(edid);
+       kfree(edid);
 
        return has_audio;
 }
index c6fcb5b..f4d4505 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -444,11 +444,28 @@ union atom_enable_ss {
 static void atombios_crtc_program_ss(struct radeon_device *rdev,
                                     int enable,
                                     int pll_id,
+                                    int crtc_id,
                                     struct radeon_atom_ss *ss)
 {
+       unsigned i;
        int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
        union atom_enable_ss args;
 
+       if (!enable) {
+               for (i = 0; i < rdev->num_crtc; i++) {
+                       if (rdev->mode_info.crtcs[i] &&
+                           rdev->mode_info.crtcs[i]->enabled &&
+                           i != crtc_id &&
+                           pll_id == rdev->mode_info.crtcs[i]->pll_id) {
+                               /* one other crtc is using this pll don't turn
+                                * off spread spectrum as it might turn off
+                                * display on active crtc
+                                */
+                               return;
+                       }
+               }
+       }
+
        memset(&args, 0, sizeof(args));
 
        if (ASIC_IS_DCE5(rdev)) {
@@ -1028,7 +1045,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
                radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
                                          &ref_div, &post_div);
 
-       atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
+       atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
 
        atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
                                  encoder_mode, radeon_encoder->encoder_id, mode->clock,
@@ -1051,7 +1068,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
                        ss.step = step_size;
                }
 
-               atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
+               atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
        }
 }
 
@@ -1572,11 +1589,11 @@ void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
                                                                   ASIC_INTERNAL_SS_ON_DCPLL,
                                                                   rdev->clock.default_dispclk);
                if (ss_enabled)
-                       atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, &ss);
+                       atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);
                /* XXX: DCE5, make sure voltage, dispclk is high enough */
                atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
                if (ss_enabled)
-                       atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, &ss);
+                       atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);
        }
 
 }
index 3dab49c..ab74e6b 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -47,13 +47,17 @@ struct r600_cs_track {
        u32                     npipes;
        /* value we track */
        u32                     sq_config;
+       u32                     log_nsamples;
        u32                     nsamples;
        u32                     cb_color_base_last[8];
        struct radeon_bo        *cb_color_bo[8];
        u64                     cb_color_bo_mc[8];
-       u32                     cb_color_bo_offset[8];
-       struct radeon_bo        *cb_color_frag_bo[8]; /* unused */
-       struct radeon_bo        *cb_color_tile_bo[8]; /* unused */
+       u64                     cb_color_bo_offset[8];
+       struct radeon_bo        *cb_color_frag_bo[8];
+       u64                     cb_color_frag_offset[8];
+       struct radeon_bo        *cb_color_tile_bo[8];
+       u64                     cb_color_tile_offset[8];
+       u32                     cb_color_mask[8];
        u32                     cb_color_info[8];
        u32                     cb_color_view[8];
        u32                     cb_color_size_idx[8]; /* unused */
@@ -349,10 +353,6 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
        unsigned array_mode;
        u32 format;
 
-       if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
-               dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
-               return -EINVAL;
-       }
        size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
        format = G_0280A0_FORMAT(track->cb_color_info[i]);
        if (!r600_fmt_is_valid_color(format)) {
@@ -420,7 +420,8 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
        }
 
        /* check offset */
-       tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format);
+       tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
+             r600_fmt_get_blocksize(format) * track->nsamples;
        switch (array_mode) {
        default:
        case V_0280A0_ARRAY_LINEAR_GENERAL:
@@ -441,7 +442,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
                         * broken userspace.
                         */
                } else {
-                       dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big (%d %d) (%d %d %d)\n",
+                       dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
                                 __func__, i, array_mode,
                                 track->cb_color_bo_offset[i], tmp,
                                 radeon_bo_size(track->cb_color_bo[i]),
@@ -458,6 +459,51 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
        tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
                S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
        ib[track->cb_color_size_idx[i]] = tmp;
+
+       /* FMASK/CMASK */
+       switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
+       case V_0280A0_TILE_DISABLE:
+               break;
+       case V_0280A0_FRAG_ENABLE:
+               if (track->nsamples > 1) {
+                       uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
+                       /* the tile size is 8x8, but the size is in units of bits.
+                        * for bytes, do just * 8. */
+                       uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);
+
+                       if (bytes + track->cb_color_frag_offset[i] >
+                           radeon_bo_size(track->cb_color_frag_bo[i])) {
+                               dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
+                                        "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+                                        __func__, tile_max, bytes,
+                                        track->cb_color_frag_offset[i],
+                                        radeon_bo_size(track->cb_color_frag_bo[i]));
+                               return -EINVAL;
+                       }
+               }
+               /* fall through */
+       case V_0280A0_CLEAR_ENABLE:
+       {
+               uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
+               /* One block = 128x128 pixels, one 8x8 tile has 4 bits..
+                * (128*128) / (8*8) / 2 = 128 bytes per block. */
+               uint32_t bytes = (block_max + 1) * 128;
+
+               if (bytes + track->cb_color_tile_offset[i] >
+                   radeon_bo_size(track->cb_color_tile_bo[i])) {
+                       dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
+                                "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+                                __func__, block_max, bytes,
+                                track->cb_color_tile_offset[i],
+                                radeon_bo_size(track->cb_color_tile_bo[i]));
+                       return -EINVAL;
+               }
+               break;
+       }
+       default:
+               dev_warn(p->dev, "%s invalid tile mode\n", __func__);
+               return -EINVAL;
+       }
        return 0;
 }
 
@@ -566,7 +612,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
 
                ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
                nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
-               tmp = ntiles * bpe * 64 * nviews;
+               tmp = ntiles * bpe * 64 * nviews * track->nsamples;
                if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
                        dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
                                        array_mode,
@@ -1231,6 +1277,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                break;
        case R_028C04_PA_SC_AA_CONFIG:
                tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
+               track->log_nsamples = tmp;
                track->nsamples = 1 << tmp;
                track->cb_dirty = true;
                break;
@@ -1312,16 +1359,21 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
                                return -EINVAL;
                        }
-                       ib[idx] = track->cb_color_base_last[tmp];
                        track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
+                       track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
+                       ib[idx] = track->cb_color_base_last[tmp];
                } else {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
                                return -EINVAL;
                        }
-                       ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                        track->cb_color_frag_bo[tmp] = reloc->robj;
+                       track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
+                       ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               }
+               if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+                       track->cb_dirty = true;
                }
                break;
        case R_0280C0_CB_COLOR0_TILE:
@@ -1338,16 +1390,35 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
                                return -EINVAL;
                        }
-                       ib[idx] = track->cb_color_base_last[tmp];
                        track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
+                       track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
+                       ib[idx] = track->cb_color_base_last[tmp];
                } else {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
                                return -EINVAL;
                        }
-                       ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                        track->cb_color_tile_bo[tmp] = reloc->robj;
+                       track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
+                       ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               }
+               if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+                       track->cb_dirty = true;
+               }
+               break;
+       case R_028100_CB_COLOR0_MASK:
+       case R_028104_CB_COLOR1_MASK:
+       case R_028108_CB_COLOR2_MASK:
+       case R_02810C_CB_COLOR3_MASK:
+       case R_028110_CB_COLOR4_MASK:
+       case R_028114_CB_COLOR5_MASK:
+       case R_028118_CB_COLOR6_MASK:
+       case R_02811C_CB_COLOR7_MASK:
+               tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
+               track->cb_color_mask[tmp] = ib[idx];
+               if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+                       track->cb_dirty = true;
                }
                break;
        case CB_COLOR0_BASE:
@@ -1492,7 +1563,7 @@ unsigned r600_mip_minify(unsigned size, unsigned level)
 }
 
 static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
-                             unsigned w0, unsigned h0, unsigned d0, unsigned format,
+                             unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
                              unsigned block_align, unsigned height_align, unsigned base_align,
                              unsigned *l0_size, unsigned *mipmap_size)
 {
@@ -1520,7 +1591,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
 
                depth = r600_mip_minify(d0, i);
 
-               size = nbx * nby * blocksize;
+               size = nbx * nby * blocksize * nsamples;
                if (nfaces)
                        size *= nfaces;
                else
@@ -1672,7 +1743,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
 
                nfaces = larray - barray + 1;
        }
-       r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format,
+       r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
                          pitch_align, height_align, base_align,
                          &l0_size, &mipmap_size);
        /* using get ib will give us the offset into the texture bo */
index fd328f4..bdb69a6 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
 #define R_028094_CB_COLOR5_VIEW                      0x028094
 #define R_028098_CB_COLOR6_VIEW                      0x028098
 #define R_02809C_CB_COLOR7_VIEW                      0x02809C
+#define R_028100_CB_COLOR0_MASK                      0x028100
+#define   S_028100_CMASK_BLOCK_MAX(x)                  (((x) & 0xFFF) << 0)
+#define   G_028100_CMASK_BLOCK_MAX(x)                  (((x) >> 0) & 0xFFF)
+#define   C_028100_CMASK_BLOCK_MAX                     0xFFFFF000
+#define   S_028100_FMASK_TILE_MAX(x)                   (((x) & 0xFFFFF) << 12)
+#define   G_028100_FMASK_TILE_MAX(x)                   (((x) >> 12) & 0xFFFFF)
+#define   C_028100_FMASK_TILE_MAX                      0x00000FFF
+#define R_028104_CB_COLOR1_MASK                      0x028104
+#define R_028108_CB_COLOR2_MASK                      0x028108
+#define R_02810C_CB_COLOR3_MASK                      0x02810C
+#define R_028110_CB_COLOR4_MASK                      0x028110
+#define R_028114_CB_COLOR5_MASK                      0x028114
+#define R_028118_CB_COLOR6_MASK                      0x028118
+#define R_02811C_CB_COLOR7_MASK                      0x02811C
 #define CB_COLOR0_INFO                                  0x280a0
 #      define CB_FORMAT(x)                             ((x) << 2)
 #       define CB_ARRAY_MODE(x)                         ((x) << 8)
 #define   S_0280A0_TILE_MODE(x)                        (((x) & 0x3) << 18)
 #define   G_0280A0_TILE_MODE(x)                        (((x) >> 18) & 0x3)
 #define   C_0280A0_TILE_MODE                           0xFFF3FFFF
+#define     V_0280A0_TILE_DISABLE                      0
+#define     V_0280A0_CLEAR_ENABLE                      1
+#define     V_0280A0_FRAG_ENABLE                       2
 #define   S_0280A0_BLEND_CLAMP(x)                      (((x) & 0x1) << 20)
 #define   G_0280A0_BLEND_CLAMP(x)                      (((x) >> 20) & 0x1)
 #define   C_0280A0_BLEND_CLAMP                         0xFFEFFFFF
index 9930419..59a1531 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -142,21 +142,6 @@ struct radeon_device;
 /*
  * BIOS.
  */
-#define ATRM_BIOS_PAGE 4096
-
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_atrm_supported(struct pci_dev *pdev);
-int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
-#else
-static inline bool radeon_atrm_supported(struct pci_dev *pdev)
-{
-       return false;
-}
-
-static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
-       return -EINVAL;
-}
-#endif
 bool radeon_get_bios(struct radeon_device *rdev);
 
 /*
index f9c21f9..d67d4f3 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -452,7 +452,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
        }
 
        /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
-       if ((dev->pdev->device == 0x9802) &&
+       if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
            (dev->pdev->subsystem_vendor == 0x1734) &&
            (dev->pdev->subsystem_device == 0x11bd)) {
                if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
index 98724fc..2a2cf0b 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -30,57 +30,8 @@ static struct radeon_atpx_priv {
        /* handle for device - and atpx */
        acpi_handle dhandle;
        acpi_handle atpx_handle;
-       acpi_handle atrm_handle;
 } radeon_atpx_priv;
 
-/* retrieve the ROM in 4k blocks */
-static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
-                           int offset, int len)
-{
-       acpi_status status;
-       union acpi_object atrm_arg_elements[2], *obj;
-       struct acpi_object_list atrm_arg;
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
-
-       atrm_arg.count = 2;
-       atrm_arg.pointer = &atrm_arg_elements[0];
-
-       atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
-       atrm_arg_elements[0].integer.value = offset;
-
-       atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
-       atrm_arg_elements[1].integer.value = len;
-
-       status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
-       if (ACPI_FAILURE(status)) {
-               printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
-               return -ENODEV;
-       }
-
-       obj = (union acpi_object *)buffer.pointer;
-       memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
-       len = obj->buffer.length;
-       kfree(buffer.pointer);
-       return len;
-}
-
-bool radeon_atrm_supported(struct pci_dev *pdev)
-{
-       /* get the discrete ROM only via ATRM */
-       if (!radeon_atpx_priv.atpx_detected)
-               return false;
-
-       if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
-               return false;
-       return true;
-}
-
-
-int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
-{
-       return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
-}
-
 static int radeon_atpx_get_version(acpi_handle handle)
 {
        acpi_status status;
@@ -198,7 +149,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
 
 static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
 {
-       acpi_handle dhandle, atpx_handle, atrm_handle;
+       acpi_handle dhandle, atpx_handle;
        acpi_status status;
 
        dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
@@ -209,13 +160,8 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
        if (ACPI_FAILURE(status))
                return false;
 
-       status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
-       if (ACPI_FAILURE(status))
-               return false;
-
        radeon_atpx_priv.dhandle = dhandle;
        radeon_atpx_priv.atpx_handle = atpx_handle;
-       radeon_atpx_priv.atrm_handle = atrm_handle;
        return true;
 }
 
index 501f488..d306cc8 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -32,6 +32,7 @@
 
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
+#include <linux/acpi.h>
 /*
  * BIOS.
  */
@@ -98,16 +99,81 @@ static bool radeon_read_bios(struct radeon_device *rdev)
        return true;
 }
 
+#ifdef CONFIG_ACPI
 /* ATRM is used to get the BIOS on the discrete cards in
  * dual-gpu systems.
  */
+/* retrieve the ROM in 4k blocks */
+#define ATRM_BIOS_PAGE 4096
+/**
+ * radeon_atrm_call - fetch a chunk of the vbios
+ *
+ * @atrm_handle: acpi ATRM handle
+ * @bios: vbios image pointer
+ * @offset: offset of vbios image data to fetch
+ * @len: length of vbios image data to fetch
+ *
+ * Executes ATRM to fetch a chunk of the discrete
+ * vbios image on PX systems (all asics).
+ * Returns the length of the buffer fetched.
+ */
+static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
+                           int offset, int len)
+{
+       acpi_status status;
+       union acpi_object atrm_arg_elements[2], *obj;
+       struct acpi_object_list atrm_arg;
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+       atrm_arg.count = 2;
+       atrm_arg.pointer = &atrm_arg_elements[0];
+
+       atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
+       atrm_arg_elements[0].integer.value = offset;
+
+       atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
+       atrm_arg_elements[1].integer.value = len;
+
+       status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
+       if (ACPI_FAILURE(status)) {
+               printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
+               return -ENODEV;
+       }
+
+       obj = (union acpi_object *)buffer.pointer;
+       memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
+       len = obj->buffer.length;
+       kfree(buffer.pointer);
+       return len;
+}
+
 static bool radeon_atrm_get_bios(struct radeon_device *rdev)
 {
        int ret;
        int size = 256 * 1024;
        int i;
+       struct pci_dev *pdev = NULL;
+       acpi_handle dhandle, atrm_handle;
+       acpi_status status;
+       bool found = false;
+
+       /* ATRM is for the discrete card only */
+       if (rdev->flags & RADEON_IS_IGP)
+               return false;
+
+       while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+               dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+               if (!dhandle)
+                       continue;
+
+               status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+               if (!ACPI_FAILURE(status)) {
+                       found = true;
+                       break;
+               }
+       }
 
-       if (!radeon_atrm_supported(rdev->pdev))
+       if (!found)
                return false;
 
        rdev->bios = kmalloc(size, GFP_KERNEL);
@@ -117,9 +183,10 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
        }
 
        for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
-               ret = radeon_atrm_get_bios_chunk(rdev->bios,
-                                                (i * ATRM_BIOS_PAGE),
-                                                ATRM_BIOS_PAGE);
+               ret = radeon_atrm_call(atrm_handle,
+                                      rdev->bios,
+                                      (i * ATRM_BIOS_PAGE),
+                                      ATRM_BIOS_PAGE);
                if (ret < ATRM_BIOS_PAGE)
                        break;
        }
@@ -130,6 +197,12 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
        }
        return true;
 }
+#else
+static inline bool radeon_atrm_get_bios(struct radeon_device *rdev)
+{
+       return false;
+}
+#endif
 
 static bool ni_read_disabled_bios(struct radeon_device *rdev)
 {
@@ -476,6 +549,61 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
                return legacy_read_disabled_bios(rdev);
 }
 
+#ifdef CONFIG_ACPI
+static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+       bool ret = false;
+       struct acpi_table_header *hdr;
+       acpi_size tbl_size;
+       UEFI_ACPI_VFCT *vfct;
+       GOP_VBIOS_CONTENT *vbios;
+       VFCT_IMAGE_HEADER *vhdr;
+
+       if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+               return false;
+       if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
+               DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+               goto out_unmap;
+       }
+
+       vfct = (UEFI_ACPI_VFCT *)hdr;
+       if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
+               DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+               goto out_unmap;
+       }
+
+       vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
+       vhdr = &vbios->VbiosHeader;
+       DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
+                       vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
+                       vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
+
+       if (vhdr->PCIBus != rdev->pdev->bus->number ||
+           vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
+           vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
+           vhdr->VendorID != rdev->pdev->vendor ||
+           vhdr->DeviceID != rdev->pdev->device) {
+               DRM_INFO("ACPI VFCT table is not for this card\n");
+               goto out_unmap;
+       };
+
+       if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
+               DRM_ERROR("ACPI VFCT image truncated\n");
+               goto out_unmap;
+       }
+
+       rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
+       ret = !!rdev->bios;
+
+out_unmap:
+       return ret;
+}
+#else
+static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+       return false;
+}
+#endif
 
 bool radeon_get_bios(struct radeon_device *rdev)
 {
@@ -484,6 +612,8 @@ bool radeon_get_bios(struct radeon_device *rdev)
 
        r = radeon_atrm_get_bios(rdev);
        if (r == false)
+               r = radeon_acpi_vfct_bios(rdev);
+       if (r == false)
                r = igp_read_bios_from_vram(rdev);
        if (r == false)
                r = radeon_read_bios(rdev);
index d7269f4..27d22d7 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
  *   2.18.0 - r600-eg: allow "invalid" DB formats
  *   2.19.0 - r600-eg: MSAA textures
  *   2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
+ *   2.21.0 - r600-r700: FMASK and CMASK
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       20
+#define KMS_DRIVER_MINOR       21
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
index 1cb014b..9024e72 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -132,6 +132,7 @@ int radeon_bo_create(struct radeon_device *rdev,
        acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
                                       sizeof(struct radeon_bo));
 
+retry:
        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
@@ -145,8 +146,6 @@ int radeon_bo_create(struct radeon_device *rdev,
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
        INIT_LIST_HEAD(&bo->va);
-
-retry:
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocation are uninterruptible */
        down_read(&rdev->pm.mclk_lock);
index ec79b37..43c431a 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -706,6 +706,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
        if (radeon_debugfs_ring_init(rdev, ring)) {
                DRM_ERROR("Failed to register debugfs file for rings !\n");
        }
+       radeon_ring_lockup_update(ring);
        return 0;
 }
 
index 5e659b0..f93e45d 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -744,14 +744,6 @@ r600 0x9400
 0x00028C38 CB_CLRCMP_DST
 0x00028C3C CB_CLRCMP_MSK
 0x00028C34 CB_CLRCMP_SRC
-0x00028100 CB_COLOR0_MASK
-0x00028104 CB_COLOR1_MASK
-0x00028108 CB_COLOR2_MASK
-0x0002810C CB_COLOR3_MASK
-0x00028110 CB_COLOR4_MASK
-0x00028114 CB_COLOR5_MASK
-0x00028118 CB_COLOR6_MASK
-0x0002811C CB_COLOR7_MASK
 0x00028808 CB_COLOR_CONTROL
 0x0002842C CB_FOG_BLUE
 0x00028428 CB_FOG_GREEN
index f5dd89e..9159d48 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -354,8 +354,7 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
 
 static void udl_crtc_disable(struct drm_crtc *crtc)
 {
-
-
+       udl_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 }
 
 static void udl_crtc_destroy(struct drm_crtc *crtc)
index 6b0078f..c50724b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1688,15 +1688,19 @@ int vmw_du_page_flip(struct drm_crtc *crtc,
        struct vmw_private *dev_priv = vmw_priv(crtc->dev);
        struct drm_framebuffer *old_fb = crtc->fb;
        struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
-       struct drm_file *file_priv = event->base.file_priv;
+       struct drm_file *file_priv ;
        struct vmw_fence_obj *fence = NULL;
        struct drm_clip_rect clips;
        int ret;
 
+       if (event == NULL)
+               return -EINVAL;
+
        /* require ScreenObject support for page flipping */
        if (!dev_priv->sou_priv)
                return -ENOSYS;
 
+       file_priv = event->base.file_priv;
        if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
                return -EINVAL;
 
index aedb94f..dae3ddf 100644
--- a/drivers/i2c/busses/i2c-diolan-u2c.c
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -405,6 +405,7 @@ static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
                        }
                }
        }
+       ret = num;
 abort:
        sret = diolan_i2c_stop(dev);
        if (sret < 0 && ret >= 0)
index 5e6f1ee..61b00ed 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -350,10 +350,6 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
 
        i2c_clk = clk_get_rate(dev->clk);
 
-       /* fallback to std. mode if machine has not provided it */
-       if (dev->cfg.clk_freq == 0)
-               dev->cfg.clk_freq = 100000;
-
        /*
         * The spec says, in case of std. mode the divider is
         * 2 whereas it is 3 for fast and fastplus mode of
@@ -911,20 +907,32 @@ static const struct i2c_algorithm nmk_i2c_algo = {
        .functionality  = nmk_i2c_functionality
 };
 
+static struct nmk_i2c_controller u8500_i2c = {
+       /*
+        * Slave data setup time; 250ns, 100ns, and 10ns, which
+        * is 14, 6 and 2 respectively for a 48Mhz i2c clock.
+        */
+       .slsu           = 0xe,
+       .tft            = 1,      /* Tx FIFO threshold */
+       .rft            = 8,      /* Rx FIFO threshold */
+       .clk_freq       = 400000, /* fast mode operation */
+       .timeout        = 200,    /* Slave response timeout(ms) */
+       .sm             = I2C_FREQ_MODE_FAST,
+};
+
 static atomic_t adapter_id = ATOMIC_INIT(0);
 
 static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
 {
        int ret = 0;
-       struct nmk_i2c_controller *pdata =
-                       adev->dev.platform_data;
+       struct nmk_i2c_controller *pdata = adev->dev.platform_data;
        struct nmk_i2c_dev      *dev;
        struct i2c_adapter *adap;
 
-       if (!pdata) {
-               dev_warn(&adev->dev, "no platform data\n");
-               return -ENODEV;
-       }
+       if (!pdata)
+               /* No i2c configuration found, using the default. */
+               pdata = &u8500_i2c;
+
        dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL);
        if (!dev) {
                dev_err(&adev->dev, "cannot allocate memory\n");
index 6849635..5d19a49 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -584,7 +584,7 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
 
        r = pm_runtime_get_sync(dev->dev);
        if (IS_ERR_VALUE(r))
-               return r;
+               goto out;
 
        r = omap_i2c_wait_for_bb(dev);
        if (r < 0)
index 66eb53f..9a08c57 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -712,7 +712,7 @@ static int __devexit tegra_i2c_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int tegra_i2c_suspend(struct device *dev)
 {
        struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
index 4f50145..662c5f7 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev)
        priv = netdev_priv(dev);
 
        dev->irq = res_irq->start;
-       priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED);
+       priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+       if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
+               priv->irq_flags |= IRQF_SHARED;
        priv->reg_base = addr;
        /* The CAN clock frequency is half the oscillator clock frequency */
        priv->can.clock.freq = pdata->osc_freq / 2;
index 3105961..b595d34 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card,
        const uint8_t *mem, *end, *dat;
        uint16_t type, len;
        uint32_t addr;
-       uint8_t *buf = NULL;
+       uint8_t *buf = NULL, *new_buf;
        int buflen = 0;
        int8_t type_end = 0;
 
@@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card,
                if (len > buflen) {
                        /* align buflen */
                        buflen = (len + (1024-1)) & ~(1024-1);
-                       buf = krealloc(buf, buflen, GFP_KERNEL);
-                       if (!buf) {
+                       new_buf = krealloc(buf, buflen, GFP_KERNEL);
+                       if (!new_buf) {
                                ret = -ENOMEM;
                                goto failed;
                        }
+                       buf = new_buf;
                }
                /* verify record data */
                memcpy_fromio(buf, &dpram[addr + offset], len);
index 463b9ec..6d1a24a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params {
                        continue;               \
                else
 
-#define for_each_napi_rx_queue(bp, var) \
-       for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
-
 /* Skip OOO FP */
 #define for_each_tx_queue(bp, var) \
        for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
index e879e19..af20c6e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
         */
        bnx2x_setup_tc(bp->dev, bp->max_cos);
 
+       /* Add all NAPI objects */
+       bnx2x_add_all_napi(bp);
        bnx2x_napi_enable(bp);
 
        /* set pf load just before approaching the MCP */
@@ -2408,6 +2410,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
                /* Disable HW interrupts, NAPI */
                bnx2x_netif_stop(bp, 1);
+               /* Delete all NAPI objects */
+               bnx2x_del_all_napi(bp);
 
                /* Release IRQs */
                bnx2x_free_irq(bp);
index dfa757e..21b5532 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -792,7 +792,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
        bp->num_napi_queues = bp->num_queues;
 
        /* Add NAPI objects */
-       for_each_napi_rx_queue(bp, i)
+       for_each_rx_queue(bp, i)
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, BNX2X_NAPI_WEIGHT);
 }
@@ -801,7 +801,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
 {
        int i;
 
-       for_each_napi_rx_queue(bp, i)
+       for_each_rx_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
 }
 
index fc4e0e3..c37a68d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2888,11 +2888,9 @@ static void bnx2x_get_channels(struct net_device *dev,
  */
 static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
 {
-       bnx2x_del_all_napi(bp);
        bnx2x_disable_msi(bp);
        BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
        bnx2x_set_int_mode(bp);
-       bnx2x_add_all_napi(bp);
 }
 
 /**
index 02b5a34..2105498 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8427,6 +8427,8 @@ unload_error:
 
        /* Disable HW interrupts, NAPI */
        bnx2x_netif_stop(bp, 1);
+       /* Delete all NAPI objects */
+       bnx2x_del_all_napi(bp);
 
        /* Release IRQs */
        bnx2x_free_irq(bp);
@@ -11229,10 +11231,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 static void poll_bnx2x(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
+       int i;
 
-       disable_irq(bp->pdev->irq);
-       bnx2x_interrupt(bp->pdev->irq, dev);
-       enable_irq(bp->pdev->irq);
+       for_each_eth_queue(bp, i) {
+               struct bnx2x_fastpath *fp = &bp->fp[i];
+               napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+       }
 }
 #endif
 
@@ -11899,9 +11903,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
         */
        bnx2x_set_int_mode(bp);
 
-       /* Add all NAPI objects */
-       bnx2x_add_all_napi(bp);
-
        rc = register_netdev(dev);
        if (rc) {
                dev_err(&pdev->dev, "Cannot register net device\n");
@@ -11976,9 +11977,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
 
        unregister_netdev(dev);
 
-       /* Delete all NAPI objects */
-       bnx2x_del_all_napi(bp);
-
        /* Power on: we can't let PCI layer write to us while we are in D3 */
        bnx2x_set_power_state(bp, PCI_D0);
 
@@ -12025,6 +12023,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
        bnx2x_tx_disable(bp);
 
        bnx2x_netif_stop(bp, 0);
+       /* Delete all NAPI objects */
+       bnx2x_del_all_napi(bp);
 
        del_timer_sync(&bp->timer);
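
Taken together, the bnx2x hunks move netif_napi_add()/netif_napi_del() out of probe/remove and into the load/unload path, and make the netpoll controller schedule the per-queue NAPI contexts instead of poking the IRQ handler directly. A rough, self-contained sketch of that lifecycle; the helper names and the weight of 64 are assumptions, not bnx2x code.

#include <linux/netdevice.h>

static void example_up(struct net_device *dev, struct napi_struct *napis,
		       unsigned int nr, int (*poll)(struct napi_struct *, int))
{
	unsigned int i;

	/* register and enable one NAPI context per RX queue at bring-up time */
	for (i = 0; i < nr; i++) {
		netif_napi_add(dev, &napis[i], poll, 64);
		napi_enable(&napis[i]);
	}
}

static void example_down(struct napi_struct *napis, unsigned int nr)
{
	unsigned int i;

	/* quiesce and unregister them again on teardown */
	for (i = 0; i < nr; i++) {
		napi_disable(&napis[i]);
		netif_napi_del(&napis[i]);
	}
}

static void example_poll_controller(struct napi_struct *napis, unsigned int nr)
{
	unsigned int i;

	/* netpoll: kick every queue's poll routine instead of the IRQ handler */
	for (i = 0; i < nr; i++)
		napi_schedule(&napis[i]);
}
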
 
index fc68ca0..701b3e9 100644 (file)
@@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter)
        int num = 0, status = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
-       spin_lock_bh(&adapter->mcc_cq_lock);
+       spin_lock(&adapter->mcc_cq_lock);
        while ((compl = be_mcc_compl_get(adapter))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
@@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter)
        if (num)
                be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
 
-       spin_unlock_bh(&adapter->mcc_cq_lock);
+       spin_unlock(&adapter->mcc_cq_lock);
        return status;
 }
 
@@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
                if (be_error(adapter))
                        return -EIO;
 
+               local_bh_disable();
                status = be_process_mcc(adapter);
+               local_bh_enable();
 
                if (atomic_read(&mcc_obj->q.used) == 0)
                        break;
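
The two benet hunks above split responsibility for bottom-half disabling: be_process_mcc() now takes a plain spin_lock() and relies on its callers, so the NAPI path (where BHs are already off) does not double-disable them, while process-context callers wrap the call in local_bh_disable()/local_bh_enable(). A hedged sketch of that convention with hypothetical names:

#include <linux/spinlock.h>
#include <linux/bottom_half.h>

struct mcc_ctx {
	spinlock_t cq_lock;	/* owner must spin_lock_init() this */
};

/* core routine: assumes the caller has already disabled bottom halves */
static void mcc_reap(struct mcc_ctx *ctx)
{
	spin_lock(&ctx->cq_lock);
	/* ... drain completion queue entries here ... */
	spin_unlock(&ctx->cq_lock);
}

/* process-context wrapper: disable BHs around the core routine */
static void mcc_reap_process_ctx(struct mcc_ctx *ctx)
{
	local_bh_disable();
	mcc_reap(ctx);
	local_bh_enable();
}
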
index 6d139d6..111dc88 100644 (file)
@@ -3765,7 +3765,9 @@ static void be_worker(struct work_struct *work)
        /* when interrupts are not yet enabled, just reap any pending
        * mcc completions */
        if (!netif_running(adapter->netdev)) {
+               local_bh_disable();
                be_process_mcc(adapter);
+               local_bh_enable();
                goto reschedule;
        }
 
index f762a7f..4d5b58c 100644 (file)
@@ -1040,7 +1040,7 @@ static int gfar_probe(struct platform_device *ofdev)
 
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
                dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-               dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+               dev->features |= NETIF_F_HW_VLAN_RX;
        }
 
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
index cd15332..cb3356c 100644 (file)
@@ -310,6 +310,7 @@ struct e1000_adapter {
         */
        struct e1000_ring *tx_ring /* One per active queue */
                                                ____cacheline_aligned_in_smp;
+       u32 tx_fifo_limit;
 
        struct napi_struct napi;
 
index 46c3b1f..d01a099 100644 (file)
@@ -3517,6 +3517,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
        }
 
        /*
+        * Alignment of Tx data is on an arbitrary byte boundary with the
+        * maximum size per Tx descriptor limited only to the transmit
+        * allocation of the packet buffer minus 96 bytes with an upper
+        * limit of 24KB due to receive synchronization limitations.
+        */
+       adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
+                                      24 << 10);
+
+       /*
         * Disable Adaptive Interrupt Moderation if 2 full packets cannot
         * fit in receive buffer.
         */
@@ -4785,12 +4794,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
        return 1;
 }
 
-#define E1000_MAX_PER_TXD      8192
-#define E1000_MAX_TXD_PWR      12
-
 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
                        unsigned int first, unsigned int max_per_txd,
-                       unsigned int nr_frags, unsigned int mss)
+                       unsigned int nr_frags)
 {
        struct e1000_adapter *adapter = tx_ring->adapter;
        struct pci_dev *pdev = adapter->pdev;
@@ -5023,20 +5029,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 
 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 {
+       BUG_ON(size > tx_ring->count);
+
        if (e1000_desc_unused(tx_ring) >= size)
                return 0;
        return __e1000_maybe_stop_tx(tx_ring, size);
 }
 
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                    struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_ring *tx_ring = adapter->tx_ring;
        unsigned int first;
-       unsigned int max_per_txd = E1000_MAX_PER_TXD;
-       unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
        unsigned int tx_flags = 0;
        unsigned int len = skb_headlen(skb);
        unsigned int nr_frags;
@@ -5056,18 +5061,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        }
 
        mss = skb_shinfo(skb)->gso_size;
-       /*
-        * The controller does a simple calculation to
-        * make sure there is enough room in the FIFO before
-        * initiating the DMA for each buffer.  The calc is:
-        * 4 = ceil(buffer len/mss).  To make sure we don't
-        * overrun the FIFO, adjust the max buffer len if mss
-        * drops.
-        */
        if (mss) {
                u8 hdr_len;
-               max_per_txd = min(mss << 2, max_per_txd);
-               max_txd_pwr = fls(max_per_txd) - 1;
 
                /*
                 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
@@ -5097,12 +5092,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                count++;
        count++;
 
-       count += TXD_USE_COUNT(len, max_txd_pwr);
+       count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
 
        nr_frags = skb_shinfo(skb)->nr_frags;
        for (f = 0; f < nr_frags; f++)
-               count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
-                                      max_txd_pwr);
+               count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+                                     adapter->tx_fifo_limit);
 
        if (adapter->hw.mac.tx_pkt_filtering)
                e1000_transfer_dhcp_info(adapter, skb);
@@ -5144,15 +5139,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                tx_flags |= E1000_TX_FLAGS_NO_FCS;
 
        /* if count is 0 then mapping error has occurred */
-       count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
+       count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
+                            nr_frags);
        if (count) {
                skb_tx_timestamp(skb);
 
                netdev_sent_queue(netdev, skb->len);
                e1000_tx_queue(tx_ring, tx_flags, count);
                /* Make sure there is space in the ring for the next send. */
-               e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
-
+               e1000_maybe_stop_tx(tx_ring,
+                                   (MAX_SKB_FRAGS *
+                                    DIV_ROUND_UP(PAGE_SIZE,
+                                                 adapter->tx_fifo_limit) + 2));
        } else {
                dev_kfree_skb_any(skb);
                tx_ring->buffer_info[first].time_stamp = 0;
@@ -6327,8 +6325,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        adapter->hw.phy.autoneg_advertised = 0x2f;
 
        /* ring size defaults */
-       adapter->rx_ring->count = 256;
-       adapter->tx_ring->count = 256;
+       adapter->rx_ring->count = E1000_DEFAULT_RXD;
+       adapter->tx_ring->count = E1000_DEFAULT_TXD;
 
        /*
         * Initial Wake on LAN setting - If APM wake is enabled in
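
For the e1000e change, the per-descriptor limit is no longer a fixed 8 KiB but the tx_fifo_limit derived from the packet-buffer allocation, and descriptor counts are computed with DIV_ROUND_UP() against it. A small illustrative helper, not driver code, showing the same accounting:

#include <linux/kernel.h>
#include <linux/types.h>

static unsigned int txd_estimate(unsigned int headlen,
				 const unsigned int *frag_len,
				 unsigned int nr_frags,
				 u32 tx_fifo_limit)
{
	unsigned int count = DIV_ROUND_UP(headlen, tx_fifo_limit);
	unsigned int f;

	/* each fragment may need several descriptors if it exceeds the limit */
	for (f = 0; f < nr_frags; f++)
		count += DIV_ROUND_UP(frag_len[f], tx_fifo_limit);

	return count;
}
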
index 2bd5c2d..f8e7e20 100644 (file)
@@ -861,8 +861,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
                                       &ip_entry->ip4dst, &ip_entry->pdst);
        if (rc != 0) {
                rc = efx_filter_get_ipv4_full(
-                       &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc,
-                       &ip_entry->ip4dst, &ip_entry->pdst);
+                       &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
+                       &ip_entry->ip4src, &ip_entry->psrc);
                EFX_WARN_ON_PARANOID(rc);
                ip_mask->ip4src = ~0;
                ip_mask->psrc = ~0;
index e2d0832..719be39 100644 (file)
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __COMMON_H__
+#define __COMMON_H__
+
 #include <linux/etherdevice.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
@@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
 
 extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 extern const struct stmmac_ring_mode_ops ring_mode_ops;
+
+#endif /* __COMMON_H__ */
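
This and the following stmmac hunks all add the same thing: conventional include guards so the headers can be pulled in from more than one translation unit without redefinition errors. The minimal form, with an illustrative guard name:

#ifndef __EXAMPLE_H__
#define __EXAMPLE_H__

/* declarations shared between compilation units go here */
int example_init(void);

#endif /* __EXAMPLE_H__ */
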
index 9820ec8..223adf9 100644 (file)
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+
+#ifndef __DESCS_H__
+#define __DESCS_H__
+
 struct dma_desc {
        /* Receive descriptor */
        union {
@@ -166,3 +170,5 @@ enum tdes_csum_insertion {
                                         * is not calculated */
        cic_full = 3,           /* IP header and pseudoheader */
 };
+
+#endif /* __DESCS_H__ */
index dd8d6e1..7ee9499 100644 (file)
@@ -27,6 +27,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DESC_COM_H__
+#define __DESC_COM_H__
+
 #if defined(CONFIG_STMMAC_RING)
 static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
 {
@@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
        p->des01.tx.buffer1_size = len;
 }
 #endif
+
+#endif /* __DESC_COM_H__ */
index 7c6d857..2ec6aea 100644 (file)
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DWMAC100_H__
+#define __DWMAC100_H__
+
 #include <linux/phy.h>
 #include "common.h"
 
@@ -119,3 +122,5 @@ enum ttc_control {
 #define DMA_MISSED_FRAME_M_CNTR        0x0000ffff      /* Missed Frame Counter */
 
 extern const struct stmmac_dma_ops dwmac100_dma_ops;
+
+#endif /* __DWMAC100_H__ */
index f90fcb5..0e4cace 100644 (file)
@@ -19,6 +19,8 @@
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+#ifndef __DWMAC1000_H__
+#define __DWMAC1000_H__
 
 #include <linux/phy.h>
 #include "common.h"
@@ -229,6 +231,7 @@ enum rtc_control {
 #define GMAC_MMC_RX_CSUM_OFFLOAD   0x208
 
 /* Synopsys Core versions */
-#define        DWMAC_CORE_3_40 34
+#define        DWMAC_CORE_3_40 0x34
 
 extern const struct stmmac_dma_ops dwmac1000_dma_ops;
+#endif /* __DWMAC1000_H__ */
index e678ce3..e49c9a0 100644 (file)
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DWMAC_DMA_H__
+#define __DWMAC_DMA_H__
+
 /* DMA CRS Control and Status Register Mapping */
 #define DMA_BUS_MODE           0x00001000      /* Bus Mode */
 #define DMA_XMT_POLL_DEMAND    0x00001004      /* Transmit Poll Demand */
@@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr);
 extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
 extern int dwmac_dma_interrupt(void __iomem *ioaddr,
                                struct stmmac_extra_stats *x);
+
+#endif /* __DWMAC_DMA_H__ */
index a383520..67995ef 100644 (file)
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __MMC_H__
+#define __MMC_H__
+
 /* MMC control register */
 /* When set, all counters are reset */
 #define MMC_CNTRL_COUNTER_RESET                0x1
@@ -129,3 +132,5 @@ struct stmmac_counters {
 extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
 extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
 extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
+
+#endif /* __MMC_H__ */
index c07cfe9..0c74a70 100644 (file)
@@ -33,7 +33,7 @@
 #define MMC_TX_INTR            0x00000108      /* MMC TX Interrupt */
 #define MMC_RX_INTR_MASK       0x0000010c      /* MMC Interrupt Mask */
 #define MMC_TX_INTR_MASK       0x00000110      /* MMC Interrupt Mask */
-#define MMC_DEFAUL_MASK                0xffffffff
+#define MMC_DEFAULT_MASK               0xffffffff
 
 /* MMC TX counter registers */
 
@@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
 /* To mask all interrupts. */
 void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
 {
-       writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK);
-       writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK);
+       writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
+       writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
 }
 
 /* This reads the MAC core counters (if actually supported).
index f2d3665..e872e1d 100644 (file)
@@ -20,6 +20,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __STMMAC_H__
+#define __STMMAC_H__
+
 #define STMMAC_RESOURCE_NAME   "stmmaceth"
 #define DRV_MODULE_VERSION     "March_2012"
 
@@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void)
 {
 }
 #endif /* CONFIG_STMMAC_PCI */
+
+#endif /* __STMMAC_H__ */
index 6863590..aea9b14 100644 (file)
@@ -21,6 +21,8 @@
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+#ifndef __STMMAC_TIMER_H__
+#define __STMMAC_TIMER_H__
 
 struct stmmac_timer {
        void (*timer_start) (unsigned int new_freq);
@@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev);
 extern int tmu2_register_user(void *fnt, void *data);
 extern void tmu2_unregister_user(void);
 #endif
+
+#endif /* __STMMAC_TIMER_H__ */
index 4026c90..b7e0258 100644 (file)
@@ -1482,7 +1482,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
        case AR5K_EEPROM_MODE_11A:
                offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version);
                rate_pcal_info = ee->ee_rate_tpwr_a;
-               ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN;
+               ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN;
                break;
        case AR5K_EEPROM_MODE_11B:
                offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version);
index dc2bcfe..94a9bbe 100644 (file)
 #define AR5K_EEPROM_EEP_DELTA          10
 #define AR5K_EEPROM_N_MODES            3
 #define AR5K_EEPROM_N_5GHZ_CHAN                10
+#define AR5K_EEPROM_N_5GHZ_RATE_CHAN   8
 #define AR5K_EEPROM_N_2GHZ_CHAN                3
 #define AR5K_EEPROM_N_2GHZ_CHAN_2413   4
 #define        AR5K_EEPROM_N_2GHZ_CHAN_MAX     4
index 513e172..718da8d 100644 (file)
@@ -1237,6 +1237,9 @@ uint brcms_reset(struct brcms_info *wl)
        /* dpc will not be rescheduled */
        wl->resched = false;
 
+       /* inform publicly that interface is down */
+       wl->pub->up = false;
+
        return 0;
 }
 
index 95aa8e1..83324b3 100644 (file)
@@ -2042,7 +2042,8 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
                return;
        }
        len = ETH_ALEN;
-       ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len);
+       ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid,
+                                 &len);
        if (ret) {
                IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
                               __LINE__);
index ce826bc..1a98fa3 100644 (file)
@@ -124,6 +124,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
        const struct fw_img *img;
        size_t bufsz;
 
+       if (!iwl_is_ready_rf(priv))
+               return -EAGAIN;
+
        /* default is to dump the entire data segment */
        if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
                priv->dbgfs_sram_offset = 0x800000;
index 3ef8d5a..71c7994 100644 (file)
@@ -351,7 +351,7 @@ int iwl_queue_space(const struct iwl_queue *q);
 /*****************************************************
 * Error handling
 ******************************************************/
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
+int iwl_dump_fh(struct iwl_trans *trans, char **buf);
 void iwl_dump_csr(struct iwl_trans *trans);
 
 /*****************************************************
index d80604a..4983720 100644 (file)
@@ -565,7 +565,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
        }
 
        iwl_dump_csr(trans);
-       iwl_dump_fh(trans, NULL, false);
+       iwl_dump_fh(trans, NULL);
 
        iwl_op_mode_nic_error(trans->op_mode);
 }
index 38f51b0..8488511 100644 (file)
@@ -1649,13 +1649,9 @@ static const char *get_fh_string(int cmd)
 #undef IWL_CMD
 }
 
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
+int iwl_dump_fh(struct iwl_trans *trans, char **buf)
 {
        int i;
-#ifdef CONFIG_IWLWIFI_DEBUG
-       int pos = 0;
-       size_t bufsz = 0;
-#endif
        static const u32 fh_tbl[] = {
                FH_RSCSR_CHNL0_STTS_WPTR_REG,
                FH_RSCSR_CHNL0_RBDCB_BASE_REG,
@@ -1667,29 +1663,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
                FH_TSSR_TX_STATUS_REG,
                FH_TSSR_TX_ERROR_REG
        };
-#ifdef CONFIG_IWLWIFI_DEBUG
-       if (display) {
-               bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (buf) {
+               int pos = 0;
+               size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
                *buf = kmalloc(bufsz, GFP_KERNEL);
                if (!*buf)
                        return -ENOMEM;
+
                pos += scnprintf(*buf + pos, bufsz - pos,
                                "FH register values:\n");
-               for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+
+               for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
                        pos += scnprintf(*buf + pos, bufsz - pos,
                                "  %34s: 0X%08x\n",
                                get_fh_string(fh_tbl[i]),
                                iwl_read_direct32(trans, fh_tbl[i]));
-               }
+
                return pos;
        }
 #endif
+
        IWL_ERR(trans, "FH register values:\n");
-       for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++) {
+       for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++)
                IWL_ERR(trans, "  %34s: 0X%08x\n",
                        get_fh_string(fh_tbl[i]),
                        iwl_read_direct32(trans, fh_tbl[i]));
-       }
+
        return 0;
 }
 
@@ -1982,11 +1984,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
                                     size_t count, loff_t *ppos)
 {
        struct iwl_trans *trans = file->private_data;
-       char *buf;
+       char *buf = NULL;
        int pos = 0;
        ssize_t ret = -EFAULT;
 
-       ret = pos = iwl_dump_fh(trans, &buf, true);
+       ret = pos = iwl_dump_fh(trans, &buf);
        if (buf) {
                ret = simple_read_from_buffer(user_buf,
                                              count, ppos, buf, pos);
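
After this change iwl_dump_fh() uses its buf argument to pick between two modes: a NULL pointer means "print to the kernel log", a non-NULL one means "allocate a buffer for debugfs and return the number of bytes written". A generic sketch of that convention; the register table, sizing and the read callback are placeholders, not iwlwifi code.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>

static int example_dump(char **buf, const u32 *regs, size_t n,
			u32 (*read_reg)(u32 reg))
{
	size_t i;

	if (buf) {
		size_t bufsz = n * 48 + 40;
		int pos = 0;

		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;

		for (i = 0; i < n; i++)
			pos += scnprintf(*buf + pos, bufsz - pos,
					 "  0x%08x: 0x%08x\n",
					 regs[i], read_reg(regs[i]));
		return pos;	/* length for simple_read_from_buffer() */
	}

	for (i = 0; i < n; i++)
		pr_info("  0x%08x: 0x%08x\n", regs[i], read_reg(regs[i]));
	return 0;
}
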
index 39afd37..c934fe8 100644 (file)
@@ -57,8 +57,7 @@
 static const struct ethtool_ops xennet_ethtool_ops;
 
 struct netfront_cb {
-       struct page *page;
-       unsigned offset;
+       int pull_to;
 };
 
 #define NETFRONT_SKB_CB(skb)   ((struct netfront_cb *)((skb)->cb))
@@ -867,15 +866,9 @@ static int handle_incoming_queue(struct net_device *dev,
        struct sk_buff *skb;
 
        while ((skb = __skb_dequeue(rxq)) != NULL) {
-               struct page *page = NETFRONT_SKB_CB(skb)->page;
-               void *vaddr = page_address(page);
-               unsigned offset = NETFRONT_SKB_CB(skb)->offset;
-
-               memcpy(skb->data, vaddr + offset,
-                      skb_headlen(skb));
+               int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-               if (page != skb_frag_page(&skb_shinfo(skb)->frags[0]))
-                       __free_page(page);
+               __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 
                /* Ethernet work: Delayed to here as it peeks the header. */
                skb->protocol = eth_type_trans(skb, dev);
@@ -913,7 +906,6 @@ static int xennet_poll(struct napi_struct *napi, int budget)
        struct sk_buff_head errq;
        struct sk_buff_head tmpq;
        unsigned long flags;
-       unsigned int len;
        int err;
 
        spin_lock(&np->rx_lock);
@@ -955,24 +947,13 @@ err:
                        }
                }
 
-               NETFRONT_SKB_CB(skb)->page =
-                       skb_frag_page(&skb_shinfo(skb)->frags[0]);
-               NETFRONT_SKB_CB(skb)->offset = rx->offset;
-
-               len = rx->status;
-               if (len > RX_COPY_THRESHOLD)
-                       len = RX_COPY_THRESHOLD;
-               skb_put(skb, len);
+               NETFRONT_SKB_CB(skb)->pull_to = rx->status;
+               if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
+                       NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
 
-               if (rx->status > len) {
-                       skb_shinfo(skb)->frags[0].page_offset =
-                               rx->offset + len;
-                       skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
-                       skb->data_len = rx->status - len;
-               } else {
-                       __skb_fill_page_desc(skb, 0, NULL, 0, 0);
-                       skb_shinfo(skb)->nr_frags = 0;
-               }
+               skb_shinfo(skb)->frags[0].page_offset = rx->offset;
+               skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
+               skb->data_len = rx->status;
 
                i = xennet_fill_frags(np, skb, &tmpq);
 
@@ -999,7 +980,7 @@ err:
                 * receive throughput using the standard receive
                 * buffer size was cut by 25%(!!!).
                 */
-               skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
+               skb->truesize += skb->data_len - RX_COPY_THRESHOLD;
                skb->len += skb->data_len;
 
                if (rx->flags & XEN_NETRXF_csum_blank)
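
The xen-netfront hunks replace the copy-at-receive-time scheme with a deferred pull: the ring slot's status is recorded as a pull_to target in the skb control block, the payload stays in the fragment, and __pskb_pull_tail() moves the needed head bytes into the linear area just before the skb is handed to the stack. A hedged sketch of that delivery step, with assumed names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>

static void example_deliver(struct net_device *dev, struct sk_buff *skb,
			    int pull_to)
{
	/* pull the protocol headers into the linear area if necessary */
	if (pull_to > skb_headlen(skb))
		__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb(skb);
}
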
index 8fc3808..90c5c73 100644 (file)
@@ -1,12 +1,31 @@
 menuconfig PWM
-       bool "PWM Support"
+       bool "Pulse-Width Modulation (PWM) Support"
        depends on !MACH_JZ4740 && !PUV3_PWM
        help
-         This enables PWM support through the generic PWM framework.
-         You only need to enable this, if you also want to enable
-         one or more of the PWM drivers below.
-
-         If unsure, say N.
+         Generic Pulse-Width Modulation (PWM) support.
+
+         In Pulse-Width Modulation, a variation of the width of pulses
+         in a rectangular pulse signal is used as a means to alter the
+         average power of the signal. Applications include efficient
+         power delivery and voltage regulation. In computer systems,
+         PWMs are commonly used to control fans or the brightness of
+         display backlights.
+
+         This framework provides a generic interface to PWM devices
+         within the Linux kernel. On the driver side it provides an API
+         to register and unregister a PWM chip, an abstraction of a PWM
+         controller, that supports one or more PWM devices. Client
+         drivers can request PWM devices and use the generic framework
+         to configure as well as enable and disable them.
+
+         This generic framework replaces the legacy PWM framework which
+         allows only a single driver implementing the required API. Not
+         all legacy implementations have been ported to the framework
+         yet. The framework provides an API that is backward compatible
+         with the legacy framework so that existing client drivers
+         continue to work as expected.
+
+         If unsure, say no.
 
 if PWM
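
The help text above describes the consumer side of the framework; a hedged example of what a client driver does with it follows. The device pointer, the 1 ms period and the 50% duty cycle are illustrative.

#include <linux/pwm.h>
#include <linux/err.h>
#include <linux/device.h>

static int example_pwm_on(struct device *dev)
{
	struct pwm_device *pwm;
	int ret;

	pwm = pwm_get(dev, NULL);		/* request a PWM device */
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	ret = pwm_config(pwm, 500000, 1000000);	/* 50% duty, 1 ms period */
	if (!ret)
		ret = pwm_enable(pwm);

	if (ret)
		pwm_put(pwm);			/* release on failure */
	return ret;
}
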
 
index ecb7690..c6e0507 100644 (file)
@@ -129,8 +129,8 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
        return 0;
 }
 
-static struct pwm_device *of_pwm_simple_xlate(struct pwm_chip *pc,
-                                             const struct of_phandle_args *args)
+static struct pwm_device *
+of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
 {
        struct pwm_device *pwm;
 
@@ -149,7 +149,7 @@ static struct pwm_device *of_pwm_simple_xlate(struct pwm_chip *pc,
        return pwm;
 }
 
-void of_pwmchip_add(struct pwm_chip *chip)
+static void of_pwmchip_add(struct pwm_chip *chip)
 {
        if (!chip->dev || !chip->dev->of_node)
                return;
@@ -162,7 +162,7 @@ void of_pwmchip_add(struct pwm_chip *chip)
        of_node_get(chip->dev->of_node);
 }
 
-void of_pwmchip_remove(struct pwm_chip *chip)
+static void of_pwmchip_remove(struct pwm_chip *chip)
 {
        if (chip->dev && chip->dev->of_node)
                of_node_put(chip->dev->of_node);
@@ -527,7 +527,7 @@ void __init pwm_add_table(struct pwm_lookup *table, size_t num)
 struct pwm_device *pwm_get(struct device *dev, const char *con_id)
 {
        struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER);
-       const char *dev_id = dev ? dev_name(dev): NULL;
+       const char *dev_id = dev ? dev_name(dev) : NULL;
        struct pwm_chip *chip = NULL;
        unsigned int index = 0;
        unsigned int best = 0;
@@ -609,7 +609,7 @@ void pwm_put(struct pwm_device *pwm)
        mutex_lock(&pwm_lock);
 
        if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) {
-               pr_warning("PWM device already freed\n");
+               pr_warn("PWM device already freed\n");
                goto out;
        }
 
index d103865..e5187c0 100644 (file)
@@ -225,6 +225,7 @@ static int s3c_pwm_probe(struct platform_device *pdev)
 
        /* calculate base of control bits in TCON */
        s3c->tcon_base = id == 0 ? 0 : (id * 4) + 4;
+       s3c->chip.dev = &pdev->dev;
        s3c->chip.ops = &s3c_pwm_ops;
        s3c->chip.base = -1;
        s3c->chip.npwm = 1;
index 02ce18d..057465e 100644 (file)
@@ -187,10 +187,8 @@ static int tegra_pwm_probe(struct platform_device *pdev)
        }
 
        pwm->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
-       if (!pwm->mmio_base) {
-               dev_err(&pdev->dev, "failed to ioremap() region\n");
+       if (!pwm->mmio_base)
                return -EADDRNOTAVAIL;
-       }
 
        platform_set_drvdata(pdev, pwm);
 
index 3c2ad28..0b66d0f 100644 (file)
@@ -192,10 +192,8 @@ static int __devinit ecap_pwm_probe(struct platform_device *pdev)
        }
 
        pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
-       if (!pc->mmio_base) {
-               dev_err(&pdev->dev, "failed to ioremap() registers\n");
+       if (!pc->mmio_base)
                return -EADDRNOTAVAIL;
-       }
 
        ret = pwmchip_add(&pc->chip);
        if (ret < 0) {
index 010d232..c3756d1 100644 (file)
@@ -371,10 +371,8 @@ static int __devinit ehrpwm_pwm_probe(struct platform_device *pdev)
        }
 
        pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
-       if (!pc->mmio_base) {
-               dev_err(&pdev->dev, "failed to ioremap() registers\n");
+       if (!pc->mmio_base)
                return  -EADDRNOTAVAIL;
-       }
 
        ret = pwmchip_add(&pc->chip);
        if (ret < 0) {
index 5480214..ad14389 100644 (file)
@@ -41,7 +41,7 @@ static inline void pwm_busy_wait(void __iomem *reg, u8 bitmask)
                cpu_relax();
 
        if (unlikely(!loops))
-               pr_warning("Waiting for status bits 0x%x to clear timed out\n",
+               pr_warn("Waiting for status bits 0x%x to clear timed out\n",
                           bitmask);
 }
 
index 6e32ff6..5552fa7 100644 (file)
@@ -673,8 +673,15 @@ static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg)
        struct scsi_device *sd = pdv->pdv_sd;
        int result;
        struct pscsi_plugin_task *pt = cmd->priv;
-       unsigned char *cdb = &pt->pscsi_cdb[0];
+       unsigned char *cdb;
+       /*
+        * Special case for REPORT_LUNs handling where pscsi_plugin_task has
+        * not been allocated because TCM is handling the emulation directly.
+        */
+       if (!pt)
+               return 0;
 
+       cdb = &pt->pscsi_cdb[0];
        result = pt->pscsi_result;
        /*
         * Hack to make sure that Write-Protect modepage is set if R/O mode is
index 0eaae23..4de3186 100644 (file)
@@ -1165,8 +1165,6 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
                        " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
                                cmd->data_length, size, cmd->t_task_cdb[0]);
 
-               cmd->cmd_spdtl = size;
-
                if (cmd->data_direction == DMA_TO_DEVICE) {
                        pr_err("Rejecting underflow/overflow"
                                        " WRITE data\n");
@@ -2294,9 +2292,9 @@ transport_generic_get_mem(struct se_cmd *cmd)
        return 0;
 
 out:
-       while (i >= 0) {
-               __free_page(sg_page(&cmd->t_data_sg[i]));
+       while (i > 0) {
                i--;
+               __free_page(sg_page(&cmd->t_data_sg[i]));
        }
        kfree(cmd->t_data_sg);
        cmd->t_data_sg = NULL;
@@ -2323,9 +2321,12 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
                if (ret < 0)
                        goto out_fail;
        }
-
-       /* Workaround for handling zero-length control CDBs */
-       if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) {
+       /*
+        * If this command doesn't have any payload and we don't have to call
+        * into the fabric for data transfers, go ahead and complete it right
+        * away.
+        */
+       if (!cmd->data_length) {
                spin_lock_irq(&cmd->t_state_lock);
                cmd->t_state = TRANSPORT_COMPLETE;
                cmd->transport_state |= CMD_T_ACTIVE;
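
The unwind loop fixed above is the classic partial-allocation cleanup: the counter tracks pages that were actually allocated, so the loop decrements first and never frees the entry that failed. Stated generically, with illustrative names:

#include <linux/gfp.h>
#include <linux/scatterlist.h>

static void free_partial_sgl(struct scatterlist *sgl, int nr_allocated)
{
	while (nr_allocated > 0) {
		nr_allocated--;
		__free_page(sg_page(&sgl[nr_allocated]));
	}
}
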
index c5eb3c3..eea6935 100644 (file)
@@ -131,6 +131,7 @@ extern struct list_head ft_lport_list;
 extern struct mutex ft_lport_lock;
 extern struct fc4_prov ft_prov;
 extern struct target_fabric_configfs *ft_configfs;
+extern unsigned int ft_debug_logging;
 
 /*
  * Fabric methods.
index b9cb500..823e692 100644 (file)
@@ -48,7 +48,7 @@
 /*
  * Dump cmd state for debugging.
  */
-void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
+static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
 {
        struct fc_exch *ep;
        struct fc_seq *sp;
@@ -80,6 +80,12 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
        }
 }
 
+void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
+{
+       if (unlikely(ft_debug_logging))
+               _ft_dump_cmd(cmd, caller);
+}
+
 static void ft_free_cmd(struct ft_cmd *cmd)
 {
        struct fc_frame *fp;
index 87901fa..3c9e5b5 100644 (file)
@@ -456,7 +456,9 @@ static void ft_prlo(struct fc_rport_priv *rdata)
        struct ft_tport *tport;
 
        mutex_lock(&ft_lport_lock);
-       tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]);
+       tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP],
+                                         lockdep_is_held(&ft_lport_lock));
+
        if (!tport) {
                mutex_unlock(&ft_lport_lock);
                return;
index 9591e2b..17830c9 100644 (file)
@@ -264,6 +264,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
        return group;
 }
 
+/* called with vfio.group_lock held */
 static void vfio_group_release(struct kref *kref)
 {
        struct vfio_group *group = container_of(kref, struct vfio_group, kref);
@@ -287,13 +288,7 @@ static void vfio_group_release(struct kref *kref)
 
 static void vfio_group_put(struct vfio_group *group)
 {
-       mutex_lock(&vfio.group_lock);
-       /*
-        * Release needs to unlock to unregister the notifier, so only
-        * unlock if not released.
-        */
-       if (!kref_put(&group->kref, vfio_group_release))
-               mutex_unlock(&vfio.group_lock);
+       kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
 }
 
 /* Assume group_lock or group reference is held */
@@ -401,7 +396,6 @@ static void vfio_device_release(struct kref *kref)
                                                  struct vfio_device, kref);
        struct vfio_group *group = device->group;
 
-       mutex_lock(&group->device_lock);
        list_del(&device->group_next);
        mutex_unlock(&group->device_lock);
 
@@ -416,8 +410,9 @@ static void vfio_device_release(struct kref *kref)
 /* Device reference always implies a group reference */
 static void vfio_device_put(struct vfio_device *device)
 {
-       kref_put(&device->kref, vfio_device_release);
-       vfio_group_put(device->group);
+       struct vfio_group *group = device->group;
+       kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
+       vfio_group_put(group);
 }
 
 static void vfio_device_get(struct vfio_device *device)
@@ -1116,10 +1111,10 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
                 */
                filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
 
-               fd_install(ret, filep);
-
                vfio_device_get(device);
                atomic_inc(&group->container_users);
+
+               fd_install(ret, filep);
                break;
        }
        mutex_unlock(&group->device_lock);
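
The vfio hunks switch to kref_put_mutex(), which takes the mutex only when the count is about to hit zero; the release callback then runs with the mutex held and is responsible for dropping it. A minimal illustration with a made-up object type:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>

struct obj {
	struct kref kref;
	struct mutex *list_lock;	/* protects the list we are on */
	struct list_head node;
};

static void obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, kref);

	/* called with *o->list_lock held by kref_put_mutex() */
	list_del(&o->node);
	mutex_unlock(o->list_lock);
	kfree(o);
}

static void obj_put(struct obj *o)
{
	/* the lock is only taken if this put drops the last reference */
	kref_put_mutex(&o->kref, obj_release, o->list_lock);
}
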
index fb36654..ed8e2e6 100644 (file)
 #include "vhost.h"
 #include "tcm_vhost.h"
 
+enum {
+       VHOST_SCSI_VQ_CTL = 0,
+       VHOST_SCSI_VQ_EVT = 1,
+       VHOST_SCSI_VQ_IO = 2,
+};
+
 struct vhost_scsi {
-       atomic_t vhost_ref_cnt;
-       struct tcm_vhost_tpg *vs_tpg;
+       struct tcm_vhost_tpg *vs_tpg;   /* Protected by vhost_scsi->dev.mutex */
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[3];
 
@@ -131,8 +136,7 @@ static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
        return 1;
 }
 
-static u32 tcm_vhost_get_pr_transport_id(
-       struct se_portal_group *se_tpg,
+static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl,
        struct t10_pr_registration *pr_reg,
        int *format_code,
@@ -162,8 +166,7 @@ static u32 tcm_vhost_get_pr_transport_id(
                        format_code, buf);
 }
 
-static u32 tcm_vhost_get_pr_transport_id_len(
-       struct se_portal_group *se_tpg,
+static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl,
        struct t10_pr_registration *pr_reg,
        int *format_code)
@@ -192,8 +195,7 @@ static u32 tcm_vhost_get_pr_transport_id_len(
                        format_code);
 }
 
-static char *tcm_vhost_parse_pr_out_transport_id(
-       struct se_portal_group *se_tpg,
+static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
        const char *buf,
        u32 *out_tid_len,
        char **port_nexus_ptr)
@@ -236,8 +238,7 @@ static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
        return &nacl->se_node_acl;
 }
 
-static void tcm_vhost_release_fabric_acl(
-       struct se_portal_group *se_tpg,
+static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl)
 {
        struct tcm_vhost_nacl *nacl = container_of(se_nacl,
@@ -297,7 +298,16 @@ static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
        return 0;
 }
 
-static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *);
+static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
+{
+       struct vhost_scsi *vs = tv_cmd->tvc_vhost;
+
+       spin_lock_bh(&vs->vs_completion_lock);
+       list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
+       spin_unlock_bh(&vs->vs_completion_lock);
+
+       vhost_work_queue(&vs->dev, &vs->vs_completion_work);
+}
 
 static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
 {
@@ -381,7 +391,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
                                        vs_completion_work);
        struct tcm_vhost_cmd *tv_cmd;
 
-       while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs)) != NULL) {
+       while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs))) {
                struct virtio_scsi_cmd_resp v_rsp;
                struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
                int ret;
@@ -408,19 +418,6 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
        vhost_signal(&vs->dev, &vs->vqs[2]);
 }
 
-static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
-{
-       struct vhost_scsi *vs = tv_cmd->tvc_vhost;
-
-       pr_debug("%s tv_cmd %p\n", __func__, tv_cmd);
-
-       spin_lock_bh(&vs->vs_completion_lock);
-       list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
-       spin_unlock_bh(&vs->vs_completion_lock);
-
-       vhost_work_queue(&vs->dev, &vs->vs_completion_work);
-}
-
 static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
        struct tcm_vhost_tpg *tv_tpg,
        struct virtio_scsi_cmd_req *v_req,
@@ -533,8 +530,8 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
        sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
        if (!sg)
                return -ENOMEM;
-       pr_debug("%s sg %p sgl_count %u is_err %ld\n", __func__,
-              sg, sgl_count, IS_ERR(sg));
+       pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
+              sg, sgl_count, !sg);
        sg_init_table(sg, sgl_count);
 
        tv_cmd->tvc_sgl = sg;
@@ -787,12 +784,12 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
 
 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
 {
-       pr_err("%s: The handling func for control queue.\n", __func__);
+       pr_debug("%s: The handling func for control queue.\n", __func__);
 }
 
 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
 {
-       pr_err("%s: The handling func for event queue.\n", __func__);
+       pr_debug("%s: The handling func for event queue.\n", __func__);
 }
 
 static void vhost_scsi_handle_kick(struct vhost_work *work)
@@ -825,11 +822,6 @@ static int vhost_scsi_set_endpoint(
                        return -EFAULT;
                }
        }
-
-       if (vs->vs_tpg) {
-               mutex_unlock(&vs->dev.mutex);
-               return -EEXIST;
-       }
        mutex_unlock(&vs->dev.mutex);
 
        mutex_lock(&tcm_vhost_mutex);
@@ -839,7 +831,7 @@ static int vhost_scsi_set_endpoint(
                        mutex_unlock(&tv_tpg->tv_tpg_mutex);
                        continue;
                }
-               if (atomic_read(&tv_tpg->tv_tpg_vhost_count)) {
+               if (tv_tpg->tv_tpg_vhost_count != 0) {
                        mutex_unlock(&tv_tpg->tv_tpg_mutex);
                        continue;
                }
@@ -847,14 +839,20 @@ static int vhost_scsi_set_endpoint(
 
                if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) &&
                    (tv_tpg->tport_tpgt == t->vhost_tpgt)) {
-                       atomic_inc(&tv_tpg->tv_tpg_vhost_count);
-                       smp_mb__after_atomic_inc();
+                       tv_tpg->tv_tpg_vhost_count++;
                        mutex_unlock(&tv_tpg->tv_tpg_mutex);
                        mutex_unlock(&tcm_vhost_mutex);
 
                        mutex_lock(&vs->dev.mutex);
+                       if (vs->vs_tpg) {
+                               mutex_unlock(&vs->dev.mutex);
+                               mutex_lock(&tv_tpg->tv_tpg_mutex);
+                               tv_tpg->tv_tpg_vhost_count--;
+                               mutex_unlock(&tv_tpg->tv_tpg_mutex);
+                               return -EEXIST;
+                       }
+
                        vs->vs_tpg = tv_tpg;
-                       atomic_inc(&vs->vhost_ref_cnt);
                        smp_mb__after_atomic_inc();
                        mutex_unlock(&vs->dev.mutex);
                        return 0;
@@ -871,38 +869,42 @@ static int vhost_scsi_clear_endpoint(
 {
        struct tcm_vhost_tport *tv_tport;
        struct tcm_vhost_tpg *tv_tpg;
-       int index;
+       int index, ret;
 
        mutex_lock(&vs->dev.mutex);
        /* Verify that ring has been setup correctly. */
        for (index = 0; index < vs->dev.nvqs; ++index) {
                if (!vhost_vq_access_ok(&vs->vqs[index])) {
-                       mutex_unlock(&vs->dev.mutex);
-                       return -EFAULT;
+                       ret = -EFAULT;
+                       goto err;
                }
        }
 
        if (!vs->vs_tpg) {
-               mutex_unlock(&vs->dev.mutex);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto err;
        }
        tv_tpg = vs->vs_tpg;
        tv_tport = tv_tpg->tport;
 
        if (strcmp(tv_tport->tport_name, t->vhost_wwpn) ||
            (tv_tpg->tport_tpgt != t->vhost_tpgt)) {
-               mutex_unlock(&vs->dev.mutex);
                pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
                        " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
                        tv_tport->tport_name, tv_tpg->tport_tpgt,
                        t->vhost_wwpn, t->vhost_tpgt);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err;
        }
-       atomic_dec(&tv_tpg->tv_tpg_vhost_count);
+       tv_tpg->tv_tpg_vhost_count--;
        vs->vs_tpg = NULL;
        mutex_unlock(&vs->dev.mutex);
 
        return 0;
+
+err:
+       mutex_unlock(&vs->dev.mutex);
+       return ret;
 }
 
 static int vhost_scsi_open(struct inode *inode, struct file *f)
@@ -918,9 +920,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
        INIT_LIST_HEAD(&s->vs_completion_list);
        spin_lock_init(&s->vs_completion_lock);
 
-       s->vqs[0].handle_kick = vhost_scsi_ctl_handle_kick;
-       s->vqs[1].handle_kick = vhost_scsi_evt_handle_kick;
-       s->vqs[2].handle_kick = vhost_scsi_handle_kick;
+       s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
+       s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
+       s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick;
        r = vhost_dev_init(&s->dev, s->vqs, 3);
        if (r < 0) {
                kfree(s);
@@ -949,6 +951,18 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
        return 0;
 }
 
+static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
+{
+       vhost_poll_flush(&vs->dev.vqs[index].poll);
+}
+
+static void vhost_scsi_flush(struct vhost_scsi *vs)
+{
+       vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL);
+       vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT);
+       vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO);
+}
+
 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
 {
        if (features & ~VHOST_FEATURES)
@@ -961,7 +975,8 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
                return -EFAULT;
        }
        vs->dev.acked_features = features;
-       /* TODO possibly smp_wmb() and flush vqs */
+       smp_wmb();
+       vhost_scsi_flush(vs);
        mutex_unlock(&vs->dev.mutex);
        return 0;
 }
@@ -974,26 +989,25 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
        void __user *argp = (void __user *)arg;
        u64 __user *featurep = argp;
        u64 features;
-       int r;
+       int r, abi_version = VHOST_SCSI_ABI_VERSION;
 
        switch (ioctl) {
        case VHOST_SCSI_SET_ENDPOINT:
                if (copy_from_user(&backend, argp, sizeof backend))
                        return -EFAULT;
+               if (backend.reserved != 0)
+                       return -EOPNOTSUPP;
 
                return vhost_scsi_set_endpoint(vs, &backend);
        case VHOST_SCSI_CLEAR_ENDPOINT:
                if (copy_from_user(&backend, argp, sizeof backend))
                        return -EFAULT;
+               if (backend.reserved != 0)
+                       return -EOPNOTSUPP;
 
                return vhost_scsi_clear_endpoint(vs, &backend);
        case VHOST_SCSI_GET_ABI_VERSION:
-               if (copy_from_user(&backend, argp, sizeof backend))
-                       return -EFAULT;
-
-               backend.abi_version = VHOST_SCSI_ABI_VERSION;
-
-               if (copy_to_user(argp, &backend, sizeof backend))
+               if (copy_to_user(argp, &abi_version, sizeof abi_version))
                        return -EFAULT;
                return 0;
        case VHOST_GET_FEATURES:
@@ -1013,11 +1027,21 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
        }
 }
 
+#ifdef CONFIG_COMPAT
+static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
+                               unsigned long arg)
+{
+       return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
+}
+#endif
+
 static const struct file_operations vhost_scsi_fops = {
        .owner          = THIS_MODULE,
        .release        = vhost_scsi_release,
        .unlocked_ioctl = vhost_scsi_ioctl,
-       /* TODO compat ioctl? */
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = vhost_scsi_compat_ioctl,
+#endif
        .open           = vhost_scsi_open,
        .llseek         = noop_llseek,
 };
@@ -1054,28 +1078,28 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
        return "Unknown";
 }
 
-static int tcm_vhost_port_link(
-       struct se_portal_group *se_tpg,
+static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
        struct se_lun *lun)
 {
        struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
 
-       atomic_inc(&tv_tpg->tv_tpg_port_count);
-       smp_mb__after_atomic_inc();
+       mutex_lock(&tv_tpg->tv_tpg_mutex);
+       tv_tpg->tv_tpg_port_count++;
+       mutex_unlock(&tv_tpg->tv_tpg_mutex);
 
        return 0;
 }
 
-static void tcm_vhost_port_unlink(
-       struct se_portal_group *se_tpg,
+static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
        struct se_lun *se_lun)
 {
        struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
 
-       atomic_dec(&tv_tpg->tv_tpg_port_count);
-       smp_mb__after_atomic_dec();
+       mutex_lock(&tv_tpg->tv_tpg_mutex);
+       tv_tpg->tv_tpg_port_count--;
+       mutex_unlock(&tv_tpg->tv_tpg_mutex);
 }
 
 static struct se_node_acl *tcm_vhost_make_nodeacl(
@@ -1122,8 +1146,7 @@ static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
        kfree(nacl);
 }
 
-static int tcm_vhost_make_nexus(
-       struct tcm_vhost_tpg *tv_tpg,
+static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
        const char *name)
 {
        struct se_portal_group *se_tpg;
@@ -1168,7 +1191,7 @@ static int tcm_vhost_make_nexus(
                return -ENOMEM;
        }
        /*
-        * Now register the TCM vHost virtual I_T Nexus as active with the
+        * Now register the TCM vhost virtual I_T Nexus as active with the
         * call to __transport_register_session()
         */
        __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
@@ -1179,8 +1202,7 @@ static int tcm_vhost_make_nexus(
        return 0;
 }
 
-static int tcm_vhost_drop_nexus(
-       struct tcm_vhost_tpg *tpg)
+static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
 {
        struct se_session *se_sess;
        struct tcm_vhost_nexus *tv_nexus;
@@ -1198,27 +1220,27 @@ static int tcm_vhost_drop_nexus(
                return -ENODEV;
        }
 
-       if (atomic_read(&tpg->tv_tpg_port_count)) {
+       if (tpg->tv_tpg_port_count != 0) {
                mutex_unlock(&tpg->tv_tpg_mutex);
-               pr_err("Unable to remove TCM_vHost I_T Nexus with"
+               pr_err("Unable to remove TCM_vhost I_T Nexus with"
                        " active TPG port count: %d\n",
-                       atomic_read(&tpg->tv_tpg_port_count));
-               return -EPERM;
+                       tpg->tv_tpg_port_count);
+               return -EBUSY;
        }
 
-       if (atomic_read(&tpg->tv_tpg_vhost_count)) {
+       if (tpg->tv_tpg_vhost_count != 0) {
                mutex_unlock(&tpg->tv_tpg_mutex);
-               pr_err("Unable to remove TCM_vHost I_T Nexus with"
+               pr_err("Unable to remove TCM_vhost I_T Nexus with"
                        " active TPG vhost count: %d\n",
-                       atomic_read(&tpg->tv_tpg_vhost_count));
-               return -EPERM;
+                       tpg->tv_tpg_vhost_count);
+               return -EBUSY;
        }
 
-       pr_debug("TCM_vHost_ConfigFS: Removing I_T Nexus to emulated"
+       pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
                " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
                tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
        /*
-        * Release the SCSI I_T Nexus to the emulated vHost Target Port
+        * Release the SCSI I_T Nexus to the emulated vhost Target Port
         */
        transport_deregister_session(tv_nexus->tvn_se_sess);
        tpg->tpg_nexus = NULL;
@@ -1228,8 +1250,7 @@ static int tcm_vhost_drop_nexus(
        return 0;
 }
 
-static ssize_t tcm_vhost_tpg_show_nexus(
-       struct se_portal_group *se_tpg,
+static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
        char *page)
 {
        struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
@@ -1250,8 +1271,7 @@ static ssize_t tcm_vhost_tpg_show_nexus(
        return ret;
 }
 
-static ssize_t tcm_vhost_tpg_store_nexus(
-       struct se_portal_group *se_tpg,
+static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
        const char *page,
        size_t count)
 {
@@ -1336,8 +1356,7 @@ static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
        NULL,
 };
 
-static struct se_portal_group *tcm_vhost_make_tpg(
-       struct se_wwn *wwn,
+static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
        struct config_group *group,
        const char *name)
 {
@@ -1385,7 +1404,7 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
        list_del(&tpg->tv_tpg_list);
        mutex_unlock(&tcm_vhost_mutex);
        /*
-        * Release the virtual I_T Nexus for this vHost TPG
+        * Release the virtual I_T Nexus for this vhost TPG
         */
        tcm_vhost_drop_nexus(tpg);
        /*
@@ -1395,8 +1414,7 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
        kfree(tpg);
 }
 
-static struct se_wwn *tcm_vhost_make_tport(
-       struct target_fabric_configfs *tf,
+static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
        struct config_group *group,
        const char *name)
 {
@@ -1592,7 +1610,10 @@ static void tcm_vhost_deregister_configfs(void)
 static int __init tcm_vhost_init(void)
 {
        int ret = -ENOMEM;
-
+       /*
+        * Use our own dedicated workqueue for submitting I/O into
+        * target core to avoid contention within system_wq.
+        */
        tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
        if (!tcm_vhost_workqueue)
                goto out;
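
The compat_ioctl path added above is the standard shim for 32-bit userspace on a 64-bit kernel: pointer arguments are converted with compat_ptr() and the request is forwarded to the native handler. Sketched with placeholder names and a stub native handler:

#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/module.h>

static long example_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;		/* placeholder native handler */
}

#ifdef CONFIG_COMPAT
static long example_compat_ioctl(struct file *f, unsigned int cmd,
				 unsigned long arg)
{
	return example_ioctl(f, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= example_compat_ioctl,
#endif
};
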
index c983ed2..d9e9355 100644 (file)
@@ -47,9 +47,9 @@ struct tcm_vhost_tpg {
        /* Vhost port target portal group tag for TCM */
        u16 tport_tpgt;
        /* Used to track the number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
-       atomic_t tv_tpg_port_count;
-       /* Used for vhost_scsi device reference to tpg_nexus */
-       atomic_t tv_tpg_vhost_count;
+       int tv_tpg_port_count;
+       /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
+       int tv_tpg_vhost_count;
        /* list for tcm_vhost_list */
        struct list_head tv_tpg_list;
        /* Used to protect access for tpg_nexus */
@@ -91,11 +91,13 @@ struct tcm_vhost_tport {
 
 struct vhost_scsi_target {
        int abi_version;
-       unsigned char vhost_wwpn[TRANSPORT_IQN_LEN];
+       char vhost_wwpn[TRANSPORT_IQN_LEN];
        unsigned short vhost_tpgt;
+       unsigned short reserved;
 };
 
 /* VHOST_SCSI specific defines */
 #define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
 #define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
-#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target)
+/* Changing this breaks userspace. */
+#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int)
index f8a79fc..88e9204 100644 (file)
@@ -374,6 +374,9 @@ static void fb_flashcursor(struct work_struct *work)
        int mode;
        int ret;
 
+       /* FIXME: we should sort out the unbind locking instead;
+        * for now we simply skip flashing the cursor if we can't take
+        * the console lock, rather than blocking fbcon deinit.
+        */
        ret = console_trylock();
        if (ret == 0)
                return;
index fb962ef..6d59006 100644 (file)
@@ -201,6 +201,7 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
        int err = -ENOMEM;
 
        dout("ceph_fs_debugfs_init\n");
+       BUG_ON(!fsc->client->debugfs_dir);
        fsc->debugfs_congestion_kb =
                debugfs_create_file("writeback_congestion_kb",
                                    0600,
index 9fff9f3..4b5762e 100644 (file)
@@ -992,11 +992,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
        if (rinfo->head->is_dentry) {
                struct inode *dir = req->r_locked_dir;
 
-               err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
-                                session, req->r_request_started, -1,
-                                &req->r_caps_reservation);
-               if (err < 0)
-                       return err;
+               if (dir) {
+                       err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
+                                        session, req->r_request_started, -1,
+                                        &req->r_caps_reservation);
+                       if (err < 0)
+                               return err;
+               } else {
+                       WARN_ON_ONCE(1);
+               }
        }
 
        /*
@@ -1004,6 +1008,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
         * will have trouble splicing in the virtual snapdir later
         */
        if (rinfo->head->is_dentry && !req->r_aborted &&
+           req->r_locked_dir &&
            (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
                                               fsc->mount_options->snapdir_name,
                                               req->r_dentry->d_name.len))) {
index 8e3fb69..1396ceb 100644 (file)
@@ -42,7 +42,8 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
        /* validate striping parameters */
        if ((l->object_size & ~PAGE_MASK) ||
            (l->stripe_unit & ~PAGE_MASK) ||
-           ((unsigned)l->object_size % (unsigned)l->stripe_unit))
+           (l->stripe_unit != 0 &&
+            ((unsigned)l->object_size % (unsigned)l->stripe_unit)))
                return -EINVAL;
 
        /* make sure it's a valid data pool */
index 1c8b556..eedec84 100644 (file)
@@ -1654,8 +1654,8 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
                error = PTR_ERR(file);
                goto out_free_fd;
        }
-       fd_install(fd, file);
        ep->file = file;
+       fd_install(fd, file);
        return fd;
 
 out_free_fd:
index 51e9aa6..a856e7f 100644 (file)
@@ -352,6 +352,7 @@ int __inode_permission(struct inode *inode, int mask)
 /**
  * sb_permission - Check superblock-level permissions
  * @sb: Superblock of inode to check permission on
+ * @inode: Inode to check permission on
  * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
  *
  * Separate out file-system wide checks from inode-specific permission checks.
@@ -656,6 +657,7 @@ int sysctl_protected_hardlinks __read_mostly = 1;
 /**
  * may_follow_link - Check symlink following for unsafe situations
  * @link: The path of the symlink
+ * @nd: nameidata pathwalk data
  *
  * In the case of the sysctl_protected_symlinks sysctl being enabled,
  * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
index 8bf3a3f..b7db608 100644 (file)
@@ -12,19 +12,19 @@ nfs-$(CONFIG_ROOT_NFS)      += nfsroot.o
 nfs-$(CONFIG_SYSCTL)   += sysctl.o
 nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o
 
-obj-$(CONFIG_NFS_V2) += nfs2.o
-nfs2-y := nfs2super.o proc.o nfs2xdr.o
+obj-$(CONFIG_NFS_V2) += nfsv2.o
+nfsv2-y := nfs2super.o proc.o nfs2xdr.o
 
-obj-$(CONFIG_NFS_V3) += nfs3.o
-nfs3-y := nfs3super.o nfs3client.o nfs3proc.o nfs3xdr.o
-nfs3-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
+obj-$(CONFIG_NFS_V3) += nfsv3.o
+nfsv3-y := nfs3super.o nfs3client.o nfs3proc.o nfs3xdr.o
+nfsv3-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
 
-obj-$(CONFIG_NFS_V4) += nfs4.o
-nfs4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \
+obj-$(CONFIG_NFS_V4) += nfsv4.o
+nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \
          delegation.o idmap.o callback.o callback_xdr.o callback_proc.o \
          nfs4namespace.o nfs4getroot.o nfs4client.o
-nfs4-$(CONFIG_SYSCTL)  += nfs4sysctl.o
-nfs4-$(CONFIG_NFS_V4_1)        += pnfs.o pnfs_dev.o
+nfsv4-$(CONFIG_SYSCTL) += nfs4sysctl.o
+nfsv4-$(CONFIG_NFS_V4_1)       += pnfs.o pnfs_dev.o
 
 obj-$(CONFIG_PNFS_FILE_LAYOUT) += nfs_layout_nfsv41_files.o
 nfs_layout_nfsv41_files-y := nfs4filelayout.o nfs4filelayoutdev.o
index 9fc0d9d..9969444 100644 (file)
@@ -105,7 +105,7 @@ struct nfs_subversion *get_nfs_version(unsigned int version)
 
        if (IS_ERR(nfs)) {
                mutex_lock(&nfs_version_mutex);
-               request_module("nfs%d", version);
+               request_module("nfsv%d", version);
                nfs = find_nfs_version(version);
                mutex_unlock(&nfs_version_mutex);
        }
index b701358..a850079 100644 (file)
@@ -61,6 +61,12 @@ struct idmap {
        struct mutex            idmap_mutex;
 };
 
+struct idmap_legacy_upcalldata {
+       struct rpc_pipe_msg pipe_msg;
+       struct idmap_msg idmap_msg;
+       struct idmap *idmap;
+};
+
 /**
  * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
  * @fattr: fully initialised struct nfs_fattr
@@ -324,6 +330,7 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
                ret = nfs_idmap_request_key(&key_type_id_resolver_legacy,
                                            name, namelen, type, data,
                                            data_size, idmap);
+               idmap->idmap_key_cons = NULL;
                mutex_unlock(&idmap->idmap_mutex);
        }
        return ret;
@@ -380,11 +387,13 @@ static const match_table_t nfs_idmap_tokens = {
 static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *);
 static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
                                   size_t);
+static void idmap_release_pipe(struct inode *);
 static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
 
 static const struct rpc_pipe_ops idmap_upcall_ops = {
        .upcall         = rpc_pipe_generic_upcall,
        .downcall       = idmap_pipe_downcall,
+       .release_pipe   = idmap_release_pipe,
        .destroy_msg    = idmap_pipe_destroy_msg,
 };
 
@@ -616,7 +625,8 @@ void nfs_idmap_quit(void)
        nfs_idmap_quit_keyring();
 }
 
-static int nfs_idmap_prepare_message(char *desc, struct idmap_msg *im,
+static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap,
+                                    struct idmap_msg *im,
                                     struct rpc_pipe_msg *msg)
 {
        substring_t substr;
@@ -659,6 +669,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
                                   const char *op,
                                   void *aux)
 {
+       struct idmap_legacy_upcalldata *data;
        struct rpc_pipe_msg *msg;
        struct idmap_msg *im;
        struct idmap *idmap = (struct idmap *)aux;
@@ -666,15 +677,15 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
        int ret = -ENOMEM;
 
        /* msg and im are freed in idmap_pipe_destroy_msg */
-       msg = kmalloc(sizeof(*msg), GFP_KERNEL);
-       if (!msg)
-               goto out0;
-
-       im = kmalloc(sizeof(*im), GFP_KERNEL);
-       if (!im)
+       data = kmalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
                goto out1;
 
-       ret = nfs_idmap_prepare_message(key->description, im, msg);
+       msg = &data->pipe_msg;
+       im = &data->idmap_msg;
+       data->idmap = idmap;
+
+       ret = nfs_idmap_prepare_message(key->description, idmap, im, msg);
        if (ret < 0)
                goto out2;
 
@@ -683,15 +694,15 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
 
        ret = rpc_queue_upcall(idmap->idmap_pipe, msg);
        if (ret < 0)
-               goto out2;
+               goto out3;
 
        return ret;
 
+out3:
+       idmap->idmap_key_cons = NULL;
 out2:
-       kfree(im);
+       kfree(data);
 out1:
-       kfree(msg);
-out0:
        complete_request_key(cons, ret);
        return ret;
 }
@@ -749,9 +760,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
        }
 
        if (!(im.im_status & IDMAP_STATUS_SUCCESS)) {
-               ret = mlen;
-               complete_request_key(cons, -ENOKEY);
-               goto out_incomplete;
+               ret = -ENOKEY;
+               goto out;
        }
 
        namelen_in = strnlen(im.im_name, IDMAP_NAMESZ);
@@ -768,16 +778,32 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
 
 out:
        complete_request_key(cons, ret);
-out_incomplete:
        return ret;
 }
 
 static void
 idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg)
 {
+       struct idmap_legacy_upcalldata *data = container_of(msg,
+                       struct idmap_legacy_upcalldata,
+                       pipe_msg);
+       struct idmap *idmap = data->idmap;
+       struct key_construction *cons;
+       if (msg->errno) {
+               cons = ACCESS_ONCE(idmap->idmap_key_cons);
+               idmap->idmap_key_cons = NULL;
+               complete_request_key(cons, msg->errno);
+       }
        /* Free memory allocated in nfs_idmap_legacy_upcall() */
-       kfree(msg->data);
-       kfree(msg);
+       kfree(data);
+}
+
+static void
+idmap_release_pipe(struct inode *inode)
+{
+       struct rpc_inode *rpci = RPC_I(inode);
+       struct idmap *idmap = (struct idmap *)rpci->private;
+       idmap->idmap_key_cons = NULL;
 }
 
 int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid)
index 0952c79..d6b3b5f 100644 (file)
@@ -69,7 +69,7 @@ do_proc_get_root(struct rpc_clnt *client, struct nfs_fh *fhandle,
        nfs_fattr_init(info->fattr);
        status = rpc_call_sync(client, &msg, 0);
        dprintk("%s: reply fsinfo: %d\n", __func__, status);
-       if (!(info->fattr->valid & NFS_ATTR_FATTR)) {
+       if (status == 0 && !(info->fattr->valid & NFS_ATTR_FATTR)) {
                msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
                msg.rpc_resp = info->fattr;
                status = rpc_call_sync(client, &msg, 0);
index 3b950dd..da0618a 100644 (file)
@@ -205,6 +205,9 @@ extern const struct dentry_operations nfs4_dentry_operations;
 int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
                    unsigned, umode_t, int *);
 
+/* super.c */
+extern struct file_system_type nfs4_fs_type;
+
 /* nfs4namespace.c */
 rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
 struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *);
index cbcdfaf..24eb663 100644 (file)
@@ -74,7 +74,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
        return clp;
 
 error:
-       kfree(clp);
+       nfs_free_client(clp);
        return ERR_PTR(err);
 }
 
index a99a8d9..6352741 100644 (file)
@@ -3737,9 +3737,10 @@ out:
 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
 {
        struct nfs4_cached_acl *acl;
+       size_t buflen = sizeof(*acl) + acl_len;
 
-       if (pages && acl_len <= PAGE_SIZE) {
-               acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
+       if (pages && buflen <= PAGE_SIZE) {
+               acl = kmalloc(buflen, GFP_KERNEL);
                if (acl == NULL)
                        goto out;
                acl->cached = 1;
@@ -3819,7 +3820,7 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
        if (ret)
                goto out_free;
 
-       acl_len = res.acl_len - res.acl_data_offset;
+       acl_len = res.acl_len;
        if (acl_len > args.acl_len)
                nfs4_write_cached_acl(inode, NULL, 0, acl_len);
        else
@@ -6223,11 +6224,58 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
        dprintk("<-- %s\n", __func__);
 }
 
+static size_t max_response_pages(struct nfs_server *server)
+{
+       u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+       return nfs_page_array_len(0, max_resp_sz);
+}
+
+static void nfs4_free_pages(struct page **pages, size_t size)
+{
+       int i;
+
+       if (!pages)
+               return;
+
+       for (i = 0; i < size; i++) {
+               if (!pages[i])
+                       break;
+               __free_page(pages[i]);
+       }
+       kfree(pages);
+}
+
+static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
+{
+       struct page **pages;
+       int i;
+
+       pages = kcalloc(size, sizeof(struct page *), gfp_flags);
+       if (!pages) {
+               dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
+               return NULL;
+       }
+
+       for (i = 0; i < size; i++) {
+               pages[i] = alloc_page(gfp_flags);
+               if (!pages[i]) {
+                       dprintk("%s: failed to allocate page\n", __func__);
+                       nfs4_free_pages(pages, size);
+                       return NULL;
+               }
+       }
+
+       return pages;
+}
+
 static void nfs4_layoutget_release(void *calldata)
 {
        struct nfs4_layoutget *lgp = calldata;
+       struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+       size_t max_pages = max_response_pages(server);
 
        dprintk("--> %s\n", __func__);
+       nfs4_free_pages(lgp->args.layout.pages, max_pages);
        put_nfs_open_context(lgp->args.ctx);
        kfree(calldata);
        dprintk("<-- %s\n", __func__);
@@ -6239,9 +6287,10 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
        .rpc_release = nfs4_layoutget_release,
 };
 
-int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
+void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
 {
        struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+       size_t max_pages = max_response_pages(server);
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
@@ -6259,12 +6308,19 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
 
        dprintk("--> %s\n", __func__);
 
+       lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
+       if (!lgp->args.layout.pages) {
+               nfs4_layoutget_release(lgp);
+               return;
+       }
+       lgp->args.layout.pglen = max_pages * PAGE_SIZE;
+
        lgp->res.layoutp = &lgp->args.layout;
        lgp->res.seq_res.sr_slot = NULL;
        nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
-               return PTR_ERR(task);
+               return;
        status = nfs4_wait_for_completion_rpc_task(task);
        if (status == 0)
                status = task->tk_status;
@@ -6272,7 +6328,7 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
                status = pnfs_layout_process(lgp);
        rpc_put_task(task);
        dprintk("<-- %s status=%d\n", __func__, status);
-       return status;
+       return;
 }
 
 static void
@@ -6304,12 +6360,8 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
                return;
        }
        spin_lock(&lo->plh_inode->i_lock);
-       if (task->tk_status == 0) {
-               if (lrp->res.lrs_present) {
-                       pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
-               } else
-                       BUG_ON(!list_empty(&lo->plh_segs));
-       }
+       if (task->tk_status == 0 && lrp->res.lrs_present)
+               pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
        lo->plh_block_lgets--;
        spin_unlock(&lo->plh_inode->i_lock);
        dprintk("<-- %s\n", __func__);
index 12a31a9..bd61221 100644 (file)
@@ -23,14 +23,6 @@ static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
 static struct dentry *nfs4_remote_referral_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *raw_data);
 
-static struct file_system_type nfs4_fs_type = {
-       .owner          = THIS_MODULE,
-       .name           = "nfs4",
-       .mount          = nfs_fs_mount,
-       .kill_sb        = nfs_kill_super,
-       .fs_flags       = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
-};
-
 static struct file_system_type nfs4_remote_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "nfs4",
@@ -344,14 +336,8 @@ static int __init init_nfs_v4(void)
        if (err)
                goto out1;
 
-       err = register_filesystem(&nfs4_fs_type);
-       if (err < 0)
-               goto out2;
-
        register_nfs_version(&nfs_v4);
        return 0;
-out2:
-       nfs4_unregister_sysctl();
 out1:
        nfs_idmap_quit();
 out:
@@ -361,7 +347,6 @@ out:
 static void __exit exit_nfs_v4(void)
 {
        unregister_nfs_version(&nfs_v4);
-       unregister_filesystem(&nfs4_fs_type);
        nfs4_unregister_sysctl();
        nfs_idmap_quit();
 }
index ca13483..1bfbd67 100644 (file)
@@ -5045,22 +5045,19 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
                         struct nfs_getaclres *res)
 {
        unsigned int savep;
-       __be32 *bm_p;
        uint32_t attrlen,
                 bitmap[3] = {0};
        int status;
-       size_t page_len = xdr->buf->page_len;
+       unsigned int pg_offset;
 
        res->acl_len = 0;
        if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
                goto out;
 
-       bm_p = xdr->p;
-       res->acl_data_offset = be32_to_cpup(bm_p) + 2;
-       res->acl_data_offset <<= 2;
-       /* Check if the acl data starts beyond the allocated buffer */
-       if (res->acl_data_offset > page_len)
-               return -ERANGE;
+       xdr_enter_page(xdr, xdr->buf->page_len);
+
+       /* Calculate the offset of the page data */
+       pg_offset = xdr->buf->head[0].iov_len;
 
        if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
                goto out;
@@ -5074,23 +5071,20 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
                /* The bitmap (xdr len + bitmaps) and the attr xdr len words
                 * are stored with the acl data to handle the problem of
                 * variable length bitmaps.*/
-               xdr->p = bm_p;
+               res->acl_data_offset = xdr_stream_pos(xdr) - pg_offset;
 
                /* We ignore &savep and don't do consistency checks on
                 * the attr length.  Let userspace figure it out.... */
-               attrlen += res->acl_data_offset;
-               if (attrlen > page_len) {
+               res->acl_len = attrlen;
+               if (attrlen > (xdr->nwords << 2)) {
                        if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
                                /* getxattr interface called with a NULL buf */
-                               res->acl_len = attrlen;
                                goto out;
                        }
-                       dprintk("NFS: acl reply: attrlen %u > page_len %zu\n",
-                                       attrlen, page_len);
+                       dprintk("NFS: acl reply: attrlen %u > page_len %u\n",
+                                       attrlen, xdr->nwords << 2);
                        return -EINVAL;
                }
-               xdr_read_pages(xdr, attrlen);
-               res->acl_len = attrlen;
        } else
                status = -EOPNOTSUPP;
 
index f50d3e8..ea6d111 100644 (file)
@@ -570,17 +570,66 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
                return false;
 
        return pgio->pg_count + req->wb_bytes <=
-                       OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
+                       (unsigned long)pgio->pg_layout_private;
+}
+
+void objio_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+{
+       pnfs_generic_pg_init_read(pgio, req);
+       if (unlikely(pgio->pg_lseg == NULL))
+               return; /* Not pNFS */
+
+       pgio->pg_layout_private = (void *)
+                               OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
+}
+
+static bool aligned_on_raid_stripe(u64 offset, struct ore_layout *layout,
+                                  unsigned long *stripe_end)
+{
+       u32 stripe_off;
+       unsigned stripe_size;
+
+       if (layout->raid_algorithm == PNFS_OSD_RAID_0)
+               return true;
+
+       stripe_size = layout->stripe_unit *
+                               (layout->group_width - layout->parity);
+
+       div_u64_rem(offset, stripe_size, &stripe_off);
+       if (!stripe_off)
+               return true;
+
+       *stripe_end = stripe_size - stripe_off;
+       return false;
+}
+
+void objio_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+{
+       unsigned long stripe_end = 0;
+
+       pnfs_generic_pg_init_write(pgio, req);
+       if (unlikely(pgio->pg_lseg == NULL))
+               return; /* Not pNFS */
+
+       if (req->wb_offset ||
+           !aligned_on_raid_stripe(req->wb_index * PAGE_SIZE,
+                              &OBJIO_LSEG(pgio->pg_lseg)->layout,
+                              &stripe_end)) {
+               pgio->pg_layout_private = (void *)stripe_end;
+       } else {
+               pgio->pg_layout_private = (void *)
+                               OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
+       }
 }
 
 static const struct nfs_pageio_ops objio_pg_read_ops = {
-       .pg_init = pnfs_generic_pg_init_read,
+       .pg_init = objio_init_read,
        .pg_test = objio_pg_test,
        .pg_doio = pnfs_generic_pg_readpages,
 };
 
 static const struct nfs_pageio_ops objio_pg_write_ops = {
-       .pg_init = pnfs_generic_pg_init_write,
+       .pg_init = objio_init_write,
        .pg_test = objio_pg_test,
        .pg_doio = pnfs_generic_pg_writepages,
 };
index 1a6732e..311a796 100644 (file)
@@ -49,6 +49,7 @@ void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
        hdr->io_start = req_offset(hdr->req);
        hdr->good_bytes = desc->pg_count;
        hdr->dreq = desc->pg_dreq;
+       hdr->layout_private = desc->pg_layout_private;
        hdr->release = release;
        hdr->completion_ops = desc->pg_completion_ops;
        if (hdr->completion_ops->init_hdr)
@@ -268,6 +269,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
        desc->pg_error = 0;
        desc->pg_lseg = NULL;
        desc->pg_dreq = NULL;
+       desc->pg_layout_private = NULL;
 }
 EXPORT_SYMBOL_GPL(nfs_pageio_init);
 
index 76875bf..2e00fea 100644 (file)
@@ -583,9 +583,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
        struct nfs_server *server = NFS_SERVER(ino);
        struct nfs4_layoutget *lgp;
        struct pnfs_layout_segment *lseg = NULL;
-       struct page **pages = NULL;
-       int i;
-       u32 max_resp_sz, max_pages;
 
        dprintk("--> %s\n", __func__);
 
@@ -594,20 +591,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
        if (lgp == NULL)
                return NULL;
 
-       /* allocate pages for xdr post processing */
-       max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
-       max_pages = nfs_page_array_len(0, max_resp_sz);
-
-       pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
-       if (!pages)
-               goto out_err_free;
-
-       for (i = 0; i < max_pages; i++) {
-               pages[i] = alloc_page(gfp_flags);
-               if (!pages[i])
-                       goto out_err_free;
-       }
-
        lgp->args.minlength = PAGE_CACHE_SIZE;
        if (lgp->args.minlength > range->length)
                lgp->args.minlength = range->length;
@@ -616,39 +599,19 @@ send_layoutget(struct pnfs_layout_hdr *lo,
        lgp->args.type = server->pnfs_curr_ld->id;
        lgp->args.inode = ino;
        lgp->args.ctx = get_nfs_open_context(ctx);
-       lgp->args.layout.pages = pages;
-       lgp->args.layout.pglen = max_pages * PAGE_SIZE;
        lgp->lsegpp = &lseg;
        lgp->gfp_flags = gfp_flags;
 
        /* Synchronously retrieve layout information from server and
         * store in lseg.
         */
-       nfs4_proc_layoutget(lgp);
+       nfs4_proc_layoutget(lgp, gfp_flags);
        if (!lseg) {
                /* remember that LAYOUTGET failed and suspend trying */
                set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
        }
 
-       /* free xdr pages */
-       for (i = 0; i < max_pages; i++)
-               __free_page(pages[i]);
-       kfree(pages);
-
        return lseg;
-
-out_err_free:
-       /* free any allocated xdr pages, lgp as it's not used */
-       if (pages) {
-               for (i = 0; i < max_pages; i++) {
-                       if (!pages[i])
-                               break;
-                       __free_page(pages[i]);
-               }
-               kfree(pages);
-       }
-       kfree(lgp);
-       return NULL;
 }
 
 /*
index 2c6c805..745aa1b 100644 (file)
@@ -172,7 +172,7 @@ extern int nfs4_proc_getdevicelist(struct nfs_server *server,
                                   struct pnfs_devicelist *devlist);
 extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
                                   struct pnfs_device *dev);
-extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
+extern void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags);
 extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
 
 /* pnfs.c */
index ac6a3c5..239aff7 100644 (file)
@@ -319,6 +319,34 @@ EXPORT_SYMBOL_GPL(nfs_sops);
 static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *);
 static int nfs4_validate_mount_data(void *options,
        struct nfs_parsed_mount_data *args, const char *dev_name);
+
+struct file_system_type nfs4_fs_type = {
+       .owner          = THIS_MODULE,
+       .name           = "nfs4",
+       .mount          = nfs_fs_mount,
+       .kill_sb        = nfs_kill_super,
+       .fs_flags       = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+EXPORT_SYMBOL_GPL(nfs4_fs_type);
+
+static int __init register_nfs4_fs(void)
+{
+       return register_filesystem(&nfs4_fs_type);
+}
+
+static void unregister_nfs4_fs(void)
+{
+       unregister_filesystem(&nfs4_fs_type);
+}
+#else
+static int __init register_nfs4_fs(void)
+{
+       return 0;
+}
+
+static void unregister_nfs4_fs(void)
+{
+}
 #endif
 
 static struct shrinker acl_shrinker = {
@@ -337,12 +365,18 @@ int __init register_nfs_fs(void)
        if (ret < 0)
                goto error_0;
 
-       ret = nfs_register_sysctl();
+       ret = register_nfs4_fs();
        if (ret < 0)
                goto error_1;
+
+       ret = nfs_register_sysctl();
+       if (ret < 0)
+               goto error_2;
        register_shrinker(&acl_shrinker);
        return 0;
 
+error_2:
+       unregister_nfs4_fs();
 error_1:
        unregister_filesystem(&nfs_fs_type);
 error_0:
@@ -356,6 +390,7 @@ void __exit unregister_nfs_fs(void)
 {
        unregister_shrinker(&acl_shrinker);
        nfs_unregister_sysctl();
+       unregister_nfs4_fs();
        unregister_filesystem(&nfs_fs_type);
 }
 
@@ -2645,4 +2680,6 @@ MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
 module_param(send_implementation_id, ushort, 0644);
 MODULE_PARM_DESC(send_implementation_id,
                "Send implementation ID with NFSv4.1 exchange_id");
+MODULE_ALIAS("nfs4");
+
 #endif /* CONFIG_NFS_V4 */
index 5829d0c..e3b5537 100644 (file)
@@ -1814,19 +1814,19 @@ int __init nfs_init_writepagecache(void)
        nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
                                                     nfs_wdata_cachep);
        if (nfs_wdata_mempool == NULL)
-               return -ENOMEM;
+               goto out_destroy_write_cache;
 
        nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
                                             sizeof(struct nfs_commit_data),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (nfs_cdata_cachep == NULL)
-               return -ENOMEM;
+               goto out_destroy_write_mempool;
 
        nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
                                                      nfs_wdata_cachep);
        if (nfs_commit_mempool == NULL)
-               return -ENOMEM;
+               goto out_destroy_commit_cache;
 
        /*
         * NFS congestion size, scale with available memory.
@@ -1849,11 +1849,20 @@ int __init nfs_init_writepagecache(void)
                nfs_congestion_kb = 256*1024;
 
        return 0;
+
+out_destroy_commit_cache:
+       kmem_cache_destroy(nfs_cdata_cachep);
+out_destroy_write_mempool:
+       mempool_destroy(nfs_wdata_mempool);
+out_destroy_write_cache:
+       kmem_cache_destroy(nfs_wdata_cachep);
+       return -ENOMEM;
 }
 
 void nfs_destroy_writepagecache(void)
 {
        mempool_destroy(nfs_commit_mempool);
+       kmem_cache_destroy(nfs_cdata_cachep);
        mempool_destroy(nfs_wdata_mempool);
        kmem_cache_destroy(nfs_wdata_cachep);
 }
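
A hedged, generic sketch of the goto-unwind pattern the hunk above adopts: each allocation gets a label that tears down everything set up before it, and the exit path mirrors the init path in reverse. All names below are illustrative, not taken from the NFS code.

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

static struct kmem_cache *example_cachep;
static mempool_t *example_mempool;

static int __init example_init(void)
{
        example_cachep = kmem_cache_create("example_cache", 64, 0,
                                           SLAB_HWCACHE_ALIGN, NULL);
        if (!example_cachep)
                return -ENOMEM;

        example_mempool = mempool_create_slab_pool(16, example_cachep);
        if (!example_mempool)
                goto out_destroy_cache;

        return 0;

out_destroy_cache:
        kmem_cache_destroy(example_cachep);
        return -ENOMEM;
}

static void __exit example_exit(void)
{
        /* tear down in reverse order of creation */
        mempool_destroy(example_mempool);
        kmem_cache_destroy(example_cachep);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
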
index a1a0386..ced3625 100644 (file)
@@ -166,8 +166,6 @@ struct drm_display_mode {
        int crtc_vsync_start;
        int crtc_vsync_end;
        int crtc_vtotal;
-       int crtc_hadjusted;
-       int crtc_vadjusted;
 
        /* Driver private mode info */
        int private_size;
index 9c07dce..65af688 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/bug.h>
 #include <linux/atomic.h>
 #include <linux/kernel.h>
+#include <linux/mutex.h>
 
 struct kref {
        atomic_t refcount;
@@ -93,4 +94,21 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
 {
        return kref_sub(kref, 1, release);
 }
+
+static inline int kref_put_mutex(struct kref *kref,
+                                void (*release)(struct kref *kref),
+                                struct mutex *lock)
+{
+       WARN_ON(release == NULL);
+       if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
+               mutex_lock(lock);
+               if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
+                       mutex_unlock(lock);
+                       return 0;
+               }
+               release(kref);
+               return 1;
+       }
+       return 0;
+}
 #endif /* _KREF_H_ */
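
As a usage illustration (not taken from this series): kref_put_mutex() invokes the release callback with the mutex still held, so the callback is expected to drop the lock itself before freeing the object. A minimal sketch with made-up names (struct foo, foo_lock, foo_list):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct foo {
        struct kref ref;
        struct list_head node;
};

static LIST_HEAD(foo_list);
static DEFINE_MUTEX(foo_lock);

/* Called by kref_put_mutex() with foo_lock held; must unlock it. */
static void foo_release(struct kref *kref)
{
        struct foo *f = container_of(kref, struct foo, ref);

        list_del(&f->node);             /* still protected by foo_lock */
        mutex_unlock(&foo_lock);
        kfree(f);
}

static void foo_put(struct foo *f)
{
        kref_put_mutex(&f->ref, foo_release, &foo_lock);
}
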
index 8808057..92ce578 100644 (file)
@@ -69,6 +69,7 @@ struct nfs_pageio_descriptor {
        const struct nfs_pgio_completion_ops *pg_completion_ops;
        struct pnfs_layout_segment *pg_lseg;
        struct nfs_direct_req   *pg_dreq;
+       void                    *pg_layout_private;
 };
 
 #define NFS_WBACK_BUSY(req)    (test_bit(PG_BUSY,&(req)->wb_flags))
index 00485e0..ac7c8ae 100644 (file)
@@ -1248,6 +1248,7 @@ struct nfs_pgio_header {
        void (*release) (struct nfs_pgio_header *hdr);
        const struct nfs_pgio_completion_ops *completion_ops;
        struct nfs_direct_req   *dreq;
+       void                    *layout_private;
        spinlock_t              lock;
        /* fields protected by lock */
        int                     pnfs_error;
index fc35260..6b4565c 100644 (file)
 #define PCI_DEVICE_ID_TIGON3_5704S     0x16a8
 #define PCI_DEVICE_ID_NX2_57800_VF     0x16a9
 #define PCI_DEVICE_ID_NX2_5706S                0x16aa
-#define PCI_DEVICE_ID_NX2_57840_MF     0x16ab
+#define PCI_DEVICE_ID_NX2_57840_MF     0x16a4
 #define PCI_DEVICE_ID_NX2_5708S                0x16ac
 #define PCI_DEVICE_ID_NX2_57840_VF     0x16ad
 #define PCI_DEVICE_ID_NX2_57810_MF     0x16ae
index e1ce104..4a045cd 100644 (file)
@@ -18,6 +18,7 @@ struct nf_conntrack_ecache {
        u16 ctmask;             /* bitmask of ct events to be delivered */
        u16 expmask;            /* bitmask of expect events to be delivered */
        u32 pid;                /* netlink pid of destroyer */
+       struct timer_list timeout;
 };
 
 static inline struct nf_conntrack_ecache *
index 128ce46..015cea0 100644 (file)
@@ -503,8 +503,6 @@ struct se_cmd {
        u32                     se_ordered_id;
        /* Total size in bytes associated with command */
        u32                     data_length;
-       /* SCSI Presented Data Transfer Length */
-       u32                     cmd_spdtl;
        u32                     residual_count;
        u32                     orig_fe_lun;
        /* Persistent Reservation key */
index f8e54f5..9a08acc 100644 (file)
@@ -726,7 +726,6 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
                        struct mq_attr *attr)
 {
        const struct cred *cred = current_cred();
-       struct file *result;
        int ret;
 
        if (attr) {
@@ -748,21 +747,11 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
        }
 
        mode &= ~current_umask();
-       ret = mnt_want_write(path->mnt);
-       if (ret)
-               return ERR_PTR(ret);
        ret = vfs_create(dir, path->dentry, mode, true);
        path->dentry->d_fsdata = NULL;
-       if (!ret)
-               result = dentry_open(path, oflag, cred);
-       else
-               result = ERR_PTR(ret);
-       /*
-        * dentry_open() took a persistent mnt_want_write(),
-        * so we can now drop this one.
-        */
-       mnt_drop_write(path->mnt);
-       return result;
+       if (ret)
+               return ERR_PTR(ret);
+       return dentry_open(path, oflag, cred);
 }
 
 /* Opens existing queue */
@@ -788,7 +777,9 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
        struct mq_attr attr;
        int fd, error;
        struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
-       struct dentry *root = ipc_ns->mq_mnt->mnt_root;
+       struct vfsmount *mnt = ipc_ns->mq_mnt;
+       struct dentry *root = mnt->mnt_root;
+       int ro;
 
        if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
                return -EFAULT;
@@ -802,6 +793,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
        if (fd < 0)
                goto out_putname;
 
+       ro = mnt_want_write(mnt);       /* we'll drop it in any case */
        error = 0;
        mutex_lock(&root->d_inode->i_mutex);
        path.dentry = lookup_one_len(name, root, strlen(name));
@@ -809,7 +801,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
                error = PTR_ERR(path.dentry);
                goto out_putfd;
        }
-       path.mnt = mntget(ipc_ns->mq_mnt);
+       path.mnt = mntget(mnt);
 
        if (oflag & O_CREAT) {
                if (path.dentry->d_inode) {     /* entry already exists */
@@ -820,6 +812,10 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
                        }
                        filp = do_open(&path, oflag);
                } else {
+                       if (ro) {
+                               error = ro;
+                               goto out;
+                       }
                        filp = do_create(ipc_ns, root->d_inode,
                                                &path, oflag, mode,
                                                u_attr ? &attr : NULL);
@@ -845,6 +841,7 @@ out_putfd:
                fd = error;
        }
        mutex_unlock(&root->d_inode->i_mutex);
+       mnt_drop_write(mnt);
 out_putname:
        putname(name);
        return fd;
@@ -857,40 +854,38 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
        struct dentry *dentry;
        struct inode *inode = NULL;
        struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
+       struct vfsmount *mnt = ipc_ns->mq_mnt;
 
        name = getname(u_name);
        if (IS_ERR(name))
                return PTR_ERR(name);
 
-       mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex,
-                       I_MUTEX_PARENT);
-       dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
+       err = mnt_want_write(mnt);
+       if (err)
+               goto out_name;
+       mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT);
+       dentry = lookup_one_len(name, mnt->mnt_root, strlen(name));
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                goto out_unlock;
        }
 
-       if (!dentry->d_inode) {
-               err = -ENOENT;
-               goto out_err;
-       }
-
        inode = dentry->d_inode;
-       if (inode)
+       if (!inode) {
+               err = -ENOENT;
+       } else {
                ihold(inode);
-       err = mnt_want_write(ipc_ns->mq_mnt);
-       if (err)
-               goto out_err;
-       err = vfs_unlink(dentry->d_parent->d_inode, dentry);
-       mnt_drop_write(ipc_ns->mq_mnt);
-out_err:
+               err = vfs_unlink(dentry->d_parent->d_inode, dentry);
+       }
        dput(dentry);
 
 out_unlock:
-       mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
-       putname(name);
+       mutex_unlock(&mnt->mnt_root->d_inode->i_mutex);
        if (inode)
                iput(inode);
+       mnt_drop_write(mnt);
+out_name:
+       putname(name);
 
        return err;
 }
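
The mqueue rework above follows one ordering throughout: take write access to the mount before the directory i_mutex, do the modification, unlock, then drop write access. A hedged sketch of that ordering, with vfs_unlink() standing in for the operation and all names illustrative:

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/mount.h>

static int unlink_under_dir(struct vfsmount *mnt, struct dentry *root,
                            struct dentry *victim)
{
        int err = mnt_want_write(mnt);  /* may fail on a read-only mount */

        if (err)
                return err;
        mutex_lock_nested(&root->d_inode->i_mutex, I_MUTEX_PARENT);
        err = vfs_unlink(root->d_inode, victim);
        mutex_unlock(&root->d_inode->i_mutex);
        mnt_drop_write(mnt);
        return err;
}
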
index 69e38db..a802029 100644 (file)
@@ -84,7 +84,6 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
                        return -1;
                }
        } else {
-               pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid);
                memcpy(&client->fsid, fsid, sizeof(*fsid));
        }
        return 0;
index 54b531a..38b5dc1 100644 (file)
@@ -189,6 +189,9 @@ int ceph_debugfs_client_init(struct ceph_client *client)
        snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid,
                 client->monc.auth->global_id);
 
+       dout("ceph_debugfs_client_init %p %s\n", client, name);
+
+       BUG_ON(client->debugfs_dir);
        client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir);
        if (!client->debugfs_dir)
                goto out;
@@ -234,6 +237,7 @@ out:
 
 void ceph_debugfs_client_cleanup(struct ceph_client *client)
 {
+       dout("ceph_debugfs_client_cleanup %p\n", client);
        debugfs_remove(client->debugfs_osdmap);
        debugfs_remove(client->debugfs_monmap);
        debugfs_remove(client->osdc.debugfs_file);
index b979675..24c5eea 100644 (file)
@@ -915,7 +915,6 @@ static int prepare_write_connect(struct ceph_connection *con)
        con->out_connect.authorizer_len = auth ?
                cpu_to_le32(auth->authorizer_buf_len) : 0;
 
-       con_out_kvec_reset(con);
        con_out_kvec_add(con, sizeof (con->out_connect),
                                        &con->out_connect);
        if (auth && auth->authorizer_buf_len)
@@ -1557,6 +1556,7 @@ static int process_connect(struct ceph_connection *con)
                        return -1;
                }
                con->auth_retry = 1;
+               con_out_kvec_reset(con);
                ret = prepare_write_connect(con);
                if (ret < 0)
                        return ret;
@@ -1577,6 +1577,7 @@ static int process_connect(struct ceph_connection *con)
                       ENTITY_NAME(con->peer_name),
                       ceph_pr_addr(&con->peer_addr.in_addr));
                reset_connection(con);
+               con_out_kvec_reset(con);
                ret = prepare_write_connect(con);
                if (ret < 0)
                        return ret;
@@ -1601,6 +1602,7 @@ static int process_connect(struct ceph_connection *con)
                     le32_to_cpu(con->out_connect.connect_seq),
                     le32_to_cpu(con->in_reply.connect_seq));
                con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
+               con_out_kvec_reset(con);
                ret = prepare_write_connect(con);
                if (ret < 0)
                        return ret;
@@ -1617,6 +1619,7 @@ static int process_connect(struct ceph_connection *con)
                     le32_to_cpu(con->in_reply.global_seq));
                get_global_seq(con->msgr,
                               le32_to_cpu(con->in_reply.global_seq));
+               con_out_kvec_reset(con);
                ret = prepare_write_connect(con);
                if (ret < 0)
                        return ret;
@@ -2135,7 +2138,11 @@ more:
                BUG_ON(con->state != CON_STATE_CONNECTING);
                con->state = CON_STATE_NEGOTIATING;
 
-               /* Banner is good, exchange connection info */
+               /*
+                * Received banner is good, exchange connection info.
+                * Do not reset out_kvec, as sending our banner raced
+                * with receiving peer banner after connect completed.
+                */
                ret = prepare_write_connect(con);
                if (ret < 0)
                        goto out;
index 105d533..900ea0f 100644 (file)
@@ -311,6 +311,17 @@ int ceph_monc_open_session(struct ceph_mon_client *monc)
 EXPORT_SYMBOL(ceph_monc_open_session);
 
 /*
+ * We require the fsid and global_id in order to initialize our
+ * debugfs dir.
+ */
+static bool have_debugfs_info(struct ceph_mon_client *monc)
+{
+       dout("have_debugfs_info fsid %d globalid %lld\n",
+            (int)monc->client->have_fsid, monc->auth->global_id);
+       return monc->client->have_fsid && monc->auth->global_id > 0;
+}
+
+/*
  * The monitor responds with a mount ack to indicate mount success.  The
  * included client ticket allows the client to talk to MDSs and OSDs.
  */
@@ -320,9 +331,12 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
        struct ceph_client *client = monc->client;
        struct ceph_monmap *monmap = NULL, *old = monc->monmap;
        void *p, *end;
+       int had_debugfs_info, init_debugfs = 0;
 
        mutex_lock(&monc->mutex);
 
+       had_debugfs_info = have_debugfs_info(monc);
+
        dout("handle_monmap\n");
        p = msg->front.iov_base;
        end = p + msg->front.iov_len;
@@ -344,12 +358,22 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
 
        if (!client->have_fsid) {
                client->have_fsid = true;
+               if (!had_debugfs_info && have_debugfs_info(monc)) {
+                       pr_info("client%lld fsid %pU\n",
+                               ceph_client_id(monc->client),
+                               &monc->client->fsid);
+                       init_debugfs = 1;
+               }
                mutex_unlock(&monc->mutex);
-               /*
-                * do debugfs initialization without mutex to avoid
-                * creating a locking dependency
-                */
-               ceph_debugfs_client_init(client);
+
+               if (init_debugfs) {
+                       /*
+                        * do debugfs initialization without mutex to avoid
+                        * creating a locking dependency
+                        */
+                       ceph_debugfs_client_init(monc->client);
+               }
+
                goto out_unlocked;
        }
 out:
@@ -865,8 +889,10 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
 {
        int ret;
        int was_auth = 0;
+       int had_debugfs_info, init_debugfs = 0;
 
        mutex_lock(&monc->mutex);
+       had_debugfs_info = have_debugfs_info(monc);
        if (monc->auth->ops)
                was_auth = monc->auth->ops->is_authenticated(monc->auth);
        monc->pending_auth = 0;
@@ -889,7 +915,22 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
                __send_subscribe(monc);
                __resend_generic_request(monc);
        }
+
+       if (!had_debugfs_info && have_debugfs_info(monc)) {
+               pr_info("client%lld fsid %pU\n",
+                       ceph_client_id(monc->client),
+                       &monc->client->fsid);
+               init_debugfs = 1;
+       }
        mutex_unlock(&monc->mutex);
+
+       if (init_debugfs) {
+               /*
+                * do debugfs initialization without mutex to avoid
+                * creating a locking dependency
+                */
+               ceph_debugfs_client_init(monc->client);
+       }
 }
 
 static int __validate_auth(struct ceph_mon_client *monc)
index 5af9c26..dd67818 100644 (file)
@@ -168,24 +168,16 @@ static void poll_napi(struct net_device *dev)
        struct napi_struct *napi;
        int budget = 16;
 
-       WARN_ON_ONCE(!irqs_disabled());
-
        list_for_each_entry(napi, &dev->napi_list, dev_list) {
-               local_irq_enable();
                if (napi->poll_owner != smp_processor_id() &&
                    spin_trylock(&napi->poll_lock)) {
-                       rcu_read_lock_bh();
                        budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
                                               napi, budget);
-                       rcu_read_unlock_bh();
                        spin_unlock(&napi->poll_lock);
 
-                       if (!budget) {
-                               local_irq_disable();
+                       if (!budget)
                                break;
-                       }
                }
-               local_irq_disable();
        }
 }
 
index 3a57570..8aa7a4c 100644 (file)
@@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
 static struct kmem_cache *mrt_cachep __read_mostly;
 
 static struct mr_table *ipmr_new_table(struct net *net, u32 id);
+static void ipmr_free_table(struct mr_table *mrt);
+
 static int ip_mr_forward(struct net *net, struct mr_table *mrt,
                         struct sk_buff *skb, struct mfc_cache *cache,
                         int local);
@@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert);
 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                              struct mfc_cache *c, struct rtmsg *rtm);
+static void mroute_clean_tables(struct mr_table *mrt);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)
 
        list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
                list_del(&mrt->list);
-               kfree(mrt);
+               ipmr_free_table(mrt);
        }
        fib_rules_unregister(net->ipv4.mr_rules_ops);
 }
@@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net)
 
 static void __net_exit ipmr_rules_exit(struct net *net)
 {
-       kfree(net->ipv4.mrt);
+       ipmr_free_table(net->ipv4.mrt);
 }
 #endif
 
@@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
        return mrt;
 }
 
+static void ipmr_free_table(struct mr_table *mrt)
+{
+       del_timer_sync(&mrt->ipmr_expire_timer);
+       mroute_clean_tables(mrt);
+       kfree(mrt);
+}
+
 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
 
 static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
index 4ad9cf1..9c87cde 100644 (file)
@@ -502,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
                ret = nf_ct_expect_related(rtcp_exp);
                if (ret == 0)
                        break;
-               else if (ret != -EBUSY) {
+               else if (ret == -EBUSY) {
+                       nf_ct_unexpect_related(rtp_exp);
+                       continue;
+               } else if (ret < 0) {
                        nf_ct_unexpect_related(rtp_exp);
                        port = 0;
                        break;
index 50f6d3a..d2d1e15 100644 (file)
@@ -934,12 +934,14 @@ static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
        if (mtu < ip_rt_min_pmtu)
                mtu = ip_rt_min_pmtu;
 
+       rcu_read_lock();
        if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
                struct fib_nh *nh = &FIB_RES_NH(res);
 
                update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
                                      jiffies + ip_rt_mtu_expires);
        }
+       rcu_read_unlock();
        return mtu;
 }
 
@@ -956,7 +958,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                dst->obsolete = DST_OBSOLETE_KILL;
        } else {
                rt->rt_pmtu = mtu;
-               dst_set_expires(&rt->dst, ip_rt_mtu_expires);
+               rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires);
        }
 }
 
@@ -1263,7 +1265,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
 {
        struct rtable *rt = (struct rtable *) dst;
 
-       if (dst->flags & DST_NOCACHE) {
+       if (!list_empty(&rt->rt_uncached)) {
                spin_lock_bh(&rt_uncached_lock);
                list_del(&rt->rt_uncached);
                spin_unlock_bh(&rt_uncached_lock);
index bcfccc5..ce4ffe9 100644 (file)
@@ -2930,13 +2930,14 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
  * tcp_xmit_retransmit_queue().
  */
 static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
-                                 int newly_acked_sacked, bool is_dupack,
+                                 int prior_sacked, bool is_dupack,
                                  int flag)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
                                    (tcp_fackets_out(tp) > tp->reordering));
+       int newly_acked_sacked = 0;
        int fast_rexmit = 0;
 
        if (WARN_ON(!tp->packets_out && tp->sacked_out))
@@ -2996,6 +2997,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                                tcp_add_reno_sack(sk);
                } else
                        do_lost = tcp_try_undo_partial(sk, pkts_acked);
+               newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
                break;
        case TCP_CA_Loss:
                if (flag & FLAG_DATA_ACKED)
@@ -3017,6 +3019,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                        if (is_dupack)
                                tcp_add_reno_sack(sk);
                }
+               newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
 
                if (icsk->icsk_ca_state <= TCP_CA_Disorder)
                        tcp_try_undo_dsack(sk);
@@ -3594,7 +3597,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        int prior_packets;
        int prior_sacked = tp->sacked_out;
        int pkts_acked = 0;
-       int newly_acked_sacked = 0;
        bool frto_cwnd = false;
 
        /* If the ack is older than previous acks
@@ -3670,8 +3672,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
 
        pkts_acked = prior_packets - tp->packets_out;
-       newly_acked_sacked = (prior_packets - prior_sacked) -
-                            (tp->packets_out - tp->sacked_out);
 
        if (tp->frto_counter)
                frto_cwnd = tcp_process_frto(sk, flag);
@@ -3685,7 +3685,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                    tcp_may_raise_cwnd(sk, flag))
                        tcp_cong_avoid(sk, ack, prior_in_flight);
                is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
-               tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+               tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
                                      is_dupack, flag);
        } else {
                if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
@@ -3702,7 +3702,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 no_queue:
        /* If data was DSACKed, see if we can undo a cwnd reduction. */
        if (flag & FLAG_DSACKING_ACK)
-               tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+               tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
                                      is_dupack, flag);
        /* If this ack opens up a zero window, clear backoff.  It was
         * being used to time the probes, and is probably far higher than
@@ -3722,8 +3722,7 @@ old_ack:
         */
        if (TCP_SKB_CB(skb)->sacked) {
                flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
-               newly_acked_sacked = tp->sacked_out - prior_sacked;
-               tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+               tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
                                      is_dupack, flag);
        }
 
index 393355d..513cab0 100644 (file)
@@ -1347,11 +1347,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
        /* Remove from tunnel list */
        spin_lock_bh(&pn->l2tp_tunnel_list_lock);
        list_del_rcu(&tunnel->list);
+       kfree_rcu(tunnel, rcu);
        spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-       synchronize_rcu();
 
        atomic_dec(&l2tp_tunnel_count);
-       kfree(tunnel);
 }
 
 /* Create a socket for the tunnel, if one isn't set up by
index a38ec6c..56d583e 100644 (file)
@@ -163,6 +163,7 @@ struct l2tp_tunnel_cfg {
 
 struct l2tp_tunnel {
        int                     magic;          /* Should be L2TP_TUNNEL_MAGIC */
+       struct rcu_head rcu;
        rwlock_t                hlist_lock;     /* protect session_hlist */
        struct hlist_head       session_hlist[L2TP_HASH_SIZE];
                                                /* hashed list of sessions,
index 3b807bc..29eb4e6 100644 (file)
@@ -1807,37 +1807,31 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                        meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
                                        sdata, NULL, NULL);
                } else {
-                       int is_mesh_mcast = 1;
-                       const u8 *mesh_da;
+                       /* DS -> MBSS (802.11-2012 13.11.3.3).
+                        * For unicast with unknown forwarding information,
+                        * the destination might be in the MBSS or, if that
+                        * fails, forwarded to another mesh gate. In either
+                        * case resolution will be handled in ieee80211_xmit(),
+                        * so leave the original DA. This also works for mcast */
+                       const u8 *mesh_da = skb->data;
+
+                       if (mppath)
+                               mesh_da = mppath->mpp;
+                       else if (mpath)
+                               mesh_da = mpath->dst;
+                       rcu_read_unlock();
 
-                       if (is_multicast_ether_addr(skb->data))
-                               /* DA TA mSA AE:SA */
-                               mesh_da = skb->data;
-                       else {
-                               static const u8 bcast[ETH_ALEN] =
-                                       { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-                               if (mppath) {
-                                       /* RA TA mDA mSA AE:DA SA */
-                                       mesh_da = mppath->mpp;
-                                       is_mesh_mcast = 0;
-                               } else if (mpath) {
-                                       mesh_da = mpath->dst;
-                                       is_mesh_mcast = 0;
-                               } else {
-                                       /* DA TA mSA AE:SA */
-                                       mesh_da = bcast;
-                               }
-                       }
                        hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
                                        mesh_da, sdata->vif.addr);
-                       rcu_read_unlock();
-                       if (is_mesh_mcast)
+                       if (is_multicast_ether_addr(mesh_da))
+                               /* DA TA mSA AE:SA */
                                meshhdrlen =
                                        ieee80211_new_mesh_header(&mesh_hdr,
                                                        sdata,
                                                        skb->data + ETH_ALEN,
                                                        NULL);
                        else
+                               /* RA TA mDA mSA AE:DA SA */
                                meshhdrlen =
                                        ieee80211_new_mesh_header(&mesh_hdr,
                                                        sdata,
index 3c60137..767cc12 100644 (file)
@@ -1171,8 +1171,10 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
                goto out_err;
        }
        svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
-       if (!svc->stats.cpustats)
+       if (!svc->stats.cpustats) {
+               ret = -ENOMEM;
                goto out_err;
+       }
 
        /* I'm the first user of the service */
        atomic_set(&svc->usecnt, 0);
index cf48755..2ceec64 100644 (file)
@@ -249,12 +249,15 @@ static void death_by_event(unsigned long ul_conntrack)
 {
        struct nf_conn *ct = (void *)ul_conntrack;
        struct net *net = nf_ct_net(ct);
+       struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+       BUG_ON(ecache == NULL);
 
        if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
                /* bad luck, let's retry again */
-               ct->timeout.expires = jiffies +
+               ecache->timeout.expires = jiffies +
                        (random32() % net->ct.sysctl_events_retry_timeout);
-               add_timer(&ct->timeout);
+               add_timer(&ecache->timeout);
                return;
        }
        /* we've got the event delivered, now it's dying */
@@ -268,6 +271,9 @@ static void death_by_event(unsigned long ul_conntrack)
 void nf_ct_insert_dying_list(struct nf_conn *ct)
 {
        struct net *net = nf_ct_net(ct);
+       struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+       BUG_ON(ecache == NULL);
 
        /* add this conntrack to the dying list */
        spin_lock_bh(&nf_conntrack_lock);
@@ -275,10 +281,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
                             &net->ct.dying);
        spin_unlock_bh(&nf_conntrack_lock);
        /* set a new timer to retry event delivery */
-       setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
-       ct->timeout.expires = jiffies +
+       setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
+       ecache->timeout.expires = jiffies +
                (random32() % net->ct.sysctl_events_retry_timeout);
-       add_timer(&ct->timeout);
+       add_timer(&ecache->timeout);
 }
 EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
 
index da4fc37..9807f32 100644 (file)
@@ -2790,7 +2790,8 @@ static int __init ctnetlink_init(void)
                goto err_unreg_subsys;
        }
 
-       if (register_pernet_subsys(&ctnetlink_net_ops)) {
+       ret = register_pernet_subsys(&ctnetlink_net_ops);
+       if (ret < 0) {
                pr_err("ctnetlink_init: cannot register pernet operations\n");
                goto err_unreg_exp_subsys;
        }
index 4142aac..be194b1 100644 (file)
@@ -482,7 +482,7 @@ __build_packet_message(struct nfulnl_instance *inst,
        }
 
        if (indev && skb_mac_header_was_set(skb)) {
-               if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
+               if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
                    nla_put_be16(inst->skb, NFULA_HWLEN,
                                 htons(skb->dev->hard_header_len)) ||
                    nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
@@ -1002,8 +1002,10 @@ static int __init nfnetlink_log_init(void)
 
 #ifdef CONFIG_PROC_FS
        if (!proc_create("nfnetlink_log", 0440,
-                        proc_net_netfilter, &nful_file_ops))
+                        proc_net_netfilter, &nful_file_ops)) {
+               status = -ENOMEM;
                goto cleanup_logger;
+       }
 #endif
        return status;
 
index aacfb1d..3821199 100644 (file)
@@ -1375,7 +1375,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                dst_pid = addr->nl_pid;
                dst_group = ffs(addr->nl_groups);
                err =  -EPERM;
-               if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
+               if ((dst_group || dst_pid) &&
+                   !netlink_capable(sock, NL_NONROOT_SEND))
                        goto out;
        } else {
                dst_pid = nlk->dst_pid;
@@ -2149,6 +2150,7 @@ static void __init netlink_add_usersock_entry(void)
        rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
        nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
        nl_table[NETLINK_USERSOCK].registered = 1;
+       nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
 
        netlink_table_ungrab();
 }
index 5dafe84..94060ed 100644 (file)
@@ -1162,7 +1162,7 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
        spin_unlock(&f->lock);
 }
 
-bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
+static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
 {
        if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
                return true;