Merge remote-tracking branch 'upstream/linux-3.14.y' into tizen_next_linux_3.14
author SeokYeon Hwang <syeon.hwang@samsung.com>
Thu, 13 Nov 2014 05:17:46 +0000 (14:17 +0900)
committer SeokYeon Hwang <syeon.hwang@samsung.com>
Thu, 13 Nov 2014 05:55:38 +0000 (14:55 +0900)
Signed-off-by: SeokYeon Hwang <syeon.hwang@samsung.com>
Conflicts:
arch/arm/boot/dts/cros5250-common.dtsi
arch/arm/mach-tegra/common.c
arch/x86/kernel/microcode_amd.c
drivers/staging/zram/zram_drv.c
drivers/staging/zsmalloc/zsmalloc-main.c
kernel/rcutree.h
kernel/rcutree_plugin.h

Change-Id: I5fc3a16aea9daee9cdc4f59ec9f845547af5be53

49 files changed:
drivers/acpi/battery.c
drivers/acpi/blacklist.c
drivers/acpi/processor_idle.c
drivers/block/virtio_blk.c
drivers/char/Kconfig
drivers/char/Makefile
drivers/cpufreq/intel_pstate.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/radeon/rv770_dpm.c
drivers/input/misc/arizona-haptics.c
drivers/md/dm-delay.c
drivers/media/usb/dvb-usb/dw2102.c
drivers/media/v4l2-core/v4l2-ioctl.c
drivers/misc/Makefile
drivers/net/ethernet/marvell/mvneta.c
drivers/net/xen-netback/netback.c
drivers/staging/rtl8712/usb_intf.c
drivers/target/iscsi/iscsi_target_util.c
drivers/usb/serial/option.c
drivers/video/backlight/backlight.c
drivers/video/omap2/displays-new/panel-sony-acx565akm.c
fs/ext4/inode.c
fs/nfsd/nfscache.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_qm.c
include/linux/backlight.h
include/linux/hugetlb.h
include/linux/mm_types.h
include/linux/pci_ids.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/thread_info.h
ipc/shm.c
kernel/sched/fair.c
kernel/time/tick-sched.c
kernel/trace/trace.c
mm/huge_memory.c
mm/mprotect.c
mm/page_alloc.c
net/core/iovec.c
net/mac80211/tx.c
net/packet/af_packet.c
net/sched/sch_tbf.c
security/selinux/xfrm.c
security/smack/smackfs.c
sound/pci/hda/patch_realtek.c
tools/perf/util/util.h

Simple merge
index f37dec579712dd996dc8aaac2ceab4fa6a97b5c1,3d8413d02a975f0643275a247524a0c9d7569341..523da37fa75428462ba6106be65418ac11a475d0
@@@ -291,60 -268,60 +268,108 @@@ static struct dmi_system_id acpi_osi_dm
        },
        {
        .callback = dmi_disable_osi_win8,
-       .ident = "Lenovo ThinkPad Edge E530",
+       .ident = "ThinkPad Edge E530",
        .matches = {
                     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                    DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
+               },
+       },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "ThinkPad Edge E530",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
+               },
+       },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "Acer Aspire V5-573G",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
+               },
+       },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "Acer Aspire V5-572G",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
+               },
+       },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "ThinkPad T431s",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
+               },
+       },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "ThinkPad T430",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
+               },
+       },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "Dell Inspiron 7737",
+       .matches = {
+                   DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                   DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
                },
        },
 +      {
 +      .callback = dmi_disable_osi_win8,
 +      .ident = "ThinkPad Edge E530",
 +      .matches = {
 +                   DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
 +                   DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
 +              },
 +      },
 +      {
 +      .callback = dmi_disable_osi_win8,
 +      .ident = "ThinkPad Edge E530",
 +      .matches = {
 +                   DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
 +                   DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
 +              },
 +      },
 +      {
 +      .callback = dmi_disable_osi_win8,
 +      .ident = "Acer Aspire V5-573G",
 +      .matches = {
 +                   DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
 +                   DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
 +              },
 +      },
 +      {
 +      .callback = dmi_disable_osi_win8,
 +      .ident = "Acer Aspire V5-572G",
 +      .matches = {
 +                   DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
 +                   DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
 +              },
 +      },
 +      {
 +      .callback = dmi_disable_osi_win8,
 +      .ident = "ThinkPad T431s",
 +      .matches = {
 +                   DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
 +                   DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
 +              },
 +      },
 +      {
 +      .callback = dmi_disable_osi_win8,
 +      .ident = "ThinkPad T430",
 +      .matches = {
 +                   DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
 +                   DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
 +              },
 +      },
  
        /*
         * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
Simple merge
index f0106cc9b184e6c55148a3faf55ab1ef03a4c8b0,6c911c86fb5a404ad1cd2fb990f3d18f49bbea28..f5c9f5460eb4bdd0f0afbdd64a4b491a4a9047cf
@@@ -751,23 -562,11 +562,23 @@@ static int virtblk_probe(struct virtio_
                goto out_put_disk;
        }
  
-       if (use_bio)
-               blk_queue_make_request(q, virtblk_make_request);
+       blk_mq_init_commands(q, virtblk_init_vbr, vblk);
        q->queuedata = vblk;
  
 +#ifdef CONFIG_MARU    // for virtio sdcard...
 +      // index 0 for root partition
 +      // index 1 for swap partition
 +      // index over 2 for mmcblk
 +      if (index > 1) {
 +              snprintf(vblk->disk->disk_name, DISK_NAME_LEN, "mmcblk%d", index - 2);
 +      }
 +      else {
 +              virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
 +      }
 +#else
        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
 +#endif
  
        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
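
The CONFIG_MARU block above only changes how the gendisk is named: guest disk indexes 0 and 1 keep the usual virtio "vd" names, anything higher is exposed as an emulated MMC device. A minimal user-space sketch of that naming rule (the "vd" suffix logic is simplified here; the real driver's virtblk_name_format() handles indexes past 'z'):

#include <stdio.h>

#define DISK_NAME_LEN 32

/* Sketch only: index 0/1 -> root/swap as vdX, index >= 2 -> mmcblk(index - 2). */
static void maru_style_name(int index, char *buf)
{
	if (index > 1)
		snprintf(buf, DISK_NAME_LEN, "mmcblk%d", index - 2);
	else
		snprintf(buf, DISK_NAME_LEN, "vd%c", 'a' + index);
}

int main(void)
{
	char name[DISK_NAME_LEN];
	int i;

	for (i = 0; i < 4; i++) {
		maru_style_name(i, name);
		printf("index %d -> %s\n", i, name);
	}
	return 0;
}
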
Simple merge
Simple merge
index d5dc567efd9682374298b7448f9531dd5b0ae3d6,ae52c777339d12865006fb2fb29b7ec95bde8916..d1f443c65b71e73b3e5546eff2a34cb7af27e0b4
@@@ -550,16 -709,9 +709,14 @@@ static int intel_pstate_init_cpu(unsign
  
        cpu = all_cpu_data[cpunum];
  
+       cpu->cpu = cpunum;
        intel_pstate_get_cpu_pstates(cpu);
 +      if (!cpu->pstate.current_pstate) {
 +              all_cpu_data[cpunum] = NULL;
 +              kfree(cpu);
 +              return -ENODATA;
 +      }
  
-       cpu->cpu = cpunum;
-       cpu->pstate_policy =
-               (struct pstate_adjust_policy *)id->driver_data;
        init_timer_deferrable(&cpu->timer);
        cpu->timer.function = intel_pstate_timer_func;
        cpu->timer.data =
Simple merge
index 554762087292826f707fa79e196daec17e1d3855,292a79d64146274428ad1ef251d08e1d78d46bf5..246b57d2d7417cb6935df5b8dda9fd2793579cde
@@@ -48,8 -51,8 +51,9 @@@ obj-$(CONFIG_DRM_NOUVEAU) +=nouveau
  obj-$(CONFIG_DRM_EXYNOS) +=exynos/
  obj-$(CONFIG_DRM_GMA500) += gma500/
  obj-$(CONFIG_DRM_UDL) += udl/
 +obj-$(CONFIG_DRM_VIGS) += vigs/
  obj-$(CONFIG_DRM_AST) += ast/
+ obj-$(CONFIG_DRM_ARMADA) += armada/
  obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
  obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
  obj-$(CONFIG_DRM_OMAP)        += omapdrm/
Simple merge
index a239b30aaf9df114fb1ea8d308a4fb1bca8b8d56,8fcb932a3a553d8ba09369f87d9d347691de73a7..0fe69007853e3d5cf54f7de18ceb2ccb6a78e5af
@@@ -2522,15 -2520,8 +2526,14 @@@ u32 rv770_dpm_get_mclk(struct radeon_de
  bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
  {
        u32 vblank_time = r600_dpm_get_vblank_time(rdev);
-       u32 switch_limit = 300;
-       /* quirks */
-       /* ASUS K70AF */
-       if ((rdev->pdev->device == 0x9553) &&
-           (rdev->pdev->subsystem_vendor == 0x1043) &&
-           (rdev->pdev->subsystem_device == 0x1c42))
-               switch_limit = 200;
+       u32 switch_limit = 200; /* 300 */
++      /* RV770 */
++      /* mclk switching doesn't seem to work reliably on desktop RV770s */
++      if ((rdev->family == CHIP_RV770) &&
++          !(rdev->flags & RADEON_IS_MOBILITY))
++              switch_limit = 0xffffffff; /* disable mclk switching */
 +
        /* RV770 */
        /* mclk switching doesn't seem to work reliably on desktop RV770s */
        if ((rdev->family == CHIP_RV770) &&
Simple merge
index 2f91d6d4a2ccf40023c6bccfe142d7781024c810,42c3a27a14cc3a906b5f892a6206de348b6b58ee..c00066880f10c75ee73453fc8df3ee20a32b6e8c
@@@ -185,18 -182,12 +182,18 @@@ static int delay_ctr(struct dm_target *
        }
  
  out:
-       dc->delayed_pool = mempool_create_slab_pool(128, delayed_cache);
-       if (!dc->delayed_pool) {
-               DMERR("Couldn't create delayed bio pool.");
-               goto bad_dev_write;
+       dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
+       if (!dc->kdelayd_wq) {
+               DMERR("Couldn't start kdelayd");
+               goto bad_queue;
        }
  
 +      dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
 +      if (!dc->kdelayd_wq) {
 +              DMERR("Couldn't start kdelayd");
 +              goto bad_queue;
 +      }
 +
        setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
  
        INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
index 4170a45d17e0ae9b0770cb690780c0790bb82394,ae0f56a32e4d0ba888b09a214492cb2b2f14b244..943b6972d05423f8d11b6eafbe161d3478689a8c
  #include "stb6100.h"
  #include "stb6100_proc.h"
  #include "m88rs2000.h"
+ #include "tda18271.h"
+ #include "cxd2820r.h"
+ /* Max transfer size done by I2C transfer functions */
+ #define MAX_XFER_SIZE  64
  
 +/* Max transfer size done by I2C transfer functions */
 +#define MAX_XFER_SIZE  64
 +
  #ifndef USB_PID_DW2102
  #define USB_PID_DW2102 0x2102
  #endif
Simple merge
index 2eea55ef9fdcdc0944c85213e7713f444eae6904,99b9424ce31d809fac0f55928c70b1a9c875d6a1..93595dec9f9d6ee2d27143d994597723b3a9bd38
@@@ -51,6 -50,7 +50,8 @@@ obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa
  obj-$(CONFIG_ALTERA_STAPL)    +=altera-stapl/
  obj-$(CONFIG_INTEL_MEI)               += mei/
  obj-$(CONFIG_VMWARE_VMCI)     += vmw_vmci/
 +obj-$(CONFIG_SLP_GLOBAL_LOCK) += slp_global_lock.o
  obj-$(CONFIG_LATTICE_ECP3_CONFIG)     += lattice-ecp3-config.o
  obj-$(CONFIG_SRAM)            += sram.o
+ obj-y                         += mic/
+ obj-$(CONFIG_GENWQE)          += genwqe/
Simple merge
Simple merge
index e3a005da776b8f432eff2792f54fe6d2f66760fb,bbd5888e316b788102743f9c5f4f655500045ef0..4e1996e16a8e69638f88c6b159747b83fd77c68c
@@@ -353,15 -353,10 +353,14 @@@ static void disable_ht_for_spec_devid(c
        }
  }
  
- static u8 key_2char2num(u8 hch, u8 lch)
- {
-       return (hex_to_bin(hch) << 4) | hex_to_bin(lch);
- }
+ static const struct device_type wlan_type = {
+       .name = "wlan",
+ };
  
 +static const struct device_type wlan_type = {
 +      .name = "wlan",
 +};
 +
  /*
   * drv_init() - a device potentially for us
   *
index 1039de499bc6b0f619f8c20a5b5f4eb68230d867,ab77f80ead2b2679c5f49d030355069d2b8d640e..32843084d60e8032a36020c626f4cd5217182163
@@@ -156,13 -156,12 +156,17 @@@ struct iscsi_cmd *iscsit_allocate_cmd(s
  {
        struct iscsi_cmd *cmd;
        struct se_session *se_sess = conn->sess->se_sess;
 -      int size, tag;
 +      int size, tag, state = (gfp_mask & __GFP_WAIT) ? TASK_INTERRUPTIBLE :
 +                              TASK_RUNNING;
 +
 +      tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
 +      if (tag < 0)
 +              return NULL;
  
+       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
+       if (tag < 0)
+               return NULL;
        size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
        cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
        memset(cmd, 0, size);
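
The hunk above maps the caller's gfp_mask onto a task state for percpu_ida_alloc(): __GFP_WAIT callers may sleep for a tag (TASK_INTERRUPTIBLE), everyone else fails fast (TASK_RUNNING) and gets NULL back. A hedged user-space analogue of that "block or bail out" pattern, using a POSIX semaphore as a stand-in tag pool (not the kernel API):

#include <stdio.h>
#include <semaphore.h>

static sem_t tag_pool;

/* may_wait mirrors (gfp_mask & __GFP_WAIT) in the hunk above. */
static int alloc_tag(int may_wait)
{
	if (may_wait)
		return sem_wait(&tag_pool);	/* sleep until a tag frees up */
	return sem_trywait(&tag_pool);		/* fail immediately, like tag < 0 */
}

int main(void)
{
	sem_init(&tag_pool, 0, 1);
	printf("first alloc: %d\n", alloc_tag(0));	/* 0: got the only tag */
	printf("second alloc: %d\n", alloc_tag(0));	/* -1: pool empty, no wait */
	sem_destroy(&tag_pool);
	return 0;
}
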
Simple merge
Simple merge
index d94f35dbd5369e83a50265c78b421aac2ab300c7,8e97d06921ffdfff11a20ce6916666d8c1672351..ffabee9f6a161a0cfd17bcd5b9fc8276b3d60116
@@@ -616,7 -630,9 +632,8 @@@ static int acx565akm_enable(struct omap
        if (omapdss_device_is_enabled(dssdev))
                return 0;
  
 -      mutex_lock(&ddata->mutex);
        r = acx565akm_panel_power_on(dssdev);
+       mutex_unlock(&ddata->mutex);
        if (r)
                return r;
  
diff --cc fs/ext4/inode.c
Simple merge
Simple merge
Simple merge
diff --cc fs/xfs/xfs_qm.c
index 4688a622b3734bfbb9241b4cefc2299c6ddf56cf,6d7d1de134037c70be4adcf3cd84ad44e13caaa3..e03b3d7300c3f6598fc14a505a0d5a6416e7549c
@@@ -225,6 -222,6 +222,14 @@@ xfs_qm_dqpurge_hints
  
        xfs_dqunlock(dqp);
  
++      xfs_dqlock(dqp);
++      if (dqp->dq_flags & XFS_DQ_FREEING) {
++              xfs_dqunlock(dqp);
++              return EAGAIN;
++      }
++
++      /* If this quota has a hint attached, prepare for releasing it now */
++      gdqp = dqp->q_gdquot;
        if (gdqp)
                xfs_qm_dqrele(gdqp);
        if (pdqp)
Simple merge
Simple merge
index 8e082f18fb6aa149cbbbe96da63b468cfa5ed5cf,2b58d192ea2401071c6a44d84286ba956acc939d..6a175451483d39781f82a14770ed072a8c1ad7bc
@@@ -428,13 -443,7 +443,15 @@@ struct mm_struct 
  
        /* numa_scan_seq prevents two threads setting pte_numa */
        int numa_scan_seq;
+ #endif
++#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
 +      /*
-        * The first node a task was scheduled on. If a task runs on
-        * a different node than Make PTE Scan Go Now.
++       * An operation with batched TLB flushing is going on. Anything that
++       * can move process memory needs to flush the TLB when moving a
++       * PROT_NONE or PROT_NUMA mapped page.
 +       */
-       int first_nid;
++      bool tlb_flush_pending;
 +#endif
  #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
        /*
         * An operation with batched TLB flushing is going on. Anything that
Simple merge
index b1e963efbde8aebf80441815bc97e83ca08fdde8,218b058060f14dc515495d40e9c988f180ec0a68..b2f1186b702d1fbcdb49459f700fa52c4eb2db75
@@@ -318,19 -396,13 +396,17 @@@ arch_get_unmapped_area_topdown(struct f
  static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
  #endif
  
- extern void set_dumpable(struct mm_struct *mm, int value);
- extern int get_dumpable(struct mm_struct *mm);
+ #define SUID_DUMP_DISABLE     0       /* No setuid dumping */
+ #define SUID_DUMP_USER                1       /* Dump as user of process */
+ #define SUID_DUMP_ROOT                2       /* Dump as root */
 +
 +#define SUID_DUMP_DISABLE     0       /* No setuid dumping */
 +#define SUID_DUMP_USER                1       /* Dump as user of process */
 +#define SUID_DUMP_ROOT                2       /* Dump as root */
  
  /* mm flags */
- /* dumpable bits */
- #define MMF_DUMPABLE      0  /* core dump is permitted */
- #define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
  
+ /* for SUID_DUMP_* above */
  #define MMF_DUMPABLE_BITS 2
  #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
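
The dumpable state above lives in the low MMF_DUMPABLE_BITS of the mm flags: with 2 bits the mask is 0x3 and the three SUID_DUMP_* values fit exactly. A sketch of that packing; the set_dumpable()/get_dumpable() here are simplified user-space stand-ins, not the kernel helpers (which take a struct mm_struct * as in the removed prototypes):

#include <stdio.h>

#define SUID_DUMP_DISABLE 0
#define SUID_DUMP_USER    1
#define SUID_DUMP_ROOT    2

#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

static unsigned long set_dumpable(unsigned long flags, int value)
{
	return (flags & ~MMF_DUMPABLE_MASK) | (value & MMF_DUMPABLE_MASK);
}

static int get_dumpable(unsigned long flags)
{
	return flags & MMF_DUMPABLE_MASK;
}

int main(void)
{
	unsigned long flags = 0xf0;	/* other mm flags stay untouched */

	flags = set_dumpable(flags, SUID_DUMP_ROOT);
	printf("flags=0x%lx dumpable=%d\n", flags, get_dumpable(flags));
	return 0;
}
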
  
index 9995165ff3d0cae65587f08caf620a68b23267fc,ad8f85908a565f0a2be0e1362fcfa16aa560ab2f..0c4c2c9b5c38ae02cb58405ca846edccdbdf826b
@@@ -2338,63 -2421,55 +2421,57 @@@ static inline void skb_frag_add_head(st
  #define skb_walk_frags(skb, iter)     \
        for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
  
- extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
-                                          int *peeked, int *off, int *err);
- extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
-                                        int noblock, int *err);
- extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
-                                    struct poll_table_struct *wait);
- extern int           skb_copy_datagram_iovec(const struct sk_buff *from,
-                                              int offset, struct iovec *to,
-                                              int size);
- extern int           skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
-                                                       int hlen,
-                                                       struct iovec *iov);
- extern int           skb_copy_datagram_from_iovec(struct sk_buff *skb,
-                                                   int offset,
-                                                   const struct iovec *from,
-                                                   int from_offset,
-                                                   int len);
- extern int           zerocopy_sg_from_iovec(struct sk_buff *skb,
-                                             const struct iovec *frm,
-                                             int offset,
-                                             size_t count);
- extern int           skb_copy_datagram_const_iovec(const struct sk_buff *from,
-                                                    int offset,
-                                                    const struct iovec *to,
-                                                    int to_offset,
-                                                    int size);
- extern void          skb_free_datagram(struct sock *sk, struct sk_buff *skb);
- extern void          skb_free_datagram_locked(struct sock *sk,
-                                               struct sk_buff *skb);
- extern int           skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
-                                        unsigned int flags);
- extern __wsum        skb_checksum(const struct sk_buff *skb, int offset,
-                                   int len, __wsum csum);
- extern int           skb_copy_bits(const struct sk_buff *skb, int offset,
-                                    void *to, int len);
- extern int           skb_store_bits(struct sk_buff *skb, int offset,
-                                     const void *from, int len);
- extern __wsum        skb_copy_and_csum_bits(const struct sk_buff *skb,
-                                             int offset, u8 *to, int len,
-                                             __wsum csum);
- extern int             skb_splice_bits(struct sk_buff *skb,
-                                               unsigned int offset,
-                                               struct pipe_inode_info *pipe,
-                                               unsigned int len,
-                                               unsigned int flags);
- extern void          skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
- extern void          skb_split(struct sk_buff *skb,
-                                struct sk_buff *skb1, const u32 len);
- extern int           skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
-                                int shiftlen);
- extern void          skb_scrub_packet(struct sk_buff *skb, bool xnet);
- extern struct sk_buff *skb_segment(struct sk_buff *skb,
-                                  netdev_features_t features);
+ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+                                   int *peeked, int *off, int *err);
+ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+                                 int *err);
+ unsigned int datagram_poll(struct file *file, struct socket *sock,
+                          struct poll_table_struct *wait);
+ int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
+                           struct iovec *to, int size);
+ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
+                                    struct iovec *iov);
+ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
+                                const struct iovec *from, int from_offset,
+                                int len);
+ int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm,
+                          int offset, size_t count);
+ int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset,
+                                 const struct iovec *to, int to_offset,
+                                 int size);
+ void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+ void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
+ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
+ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
+ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
+ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
+                             int len, __wsum csum);
+ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+                   struct pipe_inode_info *pipe, unsigned int len,
+                   unsigned int flags);
+ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+ unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
+ int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
+                int len, int hlen);
+ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
+ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
+ void skb_scrub_packet(struct sk_buff *skb, bool xnet);
+ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
+ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
+ struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
+ struct skb_checksum_ops {
+       __wsum (*update)(const void *mem, int len, __wsum wsum);
+       __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
+ };
+ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
+                     __wsum csum, const struct skb_checksum_ops *ops);
+ __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
+                   __wsum csum);
  
 +unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
 +
  static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
                                       int len, void *buffer)
  {
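
Among the prototype reshuffling above, the one new data structure is skb_checksum_ops: __skb_checksum() walks the buffer and lets the caller plug in how partial checksums are updated and combined. A standalone user-space sketch of that callback shape (names and checksum math are illustrative only, not the kernel API):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Mirrors the update/combine split of struct skb_checksum_ops. */
struct csum_ops {
	uint32_t (*update)(const void *mem, size_t len, uint32_t sum);
	uint32_t (*combine)(uint32_t a, uint32_t b);
};

static uint32_t add_bytes(const void *mem, size_t len, uint32_t sum)
{
	const uint8_t *p = mem;

	while (len--)
		sum += *p++;
	return sum;
}

static uint32_t add_sums(uint32_t a, uint32_t b)
{
	return a + b;
}

/* Checksum two fragments through the pluggable ops, like a two-frag walk. */
int main(void)
{
	static const struct csum_ops ops = { .update = add_bytes, .combine = add_sums };
	const char frag1[] = "abc", frag2[] = "de";
	uint32_t s1 = ops.update(frag1, 3, 0);
	uint32_t s2 = ops.update(frag2, 2, 0);

	printf("combined pseudo-checksum: %u\n", ops.combine(s1, s2));
	return 0;
}
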
index 4ae6f32c8033de9ae577ef2e8418a2ba36a65b7f,a629e4b23217df9d0611f247c2c9144d62d662cc..fddbe2023a5d568717b3b90eabc692bb4612f9bd
@@@ -104,11 -104,20 +104,22 @@@ static inline int test_ti_thread_flag(s
  #define test_thread_flag(flag) \
        test_ti_thread_flag(current_thread_info(), flag)
  
- #define set_need_resched()    set_thread_flag(TIF_NEED_RESCHED)
- #define clear_need_resched()  clear_thread_flag(TIF_NEED_RESCHED)
+ static inline __deprecated void set_need_resched(void)
+ {
+       /*
+        * Use of this function in deprecated.
+        *
+        * As of this writing there are only a few users in the DRM tree left
+        * all of which are wrong and can be removed without causing too much
+        * grief.
+        *
+        * The DRM people are aware and are working on removing the last few
+        * instances.
+        */
+ }
  
 +#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
 +
  #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
  /*
   * An arch can define its own version of set_restore_sigmask() to get the
diff --cc ipc/shm.c
Simple merge
index 790e2fc808daf78cdfd940c6d18ed58a166862fb,9b4c4f3201301a269bd66718650899274f204bc6..2c7e85842ee8499b2db7d960c3e89a94d90fb3cc
@@@ -967,20 -1720,26 +1720,33 @@@ void task_numa_work(struct callback_hea
                vma = mm->mmap;
        }
        for (; vma; vma = vma->vm_next) {
-               if (!vma_migratable(vma))
+               if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
+                       continue;
+               /*
+                * Shared library pages mapped by multiple processes are not
+                * migrated as it is expected they are cache replicated. Avoid
+                * hinting faults in read-only file-backed mappings or the vdso
+                * as migrating the pages will be of marginal benefit.
+                */
+               if (!vma->vm_mm ||
+                   (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
                        continue;
  
-               /* Skip small VMAs. They are not likely to be of relevance */
-               if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
+               /*
+                * Skip inaccessible VMAs to avoid any confusion between
+                * PROT_NONE and NUMA hinting ptes
+                */
+               if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        continue;
  
 +              /*
 +               * Skip inaccessible VMAs to avoid any confusion between
 +               * PROT_NONE and NUMA hinting ptes
 +               */
 +              if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
 +                      continue;
 +
                do {
                        start = max(start, vma->vm_start);
                        end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
Simple merge
index 138077b1a6070bbbf30a16d432c515859d2bdf7d,71136720ffa189ea63c4bfea23b6fab884b9650b..c18da575818cc07bb76b48200df0cd0b195fef01
@@@ -434,7 -454,13 +454,16 @@@ int __trace_puts(unsigned long ip, cons
        struct print_entry *entry;
        unsigned long irq_flags;
        int alloc;
+       int pc;
+       if (!(trace_flags & TRACE_ITER_PRINTK))
+               return 0;
+       pc = preempt_count();
++      if (unlikely(tracing_selftest_running || tracing_disabled))
++              return 0;
 +
        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;
  
@@@ -477,7 -504,13 +507,16 @@@ int __trace_bputs(unsigned long ip, con
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);
+       int pc;
+       if (!(trace_flags & TRACE_ITER_PRINTK))
+               return 0;
+       pc = preempt_count();
++      if (unlikely(tracing_selftest_running || tracing_disabled))
++              return 0;
 +
        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;
  
index 389973fd6bb782ac322b52069ec8e25b1e869d47,718bfa16a36f7fcde45e732e4fefdb7c68e6bf38..603f3743acbc0d4fb9f05067832d57abe6db327e
@@@ -1499,21 -1547,25 +1547,27 @@@ int change_huge_pmd(struct vm_area_stru
                        if (pmd_numa(entry))
                                entry = pmd_mknonnuma(entry);
                        entry = pmd_modify(entry, newprot);
+                       ret = HPAGE_PMD_NR;
+                       set_pmd_at(mm, addr, pmd, entry);
                        BUG_ON(pmd_write(entry));
 +                      set_pmd_at(mm, addr, pmd, entry);
                } else {
                        struct page *page = pmd_page(*pmd);
 +                      entry = *pmd;
  
-                       /* only check non-shared pages */
-                       if (page_mapcount(page) == 1 &&
+                       /*
+                        * Do not trap faults against the zero page. The
+                        * read-only data is likely to be read-cached on the
+                        * local CPU cache and it is less useful to know about
+                        * local vs remote hits on the zero page.
+                        */
+                       if (!is_huge_zero_page(page) &&
                            !pmd_numa(*pmd)) {
-                               entry = pmd_mknuma(entry);
-                               set_pmd_at(mm, addr, pmd, entry);
+                               pmdp_set_numa(mm, addr, pmd);
+                               ret = HPAGE_PMD_NR;
                        }
                }
-               spin_unlock(&vma->vm_mm->page_table_lock);
-               ret = 1;
+               spin_unlock(ptl);
        }
  
        return ret;
diff --cc mm/mprotect.c
index 7651a571f2830926fa6db0b87e70c9691ae7ff91,769a67a158037197341742104d2a9023cb1e03a0..df5867793d1b39cb60885203f0c61daa45f0da4b
@@@ -63,20 -69,10 +69,11 @@@ static unsigned long change_pte_range(s
                        } else {
                                struct page *page;
  
 +                              ptent = *pte;
                                page = vm_normal_page(vma, addr, oldpte);
-                               if (page) {
-                                       int this_nid = page_to_nid(page);
-                                       if (last_nid == -1)
-                                               last_nid = this_nid;
-                                       if (last_nid != this_nid)
-                                               all_same_node = false;
-                                       /* only check non-shared pages */
-                                       if (!pte_numa(oldpte) &&
-                                           page_mapcount(page) == 1) {
-                                               ptent = pte_mknuma(ptent);
-                                               set_pte_at(mm, addr, pte, ptent);
+                               if (page && !PageKsm(page)) {
+                                       if (!pte_numa(oldpte)) {
+                                               ptep_set_numa(mm, addr, pte);
                                                updated = true;
                                        }
                                }
diff --cc mm/page_alloc.c
Simple merge
index 7d84ea1fbb20d38c5668713c5be3921049881703,26dc0062652f2a6ed4c5470372bd44b88c987806..72cba9b45105645dc9a550139ea7ffe6cecfacda
@@@ -48,10 -48,10 +48,11 @@@ int verify_iovec(struct msghdr *m, stru
                        if (err < 0)
                                return err;
                }
 -              m->msg_name = address;
 +              if (m->msg_name)
 +                      m->msg_name = address;
        } else {
                m->msg_name = NULL;
+               m->msg_namelen = 0;
        }
  
        size = m->msg_iovlen * sizeof(struct iovec);
index d6a47e76efffd868bab722225531c55cc671838b,e5a7ac2f36879ff9026d82dd7f8b4fe870a5788f..74132f43b3b44f2a052829c32c1aa678ee77dafb
@@@ -530,22 -545,8 +535,11 @@@ ieee80211_tx_h_unicast_ps_buf(struct ie
  static ieee80211_tx_result debug_noinline
  ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
  {
 +      struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 +      struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 +
        if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
                return TX_CONTINUE;
-       /* only deauth, disassoc and action are bufferable MMPDUs */
-       if (ieee80211_is_mgmt(hdr->frame_control) &&
-           !ieee80211_is_deauth(hdr->frame_control) &&
-           !ieee80211_is_disassoc(hdr->frame_control) &&
-           !ieee80211_is_action(hdr->frame_control)) {
-               if (tx->flags & IEEE80211_TX_UNICAST)
-                       info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
-               return TX_CONTINUE;
-       }
        if (tx->flags & IEEE80211_TX_UNICAST)
                return ieee80211_tx_h_unicast_ps_buf(tx);
        else
index 88cfbc189558f75b4b508b849827eea18c3be7c0,48b181797d7b5dc652bc374ad310569456eaaeea..9b859001afa0ebc670bea646988179a306f19be8
@@@ -2639,9 -2805,14 +2805,16 @@@ static int packet_create(struct net *ne
        po = pkt_sk(sk);
        sk->sk_family = PF_PACKET;
        po->num = proto;
+       po->xmit = dev_queue_xmit;
+       err = packet_alloc_pending(po);
+       if (err)
+               goto out2;
+       packet_cached_dev_reset(po);
  
 +      packet_cached_dev_reset(po);
 +
        sk->sk_destruct = packet_sock_destruct;
        sk_refcnt_debug_inc(sk);
  
index fecd35af1935184f8ed60f116cb273120bd68abc,4f505a006896578ebdac1392af1dc064500665c9..c96675d0285cc0d32102f6c771f855ca5936325c
@@@ -294,39 -326,58 +326,63 @@@ static int tbf_change(struct Qdisc *sch
                goto done;
  
        qopt = nla_data(tb[TCA_TBF_PARMS]);
-       rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
-       if (rtab == NULL)
-               goto done;
+       if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
+               qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
+                                             tb[TCA_TBF_RTAB]));
+       if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
+                       qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
+                                                     tb[TCA_TBF_PTAB]));
+       buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
+       mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
+       if (tb[TCA_TBF_RATE64])
+               rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
+       psched_ratecfg_precompute(&rate, &qopt->rate, rate64);
+       if (tb[TCA_TBF_BURST]) {
+               max_size = nla_get_u32(tb[TCA_TBF_BURST]);
+               buffer = psched_l2t_ns(&rate, max_size);
+       } else {
+               max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
+       }
  
        if (qopt->peakrate.rate) {
-               if (qopt->peakrate.rate > qopt->rate.rate)
-                       ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
-               if (ptab == NULL)
+               if (tb[TCA_TBF_PRATE64])
+                       prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
+               psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
+               if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
+                       pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equals to rate %llu !\n",
+                                       peak.rate_bytes_ps, rate.rate_bytes_ps);
+                       err = -EINVAL;
                        goto done;
-       }
+               }
  
-       for (n = 0; n < 256; n++)
-               if (rtab->data[n] > qopt->buffer)
-                       break;
-       max_size = (n << qopt->rate.cell_log) - 1;
-       if (ptab) {
-               int size;
-               for (n = 0; n < 256; n++)
-                       if (ptab->data[n] > qopt->mtu)
-                               break;
-               size = (n << qopt->peakrate.cell_log) - 1;
-               if (size < max_size)
-                       max_size = size;
+               if (tb[TCA_TBF_PBURST]) {
+                       u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
+                       max_size = min_t(u32, max_size, pburst);
+                       mtu = psched_l2t_ns(&peak, pburst);
+               } else {
+                       max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
+               }
        }
-       if (max_size < 0)
+       if (max_size < psched_mtu(qdisc_dev(sch)))
+               pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
+                                   max_size, qdisc_dev(sch)->name,
+                                   psched_mtu(qdisc_dev(sch)));
+       if (!max_size) {
+               err = -EINVAL;
                goto done;
+       }
  
 +      if (max_size < psched_mtu(qdisc_dev(sch)))
 +              pr_warn_ratelimited("sch_tbf: burst %u is lower than device %s mtu (%u) !\n",
 +                                  max_size, qdisc_dev(sch)->name,
 +                                  psched_mtu(qdisc_dev(sch)));
 +
        if (q->qdisc != &noop_qdisc) {
                err = fifo_set_limit(q->qdisc, qopt->limit);
                if (err)
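
The reworked tbf_change() above drops the old rate tables and converts burst sizes to time directly: psched_l2t_ns() is roughly bytes * NSEC_PER_SEC / rate, and a burst smaller than the device MTU triggers the rate-limited warning. A small sketch of that arithmetic with made-up example numbers:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Rough user-space equivalent of psched_l2t_ns(): time to send `len` bytes. */
static uint64_t len_to_ns(uint64_t rate_bytes_per_sec, uint64_t len)
{
	return len * NSEC_PER_SEC / rate_bytes_per_sec;
}

int main(void)
{
	uint64_t rate = 125000000;	/* 1 Gbit/s expressed in bytes per second */
	uint32_t burst = 1000;		/* smaller than a 1500-byte MTU */
	uint32_t mtu = 1500;

	printf("buffer = %llu ns\n", (unsigned long long)len_to_ns(rate, burst));
	if (burst < mtu)
		printf("sch_tbf would warn: burst %u is lower than device mtu %u\n",
		       burst, mtu);
	return 0;
}
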
index 78504a18958aa8438b873fcbfb3279e062fd7e8f,98b042630a9eafcfa982a469461ee5fd4da362b7..22cbd112a9ed4a3d838f42ca41768737f413b260
@@@ -182,132 -255,27 +255,66 @@@ out
        return 0;
  }
  
 +static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb)
 +{
 +      struct dst_entry *dst = skb_dst(skb);
 +      struct xfrm_state *x;
 +
 +      if (dst == NULL)
 +              return SECSID_NULL;
 +      x = dst->xfrm;
 +      if (x == NULL || !selinux_authorizable_xfrm(x))
 +              return SECSID_NULL;
 +
 +      return x->security->ctx_sid;
 +}
 +
 +/*
 + * LSM hook implementation that checks and/or returns the xfrm sid for the
 + * incoming packet.
 + */
 +
 +int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
 +{
 +      if (skb == NULL) {
 +              *sid = SECSID_NULL;
 +              return 0;
 +      }
 +      return selinux_xfrm_skb_sid_ingress(skb, sid, ckall);
 +}
 +
 +int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
 +{
 +      int rc;
 +
 +      rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0);
 +      if (rc == 0 && *sid == SECSID_NULL)
 +              *sid = selinux_xfrm_skb_sid_egress(skb);
 +
 +      return rc;
 +}
 +
  /*
-  * Security blob allocation for xfrm_policy and xfrm_state
-  * CTX does not have a meaningful value on input
+  * LSM hook implementation that checks and/or returns the xfrm sid for the
+  * incoming packet.
   */
- static int selinux_xfrm_sec_ctx_alloc(struct xfrm_sec_ctx **ctxp,
-       struct xfrm_user_sec_ctx *uctx, u32 sid)
+ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
  {
-       int rc = 0;
-       const struct task_security_struct *tsec = current_security();
-       struct xfrm_sec_ctx *ctx = NULL;
-       char *ctx_str = NULL;
-       u32 str_len;
-       BUG_ON(uctx && sid);
-       if (!uctx)
-               goto not_from_user;
-       if (uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
-               return -EINVAL;
-       str_len = uctx->ctx_len;
-       if (str_len >= PAGE_SIZE)
-               return -ENOMEM;
-       *ctxp = ctx = kmalloc(sizeof(*ctx) +
-                             str_len + 1,
-                             GFP_KERNEL);
-       if (!ctx)
-               return -ENOMEM;
-       ctx->ctx_doi = uctx->ctx_doi;
-       ctx->ctx_len = str_len;
-       ctx->ctx_alg = uctx->ctx_alg;
-       memcpy(ctx->ctx_str,
-              uctx+1,
-              str_len);
-       ctx->ctx_str[str_len] = 0;
-       rc = security_context_to_sid(ctx->ctx_str,
-                                    str_len,
-                                    &ctx->ctx_sid);
-       if (rc)
-               goto out;
-       /*
-        * Does the subject have permission to set security context?
-        */
-       rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
-                         SECCLASS_ASSOCIATION,
-                         ASSOCIATION__SETCONTEXT, NULL);
-       if (rc)
-               goto out;
-       return rc;
- not_from_user:
-       rc = security_sid_to_context(sid, &ctx_str, &str_len);
-       if (rc)
-               goto out;
-       *ctxp = ctx = kmalloc(sizeof(*ctx) +
-                             str_len,
-                             GFP_ATOMIC);
-       if (!ctx) {
-               rc = -ENOMEM;
-               goto out;
+       if (skb == NULL) {
+               *sid = SECSID_NULL;
+               return 0;
        }
+       return selinux_xfrm_skb_sid_ingress(skb, sid, ckall);
+ }
  
-       ctx->ctx_doi = XFRM_SC_DOI_LSM;
-       ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
-       ctx->ctx_sid = sid;
-       ctx->ctx_len = str_len;
-       memcpy(ctx->ctx_str,
-              ctx_str,
-              str_len);
+ int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
+ {
+       int rc;
  
-       goto out2;
+       rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0);
+       if (rc == 0 && *sid == SECSID_NULL)
+               *sid = selinux_xfrm_skb_sid_egress(skb);
  
- out:
-       *ctxp = NULL;
-       kfree(ctx);
- out2:
-       kfree(ctx_str);
        return rc;
  }
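
selinux_xfrm_skb_sid() above tries the ingress lookup first and only falls back to the egress label taken from dst->xfrm when the ingress path returned no SID. A tiny sketch of that fallback ordering; the lookup helpers here are hypothetical stand-ins, not the SELinux functions:

#include <stdio.h>

#define SECSID_NULL 0

/* Hypothetical stand-ins for the ingress/egress lookups in the hunk above. */
static int sid_ingress(unsigned int *sid) { *sid = SECSID_NULL; return 0; }
static unsigned int sid_egress(void) { return 42; }

static int skb_sid(unsigned int *sid)
{
	int rc = sid_ingress(sid);

	if (rc == 0 && *sid == SECSID_NULL)
		*sid = sid_egress();	/* fall back to the dst->xfrm label */
	return rc;
}

int main(void)
{
	unsigned int sid;
	int rc = skb_sid(&sid);

	printf("rc=%d sid=%u\n", rc, sid);
	return 0;
}
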
  
Simple merge
index 6a32c857f7042bd6640d0e27d9a8eb1476eecc30,4c826a40705cf544ca824b27414570d34005e1cf..b20afda6d484f2bc943958eaa4c56f52ce2612b8
@@@ -3978,13 -4274,32 +4274,38 @@@ static const struct hda_fixup alc269_fi
        [ALC269_FIXUP_LIMIT_INT_MIC_BOOST] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc269_fixup_limit_int_mic_boost,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
+       },
+       [ALC269VB_FIXUP_ASUS_ZENBOOK] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_limit_int_mic_boost,
+               .chained = true,
+               .chain_id = ALC269VB_FIXUP_DMIC,
+       },
+       [ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       /* class-D output amp +5dB */
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x12 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x2800 },
+                       {}
+               },
+               .chained = true,
+               .chain_id = ALC269VB_FIXUP_ASUS_ZENBOOK,
+       },
+       [ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_limit_int_mic_boost,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HP_MUTE_LED_MIC1,
        },
 +      [ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED] = {
 +              .type = HDA_FIXUP_FUNC,
 +              .v.func = alc269_fixup_limit_int_mic_boost,
 +              .chained = true,
 +              .chain_id = ALC269_FIXUP_HP_MUTE_LED_MIC1,
 +      },
        [ALC269VB_FIXUP_ORDISSIMO_EVE2] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
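
The new entries above lean on the .chained/.chain_id mechanism: applying one fixup then walks on to the next table entry, so ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A first sends its COEF verbs and then falls through to ALC269VB_FIXUP_ASUS_ZENBOOK, which in turn chains to ALC269VB_FIXUP_DMIC. A hedged sketch of that table-driven chaining with invented IDs and actions:

#include <stdio.h>

enum { FIX_VERBS, FIX_ZENBOOK, FIX_DMIC, FIX_MAX };	/* illustrative IDs only */

struct fixup {
	const char *name;
	int chained;
	int chain_id;
};

static const struct fixup table[FIX_MAX] = {
	[FIX_VERBS]   = { "set class-D coef",     1, FIX_ZENBOOK },
	[FIX_ZENBOOK] = { "limit int mic boost",  1, FIX_DMIC },
	[FIX_DMIC]    = { "enable dmic",          0, 0 },
};

/* Apply one fixup, then follow .chain_id the way the HDA fixup table is walked. */
static void apply_fixup(int id)
{
	for (;;) {
		printf("applying: %s\n", table[id].name);
		if (!table[id].chained)
			break;
		id = table[id].chain_id;
	}
}

int main(void)
{
	apply_fixup(FIX_VERBS);
	return 0;
}
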
Simple merge