From: SeokYeon Hwang
Date: Thu, 13 Nov 2014 05:17:46 +0000 (+0900)
Subject: Merge remote-tracking branch 'upstream/linux-3.14.y' into tizen_next_linux_3.14
X-Git-Tag: submit/tizen/20160422.055611~1^2~99^2~18
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=075f03f6a758090c615bef4c37e6e92230d51d48;p=sdk%2Femulator%2Femulator-kernel.git

Merge remote-tracking branch 'upstream/linux-3.14.y' into tizen_next_linux_3.14

Signed-off-by: SeokYeon Hwang

Conflicts:
    arch/arm/boot/dts/cros5250-common.dtsi
    arch/arm/mach-tegra/common.c
    arch/x86/kernel/microcode_amd.c
    drivers/staging/zram/zram_drv.c
    drivers/staging/zsmalloc/zsmalloc-main.c
    kernel/rcutree.h
    kernel/rcutree_plugin.h

Change-Id: I5fc3a16aea9daee9cdc4f59ec9f845547af5be53
---

075f03f6a758090c615bef4c37e6e92230d51d48
diff --cc drivers/acpi/blacklist.c
index f37dec579712,3d8413d02a97..523da37fa754
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@@ -291,60 -268,60 +268,108 @@@ static struct dmi_system_id acpi_osi_dm
 },
 {
 .callback = dmi_disable_osi_win8,
- .ident = "Lenovo ThinkPad Edge E530",
+ .ident = "ThinkPad Edge E530",
 .matches = {
 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad Edge E530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Acer Aspire V5-573G",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Acer Aspire V5-572G",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad T431s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad T430",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Inspiron 7737",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
 },
 },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad Edge E530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad Edge E530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Acer Aspire V5-573G",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Acer Aspire V5-572G",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad T431s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad T430",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
+ },
+ },
 /*
 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --cc drivers/block/virtio_blk.c
index f0106cc9b184,6c911c86fb5a..f5c9f5460eb4
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@@ -751,23 -562,11 +562,23 @@@ static int virtblk_probe(struct virtio_
 goto out_put_disk;
 }
- if (use_bio)
- blk_queue_make_request(q, virtblk_make_request);
+ blk_mq_init_commands(q, virtblk_init_vbr, vblk);
+ q->queuedata = vblk;
+#ifdef CONFIG_MARU // for virtio sdcard...
+ // index 0 for root partition
+ // index 1 for swap partition
+ // index over 2 for mmcblk
+ if (index > 1) {
+ snprintf(vblk->disk->disk_name, DISK_NAME_LEN, "mmcblk%d", index - 2);
+ }
+ else {
+ virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
+ }
+#else
 virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
+#endif
 vblk->disk->major = major;
 vblk->disk->first_minor = index_to_minor(index);
diff --cc drivers/cpufreq/intel_pstate.c
index d5dc567efd96,ae52c777339d..d1f443c65b71
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@@ -550,16 -709,9 +709,14 @@@ static int intel_pstate_init_cpu(unsign
 cpu = all_cpu_data[cpunum];
+ cpu->cpu = cpunum;
 intel_pstate_get_cpu_pstates(cpu);
+ if (!cpu->pstate.current_pstate) {
+ all_cpu_data[cpunum] = NULL;
+ kfree(cpu);
+ return -ENODATA;
+ }
- cpu->cpu = cpunum;
- cpu->pstate_policy =
- (struct pstate_adjust_policy *)id->driver_data;
 init_timer_deferrable(&cpu->timer);
 cpu->timer.function = intel_pstate_timer_func;
 cpu->timer.data =
diff --cc drivers/gpu/drm/Makefile
index 554762087292,292a79d64146..246b57d2d741
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@@ -48,8 -51,8 +51,9 @@@ obj-$(CONFIG_DRM_NOUVEAU) +=nouveau
 obj-$(CONFIG_DRM_EXYNOS) +=exynos/
 obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
+obj-$(CONFIG_DRM_VIGS) += vigs/
 obj-$(CONFIG_DRM_AST) += ast/
+ obj-$(CONFIG_DRM_ARMADA) += armada/
 obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
 obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-$(CONFIG_DRM_OMAP) += omapdrm/
diff --cc drivers/gpu/drm/radeon/rv770_dpm.c
index a239b30aaf9d,8fcb932a3a55..0fe69007853e
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@@ -2522,15 -2520,8 +2526,14 @@@ u32 rv770_dpm_get_mclk(struct radeon_de
 bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
 {
 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
- u32 switch_limit = 300;
-
- /* quirks */
- /* ASUS K70AF */
- if ((rdev->pdev->device == 0x9553) &&
- (rdev->pdev->subsystem_vendor == 0x1043) &&
- (rdev->pdev->subsystem_device == 0x1c42))
- switch_limit = 200;
+ u32 switch_limit = 200; /* 300 */
+
++ /* RV770 */
++ /* mclk switching doesn't seem to work reliably on desktop RV770s */
++ if ((rdev->family == CHIP_RV770) &&
++ !(rdev->flags & RADEON_IS_MOBILITY))
++ switch_limit = 0xffffffff; /* disable mclk switching */
+
 /* RV770 */
 /* mclk switching doesn't seem to work reliably on desktop RV770s */
 if ((rdev->family == CHIP_RV770) &&
diff --cc drivers/md/dm-delay.c
index 2f91d6d4a2cc,42c3a27a14cc..c00066880f10
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@@ -185,18 -182,12 +182,18 @@@ static int delay_ctr(struct dm_target *
 }
 out:
- dc->delayed_pool = mempool_create_slab_pool(128, delayed_cache);
- if (!dc->delayed_pool) {
- DMERR("Couldn't create delayed bio pool.");
- goto bad_dev_write;
+ dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
+ if (!dc->kdelayd_wq) {
+ DMERR("Couldn't start kdelayd");
+ goto bad_queue;
 }
+ dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
+ if (!dc->kdelayd_wq) {
+ DMERR("Couldn't start kdelayd");
+ goto bad_queue;
+ }
+
 setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
 INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
diff --cc drivers/media/usb/dvb-usb/dw2102.c
index 4170a45d17e0,ae0f56a32e4d..943b6972d054
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@@ -29,10 -29,12 +29,15 @@@
 #include "stb6100.h"
 #include "stb6100_proc.h"
 #include "m88rs2000.h"
+ #include "tda18271.h"
+ #include "cxd2820r.h"
+
+ /* Max transfer size done by I2C transfer functions */
+ #define MAX_XFER_SIZE 64
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
 #ifndef USB_PID_DW2102
 #define USB_PID_DW2102 0x2102
 #endif
diff --cc drivers/misc/Makefile
index 2eea55ef9fdc,99b9424ce31d..93595dec9f9d
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@@ -51,6 -50,7 +50,8 @@@ obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa
 obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
 obj-$(CONFIG_INTEL_MEI) += mei/
 obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
+obj-$(CONFIG_SLP_GLOBAL_LOCK) += slp_global_lock.o
 obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
 obj-$(CONFIG_SRAM) += sram.o
+ obj-y += mic/
+ obj-$(CONFIG_GENWQE) += genwqe/
diff --cc drivers/staging/rtl8712/usb_intf.c
index e3a005da776b,bbd5888e316b..4e1996e16a8e
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@@ -353,15 -353,10 +353,14 @@@ static void disable_ht_for_spec_devid(c
 }
 }
- static u8 key_2char2num(u8 hch, u8 lch)
- {
- return (hex_to_bin(hch) << 4) | hex_to_bin(lch);
- }
+ static const struct device_type wlan_type = {
+ .name = "wlan",
+ };
+static const struct device_type wlan_type = {
+ .name = "wlan",
+};
+
 /*
 * drv_init() - a device potentially for us
 *
diff --cc drivers/target/iscsi/iscsi_target_util.c
index 1039de499bc6,ab77f80ead2b..32843084d60e
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@@ -156,13 -156,12 +156,17 @@@ struct iscsi_cmd *iscsit_allocate_cmd(s
 {
 struct iscsi_cmd *cmd;
 struct se_session *se_sess = conn->sess->se_sess;
- int size, tag;
+ int size, tag, state = (gfp_mask & __GFP_WAIT) ? TASK_INTERRUPTIBLE :
+ TASK_RUNNING;
+
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
+ if (tag < 0)
+ return NULL;
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
+ if (tag < 0)
+ return NULL;
+
 size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
 cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
 memset(cmd, 0, size);
diff --cc drivers/video/omap2/displays-new/panel-sony-acx565akm.c
index d94f35dbd536,8e97d06921ff..ffabee9f6a16
--- a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
+++ b/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
@@@ -616,7 -630,9 +632,8 @@@ static int acx565akm_enable(struct omap
 if (omapdss_device_is_enabled(dssdev))
 return 0;
- mutex_lock(&ddata->mutex);
 r = acx565akm_panel_power_on(dssdev);
+ mutex_unlock(&ddata->mutex);
+
 if (r)
 return r;
diff --cc fs/xfs/xfs_qm.c
index 4688a622b373,6d7d1de13403..e03b3d7300c3
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@@ -225,6 -222,6 +222,14 @@@ xfs_qm_dqpurge_hints
 xfs_dqunlock(dqp);
++ xfs_dqlock(dqp);
++ if (dqp->dq_flags & XFS_DQ_FREEING) {
++ xfs_dqunlock(dqp);
++ return EAGAIN;
++ }
++
++ /* If this quota has a hint attached, prepare for releasing it now */
++ gdqp = dqp->q_gdquot;
 if (gdqp)
 xfs_qm_dqrele(gdqp);
 if (pdqp)
diff --cc include/linux/mm_types.h
index 8e082f18fb6a,2b58d192ea24..6a175451483d
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@@ -428,13 -443,7 +443,15 @@@ struct mm_struct
 /* numa_scan_seq prevents two threads setting pte_numa */
 int numa_scan_seq;
-
+ #endif
++#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+ /*
- * The first node a task was scheduled on. If a task runs on
- * a different node than Make PTE Scan Go Now.
++ * An operation with batched TLB flushing is going on. Anything that
++ * can move process memory needs to flush the TLB when moving a
++ * PROT_NONE or PROT_NUMA mapped page.
+ */
- int first_nid;
++ bool tlb_flush_pending;
+#endif
 #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
 /*
 * An operation with batched TLB flushing is going on. Anything that
diff --cc include/linux/sched.h
index b1e963efbde8,218b058060f1..b2f1186b702d
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@@ -318,19 -396,13 +396,17 @@@ arch_get_unmapped_area_topdown(struct f
 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 #endif
-
- extern void set_dumpable(struct mm_struct *mm, int value);
- extern int get_dumpable(struct mm_struct *mm);
+ #define SUID_DUMP_DISABLE 0 /* No setuid dumping */
+ #define SUID_DUMP_USER 1 /* Dump as user of process */
+ #define SUID_DUMP_ROOT 2 /* Dump as root */
+
+#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
+#define SUID_DUMP_USER 1 /* Dump as user of process */
+#define SUID_DUMP_ROOT 2 /* Dump as root */
 /* mm flags */
- /* dumpable bits */
- #define MMF_DUMPABLE 0 /* core dump is permitted */
- #define MMF_DUMP_SECURELY 1 /* core file is readable only by root */
+ /* for SUID_DUMP_* above */
 #define MMF_DUMPABLE_BITS 2
 #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
diff --cc include/linux/skbuff.h
index 9995165ff3d0,ad8f85908a56..0c4c2c9b5c38
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@@ -2338,63 -2421,55 +2421,57 @@@ static inline void skb_frag_add_head(st
 #define skb_walk_frags(skb, iter) \
 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
- extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
- int *peeked, int *off, int *err);
- extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
- int noblock, int *err);
- extern unsigned int datagram_poll(struct file *file, struct socket *sock,
- struct poll_table_struct *wait);
- extern int skb_copy_datagram_iovec(const struct sk_buff *from,
- int offset, struct iovec *to,
- int size);
- extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
- int hlen,
- struct iovec *iov);
- extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
- int offset,
- const struct iovec *from,
- int from_offset,
- int len);
- extern int zerocopy_sg_from_iovec(struct sk_buff *skb,
- const struct iovec *frm,
- int offset,
- size_t count);
- extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
- int offset,
- const struct iovec *to,
- int to_offset,
- int size);
- extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
- extern void skb_free_datagram_locked(struct sock *sk,
- struct sk_buff *skb);
- extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
- unsigned int flags);
- extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
- int len, __wsum csum);
- extern int skb_copy_bits(const struct sk_buff *skb, int offset,
- void *to, int len);
- extern int skb_store_bits(struct sk_buff *skb, int offset,
- const void *from, int len);
- extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
- int offset, u8 *to, int len,
- __wsum csum);
- extern int skb_splice_bits(struct sk_buff *skb,
- unsigned int offset,
- struct pipe_inode_info *pipe,
- unsigned int len,
- unsigned int flags);
- extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
- extern void skb_split(struct sk_buff *skb,
- struct sk_buff *skb1, const u32 len);
- extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
- int shiftlen);
- extern void skb_scrub_packet(struct sk_buff *skb, bool xnet);
-
- extern struct sk_buff *skb_segment(struct sk_buff *skb,
- netdev_features_t features);
+ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+ int *peeked, int *off, int *err);
+ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+ int *err);
+ unsigned int datagram_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
+ int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
+ struct iovec *to, int size);
+ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
+ struct iovec *iov);
+ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
+ const struct iovec *from, int from_offset,
+ int len);
+ int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm,
+ int offset, size_t count);
+ int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset,
+ const struct iovec *to, int to_offset,
+ int size);
+ void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+ void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
+ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
+ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
+ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
+ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
+ int len, __wsum csum);
+ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+ struct pipe_inode_info *pipe, unsigned int len,
+ unsigned int flags);
+ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+ unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
+ int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
+ int len, int hlen);
+ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
+ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
+ void skb_scrub_packet(struct sk_buff *skb, bool xnet);
+ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
+ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
+ struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
+
+ struct skb_checksum_ops {
+ __wsum (*update)(const void *mem, int len, __wsum wsum);
+ __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
+ };
+
+ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
+ __wsum csum, const struct skb_checksum_ops *ops);
+ __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
+ __wsum csum);
+unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
+
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
 int len, void *buffer)
 {
diff --cc include/linux/thread_info.h
index 4ae6f32c8033,a629e4b23217..fddbe2023a5d
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@@ -104,11 -104,20 +104,22 @@@ static inline int test_ti_thread_flag(s
 #define test_thread_flag(flag) \
 test_ti_thread_flag(current_thread_info(), flag)
- #define set_need_resched() set_thread_flag(TIF_NEED_RESCHED)
- #define clear_need_resched() clear_thread_flag(TIF_NEED_RESCHED)
+ static inline __deprecated void set_need_resched(void)
+ {
+ /*
+ * Use of this function in deprecated.
+ *
+ * As of this writing there are only a few users in the DRM tree left
+ * all of which are wrong and can be removed without causing too much
+ * grief.
+ *
+ * The DRM people are aware and are working on removing the last few
+ * instances.
+ */
+ }
+#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
+
 #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
 /*
 * An arch can define its own version of set_restore_sigmask() to get the
diff --cc kernel/sched/fair.c
index 790e2fc808da,9b4c4f320130..2c7e85842ee8
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@@ -967,20 -1720,26 +1720,33 @@@ void task_numa_work(struct callback_hea
 vma = mm->mmap;
 }
 for (; vma; vma = vma->vm_next) {
- if (!vma_migratable(vma))
+ if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
+ continue;
+
+ /*
+ * Shared library pages mapped by multiple processes are not
+ * migrated as it is expected they are cache replicated. Avoid
+ * hinting faults in read-only file-backed mappings or the vdso
+ * as migrating the pages will be of marginal benefit.
+ */
+ if (!vma->vm_mm ||
+ (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
 continue;
- /* Skip small VMAs. They are not likely to be of relevance */
- if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
+ /*
+ * Skip inaccessible VMAs to avoid any confusion between
+ * PROT_NONE and NUMA hinting ptes
+ */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
 continue;
+ /*
+ * Skip inaccessible VMAs to avoid any confusion between
+ * PROT_NONE and NUMA hinting ptes
+ */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+ continue;
+
 do {
 start = max(start, vma->vm_start);
 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
diff --cc kernel/trace/trace.c
index 138077b1a607,71136720ffa1..c18da575818c
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@@ -434,7 -454,13 +454,16 @@@ int __trace_puts(unsigned long ip, cons
 struct print_entry *entry;
 unsigned long irq_flags;
 int alloc;
+ int pc;
+
+ if (!(trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+ pc = preempt_count();
+
++ if (unlikely(tracing_selftest_running || tracing_disabled))
++ return 0;
+
 if (unlikely(tracing_selftest_running || tracing_disabled))
 return 0;
@@@ -477,7 -504,13 +507,16 @@@ int __trace_bputs(unsigned long ip, con
 struct bputs_entry *entry;
 unsigned long irq_flags;
 int size = sizeof(struct bputs_entry);
+ int pc;
+
+ if (!(trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+ pc = preempt_count();
+
++ if (unlikely(tracing_selftest_running || tracing_disabled))
++ return 0;
+
 if (unlikely(tracing_selftest_running || tracing_disabled))
 return 0;
diff --cc mm/huge_memory.c
index 389973fd6bb7,718bfa16a36f..603f3743acbc
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@@ -1499,21 -1547,25 +1547,27 @@@ int change_huge_pmd(struct vm_area_stru
 if (pmd_numa(entry))
 entry = pmd_mknonnuma(entry);
 entry = pmd_modify(entry, newprot);
+ ret = HPAGE_PMD_NR;
+ set_pmd_at(mm, addr, pmd, entry);
 BUG_ON(pmd_write(entry));
+ set_pmd_at(mm, addr, pmd, entry);
 } else {
 struct page *page = pmd_page(*pmd);
+ entry = *pmd;
- /* only check non-shared pages */
- if (page_mapcount(page) == 1 &&
+ /*
+ * Do not trap faults against the zero page. The
+ * read-only data is likely to be read-cached on the
+ * local CPU cache and it is less useful to know about
+ * local vs remote hits on the zero page.
+ */
+ if (!is_huge_zero_page(page) &&
 !pmd_numa(*pmd)) {
- entry = pmd_mknuma(entry);
- set_pmd_at(mm, addr, pmd, entry);
+ pmdp_set_numa(mm, addr, pmd);
+ ret = HPAGE_PMD_NR;
 }
 }
- spin_unlock(&vma->vm_mm->page_table_lock);
- ret = 1;
+ spin_unlock(ptl);
 }
 return ret;
diff --cc mm/mprotect.c
index 7651a571f283,769a67a15803..df5867793d1b
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@@ -63,20 -69,10 +69,11 @@@ static unsigned long change_pte_range(s
 } else {
 struct page *page;
+ ptent = *pte;
 page = vm_normal_page(vma, addr, oldpte);
- if (page) {
- int this_nid = page_to_nid(page);
- if (last_nid == -1)
- last_nid = this_nid;
- if (last_nid != this_nid)
- all_same_node = false;
-
- /* only check non-shared pages */
- if (!pte_numa(oldpte) &&
- page_mapcount(page) == 1) {
- ptent = pte_mknuma(ptent);
- set_pte_at(mm, addr, pte, ptent);
+ if (page && !PageKsm(page)) {
+ if (!pte_numa(oldpte)) {
+ ptep_set_numa(mm, addr, pte);
 updated = true;
 }
 }
diff --cc net/core/iovec.c
index 7d84ea1fbb20,26dc0062652f..72cba9b45105
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@@ -48,10 -48,10 +48,11 @@@ int verify_iovec(struct msghdr *m, stru
 if (err < 0)
 return err;
 }
- m->msg_name = address;
+ if (m->msg_name)
+ m->msg_name = address;
 } else {
 m->msg_name = NULL;
+ m->msg_namelen = 0;
 }
 size = m->msg_iovlen * sizeof(struct iovec);
diff --cc net/mac80211/tx.c
index d6a47e76efff,e5a7ac2f3687..74132f43b3b4
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@@ -530,22 -545,8 +535,11 @@@ ieee80211_tx_h_unicast_ps_buf(struct ie
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
 {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
+
 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
 return TX_CONTINUE;
-
- /* only deauth, disassoc and action are bufferable MMPDUs */
- if (ieee80211_is_mgmt(hdr->frame_control) &&
- !ieee80211_is_deauth(hdr->frame_control) &&
- !ieee80211_is_disassoc(hdr->frame_control) &&
- !ieee80211_is_action(hdr->frame_control)) {
- if (tx->flags & IEEE80211_TX_UNICAST)
- info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
- return TX_CONTINUE;
- }
-
 if (tx->flags & IEEE80211_TX_UNICAST)
 return ieee80211_tx_h_unicast_ps_buf(tx);
 else
diff --cc net/packet/af_packet.c
index 88cfbc189558,48b181797d7b..9b859001afa0
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@@ -2639,9 -2805,14 +2805,16 @@@ static int packet_create(struct net *ne
 po = pkt_sk(sk);
 sk->sk_family = PF_PACKET;
 po->num = proto;
+ po->xmit = dev_queue_xmit;
+
+ err = packet_alloc_pending(po);
+ if (err)
+ goto out2;
+
+ packet_cached_dev_reset(po);
+ packet_cached_dev_reset(po);
+
 sk->sk_destruct = packet_sock_destruct;
 sk_refcnt_debug_inc(sk);
diff --cc net/sched/sch_tbf.c
index fecd35af1935,4f505a006896..c96675d0285c
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@@ -294,39 -326,58 +326,63 @@@ static int tbf_change(struct Qdisc *sch
 goto done;
 qopt = nla_data(tb[TCA_TBF_PARMS]);
- rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
- if (rtab == NULL)
- goto done;
+ if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
+ qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
+ tb[TCA_TBF_RTAB]));
+
+ if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
+ qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
+ tb[TCA_TBF_PTAB]));
+
+ buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
+ mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
+
+ if (tb[TCA_TBF_RATE64])
+ rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
+ psched_ratecfg_precompute(&rate, &qopt->rate, rate64);
+
+ if (tb[TCA_TBF_BURST]) {
+ max_size = nla_get_u32(tb[TCA_TBF_BURST]);
+ buffer = psched_l2t_ns(&rate, max_size);
+ } else {
+ max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
+ }
 if (qopt->peakrate.rate) {
- if (qopt->peakrate.rate > qopt->rate.rate)
- ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
- if (ptab == NULL)
+ if (tb[TCA_TBF_PRATE64])
+ prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
+ psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
+ if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
+ pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equals to rate %llu !\n",
+ peak.rate_bytes_ps, rate.rate_bytes_ps);
+ err = -EINVAL;
 goto done;
- }
+ }
- for (n = 0; n < 256; n++)
- if (rtab->data[n] > qopt->buffer)
- break;
- max_size = (n << qopt->rate.cell_log) - 1;
- if (ptab) {
- int size;
-
- for (n = 0; n < 256; n++)
- if (ptab->data[n] > qopt->mtu)
- break;
- size = (n << qopt->peakrate.cell_log) - 1;
- if (size < max_size)
- max_size = size;
+ if (tb[TCA_TBF_PBURST]) {
+ u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
+ max_size = min_t(u32, max_size, pburst);
+ mtu = psched_l2t_ns(&peak, pburst);
+ } else {
+ max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
+ }
 }
- if (max_size < 0)
+
+ if (max_size < psched_mtu(qdisc_dev(sch)))
+ pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
+ max_size, qdisc_dev(sch)->name,
+ psched_mtu(qdisc_dev(sch)));
+
+ if (!max_size) {
+ err = -EINVAL;
 goto done;
+ }
+ if (max_size < psched_mtu(qdisc_dev(sch)))
+ pr_warn_ratelimited("sch_tbf: burst %u is lower than device %s mtu (%u) !\n",
+ max_size, qdisc_dev(sch)->name,
+ psched_mtu(qdisc_dev(sch)));
+
 if (q->qdisc != &noop_qdisc) {
 err = fifo_set_limit(q->qdisc, qopt->limit);
 if (err)
diff --cc security/selinux/xfrm.c
index 78504a18958a,98b042630a9e..22cbd112a9ed
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@@ -182,132 -255,27 +255,66 @@@ out
 return 0;
 }
+static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ struct xfrm_state *x;
+
+ if (dst == NULL)
+ return SECSID_NULL;
+ x = dst->xfrm;
+ if (x == NULL || !selinux_authorizable_xfrm(x))
+ return SECSID_NULL;
+
+ return x->security->ctx_sid;
+}
+
+/*
+ * LSM hook implementation that checks and/or returns the xfrm sid for the
+ * incoming packet.
+ */
+
+int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
+{
+ if (skb == NULL) {
+ *sid = SECSID_NULL;
+ return 0;
+ }
+ return selinux_xfrm_skb_sid_ingress(skb, sid, ckall);
+}
+
+int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
+{
+ int rc;
+
+ rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0);
+ if (rc == 0 && *sid == SECSID_NULL)
+ *sid = selinux_xfrm_skb_sid_egress(skb);
+
+ return rc;
+}
+
 /*
- * Security blob allocation for xfrm_policy and xfrm_state
- * CTX does not have a meaningful value on input
+ * LSM hook implementation that checks and/or returns the xfrm sid for the
+ * incoming packet.
 */
- static int selinux_xfrm_sec_ctx_alloc(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *uctx, u32 sid)
+ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
 {
- int rc = 0;
- const struct task_security_struct *tsec = current_security();
- struct xfrm_sec_ctx *ctx = NULL;
- char *ctx_str = NULL;
- u32 str_len;
-
- BUG_ON(uctx && sid);
-
- if (!uctx)
- goto not_from_user;
-
- if (uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
- return -EINVAL;
-
- str_len = uctx->ctx_len;
- if (str_len >= PAGE_SIZE)
- return -ENOMEM;
-
- *ctxp = ctx = kmalloc(sizeof(*ctx) +
- str_len + 1,
- GFP_KERNEL);
-
- if (!ctx)
- return -ENOMEM;
-
- ctx->ctx_doi = uctx->ctx_doi;
- ctx->ctx_len = str_len;
- ctx->ctx_alg = uctx->ctx_alg;
-
- memcpy(ctx->ctx_str,
- uctx+1,
- str_len);
- ctx->ctx_str[str_len] = 0;
- rc = security_context_to_sid(ctx->ctx_str,
- str_len,
- &ctx->ctx_sid);
-
- if (rc)
- goto out;
-
- /*
- * Does the subject have permission to set security context?
- */
- rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
- SECCLASS_ASSOCIATION,
- ASSOCIATION__SETCONTEXT, NULL);
- if (rc)
- goto out;
-
- return rc;
-
- not_from_user:
- rc = security_sid_to_context(sid, &ctx_str, &str_len);
- if (rc)
- goto out;
-
- *ctxp = ctx = kmalloc(sizeof(*ctx) +
- str_len,
- GFP_ATOMIC);
-
- if (!ctx) {
- rc = -ENOMEM;
- goto out
+ if (skb == NULL) {
+ *sid = SECSID_NULL;
+ return 0;
 }
+ return selinux_xfrm_skb_sid_ingress(skb, sid, ckall);
+ }
- ctx->ctx_doi = XFRM_SC_DOI_LSM;
- ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
- ctx->ctx_sid = sid;
- ctx->ctx_len = str_len;
- memcpy(ctx->ctx_str,
- ctx_str,
- str_len);
+ int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
+ {
+ int rc;
- goto out2;
+ rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0);
+ if (rc == 0 && *sid == SECSID_NULL)
+ *sid = selinux_xfrm_skb_sid_egress(skb);
- out:
- *ctxp = NULL;
- kfree(ctx);
- out2:
- kfree(ctx_str);
 return rc;
 }
diff --cc sound/pci/hda/patch_realtek.c
index 6a32c857f704,4c826a40705c..b20afda6d484
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@@ -3978,13 -4274,32 +4274,38 @@@ static const struct hda_fixup alc269_fi
 [ALC269_FIXUP_LIMIT_INT_MIC_BOOST] = {
 .type = HDA_FIXUP_FUNC,
 .v.func = alc269_fixup_limit_int_mic_boost,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
+ },
+ [ALC269VB_FIXUP_ASUS_ZENBOOK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_limit_int_mic_boost,
+ .chained = true,
+ .chain_id = ALC269VB_FIXUP_DMIC,
+ },
+ [ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* class-D output amp +5dB */
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x12 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2800 },
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC269VB_FIXUP_ASUS_ZENBOOK,
+ },
+ [ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_limit_int_mic_boost,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HP_MUTE_LED_MIC1,
 },
+ [ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_limit_int_mic_boost,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HP_MUTE_LED_MIC1,
+ },
 [ALC269VB_FIXUP_ORDISSIMO_EVE2] = {
 .type = HDA_FIXUP_PINS,
 .v.pins = (const struct hda_pintbl[]) {