Merge tag 'x86_urgent_for_v5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 25 Jun 2021 17:00:25 +0000 (10:00 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 25 Jun 2021 17:00:25 +0000 (10:00 -0700)
Pull x86 fixes from Borislav Petkov:
 "Two more urgent FPU fixes:

   - prevent unprivileged userspace from reinitializing supervisor
     states

   - prepare init_fpstate, which is the buffer used when initializing
     FPU state, properly in case the skip-writing-state-components
     XSAVE* variants are used"

* tag 'x86_urgent_for_v5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/fpu: Make init_fpstate correct with optimized XSAVE
  x86/fpu: Preserve supervisor states in sanitize_restored_user_xstate()

44 files changed:
MAINTAINERS
arch/arm/kernel/setup.c
arch/x86/entry/common.c
arch/x86/events/intel/lbr.c
arch/x86/include/asm/page_64.h
arch/x86/lib/retpoline.S
arch/x86/xen/enlighten_pv.c
drivers/gpio/Kconfig
drivers/gpio/gpio-mxc.c
drivers/gpio/gpiolib-cdev.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/kmb/kmb_drv.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_prime.c
drivers/gpu/drm/panel/panel-samsung-ld9040.c
drivers/gpu/drm/radeon/radeon_prime.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/mmc/host/meson-gx-mmc.c
drivers/pci/pci.c
drivers/spi/spi-nxp-fspi.c
drivers/spi/spi-tegra20-slink.c
fs/afs/write.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/super.h
fs/netfs/read_helper.c
include/linux/ceph/auth.h
include/linux/debug_locks.h
kernel/dma/swiotlb.c
kernel/locking/lockdep.c
kernel/module.c
kernel/sched/fair.c
kernel/signal.c
lib/debug_locks.c
net/ceph/auth.c
net/ceph/auth_none.c
net/ceph/auth_x.c
sound/soc/codecs/rt5645.c

index 8c5ee008301a65518d30503292980cc6f6a9fe91..b3b9a253316f931b2f7429b61196c7fa45bd1224 100644 (file)
@@ -7354,7 +7354,6 @@ F:        drivers/net/ethernet/freescale/fs_enet/
 F:     include/linux/fs_enet_pd.h
 
 FREESCALE SOC SOUND DRIVERS
-M:     Timur Tabi <timur@kernel.org>
 M:     Nicolin Chen <nicoleotsuka@gmail.com>
 M:     Xiubo Li <Xiubo.Lee@gmail.com>
 R:     Fabio Estevam <festevam@gmail.com>
index 1a5edf562e85efe5f33745548326e4d4a5373b24..73ca7797b92f62f9ba4c1d977165702d1b22a8a5 100644 (file)
@@ -545,9 +545,11 @@ void notrace cpu_init(void)
         * In Thumb-2, msr with an immediate value is not allowed.
         */
 #ifdef CONFIG_THUMB2_KERNEL
-#define PLC    "r"
+#define PLC_l  "l"
+#define PLC_r  "r"
 #else
-#define PLC    "I"
+#define PLC_l  "I"
+#define PLC_r  "I"
 #endif
 
        /*
@@ -569,15 +571,15 @@ void notrace cpu_init(void)
        "msr    cpsr_c, %9"
            :
            : "r" (stk),
-             PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+             PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
-             PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+             PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
-             PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+             PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
-             PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
+             PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
              "I" (offsetof(struct stack, fiq[0])),
-             PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+             PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
 #endif
 }
index 7b2542b13ebd9683748677df37604adb911c4b40..04bce95bc7e3bbb0a83bf49ac0b02917ce049f23 100644 (file)
@@ -130,8 +130,8 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
                /* User code screwed up. */
                regs->ax = -EFAULT;
 
-               instrumentation_end();
                local_irq_disable();
+               instrumentation_end();
                irqentry_exit_to_user_mode(regs);
                return false;
        }
@@ -269,15 +269,16 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
        irqentry_state_t state = irqentry_enter(regs);
        bool inhcall;
 
+       instrumentation_begin();
        run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
 
        inhcall = get_and_clear_inhcall();
        if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
-               instrumentation_begin();
                irqentry_exit_cond_resched();
                instrumentation_end();
                restore_inhcall(inhcall);
        } else {
+               instrumentation_end();
                irqentry_exit(regs, state);
        }
 }
index 4409d2cccfda574fce38c8bef90ea97156313394..e8453de7a96485700e308f340ecd16fd284472c1 100644 (file)
@@ -731,7 +731,8 @@ void reserve_lbr_buffers(void)
                if (!kmem_cache || cpuc->lbr_xsave)
                        continue;
 
-               cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache, GFP_KERNEL,
+               cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache,
+                                                       GFP_KERNEL | __GFP_ZERO,
                                                        cpu_to_node(cpu));
        }
 }
index ca840fec777654fd4be79b0ca0083b26ab445714..4bde0dc66100cd6d8207f317b2eec59ebc5cc57a 100644 (file)
@@ -75,7 +75,7 @@ void copy_page(void *to, void *from);
  *
  * With page table isolation enabled, we map the LDT in ... [stay tuned]
  */
-static inline unsigned long task_size_max(void)
+static __always_inline unsigned long task_size_max(void)
 {
        unsigned long ret;
 
index 4d32cb06ffd5b2b6174e49bdce58630dc6286be3..ec9922cba30a4b38fd9f1e87afa29d6ee17c675e 100644 (file)
@@ -58,12 +58,16 @@ SYM_FUNC_START_NOALIGN(__x86_indirect_alt_call_\reg)
 2:     .skip   5-(2b-1b), 0x90
 SYM_FUNC_END(__x86_indirect_alt_call_\reg)
 
+STACK_FRAME_NON_STANDARD(__x86_indirect_alt_call_\reg)
+
 SYM_FUNC_START_NOALIGN(__x86_indirect_alt_jmp_\reg)
        ANNOTATE_RETPOLINE_SAFE
 1:     jmp     *%\reg
 2:     .skip   5-(2b-1b), 0x90
 SYM_FUNC_END(__x86_indirect_alt_jmp_\reg)
 
+STACK_FRAME_NON_STANDARD(__x86_indirect_alt_jmp_\reg)
+
 .endm
 
 /*
index e87699aa2dc8263d29054bba0adf937f7841e978..03149422dce2ba1024926ccd80acab010a08dd4b 100644 (file)
@@ -592,8 +592,10 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
 DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
 {
        /* This should never happen and there is no way to handle it. */
+       instrumentation_begin();
        pr_err("Unknown trap in Xen PV mode.");
        BUG();
+       instrumentation_end();
 }
 
 #ifdef CONFIG_X86_MCE
index 1dd0ec6727fde20db48edb2c3c01f6cde8f6b391..3c69b785cb79d465ac2ff70350a30e4db5f4edad 100644 (file)
@@ -1383,6 +1383,7 @@ config GPIO_TPS68470
 config GPIO_TQMX86
        tristate "TQ-Systems QTMX86 GPIO"
        depends on MFD_TQMX86 || COMPILE_TEST
+       depends on HAS_IOPORT_MAP
        select GPIOLIB_IRQCHIP
        help
          This driver supports GPIO on the TQMX86 IO controller.
@@ -1450,6 +1451,7 @@ menu "PCI GPIO expanders"
 config GPIO_AMD8111
        tristate "AMD 8111 GPIO driver"
        depends on X86 || COMPILE_TEST
+       depends on HAS_IOPORT_MAP
        help
          The AMD 8111 south bridge contains 32 GPIO pins which can be used.
 
index 157106e1e43817b07ea660c6ee5233ba0bcd058f..b9fdf05d766947ece7d242516306f8ade3de23be 100644 (file)
@@ -334,7 +334,7 @@ static int mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
        ct->chip.irq_unmask = irq_gc_mask_set_bit;
        ct->chip.irq_set_type = gpio_set_irq_type;
        ct->chip.irq_set_wake = gpio_set_wake_irq;
-       ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+       ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND;
        ct->regs.ack = GPIO_ISR;
        ct->regs.mask = GPIO_IMR;
 
index 1631727bf0da1ce90d87b36aa3625cc79d12fab1..c7b5446d01fd2363fa65c9d681cfd52bcfe042e7 100644 (file)
@@ -1880,6 +1880,7 @@ static void gpio_v2_line_info_changed_to_v1(
                struct gpio_v2_line_info_changed *lic_v2,
                struct gpioline_info_changed *lic_v1)
 {
+       memset(lic_v1, 0, sizeof(*lic_v1));
        gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
        lic_v1->timestamp = lic_v2->timestamp_ns;
        lic_v1->event_type = lic_v2->event_type;
index c13985fb35bed8a8388dfe8f237ab85bc1bb6c78..2a4cd7d377bfaf798545ff7f239ea1373a561b5a 100644 (file)
@@ -1047,11 +1047,12 @@ int amdgpu_display_gem_fb_init(struct drm_device *dev,
 
        rfb->base.obj[0] = obj;
        drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
-       ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+
+       ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
        if (ret)
                goto err;
 
-       ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
+       ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
        if (ret)
                goto err;
 
@@ -1071,9 +1072,6 @@ int amdgpu_display_gem_fb_verify_and_init(
 
        rfb->base.obj[0] = obj;
        drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
-       ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
-       if (ret)
-               goto err;
        /* Verify that the modifier is supported. */
        if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
                                      mode_cmd->modifier[0])) {
@@ -1092,6 +1090,10 @@ int amdgpu_display_gem_fb_verify_and_init(
        if (ret)
                goto err;
 
+       ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+       if (ret)
+               goto err;
+
        return 0;
 err:
        drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
index baa980a477d9449d1a5a4f7c10ef1e3328b0031d..37ec593650803579e7911d7e34c88151ee1c333e 100644 (file)
@@ -214,9 +214,21 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
 {
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+       int r;
 
        /* pin buffer into GTT */
-       return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+       r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+       if (r)
+               return r;
+
+       if (bo->tbo.moving) {
+               r = dma_fence_wait(bo->tbo.moving, true);
+               if (r) {
+                       amdgpu_bo_unpin(bo);
+                       return r;
+               }
+       }
+       return 0;
 }
 
 /**
index 327b1f8213a8bd2a4fe079e7507d67aeefa8e3d5..0597aeb5f0e8988aab8a0dac29aecacdae65f19a 100644 (file)
@@ -6871,12 +6871,8 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
        if (ring->use_doorbell) {
                WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
                        (adev->doorbell_index.kiq * 2) << 2);
-               /* If GC has entered CGPG, ringing doorbell > first page doesn't
-                * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
-                * this issue.
-                */
                WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
-                       (adev->doorbell.size - 4));
+                       (adev->doorbell_index.userqueue_end * 2) << 2);
        }
 
        WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
index c09225d065c277aa6d2718f16fa5eed59c123fc8..516467e962b727bb3f079d20536825de0886fc9c 100644 (file)
@@ -3673,12 +3673,8 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
        if (ring->use_doorbell) {
                WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
                                        (adev->doorbell_index.kiq * 2) << 2);
-               /* If GC has entered CGPG, ringing doorbell > first page doesn't
-                * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
-                * this issue.
-                */
                WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
-                                       (adev->doorbell.size - 4));
+                                       (adev->doorbell_index.userqueue_end * 2) << 2);
        }
 
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
index 05ad75d155e8407f18a3b6af72a835a958cb8d32..cfe4fc69277e6865ef812d82b6eadb0a34839a3a 100644 (file)
@@ -232,7 +232,6 @@ static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
 
        pm_runtime_put_sync(dev->dev);
 
-       drm_crtc_vblank_on(c);
 }
 
 #define ATMEL_HLCDC_RGB444_OUTPUT      BIT(0)
@@ -343,8 +342,17 @@ static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
 
 static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
                                          struct drm_atomic_state *state)
+{
+       drm_crtc_vblank_on(c);
+}
+
+static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *c,
+                                         struct drm_atomic_state *state)
 {
        struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+       unsigned long flags;
+
+       spin_lock_irqsave(&c->dev->event_lock, flags);
 
        if (c->state->event) {
                c->state->event->pipe = drm_crtc_index(c);
@@ -354,12 +362,7 @@ static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
                crtc->event = c->state->event;
                c->state->event = NULL;
        }
-}
-
-static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
-                                         struct drm_atomic_state *state)
-{
-       /* TODO: write common plane control register if available */
+       spin_unlock_irqrestore(&c->dev->event_lock, flags);
 }
 
 static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
index 65af56e4712943a5eff0f2bfcc597dc66a72da38..f09b6dd8754c628728c5d81cedc85451c84b35ed 100644 (file)
@@ -593,6 +593,7 @@ static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
        dev->mode_config.max_width = dc->desc->max_width;
        dev->mode_config.max_height = dc->desc->max_height;
        dev->mode_config.funcs = &mode_config_funcs;
+       dev->mode_config.async_page_flip = true;
 
        return 0;
 }
index f64e06e1067dd8d373d270b3c2ee9697cd7aa40d..96ea1a2c11dd6a3771c111713b8dbe09eb769b74 100644 (file)
@@ -137,6 +137,7 @@ static int kmb_hw_init(struct drm_device *drm, unsigned long flags)
        /* Allocate LCD interrupt resources */
        irq_lcd = platform_get_irq(pdev, 0);
        if (irq_lcd < 0) {
+               ret = irq_lcd;
                drm_err(&kmb->drm, "irq_lcd not found");
                goto setup_fail;
        }
index 3e09df0472ce40183caf0babe242ac61ee3fe519..170aba99a11015ebab91f318ccfa11b8b1c98cca 100644 (file)
@@ -546,7 +546,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
        struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
        int i, j;
 
-       if (!ttm_dma)
+       if (!ttm_dma || !ttm_dma->dma_address)
                return;
        if (!ttm_dma->pages) {
                NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
@@ -582,7 +582,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
        struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
        int i, j;
 
-       if (!ttm_dma)
+       if (!ttm_dma || !ttm_dma->dma_address)
                return;
        if (!ttm_dma->pages) {
                NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
index 347488685f745a2cd80a88b431769daa59006371..60019d0532fcff81cd499733d41952afafa91323 100644 (file)
@@ -93,7 +93,22 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
        if (ret)
                return -EINVAL;
 
-       return 0;
+       ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
+       if (ret)
+               goto error;
+
+       if (nvbo->bo.moving)
+               ret = dma_fence_wait(nvbo->bo.moving, true);
+
+       ttm_bo_unreserve(&nvbo->bo);
+       if (ret)
+               goto error;
+
+       return ret;
+
+error:
+       nouveau_bo_unpin(nvbo);
+       return ret;
 }
 
 void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
index f484147fc3a668ea4682fc814041e2b3b17fc24a..c4b388850a13e01f0cadc02a8a541c6dc91fabf1 100644 (file)
@@ -383,6 +383,7 @@ MODULE_DEVICE_TABLE(spi, ld9040_ids);
 static struct spi_driver ld9040_driver = {
        .probe = ld9040_probe,
        .remove = ld9040_remove,
+       .id_table = ld9040_ids,
        .driver = {
                .name = "panel-samsung-ld9040",
                .of_match_table = ld9040_of_match,
index 42a87948e28c5bed77428455708c5f5a85b8743d..4a90807351e72d7ffcf0484867600ab19e2ef5e1 100644 (file)
@@ -77,9 +77,19 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
 
        /* pin buffer into GTT */
        ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
-       if (likely(ret == 0))
-               bo->prime_shared_count++;
-
+       if (unlikely(ret))
+               goto error;
+
+       if (bo->tbo.moving) {
+               ret = dma_fence_wait(bo->tbo.moving, false);
+               if (unlikely(ret)) {
+                       radeon_bo_unpin(bo);
+                       goto error;
+               }
+       }
+
+       bo->prime_shared_count++;
+error:
        radeon_bo_unreserve(bo);
        return ret;
 }
index 1fda574579afc326d7d7d29f8a3bd94d15133d34..8106b5634fe10e1af22a5e98506d0d4157f27150 100644 (file)
@@ -159,6 +159,8 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
        struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
        bool connected = false;
 
+       WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
+
        if (vc4_hdmi->hpd_gpio) {
                if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^
                    vc4_hdmi->hpd_active_low)
@@ -180,10 +182,12 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
                        }
                }
 
+               pm_runtime_put(&vc4_hdmi->pdev->dev);
                return connector_status_connected;
        }
 
        cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+       pm_runtime_put(&vc4_hdmi->pdev->dev);
        return connector_status_disconnected;
 }
 
@@ -473,7 +477,6 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
                   HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
 
        clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
-       clk_disable_unprepare(vc4_hdmi->hsm_clock);
        clk_disable_unprepare(vc4_hdmi->pixel_clock);
 
        ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
@@ -784,13 +787,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
                return;
        }
 
-       ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
-       if (ret) {
-               DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
-               clk_disable_unprepare(vc4_hdmi->pixel_clock);
-               return;
-       }
-
        vc4_hdmi_cec_update_clk_div(vc4_hdmi);
 
        /*
@@ -801,7 +797,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
                               (hsm_rate > VC4_HSM_MID_CLOCK ? 150000000 : 75000000));
        if (ret) {
                DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
-               clk_disable_unprepare(vc4_hdmi->hsm_clock);
                clk_disable_unprepare(vc4_hdmi->pixel_clock);
                return;
        }
@@ -809,7 +804,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
        ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
        if (ret) {
                DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
-               clk_disable_unprepare(vc4_hdmi->hsm_clock);
                clk_disable_unprepare(vc4_hdmi->pixel_clock);
                return;
        }
@@ -1929,6 +1923,29 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int vc4_hdmi_runtime_suspend(struct device *dev)
+{
+       struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+
+       clk_disable_unprepare(vc4_hdmi->hsm_clock);
+
+       return 0;
+}
+
+static int vc4_hdmi_runtime_resume(struct device *dev)
+{
+       struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+       int ret;
+
+       ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+#endif
+
 static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
        const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
@@ -2165,11 +2182,18 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
        {}
 };
 
+static const struct dev_pm_ops vc4_hdmi_pm_ops = {
+       SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
+                          vc4_hdmi_runtime_resume,
+                          NULL)
+};
+
 struct platform_driver vc4_hdmi_driver = {
        .probe = vc4_hdmi_dev_probe,
        .remove = vc4_hdmi_dev_remove,
        .driver = {
                .name = "vc4_hdmi",
                .of_match_table = vc4_hdmi_dt_match,
+               .pm = &vc4_hdmi_pm_ops,
        },
 };
index 016a6106151a5bcdd75a66483ed2d57842c69124..3f28eb4d17fe79a9c13f37ff0d5cf5afb11a4db0 100644 (file)
@@ -165,6 +165,7 @@ struct meson_host {
 
        unsigned int bounce_buf_size;
        void *bounce_buf;
+       void __iomem *bounce_iomem_buf;
        dma_addr_t bounce_dma_addr;
        struct sd_emmc_desc *descs;
        dma_addr_t descs_dma_addr;
@@ -745,6 +746,47 @@ static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
        writel(start, host->regs + SD_EMMC_START);
 }
 
+/* local sg copy to buffer version with _to/fromio usage for dram_access_quirk */
+static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
+                                 size_t buflen, bool to_buffer)
+{
+       unsigned int sg_flags = SG_MITER_ATOMIC;
+       struct scatterlist *sgl = data->sg;
+       unsigned int nents = data->sg_len;
+       struct sg_mapping_iter miter;
+       unsigned int offset = 0;
+
+       if (to_buffer)
+               sg_flags |= SG_MITER_FROM_SG;
+       else
+               sg_flags |= SG_MITER_TO_SG;
+
+       sg_miter_start(&miter, sgl, nents, sg_flags);
+
+       while ((offset < buflen) && sg_miter_next(&miter)) {
+               unsigned int len;
+
+               len = min(miter.length, buflen - offset);
+
+               /* When dram_access_quirk, the bounce buffer is a iomem mapping */
+               if (host->dram_access_quirk) {
+                       if (to_buffer)
+                               memcpy_toio(host->bounce_iomem_buf + offset, miter.addr, len);
+                       else
+                               memcpy_fromio(miter.addr, host->bounce_iomem_buf + offset, len);
+               } else {
+                       if (to_buffer)
+                               memcpy(host->bounce_buf + offset, miter.addr, len);
+                       else
+                               memcpy(miter.addr, host->bounce_buf + offset, len);
+               }
+
+               offset += len;
+       }
+
+       sg_miter_stop(&miter);
+}
+
 static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
 {
        struct meson_host *host = mmc_priv(mmc);
@@ -788,8 +830,7 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
                if (data->flags & MMC_DATA_WRITE) {
                        cmd_cfg |= CMD_CFG_DATA_WR;
                        WARN_ON(xfer_bytes > host->bounce_buf_size);
-                       sg_copy_to_buffer(data->sg, data->sg_len,
-                                         host->bounce_buf, xfer_bytes);
+                       meson_mmc_copy_buffer(host, data, xfer_bytes, true);
                        dma_wmb();
                }
 
@@ -958,8 +999,7 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
        if (meson_mmc_bounce_buf_read(data)) {
                xfer_bytes = data->blksz * data->blocks;
                WARN_ON(xfer_bytes > host->bounce_buf_size);
-               sg_copy_from_buffer(data->sg, data->sg_len,
-                                   host->bounce_buf, xfer_bytes);
+               meson_mmc_copy_buffer(host, data, xfer_bytes, false);
        }
 
        next_cmd = meson_mmc_get_next_command(cmd);
@@ -1179,7 +1219,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
                 * instead of the DDR memory
                 */
                host->bounce_buf_size = SD_EMMC_SRAM_DATA_BUF_LEN;
-               host->bounce_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
+               host->bounce_iomem_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
                host->bounce_dma_addr = res->start + SD_EMMC_SRAM_DATA_BUF_OFF;
        } else {
                /* data bounce buffer */
index b717680377a9e7380d59dca5a1c7c67d6cd9b92c..8d4ebe095d0c8041c608a908fd1b74c207a11ff0 100644 (file)
@@ -1900,11 +1900,21 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
        int err;
        int i, bars = 0;
 
-       if (atomic_inc_return(&dev->enable_cnt) > 1) {
-               pci_update_current_state(dev, dev->current_state);
-               return 0;               /* already enabled */
+       /*
+        * Power state could be unknown at this point, either due to a fresh
+        * boot or a device removal call.  So get the current power state
+        * so that things like MSI message writing will behave as expected
+        * (e.g. if the device really is in D0 at enable time).
+        */
+       if (dev->pm_cap) {
+               u16 pmcsr;
+               pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+               dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        }
 
+       if (atomic_inc_return(&dev->enable_cnt) > 1)
+               return 0;               /* already enabled */
+
        bridge = pci_upstream_bridge(dev);
        if (bridge)
                pci_enable_bridge(bridge);
index 6e6c2403944dd21d23b1a4501c4c1706475ffa41..a66fa97046ee1d8f17f0d11ed5e3fb688e37a87b 100644 (file)
@@ -1124,12 +1124,6 @@ static int nxp_fspi_probe(struct platform_device *pdev)
                goto err_put_ctrl;
        }
 
-       /* Clear potential interrupts */
-       reg = fspi_readl(f, f->iobase + FSPI_INTR);
-       if (reg)
-               fspi_writel(f, reg, f->iobase + FSPI_INTR);
-
-
        /* find the resources - controller memory mapped space */
        if (is_acpi_node(f->dev->fwnode))
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -1167,6 +1161,11 @@ static int nxp_fspi_probe(struct platform_device *pdev)
                }
        }
 
+       /* Clear potential interrupts */
+       reg = fspi_readl(f, f->iobase + FSPI_INTR);
+       if (reg)
+               fspi_writel(f, reg, f->iobase + FSPI_INTR);
+
        /* find the irq */
        ret = platform_get_irq(pdev, 0);
        if (ret < 0)
index f7c832fd40036c725405ee059193e5afe65e61f2..6a726c95ac7a89a523a73ca10e1f3249e6852756 100644 (file)
@@ -1118,6 +1118,11 @@ static int tegra_slink_probe(struct platform_device *pdev)
                pm_runtime_put_noidle(&pdev->dev);
                goto exit_pm_disable;
        }
+
+       reset_control_assert(tspi->rst);
+       udelay(2);
+       reset_control_deassert(tspi->rst);
+
        tspi->def_command_reg  = SLINK_M_S;
        tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
        tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
index e9ccaa3baf2e65fbf8bbd958f638f1ab61cf5445..3104b62c208263f31924774ad5f6692e1955f2cb 100644 (file)
@@ -118,6 +118,15 @@ int afs_write_end(struct file *file, struct address_space *mapping,
        _enter("{%llx:%llu},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, page->index);
 
+       if (!PageUptodate(page)) {
+               if (copied < len) {
+                       copied = 0;
+                       goto out;
+               }
+
+               SetPageUptodate(page);
+       }
+
        if (copied == 0)
                goto out;
 
@@ -132,8 +141,6 @@ int afs_write_end(struct file *file, struct address_space *mapping,
                write_sequnlock(&vnode->cb_lock);
        }
 
-       ASSERT(PageUptodate(page));
-
        if (PagePrivate(page)) {
                priv = page_private(page);
                f = afs_page_dirty_from(page, priv);
index 5624fae7a603dad12f84e3de7f11a11fdada5ad2..9ba79b6531fba55451ff1a4a5ebf45e4a93fd442 100644 (file)
@@ -668,14 +668,13 @@ out:
  * Handle lookups for the hidden .snap directory.
  */
 struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
-                                  struct dentry *dentry, int err)
+                                  struct dentry *dentry)
 {
        struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
        struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */
 
        /* .snap dir? */
-       if (err == -ENOENT &&
-           ceph_snap(parent) == CEPH_NOSNAP &&
+       if (ceph_snap(parent) == CEPH_NOSNAP &&
            strcmp(dentry->d_name.name, fsc->mount_options->snapdir_name) == 0) {
                struct dentry *res;
                struct inode *inode = ceph_get_snapdir(parent);
@@ -742,7 +741,6 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
        struct ceph_mds_request *req;
-       struct dentry *res;
        int op;
        int mask;
        int err;
@@ -793,12 +791,16 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
        req->r_parent = dir;
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        err = ceph_mdsc_do_request(mdsc, NULL, req);
-       res = ceph_handle_snapdir(req, dentry, err);
-       if (IS_ERR(res)) {
-               err = PTR_ERR(res);
-       } else {
-               dentry = res;
-               err = 0;
+       if (err == -ENOENT) {
+               struct dentry *res;
+
+               res = ceph_handle_snapdir(req, dentry);
+               if (IS_ERR(res)) {
+                       err = PTR_ERR(res);
+               } else {
+                       dentry = res;
+                       err = 0;
+               }
        }
        dentry = ceph_finish_lookup(req, dentry, err);
        ceph_mdsc_put_request(req);  /* will dput(dentry) */
index 77fc037d5bebeee91894864ec398b651d77317a2..d51af36980324e6ded4ec63a2214d0f17b3b64ce 100644 (file)
@@ -578,6 +578,7 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct inode *inode;
        struct timespec64 now;
+       struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
        struct ceph_vino vino = { .ino = req->r_deleg_ino,
                                  .snap = CEPH_NOSNAP };
 
@@ -615,8 +616,10 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
 
        ceph_file_layout_to_legacy(lo, &in.layout);
 
+       down_read(&mdsc->snap_rwsem);
        ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
                              req->r_fmode, NULL);
+       up_read(&mdsc->snap_rwsem);
        if (ret) {
                dout("%s failed to fill inode: %d\n", __func__, ret);
                ceph_dir_clear_complete(dir);
@@ -739,14 +742,16 @@ retry:
        err = ceph_mdsc_do_request(mdsc,
                                   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
                                   req);
-       dentry = ceph_handle_snapdir(req, dentry, err);
-       if (IS_ERR(dentry)) {
-               err = PTR_ERR(dentry);
-               goto out_req;
+       if (err == -ENOENT) {
+               dentry = ceph_handle_snapdir(req, dentry);
+               if (IS_ERR(dentry)) {
+                       err = PTR_ERR(dentry);
+                       goto out_req;
+               }
+               err = 0;
        }
-       err = 0;
 
-       if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
+       if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
 
        if (d_in_lookup(dentry)) {
index e1c63adb196ddcb01b39b543e075f2256945e9b1..df0c8a724609d3b575583be8ff2e871193d59226 100644 (file)
@@ -777,6 +777,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
        umode_t mode = le32_to_cpu(info->mode);
        dev_t rdev = le32_to_cpu(info->rdev);
 
+       lockdep_assert_held(&mdsc->snap_rwsem);
+
        dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
             inode, ceph_vinop(inode), le64_to_cpu(info->version),
             ci->i_version);
index db80d89556b10674b03d161116ab9ff440c63125..839e6b0239eeb7eb28c50a8be289bfe6a69df09c 100644 (file)
@@ -1218,7 +1218,7 @@ extern const struct dentry_operations ceph_dentry_ops;
 extern loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order);
 extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry);
 extern struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
-                              struct dentry *dentry, int err);
+                              struct dentry *dentry);
 extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
                                         struct dentry *dentry, int err);
 
index 725614625ed4864cf6695aad85e2611b7946b5ed..0b6cd3b8734c6e1643baad8c5bd668cd161c6988 100644 (file)
@@ -1011,12 +1011,42 @@ out:
 }
 EXPORT_SYMBOL(netfs_readpage);
 
-static void netfs_clear_thp(struct page *page)
+/**
+ * netfs_skip_page_read - prep a page for writing without reading first
+ * @page: page being prepared
+ * @pos: starting position for the write
+ * @len: length of write
+ *
+ * In some cases, write_begin doesn't need to read at all:
+ * - full page write
+ * - write that lies in a page that is completely beyond EOF
+ * - write that covers the page from start to EOF or beyond it
+ *
+ * If any of these criteria are met, then zero out the unwritten parts
+ * of the page and return true. Otherwise, return false.
+ */
+static bool netfs_skip_page_read(struct page *page, loff_t pos, size_t len)
 {
-       unsigned int i;
+       struct inode *inode = page->mapping->host;
+       loff_t i_size = i_size_read(inode);
+       size_t offset = offset_in_thp(page, pos);
+
+       /* Full page write */
+       if (offset == 0 && len >= thp_size(page))
+               return true;
+
+       /* pos beyond last page in the file */
+       if (pos - offset >= i_size)
+               goto zero_out;
+
+       /* Write that covers from the start of the page to EOF or beyond */
+       if (offset == 0 && (pos + len) >= i_size)
+               goto zero_out;
 
-       for (i = 0; i < thp_nr_pages(page); i++)
-               clear_highpage(page + i);
+       return false;
+zero_out:
+       zero_user_segments(page, 0, offset, offset + len, thp_size(page));
+       return true;
 }
 
 /**
@@ -1024,7 +1054,7 @@ static void netfs_clear_thp(struct page *page)
  * @file: The file to read from
  * @mapping: The mapping to read from
  * @pos: File position at which the write will begin
- * @len: The length of the write in this page
+ * @len: The length of the write (may extend beyond the end of the page chosen)
  * @flags: AOP_* flags
  * @_page: Where to put the resultant page
  * @_fsdata: Place for the netfs to store a cookie
@@ -1061,8 +1091,6 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
        struct inode *inode = file_inode(file);
        unsigned int debug_index = 0;
        pgoff_t index = pos >> PAGE_SHIFT;
-       int pos_in_page = pos & ~PAGE_MASK;
-       loff_t size;
        int ret;
 
        DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
@@ -1090,13 +1118,8 @@ retry:
         * within the cache granule containing the EOF, in which case we need
         * to preload the granule.
         */
-       size = i_size_read(inode);
        if (!ops->is_cache_enabled(inode) &&
-           ((pos_in_page == 0 && len == thp_size(page)) ||
-            (pos >= size) ||
-            (pos_in_page == 0 && (pos + len) >= size))) {
-               netfs_clear_thp(page);
-               SetPageUptodate(page);
+           netfs_skip_page_read(page, pos, len)) {
                netfs_stat(&netfs_n_rh_write_zskip);
                goto have_page_no_wait;
        }
index 71b5d481c653028dd553a889bb306f423986c823..6b138fa97db85826c8e2f645a7f5706c0d5d4493 100644 (file)
@@ -50,7 +50,7 @@ struct ceph_auth_client_ops {
         * another request.
         */
        int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end);
-       int (*handle_reply)(struct ceph_auth_client *ac, int result,
+       int (*handle_reply)(struct ceph_auth_client *ac, u64 global_id,
                            void *buf, void *end, u8 *session_key,
                            int *session_key_len, u8 *con_secret,
                            int *con_secret_len);
@@ -104,6 +104,8 @@ struct ceph_auth_client {
        struct mutex mutex;
 };
 
+void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id);
+
 struct ceph_auth_client *ceph_auth_init(const char *name,
                                        const struct ceph_crypto_key *key,
                                        const int *con_modes);
index 2915f56ad4214f1586df36665feb16f3b97336ca..edb5c186b0b7ae6a52c0ac19f84141f7cbc72f5f 100644 (file)
@@ -27,8 +27,10 @@ extern int debug_locks_off(void);
        int __ret = 0;                                                  \
                                                                        \
        if (!oops_in_progress && unlikely(c)) {                         \
+               instrumentation_begin();                                \
                if (debug_locks_off() && !debug_locks_silent)           \
                        WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c);         \
+               instrumentation_end();                                  \
                __ret = 1;                                              \
        }                                                               \
        __ret;                                                          \
index 8ca7d505d61cf287c775bcb44763ea884da6d1da..e50df8d8f87e28caa858cf156d1f1737d7128a7a 100644 (file)
@@ -334,6 +334,14 @@ void __init swiotlb_exit(void)
        io_tlb_default_mem = NULL;
 }
 
+/*
+ * Return the offset into an iotlb slot required to keep the device happy.
+ */
+static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
+{
+       return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
+}
+
 /*
  * Bounce: copy the swiotlb buffer from or back to the original dma location
  */
@@ -346,10 +354,17 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
        size_t alloc_size = mem->slots[index].alloc_size;
        unsigned long pfn = PFN_DOWN(orig_addr);
        unsigned char *vaddr = phys_to_virt(tlb_addr);
+       unsigned int tlb_offset;
 
        if (orig_addr == INVALID_PHYS_ADDR)
                return;
 
+       tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) -
+                    swiotlb_align_offset(dev, orig_addr);
+
+       orig_addr += tlb_offset;
+       alloc_size -= tlb_offset;
+
        if (size > alloc_size) {
                dev_WARN_ONCE(dev, 1,
                        "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
@@ -390,14 +405,6 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
 
 #define slot_addr(start, idx)  ((start) + ((idx) << IO_TLB_SHIFT))
 
-/*
- * Return the offset into a iotlb slot required to keep the device happy.
- */
-static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
-{
-       return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
-}
-
 /*
  * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
  */
index 7641bd4072390de193fdb28489a3dad76c132586..e32313072506d8c3123b98ce6d4bf8a018eff5ff 100644 (file)
@@ -843,7 +843,7 @@ static int count_matching_names(struct lock_class *new_class)
 }
 
 /* used from NMI context -- must be lockless */
-static __always_inline struct lock_class *
+static noinstr struct lock_class *
 look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
 {
        struct lockdep_subclass_key *key;
@@ -851,12 +851,14 @@ look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
        struct lock_class *class;
 
        if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
+               instrumentation_begin();
                debug_locks_off();
                printk(KERN_ERR
                        "BUG: looking up invalid subclass: %u\n", subclass);
                printk(KERN_ERR
                        "turning off the locking correctness validator.\n");
                dump_stack();
+               instrumentation_end();
                return NULL;
        }
 
index 7e78dfabca97fc652b1e2f7fd4d4e6168ae3a149..927d46cb8eb930fff0468730f91841676ddbfdc2 100644 (file)
@@ -266,9 +266,18 @@ static void module_assert_mutex_or_preempt(void)
 #endif
 }
 
+#ifdef CONFIG_MODULE_SIG
 static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
 module_param(sig_enforce, bool_enable_only, 0644);
 
+void set_module_sig_enforced(void)
+{
+       sig_enforce = true;
+}
+#else
+#define sig_enforce false
+#endif
+
 /*
  * Export sig_enforce kernel cmdline parameter to allow other subsystems rely
  * on that instead of directly to CONFIG_MODULE_SIG_FORCE config.
@@ -279,11 +288,6 @@ bool is_module_sig_enforced(void)
 }
 EXPORT_SYMBOL(is_module_sig_enforced);
 
-void set_module_sig_enforced(void)
-{
-       sig_enforce = true;
-}
-
 /* Block module loading/unloading? */
 int modules_disabled = 0;
 core_param(nomodule, modules_disabled, bint, 0);
index bfaa6e1f6067dde7e715f3f18d49629464c6686b..23663318fb81abc1f766cbacbefe28d17ab0901f 100644 (file)
@@ -3298,6 +3298,31 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 
 #ifdef CONFIG_SMP
 #ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
+ * immediately before a parent cfs_rq, and cfs_rqs are removed from the list
+ * bottom-up, we only have to test whether the cfs_rq before us on the list
+ * is our child.
+ * If cfs_rq is not on the list, test whether a child needs it to be added to
+ * connect a branch to the tree (see list_add_leaf_cfs_rq() for details).
+ */
+static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
+{
+       struct cfs_rq *prev_cfs_rq;
+       struct list_head *prev;
+
+       if (cfs_rq->on_list) {
+               prev = cfs_rq->leaf_cfs_rq_list.prev;
+       } else {
+               struct rq *rq = rq_of(cfs_rq);
+
+               prev = rq->tmp_alone_branch;
+       }
+
+       prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
+
+       return (prev_cfs_rq->tg->parent == cfs_rq->tg);
+}
 
 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
 {
@@ -3313,6 +3338,9 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
        if (cfs_rq->avg.runnable_sum)
                return false;
 
+       if (child_cfs_rq_on_list(cfs_rq))
+               return false;
+
        return true;
 }
 
index f7c6ffcbd04407adf91a0594a949a7bd4ca1463f..f1ecd8f0c11d988db484b04adc60b9d384ea5ac8 100644 (file)
@@ -435,6 +435,12 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
                 * Preallocation does not hold sighand::siglock so it can't
                 * use the cache. The lockless caching requires that only
                 * one consumer and only one producer run at a time.
+                *
+                * For the regular allocation case it is sufficient to
+                * check @q for NULL because this code can only be called
+                * if the target task @t has not been reaped yet; which
+                * means this code can never observe the error pointer which is
+                * written to @t->sigqueue_cache in exit_task_sigqueue_cache().
                 */
                q = READ_ONCE(t->sigqueue_cache);
                if (!q || sigqueue_flags)
@@ -463,13 +469,18 @@ void exit_task_sigqueue_cache(struct task_struct *tsk)
        struct sigqueue *q = tsk->sigqueue_cache;
 
        if (q) {
-               tsk->sigqueue_cache = NULL;
                /*
                 * Hand it back to the cache as the task might
                 * be self reaping which would leak the object.
                 */
                 kmem_cache_free(sigqueue_cachep, q);
        }
+
+       /*
+        * Set an error pointer to ensure that @tsk will not cache a
+        * sigqueue when it is reaping its child tasks
+        */
+       tsk->sigqueue_cache = ERR_PTR(-1);
 }
 
 static void sigqueue_cache_or_free(struct sigqueue *q)
@@ -481,6 +492,10 @@ static void sigqueue_cache_or_free(struct sigqueue *q)
         * is intentional when run without holding current->sighand->siglock,
         * which is fine as current obviously cannot run __sigqueue_free()
         * concurrently.
+        *
+        * The NULL check is safe even if current has been reaped already,
+        * in which case exit_task_sigqueue_cache() wrote an error pointer
+        * into current->sigqueue_cache.
         */
        if (!READ_ONCE(current->sigqueue_cache))
                WRITE_ONCE(current->sigqueue_cache, q);
index 06d3135bd184c498ae02a60a9da4b5ba502e442d..a75ee30b77cb8dd70d792d36e17ec74b0385852f 100644 (file)
@@ -36,7 +36,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
 /*
  * Generic 'turn off all lock debugging' function:
  */
-noinstr int debug_locks_off(void)
+int debug_locks_off(void)
 {
        if (debug_locks && __debug_locks_off()) {
                if (!debug_locks_silent) {
index de407e8feb978eeb123e575b00d9acc0a2d5f9e3..d2b268a1838e8e24864f049f20befd129d11965f 100644 (file)
@@ -36,7 +36,7 @@ static int init_protocol(struct ceph_auth_client *ac, int proto)
        }
 }
 
-static void set_global_id(struct ceph_auth_client *ac, u64 global_id)
+void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id)
 {
        dout("%s global_id %llu\n", __func__, global_id);
 
@@ -260,19 +260,22 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac,
                ac->negotiating = false;
        }
 
-       ret = ac->ops->handle_reply(ac, result, payload, payload_end,
+       if (result) {
+               pr_err("auth protocol '%s' mauth authentication failed: %d\n",
+                      ceph_auth_proto_name(ac->protocol), result);
+               ret = result;
+               goto out;
+       }
+
+       ret = ac->ops->handle_reply(ac, global_id, payload, payload_end,
                                    NULL, NULL, NULL, NULL);
        if (ret == -EAGAIN) {
                ret = build_request(ac, true, reply_buf, reply_len);
                goto out;
        } else if (ret) {
-               pr_err("auth protocol '%s' mauth authentication failed: %d\n",
-                      ceph_auth_proto_name(ac->protocol), result);
                goto out;
        }
 
-       set_global_id(ac, global_id);
-
 out:
        mutex_unlock(&ac->mutex);
        return ret;
@@ -498,11 +501,10 @@ int ceph_auth_handle_reply_done(struct ceph_auth_client *ac,
        int ret;
 
        mutex_lock(&ac->mutex);
-       ret = ac->ops->handle_reply(ac, 0, reply, reply + reply_len,
+       ret = ac->ops->handle_reply(ac, global_id, reply, reply + reply_len,
                                    session_key, session_key_len,
                                    con_secret, con_secret_len);
-       if (!ret)
-               set_global_id(ac, global_id);
+       WARN_ON(ret == -EAGAIN || ret > 0);
        mutex_unlock(&ac->mutex);
        return ret;
 }
index 70e86e4622502881d88c00da908b1416c5ed9f47..097e9f8d87a72f254deb58c175d65f6f4ad7af66 100644 (file)
@@ -69,7 +69,7 @@ static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
  * the generic auth code decode the global_id, and we carry no actual
  * authenticate state, so nothing happens here.
  */
-static int handle_reply(struct ceph_auth_client *ac, int result,
+static int handle_reply(struct ceph_auth_client *ac, u64 global_id,
                        void *buf, void *end, u8 *session_key,
                        int *session_key_len, u8 *con_secret,
                        int *con_secret_len)
@@ -77,7 +77,8 @@ static int handle_reply(struct ceph_auth_client *ac, int result,
        struct ceph_auth_none_info *xi = ac->private;
 
        xi->starting = false;
-       return result;
+       ceph_auth_set_global_id(ac, global_id);
+       return 0;
 }
 
 static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a)
index 79641c4afee935335a39c002a807cb44b941ec31..b71b1635916e1712bf0e1723dab1307d06a16bc1 100644 (file)
@@ -597,7 +597,7 @@ bad:
        return -EINVAL;
 }
 
-static int handle_auth_session_key(struct ceph_auth_client *ac,
+static int handle_auth_session_key(struct ceph_auth_client *ac, u64 global_id,
                                   void **p, void *end,
                                   u8 *session_key, int *session_key_len,
                                   u8 *con_secret, int *con_secret_len)
@@ -613,6 +613,7 @@ static int handle_auth_session_key(struct ceph_auth_client *ac,
        if (ret)
                return ret;
 
+       ceph_auth_set_global_id(ac, global_id);
        if (*p == end) {
                /* pre-nautilus (or didn't request service tickets!) */
                WARN_ON(session_key || con_secret);
@@ -661,7 +662,7 @@ e_inval:
        return -EINVAL;
 }
 
-static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
+static int ceph_x_handle_reply(struct ceph_auth_client *ac, u64 global_id,
                               void *buf, void *end,
                               u8 *session_key, int *session_key_len,
                               u8 *con_secret, int *con_secret_len)
@@ -669,13 +670,11 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
        struct ceph_x_info *xi = ac->private;
        struct ceph_x_ticket_handler *th;
        int len = end - buf;
+       int result;
        void *p;
        int op;
        int ret;
 
-       if (result)
-               return result;  /* XXX hmm? */
-
        if (xi->starting) {
                /* it's a hello */
                struct ceph_x_server_challenge *sc = buf;
@@ -697,9 +696,9 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
        switch (op) {
        case CEPHX_GET_AUTH_SESSION_KEY:
                /* AUTH ticket + [connection secret] + service tickets */
-               ret = handle_auth_session_key(ac, &p, end, session_key,
-                                             session_key_len, con_secret,
-                                             con_secret_len);
+               ret = handle_auth_session_key(ac, global_id, &p, end,
+                                             session_key, session_key_len,
+                                             con_secret, con_secret_len);
                break;
 
        case CEPHX_GET_PRINCIPAL_SESSION_KEY:
index 438fa18bcb55d6a67a18dbd86265d63aaf1f18ea..9408ee63cb2688127d150acc99480cab60ac49d9 100644 (file)
@@ -3388,44 +3388,30 @@ static int rt5645_probe(struct snd_soc_component *component)
 {
        struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
        struct rt5645_priv *rt5645 = snd_soc_component_get_drvdata(component);
-       int ret = 0;
 
        rt5645->component = component;
 
        switch (rt5645->codec_type) {
        case CODEC_TYPE_RT5645:
-               ret = snd_soc_dapm_new_controls(dapm,
+               snd_soc_dapm_new_controls(dapm,
                        rt5645_specific_dapm_widgets,
                        ARRAY_SIZE(rt5645_specific_dapm_widgets));
-               if (ret < 0)
-                       goto exit;
-
-               ret = snd_soc_dapm_add_routes(dapm,
+               snd_soc_dapm_add_routes(dapm,
                        rt5645_specific_dapm_routes,
                        ARRAY_SIZE(rt5645_specific_dapm_routes));
-               if (ret < 0)
-                       goto exit;
-
                if (rt5645->v_id < 3) {
-                       ret = snd_soc_dapm_add_routes(dapm,
+                       snd_soc_dapm_add_routes(dapm,
                                rt5645_old_dapm_routes,
                                ARRAY_SIZE(rt5645_old_dapm_routes));
-                       if (ret < 0)
-                               goto exit;
                }
                break;
        case CODEC_TYPE_RT5650:
-               ret = snd_soc_dapm_new_controls(dapm,
+               snd_soc_dapm_new_controls(dapm,
                        rt5650_specific_dapm_widgets,
                        ARRAY_SIZE(rt5650_specific_dapm_widgets));
-               if (ret < 0)
-                       goto exit;
-
-               ret = snd_soc_dapm_add_routes(dapm,
+               snd_soc_dapm_add_routes(dapm,
                        rt5650_specific_dapm_routes,
                        ARRAY_SIZE(rt5650_specific_dapm_routes));
-               if (ret < 0)
-                       goto exit;
                break;
        }
 
@@ -3433,17 +3419,9 @@ static int rt5645_probe(struct snd_soc_component *component)
 
        /* for JD function */
        if (rt5645->pdata.jd_mode) {
-               ret = snd_soc_dapm_force_enable_pin(dapm, "JD Power");
-               if (ret < 0)
-                       goto exit;
-
-               ret = snd_soc_dapm_force_enable_pin(dapm, "LDO2");
-               if (ret < 0)
-                       goto exit;
-
-               ret = snd_soc_dapm_sync(dapm);
-               if (ret < 0)
-                       goto exit;
+               snd_soc_dapm_force_enable_pin(dapm, "JD Power");
+               snd_soc_dapm_force_enable_pin(dapm, "LDO2");
+               snd_soc_dapm_sync(dapm);
        }
 
        if (rt5645->pdata.long_name)
@@ -3454,14 +3432,9 @@ static int rt5645_probe(struct snd_soc_component *component)
                GFP_KERNEL);
 
        if (!rt5645->eq_param)
-               ret = -ENOMEM;
-exit:
-       /*
-        * If there was an error above, everything will be cleaned up by the
-        * caller if we return an error here.  This will be done with a later
-        * call to rt5645_remove().
-        */
-       return ret;
+               return -ENOMEM;
+
+       return 0;
 }
 
 static void rt5645_remove(struct snd_soc_component *component)