F: include/linux/fs_enet_pd.h
FREESCALE SOC SOUND DRIVERS
-M: Timur Tabi <timur@kernel.org>
M: Nicolin Chen <nicoleotsuka@gmail.com>
M: Xiubo Li <Xiubo.Lee@gmail.com>
R: Fabio Estevam <festevam@gmail.com>
* In Thumb-2, msr with an immediate value is not allowed.
*/
#ifdef CONFIG_THUMB2_KERNEL
-#define PLC "r"
+#define PLC_l "l"
+#define PLC_r "r"
#else
-#define PLC "I"
+#define PLC_l "I"
+#define PLC_r "I"
#endif
/*
"msr cpsr_c, %9"
:
: "r" (stk),
- PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
"I" (offsetof(struct stack, irq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
"I" (offsetof(struct stack, abt[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
"I" (offsetof(struct stack, und[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
"I" (offsetof(struct stack, fiq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+ PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
: "r14");
#endif
}
/* User code screwed up. */
regs->ax = -EFAULT;
- instrumentation_end();
local_irq_disable();
+ instrumentation_end();
irqentry_exit_to_user_mode(regs);
return false;
}
irqentry_state_t state = irqentry_enter(regs);
bool inhcall;
+ instrumentation_begin();
run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
inhcall = get_and_clear_inhcall();
if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
- instrumentation_begin();
irqentry_exit_cond_resched();
instrumentation_end();
restore_inhcall(inhcall);
} else {
+ instrumentation_end();
irqentry_exit(regs, state);
}
}
if (!kmem_cache || cpuc->lbr_xsave)
continue;
- cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache, GFP_KERNEL,
+ cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache,
+ GFP_KERNEL | __GFP_ZERO,
cpu_to_node(cpu));
}
}
*
* With page table isolation enabled, we map the LDT in ... [stay tuned]
*/
-static inline unsigned long task_size_max(void)
+static __always_inline unsigned long task_size_max(void)
{
unsigned long ret;
2: .skip 5-(2b-1b), 0x90
SYM_FUNC_END(__x86_indirect_alt_call_\reg)
+STACK_FRAME_NON_STANDARD(__x86_indirect_alt_call_\reg)
+
SYM_FUNC_START_NOALIGN(__x86_indirect_alt_jmp_\reg)
ANNOTATE_RETPOLINE_SAFE
1: jmp *%\reg
2: .skip 5-(2b-1b), 0x90
SYM_FUNC_END(__x86_indirect_alt_jmp_\reg)
+STACK_FRAME_NON_STANDARD(__x86_indirect_alt_jmp_\reg)
+
.endm
/*
DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
{
/* This should never happen and there is no way to handle it. */
+ instrumentation_begin();
pr_err("Unknown trap in Xen PV mode.");
BUG();
+ instrumentation_end();
}
#ifdef CONFIG_X86_MCE
config GPIO_TQMX86
tristate "TQ-Systems QTMX86 GPIO"
depends on MFD_TQMX86 || COMPILE_TEST
+ depends on HAS_IOPORT_MAP
select GPIOLIB_IRQCHIP
help
This driver supports GPIO on the TQMX86 IO controller.
config GPIO_AMD8111
tristate "AMD 8111 GPIO driver"
depends on X86 || COMPILE_TEST
+ depends on HAS_IOPORT_MAP
help
The AMD 8111 south bridge contains 32 GPIO pins which can be used.
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_set_type = gpio_set_irq_type;
ct->chip.irq_set_wake = gpio_set_wake_irq;
- ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+ ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND;
ct->regs.ack = GPIO_ISR;
ct->regs.mask = GPIO_IMR;
struct gpio_v2_line_info_changed *lic_v2,
struct gpioline_info_changed *lic_v1)
{
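+ /* Zero the v1 struct first so no uninitialized stack padding leaks to userspace */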
+ memset(lic_v1, 0, sizeof(*lic_v1));
gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
lic_v1->timestamp = lic_v2->timestamp_ns;
lic_v1->event_type = lic_v2->event_type;
rfb->base.obj[0] = obj;
drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+
+ ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
if (ret)
goto err;
- ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret)
goto err;
rfb->base.obj[0] = obj;
drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
- if (ret)
- goto err;
/* Verify that the modifier is supported. */
if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
if (ret)
goto err;
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ if (ret)
+ goto err;
+
return 0;
err:
drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ int r;
/* pin buffer into GTT */
- return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ return r;
+
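+ /* If the buffer is still being moved, wait for the move to finish before the importer can access it */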
+ if (bo->tbo.moving) {
+ r = dma_fence_wait(bo->tbo.moving, true);
+ if (r) {
+ amdgpu_bo_unpin(bo);
+ return r;
+ }
+ }
+ return 0;
}
/**
if (ring->use_doorbell) {
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
(adev->doorbell_index.kiq * 2) << 2);
- /* If GC has entered CGPG, ringing doorbell > first page doesn't
- * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
- * this issue.
- */
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
- (adev->doorbell.size - 4));
+ (adev->doorbell_index.userqueue_end * 2) << 2);
}
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
if (ring->use_doorbell) {
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
(adev->doorbell_index.kiq * 2) << 2);
- /* If GC has entered CGPG, ringing doorbell > first page doesn't
- * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
- * this issue.
- */
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
- (adev->doorbell.size - 4));
+ (adev->doorbell_index.userqueue_end * 2) << 2);
}
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
pm_runtime_put_sync(dev->dev);
- drm_crtc_vblank_on(c);
}
#define ATMEL_HLCDC_RGB444_OUTPUT BIT(0)
static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
struct drm_atomic_state *state)
+{
+ drm_crtc_vblank_on(c);
+}
+
+static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *c,
+ struct drm_atomic_state *state)
{
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->dev->event_lock, flags);
if (c->state->event) {
c->state->event->pipe = drm_crtc_index(c);
crtc->event = c->state->event;
c->state->event = NULL;
}
-}
-
-static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- /* TODO: write common plane control register if available */
+ spin_unlock_irqrestore(&c->dev->event_lock, flags);
}
static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
dev->mode_config.max_width = dc->desc->max_width;
dev->mode_config.max_height = dc->desc->max_height;
dev->mode_config.funcs = &mode_config_funcs;
+ dev->mode_config.async_page_flip = true;
return 0;
}
/* Allocate LCD interrupt resources */
irq_lcd = platform_get_irq(pdev, 0);
if (irq_lcd < 0) {
+ ret = irq_lcd;
drm_err(&kmb->drm, "irq_lcd not found");
goto setup_fail;
}
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i, j;
- if (!ttm_dma)
+ if (!ttm_dma || !ttm_dma->dma_address)
return;
if (!ttm_dma->pages) {
NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i, j;
- if (!ttm_dma)
+ if (!ttm_dma || !ttm_dma->dma_address)
return;
if (!ttm_dma->pages) {
NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
if (ret)
return -EINVAL;
- return 0;
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
+ if (ret)
+ goto error;
+
+ if (nvbo->bo.moving)
+ ret = dma_fence_wait(nvbo->bo.moving, true);
+
+ ttm_bo_unreserve(&nvbo->bo);
+ if (ret)
+ goto error;
+
+ return ret;
+
+error:
+ nouveau_bo_unpin(nvbo);
+ return ret;
}
void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
static struct spi_driver ld9040_driver = {
.probe = ld9040_probe,
.remove = ld9040_remove,
+ .id_table = ld9040_ids,
.driver = {
.name = "panel-samsung-ld9040",
.of_match_table = ld9040_of_match,
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
- if (likely(ret == 0))
- bo->prime_shared_count++;
-
+ if (unlikely(ret))
+ goto error;
+
+ if (bo->tbo.moving) {
+ ret = dma_fence_wait(bo->tbo.moving, false);
+ if (unlikely(ret)) {
+ radeon_bo_unpin(bo);
+ goto error;
+ }
+ }
+
+ bo->prime_shared_count++;
+error:
radeon_bo_unreserve(bo);
return ret;
}
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
bool connected = false;
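+ /* The controller must be powered before its registers are read below */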
+ WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
+
if (vc4_hdmi->hpd_gpio) {
if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^
vc4_hdmi->hpd_active_low)
}
}
+ pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_connected;
}
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+ pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_disconnected;
}
HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
return;
}
- ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
- if (ret) {
- DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->pixel_clock);
- return;
- }
-
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
/*
(hsm_rate > VC4_HSM_MID_CLOCK ? 150000000 : 75000000));
if (ret) {
DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
if (ret) {
DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
return 0;
}
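+/* Runtime PM gates the HSM clock: it is enabled on resume and disabled on suspend */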
+#ifdef CONFIG_PM
+static int vc4_hdmi_runtime_suspend(struct device *dev)
+{
+ struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+
+ return 0;
+}
+
+static int vc4_hdmi_runtime_resume(struct device *dev)
+{
+ struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+#endif
+
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
{}
};
+static const struct dev_pm_ops vc4_hdmi_pm_ops = {
+ SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
+ vc4_hdmi_runtime_resume,
+ NULL)
+};
+
struct platform_driver vc4_hdmi_driver = {
.probe = vc4_hdmi_dev_probe,
.remove = vc4_hdmi_dev_remove,
.driver = {
.name = "vc4_hdmi",
.of_match_table = vc4_hdmi_dt_match,
+ .pm = &vc4_hdmi_pm_ops,
},
};
unsigned int bounce_buf_size;
void *bounce_buf;
+ void __iomem *bounce_iomem_buf;
dma_addr_t bounce_dma_addr;
struct sd_emmc_desc *descs;
dma_addr_t descs_dma_addr;
writel(start, host->regs + SD_EMMC_START);
}
+/* local sg copy to buffer version with _to/fromio usage for dram_access_quirk */
+static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
+ size_t buflen, bool to_buffer)
+{
+ unsigned int sg_flags = SG_MITER_ATOMIC;
+ struct scatterlist *sgl = data->sg;
+ unsigned int nents = data->sg_len;
+ struct sg_mapping_iter miter;
+ unsigned int offset = 0;
+
+ if (to_buffer)
+ sg_flags |= SG_MITER_FROM_SG;
+ else
+ sg_flags |= SG_MITER_TO_SG;
+
+ sg_miter_start(&miter, sgl, nents, sg_flags);
+
+ while ((offset < buflen) && sg_miter_next(&miter)) {
+ unsigned int len;
+
+ len = min(miter.length, buflen - offset);
+
+ /* With dram_access_quirk, the bounce buffer is an iomem mapping */
+ if (host->dram_access_quirk) {
+ if (to_buffer)
+ memcpy_toio(host->bounce_iomem_buf + offset, miter.addr, len);
+ else
+ memcpy_fromio(miter.addr, host->bounce_iomem_buf + offset, len);
+ } else {
+ if (to_buffer)
+ memcpy(host->bounce_buf + offset, miter.addr, len);
+ else
+ memcpy(miter.addr, host->bounce_buf + offset, len);
+ }
+
+ offset += len;
+ }
+
+ sg_miter_stop(&miter);
+}
+
static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
struct meson_host *host = mmc_priv(mmc);
if (data->flags & MMC_DATA_WRITE) {
cmd_cfg |= CMD_CFG_DATA_WR;
WARN_ON(xfer_bytes > host->bounce_buf_size);
- sg_copy_to_buffer(data->sg, data->sg_len,
- host->bounce_buf, xfer_bytes);
+ meson_mmc_copy_buffer(host, data, xfer_bytes, true);
dma_wmb();
}
if (meson_mmc_bounce_buf_read(data)) {
xfer_bytes = data->blksz * data->blocks;
WARN_ON(xfer_bytes > host->bounce_buf_size);
- sg_copy_from_buffer(data->sg, data->sg_len,
- host->bounce_buf, xfer_bytes);
+ meson_mmc_copy_buffer(host, data, xfer_bytes, false);
}
next_cmd = meson_mmc_get_next_command(cmd);
* instead of the DDR memory
*/
host->bounce_buf_size = SD_EMMC_SRAM_DATA_BUF_LEN;
- host->bounce_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
+ host->bounce_iomem_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
host->bounce_dma_addr = res->start + SD_EMMC_SRAM_DATA_BUF_OFF;
} else {
/* data bounce buffer */
int err;
int i, bars = 0;
- if (atomic_inc_return(&dev->enable_cnt) > 1) {
- pci_update_current_state(dev, dev->current_state);
- return 0; /* already enabled */
+ /*
+ * Power state could be unknown at this point, either due to a fresh
+ * boot or a device removal call. So get the current power state
+ * so that things like MSI message writing will behave as expected
+ * (e.g. if the device really is in D0 at enable time).
+ */
+ if (dev->pm_cap) {
+ u16 pmcsr;
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
}
+ if (atomic_inc_return(&dev->enable_cnt) > 1)
+ return 0; /* already enabled */
+
bridge = pci_upstream_bridge(dev);
if (bridge)
pci_enable_bridge(bridge);
goto err_put_ctrl;
}
- /* Clear potential interrupts */
- reg = fspi_readl(f, f->iobase + FSPI_INTR);
- if (reg)
- fspi_writel(f, reg, f->iobase + FSPI_INTR);
-
-
/* find the resources - controller memory mapped space */
if (is_acpi_node(f->dev->fwnode))
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
}
}
+ /* Clear potential interrupts */
+ reg = fspi_readl(f, f->iobase + FSPI_INTR);
+ if (reg)
+ fspi_writel(f, reg, f->iobase + FSPI_INTR);
+
/* find the irq */
ret = platform_get_irq(pdev, 0);
if (ret < 0)
pm_runtime_put_noidle(&pdev->dev);
goto exit_pm_disable;
}
+
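+ /* Reset the controller so it starts out in a known state */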
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+
tspi->def_command_reg = SLINK_M_S;
tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
_enter("{%llx:%llu},{%lx}",
vnode->fid.vid, vnode->fid.vnode, page->index);
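+ /* A short copy into a page that was never read cannot be kept: the rest of the page would be stale */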
+ if (!PageUptodate(page)) {
+ if (copied < len) {
+ copied = 0;
+ goto out;
+ }
+
+ SetPageUptodate(page);
+ }
+
if (copied == 0)
goto out;
write_sequnlock(&vnode->cb_lock);
}
- ASSERT(PageUptodate(page));
-
if (PagePrivate(page)) {
priv = page_private(page);
f = afs_page_dirty_from(page, priv);
* Handle lookups for the hidden .snap directory.
*/
struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
- struct dentry *dentry, int err)
+ struct dentry *dentry)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */
/* .snap dir? */
- if (err == -ENOENT &&
- ceph_snap(parent) == CEPH_NOSNAP &&
+ if (ceph_snap(parent) == CEPH_NOSNAP &&
strcmp(dentry->d_name.name, fsc->mount_options->snapdir_name) == 0) {
struct dentry *res;
struct inode *inode = ceph_get_snapdir(parent);
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_mds_request *req;
- struct dentry *res;
int op;
int mask;
int err;
req->r_parent = dir;
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
err = ceph_mdsc_do_request(mdsc, NULL, req);
- res = ceph_handle_snapdir(req, dentry, err);
- if (IS_ERR(res)) {
- err = PTR_ERR(res);
- } else {
- dentry = res;
- err = 0;
+ if (err == -ENOENT) {
+ struct dentry *res;
+
+ res = ceph_handle_snapdir(req, dentry);
+ if (IS_ERR(res)) {
+ err = PTR_ERR(res);
+ } else {
+ dentry = res;
+ err = 0;
+ }
}
dentry = ceph_finish_lookup(req, dentry, err);
ceph_mdsc_put_request(req); /* will dput(dentry) */
struct ceph_inode_info *ci = ceph_inode(dir);
struct inode *inode;
struct timespec64 now;
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_vino vino = { .ino = req->r_deleg_ino,
.snap = CEPH_NOSNAP };
ceph_file_layout_to_legacy(lo, &in.layout);
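+ /* ceph_fill_inode() must be called with snap_rwsem held (it asserts this with lockdep) */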
+ down_read(&mdsc->snap_rwsem);
ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
req->r_fmode, NULL);
+ up_read(&mdsc->snap_rwsem);
if (ret) {
dout("%s failed to fill inode: %d\n", __func__, ret);
ceph_dir_clear_complete(dir);
err = ceph_mdsc_do_request(mdsc,
(flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
req);
- dentry = ceph_handle_snapdir(req, dentry, err);
- if (IS_ERR(dentry)) {
- err = PTR_ERR(dentry);
- goto out_req;
+ if (err == -ENOENT) {
+ dentry = ceph_handle_snapdir(req, dentry);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out_req;
+ }
+ err = 0;
}
- err = 0;
- if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
+ if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
if (d_in_lookup(dentry)) {
umode_t mode = le32_to_cpu(info->mode);
dev_t rdev = le32_to_cpu(info->rdev);
+ lockdep_assert_held(&mdsc->snap_rwsem);
+
dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
inode, ceph_vinop(inode), le64_to_cpu(info->version),
ci->i_version);
extern loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order);
extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry);
extern struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
- struct dentry *dentry, int err);
+ struct dentry *dentry);
extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
struct dentry *dentry, int err);
}
EXPORT_SYMBOL(netfs_readpage);
-static void netfs_clear_thp(struct page *page)
+/**
+ * netfs_skip_page_read - prep a page for writing without reading first
+ * @page: page being prepared
+ * @pos: starting position for the write
+ * @len: length of write
+ *
+ * In some cases, write_begin doesn't need to read at all:
+ * - full page write
+ * - write that lies in a page that is completely beyond EOF
+ * - write that covers the page from start to EOF or beyond it
+ *
+ * If any of these criteria are met, then zero out the unwritten parts
+ * of the page and return true. Otherwise, return false.
+ */
+static bool netfs_skip_page_read(struct page *page, loff_t pos, size_t len)
{
- unsigned int i;
+ struct inode *inode = page->mapping->host;
+ loff_t i_size = i_size_read(inode);
+ size_t offset = offset_in_thp(page, pos);
+
+ /* Full page write */
+ if (offset == 0 && len >= thp_size(page))
+ return true;
+
+ /* pos beyond last page in the file */
+ if (pos - offset >= i_size)
+ goto zero_out;
+
+ /* Write that covers from the start of the page to EOF or beyond */
+ if (offset == 0 && (pos + len) >= i_size)
+ goto zero_out;
- for (i = 0; i < thp_nr_pages(page); i++)
- clear_highpage(page + i);
+ return false;
+zero_out:
+ zero_user_segments(page, 0, offset, offset + len, thp_size(page));
+ return true;
}
/**
* @file: The file to read from
* @mapping: The mapping to read from
* @pos: File position at which the write will begin
- * @len: The length of the write in this page
+ * @len: The length of the write (may extend beyond the end of the page chosen)
* @flags: AOP_* flags
* @_page: Where to put the resultant page
* @_fsdata: Place for the netfs to store a cookie
struct inode *inode = file_inode(file);
unsigned int debug_index = 0;
pgoff_t index = pos >> PAGE_SHIFT;
- int pos_in_page = pos & ~PAGE_MASK;
- loff_t size;
int ret;
DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
* within the cache granule containing the EOF, in which case we need
* to preload the granule.
*/
- size = i_size_read(inode);
if (!ops->is_cache_enabled(inode) &&
- ((pos_in_page == 0 && len == thp_size(page)) ||
- (pos >= size) ||
- (pos_in_page == 0 && (pos + len) >= size))) {
- netfs_clear_thp(page);
- SetPageUptodate(page);
+ netfs_skip_page_read(page, pos, len)) {
netfs_stat(&netfs_n_rh_write_zskip);
goto have_page_no_wait;
}
* another request.
*/
int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end);
- int (*handle_reply)(struct ceph_auth_client *ac, int result,
+ int (*handle_reply)(struct ceph_auth_client *ac, u64 global_id,
void *buf, void *end, u8 *session_key,
int *session_key_len, u8 *con_secret,
int *con_secret_len);
struct mutex mutex;
};
+void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id);
+
struct ceph_auth_client *ceph_auth_init(const char *name,
const struct ceph_crypto_key *key,
const int *con_modes);
int __ret = 0; \
\
if (!oops_in_progress && unlikely(c)) { \
+ instrumentation_begin(); \
if (debug_locks_off() && !debug_locks_silent) \
WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c); \
+ instrumentation_end(); \
__ret = 1; \
} \
__ret; \
io_tlb_default_mem = NULL;
}
+/*
+ * Return the offset into an iotlb slot required to keep the device happy.
+ */
+static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
+{
+ return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
+}
+
/*
* Bounce: copy the swiotlb buffer from or back to the original dma location
*/
size_t alloc_size = mem->slots[index].alloc_size;
unsigned long pfn = PFN_DOWN(orig_addr);
unsigned char *vaddr = phys_to_virt(tlb_addr);
+ unsigned int tlb_offset;
if (orig_addr == INVALID_PHYS_ADDR)
return;
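+ /*
+ * Work out how far tlb_addr points into the bounced mapping so the copy
+ * starts at the matching offset in the original buffer.
+ */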
+ tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) -
+ swiotlb_align_offset(dev, orig_addr);
+
+ orig_addr += tlb_offset;
+ alloc_size -= tlb_offset;
+
if (size > alloc_size) {
dev_WARN_ONCE(dev, 1,
"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
#define slot_addr(start, idx) ((start) + ((idx) << IO_TLB_SHIFT))
-/*
- * Return the offset into a iotlb slot required to keep the device happy.
- */
-static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
-{
- return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
-}
-
/*
* Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
*/
}
/* used from NMI context -- must be lockless */
-static __always_inline struct lock_class *
+static noinstr struct lock_class *
look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
{
struct lockdep_subclass_key *key;
struct lock_class *class;
if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
+ instrumentation_begin();
debug_locks_off();
printk(KERN_ERR
"BUG: looking up invalid subclass: %u\n", subclass);
printk(KERN_ERR
"turning off the locking correctness validator.\n");
dump_stack();
+ instrumentation_end();
return NULL;
}
#endif
}
+#ifdef CONFIG_MODULE_SIG
static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
module_param(sig_enforce, bool_enable_only, 0644);
+void set_module_sig_enforced(void)
+{
+ sig_enforce = true;
+}
+#else
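+/* No module signature support built in, so enforcement is permanently off */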
+#define sig_enforce false
+#endif
+
/*
* Export sig_enforce kernel cmdline parameter to allow other subsystems rely
* on that instead of directly to CONFIG_MODULE_SIG_FORCE config.
}
EXPORT_SYMBOL(is_module_sig_enforced);
-void set_module_sig_enforced(void)
-{
- sig_enforce = true;
-}
-
/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);
#ifdef CONFIG_SMP
#ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
+ * immediately before a parent cfs_rq, and cfs_rqs are removed from the list
+ * bottom-up, we only have to test whether the cfs_rq before us on the list
+ * is our child.
+ * If cfs_rq is not on the list, test whether a child needs to be added to
+ * connect a branch to the tree (see list_add_leaf_cfs_rq() for details).
+ */
+static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
+{
+ struct cfs_rq *prev_cfs_rq;
+ struct list_head *prev;
+
+ if (cfs_rq->on_list) {
+ prev = cfs_rq->leaf_cfs_rq_list.prev;
+ } else {
+ struct rq *rq = rq_of(cfs_rq);
+
+ prev = rq->tmp_alone_branch;
+ }
+
+ prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
+
+ return (prev_cfs_rq->tg->parent == cfs_rq->tg);
+}
static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
{
if (cfs_rq->avg.runnable_sum)
return false;
+ if (child_cfs_rq_on_list(cfs_rq))
+ return false;
+
return true;
}
* Preallocation does not hold sighand::siglock so it can't
* use the cache. The lockless caching requires that only
* one consumer and only one producer run at a time.
+ *
+ * For the regular allocation case it is sufficient to
+ * check @q for NULL because this code can only be called
+ * if the target task @t has not been reaped yet, which
+ * means this code can never observe the error pointer that is
+ * written to @t->sigqueue_cache in exit_task_sigqueue_cache().
*/
q = READ_ONCE(t->sigqueue_cache);
if (!q || sigqueue_flags)
struct sigqueue *q = tsk->sigqueue_cache;
if (q) {
- tsk->sigqueue_cache = NULL;
/*
* Hand it back to the cache as the task might
* be self reaping which would leak the object.
*/
kmem_cache_free(sigqueue_cachep, q);
}
+
+ /*
+ * Set an error pointer to ensure that @tsk will not cache a
+ * sigqueue when it is reaping it's child tasks
+ */
+ tsk->sigqueue_cache = ERR_PTR(-1);
}
static void sigqueue_cache_or_free(struct sigqueue *q)
* is intentional when run without holding current->sighand->siglock,
* which is fine as current obviously cannot run __sigqueue_free()
* concurrently.
+ *
+ * The NULL check is safe even if current has been reaped already,
+ * in which case exit_task_sigqueue_cache() wrote an error pointer
+ * into current->sigqueue_cache.
*/
if (!READ_ONCE(current->sigqueue_cache))
WRITE_ONCE(current->sigqueue_cache, q);
/*
* Generic 'turn off all lock debugging' function:
*/
-noinstr int debug_locks_off(void)
+int debug_locks_off(void)
{
if (debug_locks && __debug_locks_off()) {
if (!debug_locks_silent) {
}
}
-static void set_global_id(struct ceph_auth_client *ac, u64 global_id)
+void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id)
{
dout("%s global_id %llu\n", __func__, global_id);
ac->negotiating = false;
}
- ret = ac->ops->handle_reply(ac, result, payload, payload_end,
+ if (result) {
+ pr_err("auth protocol '%s' mauth authentication failed: %d\n",
+ ceph_auth_proto_name(ac->protocol), result);
+ ret = result;
+ goto out;
+ }
+
+ ret = ac->ops->handle_reply(ac, global_id, payload, payload_end,
NULL, NULL, NULL, NULL);
if (ret == -EAGAIN) {
ret = build_request(ac, true, reply_buf, reply_len);
goto out;
} else if (ret) {
- pr_err("auth protocol '%s' mauth authentication failed: %d\n",
- ceph_auth_proto_name(ac->protocol), result);
goto out;
}
- set_global_id(ac, global_id);
-
out:
mutex_unlock(&ac->mutex);
return ret;
int ret;
mutex_lock(&ac->mutex);
- ret = ac->ops->handle_reply(ac, 0, reply, reply + reply_len,
+ ret = ac->ops->handle_reply(ac, global_id, reply, reply + reply_len,
session_key, session_key_len,
con_secret, con_secret_len);
- if (!ret)
- set_global_id(ac, global_id);
+ WARN_ON(ret == -EAGAIN || ret > 0);
mutex_unlock(&ac->mutex);
return ret;
}
* the generic auth code decode the global_id, and we carry no actual
* authenticate state, so nothing happens here.
*/
-static int handle_reply(struct ceph_auth_client *ac, int result,
+static int handle_reply(struct ceph_auth_client *ac, u64 global_id,
void *buf, void *end, u8 *session_key,
int *session_key_len, u8 *con_secret,
int *con_secret_len)
struct ceph_auth_none_info *xi = ac->private;
xi->starting = false;
- return result;
+ ceph_auth_set_global_id(ac, global_id);
+ return 0;
}
static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a)
return -EINVAL;
}
-static int handle_auth_session_key(struct ceph_auth_client *ac,
+static int handle_auth_session_key(struct ceph_auth_client *ac, u64 global_id,
void **p, void *end,
u8 *session_key, int *session_key_len,
u8 *con_secret, int *con_secret_len)
if (ret)
return ret;
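+ /* Record the new global_id only after the reply has been successfully verified */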
+ ceph_auth_set_global_id(ac, global_id);
if (*p == end) {
/* pre-nautilus (or didn't request service tickets!) */
WARN_ON(session_key || con_secret);
return -EINVAL;
}
-static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
+static int ceph_x_handle_reply(struct ceph_auth_client *ac, u64 global_id,
void *buf, void *end,
u8 *session_key, int *session_key_len,
u8 *con_secret, int *con_secret_len)
struct ceph_x_info *xi = ac->private;
struct ceph_x_ticket_handler *th;
int len = end - buf;
+ int result;
void *p;
int op;
int ret;
- if (result)
- return result; /* XXX hmm? */
-
if (xi->starting) {
/* it's a hello */
struct ceph_x_server_challenge *sc = buf;
switch (op) {
case CEPHX_GET_AUTH_SESSION_KEY:
/* AUTH ticket + [connection secret] + service tickets */
- ret = handle_auth_session_key(ac, &p, end, session_key,
- session_key_len, con_secret,
- con_secret_len);
+ ret = handle_auth_session_key(ac, global_id, &p, end,
+ session_key, session_key_len,
+ con_secret, con_secret_len);
break;
case CEPHX_GET_PRINCIPAL_SESSION_KEY:
{
struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
struct rt5645_priv *rt5645 = snd_soc_component_get_drvdata(component);
- int ret = 0;
rt5645->component = component;
switch (rt5645->codec_type) {
case CODEC_TYPE_RT5645:
- ret = snd_soc_dapm_new_controls(dapm,
+ snd_soc_dapm_new_controls(dapm,
rt5645_specific_dapm_widgets,
ARRAY_SIZE(rt5645_specific_dapm_widgets));
- if (ret < 0)
- goto exit;
-
- ret = snd_soc_dapm_add_routes(dapm,
+ snd_soc_dapm_add_routes(dapm,
rt5645_specific_dapm_routes,
ARRAY_SIZE(rt5645_specific_dapm_routes));
- if (ret < 0)
- goto exit;
-
if (rt5645->v_id < 3) {
- ret = snd_soc_dapm_add_routes(dapm,
+ snd_soc_dapm_add_routes(dapm,
rt5645_old_dapm_routes,
ARRAY_SIZE(rt5645_old_dapm_routes));
- if (ret < 0)
- goto exit;
}
break;
case CODEC_TYPE_RT5650:
- ret = snd_soc_dapm_new_controls(dapm,
+ snd_soc_dapm_new_controls(dapm,
rt5650_specific_dapm_widgets,
ARRAY_SIZE(rt5650_specific_dapm_widgets));
- if (ret < 0)
- goto exit;
-
- ret = snd_soc_dapm_add_routes(dapm,
+ snd_soc_dapm_add_routes(dapm,
rt5650_specific_dapm_routes,
ARRAY_SIZE(rt5650_specific_dapm_routes));
- if (ret < 0)
- goto exit;
break;
}
/* for JD function */
if (rt5645->pdata.jd_mode) {
- ret = snd_soc_dapm_force_enable_pin(dapm, "JD Power");
- if (ret < 0)
- goto exit;
-
- ret = snd_soc_dapm_force_enable_pin(dapm, "LDO2");
- if (ret < 0)
- goto exit;
-
- ret = snd_soc_dapm_sync(dapm);
- if (ret < 0)
- goto exit;
+ snd_soc_dapm_force_enable_pin(dapm, "JD Power");
+ snd_soc_dapm_force_enable_pin(dapm, "LDO2");
+ snd_soc_dapm_sync(dapm);
}
if (rt5645->pdata.long_name)
GFP_KERNEL);
if (!rt5645->eq_param)
- ret = -ENOMEM;
-exit:
- /*
- * If there was an error above, everything will be cleaned up by the
- * caller if we return an error here. This will be done with a later
- * call to rt5645_remove().
- */
- return ret;
+ return -ENOMEM;
+
+ return 0;
}
static void rt5645_remove(struct snd_soc_component *component)