From: Hai Li
Date: Fri, 26 Jun 2015 20:03:26 +0000 (-0400)
Subject: drm/msm/mdp5: Allocate CTL0/1 for dual DSI single FLUSH
X-Git-Tag: v4.3~229^2~31^2~27
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=b96b3a06d1211ba86674db99a6aafe39ef4cbed2;p=platform%2Fkernel%2Flinux-amlogic.git

drm/msm/mdp5: Allocate CTL0/1 for dual DSI single FLUSH

This change takes advantage of a HW feature that synchronizes the flush
operation on CTL1 to CTL0, to keep the dual DSI pipes in sync.

Signed-off-by: Hai Li
Signed-off-by: Rob Clark
---

diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index 9cf987b..4e81ca4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -17,7 +17,7 @@
 /*
  * CTL - MDP Control Pool Manager
  *
- * Controls are shared between all CRTCs.
+ * Controls are shared between all display interfaces.
  *
  * They are intended to be used for data path configuration.
  * The top level register programming describes the complete data path for
@@ -27,12 +27,11 @@
  *
  * In certain use cases (high-resolution dual pipe), one single CTL can be
  * shared across multiple CRTCs.
- *
- * Because the number of CTLs can be less than the number of CRTCs,
- * CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is
- * requested by the client (in mdp5_crtc_mode_set()).
  */
 
+#define CTL_STAT_BUSY	0x1
+#define CTL_STAT_BOOKED	0x2
+
 struct op_mode {
 	struct mdp5_interface intf;
@@ -46,8 +45,8 @@ struct mdp5_ctl {
 	u32 id;
 	int lm;
 
-	/* whether this CTL has been allocated or not: */
-	bool busy;
+	/* CTL status bitmask */
+	u32 status;
 
 	/* Operation Mode Configuration for the Pipeline */
 	struct op_mode pipeline;
@@ -60,6 +59,11 @@ struct mdp5_ctl {
 	u32 pending_ctl_trigger;
 
 	bool cursor_on;
+
+	/* True if the current CTL has FLUSH bits pending for single FLUSH. */
+	bool flush_pending;
+
+	struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
 };
 
 struct mdp5_ctl_manager {
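The boolean `busy` flag becomes a two-bit status bitmask, so a CTL can be reserved ("booked") for a DSI interface independently of whether it is currently allocated ("busy"). The four combinations, with illustrative accessors (a standalone sketch, not part of the patch; `ctl_is_busy()`/`ctl_is_booked()` are hypothetical helpers and `uint32_t` stands in for the kernel's `u32`):

#include <stdbool.h>
#include <stdint.h>

#define CTL_STAT_BUSY	0x1	/* currently allocated to an interface */
#define CTL_STAT_BOOKED	0x2	/* reserved at init for DSI INTF1/2 */

/*
 * status == 0                                free and unreserved
 * status == CTL_STAT_BOOKED                  reserved, not yet in use
 * status == CTL_STAT_BUSY                    allocated, unreserved
 * status == CTL_STAT_BUSY | CTL_STAT_BOOKED  allocated on a reserved CTL
 */

static inline bool ctl_is_busy(uint32_t status)
{
	return status & CTL_STAT_BUSY;
}

static inline bool ctl_is_booked(uint32_t status)
{
	return status & CTL_STAT_BOOKED;
}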
@@ -72,6 +76,10 @@ struct mdp5_ctl_manager {
 	/* to filter out non-present bits in the current hardware config */
 	u32 flush_hw_mask;
 
+	/* status for single FLUSH */
+	bool single_flush_supported;
+	u32 single_flush_pending_mask;
+
 	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
 	spinlock_t pool_lock;
 	struct mdp5_ctl ctls[MAX_CTL];
@@ -443,6 +451,31 @@ static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
 	return sw_mask;
 }
 
+static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
+		u32 *flush_id)
+{
+	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+
+	if (ctl->pair) {
+		DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
+		ctl->flush_pending = true;
+		ctl_mgr->single_flush_pending_mask |= (*flush_mask);
+		*flush_mask = 0;
+
+		if (ctl->pair->flush_pending) {
+			*flush_id = min_t(u32, ctl->id, ctl->pair->id);
+			*flush_mask = ctl_mgr->single_flush_pending_mask;
+
+			ctl->flush_pending = false;
+			ctl->pair->flush_pending = false;
+			ctl_mgr->single_flush_pending_mask = 0;
+
+			DBG("Single FLUSH mask %x, ID %d", *flush_mask,
+				*flush_id);
+		}
+	}
+}
+
 /**
  * mdp5_ctl_commit() - Register Flush
  *
@@ -464,6 +497,8 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 	struct op_mode *pipeline = &ctl->pipeline;
 	unsigned long flags;
+	u32 flush_id = ctl->id;
+	u32 curr_ctl_flush_mask;
 
 	pipeline->start_mask &= ~flush_mask;
 
@@ -479,9 +514,13 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
 
 	flush_mask &= ctl_mgr->flush_hw_mask;
 
+	curr_ctl_flush_mask = flush_mask;
+
+	fix_for_single_flush(ctl, &flush_mask, &flush_id);
+
 	if (flush_mask) {
 		spin_lock_irqsave(&ctl->hw_lock, flags);
-		ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
+		ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
 		spin_unlock_irqrestore(&ctl->hw_lock, flags);
 	}
 
@@ -490,7 +529,7 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
 		refill_start_mask(ctl);
 	}
 
-	return flush_mask;
+	return curr_ctl_flush_mask;
 }
 
 u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
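mdp5_ctl_commit() now routes its mask through fix_for_single_flush(): the first paired CTL to commit only accumulates its FLUSH bits and writes nothing; once its partner commits, the combined mask is written in one go to the lower-numbered CTL's FLUSH register, and the hardware applies it to both paired CTLs. A standalone user-space sketch of that hand-off (simplified, not driver code; `fixup()` is a stand-in for fix_for_single_flush()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ctl {
	int id;
	bool flush_pending;
	struct ctl *pair;
};

static uint32_t single_flush_pending_mask;	/* as in mdp5_ctl_manager */

/* Mirrors fix_for_single_flush(): returns the mask to write (0 = defer)
 * and sets *flush_id to the CTL whose FLUSH register should be written. */
static uint32_t fixup(struct ctl *ctl, uint32_t mask, int *flush_id)
{
	*flush_id = ctl->id;
	if (!ctl->pair)
		return mask;		/* unpaired CTL: flush as usual */

	ctl->flush_pending = true;
	single_flush_pending_mask |= mask;
	if (!ctl->pair->flush_pending)
		return 0;		/* partner has not committed yet */

	*flush_id = ctl->id < ctl->pair->id ? ctl->id : ctl->pair->id;
	mask = single_flush_pending_mask;
	ctl->flush_pending = false;
	ctl->pair->flush_pending = false;
	single_flush_pending_mask = 0;
	return mask;
}

int main(void)
{
	struct ctl ctl0 = { .id = 0 }, ctl1 = { .id = 1 };
	uint32_t mask;
	int id;

	ctl0.pair = &ctl1;
	ctl1.pair = &ctl0;

	mask = fixup(&ctl1, 0x48, &id);	/* deferred: prints mask 0 */
	printf("ctl1 commit: write mask %#x\n", mask);

	mask = fixup(&ctl0, 0x44, &id);	/* combined single FLUSH: 0x4c */
	printf("ctl0 commit: write mask %#x to CTL%d\n", mask, id);
	return 0;
}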
@@ -504,32 +543,79 @@ int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
 }
 
 /*
+ * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
+ */
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
+{
+	struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
+	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+
+	/* do nothing silently if hw doesn't support single FLUSH */
+	if (!ctl_mgr->single_flush_supported)
+		return 0;
+
+	if (!enable) {
+		ctlx->pair = NULL;
+		ctly->pair = NULL;
+		mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0);
+		return 0;
+	} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
+		dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
+		return -EINVAL;
+	} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
+		dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
+		return -EINVAL;
+	}
+
+	ctlx->pair = ctly;
+	ctly->pair = ctlx;
+
+	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
+		MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
+
+	return 0;
+}
+
+/*
  * mdp5_ctl_request() - CTL allocation
  *
- * @return first free CTL
+ * Try to return a booked CTL if @intf_num is 1 or 2, an unbooked one otherwise.
+ * If no CTL is available in the preferred category, allocate from the other one.
+ *
+ * @return NULL if no CTL is available.
  */
 struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
 		int intf_num)
 {
 	struct mdp5_ctl *ctl = NULL;
+	const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
+	u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
 	unsigned long flags;
 	int c;
 
 	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
 
+	/* search the preferred category first */
 	for (c = 0; c < ctl_mgr->nctl; c++)
-		if (!ctl_mgr->ctls[c].busy)
-			break;
+		if ((ctl_mgr->ctls[c].status & checkm) == match)
+			goto found;
 
-	if (unlikely(c >= ctl_mgr->nctl)) {
-		dev_err(ctl_mgr->dev->dev, "No more CTL available!");
-		goto unlock;
-	}
+	dev_warn(ctl_mgr->dev->dev,
+		"fall back to the other CTL category for INTF %d!\n", intf_num);
+
+	match ^= CTL_STAT_BOOKED;
+	for (c = 0; c < ctl_mgr->nctl; c++)
+		if ((ctl_mgr->ctls[c].status & checkm) == match)
+			goto found;
 
+	dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+	goto unlock;
+
+found:
 	ctl = &ctl_mgr->ctls[c];
 	ctl->pipeline.intf.num = intf_num;
 	ctl->lm = -1;
-	ctl->busy = true;
+	ctl->status |= CTL_STAT_BUSY;
 	ctl->pending_ctl_trigger = 0;
 	DBG("CTL %d allocated", ctl->id);
 
@@ -558,9 +644,11 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
 }
 
 struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
-		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
+		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
 {
 	struct mdp5_ctl_manager *ctl_mgr;
+	const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
+	int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
 	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
 	unsigned long flags;
 	int c, ret;
@@ -594,14 +682,28 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
 		if (WARN_ON(!ctl_cfg->base[c])) {
 			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
 			ret = -EINVAL;
+			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 			goto fail;
 		}
 		ctl->ctlm = ctl_mgr;
 		ctl->id = c;
 		ctl->reg_offset = ctl_cfg->base[c];
-		ctl->busy = false;
+		ctl->status = 0;
 		spin_lock_init(&ctl->hw_lock);
 	}
+
+	/*
+	 * In the dual DSI case, CTL0 and CTL1 are always assigned to the two
+	 * DSI interfaces to support the single FLUSH feature (both CTL0 and
+	 * CTL1 are flushed by writing only CTL0's FLUSH register), keeping the
+	 * two DSI pipes in sync. Single FLUSH is supported from hw rev v3.0.
+	 */
+	if (rev >= 3) {
+		ctl_mgr->single_flush_supported = true;
+		/* Reserve CTL0/1 for INTF1/2 */
+		ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
+		ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
+	}
 	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
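The allocator now works in two passes: INTF1/2 prefer booked CTLs (CTL0/1 on hw rev >= 3), every other interface prefers unbooked ones, and each side falls back to the other category with a warning before giving up. Condensed to its core, the search looks roughly like this (a standalone sketch; `find_free_ctl()`, `pool` and `nctl` are illustrative stand-ins for mdp5_ctlm_request() and the manager's CTL array):

#include <stdint.h>

#define CTL_STAT_BUSY	0x1
#define CTL_STAT_BOOKED	0x2

/* Returns the index of a free CTL for intf_num, or -1 if none is left.
 * Pass 0 scans the preferred booking category, pass 1 the other one. */
static int find_free_ctl(const uint32_t *pool, int nctl, int intf_num)
{
	const uint32_t checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
	uint32_t match = (intf_num == 1 || intf_num == 2) ? CTL_STAT_BOOKED : 0;
	int pass, c;

	for (pass = 0; pass < 2; pass++, match ^= CTL_STAT_BOOKED)
		for (c = 0; c < nctl; c++)
			if ((pool[c] & checkm) == match)
				return c;	/* caller ORs in CTL_STAT_BUSY */
	return -1;
}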
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index 5f473fa..96148c6 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -23,7 +23,7 @@
  */
 struct mdp5_ctl_manager;
 struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
-		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
+		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd);
 void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
 void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
 
@@ -33,6 +33,7 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
  * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
  */
 struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num);
+
 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
 
 struct mdp5_interface;
@@ -41,6 +42,7 @@ int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
 int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
 
 int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
 
 /*
  * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 3fa1913..c9e32b0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -297,6 +297,7 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
 					struct drm_encoder *slave_encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+	struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
 	struct mdp5_kms *mdp5_kms;
 	int intf_num;
 	u32 data = 0;
@@ -319,12 +320,13 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
 
 	/* Make sure clocks are on when connectors calling this function. */
 	mdp5_enable(mdp5_kms);
-	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
-		MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
 	/* Dumb Panel, Sync mode */
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0);
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data);
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
+
+	mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
+
 	mdp5_disable(mdp5_kms);
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index cd587a6..0511cae 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -543,7 +543,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 		goto fail;
 	}
 
-	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
+	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
 	if (IS_ERR(mdp5_kms->ctlm)) {
 		ret = PTR_ERR(mdp5_kms->ctlm);
 		mdp5_kms->ctlm = NULL;
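Taken together, the pieces wire up as follows: mdp5_kms_init() passes the config handler down so the CTL manager can check the hw revision and book CTL0/1; when split display is enabled, the encoder pairs the master and slave CTLs (which sets the SPARE_0 single-FLUSH enable bit, now done in mdp5_ctl_pair() instead of directly in the encoder); and every commit funnels through fix_for_single_flush(). A sketch of the resulting call flow, in C comment form (names as in the patch; mask values illustrative):

/*
 * probe:
 *   mdp5_kms_init()
 *     mdp5_ctlm_init(dev, mmio, mdp5_kms->cfg)
 *       rev >= 3: single_flush_supported = true;
 *                 CTL0/CTL1 get CTL_STAT_BOOKED (reserved for INTF1/2)
 *
 * dual DSI bring-up:
 *   mdp5_encoder_set_split_display()
 *     mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true)
 *       writes MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN
 *
 * per frame:
 *   mdp5_ctl_commit(ctl1, mask1)  -> deferred, mask stashed
 *   mdp5_ctl_commit(ctl0, mask0)  -> ctl_write(REG_MDP5_CTL_FLUSH(0),
 *                                              mask0 | mask1);
 *                                    HW propagates the flush to CTL1
 */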