*/
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
{
- struct dpu_irq_callback *cb;
-
VERB("irq_idx=%d\n", irq_idx);
- if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]))
+ if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb)
DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
- atomic_inc(&dpu_kms->hw_intr->irq_counts[irq_idx]);
+ atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);
/*
* Perform registered function callback
*/
- list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[irq_idx], list)
- if (cb->func)
- cb->func(cb->arg, irq_idx);
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx);
}
irqreturn_t dpu_core_irq(struct msm_kms *kms)
struct dpu_mdss_cfg *m)
{
struct dpu_hw_intr *intr;
+ int nirq = MDP_INTR_MAX * 32;
if (!addr || !m)
return ERR_PTR(-EINVAL);
- intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+ intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL);
if (!intr)
return ERR_PTR(-ENOMEM);
__intr_offset(m, addr, &intr->hw);
- intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32;
-
- intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
- GFP_KERNEL);
- if (intr->cache_irq_mask == NULL) {
- kfree(intr);
- return ERR_PTR(-ENOMEM);
- }
+ intr->total_irqs = nirq;
intr->irq_mask = m->mdss_irqs;
void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
- if (intr) {
- kfree(intr->cache_irq_mask);
-
- kfree(intr->irq_cb_tbl);
- kfree(intr->irq_counts);
-
+ if (intr)
kfree(intr);
- }
}
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
- struct dpu_irq_callback *register_irq_cb)
+ void (*irq_cb)(void *arg, int irq_idx),
+ void *irq_arg)
{
unsigned long irq_flags;
+ int ret;
- if (!dpu_kms->hw_intr->irq_cb_tbl) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
-
- if (!register_irq_cb || !register_irq_cb->func) {
- DPU_ERROR("invalid irq_cb:%d func:%d\n",
- register_irq_cb != NULL,
- register_irq_cb ?
- register_irq_cb->func != NULL : -1);
+ if (!irq_cb) {
+ DPU_ERROR("invalid irq_idx:%d irq_cb:%ps\n", irq_idx, irq_cb);
return -EINVAL;
}
VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
- trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
- list_del_init(&register_irq_cb->list);
- list_add_tail(&register_irq_cb->list,
- &dpu_kms->hw_intr->irq_cb_tbl[irq_idx]);
- if (list_is_first(&register_irq_cb->list,
- &dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
- int ret = dpu_hw_intr_enable_irq_locked(
+
+ if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ return -EBUSY;
+ }
+
+ trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
+ dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;
+
+ ret = dpu_hw_intr_enable_irq_locked(
dpu_kms->hw_intr,
irq_idx);
- if (ret)
- DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+ if (ret)
+ DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
irq_idx);
- }
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
return 0;
}
-int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
- struct dpu_irq_callback *register_irq_cb)
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
{
unsigned long irq_flags;
-
- if (!dpu_kms->hw_intr->irq_cb_tbl) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
-
- if (!register_irq_cb || !register_irq_cb->func) {
- DPU_ERROR("invalid irq_cb:%d func:%d\n",
- register_irq_cb != NULL,
- register_irq_cb ?
- register_irq_cb->func != NULL : -1);
- return -EINVAL;
- }
+ int ret;
if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
- trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
- list_del_init(&register_irq_cb->list);
- /* empty callback list but interrupt is still enabled */
- if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
- int ret = dpu_hw_intr_disable_irq_locked(
- dpu_kms->hw_intr,
- irq_idx);
- if (ret)
- DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
- irq_idx);
- VERB("irq_idx=%d ret=%d\n", irq_idx, ret);
- }
+ trace_dpu_core_irq_unregister_callback(irq_idx);
+
+ ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to disable IRQ for irq_idx:%d: %d\n",
+ irq_idx, ret);
+
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
+ dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;
+
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
return 0;
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
struct dpu_kms *dpu_kms = s->private;
- struct dpu_irq_callback *cb;
unsigned long irq_flags;
- int i, irq_count, cb_count;
-
- if (WARN_ON(!dpu_kms->hw_intr->irq_cb_tbl))
- return 0;
+ int i, irq_count;
+ void *cb;
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
- cb_count = 0;
- irq_count = atomic_read(&dpu_kms->hw_intr->irq_counts[i]);
- list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[i], list)
- cb_count++;
+ irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
+ cb = dpu_kms->hw_intr->irq_tbl[i].cb;
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
- if (irq_count || cb_count)
- seq_printf(s, "idx:%d irq:%d cb:%d\n",
- i, irq_count, cb_count);
+ if (irq_count || cb)
+ seq_printf(s, "idx:%d irq:%d cb:%ps\n", i, irq_count, cb);
}
return 0;
dpu_disable_all_irqs(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
- /* Create irq callbacks for all possible irq_idx */
- dpu_kms->hw_intr->irq_cb_tbl = kcalloc(dpu_kms->hw_intr->total_irqs,
- sizeof(struct list_head), GFP_KERNEL);
- dpu_kms->hw_intr->irq_counts = kcalloc(dpu_kms->hw_intr->total_irqs,
- sizeof(atomic_t), GFP_KERNEL);
- for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
- INIT_LIST_HEAD(&dpu_kms->hw_intr->irq_cb_tbl[i]);
- atomic_set(&dpu_kms->hw_intr->irq_counts[i], 0);
- }
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
+ atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
}
void dpu_core_irq_uninstall(struct msm_kms *kms)
pm_runtime_get_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
- if (!list_empty(&dpu_kms->hw_intr->irq_cb_tbl[i]))
+ if (dpu_kms->hw_intr->irq_tbl[i].cb)
DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
dpu_clear_irqs(dpu_kms);
TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
);
-DECLARE_EVENT_CLASS(dpu_core_irq_callback_template,
- TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+TRACE_EVENT(dpu_core_irq_register_callback,
+ TP_PROTO(int irq_idx, void *callback),
TP_ARGS(irq_idx, callback),
TP_STRUCT__entry(
__field( int, irq_idx )
- __field( struct dpu_irq_callback *, callback)
+ __field( void *, callback)
),
TP_fast_assign(
__entry->irq_idx = irq_idx;
__entry->callback = callback;
),
- TP_printk("irq_idx:%d callback:%pK", __entry->irq_idx,
+ TP_printk("irq_idx:%d callback:%ps", __entry->irq_idx,
__entry->callback)
);
-DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_register_callback,
- TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
- TP_ARGS(irq_idx, callback)
-);
-DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_unregister_callback,
- TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
- TP_ARGS(irq_idx, callback)
+
+TRACE_EVENT(dpu_core_irq_unregister_callback,
+ TP_PROTO(int irq_idx),
+ TP_ARGS(irq_idx),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ ),
+ TP_printk("irq_idx:%d", __entry->irq_idx)
);
TRACE_EVENT(dpu_core_perf_update_clk,