static struct tcm *containers[TILFMT_NFORMATS];
static struct dmm *omap_dmm;
+/* global spinlock protecting the alloc and idle lists */
+static DEFINE_SPINLOCK(list_lock);
+
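A statically defined lock is fully initialized at compile time, which is
what lets the spin_lock_init() call be dropped later in this patch and
makes the lock safe to take even on error paths that run before probe
finishes. A minimal sketch of the two styles, with hypothetical names:

#include <linux/spinlock.h>

/* static: ready before any code runs */
static DEFINE_SPINLOCK(static_lock);

/* dynamic: must be initialized before first use */
struct my_dev {
	spinlock_t lock;
};

static void my_dev_setup(struct my_dev *d)
{
	spin_lock_init(&d->lock);	/* required before spin_lock(&d->lock) */
}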
/* Geometry table */
#define GEOM(xshift, yshift, bytes_per_pixel) { \
.x_shft = (xshift), \
down(&dmm->engine_sem);
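	/* engine_sem is sized to the number of engines (sema_init below),
	 * so after down() at least one engine must be on the idle list
	 */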
/* grab an idle engine */
- spin_lock(&dmm->list_lock);
+ spin_lock(&list_lock);
if (!list_empty(&dmm->idle_head)) {
engine = list_entry(dmm->idle_head.next, struct refill_engine,
idle_node);
list_del(&engine->idle_node);
}
- spin_unlock(&dmm->list_lock);
+ spin_unlock(&list_lock);
BUG_ON(!engine);
}
cleanup:
- spin_lock(&dmm->list_lock);
+ spin_lock(&list_lock);
list_add(&engine->idle_node, &dmm->idle_head);
- spin_unlock(&dmm->list_lock);
+ spin_unlock(&list_lock);
up(&omap_dmm->engine_sem);
return ret;
}
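Together, the acquire path above and this cleanup path form a small
resource pool: a counting semaphore gates callers while a
spinlock-protected list hands out the actual engines. A self-contained
sketch of the same pattern, with all names hypothetical rather than
taken from this driver:

#include <linux/list.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>

struct pool_item {
	struct list_head node;
};

static LIST_HEAD(pool_idle);
static DEFINE_SPINLOCK(pool_lock);
static struct semaphore pool_sem;	/* sema_init(&pool_sem, nitems) */

static struct pool_item *pool_get(void)
{
	struct pool_item *item = NULL;

	down(&pool_sem);		/* blocks until an item is free */
	spin_lock(&pool_lock);
	if (!list_empty(&pool_idle)) {
		item = list_first_entry(&pool_idle, struct pool_item, node);
		list_del(&item->node);
	}
	spin_unlock(&pool_lock);
	return item;			/* non-NULL by the semaphore invariant */
}

static void pool_put(struct pool_item *item)
{
	spin_lock(&pool_lock);
	list_add(&item->node, &pool_idle);
	spin_unlock(&pool_lock);
	up(&pool_sem);
}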
/* add to allocation list */
- spin_lock(&omap_dmm->list_lock);
+ spin_lock(&list_lock);
list_add(&block->alloc_node, &omap_dmm->alloc_head);
- spin_unlock(&omap_dmm->list_lock);
+ spin_unlock(&list_lock);
return block;
}
return ERR_PTR(-ENOMEM);
}
- spin_lock(&omap_dmm->list_lock);
+ spin_lock(&list_lock);
list_add(&block->alloc_node, &omap_dmm->alloc_head);
- spin_unlock(&omap_dmm->list_lock);
+ spin_unlock(&list_lock);
return block;
}
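Both allocation paths record the new block on omap_dmm->alloc_head under
list_lock; that bookkeeping is what lets omap_dmm_remove() below reclaim
any blocks still outstanding at teardown.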
if (block->area.tcm)
dev_err(omap_dmm->dev, "failed to release block\n");
- spin_lock(&omap_dmm->list_lock);
+ spin_lock(&list_lock);
list_del(&block->alloc_node);
- spin_unlock(&omap_dmm->list_lock);
+ spin_unlock(&list_lock);
kfree(block);
return ret;
if (omap_dmm) {
/* free all area regions */
- spin_lock(&omap_dmm->list_lock);
+ spin_lock(&list_lock);
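	/* entries are unlinked inside the loop, hence the _safe iterator */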
list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
alloc_node) {
list_del(&block->alloc_node);
kfree(block);
}
- spin_unlock(&omap_dmm->list_lock);
+ spin_unlock(&list_lock);
for (i = 0; i < omap_dmm->num_lut; i++)
	if (omap_dmm->tcm && omap_dmm->tcm[i])
		tcm_deinit(omap_dmm->tcm[i]);

vfree(omap_dmm->lut);
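	/* platform_get_irq() reports failure as a negative errno, so any
	 * value > 0 is a live IRQ that needs freeing
	 */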
- if (omap_dmm->irq != -1)
+ if (omap_dmm->irq > 0)
free_irq(omap_dmm->irq, omap_dmm);
iounmap(omap_dmm->base);
goto fail;
}
+ /* initialize lists early: the error path calls omap_dmm_remove(),
+  * which expects valid list heads
+  */
+ INIT_LIST_HEAD(&omap_dmm->alloc_head);
+ INIT_LIST_HEAD(&omap_dmm->idle_head);
+
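Initializing the list heads this early matters because an empty list is
safe to walk while an uninitialized one is garbage. A minimal
illustration of that property, with hypothetical names:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
};

static LIST_HEAD(head);	/* compile-time form of INIT_LIST_HEAD */

static void drain(void)
{
	struct item *it, *tmp;

	/* for an empty list the head points at itself, so the body
	 * is simply never entered
	 */
	list_for_each_entry_safe(it, tmp, &head, node) {
		list_del(&it->node);
		kfree(it);
	}
}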
/* lookup hwmod data - base address and irq */
mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!mem) {
}
sema_init(&omap_dmm->engine_sem, omap_dmm->num_engines);
- INIT_LIST_HEAD(&omap_dmm->idle_head);
for (i = 0; i < omap_dmm->num_engines; i++) {
omap_dmm->engines[i].id = i;
omap_dmm->engines[i].dmm = omap_dmm;
containers[TILFMT_32BIT] = omap_dmm->tcm[0];
containers[TILFMT_PAGE] = omap_dmm->tcm[0];
- INIT_LIST_HEAD(&omap_dmm->alloc_head);
- spin_lock_init(&omap_dmm->list_lock);
-
area = (struct tcm_area) {
.is2d = true,
.tcm = NULL,
return 0;
fail:
- omap_dmm_remove(dev);
+ if (omap_dmm_remove(dev))
+ dev_err(&dev->dev, "cleanup failed\n");
return ret;
}
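/* one pointer per map row; each row spans w_adj cells plus a
 * terminating NUL so it can be printed as a plain string
 */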
map[i] = global_map + i * (w_adj + 1);
map[i][w_adj] = 0;
}
- spin_lock_irqsave(&omap_dmm->list_lock, flags);
+ spin_lock_irqsave(&list_lock, flags);
list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
if (block->fmt != TILFMT_PAGE) {
}
}
- spin_unlock_irqrestore(&omap_dmm->list_lock, flags);
+ spin_unlock_irqrestore(&list_lock, flags);
if (s) {
seq_printf(s, "BEGIN DMM TILER MAP\n");