		DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
};
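+/*
+ * MMIO accessors for the DMM register space. Routing all register
+ * reads and writes through one spot leaves a single place to hang
+ * tracing or access workarounds later, should any be needed.
+ */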
+static u32 dmm_read(struct dmm *dmm, u32 reg)
+{
+	return readl(dmm->base + reg);
+}
+
+static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
+{
+	writel(val, dmm->base + reg);
+}
+
/* simple allocator to grab next 16 byte aligned memory from txn */
static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
{
	i = DMM_FIXED_RETRY_COUNT;
	while (true) {
-		r = readl(dmm->base + reg[PAT_STATUS][engine->id]);
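+		/* sample the engine status; any error bit aborts the wait */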
+		r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
		err = r & DMM_PATSTATUS_ERR;
		if (err)
			return -EFAULT;
static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
	struct dmm *dmm = arg;
-	uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS);
+	uint32_t status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
	int i;
	/* ack IRQ */
-	writel(status, dmm->base + DMM_PAT_IRQSTATUS);
+	dmm_write(dmm, status, DMM_PAT_IRQSTATUS);
	for (i = 0; i < dmm->num_engines; i++) {
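+		/* each refill engine reports in its own byte of status */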
		if (status & DMM_IRQSTAT_LST) {
	txn->last_pat->next_pa = 0;
	/* write to PAT_DESCR to clear out any pending transaction */
-	writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);
+	dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
	/* wait for engine ready: */
	ret = wait_status(engine, DMM_PATSTATUS_READY);
	smp_mb();
	/* kick reload */
-	writel(engine->refill_pa,
-			dmm->base + reg[PAT_DESCR][engine->id]);
+	dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);
	if (wait) {
		if (!wait_for_completion_timeout(&engine->compl,
	omap_dmm->dev = &dev->dev;
-	hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
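+	/* query hw for the number of refill engines and LUTs */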
+	hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
	omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
	omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
	omap_dmm->container_width = 256;
	atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);
	/* read out actual LUT width and height */
-	pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
+	pat_geom = dmm_read(omap_dmm, DMM_PAT_GEOMETRY);
	omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
	omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
	omap_dmm->num_lut++;
	/* initialize DMM registers */
-	writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
-	writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
-	writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0);
-	writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE);
-	writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0);
-	writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1);
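+	/* default PAT view, view map and TILER orientation settings */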
+	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__0);
+	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__1);
+	dmm_write(omap_dmm, 0x80808080, DMM_PAT_VIEW_MAP__0);
+	dmm_write(omap_dmm, 0x80000000, DMM_PAT_VIEW_MAP_BASE);
+	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
+	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);
	ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
			"omap_dmm_irq_handler", omap_dmm);
	 * buffers for accelerated pan/scroll) and FILL_DSC<n> which
	 * we just generally don't care about.
	 */
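+	/* 0x7e/engine: all but FILL_DSC (bit 0) and ERR_LUT_MISS (bit 7) */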
-	writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET);
+	dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);
	omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!omap_dmm->dummy_page) {