};
static struct {
- spinlock_t lock;
struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS];
struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];
bool irq_enabled;
} dss_cache;
+/* protects dss_cache */
+static spinlock_t data_lock;
+
static struct ovl_priv_data *get_ovl_priv(struct omap_overlay *ovl)
{
return &dss_cache.ovl_priv_data_array[ovl->id];
}
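Kernel convention is to document a lock's coverage next to its declaration, as the new /* protects dss_cache */ comment does. That claim can also be enforced at runtime with lockdep. Below is a minimal sketch of a checked accessor in the style of get_ovl_priv() above; the _checked name is made up for illustration and is not part of the driver:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

/* hypothetical variant: documents and asserts that callers hold data_lock */
static struct ovl_priv_data *get_ovl_priv_checked(struct omap_overlay *ovl)
{
	lockdep_assert_held(&data_lock);
	return &dss_cache.ovl_priv_data_array[ovl->id];
}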
@@ ... @@
void dss_apply_init(void)
{
- spin_lock_init(&dss_cache.lock);
+ spin_lock_init(&data_lock);
}
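Since the lock is now a plain file-scope variable, it could equally be initialized statically, which would make the spin_lock_init() call in dss_apply_init() unnecessary. The patch keeps the runtime init; the static form, shown for comparison only, would be:

#include <linux/spinlock.h>

/* protects dss_cache */
static DEFINE_SPINLOCK(data_lock);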
@@ ... @@ static bool ovl_manual_update(struct omap_overlay *ovl)
unsigned long flags;
bool shadow_dirty, dirty;
- spin_lock_irqsave(&dss_cache.lock, flags);
+ spin_lock_irqsave(&data_lock, flags);
dirty = mp->dirty;
shadow_dirty = mp->shadow_dirty;
- spin_unlock_irqrestore(&dss_cache.lock, flags);
+ spin_unlock_irqrestore(&data_lock, flags);
if (!dirty && !shadow_dirty) {
r = 0;
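Both wait paths (this one for managers, the one below for overlays) use the same discipline: snapshot the dirty flags under data_lock, drop the lock, and only then act on the copies, so nothing sleeps or waits with the spinlock held. Note the irqsave variants, since the lock is also taken from interrupt context. A sketch of a polling wait built on this pattern follows; it is illustrative only, with a made-up timeout, and a real driver would more likely block on display interrupts than poll:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>

/* illustrative only: poll the flags until clean or a 500 ms timeout */
static int wait_until_clean(struct mgr_priv_data *mp)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned long flags;
	bool dirty, shadow_dirty;

	for (;;) {
		spin_lock_irqsave(&data_lock, flags);
		dirty = mp->dirty;
		shadow_dirty = mp->shadow_dirty;
		spin_unlock_irqrestore(&data_lock, flags);

		if (!dirty && !shadow_dirty)
			return 0;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		/* sleep outside the lock; never inside a spinlock */
		usleep_range(1000, 2000);
	}
}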
@@ ... @@
unsigned long flags;
bool shadow_dirty, dirty;
- spin_lock_irqsave(&dss_cache.lock, flags);
+ spin_lock_irqsave(&data_lock, flags);
dirty = op->dirty;
shadow_dirty = op->shadow_dirty;
- spin_unlock_irqrestore(&dss_cache.lock, flags);
+ spin_unlock_irqrestore(&data_lock, flags);
if (!dirty && !shadow_dirty) {
r = 0;
@@ ... @@
for (i = 0; i < num_mgrs; i++)
mgr_busy[i] = dispc_mgr_go_busy(i);
- spin_lock(&dss_cache.lock);
+ spin_lock(&data_lock);
for (i = 0; i < num_ovls; ++i) {
ovl = omap_dss_get_overlay(i);
dss_unregister_vsync_isr();
end:
- spin_unlock(&dss_cache.lock);
+ spin_unlock(&data_lock);
}
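In the VSYNC ISR path the lock is taken with plain spin_lock() rather than spin_lock_irqsave(). That is safe because the handler runs in interrupt context with interrupts already disabled locally, while every process-context user disables interrupts via the irqsave variants, so the handler can never spin on a lock held on its own CPU. A minimal sketch of the discipline; the handler name and shape are hypothetical, not the omapdss ISR API:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* hypothetical hardirq handler: IRQs already off, plain lock suffices */
static irqreturn_t demo_vsync_isr(int irq, void *dev_id)
{
	spin_lock(&data_lock);
	/* ... clear shadow_dirty bookkeeping, decide whether to unregister ... */
	spin_unlock(&data_lock);

	return IRQ_HANDLED;
}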
@@ ... @@ static int omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
if (r)
return r;
- spin_lock_irqsave(&dss_cache.lock, flags);
+ spin_lock_irqsave(&data_lock, flags);
/* Configure overlays */
list_for_each_entry(ovl, &mgr->overlays, list)
dss_write_regs();
}
- spin_unlock_irqrestore(&dss_cache.lock, flags);
+ spin_unlock_irqrestore(&data_lock, flags);
dispc_runtime_put();
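Note the ordering in the apply path: dispc_runtime_get() and dispc_runtime_put() go through runtime PM and may sleep, so they bracket the critical section from outside, and data_lock is held only across the non-sleeping register configuration. The shape, reduced to a sketch with the surrounding logic elided:

#include <linux/spinlock.h>

int dispc_runtime_get(void);	/* may sleep (runtime PM) */
void dispc_runtime_put(void);	/* may sleep (runtime PM) */

static int apply_sketch(void)
{
	unsigned long flags;
	int r;

	r = dispc_runtime_get();	/* take before the spinlock */
	if (r)
		return r;

	spin_lock_irqsave(&data_lock, flags);
	/* configure overlays/manager and write registers: no sleeping here */
	spin_unlock_irqrestore(&data_lock, flags);

	dispc_runtime_put();		/* release only after unlocking */

	return 0;
}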