@@ ... @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
 {
 	struct cache_req *req;
 	unsigned long flags;
+	u32 old_sleep_val, old_wake_val;
 
 	spin_lock_irqsave(&ctrlr->cache_lock, flags);
 	req = __find_req(ctrlr, cmd->addr);
 	if (req)
 		goto existing;
 
 	req = kzalloc(sizeof(*req), GFP_ATOMIC);
 	if (!req) {
 		req = ERR_PTR(-ENOMEM);
 		goto unlock;
 	}
 
 	req->addr = cmd->addr;
 	req->sleep_val = req->wake_val = UINT_MAX;
-	INIT_LIST_HEAD(&req->list);
 	list_add_tail(&req->list, &ctrlr->cache);
 
 existing:
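+	/* Snapshot the cached values so a real change can be detected below. */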
+	old_sleep_val = req->sleep_val;
+	old_wake_val = req->wake_val;
+
 	switch (state) {
 	case RPMH_ACTIVE_ONLY_STATE:
-		if (req->sleep_val != UINT_MAX)
-			req->wake_val = cmd->data;
-		break;
 	case RPMH_WAKE_ONLY_STATE:
 		req->wake_val = cmd->data;
 		break;
 	case RPMH_SLEEP_STATE:
 		req->sleep_val = cmd->data;
 		break;
-	default:
-		break;
 	}
 
-	ctrlr->dirty = true;
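+	/*
+	 * Mark the controller dirty only when a fully programmed value pair
+	 * actually changed; |= preserves dirtiness set by earlier requests.
+	 */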
+	ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
+			 req->wake_val != old_wake_val) &&
+			 req->sleep_val != UINT_MAX &&
+			 req->wake_val != UINT_MAX;
+
 unlock:
 	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
 
@@ ... @@ static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
 	spin_lock_irqsave(&ctrlr->cache_lock, flags);
 	list_add_tail(&req->list, &ctrlr->batch_cache);
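+	/* Caching a new batch changes the state; flag it under cache_lock. */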
+	ctrlr->dirty = true;
 	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
 }
@@ ... @@ static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
 	spin_lock_irqsave(&ctrlr->cache_lock, flags);
 	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
 		kfree(req);
 	INIT_LIST_HEAD(&ctrlr->batch_cache);
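+	/* Emptying the batch cache is itself a state change; mark it dirty. */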
+	ctrlr->dirty = true;
 	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
 }
@@ ... @@ int rpmh_invalidate(const struct device *dev)
 	int ret;
 
 	invalidate_batch(ctrlr);
-	ctrlr->dirty = true;
 
 	do {
 		ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));