struct blkg_iostat_set *bis =
per_cpu_ptr(blkg->iostat_cpu, cpu);
memset(bis, 0, sizeof(*bis));
+
+ /* Re-initialize the cleared blkg_iostat_set */
+ u64_stats_init(&bis->sync);
+ bis->blkg = blkg;
}
memset(&blkg->iostat, 0, sizeof(blkg->iostat));
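+ /* Re-initialize the u64_stats seqcount cleared by the memset above */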
+ u64_stats_init(&blkg->iostat.sync);
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
return -ENODEV;
}
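+ /*
+  * Hold rq_qos_mutex across the configuration update (released in
+  * blkg_conf_exit()) and make sure the disk is still live before
+  * handing the bdev back to the caller.
+  */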
+ mutex_lock(&bdev->bd_queue->rq_qos_mutex);
+ if (!disk_live(bdev->bd_disk)) {
+ blkdev_put_no_open(bdev);
+ mutex_unlock(&bdev->bd_queue->rq_qos_mutex);
+ return -ENODEV;
+ }
+
ctx->body = input;
ctx->bdev = bdev;
return 0;
*/
void blkg_conf_exit(struct blkg_conf_ctx *ctx)
__releases(&ctx->bdev->bd_queue->queue_lock)
+ __releases(&ctx->bdev->bd_queue->rq_qos_mutex)
{
if (ctx->blkg) {
spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
}
if (ctx->bdev) {
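+ /* Drop the rq_qos_mutex taken when the bdev was opened */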
+ mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);
blkdev_put_no_open(ctx->bdev);
ctx->body = NULL;
ctx->bdev = NULL;
struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
struct llist_node *lnode;
struct blkg_iostat_set *bisc, *next_bisc;
+ unsigned long flags;
rcu_read_lock();
* When flushing from cgroup, cgroup_rstat_lock is always held, so
* this lock won't cause contention most of the time.
*/
- raw_spin_lock(&blkg_stat_lock);
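+ /* Interrupts stay disabled while blkg_stat_lock is held */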
+ raw_spin_lock_irqsave(&blkg_stat_lock, flags);
/*
* Iterate only the iostat_cpu entries queued in the lockless list.
blkcg_iostat_update(parent, &blkg->iostat.cur,
&blkg->iostat.last);
}
- raw_spin_unlock(&blkg_stat_lock);
+ raw_spin_unlock_irqrestore(&blkg_stat_lock, flags);
out:
rcu_read_unlock();
}
struct blkg_iostat_set *bis;
unsigned long flags;
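+ /* These rstat-based io stats are only used on the unified (v2) hierarchy */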
+ if (!cgroup_subsys_on_dfl(io_cgrp_subsys))
+ return;
+
/* Root-level stats are sourced from system-wide IO stats */
if (!cgroup_parent(blkcg->css.cgroup))
return;
}
u64_stats_update_end_irqrestore(&bis->sync, flags);
- if (cgroup_subsys_on_dfl(io_cgrp_subsys))
- cgroup_rstat_updated(blkcg->css.cgroup, cpu);
+ cgroup_rstat_updated(blkcg->css.cgroup, cpu);
put_cpu();
}