tg->bytes_disp[rw] = 0;
tg->io_disp[rw] = 0;
- atomic_set(&tg->io_split_cnt[rw], 0);

/*
* Previous slice has expired. We must have trimmed it after last
* bio dispatch. That means since start of last slice, we never used
tg->slice_start[rw] = jiffies;
tg->slice_end[rw] = jiffies + tg->td->throtl_slice;

- atomic_set(&tg->io_split_cnt[rw], 0);
-
throtl_log(&tg->service_queue,
"[%c] new slice start=%lu end=%lu jiffies=%lu",
rw == READ ? 'R' : 'W', tg->slice_start[rw],
tg->slice_end[rw], jiffies);
}
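
For context on what these two hunks touch: a throttle slice is a short accounting window, and starting a new slice zeroes the per-direction dispatch counters and re-arms the window. Below is a minimal userspace sketch of that bookkeeping, assuming a millisecond clock; `struct slice`, `now_ms` and `THROTL_SLICE_MS` are illustrative names, not kernel API.

/* --- illustrative sketch, not part of the patch --- */
#include <stdio.h>
#include <time.h>

#define THROTL_SLICE_MS 100UL	/* assumed slice length */

struct slice {
	unsigned long bytes_disp;	/* bytes dispatched this slice */
	unsigned long io_disp;		/* IOs dispatched this slice */
	unsigned long slice_start;	/* window start, in ms */
	unsigned long slice_end;	/* window end, in ms */
};

static unsigned long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long)ts.tv_sec * 1000UL + ts.tv_nsec / 1000000UL;
}

/* Models throtl_start_new_slice(): zero the counters, re-arm the window. */
static void start_new_slice(struct slice *s)
{
	s->bytes_disp = 0;
	s->io_disp = 0;
	s->slice_start = now_ms();
	s->slice_end = s->slice_start + THROTL_SLICE_MS;
}

int main(void)
{
	struct slice s;

	start_new_slice(&s);
	printf("new slice start=%lu end=%lu\n", s.slice_start, s.slice_end);
	return 0;
}
/* --- end sketch --- */
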
- if (iops_limit != UINT_MAX)
- tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);
-
if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
if (wait)
	*wait = 0;
return true;
}
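
The check above asks whether the bio fits under both the bps and iops budgets for the current slice, reporting a wait time when it does not. Here is a rough userspace model of the iops half, with the window rounded up by one slice so a freshly started slice admits some IO; the kernel's rounding differs in detail, and `within_iops_limit` and `SLICE_MS` are illustrative names.

/* --- illustrative sketch, not part of the patch --- */
#include <stdbool.h>
#include <stdio.h>

#define SLICE_MS 100UL	/* assumed slice length */

/*
 * Does one more IO fit under the iops budget for the elapsed part of the
 * slice?  If not, report how long the caller should wait.
 */
static bool within_iops_limit(unsigned long io_disp, unsigned long iops_limit,
			      unsigned long elapsed_ms, unsigned long *wait_ms)
{
	unsigned long allowed = iops_limit * (elapsed_ms + SLICE_MS) / 1000;

	if (io_disp + 1 <= allowed) {
		if (wait_ms)
			*wait_ms = 0;
		return true;
	}
	if (wait_ms)	/* time needed before one more IO fits */
		*wait_ms = (io_disp + 1) * 1000 / iops_limit - elapsed_ms;
	return false;
}

int main(void)
{
	unsigned long wait;

	/* 9 IOs done, 100 iops limit, 50 ms in: the next IO fits. */
	printf("fits=%d\n", within_iops_limit(9, 100, 50, &wait));
	/* 20 IOs done already: must wait. */
	printf("fits=%d wait=%lums\n", within_iops_limit(20, 100, 50, &wait), wait);
	return 0;
}
/* --- end sketch --- */
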
if (tg->iops[READ][LIMIT_LOW]) {
- tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0);
iops = tg->last_io_disp[READ] * HZ / elapsed_time;
if (iops >= tg->iops[READ][LIMIT_LOW])
tg->last_low_overflow_time[READ] = now;
}

if (tg->iops[WRITE][LIMIT_LOW]) {
- tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);
iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
if (iops >= tg->iops[WRITE][LIMIT_LOW])
tg->last_low_overflow_time[WRITE] = now;
}
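
With the split counters gone, the downgrade check computes the observed rate purely from `last_io_disp`: IOs dispatched, scaled by HZ over the elapsed ticks, compared against the LIMIT_LOW threshold. A small sketch of that arithmetic, assuming HZ of 1000 for readability:

/* --- illustrative sketch, not part of the patch --- */
#include <stdbool.h>
#include <stdio.h>

#define HZ 1000UL	/* assumed tick rate for the example */

/*
 * Has the group been running at or above its low iops limit?  This is the
 * iops = last_io_disp * HZ / elapsed_time comparison from the hunk above.
 */
static bool exceeds_low_limit(unsigned long io_disp,
			      unsigned long elapsed_ticks,
			      unsigned long iops_low)
{
	unsigned long iops = io_disp * HZ / elapsed_ticks;

	return iops >= iops_low;
}

int main(void)
{
	/* 500 IOs over 2000 ticks (2 s) is 250 iops, above a low limit of 200. */
	printf("exceeds=%d\n", exceeds_low_limit(500, 2000, 200));
	return 0;
}
/* --- end sketch --- */
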
#endif

-void blk_throtl_charge_bio_split(struct bio *bio)
-{
- struct blkcg_gq *blkg = bio->bi_blkg;
- struct throtl_grp *parent = blkg_to_tg(blkg);
- struct throtl_service_queue *parent_sq;
- bool rw = bio_data_dir(bio);
-
- do {
- if (!parent->has_rules[rw])
- break;
-
- atomic_inc(&parent->io_split_cnt[rw]);
- atomic_inc(&parent->last_io_split_cnt[rw]);
-
- parent_sq = parent->service_queue.parent_sq;
- parent = sq_to_tg(parent_sq);
- } while (parent);
-}
-
bool __blk_throtl_bio(struct bio *bio)
{
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
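
The function removed above charged one split IO to every ancestor group with rules configured, using deferred atomic counters that the dispatch path folded in later. The sketch below models that upward walk with C11 atomics; `struct group` and `charge_split` are illustrative names. As the other removals in this patch suggest, the counters become unnecessary once split bios are charged directly when they re-enter the throttling path on resubmission.

/* --- illustrative sketch, not part of the patch --- */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct group {
	struct group *parent;		/* NULL at the root */
	bool has_rules[2];		/* READ/WRITE limits configured? */
	atomic_ulong split_cnt[2];	/* models io_split_cnt[rw] */
};

/*
 * Walk from a group toward the root, charging one split IO at each level
 * that has rules, the way the removed blk_throtl_charge_bio_split() did.
 */
static void charge_split(struct group *g, int rw)
{
	do {
		if (!g->has_rules[rw])
			break;
		atomic_fetch_add(&g->split_cnt[rw], 1);
		g = g->parent;
	} while (g);
}

int main(void)
{
	struct group root  = { .has_rules = { true, true } };
	struct group child = { .parent = &root, .has_rules = { true, true } };

	charge_split(&child, 0);	/* one READ split */
	printf("child=%lu root=%lu\n",
	       atomic_load(&child.split_cnt[0]),
	       atomic_load(&root.split_cnt[0]));
	return 0;
}
/* --- end sketch --- */
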
unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
unsigned long bio_cnt_reset_time;

- atomic_t io_split_cnt[2];
- atomic_t last_io_split_cnt[2];
-
struct blkg_rwstat stat_bytes;
struct blkg_rwstat stat_ios;
};
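
The two atomic arrays dropped from the struct implemented a classic producer/consumer hand-off: one side increments, the other folds the pending count into its own total with an exchange-to-zero. A self-contained model of that pattern, with C11 atomics standing in for the kernel's atomic_t:

/* --- illustrative sketch, not part of the patch --- */
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong split_cnt;	/* producer side: pending split IOs */
static unsigned long io_disp;	/* consumer side: folded total */

static void producer_charge(void)
{
	atomic_fetch_add(&split_cnt, 1);	/* like atomic_inc() */
}

static void consumer_fold(void)
{
	/* like atomic_xchg(&cnt, 0): take the pending count, reset it */
	io_disp += atomic_exchange(&split_cnt, 0);
}

int main(void)
{
	producer_charge();
	producer_charge();
	consumer_fold();
	printf("io_disp=%lu pending=%lu\n", io_disp, atomic_load(&split_cnt));
	return 0;
}
/* --- end sketch --- */
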
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
-static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
#else /* CONFIG_BLK_DEV_THROTTLING */
int blk_throtl_init(struct request_queue *q);
void blk_throtl_exit(struct request_queue *q);
void blk_throtl_register_queue(struct request_queue *q);
-void blk_throtl_charge_bio_split(struct bio *bio);
bool __blk_throtl_bio(struct bio *bio);
static inline bool blk_throtl_bio(struct bio *bio)
{