--- a/block/commit.c
+++ b/block/commit.c
@@ ... @@ static void coroutine_fn commit_run(void *opaque)
CommitBlockJob *s = opaque;
CommitCompleteData *data;
int64_t sector_num, end;
+ uint64_t delay_ns = 0;
int ret = 0;
int n = 0;
void *buf = NULL;
@@ ... @@ static void coroutine_fn commit_run(void *opaque)
buf = blk_blockalign(s->top, COMMIT_BUFFER_SIZE);
for (sector_num = 0; sector_num < end; sector_num += n) {
- uint64_t delay_ns = 0;
bool copy;
-wait:
/* Note that even when no rate limit is applied we need to yield
* with no pending I/O here so that bdrv_drain_all() returns.
*/
@@ ... @@ static void coroutine_fn commit_run(void *opaque)
copy = (ret == 1);
trace_commit_one_iteration(s, sector_num, n, ret);
if (copy) {
- if (s->common.speed) {
- delay_ns = ratelimit_calculate_delay(&s->limit, n);
- if (delay_ns > 0) {
- goto wait;
- }
- }
ret = commit_populate(s->top, s->base, sector_num, n, buf);
bytes_written += n * BDRV_SECTOR_SIZE;
}
@@ ... @@ static void coroutine_fn commit_run(void *opaque)
}
/* Publish progress */
s->common.offset += n * BDRV_SECTOR_SIZE;
+
+ if (copy && s->common.speed) {
+ delay_ns = ratelimit_calculate_delay(&s->limit, n);
+ }
}
ret = 0;
--- a/block/stream.c
+++ b/block/stream.c
@@ ... @@ static void coroutine_fn stream_run(void *opaque)
BlockDriverState *base = s->base;
int64_t sector_num = 0;
int64_t end = -1;
+ uint64_t delay_ns = 0;
int error = 0;
int ret = 0;
int n = 0;
@@ ... @@ static void coroutine_fn stream_run(void *opaque)
}
for (sector_num = 0; sector_num < end; sector_num += n) {
- uint64_t delay_ns = 0;
bool copy;
-wait:
/* Note that even when no rate limit is applied we need to yield
* with no pending I/O here so that bdrv_drain_all() returns.
*/
@@ ... @@ static void coroutine_fn stream_run(void *opaque)
}
trace_stream_one_iteration(s, sector_num, n, ret);
if (copy) {
- if (s->common.speed) {
- delay_ns = ratelimit_calculate_delay(&s->limit, n);
- if (delay_ns > 0) {
- goto wait;
- }
- }
ret = stream_populate(blk, sector_num, n, buf);
}
if (ret < 0) {
@@ ... @@ static void coroutine_fn stream_run(void *opaque)
/* Publish progress */
s->common.offset += n * BDRV_SECTOR_SIZE;
+ if (copy && s->common.speed) {
+ delay_ns = ratelimit_calculate_delay(&s->limit, n);
+ }
}
if (!base) {
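
[Editor's note] The commit and stream changes above share one restructuring: instead of charging the rate limiter inside the copy branch and jumping back to the wait: label until no delay is requested, each iteration charges the limiter once, after the copy, and the sleep at the top of the loop consumes whatever delay the previous iteration computed. A minimal sketch of the resulting control flow, assuming it is compiled inside the QEMU tree; job_sleep_ns() and copy_chunk() are hypothetical stand-ins for block_job_sleep_ns() and the populate helpers, not QEMU interfaces:

    #include <stdbool.h>
    #include <stdint.h>

    #include "qemu/ratelimit.h"

    extern void job_sleep_ns(uint64_t ns);            /* stand-in, not a QEMU API */
    extern int copy_chunk(int64_t sector_num, int n); /* stand-in, not a QEMU API */

    static void run_loop(RateLimit *limit, int64_t end, int n, bool rate_limited)
    {
        uint64_t delay_ns = 0;   /* hoisted out of the loop, as in the patch */
        int64_t sector_num;

        for (sector_num = 0; sector_num < end; sector_num += n) {
            /* Sleep even when delay_ns == 0 so that a pending
             * bdrv_drain_all() can complete. */
            job_sleep_ns(delay_ns);

            if (copy_chunk(sector_num, n) < 0) {
                return;
            }

            /* Charge the limiter after the work is done; an oversized
             * request merely extends the current time slice (see
             * ratelimit.h below), so no goto/retry loop is needed. */
            if (rate_limited) {
                delay_ns = ratelimit_calculate_delay(limit, n);
            }
        }
    }
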
--- a/include/qemu/ratelimit.h
+++ b/include/qemu/ratelimit.h
@@ ... @@
#define QEMU_RATELIMIT_H
typedef struct {
- int64_t next_slice_time;
+ int64_t slice_start_time;
+ int64_t slice_end_time;
uint64_t slice_quota;
uint64_t slice_ns;
uint64_t dispatched;
} RateLimit;
+/** Calculate and return delay for next request in ns
+ *
+ * Record that we sent @p n data units. If we may send more data units
+ * in the current time slice, return 0 (i.e. no delay). Otherwise
+ * return the amount of time (in ns) until the start of the next time
+ * slice that will permit sending the next chunk of data.
+ *
+ * Recording sent data units even after exceeding the quota is
+ * permitted; the time slice will be extended accordingly.
+ */
static inline int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
{
int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+ uint64_t delay_slices;
- if (limit->next_slice_time < now) {
- limit->next_slice_time = now + limit->slice_ns;
+ assert(limit->slice_quota && limit->slice_ns);
+
+ if (limit->slice_end_time < now) {
+ /* Previous, possibly extended, time slice finished; reset the
+ * accounting. */
+ limit->slice_start_time = now;
+ limit->slice_end_time = now + limit->slice_ns;
limit->dispatched = 0;
}
- if (limit->dispatched == 0 || limit->dispatched + n <= limit->slice_quota) {
- limit->dispatched += n;
+
+ limit->dispatched += n;
+ if (limit->dispatched < limit->slice_quota) {
+ /* We may send further data within the current time slice, no
+ * need to delay the next request. */
return 0;
- } else {
- limit->dispatched = n;
- return limit->next_slice_time - now;
}
+
+ /* Quota exceeded. Calculate the next time slice we may start
+ * sending data again. */
+ delay_slices = (limit->dispatched + limit->slice_quota - 1) /
+ limit->slice_quota;
+ limit->slice_end_time = limit->slice_start_time +
+ delay_slices * limit->slice_ns;
+ return limit->slice_end_time - now;
}
static inline void ratelimit_set_speed(RateLimit *limit, uint64_t speed,
uint64_t slice_ns)
{
limit->slice_ns = slice_ns;
- limit->slice_quota = ((double)speed * slice_ns)/1000000000ULL;
+ limit->slice_quota = MAX(((double)speed * slice_ns) / 1000000000ULL, 1);
}
#endif
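
[Editor's note] To make the slice-extension arithmetic concrete: with slice_ns = 100 ms and speed = 10 MB/s, ratelimit_set_speed() computes slice_quota = 1 MB per slice; a single 2.5 MB request then gives delay_slices = ceil(2.5 MB / 1 MB) = 3, so slice_end_time moves to slice_start_time + 300 ms and the caller is told to sleep until that point. A usage sketch under the same assumptions as the previous example (compiled inside the QEMU tree, job_sleep_ns() again a hypothetical stand-in for block_job_sleep_ns(); the 100 ms slice matches the SLICE_TIME constant the block jobs use):

    #include <stdint.h>

    #include "qemu/ratelimit.h"

    #define SLICE_TIME 100000000ULL            /* 100 ms in ns */

    extern void job_sleep_ns(uint64_t ns);     /* stand-in, not a QEMU API */

    static void throttled_transfer(uint64_t speed) /* bytes per second */
    {
        RateLimit limit = { 0 };
        uint64_t delay_ns = 0;

        /* slice_quota is clamped to at least 1, so the assert in
         * ratelimit_calculate_delay() holds even for very low speeds. */
        ratelimit_set_speed(&limit, speed, SLICE_TIME);

        for (int i = 0; i < 1000; i++) {
            job_sleep_ns(delay_ns);            /* 0 while under the slice quota */
            /* ... transfer one 64 KiB chunk here ... */
            delay_ns = ratelimit_calculate_delay(&limit, 64 * 1024);
        }
    }
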