/* must hold resource->req_lock */
static void start_new_tl_epoch(struct drbd_tconn *tconn)
{
+ /* no point closing an epoch if it is empty, anyway. */
+ if (tconn->current_tle_writes == 0)
+ return;
+
tconn->current_tle_writes = 0;
atomic_inc(&tconn->current_tle_nr);
wake_all_senders(tconn);
}
if (congested) {
- if (mdev->tconn->current_tle_writes)
- /* start a new epoch for non-mirrored writes */
- start_new_tl_epoch(mdev->tconn);
+ /* start a new epoch for non-mirrored writes */
+ start_new_tl_epoch(mdev->tconn);
if (on_congestion == OC_PULL_AHEAD)
_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
if (unlikely(req->i.size == 0)) {
/* The only size==0 bios we expect are empty flushes. */
D_ASSERT(req->master_bio->bi_rw & REQ_FLUSH);
- if (remote && mdev->tconn->current_tle_writes)
+ if (remote)
start_new_tl_epoch(mdev->tconn);
return 0;
}
/* which transfer log epoch does this belong to? */
req->epoch = atomic_read(&mdev->tconn->current_tle_nr);
- if (rw == WRITE)
- mdev->tconn->current_tle_writes++;
/* no point in adding empty flushes to the transfer log,
* they are mapped to drbd barriers already. */
- if (likely(req->i.size!=0))
+ if (likely(req->i.size!=0)) {
+ if (rw == WRITE)
+ mdev->tconn->current_tle_writes++;
+
list_add_tail(&req->tl_requests, &mdev->tconn->transfer_log);
+ }
if (rw == WRITE) {
if (!drbd_process_write_request(req))