spinlock_t epoch_lock;
unsigned int epochs;
enum write_ordering_e write_ordering;
+ atomic_t current_tle_nr; /* transfer log epoch number */
unsigned long last_reconnect_jif;
struct drbd_thread receiver;
INIT_LIST_HEAD(&b->requests);
INIT_LIST_HEAD(&b->w.list);
b->next = NULL;
- b->br_number = 4711;
+ b->br_number = atomic_inc_return(&tconn->current_tle_nr);
b->n_writes = 0;
b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
*/
void _tl_add_barrier(struct drbd_tconn *tconn, struct drbd_tl_epoch *new)
{
- struct drbd_tl_epoch *newest_before;
-
INIT_LIST_HEAD(&new->requests);
INIT_LIST_HEAD(&new->w.list);
new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
new->next = NULL;
new->n_writes = 0;
- newest_before = tconn->newest_tle;
- new->br_number = newest_before->br_number+1;
+ new->br_number = atomic_inc_return(&tconn->current_tle_nr);
if (tconn->newest_tle != new) {
tconn->newest_tle->next = new;
tconn->newest_tle = new;
list_splice(&carry_reads, &b->requests);
INIT_LIST_HEAD(&b->w.list);
b->w.cb = NULL;
- b->br_number = net_random();
+ b->br_number = atomic_inc_return(&tconn->current_tle_nr);
b->n_writes = 0;
*pn = b;
*/
if (mdev->state.conn >= C_CONNECTED &&
(s & RQ_NET_SENT) != 0 &&
- req->epoch == mdev->tconn->newest_tle->br_number)
+ req->epoch == atomic_read(&mdev->tconn->current_tle_nr))
queue_barrier(mdev);
}
* just after it grabs the req_lock */
D_ASSERT(test_bit(CREATE_BARRIER, &mdev->tconn->flags) == 0);
- req->epoch = mdev->tconn->newest_tle->br_number;
+ req->epoch = atomic_read(&mdev->tconn->current_tle_nr);
/* increment size of current epoch */
mdev->tconn->newest_tle->n_writes++;
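
For reference, the pattern this series switches to (handing out epoch numbers from one per-connection atomic counter instead of deriving them from newest_tle->br_number, 4711, or net_random()) can be sketched in userspace with C11 atomics. The struct and function names below are illustrative stand-ins, not the DRBD code itself; atomic_fetch_add()+1 and atomic_load() play the roles of the kernel's atomic_inc_return() and atomic_read().

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-ins for the connection and transfer-log epoch objects. */
struct conn {
	atomic_uint current_tle_nr;	/* transfer log epoch number */
};

struct tl_epoch {
	unsigned int br_number;		/* epoch (barrier) number */
	unsigned int n_writes;
};

/* Open a new epoch: take the next number from the shared counter,
 * mirroring b->br_number = atomic_inc_return(&tconn->current_tle_nr). */
static void epoch_init(struct conn *c, struct tl_epoch *e)
{
	e->br_number = atomic_fetch_add(&c->current_tle_nr, 1) + 1;
	e->n_writes = 0;
}

/* A request still belongs to the current epoch iff its recorded number
 * matches the counter, mirroring the atomic_read() comparison above. */
static int in_current_epoch(const struct conn *c, unsigned int req_epoch)
{
	return req_epoch == atomic_load(&c->current_tle_nr);
}

int main(void)
{
	struct conn c = { .current_tle_nr = 0 };
	struct tl_epoch e1, e2;

	epoch_init(&c, &e1);			/* e1.br_number == 1 */
	unsigned int req_epoch = e1.br_number;	/* request tagged with epoch 1 */

	epoch_init(&c, &e2);			/* e2.br_number == 2: new epoch opened */

	/* prints 0: the request's epoch is no longer the current one */
	printf("req in current epoch: %d\n", in_current_epoch(&c, req_epoch));
	return 0;
}

The point of the single counter is that numbering no longer depends on walking to the newest epoch object (or on magic seeds), and "is this request in the current epoch?" becomes a plain atomic_read() comparison that needs no access to the epoch list itself.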